 
 int main(int argc, char *argv[]) {
 
-    int message[MESSAGE_LENGTH];
-    int send_partitions = PARTITIONS;
-    int send_partlength = PARTLENGTH;
-    int recv_partitions = 1;
-    int recv_partlength = MESSAGE_LENGTH;
-
-    int count = 1, source = 0, dest = 1, tag = 1, flag = 0;
-    int i, j;
-    int myrank;
-    int provided;
-    int my_thread_id;
-
-    MPI_Request request;
-    MPI_Status status;
-    MPI_Datatype send_type;
-    MPI_Info info = MPI_INFO_NULL;
-    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
-    if (provided < MPI_THREAD_MULTIPLE) MPI_Abort(MPI_COMM_WORLD , EXIT_FAILURE);
-    MPI_Comm_rank(MPI_COMM_WORLD , &myrank);
-
-    /* Sender uses this datatype */
-    MPI_Type_contiguous(send_partlength, MPI_INT, &send_type);
-    MPI_Type_commit(&send_type);
-
-    if (myrank == 0) {
-
-        for (i = 0; i < send_partitions * send_partlength; ++i) message[i] = 100;
-
-        MPI_Psend_init(message, send_partitions, count, send_type, dest, tag, MPI_COMM_WORLD, info, &request);
-        MPI_Start(&request);
-
-        #pragma omp parallel shared(request,message) private(i, my_thread_id) num_threads(NUM_THREADS)
-        {
-            #pragma omp single
-            {
-                /* single thread creates 64 tasks to be executed by 8 threads */
-                for (int task_num=0; task_num < NUM_TASKS; task_num++) {
-                    #pragma omp task firstprivate(task_num)
-                    {
-                        my_thread_id = omp_get_thread_num();
-                        for (i=0; i < send_partlength; ++i) {
-                            message[i + (task_num * send_partlength)] = i + (task_num * send_partlength);
+    int message[MESSAGE_LENGTH];
+    int send_partitions = PARTITIONS;
+    int send_partlength = PARTLENGTH;
+    int recv_partitions = 1;
+    int recv_partlength = MESSAGE_LENGTH;
+
+    int count = 1, source = 0, dest = 1, tag = 1, flag = 0;
+    int i, j;
+    int myrank;
+    int provided;
+    int my_thread_id;
+
+    MPI_Request request;
+    MPI_Status status;
+    MPI_Datatype send_type;
+    MPI_Info info = MPI_INFO_NULL;
+    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
+    if (provided < MPI_THREAD_MULTIPLE) MPI_Abort(MPI_COMM_WORLD , EXIT_FAILURE);
+    MPI_Comm_rank(MPI_COMM_WORLD , &myrank);
+
+    /* Sender uses this datatype */
+    MPI_Type_contiguous(send_partlength, MPI_INT, &send_type);
+    MPI_Type_commit(&send_type);
+
+    if (0 == myrank) {
+
+        for (i = 0; i < send_partitions * send_partlength; ++i) message[i] = 100;
+
+        MPI_Psend_init(message, send_partitions, count, send_type, dest, tag, MPI_COMM_WORLD, info, &request);
+        MPI_Start(&request);
+
+#pragma omp parallel shared(request,message) private(i, my_thread_id) num_threads(NUM_THREADS)
+        {
+#pragma omp single
+            {
+                /* single thread creates 64 tasks to be executed by 8 threads */
+                for (int task_num=0; task_num < NUM_TASKS; task_num++) {
+#pragma omp task firstprivate(task_num)
+                    {
+                        my_thread_id = omp_get_thread_num();
+                        for (i=0; i < send_partlength; ++i) {
+                            message[i + (task_num * send_partlength)] = i + (task_num * send_partlength);
+                        }
+                        MPI_Pready(task_num, request);
+                    } /* end task */
+                } /* end for */
+            } /* end single */
+        } /* end parallel */
+
+        while(!flag) {
+            MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
+        }
+
+        MPI_Request_free(&request);
+
+    } else if (1 == myrank) {
+        for (i = 0; i < recv_partitions * recv_partlength; ++i) message[i] = 101;
+
+        MPI_Precv_init(message, recv_partitions, recv_partlength, MPI_INT, source, tag, MPI_COMM_WORLD, info, &request);
+        MPI_Start(&request);
+
+        while(!flag) {
+            MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
+        }
+
+        MPI_Request_free(&request);
+
+        /* all partitions received; check contents */
+        for (i = 0; i < MESSAGE_LENGTH; ++i) {
+            if (message[i] != i) {
+                fprintf(stderr, "ERROR: Contents received do not match contents sent (expected %d, found %d).\n",i,message[i]);
+                MPI_Abort(MPI_COMM_WORLD, 1);
             }
-                        MPI_Pready(task_num, request);
-                    } /* end task */
-                } /* end for */
-            } /* end single */
-        } /* end parallel */
-
-        while(!flag) {
-            MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
+        }
     }
 
-        MPI_Request_free(&request);
+    MPI_Barrier(MPI_COMM_WORLD);
+    if (0 == myrank) {TEST_RAN_TO_COMPLETION();}
 
-    } else if (myrank == 1) {
-        for (i = 0; i < recv_partitions * recv_partlength; ++i) message[i] = 101;
-
-        MPI_Precv_init(message, recv_partitions, recv_partlength, MPI_INT, source, tag, MPI_COMM_WORLD, info, &request);
-        MPI_Start(&request);
-
-        while(!flag) {
-            MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
-        }
-
-        MPI_Request_free(&request);
-
-        /* all partitions received; check contents */
-        for (i = 0; i < MESSAGE_LENGTH; ++i) {
-            if (message[i] != i) {
-                fprintf(stderr, "ERROR: Contents received do not match contents sent (expected %d, found %d).\n",i,message[i]);
-                MPI_Abort(MPI_COMM_WORLD, 1);
-            }
-        }
-    }
-
-    MPI_Barrier(MPI_COMM_WORLD);
-    if (myrank == 0) {TEST_RAN_TO_COMPLETION();}
-
-    MPI_Finalize ();
-    return 0;
-
+    MPI_Finalize();
+    return 0;
 }
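
A side note on the receive path shown above: rather than spinning on MPI_Test for the whole request, an MPI 4.0 receiver can also poll individual partitions with MPI_Parrived and consume each one as soon as it lands. A minimal sketch of that pattern, assuming the same started request, recv_partitions, and message buffer as in the code above:

/* Hypothetical sketch: poll each receive partition with MPI_Parrived so a
 * partition can be consumed as soon as it arrives, before the whole
 * partitioned receive completes. Assumes request was created by
 * MPI_Precv_init and activated with MPI_Start as in the example. */
for (int p = 0; p < recv_partitions; ++p) {
    int arrived = 0;
    while (!arrived) {
        MPI_Parrived(request, p, &arrived);   /* has partition p landed? */
    }
    /* partition p of message[] is now valid and may be read here */
}
MPI_Wait(&request, MPI_STATUS_IGNORE);        /* complete the whole receive */

With recv_partitions set to 1, as in this example, the loop simply waits for the single partition; the pattern becomes more useful when the receiver declares multiple partitions and overlaps per-partition processing with the remaining transfers.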