-
Notifications
You must be signed in to change notification settings - Fork 1
/
SerialVersion.c
289 lines (234 loc) · 8.63 KB
/
SerialVersion.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/*Dong-Son Nguyen-Huu - 40014054
COMP 428
November 29, 2017
Assignment 3 - Floyd All-Pairs Shortest Path Algorithm - Row and Column-wise
one-to-all broadcasts
*/
#include "mpi.h"
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//TaskID that will identify the master task
#define MASTER 0
//Macro that returns the smaller of 2 number values (GNU statement expression; evaluates each argument exactly once)
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
//Map a linear task rank i onto grid coordinates in an n-wide grid:
//position[0] receives i % n (column), position[1] receives i / n (row).
void calculatePosition(int i, int n, int* position) {
    div_t coords = div(i, n);
    position[0] = coords.rem;
    position[1] = coords.quot;
}
//Convert 2-D coordinates (x = column, y = row) into the index of the
//corresponding element in a flat row-major array nodeNum elements wide.
int getIndex(int x, int y, int nodeNum) {
    int rowOffset = y * nodeNum;
    return rowOffset + x;
}
//Add 2 numbers together, treating INT_MAX as 'inf'.
//Returns 'inf' if either operand is 'inf', and also saturates to 'inf'
//when the sum of two large finite weights would exceed INT_MAX
//(previously a + b could overflow, which is undefined behavior for
//signed int and could produce a bogus negative path length).
int intAdd(int a, int b) {
    if (a == INT_MAX || b == INT_MAX) {
        return INT_MAX;
    }
    //Overflow guard: only possible when both operands are positive
    if (a > 0 && b > 0 && a > INT_MAX - b) {
        return INT_MAX;
    }
    return a + b;
}
//Entry point: distributed Floyd-Warshall over a sqrt(P) x sqrt(P) process
//grid. Master reads "input.txt", the graph is broadcast, each task relaxes
//its tile using row/column one-to-all broadcasts of the pivot slices, tiles
//are gathered back to the master, and the result is written to "output.txt".
int main(int argc, char *argv[]) {
    //taskID: this task's rank; numTasks: world size; numNodes: matrix dimension
    int taskID, numTasks, numNodes;
    //Set up MPI before any other MPI call: prior to MPI-3, MPI_Wtime is only
    //guaranteed to be callable between MPI_Init and MPI_Finalize, so the
    //timer must start after initialization (it previously started before)
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numTasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskID);
    //Mark the starting point of the timed region
    double begin = MPI_Wtime();
    //Create a random seed using "time"
    srand(time(NULL));
    //Tasks are arranged on a sqrtNumTasks x sqrtNumTasks grid
    int sqrtNumTasks = sqrt(numTasks);
    int numGraphVals = 0;
    int* graphVal = NULL;
    //Only the master task reads the weighted graph from disk
    if (taskID == MASTER) {
        //Scratch value for the counting pass
        int num;
        //Open the file that contains the weighted graph to be computed
        FILE *file = fopen("input.txt", "r");
        if (file == NULL) {
            fprintf(stderr, "Error: cannot open input.txt\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        //First pass: count the tokens in the file to size the array.
        //Loop on the fscanf result rather than feof(): the previous
        //while(!feof(...)) pattern counted one extra element whenever the
        //file ended in trailing whitespace.
        int scanned;
        while ((scanned = fscanf(file, "%d", &num)) != EOF) {
            //A non-integer token marks an 'inf' edge: consume one character
            if (scanned != 1) {
                char ch;
                if (fscanf(file, "%c", &ch) != 1) {
                    break;
                }
            }
            numGraphVals++;
        }
        //The adjacency matrix is square: node count = sqrt(value count)
        numNodes = sqrt(numGraphVals);
        //Allocate one int per value in the graph
        graphVal = malloc(numGraphVals * sizeof *graphVal);
        if (graphVal == NULL) {
            fprintf(stderr, "Error: out of memory\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        //Second pass: rewind and store the values
        rewind(file);
        int num2 = 0;
        while (num2 < numGraphVals &&
               (scanned = fscanf(file, "%d", &graphVal[num2])) != EOF) {
            if (scanned != 1) {
                //Populate non-integer cells with an infinity
                graphVal[num2] = INT_MAX;
                char ch;
                if (fscanf(file, "%c", &ch) != 1) {
                    break;
                }
            }
            num2++;
        }
        //Close the file now that both passes are done
        fclose(file);
    }
    //Broadcast the sizes so every task can allocate its copy of the graph
    MPI_Bcast(&numGraphVals, 1, MPI_INT, MASTER, MPI_COMM_WORLD);
    MPI_Bcast(&numNodes, 1, MPI_INT, MASTER, MPI_COMM_WORLD);
    //Non-master tasks allocate space for the incoming graph values
    if (taskID != MASTER) {
        graphVal = malloc(numGraphVals * sizeof *graphVal);
        if (graphVal == NULL) {
            fprintf(stderr, "Error: out of memory\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    }
    //Broadcast the graph values to all tasks
    MPI_Bcast(graphVal, numGraphVals, MPI_INT, MASTER, MPI_COMM_WORLD);
    //Each task owns a rowColNum x rowColNum tile of the matrix
    int rowColNum = numNodes / sqrtNumTasks;
    //startIndex[d] = first global row/column index owned by grid coordinate d
    int startIndex[sqrtNumTasks];
    startIndex[0] = 0;
    //BUG FIX: num3 was previously used without ever being declared
    int num3;
    for (num3 = 1; num3 < sqrtNumTasks; num3++) {
        startIndex[num3] = startIndex[num3 - 1] + rowColNum;
    }
    //dimension[k] = grid coordinate of the process row/column that owns index k
    int dimension[numNodes];
    int currentDimension = 0;
    int num4;
    for (num4 = 0; num4 < numNodes; num4++) {
        //Advance to the next grid coordinate at each tile boundary
        if (startIndex[currentDimension] + rowColNum == num4) {
            currentDimension++;
        }
        dimension[num4] = currentDimension;
    }
    //(position[0], position[1]) = this task's (column, row) on the grid
    int position[2];
    calculatePosition(taskID, sqrtNumTasks, position);
    //Split the world into per-row and per-column communicators so that
    //one-to-all broadcasts stay within a single process row or column
    MPI_Comm rowComms, colComms;
    MPI_Comm_split(MPI_COMM_WORLD, position[1], position[0], &rowComms);
    MPI_Comm_split(MPI_COMM_WORLD, position[0], position[1], &colComms);
    //k: current pivot node; xVal/yVal: local tile coordinates
    int k, xVal, yVal;
    for (k = 0; k < numNodes; k++) {
        //Grid coordinate that owns row/column k for this iteration
        int kFound = dimension[k];
        //Buffers for the broadcast slices of row k and column k
        int* rowBuffer = malloc(rowColNum * sizeof *rowBuffer);
        int* colBuffer = malloc(rowColNum * sizeof *colBuffer);
        //Tasks in the owning process row fill the row-k slice ...
        if (position[1] == kFound) {
            int num5;
            for (num5 = 0; num5 < rowColNum; num5++) {
                int index = getIndex(startIndex[position[0]] + num5, k, numNodes);
                rowBuffer[num5] = graphVal[index];
            }
        }
        //... then broadcast it down each process column
        MPI_Bcast(rowBuffer, rowColNum, MPI_INT, kFound, colComms);
        //Tasks in the owning process column fill the column-k slice ...
        if (position[0] == kFound) {
            int num6;
            for (num6 = 0; num6 < rowColNum; num6++) {
                int index = getIndex(k, startIndex[position[1]] + num6, numNodes);
                colBuffer[num6] = graphVal[index];
            }
        }
        //... then broadcast it across each process row
        MPI_Bcast(colBuffer, rowColNum, MPI_INT, kFound, rowComms);
        //Relax every cell of the local tile through intermediate node k
        for (xVal = 0; xVal < rowColNum; xVal++) {
            for (yVal = 0; yVal < rowColNum; yVal++) {
                int x = xVal + startIndex[position[0]];
                int y = yVal + startIndex[position[1]];
                //Skip the pivot row/column and the diagonal
                if (x == k || y == k || x == y) {
                    continue;
                }
                //Keep the shorter of the current path and the path through k
                graphVal[getIndex(x, y, numNodes)] = min(graphVal[getIndex(x, y, numNodes)], intAdd(rowBuffer[xVal], colBuffer[yVal]));
            }
        }
        //Deallocate per-iteration buffers
        free(rowBuffer);
        free(colBuffer);
    }
    //Workers send their finished tile back to the master
    if (taskID != MASTER) {
        int sendBufferSize = rowColNum * rowColNum;
        int* sendBuffer = malloc(sendBufferSize * sizeof *sendBuffer);
        //j iterates tile columns, k iterates tile rows
        int j, k;
        for (j = 0; j < rowColNum; j++) {
            for (k = 0; k < rowColNum; k++) {
                sendBuffer[j + (k * rowColNum)] = graphVal[getIndex(startIndex[position[0]] + j, startIndex[position[1]] + k, numNodes)];
            }
        }
        //Use MPI to send the buffer to the master
        MPI_Send(sendBuffer, sendBufferSize, MPI_INT, MASTER, 0, MPI_COMM_WORLD);
        free(sendBuffer);
    }
    //Master collects every tile and writes the result matrix
    else {
        int i;
        for (i = 1; i < numTasks; i++) {
            //Grid coordinates of the sending task
            int process_position[2];
            calculatePosition(i, sqrtNumTasks, process_position);
            //Receive the task's tile
            int procArraySz = rowColNum * rowColNum;
            int* procArray = malloc(procArraySz * sizeof *procArray);
            MPI_Recv(procArray, procArraySz, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            //Copy the received tile into its place in the full matrix
            int j, k;
            for (j = 0; j < rowColNum; j++) {
                for (k = 0; k < rowColNum; k++) {
                    graphVal[getIndex(startIndex[process_position[0]] + j, startIndex[process_position[1]] + k, numNodes)] = procArray[j + (k * rowColNum)];
                }
            }
            free(procArray);
        }
        //Store the final distance matrix, one matrix row per output line
        FILE *file = fopen("output.txt", "w");
        if (file == NULL) {
            fprintf(stderr, "Error: cannot open output.txt for writing\n");
        }
        else {
            int j, k;
            for (j = 0; j < numNodes; j++) {
                for (k = 0; k < numNodes; k++) {
                    fprintf(file, "%d\t", graphVal[getIndex(k, j, numNodes)]);
                }
                fprintf(file, "\n");
            }
            //Close the file to flush the output
            fclose(file);
        }
    }
    //Release the communicators before shutting MPI down
    MPI_Comm_free(&rowComms);
    MPI_Comm_free(&colComms);
    //Capture the stop time BEFORE MPI_Finalize: prior to MPI-3, MPI_Wtime
    //may not be called after finalization (previously it was)
    double stop = MPI_Wtime();
    MPI_Finalize();
    if (taskID == MASTER) {
        printf("Run time in ms: %f\n", (stop - begin) * 1000);
    }
    free(graphVal);
    return 0;
}