/* test-setup.c */
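/*
 * Minimal driver used to measure the setup time of the distributed solver:
 * it initialises the library with fd_init() and then runs only the solver
 * setup phase.  DISTRIBUTED_SOLVER, SPLITGO and SPLITGO_MPI are compile-time
 * options selecting which solver variant is built.
 */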
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#include "fdc_int.h"

/* forward declarations (these may also be provided by fdc_int.h) */
int fd_solve_setup(void);
#ifdef SPLITGO
int _fd_dsolve_setup(void);
#endif

int main(int argc, char *argv[])
{
    fd_init(&argc, &argv);

    return fd_solve_setup();
}
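
/*
 * fd_solve_setup: in a DISTRIBUTED_SOLVER build, run the setup phase of the
 * distributed solver; otherwise there is no distributed setup to measure.
 */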
int fd_solve_setup()
{
#ifdef DISTRIBUTED_SOLVER
# ifdef SPLITGO
    return _fd_dsolve_setup();
# else
#  error "not ready to measure setup time of this distributed solver"
# endif
#else
    return 0;
#endif
}
#ifdef SPLITGO

#ifdef SPLITGO_MPI
#include <mpi.h>
#endif

#define MAX_AGENTS 256
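
/*
 * Start routine for each agent thread.  It is a no-op stub: only the cost of
 * creating the agents is of interest here.
 */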
int _fd_agent_setup()
{
    return 0;
}
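
/*
 * Distributed solver setup: determine the number of agent threads (default 4,
 * overridable through the FDC_AGENTS environment variable), obtain the MPI
 * rank when built with SPLITGO_MPI, create the agent threads, and then tear
 * everything down again.
 */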
int _fd_dsolve_setup()
{
    int tid;
    pthread_t threads[MAX_AGENTS];
    int nagents = 4;
    int i;
    int processes, live_procs;
    char *s;

    /* allow the number of agents to be overridden from the environment */
    if ((s = getenv("FDC_AGENTS")) != NULL)
    {
        int n = atoi(s);

        if (0 <= n && n <= MAX_AGENTS)
            nagents = n;
    }

#ifdef SPLITGO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &tid);
#if 0
    if (tid == 0)
        MPI_Comm_size(MPI_COMM_WORLD, &processes);
#endif
#endif

    // _fd_init_store_depository(nagents);

    for (i = 0; i < nagents; ++i)
        pthread_create(&threads[i], NULL,
                       (void *(*)(void *)) _fd_agent_setup, NULL);
#if 0
#ifdef SPLITGO_MPI
    if (tid == 0)
    {
        live_procs = processes;
        while (--live_procs)
            MPI_Recv(NULL, 0, MPI_CHAR, MPI_ANY_SOURCE, 0xdead, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
    }
    else
        MPI_Send(NULL, 0, MPI_CHAR, 0, 0xdead, MPI_COMM_WORLD);
#endif
#endif

    /* request cancellation of the agent threads again */
    for (i = 0; i < nagents; ++i)
        pthread_cancel(threads[i]);

#ifdef SPLITGO_MPI
    // MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();       /* MPI_Init() is presumably performed in fd_init() */
#endif

    return FD_OK;
}

#endif /* SPLITGO */