Merge branch 'charm' of charmgit:charm into charm
[charm.git] / src / ck-com / AAMLearner.C
1 // #ifdef filippo
2
3 // #include "AAMLearner.h"
4 // #include "ComlibManager.h"
5
6 // #include "EachToManyMulticastStrategy.h"
7 // //#include "RingMulticastStrategy.h"
8
9 // AAMLearner::AAMLearner() {
10 //    init();
11 // }
12
13 // void AAMLearner::init() {
14 //     alpha = ALPHA;
15 //     beta = BETA;
16 //     gamma = GAMMA;
17 // }
18
19 // Strategy *AAMLearner::optimizePattern(Strategy *strat, 
20 //                                            ComlibGlobalStats &stats) {
21 //     CharmStrategy *in_strat = (CharmStrategy *)strat;
22 //     double npes;              //, *pelist;
23 //     CharmStrategy *ostrat = NULL;
24
25 //     double degree = 0, msgsize = 0, nmsgs = 0;
26 //     stats.getAverageStats(strat->getInstance(), msgsize, nmsgs, 
27 //                           degree, npes);
28
29 //     double dcost = computeDirect(npes, msgsize, degree);
30 //     double mcost = computeMesh(npes, msgsize, degree);
31 //     double gcost = computeGrid(npes, msgsize, degree);
32 //     double hcost = computeHypercube(npes, msgsize, degree);
33 //     double mincost = min4(dcost, mcost, gcost, hcost);
34
35 //     int minstrat = -1;
36 //     if(in_strat->getType() == ARRAY_STRATEGY) {
37 //         CkArrayID said, daid;
38 //         CkArrayIndexMax *sidxlist, *didxlist;
39 //         int nsrc, ndest;
40         
41 //         in_strat->ainfo.getSourceArray(said, sidxlist, nsrc);
42 //         in_strat->ainfo.getDestinationArray(daid, didxlist, ndest);
43                
44 //         if(dcost == mincost) 
45 //             minstrat = USE_DIRECT;        
46         
47 //         else if(mcost == mincost) 
48 //             minstrat = USE_MESH;                
49 //         else if(gcost == mincost) 
50 //             minstrat = USE_GRID;
51 //         else if(hcost == mincost) 
52 //             minstrat = USE_HYPERCUBE;               
53
54 //         //CkPrintf("Choosing router %d, %g, %g, %g\n", minstrat, 
55 //         //       mcost, hcost, dcost);
56         
57 //         //if(minstrat != USE_DIRECT) {
58 //         ostrat = new EachToManyMulticastStrategy
59 //             (minstrat, said, daid,
60 //              nsrc, sidxlist, ndest,
61 //              didxlist);
62         
63 //         ostrat->setMulticast();
64
65 //         /*
66 //           }        
67 //           else {
68 //           ostrat = new RingMulticastStrategy(said, daid);
69           
70 //           }
71 //         */
72         
73 //         ostrat->setInstance(in_strat->getInstance());
74 //         ((EachToManyMulticastStrategy *)ostrat)->enableLearning();
75 //     }
76 //     else
77 //         CkAbort("Groups Not Implemented Yet\n");
78
// 79 //Group strategy: to be implemented later (see CkAbort above)
80     
81 //     return ostrat;
82 // }
83
84 // //P = number of processors, m = msgsize, d = degree
85 // double AAMLearner::computeDirect(double P, double m, double d) {
86 //     double cost = 0.0;
87 //     cost = d * alpha;
88 //     cost += d * m * beta;
89     
90 //     return cost;
91 // }
92
// 93 // /******* TODO: verify the cost-model equations below for AAM *******/
94 // //P = number of processors, m = msgsize, d = degree
95 // double AAMLearner::computeMesh(double P, double m, double d) {
96 //     double cost = 0.0;
97 //     cost = 2 * sqrt((double) P) * alpha;
98 //     cost += d * m * (beta + gamma);
99     
100 //     return cost;
101 // }
102
103 // //P = number of processors, m = msgsize, d = degree
104 // double AAMLearner::computeHypercube(double P, double m, double d) {
105
106 //     if(P == 0)
107 //         return 0;
108
109 //     double cost = 0.0;
110 //     double log_2_P = log(P)/log(2.0);
111     
112 //     cost = log_2_P * alpha;
113 //     cost += d * m * (beta + gamma);
114
115 //     return cost;
116 // }
117
118 // //P = number of processors, m = msgsize, d = degree
119 // double AAMLearner::computeGrid(double P, double m, double d) {
120
121 //     double cost = 0.0;
122 //     cost = 3 * cubeRoot((double) P) * alpha;
123 //     cost += d * m * (beta + gamma);
124     
125 //     return cost;
126 // }
127
128 // #endif