MLAPI_MultiLevelAdaptiveSA.h

00001 #ifndef MLAPI_MULTILEVELADAPTIVESA_H
00002 #define MLAPI_MULTILEVELADAPTIVESA_H
00003 
00013 /* ******************************************************************** */
00014 /* See the file COPYRIGHT for a complete copyright notice, contact      */
00015 /* person and disclaimer.                                               */        
00016 /* ******************************************************************** */
00017 
00018 #include "ml_common.h"
00019 #include "ml_agg_genP.h"
00020 #include "MLAPI_Error.h"
00021 #include "MLAPI_CompObject.h"
00022 #include "MLAPI_TimeObject.h"
00023 #include "MLAPI_Operator.h"
00024 #include "MLAPI_Operator_Utils.h"
00025 #include "MLAPI_MultiVector.h"
00026 #include "MLAPI_MultiVector_Utils.h"
00027 #include "MLAPI_InverseOperator.h"
00028 #include "MLAPI_Expressions.h"
00029 #include "MLAPI_BaseOperator.h"
00030 #include "MLAPI_Workspace.h"
00031 #include "MLAPI_Aggregation.h"
00032 #include "MLAPI_Eig.h"
00033 #include <vector>
00034 
00035 namespace MLAPI {
00036 
00091 class MultiLevelAdaptiveSA : public BaseOperator, public CompObject, public TimeObject {
00092 
00093 public:
00094 
00095   // @{ \name Constructors and destructors
00096 
00098   MultiLevelAdaptiveSA(const Operator FineMatrix, Teuchos::ParameterList& List,
00099                        const int NumPDEEqns, const int MaxLevels = 20) :
00100     IsComputed_(false)
00101   {
00102     FineMatrix_ = FineMatrix;
00103     List_ = List;
00104     MaxLevels_  = MaxLevels;
00105     SetInputNumPDEEqns(NumPDEEqns);
00106     SetNumPDEEqns(NumPDEEqns);
00107 
00108     ResizeArrays(MaxLevels);
00109     A(0) = FineMatrix;
00110   }
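  // A minimal usage sketch (illustrative only): it assumes that a fine-level
  // Operator A and a Teuchos::ParameterList MLList have been built elsewhere,
  // and that the MLAPI workspace has already been initialized.
  //
  //   MultiLevelAdaptiveSA Prec(A, MLList, NumPDEEqns);
  //   Prec.Compute();                      // build the SA hierarchy
  //
  //   MultiVector LHS(A.GetDomainSpace()); // solution vector, start from zero
  //   MultiVector RHS(A.GetDomainSpace()); // arbitrary right-hand side
  //   LHS = 0.0;
  //   RHS.Random();
  //   Prec.Apply(RHS, LHS);                // apply one multilevel cycle to RHS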
00111 
00113   virtual ~MultiLevelAdaptiveSA()
00114   { }
00115 
00116   // @}
00117   // @{ \name Set and Get methods
00118 
00120   const Space GetOperatorDomainSpace() const 
00121   {
00122     return(FineMatrix_.GetDomainSpace());
00123   }
00124 
00126   const Space GetOperatorRangeSpace() const 
00127   {
00128     return(FineMatrix_.GetRangeSpace());
00129   }
00130 
00132   inline const Space GetDomainSpace() const 
00133   {
00134     return(FineMatrix_.GetDomainSpace());
00135   }
00136 
00138   inline const Space GetRangeSpace() const 
00139   {
00140     return(FineMatrix_.GetRangeSpace());
00141   }
00142 
00144   inline Operator& R(const int i) 
00145   {
00146     return(R_[i]);
00147   }
00148 
00150   inline const Operator& R(const int i) const
00151   {
00152     return(R_[i]);
00153   }
00154 
00156   inline Operator& A(const int i)
00157   {
00158     return(A_[i]);
00159   }
00160 
00162   inline const Operator& A(const int i) const
00163   {
00164     return(A_[i]);
00165   }
00166 
00168   inline Operator& P(const int i)
00169   {
00170     return(P_[i]);
00171   }
00172 
00174   inline const Operator& P(const int i) const
00175   {
00176     return(P_[i]);
00177   }
00178 
00180   inline InverseOperator& S(const int i)
00181   {
00182     return(S_[i]);
00183   }
00184 
00186   inline const InverseOperator& S(const int i) const
00187   {
00188     return(S_[i]);
00189   }
00190 
00192   inline int GetMaxLevels() const
00193   {
00194     return(MaxLevels_);
00195   }
00196 
00198   inline void SetMaxLevels(const int MaxLevels)
00199   {
00200     MaxLevels_ = MaxLevels;
00201   }
00202 
00204   inline const MultiVector GetNullSpace() const
00205   {
00206     return(NullSpace_);
00207   }
00208 
00210   inline void SetNullSpace(MultiVector& NullSpace)
00211   {
00212     NullSpace_ = NullSpace;
00213   }
00214 
00216   inline bool IsComputed() const
00217   {
00218     return(IsComputed_);
00219   }
00220 
00222   inline void SetList(Teuchos::ParameterList& List)
00223   {
00224     List_ = List;
00225   }
00226 
00228   inline string GetSmootherType()
00229   {
00230     return(List_.get("smoother: type", "symmetric Gauss-Seidel"));
00231   }
00232 
00234   inline string GetCoarseType() 
00235   {
00236     return(List_.get("coarse: type", "Amesos-KLU"));
00237   }
00238 
00240   inline void SetInputNumPDEEqns(const int n) 
00241   {
00242     NumPDEEqns_ = n;
00243   }
00244 
00246   inline int GetInputNumPDEEqns() 
00247   {
00248     return(NumPDEEqns_);
00249   }
00250 
00252   inline int GetNumPDEEqns() 
00253   {
00254     return(List_.get("PDE equations", 1));
00255   }
00256 
00257   inline void SetNumPDEEqns(const int NumPDEEqns)
00258   {
00259     List_.set("PDE equations", NumPDEEqns);
00260   }
00261 
00263   inline int GetMaxCoarseSize()
00264   {
00265     return(List_.get("coarse: max size", 32));
00266   }
00267 
00269   inline double GetMaxReduction()
00270   {
00271     return(List_.get("adapt: max reduction", 0.1));
00272   }
00273 
00275   inline int GetNumItersCoarse()
00276   {
00277     return(List_.get("adapt: iters coarse", 5));
00278   }
00279 
00281   inline int GetNumItersFine()
00282   {
00283     return(List_.get("adapt: iters fine", 15));
00284   }
00285 
00287   double GetComplexity()
00288   {
00289     double nnzFine = A_[0].GetNumGlobalNonzeros();
00290     double nnzTotal = nnzFine;
00291     for (int i = 1; i < GetMaxLevels(); i++) {
00292       nnzTotal += A_[i].GetNumGlobalNonzeros();
00293     }
00294     return nnzTotal / nnzFine;
00295   }
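  // The accessors above simply read from the Teuchos::ParameterList given at
  // construction (or via SetList()). A sketch of a list that sets the
  // parameters referenced in this class, with illustrative values matching
  // the defaults used above (not tuning recommendations):
  //
  //   Teuchos::ParameterList MLList;
  //   MLList.set("PDE equations", 1);
  //   MLList.set("smoother: type", "symmetric Gauss-Seidel");
  //   MLList.set("smoother: sweeps", 1);
  //   MLList.set("coarse: type", "Amesos-KLU");
  //   MLList.set("coarse: max size", 32);
  //   MLList.set("adapt: max reduction", 0.1);
  //   MLList.set("adapt: iters coarse", 5);
  //   MLList.set("adapt: iters fine", 15);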
00296 
00297 
00298   // @}
00299   // @{ \name Hierarchy construction methods
00300 
00301   // ====================================================================== 
00303   // ====================================================================== 
00304   void Compute() 
00305   {
00306 
00307     ResetTimer();
00308     StackPush();
00309     IsComputed_ = false;
00310 
00311     // reset the number of PDE equations to the value given at construction
00312     SetNumPDEEqns(GetInputNumPDEEqns());
00313     
00314     // retrieve the null space
00315     MultiVector ThisNS = GetNullSpace();
00316 
00317     if (GetPrintLevel()) {
00318       cout << endl;
00319       ML_print_line("-", 80);
00320       cout << "Computing the hierarchy, input null space dimension = "
00321            << ThisNS.GetNumVectors() << endl;
00322     }
00323 
00324     // build up the default null space
00325     if (ThisNS.GetNumVectors() == 0) {
00326       ThisNS.Reshape(FineMatrix_.GetDomainSpace(),GetNumPDEEqns());
00327       ThisNS = 0.0;
00328       for (int i = 0 ; i < ThisNS.GetMyLength() ; ++i)
00329         for (int j = 0 ; j < GetNumPDEEqns() ;++j)
00330           if (i % GetNumPDEEqns() == j)
00331             ThisNS(i,j) = 1.0;
00332 
00333       SetNullSpace(ThisNS);
00334     }
00335 
00336     MultiVector NextNS;     // contains the next-level null space
00337 
00338     // work on increasing hierarchies only.
00339     A(0) = FineMatrix_;
00340 
00341     int level;
00342 
00343     for (level = 0 ; level < GetMaxLevels() - 1 ; ++level) {
00344 
00345       if (GetPrintLevel()) ML_print_line("-", 80);
00346 
00347       if (level)
00348         SetNumPDEEqns(ThisNS.GetNumVectors());
00349 
00350       // load current level into database
00351       List_.set("workspace: current level", level);
00352 
00353       GetSmoothedP(A(level), List_, ThisNS, P(level), NextNS);
00354       ThisNS = NextNS;
00355 
00356       R(level) = GetTranspose(P(level));
00357       A(level + 1) = GetRAP(R(level),A(level),P(level));
00358       S(level).Reshape(A(level), GetSmootherType(), List_);
00359 
00360       // break if the coarse matrix is at or below the maximum coarse size
00361       if (A(level + 1).GetNumGlobalRows() <= GetMaxCoarseSize()) {
00362         ++level;
00363         break;
00364       }
00365     }
00366 
00367     // set coarse solver
00368     S(level).Reshape(A(level), GetCoarseType(), List_);
00369     SetMaxLevels(level + 1);
00370 
00371     // set the label
00372     SetLabel("SA, L = " + GetString(GetMaxLevels()) +
00373              ", smoother = " + GetSmootherType());
00374 
00375     if (GetPrintLevel()) ML_print_line("-", 80);
00376 
00377     IsComputed_ = true;
00378 
00379     StackPop();
00380     // FIXME: update flops!
00381     UpdateTime();
00382 
00383   }
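  // In compact form, Compute() builds the usual smoothed-aggregation
  // hierarchy: for l = 0, 1, ...
  //
  //   P_l     = smoothed prolongator built by GetSmoothedP(A_l, null space)
  //   R_l     = P_l^T
  //   A_{l+1} = R_l A_l P_l
  //
  // stopping as soon as A_{l+1} has no more than "coarse: max size" rows (or
  // MaxLevels_ is reached); the coarsest level then receives the solver
  // selected by "coarse: type".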
00384 
00385   // ====================================================================== 
00387   /* Computes the multilevel hierarchy as specified by the user.
00388    *
00389    * \param UseDefaultOrSpecified - (In) if \c true, the first call to Compute()
00390    *                     uses either the default null space or the null space
00391    *                     that has been set using SetNullSpace().
00392    *                     If \c false, then one null space component is 
00393    *                     computed using SetupInitialNullSpace().
00394    *
00395    * \param AdditionalCandidates - (In) Number of additional candidates,
00396    *                     that is, the number of null space components that
00397    *                     will be computed adaptively. If
00398    *                     \c "UseDefaultOrSpecified == false", the code computes
00399    *                     one additional candidate using
00400    *                     SetupInitialNullSpace(), and the remaining ones using
00401    *                     IncrementNullSpace().
00402    */
00403   // time is tracked within each method.
00404   // ====================================================================== 
00405   void AdaptCompute(const bool UseDefaultOrSpecified, int AdditionalCandidates)
00406   {
00407 
00408     StackPush();
00409 
00410     if (UseDefaultOrSpecified) 
00411       Compute();
00412     else {
00413       SetupInitialNullSpace();
00414       Compute();
00415       AdditionalCandidates--;
00416     }
00417 
00418     for (int i = 0 ; i < AdditionalCandidates ; ++i) {
00419       IncrementNullSpace();
00420       Compute();
00421     }
00422 
00423     StackPop();
00424   }
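  // A sketch of the adaptive setup (illustrative only), continuing the usage
  // example given after the constructor:
  //
  //   // start from the default (or user-supplied) null space, then add two
  //   // adaptively computed candidates; each increment rebuilds the hierarchy
  //   Prec.AdaptCompute(true, 2);
  //   Prec.Apply(RHS, LHS);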
00425 
00426   // ====================================================================== 
00428   // ====================================================================== 
00429   void SetupInitialNullSpace() 
00430   {
00431     ResetTimer();
00432     StackPush();
00433 
00434     SetNumPDEEqns(GetInputNumPDEEqns());
00435 
00436     if (GetPrintLevel()) {
00437       cout << endl;
00438       ML_print_line("-", 80);
00439       cout << "Computing the first null space component" << endl;
00440     }
00441 
00442     MultiVector NS(A_[0].GetDomainSpace());
00443     MultiVector NewNS;
00444     for (int v = 0 ; v < NS.GetNumVectors() ; ++v)
00445       NS.Random(v);
00446 
00447     NS = (NS + 1.0) / 2.0;
00448 
00449     // zero out everything except the first DOF on every node
00450     for (int j=0; j < NS.GetMyLength(); ++j)
00451     {
00452       if (j % GetNumPDEEqns() != 0)
00453         NS(j) = 0.;
00454     }
00455 
00456     // run pre-smoother
00457     MultiVector F(A(0).GetDomainSpace());
00458     F = 0.0;
00459 
00460     S(0).Reshape(A(0), GetSmootherType(), List_);
00461     S(0).Apply(F, NS);
00462 
00463     double MyEnergyBefore = sqrt((A(0) * NS) * NS);
00464     if (MyEnergyBefore == 0.0) {
00465       SetNullSpace(NewNS);
00466       return;
00467     }
00468 
00469     // compare the last two iterates on the fine level
00470     int SweepsBefore = List_.get("smoother: sweeps",1);
00471     List_.set("smoother: sweeps",1);
00472     S(0).Reshape(A(0), GetSmootherType(), List_);
00473     S(0).Apply(F, NS);
00474     double MyEnergyAfter = sqrt((A(0) * NS) * NS);
00475     if (MyEnergyAfter/MyEnergyBefore < GetMaxReduction()) {
00476       SetNullSpace(NewNS);
00477       return;
00478     }
00479 
00480     List_.set("smoother: sweeps",SweepsBefore);
00481 
00482     int level;
00483     for (level = 0 ; level < GetMaxLevels() - 2 ; ++level) {
00484 
00485       if (level) SetNumPDEEqns(NS.GetNumVectors());
00486 
00487       if (GetPrintLevel()) {
00488         ML_print_line("-", 80);
00489         cout << "current working level   = " << level << endl;
00490         cout << "number of global rows   = " 
00491           << A(level).GetDomainSpace().GetNumGlobalElements() << endl;
00492         cout << "number of PDE equations = " << GetNumPDEEqns() << endl;
00493         cout << "null space dimension    = " << NS.GetNumVectors() << endl;
00494       }
00495 
00496       GetSmoothedP(A(level), List_, NS, P(level), NewNS);
00497       NS = NewNS;
00498 
00499       R(level) = GetTranspose(P(level));
00500       A(level + 1) = GetRAP(R(level),A(level),P(level));
00501       S(level + 1).Reshape(A(level + 1),GetSmootherType(),List_);
00502 
00503       // break if coarse matrix is below specified size
00504       if (A(level + 1).GetDomainSpace().GetNumGlobalElements() <= GetMaxCoarseSize()) {
00505         ++level;
00506         break;
00507       }
00508 
00509       MultiVector F(A(level + 1).GetDomainSpace());
00510       F = 0.0;
00511       MyEnergyBefore = sqrt((A(level + 1) * NS) * NS);
00512       S(level + 1).Apply(F, NS);
00513       MyEnergyAfter = sqrt((A(level + 1) * NS) * NS);
00514       if (GetPrintLevel()) {
00515         cout << "Energy before smoothing = " << MyEnergyBefore << endl;
00516         cout << "Energy after smoothing  = " << MyEnergyAfter << endl;
00517       }
00518 
00519       if (pow(MyEnergyAfter/MyEnergyBefore,1.0/SweepsBefore) < GetMaxReduction()) {
00520         ++level;
00521         break; 
00522       }
00523     }
00524 
00525     if (GetPrintLevel())
00526       ML_print_line("-", 80);
00527 
00528     // interpolate candidate back to fine level
00529     int MaxLevels = level;
00530     for (int level = MaxLevels ; level > 0 ; --level) {
00531       NS = P(level - 1) * NS;
00532     }
00533 
00534     F.Reshape(A(0).GetDomainSpace());
00535     F = 0.0;
00536     S(0).Apply(F, NS);
00537 
00538     double norm = NS.NormInf();
00539     NS.Scale(1.0 / norm);
00540 
00541     SetNullSpace(NS);
00542 
00543     StackPop();
00544     UpdateTime();
00545   }
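  // The stopping test used above can be written explicitly: with the energy
  // norm E(v) = sqrt(v^T A_l v) measured before and after the smoother sweeps
  // on level l, coarsening of the initial candidate stops once
  //
  //   (E_after / E_before)^(1 / sweeps) < "adapt: max reduction"
  //
  // i.e. once the smoother alone already reduces the energy of the candidate
  // fast enough that no further coarse level is needed for this component.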
00546 
00548   bool IncrementNullSpace()
00549   {
00550     ResetTimer();
00551     StackPush();
00552 
00553     SetNumPDEEqns(GetInputNumPDEEqns());
00554 
00555     MultiVector InputNS = GetNullSpace();
00556 
00557     if (InputNS.GetNumVectors() == 0)
00558       ML_THROW("Empty null space not allowed", -1);
00559 
00560     if (GetPrintLevel()) {
00561       cout << endl;
00562       ML_print_line("-", 80);
00563       cout << "Incrementing the hierarchy, input null space dimension = "
00564            << InputNS.GetNumVectors() << endl;
00565     }
00566 
00567     int level;
00568 
00569     // =========================================================== //
00570     // InputNS is the currently available (and stored) null space. //
00571     // ExpandedNS is InputNS + AdditionalNS.                       //
00572     // NCand is the dimension of the previous null space.          //
00573     // AdditionalNS is set to random values between 0 and 1.0;     //
00574     // however, we may need to zero out every entry of the new     //
00575     // candidate except the (NCand+1)-th one.                      //
00576     // Once the new candidate is set, we run the current V-cycle   //
00577     // on it. NOTE: the final copy instruction is needed, since    //
00578     // the original pointer in AdditionalNS could have been        //
00579     // changed under the hood.                                     //
00580     // =========================================================== //
00581 
00582     int NCand = InputNS.GetNumVectors();
00583     MultiVector ExpandedNS = InputNS;
00584     ExpandedNS.Append();
00585 
00586     MultiVector AdditionalNS = Extract(ExpandedNS, NCand);
00587     AdditionalNS.Random();
00588     AdditionalNS = (AdditionalNS + 1.) / 2.0;
00589 
00590     if (NCand+1 <= GetNumPDEEqns())
00591     {
00592       for (int i=0; i< AdditionalNS.GetMyLength(); i++)
00593         if ( (i+1) % (NCand+1) != 0) 
00594           AdditionalNS(i) = 0;
00595     }
00596 
00597     MultiVector b0(AdditionalNS.GetVectorSpace());
00598 
00599     for (int i=0; i< GetNumItersFine() ; i++)
00600       SolveMultiLevelSA(b0,AdditionalNS,0);
00601 
00602     double NormFirst = ExpandedNS.NormInf(0);
00603     AdditionalNS.Scale(NormFirst / AdditionalNS.NormInf());
00604 
00605     for (int i=0; i<AdditionalNS.GetMyLength(); i++)
00606       ExpandedNS(i,NCand) = AdditionalNS(i);
00607 
00608     // ===================== //
00609     // cycle over all levels //
00610     // ===================== //
00611 
00612     for (level = 0 ; level < GetMaxLevels() - 2 ; ++level) {
00613 
00614       if (GetPrintLevel()) ML_print_line("-", 80);
00615 
00616       List_.set("workspace: current level", level);
00617 
00618       // ======================================================= //
00619       // Create a new prolongator using the newly available     //
00620       // null space. NewNS is a temporary variable that holds    //
00621       // the next-level null space; ExpandedNS is set to it.     //
00622       // We also need to set up the smoother at this level (not  //
00623       // on the finest, since the finest-level matrix does not   //
00624       // change).                                                //
00625       // At this point, we store the operators in the hierarchy. //
00626       // ======================================================= //
00627 
00628       MultiVector NewNS;
00629 
00630       GetSmoothedP(A(level), List_, ExpandedNS, P(level), NewNS);
00631       ExpandedNS = NewNS;
00632 
00633       R(level) = GetTranspose(P(level));
00634       A(level + 1) = GetRAP(R(level),A(level),P(level));
00635 
00636       SetNumPDEEqns(NewNS.GetNumVectors());
00637 
00638       if (level != 0)
00639         S(level).Reshape(A(level), GetSmootherType(), List_);
00640 
00641       S(level + 1).Reshape(A(level + 1), GetSmootherType(), List_); 
00642 
00643       // ======================================================= //
00644       // Need to set up the bridge. We need to extract the NCand //
00645       // components of the "old" null space, and set them in a   //
00646       // temporary variable, OldNS. NewNS is simply discarded.   //
00647       // ======================================================= //
00648 
00649       MultiVector OldNS = ExpandedNS;
00650       OldNS.Delete(NCand);
00651 
00652       Operator Pbridge;
00653       GetSmoothedP(A(level + 1), List_, OldNS, Pbridge, NewNS);
00654 
00655       P(level + 1) = Pbridge;
00656       R(level + 1) = GetTranspose(Pbridge);
00657 
00658       AdditionalNS = Duplicate(Extract(ExpandedNS, NCand));
00659 
00660       double MyEnergyBefore = sqrt((A(level + 1) * AdditionalNS) * AdditionalNS);
00661 
00662       // FIXME scale with something: norm of the matrix, ...;
00663       if (MyEnergyBefore < 1e-10) {
00664         ++level;
00665         break;
00666       }
00667 
00668       // ======================================================= //
00669       // run Nits_coarse cycles, using AdditionalNS as the       //
00670       // starting solution and a zero right-hand side.           //
00671       // ======================================================= //
00672 
00673       b0.Reshape(AdditionalNS.GetVectorSpace());
00674       b0 = 0.;
00675 
00676       for (int i=0; i< GetNumItersCoarse() ; i++)
00677         SolveMultiLevelSA(b0,AdditionalNS,level+1);
00678 
00679       // ======================================================= //
00680       // Get the norm of the first null space component, then    //
00681       // analyze the energy after the application of the cycle.  //
00682       // If the energy after is zero, we have to check whether   //
00683       // the new candidate is zero or not.                       //
00684       // Then, we scale the new candidate so that its largest    //
00685       // entry is of the same magnitude as the largest entry of  //
00686       // the first component.                                    //
00687       // ======================================================= //
00688 
00689       double NormFirstComponent = ExpandedNS.NormInf(0);
00690 
00691       double MyEnergyAfter = sqrt((A(level + 1) * AdditionalNS) * AdditionalNS);
00692 
00693       if (MyEnergyAfter == 0.0) {
00694         if (AdditionalNS.NormInf() != 0.0) {
00695           for (int i=0; i<AdditionalNS.GetMyLength(); i++)
00696             ExpandedNS(i,NCand) = AdditionalNS(i);
00697         }
00698       }
00699       else {
00700         for (int i=0; i<AdditionalNS.GetMyLength(); i++) {
00701           ExpandedNS(i,NCand) = AdditionalNS(i);
00702         }
00703       }
00704 
00705       double NormExpanded = ExpandedNS.NormInf(NCand);
00706       ExpandedNS.Scale(NormFirstComponent / NormExpanded, NCand);
00707 
00708       if (GetPrintLevel()) {
00709         cout << "energy before cycle = " << MyEnergyBefore << endl;
00710         cout << "energy after        = " << MyEnergyAfter << endl;
00711       }
00712 
00713       // FIXME: still to do:
00714       // - scaling of the new computed component
00715 
00716       if (pow(MyEnergyAfter/MyEnergyBefore,1.0 / GetNumItersCoarse()) < GetMaxReduction()) {
00717         ++level;
00718         break;
00719       }
00720     }
00721 
00722     --level;
00723     AdditionalNS = Extract(ExpandedNS, NCand);
00724 
00725     // ======================================================= //
00726     // Project the AdditionalNS vector back to the fine level.  //
00727     // Then, reset the number of PDE equations, and finally    //
00728     // set the null space of this object using SetNullSpace(). //
00729     // Note that at this point the hierarchy is broken, and    //
00730     // must be reconstructed using Compute().                  //
00731     // ======================================================= //
00732 
00733     // FIXME: put scaling on every level in here?
00734     for (int i=level; i>=0 ; i--) {
00735       AdditionalNS = P(i) * AdditionalNS;
00736     }
00737 
00738     InputNS.Append(AdditionalNS);
00739 
00740     SetNullSpace(InputNS);
00741 
00742     if (GetPrintLevel()) ML_print_line("-", 80);
00743 
00744     StackPop();
00745     UpdateTime();
00746 
00747     IsComputed_ = false;
00748 
00749     return(true);
00750   }
00751 
00752   // @}
00753   // @{ \name Mathematical methods
00754 
00755   // ====================================================================== 
00757   // ====================================================================== 
00758   int Apply(const MultiVector& b_f, MultiVector& x_f) const
00759   {
00760     ResetTimer();
00761     StackPush();
00762 
00763     if (IsComputed() == false)
00764       ML_THROW("Method Compute() must be called", -1);
00765 
00766     SolveMultiLevelSA(b_f,x_f,0);
00767 
00768     StackPop();
00769     UpdateTime();
00770 
00771     return(0);
00772   }
00773 
00774   // ====================================================================== 
00776   // ====================================================================== 
00777   int SolveMultiLevelSA(const MultiVector& b_f,MultiVector& x_f, int level) const 
00778   {
00779     if (level == GetMaxLevels() - 1) {
00780       x_f = S(level) * b_f;
00781       return(0);
00782     }
00783 
00784     MultiVector r_f(P(level).GetRangeSpace());
00785     MultiVector r_c(P(level).GetDomainSpace());
00786     MultiVector z_c(P(level).GetDomainSpace());
00787 
00788     // reset flop counter
00789     S(level).SetFlops(0.0);
00790     A(level).SetFlops(0.0);
00791     R(level).SetFlops(0.0);
00792     P(level).SetFlops(0.0);
00793 
00794     // apply pre-smoother
00795     S(level).Apply(b_f,x_f); 
00796     // new residual
00797     r_f = b_f - A(level) * x_f;
00798     // restrict to coarse
00799     r_c = R(level) * r_f;
00800     // solve coarse problem
00801     SolveMultiLevelSA(r_c,z_c,level + 1);
00802     // prolongate back and add to solution
00803     x_f = x_f + P(level) * z_c;
00804     // apply post-smoother
00805     S(level).Apply(b_f,x_f); 
00806 
00807     UpdateFlops(2.0 * S(level).GetFlops());
00808     UpdateFlops(A(level).GetFlops());
00809     UpdateFlops(R(level).GetFlops());
00810     UpdateFlops(P(level).GetFlops());
00811     UpdateFlops(2.0 * x_f.GetGlobalLength());
00812 
00813     return(0);
00814   }
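  // For reference, one sweep of this cycle is the standard multiplicative
  // two-grid correction. A sketch, assuming exact arithmetic and writing one
  // smoother application as x <- x + M_l^{-1} (b - A_l x): the error obeys
  //
  //   e  <-  (I - M_l^{-1} A_l) (I - P_l B_{l+1} R_l A_l) (I - M_l^{-1} A_l) e
  //
  // where B_{l+1} is A_{l+1}^{-1} on the coarsest level and, recursively, the
  // cycle itself on the intermediate levels.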
00815 
00816   // @}
00817   // @{ \name Miscellaneous methods
00818 
00820   std::ostream& Print(std::ostream& os, 
00821                       const bool verbose = true) const
00822   {
00823     if (GetMyPID() == 0) {
00824       os << endl;
00825       os << "*** MLAPI::MultiLevelAdaptiveSA, label = `" << GetLabel() << "'" << endl;
00826       os << endl;
00827       os << "Number of levels = " << GetMaxLevels() << endl;
00828       os << "Flop count       = " << GetFlops() << endl;
00829       os << "Cumulative time  = " << GetTime() << endl;
00830       if (GetTime() != 0.0)
00831         os << "MFlops rate      = " << 1.0e-6 * GetFlops() / GetTime() << endl;
00832       else
00833         os << "MFlops rate      = 0.0" << endl;
00834       os << endl;
00835 
00836       for (int level = 0 ; level < GetMaxLevels() ; ++level) {
00837         ML_print_line("-", 80);
00838         cout << "Information for level   = " << level << endl;
00839         cout << "number of global rows   = " 
00840              << A(level).GetNumGlobalRows() << endl;
00841         cout << "number of global nnz    = " 
00842              << A(level).GetNumGlobalNonzeros() << endl;
00843       }
00844       ML_print_line("-", 80);
00845     }
00846     return(os);
00847   }
00848 
00849   // @}
00850 
00851 private:
00852 
00854   void GetSmoothedP(Operator A, Teuchos::ParameterList& List, MultiVector& NS,
00855                     Operator& P, MultiVector& NewNS)
00856   {
00857     double LambdaMax;
00858     Operator IminusA;
00859     string EigenAnalysis = List.get("eigen-analysis: type", "Anorm");
00860     double Damping       = List.get("aggregation: damping", 1.333);
00861 
00862     Operator Ptent;
00863 
00864     GetPtent(A, List, NS, Ptent, NewNS);
00865 
00866     if (EigenAnalysis == "Anorm")
00867       LambdaMax = MaxEigAnorm(A,true);
00868     else if (EigenAnalysis == "cg")
00869       LambdaMax = MaxEigCG(A,true);
00870     else if (EigenAnalysis == "power-method")
00871       LambdaMax = MaxEigPowerMethod(A,true);
00872     else
00873       ML_THROW("incorrect parameter (" + EigenAnalysis + ")", -1);
00874 
00875     if (GetPrintLevel()) {
00876       cout << "omega                   = " << Damping << endl;
00877       cout << "lambda max              = " << LambdaMax << endl;
00878       cout << "damping factor          = " << Damping / LambdaMax << endl;
00879     }
00880 
00881     if (Damping != 0.0) {
00882       IminusA = GetJacobiIterationOperator(A, Damping / LambdaMax);
00883       P = IminusA * Ptent;
00884     }
00885     else
00886       P = Ptent;
00887 
00888     // fix the number of equations in P, so that GetRAP() will
00889     // get the correct number for C = RAP
00890     P.GetML_Operator()->num_PDEs = Ptent.GetML_Operator()->num_PDEs;
00891 
00892     return;
00893   }
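  // In formulas: with the tentative prolongator P_tent returned by GetPtent(),
  // the damping parameter omega = "aggregation: damping", and the eigenvalue
  // estimate lambda_max computed above, the smoothed prolongator is (a sketch,
  // with D denoting the diagonal of A as used by the Jacobi iteration
  // operator)
  //
  //   P = (I - (omega / lambda_max) D^{-1} A) P_tent
  //
  // and with omega = 0 the tentative prolongator is used unchanged.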
00894 
00895   void ResizeArrays(const int MaxLevels) 
00896   {
00897     A_.resize(MaxLevels);
00898     R_.resize(MaxLevels);
00899     P_.resize(MaxLevels);
00900     S_.resize(MaxLevels);
00901   }
00902     
00904   int MaxLevels_;
00906   Operator FineMatrix_;
00908   std::vector<Operator> A_;
00910   std::vector<Operator> R_;
00912   std::vector<Operator> P_;
00914   std::vector<InverseOperator> S_;
00915   Teuchos::ParameterList List_;
00917   MultiVector NullSpace_;
00919   bool IsComputed_;
00921   int NumPDEEqns_;
00922 
00923 }; // class MultiLevelAdaptiveSA
00924 
00925 } // namespace MLAPI
00926 #endif