ADMB Documentation  11.1.1916
newmodm2.cpp
/*
 * $Id: newmodm2.cpp 1780 2014-03-12 19:48:27Z johnoel $
 *
 * Author: David Fournier
 * Copyright (c) 2008-2012 Regents of the University of California
 */
#include <admodel.h>
#if defined(USE_LAPLACE)
#  include <df1b2fun.h>
#  include <adrndeff.h>
#endif

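/*
 * Compute the log-determinant of the Hessian restricted to the hyperplane
 * orthogonal to the gradient g.  The Hessian is read from admodel.hes
 * (or <program>.hes when a working directory is set).  When underflow_flag
 * is set, the calculation is skipped and a fixed fallback value is returned.
 */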
double function_minimizer::projected_hess_determinant(const dvector& g,
  const int underflow_flag)
{
 int sgn=0;
 double lndet=0.0;
 //double ld1=0.0;
 //double ld2=0.0;
 //char ch;
 if (!underflow_flag)
 {
    adstring tmpstring="admodel.hes";
    if (ad_comm::wd_flag)
       tmpstring = ad_comm::adprogram_name + ".hes";
    uistream ifs((char*)tmpstring);

  if (!ifs)
  {
    cerr << "Error opening file " << tmpstring << endl;
  }
  int nvar = 0;
  ifs >> nvar;
  dmatrix S(1,nvar,1,nvar);
  {
    if (nvar != initial_params::nvarcalc())
    {
      cerr << "The number of independent variables in " << tmpstring
         << " is incorrect" << endl;
    }
    dmatrix p(1,nvar,1,nvar);
    //dmatrix p1(1,nvar-1,1,nvar);
    dmatrix h(1,nvar,1,nvar);
    //dmatrix gram(1,nvar-1,1,nvar-1);
    dvector ss(1,nvar);
    ifs >> h;
    if (!ifs)
    {
      cerr << "Error reading the hessian from file " << tmpstring << endl;
    }
    dvector n=g/norm(g);
    // project the standard basis vectors onto the tangent space
    int i;
    for (i=1;i<=nvar;i++)
    {
      p(i)=-n(i)*n;
      p(i,i)+=1;
    }
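    // p(i) is now the i-th row of the projection matrix I - n*n', so each
    // p(i) is orthogonal to the normalized gradient direction n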
    //cout << "p*n" << endl;
    //cout << p*n << endl;
    //cin >> ch;

    for (i=1;i<=nvar;i++)
    {
      ss(i)=norm(p(i));
    }

  /*
    double minsize=min(ss);
    for (i=1;i<=nvar;i++)
    {
      if (ss(i)==minsize) break;
      p1(i)=p(i);
    }

    for (int ii=i+1;ii<=nvar;ii++)
    {
      p1(ii-1)=p(ii);
    }
   */

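    // the Hessian read from the file may not be exactly symmetric, so
    // replace each off-diagonal pair by its average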
    for (i=1;i<=nvar;i++)
    {
      for (int j=1;j<i;j++)
      {
        double tmp=(h(i,j)+h(j,i))/2.;
        h(i,j)=tmp;
        h(j,i)=tmp;
      }
    }

    //cout << "A" << endl;
   /*
    for (i=1;i<nvar;i++)
    {
      dvector tmp = h*p1(i);
     // cout << "aA" << endl;
      for (int j=1;j<nvar;j++)
      {
        gram(i,j)=tmp*p1(j);
      }
    }
    cout << "B" << endl;
    ld1=ln_det(gram,sgn);
    cout << "CX" << endl;
    for (i=1;i<nvar;i++)
    {
      for (int j=1;j<nvar;j++)
      {
        gram(i,j)=p1(i)*p1(j);
      }
      if (norm(p1(i))==0) cout <<"norm p1 =0 " << endl;
    }
 //   cout << "D" << endl;

    ld2=ln_det(gram,sgn);
   // cout << "E" << endl;
   */

 //   cout << "eigs of h" << endl;
   // cout << sort(eigenvalues(h)) << endl << endl;

 //   cout << "F" << endl;
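    // S = P*H*P + n*n', i.e. the Hessian restricted to the tangent space
    // plus a rank-one term in the gradient direction; the extra term makes
    // S nonsingular without changing the determinant of the restriction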
    for (i=1;i<=nvar;i++)
    {
      dvector tmp=h*p(i);
      for (int j=1;j<=i;j++)
      {
        S(i,j)=tmp*p(j)+n(i)*n(j);
      }
    }
 //   cout << "G" << endl;

    for (i=1;i<=nvar;i++)
    {
      for (int j=1;j<i;j++)
      {
        S(j,i)=S(i,j);
      }
    }
  }
 // cout << "eigs of S" << endl;
 // cout << sort(eigenvalues(S)) << endl << endl;
  lndet=ln_det(S,sgn);
 // cout << "old det = " << lndet << endl;
 // lndet=ld1-ld2;
 // cout << "new det = " << lndet  << "  "  << ld1 << "  " << ld2 << endl;
  //char ch;
  //cin >> ch;
  //double lndet2=0.;
  if (sgn <= 0)
  {
    cerr << "Error: restricted Hessian is not positive definite" << endl;
  }
  {
  //  cout << "doing S eigenvalues" << endl;
  //  dvector eig=eigenvalues(S);
  //  cout << "finished S eigenvalues" << endl;
    //lndet2=sum(log(eig));
  //  cout << sort(eig) << endl << endl;
  }
 }
 else
 {
   lndet=-50.0;
 }
 return lndet;
}

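/*
 * Compute the gradient of the profiled quantity (g) and the gradient of the
 * full objective function (fg) at the current parameter values, then print
 * the cosine of the angle between the two gradient vectors.
 */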
void function_minimizer::get_particular_grad(int iprof,int nvar,
  const dvector& fg, const dvector& g)
  {
    independent_variables x(1,nvar);
    // get the initial values into the x vector
    initial_params::xinit(x);
    dvariable vf=0.0;
    gradient_structure::set_YES_DERIVATIVES();
    vf=initial_params::reset(dvar_vector(x));
    #if defined(USE_LAPLACE)
      if (lapprox)
      {
        if (lapprox->hesstype==2)
        {
          lapprox->separable_calls_counter=0;
        }
      }
    #endif
    userfunction();
    vf=likeprof_params::likeprofptr[iprof]->variable();
    gradcalc(nvar, g, vf);

    vf=0.0;
    vf=initial_params::reset(dvar_vector(x));
    *objective_function_value::pobjfun=0.0;
    #if defined(USE_LAPLACE)
      if (lapprox)
      {
        if (lapprox->hesstype==2)
        {
          lapprox->separable_calls_counter=0;
        }
      }
    #endif
    userfunction();
    vf+=*objective_function_value::pobjfun;
    gradcalc(nvar, fg, vf);

    gradient_structure::set_NO_DERIVATIVES();
    double div=norm(g)*norm(fg);

    if (div==0.0)
      cout << "0" << endl;
    else
      cout << g*fg/div << endl;
  }

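/*
 * Minimize the objective function subject to a quadratic penalty that pulls
 * the profiled quantity toward new_value.  The penalty weight is increased
 * over up to three profile phases, and the penalized minimum, the penalty
 * contribution, and the final weight are returned through the reference
 * arguments.
 */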
void function_minimizer::prof_minimize(int iprof, double sigma,
  double new_value, const double& _fprof, const int underflow_flag,
  double global_min, const double& _penalties, const double& _final_weight)
   {
     double& penalties=(double&) _penalties;
     double& fprof=(double&) _fprof;
     double& final_weight=(double&) _final_weight;
    if (!underflow_flag)
    {
     int max_profile_phases=3;
     int profile_phase=1;
     initial_params::current_phase = initial_params::max_number_phases;
     while (profile_phase <= max_profile_phases)
     {
      {
       int nvar=initial_params::nvarcalc(); // get the number of active parameters
       dvector g(1,nvar);
       independent_variables x(1,nvar);
       // get the initial values into the x vector
       initial_params::xinit(x);
       fmm fmc(nvar);
       fmc.maxfn= maxfn;
       fmc.iprint= iprint;
       fmc.crit = crit;
       fmc.imax = imax;
       fmc.dfn= dfn;
       fmc.scroll_flag= scroll_flag;
       fmc.min_improve=min_improve;
       double f=0.0;
       gradient_structure::set_YES_DERIVATIVES();
       // set convergence criterion for this phase
       if (!(!convergence_criteria))
       {
         int ind=min(convergence_criteria.indexmax(),
           initial_params::current_phase);
         fmc.crit=convergence_criteria(ind);
       }
       if (!(!maximum_function_evaluations))
       {
         int ind=min(maximum_function_evaluations.indexmax(),
           initial_params::current_phase);
         fmc.maxfn=int(maximum_function_evaluations(ind));
       }
       int itnsave=0;
       //double weight=pow(50.0,profile_phase)/(sigma*sigma);
       double weight;
       if (sigma)
       {
         weight=pow(120.0,profile_phase)/(sigma*sigma);
       }
       else
       {
         weight=pow(120.0,profile_phase);
       }
       final_weight=weight;
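       // the penalty weight grows geometrically with the profile phase and,
       // when sigma is nonzero, is scaled by 1/(sigma*sigma)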
       while (fmc.ireturn>=0)
       {
         fmc.fmin(f,x,g);
         double diff =
           new_value-value(likeprof_params::likeprofptr[iprof]->variable());
         if (fmc.itn>itnsave && diff < pow(.1,iprof)*sigma)
         {
           fmc.ifn=fmc.imax;
         }
         if (fmc.ireturn>0)
         {
           dvariable vf=0.0;
           vf=initial_params::reset(dvar_vector(x));
           *objective_function_value::pobjfun=0.0;
           userfunction();
           dvariable tv=likeprof_params::likeprofptr[iprof]->variable();
           vf+=weight*square(new_value-tv);
           vf+=*objective_function_value::pobjfun;
           gradcalc(nvar, g, vf);
         }
       }
       gradient_structure::set_NO_DERIVATIVES();
       iexit=fmc.iexit;
       ihflag=fmc.ihflag;
       ihang=fmc.ihang;
       maxfn_flag=fmc.maxfn_flag;
       quit_flag=fmc.quit_flag;
       fprof=value(initial_params::reset(dvar_vector(x)));
       *objective_function_value::pobjfun=0.0;
       userfunction();
       double tv=value(likeprof_params::likeprofptr[iprof]->variable());
       fprof+=value(*objective_function_value::pobjfun);
       penalties=weight*(new_value-tv)*(new_value-tv);
       fprof+=penalties;
       if (quit_flag=='Q') break;
       if (!quit_flag || quit_flag == 'N')
       {
         profile_phase++;
       }
      }
     }
    }
    else
    {
      fprof=global_min+20.0;
    }
   }