ADMB Documentation  11.1x.2711
 All Classes Files Functions Variables Typedefs Friends Defines
newmodm2.cpp
Go to the documentation of this file.
00001 /*
00002  * $Id: newmodm2.cpp 2581 2014-11-07 03:21:24Z johnoel $
00003  *
00004  * Author: David Fournier
00005  * Copyright (c) 2008-2012 Regents of the University of California
00006  */
00007 #include <admodel.h>
00008 #include <df1b2fun.h>
00009 #include <adrndeff.h>
00010 
// Local exact-zero test used below to guard a division by sigma*sigma.
// Undefine any previous definition so this file's version is the one used.
#ifdef ISZERO
  #undef ISZERO
#endif
#define ISZERO(d) ((d)==0.0)
00015 
/**
 * Compute the log-determinant of the Hessian restricted to the subspace
 * orthogonal to the gradient g (the "projected" Hessian), reading the full
 * Hessian from the admodel.hes file written by an earlier phase.
 *
 * @param g gradient vector; only its direction is used (it is normalized).
 * @param underflow_flag when nonzero the computation is skipped entirely and
 *        a fixed value of -50.0 is returned.
 * @return log-determinant of the projected Hessian matrix S, or -50.0 when
 *         underflow_flag is set.
 *
 * NOTE(review): if the .hes file cannot be opened or read, an error is
 * printed to cerr but execution continues with the failed stream; nvar then
 * stays 0 and ln_det is taken of an empty matrix — confirm that this
 * fallback is the intended behavior.
 */
double function_minimizer::projected_hess_determinant(const dvector& g,
  const int underflow_flag)
{
 double lndet=0.0;
 //double ld1=0.0;
 //double ld2=0.0;
 //char ch;
 if (!underflow_flag)
 {
   int sgn=0;  // sign of the determinant, set by ln_det below
    // Default Hessian file name; when a working directory is active the
    // file is named after the program instead.
    adstring tmpstring="admodel.hes";
    if (ad_comm::wd_flag)
       tmpstring = ad_comm::adprogram_name + ".hes";
    uistream ifs((char*)tmpstring);

  if (!ifs)
  {
    cerr << "Error opening file admodel.hes" << endl;
  }
  // First record of the .hes file is the number of independent variables.
  int nvar = 0;
  ifs >> nvar;
  dmatrix S(1,nvar,1,nvar);
  if (nvar > 0)
  {
    // Sanity check: the stored Hessian must match the current model size.
    if (nvar != initial_params::nvarcalc())
    {
      cout << "the number of independent variables is wrong in admodel.hes"
         << endl;
    }
    dmatrix p(1,nvar,1,nvar);   // rows: projected standard basis vectors
    //dmatrix p1(1,nvar-1,1,nvar);
    dmatrix h(1,nvar,1,nvar);   // Hessian read from file
    //dmatrix gram(1,nvar-1,1,nvar-1);
    dvector ss(1,nvar);         // norms of the projected basis vectors
    ifs >> h;
    if (!ifs)
    {
      cerr << "Error reading the hessian from file admodel.hes" << endl;
    }
    // Unit vector in the gradient direction.
    dvector n=g/norm(g);
    // project the standard basis vectors onto the tangent space
    // p(i) = e_i - n(i)*n, i.e. (I - n n') applied to each basis vector.
    int i;
    for (i=1;i<=nvar;i++)
    {
      p(i)=-n(i)*n;
      p(i,i)+=1;
    }
    //cout << "p*n" << endl;
    //cout << p*n << endl;
    //cin >> ch;

    for (i=1;i<=nvar;i++)
    {
      ss(i)=norm(p(i));
    }

  /*
    double minsize=min(ss);
    for (i=1;i<=nvar;i++)
    {
      if (ss(i)==minsize) break;
      p1(i)=p(i);
    }

    for (int ii=i+1;ii<=nvar;ii++)
    {
      p1(ii-1)=p(ii);
    }
   */

    // Symmetrize h in place to remove numerical asymmetry from the file.
    for (i=1;i<=nvar;i++)
    {
      for (int j=1;j<i;j++)
      {
        double tmp=(h(i,j)+h(j,i))/2.;
        h(i,j)=tmp;
        h(j,i)=tmp;
      }
    }

    //cout << "A" << endl;
   /*
    for (i=1;i<nvar;i++)
    {
      dvector tmp = h*p1(i);
     // cout << "aA" << endl;
      for (int j=1;j<nvar;j++)
      {
        gram(i,j)=tmp*p1(j);
      }
    }
    cout << "B" << endl;
    ld1=ln_det(gram,sgn);
    cout << "CX" << endl;
    for (i=1;i<nvar;i++)
    {
      for (int j=1;j<nvar;j++)
      {
        gram(i,j)=p1(i)*p1(j);
      }
      if (norm(p1(i))==0) cout <<"norm p1 =0 " << endl;
    }
 //   cout << "D" << endl;

    ld2=ln_det(gram,sgn);
   // cout << "E" << endl;
   */

 //   cout << "eigs of h" << endl;
   // cout << sort(eigenvalues(h)) << endl << endl;

 //   cout << "F" << endl;
    // Build the projected Hessian: S = P' h P + n n' (lower triangle only).
    // The n(i)*n(j) term replaces the zero eigenvalue along the gradient
    // direction with 1 so S can be positive definite.
    for (i=1;i<=nvar;i++)
    {
      dvector tmp=h*p(i);
      for (int j=1;j<=i;j++)
      {
        S(i,j)=tmp*p(j)+n(i)*n(j);
      }
    }
 //   cout << "G" << endl;

    // Mirror the lower triangle to make S symmetric.
    for (i=1;i<=nvar;i++)
    {
      for (int j=1;j<i;j++)
      {
        S(j,i)=S(i,j);
      }
    }
  }
 // cout << "eigs of S" << endl;
 // cout << sort(eigenvalues(S)) << endl << endl;
  lndet=ln_det(S,sgn);
 // cout << "old det = " << lndet << endl;
 // lndet=ld1-ld2;
 // cout << "new det = " << lndet  << "  "  << ld1 << "  " << ld2 << endl;
  //char ch;
  //cin >> ch;
  //double lndet2=0.;
  // A non-positive determinant sign means the restricted Hessian is not
  // positive definite; only a warning is emitted, lndet is still returned.
  if (sgn <= 0)
  {
    cerr << "Error restricted Hessian is not positive definite" << endl;
  }
  {
  //  cout << "doing S eigenvalues" << endl;
  //  dvector eig=eigenvalues(S);
  //  cout << "finished S eigenvalues" << endl;
    //lndet2=sum(log(eig));
  //  cout << sort(eig) << endl << endl;
  }
 }
 else
 {
   // Underflow: return a fixed, very small log-determinant.
   lndet=-50.0;
 }
 return lndet;
}
00173 
00174 void function_minimizer::get_particular_grad(int iprof,int nvar,
00175   const dvector& fg, const dvector& g)
00176   {
00177     independent_variables x(1,nvar);
00178     // get the initial values into the x vector
00179     initial_params::xinit(x);
00180     dvariable vf=0.0;
00181     gradient_structure::set_YES_DERIVATIVES();
00182     vf=initial_params::reset(dvar_vector(x));
00183       if (lapprox)
00184       {
00185         if (lapprox->hesstype==2)
00186         {
00187           lapprox->separable_calls_counter=0;
00188         }
00189       }
00190     userfunction();
00191     vf=likeprof_params::likeprofptr[iprof]->variable();
00192     gradcalc(nvar, g, vf);
00193 
00194     vf=0.0;
00195     vf=initial_params::reset(dvar_vector(x));
00196     *objective_function_value::pobjfun=0.0;
00197       if (lapprox)
00198       {
00199         if (lapprox->hesstype==2)
00200         {
00201           lapprox->separable_calls_counter=0;
00202         }
00203       }
00204     userfunction();
00205     vf+=*objective_function_value::pobjfun;
00206     gradcalc(nvar, fg, vf);
00207 
00208     gradient_structure::set_NO_DERIVATIVES();
00209     double div=norm(g)*norm(fg);
00210 
00211     if (div==0.0)
00212       cout << "0" << endl;
00213     else
00214       cout << g*fg/(norm(g)*norm(fg)) << endl;
00215   }
00216 
/**
 * Run the penalized minimization used for likelihood profiling: minimize the
 * objective function plus a quadratic penalty weight*(new_value - tv)^2 that
 * pulls the profiled quantity tv toward new_value, over up to three profile
 * phases with an increasing penalty weight (120^phase, scaled by 1/sigma^2
 * when sigma is nonzero).
 *
 * @param iprof          index of the profiled parameter in likeprofptr
 * @param sigma          scale of the profiled parameter; 0 disables the
 *                       1/sigma^2 weight scaling (see ISZERO guard)
 * @param new_value      target value for the profiled quantity
 * @param _fprof         out: penalized objective value at the minimum
 *                       (const is cast away)
 * @param underflow_flag when nonzero, skip minimization and set
 *                       fprof = global_min + 20.0
 * @param global_min     global minimum of the objective, used only in the
 *                       underflow branch
 * @param _penalties     out: final penalty term (const is cast away)
 * @param _final_weight  out: penalty weight used in the last phase
 *                       (const is cast away)
 *
 * NOTE(review): the C-style casts below strip const from reference
 * parameters that are used as outputs — a legacy ADMB calling convention.
 */
void function_minimizer::prof_minimize(int iprof, double sigma,
  double new_value, const double& _fprof, const int underflow_flag,
  double global_min, const double& _penalties, const double& _final_weight)
   {
     double& penalties=(double&) _penalties;
     double& fprof=(double&) _fprof;
     double& final_weight=(double&) _final_weight;
    if (!underflow_flag)
    {
     int max_profile_phases=3;
     int profile_phase=1;
     // Profiling always runs with all parameters active (final phase).
     initial_params::current_phase = initial_params::max_number_phases;
     while (profile_phase <= max_profile_phases)
     {
      {
       int nvar=initial_params::nvarcalc(); // get the number of active
              // parameters
       dvector g(1,nvar);
       independent_variables x(1,nvar);
       // get the initial values into the x vector
       initial_params::xinit(x);
       // Configure the quasi-Newton minimizer from the current
       // function_minimizer settings.
       fmm fmc(nvar);
       fmc.maxfn= maxfn;
       fmc.iprint= iprint;
       fmc.crit = crit;
       fmc.imax = imax;
       fmc.dfn= dfn;
       fmc.scroll_flag= scroll_flag;
       fmc.min_improve=min_improve;
       double f=0.0;
       gradient_structure::set_YES_DERIVATIVES();
       // set convergence criterion for this phase
       // (!(!v) tests that the vector has been allocated)
       if (!(!convergence_criteria))
       {
         int ind=min(convergence_criteria.indexmax(),
           initial_params::current_phase);
         fmc.crit=convergence_criteria(ind);
       }
       if (!(!maximum_function_evaluations))
       {
         int ind=min(maximum_function_evaluations.indexmax(),
           initial_params::current_phase);
         fmc.maxfn=int(maximum_function_evaluations(ind));
       }
       int itnsave=0;
       //double weight=pow(50.0,profile_phase)/(sigma*sigma);
       // Penalty weight grows geometrically with the profile phase; scaled
       // by 1/sigma^2 unless sigma is exactly zero.
       double weight = pow(120.0,profile_phase);
       if (!ISZERO(sigma))
       {
         weight /= (sigma*sigma);
       }
       final_weight=weight;
       // Standard fmm driver loop: fmin() alternates between requesting a
       // function/gradient evaluation (ireturn>0) and updating x.
       while (fmc.ireturn>=0)
       {
         fmc.fmin(f,x,g);
         double diff =
           new_value-value(likeprof_params::likeprofptr[iprof]->variable());
         // Once the profiled quantity is close enough to the target, force
         // the minimizer to stop by exhausting its evaluation budget.
         if (fmc.itn>itnsave && diff < pow(.1,iprof)*sigma)
         {
           fmc.ifn=fmc.imax;
         }
         if (fmc.ireturn>0)
         {
           // Evaluate penalized objective: model objective plus the
           // quadratic pull toward new_value.
           dvariable vf=0.0;
           vf=initial_params::reset(dvar_vector(x));
           *objective_function_value::pobjfun=0.0;
           userfunction();
           dvariable tv=likeprof_params::likeprofptr[iprof]->variable();
           vf+=weight*square(new_value-tv);
           vf+=*objective_function_value::pobjfun;
           gradcalc(nvar, g, vf);
         }
       }
       gradient_structure::set_NO_DERIVATIVES();
       // Propagate minimizer exit status to the function_minimizer members.
       iexit=fmc.iexit;
       ihflag=fmc.ihflag;
       ihang=fmc.ihang;
       maxfn_flag=fmc.maxfn_flag;
       quit_flag=fmc.quit_flag;
       // Re-evaluate once more at the solution to report the final
       // penalized objective and penalty.
       fprof=value(initial_params::reset(dvar_vector(x)));
       *objective_function_value::pobjfun=0.0;
       userfunction();
       double tv=value(likeprof_params::likeprofptr[iprof]->variable());
       fprof+=value(*objective_function_value::pobjfun);
       penalties=weight*(new_value-tv)*(new_value-tv);
       fprof+=penalties;
       if (quit_flag=='Q') break;
       if (!quit_flag || quit_flag == 'N')
       {
         profile_phase++;
       }
      }
     }
    }
    else
    {
      fprof=global_min+20.0;
    }
   }