ADMB Documentation  11.1.2274
 All Classes Files Functions Variables Typedefs Friends Defines
newmodm2.cpp
Go to the documentation of this file.
00001 /*
00002  * $Id: newmodm2.cpp 1935 2014-04-26 02:02:58Z johnoel $
00003  *
00004  * Author: David Fournier
00005  * Copyright (c) 2008-2012 Regents of the University of California
00006  */
00007 #include <admodel.h>
00008 #  include <df1b2fun.h>
00009 #  include <adrndeff.h>
00010 
/**
 * @brief Log-determinant of the Hessian restricted to the tangent space
 *        orthogonal to the gradient g.
 *
 * Reads the full Hessian H from the <program>.hes (or admodel.hes) file,
 * symmetrizes it, projects it onto the subspace orthogonal to the unit
 * gradient n = g/|g| via the projector P = I - n*n', and augments the
 * projected matrix with the rank-one term n*n' so it is nonsingular in the
 * n direction.  The log-determinant of S = P*H*P + n*n' is then the
 * log-determinant of the Hessian restricted to the tangent space (the
 * added n*n' term contributes a factor of ~1).  Used by the likelihood
 * profile machinery.
 *
 * @param g              gradient vector defining the direction projected out
 * @param underflow_flag nonzero => skip everything and return the sentinel
 *                       value -50.0
 * @return ln|det(S)|, or -50.0 when underflow_flag is set
 *
 * NOTE(review): all error paths (file missing, short read, nvar mismatch,
 * non-positive-definite result) only print a message and fall through —
 * the function still returns whatever was computed.  TODO confirm this
 * best-effort behavior is intentional.
 */
00011 double function_minimizer::projected_hess_determinant(const dvector& g,
00012   const int underflow_flag)
00013 {
00014  int sgn=0;
00015  double lndet=0.0;
00016  //double ld1=0.0;
00017  //double ld2=0.0;
00018  //char ch;
00019  if (!underflow_flag)
00020  {
       // Hessian file: admodel.hes by default, <program>.hes when a
       // working-directory prefix is in effect.
00021     adstring tmpstring="admodel.hes";
00022     if (ad_comm::wd_flag)
00023        tmpstring = ad_comm::adprogram_name + ".hes";
00024     uistream ifs((char*)tmpstring);
00025 
00026   if (!ifs)
00027   {
00028     cerr << "Error opening file admodel.hes" << endl;
00029   }
       // First record of the .hes file is the number of independent variables.
00030   int nvar = 0;
00031   ifs >> nvar;
00032   dmatrix S(1,nvar,1,nvar);
00033   {
00034     if (nvar != initial_params::nvarcalc())
00035     {
00036       cout << "the number of independent variables is wrong in admodel.hes"
00037          << endl;
00038     }
00039     dmatrix p(1,nvar,1,nvar);
00040     //dmatrix p1(1,nvar-1,1,nvar);
00041     dmatrix h(1,nvar,1,nvar);
00042     //dmatrix gram(1,nvar-1,1,nvar-1);
00043     dvector ss(1,nvar);
00044     ifs >> h;
00045     if (!ifs)
00046     {
00047       cerr << "Error reading the hessian from file admodel.hes" << endl;
00048     }
       // Unit gradient direction.  NOTE(review): divides by norm(g) with no
       // guard — a zero gradient would divide by zero.  TODO confirm callers
       // guarantee g != 0.
00049     dvector n=g/norm(g);
00050     // project the standard basis vectors onto the tangent space
00051     int i;
       // Row i of p is e_i - n(i)*n, i.e. the i-th row of the projector
       // P = I - n*n' onto the subspace orthogonal to n.
00052     for (i=1;i<=nvar;i++)
00053     {
00054       p(i)=-n(i)*n;
00055       p(i,i)+=1;
00056     }
00057     //cout << "p*n" << endl;
00058     //cout << p*n << endl;
00059     //cin >> ch;
00060 
       // Row norms of the projector; ss is only used by the commented-out
       // Gram-matrix variant below, but is kept computed here.
00061     for (i=1;i<=nvar;i++)
00062     {
00063       ss(i)=norm(p(i));
00064     }
00065 
00066   /*
00067     double minsize=min(ss);
00068     for (i=1;i<=nvar;i++)
00069     {
00070       if (ss(i)==minsize) break;
00071       p1(i)=p(i);
00072     }
00073 
00074     for (int ii=i+1;ii<=nvar;ii++)
00075     {
00076       p1(ii-1)=p(ii);
00077     }
00078    */
00079 
       // Symmetrize the Hessian read from file: h <- (h + h')/2, since the
       // finite-difference Hessian in the .hes file need not be exactly
       // symmetric.
00080     for (i=1;i<=nvar;i++)
00081     {
00082       for (int j=1;j<i;j++)
00083       {
00084         double tmp=(h(i,j)+h(j,i))/2.;
00085         h(i,j)=tmp;
00086         h(j,i)=tmp;
00087       }
00088     }
00089 
00090     //cout << "A" << endl;
00091    /*
00092     for (i=1;i<nvar;i++)
00093     {
00094       dvector tmp = h*p1(i);
00095      // cout << "aA" << endl;
00096       for (int j=1;j<nvar;j++)
00097       {
00098         gram(i,j)=tmp*p1(j);
00099       }
00100     }
00101     cout << "B" << endl;
00102     ld1=ln_det(gram,sgn);
00103     cout << "CX" << endl;
00104     for (i=1;i<nvar;i++)
00105     {
00106       for (int j=1;j<nvar;j++)
00107       {
00108         gram(i,j)=p1(i)*p1(j);
00109       }
00110       if (norm(p1(i))==0) cout <<"norm p1 =0 " << endl;
00111     }
00112  //   cout << "D" << endl;
00113 
00114     ld2=ln_det(gram,sgn);
00115    // cout << "E" << endl;
00116    */
00117 
00118  //   cout << "eigs of h" << endl;
00119    // cout << sort(eigenvalues(h)) << endl << endl;
00120 
00121  //   cout << "F" << endl;
       // Lower triangle of S = P*H*P + n*n':
       // S(i,j) = p(i)' * H * p(j) + n(i)*n(j).  The rank-one n*n' term
       // fills the direction annihilated by P so ln_det is well defined.
00122     for (i=1;i<=nvar;i++)
00123     {
00124       dvector tmp=h*p(i);
00125       for (int j=1;j<=i;j++)
00126       {
00127         S(i,j)=tmp*p(j)+n(i)*n(j);
00128       }
00129     }
00130  //   cout << "G" << endl;
00131 
       // Mirror the lower triangle into the upper triangle (S is symmetric
       // by construction).
00132     for (i=1;i<=nvar;i++)
00133     {
00134       for (int j=1;j<i;j++)
00135       {
00136         S(j,i)=S(i,j);
00137       }
00138     }
00139   }
00140  // cout << "eigs of S" << endl;
00141  // cout << sort(eigenvalues(S)) << endl << endl;
       // ln_det also reports the sign of the determinant through sgn.
00142   lndet=ln_det(S,sgn);
00143  // cout << "old det = " << lndet << endl;
00144  // lndet=ld1-ld2;
00145  // cout << "new det = " << lndet  << "  "  << ld1 << "  " << ld2 << endl;
00146   //char ch;
00147   //cin >> ch;
00148   //double lndet2=0.;
       // A non-positive determinant means the restricted Hessian is not
       // positive definite; warn but still return lndet.
00149   if (sgn <= 0)
00150   {
00151     cerr << "Error restricted Hessian is not positive definite" << endl;
00152   }
00153   {
00154   //  cout << "doing S eigenvalues" << endl;
00155   //  dvector eig=eigenvalues(S);
00156   //  cout << "finished S eigenvalues" << endl;
00157     //lndet2=sum(log(eig));
00158   //  cout << sort(eig) << endl << endl;
00159   }
00160  }
00161  else
00162  {
       // Underflow sentinel requested by the caller.
00163    lndet=-50.0;
00164  }
00165  return lndet;
00166 }
00167 
/**
 * @brief Compute the gradient of the iprof-th profiled quantity and the
 *        gradient of the full objective, then print their cosine similarity.
 *
 * Runs the user model twice with derivatives enabled: once taking the
 * dependent variable to be the profiled quantity
 * (likeprof_params::likeprofptr[iprof]->variable()), yielding gradient g,
 * and once taking it to be the full objective function value, yielding
 * gradient fg.  Prints g.fg/(|g||fg|) to cout (or "0" when either gradient
 * is zero).
 *
 * @param iprof index into likeprof_params::likeprofptr
 * @param nvar  number of active independent variables
 * @param fg    receives the objective-function gradient
 * @param g     receives the profiled-quantity gradient
 *
 * NOTE(review): fg and g are declared const but are written through by
 * gradcalc — this follows the ADMB convention of passing writable
 * containers as const references.
 */
00168 void function_minimizer::get_particular_grad(int iprof,int nvar,
00169   const dvector& fg, const dvector& g)
00170   {
00171     independent_variables x(1,nvar);
00172     // get the initial values into the x vector
00173     initial_params::xinit(x);
00174     dvariable vf=0.0;
        // Turn on AD tape recording for the two gradient evaluations below.
00175     gradient_structure::set_YES_DERIVATIVES();
00176     vf=initial_params::reset(dvar_vector(x));
        // For random-effects models with block-diagonal Hessian, restart the
        // separable-call counter before each model evaluation.
00177       if (lapprox)
00178       {
00179         if (lapprox->hesstype==2)
00180         {
00181           lapprox->separable_calls_counter=0;
00182         }
00183       }
00184     userfunction();
        // First pass: dependent variable = the profiled quantity; g gets its
        // gradient.  Note the reset() penalty contribution in vf is
        // overwritten here, so g is the gradient of the profiled quantity
        // alone.
00185     vf=likeprof_params::likeprofptr[iprof]->variable();
00186     gradcalc(nvar, g, vf);
00187 
        // Second pass: dependent variable = reset penalties + objective
        // function value; fg gets its gradient.
00188     vf=0.0;
00189     vf=initial_params::reset(dvar_vector(x));
00190     *objective_function_value::pobjfun=0.0;
00191       if (lapprox)
00192       {
00193         if (lapprox->hesstype==2)
00194         {
00195           lapprox->separable_calls_counter=0;
00196         }
00197       }
00198     userfunction();
00199     vf+=*objective_function_value::pobjfun;
00200     gradcalc(nvar, fg, vf);
00201 
00202     gradient_structure::set_NO_DERIVATIVES();
        // Guard against a zero gradient before forming the cosine of the
        // angle between g and fg.
00203     double div=norm(g)*norm(fg);
00204 
00205     if (div==0.0)
00206       cout << "0" << endl;
00207     else
00208       cout << g*fg/(norm(g)*norm(fg)) << endl;
00209   }
00210 
/**
 * @brief Penalized minimization step for likelihood profiling: drive the
 *        iprof-th profiled quantity toward new_value and record the
 *        resulting objective value.
 *
 * Minimizes  objective + weight*(new_value - profiled_value)^2  with the
 * quasi-Newton minimizer fmm, over up to three "profile phases" with an
 * increasing penalty weight 120^phase (divided by sigma^2 when sigma is
 * nonzero), so the constraint is enforced progressively harder.
 *
 * @param iprof          index into likeprof_params::likeprofptr
 * @param sigma          scale of the profiled quantity (0 allowed; only
 *                       changes how the penalty weight is formed)
 * @param new_value      target value for the profiled quantity
 * @param _fprof         OUT: penalized objective at the solution
 * @param underflow_flag nonzero => skip minimization and set
 *                       fprof = global_min + 20.0
 * @param global_min     global minimum objective, used for the underflow
 *                       sentinel
 * @param _penalties     OUT: final penalty term weight*(new_value-tv)^2
 * @param _final_weight  OUT: penalty weight used in the last phase
 *
 * NOTE(review): the three OUT parameters are passed as const references
 * and cast to writable ones below — the usual ADMB calling convention.
 */
00211 void function_minimizer::prof_minimize(int iprof, double sigma,
00212   double new_value, const double& _fprof, const int underflow_flag,
00213   double global_min, const double& _penalties, const double& _final_weight)
00214    {
      // Strip const so results can be written back through the references.
00215      double& penalties=(double&) _penalties;
00216      double& fprof=(double&) _fprof;
00217      double& final_weight=(double&) _final_weight;
00218     if (!underflow_flag)
00219     {
00220      int max_profile_phases=3;
00221      int profile_phase=1;
      // Activate every parameter regardless of its estimation phase.
00222      initial_params::current_phase = initial_params::max_number_phases;
00223      while (profile_phase <= max_profile_phases)
00224      {
00225       {
00226        int nvar=initial_params::nvarcalc(); // get the number of active
00227               // parameters
00228        dvector g(1,nvar);
00229        independent_variables x(1,nvar);
00230        // get the initial values into the x vector
00231        initial_params::xinit(x);
       // Configure the quasi-Newton minimizer from the model's settings.
00232        fmm fmc(nvar);
00233        fmc.maxfn= maxfn;
00234        fmc.iprint= iprint;
00235        fmc.crit = crit;
00236        fmc.imax = imax;
00237        fmc.dfn= dfn;
00238        fmc.scroll_flag= scroll_flag;
00239        fmc.min_improve=min_improve;
00240        double f=0.0;
00241        gradient_structure::set_YES_DERIVATIVES();
00242        // set convergence criterion for this phase
00243        if (!(!convergence_criteria))
00244        {
00245          int ind=min(convergence_criteria.indexmax(),
00246            initial_params::current_phase);
00247          fmc.crit=convergence_criteria(ind);
00248        }
00249        if (!(!maximum_function_evaluations))
00250        {
00251          int ind=min(maximum_function_evaluations.indexmax(),
00252            initial_params::current_phase);
00253          fmc.maxfn=int(maximum_function_evaluations(ind));
00254        }
00255        int itnsave=0;
00256        //double weight=pow(50.0,profile_phase)/(sigma*sigma);
       // Penalty weight grows geometrically with the profile phase; scaled
       // by 1/sigma^2 unless sigma is zero.
00257        double weight;
00258        if (sigma)
00259        {
00260          weight=pow(120.0,profile_phase)/(sigma*sigma);
00261        }
00262        else
00263        {
00264          weight=pow(120.0,profile_phase);
00265        }
00266        final_weight=weight;
       // Standard ADMB fmm driver loop: fmin() returns with ireturn>0 when
       // it needs a fresh function value and gradient.
00267        while (fmc.ireturn>=0)
00268        {
00269          fmc.fmin(f,x,g);
       // Early stop: after at least one iteration, once the profiled value
       // is within (0.1^iprof)*sigma of (or beyond) the target, exhaust the
       // evaluation budget to force termination.
00270          double diff =
00271            new_value-value(likeprof_params::likeprofptr[iprof]->variable());
00272          if (fmc.itn>itnsave && diff < pow(.1,iprof)*sigma)
00273          {
00274            fmc.ifn=fmc.imax;
00275          }
00276          if (fmc.ireturn>0)
00277          {
       // Evaluate penalized objective: reset penalties + quadratic pull
       // toward new_value + user objective; then its gradient.
00278            dvariable vf=0.0;
00279            vf=initial_params::reset(dvar_vector(x));
00280            *objective_function_value::pobjfun=0.0;
00281            userfunction();
00282            dvariable tv=likeprof_params::likeprofptr[iprof]->variable();
00283            vf+=weight*square(new_value-tv);
00284            vf+=*objective_function_value::pobjfun;
00285            gradcalc(nvar, g, vf);
00286          }
00287        }
00288        gradient_structure::set_NO_DERIVATIVES();
       // Export the minimizer's termination status to the member flags.
00289        iexit=fmc.iexit;
00290        ihflag=fmc.ihflag;
00291        ihang=fmc.ihang;
00292        maxfn_flag=fmc.maxfn_flag;
00293        quit_flag=fmc.quit_flag;
       // Re-evaluate once (without derivatives) at the solution to report
       // fprof and the final penalty.
00294        fprof=value(initial_params::reset(dvar_vector(x)));
00295        *objective_function_value::pobjfun=0.0;
00296        userfunction();
00297        double tv=value(likeprof_params::likeprofptr[iprof]->variable());
00298        fprof+=value(*objective_function_value::pobjfun);
00299        penalties=weight*(new_value-tv)*(new_value-tv);
00300        fprof+=penalties;
00301        if (quit_flag=='Q') break;
00302        if (!quit_flag || quit_flag == 'N')
00303        {
00304          profile_phase++;
00305        }
00306       }
00307      }
00308     }
00309     else
00310     {
      // Underflow: report a sentinel value well above the global minimum.
00311       fprof=global_min+20.0;
00312     }
00313    }