ADMB Documentation  11.1.2497
 All Classes Files Functions Variables Typedefs Friends Defines
newmodm2.cpp
Go to the documentation of this file.
00001 /*
00002  * $Id: newmodm2.cpp 2442 2014-10-01 19:54:53Z johnoel $
00003  *
00004  * Author: David Fournier
00005  * Copyright (c) 2008-2012 Regents of the University of California
00006  */
00007 #include <admodel.h>
00008 #include <df1b2fun.h>
00009 #include <adrndeff.h>
00010 
00011 double function_minimizer::projected_hess_determinant(const dvector& g,
00012   const int underflow_flag)
00013 {
00014  double lndet=0.0;
00015  //double ld1=0.0;
00016  //double ld2=0.0;
00017  //char ch;
00018  if (!underflow_flag)
00019  {
00020    int sgn=0;
00021     adstring tmpstring="admodel.hes";
00022     if (ad_comm::wd_flag)
00023        tmpstring = ad_comm::adprogram_name + ".hes";
00024     uistream ifs((char*)tmpstring);
00025 
00026   if (!ifs)
00027   {
00028     cerr << "Error opening file admodel.hes" << endl;
00029   }
00030   int nvar = 0;
00031   ifs >> nvar;
00032   dmatrix S(1,nvar,1,nvar);
00033   if (nvar > 0)
00034   {
00035     if ((size_t)nvar != initial_params::nvarcalc())
00036     {
00037       cout << "the number of independent variables is wrong in admodel.hes"
00038          << endl;
00039     }
00040     dmatrix p(1,nvar,1,nvar);
00041     //dmatrix p1(1,nvar-1,1,nvar);
00042     dmatrix h(1,nvar,1,nvar);
00043     //dmatrix gram(1,nvar-1,1,nvar-1);
00044     dvector ss(1,nvar);
00045     ifs >> h;
00046     if (!ifs)
00047     {
00048       cerr << "Error reading the hessian from file admodel.hes" << endl;
00049     }
00050     dvector n=g/norm(g);
00051     // project the standard basis vectors onto the tangent space
00052     int i;
00053     for (i=1;i<=nvar;i++)
00054     {
00055       p(i)=-n(i)*n;
00056       p(i,i)+=1;
00057     }
00058     //cout << "p*n" << endl;
00059     //cout << p*n << endl;
00060     //cin >> ch;
00061 
00062     for (i=1;i<=nvar;i++)
00063     {
00064       ss(i)=norm(p(i));
00065     }
00066 
00067   /*
00068     double minsize=min(ss);
00069     for (i=1;i<=nvar;i++)
00070     {
00071       if (ss(i)==minsize) break;
00072       p1(i)=p(i);
00073     }
00074 
00075     for (int ii=i+1;ii<=nvar;ii++)
00076     {
00077       p1(ii-1)=p(ii);
00078     }
00079    */
00080 
00081     for (i=1;i<=nvar;i++)
00082     {
00083       for (int j=1;j<i;j++)
00084       {
00085         double tmp=(h(i,j)+h(j,i))/2.;
00086         h(i,j)=tmp;
00087         h(j,i)=tmp;
00088       }
00089     }
00090 
00091     //cout << "A" << endl;
00092    /*
00093     for (i=1;i<nvar;i++)
00094     {
00095       dvector tmp = h*p1(i);
00096      // cout << "aA" << endl;
00097       for (int j=1;j<nvar;j++)
00098       {
00099         gram(i,j)=tmp*p1(j);
00100       }
00101     }
00102     cout << "B" << endl;
00103     ld1=ln_det(gram,sgn);
00104     cout << "CX" << endl;
00105     for (i=1;i<nvar;i++)
00106     {
00107       for (int j=1;j<nvar;j++)
00108       {
00109         gram(i,j)=p1(i)*p1(j);
00110       }
00111       if (norm(p1(i))==0) cout <<"norm p1 =0 " << endl;
00112     }
00113  //   cout << "D" << endl;
00114 
00115     ld2=ln_det(gram,sgn);
00116    // cout << "E" << endl;
00117    */
00118 
00119  //   cout << "eigs of h" << endl;
00120    // cout << sort(eigenvalues(h)) << endl << endl;
00121 
00122  //   cout << "F" << endl;
00123     for (i=1;i<=nvar;i++)
00124     {
00125       dvector tmp=h*p(i);
00126       for (int j=1;j<=i;j++)
00127       {
00128         S(i,j)=tmp*p(j)+n(i)*n(j);
00129       }
00130     }
00131  //   cout << "G" << endl;
00132 
00133     for (i=1;i<=nvar;i++)
00134     {
00135       for (int j=1;j<i;j++)
00136       {
00137         S(j,i)=S(i,j);
00138       }
00139     }
00140   }
00141  // cout << "eigs of S" << endl;
00142  // cout << sort(eigenvalues(S)) << endl << endl;
00143   lndet=ln_det(S,sgn);
00144  // cout << "old det = " << lndet << endl;
00145  // lndet=ld1-ld2;
00146  // cout << "new det = " << lndet  << "  "  << ld1 << "  " << ld2 << endl;
00147   //char ch;
00148   //cin >> ch;
00149   //double lndet2=0.;
00150   if (sgn <= 0)
00151   {
00152     cerr << "Error restricted Hessian is not positive definite" << endl;
00153   }
00154   {
00155   //  cout << "doing S eigenvalues" << endl;
00156   //  dvector eig=eigenvalues(S);
00157   //  cout << "finished S eigenvalues" << endl;
00158     //lndet2=sum(log(eig));
00159   //  cout << sort(eig) << endl << endl;
00160   }
00161  }
00162  else
00163  {
00164    lndet=-50.0;
00165  }
00166  return lndet;
00167 }
00168 
00169 void function_minimizer::get_particular_grad(int iprof,int nvar,
00170   const dvector& fg, const dvector& g)
00171   {
00172     independent_variables x(1,nvar);
00173     // get the initial values into the x vector
00174     initial_params::xinit(x);
00175     dvariable vf=0.0;
00176     gradient_structure::set_YES_DERIVATIVES();
00177     vf=initial_params::reset(dvar_vector(x));
00178       if (lapprox)
00179       {
00180         if (lapprox->hesstype==2)
00181         {
00182           lapprox->separable_calls_counter=0;
00183         }
00184       }
00185     userfunction();
00186     vf=likeprof_params::likeprofptr[iprof]->variable();
00187     gradcalc(nvar, g, vf);
00188 
00189     vf=0.0;
00190     vf=initial_params::reset(dvar_vector(x));
00191     *objective_function_value::pobjfun=0.0;
00192       if (lapprox)
00193       {
00194         if (lapprox->hesstype==2)
00195         {
00196           lapprox->separable_calls_counter=0;
00197         }
00198       }
00199     userfunction();
00200     vf+=*objective_function_value::pobjfun;
00201     gradcalc(nvar, fg, vf);
00202 
00203     gradient_structure::set_NO_DERIVATIVES();
00204     double div=norm(g)*norm(fg);
00205 
00206     if (div==0.0)
00207       cout << "0" << endl;
00208     else
00209       cout << g*fg/(norm(g)*norm(fg)) << endl;
00210   }
00211 
/**
 * Penalized re-minimization used while tracing a likelihood profile:
 * minimizes the objective function plus a quadratic penalty
 * weight*(new_value - profiled_quantity)^2 that pins the iprof-th
 * profiled quantity near new_value, over up to three phases with an
 * increasing penalty weight pow(120, phase).
 *
 * NOTE(review): the const& parameters below are cast to non-const and
 * used as out-parameters — legacy ADMB calling convention.
 *
 * \param iprof          index into likeprof_params::likeprofptr
 * \param sigma          scale of the profiled quantity; 0 is allowed
 *                       (the 1/sigma^2 factor is then omitted)
 * \param new_value      target value for the profiled quantity
 * \param _fprof         (output) penalized minimum objective value
 * \param underflow_flag nonzero => skip minimization, set fprof to
 *                       global_min + 20.0
 * \param global_min     global minimum value, used for the underflow
 *                       sentinel
 * \param _penalties     (output) final penalty term
 * \param _final_weight  (output) final penalty weight used
 */
00212 void function_minimizer::prof_minimize(int iprof, double sigma,
00213   double new_value, const double& _fprof, const int underflow_flag,
00214   double global_min, const double& _penalties, const double& _final_weight)
00215    {
       // strip const to write results back through the references
00216      double& penalties=(double&) _penalties;
00217      double& fprof=(double&) _fprof;
00218      double& final_weight=(double&) _final_weight;
00219     if (!underflow_flag)
00220     {
00221      int max_profile_phases=3;
00222      int profile_phase=1;
       // run with every parameter active (final phase)
00223      initial_params::current_phase = initial_params::max_number_phases;
00224      while (profile_phase <= max_profile_phases)
00225      {
00226       {
00227        int nvar=initial_params::nvarcalc(); // get the number of active
00228               // parameters
00229        dvector g(1,nvar);
00230        independent_variables x(1,nvar);
00231        // get the initial values into the x vector
00232        initial_params::xinit(x);
       // quasi-Newton driver configured from the minimizer's settings
00233        fmm fmc(nvar);
00234        fmc.maxfn= maxfn;
00235        fmc.iprint= iprint;
00236        fmc.crit = crit;
00237        fmc.imax = imax;
00238        fmc.dfn= dfn;
00239        fmc.scroll_flag= scroll_flag;
00240        fmc.min_improve=min_improve;
00241        double f=0.0;
00242        gradient_structure::set_YES_DERIVATIVES();
00243        // set convergence criterion for this phase
00244        if (!(!convergence_criteria))
00245        {
00246          int ind=min(convergence_criteria.indexmax(),
00247            initial_params::current_phase);
00248          fmc.crit=convergence_criteria(ind);
00249        }
       // per-phase cap on function evaluations, if the user supplied one
00250        if (!(!maximum_function_evaluations))
00251        {
00252          int ind=min(maximum_function_evaluations.indexmax(),
00253            initial_params::current_phase);
00254          fmc.maxfn=int(maximum_function_evaluations(ind));
00255        }
00256        int itnsave=0;
00257        //double weight=pow(50.0,profile_phase)/(sigma*sigma);
       // penalty weight grows geometrically with the profile phase;
       // the 1/sigma^2 scaling is skipped when sigma == 0
00258        double weight;
00259        if (sigma)
00260        {
00261          weight=pow(120.0,profile_phase)/(sigma*sigma);
00262        }
00263        else
00264        {
00265          weight=pow(120.0,profile_phase);
00266        }
00267        final_weight=weight;
       // standard fmin()/gradcalc() reverse-communication loop
00268        while (fmc.ireturn>=0)
00269        {
00270          fmc.fmin(f,x,g);
00271          double diff =
00272            new_value-value(likeprof_params::likeprofptr[iprof]->variable());
       // once past the first iteration, if the profiled quantity is
       // close enough to the target, apparently force the driver to
       // its evaluation cap so it stops early — TODO confirm intent
00273          if (fmc.itn>itnsave && diff < pow(.1,iprof)*sigma)
00274          {
00275            fmc.ifn=fmc.imax;
00276          }
00277          if (fmc.ireturn>0)
00278          {
       // evaluate penalized objective and its gradient at x
00279            dvariable vf=0.0;
00280            vf=initial_params::reset(dvar_vector(x));
00281            *objective_function_value::pobjfun=0.0;
00282            userfunction();
00283            dvariable tv=likeprof_params::likeprofptr[iprof]->variable();
00284            vf+=weight*square(new_value-tv);
00285            vf+=*objective_function_value::pobjfun;
00286            gradcalc(nvar, g, vf);
00287          }
00288        }
00289        gradient_structure::set_NO_DERIVATIVES();
       // copy the driver's exit status into the minimizer's fields
00290        iexit=fmc.iexit;
00291        ihflag=fmc.ihflag;
00292        ihang=fmc.ihang;
00293        maxfn_flag=fmc.maxfn_flag;
00294        quit_flag=fmc.quit_flag;
       // re-evaluate at the final x to report the penalized minimum
00295        fprof=value(initial_params::reset(dvar_vector(x)));
00296        *objective_function_value::pobjfun=0.0;
00297        userfunction();
00298        double tv=value(likeprof_params::likeprofptr[iprof]->variable());
00299        fprof+=value(*objective_function_value::pobjfun);
00300        penalties=weight*(new_value-tv)*(new_value-tv);
00301        fprof+=penalties;
       // 'Q' = user quit; otherwise advance to the next profile phase
00302        if (quit_flag=='Q') break;
00303        if (!quit_flag || quit_flag == 'N')
00304        {
00305          profile_phase++;
00306        }
00307       }
00308      }
00309     }
00310     else
00311     {
       // underflow detected by the caller: report a sentinel value
00312       fprof=global_min+20.0;
00313     }
00314    }