Object-oriented Scientific Computing Library: Version 0.910
00001 /* 00002 ------------------------------------------------------------------- 00003 00004 Copyright (C) 2006-2012, Andrew W. Steiner 00005 00006 This file is part of O2scl. 00007 00008 O2scl is free software; you can redistribute it and/or modify 00009 it under the terms of the GNU General Public License as published by 00010 the Free Software Foundation; either version 3 of the License, or 00011 (at your option) any later version. 00012 00013 O2scl is distributed in the hope that it will be useful, 00014 but WITHOUT ANY WARRANTY; without even the implied warranty of 00015 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00016 GNU General Public License for more details. 00017 00018 You should have received a copy of the GNU General Public License 00019 along with O2scl. If not, see <http://www.gnu.org/licenses/>. 00020 00021 ------------------------------------------------------------------- 00022 */ 00023 #ifndef O2SCL_GSL_MMIN_CONF_H 00024 #define O2SCL_GSL_MMIN_CONF_H 00025 00026 #include <gsl/gsl_blas.h> 00027 #include <gsl/gsl_multimin.h> 00028 #include <o2scl/multi_min.h> 00029 #include <o2scl/misc.h> 00030 #include <o2scl/cblas.h> 00031 00032 #ifndef DOXYGENP 00033 namespace o2scl { 00034 #endif 00035 00036 /** \brief Base minimization routines for gsl_mmin_conf and gsl_mmin_conp 00037 00038 This class is used by the gsl_mmin_conf and gsl_mmin_conp 00039 minimizers to perform the line minimization along a specified 00040 direction. It is not intended for a casual end-user. 
00041 00042 Default template arguments 00043 - \c func_t - \ref multi_funct< \ref ovector_base > 00044 - \c vec_t - \ref ovector_base 00045 - \c alloc_vec_t - \ref ovector 00046 - \c alloc_t - \ref ovector_alloc 00047 - \c dfunc_t - \ref mm_funct< \ref ovector_base > 00048 - \c auto_grad_t - \ref gradient<func_t, \ref ovector_base > 00049 - \c def_auto_grad_t - \ref simple_grad<func_t,\ref ovector_base > 00050 00051 \comment 00052 Doxygen has had trouble parsing this template, so I have 00053 simplified it slightly for the documentation below. 00054 \endcomment 00055 */ 00056 template<class func_t = multi_funct<>, 00057 class vec_t = ovector_base, class alloc_vec_t = ovector, 00058 class alloc_t = ovector_alloc, class dfunc_t = grad_funct<ovector_base>, 00059 class auto_grad_t = gradient<func_t,ovector_base>, 00060 class def_auto_grad_t = simple_grad<func_t,ovector_base> > 00061 class gsl_mmin_base : public multi_min<func_t,func_t,vec_t> { 00062 00063 #ifndef DOXYGEN_INTERNAL 00064 00065 protected: 00066 00067 /// User-specified function 00068 func_t *func; 00069 00070 /// User-specified gradient 00071 dfunc_t *grad; 00072 00073 /// Automatic gradient object 00074 auto_grad_t *agrad; 00075 00076 /// If true, a gradient has been specified 00077 bool grad_given; 00078 00079 /// Memory size 00080 size_t dim; 00081 00082 /// Memory allocation 00083 alloc_t ao; 00084 00085 /// Take a step 00086 void take_step(const vec_t &x, const vec_t &px, 00087 double stepx, double lambda, vec_t &x1x, 00088 vec_t &dx) { 00089 00090 for(size_t i=0;i<this->dim;i++) dx[i]=0.0; 00091 o2scl_cblas::daxpy(-stepx*lambda,this->dim,px,dx); 00092 00093 for(size_t i=0;i<this->dim;i++) x1x[i]=x[i]; 00094 o2scl_cblas::daxpy(1.0,this->dim,dx,x1x); 00095 00096 return; 00097 } 00098 00099 /** \brief Line minimization 00100 00101 Do a line minimisation in the region (xa,fa) (xc,fc) to find 00102 an intermediate (xb,fb) satisifying fa > fb < fc. 
Choose an 00103 initial xb based on parabolic interpolation. 00104 */ 00105 void intermediate_point(const vec_t &x, const vec_t &px, 00106 double lambda, double pg, double stepa, 00107 double stepc, double fa, double fc, 00108 vec_t &x1x, vec_t &dx, vec_t &gradient, 00109 double *stepx, double *f) { 00110 double stepb, fb; 00111 00112 bool trial_failed; 00113 00114 // This looks like an infinite loop, but will eventually 00115 // stop because stepb will eventually become zero. This 00116 // error condition is handled later in iterate(). 00117 00118 do { 00119 00120 double u = fabs (pg * lambda * stepc); 00121 stepb = 0.5 * stepc * u / ((fc - fa) + u); 00122 00123 take_step(x, px, stepb, lambda, x1x, dx); 00124 00125 // Immediately exit if trial point is the same as the 00126 // initial point (as updated in GSL 1.15) 00127 bool vector_equal=true; 00128 for(size_t i=0;i<dim;i++) { 00129 if (x[i]!=x1x[i]) vector_equal=false; 00130 } 00131 if (vector_equal) { 00132 00133 *stepx=0.0; 00134 *f=fa; 00135 00136 // Evaluate the gradient 00137 if (grad_given) { 00138 (*grad)(dim,x1x,gradient); 00139 } else { 00140 (*agrad)(dim,x1x,gradient); 00141 } 00142 00143 return; 00144 } 00145 00146 // Evaluate the function 00147 fb=(*func)(dim,x1x); 00148 00149 trial_failed=false; 00150 if (fb >= fa && stepb > 0.0) { 00151 // [GSL] downhill step failed, reduce step-size and try again 00152 fc = fb; 00153 stepc = stepb; 00154 trial_failed=true; 00155 } 00156 00157 } while (trial_failed); 00158 00159 *stepx = stepb; 00160 *f = fb; 00161 00162 // Evaluate the gradient 00163 if (grad_given) { 00164 (*grad)(dim,x1x,gradient); 00165 } else { 00166 (*agrad)(dim,x1x,gradient); 00167 } 00168 00169 return; 00170 } 00171 00172 /** \brief Perform the minimization 00173 00174 Starting at (x0, f0) move along the direction p to find a 00175 minimum f(x0 - lambda * p), returning the new point x1 = 00176 x0-lambda*p, f1=f(x1) and g1 = grad(f) at x1. 
00177 */ 00178 void minimize(const vec_t &x, const vec_t &xp, double lambda, 00179 double stepa, double stepb, double stepc, double fa, 00180 double fb, double fc, double xtol, vec_t &x1x, 00181 vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient, 00182 double *xstep, double *f, double *gnorm_u) { 00183 00184 double u = stepb; 00185 double v = stepa; 00186 double w = stepc; 00187 double fu = fb; 00188 double fv = fa; 00189 double fw = fc; 00190 00191 double old2 = fabs(w - v); 00192 double old1 = fabs(v - u); 00193 00194 double stepm, fm, pg, gnorm1; 00195 00196 int xiter = 0; 00197 00198 for(size_t i=0;i<dim;i++) { 00199 x2x[i]=x1x[i]; 00200 dx2x[i]=dx1x[i]; 00201 } 00202 00203 *f = fb; 00204 *xstep = stepb; 00205 *gnorm_u = o2scl_cblas::dnrm2(dim,gradient); 00206 00207 while (true) { 00208 00209 xiter++; 00210 00211 if (xiter > nmaxiter) { 00212 // Exceeded maximum number of iterations 00213 return; 00214 } 00215 00216 { 00217 double dw = w - u; 00218 double dv = v - u; 00219 double du = 0.0; 00220 double e1 = ((fv - fu) * dw * dw + (fu - fw) * dv * dv); 00221 double e2 = 2.0 * ((fv - fu) * dw + (fu - fw) * dv); 00222 00223 if (e2 != 0.0) { 00224 du = e1 / e2; 00225 } 00226 00227 if (du > 0.0 && du < (stepc - stepb) && fabs(du) < 0.5 * old2) { 00228 stepm = u + du; 00229 } else if (du < 0.0 && du > (stepa - stepb) && 00230 fabs(du) < 0.5 * old2) { 00231 stepm = u + du; 00232 } else if ((stepc - stepb) > (stepb - stepa)) { 00233 stepm = 0.38 * (stepc - stepb) + stepb; 00234 } else { 00235 stepm = stepb - 0.38 * (stepb - stepa); 00236 } 00237 } 00238 00239 take_step (x, xp, stepm, lambda, x1x, dx1x); 00240 00241 fm=(*func)(dim,x1x); 00242 00243 if (fm > fb) { 00244 00245 if (fm < fv) { 00246 w = v; 00247 v = stepm; 00248 fw = fv; 00249 fv = fm; 00250 } else if (fm < fw) { 00251 w = stepm; 00252 fw = fm; 00253 } 00254 if (stepm < stepb) { 00255 stepa = stepm; 00256 fa = fm; 00257 } else { 00258 stepc = stepm; 00259 fc = fm; 00260 } 00261 00262 } else if (fm <= fb) 
{ 00263 00264 old2 = old1; 00265 old1 = fabs(u - stepm); 00266 w = v; 00267 v = u; 00268 u = stepm; 00269 fw = fv; 00270 fv = fu; 00271 fu = fm; 00272 00273 for(size_t i=0;i<dim;i++) { 00274 x2x[i]=x1x[i]; 00275 dx2x[i]=dx1x[i]; 00276 } 00277 00278 if (grad_given) { 00279 (*grad)(dim,x1x,gradient); 00280 } else { 00281 (*agrad)(dim,x1x,gradient); 00282 } 00283 00284 pg=o2scl_cblas::ddot(dim,xp,gradient); 00285 gnorm1 = o2scl_cblas::dnrm2(dim,gradient); 00286 00287 *f = fm; 00288 *xstep = stepm; 00289 *gnorm_u = gnorm1; 00290 00291 if (fabs (pg * lambda / gnorm1) < xtol) { 00292 // [GSL] SUCCESS 00293 return; 00294 } 00295 00296 if (stepm < stepb) { 00297 stepc = stepb; 00298 fc = fb; 00299 stepb = stepm; 00300 fb = fm; 00301 } else { 00302 stepa = stepb; 00303 fa = fb; 00304 stepb = stepm; 00305 fb = fm; 00306 } 00307 00308 } 00309 00310 } 00311 00312 return; 00313 } 00314 00315 #endif 00316 00317 public: 00318 00319 /// Stepsize for finite-differencing ( default \f$ 10^{-4} \f$ ) 00320 double deriv_h; 00321 00322 /// Maximum iterations for line minimization (default 10) 00323 int nmaxiter; 00324 00325 /// Default automatic \gradient object 00326 def_auto_grad_t def_grad; 00327 00328 gsl_mmin_base() { 00329 deriv_h=1.0e-4; 00330 nmaxiter=10; 00331 grad_given=false; 00332 agrad=&def_grad; 00333 } 00334 00335 /// Set the function 00336 int base_set(func_t &ufunc, auto_grad_t &u_def_grad) { 00337 func=&ufunc; 00338 agrad=&u_def_grad; 00339 grad_given=false; 00340 return 0; 00341 } 00342 00343 /// Set the function and the \gradient 00344 int base_set_de(func_t &ufunc, dfunc_t &udfunc) { 00345 func=&ufunc; 00346 grad=&udfunc; 00347 grad_given=true; 00348 return 0; 00349 } 00350 00351 /// Allocate memory 00352 int base_allocate(size_t nn) { 00353 dim=nn; 00354 return 0; 00355 } 00356 00357 /// Clear allocated memory 00358 int base_free() { 00359 dim=0; 00360 return 0; 00361 } 00362 00363 00364 }; 00365 00366 /** \brief Multidimensional minimization by the Fletcher-Reeves 
00367 conjugate \gradient algorithm (GSL) 00368 00369 This class performs multidimensional minimization by the 00370 Fletcher-Reeves conjugate \gradient algorithm (GSL). The 00371 functions mmin() and mmin_de() \minimize a given function until 00372 the \gradient is smaller than the value of multi_min::tol_rel 00373 (which defaults to \f$ 10^{-4} \f$ ). 00374 00375 This class has a high-level interface using mmin() or mmin_de() 00376 which automatically performs the memory allocation and 00377 minimization, or a GSL-like interface using allocate(), free(), 00378 interate() and set() or set_simplex(). 00379 00380 See an example for the usage of this class 00381 in \ref ex_mmin_sect . 00382 00383 Default template arguments 00384 - \c func_t - \ref multi_funct< \ref ovector_base > 00385 - \c vec_t - \ref ovector_base 00386 - \c alloc_vec_t - \ref ovector 00387 - \c alloc_t - \ref ovector_alloc 00388 - \c dfunc_t - \ref grad_funct< \ref ovector_base > 00389 - \c auto_grad_t - \ref gradient<func_t, \ref ovector_base > 00390 - \c def_auto_grad_t - \ref simple_grad<func_t, \ref ovector_base > 00391 00392 Note that the state variable \c max_iter has not been included 00393 here, because it was not really used in the original GSL code 00394 for these minimizers. 
00395 */ 00396 template<class func_t=multi_funct<>, 00397 class vec_t=ovector_base, class alloc_vec_t=ovector, 00398 class alloc_t=ovector_alloc, class dfunc_t=grad_funct<ovector_base>, 00399 class auto_grad_t=gradient<func_t,ovector_base>, 00400 class def_auto_grad_t=simple_grad<func_t,ovector_base> > 00401 class gsl_mmin_conf : 00402 public gsl_mmin_base<func_t,vec_t,alloc_vec_t,alloc_t,dfunc_t, 00403 auto_grad_t,def_auto_grad_t> { 00404 00405 #ifndef DOXYGEN_INTERNAL 00406 00407 protected: 00408 00409 /// \name The original variables from the GSL state structure 00410 //@{ 00411 /// Iteration number 00412 int iter; 00413 /// Stepsize 00414 double step; 00415 /// Tolerance 00416 double tol; 00417 /// Desc 00418 alloc_vec_t x1; 00419 /// Desc 00420 alloc_vec_t dx1; 00421 /// Desc 00422 alloc_vec_t x2; 00423 /// Desc 00424 double pnorm; 00425 /// Desc 00426 alloc_vec_t p; 00427 /// Desc 00428 double g0norm; 00429 /// Desc 00430 alloc_vec_t g0; 00431 //@} 00432 00433 /// \name Store the arguments to set() so we can use them for iterate() 00434 //@{ 00435 /// Proposed minimum 00436 alloc_vec_t ugx; 00437 /// Gradient 00438 alloc_vec_t ugg; 00439 /// Proposed step 00440 alloc_vec_t udx; 00441 /// Desc 00442 double it_min; 00443 //@} 00444 00445 #endif 00446 00447 public: 00448 00449 gsl_mmin_conf() { 00450 lmin_tol=1.0e-4; 00451 this->tol_rel=1.0e-4; 00452 step_size=0.01; 00453 } 00454 00455 virtual ~gsl_mmin_conf() {} 00456 00457 /// Tolerance for the line minimization (default \f$ 10^{-4} \f$) 00458 double lmin_tol; 00459 00460 /// Size of the initial step (default 0.01) 00461 double step_size; 00462 00463 /// \name GSL-like lower level interface 00464 //@{ 00465 /// Perform an iteration 00466 virtual int iterate() { 00467 00468 this->last_conv=0; 00469 00470 if (this->dim==0) { 00471 O2SCL_ERR2_RET("Memory not allocated in ", 00472 "gsl_mmin_conf::iterate().",gsl_efailed); 00473 } 00474 00475 alloc_vec_t &x=ugx; 00476 alloc_vec_t &gradient=ugg; 00477 alloc_vec_t 
&dx=udx; 00478 00479 double fa = it_min, fb, fc; 00480 double dir; 00481 double stepa = 0.0, stepb, stepc = step; 00482 00483 double g1norm; 00484 double pg; 00485 00486 if (pnorm == 0.0 || g0norm == 0.0) { 00487 for(size_t i=0;i<this->dim;i++) dx[i]=0.0; 00488 this->last_conv=gsl_enoprog; 00489 O2SCL_CONV2_RET("Either pnorm or g0norm vanished in ", 00490 "in gsl_mmin_conf::iterate().", 00491 gsl_enoprog,this->err_nonconv); 00492 } 00493 00494 /* [GSL] Determine which direction is downhill, +p or -p */ 00495 00496 pg=o2scl_cblas::ddot(this->dim,p,gradient); 00497 00498 dir = (pg >= 0.0) ? +1.0 : -1.0; 00499 00500 /* [GSL] Compute new trial point at x_c= x - step * p, where p is the 00501 current direction */ 00502 00503 this->take_step (x, p, stepc, dir / pnorm, x1, dx); 00504 00505 /* [GSL] Evaluate function and gradient at new point xc */ 00506 00507 fc=(*this->func)(this->dim,x1); 00508 00509 if (fc < fa) { 00510 00511 /* [GSL] Success, reduced the function value */ 00512 step = stepc * 2.0; 00513 it_min = fc; 00514 for(size_t i=0;i<this->dim;i++) { 00515 x[i]=x1[i]; 00516 } 00517 00518 if (this->grad_given) { 00519 (*this->grad)(this->dim,x1,gradient); 00520 } else { 00521 (*this->agrad)(this->dim,x1,gradient); 00522 } 00523 00524 return gsl_success; 00525 } 00526 00527 /* [GSL] Do a line minimization in the region (xa,fa) (xc,fc) to find an 00528 intermediate (xb,fb) satisifying fa > fb < fc. 
Choose an initial 00529 xb based on parabolic interpolation 00530 */ 00531 this->intermediate_point(x,p,dir / pnorm,pg,stepa,stepc,fa,fc,x1, 00532 dx1,gradient,&stepb,&fb); 00533 00534 if (stepb == 0.0) { 00535 this->last_conv=gsl_enoprog; 00536 O2SCL_CONV2_RET("Variable stepb vanished in ", 00537 "in gsl_mmin_conf::iterate().", 00538 gsl_enoprog,this->err_nonconv); 00539 } 00540 00541 this->minimize(x,p,dir / pnorm,stepa,stepb,stepc,fa,fb,fc,tol, 00542 x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm); 00543 00544 for(size_t i=0;i<this->dim;i++) x[i]=x2[i]; 00545 00546 /* [GSL] Choose a new conjugate direction for the next step */ 00547 iter=(iter+1) % this->dim; 00548 00549 if (iter==0) { 00550 00551 for(size_t i=0;i<this->dim;i++) p[i]=gradient[i]; 00552 pnorm=g1norm; 00553 00554 } else { 00555 00556 /* [GSL] p' = g1 - beta * p */ 00557 double beta=-pow(g1norm/g0norm, 2.0); 00558 o2scl_cblas::dscal(-beta,this->dim,p); 00559 o2scl_cblas::daxpy(1.0,this->dim,gradient,p); 00560 pnorm=o2scl_cblas::dnrm2(this->dim,p); 00561 } 00562 00563 g0norm=g1norm; 00564 for(size_t i=0;i<this->dim;i++) { 00565 g0[i]=gradient[i]; 00566 } 00567 00568 return gsl_success; 00569 } 00570 00571 /** \brief Allocate the memory 00572 */ 00573 virtual int allocate(size_t n) { 00574 00575 this->ao.allocate(x1,n); 00576 this->ao.allocate(dx1,n); 00577 this->ao.allocate(x2,n); 00578 this->ao.allocate(p,n); 00579 this->ao.allocate(g0,n); 00580 this->ao.allocate(udx,n); 00581 this->ao.allocate(ugg,n); 00582 this->ao.allocate(ugx,n); 00583 00584 this->base_allocate(n); 00585 00586 return gsl_success; 00587 00588 } 00589 00590 /// Free the allocated memory 00591 virtual int free() { 00592 00593 this->ao.free(ugx); 00594 this->ao.free(ugg); 00595 this->ao.free(udx); 00596 this->ao.free(g0); 00597 this->ao.free(p); 00598 this->ao.free(x2); 00599 this->ao.free(dx1); 00600 this->ao.free(x1); 00601 00602 this->base_free(); 00603 00604 return 0; 00605 } 00606 00607 /// Reset the minimizer to use the current 
point as a new starting point 00608 int restart() { 00609 iter=0; 00610 return gsl_success; 00611 } 00612 00613 /// Set the function and initial guess 00614 virtual int set(vec_t &x, double u_step_size, double tol_u, 00615 func_t &ufunc) { 00616 00617 if (this->dim==0) { 00618 O2SCL_ERR2_RET("Memory not allocated in ", 00619 "gsl_mmin_conf::set().",gsl_efailed); 00620 } 00621 00622 this->base_set(ufunc,*this->agrad); 00623 for(size_t i=0;i<this->dim;i++) ugx[i]=x[i]; 00624 00625 iter=0; 00626 step=u_step_size; 00627 tol=tol_u; 00628 00629 /// Evaluate the function and its gradient 00630 it_min=ufunc(this->dim,x); 00631 this->agrad->set_function(ufunc); 00632 (*this->agrad)(this->dim,x,ugg); 00633 00634 /* [GSL] Use the gradient as the initial direction */ 00635 00636 for(size_t i=0;i<this->dim;i++) { 00637 p[i]=ugg[i]; 00638 g0[i]=ugg[i]; 00639 } 00640 00641 double gnorm=o2scl_cblas::dnrm2(this->dim,ugg); 00642 00643 pnorm=gnorm; 00644 g0norm=gnorm; 00645 00646 return gsl_success; 00647 } 00648 00649 /// Set the function and initial guess 00650 virtual int set_de(vec_t &x, double u_step_size, double tol_u, 00651 func_t &ufunc, dfunc_t &udfunc) { 00652 00653 if (this->dim==0) { 00654 O2SCL_ERR2_RET("Memory not allocated in ", 00655 "gsl_mmin_conf::set_de().",gsl_efailed); 00656 } 00657 00658 this->base_set_de(ufunc,udfunc); 00659 for(size_t i=0;i<this->dim;i++) ugx[i]=x[i]; 00660 00661 iter=0; 00662 step=u_step_size; 00663 tol=tol_u; 00664 00665 // Evaluate the function and its gradient 00666 it_min=ufunc(this->dim,x); 00667 udfunc(this->dim,x,ugg); 00668 00669 /* Use the gradient as the initial direction */ 00670 00671 for(size_t i=0;i<this->dim;i++) { 00672 p[i]=ugg[i]; 00673 g0[i]=ugg[i]; 00674 } 00675 00676 double gnorm=o2scl_cblas::dnrm2(this->dim,ugg); 00677 00678 pnorm=gnorm; 00679 g0norm=gnorm; 00680 00681 return gsl_success; 00682 } 00683 //@} 00684 00685 /// \name Basic usage 00686 //@{ 00687 /** \brief Calculate the minimum \c min of \c func w.r.t the 
00688 array \c x of size \c nvar. 00689 */ 00690 virtual int mmin(size_t nn, vec_t &xx, double &fmin, 00691 func_t &ufunc) { 00692 00693 if (nn==0) { 00694 O2SCL_ERR2_RET("Tried to minimize over zero variables ", 00695 "in gsl_mmin_conf::mmin().",gsl_einval); 00696 } 00697 00698 int xiter=0, status; 00699 00700 allocate(nn); 00701 00702 set(xx,step_size,lmin_tol,ufunc); 00703 00704 do { 00705 00706 xiter++; 00707 00708 status=iterate(); 00709 00710 if (status) { 00711 break; 00712 } 00713 00714 // Equivalent to gsl_multimin_test_gradient with 00715 // additional code to print out present iteration 00716 00717 double norm=o2scl_cblas::dnrm2(nn,ugg); 00718 00719 if(this->verbose>0) { 00720 this->print_iter(nn,ugx,it_min,xiter, 00721 norm,this->tol_rel,type()); 00722 } 00723 00724 if (norm<this->tol_rel) status=gsl_success; 00725 else status=gsl_continue; 00726 00727 } while (status==gsl_continue && xiter < this->ntrial); 00728 00729 for(size_t i=0;i<nn;i++) xx[i]=ugx[i]; 00730 fmin=it_min; 00731 00732 free(); 00733 this->last_ntrial=xiter; 00734 00735 if (status==gsl_continue && xiter==this->ntrial) { 00736 this->last_conv=gsl_emaxiter; 00737 std::string err="Exceeded max number of iterations, "+ 00738 dtos(this->ntrial)+", in gsl_mmin_conf::mmin()."; 00739 O2SCL_CONV_RET(err.c_str(),gsl_emaxiter,this->err_nonconv); 00740 } 00741 00742 return status; 00743 } 00744 00745 /** \brief Calculate the minimum \c min of \c func w.r.t the 00746 array \c x of size \c nvar. 
00747 */ 00748 virtual int mmin_de(size_t nn, vec_t &xx, double &fmin, 00749 func_t &ufunc, dfunc_t &udfunc) { 00750 00751 if (nn==0) { 00752 O2SCL_ERR2_RET("Tried to minimize over zero variables ", 00753 "in gsl_mmin_conf::mmin_de().",gsl_einval); 00754 } 00755 00756 int xiter=0, status; 00757 00758 allocate(nn); 00759 00760 set_de(xx,step_size,lmin_tol,ufunc,udfunc); 00761 00762 do { 00763 xiter++; 00764 00765 status=iterate(); 00766 00767 if (status) { 00768 break; 00769 } 00770 00771 // Equivalent to gsl_multimin_test_gradient with 00772 // additional code to print out present iteration 00773 00774 double norm=o2scl_cblas::dnrm2(nn,ugg); 00775 00776 if(this->verbose>0) { 00777 this->print_iter(nn,ugx,it_min,xiter, 00778 norm,this->tol_rel,type()); 00779 } 00780 00781 if (norm<this->tol_rel) status=gsl_success; 00782 else status=gsl_continue; 00783 00784 } 00785 while (status==gsl_continue && xiter < this->ntrial); 00786 00787 for(size_t i=0;i<nn;i++) xx[i]=ugx[i]; 00788 fmin=it_min; 00789 00790 free(); 00791 this->last_ntrial=xiter; 00792 00793 if (status==gsl_continue && xiter==this->ntrial) { 00794 this->last_conv=gsl_emaxiter; 00795 std::string err="Exceeded max number of iterations, "+ 00796 dtos(this->ntrial)+", in gsl_mmin_conf::mmin()."; 00797 O2SCL_CONV_RET(err.c_str(),gsl_emaxiter,this->err_nonconv); 00798 } 00799 00800 return status; 00801 } 00802 //@} 00803 00804 /// Return string denoting type("gsl_mmin_conf") 00805 virtual const char *type() { return "gsl_mmin_conf";} 00806 00807 }; 00808 00809 #ifndef DOXYGENP 00810 } 00811 #endif 00812 00813 #endif
Documentation generated with Doxygen. Provided under the GNU Free Documentation License (see License Information).