/* -------------------------------------------------------------------
   
   Copyright (C) 2006, 2007, 2008, 2009, Andrew W. Steiner
   
   This file is part of O2scl.
   
   O2scl is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.
   
   O2scl is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with O2scl. If not, see <http://www.gnu.org/licenses/>.
   
   -------------------------------------------------------------------
*/
#ifndef O2SCL_GSL_MMIN_CONF_H
#define O2SCL_GSL_MMIN_CONF_H

#include <gsl/gsl_blas.h>
#include <gsl/gsl_multimin.h>
#include <o2scl/multi_min.h>

#ifndef DOXYGENP
namespace o2scl {
#endif
  
  /** 
      \brief Base minimization routines for gsl_mmin_conf and gsl_mmin_conp
      
      This class is used by the gsl_mmin_conf and gsl_mmin_conp
      minimizers to perform the line minimization along a 
      specified direction. It is not intended for a casual end-user.
      
      Default template arguments
      - \c param_t - no default
      - \c func_t - \ref multi_funct<param_t>
      - \c vec_t - \ref ovector_base
      - \c alloc_vec_t - \ref ovector
      - \c alloc_t - \ref ovector_alloc
      - \c dfunc_t - \ref grad_funct<param_t,ovector_base>
      - \c auto_grad_t - \ref gradient<param_t,func_t,ovector_base>
      - \c def_auto_grad_t - \ref simple_grad<param_t,func_t,ovector_base>
      
      \comment
      Doxygen has had trouble parsing this template, so I have 
      simplified it slightly for the documentation below.
      \endcomment
  */
#ifdef DOXYGENP
  template<class param_t, class func_t = multi_funct<param_t>, 
    class vec_t = ovector_base, class alloc_vec_t = ovector, 
    class alloc_t = ovector_alloc, 
    class dfunc_t = grad_funct<param_t,ovector_base>,
    class auto_grad_t = gradient<param_t,func_t,ovector_base>,
    class def_auto_grad_t = simple_grad<param_t,func_t,ovector_base> >
    class gsl_mmin_base : public multi_min
#else
  template<class param_t, class func_t = multi_funct<param_t>, 
    class vec_t = ovector_base, class alloc_vec_t = ovector, 
    class alloc_t = ovector_alloc, 
    class dfunc_t = grad_funct<param_t,ovector_base>,
    class auto_grad_t = gradient<param_t,func_t,ovector_base>,
    class def_auto_grad_t = simple_grad<param_t,func_t,ovector_base> >
    class gsl_mmin_base : public multi_min<param_t,func_t,func_t,vec_t>
#endif
  {
    
#ifndef DOXYGEN_INTERNAL
    
  protected:
    
    /// User-specified function
    func_t *func;
    
    /// User-specified gradient
    dfunc_t *grad;
    
    /// Automatic gradient object, used when no user gradient is given
    auto_grad_t *agrad;
    
    /// If true, a gradient has been specified
    bool grad_given;
    
    /// User-specified parameter
    param_t *params;

    /// Memory size (dimension of the minimization problem)
    size_t dim;

    /// Memory allocation object
    alloc_t ao;

    /// Temporary vector (holds the trial point when evaluating the function)
    alloc_vec_t avt;

    /// Temporary vector (holds the gradient after evaluation)
    alloc_vec_t avt2;
    
    /** \brief Take a step
        
        Computes the step <tt>dx = -stepx*lambda*px</tt> and the new
        trial point <tt>x1x = x + dx</tt>.
    */
    void take_step(const gsl_vector *x, const gsl_vector *px, 
                   double stepx, double lambda, gsl_vector *x1x, 
                   gsl_vector *dx) {
      
      // dx = -stepx*lambda*px
      gsl_vector_set_zero(dx);
      gsl_blas_daxpy(-stepx*lambda,px,dx);
      
      // x1x = x + dx
      gsl_vector_memcpy(x1x,x);
      gsl_blas_daxpy(1.0,dx,x1x);
      
      return;
    }
    
    /** 
        \brief Line minimization
        
        Do a line minimisation in the region (xa,fa) (xc,fc) to find
        an intermediate (xb,fb) satisfying fa > fb < fc. Choose an
        initial xb based on parabolic interpolation.
        
        On success, *stepx receives the step length and *f the
        function value at the new point; the gradient at the new
        point is stored in \c gradient.
    */
    void intermediate_point(const gsl_vector *x, const gsl_vector *px,
                            double lambda, double pg, double stepa, 
                            double stepc, double fa, double fc, 
                            gsl_vector *x1x, gsl_vector *dx, 
                            gsl_vector *gradient, double *stepx, double *f) {
      double stepb, fb;
      
      bool trial_failed;
      
      // This looks like an infinite loop, but will eventually
      // stop because stepb will eventually become zero. This
      // error condition is handled later in iterate().
      do {
        
        // Parabolic interpolation for the trial step
        double u = fabs (pg * lambda * stepc);
        stepb = 0.5 * stepc * u / ((fc - fa) + u);
        
        take_step (x, px, stepb, lambda, x1x, dx);
        
        // Evaluate the function
        for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
        (*func)(dim,avt,fb,*params);
        
        trial_failed=false;
        if (fb >= fa && stepb > 0.0) {
          /* downhill step failed, reduce step-size and try again */
          fc = fb;
          stepc = stepb;
          trial_failed=true;
        }
        
      } while (trial_failed);
      
      *stepx = stepb;
      *f = fb;
      
      // Evaluate the gradient, either with the user-specified
      // gradient object or the automatic one
      if (grad_given) {
        (*grad)(dim,avt,avt2,*params);
      } else {
        (*agrad)(dim,avt,avt2,*params);
      }
      for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);
      
      return;
    }
    
    /** 
        \brief Perform the minimization
        
        Starting at (x0, f0) move along the direction p to find a
        minimum f(x0 - lambda * p), returning the new point x1 =
        x0-lambda*p, f1=f(x1) and g1 = grad(f) at x1.
        
        \future Remove goto statements
    */
    void minimize(const gsl_vector *x, const gsl_vector *xp, double lambda,
                  double stepa, double stepb, double stepc, double fa, 
                  double fb, double fc, double xtol, gsl_vector *x1x, 
                  gsl_vector *dx1x, gsl_vector *x2x, gsl_vector *dx2x,
                  gsl_vector *gradient, double *xstep, double *f, 
                  double *gnorm_u) {
      
      // Bracketing triple (v,fv), (u,fu), (w,fw) with u the current best
      double u = stepb;
      double v = stepa;
      double w = stepc;
      double fu = fb;
      double fv = fa;
      double fw = fc;
      
      // Widths of the two previous bracketing intervals
      double old2 = fabs(w - v);
      double old1 = fabs(v - u);
      
      double stepm, fm, pg, gnorm1;
      
      int xiter = 0;
      
      gsl_vector_memcpy (x2x, x1x);
      gsl_vector_memcpy (dx2x, dx1x);
      
      *f = fb;
      *xstep = stepb;
      *gnorm_u = gsl_blas_dnrm2(gradient);
      
    mid_trial:
      
      xiter++;
      
      if (xiter > nmaxiter) {
        // Exceeded maximum number of iterations
        return; 
      }
      
      {
        // Trial step by parabolic interpolation through (u,fu),
        // (v,fv), (w,fw), falling back to a golden-section-like
        // step (factor 0.38) when the parabolic step is unusable
        double dw = w - u;
        double dv = v - u;
        double du = 0.0;
        double e1 = ((fv - fu) * dw * dw + (fu - fw) * dv * dv);
        double e2 = 2.0 * ((fv - fu) * dw + (fu - fw) * dv);
        
        if (e2 != 0.0) {
          du = e1 / e2;
        }
        
        if (du > 0.0 && du < (stepc - stepb) && fabs(du) < 0.5 * old2) {
          stepm = u + du;
        } else if (du < 0.0 && du > (stepa - stepb) && 
                   fabs(du) < 0.5 * old2) {
          stepm = u + du;
        } else if ((stepc - stepb) > (stepb - stepa)) {
          stepm = 0.38 * (stepc - stepb) + stepb;
        } else {
          stepm = stepb - 0.38 * (stepb - stepa);
        }
      }
      
      take_step (x, xp, stepm, lambda, x1x, dx1x);
      
      // Evaluate the function at the trial point
      for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
      (*func)(dim,avt,fm,*params);
      
      if (fm > fb) {
        
        // Trial point is worse than the current minimum: shrink
        // the bracketing interval and try again
        if (fm < fv) {
          w = v;
          v = stepm;
          fw = fv;
          fv = fm;
        } else if (fm < fw) {
          w = stepm;
          fw = fm;
        }
        if (stepm < stepb) {
          stepa = stepm;
          fa = fm;
        } else {
          stepc = stepm;
          fc = fm;
        }
        goto mid_trial;
        
      } else if (fm <= fb) {
        
        // Trial point improves (or matches) the minimum: accept it
        old2 = old1;
        old1 = fabs(u - stepm);
        w = v;
        v = u;
        u = stepm;
        fw = fv;
        fv = fu;
        fu = fm;
        
        gsl_vector_memcpy (x2x, x1x);
        gsl_vector_memcpy (dx2x, dx1x);
        
        // Evaluate the gradient at the new point
        for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
        if (grad_given) {
          (*grad)(dim,avt,avt2,*params);
        } else {
          (*agrad)(dim,avt,avt2,*params);
        }
        for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);
        
        gsl_blas_ddot (xp, gradient, &pg);
        gnorm1 = gsl_blas_dnrm2 (gradient);
        
        *f = fm;
        *xstep = stepm;
        *gnorm_u = gnorm1;
        
        // Converged when the directional derivative is small
        // relative to the gradient norm
        if (fabs (pg * lambda / gnorm1) < xtol) {
          /* SUCCESS */
          return; 
        }
        
        if (stepm < stepb) {
          stepc = stepb;
          fc = fb;
          stepb = stepm;
          fb = fm;
        } else {
          stepa = stepb;
          fa = fb;
          stepb = stepm;
          fb = fm;
        }
        goto mid_trial;
      }
      
      return;
    }
    
#endif
    
  public:
    
    /// Stepsize for finite-differencing ( default \f$ 10^{-4} \f$ )
    double deriv_h;
    
    /// Maximum iterations for line minimization (default 10)
    int nmaxiter;
    
    /// Default automatic \gradient object
    def_auto_grad_t def_grad;
    
    gsl_mmin_base() {
      deriv_h=1.0e-4;
      nmaxiter=10;
      grad_given=false;
      agrad=&def_grad;
    }
    
    /// Set the function
    int base_set(func_t &ufunc, param_t &pa, auto_grad_t &u_def_grad) {
      func=&ufunc;
      params=&pa;
      agrad=&u_def_grad;
      grad_given=false;
      return 0;
    }
    
    /// Set the function and the \gradient
    int base_set_de(func_t &ufunc, dfunc_t &udfunc, param_t &pa) {
      func=&ufunc;
      params=&pa;
      grad=&udfunc;
      grad_given=true;
      return 0;
    }
    
    /// Allocate memory for the temporary vectors
    int base_allocate(size_t nn) {
      ao.allocate(avt,nn);
      ao.allocate(avt2,nn);
      dim=nn;
      return 0;
    }
    
    /// Clear allocated memory
    int base_free() {
      ao.free(avt);
      ao.free(avt2);
      dim=0;
      return 0;
    }
    
  };
  
  /** \brief Multidimensional minimization by the Fletcher-Reeves 
      conjugate \gradient algorithm (GSL)
      
      The functions mmin() and mmin_de() \minimize a given function
      until the \gradient is smaller than the value of multi_min::tolf
      (which defaults to \f$ 10^{-4} \f$ ).
      
      See an example for the usage of this class
      in \ref ex_mmin_sect .
      
      \future A bit of needless copying is required in the function
      wrapper to convert from \c gsl_vector to the templated vector 
      type. This can be fixed, probably by rewriting take_step
      to produce a vec_t &x1 rather than a gsl_vector *x1;
      
      Note that the state variable \c max_iter has not been included
      here, because it was not really used in the original GSL code
      for these minimizers.
      
      Default template arguments
      - \c param_t - no default
      - \c func_t - \ref multi_funct<param_t>
      - \c vec_t - \ref ovector_base
      - \c alloc_vec_t - \ref ovector
      - \c alloc_t - \ref ovector_alloc
      - \c dfunc_t - \ref grad_funct<param_t,ovector_base>
      - \c auto_grad_t - \ref gradient<param_t,func_t,ovector_base>
      - \c def_auto_grad_t - \ref simple_grad<param_t,func_t,ovector_base>
      
      \comment
      \bug The examples in ex_mmin.cpp fail because they always
      iterate until ntrial is maxxed.
(1/18/09 - They don't fail 00403 right now, and I think this was either fixed by properly ensuring 00404 that the derivative evaluation was sufficiently accurate, or 00405 by adding the it_info variable and ensuring iterate() doesn't 00406 unreasonably return a non-zero value. ) 00407 \endcomment 00408 */ 00409 template<class param_t, class func_t=multi_funct<param_t>, 00410 class vec_t=ovector_base, class alloc_vec_t=ovector, 00411 class alloc_t=ovector_alloc, 00412 class dfunc_t=grad_funct<param_t,ovector_base>, 00413 class auto_grad_t=gradient<param_t,func_t,ovector_base>, 00414 class def_auto_grad_t=simple_grad<param_t,func_t,ovector_base> > 00415 class gsl_mmin_conf : 00416 public gsl_mmin_base<param_t,func_t,vec_t,alloc_vec_t,alloc_t,dfunc_t, 00417 auto_grad_t,def_auto_grad_t> { 00418 00419 #ifndef DOXYGEN_INTERNAL 00420 00421 protected: 00422 00423 /// \name The original variables from the GSL state structure 00424 //@{ 00425 /// Iteration number 00426 int iter; 00427 /// Stepsize 00428 double step; 00429 /// Tolerance 00430 double tol; 00431 /// Desc 00432 gsl_vector *x1; 00433 /// Desc 00434 gsl_vector *dx1; 00435 /// Desc 00436 gsl_vector *x2; 00437 /// Desc 00438 double pnorm; 00439 /// Desc 00440 gsl_vector *p; 00441 /// Desc 00442 double g0norm; 00443 /// Desc 00444 gsl_vector *g0; 00445 //@} 00446 00447 /// \name Store the arguments to set() so we can use them for iterate() 00448 //@{ 00449 /// Proposed minimum 00450 gsl_vector *ugx; 00451 /// Gradient 00452 gsl_vector *ugg; 00453 /// Proposed step 00454 gsl_vector *udx; 00455 /// Desc 00456 double it_min; 00457 //@} 00458 00459 /// Temporary vector 00460 alloc_vec_t avt5; 00461 /// Temporary vector 00462 alloc_vec_t avt6; 00463 /// Temporary vector 00464 alloc_vec_t avt7; 00465 /// Temporary vector 00466 alloc_vec_t avt8; 00467 00468 #endif 00469 00470 public: 00471 00472 gsl_mmin_conf() { 00473 lmin_tol=1.0e-4; 00474 this->tolf=1.0e-4; 00475 step_size=0.01; 00476 } 00477 00478 virtual 
~gsl_mmin_conf() {} 00479 00480 /// Tolerance for the line minimization (default \f$ 10^{-4} \f$) 00481 double lmin_tol; 00482 00483 /// Size of the initial step (default 0.01) 00484 double step_size; 00485 00486 /// Perform an iteration 00487 virtual int iterate() { 00488 00489 this->last_conv=0; 00490 00491 if (this->dim==0) { 00492 O2SCL_ERR2_RET("Memory not allocated in ", 00493 "gsl_mmin_conf::iterate().",gsl_efailed); 00494 } 00495 00496 gsl_vector *x=ugx; 00497 gsl_vector *gradient=ugg; 00498 gsl_vector *dx=udx; 00499 00500 double fa = it_min, fb, fc; 00501 double dir; 00502 double stepa = 0.0, stepb, stepc = step; 00503 00504 double g1norm; 00505 double pg; 00506 00507 if (pnorm == 0.0 || g0norm == 0.0) { 00508 gsl_vector_set_zero(dx); 00509 this->last_conv=gsl_enoprog; 00510 O2SCL_CONV2_RET("Either pnorm or g0norm vanished in ", 00511 "in gsl_mmin_conf::iterate().", 00512 gsl_enoprog,this->err_nonconv); 00513 } 00514 00515 /* Determine which direction is downhill, +p or -p */ 00516 00517 gsl_blas_ddot(p, gradient, &pg); 00518 00519 dir = (pg >= 0.0) ? 
+1.0 : -1.0; 00520 00521 /* Compute new trial point at x_c= x - step * p, where p is the 00522 current direction */ 00523 00524 this->take_step (x, p, stepc, dir / pnorm, x1, dx); 00525 00526 /* Evaluate function and gradient at new point xc */ 00527 00528 for(size_t i=0;i<this->dim;i++) avt5[i]=gsl_vector_get(x1,i); 00529 (*this->func)(this->dim,avt5,fc,*this->params); 00530 00531 if (fc < fa) { 00532 00533 /* Success, reduced the function value */ 00534 step = stepc * 2.0; 00535 it_min = fc; 00536 gsl_vector_memcpy(x,x1); 00537 00538 for(size_t i=0;i<this->dim;i++) avt6[i]=gsl_vector_get(x1,i); 00539 if (this->grad_given) { 00540 (*this->grad)(this->dim,avt6,avt7,*this->params); 00541 } else { 00542 (*this->agrad)(this->dim,avt6,avt7,*this->params); 00543 } 00544 for(size_t i=0;i<this->dim;i++) gsl_vector_set(gradient,i,avt7[i]); 00545 00546 return gsl_success; 00547 } 00548 00549 /* Do a line minimization in the region (xa,fa) (xc,fc) to find an 00550 intermediate (xb,fb) satisifying fa > fb < fc. 
Choose an initial 00551 xb based on parabolic interpolation 00552 */ 00553 this->intermediate_point(x,p,dir / pnorm,pg,stepa,stepc,fa,fc,x1, 00554 dx1,gradient,&stepb,&fb); 00555 00556 if (stepb == 0.0) { 00557 this->last_conv=gsl_enoprog; 00558 O2SCL_CONV2_RET("Variable stepb vanished in ", 00559 "in gsl_mmin_conf::iterate().", 00560 gsl_enoprog,this->err_nonconv); 00561 } 00562 00563 this->minimize(x,p,dir / pnorm,stepa,stepb,stepc,fa,fb,fc,tol, 00564 x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm); 00565 00566 gsl_vector_memcpy (x, x2); 00567 00568 /* Choose a new conjugate direction for the next step */ 00569 iter=(iter+1) % x->size; 00570 00571 if (iter==0) { 00572 00573 gsl_vector_memcpy (p, gradient); 00574 pnorm=g1norm; 00575 00576 } else { 00577 00578 /* p' = g1 - beta * p */ 00579 double beta = -pow (g1norm / g0norm, 2.0); 00580 gsl_blas_dscal (-beta, p); 00581 gsl_blas_daxpy (1.0, gradient, p); 00582 pnorm=gsl_blas_dnrm2 (p); 00583 } 00584 00585 g0norm=g1norm; 00586 gsl_vector_memcpy (g0, gradient); 00587 00588 return gsl_success; 00589 } 00590 00591 /// Return string denoting type("gsl_mmin_conf") 00592 virtual const char *type() { return "gsl_mmin_conf";} 00593 00594 /** 00595 \brief Allocate the memory 00596 00597 \future Use a gsl_alloc_arrays() like function for this (but 00598 keep in mind these are calloc, not malloc statements 00599 */ 00600 virtual int allocate(size_t n) { 00601 00602 x1=gsl_vector_calloc(n); 00603 if (x1==0) { 00604 O2SCL_ERR_RET("Failed to allocate x1 in gsl_mmin_conf::allocate().", 00605 gsl_enomem); 00606 } 00607 00608 dx1=gsl_vector_calloc(n); 00609 if (dx1==0) { 00610 gsl_vector_free(x1); 00611 O2SCL_ERR_RET("Failed to allocate dx1 in gsl_mmin_conf::allocate().", 00612 gsl_enomem); 00613 } 00614 00615 x2=gsl_vector_calloc(n); 00616 if (x2==0) { 00617 gsl_vector_free(dx1); 00618 gsl_vector_free(x1); 00619 O2SCL_ERR_RET("Failed to allocate x2 in gsl_mmin_conf::allocate().", 00620 gsl_enomem); 00621 } 00622 00623 
p=gsl_vector_calloc(n); 00624 if (p==0) { 00625 gsl_vector_free(x2); 00626 gsl_vector_free(dx1); 00627 gsl_vector_free(x1); 00628 O2SCL_ERR_RET("Failed to allocate p in gsl_mmin_conf::allocate().", 00629 gsl_enomem); 00630 } 00631 00632 g0=gsl_vector_calloc(n); 00633 if (g0==0) { 00634 gsl_vector_free(p); 00635 gsl_vector_free(x2); 00636 gsl_vector_free(dx1); 00637 gsl_vector_free(x1); 00638 O2SCL_ERR_RET("Failed to allocate g0 in gsl_mmin_conf::allocate().", 00639 gsl_enomem); 00640 } 00641 00642 udx=gsl_vector_calloc(n); 00643 if (udx==0) { 00644 gsl_vector_free(g0); 00645 gsl_vector_free(p); 00646 gsl_vector_free(x2); 00647 gsl_vector_free(dx1); 00648 gsl_vector_free(x1); 00649 O2SCL_ERR_RET("Failed to allocate udx in gsl_mmin_conf::allocate().", 00650 gsl_enomem); 00651 } 00652 00653 ugg=gsl_vector_calloc(n); 00654 if (ugg==0) { 00655 gsl_vector_free(udx); 00656 gsl_vector_free(g0); 00657 gsl_vector_free(p); 00658 gsl_vector_free(x2); 00659 gsl_vector_free(dx1); 00660 gsl_vector_free(x1); 00661 O2SCL_ERR_RET("Failed to allocate ugg in gsl_mmin_conf::allocate().", 00662 gsl_enomem); 00663 } 00664 00665 ugx=gsl_vector_calloc(n); 00666 if (ugx==0) { 00667 gsl_vector_free(ugg); 00668 gsl_vector_free(udx); 00669 gsl_vector_free(g0); 00670 gsl_vector_free(p); 00671 gsl_vector_free(x2); 00672 gsl_vector_free(dx1); 00673 gsl_vector_free(x1); 00674 O2SCL_ERR_RET("Failed to allocate ugx in gsl_mmin_conf::allocate().", 00675 gsl_enomem); 00676 } 00677 00678 this->ao.allocate(avt5,n); 00679 this->ao.allocate(avt6,n); 00680 this->ao.allocate(avt7,n); 00681 this->ao.allocate(avt8,n); 00682 00683 this->base_allocate(n); 00684 00685 return gsl_success; 00686 00687 } 00688 00689 /// Free the allocated memory 00690 virtual int free() { 00691 gsl_vector_free(ugx); 00692 gsl_vector_free(ugg); 00693 gsl_vector_free(udx); 00694 gsl_vector_free(g0); 00695 gsl_vector_free(p); 00696 gsl_vector_free(x2); 00697 gsl_vector_free(dx1); 00698 gsl_vector_free(x1); 00699 00700 
this->ao.free(avt5); 00701 this->ao.free(avt6); 00702 this->ao.free(avt7); 00703 this->ao.free(avt8); 00704 00705 this->base_free(); 00706 00707 return 0; 00708 } 00709 00710 /// Reset the minimizer to use the current point as a new starting point 00711 int restart() { 00712 iter=0; 00713 return gsl_success; 00714 } 00715 00716 /// Set the function and initial guess 00717 virtual int set(vec_t &x, double u_step_size, double tol_u, 00718 func_t &ufunc, param_t &pa) { 00719 00720 if (this->dim==0) { 00721 O2SCL_ERR_RET("Memory not allocated in set().",gsl_efailed); 00722 } 00723 00724 this->base_set(ufunc,pa,*this->agrad); 00725 for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]); 00726 00727 iter=0; 00728 step=u_step_size; 00729 tol=tol_u; 00730 00731 /// Evaluate the function and its gradient 00732 ufunc(this->dim,x,it_min,pa); 00733 this->agrad->set_function(ufunc); 00734 (*this->agrad)(this->dim,x,avt8,*this->params); 00735 for(size_t i=0;i<this->dim;i++) { 00736 gsl_vector_set(ugg,i,avt8[i]); 00737 } 00738 00739 /* Use the gradient as the initial direction */ 00740 00741 gsl_vector_memcpy(p,ugg); 00742 gsl_vector_memcpy(g0,ugg); 00743 00744 double gnorm=gsl_blas_dnrm2(ugg); 00745 00746 pnorm=gnorm; 00747 g0norm=gnorm; 00748 00749 return gsl_success; 00750 } 00751 00752 /// Set the function and initial guess 00753 virtual int set_de(vec_t &x, double u_step_size, double tol_u, 00754 func_t &ufunc, dfunc_t &udfunc, param_t &pa) { 00755 00756 if (this->dim==0) { 00757 O2SCL_ERR_RET("Memory not allocated in set().",gsl_efailed); 00758 } 00759 00760 this->base_set_de(ufunc,udfunc,pa); 00761 for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]); 00762 00763 iter=0; 00764 step=u_step_size; 00765 tol=tol_u; 00766 00767 // Evaluate the function and its gradient 00768 ufunc(this->dim,x,it_min,pa); 00769 udfunc(this->dim,x,avt8,*this->params); 00770 for(size_t i=0;i<this->dim;i++) { 00771 gsl_vector_set(ugg,i,avt8[i]); 00772 } 00773 00774 /* Use the gradient as 
the initial direction */ 00775 00776 gsl_vector_memcpy(p,ugg); 00777 gsl_vector_memcpy(g0,ugg); 00778 00779 double gnorm=gsl_blas_dnrm2(ugg); 00780 00781 pnorm=gnorm; 00782 g0norm=gnorm; 00783 00784 return gsl_success; 00785 } 00786 00787 /** \brief Calculate the minimum \c min of \c func w.r.t the 00788 array \c x of size \c nvar. 00789 */ 00790 virtual int mmin(size_t nn, vec_t &xx, double &fmin, param_t &pa, 00791 func_t &ufunc) { 00792 00793 if (nn==0) { 00794 O2SCL_ERR2_RET("Tried to minimize over zero variables ", 00795 " in gsl_mmin_conf::mmin().",gsl_einval); 00796 } 00797 00798 int xiter=0, status; 00799 00800 allocate(nn); 00801 00802 set(xx,step_size,lmin_tol,ufunc,pa); 00803 00804 do { 00805 00806 xiter++; 00807 00808 status=iterate(); 00809 00810 if (status) { 00811 break; 00812 } 00813 00814 // Equivalent to gsl_multimin_test_gradient with 00815 // additional code to print out present iteration 00816 00817 double norm=gsl_blas_dnrm2(ugg); 00818 00819 if(this->verbose>0) { 00820 this->print_iter(nn,*((ovector *)ugx),it_min,xiter, 00821 norm,this->tolf,type()); 00822 } 00823 00824 if (norm<this->tolf) status=gsl_success; 00825 else status=gsl_continue; 00826 00827 } while (status==gsl_continue && xiter < this->ntrial); 00828 00829 for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i); 00830 fmin=it_min; 00831 00832 free(); 00833 this->last_ntrial=xiter; 00834 00835 if (status==gsl_continue && xiter==this->ntrial) { 00836 this->last_conv=gsl_emaxiter; 00837 O2SCL_CONV_RET("Too many iterations in gsl_mmin_conf::mmin().", 00838 gsl_emaxiter,this->err_nonconv); 00839 } 00840 00841 return status; 00842 } 00843 00844 /** \brief Calculate the minimum \c min of \c func w.r.t the 00845 array \c x of size \c nvar. 
00846 */ 00847 virtual int mmin_de(size_t nn, vec_t &xx, double &fmin, param_t &pa, 00848 func_t &ufunc, dfunc_t &udfunc) { 00849 00850 if (nn==0) { 00851 O2SCL_ERR2_RET("Tried to minimize over zero variables ", 00852 " in gsl_mmin_conf::mmin_de().",gsl_einval); 00853 } 00854 00855 int xiter=0, status; 00856 00857 allocate(nn); 00858 00859 set_de(xx,step_size,lmin_tol,ufunc,udfunc,pa); 00860 00861 do { 00862 xiter++; 00863 00864 status=iterate(); 00865 00866 if (status) { 00867 break; 00868 } 00869 00870 // Equivalent to gsl_multimin_test_gradient with 00871 // additional code to print out present iteration 00872 00873 double norm=gsl_blas_dnrm2(ugg); 00874 00875 if(this->verbose>0) { 00876 this->print_iter(nn,*((ovector *)ugx),it_min,xiter, 00877 norm,this->tolf,type()); 00878 } 00879 00880 if (norm<this->tolf) status=gsl_success; 00881 else status=gsl_continue; 00882 00883 } 00884 while (status==gsl_continue && xiter < this->ntrial); 00885 00886 for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i); 00887 fmin=it_min; 00888 00889 free(); 00890 this->last_ntrial=xiter; 00891 00892 if (status==gsl_continue && xiter==this->ntrial) { 00893 this->last_conv=gsl_emaxiter; 00894 O2SCL_CONV_RET("Too many iterations in gsl_mmin_conf::mmin().", 00895 gsl_emaxiter,this->err_nonconv); 00896 } 00897 00898 return status; 00899 } 00900 00901 }; 00902 00903 /** 00904 \brief An array version of gsl_mmin_conf 00905 00906 Default template arguments 00907 - \c param_t - no default 00908 - \c func_t - \ref multi_vfunct<param_t> 00909 - \c vec_t - <tt>double [nv]</tt> 00910 - \c alloc_vec_t - <tt>double [nv]</tt> 00911 - \c alloc_t - \ref array_alloc<double[nv]> 00912 - \c dfunc_t - \ref grad_vfunct<param_t,nv> 00913 - \c auto_grad_t - \ref gradient_array 00914 <param_t,multi_vfunct<param_t,nv>,nv> 00915 - \c def_auto_grad_t - \ref simple_grad 00916 <param_t,multi_vfunct<param_t,nv>,nv> 00917 00918 \comment 00919 Doxygen has had trouble parsing this template, so I have 00920 simplified 
it slightly for the documentation below. 00921 \endcomment 00922 */ 00923 #ifdef DOXYGENP 00924 template<class param_t, size_t nv> class gsl_mmin_conf_array : 00925 public gsl_mmin_conf<param_t,func_t,vec_t,alloc_vec_t, 00926 alloc_t,dfunc_t,auto_grad_t,def_auto_grad_t> { 00927 }; 00928 #else 00929 template<class param_t, size_t nv> class gsl_mmin_conf_array : 00930 public gsl_mmin_conf<param_t,multi_vfunct<param_t,nv>, 00931 double[nv],double[nv],array_alloc<double[nv]>,grad_vfunct<param_t,nv>, 00932 gradient_array<param_t,multi_vfunct<param_t,nv>,nv>, 00933 simple_grad_array<param_t,multi_vfunct<param_t,nv>,nv> > { 00934 }; 00935 #endif 00936 00937 00938 #ifndef DOXYGENP 00939 } 00940 #endif 00941 00942 #endif
Documentation generated with Doxygen and provided under the GNU Free Documentation License. See License Information for details.
Project hosting provided by
,
O2scl Sourceforge Project Page