/*
  -------------------------------------------------------------------
  
  Copyright (C) 2006, 2007, 2008, Andrew W. Steiner
  
  This file is part of O2scl.
  
  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.
  
  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.
  
  -------------------------------------------------------------------
*/
#ifndef O2SCL_GSL_MMIN_CONF_H
#define O2SCL_GSL_MMIN_CONF_H

#include <gsl/gsl_blas.h>
#include <gsl/gsl_multimin.h>
#include <o2scl/multi_min.h>

#ifndef DOXYGENP
namespace o2scl {
#endif

  /** \brief Base minimization routines for gsl_mmin_conf and gsl_mmin_conp

      This class holds the state and the directional (line)
      minimization machinery shared by the conjugate-gradient
      minimizers. The members take_step(), intermediate_point() and
      minimize() closely follow the corresponding static functions in
      the GSL multimin line-minimization code, with the function and
      gradient evaluations rewritten in terms of the templated
      function and vector types.
  */
  template<class param_t, class func_t=multi_funct<param_t>,
    class vec_t=ovector_view, class alloc_vec_t=ovector,
    class alloc_t=ovector_alloc,
    class dfunc_t=grad_funct<param_t,ovector_view>,
    class auto_grad_t=gradient<param_t,func_t,ovector_view>,
    class def_auto_grad_t=simple_grad<param_t,func_t,ovector_view> >
    class gsl_mmin_base : public multi_min<param_t,func_t,func_t,vec_t>
    {

#ifndef DOXYGEN_INTERNAL

      protected:

      /// User-specified function
      func_t *func;

      /// User-specified gradient
      dfunc_t *grad;

      /// Automatic gradient object (used when no user gradient is given)
      auto_grad_t *agrad;

      /// If true, a gradient has been specified
      bool grad_given;

      /// User-specified parameter
      param_t *params;
      /// Memory size (number of variables currently allocated)
      size_t dim;
      /// Memory allocation object
      alloc_t ao;
      /// Temporary vectors for function and gradient evaluations
      alloc_vec_t avt, avt2;

      /** \brief Take a step

	  Computes the step dx = -stepx*lambda*px and the trial
	  point x1x = x + dx.
      */
      void take_step(const gsl_vector * x, const gsl_vector *px,
		     double stepx, double lambda, gsl_vector *x1x,
		     gsl_vector *dx) {

	gsl_vector_set_zero(dx);
	gsl_blas_daxpy(-stepx * lambda, px, dx);

	gsl_vector_memcpy (x1x, x);
	gsl_blas_daxpy(1.0, dx, x1x);
      }

      /**
	 \brief Line minimization

	 Do a line minimization in the region (xa,fa) (xc,fc) to find
	 an intermediate (xb,fb) satisfying fa > fb < fc. Choose an
	 initial xb based on parabolic interpolation.
      */
      void intermediate_point(const gsl_vector *x, const gsl_vector *px,
			      double lambda, double pg, double stepa,
			      double stepc, double fa, double fc,
			      gsl_vector *x1x, gsl_vector *dx,
			      gsl_vector *gradient, double *stepx, double *f) {
	double stepb, fb;

	bool trial_failed;

	do {

	  // Parabolic interpolation through (0,fa) and (stepc,fc)
	  // with magnitude of the directional derivative u at the
	  // origin
	  double u = fabs (pg * lambda * stepc);
	  stepb = 0.5 * stepc * u / ((fc - fa) + u);

	  take_step (x, px, stepb, lambda, x1x, dx);

	  // Evaluate the function at the trial point (copying from
	  // the gsl_vector to the templated vector type first)
	  for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
	  (*func)(dim,avt,fb,*params);

	  trial_failed=false;
	  if (fb >= fa && stepb > 0.0) {
	    /* downhill step failed, reduce step-size and try again */
	    fc = fb;
	    stepc = stepb;
	    trial_failed=true;
	  }

	} while (trial_failed);

	*stepx = stepb;
	*f = fb;

	// Compute the gradient at the accepted point, using the
	// user-specified gradient if one was given and the automatic
	// gradient object otherwise
	for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
	if (grad_given) {
	  (*grad)(dim,avt,avt2,*params);
	} else {
	  (*agrad)(dim,avt,avt2,*params);
	}
	for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);

	return;
      }

      /**
	 \brief Perform the minimization

	 Starting at (x0, f0) move along the direction p to find a minimum
	 f(x0 - lambda * p), returning the new point x1 = x0-lambda*p,
	 f1=f(x1) and g1 = grad(f) at x1.

      */
      void minimize(const gsl_vector *x, const gsl_vector *xp, double lambda,
		    double stepa, double stepb, double stepc, double fa,
		    double fb, double fc, double xtol, gsl_vector *x1x,
		    gsl_vector *dx1x, gsl_vector *x2x, gsl_vector *dx2x,
		    gsl_vector *gradient, double *xstep, double *f,
		    double *gnorm_u) {

	// (u,fu), (v,fv), (w,fw) are the three best points seen so
	// far, with fu the current minimum
	double u = stepb;
	double v = stepa;
	double w = stepc;
	double fu = fb;
	double fv = fa;
	double fw = fc;

	double old2 = fabs(w - v);
	double old1 = fabs(v - u);

	double stepm, fm, pg, gnorm1;

	int xiter = 0;

	gsl_vector_memcpy (x2x, x1x);
	gsl_vector_memcpy (dx2x, dx1x);

	*f = fb;
	*xstep = stepb;
	*gnorm_u = gsl_blas_dnrm2(gradient);

      mid_trial:

	xiter++;

	if (xiter > nmaxiter) {
	  // Exceeded maximum number of iterations
	  return;
	}

	{
	  // Trial step from parabolic interpolation through the
	  // three bracketing points; fall back on a fixed-fraction
	  // (0.38) section step if the parabolic step is not
	  // acceptable
	  double dw = w - u;
	  double dv = v - u;
	  double du = 0.0;
	  double e1 = ((fv - fu) * dw * dw + (fu - fw) * dv * dv);
	  double e2 = 2.0 * ((fv - fu) * dw + (fu - fw) * dv);

	  if (e2 != 0.0) {
	    du = e1 / e2;
	  }

	  if (du > 0.0 && du < (stepc - stepb) && fabs(du) < 0.5 * old2) {
	    stepm = u + du;
	  } else if (du < 0.0 && du > (stepa - stepb) &&
		     fabs(du) < 0.5 * old2) {
	    stepm = u + du;
	  } else if ((stepc - stepb) > (stepb - stepa)) {
	    stepm = 0.38 * (stepc - stepb) + stepb;
	  } else {
	    stepm = stepb - 0.38 * (stepb - stepa);
	  }
	}

	take_step (x, xp, stepm, lambda, x1x, dx1x);

	for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
	(*func)(dim,avt,fm,*params);

	if (fm > fb) {

	  // The trial point is worse than the current minimum:
	  // update the bracketing points and interval, then retry
	  if (fm < fv) {
	    w = v;
	    v = stepm;
	    fw = fv;
	    fv = fm;
	  } else if (fm < fw) {
	    w = stepm;
	    fw = fm;
	  }
	  if (stepm < stepb) {
	    stepa = stepm;
	    fa = fm;
	  } else {
	    stepc = stepm;
	    fc = fm;
	  }
	  goto mid_trial;

	} else if (fm <= fb) {

	  // The trial point improves on the current minimum: accept
	  // it, recompute the gradient, and test for convergence
	  old2 = old1;
	  old1 = fabs(u - stepm);
	  w = v;
	  v = u;
	  u = stepm;
	  fw = fv;
	  fv = fu;
	  fu = fm;

	  gsl_vector_memcpy (x2x, x1x);
	  gsl_vector_memcpy (dx2x, dx1x);

	  for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
	  if (grad_given) {
	    (*grad)(dim,avt,avt2,*params);
	  } else {
	    (*agrad)(dim,avt,avt2,*params);
	  }
	  for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);

	  gsl_blas_ddot (xp, gradient, &pg);
	  gnorm1 = gsl_blas_dnrm2 (gradient);

	  *f = fm;
	  *xstep = stepm;
	  *gnorm_u = gnorm1;

	  // Converged when the directional derivative is small
	  // relative to the gradient norm
	  if (fabs (pg * lambda / gnorm1) < xtol) {
	    /* SUCCESS */
	    return;
	  }

	  if (stepm < stepb) {
	    stepc = stepb;
	    fc = fb;
	    stepb = stepm;
	    fb = fm;
	  } else {
	    stepa = stepb;
	    fa = fb;
	    stepb = stepm;
	    fb = fm;
	  }
	  goto mid_trial;
	}
      }

#endif

      public:

      /// Stepsize for finite-differencing ( default \f$ 10^{-4} \f$ )
      double deriv_h;

      /// Maximum iterations for line minimization (default 10)
      int nmaxiter;

      /// Default automatic gradient object
      def_auto_grad_t def_grad;

      gsl_mmin_base() {
	deriv_h=1.0e-4;
	nmaxiter=10;
	grad_given=false;
	agrad=&def_grad;
      }

      /// Set the function
      int base_set(func_t &ufunc, param_t &pa) {
	func=&ufunc;
	params=&pa;
	grad_given=false;
	return 0;
      }

      /// Set the function and the gradient
      int base_set_de(func_t &ufunc, dfunc_t &udfunc, param_t &pa) {
	func=&ufunc;
	params=&pa;
	grad=&udfunc;
	grad_given=true;
	return 0;
      }

      /// Allocate memory for \c nn variables
      int base_allocate(size_t nn) {
	ao.allocate(avt,nn);
	ao.allocate(avt2,nn);
	dim=nn;
	return 0;
      }

      /// Clear allocated memory
      int base_free() {
	ao.free(avt);
	ao.free(avt2);
	dim=0;
	return 0;
      }


    };

  /** \brief Multidimensional minimization by the Fletcher-Reeves
      conjugate gradient algorithm (GSL)

      The variable multi_min::tolf is used as the maximum value
      of the gradient and is \f$ 10^{-4} \f$ by default.

      The gsl iterate() function for this minimizer chooses to return
      \c GSL_ENOPROG if the iteration fails to make progress without
      calling the error handler. This is presumably because the
      iterate() function can fail to make progress when the algorithm
      has succeeded in finding the minimum. I prefer to return a
      non-zero value from a function only in cases where the error
      handler will also be called, so the user is clear on what an
      "error" means in the local context. Thus if iterate() is failing
      to make progress, instead of returning a non-zero value, it sets
      the value of \ref it_info to a non-zero value.

      \todo A bit of needless copying is required in the function
      wrapper to convert from \c gsl_vector to the templated vector
      type. This can be fixed, probably by rewriting take_step
      to produce a vec_t &x1 rather than a gsl_vector *x1;

      Those who look in detail at the code will note that the state
      variable \c max_iter has not been included here, because
      it was not really used in the original GSL code for
      these minimizers.
  */
  template<class param_t, class func_t=multi_funct<param_t>,
    class vec_t=ovector_view, class alloc_vec_t=ovector,
    class alloc_t=ovector_alloc,
    class dfunc_t=grad_funct<param_t,ovector_view>,
    class auto_grad_t=gradient<param_t,func_t,ovector_view>,
    class def_auto_grad_t=simple_grad<param_t,func_t,ovector_view> >
    class gsl_mmin_conf :
    public gsl_mmin_base<param_t,func_t,vec_t,alloc_vec_t,alloc_t,dfunc_t,
    auto_grad_t,def_auto_grad_t> {

#ifndef DOXYGEN_INTERNAL

      protected:

      /// \name The original variables from the GSL state structure
      //@{
      /// Iteration counter; the search direction is reset to the
      /// steepest-descent direction every time this wraps to zero
      int iter;
      /// Current step size
      double step;
      /// Line-minimization tolerance (stored by set() and set_de())
      double tol;
      /// Trial point
      gsl_vector *x1;
      /// Step to the trial point
      gsl_vector *dx1;
      /// Second trial point (best point from the line minimization)
      gsl_vector *x2;
      /// Norm of the current search direction \c p
      double pnorm;
      /// Current search direction
      gsl_vector *p;
      /// Norm of the gradient at the previous point
      double g0norm;
      /// Gradient at the previous point
      gsl_vector *g0;
      //@}

      /// \name Store the arguments to set() so we can use them for iterate()
      //@{
      /// Current position
      gsl_vector *ugx;
      /// Current gradient
      gsl_vector *ugg;
      /// Last step taken
      gsl_vector *udx;
      /// Current minimum value of the function
      double it_min;
      //@}

      /// Temporary vector
      alloc_vec_t avt5;
      /// Temporary vector
      alloc_vec_t avt6;
      /// Temporary vector
      alloc_vec_t avt7;
      /// Temporary vector
      alloc_vec_t avt8;

#endif

      public:

      gsl_mmin_conf() {
	lmin_tol=1.0e-4;
	this->tolf=1.0e-4;
	step_size=0.01;
	it_info=0;
      }

      virtual ~gsl_mmin_conf() {}

      /// Tolerance for the line minimization (default \f$ 10^{-4} \f$)
      double lmin_tol;

      /// Size of the initial step
      double step_size;

      /**
	 \brief Information from the last call to iterate()

	 This is 1 if \c pnorm or \c gnorm are 0 and 2 if \c stepb is
	 zero.

	 \todo Document this better
      */
      int it_info;

      /// Perform an iteration
      virtual int iterate() {

	if (this->dim==0) {
	  set_err_ret("Memory not allocated in iterate().",gsl_efailed);
	}

	gsl_vector *x=ugx;
	gsl_vector *gradient=ugg;
	gsl_vector *dx=udx;

	double fa = it_min, fb, fc;
	double dir;
	double stepa = 0.0, stepb, stepc = step;

	double g1norm;
	double pg;

	// No progress is possible if either the search direction or
	// the previous gradient has zero norm; flag this through
	// it_info instead of calling the error handler
	if (pnorm == 0.0 || g0norm == 0.0) {
	  gsl_vector_set_zero(dx);
	  it_info=1;
	  return 0;
	}

	/* Determine which direction is downhill, +p or -p */

	gsl_blas_ddot(p, gradient, &pg);

	dir = (pg >= 0.0) ? +1.0 : -1.0;

	/* Compute new trial point at x_c= x - step * p, where p is the
	   current direction */

	this->take_step (x, p, stepc, dir / pnorm, x1, dx);

	/* Evaluate function and gradient at new point xc */

	for(size_t i=0;i<this->dim;i++) avt5[i]=gsl_vector_get(x1,i);
	(*this->func)(this->dim,avt5,fc,*this->params);

	if (fc < fa) {

	  /* Success, reduced the function value */
	  step = stepc * 2.0;
	  it_min = fc;
	  gsl_vector_memcpy(x,x1);

	  // Recompute the gradient at the accepted point, using the
	  // user gradient if one was given
	  for(size_t i=0;i<this->dim;i++) avt6[i]=gsl_vector_get(x1,i);
	  if (this->grad_given) {
	    (*this->grad)(this->dim,avt6,avt7,*this->params);
	  } else {
	    (*this->agrad)(this->dim,avt6,avt7,*this->params);
	  }
	  for(size_t i=0;i<this->dim;i++) gsl_vector_set(gradient,i,avt7[i]);

	  it_info=0;
	  return gsl_success;
	}

	/* Do a line minimisation in the region (xa,fa) (xc,fc) to find an
	   intermediate (xb,fb) satisfying fa > fb < fc. Choose an initial
	   xb based on parabolic interpolation */

	this->intermediate_point(x,p,dir / pnorm,pg,stepa,stepc,fa,fc,x1,
				 dx1,gradient,&stepb,&fb);

	if (stepb == 0.0) {
	  it_info=2;
	  return 0;
	}

	this->minimize(x,p,dir / pnorm,stepa,stepb,stepc,fa,fb,fc,tol,
		       x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm);

	gsl_vector_memcpy (x, x2);

	/* Choose a new conjugate direction for the next step */
	iter=(iter+1) % x->size;

	if (iter==0) {

	  // Periodically reset the direction to the gradient
	  gsl_vector_memcpy (p, gradient);
	  pnorm=g1norm;

	} else {

	  /* p' = g1 - beta * p, with the Fletcher-Reeves coefficient
	     beta = -(|g1|/|g0|)^2 */
	  double beta = -pow (g1norm / g0norm, 2.0);
	  gsl_blas_dscal (-beta, p);
	  gsl_blas_daxpy (1.0, gradient, p);
	  pnorm=gsl_blas_dnrm2 (p);
	}

	g0norm=g1norm;
	gsl_vector_memcpy (g0, gradient);

	it_info=0;
	return gsl_success;

      }

      /// Return string denoting type("gsl_mmin_conf")
      virtual const char *type() { return "gsl_mmin_conf";}

      /** \brief Allocate the memory

	  Allocates the eight gsl_vector workspaces and the templated
	  temporaries. On any allocation failure, all previously
	  allocated vectors are freed before calling the error
	  handler.
      */
      virtual int allocate(size_t n) {

	x1=gsl_vector_calloc(n);
	if (x1==0) {
	  set_err_ret("Failed to allocate x1 in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	dx1=gsl_vector_calloc(n);
	if (dx1==0) {
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate dx1 in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	x2=gsl_vector_calloc(n);
	if (x2==0) {
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate x2 in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	p=gsl_vector_calloc(n);
	if (p==0) {
	  gsl_vector_free(x2);
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate p in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	g0=gsl_vector_calloc(n);
	if (g0==0) {
	  gsl_vector_free(p);
	  gsl_vector_free(x2);
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate g0 in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	udx=gsl_vector_calloc(n);
	if (udx==0) {
	  gsl_vector_free(g0);
	  gsl_vector_free(p);
	  gsl_vector_free(x2);
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate udx in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	ugg=gsl_vector_calloc(n);
	if (ugg==0) {
	  gsl_vector_free(udx);
	  gsl_vector_free(g0);
	  gsl_vector_free(p);
	  gsl_vector_free(x2);
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate ugg in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	ugx=gsl_vector_calloc(n);
	if (ugx==0) {
	  gsl_vector_free(ugg);
	  gsl_vector_free(udx);
	  gsl_vector_free(g0);
	  gsl_vector_free(p);
	  gsl_vector_free(x2);
	  gsl_vector_free(dx1);
	  gsl_vector_free(x1);
	  set_err_ret("Failed to allocate ugx in gsl_mmin_conf::allocate().",
		      gsl_enomem);
	}

	this->ao.allocate(avt5,n);
	this->ao.allocate(avt6,n);
	this->ao.allocate(avt7,n);
	this->ao.allocate(avt8,n);

	this->base_allocate(n);

	return gsl_success;

      }

      /// Free the allocated memory
      virtual int free() {
	gsl_vector_free(ugx);
	gsl_vector_free(ugg);
	gsl_vector_free(udx);
	gsl_vector_free(g0);
	gsl_vector_free(p);
	gsl_vector_free(x2);
	gsl_vector_free(dx1);
	gsl_vector_free(x1);

	this->ao.free(avt5);
	this->ao.free(avt6);
	this->ao.free(avt7);
	this->ao.free(avt8);

	this->base_free();

	return 0;
      }

      /// Reset the minimizer to use the current point as a new starting point
      int restart() {
	iter=0;
	return gsl_success;
      }

      /// Set the function and initial guess
      virtual int set(vec_t &x, double u_step_size, double tol_u,
		      func_t &ufunc, param_t &pa) {

	if (this->dim==0) {
	  set_err_ret("Memory not allocated in set().",gsl_efailed);
	}

	this->base_set(ufunc,pa);
	for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]);

	iter=0;
	step=u_step_size;
	tol =tol_u;

	// Evaluate the function and its gradient (via the automatic
	// gradient object) at the initial point
	ufunc(this->dim,x,it_min,pa);
	this->agrad->set_function(ufunc);
	(*this->agrad)(this->dim,x,avt8,*this->params);
	for(size_t i=0;i<this->dim;i++) {
	  gsl_vector_set(ugg,i,avt8[i]);
	}

	/* Use the gradient as the initial direction */

	gsl_vector_memcpy(p,ugg);
	gsl_vector_memcpy(g0,ugg);

	double gnorm=gsl_blas_dnrm2(ugg);

	pnorm=gnorm;
	g0norm=gnorm;

	return gsl_success;
      }

      /// Set the function, the gradient, and the initial guess
      virtual int set_de(vec_t &x, double u_step_size, double tol_u,
			 func_t &ufunc, dfunc_t &udfunc, param_t &pa) {

	if (this->dim==0) {
	  set_err_ret("Memory not allocated in set().",gsl_efailed);
	}

	this->base_set_de(ufunc,udfunc,pa);
	for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]);

	iter=0;
	step=u_step_size;
	tol =tol_u;

	// Evaluate the function and its gradient at the initial point.
	// NOTE(review): the automatic gradient object is used here
	// even though a user gradient (udfunc) was just registered
	// via base_set_de(), and unlike set(), set_function() is
	// never called on agrad in this path -- confirm this is
	// intentional rather than a copy-paste oversight.
	ufunc(this->dim,x,it_min,pa);
	(*this->agrad)(this->dim,x,avt8,*this->params);
	for(size_t i=0;i<this->dim;i++) {
	  gsl_vector_set(ugg,i,avt8[i]);
	}

	/* Use the gradient as the initial direction */

	gsl_vector_memcpy(p,ugg);
	gsl_vector_memcpy(g0,ugg);

	double gnorm=gsl_blas_dnrm2(ugg);

	pnorm=gnorm;
	g0norm=gnorm;

	return gsl_success;
      }

      /** \brief Calculate the minimum \c min of \c func w.r.t the
	  array \c x of size \c nvar.
      */
      virtual int mmin(size_t nn, vec_t &xx, double &fmin, param_t &pa,
		       func_t &ufunc) {

	int xiter=0, status;

	allocate(nn);

	set(xx,step_size,lmin_tol,ufunc,pa);

	do {
	  xiter++;

	  status=iterate();

	  if (status || it_info!=0) {
	    break;
	  }

	  // Equivalent to gsl_multimin_test_gradient with
	  // additional code to print out present iteration

	  double norm=gsl_blas_dnrm2(ugg);

	  if(this->verbose>0) {
	    // NOTE(review): ugx is a gsl_vector cast directly to an
	    // ovector for printing; this relies on layout
	    // compatibility between the two types -- verify.
	    this->print_iter(nn,*((ovector *)ugx),it_min,xiter,
			     norm,this->tolf,"gsl_mmin_conf");
	  }

	  if (norm<this->tolf) status=gsl_success;
	  else status=gsl_continue;

	}
	while (status==GSL_CONTINUE && xiter < this->ntrial);

	for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i);
	fmin=it_min;

	free();

	this->last_ntrial=xiter;

	return status;
      }

      /** \brief Calculate the minimum \c min of \c func w.r.t the
	  array \c x of size \c nvar, using the user-specified
	  gradient \c udfunc.
      */
      virtual int mmin_de(size_t nn, vec_t &xx, double &fmin, param_t &pa,
			  func_t &ufunc, dfunc_t &udfunc) {

	int xiter=0, status;

	allocate(nn);

	set_de(xx,step_size,lmin_tol,ufunc,udfunc,pa);

	do {
	  xiter++;

	  status=iterate();

	  if (status || it_info!=0) {
	    break;
	  }

	  // Equivalent to gsl_multimin_test_gradient with
	  // additional code to print out present iteration

	  double norm=gsl_blas_dnrm2(ugg);

	  if(this->verbose>0) {
	    // NOTE(review): same gsl_vector -> ovector cast as in
	    // mmin(); relies on layout compatibility -- verify.
	    this->print_iter(nn,*((ovector *)ugx),it_min,xiter,
			     norm,this->tolf,"gsl_mmin_conf");
	  }

	  if (norm<this->tolf) status=gsl_success;
	  else status=gsl_continue;

	}
	while (status==GSL_CONTINUE && xiter < this->ntrial);

	for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i);
	fmin=it_min;

	free();

	this->last_ntrial=xiter;

	return status;
      }

    };

  /// An array version of gsl_mmin_conf
  template<class param_t, size_t nv> class gsl_mmin_conf_array :
    public gsl_mmin_conf<param_t,multi_vfunct<param_t,nv>,
    double[nv],double[nv],array_alloc<double[nv]>,grad_vfunct<param_t,nv>,
    gradient_array<param_t,multi_vfunct<param_t,nv>,nv>,
    simple_grad_array<param_t,multi_vfunct<param_t,nv>,nv> > {
  };


#ifndef DOXYGENP
}
#endif

#endif
Documentation generated with Doxygen and provided under the GNU Free Documentation License. See License Information for details.
Project hosting provided by SourceForge — see the O2scl SourceForge Project Page.