/*
  -------------------------------------------------------------------

  Copyright (C) 2006, 2007, Andrew W. Steiner

  This file is part of O2scl.

  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.

  -------------------------------------------------------------------
*/
#ifndef O2SCL_GSL_MMIN_CONF_H
#define O2SCL_GSL_MMIN_CONF_H

#include <gsl/gsl_blas.h>
#include <gsl/gsl_multimin.h>
#include <o2scl/multi_min.h>

#ifndef DOXYGENP
namespace o2scl {
#endif

  /** \brief Base minimization routines for gsl_mmin_conf and gsl_mmin_conp
   */
  template<class param_t, class func_t=multi_funct<param_t>,
    class vec_t=ovector_view, class alloc_vec_t=ovector,
    class alloc_t=ovector_alloc,
    class dfunc_t=grad_funct<param_t,ovector_view>,
    class auto_grad_t=gradient<param_t,func_t,ovector_view>,
    class def_auto_grad_t=simple_grad<param_t,func_t,ovector_view> >
    class gsl_mmin_base : public multi_min<param_t,func_t,func_t,vec_t> {

#ifndef DOXYGEN_INTERNAL

    protected:

    /// User-specified function
    func_t *func;

    /// User-specified gradient
    dfunc_t *grad;

    /// Automatic gradient object
    auto_grad_t *agrad;

    /// If true, a gradient has been specified
    bool grad_given;

    /// User-specified parameter
    param_t *params;

    /// Number of variables
    size_t dim;

    /// Memory allocation
    alloc_t ao;

    /// Temporary vectors
    alloc_vec_t avt, avt2;

    /// Take a step
    void take_step(const gsl_vector *x, const gsl_vector *px,
                   double stepx, double lambda, gsl_vector *x1x,
                   gsl_vector *dx) {

      gsl_vector_set_zero(dx);
      gsl_blas_daxpy(-stepx*lambda,px,dx);

      gsl_vector_memcpy(x1x,x);
      gsl_blas_daxpy(1.0,dx,x1x);
    }
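
    // Note: take_step() above sets dx = -stepx*lambda*px and then
    // x1x = x + dx, i.e. it moves a distance stepx*lambda from x
    // in the direction opposite to px.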
    /**
        \brief Line minimization

        Do a line minimization in the region (xa,fa) (xc,fc) to find
        an intermediate (xb,fb) satisfying fa > fb < fc. Choose an
        initial xb based on parabolic interpolation.
    */
    void intermediate_point(const gsl_vector *x, const gsl_vector *px,
                            double lambda, double pg, double stepa,
                            double stepc, double fa, double fc,
                            gsl_vector *x1x, gsl_vector *dx,
                            gsl_vector *gradient, double *stepx, double *f) {
      double stepb, fb;

      bool trial_failed;

      do {

        double u = fabs(pg*lambda*stepc);
        stepb = 0.5*stepc*u/((fc-fa)+u);

        take_step(x,px,stepb,lambda,x1x,dx);

        for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
        (*func)(dim,avt,fb,*params);

        trial_failed=false;
        if (fb >= fa && stepb > 0.0) {
          /* downhill step failed, reduce step-size and try again */
          fc = fb;
          stepc = stepb;
          trial_failed=true;
        }

      } while (trial_failed);

      *stepx = stepb;
      *f = fb;

      for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
      if (grad_given) {
        (*grad)(dim,avt,avt2,*params);
      } else {
        (*agrad)(dim,avt,avt2,*params);
      }
      for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);

      return;
    }
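
    // Note on the trial step in intermediate_point(): with
    // u = |pg*lambda*stepc|, stepb is the minimizer of the parabola
    // which passes through (0,fa) and (stepc,fc) and has slope
    // -u/stepc at zero, namely stepb = 0.5*stepc*u/((fc-fa)+u).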
    /**
        \brief Perform the minimization

        Starting at (x0, f0) move along the direction p to find a minimum
        f(x0 - lambda * p), returning the new point x1 = x0-lambda*p,
        f1=f(x1) and g1 = grad(f) at x1.
    */
    void minimize(const gsl_vector *x, const gsl_vector *xp, double lambda,
                  double stepa, double stepb, double stepc, double fa,
                  double fb, double fc, double xtol, gsl_vector *x1x,
                  gsl_vector *dx1x, gsl_vector *x2x, gsl_vector *dx2x,
                  gsl_vector *gradient, double *xstep, double *f,
                  double *gnorm_u) {

      double u = stepb;
      double v = stepa;
      double w = stepc;
      double fu = fb;
      double fv = fa;
      double fw = fc;

      double old2 = fabs(w-v);
      double old1 = fabs(v-u);

      double stepm, fm, pg, gnorm1;

      int xiter = 0;

      gsl_vector_memcpy(x2x,x1x);
      gsl_vector_memcpy(dx2x,dx1x);

      *f = fb;
      *xstep = stepb;
      *gnorm_u = gsl_blas_dnrm2(gradient);

    mid_trial:

      xiter++;

      if (xiter > nmaxiter) {
        // Exceeded maximum number of iterations
        return;
      }

      {
        double dw = w-u;
        double dv = v-u;
        double du = 0.0;
        double e1 = ((fv-fu)*dw*dw+(fu-fw)*dv*dv);
        double e2 = 2.0*((fv-fu)*dw+(fu-fw)*dv);

        if (e2 != 0.0) {
          du = e1/e2;
        }

        if (du > 0.0 && du < (stepc-stepb) && fabs(du) < 0.5*old2) {
          stepm = u+du;
        } else if (du < 0.0 && du > (stepa-stepb) &&
                   fabs(du) < 0.5*old2) {
          stepm = u+du;
        } else if ((stepc-stepb) > (stepb-stepa)) {
          stepm = 0.38*(stepc-stepb)+stepb;
        } else {
          stepm = stepb-0.38*(stepb-stepa);
        }
      }

      take_step(x,xp,stepm,lambda,x1x,dx1x);

      for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
      (*func)(dim,avt,fm,*params);

      if (fm > fb) {

        if (fm < fv) {
          w = v;
          v = stepm;
          fw = fv;
          fv = fm;
        } else if (fm < fw) {
          w = stepm;
          fw = fm;
        }
        if (stepm < stepb) {
          stepa = stepm;
          fa = fm;
        } else {
          stepc = stepm;
          fc = fm;
        }
        goto mid_trial;

      } else if (fm <= fb) {

        old2 = old1;
        old1 = fabs(u-stepm);
        w = v;
        v = u;
        u = stepm;
        fw = fv;
        fv = fu;
        fu = fm;

        gsl_vector_memcpy(x2x,x1x);
        gsl_vector_memcpy(dx2x,dx1x);

        for(size_t i=0;i<dim;i++) avt[i]=gsl_vector_get(x1x,i);
        if (grad_given) {
          (*grad)(dim,avt,avt2,*params);
        } else {
          (*agrad)(dim,avt,avt2,*params);
        }
        for(size_t i=0;i<dim;i++) gsl_vector_set(gradient,i,avt2[i]);

        gsl_blas_ddot(xp,gradient,&pg);
        gnorm1 = gsl_blas_dnrm2(gradient);

        *f = fm;
        *xstep = stepm;
        *gnorm_u = gnorm1;

        if (fabs(pg*lambda/gnorm1) < xtol) {
          /* SUCCESS */
          return;
        }

        if (stepm < stepb) {
          stepc = stepb;
          fc = fb;
          stepb = stepm;
          fb = fm;
        } else {
          stepa = stepb;
          fa = fb;
          stepb = stepm;
          fb = fm;
        }
        goto mid_trial;
      }
    }

#endif

    public:

    /// Step size for finite-differencing (default \f$ 10^{-4} \f$)
    double deriv_h;

    /// Maximum iterations for line minimization (default 10)
    int nmaxiter;

    /// Default automatic gradient object
    def_auto_grad_t def_grad;

    gsl_mmin_base() {
      deriv_h=1.0e-4;
      nmaxiter=10;
      grad_given=false;
      agrad=&def_grad;
    }

    /// Set the function
    int base_set(func_t &ufunc, param_t &pa) {
      func=&ufunc;
      params=&pa;
      grad_given=false;
      return 0;
    }

    /// Set the function and the gradient
    int base_set_de(func_t &ufunc, dfunc_t &udfunc, param_t &pa) {
      func=&ufunc;
      params=&pa;
      grad=&udfunc;
      grad_given=true;
      return 0;
    }

    /// Allocate memory
    int base_allocate(size_t nn) {
      ao.allocate(avt,nn);
      ao.allocate(avt2,nn);
      dim=nn;
      return 0;
    }

    /// Clear allocated memory
    int base_free() {
      ao.free(avt);
      ao.free(avt2);
      dim=0;
      return 0;
    }

  };

  /** \brief Multidimensional minimization by the Fletcher-Reeves
      conjugate gradient algorithm (GSL)

      The variable multi_min::tolf is used as the maximum value
      of the gradient and is \f$ 10^{-4} \f$ by default.

      The GSL iterate() function for this minimizer chooses to return
      \c GSL_ENOPROG if the iteration fails to make progress, without
      calling the error handler. This is presumably because the
      iterate() function can fail to make progress even when the
      algorithm has succeeded in finding the minimum. I prefer to
      return a non-zero value from a function only in cases where the
      error handler will also be called, so that the user is clear on
      what an "error" means in the local context. Thus, if iterate()
      is failing to make progress, instead of returning a non-zero
      value, it sets the value of \ref it_info to a non-zero value.

      \todo A bit of needless copying is required in the function
      wrapper to convert from \c gsl_vector to the templated vector
      type. This can be fixed, probably by rewriting take_step
      to produce a vec_t &x1 rather than a gsl_vector *x1.

      Those who look in detail at the code will note that the state
      variable \c max_iter has not been included here, because
      it was not really used in the original GSL code for
      these minimizers.
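
      A minimal usage sketch follows. The functor \c quad_min, the
      parameter type <tt>void *</tt>, and the way the \c ovector
      objects are constructed are illustrative assumptions and not
      part of this header; any function type providing the calling
      convention used internally,
      <tt>operator()(size_t, ovector_view &, double &, param_t &)</tt>,
      should work similarly.

      \code
      // Hypothetical objective function: f(x) = (x0-1)^2 + (x1+2)^2
      struct quad_min {
        int operator()(size_t nv, ovector_view &x, double &f, void *&pa) {
          f=(x[0]-1.0)*(x[0]-1.0)+(x[1]+2.0)*(x[1]+2.0);
          return 0;
        }
      };

      void *vp=0;
      quad_min qm;
      gsl_mmin_conf<void *,quad_min> gmc;

      // Initial guess
      ovector x(2);
      x[0]=10.0; x[1]=10.0;

      // mmin() handles allocate(), set(), the iteration loop, and
      // free(); the gradient is computed numerically by default.
      double fmin;
      gmc.mmin(2,x,fmin,vp,qm);
      \endcode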
  */
  template<class param_t, class func_t=multi_funct<param_t>,
    class vec_t=ovector_view, class alloc_vec_t=ovector,
    class alloc_t=ovector_alloc,
    class dfunc_t=grad_funct<param_t,ovector_view>,
    class auto_grad_t=gradient<param_t,func_t,ovector_view>,
    class def_auto_grad_t=simple_grad<param_t,func_t,ovector_view> >
    class gsl_mmin_conf :
    public gsl_mmin_base<param_t,func_t,vec_t,alloc_vec_t,alloc_t,dfunc_t,
                         auto_grad_t,def_auto_grad_t> {

#ifndef DOXYGEN_INTERNAL

    protected:

    /// \name The original variables from the GSL state structure
    //@{
    /// Iteration count, used to reset the direction every n iterations
    int iter;
    /// Current step size
    double step;
    /// Tolerance for the line minimization
    double tol;
    /// Workspace for the trial point
    gsl_vector *x1;
    /// Workspace for the trial step
    gsl_vector *dx1;
    /// Workspace for the line-minimized point
    gsl_vector *x2;
    /// Norm of the current direction \c p
    double pnorm;
    /// Current search direction
    gsl_vector *p;
    /// Norm of the gradient from the previous iteration
    double g0norm;
    /// Gradient from the previous iteration
    gsl_vector *g0;
    //@}

    /// \name Store the arguments to set() so we can use them for iterate()
    //@{
    /// Current point
    gsl_vector *ugx;
    /// Current gradient
    gsl_vector *ugg;
    /// Last step taken
    gsl_vector *udx;
    /// Current minimum value of the function
    double it_min;
    //@}

    /// Temporary vector
    alloc_vec_t avt5;
    /// Temporary vector
    alloc_vec_t avt6;
    /// Temporary vector
    alloc_vec_t avt7;
    /// Temporary vector
    alloc_vec_t avt8;

#endif

    public:

    gsl_mmin_conf() {
      lmin_tol=1.0e-4;
      this->tolf=1.0e-4;
      step_size=0.01;
      it_info=0;
    }

    virtual ~gsl_mmin_conf() {}

    /// Tolerance for the line minimization (default \f$ 10^{-4} \f$)
    double lmin_tol;

    /// Size of the initial step (default 0.01)
    double step_size;

    /**
        \brief Information from the last call to iterate()

        This is zero if the last iteration made progress. It is set
        to 1 if no progress is possible because either \c pnorm or \c
        g0norm is zero, and to 2 if the line minimization returned an
        intermediate step \c stepb of zero.
    */
    int it_info;

    /// Perform an iteration
    virtual int iterate() {
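
      // Summary of one iteration: take a trial step of size 'step'
      // along the downhill sense of the current direction p. If this
      // reduces the function value, accept the trial point, double
      // the step size, and update the gradient. Otherwise, bracket a
      // minimum along the line with intermediate_point() and refine
      // it with minimize(), then choose the next conjugate direction.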

      if (this->dim==0) {
        set_err_ret("Memory not allocated in iterate().",gsl_efailed);
      }

      gsl_vector *x=ugx;
      gsl_vector *gradient=ugg;
      gsl_vector *dx=udx;

      double fa = it_min, fb, fc;
      double dir;
      double stepa = 0.0, stepb, stepc = step;

      double g1norm;
      double pg;

      if (pnorm == 0.0 || g0norm == 0.0) {
        gsl_vector_set_zero(dx);
        it_info=1;
        return 0;
      }

      /* Determine which direction is downhill, +p or -p */

      gsl_blas_ddot(p,gradient,&pg);

      dir = (pg >= 0.0) ? +1.0 : -1.0;

      /* Compute new trial point at x_c= x - step * p, where p is the
         current direction */

      this->take_step(x,p,stepc,dir/pnorm,x1,dx);

      /* Evaluate function and gradient at new point xc */

      for(size_t i=0;i<this->dim;i++) avt5[i]=gsl_vector_get(x1,i);
      (*this->func)(this->dim,avt5,fc,*this->params);

      if (fc < fa) {

        /* Success, reduced the function value */
        step = stepc*2.0;
        it_min = fc;
        gsl_vector_memcpy(x,x1);

        for(size_t i=0;i<this->dim;i++) avt6[i]=gsl_vector_get(x1,i);
        if (this->grad_given) {
          (*this->grad)(this->dim,avt6,avt7,*this->params);
        } else {
          (*this->agrad)(this->dim,avt6,avt7,*this->params);
        }
        for(size_t i=0;i<this->dim;i++) gsl_vector_set(gradient,i,avt7[i]);

        it_info=0;
        return gsl_success;
      }

      /* Do a line minimization in the region (xa,fa) (xc,fc) to find an
         intermediate (xb,fb) satisfying fa > fb < fc. Choose an initial
         xb based on parabolic interpolation */

      this->intermediate_point(x,p,dir/pnorm,pg,stepa,stepc,fa,fc,x1,
                               dx1,gradient,&stepb,&fb);

      if (stepb == 0.0) {
        it_info=2;
        return 0;
      }

      this->minimize(x,p,dir/pnorm,stepa,stepb,stepc,fa,fb,fc,tol,
                     x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm);

      gsl_vector_memcpy(x,x2);

      /* Choose a new conjugate direction for the next step */
      iter=(iter+1) % x->size;
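
      // Either restart with the steepest-descent direction (every n
      // iterations, where n is the number of variables) or apply the
      // Fletcher-Reeves update p' = g1 + (|g1|/|g0|)^2 * p. (The code
      // below stores beta with a negative sign, and the step is
      // always taken along -p.)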
      if (iter==0) {

        gsl_vector_memcpy(p,gradient);
        pnorm=g1norm;

      } else {

        /* p' = g1 - beta * p */
        double beta = -pow(g1norm/g0norm,2.0);
        gsl_blas_dscal(-beta,p);
        gsl_blas_daxpy(1.0,gradient,p);
        pnorm=gsl_blas_dnrm2(p);
      }

      g0norm=g1norm;
      gsl_vector_memcpy(g0,gradient);

      it_info=0;
      return gsl_success;

    }

    /// Return string denoting type ("gsl_mmin_conf")
    virtual const char *type() { return "gsl_mmin_conf"; }

    /// Allocate the memory
    virtual int allocate(size_t n) {

      x1 = gsl_vector_calloc(n);

      if (x1 == 0) {
        GSL_ERROR("failed to allocate space for x1",GSL_ENOMEM);
      }

      dx1 = gsl_vector_calloc(n);

      if (dx1 == 0) {
        gsl_vector_free(x1);
        GSL_ERROR("failed to allocate space for dx1",GSL_ENOMEM);
      }

      x2 = gsl_vector_calloc(n);

      if (x2 == 0) {
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        GSL_ERROR("failed to allocate space for x2",GSL_ENOMEM);
      }

      p = gsl_vector_calloc(n);

      if (p == 0) {
        gsl_vector_free(x2);
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        GSL_ERROR("failed to allocate space for p",GSL_ENOMEM);
      }

      g0 = gsl_vector_calloc(n);

      if (g0 == 0) {
        gsl_vector_free(p);
        gsl_vector_free(x2);
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        GSL_ERROR("failed to allocate space for g0",GSL_ENOMEM);
      }

      udx=gsl_vector_calloc(n);
      if (udx==0) {
        gsl_vector_free(g0);
        gsl_vector_free(p);
        gsl_vector_free(x2);
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        set_err_ret("Failed to allocate space for udx",gsl_enomem);
      }

      ugg=gsl_vector_calloc(n);
      if (ugg==0) {
        gsl_vector_free(udx);
        gsl_vector_free(g0);
        gsl_vector_free(p);
        gsl_vector_free(x2);
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        set_err_ret("Failed to allocate space for ugg",gsl_enomem);
      }

      ugx=gsl_vector_calloc(n);
      if (ugx==0) {
        gsl_vector_free(ugg);
        gsl_vector_free(udx);
        gsl_vector_free(g0);
        gsl_vector_free(p);
        gsl_vector_free(x2);
        gsl_vector_free(dx1);
        gsl_vector_free(x1);
        set_err_ret("Failed to allocate space for ugx",gsl_enomem);
      }

      this->ao.allocate(avt5,n);
      this->ao.allocate(avt6,n);
      this->ao.allocate(avt7,n);
      this->ao.allocate(avt8,n);

      this->base_allocate(n);

      return gsl_success;

    }

    /// Free the allocated memory
    virtual int free() {
      gsl_vector_free(ugx);
      gsl_vector_free(ugg);
      gsl_vector_free(udx);
      gsl_vector_free(g0);
      gsl_vector_free(p);
      gsl_vector_free(x2);
      gsl_vector_free(dx1);
      gsl_vector_free(x1);

      this->ao.free(avt5);
      this->ao.free(avt6);
      this->ao.free(avt7);
      this->ao.free(avt8);

      this->base_free();

      return 0;
    }

    /// Reset the minimizer to use the current point as a new starting point
    int restart() {
      iter = 0;
      return gsl_success;
    }

    /// Set the function and initial guess
    virtual int set(vec_t &x, double u_step_size, double tol_u,
                    func_t &ufunc, param_t &pa) {

      if (this->dim==0) {
        set_err_ret("Memory not allocated in set().",gsl_efailed);
      }

      this->base_set(ufunc,pa);
      for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]);

      iter = 0;
      step = u_step_size;
      tol = tol_u;

      // Evaluate the function and its gradient
      ufunc(this->dim,x,it_min,pa);
      this->agrad->set_function(ufunc);
      (*this->agrad)(this->dim,x,avt8,*this->params);
      for(size_t i=0;i<this->dim;i++) {
        gsl_vector_set(ugg,i,avt8[i]);
      }

      /* Use the gradient as the initial direction */

      gsl_vector_memcpy(p,ugg);
      gsl_vector_memcpy(g0,ugg);

      double gnorm = gsl_blas_dnrm2(ugg);

      pnorm = gnorm;
      g0norm = gnorm;

      return gsl_success;
    }

    /// Set the function, the gradient, and the initial guess
    virtual int set_de(vec_t &x, double u_step_size, double tol_u,
                       func_t &ufunc, dfunc_t &udfunc, param_t &pa) {

      if (this->dim==0) {
        set_err_ret("Memory not allocated in set_de().",gsl_efailed);
      }

      this->base_set_de(ufunc,udfunc,pa);
      for(size_t i=0;i<this->dim;i++) gsl_vector_set(ugx,i,x[i]);

      iter = 0;
      step = u_step_size;
      tol = tol_u;

      // Evaluate the function and its gradient, using the
      // user-specified gradient
      ufunc(this->dim,x,it_min,pa);
      udfunc(this->dim,x,avt8,pa);
      for(size_t i=0;i<this->dim;i++) {
        gsl_vector_set(ugg,i,avt8[i]);
      }

      /* Use the gradient as the initial direction */

      gsl_vector_memcpy(p,ugg);
      gsl_vector_memcpy(g0,ugg);

      double gnorm = gsl_blas_dnrm2(ugg);

      pnorm = gnorm;
      g0norm = gnorm;

      return gsl_success;
    }
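
    // Note: set() computes the initial gradient with the automatic
    // (numerical) gradient object, while set_de() uses the
    // user-specified gradient. Both are called by mmin() and
    // mmin_de() below, which also handle allocate() and free().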
    /** \brief Calculate the minimum \c fmin of \c ufunc w.r.t. the
        array \c xx of size \c nn
    */
    virtual int mmin(size_t nn, vec_t &xx, double &fmin, param_t &pa,
                     func_t &ufunc) {

      int xiter=0, status;

      allocate(nn);

      set(xx,step_size,lmin_tol,ufunc,pa);

      do {
        xiter++;

        status=iterate();

        if (status || it_info!=0) {
          break;
        }

        // Equivalent to gsl_multimin_test_gradient with
        // additional code to print out present iteration

        double norm=gsl_blas_dnrm2(ugg);

        if (this->verbose>0) {
          this->print_iter(nn,*((ovector *)ugx),it_min,xiter,
                           norm,this->tolf,"gsl_mmin_conf");
        }

        if (norm<this->tolf) status=gsl_success;
        else status=gsl_continue;

      } while (status == gsl_continue && xiter < this->ntrial);

      for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i);
      fmin=it_min;

      free();

      this->last_ntrial=xiter;

      return status;
    }

    /** \brief Calculate the minimum \c fmin of \c ufunc w.r.t. the
        array \c xx of size \c nn, using the gradient \c udfunc
    */
    virtual int mmin_de(size_t nn, vec_t &xx, double &fmin, param_t &pa,
                        func_t &ufunc, dfunc_t &udfunc) {

      int xiter=0, status;

      allocate(nn);

      set_de(xx,step_size,lmin_tol,ufunc,udfunc,pa);

      do {
        xiter++;

        status=iterate();

        if (status || it_info!=0) {
          break;
        }

        // Equivalent to gsl_multimin_test_gradient with
        // additional code to print out present iteration

        double norm=gsl_blas_dnrm2(ugg);

        if (this->verbose>0) {
          this->print_iter(nn,*((ovector *)ugx),it_min,xiter,
                           norm,this->tolf,"gsl_mmin_conf");
        }

        if (norm<this->tolf) status=gsl_success;
        else status=gsl_continue;

      } while (status == gsl_continue && xiter < this->ntrial);

      for(size_t i=0;i<nn;i++) xx[i]=gsl_vector_get(ugx,i);
      fmin=it_min;

      free();

      this->last_ntrial=xiter;

      return status;
    }

  };

  /// An array version of gsl_mmin_conf
  template<class param_t, size_t nv> class gsl_mmin_conf_array :
    public gsl_mmin_conf<param_t,multi_vfunct<param_t,nv>,
    double[nv],double[nv],array_alloc<double[nv]>,grad_vfunct<param_t,nv>,
    gradient_array<param_t,multi_vfunct<param_t,nv>,nv>,
    simple_grad_array<param_t,multi_vfunct<param_t,nv>,nv> > {
  };

#ifndef DOXYGENP
}
#endif

#endif