Object-oriented Scientific Computing Library: Version 0.910
/*
  -------------------------------------------------------------------

  Copyright (C) 2006-2012, Andrew W. Steiner

  This file is part of O2scl.

  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.

  -------------------------------------------------------------------
*/
/*--------------------------------------------------------------------------*
 * Open Optimization Library - Projected Gradient Method
 *
 * pgrad/pgrad.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Iara da Cunha
 * since: June, 29, 2004
 *
 * $Id: pgrad.c,v 1.14 2005/05/19 19:37:07 biloti Exp $
 *--------------------------------------------------------------------------*/
#ifndef O2SCL_OOL_MMIN_PGRAD_H
#define O2SCL_OOL_MMIN_PGRAD_H

#include <gsl/gsl_math.h>

#include <o2scl/ovector_tlate.h>
#include <o2scl/multi_funct.h>
#include <o2scl/ool_constr_mmin.h>

#ifndef DOXYGENP
namespace o2scl {
#endif

  /** \brief Constrained minimization by the projected gradient method (OOL)

      This is the simple extension of the steepest descent algorithm
      to constrained minimization. At each step, a line search is
      performed along the projected gradient direction, subject to
      the specified constraints.

      This algorithm is likely not ideal for most problems and is
      provided mostly for demonstration and educational purposes.
      Based on the implementation of \ref Kelley99 in OOL.

      Default template arguments
      - \c func_t - \ref multi_funct<>
      - \c dfunc_t - \ref grad_funct<>
      - \c vec_t - \ref ovector_base
      - \c alloc_vec_t - \ref ovector
      - \c alloc_t - \ref ovector_alloc

      \future Replace the explicit norm computation below with
      the more accurate dnrm2 from linalg
      \future Replace the generic variable 'tol' with 'tolf'
      or 'tolx' from \ref multi_min.
  */
  template<class func_t=multi_funct<>, class dfunc_t=grad_funct<>,
    class vec_t=ovector_base, class alloc_vec_t=ovector,
    class alloc_t=ovector_alloc> class ool_mmin_pgrad :
  public ool_constr_mmin<func_t,dfunc_t,ool_hfunct<int>,vec_t,
    alloc_vec_t,alloc_t> {

#ifndef DOXYGEN_INTERNAL

  protected:

    /// A convenient typedef for the unused Hessian product type
    typedef ool_hfunct<int> hfunc_t;

    /// Temporary vector
    alloc_vec_t xx;

    /// Project into feasible region
    int proj(vec_t &xt) {
      size_t ii, n=this->dim;

      for(ii=0;ii<n;ii++) {
        xt[ii]=GSL_MAX(this->L[ii],GSL_MIN(xt[ii],this->U[ii]));
      }
      return 0;
    }

    /// Line search
    int line_search() {

      double t, tnew, fx, fxx, dif2, gtd;

      fx=this->f;

      tnew=1;

      do {

        t=tnew;

        /* xx = x - t df */
        o2scl::vector_copy(this->dim,this->x,xx);
        for(size_t i=0;i<this->dim;i++) xx[i]-=t*this->gradient[i];
        proj(xx);

        fxx=(*this->func)(this->dim,xx);

        o2scl::vector_copy(this->dim,xx,this->dx);
        for(size_t i=0;i<this->dim;i++) this->dx[i]-=this->x[i];

        dif2=0.0;
        for(size_t i=0;i<this->dim;i++) dif2+=this->dx[i]*this->dx[i];

        gtd=0.0;
        for(size_t i=0;i<this->dim;i++) {
          gtd+=this->gradient[i]*this->dx[i];
        }

        tnew=-t*t*gtd/(2*(fxx-this->f-gtd));

        double arg1=sigma1*t;
        double arg2=GSL_MIN(sigma2*t,tnew);
        tnew=GSL_MAX(arg1,arg2);

        // sufficient decrease condition (Armijo)
      } while (fxx > fx-(alpha/t)*dif2);

      o2scl::vector_copy(this->dim,xx,this->x);
      this->f=fxx;

      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

#endif

  public:

    ool_mmin_pgrad() {
      fmin=-1.0e99;
      tol=1.0e-4;
      alpha=1.0e-4;
      sigma1=0.1;
      sigma2=0.9;
      this->ntrial=1000;
    }

    /** \brief Minimum function value (default \f$ -10^{99} \f$)

        If the function value is below this value, then the algorithm
        assumes that the function is not bounded and exits.
    */
    double fmin;

    /// Tolerance on infinite norm
    double tol;

    /** \brief Constant for the sufficient decrease condition
        (default \f$ 10^{-4} \f$)
    */
    double alpha;

    /// Lower bound to the step length reduction
    double sigma1;

    /// Upper bound to the step length reduction
    double sigma2;

    /// Allocate memory
    virtual int allocate(const size_t n) {
      if (this->dim>0) free();
      this->ao.allocate(xx,n);
      return ool_constr_mmin<func_t,dfunc_t,hfunc_t,vec_t,alloc_vec_t,
        alloc_t>::allocate(n);
    }

    /// Free previously allocated memory
    virtual int free() {
      if (this->dim>0) this->ao.free(xx);
      return ool_constr_mmin<func_t,dfunc_t,hfunc_t,vec_t,alloc_vec_t,
        alloc_t>::free();
    }

    /// Set the function, the initial guess, and the parameters
    virtual int set(func_t &fn, dfunc_t &dfn, vec_t &init) {

      int ret=ool_constr_mmin<func_t,dfunc_t,hfunc_t,vec_t,
        alloc_vec_t,alloc_t>::set(fn,dfn,init);

      // Turn initial guess into a feasible point
      proj(this->x);

      // Evaluate function and gradient
      this->f=(*this->func)(this->dim,this->x);
      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

    /// Restart the minimizer
    virtual int restart() {
      // Turn x into a feasible point
      proj(this->x);

      // Evaluate function and gradient
      this->f=(*this->func)(this->dim,this->x);
      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

    /// Perform an iteration
    virtual int iterate() {
      double t;

      line_search();

      /* Infinite norm of g1 = d <- [P(x - g) - x] */
      o2scl::vector_copy(this->dim,this->x,xx);
      for(size_t i=0;i<this->dim;i++) {
        xx[i]-=this->gradient[i];
      }
      proj(xx);
      for(size_t i=0;i<this->dim;i++) xx[i]-=this->x[i];

      double maxval=fabs(xx[0]);
      for(size_t i=1;i<this->dim;i++) {
        if (fabs(xx[i])>maxval) {
          maxval=fabs(xx[i]);
        }
      }
      this->size=fabs(maxval);

      return gsl_success;
    }

    /// See if we're finished
    virtual int is_optimal() {
      if (this->size>tol && this->f>fmin) {
        return gsl_continue;
      }
      return gsl_success;
    }

    /// Return string denoting type ("ool_mmin_pgrad")
    const char *type() { return "ool_mmin_pgrad"; }

  };

#ifndef DOXYGENP
}
#endif

#endif
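The iteration above is compact but spread across several member functions. The following is a minimal, self-contained sketch of the same projected-gradient scheme, written independently of the O2scl classes so that it can be compiled and run on its own. Everything in it is illustrative: the objective quadratic_bowl, its gradient, the box bounds, and all names are invented for this example and are not part of O2scl. It follows the structure of the class: project the steepest-descent trial point onto the box [L,U], backtrack with the same safeguarded quadratic-interpolation update for tnew as line_search(), accept a step under the Armijo-type test f(xx) <= f(x) - (alpha/t) ||xx - x||^2, and stop when the infinity norm of P(x - g) - x falls below tol.

#include <cmath>
#include <cstdio>
#include <algorithm>
#include <vector>

// Illustrative objective: f(x)=(x0-2)^2+(x1-2)^2, minimized over the
// box [0,1]x[0,1], so the constrained minimum is at the corner (1,1).
double quadratic_bowl(const std::vector<double> &x) {
  return (x[0]-2.0)*(x[0]-2.0)+(x[1]-2.0)*(x[1]-2.0);
}

void quadratic_bowl_grad(const std::vector<double> &x,
                         std::vector<double> &g) {
  g[0]=2.0*(x[0]-2.0);
  g[1]=2.0*(x[1]-2.0);
}

// Project a point onto the box [L,U], as in proj() above
void project(std::vector<double> &x, const std::vector<double> &L,
             const std::vector<double> &U) {
  for (size_t i=0;i<x.size();i++) x[i]=std::max(L[i],std::min(x[i],U[i]));
}

int main() {
  // Same default parameters as the ool_mmin_pgrad constructor
  const double alpha=1.0e-4, sigma1=0.1, sigma2=0.9, tol=1.0e-4;
  std::vector<double> L={0.0,0.0}, U={1.0,1.0};
  std::vector<double> x={0.5,0.25}, g(2), xx(2), dx(2);

  project(x,L,U);
  double f=quadratic_bowl(x);
  quadratic_bowl_grad(x,g);

  for (int it=0;it<1000;it++) {

    // Backtracking line search along the projected steepest-descent
    // direction, mirroring line_search() in the class above
    double t, tnew=1.0, fxx, dif2, gtd;
    do {
      t=tnew;

      // Trial point: project x - t*grad back into the box
      xx=x;
      for (size_t i=0;i<xx.size();i++) xx[i]-=t*g[i];
      project(xx,L,U);
      fxx=quadratic_bowl(xx);

      // dx = trial point minus current point; accumulate its squared
      // norm and its inner product with the gradient
      dif2=0.0; gtd=0.0;
      for (size_t i=0;i<xx.size();i++) {
        dx[i]=xx[i]-x[i];
        dif2+=dx[i]*dx[i];
        gtd+=g[i]*dx[i];
      }

      // Quadratic interpolation for the next trial step, safeguarded
      // into the interval [sigma1*t, sigma2*t]
      tnew=-t*t*gtd/(2.0*(fxx-f-gtd));
      tnew=std::max(sigma1*t,std::min(sigma2*t,tnew));

      // Armijo-type sufficient decrease test
    } while (fxx > f-(alpha/t)*dif2);

    x=xx;
    f=fxx;
    quadratic_bowl_grad(x,g);

    // Stopping test: infinity norm of P(x - grad) - x, as in iterate()
    xx=x;
    for (size_t i=0;i<xx.size();i++) xx[i]-=g[i];
    project(xx,L,U);
    double size=0.0;
    for (size_t i=0;i<xx.size();i++) size=std::max(size,std::fabs(xx[i]-x[i]));

    if (size<=tol) break;
  }

  printf("x = (%g, %g), f = %g\n",x[0],x[1],f);
  return 0;
}

Within O2scl itself one would instead instantiate ool_mmin_pgrad, call allocate() and set(), and then drive iterate() until is_optimal() returns gsl_success; the calls that supply the lower and upper bounds L and U belong to the ool_constr_mmin base class, which is not shown in this listing.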
Documentation generated with Doxygen. Provided under the GNU Free Documentation License (see License Information).