/*
  -------------------------------------------------------------------

  Copyright (C) 2006, 2007, 2008, Andrew W. Steiner

  This file is part of O2scl.

  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.

  -------------------------------------------------------------------
*/
#ifndef O2SCL_TENSOR_H
#define O2SCL_TENSOR_H

/** \file tensor.h
    \brief File for definitions of tensors
*/

#include <iostream>
#include <cstdlib>
#include <string>
#include <fstream>
#include <sstream>

#include <gsl/gsl_matrix.h>
#include <gsl/gsl_ieee_utils.h>

#include <o2scl/err_hnd.h>
#include <o2scl/uvector_tlate.h>
#include <o2scl/umatrix_tlate.h>
#include <o2scl/smart_interp.h>

#ifndef DOXYGENP
namespace o2scl {
#endif

  /** 
      \brief Tensor class with arbitrary dimensions

      \todo More complete testing.
      \todo Add const get functions for const references

      \future Could implement arithmetic operators + and - and some
      different products.
      \future Add slicing to get \ref ovector or \ref omatrix objects
  */
  class tensor {

#ifndef DOXYGEN_INTERNAL

  protected:

    /// The data
    double *data;

    /// A rank-sized array of the sizes of each dimension
    size_t *size;

    /// Rank
    size_t rk;

#endif

  public:

    /// Create an empty tensor with zero rank
    tensor() {
      rk=0;
      data=0;
      size=0;
    }

    /** 
        \brief Create a tensor of rank \c rank with sizes given in \c dim

        The parameter \c dim must be a pointer to an array of sizes
        with length \c rank. If the user requests any of the sizes to
        be zero, this constructor will call the error handler, create
        an empty tensor, and will allocate no memory.
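
        For example, the sketch below (the names \c dim, \c t, and
        \c ix are arbitrary) creates a rank-3 tensor with sizes 3, 4,
        and 5 in its three indices and sets one element:
        \code
        size_t dim[3]={3,4,5};
        tensor t(3,dim);
        size_t ix[3]={1,0,3};
        t.set(ix,2.0);
        \endcode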
    */
    tensor(size_t rank, size_t *dim) {
      rk=0;
      data=0;
      size=0;
      for(size_t i=0;i<rank;i++) {
        if (dim[i]==0) {
          set_err("Requested zero size in tensor::tensor(size_t, size_t *)",
                  gsl_einval);
          return;
        }
      }
      rk=rank;
      size=new size_t[rk];
      size_t tot=1;
      for(size_t i=0;i<rk;i++) {
        size[i]=dim[i];
        tot*=size[i];
      }
      data=new double[tot];
    }

    virtual ~tensor() {
      if (rk>0) {
        delete[] size;
        delete[] data;
        rk=0;
      }
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val) {
#if O2SCL_NO_RANGE_CHECK
#else
      if (rk==0) {
        set_err("Empty tensor in set().",gsl_einval);
        return 0;
      }
      if (index[0]>=size[0]) {
        set_err("Index greater than size in set().",gsl_index);
        return 0;
      }
#endif
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
#if O2SCL_NO_RANGE_CHECK
#else
        if (index[i]>=size[i]) {
          set_err("Index greater than size in set().",gsl_index);
          return 0;
        }
#endif
        ix*=size[i];
        ix+=index[i];
      }
      data[ix]=val;
      return 0;
    }

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
#if O2SCL_NO_RANGE_CHECK
#else
      if (rk==0) {
        set_err("Empty tensor in get().",gsl_einval);
        return 0.0;
      }
      if (index[0]>=size[0]) {
        set_err("Index greater than size in get().",gsl_index);
        return 0.0;
      }
#endif
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
#if O2SCL_NO_RANGE_CHECK
#else
        if (index[i]>=size[i]) {
          set_err("Index greater than size in get().",gsl_index);
          return 0.0;
        }
#endif
        ix*=size[i];
        ix+=index[i];
      }
      return data[ix];
    }

    /** 
        \brief Fix all but one index to create a vector

        This fixes all of the indices to the values given in \c index
        except for the index number \c ix, and returns the
        corresponding vector, whose length is equal to the size of the
        tensor in that index. The value <tt>index[ix]</tt> is ignored.

        For example, for a rank 3 tensor allocated with
        \code
        tensor t;
        size_t dim[3]={3,4,5};
        t.tensor_allocate(3,dim);
        \endcode
        the following code
        \code
        size_t index[3]={1,0,3};
        ovector_view v=t.vector_slice(1,index);
        \endcode
        gives a vector \c v of length 4 which refers to the values
        <tt>t(1,0,3)</tt>, <tt>t(1,1,3)</tt>, <tt>t(1,2,3)</tt>, and
        <tt>t(1,3,3)</tt>.
    */
    ovector_view vector_slice(size_t ix, size_t *index) {
      size_t start=index[0];
      if (ix==0) start=0;
      for(size_t i=1;i<rk;i++) {
        start*=size[i];
        if (i!=ix) start+=index[i];
      }
      size_t stride=1;
      for(size_t i=ix+1;i<rk;i++) stride*=size[i];
      return ovector_array_stride(size[ix],data+start,stride);
    }

    /** 
        \brief Fix all but two indices to create a matrix

        This fixes all of the indices to the values given in \c index
        except for the index number \c ix and the last index, and
        returns the corresponding matrix, whose size is equal to the
        size of the tensor in the two indices which are not fixed.
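
        As a brief sketch (the variable names are arbitrary), with
        the rank 3 tensor \c t allocated as in the vector_slice()
        documentation above, the code
        \code
        size_t index[3]={1,0,0};
        omatrix_view m=t.matrix_slice(index,1);
        \endcode
        gives a 4 by 5 matrix \c m whose element with indices
        <tt>(j,k)</tt> refers to <tt>t(1,j,k)</tt>.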
    */
    omatrix_view matrix_slice(size_t *index, size_t ix) {
      size_t start=index[0], tot=size[0];
      if (ix==0) start=0;
      for(size_t i=1;i<rk;i++) {
        tot*=size[i];
        start*=size[i];
        if (i!=ix) start+=index[i];
      }
      size_t tda=1;
      if (ix==rk-1) {
        tda=0;
      } else {
        for(size_t i=ix+1;i<rk;i++) tda*=size[i];
      }
      return omatrix_array(tot,data,start,tda,size[ix],size[rk-1]);
    }

    /// Return the rank of the tensor
    virtual int get_rank() { return rk; }

    /** 
        \brief Allocate space for a tensor of rank \c rank with sizes
        given in \c dim

        The parameter \c dim must be a pointer to an array of sizes
        with length \c rank.

        If memory was previously allocated, it will be freed before
        the new allocation and previously specified grid data will
        be lost.

        If the user requests any of the sizes to be zero, this
        function will call the error handler and will allocate no
        memory. If memory was previously allocated, the tensor is left
        unmodified and no deallocation is performed.
    */
    virtual int tensor_allocate(size_t rank, size_t *dim) {
      for(size_t i=0;i<rank;i++) {
        if (dim[i]==0) {
          set_err_ret
            ("Requested zero size in tensor::tensor_allocate().",
             gsl_einval);
        }
      }
      if (rk>0) tensor_free();
      rk=rank;
      size=new size_t[rk];
      size_t tot=1;
      for(size_t i=0;i<rk;i++) {
        size[i]=dim[i];
        tot*=size[i];
      }
      data=new double[tot];
      return 0;
    }

    /// Free allocated space (also sets rank to zero)
    virtual int tensor_free() {
      if (rk>0) {
        delete[] size;
        delete[] data;
        rk=0;
      }
      return 0;
    }

    /// Returns the size of the ith index
    virtual size_t get_size(size_t i) {
      if (i<rk) {
        return size[i];
      }
      set_err_ret("Requested index exceeding rank in get_size().",
                  gsl_einval);
    }

    /// Returns the total number of elements in the tensor
    virtual size_t total_size() {
      if (rk==0) return 0;
      size_t tot=1;
      for(size_t i=0;i<rk;i++) tot*=size[i];
      return tot;
    }

    /** 
        \brief Pack the indices into a single array index
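
        The indices are packed in row-major order, i.e. the last
        index varies fastest. As a brief sketch (the variable names
        are arbitrary):
        \code
        size_t dim[3]={3,4,5};
        tensor t(3,dim);
        size_t ix[3]={1,0,3};
        size_t flat=t.pack_indices(ix);  // flat is (1*4+0)*5+3 = 23
        size_t ix2[3];
        t.unpack_indices(flat,ix2);      // recovers {1,0,3}
        \endcode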
    */
    size_t pack_indices(size_t *index) {
      if (rk==0) {
        set_err("Empty tensor in pack_indices().",gsl_einval);
        return 0;
      }
      if (index[0]>=size[0]) {
        set_err("Index greater than size in pack_indices().",gsl_index);
        return 0;
      }
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
        if (index[i]>=size[i]) {
          set_err("Index greater than size in pack_indices().",gsl_index);
          return 0;
        }
        ix*=size[i];
        ix+=index[i];
      }
      return ix;
    }

    /// Unpack the single array index into indices
    int unpack_indices(size_t ix, size_t *index) {
      if (ix>=total_size()) {
        set_err("Index greater than total size in unpack_indices().",
                gsl_index);
        return 0;
      }
      size_t sub_size;
      for(size_t i=0;i<rk;i++) {
        if (i==rk-1) {
          index[i]=ix;
        } else {
          sub_size=1;
          for(size_t j=i+1;j<rk;j++) sub_size*=size[j];
          index[i]=ix/sub_size;
          // (Remember we're doing integer arithmetic here.)
          ix-=sub_size*(ix/sub_size);
        }
      }
      return 0;
    }

  };

  /** 
      \brief Tensor class with arbitrary dimensions and a grid

      This tensor class allows one to assign the indices to numerical
      scales so that n-dimensional interpolation can be performed. To
      set the grid, use set_grid(), and then interpolation can be
      done using interpolate().
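
      For example, a rank-2 object with a simple grid can be set up
      as in the sketch below (the variable names are arbitrary):
      \code
      size_t dim[2]={3,3};
      tensor_grid<> tg(2,dim);
      double g0[3]={1.0,2.0,3.0}, g1[3]={1.0,2.0,3.0};
      double *grid[2]={g0,g1};
      tg.set_grid(grid);
      double point[2]={2.0,3.0};
      tg.set_vals(point,1.5);
      \endcode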
      \future Only allocate space for the grid if it is set.
      \future Could implement arithmetic operators + and - and some
      different products.
  */
  template<template<class c_t> class base_interp_t=cspline_interp>
    class tensor_grid : public tensor {

#ifndef DOXYGEN_INTERNAL

  protected:

    /// A rank-sized set of arrays for the grid points
    double **grd;

    /// If true, the grid has been set by the user
    bool grid_set;

#endif

  public:

    /// Create an empty tensor with zero rank
    tensor_grid() : tensor() {
      grd=0;
      grid_set=false;
    }

    /** 
        \brief Create a tensor of rank \c rank with sizes given in \c dim

        The parameter \c dim must be a pointer to an array of sizes
        with length \c rank. If the user requests any of the sizes to
        be zero, this constructor will call the error handler, create
        an empty tensor, and will allocate no memory.
    */
    tensor_grid(size_t rank, size_t *dim) : tensor(rank,dim) {
      grid_set=false;
      grd=0;
      // If the base class constructor called the error handler, the
      // rank is zero and there is nothing to allocate here
      if (rk==0) return;
      grd=new double *[rk];
      for(size_t i=0;i<rk;i++) {
        grd[i]=new double[size[i]];
      }
    }

    virtual ~tensor_grid() {
      if (rk>0) {
        for(size_t i=0;i<rk;i++) delete[] grd[i];
        delete[] grd;
        delete[] size;
        delete[] data;
        rk=0;
      }
    }

    /// Set the element closest to grid point \c grdp to value \c val
    virtual int set_vals(double *grdp, double val) {

      // Find indices
      size_t *index=new size_t[rk];
      for(size_t i=0;i<rk;i++) index[i]=lookup_grid(i,grdp[i]);

      // Pack
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
        ix*=size[i];
        ix+=index[i];
      }

      // Delete memory for indices
      delete[] index;

      // Set value
      data[ix]=val;

      return 0;
    }

    /** \brief Set the element closest to grid point \c grdp to value
        \c val, returning the closest grid point in \c closest
    */
    virtual int set_vals(double *grdp, double val, double *closest) {

      // Find indices
      size_t *index=new size_t[rk];
      for(size_t i=0;i<rk;i++) {
        index[i]=lookup_grid_val(i,grdp[i],closest[i]);
      }

      // Pack
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
        ix*=size[i];
        ix+=index[i];
      }

      // Delete memory for indices
      delete[] index;

      // Set value
      data[ix]=val;

      return 0;
    }

    /// Get the element closest to grid point \c grdp
    virtual double get_vals(double *grdp, double val) {

      // Find indices
      size_t *index=new size_t[rk];
      for(size_t i=0;i<rk;i++) index[i]=lookup_grid(i,grdp[i]);

      // Pack
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
        ix*=size[i];
        ix+=index[i];
      }

      // Delete memory for indices
      delete[] index;

      // Get value
      return data[ix];
    }

    /** \brief Get the element closest to grid point \c grdp,
        returning the closest grid point in \c closest
    */
    virtual double get_vals(double *grdp, double val, double *closest) {

      // Find indices
      size_t *index=new size_t[rk];
      for(size_t i=0;i<rk;i++) {
        index[i]=lookup_grid_val(i,grdp[i],closest[i]);
      }

      // Pack
      size_t ix=index[0];
      for(size_t i=1;i<rk;i++) {
        ix*=size[i];
        ix+=index[i];
      }

      // Delete memory for indices
      delete[] index;

      // Get value
      return data[ix];
    }

    /** 
        \brief Set the grid

        The parameter \c val must define the grid, so that \c
        val[i][j] is the jth grid point for the ith index. The size
        of the array \c val[i] should be given by \c dim[i] where \c
        dim was the argument given to the constructor or to the
        function \ref tensor_allocate().
    */
    virtual int set_grid(double **val) {
      for(size_t i=0;i<rk;i++) {
        for(size_t j=0;j<size[i];j++) {
          grd[i][j]=val[i][j];
        }
      }
      grid_set=true;
      return 0;
    }

    /** 
        \brief Allocate space for a tensor of rank \c rank with sizes
        given in \c dim

        The parameter \c dim must be a pointer to an array of sizes
        with length \c rank.

        If memory was previously allocated, it will be freed before
        the new allocation and previously specified grid data will
        be lost.

        If the user requests any of the sizes to be zero, this
        function will call the error handler and will allocate no
        memory. If memory was previously allocated, the tensor is left
        unmodified and no deallocation is performed.
    */
    virtual int tensor_allocate(size_t rank, size_t *dim) {
      for(size_t i=0;i<rank;i++) {
        if (dim[i]==0) {
          set_err_ret
            ("Requested zero size in tensor_grid::tensor_allocate().",
             gsl_einval);
        }
      }
      if (rk>0) tensor_free();
      rk=rank;
      size=new size_t[rk];
      grd=new double *[rk];
      size_t tot=1;
      for(size_t i=0;i<rk;i++) {
        size[i]=dim[i];
        grd[i]=new double[size[i]];
        tot*=size[i];
      }
      data=new double[tot];
      return 0;
    }

    /// Free allocated space (also sets rank to zero)
    virtual int tensor_free() {
      if (rk>0) {
        for(size_t i=0;i<rk;i++) delete[] grd[i];
        delete[] grd;
        delete[] size;
        delete[] data;
        rk=0;
      }
      return 0;
    }

    /// Look up the index of the grid point closest to \c val for index \c i
    virtual size_t lookup_grid(size_t i, double val) {
      if (i<rk && grid_set) {
        size_t best=0;
        double min=fabs(grd[i][0]-val);
        for(size_t j=0;j<size[i];j++) {
          if (fabs(grd[i][j]-val)<min) {
            best=j;
            min=fabs(grd[i][j]-val);
          }
        }
        return best;
      }
      return 0;
    }

    /// Return the \c jth grid point of the \c ith index
    virtual double get_grid(size_t i, size_t j) {
      if (i<rk && grid_set) {
        return grd[i][j];
      }
      return 0;
    }

    /// Look up the indices of the grid point closest to \c vals
    virtual int lookup_grid(double *vals, size_t *indices) {
      for(size_t k=0;k<rk;k++) {
        indices[k]=0;
        double min=fabs(grd[k][0]-vals[k]);
        for(size_t j=0;j<size[k];j++) {
          if (fabs(grd[k][j]-vals[k])<min) {
            indices[k]=j;
            min=fabs(grd[k][j]-vals[k]);
          }
        }
      }
      return 0;
    }

    /** \brief Look up the index of the grid point closest to \c val
        for index \c i, returning the grid point in \c val2
    */
    virtual size_t lookup_grid_val(size_t i, double val, double &val2) {
      if (i<rk && grid_set) {
        size_t best=0;
        double min=fabs(grd[i][0]-val);
        val2=grd[i][0];
        for(size_t j=0;j<size[i];j++) {
          if (fabs(grd[i][j]-val)<min) {
            best=j;
            min=fabs(grd[i][j]-val);
            val2=grd[i][j];
          }
        }
        return best;
      }
      return 0;
    }

    /** 
        \brief Interpolate values \c vals into the tensor,
        returning the result

        This is a quick and dirty implementation of n-dimensional
        interpolation by recursive application of the 1-dimensional
        routine from \ref smart_interp_vec, using the base
        interpolation object specified in the template parameter
        \c base_interp_t. This will be slow for sufficiently large
        data sets.
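
        As a brief sketch (assuming a rank-2 object \c tg whose grid
        and data have already been set as described in the class
        documentation), an off-grid point can be interpolated with
        \code
        double point[2]={1.5,2.5};
        double approx=tg.interpolate(point);
        \endcode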
        \future It should be straightforward to improve the scaling
        of this algorithm significantly by creating a "window" of
        local points around the point of interest. This could be
        done easily by constructing an initial subtensor.
    */
    virtual double interpolate(double *vals) {
      typedef smart_interp_vec<double *,array_const_subvector,
        double *, pointer_alloc<double> > interp_t;

      if (rk==1) {

        sma_interp_vec<double *> si(size[0],grd[0],data);
        return si.interp(vals[0]);

      } else {

        // Get total number of interpolations at this level
        size_t ss=1;
        for(size_t i=1;i<rk;i++) ss*=size[i];

        // Create space for y vectors and interpolators
        double **yvec=new double *[ss];
        interp_t **si=new interp_t *[ss];
        base_interp_t<double *> **it1=new base_interp_t<double *> *[ss];
        base_interp_t<array_const_subvector> **it2=
          new base_interp_t<array_const_subvector> *[ss];
        for(size_t i=0;i<ss;i++) yvec[i]=new double[size[0]];

        // Create space for interpolation results
        tensor_grid tdat;
        tdat.tensor_allocate(rk-1,size+1);

        // Set grid for temporary tensor
        tdat.set_grid(grd+1);

        // Create starting coordinate and counter
        size_t *co=new size_t[rk];
        for(size_t i=0;i<rk;i++) co[i]=0;
        size_t cnt=0;

        // Loop over every interpolation
        bool done=false;
        while(done==false) {

          // Fill the y vector with the appropriate data
          for(size_t i=0;i<size[0];i++) {
            co[0]=i;
            yvec[cnt][i]=get(co);
          }

          it1[cnt]=new base_interp_t<double *>;
          it2[cnt]=new base_interp_t<array_const_subvector>;
          si[cnt]=new interp_t(*it1[cnt],*it2[cnt],size[0],grd[0],yvec[cnt]);

          tdat.set(co+1,si[cnt]->interp(vals[0]));

          // Go to the next interpolation
          cnt++;
          co[rk-1]++;
          // Carry if necessary
          for(int j=rk-1;j>0;j--) {
            if (co[j]>=size[j]) {
              co[j]=0;
              co[j-1]++;
            }
          }

          // Test if done
          if (cnt==ss) done=true;

          // End of while loop
        }

        // Now call the next level of interpolation
        double res=tdat.interpolate(vals+1);

        tdat.tensor_free();
        for(size_t i=0;i<ss;i++) {
          delete[] yvec[i];
          delete si[i];
          delete it1[i];
          delete it2[i];
        }
        delete[] co;
        delete[] si;
        delete[] it1;
        delete[] it2;
        delete[] yvec;

        return res;
      }
    }

  };

  /** 
      \brief Rank 1 tensor
  */
  class tensor1 : public tensor {

  public:

    /// Create an empty tensor
    tensor1() : tensor() {}

    /// Create a rank 1 tensor of size \c sz
    tensor1(size_t sz) : tensor(1,&sz) {}

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
      return tensor::get(index);
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val)
    { return tensor::set(index,val); }

    /// Get the element indexed by \c ix
    virtual double get(size_t ix) { return tensor::get(&ix); }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t index, double val)
    { return tensor::set(&index,val); }

    /// Get an element using array-like indexing
    virtual double &operator[](size_t ix) { return this->data[ix]; }

    /// Get an element using operator()
    virtual double &operator()(size_t ix) { return this->data[ix]; }
  };

  /** 
      \brief Rank 2 tensor
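
      As a brief usage sketch (the object name \c m is arbitrary),
      \code
      tensor2 m(3,4);
      m.set(1,2,5.0);
      double x=m(1,2);
      \endcode
      sets the element with indices (1,2) and then reads it back
      through operator().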
  */
  class tensor2 : public tensor {

  public:

    /// Create an empty tensor
    tensor2() : tensor() {}

    /// Create a rank 2 tensor of size \c (sz,sz2)
    tensor2(size_t sz, size_t sz2) : tensor() {
      this->rk=2;
      this->size=new size_t[2];
      this->size[0]=sz;
      this->size[1]=sz2;
      size_t tot=sz*sz2;
      this->data=new double[tot];
    }

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
      return tensor::get(index);
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val)
    { return tensor::set(index,val); }

    /// Get the element indexed by \c (ix1,ix2)
    virtual double get(size_t ix1, size_t ix2) {
      size_t sz[2]={ix1,ix2};
      return tensor::get(sz);
    }

    /// Set the element indexed by \c (ix1,ix2) to value \c val
    virtual int set(size_t ix1, size_t ix2, double val) {
      size_t sz[2]={ix1,ix2};
      return tensor::set(sz,val);
    }

    /// Get the element indexed by \c (ix1,ix2)
    virtual double &operator()(size_t ix, size_t iy)
    { return this->data[ix*this->size[1]+iy]; }
  };

  /** 
      \brief Rank 3 tensor
  */
  class tensor3 : public tensor {

  public:

    /// Create an empty tensor
    tensor3() : tensor() {}

    /// Create a rank 3 tensor of size \c (sz,sz2,sz3)
    tensor3(size_t sz, size_t sz2, size_t sz3) : tensor() {
      this->rk=3;
      this->size=new size_t[3];
      this->size[0]=sz;
      this->size[1]=sz2;
      this->size[2]=sz3;
      size_t tot=sz*sz2*sz3;
      this->data=new double[tot];
    }

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
      return tensor::get(index);
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val)
    { return tensor::set(index,val); }

    /// Get the element indexed by \c (ix1,ix2,ix3)
    virtual double get(size_t ix1, size_t ix2, size_t ix3) {
      size_t sz[3]={ix1,ix2,ix3};
      return tensor::get(sz);
    }

    /// Set the element indexed by \c (ix1,ix2,ix3) to value \c val
    virtual int set(size_t ix1, size_t ix2, size_t ix3, double val) {
      size_t sz[3]={ix1,ix2,ix3};
      return tensor::set(sz,val);
    }
  };

  /** 
      \brief Rank 3 tensor with a grid
  */
  template<template<class c_t> class interp_t=cspline_interp>
    class tensor_grid3 : public tensor_grid<interp_t> {

  public:

    /// Create an empty tensor
    tensor_grid3() : tensor_grid<interp_t>() {}

    /// Create a rank 3 tensor of size \c (sz,sz2,sz3)
    tensor_grid3(size_t sz, size_t sz2, size_t sz3) :
      tensor_grid<interp_t>() {
      this->rk=3;
      this->size=new size_t[3];
      this->grd=new double *[3];
      this->size[0]=sz;
      this->size[1]=sz2;
      this->size[2]=sz3;
      this->grd[0]=new double[sz];
      this->grd[1]=new double[sz2];
      this->grd[2]=new double[sz3];
      size_t tot=sz*sz2*sz3;
      this->data=new double[tot];
      this->grid_set=false;
    }

    virtual ~tensor_grid3() {
      if (this->rk>0) {
        for(size_t i=0;i<this->rk;i++) {
          delete[] this->grd[i];
        }
        delete[] this->size;
        delete[] this->grd;
        delete[] this->data;
        this->rk=0;
      }
    }

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
      return tensor_grid<interp_t>::get(index);
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val)
    { return tensor_grid<interp_t>::set(index,val); }

    /// Get the element indexed by \c (ix1,ix2,ix3)
    virtual double get(size_t ix1, size_t ix2, size_t ix3) {
      size_t sz[3]={ix1,ix2,ix3};
      return tensor_grid<interp_t>::get(sz);
    }

    /// Set the element indexed by \c (ix1,ix2,ix3) to value \c val
    virtual int set(size_t ix1, size_t ix2, size_t ix3, double val) {
      size_t sz[3]={ix1,ix2,ix3};
      return tensor_grid<interp_t>::set(sz,val);
    }
  };

  /** 
      \brief Rank 4 tensor
  */
  class tensor4 : public tensor {

  public:

    /// Create an empty tensor
    tensor4() : tensor() {}

    /// Create a rank 4 tensor of size \c (sz,sz2,sz3,sz4)
    tensor4(size_t sz, size_t sz2, size_t sz3, size_t sz4) :
      tensor() {
      this->rk=4;
      this->size=new size_t[4];
      this->size[0]=sz;
      this->size[1]=sz2;
      this->size[2]=sz3;
      this->size[3]=sz4;
      size_t tot=sz*sz2*sz3*sz4;
      this->data=new double[tot];
    }

    /// Get the element indexed by \c index
    virtual double get(size_t *index) {
      return tensor::get(index);
    }

    /// Set the element indexed by \c index to value \c val
    virtual int set(size_t *index, double val)
    { return tensor::set(index,val); }

    /// Get the element indexed by \c (ix1,ix2,ix3,ix4)
    virtual double get(size_t ix1, size_t ix2, size_t ix3, size_t ix4) {
      size_t sz[4]={ix1,ix2,ix3,ix4};
      return tensor::get(sz);
    }

    /// Set the element indexed by \c (ix1,ix2,ix3,ix4) to value \c val
    virtual int set(size_t ix1, size_t ix2, size_t ix3, size_t ix4,
                    double val) {
      size_t sz[4]={ix1,ix2,ix3,ix4};
      return tensor::set(sz,val);
    }
  };

#ifndef DOXYGENP
}
#endif

#endif