MachineIntelligenceCore:Algorithms
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
Tensor.hpp
Go to the documentation of this file.
1 
26 #ifndef SRC_TYPES_TENSOR_HPP_
27 #define SRC_TYPES_TENSOR_HPP_
28 
29 #include <stdio.h>
30 #include <vector>
31 #include <random>
32 #include <memory> // std::shared_ptr
33 #include <cstring> // memcpy
34 
35 #include <boost/serialization/serialization.hpp>
36 // include this header to serialize vectors
37 #include <boost/serialization/vector.hpp>
38 // include this header to serialize arrays
39 #include <boost/serialization/array.hpp>
40 #include <boost/serialization/version.hpp>
41 
42 // Forward declaration of class boost::serialization::access
43 namespace boost {
44 namespace serialization {
45 class access;
46 }//: serialization
47 }//: access
48 
49 namespace mic {
50 namespace types {
51 
52 // Forward declaration of a class Matrix.
53 template<typename T>
54 class Matrix;
55 
62 template<class T>
63 class Tensor {
64 public:
65 
69  Tensor() : elements(0), data_ptr(nullptr) {
70 
71  }
72 
77  Tensor(std::initializer_list<size_t> dims_) {
78  // Set dimensions.
79  elements = 1;
80  for (auto ith_dimension : dims_) {
81  // Every dimension must be greater than 0!
82  assert(ith_dimension > 0);
83  // Add dimension.
84  dimensions.push_back(ith_dimension);
85  elements *= ith_dimension;
86  }//: for
87 
88  // Allocate memory.
89  data_ptr = new T[elements];
90  // Check whether data was allocated. :]
91  assert(data_ptr != nullptr);
92 
93  // Initialize: set all elements to zeros.
94  zeros();
95  }
96 
101  Tensor(std::vector<size_t> dims_) {
102  // Set dimensions.
103  elements = 1;
104  for (auto ith_dimension : dims_) {
105  // Every dimension must be greater than 0!
106  assert(ith_dimension > 0);
107  // Add dimension.
108  dimensions.push_back(ith_dimension);
109  elements *= ith_dimension;
110  }//: for
111 
112  // Allocate memory.
113  data_ptr = new T[elements];
114  // Check whether data was allocated. :]
115  assert(data_ptr != nullptr);
116 
117  // Initialize: set all elements to zeros.
118  zeros();
119  }
120 
125  Tensor(const Tensor<T>& t) {
126  // Copy dimensions.
127  elements = t.elements;
128  dimensions.reserve(t.dimensions.size());
129  std::copy(t.dimensions.begin(), t.dimensions.end(),
130  std::back_inserter(dimensions));
131 
132  // Allocate memory.
133  data_ptr = new T[t.elements];
134  // Copy data.
135  memcpy(data_ptr, t.data_ptr, sizeof(T) * elements);
136  }
137 
	// NOTE(review): the signature line was dropped by the doc generator; per the
	// generated index this constructor is:
	//   Tensor(const mic::types::Matrix<T>& mat_)   (Tensor.hpp:142)
	// It builds a 2D tensor from a matrix.
		// Copy dimensions: dimension 0 = cols, dimension 1 = rows.
		elements = mat_.cols() * mat_.rows();
		dimensions.push_back(mat_.cols());
		dimensions.push_back(mat_.rows());

		// Allocate memory.
		data_ptr = new T[elements];
		// Copy data as one flat block. NOTE(review): this assumes mat_.data()
		// lays elements out so that dimension 0 (cols) varies fastest, matching
		// getIndex(); confirm against the Matrix class's storage order.
		memcpy(data_ptr, mat_.data(), sizeof(T) * elements);
	}
153 
158  const Tensor<T>& operator=(const Tensor<T>& t) {
159  // Check the dimensions.
160  if (elements != t.elements) {
161  elements = t.elements;
162  // Allocate memory.
163  if (data_ptr != nullptr)
164  delete[] data_ptr;
165  data_ptr = new T[t.elements];
166  }//: if
167 
168  // Copy dimensions.
169  dimensions.clear();
170  dimensions.reserve(t.dimensions.size());
171  std::copy(t.dimensions.begin(), t.dimensions.end(),
172  std::back_inserter(dimensions));
173 
174  // Copy data.
175  memcpy(data_ptr, t.data_ptr, sizeof(T) * elements);
176  return *this;
177  }
178 
	// NOTE(review): the "~Tensor() {" signature line (Tensor.hpp:182) was
	// dropped by the doc generator. Destructor: releases the element buffer.
	// Free memory. (delete[] on nullptr is a safe no-op, so the guard
	// below is redundant but harmless.)
		if (data_ptr)
			delete[] data_ptr;
	}
190  void flatten() {
191  dimensions.clear();
192  dimensions.push_back(elements);
193  }
194 
200  void conservativeResize(std::vector<size_t> dims_) {
201  // Check whether new dimensions are ok.
202  size_t new_size = 1;
203  for (auto ith_dimension : dims_) {
204  // Every dimension must be greater than 0!
205  assert(ith_dimension > 0);
206  new_size *= ith_dimension;
207  }
208  assert(new_size == elements);
209 
210  // Set new dimensions.
211  dimensions.clear();
212  dimensions.reserve(dims_.size());
213  std::copy(dims_.begin(), dims_.end(), std::back_inserter(dimensions));
214  }
215 
224  void resize(std::vector<size_t> dims_) {
225  // Check whether new dimensions are ok.
226  size_t new_size = 1;
227  for (auto ith_dimension : dims_) {
228  // Every dimension must be greater than 0!
229  assert(ith_dimension > 0);
230  new_size *= ith_dimension;
231  }
232 
233  // Set new dimensions.
234  dimensions.clear();
235  dimensions.reserve(dims_.size());
236  std::copy(dims_.begin(), dims_.end(), std::back_inserter(dimensions));
237 
238  // Copy data.
239  if (new_size != elements) {
240  T* old_prt = data_ptr;
241  // Allocate memory.
242  data_ptr = new T[new_size];
243  // Estimate the size of block that must be copied.
244  size_t block_size = (new_size < elements) ? new_size : elements;
245  // Change number of elements.
246  elements = new_size;
247  // Zero elements.
248  zeros();
249  // Copy data.
250  memcpy(data_ptr, old_prt, sizeof(T) * block_size);
251  // Free the old block.
252  delete[] old_prt;
253  } //: if
254  //: else: do nothing;)
255  }
256 
261  void elementwiseFunction(T (*func)(T)) {
262 #pragma omp parallel for
263  for (size_t i = 0; i < elements; i++) {
264  data_ptr[i] = (*func)(data_ptr[i]);
265  } //: for
266  }
267 
273  void elementwiseFunctionScalar(T (*func)(T, T), T scalar) {
274 #pragma omp parallel for
275  for (size_t i = 0; i < elements; i++) {
276  data_ptr[i] = (*func)(data_ptr[i], scalar);
277  } //: for
278  }
279 
280 
286  void normRandReal(float mean = 0, float stddev = 1) {
287  // Initialize random number generator with normal distribution.
288  std::random_device rd;
289  std::mt19937 mt(rd());
290  std::normal_distribution<> dist(mean, stddev);
291  // Set value of all elements to random.
292 #pragma omp parallel for
293  for (size_t i = 0; i < elements; i++) {
294  data_ptr[i] = dist(mt);
295  }
296  }
297 
301  T* data() {
302  return data_ptr;
303  }
304 
308  std::vector<size_t> dims() {
309  return dimensions;
310  }
311 
315  size_t dim(size_t k) {
316  return dimensions[k];
317  }
318 
323  size_t size() {
324  return elements;
325  }
326 
330  void zeros() {
331  memset(data_ptr, 0, elements * sizeof(T));
332  }
333 
337  void ones() {
338 #pragma omp parallel for
339  for (size_t i = 0; i < elements; i++)
340  data_ptr[i] = 1;
341  }
342 
346  void enumerate() {
347 #pragma omp parallel for
348  for (size_t i = 0; i < elements; i++)
349  data_ptr[i] = i;
350  }
351 
356  void setValue(T value_) {
357 #pragma omp parallel for
358  for (size_t i = 0; i < elements; i++)
359  data_ptr[i] = value_;
360  }
361 
362 
368  void randn(T mean = 0, T stddev = 1) {
369 
370  // Initialize random number generator with normal distribution.
371  std::random_device rd;
372  std::mt19937 mt(rd());
373  std::normal_distribution<T> dist(mean, stddev);
374 
375 #pragma omp parallel for
376  for (size_t i = 0; i < elements; i++) {
377  data_ptr[i] = (T)dist(mt);
378  }
379  }
380 
387  void rand(T min = 0, T max = 1) {
388 
389  // Initialize random number generator with normal distribution.
390  std::random_device rd;
391  std::mt19937 mt(rd());
392  std::uniform_real_distribution<T> dist(min, max);
393 
394 #pragma omp parallel for
395  for (size_t i = 0; i < elements; i++) {
396  data_ptr[i] = (T)dist(rd);
397  }
398  }
399 
400 
407  // Dimensions must match.
408  assert(dims().size() == obj_.dims().size());
409  for (size_t d=1; d<dimensions.size(); d++) {
410  assert(dimensions[d] == obj_.dimensions[d]);
411  }//: for
412 
413  // Create new tensor.
414  mic::types::Tensor<T> new_tensor(dimensions);
415 #pragma omp parallel for
416  for (size_t i = 0; i < elements; i++)
417  new_tensor.data_ptr[i] = data_ptr[i] + obj_.data_ptr[i];
418 
419  // Return it.
420  return new_tensor;
421  }
422 
429  // Dimensions must match.
430  assert(dims().size() == obj_.dims().size());
431  for (size_t d=1; d<dimensions.size(); d++) {
432  assert(dimensions[d] == obj_.dimensions[d]);
433  }//: for
434 
435  // Create new tensor.
436  mic::types::Tensor<T> new_tensor(dimensions);
437 #pragma omp parallel for
438  for (size_t i = 0; i < elements; i++)
439  new_tensor.data_ptr[i] = data_ptr[i] - obj_.data_ptr[i];
440 
441  // Return it.
442  return new_tensor;
443  }
444 
449  T sum() {
450  T sum = 0;
451  for (size_t i = 0; i < elements; i++)
452  sum += data_ptr[i];
453 
454  // Return the sum.
455  return sum;
456  }
457 
463  friend std::ostream& operator<<(std::ostream& os_, const Tensor& obj_) {
464  // Display dimensions.
465  os_ << "[";
466  for (size_t i = 0; i < obj_.dimensions.size() - 1; i++)
467  os_ << obj_.dimensions[i] << " x ";
468  os_ << obj_.dimensions.back() << "]: [";
469 
470  // Display elements.
471  for (size_t i = 0; i < obj_.elements - 1; i++)
472  os_ << obj_.data_ptr[i] << ", ";
473  os_ << obj_.data_ptr[obj_.elements - 1] << "]";
474 
475  return os_;
476  }
477 
483  inline T& operator()(size_t index_) {
484  return data_ptr[index_];
485  }
486 
492  inline T& operator()(std::vector<size_t> coordinates_) {
493  return data_ptr[getIndex(coordinates_)];
494  }
495 
501  inline const T& operator()(size_t index_) const {
502  return data_ptr[index_];
503  }
504 
510  inline const T& operator()(std::vector<size_t> coordinates_) const {
511  return (T) data_ptr[getIndex(coordinates_)];
512  }
513 
	/*!
	 * Computes the flat index into data_ptr for a vector of per-dimension
	 * coordinates. Dimension 0 varies fastest in memory: coordinates_[0] is
	 * always the final additive term (see the 2D case below).
	 * @param coordinates_ One coordinate per dimension (count must equal the rank).
	 * @return Flat index into the data buffer.
	 */
	inline size_t getIndex(std::vector<size_t> coordinates_) {
		// Dimensions must match!
		assert(dims().size() == coordinates_.size());
		// Do the magic - iterate through all dimensions in order to compute the index.
		// 1 - x
		// 2 - y*width + x
		// 3 - z*height*width + y*width + x = (z*height +y)*width + x
		// 4 - v*depth*height*width + z*height*width + y*width + x = (v*d +z)(h +y)*w +x
		// ...

		// But first: solve the simple 1d-2d-3d cases.
		switch (coordinates_.size()) {
		case 1:
			return coordinates_[0];
		case 2:
			return coordinates_[1] * dimensions[0] + coordinates_[0];
		case 3:
			return (coordinates_[2] * dimensions[1] + coordinates_[1]) * dimensions[0] + coordinates_[0];
		default:
			// Rank > 3: fall back to the recursive formula.
			return recursiveIndex(0, coordinates_);
		} //: switch
	}
542 
543 
	/*!
	 * Extracts a sub-tensor (block) given an inclusive [lower, upper] range per
	 * dimension. A one-element range {k} is treated as {k, k}.
	 * Ranks 1-3 are handled directly; higher ranks go through
	 * recursiveBlockCopy(), which copies contiguous runs along dimension 0.
	 * @param ranges_ One range (1 or 2 values) per tensor dimension.
	 * @return New tensor containing a copy of the selected block.
	 */
	Tensor<T> block(std::vector< std::vector<size_t> > ranges_) {
		// All dimensions (tensor and lower and higher) must be equal!
		assert(dimensions.size() == ranges_.size());

		// Set dimensions.
		std::vector<size_t> new_dims;
		for (size_t i=0; i < ranges_.size(); i++) {
			// Every range must be given.
			assert((ranges_[i].size() >0) && (ranges_[i].size() < 3));
			// Add second range - just double the first one.
			if (ranges_[i].size() == 1)
				ranges_[i].push_back(ranges_[i][0]);
			// Calculate the dimension.
			size_t ith_dimension = ranges_[i][1] - ranges_[i][0] +1;
			// Every dimension must be greater than 0!
			assert(ranges_[i][0] >= 0);
			assert(ranges_[i][1] < dimensions[i]);
			assert(ith_dimension > 0);
			// Add dimension.
			new_dims.push_back(ith_dimension);
		}//: for
		// Create tensor of a required size.
		mic::types::Tensor<T> new_tensor(new_dims);

		// Do the magic.
		// Get block by block and copy it in the right places.
		// But first: solve the simple 1d-2d-3d cases.
		switch (new_dims.size()) {
		case 1: {
			// Copy data from lower to higher.
			memcpy(new_tensor.data_ptr, (data_ptr + ranges_[0][0]), new_dims[0]* sizeof(T));
			break;
		}
		case 2: {
			// Iterate through blocks - one contiguous run per selected row.
			for (size_t i=ranges_[1][0], j=0; i<=ranges_[1][1]; i++, j++) {
				// Copy data from lower to higher.
				memcpy(new_tensor.data_ptr + j* new_dims[0], (data_ptr + i * dimensions[0] + ranges_[0][0]), new_dims[0]* sizeof(T));
			}//: for
			break;
		}
		case 3: {
			// Iterate through blocks - one contiguous run per (dim2, dim1) pair.
			for (size_t i2=ranges_[2][0], j2=0; i2<=ranges_[2][1]; i2++, j2++) {
				for (size_t i1=ranges_[1][0], j1=0; i1<=ranges_[1][1]; i1++, j1++) {
					// Copy data from lower to higher.
					memcpy(new_tensor.data_ptr + (j2* new_dims[1] + j1)* new_dims[0], (data_ptr + (i2 * dimensions[1] + i1) * dimensions[0] + ranges_[0][0]), new_dims[0]* sizeof(T));
				}//: for
			}//: for
			break;
		}
		default:
			// Vector of indices - empty for now, will be filled by recursive block copy.
			std::vector<size_t> is;
			std::vector<size_t> js;
			// Recursively copy block by block - starting from 1st dimension, as 0th is treated as the size of block.
			recursiveBlockCopy(1, ranges_, is, js, new_dims, new_tensor.data_ptr);
		}//: switch

		return new_tensor;
	}
611 
617  void concatenate(const Tensor& obj_) {
618  // All dimensions (except 0th) must be equal!
619  assert(dimensions.size() == obj_.dimensions.size());
620  for (size_t d=1; d<dimensions.size(); d++) {
621  assert(dimensions[d] == obj_.dimensions[d]);
622  }//: for
623 
624  // Copy data.
625  T* old_prt = data_ptr;
626  // Allocate a new block of memory.
627  data_ptr = new T[elements + obj_.elements];
628 
629  // Copy data.
630  memcpy(data_ptr, old_prt, sizeof(T) * elements);
631  memcpy(data_ptr + elements, obj_.data_ptr, sizeof(T) * obj_.elements);
632 
633  // Free the old block.
634  delete[] old_prt;
635 
636  // Adjust the dimensions.
637  dimensions[0] += obj_.dimensions[0];
638  elements += obj_.elements;
639  }
640 
646  void concatenate(std::vector<mic::types::Tensor<T> > tensors_) {
647  // All dimensions (except 0th) of all tensors must be equal!
648  size_t new_block_size = 0;
649  size_t added_zero_dim = 0;
650  for (auto tensor: tensors_) {
651  assert(dimensions.size() == tensor.dimensions.size());
652  for (size_t d=1; d<dimensions.size(); d++) {
653  assert(dimensions[d] == tensor.dimensions[d]);
654  }//: for
655  new_block_size += tensor.elements;
656  added_zero_dim += tensor.dimensions[0];
657  }//: for
658 
659  // Ptr to old data.
660  T* old_prt = data_ptr;
661 
662  // Allocate a new block of memory.
663  data_ptr = new T[elements + new_block_size];
664  // Copy old data.
665  memcpy(data_ptr, old_prt, sizeof(T) * elements);
666  // Free the old block.
667  delete[] old_prt;
668 
669  // Copy the rest.
670  size_t block_end = elements;
671  for (auto tensor: tensors_) {
672  memcpy(data_ptr + block_end, tensor.data_ptr, sizeof(T) * tensor.elements);
673  block_end += tensor.elements;
674  }//: for
675 
676  // Adjust the dimensions.
677  dimensions[0] += added_zero_dim;
678  elements += new_block_size;
679  }
680 
681 
682 
private:
	/// Total number of elements stored in the tensor.
	size_t elements;

	/// Sizes of consecutive dimensions (dimension 0 varies fastest in memory,
	/// as established by getIndex()).
	std::vector<size_t> dimensions;

	// NOTE(review): the declaration of the raw data buffer "T* data_ptr" was
	// dropped by the doc generator at this point; it exists in the real header
	// (it is used throughout the class).
698 
705  size_t recursiveIndex(size_t dim_, std::vector<size_t> coordinates_) {
706  if (dim_ == coordinates_.size() - 1)
707  return coordinates_[dim_];
708  else
709  return recursiveIndex(dim_ + 1, coordinates_) * dimensions[dim_] + coordinates_[dim_];
710  }
711 
712 
	/*!
	 * Recursive helper used by block() for tensors of rank > 3.
	 * Walks dimensions 1..rank-1 collecting source (is_) and target (js_)
	 * indices; once every dimension is fixed, it memcpy's one contiguous run of
	 * new_dims_[0] elements (dimension 0 is the contiguous block being copied).
	 * The index vectors are passed by value and also push/pop'ed, keeping each
	 * recursion level's state independent.
	 * @param dim_ Dimension currently being iterated (starts at 1).
	 * @param ranges_ Inclusive [lower, upper] range per dimension.
	 * @param is_ Source indices collected so far.
	 * @param js_ Target indices collected so far.
	 * @param new_dims_ Dimensions of the target (block) tensor.
	 * @param tgt_data_ptr_ Destination data pointer.
	 */
	void recursiveBlockCopy (size_t dim_, std::vector< std::vector<size_t> > ranges_, std::vector<size_t> is_, std::vector<size_t> js_, std::vector<size_t> new_dims_, T* tgt_data_ptr_) {
		// Check dimensions!
		assert(new_dims_.size()>1);
		// Deepest level reached: copy one contiguous run along dimension 0.
		if (dim_ == new_dims_.size()){
			// Calculate destination index.
			size_t tgt_index = recursiveCalculateTargetIndex(0, js_, new_dims_);
			//std::cout << "recursive tgt_index = " << tgt_index << std::endl;
			// Calculate source index.
			size_t src_index = recursiveCalculateSourceIndex(0, is_, ranges_[0][0]);
			//std::cout << "recursive src_index = " << src_index << std::endl;
			// Copy data from source to target.
			memcpy(tgt_data_ptr_ + tgt_index, (data_ptr + src_index), new_dims_[0]* sizeof(T));
			return;
		}
		// For all ranges collect indices list for source and targets.
		for (size_t i=ranges_[dim_][0], j=0; i<=ranges_[dim_][1]; i++, j++) {
			// Add indices to lists.
			is_.push_back(i);
			js_.push_back(j);
			// Better call Saul! Otherwise call block copy ;)
			recursiveBlockCopy(dim_ + 1, ranges_, is_, js_, new_dims_, tgt_data_ptr_);
			// Remove recently added indices from lists. ;)
			is_.pop_back();
			js_.pop_back();
		}
	}
753 
	/*!
	 * Recursively folds the collected target indices (js_) into a flat offset
	 * in the destination (block) tensor, scaling by the block dimensions.
	 * Used by recursiveBlockCopy(); dim_ == 0 is the outermost call and adds no
	 * index term of its own (dimension 0 is the contiguous run).
	 * @param dim_ Recursion depth / dimension currently processed.
	 * @param js_ Target indices collected by recursiveBlockCopy().
	 * @param dims_ Dimensions of the destination (block) tensor.
	 * @return Flat element offset in the destination buffer.
	 */
	size_t recursiveCalculateTargetIndex(size_t dim_, std::vector<size_t> js_, std::vector<size_t> dims_) {
		if (dim_ == js_.size()) {
			//std::cout << " returning js_["<< dim_-1 <<"] = " << js_[dim_-1] << std::endl;
			return js_[dim_-1];
		} else if (dim_ == 0) {
			//std::cout << "calling recursiveCalculateTargetIndex(dim_+1 ("<< dim_ + 1 << "), js_, dims_)"<< std::endl;
			size_t tmp = recursiveCalculateTargetIndex(dim_ + 1, js_, dims_);
			size_t tmp2 = tmp * dims_[dim_];
			//std::cout << " returning tmp2 ("<< tmp2 <<") = tmp (" << tmp << ") * dims_[dim_ ("<< dim_<< ")] ("<< dims_[dim_] << ") "<< std::endl;
			return tmp2;
		} else {

			//std::cout << "calling recursiveCalculateTargetIndex(dim_+1 ("<< dim_ + 1 << "), js_, dims_)"<< std::endl;
			size_t tmp = recursiveCalculateTargetIndex(dim_ + 1, js_, dims_);
			size_t tmp2 = tmp * dims_[dim_] + js_[dim_-1];
			//std::cout << " returning tmp2 ("<< tmp2 <<") = tmp (" << tmp << ") * dims_[dim_ ("<< dim_<< ")] ("<< dims_[dim_] << ") + js_[dim_-1] ("<< js_[dim_-1] << ") "<< std::endl;
			return tmp2;
		}//: else
	}
780 
	/*!
	 * Recursively folds the collected source indices (is_) into a flat offset
	 * in this tensor, scaling by the tensor's own dimensions and adding the
	 * dimension-0 range offset at the outermost (dim_ == 0) level.
	 * Used by recursiveBlockCopy().
	 * @param dim_ Recursion depth / dimension currently processed.
	 * @param is_ Source indices collected by recursiveBlockCopy().
	 * @param offset_ Start of the dimension-0 range (ranges_[0][0]).
	 * @return Flat element offset in this tensor's data buffer.
	 */
	size_t recursiveCalculateSourceIndex(size_t dim_, std::vector<size_t> is_, size_t offset_) {
		if (dim_ == is_.size()) {
			//std::cout << " returning is_["<< dim_-1 <<"] = " << is_[dim_-1] << std::endl;
			return is_[dim_-1];
		} else if (dim_ == 0) {
			//std::cout << "calling recursiveCalculateSourceIndex(dim_+1 ("<< dim_ + 1 << "), is_, dimensions)"<< std::endl;
			size_t tmp = recursiveCalculateSourceIndex(dim_ + 1, is_, offset_);
			size_t tmp2 = tmp * dimensions[dim_] + offset_;
			//std::cout << " returning tmp2 ("<< tmp2 <<") = tmp (" << tmp << ") * dimensions[dim_ ("<< dim_<< ")] ("<< dimensions[dim_] << ") + offset (" << offset_ <<")"<< std::endl;
			return tmp2;
		} else {
			//std::cout << "calling recursiveCalculateSourceIndex(dim_+1 ("<< dim_ + 1 << "), is_, dims_)"<< std::endl;
			size_t tmp = recursiveCalculateSourceIndex(dim_ + 1, is_, offset_);
			size_t tmp2 = tmp * dimensions[dim_] + is_[dim_-1];
			//std::cout << " returning tmp2 ("<< tmp2 <<") = tmp (" << tmp << ") * dimensions[dim_ ("<< dim_<< ")] ("<< dimensions[dim_] << ") + is_[dim_-1] ("<< is_[dim_-1] << ") "<< std::endl;
			return tmp2;
		}//: else
	}
806 
807  // Friend class - required for using boost serialization.
809 
	/*!
	 * Boost serialization (save half of the split member pair): stores the
	 * element count, the dimension vector, and the raw data buffer.
	 * @param ar Output archive.
	 * @param version Class version (currently unused on save).
	 */
	template<class Archive>
	void save(Archive & ar, const unsigned int version) const {
		ar & elements;
		ar & dimensions;
		// Serialize the raw buffer as a fixed-size array of `elements` items.
		ar & boost::serialization::make_array<T>(data_ptr, elements);
	}
821 
	/*!
	 * Boost serialization (load half of the split member pair): restores the
	 * element count and dimensions, then reallocates the data buffer to the
	 * deserialized size before reading the raw data into it.
	 * @param ar Input archive.
	 * @param version Class version (currently unused on load).
	 */
	template<class Archive>
	void load(Archive & ar, const unsigned int version) {
		ar & elements;
		ar & dimensions;
		// Allocate memory matching the deserialized element count.
		if (data_ptr != nullptr)
			delete[] data_ptr;
		data_ptr = new T[elements];
		ar & boost::serialization::make_array<T>(data_ptr, elements);
	}
837 
 838  // The serialization is split into separate save/load members because load must (re)allocate the memory before reading the data.
839  BOOST_SERIALIZATION_SPLIT_MEMBER()
840 };
841 
842 } //: namespace types
843 } //: namespace mic
844 
845 // Just in case if something important will change in the tensor class - set version.
846 BOOST_CLASS_VERSION(mic::types::Tensor<bool>, 1)
847 BOOST_CLASS_VERSION(mic::types::Tensor<short>, 1)
848 BOOST_CLASS_VERSION(mic::types::Tensor<int>, 1)
849 BOOST_CLASS_VERSION(mic::types::Tensor<long>, 1)
850 BOOST_CLASS_VERSION(mic::types::Tensor<float>, 1)
851 BOOST_CLASS_VERSION(mic::types::Tensor<double>, 1)
852 
853 #endif /* SRC_TYPES_TENSOR_HPP_ */
friend std::ostream & operator<<(std::ostream &os_, const Tensor &obj_)
Definition: Tensor.hpp:463
void conservativeResize(std::vector< size_t > dims_)
Definition: Tensor.hpp:200
const T & operator()(std::vector< size_t > coordinates_) const
Definition: Tensor.hpp:510
T & operator()(std::vector< size_t > coordinates_)
Definition: Tensor.hpp:492
size_t dim(size_t k)
Definition: Tensor.hpp:315
mic::types::Tensor< T > operator-(mic::types::Tensor< T > obj_)
Definition: Tensor.hpp:428
Tensor(const Tensor< T > &t)
Definition: Tensor.hpp:125
size_t recursiveIndex(size_t dim_, std::vector< size_t > coordinates_)
Definition: Tensor.hpp:705
void recursiveBlockCopy(size_t dim_, std::vector< std::vector< size_t > > ranges_, std::vector< size_t > is_, std::vector< size_t > js_, std::vector< size_t > new_dims_, T *tgt_data_ptr_)
Definition: Tensor.hpp:726
void concatenate(const Tensor &obj_)
Definition: Tensor.hpp:617
void elementwiseFunction(T(*func)(T))
Definition: Tensor.hpp:261
std::vector< size_t > dimensions
Definition: Tensor.hpp:692
void setValue(T value_)
Definition: Tensor.hpp:356
void randn(T mean=0, T stddev=1)
Definition: Tensor.hpp:368
void concatenate(std::vector< mic::types::Tensor< T > > tensors_)
Definition: Tensor.hpp:646
void elementwiseFunctionScalar(T(*func)(T, T), T scalar)
Definition: Tensor.hpp:273
void rand(T min=0, T max=1)
Definition: Tensor.hpp:387
const Tensor< T > & operator=(const Tensor< T > &t)
Definition: Tensor.hpp:158
void resize(std::vector< size_t > dims_)
Definition: Tensor.hpp:224
void normRandReal(float mean=0, float stddev=1)
Definition: Tensor.hpp:286
Tensor< T > block(std::vector< std::vector< size_t > > ranges_)
Definition: Tensor.hpp:550
const T & operator()(size_t index_) const
Definition: Tensor.hpp:501
friend class boost::serialization::access
Definition: Tensor.hpp:808
size_t getIndex(std::vector< size_t > coordinates_)
Definition: Tensor.hpp:520
void save(Archive &ar, const unsigned int version) const
Definition: Tensor.hpp:816
size_t recursiveCalculateSourceIndex(size_t dim_, std::vector< size_t > is_, size_t offset_)
Definition: Tensor.hpp:788
mic::types::Tensor< T > operator+(mic::types::Tensor< T > obj_)
Definition: Tensor.hpp:406
Tensor(std::vector< size_t > dims_)
Definition: Tensor.hpp:101
size_t recursiveCalculateTargetIndex(size_t dim_, std::vector< size_t > js_, std::vector< size_t > dims_)
Definition: Tensor.hpp:761
void load(Archive &ar, const unsigned int version)
Definition: Tensor.hpp:828
Template class representing an nD (n-Dimensional) tensor. Tensor is row-major, i.e. first dimension is height (rows), second is width (cols), third is depth (channels) etc.
Definition: Matrix.hpp:49
Template-typed Matrix of dynamic size. Uses OpenBLAS if found by CMAKE - overloaded, specializations of * operator for types: float, double.
Definition: Matrix.hpp:64
Tensor(std::initializer_list< size_t > dims_)
Definition: Tensor.hpp:77
Tensor(const mic::types::Matrix< T > &mat_)
Definition: Tensor.hpp:142
T & operator()(size_t index_)
Definition: Tensor.hpp:483
std::vector< size_t > dims()
Definition: Tensor.hpp:308