RockyML  0.0.1
A High-Performance Scientific Computing Framework
linear.h
1 #ifndef ROCKY_ETNA_LINEAR
2 #define ROCKY_ETNA_LINEAR
3 
#include <Eigen/Core>

#include <algorithm>
#include <type_traits>
#include <vector>
7 
8 namespace rocky{
9 namespace etna{
// Compile-time switch selecting whether a layer carries a bias term.
enum opt {bias, no_bias};
11 
16 template<typename T_e, int T_in_num, int T_in_dim, int T_out_dim,
17  opt T_opt_bias=opt::bias>
18 class linear{
19 public:
20  static constexpr int deduce_num_params_weights(){
21  return T_in_dim * T_out_dim;
22  }
23  static constexpr int deduce_num_params_bias(){
24  if constexpr(T_opt_bias == opt::bias)
25  return T_out_dim;
26  else
27  return 0;
28  }
29  static constexpr int deduce_num_params(){
30  return deduce_num_params_weights() + deduce_num_params_bias();
31  }
35  void feed(T_e* layer_mem_ptr, T_e* in_mem_ptr, T_e* out_mem_ptr){
36  Eigen::Map<Eigen::Matrix<T_e, T_in_dim, T_out_dim, Eigen::RowMajor>> W_(layer_mem_ptr);
37  Eigen::Map<Eigen::Matrix<T_e, T_in_num, T_in_dim, Eigen::RowMajor>> In_(in_mem_ptr);
38  Eigen::Map<Eigen::Matrix<T_e, T_in_num, T_out_dim, Eigen::RowMajor>> Out_(out_mem_ptr);
39  Out_ = In_ * W_;
40  // adding bias to each row
41  if constexpr (T_opt_bias == opt::bias){
42  Eigen::Map<Eigen::Matrix<T_e, 1, T_out_dim, Eigen::RowMajor>> Bias_(layer_mem_ptr + T_in_dim * T_out_dim);
43  Out_.rowwise() += Bias_;
44  }
45  }
46 }; // end linear
47 
48 template<typename T_e, int T_layers_num,
49  int T_in_num, int T_in_dim,
50  int T_out_dim, int T_hidden_dim,
51  opt T_opt_bias=opt::bias>
52 class mlp{
53 public:
54  static constexpr int deduce_num_params_in(){
56  }
57  static constexpr int deduce_num_params_hidden(){
59  }
60  static constexpr int deduce_num_params_out(){
62  }
63  static constexpr int deduce_num_params(){
64  return T_layers_num * deduce_num_params_hidden() + deduce_num_params_in() + deduce_num_params_out();
65  }
75  void feed(T_e* layer_mem_ptr, T_e* in_mem_ptr, T_e* out_mem_ptr){
76  // layers
80  // reserving space for intermediate matrices
81  T_e* H1_ = new T_e[T_in_num * T_hidden_dim];
82  T_e* H2_ = new T_e[T_in_num * T_hidden_dim];
83  // apply input layer
84  l_in.feed(layer_mem_ptr, in_mem_ptr, H1_);
85  // apply hidden layers
86  T_e* src, *dest;
87  int offset = l_in.deduce_num_params();
88  for (int hidden=0; hidden<T_layers_num; hidden++){
89  if (hidden % 2 == 0){ src = H1_; dest = H2_;}
90  else{ src = H2_; dest = H1_;}
91  l_hidden.feed(layer_mem_ptr + offset, src, dest);
92  offset += l_hidden.deduce_num_params();
93  }
94  // apply output layer
95  if constexpr (T_layers_num % 2 == 0)
96  l_out.feed(layer_mem_ptr + offset, H1_, out_mem_ptr);
97  else
98  l_out.feed(layer_mem_ptr + offset, H2_, out_mem_ptr);
99 
100  delete[] H1_;
101  delete[] H2_;
102  }
103 
104 };
105 
106 };
107 };
108 
109 #endif
rocky::etna::mlp
Definition: linear.h:52
rocky::etna::mlp::feed
void feed(T_e *layer_mem_ptr, T_e *in_mem_ptr, T_e *out_mem_ptr)
apply the multi-layer perceptron on data in in_mem_ptr
Definition: linear.h:75
rocky::etna::linear::feed
void feed(T_e *layer_mem_ptr, T_e *in_mem_ptr, T_e *out_mem_ptr)
Definition: linear.h:35
rocky::etna::linear
base class for static layers
Definition: linear.h:18