tiny_dnn  1.0.0
A header-only, dependency-free deep learning framework in C++11
backend_nnp.h
/*
 Copyright (c) 2016, Taiga Nomi, Edgar Riba
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright
 notice, this list of conditions and the following disclaimer in the
 documentation and/or other materials provided with the distribution.
 * Neither the name of the <organization> nor the
 names of its contributors may be used to endorse or promote products
 derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include "tiny_dnn/core/backend.h"
#include "tiny_dnn/core/kernels/nnp_deconv2d_kernel.h"

namespace tiny_dnn {
namespace core {

class nnp_backend : public backend {
 public:
  // context holds solution-dependent parameters
  // context should be able to hold any type of structure (like boost::any)

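  // One constructor per supported layer type follows; each stores only the
  // parameter block it was given and runs the shared NNPACK initialization.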
  // convolution
  nnp_backend(conv_params* params,
              std::function<void(const tensor_t&)> f1,
              conv_layer_worker_specific_storage* ptr)
      : params_c_(params)
      , conv_layer_worker_storage_(ptr)
      , copy_and_pad_input(f1) { init_nnp_engine(); }

  // deconvolution
  explicit nnp_backend(deconv_params* params)
      : params_d_(params) { init_nnp_engine(); }

  // maxpool
  explicit nnp_backend(maxpool_params* params)
      : params_m_(params) { init_nnp_engine(); }

  // fully_connected
  explicit nnp_backend(fully_params* params)
      : params_f_(params) { init_nnp_engine(); }

  nnp_backend() { init_nnp_engine(); }

  // core math functions

  void conv2d(const std::vector<tensor_t*>& in_data,
              std::vector<tensor_t*>& out_data) override {
    if (params_c_) return;  // workaround to fix warnings
    if (params_f_) return;  // workaround to fix warnings
    if (params_d_) return;  // workaround to fix warnings
    if (conv_layer_worker_storage_) return;    // workaround to fix warnings
    if (deconv_layer_worker_storage_) return;  // workaround to fix warnings

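    // NOTE: the NNPACK forward-convolution path below is disabled; the
    // precondition checks and the kernel call are kept for reference until
    // the integration is completed.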
    /*if (!params_c_->has_bias) {
      throw nn_error("NNPACK Convolution requires a bias term.");
    }

    if (params_c_->w_stride != 1 || params_c_->h_stride != 1) {
      throw nn_error("NNPACK Convolution requires stride 1.");
    }

    copy_and_pad_input(*in_data[0]);
    const vec_t& W = (*in_data[1])[0];
    const vec_t& bias = (*in_data[2])[0];
    tensor_t& a = *out_data[1];
    const std::vector<const vec_t*>& in = (*conv_layer_worker_storage_).prev_out_padded_;  // input // NOLINT

    fill_tensor(a, float_t(0));

    // TODO
    throw nn_not_implemented_error();

    kernels::nnp_conv2d_kernel(*params_c_, in, W, bias, a);*/
  }

  void conv2d_q(const std::vector<tensor_t*>& in_data,
                std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void conv2d_eq(const std::vector<tensor_t*>& in_data,
                 std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void conv2d(const std::vector<tensor_t*>& in_data,
              const std::vector<tensor_t*>& out_data,
              std::vector<tensor_t*>& out_grad,
              std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void conv2d_q(const std::vector<tensor_t*>& in_data,
                const std::vector<tensor_t*>& out_data,
                std::vector<tensor_t*>& out_grad,
                std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void deconv2d(const std::vector<tensor_t*>& in_data,
                std::vector<tensor_t*>& out_data) override {
  }

  void deconv2d_q(const std::vector<tensor_t*>& in_data,
                  std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void deconv2d_eq(const std::vector<tensor_t*>& in_data,
                   std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void deconv2d(const std::vector<tensor_t*>& in_data,
                const std::vector<tensor_t*>& out_data,
                std::vector<tensor_t*>& out_grad,
                std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void deconv2d_q(const std::vector<tensor_t*>& in_data,
                  const std::vector<tensor_t*>& out_data,
                  std::vector<tensor_t*>& out_grad,
                  std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void maxpool(const std::vector<tensor_t*>& in_data,
               std::vector<tensor_t*>& out_data) override {
    // just to fix warning: remove in future
    if (params_m_) {}
  }

  void maxpool(const std::vector<tensor_t*>& in_data,
               const std::vector<tensor_t*>& out_data,
               std::vector<tensor_t*>& out_grad,
               std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void fully(const std::vector<tensor_t*>& in_data,
             std::vector<tensor_t*>& out_data) override {
    /*const tensor_t& in = *in_data[0];
    const vec_t& W = (*in_data[1])[0];
    vec_t& b = (*in_data[2])[0];
    tensor_t& a = *out_data[1];

    kernels::nnp_fully_connected_kernel(*params_f_,
        in, W, b, a, layer_->parallelize());*/
  }

  void fully_q(const std::vector<tensor_t*>& in_data,
               std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void fully_eq(const std::vector<tensor_t*>& in_data,
                std::vector<tensor_t*>& out_data) override {
    throw nn_error("not implemented yet.");
  }

  void fully(const std::vector<tensor_t*>& in_data,
             const std::vector<tensor_t*>& out_data,
             std::vector<tensor_t*>& out_grad,
             std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  void fully_q(const std::vector<tensor_t*>& in_data,
               const std::vector<tensor_t*>& out_data,
               std::vector<tensor_t*>& out_grad,
               std::vector<tensor_t*>& in_grad) override {
    throw nn_error("NNPACK does not support back propagation.");
  }

  backend_t type() const override { return backend_t::nnpack; }

 private:
  /* Pointers to the layer parameters; only the member matching the
     constructor that was used is set, the others stay null. */
  conv_params* params_c_ = nullptr;
  deconv_params* params_d_ = nullptr;
  maxpool_params* params_m_ = nullptr;
  fully_params* params_f_ = nullptr;

  /* Pointers to the layer worker storage */
  conv_layer_worker_specific_storage* conv_layer_worker_storage_ = nullptr;
  deconv_layer_worker_specific_storage* deconv_layer_worker_storage_ = nullptr;

  /* Pointers to parent class functions */
  std::function<void(const tensor_t&)> copy_and_pad_input;
  std::function<void(const tensor_t&, tensor_t&)> copy_and_pad_delta;

  void init_nnp_engine() {
#ifdef CNN_USE_NNPACK
    nnp_status init_status = nnp_initialize();
    check_nnp_status(init_status);

    if (init_status != nnp_status_success) {
      throw nn_error("Could not initialize NNPACK.");
    }
#else
    throw nn_error("tiny-dnn has not been compiled with NNPACK support.");
#endif
  }
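
  // Note: NNPACK also exposes nnp_deinitialize() to release its internal
  // state; this backend never calls it, so the engine stays initialized for
  // the lifetime of the process.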

#ifdef CNN_USE_NNPACK
  void check_nnp_status(nnp_status status) {
    switch (status) {
      case nnp_status_success:
        break;
      case nnp_status_invalid_batch_size:
        nn_warn("NNPACK function was called with batch_size == 0.");
        break;
      case nnp_status_invalid_channels:
        nn_warn("NNPACK function was called with channels == 0.");
        break;
      case nnp_status_invalid_input_channels:
        nn_warn("NNPACK function was called with input_channels == 0.");
        break;
      case nnp_status_invalid_output_channels:
        nn_warn("NNPACK function was called with output_channels == 0.");
        break;
      case nnp_status_invalid_input_size:
        nn_warn("NNPACK function was called with input_size.height == 0 or input_size.width == 0.");
        break;
      case nnp_status_invalid_input_stride:
        nn_warn("NNPACK function was called with input_stride.height == 0 or input_stride.width == 0.");
        break;
      case nnp_status_invalid_input_padding:
        nn_warn("NNPACK function was called with input_padding not less than respective kernel (or pooling) size.");
        break;
      case nnp_status_invalid_kernel_size:
        nn_warn("NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0.");
        break;
      case nnp_status_invalid_pooling_size:
        nn_warn("NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0.");
        break;
      // case nnp_status_invalid_pooling_stride:
      //   nn_warn("NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0.");
      //   break;
      case nnp_status_invalid_algorithm:
        nn_warn("NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration.");
        break;
      case nnp_status_unsupported_input_size:
        nn_warn("NNPACK does not support the particular input size for the function.");
        break;
      case nnp_status_unsupported_input_stride:
        nn_warn("NNPACK does not support the particular input stride for the function.");
        break;
      case nnp_status_unsupported_input_padding:
        nn_warn("NNPACK does not support the particular input padding for the function.");
        break;
      case nnp_status_unsupported_kernel_size:
        nn_warn("NNPACK does not support the particular kernel size for the function.");
        break;
      case nnp_status_unsupported_pooling_size:
        nn_warn("NNPACK does not support the particular pooling size for the function.");
        break;
      case nnp_status_unsupported_pooling_stride:
        nn_warn("NNPACK does not support the particular pooling stride for the function.");
        break;
      case nnp_status_unsupported_algorithm:
        nn_warn("NNPACK does not support the particular convolution algorithm for the function.");
        break;
      case nnp_status_uninitialized:
        nn_warn("NNPACK function was called before the library was initialized.");
        break;
      case nnp_status_unsupported_hardware:
        nn_warn("NNPACK does not implement this function for the host CPU.");
        break;
      case nnp_status_out_of_memory:
        nn_warn("NNPACK failed to allocate memory for temporary buffers.");
        break;
    }
  }
#endif
};

}  // namespace core
}  // namespace tiny_dnn
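
Usage sketch: a minimal, hypothetical example of constructing this backend
directly for a deconvolution layer, assuming the library was built with
CNN_USE_NNPACK defined. In practice tiny-dnn layers create their backends
internally, so direct construction like this is illustrative only.

#include <cassert>
#include "tiny_dnn/core/backend_nnp.h"

void nnp_backend_example() {
  // Hypothetical: a default-constructed parameter set; a real layer would
  // fill in these fields before handing the pointer to the backend.
  tiny_dnn::core::deconv_params params;

  // The constructor runs init_nnp_engine(), which calls nnp_initialize()
  // and throws nn_error if NNPACK is unavailable or fails to start.
  tiny_dnn::core::nnp_backend backend(&params);
  assert(backend.type() == tiny_dnn::core::backend_t::nnpack);
}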