tiny_dnn  1.0.0
A header-only, dependency-free deep learning framework in C++11
backend.h
/*
    Copyright (c) 2016, Taiga Nomi, Edgar Riba
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
    * Neither the name of the <organization> nor the
    names of its contributors may be used to endorse or promote products
    derived from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include "tiny_dnn/layers/layer.h"
#include "tiny_dnn/core/params/conv_params.h"
#include "tiny_dnn/core/params/deconv_params.h"
#include "tiny_dnn/core/params/maxpool_params.h"
#include "tiny_dnn/core/params/fully_params.h"

namespace tiny_dnn {
namespace core {

// TODO(edgar): remove this
class context;

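// Available engines: a portable C++ implementation (internal), the NNPACK
// library, LibDNN, AVX-vectorized kernels, and OpenCL.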
enum class backend_t { internal, nnpack, libdnn, avx, opencl };

inline std::ostream& operator << (std::ostream& os, backend_t type) {
  switch (type) {
    case backend_t::internal: os << "Internal"; break;
    case backend_t::nnpack:   os << "NNPACK";   break;
    case backend_t::libdnn:   os << "LibDNN";   break;
    case backend_t::avx:      os << "AVX";      break;
    case backend_t::opencl:   os << "OpenCL";   break;
    default:
      throw nn_error("Unsupported backend type for ostream output.");
      break;
  }
  return os;
}

/*enum class Engine { OpenCL };*/

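// Selects the engine used when none is requested explicitly: AVX kernels when
// the library is built with CNN_USE_AVX and the compiler targets AVX/AVX2,
// otherwise the portable internal implementation.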
inline backend_t default_engine() {
#ifdef CNN_USE_AVX
#if defined(__AVX__) || defined(__AVX2__)
  return backend_t::avx;
#endif
#endif // CNN_USE_AVX
  return backend_t::internal;
}

class backend {
 public:
  // context holds solution-dependent parameters;
  // it should be able to hold any type of structure (like boost::any)
  explicit backend(context* ctx = nullptr) : ctx_(ctx) {}

  // core math functions

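  // Each operation comes in two forms: the two-argument overload runs the
  // forward pass (in_data -> out_data), while the four-argument overload runs
  // the backward pass, filling in_grad from out_grad. The _q and _eq variants
  // serve the quantized layers.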
  virtual void conv2d(const std::vector<tensor_t*>& in_data,
                      std::vector<tensor_t*>& out_data) = 0;

  virtual void conv2d_q(const std::vector<tensor_t*>& in_data,
                        std::vector<tensor_t*>& out_data) = 0;

  virtual void conv2d_eq(const std::vector<tensor_t*>& in_data,
                         std::vector<tensor_t*>& out_data) = 0;

  virtual void conv2d(const std::vector<tensor_t*>& in_data,
                      const std::vector<tensor_t*>& out_data,
                      std::vector<tensor_t*>& out_grad,
                      std::vector<tensor_t*>& in_grad) = 0;

  virtual void conv2d_q(const std::vector<tensor_t*>& in_data,
                        const std::vector<tensor_t*>& out_data,
                        std::vector<tensor_t*>& out_grad,
                        std::vector<tensor_t*>& in_grad) = 0;

  virtual void deconv2d(const std::vector<tensor_t*>& in_data,
                        std::vector<tensor_t*>& out_data) = 0;

  virtual void deconv2d_q(const std::vector<tensor_t*>& in_data,
                          std::vector<tensor_t*>& out_data) = 0;

  virtual void deconv2d_eq(const std::vector<tensor_t*>& in_data,
                           std::vector<tensor_t*>& out_data) = 0;

  virtual void deconv2d(const std::vector<tensor_t*>& in_data,
                        const std::vector<tensor_t*>& out_data,
                        std::vector<tensor_t*>& out_grad,
                        std::vector<tensor_t*>& in_grad) = 0;

  virtual void deconv2d_q(const std::vector<tensor_t*>& in_data,
                          const std::vector<tensor_t*>& out_data,
                          std::vector<tensor_t*>& out_grad,
                          std::vector<tensor_t*>& in_grad) = 0;

  virtual void maxpool(const std::vector<tensor_t*>& in_data,
                       std::vector<tensor_t*>& out_data) = 0;

  virtual void maxpool(const std::vector<tensor_t*>& in_data,
                       const std::vector<tensor_t*>& out_data,
                       std::vector<tensor_t*>& out_grad,
                       std::vector<tensor_t*>& in_grad) = 0;

  virtual void fully(const std::vector<tensor_t*>& in_data,
                     std::vector<tensor_t*>& out_data) = 0;

  virtual void fully_q(const std::vector<tensor_t*>& in_data,
                       std::vector<tensor_t*>& out_data) = 0;

  virtual void fully_eq(const std::vector<tensor_t*>& in_data,
                        std::vector<tensor_t*>& out_data) = 0;

  virtual void fully(const std::vector<tensor_t*>& in_data,
                     const std::vector<tensor_t*>& out_data,
                     std::vector<tensor_t*>& out_grad,
                     std::vector<tensor_t*>& in_grad) = 0;

  virtual void fully_q(const std::vector<tensor_t*>& in_data,
                       const std::vector<tensor_t*>& out_data,
                       std::vector<tensor_t*>& out_grad,
                       std::vector<tensor_t*>& in_grad) = 0;

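  // get_context() exposes the engine-specific state, set_layer() attaches the
  // layer this backend computes for, and type() reports which backend_t the
  // concrete engine implements.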
  context* get_context() const { return ctx_; }

  void set_layer(layerptr_t layer) { layer_ = layer; }

  virtual backend_t type() const = 0;

 protected:
  context* ctx_;
  layerptr_t layer_;
};

} // namespace core
} // namespace tiny_dnn
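
A minimal sketch (not part of backend.h) of how a concrete engine plugs into this interface. The class name null_backend and its empty kernel bodies are illustrative placeholders only; a real engine such as the AVX or NNPACK backend supplies actual kernels behind the same signatures.

#include <iostream>
#include <vector>
#include "tiny_dnn/core/backend.h"

namespace tiny_dnn {
namespace core {

// Illustrative no-op engine: every pure virtual is overridden with an empty body.
class null_backend : public backend {
  using tp_vec = std::vector<tensor_t*>;  // shorthand used only in this sketch

 public:
  explicit null_backend(context* ctx = nullptr) : backend(ctx) {}

  // forward passes (in_data -> out_data): a real engine writes out_data here
  void conv2d(const tp_vec&, tp_vec&) override {}
  void conv2d_q(const tp_vec&, tp_vec&) override {}
  void conv2d_eq(const tp_vec&, tp_vec&) override {}
  void deconv2d(const tp_vec&, tp_vec&) override {}
  void deconv2d_q(const tp_vec&, tp_vec&) override {}
  void deconv2d_eq(const tp_vec&, tp_vec&) override {}
  void maxpool(const tp_vec&, tp_vec&) override {}
  void fully(const tp_vec&, tp_vec&) override {}
  void fully_q(const tp_vec&, tp_vec&) override {}
  void fully_eq(const tp_vec&, tp_vec&) override {}

  // backward passes: a real engine fills in_grad from out_grad here
  void conv2d(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void conv2d_q(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void deconv2d(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void deconv2d_q(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void maxpool(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void fully(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}
  void fully_q(const tp_vec&, const tp_vec&, tp_vec&, tp_vec&) override {}

  backend_t type() const override { return backend_t::internal; }
};

} // namespace core
} // namespace tiny_dnn

int main() {
  tiny_dnn::core::null_backend engine;
  // operator<< above prints the human-readable engine name
  std::cout << "default engine: " << tiny_dnn::core::default_engine() << std::endl;
  std::cout << "this engine:    " << engine.type() << std::endl;
  return 0;
}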