tiny_dnn  1.0.0
A header only, dependency-free deep learning framework in C++11
op_kernel.h
/*
 COPYRIGHT

 All contributions by Taiga Nomi
 Copyright (c) 2013, Taiga Nomi
 All rights reserved.

 All other contributions:
 Copyright (c) 2013-2016, the respective contributors.
 All rights reserved.

 Each contributor holds copyright over their respective contributions.
 The project versioning (Git) records all such contribution source information.

 LICENSE

 The BSD 3-Clause License


 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

 * Neither the name of tiny-dnn nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include <memory>
#include <vector>

#include "tiny_dnn/core/framework/device.fwd.h"
#include "tiny_dnn/core/params/conv_params.h"

namespace tiny_dnn {
namespace core {

class OpKernel;  // declared below

// Bundles the pointers an OpKernel needs at construction time: the device
// it will run on and the parameters of the operation.
class OpKernelConstruction {
 public:
  explicit OpKernelConstruction() {}
  explicit OpKernelConstruction(Device* device, Params* params)
      : device_(device), params_(params) {}

  // Returns the device raw pointer
  Device* device() const { return device_; }

  // Returns the params raw pointer
  Params* params() const { return params_; }

 private:
  Device* device_ = nullptr;
  Params* params_ = nullptr;
};

// Carries everything a single kernel invocation needs: the input/output
// data and gradient tensors, plus the device, layer, and engine to run on.
class OpKernelContext {
 public:
  struct OpParams {
    // the op kernel being computed.
    OpKernel* op_kernel_ptr = nullptr;

    // the device on which the kernel is running.
    Device* device_ptr = nullptr;

    // the layer on which the kernel is running.
    layer* layer_ptr_ = nullptr;

    // the operation params.
    Params* params_ptr_ = nullptr;

    // whether to parallelize the operation.
    bool parallelize = false;

    // the backend engine used to compute the kernel.
    backend_t engine = default_engine();
  };
  explicit OpKernelContext(const std::vector<tensor_t*>& in_data,
                           std::vector<tensor_t*>& out_data)
      : in_data_(in_data), out_data_(out_data) {
    op_params_ = std::unique_ptr<OpParams>(new OpParams());
  }

  explicit OpKernelContext(const std::vector<tensor_t*>& in_data,
                           const std::vector<tensor_t*>& out_data,
                           std::vector<tensor_t*>& out_grad,
                           std::vector<tensor_t*>& in_grad)
      : in_data_(in_data),
        out_data_(out_data),
        out_grad_(out_grad),
        in_grad_(in_grad) {
    op_params_ = std::unique_ptr<OpParams>(new OpParams());
  }

  tensor_t& input(const int idx) const { return *in_data_[idx]; }

  tensor_t& output(const int idx) const { return *out_data_[idx]; }

  tensor_t& input_grad(const int idx) const { return *in_grad_[idx]; }

  tensor_t& output_grad(const int idx) const { return *out_grad_[idx]; }

  void setParams(Params* params) { op_params_->params_ptr_ = params; }

  Params* params() const { return op_params_->params_ptr_; }

  void setParallelize(const bool parallelize) {
    op_params_->parallelize = parallelize;
  }

  bool parallelize() const { return op_params_->parallelize; }

  void setDevice(Device* device) { op_params_->device_ptr = device; }

  Device* device() const { return op_params_->device_ptr; }

  void setLayer(layer* layer) { op_params_->layer_ptr_ = layer; }

  layer* Layer() const { return op_params_->layer_ptr_; }

  backend_t engine() const { return op_params_->engine; }

  void setEngine(const backend_t engine) { op_params_->engine = engine; }

 private:
  std::vector<tensor_t*> in_data_;
  std::vector<tensor_t*> out_data_;
  std::vector<tensor_t*> out_grad_;
  std::vector<tensor_t*> in_grad_;

  std::unique_ptr<OpParams> op_params_;
};

// Base class of all operation kernels; concrete kernels implement compute().
class OpKernel {
 public:
  explicit OpKernel() {}
  explicit OpKernel(const OpKernelConstruction& context)
      : device_(context.device()), params_(context.params()) {}

  virtual ~OpKernel() {}

  virtual void compute(const OpKernelContext& context) = 0;

 protected:
  Device* device_ = nullptr;
  Params* params_ = nullptr;
};

}  // namespace core
}  // namespace tiny_dnn
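
For orientation, here is a minimal sketch of how these classes fit together: a concrete kernel derives from OpKernel and implements compute(), while the caller packs the tensors and settings into an OpKernelContext before dispatching. The DoubleOp kernel and the hand-wired main() below are hypothetical, invented for illustration; only OpKernel, OpKernelConstruction, OpKernelContext, and their accessors come from this header, and in practice a layer's backend does this wiring for you.

// A minimal sketch, assuming "tiny_dnn/tiny_dnn.h" pulls in tensor_t,
// float_t, and default_engine(). DoubleOp is a hypothetical kernel that
// writes 2 * input into the first output tensor.
#include <vector>

#include "tiny_dnn/tiny_dnn.h"

using namespace tiny_dnn;
using namespace tiny_dnn::core;

class DoubleOp : public OpKernel {
 public:
  explicit DoubleOp(const OpKernelConstruction& context) : OpKernel(context) {}

  void compute(const OpKernelContext& context) override {
    tensor_t& in  = context.input(0);   // batch of input samples
    tensor_t& out = context.output(0);  // batch of output samples
    for (size_t s = 0; s < in.size(); ++s) {
      for (size_t i = 0; i < in[s].size(); ++i) {
        out[s][i] = in[s][i] * float_t(2);
      }
    }
  }
};

int main() {
  tensor_t in{{1, 2, 3}};   // one sample with three values
  tensor_t out{{0, 0, 0}};  // pre-sized output buffer
  std::vector<tensor_t*> in_data{&in}, out_data{&out};

  OpKernelContext context(in_data, out_data);
  context.setEngine(default_engine());

  // A real layer passes its Device and Params here; nullptr suffices for
  // this standalone sketch since DoubleOp uses neither.
  DoubleOp op(OpKernelConstruction(nullptr, nullptr));
  op.compute(context);  // out now holds {2, 4, 6}
}

Splitting construction-time state (OpKernelConstruction) from per-call state (OpKernelContext) echoes TensorFlow's kernel interface and lets a single kernel instance be reused across calls with different tensors.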