tiny_dnn  1.0.0
A header only, dependency-free deep learning framework in C++11
max_unpooling_layer.h
/*
 Copyright (c) 2015, Taiga Nomi
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright
 notice, this list of conditions and the following disclaimer in the
 documentation and/or other materials provided with the distribution.
 * Neither the name of the <organization> nor the
 names of its contributors may be used to endorse or promote products
 derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "tiny_dnn/util/util.h"
#include "tiny_dnn/util/image.h"
#include "tiny_dnn/activations/activation_function.h"

namespace tiny_dnn {

/**
 * applies the max-unpooling operation to the spatial data
 **/
template <typename Activation = activation::identity>
class max_unpooling_layer : public feedforward_layer<Activation> {
public:
    CNN_USE_LAYER_MEMBERS;
    typedef feedforward_layer<Activation> Base;

    max_unpooling_layer(serial_size_t in_width,
                        serial_size_t in_height,
                        serial_size_t in_channels,
                        serial_size_t unpooling_size)
        : max_unpooling_layer(in_width, in_height, in_channels, unpooling_size, unpooling_size)
    {}

    max_unpooling_layer(const shape3d& in_size,
                        serial_size_t unpooling_size,
                        serial_size_t stride)
        : max_unpooling_layer(in_size.width_, in_size.height_, in_size.depth_, unpooling_size, stride)
    {}

    max_unpooling_layer(serial_size_t in_width,
                        serial_size_t in_height,
                        serial_size_t in_channels,
                        serial_size_t unpooling_size,
                        serial_size_t stride)
        : Base({vector_type::data}),
          unpool_size_(unpooling_size),
          stride_(stride),
          in_(in_width, in_height, in_channels),
          out_(unpool_out_dim(in_width, unpooling_size, stride),
               unpool_out_dim(in_height, unpooling_size, stride),
               in_channels)
    {
        //set_worker_count(CNN_TASK_SIZE);
        init_connection();
    }

    size_t fan_in_size() const override {
        return 1;
    }

    size_t fan_out_size() const override {
        return in2out_[0].size();
    }

    void forward_propagation(serial_size_t index,
                             const std::vector<vec_t*>& in_data,
                             std::vector<vec_t*>& out_data) override {
        const vec_t& in = *in_data[0];
        vec_t& a = *out_data[1];
        std::vector<serial_size_t>& max_idx = max_unpooling_layer_worker_storage_[index].in2outmax_;

        // visit every output element: the input value is copied to the
        // recorded max location; every other location in the window is zero
        for_(parallelize_, 0, out2in_.size(), [&](const blocked_range& r) {
            for (int i = r.begin(); i < r.end(); i++) {
                const auto& in_index = out2in_[i];
                a[i] = (max_idx[in_index] == static_cast<serial_size_t>(i)) ? in[in_index] : float_t(0);
            }
        });

        this->forward_activation(*out_data[0], *out_data[1]);
    }

    void back_propagation(serial_size_t index,
                          const std::vector<vec_t*>& in_data,
                          const std::vector<vec_t*>& out_data,
                          std::vector<vec_t*>& out_grad,
                          std::vector<vec_t*>& in_grad) override {
        vec_t& prev_delta = *in_grad[0];
        vec_t& curr_delta = *out_grad[1];
        std::vector<serial_size_t>& max_idx = max_unpooling_layer_worker_storage_[index].in2outmax_;

        CNN_UNREFERENCED_PARAMETER(in_data);

        this->backward_activation(*out_grad[0], *out_data[0], curr_delta);

        // each input element forwarded its value to exactly one output
        // location (max_idx[i]), so its gradient is gathered from there
        for_(parallelize_, 0, in2out_.size(), [&](const blocked_range& r) {
            for (int i = r.begin(); i != r.end(); i++) {
                prev_delta[i] = curr_delta[max_idx[i]];
            }
        });
    }

    std::vector<index3d<serial_size_t>> in_shape() const override { return {in_}; }
    std::vector<index3d<serial_size_t>> out_shape() const override { return {out_, out_}; }
    std::string layer_type() const override { return "max-unpool"; }
    size_t unpool_size() const { return unpool_size_; }

    void set_worker_count(serial_size_t worker_count) override {
        Base::set_worker_count(worker_count);
        max_unpooling_layer_worker_storage_.resize(worker_count);
        for (max_unpooling_layer_worker_specific_storage& mws : max_unpooling_layer_worker_storage_) {
            // in2outmax_ holds one entry per input element
            mws.in2outmax_.resize(in_.size());
        }
    }

    template <class Archive>
    static void load_and_construct(Archive& ar, cereal::construct<max_unpooling_layer>& construct) {
        shape3d in;
        serial_size_t stride, unpool_size;

        ar(cereal::make_nvp("in_size", in),
           cereal::make_nvp("unpool_size", unpool_size),
           cereal::make_nvp("stride", stride));
        construct(in, unpool_size, stride);
    }

    template <class Archive>
    void serialize(Archive& ar) {
        layer::serialize_prolog(ar);
        ar(cereal::make_nvp("in_size", in_),
           cereal::make_nvp("unpool_size", unpool_size_),
           cereal::make_nvp("stride", stride_));
    }

private:
    serial_size_t unpool_size_;
    serial_size_t stride_;
    std::vector<serial_size_t> out2in_;              // mapping out => in (N:1)
    std::vector<std::vector<serial_size_t>> in2out_; // mapping in => out (1:N)

    struct max_unpooling_layer_worker_specific_storage {
        std::vector<serial_size_t> in2outmax_;       // mapping in => max_index(out) (1:1)
    };

    std::vector<max_unpooling_layer_worker_specific_storage> max_unpooling_layer_worker_storage_;

    index3d<serial_size_t> in_;
    index3d<serial_size_t> out_;

    static serial_size_t unpool_out_dim(serial_size_t in_size, serial_size_t unpooling_size, serial_size_t stride) {
        return static_cast<serial_size_t>(in_size * stride + unpooling_size - 1);
    }

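    // Worked example (illustrative numbers, not from the original source):
    // an 8-pixel row with unpooling_size = 2 and stride = 2 maps to
    // unpool_out_dim(8, 2, 2) = 8 * 2 + 2 - 1 = 17 output pixels, i.e. with
    // the default stride == unpooling_size each spatial axis grows by
    // roughly a factor of stride.
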
    void connect_kernel(serial_size_t unpooling_size, serial_size_t inx, serial_size_t iny, serial_size_t c)
    {
        // clip the unpooling window so that it stays inside the output plane
        serial_size_t dxmax = std::min(unpooling_size, out_.width_ - inx * stride_);
        serial_size_t dymax = std::min(unpooling_size, out_.height_ - iny * stride_);

        for (serial_size_t dy = 0; dy < dymax; dy++) {
            for (serial_size_t dx = 0; dx < dxmax; dx++) {
                serial_size_t out_index = out_.get_index(inx * stride_ + dx, iny * stride_ + dy, c);
                serial_size_t in_index = in_.get_index(inx, iny, c);

                if (in_index >= in2out_.size())
                    throw nn_error("index overflow");
                if (out_index >= out2in_.size())
                    throw nn_error("index overflow");
                out2in_[out_index] = in_index;
                in2out_[in_index].push_back(out_index);
            }
        }
    }

    void init_connection()
    {
        in2out_.resize(in_.size());
        out2in_.resize(out_.size());

        for (max_unpooling_layer_worker_specific_storage& mws : max_unpooling_layer_worker_storage_) {
            mws.in2outmax_.resize(in_.size());
        }

        for (serial_size_t c = 0; c < in_.depth_; ++c)
            for (serial_size_t y = 0; y < in_.height_; ++y)
                for (serial_size_t x = 0; x < in_.width_; ++x)
                    connect_kernel(unpool_size_, x, y, c);
    }

};

} // namespace tiny_dnn
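
A minimal usage sketch (not part of the original header; the sizes and printed shapes are illustrative assumptions). It constructs the layer standalone through the five-argument constructor shown above and queries its input/output shapes:

#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

int main() {
    using namespace tiny_dnn;

    // 8x8 input plane with 3 channels, 2x2 unpooling window, stride 2
    max_unpooling_layer<activation::identity> unpool(8, 8, 3, 2, 2);

    const auto in  = unpool.in_shape()[0];   // index3d: width x height x depth
    const auto out = unpool.out_shape()[0];

    std::cout << "in:  " << in.width_  << "x" << in.height_  << "x" << in.depth_  << "\n"
              << "out: " << out.width_ << "x" << out.height_ << "x" << out.depth_ << std::endl;
    return 0;
}

With unpool_out_dim as defined above, the 8x8x3 input expands to 17x17x3 (8 * 2 + 2 - 1 = 17 per spatial axis).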