28 #include "tiny_dnn/util/util.h"
// Which neighborhood the LRN response is normalized over.
// Only across_channels is implemented (see forward_within below).
enum class norm_region {
    across_channels,  // sum of squares runs over adjacent channels at the same (x, y)
    within_channels   // sum of squares runs over a spatial window within one channel
};
41 template<
typename Activation>
44 CNN_USE_LAYER_MEMBERS;
49 serial_size_t local_size,
52 norm_region region = norm_region::across_channels)
53 :
Base({ vector_type::data }),
59 in_square_(in_shape_.area()) {
70 serial_size_t local_size,
73 norm_region region = norm_region::across_channels)
74 :
lrn_layer(prev->out_data_shape()[0], local_size, alpha, beta, region) {}
85 serial_size_t in_height,
86 serial_size_t local_size,
90 norm_region region = norm_region::across_channels)
102 return { in_shape_ };
106 return { in_shape_, in_shape_ };
112 std::vector<tensor_t*>& out_data)
override {
115 for (
size_t sample = 0, sample_count = in_data[0]->size(); sample < sample_count; ++sample) {
116 vec_t& in = (*in_data[0])[sample];
117 vec_t& out = (*out_data[0])[sample];
118 vec_t& a = (*out_data[1])[sample];
120 if (region_ == norm_region::across_channels) {
121 forward_across(in, a);
124 forward_within(in, a);
134 const std::vector<tensor_t*>& out_data,
135 std::vector<tensor_t*>& out_grad,
136 std::vector<tensor_t*>& in_grad)
override {
137 CNN_UNREFERENCED_PARAMETER(in_data);
138 CNN_UNREFERENCED_PARAMETER(out_data);
139 CNN_UNREFERENCED_PARAMETER(out_grad);
140 CNN_UNREFERENCED_PARAMETER(in_grad);
144 template <
class Archive>
145 static void load_and_construct(Archive & ar, cereal::construct<lrn_layer> & construct) {
151 ar(cereal::make_nvp(
"in_shape",
in_shape),
152 cereal::make_nvp(
"size", size),
153 cereal::make_nvp(
"alpha", alpha),
154 cereal::make_nvp(
"beta", beta),
155 cereal::make_nvp(
"region", region));
156 construct(
in_shape, size, alpha, beta, region);
159 template <
class Archive>
160 void serialize(Archive & ar) {
161 layer::serialize_prolog(ar);
162 ar(cereal::make_nvp(
"in_shape", in_shape_),
163 cereal::make_nvp(
"size", size_),
164 cereal::make_nvp(
"alpha", alpha_),
165 cereal::make_nvp(
"beta", beta_),
166 cereal::make_nvp(
"region", region_));
170 void forward_across(
const vec_t& in, vec_t& out) {
171 std::fill(in_square_.begin(), in_square_.end(), float_t(0));
173 for (serial_size_t i = 0; i < size_ / 2; i++) {
174 serial_size_t idx = in_shape_.get_index(0, 0, i);
175 add_square_sum(&in[idx], in_shape_.area(), &in_square_[0]);
178 serial_size_t head = size_ / 2;
179 long tail =
static_cast<long>(head) -
static_cast<long>(size_);
180 serial_size_t channels = in_shape_.depth_;
181 const serial_size_t wxh = in_shape_.area();
182 const float_t alpha_div_size = alpha_ / size_;
184 for (serial_size_t i = 0; i < channels; i++, head++, tail++) {
186 add_square_sum(&in[in_shape_.get_index(0, 0, head)], wxh, &in_square_[0]);
189 sub_square_sum(&in[in_shape_.get_index(0, 0, tail)], wxh, &in_square_[0]);
191 float_t *dst = &out[in_shape_.get_index(0, 0, i)];
192 const float_t *src = &in[in_shape_.get_index(0, 0, i)];
193 for (serial_size_t j = 0; j < wxh; j++)
194 dst[j] = src[j] * std::pow(float_t(1) + alpha_div_size * in_square_[j], -beta_);
198 void forward_within(
const vec_t& in, vec_t& out) {
199 CNN_UNREFERENCED_PARAMETER(in);
200 CNN_UNREFERENCED_PARAMETER(out);
201 throw nn_error(
"not implemented");
204 void add_square_sum(
const float_t *src, serial_size_t size, float_t *dst) {
205 for (serial_size_t i = 0; i < size; i++)
206 dst[i] += src[i] * src[i];
209 void sub_square_sum(
const float_t *src, serial_size_t size, float_t *dst) {
210 for (serial_size_t i = 0; i < size; i++)
211 dst[i] -= src[i] * src[i];
217 float_t alpha_, beta_;
single-input, single-output network with activation function
Definition: feedforward_layer.h:37
base class of all kind of NN layers
Definition: layer.h:62
bool parallelize_
Flag indicating whether the layer/node operations are parallelized.
Definition: layer.h:696
serial_size_t in_channels() const
number of outgoing edges in this layer
Definition: layer.h:146
local response normalization
Definition: lrn_layer.h:42
serial_size_t fan_out_size() const override
number of outgoing connections for each input unit used only for weight/bias initialization methods w...
Definition: lrn_layer.h:97
serial_size_t fan_in_size() const override
number of incoming connections for each output unit used only for weight/bias initialization methods ...
Definition: lrn_layer.h:93
std::vector< shape3d > in_shape() const override
array of input shapes (width x height x depth)
Definition: lrn_layer.h:101
lrn_layer(serial_size_t in_width, serial_size_t in_height, serial_size_t local_size, serial_size_t in_channels, float_t alpha=1.0, float_t beta=5.0, norm_region region=norm_region::across_channels)
Definition: lrn_layer.h:84
lrn_layer(layer *prev, serial_size_t local_size, float_t alpha=1.0, float_t beta=5.0, norm_region region=norm_region::across_channels)
Definition: lrn_layer.h:69
void back_propagation(const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad) override
return delta of previous layer (delta = dE/da, where a = wx in a fully-connected layer)
Definition: lrn_layer.h:133
std::string layer_type() const override
name of layer, should be unique for each concrete class
Definition: lrn_layer.h:109
std::vector< shape3d > out_shape() const override
array of output shapes (width x height x depth)
Definition: lrn_layer.h:105
void forward_propagation(const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data) override
Definition: lrn_layer.h:111
error exception class for tiny-dnn
Definition: nn_error.h:37