#include <algorithm>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include "caffe.pb.h"  // generated from caffe.proto; path depends on the build setup

#include "tiny_dnn/layers/convolutional_layer.h"
#include "tiny_dnn/layers/deconvolutional_layer.h"
#include "tiny_dnn/layers/fully_connected_layer.h"
#include "tiny_dnn/layers/average_pooling_layer.h"
#include "tiny_dnn/layers/max_pooling_layer.h"
#include "tiny_dnn/layers/linear_layer.h"
#include "tiny_dnn/layers/lrn_layer.h"
#include "tiny_dnn/layers/dropout_layer.h"
#include "tiny_dnn/layers/batch_normalization_layer.h"
#include "tiny_dnn/layers/power_layer.h"
#ifdef _MSC_VER
#include <io.h>
#include <fcntl.h>
#define CNN_OPEN_BINARY(filename) open(filename, _O_RDONLY|_O_BINARY)
#define CNN_OPEN_TXT(filename) open(filename, _O_RDONLY)
#pragma warning(push)
#pragma warning(disable:4996)
#else
#include <sys/types.h>
#include <fcntl.h>
#define CNN_OPEN_BINARY(filename) open(filename, O_RDONLY)
#define CNN_OPEN_TXT(filename) open(filename, O_RDONLY)
#endif

namespace tiny_dnn {
namespace detail {
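// Protobuf loading helpers.
// read_proto_from_text() parses a human-readable .prototxt (network
// definition); read_proto_from_binary() parses a binary .caffemodel
// (trained weights). Both throw nn_error when parsing fails, and the caller
// owns the resulting google::protobuf::Message (typically a caffe::NetParameter).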
inline void read_proto_from_text(const std::string& prototxt,
                                 google::protobuf::Message *message) {
    int fd = CNN_OPEN_TXT(prototxt.c_str());
    if (fd == -1) {
        throw nn_error("file not found: " + prototxt);
    }

    google::protobuf::io::FileInputStream input(fd);
    input.SetCloseOnDelete(true);

    if (!google::protobuf::TextFormat::Parse(&input, message)) {
        throw nn_error("failed to parse");
    }
}
inline void read_proto_from_binary(const std::string& protobinary,
                                   google::protobuf::Message *message) {
    int fd = CNN_OPEN_BINARY(protobinary.c_str());
    google::protobuf::io::FileInputStream rawstr(fd);
    google::protobuf::io::CodedInputStream codedstr(&rawstr);

    rawstr.SetCloseOnDelete(true);
    codedstr.SetTotalBytesLimit(std::numeric_limits<int>::max(),
                                std::numeric_limits<int>::max() / 2);

    if (!message->ParseFromCodedStream(&codedstr)) {
        throw nn_error("failed to parse");
    }
}
inline std::shared_ptr<weight_init::function>
create_filler(const std::string& filler) {
    if (filler == "xavier") {
        return std::make_shared<weight_init::xavier>();
    } else if (filler == "constant") {
        return std::make_shared<weight_init::constant>();
    } else if (filler == "gaussian") {
        return std::make_shared<weight_init::gaussian>();
    } else {
        throw nn_error("unsupported filler type");
    }
}
template <typename param>
inline bool get_kernel_size_2d(const param& p, layer_size_t *kernel) {
    if (p.has_kernel_w() && p.has_kernel_h()) {
        if (p.kernel_w() != p.kernel_h()) {
            throw nn_error("unsupported kernel shape");
        }
        *kernel = p.kernel_w();
        return true;
    }
    return false;
}

template <typename param>
inline bool get_kernel_size_2d(const param& p,
                               layer_size_t *kernel_w, layer_size_t *kernel_h) {
    if (p.has_kernel_w() && p.has_kernel_h()) {
        *kernel_w = p.kernel_w();
        *kernel_h = p.kernel_h();
        return true;
    }
    return false;
}

inline layer_size_t get_kernel_size_2d(const caffe::ConvolutionParameter& p) {
    layer_size_t window_size;
    if (!get_kernel_size_2d(p, &window_size)) {
        if (p.kernel_size_size() > 1) {
            throw nn_error("unsupported kernel shape");
        }
        window_size = p.kernel_size(0);
    }
    return window_size;
}
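// Factories for the two Caffe pooling modes. tiny-dnn models average pooling
// as a layer with a trainable scale/bias pair, so create_ave_pool() fixes the
// scale to 1/(pool area), zeroes the bias, and marks the layer non-trainable
// to reproduce Caffe's plain averaging.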
inline std::shared_ptr<layer> create_max_pool(layer_size_t pool_size_w,
                                              layer_size_t pool_size_h,
                                              layer_size_t stride_w,
                                              layer_size_t stride_h,
                                              padding pad_type,
                                              const shape_t& bottom_shape,
                                              shape_t *top_shape) {
    using max_pool = max_pooling_layer<activation::identity>;
    auto mp = std::make_shared<max_pool>(bottom_shape.width_,
                                         bottom_shape.height_,
                                         bottom_shape.depth_,
                                         pool_size_w, pool_size_h,
                                         stride_w, stride_h, pad_type);
    *top_shape = mp->out_shape()[0];
    return mp;
}
inline std::shared_ptr<layer> create_ave_pool(layer_size_t pool_size_w,
                                              layer_size_t pool_size_h,
                                              layer_size_t stride_w,
                                              layer_size_t stride_h,
                                              padding pad_type,
                                              const shape_t& bottom_shape,
                                              shape_t *top_shape) {
    using ave_pool = average_pooling_layer<activation::identity>;
    auto ap = std::make_shared<ave_pool>(bottom_shape.width_,
                                         bottom_shape.height_,
                                         bottom_shape.depth_,
                                         pool_size_w, pool_size_h,
                                         stride_w, stride_h, pad_type);

    // tiny-dnn's average pooling has a trainable scale/bias; fix them to
    // emulate Caffe's plain averaging
    float_t weight = float_t(1) / (pool_size_w * pool_size_h);

    vec_t& w = *ap->weights()[0];
    vec_t& b = *ap->weights()[1];

    std::fill(w.begin(), w.end(), weight);
    std::fill(b.begin(), b.end(), float_t(0));

    *top_shape = ap->out_shape()[0];
    ap->set_trainable(false);

    return ap;
}
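// Caffe activation layers (Softmax, Sigmoid, TanH, ReLU) map onto tiny-dnn's
// linear_layer<activation>, i.e. an identity transform followed by the
// element-wise activation, so the output shape equals the input shape.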
inline std::shared_ptr<layer> create_softmax(const caffe::LayerParameter& layer,
                                             const shape_t& bottom_shape, shape_t *top_shape) {
    auto sm = std::make_shared<linear_layer<activation::softmax>>(
        bottom_shape.size());
    *top_shape = sm->out_shape()[0];
    return sm;
}
inline std::shared_ptr<layer> create_sigmoid(const caffe::LayerParameter& layer,
                                             const shape_t& bottom_shape, shape_t *top_shape) {
    auto ce = std::make_shared<linear_layer<activation::sigmoid>>(
        bottom_shape.size());
    *top_shape = ce->out_shape()[0];
    return ce;
}
inline std::shared_ptr<layer> create_tanh(const caffe::LayerParameter& layer,
                                          const shape_t& bottom_shape, shape_t *top_shape) {
    auto tanh = std::make_shared<linear_layer<activation::tan_h>>(
        bottom_shape.size());
    *top_shape = tanh->out_shape()[0];
    return tanh;
}
inline std::shared_ptr<layer> create_power(const caffe::LayerParameter& layer,
                                           const shape_t& bottom_shape, shape_t *top_shape) {
    auto power = std::make_shared<power_layer>(bottom_shape, layer.power_param().power(),
                                               layer.power_param().scale());
    *top_shape = power->out_shape()[0];
    return power;
}
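// Converts a Caffe Pooling layer. Caffe expresses padding as an explicit
// pixel count, while tiny-dnn only supports padding::valid (no padding) and
// padding::same; pad == kernel_size - 1 is treated as "same" and any other
// non-zero pad is rejected. MAX and AVE pooling are supported; when no pool
// method is given, max pooling is used.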
inline std::shared_ptr<layer> create_pooling(const caffe::LayerParameter& layer,
                                             const shape_t& bottom_shape, shape_t *top_shape) {
    if (!layer.has_pooling_param()) {
        throw nn_error("pool param missing");
    }

    auto pool_param = layer.pooling_param();

    layer_size_t h_stride = 0;
    layer_size_t w_stride = 0;
    layer_size_t pool_size_w = 0;
    layer_size_t pool_size_h = 0;
    layer_size_t h_pad = 0;
    layer_size_t w_pad = 0;
    padding pad_type = padding::valid;

    if (!get_kernel_size_2d(pool_param, &pool_size_w, &pool_size_h)) {
        pool_size_w = pool_size_h = pool_param.kernel_size();
    }

    if (pool_param.has_stride() || pool_param.has_stride_h()) {
        h_stride = pool_param.has_stride() ?
                   pool_param.stride() : pool_param.stride_h();
    }

    if (pool_param.has_stride() || pool_param.has_stride_w()) {
        w_stride = pool_param.has_stride() ?
                   pool_param.stride() : pool_param.stride_w();
    }

    if (pool_param.has_pad() || pool_param.has_pad_w()) {
        w_pad = pool_param.has_pad() ?
                pool_param.pad() : pool_param.pad_w();
    }

    if (pool_param.has_pad() || pool_param.has_pad_h()) {
        h_pad = pool_param.has_pad() ?
                pool_param.pad() : pool_param.pad_h();
    }

    if (w_pad != 0) {
        if (w_pad == pool_size_w - 1) {
            pad_type = padding::same;
        } else {
            throw nn_error("unsupported padding type");
        }
    }

    if (h_pad != 0) {
        if (h_pad == pool_size_h - 1) {
            pad_type = padding::same;
        } else {
            throw nn_error("unsupported padding type");
        }
    }

    if (pool_param.has_pool()) {
        auto type = pool_param.pool();

        switch (type) {
        case caffe::PoolingParameter_PoolMethod_MAX:
            return create_max_pool(pool_size_w, pool_size_h, w_stride, h_stride,
                                   pad_type, bottom_shape, top_shape);
        case caffe::PoolingParameter_PoolMethod_AVE:
            return create_ave_pool(pool_size_w, pool_size_h, w_stride, h_stride,
                                   pad_type, bottom_shape, top_shape);
        default:
            throw nn_error("unsupported layer type");
        }
    }

    // default: max-pooling
    return create_max_pool(pool_size_w, pool_size_h, w_stride, h_stride,
                           pad_type, bottom_shape, top_shape);
}
inline std::shared_ptr<layer> create_relu(const caffe::LayerParameter& layer,
                                          const shape_t& bottom_shape, shape_t *top_shape) {
    auto relu = std::make_shared<linear_layer<activation::relu>>(
        bottom_shape.size());
    *top_shape = relu->out_shape()[0];
    return relu;
}
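// Converts a Caffe BatchNorm layer (inference phase only). When the model
// carries pretrained statistics, Caffe stores three blobs: running mean,
// running variance, and a scale factor; mean and variance are multiplied by
// the reciprocal of that scale factor before being handed to tiny-dnn's
// batch_normalization_layer.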
inline std::shared_ptr<layer> create_batchnorm(const caffe::LayerParameter& layer,
                                               const shape_t& bottom_shape, shape_t *top_shape) {
    using bn_layer = batch_normalization_layer;

    *top_shape = bottom_shape;

    float_t eps = 1e-5f;
    float_t momentum = 0.999f;

    if (layer.has_batch_norm_param()) {
        auto bn_param = layer.batch_norm_param();

        if (bn_param.has_eps()) {
            eps = bn_param.eps();
        }
        if (bn_param.has_moving_average_fraction()) {
            momentum = bn_param.moving_average_fraction();
        }
    }

    auto bn = std::make_shared<bn_layer>(bottom_shape.area(), bottom_shape.depth_,
                                         eps, momentum, net_phase::test);

    // load pretrained global statistics, if present
    if (layer.blobs_size() > 0) {
        auto global_stats = layer.blobs();
        if (global_stats.size() != 3) {
            throw std::runtime_error("unexpected bn stored statistics");
        }

        float_t scale_factor = global_stats.Get(2).data(0) == 0 ?
                               0 : 1 / global_stats.Get(2).data(0);
        vec_t mean(bottom_shape.depth_);
        vec_t variance(bottom_shape.depth_);

        for (size_t i = 0; i < mean.size(); i++) {
            mean[i]     = global_stats.Get(0).data(i) * scale_factor;
            variance[i] = global_stats.Get(1).data(i) * scale_factor;
        }
        bn->set_mean(mean);
        bn->set_variance(variance);
    }

    return bn;
}
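// Copies InnerProduct weights/biases from a Caffe blob into a tiny-dnn
// fully-connected layer. Caffe stores the weight matrix as (out x in) in
// row-major order, whereas tiny-dnn indexes it as in*out_size + out, so the
// copy below effectively transposes the matrix.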
inline void load_weights_fullyconnected(const caffe::LayerParameter& src,
                                        layer *dst) {
    auto weights = src.blobs(0);

    const auto dst_out_size = dst->out_size();
    const auto dst_in_size = dst->in_size();

    if (dst_out_size * dst_in_size !=
        static_cast<serial_size_t>(weights.data_size())) {
        throw nn_error(
            std::string("layer size mismatch!") +
            "caffe(" + src.name() + "):" + to_string(weights.data_size()) + "\n" +
            "tiny-dnn(" + dst->layer_type() + "):" + to_string(dst->weights().size()));
    }

    vec_t& w = *dst->weights()[0];
    vec_t& b = *dst->weights()[1];

    // fill weights (transposed copy)
    size_t curr = 0;
    for (size_t o = 0; o < dst_out_size; o++) {
        for (size_t i = 0; i < dst_in_size; i++) {
            w[i * dst_out_size + o] = weights.data(curr++);
        }
    }

    // fill bias
    if (src.inner_product_param().bias_term()) {
        auto biases = src.blobs(1);
        for (size_t o = 0; o < dst_out_size; o++) {
            b[o] = biases.data(o);
        }
    }
}
inline std::shared_ptr<layer> create_fullyconnected(
        const caffe::LayerParameter& layer,
        const shape_t& bottom_shape, shape_t *top_shape) {
    using fc_layer = fully_connected_layer<activation::identity>;

    if (!layer.has_inner_product_param()) {
        throw nn_error("inner-product param missing");
    }

    layer_size_t dim_input = 0, dim_output = 0;
    bool has_bias = true;

    auto ip_param = layer.inner_product_param();
    has_bias = ip_param.bias_term();

    dim_output = ip_param.num_output();
    dim_input = bottom_shape.size();

    auto ip = std::make_shared<fc_layer>(dim_input, dim_output, has_bias);

    // filler
    if (ip_param.has_weight_filler()) {
        ip->weight_init(create_filler(ip_param.weight_filler().type()));
    }
    if (ip_param.has_bias_filler()) {
        ip->bias_init(create_filler(ip_param.bias_filler().type()));
    }

    // copy pretrained weights, if present
    if (layer.blobs_size() > 0) {
        load_weights_fullyconnected(layer, ip.get());
    }

    *top_shape = ip->out_shape()[0];
    return ip;
}
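// Copies convolution weights/biases from a Caffe blob into a tiny-dnn
// convolutional (or deconvolutional) layer. Both frameworks store the filters
// as out-channel-major blocks of window_size*window_size values per input
// channel. When a group parameter is present, a connection table restricts
// which (out, in) channel pairs are connected, and the destination slots of
// unconnected pairs are skipped.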
inline void load_weights_conv(const caffe::LayerParameter& src, layer *dst) {
    auto weights = src.blobs(0);

    int out_channels = dst->out_data_shape()[0].depth_;
    int in_channels = dst->in_data_shape()[0].depth_;

    connection_table table;
    auto conv_param = src.convolution_param();

    int window_size = get_kernel_size_2d(conv_param);

    if (conv_param.has_group()) {
        table = connection_table(conv_param.group(), in_channels, out_channels);
    }

    vec_t& w = *dst->weights()[0];
    vec_t& b = *dst->weights()[1];

    // fill weights
    size_t dst_idx = 0;
    size_t src_idx = 0;

    for (int o = 0; o < out_channels; o++) {
        for (int i = 0; i < in_channels; i++) {
            if (!table.is_connected(o, i)) {
                dst_idx += window_size * window_size;
                continue;
            }
            for (int x = 0; x < window_size * window_size; x++) {
                w[dst_idx++] = weights.data(src_idx++);
            }
        }
    }

    // fill bias
    if (conv_param.bias_term()) {
        auto biases = src.blobs(1);
        for (int o = 0; o < out_channels; o++) {
            b[o] = biases.data(o);
        }
    }
}
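// Pooling layers carry no learned parameters in Caffe. For average pooling,
// tiny-dnn still exposes a scale/bias pair, so the scale is set to
// 1/(kernel area) and the bias to zero; max pooling (no weights) is left
// untouched.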
inline void load_weights_pool(const caffe::LayerParameter& src, layer *dst) {
    auto pool_param = src.pooling_param();

    if (dst->weights().size()) {
        layer_size_t pool_size = 0;

        if (!get_kernel_size_2d(pool_param, &pool_size)) {
            pool_size = pool_param.kernel_size();
        }

        // tiny-dnn has trainable parameters in the average-pooling layer
        float_t weight = float_t(1) / sqr(pool_size);

        vec_t& w = *dst->weights()[0];
        vec_t& b = *dst->weights()[1];

        std::fill(w.begin(), w.end(), weight);
        std::fill(b.begin(), b.end(), float_t(0));
    }
}
inline std::shared_ptr<layer> create_lrn(const caffe::LayerParameter& layer,
                                         const shape_t& bottom_shape, shape_t *top_shape) {
    using lrn_layer = lrn_layer<activation::identity>;

    if (!layer.has_lrn_param()) {
        throw nn_error("lrn param missing");
    }

    auto lrn_param = layer.lrn_param();
    layer_size_t local_size = 5;
    float_t alpha = 1;       // Caffe proto defaults
    float_t beta = 0.75f;
    norm_region region = norm_region::across_channels;

    if (lrn_param.has_local_size()) local_size = lrn_param.local_size();
    if (lrn_param.has_alpha()) alpha = lrn_param.alpha();
    if (lrn_param.has_beta()) beta = lrn_param.beta();
    if (lrn_param.has_norm_region()) {
        if (lrn_param.norm_region() == caffe::LRNParameter_NormRegion_WITHIN_CHANNEL)
            region = norm_region::within_channels;
    }

    auto lrn = std::make_shared<lrn_layer>(bottom_shape.width_,
                                           bottom_shape.height_,
                                           local_size,
                                           bottom_shape.depth_,
                                           alpha, beta, region);
    *top_shape = lrn->out_shape()[0];
    return lrn;
}
inline std::shared_ptr<layer> create_dropout(const caffe::LayerParameter& layer,
                                             const shape_t& bottom_shape, shape_t *top_shape) {
    if (!layer.has_dropout_param()) {
        throw nn_error("dropout param missing");
    }

    float_t dropout_rate = float_t(0.5);

    if (layer.dropout_param().has_dropout_ratio()) {
        dropout_rate = layer.dropout_param().dropout_ratio();
    }

    auto dropout = std::make_shared<dropout_layer>(bottom_shape.size(),
                                                   dropout_rate,
                                                   net_phase::test);
    *top_shape = dropout->out_shape()[0];
    return dropout;
}
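// Converts a Caffe Convolution layer. As with pooling, only two padding modes
// are representable: pad == 0 maps to padding::valid, and
// pad == (kernel_size - 1) / 2 (a "same"-sized output for odd kernels at
// stride 1) maps to padding::same; asymmetric or other pad values are
// rejected. Grouped convolutions are expressed as a sparse connection table.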
inline std::shared_ptr<layer> create_convlayer(const caffe::LayerParameter& layer,
                                               const shape_t& bottom_shape, shape_t *top_shape) {
    using conv_layer = convolutional_layer<activation::identity>;

    if (!layer.has_convolution_param()) {
        throw nn_error("convolution param missing");
    }

    // layer parameters
    layer_size_t in_width = 0, in_height = 0, window_size = 0;
    layer_size_t in_channels = 0, out_channels = 0;
    layer_size_t w_stride = 1, h_stride = 1;
    bool has_bias = true;
    padding pad_type = padding::valid;
    connection_table table;

    auto conv_param = layer.convolution_param();

    // shape
    out_channels = conv_param.num_output();
    in_channels = bottom_shape.depth_;
    in_width = bottom_shape.width_;
    in_height = bottom_shape.height_;
    has_bias = conv_param.bias_term();
    window_size = get_kernel_size_2d(conv_param);

    // padding
    if (conv_param.pad_size() == 1 ||
        (conv_param.has_pad_w() && conv_param.has_pad_h())) {
        uint32_t pad_w = conv_param.pad_size() == 1 ?
                         conv_param.pad(0) : conv_param.pad_w();
        uint32_t pad_h = conv_param.pad_size() == 1 ?
                         conv_param.pad(0) : conv_param.pad_h();

        if (pad_w != pad_h) {
            throw nn_error("conv:not supported padding size");
        }

        // 0 ... valid, (window_size - 1) / 2 ... same
        if (pad_w == (window_size - 1) / 2) {
            pad_type = padding::same;
        } else if (pad_w == 0) {
            pad_type = padding::valid;
        } else {
            throw nn_error("conv:not supported padding size");
        }
    }

    // stride
    if (conv_param.stride_size() == 1 || conv_param.has_stride_h()) {
        h_stride = conv_param.stride_size() == 1 ?
                   conv_param.stride(0) : conv_param.stride_h();
    }
    if (conv_param.stride_size() == 1 || conv_param.has_stride_w()) {
        w_stride = conv_param.stride_size() == 1 ?
                   conv_param.stride(0) : conv_param.stride_w();
    }

    // group
    if (conv_param.has_group()) {
        table = connection_table(conv_param.group(), in_channels, out_channels);
    }

    auto conv = std::make_shared<conv_layer>(in_width, in_height,
                                             window_size,
                                             in_channels, out_channels,
                                             table, pad_type,
                                             has_bias,
                                             w_stride, h_stride);
    // filler
    if (conv_param.has_weight_filler()) {
        conv->weight_init(create_filler(conv_param.weight_filler().type()));
    }
    if (conv_param.has_bias_filler()) {
        conv->bias_init(create_filler(conv_param.bias_filler().type()));
    }

    // set pretrained weights, if present
    if (layer.blobs_size() > 0) {
        load_weights_conv(layer, conv.get());
    }

    *top_shape = conv->out_shape()[0];
    return conv;
}
inline std::shared_ptr<layer> create_deconvlayer(const caffe::LayerParameter& layer,
                                                 const shape_t& bottom_shape, shape_t *top_shape) {
    using deconv_layer = deconvolutional_layer<activation::identity>;

    if (!layer.has_convolution_param()) {
        throw nn_error("deconvolution param missing");
    }

    // layer parameters
    layer_size_t in_width = 0, in_height = 0, window_size = 0;
    layer_size_t in_channels = 0, out_channels = 0;
    layer_size_t w_stride = 1, h_stride = 1;
    bool has_bias = true;
    padding pad_type = padding::valid;
    connection_table table;

    auto deconv_param = layer.convolution_param();

    // shape
    out_channels = deconv_param.num_output();
    in_channels = bottom_shape.depth_;
    in_width = bottom_shape.width_;
    in_height = bottom_shape.height_;
    has_bias = deconv_param.bias_term();
    window_size = get_kernel_size_2d(deconv_param);

    // unpadding
    if (deconv_param.pad_size() == 1 ||
        (deconv_param.has_pad_w() && deconv_param.has_pad_h())) {
        uint32_t unpad_w = deconv_param.pad_size() == 1 ?
                           deconv_param.pad(0) : deconv_param.pad_w();
        uint32_t unpad_h = deconv_param.pad_size() == 1 ?
                           deconv_param.pad(0) : deconv_param.pad_h();

        if (unpad_w != unpad_h) {
            throw nn_error("deconv:not supported unpadding size");
        }

        // 0 ... valid, (window_size - 1) / 2 ... same
        if (unpad_w == (window_size - 1) / 2) {
            pad_type = padding::same;
        } else if (unpad_w == 0) {
            pad_type = padding::valid;
        } else {
            throw nn_error("deconv:not supported unpadding size");
        }
    }

    // stride
    if (deconv_param.stride_size() == 1 || deconv_param.has_stride_h()) {
        h_stride = deconv_param.stride_size() == 1 ?
                   deconv_param.stride(0) : deconv_param.stride_h();
    }
    if (deconv_param.stride_size() == 1 || deconv_param.has_stride_w()) {
        w_stride = deconv_param.stride_size() == 1 ?
                   deconv_param.stride(0) : deconv_param.stride_w();
    }

    // group
    if (deconv_param.has_group()) {
        table = connection_table(deconv_param.group(), in_channels, out_channels);
    }

    auto deconv = std::make_shared<deconv_layer>(in_width, in_height,
                                                 window_size,
                                                 in_channels, out_channels,
                                                 table, pad_type,
                                                 has_bias,
                                                 w_stride, h_stride);
    // filler
    if (deconv_param.has_weight_filler()) {
        deconv->weight_init(create_filler(deconv_param.weight_filler().type()));
    }
    if (deconv_param.has_bias_filler()) {
        deconv->bias_init(create_filler(deconv_param.bias_filler().type()));
    }

    // set pretrained weights, if present
    if (layer.blobs_size() > 0) {
        load_weights_conv(layer, deconv.get());
    }

    *top_shape = deconv->out_shape()[0];
    return deconv;
}
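// Layer-name bookkeeping: layer_skipped() lists Caffe layer types that have
// no tiny-dnn counterpart but can safely be ignored, layer_supported() lists
// the convertible types, and layer_match() pairs a Caffe type name with the
// tiny-dnn layer_type() string it is expected to become.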
inline bool layer_skipped(const std::string& type) {
    if (type == "Data" || type == "EuclideanLoss" || type == "Input") return true;
    return false;
}
inline bool layer_supported(const std::string& type) {
    static const char* supported[] = {
        "InnerProduct", "Convolution", "Deconvolution", "Pooling",
        "LRN", "Dropout",
        "SoftmaxWithLoss", "SigmoidCrossEntropyLoss",
        "ReLU", "Sigmoid", "TanH", "Softmax", "BatchNorm", "Power"
    };

    for (size_t i = 0; i < sizeof(supported) / sizeof(supported[0]); i++) {
        if (supported[i] == type) return true;
    }
    return false;
}
inline bool layer_match(const std::string& caffetype,
                        const std::string& tiny_dnn_type) {
    const char* conversions[][2] = {
        { "InnerProduct", "fully-connected" },
        { "Convolution", "conv" },
        { "Deconvolution", "deconv" },
        { "Pooling", "ave-pool" },
        { "Pooling", "max-pool" }
    };

    for (size_t i = 0; i < sizeof(conversions) / sizeof(conversions[0]); i++) {
        if (conversions[i][0] == caffetype &&
            conversions[i][1] == tiny_dnn_type) return true;
    }
    return false;
}
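// Dispatches a single Caffe layer description to the matching create_*
// factory above. in_shape is the output shape of the previous layer; the
// factory writes the resulting output shape into out_shape so the caller can
// chain layers.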
inline std::shared_ptr<layer> create(const caffe::LayerParameter& layer,
                                     const shape_t& in_shape, shape_t *out_shape) {
    const std::string layer_type = layer.type();

    if (layer_type == "Convolution") {
        return detail::create_convlayer(layer, in_shape, out_shape);
    }
    if (layer_type == "Deconvolution") {
        return detail::create_deconvlayer(layer, in_shape, out_shape);
    }
    if (layer_type == "InnerProduct") {
        return detail::create_fullyconnected(layer, in_shape, out_shape);
    }
    if (layer_type == "Pooling") {
        return detail::create_pooling(layer, in_shape, out_shape);
    }
    if (layer_type == "BatchNorm") {
        return detail::create_batchnorm(layer, in_shape, out_shape);
    }
    if (layer_type == "LRN") {
        return detail::create_lrn(layer, in_shape, out_shape);
    }
    if (layer_type == "Dropout") {
        return detail::create_dropout(layer, in_shape, out_shape);
    }
    if (layer_type == "SoftmaxWithLoss" || layer_type == "Softmax") {
        return detail::create_softmax(layer, in_shape, out_shape);
    }
    if (layer_type == "SigmoidCrossEntropyLoss" || layer_type == "Sigmoid") {
        return detail::create_sigmoid(layer, in_shape, out_shape);
    }
    if (layer_type == "ReLU") {
        return detail::create_relu(layer, in_shape, out_shape);
    }
    if (layer_type == "TanH") {
        return detail::create_tanh(layer, in_shape, out_shape);
    }
    if (layer_type == "Power") {
        return detail::create_power(layer, in_shape, out_shape);
    }
    throw nn_error("layer parser not found");
}
inline void load(const caffe::LayerParameter& src, layer *dst) {
    typedef std::function<void(const caffe::LayerParameter&, layer*)> factoryimpl;
    std::unordered_map<std::string, factoryimpl> factory_registry;

    factory_registry["Convolution"] = detail::load_weights_conv;
    factory_registry["Deconvolution"] = detail::load_weights_conv;
    factory_registry["InnerProduct"] = detail::load_weights_fullyconnected;
    factory_registry["Pooling"] = detail::load_weights_pool;

    if (factory_registry.find(src.type()) == factory_registry.end()) {
        throw nn_error("layer parser not found");
    }

    return factory_registry[src.type()](src, dst);
}
// intermediate graph node used while ordering the Caffe layers
struct layer_node {
    const caffe::LayerParameter *layer;
    layer_node *next;  // top-side
    layer_node *prev;  // bottom-side

    layer_node() : layer(0), next(0), prev(0) {}
    explicit layer_node(const caffe::LayerParameter *l)
        : layer(l), next(0), prev(0) {}
};
// flattens a caffe::NetParameter into an ordered list of layer parameters
class caffe_layer_vector {
 public:
    explicit caffe_layer_vector(const caffe::NetParameter& net_orig)
        : net(net_orig) {
        // upgrade deprecated V1 nets (old "layers" field) first
        if (net.layers_size() > 0) {
            upgradev1net(net_orig, &net);
        }

        nodes.reserve(net.layer_size());

        for (int i = 0; i < net.layer_size(); i++) {
            auto& l = net.layer(i);

            if (layer_table.find(l.name()) != layer_table.end()) continue;

            nodes.emplace_back(&l);
            layer_table[l.name()] = &nodes.back();
        }

        // wire up producer/consumer links via the blob (bottom/top) names
        for (size_t i = 0; i < nodes.size(); i++) {
            auto& l = nodes[i];

            if (l.layer->bottom_size() > 0 && blob_table[l.layer->bottom(0)]) {
                auto& bottom = blob_table[l.layer->bottom(0)];
                l.prev = bottom;
                layer_table[bottom->layer->name()]->next = &l;
            }

            if (l.layer->top_size() > 0) {
                blob_table[l.layer->top(0)] = &l;
            }
        }

        // the root layer is the one without a predecessor
        auto root = std::find_if(nodes.begin(), nodes.end(),
                                 [](const layer_node& n) { return n.prev == 0; });

        if (root == nodes.end()) {
            throw nn_error("root layer not found");
        }

        // walk the chain from the root and record the layers in order
        const layer_node *current = &*root;
        while (current) {
            node_list.push_back(current->layer);
            current = current->next;
        }
    }
    size_t size() const {
        return node_list.size();
    }

    const caffe::LayerParameter& operator[] (size_t index) const {
        return *(node_list[index]);
    }
 private:
    void upgradev1net(const caffe::NetParameter& old,
                      caffe::NetParameter *dst) const {
        // copy the net-level fields, then re-create every layer in the new format
        dst->CopyFrom(old);
        dst->clear_layers();
        dst->clear_layer();

        for (int i = 0; i < old.layers_size(); i++) {
            upgradev1layer(old.layers(i), dst->add_layer());
        }
    }
    const char* v1type2name(caffe::V1LayerParameter_LayerType type) const {
        switch (type) {
        case caffe::V1LayerParameter_LayerType_NONE:
            return "";
        case caffe::V1LayerParameter_LayerType_ABSVAL:
            return "AbsVal";
        case caffe::V1LayerParameter_LayerType_ACCURACY:
            return "Accuracy";
        case caffe::V1LayerParameter_LayerType_ARGMAX:
            return "ArgMax";
        case caffe::V1LayerParameter_LayerType_BNLL:
            return "BNLL";
        case caffe::V1LayerParameter_LayerType_CONCAT:
            return "Concat";
        case caffe::V1LayerParameter_LayerType_CONTRASTIVE_LOSS:
            return "ContrastiveLoss";
        case caffe::V1LayerParameter_LayerType_CONVOLUTION:
            return "Convolution";
        case caffe::V1LayerParameter_LayerType_DECONVOLUTION:
            return "Deconvolution";
        case caffe::V1LayerParameter_LayerType_DATA:
            return "Data";
        case caffe::V1LayerParameter_LayerType_DROPOUT:
            return "Dropout";
        case caffe::V1LayerParameter_LayerType_DUMMY_DATA:
            return "DummyData";
        case caffe::V1LayerParameter_LayerType_EUCLIDEAN_LOSS:
            return "EuclideanLoss";
        case caffe::V1LayerParameter_LayerType_ELTWISE:
            return "Eltwise";
        case caffe::V1LayerParameter_LayerType_EXP:
            return "Exp";
        case caffe::V1LayerParameter_LayerType_FLATTEN:
            return "Flatten";
        case caffe::V1LayerParameter_LayerType_HDF5_DATA:
            return "HDF5Data";
        case caffe::V1LayerParameter_LayerType_HDF5_OUTPUT:
            return "HDF5Output";
        case caffe::V1LayerParameter_LayerType_HINGE_LOSS:
            return "HingeLoss";
        case caffe::V1LayerParameter_LayerType_IM2COL:
            return "Im2col";
        case caffe::V1LayerParameter_LayerType_IMAGE_DATA:
            return "ImageData";
        case caffe::V1LayerParameter_LayerType_INFOGAIN_LOSS:
            return "InfogainLoss";
        case caffe::V1LayerParameter_LayerType_INNER_PRODUCT:
            return "InnerProduct";
        case caffe::V1LayerParameter_LayerType_LRN:
            return "LRN";
        case caffe::V1LayerParameter_LayerType_MEMORY_DATA:
            return "MemoryData";
        case caffe::V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS:
            return "MultinomialLogisticLoss";
        case caffe::V1LayerParameter_LayerType_MVN:
            return "MVN";
        case caffe::V1LayerParameter_LayerType_POOLING:
            return "Pooling";
        case caffe::V1LayerParameter_LayerType_POWER:
            return "Power";
        case caffe::V1LayerParameter_LayerType_RELU:
            return "ReLU";
        case caffe::V1LayerParameter_LayerType_SIGMOID:
            return "Sigmoid";
        case caffe::V1LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS:
            return "SigmoidCrossEntropyLoss";
        case caffe::V1LayerParameter_LayerType_SILENCE:
            return "Silence";
        case caffe::V1LayerParameter_LayerType_SOFTMAX:
            return "Softmax";
        case caffe::V1LayerParameter_LayerType_SOFTMAX_LOSS:
            return "SoftmaxWithLoss";
        case caffe::V1LayerParameter_LayerType_SPLIT:
            return "Split";
        case caffe::V1LayerParameter_LayerType_SLICE:
            return "Slice";
        case caffe::V1LayerParameter_LayerType_TANH:
            return "TanH";
        case caffe::V1LayerParameter_LayerType_WINDOW_DATA:
            return "WindowData";
        case caffe::V1LayerParameter_LayerType_THRESHOLD:
            return "Threshold";
        default:
            throw nn_error("unknown v1 layer-type");
        }
    }
    void upgradev1layer(const caffe::V1LayerParameter& old,
                        caffe::LayerParameter *dst) const {
        dst->Clear();

        for (int i = 0; i < old.bottom_size(); i++) {
            dst->add_bottom(old.bottom(i));
        }
        for (int i = 0; i < old.top_size(); i++) {
            dst->add_top(old.top(i));
        }

        if (old.has_name()) dst->set_name(old.name());
        if (old.has_type()) dst->set_type(v1type2name(old.type()));

        for (int i = 0; i < old.blobs_size(); i++) {
            dst->add_blobs()->CopyFrom(old.blobs(i));
        }
        for (int i = 0; i < old.param_size(); i++) {
            while (dst->param_size() <= i) dst->add_param();
            dst->mutable_param(i)->set_name(old.param(i));
        }

        #define COPY_PARAM(name) if (old.has_##name##_param()) dst->mutable_##name##_param()->CopyFrom(old.name##_param())

        COPY_PARAM(accuracy);
        COPY_PARAM(argmax);
        COPY_PARAM(concat);
        COPY_PARAM(contrastive_loss);
        COPY_PARAM(convolution);
        COPY_PARAM(data);
        COPY_PARAM(dropout);
        COPY_PARAM(dummy_data);
        COPY_PARAM(eltwise);
        COPY_PARAM(exp);
        COPY_PARAM(hdf5_data);
        COPY_PARAM(hdf5_output);
        COPY_PARAM(hinge_loss);
        COPY_PARAM(image_data);
        COPY_PARAM(infogain_loss);
        COPY_PARAM(inner_product);
        COPY_PARAM(lrn);
        COPY_PARAM(memory_data);
        COPY_PARAM(mvn);
        COPY_PARAM(pooling);
        COPY_PARAM(power);
        COPY_PARAM(relu);
        COPY_PARAM(sigmoid);
        COPY_PARAM(softmax);
        COPY_PARAM(slice);
        COPY_PARAM(tanh);
        COPY_PARAM(threshold);
        COPY_PARAM(window_data);
        COPY_PARAM(transform);
        COPY_PARAM(loss);
        #undef COPY_PARAM
    }
    caffe::NetParameter net;
    std::map<std::string, layer_node*> layer_table;  // layer name -> node
    std::map<std::string, layer_node*> blob_table;   // blob (top) name -> producing node
    std::vector<layer_node> nodes;
    std::vector<const caffe::LayerParameter*> node_list;  // layers in topological order
};

}  // namespace detail
}  // namespace tiny_dnn

#ifdef _MSC_VER
#pragma warning(pop)
#endif