#include "tiny_dnn/layers/layer.h"
#include "tiny_dnn/util/product.h"

namespace tiny_dnn {

/**
 * compute fully-connected(matmul) operation
 **/
template<typename Activation>
class quantized_fully_connected_layer : public feedforward_layer<Activation> {
 public:
    typedef feedforward_layer<Activation> Base;
    CNN_USE_LAYER_MEMBERS;

    /**
     * @param in_dim       [in] number of elements of the input
     * @param out_dim      [in] number of elements of the output
     * @param has_bias     [in] whether to include an additional bias to this layer
     * @param backend_type [in] backend engine used to run the kernels
     **/
    quantized_fully_connected_layer(serial_size_t in_dim,
                                    serial_size_t out_dim,
                                    bool          has_bias     = true,
                                    backend_t     backend_type = core::backend_t::internal)
            : Base(std_input_order(has_bias)) {
        set_params(in_dim, out_dim, has_bias);
        init_backend(backend_type);
    }

    // move constructor
    quantized_fully_connected_layer(quantized_fully_connected_layer&& other)
            : Base(std::move(other))
            , params_(std::move(other.params_)) {
        init_backend(core::backend_t::internal);
    }

    serial_size_t fan_in_size() const override {
        return params_.in_size_;
    }

    serial_size_t fan_out_size() const override {
        return params_.out_size_;
    }

    std::vector<index3d<serial_size_t>> in_shape() const override {
        if (params_.has_bias_) {
            return { index3d<serial_size_t>(params_.in_size_, 1, 1),
                     index3d<serial_size_t>(params_.in_size_,
                                            params_.out_size_, 1),
                     index3d<serial_size_t>(params_.out_size_, 1, 1) };
        } else {
            return { index3d<serial_size_t>(params_.in_size_, 1, 1),
                     index3d<serial_size_t>(params_.in_size_,
                                            params_.out_size_, 1) };
        }
    }

    std::vector<index3d<serial_size_t>> out_shape() const override {
        return { index3d<serial_size_t>(params_.out_size_, 1, 1),
                 index3d<serial_size_t>(params_.out_size_, 1, 1) };
    }

    void forward_propagation(const std::vector<tensor_t*>& in_data,
                             std::vector<tensor_t*>& out_data) override {
        if (in_data.size() == 2 || in_data.size() == 3) {
            Base::backend_->fully_q(in_data, out_data);

            // activations
            this->forward_activation(*out_data[0], *out_data[1]);
        } else if (in_data.size() == 4 || in_data.size() == 6) {
            Base::backend_->fully_eq(in_data, out_data);
        }
    }

    void back_propagation(const std::vector<tensor_t*>& in_data,
                          const std::vector<tensor_t*>& out_data,
                          std::vector<tensor_t*>&       out_grad,
                          std::vector<tensor_t*>&       in_grad) override {
        Base::backend_->fully_q(in_data, out_data, out_grad, in_grad);
    }

    std::string layer_type() const override { return "q_fully-connected"; }

 protected:
    fully_params params_;

    void set_params(const serial_size_t in_size,
                    const serial_size_t out_size,
                    bool                has_bias) {
        params_.in_size_  = in_size;
        params_.out_size_ = out_size;
        params_.has_bias_ = has_bias;
    }

    void init_backend(backend_t backend_type) {
        std::shared_ptr<core::backend> backend = nullptr;

        // allocate new backend
        if (backend_type == backend_t::internal) {
            backend = std::make_shared<core::tiny_backend>(&params_,
                [this](const tensor_t& p_delta,
                       const tensor_t& out, tensor_t& c_delta) {
                    return Base::backward_activation(p_delta, out, c_delta);
                });
        } else if (backend_type == backend_t::nnpack) {
            backend = std::make_shared<core::nnp_backend>(&params_);
        } else if (backend_type == backend_t::libdnn) {
            backend = std::make_shared<core::dnn_backend>();
        } else {
            throw nn_error("Not supported backend type.");
        }

        if (backend) {
            Base::set_backend(backend);
            Base::set_backend_type(backend_type);
            Base::backend_->set_layer(this);
        } else {
            throw nn_error("Could not allocate the backend.");
        }
    }
};

}  // namespace tiny_dnn
Referenced from the base classes (feedforward_layer.h, layer.h):
- feedforward_layer<Activation>: single-input, single-output network layer with an activation function.
- layer::backend_: the backend instance (deprecated).
- layer::in_size() const, layer::out_size() const: input/output size accessors inherited from layer.
- layer::backend(): accessor for the layer's std::shared_ptr<core::backend>.

Members defined in quantized_fully_connected_layer.h:
- quantized_fully_connected_layer<Activation>: compute fully-connected (matmul) operation.
- quantized_fully_connected_layer(serial_size_t in_dim, serial_size_t out_dim, bool has_bias = true, backend_t backend_type = core::backend_t::internal): constructor.
- layer_type() const: name of the layer; should be unique for each concrete class.
- fan_in_size() const: number of incoming connections for each output unit; used only by weight/bias initialization methods.
- fan_out_size() const: number of outgoing connections for each input unit; used only by weight/bias initialization methods.
- in_shape() const: array of input shapes (width x height x depth).
- out_shape() const: array of output shapes (width x height x depth).
- forward_propagation(in_data, out_data): forward pass through the quantized backend kernel.
- back_propagation(in_data, out_data, out_grad, in_grad): returns the delta of the previous layer (delta = dE/da, with a = Wx in a fully-connected layer).
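As a concrete reading of in_shape() and out_shape(), the sketch below prints the shape arrays that a freshly constructed layer reports to the graph builder. The dimensions (256 inputs, 10 outputs) are hypothetical and chosen only for illustration.

#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

int main() {
    using namespace tiny_dnn;

    // Hypothetical dimensions, chosen only to make the shape arrays concrete.
    quantized_fully_connected_layer<activation::identity>
        fc(256, 10, /*has_bias=*/true);

    // With bias, in_shape() lists three tensors:
    //   256x1x1  input x
    //   256x10x1 weight W
    //   10x1x1   bias b
    for (const auto& s : fc.in_shape())
        std::cout << s.width_ << "x" << s.height_ << "x" << s.depth_ << std::endl;

    // out_shape() lists two 10x1x1 tensors: the affine result a = Wx + b
    // and the activated output h(a).
    for (const auto& s : fc.out_shape())
        std::cout << s.width_ << "x" << s.height_ << "x" << s.depth_ << std::endl;

    return 0;
}

Read together with the back_propagation brief above: since a = Wx + b, the delta handed back to the previous layer is dE/dx = W^T * (dE/da), while the gradients with respect to W and b are written to the corresponding in_grad channels.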