tiny_dnn  1.0.0
A header only, dependency-free deep learning framework in C++11
tiny_dnn::quantized_fully_connected_layer< Activation > Class Template Reference

compute fully-connected (matmul) operation

#include <quantized_fully_connected_layer.h>

Inheritance diagram for tiny_dnn::quantized_fully_connected_layer< Activation >: [inheritance graph omitted]
Collaboration diagram for tiny_dnn::quantized_fully_connected_layer< Activation >: [collaboration graph omitted]

Public Types

typedef feedforward_layer< Activation > Base
 

Public Member Functions

 quantized_fully_connected_layer (serial_size_t in_dim, serial_size_t out_dim, bool has_bias=true, backend_t backend_type=core::backend_t::internal)
 
 quantized_fully_connected_layer (quantized_fully_connected_layer &&other)
 
serial_size_t fan_in_size () const override
 number of incoming connections for each output unit; used only for weight/bias initialization methods which require the fan-in size (e.g. Xavier)
 
serial_size_t fan_out_size () const override
 number of outgoing connections for each input unit; used only for weight/bias initialization methods which require the fan-out size (e.g. Xavier)
 
std::vector< index3d< serial_size_t > > in_shape () const override
 array of input shapes (width x height x depth)
 
std::vector< index3d< serial_size_t > > out_shape () const override
 array of output shapes (width x height x depth)
 
void forward_propagation (const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data) override
 
void back_propagation (const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad) override
 return delta of the previous layer (\(\delta = \frac{dE}{da}\), where \(a = wx\) in a fully-connected layer)
 
std::string layer_type () const override
 name of layer, should be unique for each concrete class
 
- Public Member Functions inherited from tiny_dnn::feedforward_layer< Activation >
 feedforward_layer (const std::vector< vector_type > &in_data_type)
 
activation::function & activation_function ()
 
std::pair< float_t, float_t > out_value_range () const override
 return the output value range; used only for calculating the target value from a label-id in the final (output) layer. Override properly if the layer is intended to be used as an output layer.
 
void forward_activation (tensor_t &a_tensor, tensor_t &out_tensor)
 
void backward_activation (const tensor_t &prev_delta, const tensor_t &this_out, tensor_t &curr_delta)
 
- Public Member Functions inherited from tiny_dnn::layer
 layer (const std::vector< vector_type > &in_type, const std::vector< vector_type > &out_type)
 Default layer constructor that instantiates an N-input, M-output layer.
 
 layer (const layer &)=default
 
layer & operator= (const layer &)=default
 
void set_parallelize (bool parallelize)
 
void set_backend (std::shared_ptr< core::backend > backend)
 
void set_backend_type (core::backend_t backend_type)
 
bool parallelize () const
 
core::backend_t backend_type () const
 
core::backend_t engine () const
 
virtual std::string kernel_file () const
 
virtual std::string kernel_header () const
 
virtual void createOp ()
 
void setDevice (const Device &device)
 
Device * device () const
 
std::shared_ptr< core::backend > backend ()
 
serial_size_t in_channels () const
 number of incoming edges in this layer
 
serial_size_t out_channels () const
 number of outgoing edges in this layer
 
serial_size_t in_data_size () const
 
serial_size_t out_data_size () const
 
std::vector< shape3d > in_data_shape ()
 
std::vector< shape3d > out_data_shape ()
 
serial_size_t in_size () const
 
serial_size_t out_size () const
 
std::vector< const vec_t * > weights () const
 
std::vector< vec_t * > weights ()
 
std::vector< tensor_t * > weights_grads ()
 
std::vector< edgeptr_t > inputs ()
 
std::vector< edgeptr_t > outputs ()
 
std::vector< edgeptr_t > outputs () const
 
void set_out_grads (const std::vector< tensor_t > &grad)
 
void set_in_data (const std::vector< tensor_t > &data)
 
std::vector< tensor_t > output () const
 
std::vector< vector_type > in_types () const
 
std::vector< vector_type > out_types () const
 
void set_trainable (bool trainable)
 
bool trainable () const
 
template<typename WeightInit >
layer & weight_init (const WeightInit &f)
 
template<typename BiasInit >
layer & bias_init (const BiasInit &f)
 
template<typename WeightInit >
layer & weight_init (std::shared_ptr< WeightInit > f)
 
template<typename BiasInit >
layer & bias_init (std::shared_ptr< BiasInit > f)
 
template<typename Archive >
void serialize (Archive &ar)
 
virtual void save (std::ostream &os) const
 
virtual void load (std::istream &is)
 
virtual void load (const std::vector< float_t > &src, int &idx)
 
virtual image output_to_image (size_t channel=0) const
 visualize the latest output of this layer; the default implementation interprets the output as a 1d-vector, so a "visual" layer (like a convolutional layer) should override this for better visualization.
 
virtual void post_update ()
 return delta2 of the previous layer (\(\delta_2 = \frac{d^2E}{da^2}\), the diagonal of the Hessian matrix); it is never called if the optimizer is hessian-free
 
virtual void set_context (net_phase ctx)
 notify changing context (train <=> test)
 
std::vector< tensor_t > forward (const std::vector< tensor_t > &input)
 
std::vector< tensor_t > backward (const std::vector< tensor_t > &out_grads)
 
void forward ()
 
void backward ()
 
void setup (bool reset_weight)
 
void init_weight ()
 
void clear_grads ()
 
void update_weight (optimizer *o, serial_size_t batch_size)
 
bool has_same_weights (const layer &rhs, float_t eps) const
 
virtual void set_sample_count (serial_size_t sample_count)
 
template<class Archive >
void serialize_prolog (Archive &ar)
 
- Public Member Functions inherited from tiny_dnn::node
 node (serial_size_t in_size, serial_size_t out_size)
 
const std::vector< edgeptr_t > & prev () const
 
const std::vector< edgeptr_t > & next () const
 
serial_size_t prev_port (const edge &e) const
 
serial_size_t next_port (const edge &e) const
 
std::vector< node * > prev_nodes () const
 
std::vector< node * > next_nodes () const
 

Public Attributes

 CNN_USE_LAYER_MEMBERS
 
- Public Attributes inherited from tiny_dnn::feedforward_layer< Activation >
Activation h_
 

Protected Member Functions

void set_params (const serial_size_t in_size, const serial_size_t out_size, bool has_bias)
 
void init_backend (backend_t backend_type)
 

Protected Attributes

fully_params params_
 
- Protected Attributes inherited from tiny_dnn::layer
bool initialized_
 Flag indicating whether the layer/node is initialized.
 
bool parallelize_
 Flag indicating whether the layer/node operations are parallelized.
 
serial_size_t in_channels_
 The number of input vectors/edges.
 
serial_size_t out_channels_
 The number of output vectors/edges.
 
std::vector< vector_type > in_type_
 Vector containing the type of data for inputs.
 
std::vector< vector_type > out_type_
 Vector containing the type of data for outputs.
 
core::backend_t backend_type_
 The current backend type for operations.
 
std::shared_ptr< core::backend > backend_
 The backend instance (deprecated)
 
Device * device_ptr_ = nullptr
 Pointer to the device on which the layer/node will run.
 
- Protected Attributes inherited from tiny_dnn::node
std::vector< edgeptr_t > prev_
 
std::vector< edgeptr_t > next_
 

Additional Inherited Members

- Static Public Member Functions inherited from tiny_dnn::layer
template<typename InputArchive >
static std::shared_ptr< layer > load_layer (InputArchive &ia)
 generate layer from cereal's Archive
 
template<typename OutputArchive >
static void save_layer (OutputArchive &oa, const layer &l)
 

Detailed Description

template<typename Activation>
class tiny_dnn::quantized_fully_connected_layer< Activation >

compute fully-connected (matmul) operation
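
In the notation used in the member documentation below (a = wx), each output unit computes the usual fully-connected affine map, followed by the Activation template parameter h:

    a_j = \sum_i w_{ji} x_i + b_j, \qquad y_j = h(a_j)

The quantized variant is intended to evaluate the same map using reduced-precision (quantized) representations of the inputs and weights internally; the public interface and tensor shapes are the same as those of the non-quantized fully-connected layer.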

Constructor & Destructor Documentation

◆ quantized_fully_connected_layer()

template<typename Activation >
tiny_dnn::quantized_fully_connected_layer< Activation >::quantized_fully_connected_layer ( serial_size_t  in_dim,
serial_size_t  out_dim,
bool  has_bias = true,
backend_t  backend_type = core::backend_t::internal 
)
inline
Parameters
in_dim  [in] number of elements of the input
out_dim  [in] number of elements of the output
has_bias  [in] whether to include an additional bias term in the layer
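
A minimal construction sketch; the tan_h activation, the sizes, and the variable name fc are illustrative choices rather than part of the documented API:

    #include "tiny_dnn/tiny_dnn.h"

    // 256-input, 10-output quantized fully-connected layer with a bias term,
    // running on the default internal backend.
    tiny_dnn::quantized_fully_connected_layer<tiny_dnn::activation::tan_h>
        fc(256, 10, /*has_bias=*/true, tiny_dnn::core::backend_t::internal);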

Member Function Documentation

◆ back_propagation()

template<typename Activation >
void tiny_dnn::quantized_fully_connected_layer< Activation >::back_propagation ( const std::vector< tensor_t * > &  in_data,
const std::vector< tensor_t * > &  out_data,
std::vector< tensor_t * > &  out_grad,
std::vector< tensor_t * > &  in_grad 
)
inline override virtual

return delta of the previous layer (\(\delta = \frac{dE}{da}\), where \(a = wx\) in a fully-connected layer)

Parameters
in_data  input vectors (same vectors as forward_propagation)
out_data  output vectors (same vectors as forward_propagation)
out_grad  gradient of output vectors (the i-th vector corresponds to out_data[i])
in_grad  gradient of input vectors (the i-th vector corresponds to in_data[i])

Implements tiny_dnn::layer.
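
For reference, with a = Wx + b as above, the quantities propagated here are the standard fully-connected gradients (a sketch of the math, not of the quantized kernel itself):

    \frac{dE}{dx} = W^T \delta, \qquad \frac{dE}{dW} = \delta x^T, \qquad \frac{dE}{db} = \delta, \qquad \text{where } \delta = \frac{dE}{da}

so the data entry of in_grad receives \(W^T \delta\), while its weight and bias entries receive \(\delta x^T\) and \(\delta\) respectively.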

◆ fan_in_size()

template<typename Activation >
serial_size_t tiny_dnn::quantized_fully_connected_layer< Activation >::fan_in_size ( ) const
inline override virtual

number of incoming connections for each output unit; used only for weight/bias initialization methods which require the fan-in size (e.g. Xavier).

Override if the layer has trainable weights and the scale of initialization is important.

Reimplemented from tiny_dnn::layer.

◆ fan_out_size()

template<typename Activation >
serial_size_t tiny_dnn::quantized_fully_connected_layer< Activation >::fan_out_size ( ) const
inline override virtual

number of outgoing connections for each input unit; used only for weight/bias initialization methods which require the fan-out size (e.g. Xavier).

Override if the layer has trainable weights and the scale of initialization is important.

Reimplemented from tiny_dnn::layer.
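
For a fully-connected layer these are simply the in_dim and out_dim passed to the constructor. An initializer such as Xavier then derives its sampling range from them; the sketch below shows the common Glorot-uniform formulation (the constant used by the library's own initializer may differ):

    #include <cmath>
    #include <cstddef>

    // Glorot/Xavier uniform limit computed from fan-in and fan-out;
    // weights are then drawn uniformly from [-limit, +limit].
    float xavier_limit(std::size_t fan_in, std::size_t fan_out) {
        return std::sqrt(6.0f / static_cast<float>(fan_in + fan_out));
    }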

◆ forward_propagation()

template<typename Activation >
void tiny_dnn::quantized_fully_connected_layer< Activation >::forward_propagation ( const std::vector< tensor_t * > &  in_data,
std::vector< tensor_t * > &  out_data 
)
inline override virtual
Parameters
in_data  input vectors of this layer (data, weight, bias)
out_data  output vectors

Implements tiny_dnn::layer.
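
The (data, weight, bias) layout of in_data matches the non-quantized fully-connected layer. Conceptually each output element is the affine map sketched below in plain float arithmetic; the weight storage order is illustrative and quantization is treated as an internal detail of the kernel:

    #include <cstddef>
    #include <vector>

    // Conceptual, non-quantized forward pass for a single sample.
    void fc_forward(const std::vector<float>& in,   // in_dim elements
                    const std::vector<float>& W,    // out_dim * in_dim elements (illustrative layout)
                    const std::vector<float>& b,    // out_dim elements
                    std::vector<float>& out,        // out_dim elements
                    std::size_t in_dim, std::size_t out_dim) {
        for (std::size_t j = 0; j < out_dim; ++j) {
            float sum = b[j];
            for (std::size_t i = 0; i < in_dim; ++i)
                sum += W[j * in_dim + i] * in[i];
            out[j] = sum;  // the Activation functor is applied afterwards
        }
    }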


The documentation for this class was generated from the following file:
quantized_fully_connected_layer.h