tiny_dnn  1.0.0
A header-only, dependency-free deep learning framework in C++11
tiny_dnn::layer Class Reference [abstract]

base class of all kinds of NN layers More...

#include <layer.h>

Inheritance diagram for tiny_dnn::layer: [diagram]
Collaboration diagram for tiny_dnn::layer: [diagram]

Public Member Functions

 layer (const std::vector< vector_type > &in_type, const std::vector< vector_type > &out_type)
 Default layer constructor that instantiates an N-input, M-output layer. More...
 
 layer (const layer &)=default
 
layer & operator= (const layer &)=default
 
void set_parallelize (bool parallelize)
 
void set_backend (std::shared_ptr< core::backend > backend)
 
void set_backend_type (core::backend_t backend_type)
 
bool parallelize () const
 
core::backend_t backend_type () const
 
core::backend_t engine () const
 
virtual std::string kernel_file () const
 
virtual std::string kernel_header () const
 
virtual void createOp ()
 
void setDevice (const Device &device)
 
Device * device () const
 
std::shared_ptr< core::backend > backend ()
 
serial_size_t in_channels () const
 number of incoming edges in this layer
 
serial_size_t out_channels () const
 number of outgoing edges in this layer
 
serial_size_t in_data_size () const
 
serial_size_t out_data_size () const
 
std::vector< shape3d > in_data_shape ()
 
std::vector< shape3d > out_data_shape ()
 
serial_size_t in_size () const
 Deprecated: use in_data_size() instead. More...
 
serial_size_t out_size () const
 Deprecated: use out_data_size() instead. More...
 
std::vector< const vec_t * > weights () const
 
std::vector< vec_t * > weights ()
 
std::vector< tensor_t * > weights_grads ()
 
std::vector< edgeptr_t > inputs ()
 
std::vector< edgeptr_t > outputs ()
 
std::vector< edgeptr_t > outputs () const
 
void set_out_grads (const std::vector< tensor_t > &grad)
 
void set_in_data (const std::vector< tensor_t > &data)
 
std::vector< tensor_t > output () const
 
std::vector< vector_type > in_types () const
 
std::vector< vector_type > out_types () const
 
void set_trainable (bool trainable)
 
bool trainable () const
 
virtual std::pair< float_t, float_t > out_value_range () const
 return output value range; used only for calculating the target value from a label-id in the final (output) layer. Override properly if the layer is intended to be used as an output layer.
 
virtual std::vector< shape3d > in_shape () const =0
 array of input shapes (width x height x depth)
 
virtual std::vector< shape3d > out_shape () const =0
 array of output shapes (width x height x depth)
 
virtual std::string layer_type () const =0
 name of layer, should be unique for each concrete class
 
virtual serial_size_t fan_in_size () const
 number of incoming connections for each output unit; used only for weight/bias initialization methods which require fan-in size. More...
 
virtual serial_size_t fan_out_size () const
 number of outgoing connections for each input unit; used only for weight/bias initialization methods which require fan-out size. More...
 
template<typename WeightInit >
layer & weight_init (const WeightInit &f)
 
template<typename BiasInit >
layer & bias_init (const BiasInit &f)
 
template<typename WeightInit >
layer & weight_init (std::shared_ptr< WeightInit > f)
 
template<typename BiasInit >
layer & bias_init (std::shared_ptr< BiasInit > f)
 
template<typename Archive >
void serialize (Archive &ar)
 
virtual void save (std::ostream &os) const
 
virtual void load (std::istream &is)
 
virtual void load (const std::vector< float_t > &src, int &idx)
 
virtual image output_to_image (size_t channel=0) const
 Visualize the latest output of this layer. The default implementation interprets the output as a 1d-vector, so a "visual" layer (like a convolutional layer) should override this for better visualization.
 
virtual void forward_propagation (const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data)=0
 
virtual void back_propagation (const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad)=0
 return delta of previous layer (delta = dE/da, where a = wx in a fully-connected layer); see the worked relation under back_propagation() below. More...
 
virtual void post_update ()
 return delta2 of previous layer (delta2 = d^2E/da^2, the diagonal of the Hessian matrix); it is never called if the optimizer is Hessian-free.
 
virtual void set_context (net_phase ctx)
 notify changing context (train <=> test)
 
std::vector< tensor_t > forward (const std::vector< tensor_t > &input)
 
std::vector< tensor_t > backward (const std::vector< tensor_t > &out_grads)
 
void forward ()
 
void backward ()
 
void setup (bool reset_weight)
 
void init_weight ()
 
void clear_grads ()
 
void update_weight (optimizer *o, serial_size_t batch_size)
 
bool has_same_weights (const layer &rhs, float_t eps) const
 
virtual void set_sample_count (serial_size_t sample_count)
 
template<class Archive >
void serialize_prolog (Archive &ar)
 
- Public Member Functions inherited from tiny_dnn::node
 node (serial_size_t in_size, serial_size_t out_size)
 
const std::vector< edgeptr_t > & prev () const
 
const std::vector< edgeptr_t > & next () const
 
serial_size_t prev_port (const edge &e) const
 
serial_size_t next_port (const edge &e) const
 
std::vector< node * > prev_nodes () const
 
std::vector< node * > next_nodes () const
 

Static Public Member Functions

template<typename InputArchive >
static std::shared_ptr< layer > load_layer (InputArchive &ia)
 generate layer from cereal's Archive
 
template<typename OutputArchive >
static void save_layer (OutputArchive &oa, const layer &l)
 

Protected Attributes

bool initialized_
 Flag indicating whether the layer/node is initialized.
 
bool parallelize_
 Flag indicating whether the layer/node operations are parallelized.
 
serial_size_t in_channels_
 The number of input vectors/edges.
 
serial_size_t out_channels_
 The number of output vectors/edges.
 
std::vector< vector_type > in_type_
 Vector containing the type of data for inputs.
 
std::vector< vector_type > out_type_
 Vector containing the type of data for outputs.
 
core::backend_t backend_type_
 The current backend type for operations.
 
std::shared_ptr< core::backend > backend_
 The backend instance (deprecated)
 
Device * device_ptr_ = nullptr
 Pointer to the device on which the layer/node will run.
 
- Protected Attributes inherited from tiny_dnn::node
std::vector< edgeptr_t > prev_
 
std::vector< edgeptr_t > next_
 

Friends

void connection_mismatch (const layer &from, const layer &to)
 

Detailed Description

base class of all kinds of NN layers

sub-classes should override these methods (the pure virtual members listed above): in_shape(), out_shape(), layer_type(), forward_propagation(), and back_propagation().
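
As a rough, hedged sketch (not code from the library), a minimal sub-class could look as follows. It assumes the usual tiny_dnn typedefs (tensor_t as a per-sample collection of vec_t, shape3d(width, height, depth)) and the vector_type::data enumerator implied by the constructor documentation below; the class name scale_layer, the member dim_, and the doubling operation are invented purely for illustration.

#include <string>
#include <vector>
#include "tiny_dnn/tiny_dnn.h"

// Hypothetical element-wise "multiply by 2" layer: one data input, one data
// output, no trainable parameters. Only the members this page marks as pure
// virtual are overridden.
class scale_layer : public tiny_dnn::layer {
 public:
  explicit scale_layer(tiny_dnn::serial_size_t dim)
    : tiny_dnn::layer({tiny_dnn::vector_type::data},   // in_type[N], N = 1
                      {tiny_dnn::vector_type::data}),  // out_type[M], M = 1
      dim_(dim) {}

  // array of input shapes (width x height x depth)
  std::vector<tiny_dnn::shape3d> in_shape() const override {
    return {tiny_dnn::shape3d(dim_, 1, 1)};
  }

  // array of output shapes (width x height x depth)
  std::vector<tiny_dnn::shape3d> out_shape() const override {
    return {tiny_dnn::shape3d(dim_, 1, 1)};
  }

  // unique name of this concrete class
  std::string layer_type() const override { return "scale"; }

  void forward_propagation(const std::vector<tiny_dnn::tensor_t *> &in_data,
                           std::vector<tiny_dnn::tensor_t *> &out_data) override {
    const tiny_dnn::tensor_t &in = *in_data[0];
    tiny_dnn::tensor_t &out      = *out_data[0];
    for (size_t s = 0; s < in.size(); s++)        // per sample
      for (size_t i = 0; i < in[s].size(); i++)   // per element
        out[s][i] = tiny_dnn::float_t(2) * in[s][i];
  }

  void back_propagation(const std::vector<tiny_dnn::tensor_t *> &in_data,
                        const std::vector<tiny_dnn::tensor_t *> &out_data,
                        std::vector<tiny_dnn::tensor_t *> &out_grad,
                        std::vector<tiny_dnn::tensor_t *> &in_grad) override {
    // y = 2x, so dE/dx = 2 * dE/dy; in_data and out_data are unused here.
    const tiny_dnn::tensor_t &dy = *out_grad[0];
    tiny_dnn::tensor_t &dx       = *in_grad[0];
    for (size_t s = 0; s < dy.size(); s++)
      for (size_t i = 0; i < dy[s].size(); i++)
        dx[s][i] = tiny_dnn::float_t(2) * dy[s][i];
  }

 private:
  tiny_dnn::serial_size_t dim_;
};

Layers with trainable parameters would additionally pass vector_type::weight / vector_type::bias to the base constructor (see the constructor documentation below) and typically override fan_in_size() / fan_out_size().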

Constructor & Destructor Documentation

◆ layer()

tiny_dnn::layer::layer ( const std::vector< vector_type > &  in_type,
const std::vector< vector_type > &  out_type 
)
inline

Default layer constructor that instantiates an N-input, M-output layer.

Parameters
in_type[N]   type of input vector (data, weight, bias, ...)
out_type[M]  type of output vector
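
For illustration only (the class name my_fc_layer is hypothetical, and vector_type::weight / vector_type::bias are the enumerators suggested by the parameter description above), a layer with one data input plus trainable weight and bias parameters and a single data output would pass N = 3 input types and M = 1 output type:

#include "tiny_dnn/tiny_dnn.h"

// Hypothetical sub-class showing only the base-class constructor call.
class my_fc_layer : public tiny_dnn::layer {
 public:
  my_fc_layer()
    : tiny_dnn::layer({tiny_dnn::vector_type::data,
                       tiny_dnn::vector_type::weight,
                       tiny_dnn::vector_type::bias},   // in_type[N], N = 3
                      {tiny_dnn::vector_type::data})   // out_type[M], M = 1
  {}
  // The pure virtual members (in_shape, out_shape, layer_type,
  // forward_propagation, back_propagation) are omitted in this fragment.
};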

Member Function Documentation

◆ back_propagation()

virtual void tiny_dnn::layer::back_propagation ( const std::vector< tensor_t * > &  in_data,
const std::vector< tensor_t * > &  out_data,
std::vector< tensor_t * > &  out_grad,
std::vector< tensor_t * > &  in_grad 
)
pure virtual
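
To make the delta convention quoted in the member list concrete (delta = dE/da with a = wx), the chain rule for a fully-connected layer with a = Wx + b gives the quantities a sub-class writes into in_grad and into its parameter gradients; this is standard calculus rather than library code:

\[
\delta_{\text{prev}} = \frac{\partial E}{\partial x} = W^{\top}\frac{\partial E}{\partial a},
\qquad
\frac{\partial E}{\partial W} = \frac{\partial E}{\partial a}\,x^{\top},
\qquad
\frac{\partial E}{\partial b} = \frac{\partial E}{\partial a}.
\]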

◆ fan_in_size()

virtual serial_size_t tiny_dnn::layer::fan_in_size ( ) const
inlinevirtual

◆ fan_out_size()

virtual serial_size_t tiny_dnn::layer::fan_out_size ( ) const
inlinevirtual
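
A hedged sketch of overriding these two hooks inside a hypothetical fully-connected-style sub-class (the members in_dim_ and out_dim_ are invented names): every output unit receives in_dim_ incoming connections and every input unit feeds out_dim_ outgoing connections, so:

// Inside the body of the hypothetical sub-class:
tiny_dnn::serial_size_t fan_in_size() const override  { return in_dim_;  }
tiny_dnn::serial_size_t fan_out_size() const override { return out_dim_; }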

◆ forward_propagation()

virtual void tiny_dnn::layer::forward_propagation ( const std::vector< tensor_t * > &  in_data,
std::vector< tensor_t * > &  out_data 
)
pure virtual

◆ in_size()

serial_size_t tiny_dnn::layer::in_size ( ) const
inline

Deprecated:
use in_data_size() instead

◆ out_size()

serial_size_t tiny_dnn::layer::out_size ( ) const
inline

Deprecated:
use out_data_size() instead
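
Given both deprecation notes, caller code should prefer the replacement accessors; a minimal usage sketch (the pointer name l is hypothetical):

// l is a tiny_dnn::layer* obtained elsewhere, e.g. from a network definition.
tiny_dnn::serial_size_t n_in  = l->in_data_size();   // replaces l->in_size()
tiny_dnn::serial_size_t n_out = l->out_data_size();  // replaces l->out_size()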

The documentation for this class was generated from the following files: