tiny_dnn  1.0.0
A header-only, dependency-free deep learning framework in C++11
tiny_dnn::deconvolutional_layer< Activation > Class Template Reference

2D deconvolution layer.

#include <deconvolutional_layer.h>

Inheritance diagram for tiny_dnn::deconvolutional_layer< Activation >
Collaboration diagram for tiny_dnn::deconvolutional_layer< Activation >

Public Types

typedef feedforward_layer< Activation > Base
 

Public Member Functions

 deconvolutional_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t window_size, serial_size_t in_channels, serial_size_t out_channels, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::default_engine())
 Constructs a deconvolutional layer.
 
 deconvolutional_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t window_width, serial_size_t window_height, serial_size_t in_channels, serial_size_t out_channels, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::default_engine())
 Constructs a deconvolutional layer.
 
 deconvolutional_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t window_size, serial_size_t in_channels, serial_size_t out_channels, const connection_table &connection_table, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::default_engine())
 Constructs a deconvolutional layer.
 
 deconvolutional_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t window_width, serial_size_t window_height, serial_size_t in_channels, serial_size_t out_channels, const connection_table &connection_table, padding pad_type=padding::valid, bool has_bias=true, serial_size_t w_stride=1, serial_size_t h_stride=1, backend_t backend_type=core::default_engine())
 Constructs a deconvolutional layer.
 
 deconvolutional_layer (deconvolutional_layer &&other)
 
virtual serial_size_t fan_in_size () const override
 number of incoming connections for each output unit
 
virtual serial_size_t fan_out_size () const override
 number of outgoing connections for each input unit; used only for weight/bias initialization methods which require the fan-out size (e.g. Xavier)
 
void forward_propagation (const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data) override
 
void back_propagation (const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad) override
 return delta of previous layer (delta = dE/da, where a = Wx in a fully-connected layer)
 
std::vector< index3d< serial_size_t > > in_shape () const override
 array of input shapes (width x height x depth)
 
std::vector< index3d< serial_size_t > > out_shape () const override
 array of output shapes (width x height x depth)
 
std::string layer_type () const override
 name of layer, should be unique for each concrete class
 
image weight_to_image () const
 
- Public Member Functions inherited from tiny_dnn::feedforward_layer< activation::identity >
 feedforward_layer (const std::vector< vector_type > &in_data_type)
 
activation::function & activation_function ()
 
std::pair< float_t, float_t > out_value_range () const override
 return output value range; used only for calculating the target value from a label-id in the final (output) layer. Override properly if the layer is intended to be used as an output layer.
 
void forward_activation (tensor_t &a_tensor, tensor_t &out_tensor)
 
void backward_activation (const tensor_t &prev_delta, const tensor_t &this_out, tensor_t &curr_delta)
 
- Public Member Functions inherited from tiny_dnn::layer
 layer (const std::vector< vector_type > &in_type, const std::vector< vector_type > &out_type)
 Default layer constructor that instantiates an N-input, M-output layer.
 
 layer (const layer &)=default
 
layer & operator= (const layer &)=default
 
void set_parallelize (bool parallelize)
 
void set_backend (std::shared_ptr< core::backend > backend)
 
void set_backend_type (core::backend_t backend_type)
 
bool parallelize () const
 
core::backend_t backend_type () const
 
core::backend_t engine () const
 
virtual std::string kernel_file () const
 
virtual std::string kernel_header () const
 
virtual void createOp ()
 
void setDevice (const Device &device)
 
Device * device () const
 
std::shared_ptr< core::backend > backend ()
 
serial_size_t in_channels () const
 number of incoming edges in this layer
 
serial_size_t out_channels () const
 number of outgoing edges in this layer
 
serial_size_t in_data_size () const
 
serial_size_t out_data_size () const
 
std::vector< shape3d > in_data_shape ()
 
std::vector< shape3d > out_data_shape ()
 
serial_size_t in_size () const
 
serial_size_t out_size () const
std::vector< const vec_t * > weights () const
 
std::vector< vec_t * > weights ()
 
std::vector< tensor_t * > weights_grads ()
 
std::vector< edgeptr_t > inputs ()
 
std::vector< edgeptr_t > outputs ()
 
std::vector< edgeptr_t > outputs () const
 
void set_out_grads (const std::vector< tensor_t > &grad)
 
void set_in_data (const std::vector< tensor_t > &data)
 
std::vector< tensor_t > output () const
 
std::vector< vector_type > in_types () const
 
std::vector< vector_type > out_types () const
 
void set_trainable (bool trainable)
 
bool trainable () const
 
template<typename WeightInit >
layer & weight_init (const WeightInit &f)
 
template<typename BiasInit >
layer & bias_init (const BiasInit &f)
 
template<typename WeightInit >
layer & weight_init (std::shared_ptr< WeightInit > f)
 
template<typename BiasInit >
layer & bias_init (std::shared_ptr< BiasInit > f)
 
template<typename Archive >
void serialize (Archive &ar)
 
virtual void save (std::ostream &os) const
 
virtual void load (std::istream &is)
 
virtual void load (const std::vector< float_t > &src, int &idx)
 
virtual image output_to_image (size_t channel=0) const
 visualize the latest output of this layer; the default implementation interprets the output as a 1d-vector, so "visual" layers (like convolutional layers) should override this for better visualization.
 
virtual void post_update ()
 return delta2 of previous layer (delta2 = d²E/da², the diagonal of the Hessian matrix); it is never called if the optimizer is Hessian-free
 
virtual void set_context (net_phase ctx)
 notify changing context (train <=> test)
 
std::vector< tensor_t > forward (const std::vector< tensor_t > &input)
 
std::vector< tensor_t > backward (const std::vector< tensor_t > &out_grads)
 
void forward ()
 
void backward ()
 
void setup (bool reset_weight)
 
void init_weight ()
 
void clear_grads ()
 
void update_weight (optimizer *o, serial_size_t batch_size)
 
bool has_same_weights (const layer &rhs, float_t eps) const
 
virtual void set_sample_count (serial_size_t sample_count)
 
template<class Archive >
void serialize_prolog (Archive &ar)
 
- Public Member Functions inherited from tiny_dnn::node
 node (serial_size_t in_size, serial_size_t out_size)
 
const std::vector< edgeptr_t > & prev () const
 
const std::vector< edgeptr_t > & next () const
 
serial_size_t prev_port (const edge &e) const
 
serial_size_t next_port (const edge &e) const
 
std::vector< node * > prev_nodes () const
 
std::vector< node * > next_nodes () const
 

Public Attributes

 CNN_USE_LAYER_MEMBERS
 
- Public Attributes inherited from tiny_dnn::feedforward_layer< activation::identity >
activation::identity h_
 

Additional Inherited Members

- Static Public Member Functions inherited from tiny_dnn::layer
template<typename InputArchive >
static std::shared_ptr< layer > load_layer (InputArchive &ia)
 generate layer from cereal's Archive
 
template<typename OutputArchive >
static void save_layer (OutputArchive &oa, const layer &l)
 
- Protected Attributes inherited from tiny_dnn::layer
bool initialized_
 Flag indicating whether the layer/node is initialized.
 
bool parallelize_
 Flag indicating whether the layer/node operations are parallelized.
 
serial_size_t in_channels_
 The number of input vectors/edges.
 
serial_size_t out_channels_
 The number of output vectors/edges.
 
std::vector< vector_type > in_type_
 Vector containing the type of data for inputs.
 
std::vector< vector_type > out_type_
 Vector containing the type of data for outputs.
 
core::backend_t backend_type_
 The current backend type for operations.
 
std::shared_ptr< core::backend > backend_
 The backend instance (deprecated)
 
Device * device_ptr_ = nullptr
 Pointer to the device on which the layer/node will run.
 
- Protected Attributes inherited from tiny_dnn::node
std::vector< edgeptr_t > prev_
 
std::vector< edgeptr_t > next_
 

Detailed Description

template<typename Activation = activation::identity>
class tiny_dnn::deconvolutional_layer< Activation >

2D deconvolution layer

Takes a two-dimensional image as input and applies a filtering operation.
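
A minimal usage sketch (not taken from the library documentation) of composing the layer into a sequential network; the header path and the relu activation follow common tiny_dnn conventions and should be treated as assumptions.

#include "tiny_dnn/tiny_dnn.h"

int main() {
    using namespace tiny_dnn;

    network<sequential> net;

    // upsample an 8x8 feature map with 32 channels into 16 output channels
    // using 3x3 kernels; padding, bias, strides and backend keep their defaults
    net << deconvolutional_layer<activation::relu>(8, 8, 3, 32, 16);
}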

Constructor & Destructor Documentation

◆ deconvolutional_layer() [1/4]

template<typename Activation = activation::identity>
tiny_dnn::deconvolutional_layer< Activation >::deconvolutional_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  window_size,
serial_size_t  in_channels,
serial_size_t  out_channels,
padding  pad_type = padding::valid,
bool  has_bias = true,
serial_size_t  w_stride = 1,
serial_size_t  h_stride = 1,
backend_t  backend_type = core::default_engine() 
)
inline

Constructs a deconvolutional layer.

Parameters
in_width      [in]  input image width
in_height     [in]  input image height
window_size   [in]  window (kernel) size of the convolution
in_channels   [in]  input image channels (grayscale=1, rgb=3)
out_channels  [in]  output image channels
pad_type      [in]  rounding strategy.
                    valid: use valid pixels of the input only; output-size = (in_width - window_size + 1) * (in_height - window_size + 1) * out_channels
                    same: add zero-padding to keep the same width/height; output-size = in_width * in_height * out_channels
has_bias      [in]  whether to add a bias vector to the filter outputs
w_stride      [in]  specify the horizontal interval at which to apply the filters to the input
h_stride      [in]  specify the vertical interval at which to apply the filters to the input

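As an illustration only (not part of the reference), a sketch of calling this overload directly and inspecting the shapes it reports; the exact output size depends on pad_type and the strides, so it is queried rather than assumed.

#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

int main() {
    // square 3x3 window, 1 input channel, 4 output channels,
    // default padding::valid, bias enabled, unit strides
    tiny_dnn::deconvolutional_layer<tiny_dnn::activation::identity> l(8, 8, 3, 1, 4);

    // out_shape()[0] is assumed to hold the data shape (width x height x depth)
    auto out = l.out_shape()[0];
    std::cout << l.layer_type() << " output: "
              << out.width_ << "x" << out.height_ << "x" << out.depth_ << "\n";
}
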
◆ deconvolutional_layer() [2/4]

template<typename Activation = activation::identity>
tiny_dnn::deconvolutional_layer< Activation >::deconvolutional_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  window_width,
serial_size_t  window_height,
serial_size_t  in_channels,
serial_size_t  out_channels,
padding  pad_type = padding::valid,
bool  has_bias = true,
serial_size_t  w_stride = 1,
serial_size_t  h_stride = 1,
backend_t  backend_type = core::default_engine() 
)
inline

Constructs a deconvolutional layer.

Parameters
in_width       [in]  input image width
in_height      [in]  input image height
window_width   [in]  window (kernel) width of the convolution
window_height  [in]  window (kernel) height of the convolution
in_channels    [in]  input image channels (grayscale=1, rgb=3)
out_channels   [in]  output image channels
pad_type       [in]  rounding strategy.
                     valid: use valid pixels of the input only; output-size = (in_width - window_width + 1) * (in_height - window_height + 1) * out_channels
                     same: add zero-padding to keep the same width/height; output-size = in_width * in_height * out_channels
has_bias       [in]  whether to add a bias vector to the filter outputs
w_stride       [in]  specify the horizontal interval at which to apply the filters to the input
h_stride       [in]  specify the vertical interval at which to apply the filters to the input

◆ deconvolutional_layer() [3/4]

template<typename Activation = activation::identity>
tiny_dnn::deconvolutional_layer< Activation >::deconvolutional_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  window_size,
serial_size_t  in_channels,
serial_size_t  out_channels,
const connection_table connection_table,
padding  pad_type = padding::valid,
bool  has_bias = true,
serial_size_t  w_stride = 1,
serial_size_t  h_stride = 1,
backend_t  backend_type = core::default_engine() 
)
inline

Constructs a deconvolutional layer.

Parameters
in_width          [in]  input image width
in_height         [in]  input image height
window_size       [in]  window (kernel) size of the convolution
in_channels       [in]  input image channels (grayscale=1, rgb=3)
out_channels      [in]  output image channels
connection_table  [in]  definition of connections between in-channels and out-channels
pad_type          [in]  rounding strategy.
                        valid: use valid pixels of the input only; output-size = (in_width - window_size + 1) * (in_height - window_size + 1) * out_channels
                        same: add zero-padding to keep the same width/height; output-size = in_width * in_height * out_channels
has_bias          [in]  whether to add a bias vector to the filter outputs
w_stride          [in]  specify the horizontal interval at which to apply the filters to the input
h_stride          [in]  specify the vertical interval at which to apply the filters to the input

◆ deconvolutional_layer() [4/4]

template<typename Activation = activation::identity>
tiny_dnn::deconvolutional_layer< Activation >::deconvolutional_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  window_width,
serial_size_t  window_height,
serial_size_t  in_channels,
serial_size_t  out_channels,
const connection_table connection_table,
padding  pad_type = padding::valid,
bool  has_bias = true,
serial_size_t  w_stride = 1,
serial_size_t  h_stride = 1,
backend_t  backend_type = core::default_engine() 
)
inline

Constructs a deconvolutional layer.

Parameters
in_width          [in]  input image width
in_height         [in]  input image height
window_width      [in]  window (kernel) width of the convolution
window_height     [in]  window (kernel) height of the convolution
in_channels       [in]  input image channels (grayscale=1, rgb=3)
out_channels      [in]  output image channels
connection_table  [in]  definition of connections between in-channels and out-channels
pad_type          [in]  rounding strategy.
                        valid: use valid pixels of the input only; output-size = (in_width - window_width + 1) * (in_height - window_height + 1) * out_channels
                        same: add zero-padding to keep the same width/height; output-size = in_width * in_height * out_channels
has_bias          [in]  whether to add a bias vector to the filter outputs
w_stride          [in]  specify the horizontal interval at which to apply the filters to the input
h_stride          [in]  specify the vertical interval at which to apply the filters to the input
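
For the two connection_table overloads, a hedged sketch of passing a sparse in-to-out channel mapping; the 2x4 table below is made up for illustration, and connection_table is assumed to live in tiny_dnn::core as it does for the convolutional layer.

#include "tiny_dnn/tiny_dnn.h"

int main() {
    // 2 input channels x 4 output channels; true marks a connected pair
    static const bool tbl[] = {
        true,  false, true,  false,   // in-channel 0 -> out-channels 0 and 2
        false, true,  false, true     // in-channel 1 -> out-channels 1 and 3
    };

    tiny_dnn::deconvolutional_layer<tiny_dnn::activation::identity>
        l(8, 8, 3, 2, 4, tiny_dnn::core::connection_table(tbl, 2, 4));
}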

Member Function Documentation

◆ back_propagation()

template<typename Activation = activation::identity>
void tiny_dnn::deconvolutional_layer< Activation >::back_propagation ( const std::vector< tensor_t * > &  in_data,
const std::vector< tensor_t * > &  out_data,
std::vector< tensor_t * > &  out_grad,
std::vector< tensor_t * > &  in_grad 
)
inline override virtual

return delta of previous layer (delta = dE/da, where a = Wx in a fully-connected layer)

Parameters
worker_index  id of the current worker-task
in_data       input vectors (same vectors as forward_propagation)
out_data      output vectors (same vectors as forward_propagation)
out_grad      gradient of output vectors (the i-th vector corresponds to out_data[i])
in_grad       gradient of input vectors (the i-th vector corresponds to in_data[i])

Implements tiny_dnn::layer.

◆ fan_out_size()

template<typename Activation = activation::identity>
virtual serial_size_t tiny_dnn::deconvolutional_layer< Activation >::fan_out_size ( ) const
inline override virtual

number of outgoing connections for each input unit; used only for weight/bias initialization methods which require the fan-out size (e.g. Xavier). Override if the layer has trainable weights and the scale of initialization matters.

Reimplemented from tiny_dnn::layer.
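
To show where the fan-out figure is consumed, a small sketch using the weight_init() and setup() members listed on this page; the assumption that setup(true) (re)initializes the weights with the chosen scheme is marked in the comment.

#include "tiny_dnn/tiny_dnn.h"

int main() {
    tiny_dnn::deconvolutional_layer<tiny_dnn::activation::identity> l(8, 8, 3, 4, 8);

    // xavier scales its sampling range from fan_in_size() and fan_out_size()
    l.weight_init(tiny_dnn::weight_init::xavier());
    l.setup(true);  // assumption: allocates buffers and (re)initializes the weights
}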

◆ forward_propagation()

template<typename Activation = activation::identity>
void tiny_dnn::deconvolutional_layer< Activation >::forward_propagation ( const std::vector< tensor_t * > &  in_data,
std::vector< tensor_t * > &  out_data 
)
inline override virtual
Parameters
in_data   input vectors of this layer (data, weight, bias)
out_data  output vectors

Implements tiny_dnn::layer.
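
A rough end-to-end sketch driving the layer on its own through the inherited layer::forward()/layer::backward() helpers listed above; normally the network<> classes call forward_propagation()/back_propagation() internally, so the tensor layout used here (one sample per tensor_t, one tensor per data input) is an assumption.

#include "tiny_dnn/tiny_dnn.h"

int main() {
    using namespace tiny_dnn;

    deconvolutional_layer<activation::identity> l(4, 4, 3, 1, 1);
    l.setup(true);  // assumption: allocates in/out buffers and initializes weights

    // one sample holding the 4x4x1 input, filled with ones
    std::vector<tensor_t> in  = { tensor_t(1, vec_t(l.in_data_size(), float_t(1))) };
    std::vector<tensor_t> out = l.forward(in);

    // push a dummy gradient of ones back through the layer
    std::vector<tensor_t> grad    = { tensor_t(1, vec_t(l.out_data_size(), float_t(1))) };
    std::vector<tensor_t> in_grad = l.backward(grad);

    (void)out; (void)in_grad;
}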


The documentation for this class was generated from the following file:
deconvolutional_layer.h