tiny_dnn  1.0.0
A header only, dependency-free deep learning framework in C++11
Classes | Public Types | Public Member Functions | Static Public Member Functions | Public Attributes | List of all members
tiny_dnn::max_unpooling_layer< Activation > Class Template Reference

applies max-unpooling operation to the spatial data More...

#include <max_unpooling_layer.h>

Inheritance diagram for tiny_dnn::max_unpooling_layer< Activation >:
Inheritance graph
[legend]
Collaboration diagram for tiny_dnn::max_unpooling_layer< Activation >:
Collaboration graph
[legend]

Public Types

typedef feedforward_layer< Activation > Base
 

Public Member Functions

 max_unpooling_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t in_channels, serial_size_t unpooling_size)
 
 max_unpooling_layer (const shape3d &in_size, serial_size_t unpooling_size, serial_size_t stride)
 
 max_unpooling_layer (serial_size_t in_width, serial_size_t in_height, serial_size_t in_channels, serial_size_t unpooling_size, serial_size_t stride)
 
size_t fan_in_size () const override
 number of incoming connections for each output unit used only for weight/bias initialization methods which require fan-in size (e.g. More...
 
size_t fan_out_size () const override
 number of outgoing connections for each input unit used only for weight/bias initialization methods which require fan-out size (e.g. More...
 
void forward_propagation (serial_size_t index, const std::vector< vec_t * > &in_data, std::vector< vec_t * > &out_data) override
 
void back_propagation (serial_size_t index, const std::vector< vec_t * > &in_data, const std::vector< vec_t * > &out_data, std::vector< vec_t * > &out_grad, std::vector< vec_t * > &in_grad) override
 
std::vector< index3d< serial_size_t > > in_shape () const override
 array of input shapes (width x height x depth)
 
std::vector< index3d< serial_size_t > > out_shape () const override
 array of output shapes (width x height x depth)
 
std::string layer_type () const override
 name of layer, should be unique for each concrete class
 
size_t unpool_size () const
 
virtual void set_worker_count (serial_size_t worker_count) override
 
template<class Archive >
void serialize (Archive &ar)
 
- Public Member Functions inherited from tiny_dnn::feedforward_layer< activation::identity >
 feedforward_layer (const std::vector< vector_type > &in_data_type)
 
activation::function & activation_function ()
 
std::pair< float_t, float_t > out_value_range () const override
 return output value range; used only for calculating the target value from a label-id in the final (output) layer. Override properly if the layer is intended to be used as an output layer.
 
void forward_activation (tensor_t &a_tensor, tensor_t &out_tensor)
 
void backward_activation (const tensor_t &prev_delta, const tensor_t &this_out, tensor_t &curr_delta)
 
- Public Member Functions inherited from tiny_dnn::layer
 layer (const std::vector< vector_type > &in_type, const std::vector< vector_type > &out_type)
 Default layer constructor that instantiates an N-input, M-output layer. More...
 
 layer (const layer &)=default
 
layer & operator= (const layer &)=default
 
void set_parallelize (bool parallelize)
 
void set_backend (std::shared_ptr< core::backend > backend)
 
void set_backend_type (core::backend_t backend_type)
 
bool parallelize () const
 
core::backend_t backend_type () const
 
core::backend_t engine () const
 
virtual std::string kernel_file () const
 
virtual std::string kernel_header () const
 
virtual void createOp ()
 
void setDevice (const Device &device)
 
Device * device () const
 
std::shared_ptr< core::backend > backend ()
 
serial_size_t in_channels () const
 number of incoming edges in this layer
 
serial_size_t out_channels () const
 number of outgoing edges in this layer
 
serial_size_t in_data_size () const
 
serial_size_t out_data_size () const
 
std::vector< shape3d > in_data_shape ()
 
std::vector< shape3d > out_data_shape ()
 
serial_size_t in_size () const
 ! More...
 
serial_size_t out_size () const
 ! More...
 
std::vector< const vec_t * > weights () const
 
std::vector< vec_t * > weights ()
 
std::vector< tensor_t * > weights_grads ()
 
std::vector< edgeptr_t > inputs ()
 
std::vector< edgeptr_t > outputs ()
 
std::vector< edgeptr_t > outputs () const
 
void set_out_grads (const std::vector< tensor_t > &grad)
 
void set_in_data (const std::vector< tensor_t > &data)
 
std::vector< tensor_t > output () const
 
std::vector< vector_type > in_types () const
 
std::vector< vector_type > out_types () const
 
void set_trainable (bool trainable)
 
bool trainable () const
 
template<typename WeightInit >
layer & weight_init (const WeightInit &f)
 
template<typename BiasInit >
layer & bias_init (const BiasInit &f)
 
template<typename WeightInit >
layer & weight_init (std::shared_ptr< WeightInit > f)
 
template<typename BiasInit >
layer & bias_init (std::shared_ptr< BiasInit > f)
 
template<typename Archive >
void serialize (Archive &ar)
 
virtual void save (std::ostream &os) const
 
virtual void load (std::istream &is)
 
virtual void load (const std::vector< float_t > &src, int &idx)
 
virtual image output_to_image (size_t channel=0) const
 Visualize the latest output of this layer. The default implementation interprets the output as a 1d-vector, so a "visual" layer (like a convolutional layer) should override this for better visualization.
 
virtual void forward_propagation (const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data)=0
 
virtual void back_propagation (const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad)=0
 return delta of previous layer ($\delta = \frac{dE}{da}$, where $a = wx$ in a fully-connected layer) More...
 
virtual void post_update ()
 return delta2 of previous layer ($\delta_2 = \frac{d^2E}{da^2}$, the diagonal of the Hessian matrix); it is never called if the optimizer is Hessian-free
 
virtual void set_context (net_phase ctx)
 notify changing context (train <=> test)
 
std::vector< tensor_t > forward (const std::vector< tensor_t > &input)
 
std::vector< tensor_t > backward (const std::vector< tensor_t > &out_grads)
 
void forward ()
 
void backward ()
 
void setup (bool reset_weight)
 
void init_weight ()
 
void clear_grads ()
 
void update_weight (optimizer *o, serial_size_t batch_size)
 
bool has_same_weights (const layer &rhs, float_t eps) const
 
virtual void set_sample_count (serial_size_t sample_count)
 
template<class Archive >
void serialize_prolog (Archive &ar)
 
- Public Member Functions inherited from tiny_dnn::node
 node (serial_size_t in_size, serial_size_t out_size)
 
const std::vector< edgeptr_t > & prev () const
 
const std::vector< edgeptr_t > & next () const
 
serial_size_t prev_port (const edge &e) const
 
serial_size_t next_port (const edge &e) const
 
std::vector< node * > prev_nodes () const
 
std::vector< node * > next_nodes () const
 

Static Public Member Functions

template<class Archive >
static void load_and_construct (Archive &ar, cereal::construct< max_unpooling_layer > &construct)
 
- Static Public Member Functions inherited from tiny_dnn::layer
template<typename InputArchive >
static std::shared_ptr< layer > load_layer (InputArchive &ia)
 generate layer from cereal's Archive
 
template<typename OutputArchive >
static void save_layer (OutputArchive &oa, const layer &l)
 

Public Attributes

 CNN_USE_LAYER_MEMBERS
 
- Public Attributes inherited from tiny_dnn::feedforward_layer< activation::identity >
activation::identity h_
 

Additional Inherited Members

- Protected Attributes inherited from tiny_dnn::layer
bool initialized_
 Flag indicating whether the layer/node is initialized.
 
bool parallelize_
 Flag indicating whether the layer/node operations are parallelized.
 
serial_size_t in_channels_
 The number of input vectors/edges.
 
serial_size_t out_channels_
 The number of output vectors/edges.
 
std::vector< vector_type > in_type_
 Vector containing the type of data for inputs.
 
std::vector< vector_type > out_type_
 Vector containing the type of data for outputs.
 
core::backend_t backend_type_
 The current backend type for operations.
 
std::shared_ptr< core::backend > backend_
 The backend instance (deprecated)
 
Device * device_ptr_ = nullptr
 Pointer to the device on which the layer/node will run.
 
- Protected Attributes inherited from tiny_dnn::node
std::vector< edgeptr_t > prev_
 
std::vector< edgeptr_t > next_
 

Detailed Description

template<typename Activation = activation::identity>
class tiny_dnn::max_unpooling_layer< Activation >

applies max-unpooling operation to the spatial data

Constructor & Destructor Documentation

◆ max_unpooling_layer() [1/2]

template<typename Activation = activation::identity>
tiny_dnn::max_unpooling_layer< Activation >::max_unpooling_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  in_channels,
serial_size_t  unpooling_size 
)
inline
Parameters
in_width[in] width of input image
in_height[in] height of input image
in_channels[in] the number of input image channels(depth)
unpooling_size[in] factor by which to upscale

◆ max_unpooling_layer() [2/2]

template<typename Activation = activation::identity>
tiny_dnn::max_unpooling_layer< Activation >::max_unpooling_layer ( serial_size_t  in_width,
serial_size_t  in_height,
serial_size_t  in_channels,
serial_size_t  unpooling_size,
serial_size_t  stride 
)
inline
Parameters
in_width[in] width of input image
in_height[in] height of input image
in_channels[in] the number of input image channels(depth)
unpooling_size[in] factor by which to upscale
stride[in] interval at which to apply the filters to the input

Member Function Documentation

◆ fan_in_size()

template<typename Activation = activation::identity>
size_t tiny_dnn::max_unpooling_layer< Activation >::fan_in_size ( ) const
inlineoverridevirtual

number of incoming connections for each output unit used only for weight/bias initialization methods which require fan-in size (e.g.

xavier) override if the layer has trainable weights, and scale of initialization is important

Reimplemented from tiny_dnn::layer.

◆ fan_out_size()

template<typename Activation = activation::identity>
size_t tiny_dnn::max_unpooling_layer< Activation >::fan_out_size ( ) const
inlineoverridevirtual

number of outgoing connections for each input unit used only for weight/bias initialization methods which require fan-out size (e.g.

xavier) override if the layer has trainable weights, and scale of initialization is important

Reimplemented from tiny_dnn::layer.


The documentation for this class was generated from the following file: