|
| partial_connected_layer (serial_size_t in_dim, serial_size_t out_dim, size_t weight_dim, size_t bias_dim, float_t scale_factor=float_t(1)) |
|
size_t | param_size () const |
|
serial_size_t | fan_in_size () const override |
| number of incoming connections for each output unit; used only for weight/bias initialization methods which require the fan-in size (e.g. Xavier initialization)
|
|
serial_size_t | fan_out_size () const override |
| number of outgoing connections for each input unit; used only for weight/bias initialization methods which require the fan-out size (e.g. Xavier initialization)
|
|
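fan_in_size()/fan_out_size() exist so that scale-dependent initializers can size their random distributions. A minimal, library-independent sketch of the kind of computation such an initializer performs (xavier_scale is an illustrative name, not part of this API):

    #include <cmath>
    #include <cstddef>

    // Xavier/Glorot uniform bound derived from fan-in and fan-out;
    // weights would then be drawn from U(-scale, +scale).
    float xavier_scale(std::size_t fan_in, std::size_t fan_out) {
        return std::sqrt(6.0f / static_cast<float>(fan_in + fan_out));
    }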
void | connect_weight (serial_size_t input_index, serial_size_t output_index, serial_size_t weight_index) |
|
void | connect_bias (serial_size_t bias_index, serial_size_t output_index) |
|
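connect_weight() and connect_bias() register individual input-to-output links and their shared weight/bias slots; derived layers call them in loops to build a sparse, weight-sharing connection table. A hedged sketch of how a derived pooling-style layer could wire one output unit to a 2x2 input window (wire_window and its index arithmetic are illustrative, it lives inside such a hypothetical derived class, and the class's other required overrides are omitted):

    // Illustrative member of a hypothetical partial_connected_layer subclass:
    // hook the 2x2 input patch at (x, y) up to one output unit, sharing a
    // single weight (index 0) and a single bias (index 0) across the patch.
    void wire_window(serial_size_t out_index,
                     serial_size_t x, serial_size_t y, serial_size_t in_w) {
        for (serial_size_t dy = 0; dy < 2; ++dy) {
            for (serial_size_t dx = 0; dx < 2; ++dx) {
                serial_size_t in_index = (y + dy) * in_w + (x + dx);
                connect_weight(in_index, out_index, 0);  // shared weight slot 0
            }
        }
        connect_bias(0, out_index);                      // shared bias slot 0
    }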
void | forward_propagation (const std::vector< tensor_t * > &in_data, std::vector< tensor_t * > &out_data) override |
|
void | back_propagation (const std::vector< tensor_t * > &in_data, const std::vector< tensor_t * > &out_data, std::vector< tensor_t * > &out_grad, std::vector< tensor_t * > &in_grad) override |
| return delta of the previous layer (\delta = \frac{dE}{da}, where a = Wx in a fully-connected layer)
|
|
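For the fully-connected case referenced in the brief above, the quantities back_propagation() propagates follow from the chain rule; as a standard, library-independent reference:

    a = W x + b, \qquad \delta = \frac{\partial E}{\partial a}, \qquad
    \frac{\partial E}{\partial x} = W^{\top}\delta, \qquad
    \frac{\partial E}{\partial W} = \delta\, x^{\top}, \qquad
    \frac{\partial E}{\partial b} = \delta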
| feedforward_layer (const std::vector< vector_type > &in_data_type) |
|
activation::function & | activation_function () |
|
std::pair< float_t, float_t > | out_value_range () const override |
| return the output value range; used only for calculating the target value from a label-id in the final (output) layer. Override this properly if the layer is intended to be used as an output layer.
|
|
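out_value_range() lets the training code map a class label onto concrete target values for the output layer. A hedged sketch of such a helper (label_to_target is an illustrative name, not part of this API):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Fill a one-hot-style target vector: the true class gets the top of the
    // layer's output range, every other unit gets the bottom.
    std::vector<float> label_to_target(std::size_t label, std::size_t num_classes,
                                       std::pair<float, float> range) {
        std::vector<float> t(num_classes, range.first);
        t[label] = range.second;
        return t;
    }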
void | forward_activation (tensor_t &a_tensor, tensor_t &out_tensor) |
|
void | backward_activation (const tensor_t &prev_delta, const tensor_t &this_out, tensor_t &curr_delta) |
|
| layer (const std::vector< vector_type > &in_type, const std::vector< vector_type > &out_type) |
| Default layer constructor that instantiates an N-input, M-output layer.
|
|
| layer (const layer &)=default |
|
layer & | operator= (const layer &)=default |
|
void | set_parallelize (bool parallelize) |
|
void | set_backend (std::shared_ptr< core::backend > backend) |
|
void | set_backend_type (core::backend_t backend_type) |
|
bool | parallelize () const |
|
core::backend_t | backend_type () const |
|
core::backend_t | engine () const |
|
virtual std::string | kernel_file () const |
|
virtual std::string | kernel_header () const |
|
virtual void | createOp () |
|
void | setDevice (const Device &device) |
|
Device * | device () const |
|
std::shared_ptr< core::backend > | backend () |
|
serial_size_t | in_channels () const |
| number of incoming edges in this layer
|
|
serial_size_t | out_channels () const |
| number of outgoing edges in this layer
|
|
serial_size_t | in_data_size () const |
|
serial_size_t | out_data_size () const |
|
std::vector< shape3d > | in_data_shape () |
|
std::vector< shape3d > | out_data_shape () |
|
serial_size_t | in_size () const |
|
serial_size_t | out_size () const |
|
std::vector< const vec_t * > | weights () const |
|
std::vector< vec_t * > | weights () |
|
std::vector< tensor_t * > | weights_grads () |
|
std::vector< edgeptr_t > | inputs () |
|
std::vector< edgeptr_t > | outputs () |
|
std::vector< edgeptr_t > | outputs () const |
|
void | set_out_grads (const std::vector< tensor_t > &grad) |
|
void | set_in_data (const std::vector< tensor_t > &data) |
|
std::vector< tensor_t > | output () const |
|
std::vector< vector_type > | in_types () const |
|
std::vector< vector_type > | out_types () const |
|
void | set_trainable (bool trainable) |
|
bool | trainable () const |
|
virtual std::vector< shape3d > | in_shape () const =0 |
| array of input shapes (width x height x depth)
|
|
virtual std::vector< shape3d > | out_shape () const =0 |
| array of output shapes (width x height x depth)
|
|
virtual std::string | layer_type () const =0 |
| name of layer, should be unique for each concrete class
|
|
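Together with forward_propagation()/back_propagation() listed near the top of this page, the three pure-virtual members above are what a concrete layer must implement. A hedged sketch of a minimal pass-through layer (illustrative class; it assumes vector_type::data, shape3d and tensor_t behave as listed elsewhere on this page):

    // Minimal pass-through layer sketch: one data input, one data output.
    class identity_layer : public layer {
     public:
        explicit identity_layer(const shape3d &shape)
            : layer({vector_type::data}, {vector_type::data}), shape_(shape) {}

        std::vector<shape3d> in_shape() const override  { return {shape_}; }
        std::vector<shape3d> out_shape() const override { return {shape_}; }
        std::string layer_type() const override { return "identity"; }

        void forward_propagation(const std::vector<tensor_t *> &in_data,
                                 std::vector<tensor_t *> &out_data) override {
            *out_data[0] = *in_data[0];           // copy input to output
        }

        void back_propagation(const std::vector<tensor_t *> &in_data,
                              const std::vector<tensor_t *> &out_data,
                              std::vector<tensor_t *> &out_grad,
                              std::vector<tensor_t *> &in_grad) override {
            (void)in_data; (void)out_data;
            *in_grad[0] = *out_grad[0];           // pass the gradient straight through
        }

     private:
        shape3d shape_;
    };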
template<typename WeightInit > |
layer & | weight_init (const WeightInit &f) |
|
template<typename BiasInit > |
layer & | bias_init (const BiasInit &f) |
|
template<typename WeightInit > |
layer & | weight_init (std::shared_ptr< WeightInit > f) |
|
template<typename BiasInit > |
layer & | bias_init (std::shared_ptr< BiasInit > f) |
|
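weight_init()/bias_init() return layer&, so they chain. A hedged usage sketch, assuming initializer functors such as weight_init::xavier and weight_init::constant are available as in tiny-dnn, with my_layer standing for any concrete layer instance:

    // Choose Xavier for weights and constant zero for biases; the functors
    // are consulted later, when init_weight()/setup() runs.
    my_layer.weight_init(weight_init::xavier())
            .bias_init(weight_init::constant(0.0));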
template<typename Archive > |
void | serialize (Archive &ar) |
|
virtual void | save (std::ostream &os) const |
|
virtual void | load (std::istream &is) |
|
virtual void | load (const std::vector< float_t > &src, int &idx) |
|
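save()/load() stream the layer's weights, which is handy for persisting a single layer without serializing a whole network. A hedged sketch using the std::ostream/std::istream overloads listed above, with some_layer standing for any concrete layer instance:

    #include <fstream>

    {   // write the weights out
        std::ofstream ofs("layer_weights.txt");
        some_layer.save(ofs);
    }
    {   // read them back into a layer with the same architecture
        std::ifstream ifs("layer_weights.txt");
        some_layer.load(ifs);
    }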
virtual image | output_to_image (size_t channel=0) const |
| visualize the latest output of this layer. The default implementation interprets the output as a 1-D vector, so "visual" layers (like convolutional layers) should override this for better visualization.
|
|
virtual void | post_update () |
| return delta2 of the previous layer (\delta_2 = \frac{d^2E}{da^2}, the diagonal of the Hessian matrix); it is never called if the optimizer is Hessian-free
|
|
virtual void | set_context (net_phase ctx) |
| notify the layer that the context has changed (train <=> test)
|
|
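Layers whose behavior differs between training and inference (e.g. dropout, batch normalization) react to this notification. A hedged usage sketch, assuming the net_phase enum exposes train and test values as in tiny-dnn:

    some_layer.set_context(net_phase::test);   // switch the layer to inference behavior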
std::vector< tensor_t > | forward (const std::vector< tensor_t > &input) |
|
std::vector< tensor_t > | backward (const std::vector< tensor_t > &out_grads) |
|
void | forward () |
|
void | backward () |
|
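The stand-alone forward()/backward() overloads make it possible to probe a single layer outside a network. A hedged sketch, assuming tensor_t is a per-sample collection of vec_t rows as elsewhere in tiny-dnn and some_layer is any concrete layer instance:

    some_layer.setup(true);   // allocate buffers and initialize weights (listed just below)

    tensor_t sample = {vec_t(some_layer.in_data_size(), 0.5)};  // one dummy sample
    std::vector<tensor_t> out = some_layer.forward({sample});
    // out[0][0] now holds the layer's output vector for that sample.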
void | setup (bool reset_weight) |
|
void | init_weight () |
|
void | clear_grads () |
|
void | update_weight (optimizer *o, serial_size_t batch_size) |
|
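Taken together with forward()/backward() above, these housekeeping members outline one manual mini-batch step. A hedged sketch in which compute_loss_gradients is a user-supplied placeholder, adagrad stands in for any available optimizer, and batch_inputs/batch_targets/batch_size are assumed to exist:

    adagrad opt;                                   // any optimizer works via optimizer*
    some_layer.setup(true);                        // allocate edges, initialize weights

    std::vector<tensor_t> y  = some_layer.forward(batch_inputs);
    std::vector<tensor_t> dy = compute_loss_gradients(y, batch_targets);  // placeholder
    some_layer.backward(dy);                       // accumulate weight/bias gradients
    some_layer.update_weight(&opt, batch_size);    // apply one optimizer step
    some_layer.clear_grads();                      // reset accumulators for the next batch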
bool | has_same_weights (const layer &rhs, float_t eps) const |
|
virtual void | set_sample_count (serial_size_t sample_count) |
|
template<class Archive > |
void | serialize_prolog (Archive &ar) |
|
| node (serial_size_t in_size, serial_size_t out_size) |
|
const std::vector< edgeptr_t > & | prev () const |
|
const std::vector< edgeptr_t > & | next () const |
|
serial_size_t | prev_port (const edge &e) const |
|
serial_size_t | next_port (const edge &e) const |
|
std::vector< node * > | prev_nodes () const |
|
std::vector< node * > | next_nodes () const |
|