virtual void                     | backward (const std::vector< tensor_t > &first)=0
                                 |     propagate gradient
virtual std::vector< tensor_t >  | forward (const std::vector< tensor_t > &first)=0
virtual void                     | update_weights (optimizer *opt, int batch_size)
                                 |     update weights and clear all gradients
virtual void                     | setup (bool reset_weight)
                                 |     setup all weights, must be called before forward/backward
void                             | clear_grads ()
size_t                           | size () const
iterator                         | begin ()
iterator                         | end ()
const_iterator                   | begin () const
const_iterator                   | end () const
layer *                          | operator[] (size_t index)
const layer *                    | operator[] (size_t index) const
serial_size_t                    | in_data_size () const
serial_size_t                    | out_data_size () const
template<typename T >
const T &                        | at (size_t index) const
template<typename T >
T &                              | at (size_t index)
virtual float_t                  | target_value_min (int out_channel=0) const
virtual float_t                  | target_value_max (int out_channel=0) const
void                             | save (std::ostream &os) const
void                             | load (std::istream &is)
virtual void                     | load (const std::vector< float_t > &vec)
void                             | label2vec (const label_t *t, serial_size_t num, std::vector< vec_t > *vec) const
template<typename OutputArchive >
void                             | save_model (OutputArchive &oa) const
template<typename InputArchive >
void                             | load_model (InputArchive &ia)
template<typename OutputArchive >
void                             | save_weights (OutputArchive &oa) const
template<typename InputArchive >
void                             | load_weights (InputArchive &ia)
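The stream-based save/load pair above is the simplest way to persist a trained network, while the save_model/load_model and save_weights/load_weights templates take a serialization archive instead. A minimal sketch of the stream form only; net stands in for any fully constructed object of a class derived from this one, and the file name is illustrative:
#include <fstream>
std::ofstream ofs("net-weights.txt");   // illustrative file name
net.save(ofs);                          // write the network state to the stream
ofs.close();
std::ifstream ifs("net-weights.txt");
net.load(ifs);                          // read the state back from the stream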
Base class of the various network types (sequential, multi-in/multi-out). This class holds a list of pointers to Node and provides the entry point for forward/backward operations. A Node is the computational unit of tiny-dnn (for example, a convolution layer). Two implementations are currently available: sequential and graph.
Nodes can accept lvalue, rvalue, and shared_ptr forms of a node. If the given type is an rvalue or a shared_ptr, nodes creates a shared_ptr<node> to keep the given node alive. If the given type is an lvalue, tiny-dnn holds only a raw pointer (to avoid a double free), so the caller must keep that object alive for the lifetime of the network.
sequential s;
s.add(fc<tan_h>(100, 200)); // rvalue, moved into nodes
s.add(std::make_shared<fc<tan_h>>(200, 100)); // shared_ptr, shared by nodes
fc<softmax> out(100, 10);
s.add(out); // lvalue, hold raw-pointer only
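Once a network has been assembled, the virtual members listed above drive a single training step. The following is a minimal sketch only, assuming the sequential s built above, an adagrad optimizer, and caller-supplied input/gradient tensors; the 100- and 10-element vectors simply match the first and last fc layers in the example, and a batch of one sample is used for brevity.
s.setup(true);                                    // initialize all weights before forward/backward
adagrad opt;                                      // illustrative choice; any tiny-dnn optimizer works
std::vector<tensor_t> in  { tensor_t{ vec_t(100, 0.5) } };  // one 100-dim input sample
std::vector<tensor_t> out = s.forward(in);        // forward pass through every node
std::vector<tensor_t> grad{ tensor_t{ vec_t(10, 0.1) } };   // gradient of the loss w.r.t. the 10 outputs
s.backward(grad);                                 // propagate gradient
s.update_weights(&opt, 1);                        // apply the optimizer and clear all gradients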