tiny_dnn  1.0.0
A header only, dependency-free deep learning framework in C++11
Public Types | Public Member Functions | Protected Member Functions | Friends | List of all members
tiny_dnn::network< NetType > Class Template Reference

A model of neural networks in tiny-dnn. More...

#include <network.h>

Public Types

typedef std::vector< layerptr_t >::iterator iterator
 
typedef std::vector< layerptr_t >::const_iterator const_iterator
 

Public Member Functions

 network (const std::string &name="")
 
std::string name () const
 name of the network
 
void init_weight ()
 explicitly initialize weights of all layers
 
vec_t predict (const vec_t &in)
 executes forward-propagation and returns output
 
tensor_t predict (const tensor_t &in)
 executes forward-propagation and returns output
 
std::vector< tensor_t > predict (const std::vector< tensor_t > &in)
 executes forward-propagation and returns output
 
float_t predict_max_value (const vec_t &in)
 executes forward-propagation and returns maximum output
 
label_t predict_label (const vec_t &in)
 executes forward-propagation and returns maximum output index
 
template<typename Range >
vec_t predict (const Range &in)
 executes forward-propagation and returns output More...
 
template<typename Error , typename Optimizer , typename OnBatchEnumerate , typename OnEpochEnumerate >
bool train (Optimizer &optimizer, const std::vector< vec_t > &inputs, const std::vector< label_t > &class_labels, size_t batch_size, int epoch, OnBatchEnumerate on_batch_enumerate, OnEpochEnumerate on_epoch_enumerate, const bool reset_weights=false, const int n_threads=CNN_TASK_SIZE, const std::vector< vec_t > &t_cost=std::vector< vec_t >())
 trains the network for a fixed number of epochs (for classification task) More...
 
template<typename Error , typename Optimizer , typename OnBatchEnumerate , typename OnEpochEnumerate , typename T , typename U >
bool fit (Optimizer &optimizer, const std::vector< T > &inputs, const std::vector< U > &desired_outputs, size_t batch_size, int epoch, OnBatchEnumerate on_batch_enumerate, OnEpochEnumerate on_epoch_enumerate, const bool reset_weights=false, const int n_threads=CNN_TASK_SIZE, const std::vector< U > &t_cost=std::vector< U >())
 trains the network for a fixed number of epochs to generate desired output. More...
 
template<typename Error , typename Optimizer , typename T , typename U >
bool fit (Optimizer &optimizer, const std::vector< T > &inputs, const std::vector< U > &desired_outputs, size_t batch_size=1, int epoch=1)
 
template<typename Error , typename Optimizer >
bool train (Optimizer &optimizer, const std::vector< vec_t > &inputs, const std::vector< label_t > &class_labels, size_t batch_size=1, int epoch=1)
 
template<typename Error , typename Optimizer >
bool train (Optimizer &optimizer, const std::vector< vec_t > &in, const std::vector< vec_t > &t, size_t batch_size=1, int epoch=1)
 
void set_netphase (net_phase phase)
 set the netphase to train or test More...
 
result test (const std::vector< vec_t > &in, const std::vector< label_t > &t)
 test and generate confusion-matrix for classification task
 
std::vector< vec_t > test (const std::vector< vec_t > &in)
 generate output for each input
 
template<typename E >
float_t get_loss (const std::vector< vec_t > &in, const std::vector< vec_t > &t)
 calculate loss value (the smaller, the better) for regression task
 
template<typename E , typename T >
float_t get_loss (const std::vector< T > &in, const std::vector< tensor_t > &t)
 calculate loss value (the smaller, the better) for regression task
 
template<typename E >
bool gradient_check (const std::vector< tensor_t > &in, const std::vector< std::vector< label_t >> &t, float_t eps, grad_check_mode mode)
 checking gradients calculated by bprop detail information: http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization
 
size_t layer_size () const
 return number of layers
 
size_t depth () const
 
const layer * operator[] (size_t index) const
 return raw pointer of index-th layer
 
layer * operator[] (size_t index)
 return raw pointer of index-th layer
 
template<typename T >
const T & at (size_t index) const
 return index-th layer as <T> throw nn_error if index-th layer cannot be converted to T
 
template<typename T >
T & at (size_t index)
 
serial_size_t out_data_size () const
 return total number of elements of output data
 
serial_size_t in_data_size () const
 return total number of elements of input data
 
template<typename WeightInit >
network & weight_init (const WeightInit &f)
 set weight initializer to all layers
 
template<typename BiasInit >
network & bias_init (const BiasInit &f)
 set bias initializer to all layers
 
template<typename T >
bool has_same_weights (const network< T > &rhs, float_t eps) const
 returns true if the two networks have almost (within eps) the same weights
 
iterator begin ()
 
iterator end ()
 
const_iterator begin () const
 
const_iterator end () const
 
void load (const std::string &filename, content_type what=content_type::weights_and_model, file_format format=file_format::binary)
 
void save (const std::string &filename, content_type what=content_type::weights_and_model, file_format format=file_format::binary) const
 
std::string to_json () const
 save the network architecture as json string
 
void from_json (const std::string &json_string)
 load the network architecture from json string More...
 
void save (std::ostream &os) const
 
void load (std::istream &is)
 
void fast_load (const char *filepath)
 load network weights from filepath, 30 times faster than stream reading More...
 
template<typename OutputArchive >
void to_archive (OutputArchive &ar, content_type what=content_type::weights_and_model) const
 
template<typename InputArchive >
void from_archive (InputArchive &ar, content_type what=content_type::weights_and_model)
 

Protected Member Functions

float_t fprop_max (const vec_t &in, int idx=0)
 
label_t fprop_max_index (const vec_t &in)
 

Friends

template<typename Layer >
network< sequential > & operator<< (network< sequential > &n, Layer &&l)
 
void construct_graph (network< graph > &graph, const std::vector< std::shared_ptr< layer >> &inputs, const std::vector< std::shared_ptr< layer >> &outputs)
 
void construct_graph (network< graph > &graph, const std::vector< layer * > &inputs, const std::vector< layer * > &outputs)
 

Detailed Description

template<typename NetType>
class tiny_dnn::network< NetType >

A model of neural networks in tiny-dnn.

There are two types of network model available: sequential and graph. A graph representation describes the network as a computational graph - each node of the graph is a layer, and each directed edge holds a tensor and its gradients. A sequential representation describes the network as a linked list - each layer has at most one predecessor and one successor layer.

The two types of network are represented as the network&lt;sequential&gt; and network&lt;graph&gt; classes. These two classes have the same API, except for their construction.

using namespace tiny_dnn;
using namespace tiny_dnn::layers;

std::vector<vec_t> data;
std::vector<label_t> label;

network<sequential> net("foo");
std::cout << net.name(); // "foo"

// simply stack layers by operator <<
net << fc<tan_h>(50, 200) << fc<tan_h>(200, 10);

// prepare optimizer
adagrad opt;

// then train
net.train<mse>(opt, data, label, 10, 20);
Parameters
NetType  specifies whether the network is "sequential" or "graph". "sequential" means the network doesn't have any branch or merge pass. If the network has branch/merge, "graph" can be used.

Member Function Documentation

◆ depth()

template<typename NetType >
size_t tiny_dnn::network< NetType >::depth ( ) const
inline
Deprecated:
use layer_size() instead.

◆ fast_load()

template<typename NetType >
void tiny_dnn::network< NetType >::fast_load ( const char *  filepath)
inline

load network weights from filepath, 30 times faster than stream reading

Deprecated:
use load_weights instead.

◆ fit() [1/2]

template<typename NetType >
template<typename Error , typename Optimizer , typename OnBatchEnumerate , typename OnEpochEnumerate , typename T , typename U >
bool tiny_dnn::network< NetType >::fit ( Optimizer &  optimizer,
const std::vector< T > &  inputs,
const std::vector< U > &  desired_outputs,
size_t  batch_size,
int  epoch,
OnBatchEnumerate  on_batch_enumerate,
OnEpochEnumerate  on_epoch_enumerate,
const bool  reset_weights = false,
const int  n_threads = CNN_TASK_SIZE,
const std::vector< U > &  t_cost = std::vector<U>() 
)
inline

trains the network for a fixed number of epochs to generate desired output.

This method execute fixed number of training steps and invoke callbacks for each mini-batch/epochs. The network is trained to minimize given loss function(specified by template parameter).

The shape of inputs and desired_outputs must match the network inputs. For example, if your network has 2 input layers that each take an N dimensional array, each element of inputs must be a [2xN] array.

network<sequential> net;
adagrad opt;
net << layers::fc<tan_h>(2,3) << layers::fc<relu>(3,1);
// 2 training data, each data is float_t[2]
std::vector<vec_t> data { { 1, 0 }, { 0, 2 } };
std::vector<vec_t> out { { 2 }, { 1 } };
net.fit<mse>(opt, data, out, 1, 1);
// 2 training data, each data is float_t[1][2]
// this form is also valid
std::vector<tensor_t> data2{ { { 1, 0 } }, { { 0, 2 } } };
std::vector<tensor_t> out2 { { { 2 } }, { { 1 } } };
net.fit<mse>(opt, data2, out2, 1, 1);
Parameters
optimizer            optimizing algorithm for training
inputs               array of input data
desired_outputs      array of desired output
batch_size           number of samples per parameter update
epoch                number of training epochs
on_batch_enumerate   callback for each mini-batch enumerate
on_epoch_enumerate   callback for each epoch
reset_weights        set true to reset current network weights
n_threads            number of tasks
t_cost               target costs (leave to nullptr in order to assume equal cost for every target)

◆ fit() [2/2]

template<typename NetType >
template<typename Error , typename Optimizer , typename T , typename U >
bool tiny_dnn::network< NetType >::fit ( Optimizer &  optimizer,
const std::vector< T > &  inputs,
const std::vector< U > &  desired_outputs,
size_t  batch_size = 1,
int  epoch = 1 
)
inline
Parameters
optimizer         optimizing algorithm for training
inputs            array of input data
desired_outputs   array of desired output
batch_size        number of samples per parameter update
epoch             number of training epochs

◆ from_json()

template<typename NetType >
void tiny_dnn::network< NetType >::from_json ( const std::string &  json_string)
inline

load the network architecture from json string

Deprecated:
use load(filename,target,format) instead.

◆ predict()

template<typename NetType >
template<typename Range >
vec_t tiny_dnn::network< NetType >::predict ( const Range &  in)
inline

executes forward-propagation and returns output

Parameters
in  input value range (double[], std::vector&lt;double&gt;, std::list&lt;double&gt; etc)

◆ save()

template<typename NetType >
void tiny_dnn::network< NetType >::save ( std::ostream &  os) const
inline
Deprecated:
use save(filename,target,format) instead.

◆ set_netphase()

template<typename NetType >
void tiny_dnn::network< NetType >::set_netphase ( net_phase  phase)
inline

set the netphase to train or test

Parameters
phase  phase of network, could be train or test

◆ train() [1/3]

template<typename NetType >
template<typename Error , typename Optimizer >
bool tiny_dnn::network< NetType >::train ( Optimizer &  optimizer,
const std::vector< vec_t > &  in,
const std::vector< vec_t > &  t,
size_t  batch_size = 1,
int  epoch = 1 
)
inline
Deprecated:
use fit instead for regression task

◆ train() [2/3]

template<typename NetType >
template<typename Error , typename Optimizer , typename OnBatchEnumerate , typename OnEpochEnumerate >
bool tiny_dnn::network< NetType >::train ( Optimizer &  optimizer,
const std::vector< vec_t > &  inputs,
const std::vector< label_t > &  class_labels,
size_t  batch_size,
int  epoch,
OnBatchEnumerate  on_batch_enumerate,
OnEpochEnumerate  on_epoch_enumerate,
const bool  reset_weights = false,
const int  n_threads = CNN_TASK_SIZE,
const std::vector< vec_t > &  t_cost = std::vector<vec_t>() 
)
inline

trains the network for a fixed number of epochs (for classification task)

The difference between the train and fit methods is how the desired output is specified. This method takes a label_t argument and converts it to a target vector automatically. To train correctly, the output dimension of the last layer must be greater than or equal to the number of label-ids.

Parameters
optimizer            optimizing algorithm for training
inputs               array of input data
class_labels         array of label-id for each input data (0-origin)
batch_size           number of samples per parameter update
epoch                number of training epochs
on_batch_enumerate   callback for each mini-batch enumerate
on_epoch_enumerate   callback for each epoch
reset_weights        set true to reset current network weights
n_threads            number of tasks
t_cost               target costs (leave to nullptr in order to assume equal cost for every target)

◆ train() [3/3]

template<typename NetType >
template<typename Error , typename Optimizer >
bool tiny_dnn::network< NetType >::train ( Optimizer &  optimizer,
const std::vector< vec_t > &  inputs,
const std::vector< label_t > &  class_labels,
size_t  batch_size = 1,
int  epoch = 1 
)
inline
Parameters
optimizer      optimizing algorithm for training
inputs         array of input data
class_labels   array of label-id for each input data (0-origin)
batch_size     number of samples per parameter update
epoch          number of training epochs

The documentation for this class was generated from the following file: