tiny_dnn
1.0.0
A header only, dependency-free deep learning framework in C++11
Class Hierarchy
Go to the graphical class hierarchy
This inheritance list is sorted roughly, but not completely, alphabetically:
[detail level
1
2
3
4
5
6
]
C
tiny_dnn::absolute
C
tiny_dnn::absolute_eps< fraction >
C
tiny_dnn::aligned_allocator< T, alignment >
▼
C
tiny_dnn::core::backend
C
tiny_dnn::core::avx_backend
C
tiny_dnn::core::dnn_backend
C
tiny_dnn::core::nnp_backend
C
tiny_dnn::core::tiny_backend
C
tiny_dnn::blocked_range
C
tiny_dnn::detail::caffe_layer_vector
C
tiny_dnn::core::connection_table
C
tiny_dnn::core::Conv2dPadding
C
tiny_dnn::core::conv_layer_worker_specific_storage
C
tiny_dnn::cross_entropy
C
tiny_dnn::cross_entropy_multiclass
C
tiny_dnn::core::deconv_layer_worker_specific_storage
C
tiny_dnn::core::deconv_params
C
tiny_dnn::deserialization_helper< InputArchive >
C
tiny_dnn::Device
C
tiny_dnn::edge
Class containing input/output data
▼
C
std::enable_shared_from_this
▼
C
tiny_dnn::node
Base class of all kinds of tiny-dnn data
►
C
tiny_dnn::layer
Base class of all kinds of NN layers
►
C
tiny_dnn::feedforward_layer< activation::identity >
►
C
tiny_dnn::partial_connected_layer< activation::identity >
C
tiny_dnn::average_pooling_layer< Activation >
Average pooling with trainable weights
C
tiny_dnn::average_unpooling_layer< Activation >
Average unpooling with trainable weights
C
tiny_dnn::convolutional_layer< Activation >
2D convolution layer
C
tiny_dnn::deconvolutional_layer< Activation >
2D deconvolution layer
C
tiny_dnn::max_pooling_layer< Activation >
Applies max-pooling operation to the spatial data
C
tiny_dnn::max_unpooling_layer< Activation >
Applies max-pooling operation to the spatial data
C
tiny_dnn::quantized_convolutional_layer< Activation >
2D convolution layer
C
tiny_dnn::quantized_deconvolutional_layer< Activation >
2D deconvolution layer
C
tiny_dnn::batch_normalization_layer
Batch Normalization
C
tiny_dnn::concat_layer
Concat N layers along depth
C
tiny_dnn::dropout_layer
Applies dropout to the input
C
tiny_dnn::elementwise_add_layer
Element-wise add N vectors
y_i = x0_i + x1_i + ...
►
C
tiny_dnn::feedforward_layer< Activation >
Single-input, single-output network with activation function
C
tiny_dnn::fully_connected_layer< Activation >
Compute fully-connected(matmul) operation
C
tiny_dnn::linear_layer< Activation >
Element-wise operation:
f(x) = h(scale*x+bias)
C
tiny_dnn::lrn_layer< Activation >
Local response normalization
C
tiny_dnn::partial_connected_layer< Activation >
C
tiny_dnn::quantized_fully_connected_layer< Activation >
Compute fully-connected(matmul) operation
C
tiny_dnn::input_layer
C
tiny_dnn::power_layer
Element-wise pow:
y = scale*x^factor
C
tiny_dnn::slice_layer
Slice an input data into multiple outputs along a given slice dimension
▼
C
std::exception
▼
C
tiny_dnn::nn_error
Error exception class for tiny-dnn
C
tiny_dnn::nn_not_implemented_error
▼
C
std::false_type
C
foobar< n >
▼
C
tiny_dnn::activation::function
C
tiny_dnn::activation::elu
C
tiny_dnn::activation::identity
C
tiny_dnn::activation::leaky_relu
C
tiny_dnn::activation::relu
C
tiny_dnn::activation::sigmoid
C
tiny_dnn::activation::softmax
C
tiny_dnn::activation::tan_h
C
tiny_dnn::activation::tan_hp1m2
▼
C
tiny_dnn::weight_init::function
▼
C
tiny_dnn::weight_init::scalable
C
tiny_dnn::weight_init::constant
C
tiny_dnn::weight_init::gaussian
C
tiny_dnn::weight_init::he
C
tiny_dnn::weight_init::lecun
Use fan-in(number of input weight for each neuron) for scaling
C
tiny_dnn::weight_init::xavier
Use fan-in and fan-out for scaling
C
vectorize::detail::generic_vec_type< T >
C
tiny_dnn::graph_visualizer
Utility for graph visualization
C
tiny_dnn::image< T >
Simple image utility class
C
tiny_dnn::index3d< T >
C
tiny_dnn::index3d< serial_size_t >
C
tiny_dnn::detail::layer_node
C
tiny_dnn::core::max_pooling_layer_worker_specific_storage
C
tiny_dnn::detail::mnist_header
C
tiny_dnn::mse
▼
C
network
C
models::alexnet
C
tiny_dnn::network< NetType >
A model of neural networks in tiny-dnn
C
tiny_dnn::nn_info
Info class for tiny-dnn (for debug)
C
tiny_dnn::nn_warn
Warning class for tiny-dnn (for debug)
C
tiny_dnn::node_tuple< T >
▼
C
tiny_dnn::nodes
Basic class of various network types (sequential, multi-in/multi-out)
C
tiny_dnn::graph
Generic graph network
C
tiny_dnn::sequential
Single-input, single-output feedforward network
▼
C
tiny_dnn::core::OpKernel
C
tiny_dnn::Conv2dGradOp
C
tiny_dnn::Conv2dLibDNNBackwardOp
C
tiny_dnn::Conv2dLibDNNForwardOp
C
tiny_dnn::Conv2dOp
C
tiny_dnn::Conv2dOpenCLBackwardOp
C
tiny_dnn::Conv2dOpenCLForwardOp
C
tiny_dnn::FullyConnectedGradOp
C
tiny_dnn::FullyConnectedOp
C
tiny_dnn::MaxPoolGradOp
C
tiny_dnn::MaxPoolOp
C
tiny_dnn::core::OpKernelConstruction
C
tiny_dnn::core::OpKernelContext
C
tiny_dnn::core::OpKernelContext::OpParams
▼
C
tiny_dnn::optimizer
Base class of optimizer. usesHessian: true if an optimizer uses the hessian (2nd-order derivative of the loss function)
▼
C
tiny_dnn::stateful_optimizer< 1 >
C
tiny_dnn::RMSprop
RMSprop
C
tiny_dnn::adagrad
Adaptive gradient method
C
tiny_dnn::momentum
SGD with momentum
▼
C
tiny_dnn::stateful_optimizer< 2 >
C
tiny_dnn::adam
[a new optimizer (2015)]
C
tiny_dnn::gradient_descent
SGD without momentum
C
tiny_dnn::stateful_optimizer< N >
▼
C
tiny_dnn::core::Params
C
tiny_dnn::core::conv_params
C
tiny_dnn::core::fully_params
C
tiny_dnn::core::maxpool_params
C
tiny_dnn::Program
C
tiny_dnn::ProgramHash
C
tiny_dnn::ProgramManager
C
tiny_dnn::progress_display
C
tiny_dnn::random_generator
C
tiny_dnn::aligned_allocator< T, alignment >::rebind< U >
C
tiny_dnn::result
C
tiny_dnn::serialization_helper< OutputArchive >
C
tiny_dnn::core::session
C
tiny_dnn::Tensor< U >
C
tiny_dnn::timer
Generated by
1.9.1