47 #include "tiny_dnn/layers/layer.h"
48 #include "tiny_dnn/core/framework/device.fwd.h"
50 #if defined(USE_OPENCL) || defined(USE_CUDA)
52 #include "third_party/CLCudaAPI/clpp11.h"
54 #include "third_party/CLCudaAPI/cupp11.h"
66 : device_(device), op_(op) {}
69 const Device* device()
const {
return device_; }
72 const layer* op()
const {
return op_; }
74 bool operator==(
const Program& p)
const {
75 if (p.device() == this->device() &&
76 p.op()->
layer_type() == this->op()->layer_type()) {
91 size_t operator()(
const Program& p)
const {
94 if (p.device() ==
nullptr || p.op() ==
nullptr) {
95 throw nn_error(
"No Op or Device in Program.");
100 return (std::hash<int>()(
static_cast<int>(p.device()->type())) ^
101 std::hash<bool>()(p.device()->hasCLCudaAPI()) ^
102 std::hash<int>()(p.device()->platformId()) ^
103 std::hash<int>()(p.device()->deviceId()) ^
104 std::hash<std::string>()(p.op()->
layer_type()));
Referenced symbols:
- Device — defined in device.fwd.h:73.
- layer — base class of all kinds of NN layers; defined in layer.h:62.
  Provides `virtual std::string layer_type() const = 0`: the name of the
  layer, which should be unique for each concrete class.
- nn_error — error exception class for tiny-dnn; defined in nn_error.h:37.