tiny_dnn  1.0.0
A header-only, dependency-free deep learning framework in C++11
device.fwd.h
/*
    COPYRIGHT

    All contributions by Taiga Nomi
    Copyright (c) 2013, Taiga Nomi
    All rights reserved.

    All other contributions:
    Copyright (c) 2013-2016, the respective contributors.
    All rights reserved.

    Each contributor holds copyright over their respective contributions.
    The project versioning (Git) records all such contribution source information.

    LICENSE

    The BSD 3-Clause License


    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
    list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

    * Neither the name of tiny-dnn nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include <memory>   // std::shared_ptr
#include <ostream>  // std::ostream
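// CLCudaAPI offers a single C++11 interface with interchangeable back-ends:
// clpp11.h targets OpenCL and cupp11.h targets CUDA, so the CLCudaAPI::Device,
// Context and Queue types used below are available either way.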
#if defined(USE_OPENCL) || defined(USE_CUDA)
#ifdef USE_OPENCL
#include "third_party/CLCudaAPI/clpp11.h"
#else
#include "third_party/CLCudaAPI/cupp11.h"
#endif
#endif

namespace tiny_dnn {

enum class device_t { NONE, CPU, GPU /*, FPGA */ };

inline std::ostream& operator << (std::ostream& os, device_t type) {
  switch (type) {
    case device_t::NONE: os << "NONE"; break;
    case device_t::CPU:  os << "CPU";  break;
    case device_t::GPU:  os << "GPU";  break;
    default:
      throw nn_error("Not supported ostream enum: " +
                     to_string(static_cast<int>(type)));
      break;
  }
  return os;
}

/* The class models a physical device */
class Device {
 public:
  /* CPU-only constructor.
   *
   * @param type The device type. Can only be CPU.
   */
  inline explicit Device(device_t type);

  /* CPU/GPU OpenCL constructor.
   * The device context is initialized in the constructor.
   *
   * @param type The device type. Can be either CPU or GPU.
   * @param platform_id The platform identification number.
   * @param device_id The device identification number.
   */
  inline explicit Device(device_t type,
                         const int platform_id,
                         const int device_id);

  // Returns the device type
  device_t type() const { return type_; }

  // Returns true if CLCudaAPI is enabled for this device
  bool hasCLCudaAPI() const { return has_clcuda_api_; }

  // Returns the platform id
  int platformId() const { return platform_id_; }

  // Returns the device id
  int deviceId() const { return device_id_; }

#if defined(USE_OPENCL) || defined(USE_CUDA)
  // Returns the CLCudaAPI Device object
  CLCudaAPI::Device device() const { return *device_; }

  // Returns the CLCudaAPI Context object
  CLCudaAPI::Context context() const { return *context_; }

  // Returns the CLCudaAPI Queue object
  CLCudaAPI::Queue queue() const { return *queue_; }
#endif

  bool operator==(const Device& d) const {
    return d.type() == this->type() &&
           d.hasCLCudaAPI() == this->hasCLCudaAPI() &&
           d.platformId() == this->platformId() &&
           d.deviceId() == this->deviceId();
  }

  /* Registers and creates an OpenCL program per Operation type.
   *
   * @param l The layer to be registered
   */
  inline void registerOp(layer& l);

 private:
  /* The device type */
  device_t type_;
  /* True if the device has a CLCudaAPI (OpenCL/CUDA) context */
  bool has_clcuda_api_;
  /* The platform identification number */
  int platform_id_;
  /* The device identification number */
  int device_id_;
#if defined(USE_OPENCL) || defined(USE_CUDA)
  /* The CLCudaAPI device */
  std::shared_ptr<CLCudaAPI::Device> device_;
  /* The CLCudaAPI device context */
  std::shared_ptr<CLCudaAPI::Context> context_;
  /* The CLCudaAPI device queue */
  std::shared_ptr<CLCudaAPI::Queue> queue_;
#endif
};

}  // namespace tiny_dnn
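A minimal usage sketch (not part of device.fwd.h), assuming tiny-dnn is built with USE_OPENCL or USE_CUDA, that platform/device index 0/0 matches the local hardware, and that the layer is configured to use an OpenCL-capable backend; the convolutional_layer parameters are illustrative only:

#include "tiny_dnn/tiny_dnn.h"  // umbrella header that pulls in Device and the layers

using namespace tiny_dnn;

int main() {
  // Plain CPU device: no CLCudaAPI context is created behind it.
  Device cpu_device(device_t::CPU);

  // GPU device on platform 0, device 0 (indices are system-specific);
  // the CLCudaAPI device, context and queue are initialized in the constructor.
  Device gpu_device(device_t::GPU, 0, 0);

  // Illustrative layer: 32x32 input, 5x5 kernel, 1 input and 6 output channels.
  convolutional_layer<activation::tan_h> conv(32, 32, 5, 1, 6);

  // Build and register the OpenCL program for this layer's operation type.
  gpu_device.registerOp(conv);

  return 0;
}

Note that constructing a GPU Device only sets up the CLCudaAPI context; the per-operation OpenCL program is created when registerOp is called for a layer.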