Actual source code: cupmdevice.cxx
#include "../../interface/cupmdevice.hpp"
#include <algorithm>
#include <csetjmp> // for cuda mpi awareness
#include <csignal> // SIGSEGV
#include <iterator>
#include <limits>  // std::numeric_limits, used for the device-selection option range below
#include <memory>  // std::unique_ptr
#include <type_traits>

#if PetscDefined(USE_LOG)
PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
#else
#define PetscLogInitialize() 0
#endif
namespace Petsc
{

namespace Device
{

namespace CUPM
{
// internal "impls" class for CUPMDevice. Each instance represents a single cupm device
template <DeviceType T>
class Device<T>::DeviceInternal
{
  const int        id_;
  bool             devInitialized_ = false;
  cupmDeviceProp_t dprop_; // cudaDeviceProp appears to be an actual struct, i.e. you can't
                           // initialize it with nullptr or NULL (i've tried)

  PETSC_CXX_COMPAT_DECL(bool CUPMAwareMPI_());

public:
  // default constructor
  explicit constexpr DeviceInternal(int dev) noexcept : id_(dev) { }

  // gather all relevant information for a particular device, a cupmDeviceProp_t is
  // usually sufficient here
  PETSC_NODISCARD PetscErrorCode initialize() noexcept;
  PETSC_NODISCARD PetscErrorCode configure() noexcept;
  PETSC_NODISCARD PetscErrorCode view(PetscViewer) const noexcept;
  PETSC_NODISCARD PetscErrorCode finalize() noexcept;

  PETSC_NODISCARD auto id() const -> decltype(id_) { return id_; }
  PETSC_NODISCARD auto initialized() const -> decltype(devInitialized_) { return devInitialized_; }
  PETSC_NODISCARD auto prop() const -> const decltype(dprop_)& { return dprop_; }

  // factory
  PETSC_CXX_COMPAT_DECL(std::unique_ptr<DeviceInternal> makeDevice(int i))
  {
    return std::unique_ptr<DeviceInternal>(new DeviceInternal(i));
  }
};
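
// For orientation, a sketch (not additional API) of how the class above is driven by the
// Device<T> member functions further down in this file:
//
//   auto dev = DeviceInternal::makeDevice(0); // wrap device 0
//   dev->initialize();                        // create the cupm context, check GPU-aware MPI
//   dev->configure();                         // cupmSetDevice() + cache the cupmDeviceProp_t
//   dev->view(viewer);                        // print the cached properties
//   dev->finalize();                          // mark uninitialized; the owning unique_ptr frees it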

// the goal here is simply to get the cupm backend to create its context, not to do any type of
// modification of it, or create objects (since these may be affected by subsequent
// configuration changes)
template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::initialize() noexcept
{
  if (devInitialized_) return 0;
  devInitialized_ = true;
  // need to do this BEFORE device has been set, although if the user
  // has already done this then we just ignore it
  if (cupmSetDeviceFlags(cupmDeviceMapHost) == cupmErrorSetOnActiveProcess) {
    // reset the error if it was cupmErrorSetOnActiveProcess
    const auto PETSC_UNUSED unused = cupmGetLastError();
  } else {
    cupmGetLastError();
  }
  // cuda 5.0+ will create a context when cupmSetDevice is called
  if (cupmSetDevice(id_) != cupmErrorDeviceAlreadyInUse) cupmGetLastError();
  // forces cuda < 5.0 to initialize a context
  cupmFree(nullptr);
  // where is this variable defined and when is it set? who knows! but it is defined and set
  // at this point. either way, each device must make this check since I guess MPI might not be
  // aware of all of them?
  if (use_gpu_aware_mpi) {
    // For OpenMPI, we could do a compile time check with
    // "defined(PETSC_HAVE_OMPI_MAJOR_VERSION) && defined(MPIX_CUDA_AWARE_SUPPORT) &&
    // MPIX_CUDA_AWARE_SUPPORT" to see if it is CUDA-aware. However, recent versions of IBM
    // Spectrum MPI (e.g., 10.3.1) on Summit meet above conditions, but one has to use jsrun
    // --smpiargs=-gpu to really enable GPU-aware MPI. So we do the check at runtime with a
    // code that works only with GPU-aware MPI.
    if (PetscUnlikely(!CUPMAwareMPI_())) {
      (*PetscErrorPrintf)("PETSc is configured with GPU support, but your MPI is not GPU-aware. For better performance, please use a GPU-aware MPI.\n");
      (*PetscErrorPrintf)("If you do not care, add option -use_gpu_aware_mpi 0. To not see the message again, add the option to your .petscrc, OR add it to the env var PETSC_OPTIONS.\n");
      (*PetscErrorPrintf)("If you do care, for IBM Spectrum MPI on OLCF Summit, you may need jsrun --smpiargs=-gpu.\n");
      (*PetscErrorPrintf)("For OpenMPI, you need to configure it --with-cuda (https://www.open-mpi.org/faq/?category=buildcuda)\n");
      (*PetscErrorPrintf)("For MVAPICH2-GDR, you need to set MV2_USE_CUDA=1 (http://mvapich.cse.ohio-state.edu/userguide/gdr/)\n");
      (*PetscErrorPrintf)("For Cray-MPICH, you need to set MPICH_RDMA_ENABLED_CUDA=1 (https://www.olcf.ornl.gov/tutorials/gpudirect-mpich-enabled-cuda/)\n");
      PETSCABORT(PETSC_COMM_SELF,PETSC_ERR_LIB);
    }
  }
  return 0;
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::configure() noexcept
{
  PetscAssert(devInitialized_,PETSC_COMM_SELF,PETSC_ERR_COR,"Device %d being configured before it was initialized",id_);
  // why on EARTH nvidia insists on making otherwise informational states into
  // fully-fledged error codes is beyond me. Why couldn't a pointer to bool argument have
  // sufficed?!?!?!
  if (cupmSetDevice(id_) != cupmErrorDeviceAlreadyInUse) cupmGetLastError();
  // need to update the device properties
  cupmGetDeviceProperties(&dprop_,id_);
  PetscInfo(nullptr,"Configured device %d\n",id_);
  return 0;
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::view(PetscViewer viewer) const noexcept
{
  PetscBool iascii;

  PetscAssert(devInitialized_,PETSC_COMM_SELF,PETSC_ERR_COR,"Device %d being viewed before it was initialized or configured",id_);
  PetscObjectTypeCompare(PetscObjectCast(viewer),PETSCVIEWERASCII,&iascii);
  if (iascii) {
    MPI_Comm    comm;
    PetscMPIInt rank;
    PetscViewer sviewer;

    PetscObjectGetComm(PetscObjectCast(viewer),&comm);
    MPI_Comm_rank(comm,&rank);
    PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
    PetscViewerASCIIPrintf(sviewer,"[%d] device %d: %s\n",rank,id_,dprop_.name);
    PetscViewerASCIIPushTab(sviewer);
    PetscViewerASCIIPrintf(sviewer,"Compute capability: %d.%d\n",dprop_.major,dprop_.minor);
    PetscViewerASCIIPrintf(sviewer,"Multiprocessor Count: %d\n",dprop_.multiProcessorCount);
    PetscViewerASCIIPrintf(sviewer,"Maximum Grid Dimensions: %d x %d x %d\n",dprop_.maxGridSize[0],dprop_.maxGridSize[1],dprop_.maxGridSize[2]);
    PetscViewerASCIIPrintf(sviewer,"Maximum Block Dimensions: %d x %d x %d\n",dprop_.maxThreadsDim[0],dprop_.maxThreadsDim[1],dprop_.maxThreadsDim[2]);
    PetscViewerASCIIPrintf(sviewer,"Maximum Threads Per Block: %d\n",dprop_.maxThreadsPerBlock);
    PetscViewerASCIIPrintf(sviewer,"Warp Size: %d\n",dprop_.warpSize);
    PetscViewerASCIIPrintf(sviewer,"Total Global Memory (bytes): %zu\n",dprop_.totalGlobalMem);
    PetscViewerASCIIPrintf(sviewer,"Total Constant Memory (bytes): %zu\n",dprop_.totalConstMem);
    PetscViewerASCIIPrintf(sviewer,"Shared Memory Per Block (bytes): %zu\n",dprop_.sharedMemPerBlock);
    PetscViewerASCIIPrintf(sviewer,"Multiprocessor Clock Rate (kHz): %d\n",dprop_.clockRate);
    PetscViewerASCIIPrintf(sviewer,"Memory Clock Rate (kHz): %d\n",dprop_.memoryClockRate);
    PetscViewerASCIIPrintf(sviewer,"Memory Bus Width (bits): %d\n",dprop_.memoryBusWidth);
    PetscViewerASCIIPrintf(sviewer,"Peak Memory Bandwidth (GB/s): %f\n",2.0*dprop_.memoryClockRate*(dprop_.memoryBusWidth/8)/1.0e6);
    PetscViewerASCIIPrintf(sviewer,"Can map host memory: %s\n",dprop_.canMapHostMemory ? "PETSC_TRUE" : "PETSC_FALSE");
    PetscViewerASCIIPrintf(sviewer,"Can execute multiple kernels concurrently: %s\n",dprop_.concurrentKernels ? "PETSC_TRUE" : "PETSC_FALSE");
    PetscViewerASCIIPopTab(sviewer);
    PetscViewerFlush(sviewer);
    PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
    PetscViewerFlush(viewer);
  }
  return 0;
}
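
// The GPU-aware-MPI probe below works by handing a device pointer to MPI_Allreduce(): a
// non-GPU-aware MPI will typically touch that pointer on the host and raise SIGSEGV. A
// temporary signal handler longjmp()s back into CUPMAwareMPI_() through the jump buffer below
// instead of letting PETSc abort, so the probe can simply report "not aware".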
static std::jmp_buf cupmMPIAwareJumpBuffer;
static bool         cupmMPIAwareJumpBufferSet;

// godspeed to anyone that attempts to call this function
void SilenceVariableIsNotNeededAndWillNotBeEmittedWarning_ThisFunctionShouldNeverBeCalled()
{
  PETSCABORT(MPI_COMM_NULL,INT_MAX);
  if (cupmMPIAwareJumpBufferSet) (void)cupmMPIAwareJumpBuffer;
}

#define CHKCUPMAWARE(...) do {                              \
    cupmError_t cerr_ = __VA_ARGS__;                        \
    if (PetscUnlikely(cerr_ != cupmSuccess)) return false;  \
  } while (0)
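
// For illustration, CHKCUPMAWARE(cupmMalloc(...)) expands (roughly) to
//
//   do {
//     cupmError_t cerr_ = cupmMalloc(...);
//     if (PetscUnlikely(cerr_ != cupmSuccess)) return false;
//   } while (0)
//
// i.e. any cupm failure makes CUPMAwareMPI_() answer "not GPU-aware" rather than raise an error.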

template <DeviceType T>
PETSC_CXX_COMPAT_DEFN(bool Device<T>::DeviceInternal::CUPMAwareMPI_())
{
  constexpr int  bufSize = 2;
  constexpr int  hbuf[bufSize] = {1,0};
  int           *dbuf = nullptr;
  constexpr auto bytes = bufSize*sizeof(*dbuf);
  auto           awareness = false;
  const auto     cupmSignalHandler = [](int signal, void *ptr) -> PetscErrorCode {
    if ((signal == SIGSEGV) && cupmMPIAwareJumpBufferSet) std::longjmp(cupmMPIAwareJumpBuffer,1);
    return PetscSignalHandlerDefault(signal,ptr);
  };

  CHKCUPMAWARE(cupmMalloc(reinterpret_cast<void**>(&dbuf),bytes));
  CHKCUPMAWARE(cupmMemcpy(dbuf,hbuf,bytes,cupmMemcpyHostToDevice));
  PetscPushSignalHandler(cupmSignalHandler,nullptr);
  cupmMPIAwareJumpBufferSet = true;
  if (setjmp(cupmMPIAwareJumpBuffer)) {
    // if a segv was triggered in the MPI_Allreduce below, it is very likely due to MPI not
    // being GPU-aware
    awareness = false;
    // control flow up until this point:
    // 1. Device<T>::DeviceInternal::CUPMAwareMPI_()
    // 2. MPI_Allreduce
    // 3. SIGSEGV
    // 4. PetscSignalHandler_Private
    // 5. cupmSignalHandler (lambda function)
    // 6. here
    //
    // the longjmp skips the normal returns of everything in that chain, so any PETSc stack
    // frames pushed along the way are never popped; we must undo this. This would be most
    // naturally done in cupmSignalHandler, however the C/C++ standard dictates:
    //
    // After invoking longjmp(), non-volatile-qualified local objects should not be accessed if
    // their values could have changed since the invocation of setjmp(). Their value in this
    // case is considered indeterminate, and accessing them is undefined behavior.
    //
    // so for safety (since we don't know what PetscStackPop may try to read/declare) we do it
    // outside of the longjmp control flow
    PetscStackPop;
  } else if (!MPI_Allreduce(dbuf,dbuf+1,1,MPI_INT,MPI_SUM,PETSC_COMM_SELF)) awareness = true;
  cupmMPIAwareJumpBufferSet = false;
  PetscPopSignalHandler();
  CHKCUPMAWARE(cupmFree(dbuf));
  return awareness;
}

#undef CHKCUPMAWARE

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::finalize() noexcept
{
  devInitialized_ = false;
  return 0;
}
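
// note that finalize() above only clears the flag; no cupm API is called here, so the device
// context itself is presumably left for the runtime to reclaim at process exit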

template <DeviceType T>
PetscErrorCode Device<T>::finalize_() noexcept
{
  if (!initialized_) return 0;
  for (auto&& device : devices_) {
    if (device) {
      device->finalize();
      device.reset();
    }
  }
  defaultDevice_ = PETSC_CUPM_DEVICE_NONE; // disabled by default
  initialized_   = false;
  return 0;
}

// these functions should be named identically to the option they produce where "CUPMTYPE" and
// "cupmtype" are the uppercase and lowercase string versions of the cupm backend respectively
template <DeviceType T>
PETSC_CXX_COMPAT_DECL(PETSC_CONSTEXPR_14 const char* PetscDevice_CUPMTYPE_Options())
{
  switch (T) {
  case DeviceType::CUDA: return "PetscDevice CUDA Options";
  case DeviceType::HIP:  return "PetscDevice HIP Options";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

template <DeviceType T>
PETSC_CXX_COMPAT_DECL(PETSC_CONSTEXPR_14 const char* device_enable_cupmtype())
{
  switch (T) {
  case DeviceType::CUDA: return "-device_enable_cuda";
  case DeviceType::HIP:  return "-device_enable_hip";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

template <DeviceType T>
PETSC_CXX_COMPAT_DECL(PETSC_CONSTEXPR_14 const char* device_select_cupmtype())
{
  switch (T) {
  case DeviceType::CUDA: return "-device_select_cuda";
  case DeviceType::HIP:  return "-device_select_hip";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

template <DeviceType T>
PETSC_CXX_COMPAT_DECL(PETSC_CONSTEXPR_14 const char* device_view_cupmtype())
{
  switch (T) {
  case DeviceType::CUDA: return "-device_view_cuda";
  case DeviceType::HIP:  return "-device_view_hip";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

template <DeviceType T>
PETSC_CXX_COMPAT_DECL(PETSC_CONSTEXPR_14 const char* CUPM_VISIBLE_DEVICES())
{
  switch (T) {
  case DeviceType::CUDA: return "CUDA_VISIBLE_DEVICES";
  case DeviceType::HIP:  return "HIP_VISIBLE_DEVICES";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}
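
// so, for example, with T == DeviceType::CUDA the helpers above produce
//
//   PetscDevice_CUPMTYPE_Options<T>() -> "PetscDevice CUDA Options"
//   device_enable_cupmtype<T>()       -> "-device_enable_cuda"
//   device_select_cupmtype<T>()       -> "-device_select_cuda"
//   device_view_cupmtype<T>()         -> "-device_view_cuda"
//   CUPM_VISIBLE_DEVICES<T>()         -> "CUDA_VISIBLE_DEVICES"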

template <DeviceType T>
PetscErrorCode Device<T>::initialize(MPI_Comm comm, PetscInt *defaultDeviceId, PetscDeviceInitType *defaultInitType) noexcept
{
  PetscInt    initTypeCUPM = *defaultInitType,id = *defaultDeviceId;
  PetscBool   view = PETSC_FALSE,flg;
  int         ndev;
  cupmError_t cerr;

  if (initialized_) return 0;
  initialized_ = true;
  PetscRegisterFinalize(finalize_);

  {
    // the functions to populate the command line strings are named after the string they return
    PetscOptionsBegin(comm,nullptr,PetscDevice_CUPMTYPE_Options<T>(),"Sys");
    PetscOptionsEList(device_enable_cupmtype<T>(),"How (or whether) to initialize a device","CUPMDevice<CUPMDeviceType>::initialize()",PetscDeviceInitTypes,3,PetscDeviceInitTypes[initTypeCUPM],&initTypeCUPM,nullptr);
    PetscOptionsRangeInt(device_select_cupmtype<T>(),"Which device to use. Pass " PetscStringize(PETSC_DECIDE) " to have PETSc decide or (given they exist) [0-NUM_DEVICE) for a specific device","PetscDeviceCreate",id,&id,nullptr,PETSC_DECIDE,std::numeric_limits<decltype(defaultDevice_)>::max());
    PetscOptionsBool(device_view_cupmtype<T>(),"Display device information and assignments (forces eager initialization)",nullptr,view,&view,&flg);
    PetscOptionsEnd();
  }

  cerr = cupmGetDeviceCount(&ndev);
  switch (cerr) {
  case cupmErrorNoDevice: {
    PetscBool found;
    PetscBool ignoreCupmError = PETSC_FALSE;
    char      buf[16];

    PetscOptionsGetenv(comm,CUPM_VISIBLE_DEVICES<T>(),buf,sizeof(buf),&found);
    if (found) {
      size_t len;

      PetscStrlen(buf,&len);
      if (!len || buf[0] == '-') ignoreCupmError = PETSC_TRUE;
    }
    id = PETSC_CUPM_DEVICE_NONE; // there are no devices anyway
    if (ignoreCupmError) {
      initTypeCUPM = PETSC_DEVICE_INIT_NONE;
      auto PETSC_UNUSED ignored = cupmGetLastError();
      break;
    }
    // if we don't outright ignore the error we then drop and check if the user tried to
    // eagerly initialize the device
  }
  case cupmErrorStubLibrary:
    if (PetscUnlikely((initTypeCUPM == PETSC_DEVICE_INIT_EAGER) || (view && flg))) {
      const auto name    = cupmGetErrorName(cerr);
      const auto desc    = cupmGetErrorString(cerr);
      const auto backend = cupmName();
      SETERRQ(comm,PETSC_ERR_USER_INPUT,"Cannot eagerly initialize %s, as doing so results in %s error %d (%s) : %s",backend,backend,static_cast<PetscErrorCode>(cerr),name,desc);
    }
    initTypeCUPM = PETSC_DEVICE_INIT_NONE;
    {auto PETSC_UNUSED ignored = cupmGetLastError();}
    break;
  default:
    break;
  }
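
  // note: ndev is only consulted below when a device will actually be initialized, i.e. when
  // initTypeCUPM was not forced to PETSC_DEVICE_INIT_NONE above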
  if (initTypeCUPM == PETSC_DEVICE_INIT_NONE) {
    if ((id > 0) || (id == PETSC_DECIDE)) id = PETSC_CUPM_DEVICE_NONE;
  } else {
    PetscDeviceCheckDeviceCount_Internal(ndev);
    if (id == PETSC_DECIDE) {
      if (ndev) {
        PetscMPIInt rank;

        MPI_Comm_rank(comm,&rank);
        id = rank % ndev;
      } else id = 0;
    }
    view = static_cast<decltype(view)>(view && flg);
    if (view) initTypeCUPM = PETSC_DEVICE_INIT_EAGER;
  }
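
  // the PETSC_DECIDE branch above assigns devices round-robin by rank, e.g. with 4 MPI ranks
  // and ndev == 2: rank 0 -> device 0, rank 1 -> device 1, rank 2 -> device 0, rank 3 -> device 1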
  static_assert(std::is_same<PetscMPIInt,decltype(defaultDevice_)>::value,"");
  // id is PetscInt, defaultDevice_ is int
  PetscMPIIntCast(id,&defaultDevice_);
  if (initTypeCUPM == PETSC_DEVICE_INIT_EAGER) {
    devices_[defaultDevice_] = DeviceInternal::makeDevice(defaultDevice_);
    devices_[defaultDevice_]->initialize();
    devices_[defaultDevice_]->configure();
    if (view) {
      PetscViewer vwr;

      PetscLogInitialize();
      PetscViewerASCIIGetStdout(comm,&vwr);
      devices_[defaultDevice_]->view(vwr);
    }
  }

  // record the results of the initialization
  *defaultInitType = static_cast<PetscDeviceInitType>(initTypeCUPM);
  *defaultDeviceId = id;
  return 0;
}
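
// Example invocation (command-line names assume T == DeviceType::CUDA, and "eager" assumes the
// usual PetscDeviceInitTypes spelling):
//
//   ./app -device_enable_cuda eager -device_select_cuda 1 -device_view_cuda
//
// which eagerly initializes device 1 and prints the report produced by DeviceInternal::view() above.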

template <DeviceType T>
PetscErrorCode Device<T>::getDevice(PetscDevice device, PetscInt id) const noexcept
{
  const auto PETSC_UNUSED cerr = static_cast<cupmError_t>(-defaultDevice_); // a negated cupm error may be stored in defaultDevice_

  if (id == PETSC_DECIDE) id = defaultDevice_;
  PetscAssert(static_cast<decltype(devices_.size())>(id) < devices_.size(),PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only %zu devices are supported, but tried to get device with id %" PetscInt_FMT,devices_.size(),id);
  if (devices_[id]) {
    PetscAssert(id == devices_[id]->id(),PETSC_COMM_SELF,PETSC_ERR_PLIB,"Entry %" PetscInt_FMT " contains device with mismatching id %d",id,devices_[id]->id());
  } else devices_[id] = DeviceInternal::makeDevice(id);
  devices_[id]->initialize();
  device->deviceId = devices_[id]->id(); // technically id == devices_[id]->id_ here
  device->ops->createcontext = create_;
  device->ops->configure     = this->configureDevice;
  device->ops->view          = this->viewDevice;
  return 0;
}

template <DeviceType T>
PetscErrorCode Device<T>::configureDevice(PetscDevice device) noexcept
{
  devices_[device->deviceId]->configure();
  return 0;
}

template <DeviceType T>
PetscErrorCode Device<T>::viewDevice(PetscDevice device, PetscViewer viewer) noexcept
{
  // viewing should not actually reconfigure the device, but configure() contains a PetscInfo()
  // call that reports it as being (re)configured
  devices_[device->deviceId]->configure();
  devices_[device->deviceId]->view(viewer);
  return 0;
}

// explicitly instantiate the classes
#if PetscDefined(HAVE_CUDA)
template class Device<DeviceType::CUDA>;
#endif
#if PetscDefined(HAVE_HIP)
template class Device<DeviceType::HIP>;
#endif

} // namespace CUPM

} // namespace Device

} // namespace Petsc