// oneAPI Deep Neural Network Library (oneDNN) 1.4.0
// Performance library for Deep Learning
// example_utils.hpp
1 /*******************************************************************************
2 * Copyright 2019-2020 Intel Corporation
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *******************************************************************************/
16 
17 #ifndef EXAMPLE_UTILS_HPP
18 #define EXAMPLE_UTILS_HPP
19 
20 #include <algorithm>
21 #include <cassert>
22 #include <functional>
23 #include <iostream>
24 #include <numeric>
25 #include <stdexcept>
26 #include <stdlib.h>
27 #include <string>
28 #include <initializer_list>
29 
30 #include "dnnl.hpp"
31 #include "dnnl_debug.h"
32 
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP

// Two-level macro so the argument is macro-expanded before being stringized
// (GCC/Clang `_Pragma`) or passed to MSVC's `__pragma`.
#ifdef _MSC_VER
#define PRAGMA_MACRo(x) __pragma(x)
#define PRAGMA_MACRO(x) PRAGMA_MACRo(x)
#else
#define PRAGMA_MACRo(x) _Pragma(#x)
#define PRAGMA_MACRO(x) PRAGMA_MACRo(x)
#endif

// MSVC doesn't support collapse clause in omp parallel
#if defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#define collapse(x)
#endif

// Emits `#pragma omp parallel for collapse(n)` under the OpenMP CPU runtime;
// expands to nothing for other threading runtimes (serial fallback).
#define PRAGMA_OMP_PARALLEL_FOR_COLLAPSE(n) PRAGMA_MACRO(omp parallel for collapse(n))
#else // DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
#define PRAGMA_OMP_PARALLEL_FOR_COLLAPSE(n)
#endif
52 
// Exception type signaling that the example exercises a feature that is
// unimplemented on the current system. Catch sites treat it as a user
// notification rather than as an error.
struct example_allows_unimplemented : public std::exception {
    const char *message; // non-owning pointer to the notification text

    example_allows_unimplemented(const char *message) noexcept
        : message(message) {}
    const char *what() const noexcept override { return message; }
};
62 
63 inline const char *engine_kind2str_upper(dnnl::engine::kind kind);
64 
65 // Runs example function with signature void() and catches errors.
66 // Returns `0` on success, `1` or oneDNN error, and `2` on example error.
67 inline int handle_example_errors(
68  std::initializer_list<dnnl::engine::kind> engine_kinds,
69  std::function<void()> example) {
70  int exit_code = 0;
71 
72  try {
73  example();
74  } catch (example_allows_unimplemented &e) {
75  std::cout << e.message << std::endl;
76  exit_code = 0;
77  } catch (dnnl::error &e) {
78  std::cout << "oneDNN error caught: " << std::endl
79  << "\tStatus: " << dnnl_status2str(e.status) << std::endl
80  << "\tMessage: " << e.what() << std::endl;
81  exit_code = 1;
82  } catch (std::exception &e) {
83  std::cout << "Error in the example: " << e.what() << "." << std::endl;
84  exit_code = 2;
85  }
86 
87  std::string engine_kind_str;
88  for (auto it = engine_kinds.begin(); it != engine_kinds.end(); ++it) {
89  if (it != engine_kinds.begin()) engine_kind_str += "/";
90  engine_kind_str += engine_kind2str_upper(*it);
91  }
92 
93  std::cout << "Example " << (exit_code ? "failed" : "passed") << " on "
94  << engine_kind_str << "." << std::endl;
95  return exit_code;
96 }
97 
98 // Same as above, but for functions with signature
99 // void(dnnl::engine::kind engine_kind, int argc, char **argv).
100 inline int handle_example_errors(
101  std::function<void(dnnl::engine::kind, int, char **)> example,
102  dnnl::engine::kind engine_kind, int argc, char **argv) {
103  return handle_example_errors(
104  {engine_kind}, [&]() { example(engine_kind, argc, argv); });
105 }
106 
107 // Same as above, but for functions with signature void(dnnl::engine::kind).
108 inline int handle_example_errors(
109  std::function<void(dnnl::engine::kind)> example,
110  dnnl::engine::kind engine_kind) {
111  return handle_example_errors(
112  {engine_kind}, [&]() { example(engine_kind); });
113 }
114 
115 inline dnnl::engine::kind parse_engine_kind(
116  int argc, char **argv, int extra_args = 0) {
117  // Returns default engine kind, i.e. CPU, if none given
118  if (argc == 1) {
120  } else if (argc <= extra_args + 2) {
121  std::string engine_kind_str = argv[1];
122  // Checking the engine type, i.e. CPU or GPU
123  if (engine_kind_str == "cpu") {
125  } else if (engine_kind_str == "gpu") {
126  // Checking if a GPU exists on the machine
128  std::cout << "Could not find compatible GPU" << std::endl
129  << "Please run the example with CPU instead"
130  << std::endl;
131  exit(1);
132  }
134  }
135  }
136 
137  // If all above fails, the example should be ran properly
138  std::cout << "Inappropriate engine kind." << std::endl
139  << "Please run the example like this: " << argv[0] << " [cpu|gpu]"
140  << (extra_args ? " [extra arguments]" : "") << "." << std::endl;
141  exit(1);
142 }
143 
144 inline const char *engine_kind2str_upper(dnnl::engine::kind kind) {
145  if (kind == dnnl::engine::kind::cpu) return "CPU";
146  if (kind == dnnl::engine::kind::gpu) return "GPU";
147  assert(!"not expected");
148  return "<Unknown engine>";
149 }
150 
151 inline dnnl::memory::dim product(const dnnl::memory::dims &dims) {
152  return std::accumulate(dims.begin(), dims.end(), (dnnl::memory::dim)1,
153  std::multiplies<dnnl::memory::dim>());
154 }
155 
156 // Read from memory, write to handle
157 inline void read_from_dnnl_memory(void *handle, dnnl::memory &mem) {
158  dnnl::engine eng = mem.get_engine();
159  size_t bytes = mem.get_desc().get_size();
160 
161  if (eng.get_kind() == dnnl::engine::kind::cpu) {
162  uint8_t *src = static_cast<uint8_t *>(mem.get_data_handle());
163  for (size_t i = 0; i < bytes; ++i)
164  ((uint8_t *)handle)[i] = src[i];
165  }
166 #if DNNL_GPU_RUNTIME == DNNL_RUNTIME_OCL
167  else if (eng.get_kind() == dnnl::engine::kind::gpu) {
168  dnnl::stream s(eng);
169  cl_command_queue q = s.get_ocl_command_queue();
170  cl_mem m = mem.get_ocl_mem_object();
171 
172  cl_int ret = clEnqueueReadBuffer(
173  q, m, CL_TRUE, 0, bytes, handle, 0, NULL, NULL);
174  if (ret != CL_SUCCESS)
175  throw std::runtime_error("clEnqueueReadBuffer failed.");
176  }
177 #endif
178 }
179 
180 // Read from handle, write to memory
181 inline void write_to_dnnl_memory(void *handle, dnnl::memory &mem) {
182  dnnl::engine eng = mem.get_engine();
183  size_t bytes = mem.get_desc().get_size();
184 
185  if (eng.get_kind() == dnnl::engine::kind::cpu) {
186  uint8_t *dst = static_cast<uint8_t *>(mem.get_data_handle());
187  for (size_t i = 0; i < bytes; ++i)
188  dst[i] = ((uint8_t *)handle)[i];
189  }
190 #if DNNL_GPU_RUNTIME == DNNL_RUNTIME_OCL
191  else if (eng.get_kind() == dnnl::engine::kind::gpu) {
192  dnnl::stream s(eng);
193  cl_command_queue q = s.get_ocl_command_queue();
194  cl_mem m = mem.get_ocl_mem_object();
195  size_t bytes = mem.get_desc().get_size();
196 
197  cl_int ret = clEnqueueWriteBuffer(
198  q, m, CL_TRUE, 0, bytes, handle, 0, NULL, NULL);
199  if (ret != CL_SUCCESS)
200  throw std::runtime_error("clEnqueueWriteBuffer failed.");
201  }
202 #endif
203 }
204 
205 #endif
/* Doxygen cross-reference index (rendered-documentation residue; retained
   for reference, commented out so the header stays compilable):
dnnl::memory::desc::get_size
size_t get_size() const
Returns size of the memory descriptor in bytes.
Definition: dnnl.hpp:1944
dnnl::error::what
const char * what() const noexcept override
Returns the explanatory string.
Definition: dnnl.hpp:103
dnnl::stream
An execution stream.
Definition: dnnl.hpp:1047
dnnl::engine
An execution engine.
Definition: dnnl.hpp:844
dnnl::engine::kind
kind
Kinds of engines.
Definition: dnnl.hpp:849
dnnl.hpp
dnnl::memory::get_desc
desc get_desc() const
Returns the associated memory descriptor.
Definition: dnnl.hpp:2010
dnnl::engine::kind::gpu
@ gpu
GPU engine.
dnnl::error
oneDNN exception class.
Definition: dnnl.hpp:91
dnnl::memory::dim
dnnl_dim_t dim
Integer type for representing dimension sizes and indices.
Definition: dnnl.hpp:1190
dnnl::engine::get_count
static size_t get_count(kind kind)
Returns the number of engines of a certain kind.
Definition: dnnl.hpp:868
dnnl_debug.h
dnnl::memory
Memory object.
Definition: dnnl.hpp:1188
dnnl::memory::dims
std::vector< dim > dims
Vector of dimensions.
Definition: dnnl.hpp:1193
dnnl::memory::get_engine
engine get_engine() const
Returns the associated engine.
Definition: dnnl.hpp:2018
dnnl::memory::get_data_handle
void * get_data_handle() const
Returns the underlying memory buffer.
Definition: dnnl.hpp:2028
dnnl::memory::get_ocl_mem_object
cl_mem get_ocl_mem_object() const
Returns the OpenCL memory object associated with the memory.
Definition: dnnl.hpp:2125
dnnl::engine::kind::cpu
@ cpu
CPU engine.
*/