#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include "example_utils.hpp"

using namespace dnnl;

using tag = memory::format_tag;
using dt = memory::data_type;

void inner_product_example(dnnl::engine::kind engine_kind) {

    // Create execution dnnl::engine.
    dnnl::engine engine(engine_kind, 0);

    // Create dnnl::stream.
    dnnl::stream engine_stream(engine);

    // Tensor dimensions.
    const memory::dim N = 3, // batch size
            IC = 3, // input channels
            IH = 227, // tensor height
            IW = 227, // tensor width
            OC = 96; // output channels

    // Source (src), weights, bias, and destination (dst) tensor dimensions.
    memory::dims src_dims = {N, IC, IH, IW};
    memory::dims weights_dims = {OC, IC, IH, IW};
    memory::dims bias_dims = {OC};
    memory::dims dst_dims = {N, OC};

    // Allocate buffers.
std::vector<float> src_data(product(src_dims));
std::vector<float> weights_data(product(weights_dims));
std::vector<float> bias_data(OC);
std::vector<float> dst_data(product(dst_dims));
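
    // Initialize src, weights, and bias tensors with synthetic data.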
std::generate(src_data.begin(), src_data.end(), []() {
static int i = 0;
return std::cos(i++ / 10.f);
});
std::generate(weights_data.begin(), weights_data.end(), []() {
static int i = 0;
return std::sin(i++ * 2.f);
});
std::generate(bias_data.begin(), bias_data.end(), []() {
static int i = 0;
return std::tanh(i++);
});
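
    // Create memory descriptors and memory objects for src, bias, and dst.
    // In this example, NCHW layout is assumed for src.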
auto src_md = memory::desc(src_dims, dt::f32, tag::nchw);
auto bias_md = memory::desc(bias_dims, dt::f32, tag::a);
auto dst_md = memory::desc(dst_dims, dt::f32, tag::nc);
    auto src_mem = memory(src_md, engine);
    auto bias_mem = memory(bias_md, engine);
    auto dst_mem = memory(dst_md, engine);

    // Create memory object for the user's weights layout (OIHW).
    auto user_weights_mem = memory({weights_dims, dt::f32, tag::oihw}, engine);
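
    // Write data to the memory objects' handles.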
write_to_dnnl_memory(src_data.data(), src_mem);
write_to_dnnl_memory(bias_data.data(), bias_mem);
write_to_dnnl_memory(weights_data.data(), user_weights_mem);
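
    // Create memory descriptor for weights with format_tag::any. This allows
    // the inner product primitive to choose the memory layout for an
    // optimized implementation, which may differ from the user's layout.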
auto inner_product_weights_md
= memory::desc(weights_dims, dt::f32, tag::any);

    // Create operation descriptor.
    auto inner_product_d = inner_product_forward::desc(
            prop_kind::forward_training, src_md,
            inner_product_weights_md, bias_md,
            dst_md);

    // Create primitive post-ops (ReLU).
    const float scale = 1.0f;
    const float alpha = 0.f;
    const float beta = 0.f;
    post_ops inner_product_ops;
    inner_product_ops.append_eltwise(
            scale, algorithm::eltwise_relu, alpha, beta);
    primitive_attr inner_product_attr;
    inner_product_attr.set_post_ops(inner_product_ops);

    // Create inner product primitive descriptor.
    auto inner_product_pd = inner_product_forward::primitive_desc(
            inner_product_d, inner_product_attr, engine);
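
    // For now, assume that the weights memory layout generated by the
    // primitive and the one provided by the user are identical.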
auto inner_product_weights_mem = user_weights_mem;
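
    // Reorder the weights in case the layout chosen by the primitive differs
    // from the user's layout; the reordered data goes into a new memory
    // object with an internal buffer.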
if (inner_product_pd.weights_desc() != user_weights_mem.get_desc()) {
inner_product_weights_mem
= memory(inner_product_pd.weights_desc(),
engine);
reorder(user_weights_mem, inner_product_weights_mem)
.execute(engine_stream, user_weights_mem,
inner_product_weights_mem);
}
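
    // Create the primitive.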
auto inner_product_prim = inner_product_forward(inner_product_pd);

    // Primitive arguments.
    std::unordered_map<int, memory> inner_product_args;
    inner_product_args.insert({DNNL_ARG_SRC, src_mem});
    inner_product_args.insert({DNNL_ARG_WEIGHTS, inner_product_weights_mem});
    inner_product_args.insert({DNNL_ARG_BIAS, bias_mem});
    inner_product_args.insert({DNNL_ARG_DST, dst_mem});

    // Primitive execution: inner product with ReLU.
    inner_product_prim.execute(engine_stream, inner_product_args);

    // Wait for the computation to finalize.
    engine_stream.wait();

    // Read data from the memory object's handle.
    read_from_dnnl_memory(dst_data.data(), dst_mem);
}
int main(int argc, char **argv) {
return handle_example_errors(
inner_product_example, parse_engine_kind(argc, argv));
}