// This C++ API example demonstrates the basics of the Intel MKL-DNN programming model.
#include <cmath>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

#include "mkldnn.hpp"
void cpu_getting_started_tutorial() {
stream cpu_stream(cpu_engine);
const int N = 1, H = 13, W = 13, C = 3;
const int stride_N = H * W * C;
const int stride_H = W * C;
const int stride_W = C;
const int stride_C = 1;
auto offset = [=](int n, int h, int w, int c)
{ return n * stride_N + h * stride_H + w * stride_W + c * stride_C; };
const int image_size = N * H * W * C;
std::vector<float> image(image_size);
for (int n = 0; n < N; ++n)
for (int h = 0; h < H; ++h)
for (int w = 0; w < W; ++w)
for (int c = 0; c < C; ++c) {
int off = offset(n, h, w, c);
image[off] = -std::cos(off / 10.f);
}
{N, C, H, W},
);
{N, C, H, W},
{stride_N, stride_C, stride_H, stride_W}
);
throw std::string("memory descriptor initialization mismatch");
0.f,
0.f
);
relu_d,
cpu_engine
);
relu.execute(
cpu_stream,
{
{MKLDNN_ARG_SRC, src_mem},
{MKLDNN_ARG_DST, dst_mem},
});
for (int n = 0; n < N; ++n)
for (int h = 0; h < H; ++h)
for (int w = 0; w < W; ++w)
for (int c = 0; c < C; ++c) {
int off = offset(n, h, w, c);
float expected = image[off] < 0 ? 0.f : image[off];
if (relu_image[off] != expected) {
std::stringstream ss;
ss << "Unexpected output at index("
<< n << ", " << c << ", " << h << ", " << w << "): "
<< "Expect " << expected << " "
<< "Got " << relu_image[off];
throw ss.str();
}
}
}
int main(int argc, char **argv) {
try {
cpu_getting_started_tutorial();
std::cerr <<
"Intel MKL-DNN error: " << e.
what() << std::endl
<<
"Error status: " << mkldnn_status2str(e.
status) << std::endl;
return 1;
} catch (std::string &e) {
std::cerr << "Error in the example: " << e << std::endl;
return 2;
}
std::cout << "Example passes" << std::endl;
return 0;
}