.. index:: pair: example; cross_engine_reorder.cpp .. _doxid-cross_engine_reorder_8cpp-example: cross_engine_reorder.cpp ======================== This C++ API example demonstrates programming flow when reordering memory between CPU and GPU engines. Annotated version: :ref:`Reorder between CPU and GPU engines ` .. ref-code-block:: cpp /******************************************************************************* * Copyright 2019-2022 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include <iostream> #include <stdexcept> #include <vector> // [Prologue] #include "example_utils.hpp" #include "oneapi/dnnl/dnnl.hpp" #include "example_utils.hpp" using namespace :ref:`dnnl `; using namespace :ref:`std `; // [Prologue] void fill(:ref:`memory ` &mem, const :ref:`memory::dims ` &adims) { std::vector<float> array(product(adims)); for (size_t e = 0; e < array.size(); ++e) { array[e] = e % 7 ? 
1.0f : -1.0f; } write_to_dnnl_memory(array.data(), mem); } int find_negative(:ref:`memory ` &mem, const :ref:`memory::dims ` &adims) { int negs = 0; size_t nelems = product(adims); std::vector<float> array(nelems); read_from_dnnl_memory(array.data(), mem); for (size_t e = 0; e < nelems; ++e) negs += array[e] < 0.0f; return negs; } void cross_engine_reorder_tutorial() { // [Initialize engine] auto cpu_engine = :ref:`engine `(validate_engine_kind(:ref:`engine::kind::cpu `), 0); auto gpu_engine = :ref:`engine `(validate_engine_kind(:ref:`engine::kind::gpu `), 0); // [Initialize engine] // [Initialize stream] auto stream_gpu = :ref:`stream `(gpu_engine, :ref:`stream::flags::in_order `); // [Initialize stream] // [reorder cpu2gpu] const auto tz = :ref:`memory::dims ` {2, 16, 1, 1}; auto m_cpu = :ref:`memory `({{tz}, :ref:`memory::data_type::f32 `, :ref:`memory::format_tag::nchw `}, cpu_engine); auto m_gpu = :ref:`memory `({{tz}, :ref:`memory::data_type::f32 `, :ref:`memory::format_tag::nchw `}, gpu_engine); fill(m_cpu, tz); auto r1 = :ref:`reorder `(m_cpu, m_gpu); // [reorder cpu2gpu] // [Create a ReLU primitive] // ReLU primitive descriptor, which corresponds to a particular // implementation in the library. Specify engine type for the ReLU // primitive. Use a GPU engine here. 
auto relu_pd = :ref:`eltwise_forward::primitive_desc `(gpu_engine, :ref:`prop_kind::forward `, :ref:`algorithm::eltwise_relu `, m_gpu.:ref:`get_desc `(), m_gpu.:ref:`get_desc `(), 0.0f); // ReLU primitive auto relu = :ref:`eltwise_forward `(relu_pd); // [Create a ReLU primitive] // [reorder gpu2cpu] auto r2 = :ref:`reorder `(m_gpu, m_cpu); // [reorder gpu2cpu] // [Execute primitives] // wrap source data from CPU to GPU r1.execute(stream_gpu, m_cpu, m_gpu); // Execute ReLU on a GPU stream relu.execute(stream_gpu, {{:ref:`DNNL_ARG_SRC `, m_gpu}, {:ref:`DNNL_ARG_DST `, m_gpu}}); // Get result data from GPU to CPU r2.execute(stream_gpu, m_gpu, m_cpu); stream_gpu.wait(); // [Execute primitives] // [Check the results] if (find_negative(m_cpu, tz) != 0) throw std::logic_error( "Unexpected output, find a negative value after the ReLU " "execution."); // [Check the results] } int main(int argc, char **argv) { return handle_example_errors({:ref:`engine::kind::cpu `, :ref:`engine::kind::gpu `}, cross_engine_reorder_tutorial); }