Deep Neural Network Library (DNNL) 1.1.3
Performance library for Deep Learning
Enumerations
Common data types and enumerations

A proxy to the types in the C API.

Enumerations

enum  dnnl::scratchpad_mode { dnnl::scratchpad_mode::library = dnnl_scratchpad_mode_library, dnnl::scratchpad_mode::user = dnnl_scratchpad_mode_user }
 Scratchpad mode.
 
enum  dnnl::prop_kind {
  dnnl::prop_kind::undef = dnnl_prop_kind_undef, dnnl::prop_kind::forward_training = dnnl_forward_training, dnnl::prop_kind::forward_inference = dnnl_forward_inference, dnnl::prop_kind::forward_scoring = dnnl_forward_scoring,
  dnnl::prop_kind::forward = dnnl_forward, dnnl::prop_kind::backward = dnnl_backward, dnnl::prop_kind::backward_data = dnnl_backward_data, dnnl::prop_kind::backward_weights = dnnl_backward_weights,
  dnnl::prop_kind::backward_bias = dnnl_backward_bias
}
 Propagation kind.
 
enum  dnnl::algorithm {
  dnnl::algorithm::convolution_auto = dnnl_convolution_auto, dnnl::algorithm::convolution_direct = dnnl_convolution_direct, dnnl::algorithm::convolution_winograd = dnnl_convolution_winograd, dnnl::algorithm::deconvolution_direct = dnnl_deconvolution_direct,
  dnnl::algorithm::deconvolution_winograd = dnnl_deconvolution_winograd, dnnl::algorithm::eltwise_relu = dnnl_eltwise_relu, dnnl::algorithm::eltwise_tanh = dnnl_eltwise_tanh, dnnl::algorithm::eltwise_elu = dnnl_eltwise_elu,
  dnnl::algorithm::eltwise_square = dnnl_eltwise_square, dnnl::algorithm::eltwise_abs = dnnl_eltwise_abs, dnnl::algorithm::eltwise_sqrt = dnnl_eltwise_sqrt, dnnl::algorithm::eltwise_swish = dnnl_eltwise_swish,
  dnnl::algorithm::eltwise_linear = dnnl_eltwise_linear, dnnl::algorithm::eltwise_bounded_relu = dnnl_eltwise_bounded_relu, dnnl::algorithm::eltwise_soft_relu = dnnl_eltwise_soft_relu, dnnl::algorithm::eltwise_logistic = dnnl_eltwise_logistic,
  dnnl::algorithm::eltwise_exp = dnnl_eltwise_exp, dnnl::algorithm::eltwise_gelu = dnnl_eltwise_gelu, dnnl::algorithm::lrn_across_channels = dnnl_lrn_across_channels, dnnl::algorithm::lrn_within_channel = dnnl_lrn_within_channel,
  dnnl::algorithm::pooling_max = dnnl_pooling_max, dnnl::algorithm::pooling_avg = dnnl_pooling_avg, dnnl::algorithm::pooling_avg_include_padding = dnnl_pooling_avg_include_padding, dnnl::algorithm::pooling_avg_exclude_padding = dnnl_pooling_avg_exclude_padding,
  dnnl::algorithm::vanilla_rnn = dnnl_vanilla_rnn, dnnl::algorithm::vanilla_lstm = dnnl_vanilla_lstm, dnnl::algorithm::vanilla_gru = dnnl_vanilla_gru, dnnl::algorithm::lbr_gru = dnnl_lbr_gru,
  dnnl::algorithm::binary_add = dnnl_binary_add, dnnl::algorithm::binary_mul = dnnl_binary_mul
}
 Kinds of algorithms.
 
enum  dnnl::normalization_flags : unsigned { dnnl::normalization_flags::use_global_stats = dnnl_use_global_stats, dnnl::normalization_flags::use_scale_shift = dnnl_use_scaleshift, dnnl::normalization_flags::fuse_norm_relu = dnnl_fuse_norm_relu }
 Flags for batch normalization primitive.
 
enum  dnnl::query {
  dnnl::query::undef = dnnl_query_undef, dnnl::query::engine = dnnl_query_engine, dnnl::query::primitive_kind = dnnl_query_primitive_kind, dnnl::query::num_of_inputs_s32 = dnnl_query_num_of_inputs_s32,
  dnnl::query::num_of_outputs_s32 = dnnl_query_num_of_outputs_s32, dnnl::query::time_estimate_f64 = dnnl_query_time_estimate_f64, dnnl::query::memory_consumption_s64 = dnnl_query_memory_consumption_s64, dnnl::query::scratchpad_engine = dnnl_query_scratchpad_engine,
  dnnl::query::reorder_src_engine = dnnl_query_reorder_src_engine, dnnl::query::reorder_dst_engine = dnnl_query_reorder_dst_engine, dnnl::query::impl_info_str = dnnl_query_impl_info_str, dnnl::query::op_d = dnnl_query_op_d,
  dnnl::query::convolution_d = dnnl_query_convolution_d, dnnl::query::deconvolution_d = dnnl_query_deconvolution_d, dnnl::query::shuffle_d = dnnl_query_shuffle_d, dnnl::query::eltwise_d = dnnl_query_eltwise_d,
  dnnl::query::softmax_d = dnnl_query_softmax_d, dnnl::query::pooling_d = dnnl_query_pooling_d, dnnl::query::lrn_d = dnnl_query_lrn_d, dnnl::query::batch_normalization_d = dnnl_query_batch_normalization_d,
  dnnl::query::layer_normalization_d = dnnl_query_layer_normalization_d, dnnl::query::inner_product_d = dnnl_query_inner_product_d, dnnl::query::rnn_d = dnnl_query_rnn_d, dnnl::query::binary_d = dnnl_query_binary_d,
  dnnl::query::src_md = dnnl_query_src_md, dnnl::query::diff_src_md = dnnl_query_diff_src_md, dnnl::query::weights_md = dnnl_query_weights_md, dnnl::query::diff_weights_md = dnnl_query_diff_weights_md,
  dnnl::query::dst_md = dnnl_query_dst_md, dnnl::query::diff_dst_md = dnnl_query_diff_dst_md, dnnl::query::workspace_md = dnnl_query_workspace_md, dnnl::query::scratchpad_md = dnnl_query_scratchpad_md
}
 Primitive descriptor query specification.
 

Detailed Description

A proxy to the types in the C API.

Enumeration Type Documentation

◆ scratchpad_mode

enum dnnl::scratchpad_mode [strong]

Scratchpad mode.

Enumerator
library 

The library manages the scratchpad (default).

user 

A user shall query and provide the scratchpad memory to primitives.
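
For illustration, a minimal sketch of requesting the user-managed mode through primitive attributes (assuming the DNNL 1.1 C++ API; the helper name make_user_scratchpad_attr is hypothetical):

    #include "dnnl.hpp"

    // Request a user-managed scratchpad via primitive attributes. The
    // attribute object is then passed when creating a primitive descriptor.
    dnnl::primitive_attr make_user_scratchpad_attr() {
        dnnl::primitive_attr attr;
        attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
        return attr;
    }

A primitive descriptor created with such an attribute reports the required scratchpad size through its scratchpad memory descriptor, and the user passes the corresponding memory as the DNNL_ARG_SCRATCHPAD argument at execution time.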

◆ prop_kind

enum dnnl::prop_kind [strong]

Propagation kind.

Enumerator
undef 

Undefined propagation kind.

forward_training 

Forward data propagation (training mode).

In this mode primitives perform computations necessary for subsequent backward propagation.

forward_inference 

Forward data propagation (inference mode).

In this mode primitives perform only computations that are necessary for inference and omit computations that are necessary only for backward propagation.

forward_scoring 

Forward data propagation, alias for dnnl::prop_kind::forward_inference.

forward 

Forward data propagation, alias for dnnl::prop_kind::forward_training.

backward 

Backward propagation (with respect to all parameters).

backward_data 

Backward data propagation.

backward_weights 

Backward weights propagation.

backward_bias 

Backward bias propagation.
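
As a minimal sketch (DNNL 1.1 C++ API; the function name make_relu_pd is illustrative), the propagation kind is passed when creating an operation descriptor and decides whether the primitive prepares for a later backward pass:

    #include "dnnl.hpp"

    // Create a ReLU primitive descriptor for either training or inference.
    // forward_training keeps whatever a later backward pass will need;
    // forward_inference may skip that work.
    dnnl::eltwise_forward::primitive_desc make_relu_pd(const dnnl::engine &eng,
            const dnnl::memory::desc &data_md, bool for_training) {
        auto kind = for_training ? dnnl::prop_kind::forward_training
                                 : dnnl::prop_kind::forward_inference;
        dnnl::eltwise_forward::desc d(kind, dnnl::algorithm::eltwise_relu,
                data_md, /*alpha=*/0.f, /*beta=*/0.f);
        return dnnl::eltwise_forward::primitive_desc(d, eng);
    }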

◆ algorithm

enum dnnl::algorithm [strong]

Kinds of algorithms.

Enumerator
convolution_auto 

Convolution algorithm (either direct or Winograd) is chosen just in time.

convolution_direct 

Direct convolution.

convolution_winograd 

Winograd convolution.

deconvolution_direct 

Direct deconvolution.

deconvolution_winograd 

Winograd deconvolution.

eltwise_relu 

Eltwise: ReLU.

eltwise_tanh 

Eltwise: hyperbolic tangent non-linearity (tanh).

eltwise_elu 

Eltwise: parametric exponential linear unit (elu).

eltwise_square 

Eltwise: square.

eltwise_abs 

Eltwise: abs.

eltwise_sqrt 

Eltwise: square root.

eltwise_swish 

Eltwise: swish, x * sigmoid(a * x).

eltwise_linear 

Eltwise: linear.

eltwise_bounded_relu 

Eltwise: bounded_relu.

eltwise_soft_relu 

Eltwise: soft_relu.

eltwise_logistic 

Eltwise: logistic.

eltwise_exp 

Eltwise: exponent.

eltwise_gelu 

Eltwise: gelu.

lrn_across_channels 

Local response normalization (LRN) across multiple channels.

lrn_within_channel 

LRN within a single channel.

pooling_max 

Max pooling.

pooling_avg 

Average pooling exclude padding, alias for dnnl::algorithm::pooling_avg_exclude_padding.

pooling_avg_include_padding 

Average pooling include padding.

pooling_avg_exclude_padding 

Average pooling exclude padding.

vanilla_rnn 

RNN cell.

vanilla_lstm 

LSTM cell.

vanilla_gru 

GRU cell.

lbr_gru 

GRU cell with linear before reset.

A modification of the original GRU cell. It differs from dnnl::algorithm::vanilla_gru in how the new memory gate is calculated:

\[ c_t = \tanh(W_c x_t + b_{c_x} + r_t \cdot (U_c h_{t-1} + b_{c_h})) \]

The primitive expects four biases on input: \([b_{u}, b_{r}, b_{c_x}, b_{c_h}]\).

binary_add 

Binary add.

binary_mul 

Binary mul.
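
For example, a minimal sketch (DNNL 1.1 C++ API; make_pooling_desc is an illustrative helper) in which only the algorithm enumerator changes between the max and average pooling variants:

    #include "dnnl.hpp"

    // Build a 2x2, stride-2 pooling descriptor; the algorithm enumerator
    // alone selects the pooling variant.
    dnnl::pooling_forward::desc make_pooling_desc(
            const dnnl::memory::desc &src_md, const dnnl::memory::desc &dst_md,
            bool use_max) {
        auto alg = use_max ? dnnl::algorithm::pooling_max
                           : dnnl::algorithm::pooling_avg_exclude_padding;
        return dnnl::pooling_forward::desc(dnnl::prop_kind::forward_inference,
                alg, src_md, dst_md,
                /*strides=*/{2, 2}, /*kernel=*/{2, 2},
                /*padding_l=*/{0, 0}, /*padding_r=*/{0, 0});
    }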

◆ normalization_flags

enum dnnl::normalization_flags : unsigned [strong]

Flags for batch normalization primitive.

Enumerator
use_global_stats 

Use global statistics.

If specified:

  • on forward propagation, the mean and variance provided by the user are used as inputs
  • on backward propagation, the amount of computation is reduced, since the mean and variance are treated as constants

If not specified:

  • on forward propagation, the mean and variance are computed and stored as outputs
  • on backward propagation, the full derivative with respect to the data is computed

use_scale_shift 

Use scale and shift parameters.

If specified:

  • on forward propagation, scale and shift (also known as scale and bias) are used for the batch normalization results
  • on backward propagation (for prop_kind == dnnl::prop_kind::backward), the diff with respect to scale and shift is also computed (hence one extra output is used)

If not specified:

  • on backward propagation, prop_kind == dnnl::prop_kind::backward_data has the same behavior as prop_kind == dnnl::prop_kind::backward

fuse_norm_relu 

Fuse with ReLU.

If specified:

  • on inference, this option behaves the same as if the primitive were fused with ReLU via the post-ops API
  • on training, the primitive requires a workspace (needed to be able to perform the backward pass)
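
The flags are bit-flags and can be combined with operator|. A minimal sketch (DNNL 1.1 C++ API; make_bnorm_pd is an illustrative helper):

    #include "dnnl.hpp"

    // Batch normalization for training with learnable scale/shift and a
    // fused ReLU; in this configuration the primitive also requires a
    // workspace (see the workspace memory descriptor query).
    dnnl::batch_normalization_forward::primitive_desc make_bnorm_pd(
            const dnnl::engine &eng, const dnnl::memory::desc &data_md) {
        auto flags = dnnl::normalization_flags::use_scale_shift
                | dnnl::normalization_flags::fuse_norm_relu;
        dnnl::batch_normalization_forward::desc d(
                dnnl::prop_kind::forward_training, data_md,
                /*epsilon=*/1e-5f, flags);
        return dnnl::batch_normalization_forward::primitive_desc(d, eng);
    }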

◆ query

enum dnnl::query [strong]

Primitive descriptor query specification.

In general, these queries are rarely needed directly from the C++ API, since the most commonly used ones are implemented as class members (for instance, a query for the source memory descriptor).

For more information see dnnl_query_t.
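
For example, a minimal sketch (DNNL 1.1 C++ API; the function name inspect is illustrative) showing the generic query_md() call next to the equivalent convenience members:

    #include "dnnl.hpp"

    // The generic query and the typed class member return the same source
    // memory descriptor; scratchpad needs are also exposed as a query.
    void inspect(const dnnl::eltwise_forward::primitive_desc &pd) {
        dnnl::memory::desc src = pd.query_md(dnnl::query::src_md);
        dnnl::memory::desc same_src = pd.src_desc();

        size_t scratchpad_bytes = pd.scratchpad_desc().get_size();
        (void)src; (void)same_src; (void)scratchpad_bytes;
    }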

Enumerator
undef 

no query

engine 

execution engine

primitive_kind 

primitive kind

num_of_inputs_s32 

number of inputs expected

num_of_outputs_s32 

number of outputs expected

time_estimate_f64 

runtime estimation (seconds), unimplemented

memory_consumption_s64 

memory consumption (bytes)

extra (scratch) memory, in addition to the memory of all inputs and outputs

See also
Primitive Attributes: Scratchpad
scratchpad_engine 

scratchpad engine

engine to be used for creating scratchpad memory

reorder_src_engine 

reorder source engine

reorder_dst_engine 

reorder destination engine

impl_info_str 

implementation name

op_d 

op descriptor

convolution_d 

convolution descriptor

deconvolution_d 

deconvolution descriptor

shuffle_d 

shuffle descriptor

eltwise_d 

eltwise descriptor

softmax_d 

softmax descriptor

pooling_d 

pooling descriptor

lrn_d 

lrn descriptor

batch_normalization_d 

batch normalization descriptor

layer_normalization_d 

layer normalization descriptor

inner_product_d 

inner product descriptor

rnn_d 

rnn descriptor

binary_d 

binary descriptor

src_md 

source memory desc

diff_src_md 

source gradient memory desc

weights_md 

weights memory desc

diff_weights_md 

weights grad. memory desc

dst_md 

destination memory desc

diff_dst_md 

destination grad. memory desc

workspace_md 

workspace memory desc

scratchpad_md 

scratchpad memory desc