Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN)  0.21.0
Performance library for Deep Learning
Generic

Classes

struct  mkldnn_version_t
 Intel(R) MKL-DNN Version type.
 

Enumerations

enum  mkldnn_status_t {
  mkldnn_success = 0, mkldnn_out_of_memory = 1, mkldnn_try_again = 2, mkldnn_invalid_arguments = 3,
  mkldnn_not_ready = 4, mkldnn_unimplemented = 5, mkldnn_iterator_ends = 6, mkldnn_runtime_error = 7,
  mkldnn_not_required = 8
}
 Status values returned by Intel(R) MKL-DNN functions.
 
enum  mkldnn_data_type_t {
  mkldnn_data_type_undef = 0, mkldnn_f32 = 1, mkldnn_s32 = 2, mkldnn_s16 = 4,
  mkldnn_s8 = 5, mkldnn_u8 = 6, mkldnn_bf16 = 7
}
 Data type specification.
 
enum  mkldnn_round_mode_t { mkldnn_round_nearest = 1, mkldnn_round_down = 2 }
 Rounding mode.
 
enum  mkldnn_memory_format_t {
  mkldnn_format_undef = 0, mkldnn_any, mkldnn_blocked, mkldnn_x,
  mkldnn_nc, mkldnn_ncw, mkldnn_nwc, mkldnn_nchw,
  mkldnn_nhwc, mkldnn_chwn, mkldnn_ncdhw, mkldnn_ndhwc,
  mkldnn_oi, mkldnn_io, mkldnn_oiw, mkldnn_owi,
  mkldnn_wio, mkldnn_oihw, mkldnn_hwio, mkldnn_ohwi,
  mkldnn_ihwo, mkldnn_iohw, mkldnn_oidhw, mkldnn_dhwio,
  mkldnn_odhwi, mkldnn_goiw, mkldnn_goihw, mkldnn_hwigo,
  mkldnn_giohw, mkldnn_goidhw, mkldnn_ntc, mkldnn_tnc,
  mkldnn_ldsnc, mkldnn_ldigo, mkldnn_ldgoi, mkldnn_ldgo,
  mkldnn_nCw4c, mkldnn_nCw8c, mkldnn_nCw16c, mkldnn_nChw4c,
  mkldnn_nChw8c, mkldnn_nChw16c, mkldnn_nCdhw4c, mkldnn_nCdhw8c,
  mkldnn_nCdhw16c, mkldnn_Owi4o, mkldnn_OIw4i4o, mkldnn_Owi8o,
  mkldnn_OIw8i8o, mkldnn_OIw8o8i, mkldnn_OIw16i16o, mkldnn_OIw16o16i,
  mkldnn_Oiw4o, mkldnn_Oiw16o, mkldnn_Owi16o, mkldnn_OIw8i16o2i,
  mkldnn_OIw8o16i2o, mkldnn_IOw8o16i2o, mkldnn_IOw16o16i, mkldnn_OIw4i16o4i,
  mkldnn_OIw4i16o4i_s8s8, mkldnn_hwio_s8s8, mkldnn_oIhw8i, mkldnn_oIhw16i,
  mkldnn_OIhw4i4o, mkldnn_OIhw8i8o, mkldnn_OIhw16i16o, mkldnn_OIhw4i16o4i,
  mkldnn_OIhw4i16o4i_s8s8, mkldnn_OIhw8i16o2i, mkldnn_IOhw8i16o2i, mkldnn_OIhw8o16i2o,
  mkldnn_IOhw8o16i2o, mkldnn_OIhw8o8i, mkldnn_OIhw16o16i, mkldnn_IOhw16o16i,
  mkldnn_Oihw8o, mkldnn_Oihw4o, mkldnn_Oihw16o, mkldnn_Ohwi8o,
  mkldnn_Ohwi4o, mkldnn_Ohwi16o, mkldnn_OhIw16o4i, mkldnn_oIdhw8i,
  mkldnn_oIdhw16i, mkldnn_OIdhw4i4o, mkldnn_Odhwi4o, mkldnn_OIdhw8i8o,
  mkldnn_OIdhw8o8i, mkldnn_Odhwi8o, mkldnn_OIdhw16i16o, mkldnn_OIdhw16o16i,
  mkldnn_Oidhw4o, mkldnn_Oidhw16o, mkldnn_Odhwi16o, mkldnn_OIdhw8i16o2i,
  mkldnn_OIdhw8o16i2o, mkldnn_IOdhw8o16i2o, mkldnn_gOwi4o, mkldnn_gOIw4i4o,
  mkldnn_gOwi8o, mkldnn_gOIw8o8i, mkldnn_gOIw8i8o, mkldnn_gOIw16i16o,
  mkldnn_gOIw16o16i, mkldnn_gOiw4o, mkldnn_gOiw16o, mkldnn_gOwi16o,
  mkldnn_gOIw8i16o2i, mkldnn_gOIw8o16i2o, mkldnn_gIOw8o16i2o, mkldnn_gIOw16o16i,
  mkldnn_gOIw4i16o4i, mkldnn_gOIw4i16o4i_s8s8, mkldnn_Goiw16g, mkldnn_Goiw16g_s8s8,
  mkldnn_hwigo_s8s8, mkldnn_gOIhw4i4o, mkldnn_gOIhw8i8o, mkldnn_gOIhw16i16o,
  mkldnn_gOIhw4i16o4i, mkldnn_gOIhw4i16o4i_s8s8, mkldnn_gOIhw2i8o4i, mkldnn_gOIhw2i8o4i_s8s8,
  mkldnn_gOIhw8i16o2i, mkldnn_gIOhw8i16o2i, mkldnn_gOIhw8o16i2o, mkldnn_gIOhw8o16i2o,
  mkldnn_gOIhw4o4i, mkldnn_gOIhw4o4i_s8s8, mkldnn_gOIhw8o8i, mkldnn_gOIhw16o16i,
  mkldnn_gIOhw16o16i, mkldnn_gOihw8o, mkldnn_gOihw4o, mkldnn_gOihw16o,
  mkldnn_gOhwi8o, mkldnn_gOhwi4o, mkldnn_gOhwi16o, mkldnn_Goihw8g,
  mkldnn_Goihw16g, mkldnn_Goihw16g_s8s8, mkldnn_gOhIw16o4i, mkldnn_gOIdhw4i4o,
  mkldnn_gOdhwi4o, mkldnn_gOIdhw8i8o, mkldnn_gOIdhw8o8i, mkldnn_gOdhwi8o,
  mkldnn_gOIdhw8i16o2i, mkldnn_gOIdhw8o16i2o, mkldnn_gIOdhw8o16i2o, mkldnn_gOIdhw16i16o,
  mkldnn_gOIdhw16o16i, mkldnn_gOidhw4o, mkldnn_gOidhw16o, mkldnn_gOdhwi16o,
  mkldnn_wino_fmt, mkldnn_rnn_packed, mkldnn_format_last
}
 Memory format specification.
 
enum  mkldnn_padding_kind_t { mkldnn_padding_zero }
 Kinds of padding.
 
enum  mkldnn_prop_kind_t {
  mkldnn_prop_kind_undef = 0, mkldnn_forward_training = 64, mkldnn_forward_inference = 96, mkldnn_forward_scoring = mkldnn_forward_inference,
  mkldnn_forward = mkldnn_forward_training, mkldnn_backward = 128, mkldnn_backward_data = 160, mkldnn_backward_weights = 192,
  mkldnn_backward_bias = 193
}
 Kinds of propagation.
 
enum  mkldnn_primitive_kind_t {
  mkldnn_undefined_primitive, mkldnn_memory, mkldnn_view, mkldnn_reorder,
  mkldnn_shuffle, mkldnn_concat, mkldnn_concat_inplace, mkldnn_sum,
  mkldnn_convolution, mkldnn_deconvolution, mkldnn_eltwise, mkldnn_softmax,
  mkldnn_pooling, mkldnn_lrn, mkldnn_batch_normalization, mkldnn_inner_product,
  mkldnn_rnn
}
 Kinds of primitives.
 
enum  mkldnn_alg_kind_t {
  mkldnn_alg_kind_undef, mkldnn_convolution_direct = 0x1, mkldnn_convolution_winograd = 0x2, mkldnn_convolution_auto = 0x3,
  mkldnn_deconvolution_direct = 0xa, mkldnn_deconvolution_winograd = 0xb, mkldnn_eltwise_relu = 0x1f, mkldnn_eltwise_tanh = 0x2f,
  mkldnn_eltwise_elu = 0x3f, mkldnn_eltwise_square = 0x4f, mkldnn_eltwise_abs = 0x5f, mkldnn_eltwise_sqrt = 0x6f,
  mkldnn_eltwise_linear = 0x7f, mkldnn_eltwise_bounded_relu = 0x8f, mkldnn_eltwise_soft_relu = 0x9f, mkldnn_eltwise_logistic = 0xaf,
  mkldnn_eltwise_exp = 0xbf, mkldnn_eltwise_gelu = 0xcf, mkldnn_pooling_max = 0x1ff, mkldnn_pooling_avg_include_padding = 0x2ff,
  mkldnn_pooling_avg_exclude_padding = 0x3ff, mkldnn_pooling_avg = mkldnn_pooling_avg_exclude_padding, mkldnn_lrn_across_channels = 0xaff, mkldnn_lrn_within_channel = 0xbff,
  mkldnn_vanilla_rnn = 0x1fff, mkldnn_vanilla_lstm = 0x2fff, mkldnn_vanilla_gru = 0x3fff, mkldnn_gru_linear_before_reset = 0x4fff
}
 Kinds of algorithms.
 
enum  mkldnn_batch_normalization_flag_t { mkldnn_use_global_stats = 0x1U, mkldnn_use_scaleshift = 0x2U, mkldnn_fuse_bn_relu = 0x4U }
 Flags for the batch-normalization primitive.
 

Detailed Description

Enumeration Type Documentation

◆ mkldnn_status_t

Status values returned by Intel(R) MKL-DNN functions.

Enumerator
mkldnn_success 

The operation was successful.

mkldnn_out_of_memory 

The operation failed due to an out-of-memory condition.

mkldnn_try_again 

The operation failed and should be retried.

mkldnn_invalid_arguments 

The operation failed because of incorrect function arguments.

mkldnn_not_ready 

The operation failed because a primitive was not ready for execution.

mkldnn_unimplemented 

The operation failed because requested functionality is not implemented.

mkldnn_iterator_ends 

Primitive iterator passed over last primitive descriptor.

mkldnn_runtime_error 

Primitive or engine failed on execution.

mkldnn_not_required 

Queried element is not required for given primitive.
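
As an illustration only (the helper and the CHECK macro below are not part of the library), application code might map these status values to messages and stop on the first failure:

#include <stdio.h>
#include <stdlib.h>
#include "mkldnn.h"

/* Illustrative helper: translate an mkldnn_status_t into a readable string.
   Only the enumerators documented above are used. */
static const char *status_to_string(mkldnn_status_t s) {
    switch (s) {
    case mkldnn_success: return "success";
    case mkldnn_out_of_memory: return "out of memory";
    case mkldnn_try_again: return "try again";
    case mkldnn_invalid_arguments: return "invalid arguments";
    case mkldnn_not_ready: return "not ready";
    case mkldnn_unimplemented: return "unimplemented";
    case mkldnn_iterator_ends: return "iterator ends";
    case mkldnn_runtime_error: return "runtime error";
    case mkldnn_not_required: return "not required";
    default: return "unknown status";
    }
}

/* Illustrative macro: abort when a call does not return mkldnn_success. */
#define CHECK(call) do { \
    mkldnn_status_t s_ = (call); \
    if (s_ != mkldnn_success) { \
        fprintf(stderr, "%s failed: %s\n", #call, status_to_string(s_)); \
        exit(EXIT_FAILURE); \
    } \
} while (0)

A call site would then wrap library calls, for example CHECK(mkldnn_memory_desc_init(&md, 4, dims, mkldnn_f32, mkldnn_nchw)).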

◆ mkldnn_data_type_t

Data type specification.

Enumerator
mkldnn_data_type_undef 

Undefined data type, used for empty memory descriptors.

mkldnn_f32 

32-bit/single-precision floating point.

mkldnn_s32 

32-bit signed integer.

mkldnn_s16 

16-bit signed integer.

mkldnn_s8 

8-bit signed integer.

mkldnn_u8 

8-bit unsigned integer.

mkldnn_bf16 

bfloat16 (16-bit brain floating point).
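
For reference, the element sizes implied by these types can be captured in a small helper like the one below (illustrative only, not a library function):

#include <stddef.h>
#include "mkldnn.h"

/* Illustrative helper: size in bytes of one element of the given data type. */
static size_t data_type_size(mkldnn_data_type_t dt) {
    switch (dt) {
    case mkldnn_f32: return 4;  /* 32-bit float */
    case mkldnn_s32: return 4;  /* 32-bit signed integer */
    case mkldnn_s16: return 2;  /* 16-bit signed integer */
    case mkldnn_s8: return 1;   /* 8-bit signed integer */
    case mkldnn_u8: return 1;   /* 8-bit unsigned integer */
    case mkldnn_bf16: return 2; /* 16-bit bfloat16 */
    case mkldnn_data_type_undef:
    default: return 0;
    }
}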

◆ mkldnn_round_mode_t

Rounding mode.

Enumerator
mkldnn_round_nearest 

Round to nearest.

mkldnn_round_down 

Round down.

◆ mkldnn_memory_format_t

Memory format specification.

Intel MKL-DNN formats describe the physical data layout. The physical layout is described as a sequence of the dimensions as they are laid out in memory (from the outermost to the innermost). Note that this order does not affect the logical order of the dimensions, which is kept in the dims field of the mkldnn_memory_desc_t structure. The logical order of the dimensions is determined by the tensor type.

For example, a CNN 5D tensor always has its logical dimensions in the order (batch, channels, depth, height, width), while the physical layout might be mkldnn_ncdhw or mkldnn_ndhwc:

#include "mkldnn.h"

int batch = 2, channels = 16, depth = 13, height = 13, width = 13;
int ndims = 5; // 5D tensor
mkldnn_dims_t dims = {batch, channels, depth, height, width};

// Describe the same logical tensor with two different physical layouts.
mkldnn_memory_desc_t data_in_ncdhw;
mkldnn_memory_desc_init(&data_in_ncdhw, ndims, dims, mkldnn_f32, mkldnn_ncdhw);

// Note that in both cases the dims passed are the same.
mkldnn_memory_desc_t data_in_ndhwc;
mkldnn_memory_desc_init(&data_in_ndhwc, ndims, dims, mkldnn_f32, mkldnn_ndhwc);

The following notation applies to memory format names:

  • 'n' denotes the mini-batch dimension
  • 'c' denotes a channels dimension
  • When there are multiple channel dimensions (for example, in a convolution weights tensor), 'i' and 'o' denote the dimensions of input and output channels
  • 'd', 'h', and 'w' denote spatial depth, height, and width respectively
  • Upper-case letters indicate that the data is laid out in blocks for a particular dimension. In such cases, the format name contains both upper- and lower-case letters for that dimension with a lower-case letter preceded by the block size. For example: 'mkldnn_nChw8c' describes a format where the outermost dimension is mini-batch, followed by the channel block number, followed by the spatial height and width, and finally followed by 8-element channel blocks.
Note
Channel designations can be different. For example, both the 'mkldnn_nc' and 'mkldnn_io' formats can be used to describe a 2D tensor.
See also
Understanding Memory Formats
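
To make the blocking notation concrete, the sketch below computes the physical offset (in elements) of the logical element (n, c, h, w) for the plain mkldnn_nchw layout and for the blocked mkldnn_nChw8c layout described above. It is purely illustrative and assumes the channel count C is padded to a multiple of 8; real code should derive offsets from the memory descriptor (strides and blocking) rather than hard-coding a layout.

#include <stddef.h>

// Offset of element (n, c, h, w) in a plain nchw layout.
size_t offset_nchw(int n, int c, int h, int w, int C, int H, int W) {
    return (((size_t)n * C + c) * H + h) * W + w;
}

// Offset of element (n, c, h, w) in an nChw8c layout: channels are split into
// blocks of 8, the block index follows the mini-batch dimension, and the
// position within the block is the innermost dimension.
size_t offset_nChw8c(int n, int c, int h, int w, int C, int H, int W) {
    int cb = c / 8, ci = c % 8; // channel block and position within the block
    return ((((size_t)n * (C / 8) + cb) * H + h) * W + w) * 8 + ci;
}
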
Enumerator
mkldnn_format_undef 

Undefined memory format, used for empty memory descriptors.

mkldnn_any 

Unspecified format.

The primitive selects a format automatically.

mkldnn_blocked 

A tensor in a generic format described by the stride and blocking values in each dimension.

See mkldnn_blocking_desc_t for more information.

mkldnn_x 

1D data tensor.

mkldnn_nc 

2D data tensor.

mkldnn_ncw 

3D data tensor with the physical layout ncw.

Logical dimensions come in the order: (n, c, w)

mkldnn_nwc 

3D data tensor with the physical layout nwc.

Logical dimensions come in the order: (n, c, w)

mkldnn_nchw 

4D data tensor with the physical layout nchw, used in Caffe.

Logical dimensions come in the order: (n, c, h, w)

mkldnn_nhwc 

4D data tensor with the physical layout nhwc, used in TensorFlow.

Logical dimensions come in the order: (n, c, h, w)

mkldnn_chwn 

4D data tensor with the physical layout chwn, used in Neon.

Logical dimensions come in the order: (n, c, h, w)

mkldnn_ncdhw 

5D data tensor with the physical layout ncdhw.

Logical dimensions come in the order: (n, c, d, h, w)

mkldnn_ndhwc 

5D data tensor with the physical layout ndhwc, used in TensorFlow.

Logical dimensions come in the order: (n, c, d, h, w)

mkldnn_oi 

2D weights tensor with physical layout oi.

Logical dimensions come in the order: (o, i)

mkldnn_io 

2D weights tensor with physical layout io.

Logical dimensions come in the order: (o, i)

mkldnn_oiw 

3D weights tensor with physical layout oiw.

Logical dimensions come in the order: (o, i, w)

mkldnn_owi 

3D weights tensor with physical layout owi.

Logical dimensions come in the order: (o, i, w)

mkldnn_wio 

3D weights tensor with physical layout wio.

Logical dimensions come in the order: (o, i, w)

mkldnn_oihw 

4D weights tensor with physical layout oihw, used in Caffe.

Logical dimensions come in the order: (o, i, h, w)

mkldnn_hwio 

4D weights tensor with physical layout hwio, used in TensorFlow.

Logical dimensions come in the order: (o, i, h, w)

mkldnn_ohwi 

4D weights tensor with physical layout ohwi.

Logical dimensions come in the order: (o, i, h, w)

mkldnn_ihwo 

4D weights tensor with physical layout ihwo.

Logical dimensions come in the order: (o, i, h, w)

mkldnn_iohw 

4D weights tensor with physical layout iohw.

Logical dimensions come in the order: (o, i, h, w)

mkldnn_oidhw 

5D weights tensor with physical layout oidhw, used in Caffe.

Logical dimensions come in the order: (o, i, d, h, w)

mkldnn_dhwio 

5D weights tensor with physical layout dhwio, used in TensorFlow.

Logical dimensions come in the order: (o, i, d, h, w)

mkldnn_odhwi 

5D weights tensor with physical layout odhwi.

Logical dimensions come in the order: (o, i, d, h, w)

mkldnn_goiw 

4D grouped weights tensor with the physical layout goiw.

Logical dimensions come in the order: (g, o, i, w)

mkldnn_goihw 

5D grouped weights tensor with the physical layout goihw, used in Caffe.

Logical dimensions come in the order: (g, o, i, h, w)

mkldnn_hwigo 

5D grouped weights tensor with the physical layout hwigo, used in TensorFlow.

Logical dimensions come in the order: (g, o, i, h, w)

mkldnn_giohw 

5D grouped weights tensor with the physical layout giohw.

Logical dimensions come in the order: (g, o, i, h, w)

mkldnn_goidhw 

6D grouped weights tensor with the physical layout goidhw, used in Caffe.

Logical dimensions come in the order: (g, o, i, d, h, w)

mkldnn_ntc 

3D RNN data tensor in the format (batch, seq_length, input channels).

mkldnn_tnc 

3D RNN data tensor in the format (seq_length, batch, input channels).

mkldnn_ldsnc 

5D RNN states tensor in the format (num_layers, num_directions, num_states, batch, state channels).

mkldnn_ldigo 

5D RNN weights tensor in the format (num_layers, num_directions, input_channels, num_gates, output_channels).

  • For LSTM cells, the gates order is input, forget, candidate and output gate.
  • For GRU cells, the gates order is update, reset and output gate.
mkldnn_ldgoi 

5D RNN weights tensor in the format (num_layers, num_directions, num_gates, output_channels, input_channels).

  • For LSTM cells, the gates order is input, forget, candidate and output gate.
  • For GRU cells, the gates order is update, reset and output gate.
mkldnn_ldgo 

4D RNN bias tensor in the format (num_layers, num_directions, num_gates, output_channels).

  • For LSTM cells, the gates order is input, forget, candidate and output gate.
  • For GRU cells, the gates order is update, reset and output gate.
mkldnn_nCw4c 

blocked data format

mkldnn_nCw8c 

blocked data format

mkldnn_nCw16c 

blocked data format

mkldnn_nChw4c 

blocked data format

mkldnn_nChw8c 

blocked data format

mkldnn_nChw16c 

blocked data format

mkldnn_nCdhw4c 

blocked data format

mkldnn_nCdhw8c 

blocked data format

mkldnn_nCdhw16c 

blocked data format

mkldnn_Owi4o 

blocked weights format

mkldnn_OIw4i4o 

blocked weights format

mkldnn_Owi8o 

blocked weights format

mkldnn_OIw8i8o 

blocked weights format

mkldnn_OIw8o8i 

blocked weights format

mkldnn_OIw16i16o 

blocked weights format

mkldnn_OIw16o16i 

blocked weights format

mkldnn_Oiw4o 

blocked weights format

mkldnn_Oiw16o 

blocked weights format

mkldnn_Owi16o 

blocked weights format

mkldnn_OIw8i16o2i 

blocked weights format

mkldnn_OIw8o16i2o 

blocked weights format

mkldnn_IOw8o16i2o 

blocked weights format

mkldnn_IOw16o16i 

blocked weights format

mkldnn_OIw4i16o4i 

blocked weights format

mkldnn_OIw4i16o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels and containing the values: O[i:0,OC] = -128 * SUM(j:0,IC;w:0,W)(weights(i,j,w))

mkldnn_hwio_s8s8 

weights format with additional buffer size equal to the number of output channels and containing the values: O[i:0,OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_oIhw8i 

blocked weights format

mkldnn_oIhw16i 

blocked weights format

mkldnn_OIhw4i4o 

blocked weights format

mkldnn_OIhw8i8o 

blocked weights format

mkldnn_OIhw16i16o 

blocked weights format

mkldnn_OIhw4i16o4i 

blocked weights format

mkldnn_OIhw4i16o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels and containing the values: O[i:0,OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_OIhw8i16o2i 

blocked weights format

mkldnn_IOhw8i16o2i 

blocked weights format

mkldnn_OIhw8o16i2o 

blocked weights format

mkldnn_IOhw8o16i2o 

blocked weights format

mkldnn_OIhw8o8i 

blocked weights format

mkldnn_OIhw16o16i 

blocked weights format

mkldnn_IOhw16o16i 

blocked weights format

mkldnn_Oihw8o 

blocked weights format

mkldnn_Oihw4o 

blocked weights format

mkldnn_Oihw16o 

blocked weights format

mkldnn_Ohwi8o 

blocked weights format

mkldnn_Ohwi4o 

blocked weights format

mkldnn_Ohwi16o 

blocked weights format

mkldnn_OhIw16o4i 

blocked weights format

mkldnn_oIdhw8i 

blocked weights format

mkldnn_oIdhw16i 

blocked weights format

mkldnn_OIdhw4i4o 

blocked weights format

mkldnn_Odhwi4o 

blocked weights format

mkldnn_OIdhw8i8o 

blocked weights format

mkldnn_OIdhw8o8i 

blocked weights format

mkldnn_Odhwi8o 

blocked weights format

mkldnn_OIdhw16i16o 

blocked weights format

mkldnn_OIdhw16o16i 

blocked weights format

mkldnn_Oidhw4o 

blocked weights format

mkldnn_Oidhw16o 

blocked weights format

mkldnn_Odhwi16o 

blocked weights format

mkldnn_OIdhw8i16o2i 

blocked weights format

mkldnn_OIdhw8o16i2o 

blocked weights format

mkldnn_IOdhw8o16i2o 

blocked weights format

mkldnn_gOwi4o 

blocked weights format

mkldnn_gOIw4i4o 

blocked weights format

mkldnn_gOwi8o 

blocked weights format

mkldnn_gOIw8o8i 

blocked weights format

mkldnn_gOIw8i8o 

blocked weights format

mkldnn_gOIw16i16o 

blocked weights format

mkldnn_gOIw16o16i 

blocked weights format

mkldnn_gOiw4o 

blocked weights format

mkldnn_gOiw16o 

blocked weights format

mkldnn_gOwi16o 

blocked weights format

mkldnn_gOIw8i16o2i 

blocked weights format

mkldnn_gOIw8o16i2o 

blocked weights format

mkldnn_gIOw8o16i2o 

blocked weights format

mkldnn_gIOw16o16i 

blocked weights format

mkldnn_gOIw4i16o4i 

blocked weights format

mkldnn_gOIw4i16o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels multiplied by number of groups and containing the values: O[i:0,G*OC] = -128 * SUM(j:0,IC;w:0,W)(weights(i,j,w))

mkldnn_Goiw16g 

blocked weights format

mkldnn_Goiw16g_s8s8 

blocked weights format with additional buffer with size equal to the number of groups and containing the values: O[i:0,G] = -128 * SUM(w:0,W)(weights(i,i,w))

mkldnn_hwigo_s8s8 

weights format with additional buffer size equal to the number of output channels multiplied by number of groups and containing the values: O[i:0,G*OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_gOIhw4i4o 

blocked weights format

mkldnn_gOIhw8i8o 

blocked weights format

mkldnn_gOIhw16i16o 

blocked weights format

mkldnn_gOIhw4i16o4i 

blocked weights format

mkldnn_gOIhw4i16o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels multiplied by number of groups and containing the values: O[i:0,G*OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_gOIhw2i8o4i 

blocked weights format

mkldnn_gOIhw2i8o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels multiplied by number of groups and containing the values: O[i:0,G*OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_gOIhw8i16o2i 

blocked weights format

mkldnn_gIOhw8i16o2i 

blocked weights format

mkldnn_gOIhw8o16i2o 

blocked weights format

mkldnn_gIOhw8o16i2o 

blocked weights format

mkldnn_gOIhw4o4i 

blocked weights format

mkldnn_gOIhw4o4i_s8s8 

blocked weights format with additional buffer with size equal to the number of output channels and containing the values: O[i:0,OC] = -128 * SUM(j:0,IC;h:0,H;w:0,W)(weights(i,j,h,w))

mkldnn_gOIhw8o8i 

blocked weights format

mkldnn_gOIhw16o16i 

blocked weights format

mkldnn_gIOhw16o16i 

blocked weights format

mkldnn_gOihw8o 

blocked weights format

mkldnn_gOihw4o 

blocked weights format

mkldnn_gOihw16o 

blocked weights format

mkldnn_gOhwi8o 

blocked weights format

mkldnn_gOhwi4o 

blocked weights format

mkldnn_gOhwi16o 

blocked weights format

mkldnn_Goihw8g 

blocked weights format

mkldnn_Goihw16g 

blocked weights format

mkldnn_Goihw16g_s8s8 

blocked weights format with additional buffer with size equal to the number of groups and containing the values: O[i:0,G] = -128 * SUM(h:0,H;w:0,W)(weights(i,i,h,w))

mkldnn_gOhIw16o4i 

blocked weights format

mkldnn_gOIdhw4i4o 

blocked weights format

mkldnn_gOdhwi4o 

blocked weights format

mkldnn_gOIdhw8i8o 

blocked weights format

mkldnn_gOIdhw8o8i 

blocked weights format

mkldnn_gOdhwi8o 

blocked weights format

mkldnn_gOIdhw8i16o2i 

blocked weights format

mkldnn_gOIdhw8o16i2o 

blocked weights format

mkldnn_gIOdhw8o16i2o 

blocked weights format

mkldnn_gOIdhw16i16o 

blocked weights format

mkldnn_gOIdhw16o16i 

blocked weights format

mkldnn_gOidhw4o 

blocked weights format

mkldnn_gOidhw16o 

blocked weights format

mkldnn_gOdhwi16o 

blocked weights format

mkldnn_wino_fmt 

Weights format used in 8-bit Winograd convolution.

mkldnn_rnn_packed 

Packed weights format used in RNN.

mkldnn_format_last 

Just a sentinel, not a real memory format.

Must be updated whenever a new format is added.

◆ mkldnn_padding_kind_t

Kinds of padding.

Define how to interpret the data in padding regions.

Enumerator
mkldnn_padding_zero 

The data in padding regions is zero.

◆ mkldnn_prop_kind_t

Kinds of propagation.

Enumerator
mkldnn_prop_kind_undef 

Undefined propagation type.

mkldnn_forward_training 

Forward data propagation (training mode).

In this mode primitives perform computations necessary for subsequent backward propagation.

mkldnn_forward_inference 

Forward data propagation (inference mode).

In this mode primitives perform only computations that are necessary for inference and omit computations that are necessary only for backward propagation.

mkldnn_forward_scoring 

Forward data propagation (alias for mkldnn_forward_inference)

mkldnn_forward 

Forward data propagation (alias for mkldnn_forward_training)

mkldnn_backward 

Backward propagation (with respect to all parameters).

mkldnn_backward_data 

Backward data propagation.

mkldnn_backward_weights 

Backward weights propagation.

mkldnn_backward_bias 

Backward bias propagation.
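
As a minimal sketch (not a library helper), the propagation kind for creating a forward primitive might be selected as follows, relying on the aliases documented above:

#include <stdbool.h>
#include "mkldnn.h"

/* Illustrative: pick the propagation kind for a forward primitive.
   mkldnn_forward is an alias for mkldnn_forward_training, and
   mkldnn_forward_scoring is an alias for mkldnn_forward_inference. */
static mkldnn_prop_kind_t choose_forward_kind(bool training) {
    return training ? mkldnn_forward_training : mkldnn_forward_inference;
}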

◆ mkldnn_primitive_kind_t

Kinds of primitives.

Used to implement a way to extend the library with new primitives without changing the ABI.

Enumerator
mkldnn_undefined_primitive 

Undefined primitive.

mkldnn_memory 

A memory primitive.

mkldnn_view 

A view primitive.

mkldnn_reorder 

A reorder primitive.

mkldnn_shuffle 

A shuffle primitive.

mkldnn_concat 

An out-of-place concat primitive.

mkldnn_concat_inplace 

An in-place concat primitive.

mkldnn_sum 

A sum primitive.

mkldnn_convolution 

A convolution primitive.

mkldnn_deconvolution 

A deconvolution primitive.

mkldnn_eltwise 

An element-wise primitive.

mkldnn_softmax 

A Softmax primitive.

mkldnn_pooling 

A pooling primitive.

mkldnn_lrn 

An LRN primitive.

mkldnn_batch_normalization 

A batch normalization primitive.

mkldnn_inner_product 

An inner product primitive.

mkldnn_rnn 

An RNN primitive.

◆ mkldnn_alg_kind_t

Kinds of algorithms.

Enumerator
mkldnn_alg_kind_undef 
mkldnn_convolution_direct 

Direct convolution.

mkldnn_convolution_winograd 

Winograd convolution.

mkldnn_convolution_auto 

Convolution algorithm (either direct or Winograd) is chosen just in time.

mkldnn_deconvolution_direct 

Direct deconvolution.

mkldnn_deconvolution_winograd 

Winograd deconvolution.

mkldnn_eltwise_relu 

Eltwise: ReLU.

mkldnn_eltwise_tanh 

Eltwise: hyperbolic tangent non-linearity (tanh)

mkldnn_eltwise_elu 

Eltwise: parametric exponential linear unit (elu)

mkldnn_eltwise_square 

Eltwise: square.

mkldnn_eltwise_abs 

Eltwise: abs.

mkldnn_eltwise_sqrt 

Eltwise: square root.

mkldnn_eltwise_linear 

Eltwise: linear.

mkldnn_eltwise_bounded_relu 

Eltwise: bounded_relu.

mkldnn_eltwise_soft_relu 

Eltwise: soft_relu.

mkldnn_eltwise_logistic 

Eltwise: logistic.

mkldnn_eltwise_exp 

Eltwise: exponent.

mkldnn_eltwise_gelu 

Eltwise: gelu.

mkldnn_pooling_max 

Max pooling.

mkldnn_pooling_avg_include_padding 

Average pooling with padding included in the averaging.

mkldnn_pooling_avg_exclude_padding 

Average pooling with padding excluded from the averaging.

mkldnn_pooling_avg 

Average pooling (alias for mkldnn_pooling_avg_exclude_padding).

mkldnn_lrn_across_channels 

Local response normalization (LRN) across multiple channels.

mkldnn_lrn_within_channel 

LRN within a single channel.

mkldnn_vanilla_rnn 

RNN cell.

mkldnn_vanilla_lstm 

LSTM cell.

mkldnn_vanilla_gru 

GRU cell.

mkldnn_gru_linear_before_reset 

GRU cell with linear before reset.

Modification of the original GRU cell. Differs from mkldnn_vanilla_gru in how the new memory gate is calculated:

\[ c_t = tanh(W_c*x_t + b_{c_x} + r_t*(U_c*h_{t-1}+b_{c_h})) \]

The primitive expects 4 biases on input: $[b_{u}, b_{r}, b_{c_x}, b_{c_h}]$
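
For comparison, assuming the standard GRU formulation, the candidate state of mkldnn_vanilla_gru applies the reset gate to h_{t-1} before the U_c transform:

\[ c_t = tanh(W_c*x_t + U_c*(r_t*h_{t-1}) + b_c) \]

so the two variants differ only in whether r_t is applied before or after the previous hidden state is multiplied by U_c.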

◆ mkldnn_batch_normalization_flag_t

Flags for the batch-normalization primitive.

Enumerator
mkldnn_use_global_stats 

Use global statistics.

If specified:

  • on forward propagation, use the mean and variance provided by the user (as inputs)
  • on backward propagation, the amount of computation is reduced, since the mean and variance are treated as constants

If not specified:

  • on forward propagation, the mean and variance are computed and stored as outputs
  • on backward propagation, the full derivative with respect to the data is computed
mkldnn_use_scaleshift 

Use scale and shift parameters.

If specified:

  • on forward propagation, apply scale and shift (also known as scale and bias) to the batch normalization results
  • on backward propagation (for prop_kind == mkldnn_backward), compute the diff with respect to scale and shift (hence one extra output is used)

If not specified:

  • on backward propagation, prop_kind == mkldnn_backward_data has the same behavior as prop_kind == mkldnn_backward

mkldnn_fuse_bn_relu 

Fuse with ReLU.

If specified:

  • during inference, this option behaves the same as if the primitive were fused with ReLU via the post-ops API
  • during training, the primitive requires a workspace (needed to perform the backward pass)
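
Because the flag values occupy distinct bits (0x1U, 0x2U, 0x4U), they are intended to be combined with bitwise OR. The snippet below is a minimal sketch of building such a mask; the variable names are illustrative, and the descriptor-initialization call that consumes the mask is only referenced in the final comment.

#include "mkldnn.h"

/* Combine batch-normalization flags; each enumerator occupies its own bit. */
unsigned bn_training_flags = mkldnn_use_scaleshift | mkldnn_fuse_bn_relu;

/* For inference, precomputed statistics are typically supplied as well. */
unsigned bn_inference_flags = mkldnn_use_scaleshift | mkldnn_use_global_stats;

/* The resulting mask is then passed, together with the propagation kind and
   epsilon, to the batch-normalization descriptor initialization function. */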