@Namespace(value="dnnl") @Properties(inherit=dnnl.class) public class post_ops extends dnnl_post_ops_handle
Post-ops. Post-ops are computations executed after the main primitive computations and are attached to the primitive via primitive attributes.
dev_guide_attributes_post_opsPointer.CustomDeallocator, Pointer.Deallocator, Pointer.NativeDeallocator, Pointer.ReferenceCounter| Constructor and Description |
|---|
post_ops() |
post_ops(dnnl_post_ops t) |
post_ops(dnnl_post_ops t,
boolean weak) |
post_ops(long size)
Native array allocator.
|
post_ops(Pointer p)
Pointer cast constructor.
|
post_ops(post_ops arg0) |
| Modifier and Type | Method and Description |
|---|---|
void |
append_binary(dnnl.algorithm aalgorithm,
memory.desc src1_desc)
Appends a binary post-op.
|
void |
append_binary(int aalgorithm,
memory.desc src1_desc) |
void |
append_dw_k3s1p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
float[] scales) |
void |
append_dw_k3s1p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
FloatBuffer scales) |
void |
append_dw_k3s1p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
FloatPointer scales)
Appends a depthwise post-op convolution with stride 1.
|
void |
append_dw_k3s2p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
float[] scales) |
void |
append_dw_k3s2p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
FloatBuffer scales) |
void |
append_dw_k3s2p1(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int mask,
FloatPointer scales)
Appends a depthwise post-op convolution with stride 2.
|
void |
append_dw(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
long kernel_size,
long stride_size,
long padding_l_size,
int mask,
float[] scales) |
void |
append_dw(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
long kernel_size,
long stride_size,
long padding_l_size,
int mask,
FloatBuffer scales) |
void |
append_dw(memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
long kernel_size,
long stride_size,
long padding_l_size,
int mask,
FloatPointer scales)
Appends a depthwise post-op convolution.
|
void |
append_eltwise(float scale,
dnnl.algorithm aalgorithm,
float alpha,
float beta)
Appends an elementwise post-op.
|
void |
append_eltwise(float scale,
int aalgorithm,
float alpha,
float beta) |
void |
append_prelu(int mask)
Appends a prelu forward post-op.
|
void |
append_sum() |
void |
append_sum(float scale,
int zero_point) |
void |
append_sum(float scale,
int zero_point,
memory.data_type data_type)
Appends an accumulation (sum) post-op.
|
void |
append_sum(float scale,
memory.data_type data_type)
Appends an accumulation (sum) post-op.
|
void |
get_params_binary(int index,
int[] aalgorithm,
memory.desc src1_desc) |
void |
get_params_binary(int index,
IntBuffer aalgorithm,
memory.desc src1_desc) |
void |
get_params_binary(int index,
IntPointer aalgorithm,
memory.desc src1_desc)
Returns the parameters of a binary post-op.
|
void |
get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int[] mask,
float[] scales) |
void |
get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
IntBuffer mask,
FloatBuffer scales) |
void |
get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
IntPointer mask,
FloatPointer scales)
Returns the parameters of a depthwise post-op with stride 1.
|
void |
get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
int[] mask,
float[] scales) |
void |
get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
IntBuffer mask,
FloatBuffer scales) |
void |
get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
IntPointer mask,
FloatPointer scales)
Returns the parameters of a depthwise post-op with stride 2.
|
void |
get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
long[] kernel_size,
long[] stride_size,
long[] padding_l_size,
int[] mask,
float[] scales) |
void |
get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
LongBuffer kernel_size,
LongBuffer stride_size,
LongBuffer padding_l_size,
IntBuffer mask,
FloatBuffer scales) |
void |
get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
LongPointer kernel_size,
LongPointer stride_size,
LongPointer padding_l_size,
IntPointer mask,
FloatPointer scales)
Returns the parameters of a depthwise post-op.
|
void |
get_params_eltwise(int index,
float[] scale,
int[] aalgorithm,
float[] alpha,
float[] beta) |
void |
get_params_eltwise(int index,
FloatBuffer scale,
IntBuffer aalgorithm,
FloatBuffer alpha,
FloatBuffer beta) |
void |
get_params_eltwise(int index,
FloatPointer scale,
IntPointer aalgorithm,
FloatPointer alpha,
FloatPointer beta)
Returns parameters of an elementwise post-op.
|
void |
get_params_prelu(int index,
int[] mask) |
void |
get_params_prelu(int index,
IntBuffer mask) |
void |
get_params_prelu(int index,
IntPointer mask)
Returns the parameters of a prelu post-op.
|
void |
get_params_sum(int index,
float[] scale) |
void |
get_params_sum(int index,
float[] scale,
int[] zero_point,
memory.data_type data_type) |
void |
get_params_sum(int index,
float[] scale,
memory.data_type data_type) |
void |
get_params_sum(int index,
FloatBuffer scale) |
void |
get_params_sum(int index,
FloatBuffer scale,
IntBuffer zero_point,
memory.data_type data_type) |
void |
get_params_sum(int index,
FloatBuffer scale,
memory.data_type data_type) |
void |
get_params_sum(int index,
FloatPointer scale)
Returns the parameters of an accumulation (sum) post-op.
|
void |
get_params_sum(int index,
FloatPointer scale,
IntPointer zero_point,
memory.data_type data_type)
Returns the parameters of an accumulation (sum) post-op.
|
void |
get_params_sum(int index,
FloatPointer scale,
memory.data_type data_type)
Returns the parameters of an accumulation (sum) post-op.
|
post_ops |
getPointer(long i) |
primitive.kind |
kind(int index)
Returns the primitive kind of post-op at entry with a certain index.
|
int |
len()
Returns the number of post-ops entries.
|
post_ops |
position(long position) |
asBoolean, asDnnl_post_ops, equals, get, get, notEquals, put, reset, resetaddress, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, getDirectBufferAddress, getPointer, getPointer, getPointer, hashCode, interruptDeallocatorThread, isNull, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetAddress, offsetof, offsetof, parseBytes, physicalBytes, physicalBytesInaccurate, position, put, realloc, referenceCount, releaseReference, retainReference, setNull, sizeof, sizeof, toString, totalBytes, totalCount, totalPhysicalBytes, withDeallocator, zeropublic post_ops()
public post_ops(dnnl_post_ops t, @Cast(value="bool") boolean weak)
public post_ops(dnnl_post_ops t)
public post_ops(Pointer p)
Pointer(Pointer).public post_ops(long size)
Pointer.position(long).public post_ops position(long position)
position in class dnnl_post_ops_handlepublic post_ops getPointer(long i)
getPointer in class dnnl_post_ops_handlepublic int len()
public primitive.kind kind(int index)
index - Index of the post-op to return the kind for.public void append_sum(float scale,
memory.data_type data_type)
dst[:] := scale * dst[:] + op(...)
instead of dst[:] := op(...).
If \p data_type is specified, the original dst tensor will be
reinterpreted as a tensor with the provided data type. Because it is a
reinterpretation, data_type and dst data type should have the same size.
As a result, computations will be dst[:] <- scale *
as_data_type(dst[:]) + op(...) instead of dst[:] <- op(...).
\note
This post-op executes in-place and does not change the
destination layout.scale - Scaling factor.data_type - Data type.public void append_sum()
public void append_sum(float scale,
int zero_point,
memory.data_type data_type)
dst[:] := scale * (dst[:] - zero_point) +
op(...) instead of dst[:] := op(...).
If \p data_type is specified, the original dst tensor will be
reinterpreted as a tensor with the provided data type. Because it is a
reinterpretation, data_type and dst data type should have the same size.
As a result, computations will be dst[:] <- scale *
(as_data_type(dst[:]) - zero_point) + op(...) instead of
dst[:] <- op(...).
\note
This post-op executes in-place and does not change the
destination layout.scale - Scaling factor.zero_point - Zero point.data_type - Data type.public void append_sum(float scale,
int zero_point)
public void get_params_sum(int index,
@ByRef
FloatPointer scale)
index - Index of the sum post-op.scale - Scaling factor of the sum post-op.public void get_params_sum(int index,
@ByRef
FloatBuffer scale)
public void get_params_sum(int index,
@ByRef
float[] scale)
public void get_params_sum(int index,
@ByRef
FloatPointer scale,
memory.data_type data_type)
index - Index of the sum post-op.scale - Scaling factor of the sum post-op.data_type - Data type of the sum post-op.public void get_params_sum(int index,
@ByRef
FloatBuffer scale,
memory.data_type data_type)
public void get_params_sum(int index,
@ByRef
float[] scale,
memory.data_type data_type)
public void get_params_sum(int index,
@ByRef
FloatPointer scale,
@ByRef
IntPointer zero_point,
memory.data_type data_type)
index - Index of the sum post-op.scale - Scaling factor of the sum post-op.zero_point - Single scalar int32_t value of zeropoint.data_type - Data type of the sum post-op.public void get_params_sum(int index,
@ByRef
FloatBuffer scale,
@ByRef
IntBuffer zero_point,
memory.data_type data_type)
public void get_params_sum(int index,
@ByRef
float[] scale,
@ByRef
int[] zero_point,
memory.data_type data_type)
public void append_eltwise(float scale,
dnnl.algorithm aalgorithm,
float alpha,
float beta)
dst[:] := scale * eltwise_op (op(...)) instead
of dst[:] <- op(...), where eltwise_op is configured with the given
parameters.scale - Scaling factor.aalgorithm - Elementwise algorithm.alpha - Alpha parameter for the elementwise algorithm.beta - Beta parameter for the elementwise algorithm.public void append_eltwise(float scale,
@Cast(value="dnnl::algorithm")
int aalgorithm,
float alpha,
float beta)
public void get_params_eltwise(int index,
@ByRef
FloatPointer scale,
@ByRef @Cast(value="dnnl::algorithm*")
IntPointer aalgorithm,
@ByRef
FloatPointer alpha,
@ByRef
FloatPointer beta)
index - Index of the post-op.scale - Output scaling factor.aalgorithm - Output elementwise algorithm kind.alpha - Output alpha parameter for the elementwise algorithm.beta - Output beta parameter for the elementwise algorithm.public void get_params_eltwise(int index,
@ByRef
FloatBuffer scale,
@ByRef @Cast(value="dnnl::algorithm*")
IntBuffer aalgorithm,
@ByRef
FloatBuffer alpha,
@ByRef
FloatBuffer beta)
public void get_params_eltwise(int index,
@ByRef
float[] scale,
@ByRef @Cast(value="dnnl::algorithm*")
int[] aalgorithm,
@ByRef
float[] alpha,
@ByRef
float[] beta)
public void append_dw(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, @Cast(value="dnnl::memory::dim") long kernel_size, @Cast(value="dnnl::memory::dim") long stride_size, @Cast(value="dnnl::memory::dim") long padding_l_size, int mask, @StdVector FloatPointer scales)
weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opkernel_size - Size of kernel of depthwise post-opstride_size - Size of stride of depthwise post-oppadding_l_size - Size of left and top paddings of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void append_dw(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, @Cast(value="dnnl::memory::dim") long kernel_size, @Cast(value="dnnl::memory::dim") long stride_size, @Cast(value="dnnl::memory::dim") long padding_l_size, int mask, @StdVector FloatBuffer scales)
public void append_dw(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, @Cast(value="dnnl::memory::dim") long kernel_size, @Cast(value="dnnl::memory::dim") long stride_size, @Cast(value="dnnl::memory::dim") long padding_l_size, int mask, @StdVector float[] scales)
public void get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@Cast(value="dnnl::memory::dim*") @ByRef
LongPointer kernel_size,
@Cast(value="dnnl::memory::dim*") @ByRef
LongPointer stride_size,
@Cast(value="dnnl::memory::dim*") @ByRef
LongPointer padding_l_size,
@ByRef
IntPointer mask,
@StdVector
FloatPointer scales)
index - Index of the depthwise post-op.weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opkernel_size - Size of kernel of depthwise post-opstride_size - Size of stride of depthwise post-oppadding_l_size - Size of left and top paddings of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@Cast(value="dnnl::memory::dim*") @ByRef
LongBuffer kernel_size,
@Cast(value="dnnl::memory::dim*") @ByRef
LongBuffer stride_size,
@Cast(value="dnnl::memory::dim*") @ByRef
LongBuffer padding_l_size,
@ByRef
IntBuffer mask,
@StdVector
FloatBuffer scales)
public void get_params_dw(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@Cast(value="dnnl::memory::dim*") @ByRef
long[] kernel_size,
@Cast(value="dnnl::memory::dim*") @ByRef
long[] stride_size,
@Cast(value="dnnl::memory::dim*") @ByRef
long[] padding_l_size,
@ByRef
int[] mask,
@StdVector
float[] scales)
public void append_dw_k3s1p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector FloatPointer scales)
weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void append_dw_k3s1p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector FloatBuffer scales)
public void append_dw_k3s1p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector float[] scales)
public void get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
IntPointer mask,
@StdVector
FloatPointer scales)
index - Index of the depthwise post-op.weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
IntBuffer mask,
@StdVector
FloatBuffer scales)
public void get_params_dw_k3s1p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
int[] mask,
@StdVector
float[] scales)
public void append_dw_k3s2p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector FloatPointer scales)
weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void append_dw_k3s2p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector FloatBuffer scales)
public void append_dw_k3s2p1(memory.data_type weights_data_type, memory.data_type bias_data_type, memory.data_type dst_data_type, int mask, @StdVector float[] scales)
public void get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
IntPointer mask,
@StdVector
FloatPointer scales)
index - Index of the depthwise post-op.weights_data_type - Weights data type of depthwise post-opbias_data_type - Bias data type of depthwise post-opdst_data_type - Output data type of depthwise post-opmask - Output scaling factors correspondence mask that defines the
correspondence between the output tensor dimensions and the
\p scales array. The set i-th bit indicates that a dedicated output
scaling factor is used for each index along that dimension. The mask
value of 0 implies a common scaling factor for the whole output
tensor.scales - Output pointer to a constant array of float scaling
factors.public void get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
IntBuffer mask,
@StdVector
FloatBuffer scales)
public void get_params_dw_k3s2p1(int index,
memory.data_type weights_data_type,
memory.data_type bias_data_type,
memory.data_type dst_data_type,
@ByRef
int[] mask,
@StdVector
float[] scales)
public void append_binary(dnnl.algorithm aalgorithm, @Const @ByRef memory.desc src1_desc)
aalgorithm - Binary algorithm for the post-op.src1_desc - Memory descriptor of a second operand.public void append_binary(@Cast(value="dnnl::algorithm") int aalgorithm, @Const @ByRef memory.desc src1_desc)
public void get_params_binary(int index,
@ByRef @Cast(value="dnnl::algorithm*")
IntPointer aalgorithm,
@ByRef
memory.desc src1_desc)
index - Index of the binary post-op.aalgorithm - Output binary algorithm kind.src1_desc - Output memory descriptor of a second operand.public void get_params_binary(int index,
@ByRef @Cast(value="dnnl::algorithm*")
IntBuffer aalgorithm,
@ByRef
memory.desc src1_desc)
public void get_params_binary(int index,
@ByRef @Cast(value="dnnl::algorithm*")
int[] aalgorithm,
@ByRef
memory.desc src1_desc)
public void append_prelu(int mask)
int mb = 32, oc = 32,
oh = 14, ow = 14; // convolution output params
// unique weights per output channel
vector<float> weights = { ... };
int oc_dim = 1; // mb_dim = 0, channel_dim = 1, height_dim = 2, ...
// construct a convolution descriptor
dnnl::convolution::desc conv_d;
dnnl::primitive_attr attr;
attr.append_prelu(1 << oc_dim);
dnnl::primitive_desc conv_pd(conv_d, attr, engine);
memory prelu_weights({{1}, dt::f32, {1}}, eng, weights.data());
std::unordered_map<int, memory> conv_args;
conv_args.insert(
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(0) | DNNL_ARG_WEIGHTS, prelu_weights})
\note
The order of dimensions does not depend on how elements are laid
out in memory. For example:
- for a 2D CNN activations tensor the order is always (n, c)
- for a 4D CNN activations tensor the order is always (n, c, h, w)
- for a 5D CNN weights tensor the order is always
(g, oc, ic, kh, kw)
Prelu weights tensor is passed in runtime execution phase. Prelu
weights tensor data type is implicitly assumed as f32 using plain
layout (a, ab, acb, acdb, acdeb).mask - Defines the correspondence between the output tensor
dimensions and the prelu weights tensor. The set i-th bit indicates
that a dedicated weights value is used for each index along that
dimension. Set the mask to 0 to use a common weights value
for the whole output tensor.public void get_params_prelu(int index,
@ByRef
IntPointer mask)
index - Index of the prelu post-op.mask - Weights mask of prelu post-op.public void get_params_prelu(int index,
@ByRef
int[] mask)
Copyright © 2022. All rights reserved.