Operators#

Operator base class#

Operator is Aidge’s base class for describing a mathematical operator. It does not make any assumption about the data coding.

class aidge_core.Operator#
__init__(*args, **kwargs)#
add_hook(self: aidge_core.aidge_core.Operator, arg0: str) None#
associate_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) None#
backend(self: aidge_core.aidge_core.Operator) str#
forward(self: aidge_core.aidge_core.Operator) None#
get_hook(self: aidge_core.aidge_core.Operator, arg0: str) Aidge::Hook#
get_impl(self: aidge_core.aidge_core.Operator) aidge_core.aidge_core.OperatorImpl#
get_raw_input(self: aidge_core.aidge_core.Operator, inputIdx: int) aidge_core.aidge_core.Data#
get_raw_output(self: aidge_core.aidge_core.Operator, outputIdx: int) aidge_core.aidge_core.Data#
input_category(self: aidge_core.aidge_core.Operator, idx: int) aidge_core.aidge_core.InputCategory#

Category of a specific input (Data or Param, optional or not). Data inputs exclude inputs expecting parameters (weights or bias).

Return type:

InputCategory

nb_inputs(self: aidge_core.aidge_core.Operator) int#
nb_outputs(self: aidge_core.aidge_core.Operator) int#
set_backend(self: aidge_core.aidge_core.Operator, name: str, device: int = 0) None#
set_datatype(self: aidge_core.aidge_core.Operator, dataType: aidge_core.aidge_core.dtype) None#
set_impl(self: aidge_core.aidge_core.Operator, implementation: aidge_core.aidge_core.OperatorImpl) None#
set_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) None#

set_output(self: aidge_core.aidge_core.Operator, outputIdx: int, data: aidge_core.aidge_core.Data) None#
type(self: aidge_core.aidge_core.Operator) str#
class Operator : public std::enable_shared_from_this<Operator>#

Subclassed by Aidge::OperatorTensor

Public Functions

Operator() = delete#
inline Operator(const std::string &type, const std::vector<InputCategory> &inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)#
inline Operator(const Operator &op)#
virtual ~Operator() noexcept#
virtual std::shared_ptr<Operator> clone() const = 0#
inline virtual std::shared_ptr<Attributes> attributes() const#
virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data> &data) = 0#

Set the specified input with a shallow copy.

Parameters:
  • inputIdx – Index of the input to set.

  • data – Data to copy.

virtual void resetInput(const IOIndex_t inputIdx) = 0#
virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data> &data) = 0#

Set the specified input value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.

Parameters:
  • inputIdx – Index of the input to set.

  • data – Data to copy.

virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0#
virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data> &data) = 0#

Set the specified output value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.

Parameters:

outputIdx – Index of the output to set.

virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0#
inline std::shared_ptr<Hook> getHook(const std::string &hookName)#
inline void addHook(const std::string &hookName)#
void runHooks() const#
inline std::string backend() const noexcept#
virtual void setBackend(const std::string &name, DeviceIdx_t device = 0) = 0#
virtual void setDataType(const DataType &dataType) const = 0#
virtual void setDataFormat(const DataFormat &dataFormat) const = 0#
inline void setImpl(std::shared_ptr<OperatorImpl> impl)#

Set a new OperatorImpl to the Operator.

inline std::shared_ptr<OperatorImpl> getImpl() const noexcept#

Get the OperatorImpl of the Operator.

virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const#

Minimum amount of data from a specific input for one computation pass.

Parameters:

inputIdx – Index of the input analysed.

Returns:

Elts_t

virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const#
virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const#
virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const#

Total amount of consumed data from a specific input.

Parameters:

inputIdx – Index of the input analysed.

Returns:

Elts_t

virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const#

Total amount of produced data ready to be used on a specific output.

Parameters:

outputIdx – Index of the output analysed.

Returns:

Elts_t

virtual void updateConsummerProducer()#
virtual void resetConsummerProducer()#
virtual void forward()#
virtual void backward()#
inline std::string type() const noexcept#
inline OperatorType operatorType() const noexcept#
inline InputCategory inputCategory(IOIndex_t idx) const#
inline virtual bool isAtomic() const noexcept#
inline IOIndex_t nbInputs() const noexcept#
inline IOIndex_t nbOutputs() const noexcept#

Public Static Functions

static inline const std::vector<std::string> getInputsName()#
static inline const std::vector<std::string> getOutputsName()#
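
As a short illustration of this interface from Python, the sketch below creates a node and queries its Operator; it assumes Node.get_operator() is available in the bindings (it is not part of the listing above), and the node name is arbitrary.

import aidge_core

# Create a ReLU node (see the predefined operators below) and retrieve its
# underlying Operator. Node.get_operator() is assumed to be available in the
# Python bindings; it is not part of the listing above.
node = aidge_core.ReLU(name="my_relu")
op = node.get_operator()

# Query the generic Operator interface documented above.
print(op.type())             # expected: "ReLU"
print(op.nb_inputs())        # number of inputs declared by the operator
print(op.nb_outputs())       # number of outputs declared by the operator
print(op.input_category(0))  # InputCategory of the first input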

OperatorTensor base class#

OperatorTensor derives from the Operator base class and is the base class for any tensor-based operator.

class aidge_core.OperatorTensor#
__init__(*args, **kwargs)#
add_hook(self: aidge_core.aidge_core.Operator, arg0: str) None#
associate_input(self: aidge_core.aidge_core.Operator, inputIdx: int, data: aidge_core.aidge_core.Data) None#
backend(self: aidge_core.aidge_core.Operator) str#
dims_forwarded(self: aidge_core.aidge_core.OperatorTensor) bool#
forward(self: aidge_core.aidge_core.Operator) None#
forward_dims(self: aidge_core.aidge_core.OperatorTensor, allow_data_dependency: bool = False) bool#
get_hook(self: aidge_core.aidge_core.Operator, arg0: str) Aidge::Hook#
get_impl(self: aidge_core.aidge_core.Operator) aidge_core.aidge_core.OperatorImpl#
get_input(self: aidge_core.aidge_core.OperatorTensor, inputIdx: int) aidge_core.aidge_core.Tensor#
get_output(self: aidge_core.aidge_core.OperatorTensor, outputIdx: int) aidge_core.aidge_core.Tensor#
get_raw_input(self: aidge_core.aidge_core.Operator, inputIdx: int) aidge_core.aidge_core.Data#
get_raw_output(self: aidge_core.aidge_core.Operator, outputIdx: int) aidge_core.aidge_core.Data#
input_category(self: aidge_core.aidge_core.Operator, idx: int) aidge_core.aidge_core.InputCategory#

Category of a specific input (Data or Param, optional or not). Data inputs exclude inputs expecting parameters (weights or bias).

Return type:

InputCategory

nb_inputs(self: aidge_core.aidge_core.Operator) int#
nb_outputs(self: aidge_core.aidge_core.Operator) int#
set_backend(self: aidge_core.aidge_core.Operator, name: str, device: int = 0) None#
set_datatype(self: aidge_core.aidge_core.Operator, dataType: aidge_core.aidge_core.dtype) None#
set_impl(self: aidge_core.aidge_core.Operator, implementation: aidge_core.aidge_core.OperatorImpl) None#
set_input(self: aidge_core.aidge_core.OperatorTensor, inputIdx: int, data: aidge_core.aidge_core.Data) None#
set_output(self: aidge_core.aidge_core.OperatorTensor, outputIdx: int, data: aidge_core.aidge_core.Data) None#
type(self: aidge_core.aidge_core.Operator) str#
class OperatorTensor : public Aidge::Operator#

Subclassed by Aidge::Add_Op, Aidge::AvgPooling_Op< DIM >, Aidge::BatchNorm_Op< DIM >, Aidge::Cast_Op, Aidge::Concat_Op, Aidge::ConvDepthWise_Op< DIM >, Aidge::Conv_Op< DIM >, Aidge::Div_Op, Aidge::Erf_Op, Aidge::FC_Op, Aidge::Gather_Op, Aidge::GenericOperator_Op, Aidge::GlobalAveragePooling_Op, Aidge::Identity_Op, Aidge::LeakyReLU_Op, Aidge::Ln_Op, Aidge::MatMul_Op, Aidge::MaxPooling_Op< DIM >, Aidge::Memorize_Op, Aidge::MetaOperator_Op, Aidge::Move_Op, Aidge::Mul_Op, Aidge::Pad_Op< DIM >, Aidge::Pop_Op, Aidge::Pow_Op, Aidge::Producer_Op, Aidge::ReLU_Op, Aidge::ReduceMean_Op, Aidge::Reshape_Op, Aidge::Resize_Op, Aidge::Scaling_Op, Aidge::Shape_Op, Aidge::ShiftGELU_Op, Aidge::ShiftMax_Op, Aidge::Sigmoid_Op, Aidge::Slice_Op, Aidge::Softmax_Op, Aidge::Split_Op, Aidge::Sqrt_Op, Aidge::Sub_Op, Aidge::Tanh_Op, Aidge::Transpose_Op

Public Functions

OperatorTensor() = delete#
OperatorTensor(const std::string &type, const std::vector<InputCategory> &inputsCategory, const IOIndex_t nbOut)#
OperatorTensor(const OperatorTensor &other)#
~OperatorTensor()#
virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data> &data) override#

Set the specified input with a shallow copy.

Parameters:
  • inputIdx – Index of the input to set.

  • data – Data to copy.

virtual void resetInput(const IOIndex_t inputIdx) final override#
virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data> &data) override#

Set the specified input value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.

Parameters:
  • inputIdx – Index of the input to set.

  • data – Data to copy.

const std::shared_ptr<Tensor> &getInput(const IOIndex_t inputIdx) const#
virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const final override#
virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data> &data) override#

Set the specified output value by performing a deep copy of the given data. The pointer itself is not changed, thus keeping the current connections.

Parameters:

outputIdx – Index of the output to set.

virtual const std::shared_ptr<Tensor> &getOutput(const IOIndex_t outputIdx) const#
virtual std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const final override#
virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t> &firstEltDims, const std::vector<DimSize_t> &outputDims, const IOIndex_t outputIdx = 0) const#

For a given output feature area, compute the associated receptive field for each data input.

Parameters:
  • firstEltDims – Coordinates of the first element of the output feature area.

  • outputDims – Dimensions of the output feature area.

  • outputIdx – Index of the output. Default 0.

Returns:

std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> For each data input Tensor of the Operator, the coordinates of the first element and the dimensions of the corresponding input feature area.

virtual bool forwardDims(bool allowDataDependency = false)#
virtual bool dimsForwarded() const#
virtual void setDataType(const DataType &dataType) const override#
virtual void setDataFormat(const DataFormat &dataFormat) const override#
virtual void forward() override#
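
The sketch below illustrates the tensor-specific part of the interface (set_input, forward_dims, get_output) on a single operator; it assumes Node.get_operator(), a Tensor constructor from a NumPy array, and Tensor.dims() are available in the bindings, none of which are listed in this section.

import aidge_core
import numpy as np

# A single ReLU operator: its output dimensions simply follow its input dimensions.
node = aidge_core.ReLU(name="relu")
op = node.get_operator()  # assumed Node method returning the OperatorTensor

# Set the input tensor (the Tensor-from-NumPy constructor is assumed here),
# propagate dimensions, then inspect the resulting output dimensions.
op.set_input(0, aidge_core.Tensor(np.ones((1, 3, 32, 32), dtype=np.float32)))
if op.forward_dims():
    print(op.get_output(0).dims())  # expected: [1, 3, 32, 32]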

Generic Operator#

A generic tensor-based operator can be used to model any kind of mathematical operator that takes a defined number of inputs, produces a defined number of outputs and can have some attributes. It is possible to provide a function that computes the output tensor dimensions from the input dimensions. It comes with a default consumer-producer model: it requires and consumes the full input tensors and produces the full output tensors.

This is the default operator used for unsupported ONNX operators when loading an ONNX model. While it cannot be executed, a generic operator is still useful in several ways:

  • It allows loading any graph, even one with unknown operators. All the missing operator types and their positions in the graph can be identified exactly;

  • It can be searched and manipulated with graph matching, allowing it, for example, to be replaced with alternative operators;

  • It can be scheduled and included in the graph’s static scheduling;

  • 🚧 A custom implementation may be provided in the future, even in pure Python, for rapid integration and prototyping.

aidge_core.GenericOperator(type: str, nb_data: int, nb_param: int, nb_out: int, name: str = '', **kwargs) aidge_core.aidge_core.Node#

std::shared_ptr<Node> Aidge::GenericOperator(const std::string &type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut, const std::string &name = "")#
std::shared_ptr<Node> Aidge::GenericOperator(const std::string &type, const std::vector<InputCategory> &inputCategory, IOIndex_t nbOut, const std::string &name = "")#
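
As a sketch using only the Python signature above, a placeholder for an unsupported operator type could be created as follows; the type name, input/output counts and the Node.get_operator() call are illustrative assumptions.

import aidge_core

# Placeholder node for an unsupported operator type: one data input, no
# parameter input, one output. "MyCustomOp" is an arbitrary type name.
node = aidge_core.GenericOperator("MyCustomOp", nb_data=1, nb_param=0, nb_out=1, name="custom_op")

# The node can be inserted in a GraphView, matched, replaced or scheduled,
# but it has no implementation to execute.
print(node.get_operator().type())  # expected: "MyCustomOp"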

Meta Operator#

A meta-operator (or composite operator) is internally built from a sub-graph.

aidge_core.meta_operator(type: str, graph: aidge_core.aidge_core.GraphView, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::MetaOperator(const char *type, const std::shared_ptr<GraphView> &graph, const std::string &name = "")#

Building a new meta-operator is simple:

// Assemble the sub-graph of the composite operator: a Pad followed by a MaxPooling
auto graph = Sequential({
    Pad<2>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
    MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
});

// Wrap the sub-graph into a single "PaddedMaxPooling2D" meta-operator node
return MetaOperator("PaddedMaxPooling2D", graph, name);
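
The same pattern can be sketched from Python with aidge_core.meta_operator; the example below assumes aidge_core.sequential mirrors the C++ Sequential helper (it is not listed in this section) and uses arbitrary operators and sizes for the sub-graph.

import aidge_core

# Build the sub-graph of the composite operator. aidge_core.sequential is
# assumed to mirror the C++ Sequential helper used above.
micro_graph = aidge_core.sequential([
    aidge_core.Conv2D(3, 8, [3, 3], name="convrelu_conv"),
    aidge_core.ReLU(name="convrelu_relu"),
])

# Wrap the sub-graph into a single "ConvReLU" meta-operator node.
node = aidge_core.meta_operator("ConvReLU", micro_graph, name="convrelu")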

You can use the Expand meta operators recipe to flatten the meta-operators in a graph.

Predefined operators#

Add#

Diagram: AddOp. Inputs: data_input_0 ... data_input_n. Output: data_output.
aidge_core.Add(nb_inputs: int, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Add(const IOIndex_t nbIn, const std::string &name = "")#

Average Pooling#

Diagram: AvgPoolingOp2D. Input: data_input. Output: data_output.
aidge_core.AvgPooling1D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1]) aidge_core.aidge_core.Node#
aidge_core.AvgPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1]) aidge_core.aidge_core.Node#
aidge_core.AvgPooling3D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1, 1]) aidge_core.aidge_core.Node#
template<DimSize_t DIM>
inline std::shared_ptr<Node> Aidge::AvgPooling(DimSize_t const (&kernel_dims)[DIM], const std::string &name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1))#

BatchNorm#

Diagram: BatchNormOp2D. Inputs: data_input, scale, shift, mean, variance. Output: data_output.
aidge_core.BatchNorm2D(nb_features: int, epsilon: float = 9.999999747378752e-06, momentum: float = 0.10000000149011612, name: str = '') aidge_core.aidge_core.Node#
template<DimSize_t DIM>
std::shared_ptr<Node> Aidge::BatchNorm(const DimSize_t nbFeatures, const float epsilon = 1.0e-5F, const float momentum = 0.1F, const std::string &name = "")#

Cast#

Not available yet!

inline std::shared_ptr<Node> Aidge::Cast(const DataType targetType, const std::string &name = "")#

Concat#

aidge_core.Concat(nb_inputs: int, axis: int, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string &name = "")#

Conv#

Diagram: ConvOp2D. Inputs: data_input, weight, bias. Output: data_output.
aidge_core.Conv1D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1], dilation_dims: List[int] = [1], no_bias: bool = False) aidge_core.aidge_core.Node#
aidge_core.Conv2D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
template<DimSize_t DIM>
inline std::shared_ptr<Node> Aidge::Conv(DimSize_t inChannels, DimSize_t outChannels, DimSize_t const (&kernelDims)[DIM], const std::string &name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t, DIM>(1), bool noBias = false)#
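
For example, a 2D convolution node could be created from Python as follows; the channel counts, kernel size and stride are illustrative.

import aidge_core

# 2D convolution: 3 input channels, 16 output channels, 3x3 kernel,
# stride 2 in both spatial dimensions, default dilation, no bias input.
conv = aidge_core.Conv2D(in_channels=3, out_channels=16, kernel_dims=[3, 3],
                         name="conv1", stride_dims=[2, 2], no_bias=True)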

ConvDepthWise#

Diagram: ConvDepthWiseOp2D. Inputs: data_input, weight, bias. Output: data_output.
aidge_core.ConvDepthWise1D(nb_channenls: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1], dilation_dims: List[int] = [1], no_bias: bool = False) aidge_core.aidge_core.Node#
aidge_core.ConvDepthWise2D(nb_channenls: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
template<DimSize_t DIM>
inline std::shared_ptr<Node> Aidge::ConvDepthWise(const DimSize_t nbChannels, DimSize_t const (&kernelDims)[DIM], const std::string &name = "", const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t, DIM>(1), bool noBias = false)#

Div#

aidge_core.Div(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Div(const std::string &name = "")#

Erf#

aidge_core.Erf(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Erf(const std::string &name = "")#

FC#

Diagram: FCOp. Inputs: data_input, weight, bias. Output: data_output.
aidge_core.FC(in_channels: int, out_channels: int, no_bias: bool = False, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string &name = "")#

Gather#

aidge_core.Gather(axis: int = 0, indices: List[int] = [], gathered_shape: List[int] = [], name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Gather(std::int8_t axis = 0, const std::vector<int64_t> &indices = {}, const std::vector<DimSize_t> &gatheredShape = {}, const std::string &name = "")#

Identity#

aidge_core.Identity(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Identity(const std::string &name = "")#

LeakyReLU#

Diagram: LeakyReLUOp. Input: data_input. Output: data_output.
aidge_core.LeakyReLU(negative_slope: float = 0.0, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::LeakyReLU(float negativeSlope = 0.0f, const std::string &name = "")#

MatMul#

Diagram: MatMulOp. Inputs: data_input1, data_input2. Output: data_output.
aidge_core.MatMul(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::MatMul(const std::string &name = "")#

Memorize#

Not available yet!

inline std::shared_ptr<Node> Aidge::Memorize(const std::uint32_t endStep, const std::string &name = "")#

Move#

Not available yet!

inline std::shared_ptr<Node> Aidge::Move(const std::string &name = "")#

Mul#

aidge_core.Mul(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Mul(const std::string &name = "")#

Pad#

Not available yet!

template<std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Aidge::Pad(const std::array<DimSize_t, 2 * DIM> &beginEndTuples, const std::string &name = "", const PadBorderType &borderType = PadBorderType::Constant, double borderValue = 0.0)#

Pop#

aidge_core.Pop(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Pop(const std::string &name = "")#

Pow#

aidge_core.Pow(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Pow(const std::string &name = "")#

Producer#

Diagram: ProducerOp. No input. Output: data_output.
aidge_core.Producer(*args, **kwargs)#

Overloaded function.

  1. Producer(tensor: aidge_core.aidge_core.Tensor, name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  2. Producer(dims: List[int[1]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  3. Producer(dims: List[int[2]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  4. Producer(dims: List[int[3]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  5. Producer(dims: List[int[4]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  6. Producer(dims: List[int[5]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

  7. Producer(dims: List[int[6]], name: str = '', constant: bool = False) -> aidge_core.aidge_core.Node

inline std::shared_ptr<Node> Aidge::Producer(const std::shared_ptr<Tensor> tensor, const std::string &name = "", bool constant = false)#
template<std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Aidge::Producer(const std::array<DimSize_t, DIM> &dims, const std::string &name = "", bool constant = false)#
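
As a sketch of the two kinds of overloads, a Producer can be built either from an existing Tensor or from dimensions only; the example below assumes a Tensor constructor from a NumPy array (not listed in this section), and the shapes are illustrative.

import aidge_core
import numpy as np

# Producer holding a constant tensor (e.g. pretrained weights). The Tensor
# constructor from a NumPy array is assumed to be available in the bindings.
weights = aidge_core.Producer(aidge_core.Tensor(np.ones((16, 3, 3, 3), dtype=np.float32)),
                              name="weights", constant=True)

# Producer declared from its dimensions only; the tensor values can be set later.
placeholder = aidge_core.Producer([1, 3, 224, 224], name="input_producer")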

ReduceMean#

aidge_core.ReduceMean(axes: List[int], keep_dims: int = 1, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes, DimSize_t keep_dims = 1, const std::string &name = "")#

Compute the mean value of a Tensor over the provided axes. Depending on keep_dims, the reduced dimensions are either kept with size 1 or erased.

Parameters:
  • axes – Dimensions over which the mean should be computed.

  • keep_dims – If true, the reduced dimensions are kept with size 1; otherwise they are erased.

  • name – Name of the Operator.

Returns:

std::shared_ptr<Node> Node containing the Operator.
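
For instance, averaging over the spatial axes of an NCHW tensor while keeping them as size-1 dimensions could be written as follows; the axis values are illustrative.

import aidge_core

# Mean over the two spatial axes (H, W) of an NCHW tensor; keep_dims=1 keeps
# the reduced axes with size 1, so a [N, C, H, W] input yields [N, C, 1, 1].
gap = aidge_core.ReduceMean(axes=[2, 3], keep_dims=1, name="reduce_mean_hw")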

ReLU#

Diagram: ReLUOp. Input: data_input. Output: data_output.
aidge_core.ReLU(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::ReLU(const std::string &name = "")#

Reshape#

aidge_core.Reshape(shape: List[int] = [], allowzero: bool = False, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Reshape(const std::vector<std::int64_t> &shape = {}, bool allowzero = false, const std::string &name = "")#

Scaling#

Not available yet!

inline std::shared_ptr<Node> Aidge::Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits = 8, bool isOutputUnsigned = true, const std::string &name = "")#

Sigmoid#

aidge_core.Sigmoid(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Sigmoid(const std::string &name = "")#

Slice#

aidge_core.Slice(starts: List[int] = [], ends: List[int] = [], axes: List[int] = [], steps: List[int] = [], name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Slice(const std::vector<std::int64_t> &starts = {}, const std::vector<std::int64_t> &ends = {}, const std::vector<std::int8_t> &axes = {}, const std::vector<std::int64_t> &steps = {}, const std::string &name = "")#

Extract a sub-Tensor from a larger original Tensor.

Parameters:

name – Name of the Operator.

Returns:

std::shared_ptr<Node> A Node containing the Operator.
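
For instance, keeping every second element among the first ten along axis 0 could be expressed as below; the values are illustrative and assume the ONNX-style starts/ends/axes/steps convention suggested by the signature.

import aidge_core

# Slice along axis 0: start at index 0, stop before index 10, step 2
# (ONNX-style slicing parameters assumed).
sl = aidge_core.Slice(starts=[0], ends=[10], axes=[0], steps=[2], name="slice0")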

Softmax#

Diagram: SoftmaxOp. Input: data_input. Output: data_output.
aidge_core.Softmax(axis: int, name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Softmax(std::int32_t axis, const std::string &name = "")#

Sqrt#

aidge_core.Sqrt(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Sqrt(const std::string &name = "")#

Sub#

aidge_core.Sub(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Sub(const std::string &name = "")#

Tanh#

aidge_core.Tanh(name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Tanh(const std::string &name = "")#

Transpose#

aidge_core.Transpose(output_dims_order: List[int], name: str = '') aidge_core.aidge_core.Node#
inline std::shared_ptr<Node> Aidge::Transpose(const std::vector<DimSize_t> &outputDimsOrder, const std::string &name = "")#

Predefined meta-operators#

Some meta-operators (or composite operators) are predefined for convenience and/or for compatibility with other frameworks.

PaddedConv#

aidge_core.PaddedConv2D(in_channels: int, out_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
template<DimSize_t DIM>
inline std::shared_ptr<Node> Aidge::PaddedConv(DimSize_t in_channels, DimSize_t out_channels, DimSize_t const (&kernel_dims)[DIM], const std::string &name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, 2 * DIM> &padding_dims = create_array<DimSize_t, 2 * DIM>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1), bool no_bias = false)#
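
For example, a padded 3x3 convolution that preserves the spatial dimensions (one pixel of padding on every border) could be created as follows; the channel counts are illustrative.

import aidge_core

# PaddedConv2D is a meta-operator combining a Pad and a Conv operator.
# padding_dims gives the padding applied to each border of the two spatial dimensions.
conv = aidge_core.PaddedConv2D(in_channels=3, out_channels=16, kernel_dims=[3, 3],
                               name="padded_conv", padding_dims=[1, 1, 1, 1])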

PaddedConvDepthWise#

aidge_core.PaddedConvDepthWise2D(nb_channels: int, kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], dilation_dims: List[int] = [1, 1], no_bias: bool = False) aidge_core.aidge_core.Node#
template<std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Aidge::PaddedConvDepthWise(const DimSize_t nb_channels, const std::array<DimSize_t, DIM> &kernel_dims, const std::string &name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, 2 * DIM> &padding_dims = create_array<DimSize_t, 2 * DIM>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t, DIM>(1), bool no_bias = false)#

PaddedAvgPooling#

aidge_core.PaddedAvgPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0]) aidge_core.aidge_core.Node#
template<DimSize_t DIM>
std::shared_ptr<Node> Aidge::PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM], const std::string &name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, 2 * DIM> &padding_dims = create_array<DimSize_t, 2 * DIM>(0))#

PaddedMaxPooling#

aidge_core.PaddedMaxPooling2D(kernel_dims: List[int], name: str = '', stride_dims: List[int] = [1, 1], padding_dims: List[int] = [0, 0, 0, 0], ceil_mode: bool = False) aidge_core.aidge_core.Node#
template<std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Aidge::PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims, const std::string &name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t, DIM>(1), const std::array<DimSize_t, 2 * DIM> &padding_dims = create_array<DimSize_t, 2 * DIM>(0), bool ceil_mode = false)#

LSTM#

aidge_core.LSTM(in_channels: int, hidden_channels: int, seq_length: int, nobias: bool = False, name: str = '') aidge_core.aidge_core.Node#
std::shared_ptr<Node> Aidge::LSTM(DimSize_t in_channels, DimSize_t hidden_channels, DimSize_t seq_length, bool noBias = false, const std::string &name = "")#
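
As an illustration, an LSTM meta-operator unrolled over a fixed sequence length could be instantiated as follows; the feature sizes and sequence length are illustrative.

import aidge_core

# LSTM meta-operator: 32 input features, 64 hidden features, unrolled over
# a sequence of 16 time steps.
lstm = aidge_core.LSTM(in_channels=32, hidden_channels=64, seq_length=16, name="lstm0")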