Data#

Tensor#

class aidge_core.Tensor#
__init__(*args, **kwargs)#

Overloaded function.

  1. __init__(self: aidge_core.aidge_core.Tensor) -> None

  2. __init__(self: aidge_core.aidge_core.Tensor, array: numpy.ndarray[numpy.int32], backend: str = 'cpu') -> None

  3. __init__(self: aidge_core.aidge_core.Tensor, array: numpy.ndarray[numpy.int64], backend: str = 'cpu') -> None

  4. __init__(self: aidge_core.aidge_core.Tensor, array: numpy.ndarray[numpy.float32], backend: str = 'cpu') -> None

  5. __init__(self: aidge_core.aidge_core.Tensor, array: numpy.ndarray[numpy.float64], backend: str = 'cpu') -> None

capacity(self: aidge_core.aidge_core.Tensor) int#
dims(self: aidge_core.aidge_core.Tensor) List[int]#
dtype(self: aidge_core.aidge_core.Tensor) aidge_core.aidge_core.dtype#
static get_available_backends() Set[str]#
get_coord(self: aidge_core.aidge_core.Tensor, arg0: int) List[int]#
get_idx(self: aidge_core.aidge_core.Tensor, arg0: List[int]) int#
grad(self: aidge_core.aidge_core.Tensor) aidge_core.aidge_core.Tensor#
has_impl(self: aidge_core.aidge_core.Tensor) bool#
resize(self: aidge_core.aidge_core.Tensor, arg0: List[int], arg1: List[int]) None#
set_backend(self: aidge_core.aidge_core.Tensor, name: str, device: int = 0, copyFrom: bool = True) None#
set_datatype(self: aidge_core.aidge_core.Tensor, datatype: aidge_core.aidge_core.dtype, copyCast: bool = True) None#
set_grad(self: aidge_core.aidge_core.Tensor, arg0: aidge_core.aidge_core.Tensor) None#
size(self: aidge_core.aidge_core.Tensor) int#
sqrt(self: aidge_core.aidge_core.Tensor) aidge_core.aidge_core.Tensor#
class Tensor : public Aidge::Data, public Aidge::Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>#

Description for the tensor data structure.

Sets the properties of the tensor without actually containing any data. Contains a pointer to an actual contiguous implementation of data.

Public Functions

inline Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)#

Construct a new empty Tensor object. It has the features of an undefined scalar.

template<typename T, typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
inline Tensor(T val)#

Construct a new Tensor object from an arithmetic parameter.

Template Parameters:
  • T – Type of the input parameter.

  • VT – Decayed type of the input parameter.

Parameters:

val – Input value.

inline Tensor(const std::vector<DimSize_t> &dims)#

Construct a new Tensor object from dimensions.

Parameters:

dims – dimensions of the tensor

template<typename T, std::size_t SIZE_0>
inline constexpr Tensor(Array1D<T, SIZE_0> &&arr)#

Construct a new Tensor object from the 1-dimension Array helper.

Template Parameters:
  • T – datatype

  • SIZE_0 – first array dimension.

template<typename T, std::size_t SIZE_0, std::size_t SIZE_1>
inline constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)#

Construct a new Tensor object from the 2-dimensions Array helper.

Template Parameters:
  • T – datatype

  • SIZE_0 – first array dimension.

  • SIZE_1 – second array dimension.

template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
inline constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)#

Construct a new Tensor object from the 3-dimensions Array helper.

Template Parameters:
  • T – datatype

  • SIZE_0 – first array dimension.

  • SIZE_1 – second array dimension.

  • SIZE_2 – third array dimension.

template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
inline constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)#

Construct a new Tensor object from the 4-dimensions Array helper.

Template Parameters:
  • T – datatype

  • SIZE_0 – first array dimension.

  • SIZE_1 – second array dimension.

  • SIZE_2 – third array dimension.

  • SIZE_3 – fourth array dimension.

Tensor(const Tensor &other) = default#

Copy constructor. Construct a new Tensor object from another one (shallow copy). Data memory is not copied, but shared between the new Tensor and the initial one.

Parameters:

other

Tensor(Tensor &&other) = default#

Move constructor.

Parameters:

other

Tensor &operator=(const Tensor &other)#

Copy dimensions, datatype and data from another Tensor. If current Tensor already has an implementation, data is copied to the existing implementation. Tensor backend/device remain untouched. If current Tensor does not have an implementation, only a shallow copy is performed and the Tensor will share data with other.

Parameters:

other – other Tensor object.

Returns:

Tensor&

template<typename T, std::size_t SIZE_0>
inline constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr)#
template<typename T, std::size_t SIZE_0, std::size_t SIZE_1>
inline constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr)#
template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
inline constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)#
template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
inline constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)#
inline bool operator==(const Tensor &otherTensor) const#

Assess data type, dimensions, backend and data are the same.

Parameters:

otherTensor

Tensor operator+(const Tensor &other) const#

Element-wise addition operation for two Tensors.

Todo:

If input Tensors have a different dataType, the output should have the dataType of the Tensor with the highest precision.

Note

Tensors should be stored on the same backend.

Parameters:

other

Returns:

Tensor

Tensor operator-(const Tensor &other) const#

Element-wise subtraction operation for two Tensors.

Todo:

If input Tensors have a different dataType, the output should have the dataType of the Tensor with the highest precision.

Note

Tensors should be stored on the same backend.

Parameters:

other

Returns:

Tensor

Tensor operator*(const Tensor &other) const#

Element-wise multiplication operation for two Tensors.

Todo:

If input Tensors have a different dataType, the output should have the dataType of the Tensor with the highest precision.

Note

Tensors should be stored on the same backend.

Parameters:

other

Returns:

Tensor

Tensor operator/(const Tensor &other) const#

Element-wise division operation for two Tensors.

Todo:

If input Tensors have a different dataType, the output should have the dataType of the Tensor with the highest precision.

Note

Tensors should be stored on the same backend.

Parameters:

other

Returns:

Tensor

Tensor sqrt() const#

Element-wise sqrt operation for Tensor.

Returns:

Tensor

~Tensor() noexcept#
inline Tensor clone() const#

Perform a deep copy of the tensor.

inline const std::string backend() const#
inline void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true)#

Set the backend of the Tensor associated implementation. If there was no previous implementation set, data will be allocated, but it will not be initialized to any particular value. If data was already initialized in a previous backend, it will be moved to the new one except if copyFrom is false.

Parameters:
  • name – Backend name

  • device – Backend device

  • copyFrom – If true (default), move data from previous backend/device to the new one. Previous data is lost otherwise.

inline constexpr DataType dataType() const noexcept#

Get the data type enum.

Returns:

constexpr DataType

inline constexpr DataFormat dataFormat() const noexcept#

Get the data format enum.

Returns:

constexpr DataFormat

inline void setDataType(const DataType dt, bool copyCast = true)#

Set the DataType of the Tensor and converts data if the Tensor has already been initialized and copyCast is true.

Parameters:
  • dt – DataType

  • copyCast – If true (default), previous data is copy-casted. Otherwise previous data is lost.

inline void setDataFormat(const DataFormat df, bool copyTrans = true)#

Set the DataFormat of the Tensor and transpose data, only if the Tensor has already been initialized and copyTrans is true. In this case, a transposition occurs only if both previous format and new format are different from DataFormat::Default.

Parameters:
  • df – New DataFormat

  • copyTrans – If true (default), when both previous format and new format are different from DataFormat::Default, previous data is copy-transposed.

inline constexpr const std::shared_ptr<TensorImpl> &getImpl() const noexcept#

Get the Impl object.

Returns:

constexpr const std::shared_ptr<TensorImpl>&

inline constexpr std::size_t getImplOffset() const noexcept#
inline void setImpl(std::shared_ptr<TensorImpl> impl, std::size_t implOffset = 0)#

Set the Impl object.

Parameters:
  • impl – New impl shared pointer

  • implOffset – Storage offset in this new impl for this Tensor

inline bool hasImpl() const noexcept#

Return if an implementation has been associated.

Returns:

true

Returns:

false

inline std::size_t nbDims() const#

Get number of dimensions of the Tensor.

Returns:

std::size_t

template<DimIdx_t DIM>
inline constexpr std::array<DimSize_t, DIM> dims() const#

Get dimensions of the Tensor object.

Template Parameters:

DIM – number of dimensions.

Returns:

constexpr std::array<DimSize_t, DIM>

inline constexpr const std::vector<DimSize_t> &dims() const noexcept#

Get dimensions of the Tensor object.

Returns:

constexpr const std::vector<DimSize_t>&

inline constexpr const std::vector<DimSize_t> &strides() const noexcept#

Get strides of the Tensor object.

Returns:

constexpr const std::vector<DimSize_t>&

inline constexpr bool isContiguous() const noexcept#

Return true if Tensor is contiguous in memory.

Returns:

bool

inline constexpr std::size_t size() const noexcept#

Get the number of elements in the Tensor object.

Returns:

constexpr std::size_t

inline std::size_t capacity() const noexcept#

Return the current capacity of the tensor, i.e. the actual memory currently being allocated. It can be different from the size:

  • Capacity can be 0 if the tensor memory was not yet initialized (because of lazy initialization, memory is allocated only when it needs to be accessed the first time).

  • Capacity can be > size if the tensor was downsized but memory was not reallocated.

template<std::array<DimSize_t, 1>::size_type DIM>
inline void resize(const std::array<DimSize_t, DIM> &dims)#

Change the dimensions of the Tensor object according to the given argument. If the overall size is not changed (meaning we actually only performed a reshape), data is guaranteed to remain valid. Otherwise, no guarantee is provided regarding the validity of previous data (unlike std::vector). If the new overall size is larger than the previous one, all previous data is invalidated. Otherwise, previous data may or may not remain valid, depending on the backend implementation.

Template Parameters:

DIM – Number of dimensions.

Parameters:

dims – New dimensions

void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>())#

Change the dimensions of the Tensor object according to the given argument. If the overall size is not changed (meaning we actually only performed a reshape), data is guaranteed to remain valid. Otherwise, no guarantee is provided regarding the validity of previous data (unlike std::vector). If the new overall size is larger than the previous one, all previous data is invalidated. Otherwise, previous data may or may not remain valid, depending on the backend implementation.

Parameters:
  • dims – New dimensions

  • strides – Stride of the tensor (if not specified, “nested” stride is used)

inline bool empty() const#

Return if the Tensor object has at least one element.

Returns:

true

Returns:

false

inline void zeros() const#

Set each element of the tensor to zero.

template<typename expectedType>
inline const expectedType &get(std::size_t idx) const#
template<typename expectedType>
inline const expectedType &get(std::vector<std::size_t> coordIdx) const#
template<typename expectedType>
inline void set(std::size_t idx, expectedType value)#
template<typename expectedType>
inline void set(std::vector<std::size_t> coordIdx, expectedType value)#
virtual std::string toString() const override#
inline void print() const#
inline std::shared_ptr<Tensor> grad()#

Get the gradient Tensor. If not initialized, set a Tensor instance and set its implementation if none was previously set.

Note

Dimensions for the Tensor instance are copied from the original current Tensor.

Note

If a Tensor instance was already associated, only the implementation is created with values set to 0.

Note

If Tensor instance and implementation already existed for the gradient nothing is done.

inline void setGrad(std::shared_ptr<Tensor> newGrad)#
inline std::vector<std::size_t> getCoord(std::size_t flatIdx) const#

From the 1D contiguous index, return the coordinate of an element in the tensor. Beware: do not use this function with the storage index!

Parameters:

flatIdx – 1D contiguous index of the value considering a flatten, contiguous, tensor.

Returns:

std::vector<DimSize_t>

inline std::size_t getIdx(const std::vector<std::size_t> &coordIdx) const#

From the coordinate returns the 1D contiguous index of an element in the tensor. If the number of coordinates is inferior to the number of dimensions, the remaining coordinates are assumed to be 0. Beware: the contiguous index will only correspond to the storage index if the tensor is contiguous!

Parameters:

coordIdx – Coordinate to an element in the tensor

Returns:

DimSize_t Contiguous index

inline std::size_t getStorageIdx(const std::vector<std::size_t> &coordIdx) const#

From the coordinate returns the 1D storage index of an element in the tensor. If the number of coordinates is inferior to the number of dimensions, the remaining coordinates are assumed to be 0.

Parameters:

coordIdx – Coordinate to an element in the tensor

Returns:

DimSize_t Storage index

Tensor extract(const std::vector<std::size_t> &coordIdx) const#

Returns a sub-tensor with equal or lower number of dimensions.

Note

For instance, t.extract({1}) on a CHW tensor will return the HW tensor of channel #1. Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor of batch #0 and channel #1.

Note

No memory copy is performed, the returned tensor does not own the memory.

Note

If the number of coordinates matches the number of dimensions, a scalar tensor is returned.

Note

If current tensor was contiguous, the returned tensor is guaranteed to be contiguous as well.

Parameters:

coordIdx – Coordinates of the sub-tensor to extract

Returns:

Tensor Sub-tensor.

Tensor extract(const std::vector<std::size_t> &coordIdx, const std::vector<std::size_t> &dims) const#

Returns a sub-tensor at some coordinate and with some dimension.

Note

Data contiguity of the returned Tensor is not guaranteed.

Parameters:
  • coordIdx – First coordinates of the sub-tensor to extract

  • dims – Dimensions of the sub-tensor to extract

Returns:

Tensor Sub-tensor.

void makeContiguous()#

Make the tensor’s storage contiguous, if it is not already the case. If not contiguous, a new memory space is allocated.

void copyCast(const Tensor &src)#

Copy-cast data from a Tensor on the same device. If current tensor backend/device is set and is different from src, an assertion is raised.

Parameters:

src – Source tensor to copy-cast from.

void copyFrom(const Tensor &src)#

Copy data from a Tensor from another backend/device. If current tensor data type is set and is different from src, an assertion is raised.

Parameters:

src – Source tensor to copy from.

void copyTranspose(const Tensor &src, const std::vector<DimSize_t> &transpose)#

Transpose data from another Tensor (which can be itself).

Parameters:

src – Source tensor to copy from.

void copyTranspose(const Tensor &src, const DataFormatTranspose &transpose)#
void copyCastFrom(const Tensor &src, std::shared_ptr<Tensor> &movedSrc)#

Copy-cast data from a Tensor.

Parameters:
  • src – Source tensor to copy-cast from.

  • movedSrc – shared_ptr to an intermediate Tensor that will contain the moved data if a device change should occur AND a type conversion is necessary (otherwise it remains unused). Any data already present will be overwritten. No new memory allocation will occur if movedSrc has already been allocated with the right type/size/device. If required, memory is always allocated on current (destination) Tensor’s device.

inline void copyCastFrom(const Tensor &src)#

Copy-cast data from a Tensor. In case of both a device change AND a data type conversion, an intermediate buffer will be allocated and deallocated each time. If required, buffer’s memory is always allocated on current (destination) Tensor’s device.

Parameters:

src – Source tensor to copy-cast from.

Tensor &refContiguous(std::shared_ptr<Tensor> &fallback)#

Return a reference to a Tensor that is guaranteed to be contiguous:

  • itself, if already contiguous;

  • the provided Tensor, overwritten with the copied data. The data type, backend and device stay the same.

Parameters:

fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

Returns:

Reference to either itself or to fallback.

const Tensor &refContiguous(std::shared_ptr<Tensor> &fallback) const#
Tensor &refCast(std::shared_ptr<Tensor> &fallback, const Aidge::DataType &dt)#

Return a reference to a Tensor casted to the desired data type:

  • itself, if already at the right data type;

  • the provided Tensor, overwritten with the copy-casted data. The backend stays the same.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • dt – The desired data type.

Returns:

Reference to either itself or to fallback.

const Tensor &refCast(std::shared_ptr<Tensor> &fallback, const Aidge::DataType &dt) const#
Tensor &refFrom(std::shared_ptr<Tensor> &fallback, const std::string &backend, DeviceIdx_t device = 0)#

Return a reference to a Tensor on the desired backend/device:

  • itself, if already on the right device;

  • the provided Tensor, overwritten with the copied data. The data type stays the same.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • backend – The desired backend.

  • device – The desired device.

Returns:

Reference to either itself or to fallback.

const Tensor &refFrom(std::shared_ptr<Tensor> &fallback, const std::string &backend, DeviceIdx_t device = 0) const#
inline Tensor &refCastFrom(std::shared_ptr<Tensor> &fallback, const Aidge::DataType &dt, const std::string &backend, DeviceIdx_t device = 0)#

Return a reference to a Tensor on desired data type and backend/device:

  • itself, if already with the right characteristics;

  • the provided Tensor, overwritten with the copy-casted data. If required, fallback is always allocated on desired (destination) device.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • dt – The desired data type.

  • backend – The desired backend.

  • device – The desired device.

Returns:

Reference to either itself or to fallback.

inline Tensor &refCastFrom(std::shared_ptr<Tensor> &fallback, const Tensor &targetReqs)#

Return a reference to a Tensor with same characteristics (data type, backend/device) as targetReqs Tensor:

  • itself, if already with the right characteristics;

  • the provided Tensor, overwritten with the copy-casted data. If required, fallback is always allocated on current (destination) Tensor’s device.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • targetReqs – Tensor with the desired target characteristics.

Returns:

Reference to either itself or to fallback.

Tensor &ref(std::shared_ptr<Tensor> &fallback, const Aidge::DataType &dt, const std::string &backend, DeviceIdx_t device = 0)#

Return a reference to a Tensor on desired data type and backend/device:

  • itself, if already with the right characteristics;

  • the provided Tensor, overwritten with the right characteristics.

Note

no data is copy-casted. If it was so in a previous refCastFrom() on the same fallback, it remains valid, otherwise, data is invalid.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • dt – The desired data type.

  • backend – The desired backend.

  • device – The desired device.

Returns:

Reference to either itself or to fallback.

const Tensor &ref(std::shared_ptr<Tensor> &fallback, const Aidge::DataType &dt, const std::string &backend, DeviceIdx_t device = 0) const#
inline Tensor &ref(std::shared_ptr<Tensor> &fallback, const Tensor &targetReqs)#

Return a reference to a Tensor with same characteristics (data type, backend/device) as targetReqs Tensor:

  • itself, if already with the right characteristics;

  • the provided Tensor, overwritten with the right characteristics.

Note

no data is copy-casted. If it was so in a previous refCastFrom() on the same fallback, it remains valid, otherwise, data is invalid.

Parameters:
  • fallback – A shared_ptr to Tensor ready to be overwritten if necessary. The shared_ptr does not need to be initialized. No new memory allocation will occur if fallback has already been allocated with the right type/size/device.

  • targetReqs – Tensor with the desired target characteristics.

Returns:

Reference to either itself or to fallback.

Public Static Functions

static std::set<std::string> getAvailableBackends()#

Get a list of available backends.

Returns:

std::set<std::string>

Public Static Attributes

static constexpr const char *Type = "Tensor"#

Database#

class Database#

Abstract class representing a map from a key to data. All databases should inherit from this class. All subclasses should override Database::getItem to fetch data from a given index.

Subclassed by Aidge::MNIST

Public Functions

Database() = default#
virtual ~Database() noexcept = default#
virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0#

Fetch an item of the database.

Parameters:

index – index of the item.

Returns:

vector of data mapped to index.

virtual std::size_t getLen() const noexcept = 0#

Get the number of items in the database.

Returns:

std::size_t

virtual std::size_t getNbModalities() const noexcept = 0#

Get the number of modalities in one database item.

Returns:

std::size_t

DataProvider#

class aidge_core.DataProvider#
__init__(self: aidge_core.aidge_core.DataProvider, database: aidge_core.aidge_core.Database, batch_size: int, shuffle: bool, drop_last: bool) None#
class DataProvider#

Data Provider. Takes in a database and compose batches by fetching data from the given database.

Todo:

Implement Drop last batch option. Currently returns the last batch with less elements in the batch.

Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.

Public Functions

DataProvider(const Database &database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false)#

Constructor of Data Provider.

Parameters:
  • database – database from which to load the data.

  • batchSize – number of data samples per batch.

std::vector<std::shared_ptr<Tensor>> readBatch() const#

Create a batch for each data modality in the database.

Returns:

a vector of tensors. Each tensor is a batch corresponding to one modality.

inline std::size_t getNbBatch()#

Get the Number of Batch.

Returns:

std::size_t

inline std::size_t getIndexBatch()#

Get the current Index Batch.

Returns:

std::size_t

inline void resetIndexBatch()#

Reset the internal index batch that browses the data of the database to zero.

inline void incrementIndexBatch()#

Increment the internal index batch that browses the data of the database.

void setBatches()#

Setup the batches for one pass on the database.

inline bool done()#

End condition of dataProvider for one pass on the database.

Returns:

true when all batches were fetched, false otherwise

DataProvider *iter()#

iter method for iterator protocol

Returns:

DataProvider*

std::vector<std::shared_ptr<Aidge::Tensor>> next()#

next method for iterator protocol

Returns:

std::vector<std::shared_ptr<Aidge::Tensor>>