array
C++ library for multi-dimensional arrays
array.h File Reference

Main header for array library. More...

#include <array>
#include <cassert>
#include <cstdio>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>


Classes

class  index_iterator
 
class  dim< Min_, Extent_, Stride_ >
 
class  interval< Min_, Extent_ >
 
class  shape< Dims >
 
class  shape_traits< Shape >
 
class  copy_shape_traits< ShapeSrc, ShapeDst >
 
class  array_ref< T, Shape >
 
class  array< T, Shape, Alloc >
 
class  auto_allocator< T, N, Alignment, BaseAlloc >
 
class  uninitialized_allocator< BaseAlloc >
 

Macros

#define NDARRAY_INLINE   inline
 
#define NDARRAY_UNIQUE   static
 
#define NDARRAY_HOST_DEVICE
 
#define NDARRAY_RESTRICT
 
#define NDARRAY_PRINT_ERR(...)   fprintf(stderr, __VA_ARGS__)
 
#define NDARRAY_INDEX_T_FMT   "%td"
 

Typedefs

using size_t = std::size_t
 
using index_t = std::ptrdiff_t
 
template<index_t Extent>
using fixed_interval = interval< dynamic, Extent >
 
template<index_t Extent, index_t Stride = dynamic>
using fixed_dim = dim< dynamic, Extent, Stride >
 
template<index_t Min = dynamic, index_t Extent = dynamic>
using dense_dim = dim< Min, Extent, 1 >
 
template<index_t Stride>
using strided_dim = dim< dynamic, dynamic, Stride >
 
template<index_t Min = dynamic, index_t Extent = dynamic>
using broadcast_dim = dim< Min, Extent, 0 >
 
template<size_t Rank>
using index_of_rank = internal::tuple_of_n< index_t, Rank >
 
template<size_t Rank>
using shape_of_rank = decltype(make_shape_from_tuple(internal::tuple_of_n< dim<>, Rank >()))
 
template<size_t Rank>
using dense_shape = decltype(internal::make_default_dense_shape< Rank >())
 
template<index_t... Extents>
using fixed_dense_shape = decltype(make_shape_from_tuple(internal::make_compact_dims< 1 >(dim< 0, Extents >()...)))
 
template<class T , class Shape >
using const_array_ref = array_ref< const T, Shape >
 
template<class T , size_t Rank>
using array_ref_of_rank = array_ref< T, shape_of_rank< Rank >>
 
template<class T , size_t Rank>
using const_array_ref_of_rank = array_ref_of_rank< const T, Rank >
 
template<class T , size_t Rank>
using dense_array_ref = array_ref< T, dense_shape< Rank >>
 
template<class T , size_t Rank>
using const_dense_array_ref = dense_array_ref< const T, Rank >
 
template<class T , size_t Rank, class Alloc = std::allocator<T>>
using array_of_rank = array< T, shape_of_rank< Rank >, Alloc >
 
template<class T , size_t Rank, class Alloc = std::allocator<T>>
using dense_array = array< T, dense_shape< Rank >, Alloc >
 
template<class T , class = std::enable_if_t<std::is_trivial<T>::value>>
using uninitialized_std_allocator = uninitialized_allocator< std::allocator< T >>
 
template<class T , size_t N, size_t Alignment = sizeof(T), class = std::enable_if_t<std::is_trivial<T>::value>>
using uninitialized_auto_allocator = uninitialized_allocator< auto_allocator< T, N, Alignment >>
 

Functions

NDARRAY_INLINE NDARRAY_HOST_DEVICE interval range (index_t begin, index_t end)
 
NDARRAY_INLINE NDARRAY_HOST_DEVICE interval r (index_t begin, index_t end)
 
template<index_t Extent>
NDARRAY_HOST_DEVICE fixed_interval< Extent > range (index_t begin)
 
template<index_t Extent>
NDARRAY_HOST_DEVICE fixed_interval< Extent > r (index_t begin)
 
template<index_t Min, index_t Extent>
NDARRAY_HOST_DEVICE index_iterator begin (const interval< Min, Extent > &d)
 
template<index_t Min, index_t Extent>
NDARRAY_HOST_DEVICE index_iterator end (const interval< Min, Extent > &d)
 
NDARRAY_INLINE NDARRAY_HOST_DEVICE index_t clamp (index_t x, index_t min, index_t max)
 
template<class Range >
NDARRAY_HOST_DEVICE index_t clamp (index_t x, const Range &r)
 
template<index_t InnerExtent, index_t Min, index_t Extent>
NDARRAY_HOST_DEVICE internal::split_result< InnerExtent > split (const interval< Min, Extent > &v)
 
template<index_t InnerExtent, index_t Min, index_t Extent, index_t Stride>
NDARRAY_HOST_DEVICE internal::split_result< InnerExtent > split (const dim< Min, Extent, Stride > &v)
 
template<index_t Min, index_t Extent>
NDARRAY_HOST_DEVICE internal::split_result split (const interval< Min, Extent > &v, index_t inner_extent)
 
template<index_t Min, index_t Extent, index_t Stride>
NDARRAY_HOST_DEVICE internal::split_result split (const dim< Min, Extent, Stride > &v, index_t inner_extent)
 
template<class... Dims>
NDARRAY_HOST_DEVICE auto make_shape (Dims...dims)
 
template<class... Dims>
NDARRAY_HOST_DEVICE shape< Dims... > make_shape_from_tuple (const std::tuple< Dims... > &dims)
 
template<size_t... DimIndices, class... Dims, class = internal::enable_if_permutation<sizeof...(DimIndices), DimIndices...>>
NDARRAY_HOST_DEVICE auto transpose (const shape< Dims... > &shape)
 
template<size_t... DimIndices, class... Dims>
NDARRAY_HOST_DEVICE auto reorder (const shape< Dims... > &shape)
 
template<class Shape >
NDARRAY_HOST_DEVICE auto make_compact (const Shape &s)
 
template<class ShapeDst , class ShapeSrc , class = internal::enable_if_shapes_compatible<ShapeSrc, ShapeDst>>
NDARRAY_HOST_DEVICE bool is_compatible (const ShapeSrc &src)
 
template<class ShapeDst , class ShapeSrc , class = internal::enable_if_shapes_explicitly_compatible<ShapeDst, ShapeSrc>>
NDARRAY_HOST_DEVICE ShapeDst convert_shape (const ShapeSrc &src)
 
template<class ShapeDst , class ShapeSrc , class = internal::enable_if_shapes_explicitly_compatible<ShapeSrc, ShapeDst>>
NDARRAY_HOST_DEVICE bool is_explicitly_compatible (const ShapeSrc &src)
 
template<class Shape , class Fn , class = internal::enable_if_callable<Fn, typename Shape::index_type>>
NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void for_each_index_in_order (const Shape &shape, Fn &&fn)
 
template<class Shape , class Ptr , class Fn , class = internal::enable_if_callable<Fn, typename std::remove_pointer<Ptr>::type&>>
NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void for_each_value_in_order (const Shape &shape, Ptr base, Fn &&fn)
 
template<class Shape , class ShapeA , class PtrA , class ShapeB , class PtrB , class Fn , class = internal::enable_if_callable<Fn, typename std::remove_pointer<PtrA>::type&, typename std::remove_pointer<PtrB>::type&>>
NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void for_each_value_in_order (const Shape &shape, const ShapeA &shape_a, PtrA base_a, const ShapeB &shape_b, PtrB base_b, Fn &&fn)
 
template<size_t... LoopOrder, class Shape , class Fn , class = internal::enable_if_callable<Fn, typename Shape::index_type>, std::enable_if_t<(sizeof...(LoopOrder)==0), int > = 0>
NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void for_each_index (const Shape &s, Fn &&fn)
 
template<size_t... LoopOrder, class Shape , class Fn , class = internal::enable_if_applicable<Fn, typename Shape::index_type>, std::enable_if_t<(sizeof...(LoopOrder)==0), int > = 0>
NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void for_all_indices (const Shape &s, Fn &&fn)
 
template<class T , class Shape >
NDARRAY_HOST_DEVICE array_ref< T, Shape > make_array_ref (T *base, const Shape &shape)
 
template<class T , class Shape , class Alloc = std::allocator<T>, class = internal::enable_if_allocator<Alloc>>
auto make_array (const Shape &shape, const Alloc &alloc=Alloc())
 
template<class T , class Shape , class Alloc = std::allocator<T>, class = internal::enable_if_allocator<Alloc>>
auto make_array (const Shape &shape, const T &value, const Alloc &alloc=Alloc())
 
template<class T , class Shape , class Alloc >
void swap (array< T, Shape, Alloc > &a, array< T, Shape, Alloc > &b)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void copy (const array_ref< TSrc, ShapeSrc > &src, const array_ref< TDst, ShapeDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void copy (const array_ref< TSrc, ShapeSrc > &src, array< TDst, ShapeDst, AllocDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocSrc , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void copy (const array< TSrc, ShapeSrc, AllocSrc > &src, const array_ref< TDst, ShapeDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocSrc , class AllocDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void copy (const array< TSrc, ShapeSrc, AllocSrc > &src, array< TDst, ShapeDst, AllocDst > &dst)
 
template<class T , class ShapeSrc , class Alloc = std::allocator<typename std::remove_const<T>::type>>
auto make_copy (const array_ref< T, ShapeSrc > &src, const Alloc &alloc=Alloc())
 
template<class T , class ShapeSrc , class AllocSrc , class AllocDst = AllocSrc, class = internal::enable_if_allocator<AllocDst>>
auto make_copy (const array< T, ShapeSrc, AllocSrc > &src, const AllocDst &alloc=AllocDst())
 
template<class T , class ShapeSrc , class ShapeDst , class Alloc = std::allocator<typename std::remove_const<T>::type>, class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
auto make_copy (const array_ref< T, ShapeSrc > &src, const ShapeDst &shape, const Alloc &alloc=Alloc())
 
template<class T , class ShapeSrc , class ShapeDst , class AllocSrc , class AllocDst = AllocSrc, class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
auto make_copy (const array< T, ShapeSrc, AllocSrc > &src, const ShapeDst &shape, const AllocDst &alloc=AllocDst())
 
template<class T , class Shape , class Alloc = std::allocator<typename std::remove_const<T>::type>>
auto make_compact_copy (const array_ref< T, Shape > &src, const Alloc &alloc=Alloc())
 
template<class T , class Shape , class AllocSrc , class AllocDst = AllocSrc>
auto make_compact_copy (const array< T, Shape, AllocSrc > &src, const AllocDst &alloc=AllocDst())
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void move (const array_ref< TSrc, ShapeSrc > &src, const array_ref< TDst, ShapeDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void move (const array_ref< TSrc, ShapeSrc > &src, array< TDst, ShapeDst, AllocDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocSrc , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void move (array< TSrc, ShapeSrc, AllocSrc > &src, const array_ref< TDst, ShapeDst > &dst)
 
template<class TSrc , class TDst , class ShapeSrc , class ShapeDst , class AllocSrc , class AllocDst , class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
void move (array< TSrc, ShapeSrc, AllocSrc > &src, array< TDst, ShapeDst, AllocDst > &dst)
 
template<class T , class Shape , class Alloc >
void move (array< T, Shape, Alloc > &&src, array< T, Shape, Alloc > &dst)
 
template<class T , class ShapeSrc , class ShapeDst , class Alloc = std::allocator<T>, class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
auto make_move (const array_ref< T, ShapeSrc > &src, const ShapeDst &shape, const Alloc &alloc=Alloc())
 
template<class T , class ShapeSrc , class ShapeDst , class AllocSrc , class AllocDst = AllocSrc, class = internal::enable_if_shapes_copy_compatible<ShapeDst, ShapeSrc>>
auto make_move (array< T, ShapeSrc, AllocSrc > &src, const ShapeDst &shape, const AllocDst &alloc=AllocDst())
 
template<class T , class Shape , class Alloc >
auto make_move (array< T, Shape, Alloc > &&src, const Shape &shape, const Alloc &alloc=Alloc())
 
template<class T , class Shape , class Alloc = std::allocator<T>>
auto make_compact_move (const array_ref< T, Shape > &src, const Alloc &alloc=Alloc())
 
template<class T , class Shape , class AllocSrc , class AllocDst = AllocSrc>
auto make_compact_move (array< T, Shape, AllocSrc > &src, const AllocDst &alloc=AllocDst())
 
template<class T , class Shape , class Alloc >
auto make_compact_move (array< T, Shape, Alloc > &&src, const Alloc &alloc=Alloc())
 
template<class T , class Shape >
NDARRAY_HOST_DEVICE void fill (const array_ref< T, Shape > &dst, const T &value)
 
template<class T , class Shape , class Alloc >
void fill (array< T, Shape, Alloc > &dst, const T &value)
 
template<class T , class Shape , class Generator , class = internal::enable_if_callable<Generator>>
NDARRAY_HOST_DEVICE void generate (const array_ref< T, Shape > &dst, Generator &&g)
 
template<class T , class Shape , class Alloc , class Generator , class = internal::enable_if_callable<Generator>>
void generate (array< T, Shape, Alloc > &dst, Generator &&g)
 
template<typename T , typename Shape , class Fn >
NDARRAY_HOST_DEVICE void transform_index (const array_ref< T, Shape > &dst, Fn &&fn)
 
template<typename T , typename Shape , class Fn >
void transform_index (array< T, Shape > &dst, Fn &&fn)
 
template<typename T , typename Shape , class Fn >
NDARRAY_HOST_DEVICE void transform_indices (const array_ref< T, Shape > &dst, Fn &&fn)
 
template<typename T , typename Shape , class Fn >
void transform_indices (array< T, Shape > &dst, Fn &&fn)
 
template<class TA , class ShapeA , class TB , class ShapeB >
NDARRAY_HOST_DEVICE bool equal (const array_ref< TA, ShapeA > &a, const array_ref< TB, ShapeB > &b)
 
template<class TA , class ShapeA , class TB , class ShapeB , class AllocB >
bool equal (const array_ref< TA, ShapeA > &a, const array< TB, ShapeB, AllocB > &b)
 
template<class TA , class ShapeA , class AllocA , class TB , class ShapeB >
bool equal (const array< TA, ShapeA, AllocA > &a, const array_ref< TB, ShapeB > &b)
 
template<class TA , class ShapeA , class AllocA , class TB , class ShapeB , class AllocB >
bool equal (const array< TA, ShapeA, AllocA > &a, const array< TB, ShapeB, AllocB > &b)
 
template<class NewShape , class T , class OldShape >
NDARRAY_HOST_DEVICE array_ref< T, NewShape > convert_shape (const array_ref< T, OldShape > &a)
 
template<class NewShape , class T , class OldShape , class Allocator >
array_ref< T, NewShape > convert_shape (array< T, OldShape, Allocator > &a)
 
template<class NewShape , class T , class OldShape , class Allocator >
const_array_ref< T, NewShape > convert_shape (const array< T, OldShape, Allocator > &a)
 
template<class U , class T , class Shape , class = std::enable_if_t<sizeof(T) == sizeof(U)>>
NDARRAY_HOST_DEVICE array_ref< U, Shape > reinterpret (const array_ref< T, Shape > &a)
 
template<class U , class T , class Shape , class Alloc , class = std::enable_if_t<sizeof(T) == sizeof(U)>>
array_ref< U, Shape > reinterpret (array< T, Shape, Alloc > &a)
 
template<class U , class T , class Shape , class Alloc , class = std::enable_if_t<sizeof(T) == sizeof(U)>>
const_array_ref< U, Shape > reinterpret (const array< T, Shape, Alloc > &a)
 
template<class U , class T , class Shape >
array_ref< U, Shape > reinterpret_const (const const_array_ref< T, Shape > &a)
 
template<class NewShape , class T , class OldShape >
NDARRAY_HOST_DEVICE array_ref< T, NewShape > reinterpret_shape (const array_ref< T, OldShape > &a, const NewShape &new_shape, index_t offset=0)
 
template<class NewShape , class T , class OldShape , class Allocator >
array_ref< T, NewShape > reinterpret_shape (array< T, OldShape, Allocator > &a, const NewShape &new_shape, index_t offset=0)
 
template<class NewShape , class T , class OldShape , class Allocator >
const_array_ref< T, NewShape > reinterpret_shape (const array< T, OldShape, Allocator > &a, const NewShape &new_shape, index_t offset=0)
 
template<typename NewShape , typename T , typename OldShape , typename Alloc >
array< T, NewShape, Alloc > move_reinterpret_shape (array< T, OldShape, Alloc > &&from, const NewShape &new_shape, index_t offset=0)
 
template<typename NewShape , typename T , typename OldShape , typename Alloc >
array< T, NewShape, Alloc > move_reinterpret_shape (array< T, OldShape, Alloc > &&from, index_t offset=0)
 
template<size_t... DimIndices, class T , class OldShape , class = internal::enable_if_permutation<OldShape::rank(), DimIndices...>>
NDARRAY_HOST_DEVICE auto transpose (const array_ref< T, OldShape > &a)
 
template<size_t... DimIndices, class T , class OldShape , class Allocator , class = internal::enable_if_permutation<OldShape::rank(), DimIndices...>>
auto transpose (array< T, OldShape, Allocator > &a)
 
template<size_t... DimIndices, class T , class OldShape , class Allocator , class = internal::enable_if_permutation<OldShape::rank(), DimIndices...>>
auto transpose (const array< T, OldShape, Allocator > &a)
 
template<size_t... DimIndices, class T , class OldShape >
NDARRAY_HOST_DEVICE auto reorder (const array_ref< T, OldShape > &a)
 
template<size_t... DimIndices, class T , class OldShape , class Allocator >
auto reorder (array< T, OldShape, Allocator > &a)
 
template<size_t... DimIndices, class T , class OldShape , class Allocator >
auto reorder (const array< T, OldShape, Allocator > &a)
 

Variables

constexpr index_t dynamic = -9
 
constexpr index_t unresolved = std::numeric_limits<index_t>::min()
 
const interval< 0,-1 > all
 
const interval< 0,-1 > _
 

Detailed Description

Main header for array library.

Typedef Documentation

using index_t = std::ptrdiff_t

When NDARRAY_INT_INDICES is defined, array indices are int values, otherwise they are std::ptrdiff_t. std::ptrdiff_t is helpful for the compiler to optimize address arithmetic, because it has the same size as a pointer.

using fixed_interval = interval<dynamic, Extent>

An alias of interval with a fixed extent and dynamic min. This is useful as the inner part of a split with a fixed extent.

using fixed_dim = dim<dynamic, Extent, Stride>

Alias of dim with a compile-time constant extent Extent, where the min is not specified at compile time.

using dense_dim = dim<Min, Extent, 1>

Alias of dim where the compile-time stride parameter is known to be one.

using strided_dim = dim<dynamic, dynamic, Stride>

Alias of dim where only the stride parameter is specified at compile time.

using broadcast_dim = dim<Min, Extent, 0>

Alias of dim where the compile-time stride parameter is known to be zero.

using index_of_rank = internal::tuple_of_n<index_t, Rank>

Type of an index for an array of rank Rank. This will be std::tuple<...> with Rank index_t values.

For example, index_of_rank<3> is std::tuple<index_t, index_t, index_t>.

using shape_of_rank = decltype(make_shape_from_tuple(internal::tuple_of_n<dim<>, Rank>()))

An arbitrary shape with the specified rank Rank. This shape is compatible with any other shape of the same rank.

using dense_shape = decltype(internal::make_default_dense_shape<Rank>())

A shape where the innermost dimension is a dense_dim, and all other dimensions are arbitrary.

using fixed_dense_shape = decltype(make_shape_from_tuple(internal::make_compact_dims<1>(dim<0, Extents>()...)))

A shape where all extents (and automatically computed compact strides) are constant.

using array_ref_of_rank = array_ref<T, shape_of_rank<Rank>>

array_ref with an arbitrary shape of rank Rank.

using dense_array_ref = array_ref<T, dense_shape<Rank>>

array_ref with a shape dense_shape<Rank>.

using array_of_rank = array<T, shape_of_rank<Rank>, Alloc>

An array type with an arbitrary shape of rank Rank.

using dense_array = array<T, dense_shape<Rank>, Alloc>

An array type with shape dense_shape<Rank>.

using uninitialized_std_allocator = uninitialized_allocator<std::allocator<T>>

Allocator equivalent to std::allocator<T> that does not default construct values.
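
For example, a minimal sketch (hypothetical sizes; assumes the shape can be constructed from its extents) of an array whose elements are left uninitialized until explicitly written:

  #include "array.h"

  void make_uninitialized() {
    // Elements are not value-initialized at construction; they are
    // indeterminate until written. Only valid for trivial element types.
    nda::array<float, nda::dense_shape<2>, nda::uninitialized_std_allocator<float>>
        image({640, 480});
    nda::fill(image, 0.0f);
  }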

using uninitialized_auto_allocator = uninitialized_allocator<auto_allocator<T, N, Alignment>>

Allocator equivalent to auto_allocator<T, N, Alignment> that does not default construct values.

Function Documentation

NDARRAY_INLINE NDARRAY_HOST_DEVICE interval nda::range (index_t begin, index_t end)

Make an interval from a half-open range [begin, end).

NDARRAY_HOST_DEVICE fixed_interval<Extent> nda::range ( index_t  begin)

Make an interval from a half-open range [begin, begin + Extent).

NDARRAY_HOST_DEVICE index_iterator nda::begin ( const interval< Min, Extent > &  d)

Overloads of std::begin and std::end for an interval.
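
These overloads allow an interval to be used directly in a range-based for loop; a minimal sketch:

  #include "array.h"
  #include <cstdio>

  void iterate_interval() {
    // begin()/end() make the interval iterable; i takes the values 0..4.
    for (nda::index_t i : nda::range(0, 5)) {
      printf("%td\n", i);
    }
  }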

NDARRAY_INLINE NDARRAY_HOST_DEVICE index_t nda::clamp (index_t x, index_t min, index_t max)

Clamp x to the interval [min, max].

NDARRAY_HOST_DEVICE index_t nda::clamp (index_t x, const Range &r)

Clamp x to the range described by an object r with a min() and max() method.

NDARRAY_HOST_DEVICE internal::split_result<InnerExtent> nda::split ( const interval< Min, Extent > &  v)

Split an interval v into an iterable range of intervals by a compile-time constant InnerExtent. If InnerExtent does not divide v.extent(), the last interval will be shifted to overlap with the second-to-last interval to preserve the compile-time constant extent, which requires v.extent() to be at least InnerExtent.

Examples:

  • split<4>(interval<>(0, 8)) produces the intervals [0, 4), [4, 8).
  • split<5>(interval<>(0, 12)) produces the intervals [0, 5), [5, 10), [7, 12). Note the last two intervals overlap.
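
A minimal sketch of a tiled loop built from these intervals (hypothetical bounds; the body must tolerate the overlapping final tile):

  #include "array.h"

  void tiled_loop() {
    // The outer loop visits fixed_interval<4> tiles of [0, 10); the last tile
    // is shifted to [6, 10) so every tile has extent 4.
    for (auto tile : nda::split<4>(nda::range(0, 10))) {
      for (nda::index_t i : tile) {
        // ... process index i ...
      }
    }
  }
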
NDARRAY_HOST_DEVICE internal::split_result nda::split (const interval< Min, Extent > &v, index_t inner_extent)

Split an interval v into an iterable range of intervals by inner_extent. If inner_extent does not divide v.extent(), the last iteration will be clamped to the outer interval.

Examples:

  • split(interval<>(0, 12), 5) produces the intervals [0, 5), [5, 10), [10, 12).
NDARRAY_HOST_DEVICE auto nda::make_shape ( Dims...  dims)

Helper function to make a shape from a variadic list of dims....

NDARRAY_HOST_DEVICE auto nda::transpose ( const shape< Dims... > &  shape)

Create a new shape using a list of DimIndices... to use as the dimensions of the shape. The new shape's i'th dimension will be the j'th dimension of shape where j is the i'th value of DimIndices.... If i is not in DimIndices..., then i and j are equal.

DimIndices... must be a permutation of [0, N) where N is the number of indices provided.

Examples:

  • transpose<2, 0, 1>(s) == make_shape(s.dim<2>(), s.dim<0>(), s.dim<1>())
  • transpose<1, 0>(s) == make_shape(s.dim<1>(), s.dim<0>(), ...) where ... is all dimensions after dimension 1.
NDARRAY_HOST_DEVICE auto nda::reorder ( const shape< Dims... > &  shape)

Create a new shape using a list of DimIndices... to use as the dimensions of the shape. The new shape's i'th dimension will be the j'th dimension of shape where j is the i'th value of DimIndices....

Examples:

  • reorder<1, 2>(s) == make_shape(s.dim<1>(), s.dim<2>())
NDARRAY_HOST_DEVICE auto nda::make_compact ( const Shape &  s)

Attempt to make both the compile-time and run-time strides of s compact such that there is no padding between dimensions. Only dynamic strides are potentially replaced with static strides; existing compile-time strides are not modified. Run-time strides are then populated using the shape::resolve algorithm.

For a shape without any existing constant strides, this will return an instance of dense_shape<Shape::rank()>.

The resulting shape may not have Shape::is_compact return true if the shape has existing non-compact compile-time constant strides.

NDARRAY_HOST_DEVICE bool nda::is_compatible ( const ShapeSrc &  src)

Returns true if a shape src can be assigned to a shape of type ShapeDst without error.

NDARRAY_HOST_DEVICE ShapeDst nda::convert_shape ( const ShapeSrc &  src)

Convert a shape src to shape type ShapeDst. This explicit conversion allows converting a lower-rank shape to a higher-rank shape, where the new dimensions have min 0 and extent 1, and converting a higher-rank shape to a lower-rank shape if the dimensions being sliced are trivial (they have extent one).

NDARRAY_HOST_DEVICE bool nda::is_explicitly_compatible ( const ShapeSrc &  src)

Test if a shape src can be explicitly converted to a shape of type ShapeDst using convert_shape without error.

NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void nda::for_each_index_in_order (const Shape &shape, Fn &&fn)

Iterate over all indices in the shape, calling a function fn for each set of indices. The indices are in the same order as the dims in the shape. The first dim is the 'inner' loop of the iteration, and the last dim is the 'outer' loop.

These functions are typically used to implement shape_traits<> and copy_shape_traits<> objects. Use for_each_index, array_ref<>::for_each_value, or array<>::for_each_value instead.

NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void nda::for_each_value_in_order (const Shape &shape, const ShapeA &shape_a, PtrA base_a, const ShapeB &shape_b, PtrB base_b, Fn &&fn)

Similar to for_each_value_in_order, but iterates over two arrays simultaneously. shape defines the loop nest, while shape_a and shape_b define the memory layout of base_a and base_b.

The min and extent of shape must be in bounds of the arrays a and b; otherwise, values may be read out of bounds. No bounds checking is done in this function; only the strides of shape_a and shape_b are considered.

NDARRAY_UNIQUE NDARRAY_HOST_DEVICE void nda::for_each_index (const Shape &s, Fn &&fn)

Iterate over all indices in the shape s, calling a function fn for each set of indices. for_all_indices calls fn with a list of arguments corresponding to each dim. for_each_index calls fn with an index tuple describing the indices.

If the LoopOrder... permutation is empty, the order of the loops is defined by shape_traits<Shape>, and the callable fn must accept a Shape::index_type in the case of for_each_index, or Shape::rank() index_t objects in the case of for_all_indices.

If the LoopOrder... permutation is not empty, the order of the loops is defined by this ordering. The first index of LoopOrder... is the innermost loop of the loop nest. The callable fn must accept an index_of_rank<sizeof...(LoopOrder)> in the case of for_each_index, or sizeof...(LoopOrder) index_t objects in the case of for_all_indices.
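
A minimal sketch contrasting the two calling conventions (assumes dim's (min, extent) constructor):

  #include "array.h"
  #include <cstdio>
  #include <tuple>

  void visit_indices() {
    // A rank-2 shape with extents 3 and 4 and mins of 0.
    auto s = nda::make_shape(nda::dim<>(0, 3), nda::dim<>(0, 4));

    // for_all_indices passes one index_t argument per dimension.
    nda::for_all_indices(s, [](nda::index_t x, nda::index_t y) {
      printf("(%td, %td)\n", x, y);
    });

    // for_each_index passes a single index tuple (Shape::index_type).
    nda::for_each_index(s, [](const nda::index_of_rank<2>& i) {
      printf("(%td, %td)\n", std::get<0>(i), std::get<1>(i));
    });
  }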

NDARRAY_HOST_DEVICE array_ref<T, Shape> nda::make_array_ref (T *base, const Shape &shape)

Make a new array_ref with shape shape and base pointer base.
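
A minimal sketch of wrapping an existing buffer (strides are given explicitly so the memory layout is unambiguous):

  #include "array.h"

  void wrap_buffer() {
    float data[12] = {0};
    // Interpret data as 3x4: dim 0 has extent 3 and stride 1 (dense),
    // dim 1 has extent 4 and stride 3.
    auto ref = nda::make_array_ref(
        data, nda::make_shape(nda::dim<>(0, 3, 1), nda::dim<>(0, 4, 3)));
    ref(2, 3) = 1.0f;  // writes data[2 + 3 * 3], i.e. data[11]
  }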

auto nda::make_array (const Shape &shape, const Alloc &alloc = Alloc())

Make a new array with shape shape, allocated using the allocator alloc.

void nda::swap (array< T, Shape, Alloc > &a, array< T, Shape, Alloc > &b)

Swap the contents of two arrays.

void nda::copy (const array_ref< TSrc, ShapeSrc > &src, const array_ref< TDst, ShapeDst > &dst)

Copy the contents of the src array or array_ref to the dst array or array_ref. The elements in the shape of dst will be copied, and must be in bounds of src.
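
A minimal sketch (hypothetical extents): because dst's shape selects the copied elements, copying into a smaller array crops src:

  #include "array.h"

  void crop_copy() {
    nda::dense_array<int, 2> src({8, 8}, 7);  // 8x8, every element is 7
    nda::dense_array<int, 2> dst({4, 4});     // covers indices [0, 4) x [0, 4)
    nda::copy(src, dst);  // copies only the region covered by dst's shape
  }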

auto nda::make_copy (const array_ref< T, ShapeSrc > &src, const Alloc &alloc = Alloc())

Make a copy of the src array or array_ref with a new allocator alloc.

auto nda::make_copy (const array_ref< T, ShapeSrc > &src, const ShapeDst &shape, const Alloc &alloc = Alloc())

Make a copy of the src array or array_ref with a new shape shape.

auto nda::make_compact_copy (const array_ref< T, Shape > &src, const Alloc &alloc = Alloc())

Make a copy of the src array or array_ref with a compact version of src's shape.

void nda::move (const array_ref< TSrc, ShapeSrc > &src, const array_ref< TDst, ShapeDst > &dst)

Move the contents from the src array or array_ref to the dst array or array_ref. The elements in the shape of dst will be moved, and must be in bounds of src.

auto nda::make_move (const array_ref< T, ShapeSrc > &src, const ShapeDst &shape, const Alloc &alloc = Alloc())

Make a copy of the src array or array_ref with a new shape shape. The elements of src are moved to the result.

auto nda::make_compact_move (const array_ref< T, Shape > &src, const Alloc &alloc = Alloc())

Make a copy of the src array or array_ref with a compact version of src's shape. The elements of src are moved to the result.

NDARRAY_HOST_DEVICE void nda::fill (const array_ref< T, Shape > &dst, const T &value)

Fill the dst array or array_ref by copy-assigning value.

NDARRAY_HOST_DEVICE void nda::generate (const array_ref< T, Shape > &dst, Generator &&g)

Fill the dst array or array_ref with the result of calling a generator function g. The order in which g is called is the same as shape_traits<Shape>::for_each_value.
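
A minimal sketch filling an array with sequential values:

  #include "array.h"

  void iota_fill() {
    nda::dense_array<int, 1> a({10});
    int next = 0;
    // The generator is called once per element, in for_each_value order.
    nda::generate(a, [&]() { return next++; });
  }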

NDARRAY_HOST_DEVICE void nda::transform_index (const array_ref< T, Shape > &dst, Fn &&fn)

Fill the dst array or array_ref with the result of calling a "pattern function" fn, which takes a single Shape::index_type argument. The order in which fn is called is the same as shape_traits<Shape>::for_each_index.

NDARRAY_HOST_DEVICE void nda::transform_indices (const array_ref< T, Shape > &dst, Fn &&fn)

Fill the dst array or array_ref with the result of calling a "pattern function" fn, which takes as many index_t arguments as there are dimensions in dst. The order in which fn is called is the same as shape_traits<Shape>::for_each_index.
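
A minimal sketch (hypothetical extents) computing each element from its indices:

  #include "array.h"

  void index_gradient() {
    nda::dense_array<float, 2> img({16, 16});
    // fn receives one index_t per dimension; its result is assigned to dst.
    nda::transform_indices(img, [](nda::index_t x, nda::index_t y) {
      return static_cast<float>(x + y);
    });
  }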

NDARRAY_HOST_DEVICE bool nda::equal (const array_ref< TA, ShapeA > &a, const array_ref< TB, ShapeB > &b)

Check if two array or array_refs have equal contents.

NDARRAY_HOST_DEVICE array_ref<T, NewShape> nda::convert_shape ( const array_ref< T, OldShape > &  a)

Convert the shape of the array or array_ref a to a new type of shape NewShape. The new shape is copy constructed from a.shape().
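
A minimal sketch converting a dense view to a fully general shape type of the same rank:

  #include "array.h"

  void generalize_shape() {
    nda::dense_array<int, 2> a({3, 4});
    // shape_of_rank<2> is compatible with any rank-2 shape, so this conversion
    // succeeds; the result refers to the same data as a.
    nda::array_ref_of_rank<int, 2> general =
        nda::convert_shape<nda::shape_of_rank<2>>(a);
    general(1, 2) = 5;
  }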

NDARRAY_HOST_DEVICE array_ref<U, Shape> nda::reinterpret ( const array_ref< T, Shape > &  a)

Reinterpret the array or array_ref a of type T to have a different type U. sizeof(T) must be equal to sizeof(U).

array_ref<U, Shape> nda::reinterpret_const ( const const_array_ref< T, Shape > &  a)

Reinterpret the const_array_ref a of type T (aka array_ref<const T>) to have a different type U using const_cast.

NDARRAY_HOST_DEVICE array_ref<T, NewShape> nda::reinterpret_shape (const array_ref< T, OldShape > &a, const NewShape &new_shape, index_t offset = 0)

Reinterpret the shape of the array or array_ref a to be a new shape new_shape, with a base pointer offset offset.
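
A minimal sketch (assumes the 1-D shape can be constructed from its extent) viewing a dense 2-D array as a flat 1-D array:

  #include "array.h"

  void flatten_view() {
    nda::dense_array<float, 2> a({4, 4});
    // 16 contiguous elements viewed through a 1-D dense shape over the same data.
    auto flat = nda::reinterpret_shape(a, nda::dense_shape<1>(16));
    flat(5) = 1.0f;  // aliases a(1, 1) for this dense 4x4 layout
  }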

array<T, NewShape, Alloc> nda::move_reinterpret_shape (array< T, OldShape, Alloc > &&from, const NewShape &new_shape, index_t offset = 0)

Move an array from to a new array, reinterpreting the shape of the array to new_shape, with a base pointer offset offset. This is only available for trivial T, because it does not guarantee that newly accessible elements are constructed, or newly inaccessible elements are destructed.

NDARRAY_HOST_DEVICE auto nda::transpose ( const array_ref< T, OldShape > &  a)

Reinterpret the shape of the array or array_ref a to be transposed or reordered using transpose<DimIndices...>(a.shape()) or reorder<DimIndices...>(a.shape()).
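
A minimal sketch (hypothetical extents): the result is a view over the same data with the dimensions permuted:

  #include "array.h"

  void transpose_view() {
    nda::dense_array<int, 3> a({4, 5, 6});
    // t's dim 0 is a's dim 2, t's dim 1 is a's dim 0, t's dim 2 is a's dim 1,
    // so t(z, x, y) addresses the same element as a(x, y, z).
    auto t = nda::transpose<2, 0, 1>(a);
    t(3, 1, 2) = 42;  // same element as a(1, 2, 3)
  }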

Variable Documentation

constexpr index_t dynamic = -9

This value indicates that a compile-time constant parameter is an unknown value, and that the corresponding runtime value should be used instead. If a compile-time constant value is not dynamic, it is said to be static. A runtime value is said to be 'compatible with' a compile-time constant value if the two values are equal, or if the compile-time constant value is dynamic.

constexpr index_t unresolved = std::numeric_limits<index_t>::min()

This value indicates a runtime parameter is an unknown value, and may be replaced by a default value computed by the library.

const interval<0, -1> all

Placeholder object representing an interval that, when used in an indexing expression, indicates that the whole dimension should be kept.