Skip to content

Commit

Permalink
product function for tensor-times-vector improved.
Browse files Browse the repository at this point in the history
  • Loading branch information
Cem Bassoy committed Jun 20, 2021
1 parent 2849086 commit a43ac47
Show file tree
Hide file tree
Showing 13 changed files with 459 additions and 242 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ jobs:
# - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11}
# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11}
# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17}
- {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: latest}
- {os: windows-2019, toolset: msvc, version: 14.29, cxxstd: latest}

steps:
- uses: actions/checkout@v2
Expand Down
31 changes: 31 additions & 0 deletions include/boost/numeric/ublas/tensor/extents/extents_functions.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,37 @@ template <class D>
// std::all_of(cbegin(e)+2,cend(e) , [](auto a){return a==1UL;});
}

/** @brief Returns true if extents equals (1,[n,1,...,1]) with n>1, i.e. the shape of a row vector */
template <class D>
[[nodiscard]] inline constexpr bool is_row_vector(extents_base<D> const& e)
{
// a row vector needs at least two extents: (1,n,...); a single extent is never a row vector
if (empty(e) || size(e) == 1 ) {return false;}

// first extent must be 1, second strictly greater than 1, and any remaining extents all 1
if(cbegin(e)[0] == 1ul &&
cbegin(e)[1] > 1ul &&
std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;})){
return true;
}

return false;
}


/** @brief Returns true if extents equals (m,[1,1,...,1]) with m>1, i.e. the shape of a column vector */
template <class D>
[[nodiscard]] inline constexpr bool is_col_vector(extents_base<D> const& e)
{
// a column vector needs at least two extents: (m,1,...); a single extent is never a column vector
if (empty(e) || size(e) == 1) { return false; }

auto const first = cbegin(e);
// leading extent strictly greater than 1, second extent exactly 1, remaining extents (if any) all 1
bool const leading_gt_one = first[0] > 1ul;
bool const second_is_one  = first[1] == 1ul;
bool const tail_all_one   = std::all_of(first + 2ul, cend(e), [](auto d){ return d == 1ul; });

return leading_gt_one && second_is_one && tail_all_one;
}

/** @brief Returns true if (m,[n,1,...,1]) with m>=1 or n>=1 */
template <class D>
[[nodiscard]] inline constexpr bool is_matrix(extents_base<D> const& e)
Expand Down
258 changes: 225 additions & 33 deletions include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include <stdexcept>
#include <type_traits>

#include "../multiplication.hpp"
#include "../extents.hpp"
#include "../type_traits.hpp"
#include "../tags.hpp"
Expand Down Expand Up @@ -49,6 +50,190 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t<is_dynamic_rank_v
} // namespace detail


namespace detail {
/** @brief Multiplies a scalar-shaped tensor with a one-element vector.
 *
 * Result is a tensor of shape nc_base holding the single product a[0]*b(0).
 *
 * @note the caller guarantees that a holds exactly one element and b is non-empty
 */
template <class TC, class TA, class V, class EC>
inline auto scalar_scalar_prod(TA const &a, V const &b, EC const& nc_base)
{
assert(ublas::is_scalar(a.extents()));
using tensor_t = TC;
using value_t  = typename tensor_t::value_type;
using shape_t  = typename tensor_t::extents_type;
auto const cc = value_t(a[0] * b(0));
return tensor_t(shape_t(nc_base), cc);
}

/** @brief Contracts a vector-shaped tensor (extents (n1,[n2,1,...,1]) with n1==1 or n2==1) with vector b along mode m.
 *
 * Two cases are handled:
 *  - m addresses the non-unit mode: the result collapses to a scalar tensor (inner product);
 *  - m addresses a unit mode: the result keeps a's shape and every element is scaled by b(0).
 *
 * @note nc_base is modified in the scaling case to carry the surviving non-unit extent;
 *       the asserts encode the preconditions established by the calling prod overload
 */
template <class TC, class TA, class V, class EC>
inline auto vector_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m)
{
auto const& na = a.extents();

assert( ublas::is_vector(na));
assert(!ublas::is_scalar(na));
assert( ublas::size(na) > 1u);
assert(m > 0);

using tensor = TC;
using value = typename tensor::value_type;
using shape = typename tensor::extents_type;

auto const n1 = na[0];
auto const n2 = na[1];
auto const s = b.size();

// general
// [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with n1==1 or n2==1


// case 1: contraction over the non-unit mode collapses to a scalar
// [n1 1 1 ... 1] x1 [n1 1] -> [1 1 1 ... 1]
// [1 n2 1 ... 1] x2 [n2 1] -> [1 1 1 ... 1]


assert(n1>1 || n2>1);

if( (n1>1u && m==1u) || (n2>1u && m==2u) ){
if(m==1u) assert(n2==1u && n1==s);
if(m==2u) assert(n1==1u && n2==s);
auto cc = std::inner_product( a.begin(), a.end(), b.begin(), value(0) );
return tensor(shape(nc_base),value(cc));
}

// case 2: contraction over a unit mode keeps a's shape and scales by b(0)
// [n1 1 1 ... 1] xj [1 1] -> [n1 1 1 ... 1] with j != 1
// [1 n2 1 ... 1] xj [1 1] -> [1 n2 1 ... 1] with j != 2

// here the contracted mode has extent 1, so b must hold exactly one element
if(n1>1u) assert(m!=1u);
if(n2>1u) assert(m!=2u);
assert(s==1u);

if(n1>1u) assert(n2==1u);
if(n2>1u) assert(n1==1u);

// restore the surviving non-unit extent in the (all-ones) output base
if(n1>1u) nc_base[0] = n1;
if(n2>1u) nc_base[1] = n2;

auto bb = b(0);
auto c = tensor(shape(nc_base));
std::transform(a.begin(),a.end(),c.begin(),[bb](auto aa){ return aa*bb; });
return c;
}


/** @brief Contracts a matrix-shaped tensor (extents (n1,[n2,1,...,1]) with n1>1 and n2>1) with vector b along mode m.
 *
 * Three cases are handled:
 *  - m > 2 addresses a unit mode: the result keeps a's shape, every element scaled by b(0);
 *  - m == 1: vector-times-matrix, result extents (n2,[1,...,1]);
 *  - m == 2: matrix-times-vector, result extents (n1,[1,...,1]).
 *
 * @note assume stride 1 for specific dimensions and therefore requires refactoring for subtensor
 * @note nc_base arrives filled with 1s and is adjusted here to the output extents;
 *       the asserts encode the preconditions established by the calling prod overload
 */
template <class TC, class TA, class V, class EC>
inline auto matrix_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m)
{
auto const& na = a.extents();

assert( ublas::is_matrix(na));
assert(!ublas::is_vector(na));
assert(!ublas::is_scalar(na));
assert( ublas::size(na) > 1u);
assert(m > 0);

using tensor = TC;
using shape = typename tensor::extents_type;
using size_t = typename shape::value_type;

auto const n1 = na[0];
auto const n2 = na[1];
auto const s = b.size();

// general
// [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with either n1>1 and n2>1


// if [n1 n2 1 ... 1] xj [1 1] -> [n1 n2 1 ... 1] for j > 2
// contracted mode has extent 1, so this is a plain element-wise scaling by b(0)
if(m > 2){
nc_base[0] = n1;
nc_base[1] = n2;
assert(s == 1);
auto c = tensor(shape(nc_base));
auto const bb = b(0);
std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;});
return c;
}


// [n1 n2 1 ... 1] x1 [n1 1] -> [n2 1 ... 1] -> vector-times-matrix
// [n1 n2 1 ... 1] x2 [n2 1] -> [n1 1 ... 1] -> matrix-times-vector

// the surviving extent is n2 when contracting mode 1, n1 when contracting mode 2
nc_base[0] = m==1 ? n2 : n1;

auto c = tensor(shape(nc_base));
auto const& wa = a.strides();
auto const* bdata = &(b(0));

// delegate to the recursive matrix-times-vector kernel; output and b use stride 1
detail::recursive::mtv(m-1,n1,n2, c.data(), size_t(1), a.data(), wa[0], wa[1], bdata, size_t(1));

return c;
}



/** @brief Contracts a tensor (more than one non-unit mode, rank > 2) with vector b along mode m.
 *
 * Computes C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im].
 *
 * @note the calling prod overload guarantees b is non-empty and b.size() == a.extents()[m-1];
 *       nc_base arrives filled with 1s and receives a's extents with mode m removed
 */
template <class TC, class TA, class V, class EC>
inline auto tensor_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m)
{
auto const& na = a.extents();

assert( ublas::is_tensor(na));
assert( ublas::size(na) > 1u);
assert(m > 0);

using tensor = TC;
using shape = typename tensor::extents_type;
using layout = typename tensor::layout_type;

auto const pa = a.rank();
auto const nm = na[m-1];
auto const s = b.size();

// treat the legacy vector b as an (s x 1) matrix for the recursive kernel
auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)};
auto wb = ublas::to_strides(nb,layout{} );

//TODO: Include an outer product when legacy vector becomes a new vector.

// output extents: a's extents with mode m removed (remaining entries stay 1)
for (auto i = 0ul, j = 0ul; i < pa; ++i)
if (i != m - 1)
nc_base[j++] = na.at(i);

auto c = tensor(shape(nc_base));

// [n1 n2 ... nm ... np] xm [1 1] -> [n1 n2 ... nm-1 nm+1 ... np]
// contracting over a unit mode degenerates to scaling every element by b(0).
// NOTE(fix): the previous guard 's == 0' could never hold (the caller rejects
// empty b and enforces s == na[m-1]) and would have read b(0) out of bounds
// if taken; the degenerate case intended by the comment above is nm == 1.
if(nm == 1){
assert(s == 1);
auto const bb = b(0);
std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;});
return c;
}


// if [n1 n2 n3 ... np] xm [nm 1] -> [n1 n2 ... nm-1 nm+1 ... np]

auto const& nc = c.extents();
auto const& wc = c.strides();
auto const& wa = a.strides();
auto const* bp = &(b(0));

ttv(m, pa,
c.data(), nc.data(), wc.data(),
a.data(), na.data(), wa.data(),
bp, nb.data(), wb.data());

return c;
}

}//namespace detail


/** @brief Computes the m-mode tensor-times-vector product
*
* Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im]
Expand All @@ -63,45 +248,49 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t<is_dynamic_rank_v
*/
template <class TE, class A, class T = typename tensor_core< TE >::value,
detail::enable_ttv_if_extent_has_dynamic_rank<TE> = true >
inline decltype(auto) prod( tensor_core< TE > const &a, vector<T, A> const &b, const std::size_t m)
inline auto prod( tensor_core< TE > const &a, vector<T, A> const &b, const std::size_t m)
{

using tensor = tensor_core< TE >;
using shape = typename tensor::extents_type;
using value = typename tensor::value_type;
using layout = typename tensor::layout_type;
using resize_tag = typename tensor::resizable_tag;

auto const p = a.rank();
auto const pa = a.rank();

static_assert(std::is_same_v<resize_tag,storage_resizable_container_tag>);
static_assert(is_dynamic_v<shape>);

if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero.");
if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode.");
if (pa < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode.");
if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty.");
if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty.");

auto const& na = a.extents();
auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)};
auto wb = ublas::to_strides(nb,layout{} );

if(b.size() != na[m-1]) throw std::length_error("error in boost::numeric::ublas::prod(ttv): dimension mismatch of tensor and vector.");

auto const sz = std::max( std::size_t(ublas::size(na)-1u), std::size_t(2) );
auto nc_base = typename shape::base_type(sz,1);

for (auto i = 0ul, j = 0ul; i < p; ++i)
if (i != m - 1)
nc_base[j++] = na.at(i);
// output scalar tensor
if(ublas::is_scalar(na)){
return detail::scalar_scalar_prod<tensor>(a,b,nc_base);
}

// output scalar tensor or vector tensor
if (ublas::is_vector(na)){
return detail::vector_vector_prod<tensor>(a,b,nc_base,m);
}

// output scalar tensor or vector tensor
if (ublas::is_matrix(na)){
return detail::matrix_vector_prod<tensor>(a,b,nc_base,m);
}

assert(ublas::is_tensor(na));
return detail::tensor_vector_prod<tensor>(a,b,nc_base,m);

auto nc = shape(nc_base);
auto c = tensor( nc, value{} );

auto const* bb = &(b(0));
ttv(m, p,
c.data(), c.extents().data(), c.strides().data(),
a.data(), a.extents().data(), a.strides().data(),
bb, nb.data(), wb.data());
return c;
}


Expand Down Expand Up @@ -143,7 +332,6 @@ inline auto prod( tensor_core< TE > const &a, vector<T, A> const &b, const std::
constexpr auto p = std::tuple_size_v<shape>;
constexpr auto sz = std::max(std::size_t(std::tuple_size_v<shape>-1U),std::size_t(2));

using shape_b = ublas::extents<2>;
using shape_c = ublas::extents<sz>;
using tensor_c = tensor_core<tensor_engine<shape_c,layout,container>>;

Expand All @@ -158,21 +346,25 @@ inline auto prod( tensor_core< TE > const &a, vector<T, A> const &b, const std::

auto nc_base = typename shape_c::base_type{};
std::fill(nc_base.begin(), nc_base.end(),std::size_t(1));
for (auto i = 0ul, j = 0ul; i < p; ++i)
if (i != m - 1)
nc_base[j++] = na.at(i);

auto nc = shape_c(std::move(nc_base));
auto nb = shape_b{b.size(),1UL};
auto wb = ublas::to_strides(nb,layout{});
auto c = tensor_c( std::move(nc) );
auto const* bb = &(b(0));

ttv(m, p,
c.data(), c.extents().data(), c.strides().data(),
a.data(), a.extents().data(), a.strides().data(),
bb, nb.data(), wb.data() );
return c;
// output scalar tensor
if(ublas::is_scalar(na)){
return detail::scalar_scalar_prod<tensor_c>(a,b,nc_base);
}

// output scalar tensor or vector tensor
if (ublas::is_vector(na)){
return detail::vector_vector_prod<tensor_c>(a,b,nc_base,m);
}

// output scalar tensor or vector tensor
if (ublas::is_matrix(na)){
return detail::matrix_vector_prod<tensor_c>(a,b,nc_base,m);
}

assert(ublas::is_tensor(na));
return detail::tensor_vector_prod<tensor_c>(a,b,nc_base,m);
}


Expand Down Expand Up @@ -201,7 +393,7 @@ inline auto prod( tensor_core< TE > const &a, vector<T, A> const &b)
using shape = typename tensor::extents;
using layout = typename tensor::layout;
using shape_b = extents<2>;
using shape_c = remove_element_t<m,shape>;
using shape_c = remove_element_t<m,shape>; // this is wrong
using container_c = rebind_storage_size_t<shape_c,container>;
using tensor_c = tensor_core<tensor_engine<shape_c,layout,container_c>>;

Expand Down
Loading

0 comments on commit a43ac47

Please sign in to comment.