Commit 896a5a6

Adds CompositeTensor

jturney committed Feb 15, 2016
1 parent 547786f

Showing 3 changed files with 77 additions and 11 deletions.
46 changes: 46 additions & 0 deletions include/ambit/composite_tensor.h
@@ -0,0 +1,46 @@
//
// Created by Justin Turney on 2/4/16.
//

#ifndef AMBIT_COMPOSITE_TENSOR_H
#define AMBIT_COMPOSITE_TENSOR_H

#include <ambit/common_types.h>
#include <ambit/tensor.h>
#include <ambit/blocked_tensor.h>

namespace ambit
{

template <typename TensorType>
class CompositeTensor
{
string name_;
vector<TensorType> tensors_;

public:

// Sized to ntensors + 1 so elements can be addressed 1-based;
// slot 0 is allocated but left unused.
CompositeTensor(const string& name, size_t ntensors = 0)
: name_(name), tensors_(ntensors+1)
{}

TensorType& operator()(size_t elem)
{
return tensors_[elem];
}

const TensorType& operator()(size_t elem) const
{
return tensors_[elem];
}

// Appends after the preallocated slots; with the default constructor
// the first added tensor lands at index 1.
CompositeTensor& add(TensorType& newTensor)
{
tensors_.push_back(newTensor);
return *this;
}
};

}

#endif //AMBIT_COMPOSITE_TENSOR_H
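
For context, a minimal usage sketch (hypothetical tensor names and dimensions; the ntensors + 1 sizing leaves slot 0 unused so tensors can be addressed 1-based, matching the discussion in the comments below):

    #include <ambit/composite_tensor.h>
    #include <ambit/tensor.h>

    using namespace ambit;

    void example()
    {
        // Two slots, addressed 1-based as D(1) and D(2); slot 0 is
        // allocated by the ntensors + 1 sizing but left unused.
        CompositeTensor<Tensor> D("densities", 2);
        D(1) = Tensor::build(CoreTensor, "D1", {10, 10});
        D(2) = Tensor::build(CoreTensor, "D2", {10, 10, 10, 10});
    }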
8 changes: 5 additions & 3 deletions src/helpers/psi4/convert.cc
@@ -39,9 +39,11 @@ void convert(const psi::Matrix &matrix, ambit::Tensor *target)

Tensor local_tensor = Tensor::build(CoreTensor, "Local Data", {row, col});

// copy data from SharedMatrix to local_tensor
std::copy(matrix.pointer()[0], matrix.pointer()[0] + (row * col),
local_tensor.data().begin());
if (row && col) {
// copy data from SharedMatrix to local_tensor
std::copy(matrix.pointer()[0], matrix.pointer()[0] + (row * col),
local_tensor.data().begin());
}

// Splice data into the target tensor
(*target)() = local_tensor();
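The guard matters because matrix.pointer()[0] reads the first row pointer, which does not exist for an empty matrix. A minimal sketch of the hazard, with hypothetical names standing in for the psi4 types:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for psi::Matrix: rowPtrs plays the role of
    // matrix.pointer(). When rows == 0 the vector is empty, so rowPtrs[0]
    // is out of bounds, even though std::copy(first, first, out) itself
    // would be a harmless no-op. Forming the arguments is already the bug.
    void copyDense(const std::vector<double *> &rowPtrs, std::size_t rows,
                   std::size_t cols, std::vector<double> &out)
    {
        if (rows && cols)
        {
            std::copy(rowPtrs[0], rowPtrs[0] + rows * cols, out.begin());
        }
    }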
34 changes: 26 additions & 8 deletions src/tensor/tensorimpl.cc
@@ -18,12 +18,16 @@ TensorImpl::TensorImpl(TensorType type, const string &name,
numel_ = std::accumulate(dims_.begin(), dims_.end(), static_cast<size_t>(1),
std::multiplies<size_t>());
}

void TensorImpl::slice(ConstTensorImplPtr A, const IndexRange &Cinds,
const IndexRange &Ainds, double alpha, double beta)
{
ambit::slice(this, A, Cinds, Ainds, alpha, beta);
}
void TensorImpl::zero() { scale(0.0); }

void TensorImpl::copy(ConstTensorImplPtr other)
{
TensorImpl::dimensionCheck(this, other);
@@ -35,6 +39,7 @@ void TensorImpl::copy(ConstTensorImplPtr other)
}
slice(other, ranges, ranges, 1.0, 0.0);
}

TensorImplPtr TensorImpl::clone(TensorType t) const
{
if (t == CurrentTensor)
@@ -51,10 +56,10 @@ TensorImplPtr TensorImpl::clone(TensorType t) const
tensor = new DiskTensorImpl(name(), dims());
}
#if defined(HAVE_ELEMENTAL)
else if (t == DistributedTensor)
{
tensor = new cyclops::CyclopsTensorImpl(name(), dims());
}
#endif
else
{
Expand All @@ -63,6 +68,7 @@ TensorImplPtr TensorImpl::clone(TensorType t) const
tensor->copy(this);
return tensor;
}

void TensorImpl::print(FILE *fh, bool level, const string & /*format*/,
int maxcols) const
{
@@ -150,8 +156,8 @@ void TensorImpl::print(FILE *fh, bool level, const string & /*format*/,
j += static_cast<size_t>(maxcols))
{
size_t ncols = (j + static_cast<size_t>(maxcols) >= cols
? cols - j
: static_cast<size_t>(maxcols));

// Column Header
fprintf(fh, " %5s", "");
@@ -180,33 +186,40 @@ void TensorImpl::print(FILE *fh, bool level, const string & /*format*/,
}
}
}

bool TensorImpl::typeCheck(TensorType type, ConstTensorImplPtr A,
bool throwIfDiff)
{
if (A->type() != type)
{
if (throwIfDiff)
{
throw std::runtime_error("TensorImpl::typeCheck: type mismatch");
}
return true;
}
else
{
return false;
}
}

bool TensorImpl::rankCheck(size_t rank, ConstTensorImplPtr A, bool throwIfDiff)
{
if (A->rank() != rank)
{
if (throwIfDiff)
{
throw std::runtime_error("TensorImpl::rankCheck: Rank mismatch");
}
return true;
}
else
{
return false;
}
}

bool TensorImpl::squareCheck(ConstTensorImplPtr A, bool throwIfDiff)
{
if (TensorImpl::rankCheck(2, A, throwIfDiff))
@@ -217,11 +230,14 @@ bool TensorImpl::squareCheck(ConstTensorImplPtr A, bool throwIfDiff)
{
bool diff = (A->dims()[0] != A->dims()[1]);
if (diff && throwIfDiff)
{
throw std::runtime_error(
"TensorImpl::squareCheck: Dimension mismatch");
}
return diff;
}
}

bool TensorImpl::dimensionCheck(ConstTensorImplPtr A, ConstTensorImplPtr B,
bool throwIfDiff)
{
@@ -243,8 +259,10 @@ bool TensorImpl::dimensionCheck(ConstTensorImplPtr A, ConstTensorImplPtr B,
}
}
if (diff && throwIfDiff)
{
throw std::runtime_error(
"TensorImpl::dimensionCheck: Dimension mismatch"); // Minor TODO
"TensorImpl::dimensionCheck: Dimension mismatch");
} // Minor TODO
return diff;
}
}
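
One note on reading these helpers: each returns true when the tensors differ, and throws instead when throwIfDiff is set. A hedged usage sketch, assuming a ConstTensorImplPtr is in scope:

    // Validate-or-throw form: squareCheck(A, true) throws unless A is a
    // rank-2 tensor with equal dimensions.
    void requireSquare(ConstTensorImplPtr A)
    {
        TensorImpl::squareCheck(A, /*throwIfDiff=*/true);
    }

    // Query form: the helpers report differences, so negate the result
    // to ask whether the property holds.
    bool isSquare(ConstTensorImplPtr A)
    {
        return !TensorImpl::squareCheck(A, /*throwIfDiff=*/false);
    }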

2 comments on commit 896a5a6

@fevangelista (Collaborator)

@jturney: this looks interesting! So what can you do with composite tensors?

@jturney (Owner, Author)


This initial version is nothing more than a "wrapper" around a vector, which lets me do this:

    BlockedTensor Dbar = ...;
    CompositeTensor<BlockedTensor> D("densities", 2);

    ...

    Dbar("q,s,p,r") = 2.0 * D(1)("q,p") * D(1)("s,r");
    Dbar("q,s,p,r") -= D(1)("s,p") * D(1)("q,r");
    Dbar("q,s,p,r") -= D(2)("q,s,p,r");

I'm considering making it able to select the appropriate tensor:

    BlockedTensor Dbar = ...;
    CompositeTensor<BlockedTensor> D("densities", 2);

    ...

    Dbar("q,s,p,r") = 2.0 * D("q,p") * D(1)("s,r");
    Dbar("q,s,p,r") -= D("s,p") * D(1)("q,r");
    Dbar("q,s,p,r") -= D("q,s,p,r");

Thoughts?
