Skip to content

Commit

Permalink
Fix Forte [Ambit Part] (#53)
Browse files Browse the repository at this point in the history
* Fixes for Forte

* Let there be class functions
  • Loading branch information
JonathonMisiewicz authored Mar 26, 2022
1 parent dd75319 commit f967974
Show file tree
Hide file tree
Showing 5 changed files with 190 additions and 56 deletions.
66 changes: 37 additions & 29 deletions include/ambit/blocked_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -329,6 +329,43 @@ class BlockedTensor
static std::vector<std::vector<size_t>>
label_to_block_keys(const std::vector<std::string> &indices);

// => Simple Arithmetic <= //

void axpy(double alpha, const BlockedTensor& other);
double vector_dot(const BlockedTensor& other);

// Clone the BlockedTensor
BlockedTensor clone();

/**
* This function saves the blocked tensor to a binary file on disk
*
* @param filename the name of the binary file
* @param overwrite overwrite an existing file?
*
*/
void save(const std::string &filename, bool overwrite = true);

/**
 * This member function loads a blocked tensor from a binary file on disk and
 * copies the data into this tensor. If this tensor is empty, it will be
 * resized.
 *
* @param filename the name of the binary file
*
*/
void load(const std::string &filename);

/**
* This function loads a blocked tensor from a binary file and returns it
*
 * @param filename the name of the binary file
* @return a blocked tensor
*
*/
static BlockedTensor load_and_build(const std::string &filename);

private:
std::string name_;
std::size_t rank_;
Expand Down Expand Up @@ -390,35 +427,6 @@ class BlockedTensor
LabeledBlockedTensor operator[](const std::string &indices) const;
};

/**
* This function saves a blocked tensor to a binary file on disk
*
* @param t a tensor
* @param filename the name of the binary file
* @param overwrite overwrite an existing file?
*
*/
void save(BlockedTensor bt, const std::string &filename, bool overwrite = true);

/**
* This function loads a blocked tensor from a binary file on disk and copies
* the data to an existing tensor. If the tensor passed in is empty, it will be
* resized
*
* @param t a tensor
* @param filename the name of the binary file
*
*/
void load(BlockedTensor &bt, const std::string &filename);

/**
* This function loads a blocked tensor from a binary file and returns it
*
* @param t a tensor
* @return a blocked tensor
*
*/
BlockedTensor load_blocked_tensor(const std::string &filename);

class LabeledBlockedTensor
{
Expand Down
60 changes: 58 additions & 2 deletions lib/test_blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -1123,8 +1123,6 @@ def test_dot_product3(self):
ambit.BlockedTensor.reset_mo_space()
ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin)
ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8,9,10,11], ambit.SpinType.AlphaSpin)

A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"])
B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", ["oo", "ov", "vo", "vv"])

no = 5
Expand Down Expand Up @@ -1185,5 +1183,63 @@ def test_contraction_exception1(self):

C['ij'] = A['ia'] * B['aj']

def test_vector_dot(self):
    # Check BlockedTensor.vector_dot against an equivalent numpy einsum.
    ambit.BlockedTensor.reset_mo_space()
    ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin)
    ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8], ambit.SpinType.AlphaSpin)

    block_list = ["oo", "ov", "vo", "vv"]
    A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", block_list)
    B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", block_list)
    no = 5

    Aoo_t, a2 = self.build_and_fill("Aoo", [no, no])
    Boo_t, b2 = self.build_and_fill("Boo", [no, no])
    A.block('oo')['pq'] = Aoo_t['pq']
    B.block('oo')['pq'] = Boo_t['pq']

    # Only the 'oo' blocks are non-zero, so the full dot product reduces
    # to the element-wise contraction of the two 'oo' blocks.
    dot_ambit = A.vector_dot(B)
    dot_numpy = np.einsum("ij,ij->", np.array(a2), np.array(b2))

    self.assertAlmostEqual(dot_ambit, dot_numpy, places=12)

def test_axpy(self):
    # Check BlockedTensor.axpy (A <- A + alpha * B) against numpy.
    ambit.BlockedTensor.reset_mo_space()
    ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin)
    ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8], ambit.SpinType.AlphaSpin)

    block_list = ["oo", "ov", "vo", "vv"]
    A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", block_list)
    B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", block_list)
    no = 5

    Aoo_t, a2 = self.build_and_fill("Aoo", [no, no])
    Boo_t, b2 = self.build_and_fill("Boo", [no, no])
    A.block('oo')['pq'] = Aoo_t['pq']
    B.block('oo')['pq'] = Boo_t['pq']

    # Perform the update in ambit, then reproduce it in numpy.
    A.axpy(7, B)
    expected = np.array(a2) + 7 * np.array(b2)

    residual = expected - np.array(A.block('oo'))
    self.assertAlmostEqual(0.0, np.linalg.norm(residual), places=12)

def test_copy(self):
    # Check that BlockedTensor.clone produces an independent deep copy.
    ambit.BlockedTensor.reset_mo_space()
    ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin)
    ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8,9], ambit.SpinType.AlphaSpin)

    A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"])
    no = 5

    [Aoo_t, a2] = self.build_and_fill("Aoo", [no, no])
    A.block('oo')['pq'] = Aoo_t['pq']

    B = A.clone()
    B.scale(2)

    # Scaling the clone must not touch A, and B's data must equal 2x A's.
    residual = 2 * np.array(A.block('oo')) - np.array(B.block('oo'))
    # Bug fix: the residual was computed but never checked, so the test
    # previously passed vacuously.
    self.assertAlmostEqual(0.0, np.linalg.norm(residual), places=12)


if __name__ == '__main__':
unittest.main()
102 changes: 83 additions & 19 deletions src/blocked_tensor/blocked_tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
#include <set>
#include <stdexcept>
#include <string>
#include <unordered_set>

#include <ambit/blocked_tensor.h>
#include <tensor/indices.h>
Expand Down Expand Up @@ -247,6 +248,15 @@ void BlockedTensor::reset_mo_spaces()

BlockedTensor::BlockedTensor() : rank_(0) {}

/// Create a deep copy of this blocked tensor: the copy carries the same
/// name and an independently cloned tensor for every stored block.
BlockedTensor BlockedTensor::clone() {
    BlockedTensor copy;
    copy.set_name(name_);
    for (const auto& entry : blocks_) {
        // entry.first is the block label, entry.second the block tensor.
        copy.set_block(entry.first, entry.second.clone());
    }
    return copy;
}

std::vector<std::string> BlockedTensor::block_labels() const
{
std::vector<std::string> labels;
Expand Down Expand Up @@ -723,6 +733,60 @@ void BlockedTensor::citerate(
}
}

/**
 * Scaled in-place addition: this += alpha * other.
 *
 * Both tensors must have the same rank and exactly the same set of block
 * labels.
 *
 * @param alpha the scale factor applied to `other`
 * @param other the blocked tensor to add to this one
 * @throws std::invalid_argument if the ranks or block label sets differ,
 *         or if the rank exceeds the number of available index letters
 */
void BlockedTensor::axpy(double alpha, const BlockedTensor& other)
{
    // => Validate the rank and get a generic label for each block. <=
    const std::string all_indices = "pqrstuvwxyzabcdefghijklmno";
    const auto rank1 = rank();
    const auto rank2 = other.rank();
    if (rank1 != rank2) {
        throw std::invalid_argument("Can only axpy two blocked tensors with the same rank.");
    }
    if (rank1 > all_indices.size()) {
        // substr would silently truncate the label and mislabel the blocks.
        throw std::invalid_argument("axpy does not support tensors of rank greater than 26.");
    }
    const auto label = all_indices.substr(0, rank1);

    // => Validate that both tensors carry exactly the same blocks. <=
    const auto self_label_vec = block_labels();
    const std::unordered_set<std::string> self_labels(self_label_vec.begin(), self_label_vec.end());
    const auto other_label_vec = other.block_labels();
    const std::unordered_set<std::string> other_labels(other_label_vec.begin(), other_label_vec.end());
    if (self_labels != other_labels) {
        throw std::invalid_argument("Can only axpy two blocked tensors with identical labels.");
    }

    // => axpy each block. <= (non-const iteration: the blocks are mutated)
    for (auto& [block_label, tensor] : blocks_) {
        tensor(label) += alpha * other.block(block_label)(label);
    }
}

/**
 * Full element-wise dot product of two blocked tensors, summed over all
 * blocks.
 *
 * Both tensors must have the same rank and exactly the same set of block
 * labels.
 *
 * @param other the blocked tensor to contract with this one
 * @return the scalar dot product
 * @throws std::invalid_argument if the ranks or block label sets differ,
 *         or if the rank exceeds the number of available index letters
 */
double BlockedTensor::vector_dot(const BlockedTensor& other)
{
    // => Validate the rank and get a generic label for each block. <=
    const std::string all_indices = "pqrstuvwxyzabcdefghijklmno";
    const auto rank1 = rank();
    const auto rank2 = other.rank();
    if (rank1 != rank2) {
        // Bug fix: the messages previously said "axpy" (copy-paste error).
        throw std::invalid_argument("Can only vector_dot two blocked tensors with the same rank.");
    }
    if (rank1 > all_indices.size()) {
        // substr would silently truncate the label and mislabel the blocks.
        throw std::invalid_argument("vector_dot does not support tensors of rank greater than 26.");
    }
    const auto label = all_indices.substr(0, rank1);

    // => Validate that both tensors carry exactly the same blocks. <=
    const auto self_label_vec = block_labels();
    const std::unordered_set<std::string> self_labels(self_label_vec.begin(), self_label_vec.end());
    const auto other_label_vec = other.block_labels();
    const std::unordered_set<std::string> other_labels(other_label_vec.begin(), other_label_vec.end());
    if (self_labels != other_labels) {
        throw std::invalid_argument("Can only vector_dot two blocked tensors with identical labels.");
    }

    // => Accumulate the dot product block by block. <=
    double result = 0.0;
    for (const auto& [block_label, tensor] : blocks_) {
        result += static_cast<double>(tensor(label) * other.block(block_label)(label));
    }
    return result;
}

void BlockedTensor::print(FILE *fh, bool level, std::string const &format,
int maxcols) const
{
Expand All @@ -735,7 +799,7 @@ void BlockedTensor::print(FILE *fh, bool level, std::string const &format,
}
}

void save(BlockedTensor bt, const std::string &filename, bool overwrite)
void BlockedTensor::save(const std::string &filename, bool overwrite)
{
// check if file exists or not
struct stat buf;
Expand All @@ -759,35 +823,35 @@ void save(BlockedTensor bt, const std::string &filename, bool overwrite)
// create the file
std::ofstream out(filename.c_str(), std::ios_base::binary);

auto block_labels = bt.block_labels();
auto tensor_block_labels = block_labels();

// write the name
auto name = bt.name();
auto name = name_;
size_t size = name.size();
out.write(reinterpret_cast<char *>(&size), sizeof(size_t));
out.write(&name[0], size);

// write the number of blocks
size_t num_blocks = block_labels.size();
size_t num_blocks = tensor_block_labels.size();
out.write(reinterpret_cast<char *>(&num_blocks), sizeof(size_t));

// write the block labels
for (const std::string &block : block_labels)
for (const std::string &tensor_block : tensor_block_labels)
{
size_t block_label_size = block.size();
size_t block_label_size = tensor_block.size();
out.write(reinterpret_cast<char *>(&block_label_size), sizeof(size_t));
out.write(&block[0], block_label_size);
out.write(&tensor_block[0], block_label_size);
}

// write the blocks
for (const std::string &block : block_labels)
for (const std::string &tensor_block : tensor_block_labels)
{
auto t = bt.block(block);
auto t = block(tensor_block);
write_tensor_to_file(t, out);
}
}

void load(BlockedTensor &bt, const std::string &filename)
void BlockedTensor::load(const std::string &filename)
{
// check if file exists or not
std::ifstream in(filename.c_str(), std::ios_base::binary);
Expand All @@ -809,33 +873,33 @@ void load(BlockedTensor &bt, const std::string &filename)
in.read(reinterpret_cast<char *>(&num_blocks), sizeof(size_t));

// read the block labels
std::vector<std::string> block_labels;
std::vector<std::string> tensor_block_labels;
for (size_t b = 0; b < num_blocks; b++)
{
std::string block;
std::string tensor_block;
size_t block_label_size = 0;
in.read(reinterpret_cast<char *>(&block_label_size), sizeof(size_t));
block.resize(block_label_size);
in.read(&block[0], block_label_size);
block_labels.push_back(block);
tensor_block.resize(block_label_size);
in.read(&tensor_block[0], block_label_size);
tensor_block_labels.push_back(tensor_block);
}

// read tensor from file
for (const std::string &block : block_labels)
for (const auto &tensor_block : tensor_block_labels)
{
Tensor t;
read_tensor_from_file(t, in);
bt.set_block(block, t);
set_block(tensor_block, t);
}

// 4. close the file
in.close();
}

BlockedTensor load_blocked_tensor(const std::string &filename)
/**
 * Build a new blocked tensor from a binary file on disk.
 *
 * @param filename the name of the binary file
 * @return the freshly loaded blocked tensor
 */
BlockedTensor BlockedTensor::load_and_build(const std::string &filename)
{
    BlockedTensor result;
    result.load(filename);
    return result;
}

Expand Down
6 changes: 6 additions & 0 deletions src/python/bindings.cc
Original file line number Diff line number Diff line change
Expand Up @@ -287,16 +287,22 @@ PYBIND11_MODULE(pyambit, m)
.def_static("add_mo_space", py::overload_cast<const std::string &, const std::string &, std::vector<size_t>, SpinType>(&BlockedTensor::add_mo_space))
.def_static("add_composite_mo_space", &BlockedTensor::add_composite_mo_space)
.def_static("build", &BlockedTensor::build)
.def_static("load_and_build", &BlockedTensor::load_and_build)
.def_static("reset_mo_space", &BlockedTensor::reset_mo_spaces)
.def("__getitem__", [](const BlockedTensor& b, const std::string& s){ return b[s]; })
.def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensor& s) { t[key] = s; })
.def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorAddition& s) { t[key] = s; })
.def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorDistributive& s) { t[key] = s; })
.def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorProduct& s) { t[key] = s; })
.def("axpy", &BlockedTensor::axpy)
.def("block", py::overload_cast<const std::string &>(&BlockedTensor::block))
.def("clone", &BlockedTensor::clone)
.def_property("name", &BlockedTensor::name, &BlockedTensor::set_name)
.def("norm", &BlockedTensor::norm)
.def("save", &BlockedTensor::save, "filename"_a, "overwrite"_a=true)
.def("scale", &BlockedTensor::scale)
.def("set", &BlockedTensor::set)
.def("vector_dot", &BlockedTensor::vector_dot)
.def("zero", &BlockedTensor::zero);

py::class_<LabeledBlockedTensor>(m, "LabeledBlockedTensor")
Expand Down
12 changes: 6 additions & 6 deletions test/test_io.cc
Original file line number Diff line number Diff line change
Expand Up @@ -156,12 +156,12 @@ bool test_tensor_io_blocked_1()
testTensor.set_block("OOV", t_oov);

// save the tensor
save(testTensor, "block.ten");
testTensor.save("block.ten");

// load the data from disk
BlockedTensor testTensor2 =
BlockedTensor::build(CoreTensor, "T", {"OOO", "OOV"});
load(testTensor2, "block.ten");
testTensor2.load("block.ten");

// compare the data to the original tensors
testTensor2("ijk") += -testTensor("ijk");
Expand Down Expand Up @@ -190,11 +190,11 @@ bool test_tensor_io_blocked_2()
testTensor.set_block("OOV", t_oov);

// save the tensor
save(testTensor, "block.ten");
testTensor.save("block.ten");

// load the data from disk
BlockedTensor testTensor2;
load(testTensor2, "block.ten");
testTensor2.load("block.ten");

// compare the data to the original tensors
testTensor2("ijk") += -testTensor("ijk");
Expand Down Expand Up @@ -223,10 +223,10 @@ bool test_tensor_io_blocked_3()
testTensor.set_block("OOV", t_oov);

// save the tensor
save(testTensor, "block.ten");
testTensor.save("block.ten");

// load the data from disk
BlockedTensor testTensor2 = load_blocked_tensor("block.ten");
BlockedTensor testTensor2 = BlockedTensor::load_and_build("block.ten");

// compare the data to the original tensors
testTensor2("ijk") += -testTensor("ijk");
Expand Down

0 comments on commit f967974

Please sign in to comment.