From f9679743aae0bc948047cb70513afbe621fdc476 Mon Sep 17 00:00:00 2001 From: Jonathon Misiewicz Date: Sat, 26 Mar 2022 18:14:15 -0400 Subject: [PATCH] Fix Forte [Ambit Part] (#53) * Fixes for Forte * Let there be class functions --- include/ambit/blocked_tensor.h | 66 +++++++++-------- lib/test_blocks.py | 60 +++++++++++++++- src/blocked_tensor/blocked_tensor.cc | 102 ++++++++++++++++++++++----- src/python/bindings.cc | 6 ++ test/test_io.cc | 12 ++-- 5 files changed, 190 insertions(+), 56 deletions(-) diff --git a/include/ambit/blocked_tensor.h b/include/ambit/blocked_tensor.h index 3fe69b4..13dd754 100644 --- a/include/ambit/blocked_tensor.h +++ b/include/ambit/blocked_tensor.h @@ -329,6 +329,43 @@ class BlockedTensor static std::vector<std::vector<size_t>> label_to_block_keys(const std::vector<std::string> &indices); + // => Simple Arithmetic <= // + + void axpy(double alpha, const BlockedTensor& other); + double vector_dot(const BlockedTensor& other); + + // Clone the BlockedTensor + BlockedTensor clone(); + + /** + * This function saves the blocked tensor to a binary file on disk + * + * @param filename the name of the binary file + * @param overwrite overwrite an existing file? + * + */ + void save(const std::string &filename, bool overwrite = true); + + /** + * This function loads a blocked tensor from a binary file on disk and copies + * the data to an existing tensor. 
If this tensor is empty, it will be + * resized + * + * + * @param filename the name of the binary file + * + */ + void load(const std::string &filename); + + /** + * This function loads a blocked tensor from a binary file and returns it + * + * @param filename the name of the binary file + * @return a blocked tensor + * + */ + static BlockedTensor load_and_build(const std::string &filename); + private: std::string name_; std::size_t rank_; @@ -390,35 +427,6 @@ class BlockedTensor LabeledBlockedTensor operator[](const std::string &indices) const; }; -/** - * This function saves a blocked tensor to a binary file on disk - * - * @param t a tensor - * @param filename the name of the binary file - * @param overwrite overwrite an existing file? - * - */ -void save(BlockedTensor bt, const std::string &filename, bool overwrite = true); -/** - * This function loads a blocked tensor from a binary file on disk and copies - * the data to an existing tensor. If the tensor passed in is empty, it will be - * resized - * - * @param t a tensor - * @param filename the name of the binary file - * - */ -void load(BlockedTensor &bt, const std::string &filename); -/** - * This function loads a blocked tensor from a binary file and returns it - * - * @param t a tensor - * @return a blocked tensor - * - */ -BlockedTensor load_blocked_tensor(const std::string &filename); class LabeledBlockedTensor { diff --git a/lib/test_blocks.py b/lib/test_blocks.py index fa5763b..f94dd66 100644 --- a/lib/test_blocks.py +++ b/lib/test_blocks.py @@ -1123,8 +1123,6 @@ def test_dot_product3(self): ambit.BlockedTensor.reset_mo_space() ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin) ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8,9,10,11], ambit.SpinType.AlphaSpin) - - A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"]) B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", ["oo", "ov", "vo", "vv"]) no = 5 
@@ -1185,5 +1183,63 @@ def test_contraction_exception1(self): C['ij'] = A['ia'] * B['aj'] + def test_vector_dot(self): + ambit.BlockedTensor.reset_mo_space() + ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin) + ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8], ambit.SpinType.AlphaSpin) + + A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"]) + B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", ["oo", "ov", "vo", "vv"]) + no = 5 + + [Aoo_t, a2] = self.build_and_fill("Aoo", [no, no]) + [Boo_t, b2] = self.build_and_fill("Boo", [no, no]) + A.block('oo')['pq'] = Aoo_t['pq'] + B.block('oo')['pq'] = Boo_t['pq'] + + dot1 = A.vector_dot(B) + dot2 = np.einsum("ij,ij->", np.array(a2), np.array(b2)) + + self.assertAlmostEqual(dot1, dot2, places=12) + + def test_axpy(self): + ambit.BlockedTensor.reset_mo_space() + ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin) + ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8], ambit.SpinType.AlphaSpin) + + A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"]) + B = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "B", ["oo", "ov", "vo", "vv"]) + no = 5 + + [Aoo_t, a2] = self.build_and_fill("Aoo", [no, no]) + [Boo_t, b2] = self.build_and_fill("Boo", [no, no]) + A.block('oo')['pq'] = Aoo_t['pq'] + B.block('oo')['pq'] = Boo_t['pq'] + + A.axpy(7, B) + foo = np.array(a2) + bar = np.array(b2) + foo += 7 * bar + + residual = foo - np.array(A.block('oo')) + self.assertAlmostEqual(0.0, np.linalg.norm(residual), places=12) + + def test_copy(self): + ambit.BlockedTensor.reset_mo_space() + ambit.BlockedTensor.add_mo_space("o", "i,j,k,l", [0,1,2,3,4], ambit.SpinType.AlphaSpin) + ambit.BlockedTensor.add_mo_space("v", "a,b,c,d", [5,6,7,8,9], ambit.SpinType.AlphaSpin) + + A = ambit.BlockedTensor.build(ambit.TensorType.CoreTensor, "A", ["oo", "ov", "vo", "vv"]) + no = 5 + 
+ [Aoo_t, a2] = self.build_and_fill("Aoo", [no, no]) + A.block('oo')['pq'] = Aoo_t['pq'] + + B = A.clone() + B.scale(2) + + self.assertAlmostEqual(0.0, np.linalg.norm(2 * np.array(A.block('oo')) - np.array(B.block('oo'))), places=12) + + if __name__ == '__main__': unittest.main() diff --git a/src/blocked_tensor/blocked_tensor.cc b/src/blocked_tensor/blocked_tensor.cc index 9fceb08..5e92c30 100644 --- a/src/blocked_tensor/blocked_tensor.cc +++ b/src/blocked_tensor/blocked_tensor.cc @@ -38,6 +38,7 @@ #include #include #include +#include <unordered_set> #include #include @@ -247,6 +248,15 @@ void BlockedTensor::reset_mo_spaces() BlockedTensor::BlockedTensor() : rank_(0) {} +BlockedTensor BlockedTensor::clone() { + auto bt = BlockedTensor(); + bt.set_name(name_); + for (const auto& [label, tensor]: blocks_) { + bt.set_block(label, tensor.clone()); + } + return bt; +} + std::vector<std::string> BlockedTensor::block_labels() const { std::vector<std::string> labels; @@ -723,6 +733,60 @@ void BlockedTensor::citerate( } } +void BlockedTensor::axpy(double alpha, const BlockedTensor& other) +{ + // => Validate the rank and get a label for each block. <= + std::string all_indices = "pqrstuvwxyzabcdefghijklmno"; + auto rank1 = rank(); + auto rank2 = other.rank(); + if (rank1 != rank2) { + throw std::invalid_argument("Can only axpy two blocked tensors with the same rank."); + } + auto label = all_indices.substr(0, rank1); + + // => Validate blocks. <= + auto self_label_vec = block_labels(); + std::unordered_set<std::string> self_labels(self_label_vec.begin(), self_label_vec.end()); + auto other_label_vec = other.block_labels(); + std::unordered_set<std::string> other_labels(other_label_vec.begin(), other_label_vec.end()); + if (self_labels != other_labels) { + throw std::invalid_argument("Can only axpy two blocked tensors with identical labels."); + } + + // => axpy each block. 
<= + for (const auto& [block_label, tensor]: blocks_) { + tensor(label) += alpha * other.block(block_label)(label); + } +} + +double BlockedTensor::vector_dot(const BlockedTensor& other) +{ + // => Validate the rank and get a label for each block. <= + std::string all_indices = "pqrstuvwxyzabcdefghijklmno"; + auto rank1 = rank(); + auto rank2 = other.rank(); + if (rank1 != rank2) { + throw std::invalid_argument("Can only vector_dot two blocked tensors with the same rank."); + } + auto label = all_indices.substr(0, rank1); + + // => Validate blocks. <= + auto self_label_vec = block_labels(); + std::unordered_set<std::string> self_labels(self_label_vec.begin(), self_label_vec.end()); + auto other_label_vec = other.block_labels(); + std::unordered_set<std::string> other_labels(other_label_vec.begin(), other_label_vec.end()); + if (self_labels != other_labels) { + throw std::invalid_argument("Can only vector_dot two blocked tensors with identical labels."); + } + + // => Accumulate the dot product of each block. <= + double result = 0.0; + for (const auto& [block_label, tensor]: blocks_) { + result += static_cast<double>(tensor(label) * other.block(block_label)(label)); + } + return result; +} + void BlockedTensor::print(FILE *fh, bool level, std::string const &format, int maxcols) const { @@ -735,7 +799,7 @@ void BlockedTensor::print(FILE *fh, bool level, std::string const &format, } } -void save(BlockedTensor bt, const std::string &filename, bool overwrite) +void BlockedTensor::save(const std::string &filename, bool overwrite) { // check if file exists or not struct stat buf; @@ -759,35 +823,35 @@ void save(BlockedTensor bt, const std::string &filename, bool overwrite) // create the file std::ofstream out(filename.c_str(), std::ios_base::binary); - auto block_labels = bt.block_labels(); + auto tensor_block_labels = block_labels(); // write the name - auto name = bt.name(); + auto name = name_; size_t size = name.size(); out.write(reinterpret_cast<char *>(&size), sizeof(size_t)); out.write(&name[0], size); // write the number of blocks - size_t 
num_blocks = tensor_block_labels.size(); out.write(reinterpret_cast<char *>(&num_blocks), sizeof(size_t)); // write the block labels - for (const std::string &block : block_labels) + for (const std::string &tensor_block : tensor_block_labels) { - size_t block_label_size = block.size(); + size_t block_label_size = tensor_block.size(); out.write(reinterpret_cast<char *>(&block_label_size), sizeof(size_t)); - out.write(&block[0], block_label_size); + out.write(&tensor_block[0], block_label_size); } // write the blocks - for (const std::string &block : block_labels) + for (const std::string &tensor_block : tensor_block_labels) { - auto t = bt.block(block); + auto t = block(tensor_block); write_tensor_to_file(t, out); } } -void load(BlockedTensor &bt, const std::string &filename) +void BlockedTensor::load(const std::string &filename) { // check if file exists or not std::ifstream in(filename.c_str(), std::ios_base::binary); @@ -809,33 +873,33 @@ void load(BlockedTensor &bt, const std::string &filename) in.read(reinterpret_cast<char *>(&num_blocks), sizeof(size_t)); // read the block labels - std::vector<std::string> block_labels; + std::vector<std::string> tensor_block_labels; for (size_t b = 0; b < num_blocks; b++) { - std::string block; + std::string tensor_block; size_t block_label_size = 0; in.read(reinterpret_cast<char *>(&block_label_size), sizeof(size_t)); - block.resize(block_label_size); - in.read(&block[0], block_label_size); - block_labels.push_back(block); + tensor_block.resize(block_label_size); + in.read(&tensor_block[0], block_label_size); + tensor_block_labels.push_back(tensor_block); } // read tensor from file - for (const std::string &block : block_labels) + for (const auto &tensor_block : tensor_block_labels) { Tensor t; read_tensor_from_file(t, in); - bt.set_block(block, t); + set_block(tensor_block, t); } // 4. 
close the file in.close(); } -BlockedTensor load_blocked_tensor(const std::string &filename) +BlockedTensor BlockedTensor::load_and_build(const std::string &filename) { BlockedTensor bt; - load(bt, filename); + bt.load(filename); return bt; } diff --git a/src/python/bindings.cc b/src/python/bindings.cc index 4d42374..44dc71e 100644 --- a/src/python/bindings.cc +++ b/src/python/bindings.cc @@ -287,16 +287,22 @@ PYBIND11_MODULE(pyambit, m) .def_static("add_mo_space", py::overload_cast<const std::string &, const std::string &, std::vector<size_t>, SpinType>(&BlockedTensor::add_mo_space)) .def_static("add_composite_mo_space", &BlockedTensor::add_composite_mo_space) .def_static("build", &BlockedTensor::build) + .def_static("load_and_build", &BlockedTensor::load_and_build) .def_static("reset_mo_space", &BlockedTensor::reset_mo_spaces) .def("__getitem__", [](const BlockedTensor& b, const std::string& s){ return b[s]; }) .def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensor& s) { t[key] = s; }) .def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorAddition& s) { t[key] = s; }) .def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorDistributive& s) { t[key] = s; }) .def("__setitem__", [](BlockedTensor& t, const std::string& key, const LabeledBlockedTensorProduct& s) { t[key] = s; }) + .def("axpy", &BlockedTensor::axpy) .def("block", py::overload_cast<const std::string &>(&BlockedTensor::block)) + .def("clone", &BlockedTensor::clone) + .def_property("name", &BlockedTensor::name, &BlockedTensor::set_name) .def("norm", &BlockedTensor::norm) + .def("save", &BlockedTensor::save, "filename"_a, "overwrite"_a=true) .def("scale", &BlockedTensor::scale) .def("set", &BlockedTensor::set) + .def("vector_dot", &BlockedTensor::vector_dot) .def("zero", &BlockedTensor::zero); py::class_<LabeledBlockedTensor>(m, "LabeledBlockedTensor") diff --git a/test/test_io.cc b/test/test_io.cc index 7610633..66469c7 100644 --- a/test/test_io.cc +++ b/test/test_io.cc @@ -156,12 +156,12 @@ bool 
test_tensor_io_blocked_1() testTensor.set_block("OOV", t_oov); // save the tensor - save(testTensor, "block.ten"); + testTensor.save("block.ten"); // load the data from disk BlockedTensor testTensor2 = BlockedTensor::build(CoreTensor, "T", {"OOO", "OOV"}); - load(testTensor2, "block.ten"); + testTensor2.load("block.ten"); // compare the data to the original tensors testTensor2("ijk") += -testTensor("ijk"); @@ -190,11 +190,11 @@ bool test_tensor_io_blocked_2() testTensor.set_block("OOV", t_oov); // save the tensor - save(testTensor, "block.ten"); + testTensor.save("block.ten"); // load the data from disk BlockedTensor testTensor2; - load(testTensor2, "block.ten"); + testTensor2.load("block.ten"); // compare the data to the original tensors testTensor2("ijk") += -testTensor("ijk"); @@ -223,10 +223,10 @@ bool test_tensor_io_blocked_3() testTensor.set_block("OOV", t_oov); // save the tensor - save(testTensor, "block.ten"); + testTensor.save("block.ten"); // load the data from disk - BlockedTensor testTensor2 = load_blocked_tensor("block.ten"); + BlockedTensor testTensor2 = BlockedTensor::load_and_build("block.ten"); // compare the data to the original tensors testTensor2("ijk") += -testTensor("ijk");