Automatic building of matrix product operators (autoMPO)#

namespace autompo#

This approach is applicable when the number of terms is not too large.

Typedefs

template<class T = double, int d = 2>
using LocOp = typename arma::Mat<T>::template fixed<d, d>#
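For example, local operators can be defined directly as Armadillo fixed-size matrices. The following is a minimal sketch; the helper names makeSz and makeId are illustrative, not part of the library.

#include <armadillo>
#include <auto_mpo.h>   // header providing autompo::LocOp (see below)

// spin-1/2 S^z as a 2x2 local operator (illustrative helper)
autompo::LocOp<double, 2> makeSz()
{
    autompo::LocOp<double, 2> sz;
    sz.zeros();
    sz(0, 0) =  0.5;
    sz(1, 1) = -0.5;
    return sz;
}

// identity as a 2x2 local operator (illustrative helper)
autompo::LocOp<double, 2> makeId()
{
    autompo::LocOp<double, 2> id;
    id.eye();
    return id;
}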

Functions

template<class T, int d>
ProdOp<T, d> operator*(ProdOp<T, d> const &A, ProdOp<T, d> const &B)#
template<class T, int d>
ProdOp<T, d> operator*(ProdOp<T, d> A, T c)#
template<class T, int d>
PolyOp<T, d> operator+(PolyOp<T, d> A, PolyOp<T, d> const &B)#
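Since ProdOp derives publicly from std::map<int, LocOp>, a product of local operators on chosen sites can be assembled by plain map assignment and then combined with the overloads above. A minimal sketch, assuming the local operator sz is defined as in the previous example:

#include <auto_mpo.h>

using namespace autompo;

// build the two-site term 0.5 * sz_i * sz_{i+1}
ProdOp<double, 2> zzTerm(LocOp<double, 2> const& sz, int i)
{
    ProdOp<double, 2> P;   // ProdOp is a std::map<int, LocOp>
    P[i]     = sz;         // local operator acting on site i
    P[i + 1] = sz;         // local operator acting on site i + 1
    return P * 0.5;        // scale the product by its coefficient
}

Products of two ProdOp objects and sums of PolyOp objects are available through the operator overloads listed above.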
template<class T = double, int d = 2>
struct ProdOp : public std::map<int, LocOp<double, 2>>#
template<class T = double, int d = 2>
struct PolyOp#
#include <auto_mpo.h>

just a collection of ProdOp terms, representing their sum

Public Functions

inline T overlap(const TensorTrain<T> &mps) const#

compute the overlap with a given mps (the physical dimensions should match)

Public Members

int compressEvery = 20#

number of terms grouped to form one mps.

These parameters control the conversion to mps; they are also used when the size of the collection reaches maxNTerm.

int maxNTerm = 100000#

to protect against memory overflow: when this number of terms is reached, the accumulated terms are compressed into a tensor train (tt).

double reltol = 1e-9#

the relative tolerance of the matrix compression.

bool use_svd = false#

the method for compression: CI by default; set to true to use SVD instead.
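Because these are public members, the compression settings can be adjusted directly on a PolyOp instance before many terms are accumulated. A minimal sketch, assuming PolyOp is default-constructible; the name H is illustrative only.

#include <auto_mpo.h>

int main()
{
    autompo::PolyOp<double, 2> H;   // operator being assembled (illustrative name)

    // adjust the mps-conversion parameters before accumulating terms
    H.compressEvery = 50;       // group 50 terms into each intermediate mps
    H.maxNTerm      = 200000;   // compress to tt once this many terms accumulate
    H.reltol        = 1e-10;    // relative tolerance of the matrix compression
    H.use_svd       = true;     // use SVD instead of the default CI compression
    return 0;
}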