Source: mps.py
1from __future__ import annotations2import logging3import time4import numpy as np5import scipy6import pandas as pd7import tensorflow as tf8import torch9from typing import Tuple, Union, Dict10from scipy.sparse import linalg as spslinalg11from scipy.sparse import csr_matrix12from ..constants import BASE_INT_TYPE13from ..constants import BASE_COMPLEX_TYPE14from ..constants import INT_TYPE_TO_BIT_DEPTH15from ..logging import Logging16from ..picklable import Picklable17from .abstract_tensor import AbstractTensor18from ..svd import trunc_svd19from .constants import DEFAULT_CUTOFF, DEFAULT_MAX_PHYS_DIM, DEFAULT_MAX_BOND_DIM20from .constants import BACKENDS, DEFAULT_SVD_BACKEND, DEFAULT_BACKPROP_BACKEND21from .constants import CANO_DECOMPS, DEFAULT_CANO_DECOMP22from .constants import DEFAULT_MPS_OPERATION_MODE23from .projector import Projector24from ..tensor_initialiser import TensorInitialiser25from ..custom_diag import custom_diag26class MPS(AbstractTensor, Picklable, Logging):27 SVD_BACKEND = DEFAULT_SVD_BACKEND28 CANO_DECOMP = DEFAULT_CANO_DECOMP29 OPERATION_MODE = DEFAULT_MPS_OPERATION_MODE30 SVD_MATRICES_ROOT = './'31 SVD_MATRICES_NUM = 032 SVD_STAT_COLS = ('ext_contractor',33 'backend',34 'type',35 'height',36 'width',37 'svd_time',38 'init_bond_dim',39 'cutoff_dim',40 'max_bond_dim',41 'new_bond_dim',42 'matrix_id',43 'sparseness')44 SVD_STAT = pd.DataFrame(columns=SVD_STAT_COLS)45 EXT_CONTRACTOR = None46 def __init__(self,47 *,48 name: str = None,49 visible_num: int,50 phys_dims: Union[int, list] = None,51 bond_dims: Union[int, list] = None,52 given_orth_idx: int = None,53 new_orth_idx: int = None,54 max_bond_dim: int = DEFAULT_MAX_BOND_DIM,55 cutoff: float = DEFAULT_CUTOFF,56 dtype=BASE_COMPLEX_TYPE,57 idx_dtype=BASE_INT_TYPE,58 tensors: list = None,59 init_method=None):60 super(MPS, self).__init__(name=name,61 dtype=dtype)62 self._idx_dtype = idx_dtype63 self._visible_num = visible_num64 if self._visible_num > INT_TYPE_TO_BIT_DEPTH[self._idx_dtype]:65 self._logger.warning(f'Number of physical indices in the MPS {self._name} '66 f'{self._visible_num} is larger than bit depth of '67 f'underlying integer data type '68 f'{self._idx_dtype}: {INT_TYPE_TO_BIT_DEPTH[self._idx_dtype]}. '69 f'Please, be careful calling amplitude() member function.')70 if isinstance(phys_dims, int):71 self._phys_dims = [phys_dims] * visible_num72 elif isinstance(phys_dims, list):73 assert len(phys_dims) == visible_num74 self._phys_dims = [phys_dim for phys_dim in phys_dims]75 else:76 raise TypeError(f'phys_dims should be either int, or list. '77 f'In fact they are: {type(bond_dims)}.')78 if isinstance(bond_dims, int):79 self._bond_dims = [bond_dims] * (visible_num - 1)80 elif isinstance(bond_dims, list):81 if visible_num > 0:82 assert len(bond_dims) == (visible_num - 1)83 self._bond_dims = [bond_dim for bond_dim in bond_dims]84 else:85 raise TypeError(f'bond_dims should be either int, or list. 
'86 f'In fact they are: {type(bond_dims)}.')87 self._ext_bond_dims = [1] + [bond_dim for bond_dim in self._bond_dims] + [1]88 self._max_bond_dim = max_bond_dim89 self._cutoff = cutoff90 if tensors is None:91 tensor_initialiser = TensorInitialiser(dtype=self._dtype, init_method=init_method)92 # Initialise tensors93 self._tensors = [tensor_initialiser((self._ext_bond_dims[idx],94 self._phys_dims[idx],95 self._ext_bond_dims[idx + 1]))96 for idx in range(self._visible_num)]97 else:98 assert np.all([tensor.dtype == self._dtype for tensor in tensors])99 assert init_method is None100 assert isinstance(tensors, list)101 if self._visible_num > 0:102 assert (len(tensors) == self._visible_num)103 # Check all tensors are 3-way104 assert np.all([len(tensor.shape) == 3 for tensor in tensors])105 # Check consistency of all physical and bond dimensions106 input_phys_dims = [tensor.shape[1] for tensor in tensors]107 assert np.all(np.equal(input_phys_dims, self._phys_dims))108 input_bond_dims = [tensor.shape[2] for tensor in tensors[:-1]]109 assert np.all(np.equal(input_bond_dims, self._bond_dims))110 input_bond_dims = [tensor.shape[0] for tensor in tensors[1:]]111 assert np.all(np.equal(input_bond_dims, self._bond_dims))112 # Check left and right caps are caps113 assert tensors[0].shape[0] == 1114 assert tensors[-1].shape[-1] == 1115 self._tensors = tensors116 assert MPS.SVD_BACKEND in BACKENDS117 assert MPS.CANO_DECOMP in CANO_DECOMPS118 self._orth_idx = given_orth_idx119 if new_orth_idx is not None:120 if self._visible_num > 0:121 self._canonicalise(new_orth_idx)122 # A dict which keeps track of all index permutations (which effectively123 # happen only during the swap_adjacent execution)124 self._cur_to_old = {idx: idx for idx in range(visible_num)}125 @property126 def shape(self) -> Tuple[int, ...]:127 return tuple(self._phys_dims)128 @property129 def hidden_shape(self) -> Tuple[int, ...]:130 return tuple(self._ext_bond_dims)131 def __str__(self) -> str:132 return (f'MPS {self._name}:\n'133 f'\tvisible_num = {self._visible_num}\n'134 f'\tphys_dims = {self._phys_dims}\n'135 f'\tbond_dims = {self._bond_dims}\n'136 f'\text_bond_dims = {self._ext_bond_dims}\n'137 f'\torth_idx = {self._orth_idx}\n')138 def _idx_to_visible(self, idx):139 return tf.cast(tf.squeeze(tf.math.floormod(tf.bitwise.right_shift(tf.reshape(idx, (-1, 1)),140 tf.range(self._visible_num,141 dtype=self._idx_dtype)),142 2)),143 dtype=self._idx_dtype)144 def _visible_to_idx(self, visible):145 if visible.dtype != self._idx_dtype:146 visible = tf.cast(visible, self._idx_dtype)147 two_powers = 2 ** tf.range(0, self._visible_num, dtype=self._idx_dtype)148 return tf.tensordot(visible, two_powers, axes=1)149 def _visible_to_amplitude(self, visible):150 """151 Batched calculation of MPS amplitudes corresponding to the ndarray152 of input visible states with shape (batch_index, visible_num).153 :param visible:154 :return:155 """156 assert visible.shape[-1] == self._visible_num157 rolling_tensor = tf.gather(self._tensors[0], visible[:, 0], axis=1)158 for idx in range(1, self._visible_num):159 rolling_tensor = tf.einsum('iaj,jak->iak',160 rolling_tensor,161 tf.gather(self._tensors[idx], visible[:, idx], axis=1))162 return tf.squeeze(rolling_tensor)163 def amplitude(self, idx: tf.Tensor) -> tf.Tensor:164 return self._visible_to_amplitude(self._idx_to_visible(idx))165 def part_func(self):166 result = tf.reduce_sum(self._tensors[0], axis=1)167 for idx in range(1, self._visible_num):168 result = tf.matmul(result, tf.reduce_sum(self._tensors[idx], 
axis=1))169 return tf.reduce_sum(result)170 def norm(self):171 # Up -> bottom172 result = tf.squeeze(tf.tensordot(self._tensors[0],173 tf.math.conj(self._tensors[0]),174 axes=[1, 1]))175 for idx in range(1, self._visible_num):176 # Left -> right177 result = tf.tensordot(result,178 tf.math.conj(self._tensors[idx]),179 axes=[-1, 0])180 # Up -> bottom181 result = tf.tensordot(self._tensors[idx],182 result,183 axes=[[0, 1], [0, 1]])184 return tf.squeeze(tf.sqrt(result))185 def to_tensor(self):186 result = self._tensors[0]187 for idx in range(1, self._visible_num):188 result = tf.tensordot(result, self._tensors[idx], axes=[-1, 0])189 return tf.squeeze(result, axis=[0, -1])190 def to_state_vector(self):191 return self.amplitude(tf.range(2 ** self._visible_num))192 @staticmethod193 def truncated_svd(*,194 matrix: tf.Tensor = None,195 cutoff: float = DEFAULT_CUTOFF,196 max_bond_dim: int = None) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:197 assert len(matrix.shape) == 2198 assert MPS.SVD_BACKEND in BACKENDS199 was_sparse = False200 start_time = time.time()201 if MPS.SVD_BACKEND == 'TF':202 s, u, v = tf.linalg.svd(matrix)203 elif MPS.SVD_BACKEND == 'TORCH':204 u, s, v = torch.svd(torch.from_numpy(matrix.numpy()))205 u = tf.constant(u.numpy(), dtype=matrix.dtype)206 s = tf.constant(s.numpy(), dtype=matrix.dtype)207 v = tf.constant(v.numpy(), dtype=matrix.dtype)208 elif MPS.SVD_BACKEND == 'SCIPY':209 if MPS.OPERATION_MODE == 'FAST':210 if min(matrix.shape[0], matrix.shape[1]) > max_bond_dim:211 was_sparse = True212 sparseness = np.isclose(matrix, np.zeros_like(matrix)).sum() / (matrix.shape[0] * matrix.shape[1])213 if sparseness >= 0.95:214 sparse_matrix = csr_matrix(np.where(np.isclose(matrix,215 np.zeros_like(matrix),216 atol=1e-15),217 np.zeros_like(matrix.numpy()),218 matrix.numpy()))219 else:220 sparse_matrix = matrix.numpy()221 sparse_matrix = matrix.numpy()222 u, s, v_h = spslinalg.svds(sparse_matrix, k=max_bond_dim)223 u = u[:, ::-1]224 s = s[::-1]225 v_h = v_h[::-1, :]226 else:227 u, s, v_h = scipy.linalg.svd(matrix.numpy(), full_matrices=False)228 elif MPS.OPERATION_MODE == 'SLOW':229 u, s, v_h = scipy.linalg.svd(matrix.numpy(), full_matrices=False)230 else:231 raise ValueError(f'Wrong MPS operation mode: {MPS.OPERATION_MODE}')232 u = tf.constant(u, dtype=matrix.dtype)233 s = tf.constant(s, dtype=matrix.dtype)234 v = tf.linalg.adjoint(tf.constant(v_h, dtype=matrix.dtype))235 argsort = tf.argsort(tf.math.real(s), direction='DESCENDING')236 u = tf.gather(u, argsort, axis=1)237 s = tf.gather(s, argsort)238 v = tf.gather(v, argsort, axis=1)239 else:240 raise ValueError(f'Somehow wrong backend {MPS.SVD_BACKEND} leaked through the assert '241 f'in MPS.truncated_svd')242 svd_time = time.time() - start_time243 # Truncate singular values which are too small244 init_bond_dim = s.shape[0]245 cutoff_dim = len(s.numpy()[s.numpy() > cutoff])246 new_bond_dim = min(max_bond_dim, cutoff_dim) if max_bond_dim is not None else cutoff_dim247 if new_bond_dim == 0:248 logger = logging.getLogger(f'nnqs.MPS')249 logger.warning(f'Zero new_bond_dim encountered during truncated_svd')250 new_bond_dim = 1251 s = tf.cast(tf.linalg.diag(s[:new_bond_dim]), matrix.dtype)252 u = u[:, :new_bond_dim]253 v_t = tf.linalg.adjoint(v[:, :new_bond_dim])254 #np.save(os.path.join(MPS.SVD_MATRICES_ROOT, f'{MPS.SVD_MATRICES_NUM}'),255 # matrix.numpy())256 summary = {257 'ext_contractor': MPS.EXT_CONTRACTOR,258 'backend': MPS.SVD_BACKEND,259 'type': 'sparse' if was_sparse else 'dense',260 'height': matrix.shape[0],261 'width': 
matrix.shape[1],262 'svd_time': svd_time,263 'init_bond_dim': init_bond_dim,264 'cutoff_dim': cutoff_dim,265 'max_bond_dim': max_bond_dim,266 'new_bond_dim': new_bond_dim,267 'matrix_id': MPS.SVD_MATRICES_NUM,268 'sparseness': np.isclose(matrix, np.zeros_like(matrix), atol=1e-15).sum() / (matrix.shape[0] * matrix.shape[1])269 }270 #MPS.SVD_STAT = MPS.SVD_STAT.append([summary], ignore_index=True)271 MPS.SVD_MATRICES_NUM += 1272 return u, s, v_t273 @staticmethod274 def from_tensor(*,275 name: str = None,276 tensor: tf.Tensor = None,277 new_orth_idx: int = None,278 max_bond_dim: int = DEFAULT_MAX_BOND_DIM,279 cutoff: float = DEFAULT_CUTOFF,280 idx_dtype=BASE_INT_TYPE) -> MPS:281 phys_dims = list(tensor.shape)282 visible_num = len(phys_dims)283 tensors = []284 bond_dims = []285 ext_bond_dims = [1]286 for idx in range(visible_num - 1):287 tensor = tf.reshape(tensor, (ext_bond_dims[idx] * phys_dims[idx], -1))288 u, s, v_t = MPS.truncated_svd(matrix=tensor,289 cutoff=cutoff,290 max_bond_dim=max_bond_dim)291 bond_dims.append(u.shape[1])292 ext_bond_dims.append(u.shape[1])293 tensors.append(tf.reshape(u, (ext_bond_dims[idx], phys_dims[idx], ext_bond_dims[idx + 1])))294 tensor = tf.matmul(s, v_t)295 ext_bond_dims.append(1)296 tensors.append(tf.reshape(tensor, (ext_bond_dims[visible_num - 1],297 phys_dims[visible_num - 1],298 ext_bond_dims[visible_num])))299 return MPS(name=name,300 visible_num=visible_num,301 phys_dims=phys_dims,302 bond_dims=bond_dims,303 given_orth_idx=visible_num - 1,304 new_orth_idx=new_orth_idx,305 max_bond_dim=max_bond_dim,306 cutoff=cutoff,307 dtype=tensor.dtype,308 idx_dtype=idx_dtype,309 tensors=tensors)310 def _set_bond_dim(self, bond_idx, val):311 """312 A function to simultaneously update an entry in the list of bond313 dimensions (`self._bond_dims`) and in the extended list of bond314 dimensions (`self._ext_bond_dims`)315 :param bond_idx:316 :param val:317 :return:318 """319 self._bond_dims[bond_idx] = val320 self._ext_bond_dims[bond_idx + 1] = val321 def _canonicalise(self, new_orth_idx):322 assert (0 <= new_orth_idx) and (new_orth_idx < self._visible_num)323 if self._orth_idx is None:324 forward_start_idx = 0325 backward_start_idx = self._visible_num - 1326 else:327 forward_start_idx = self._orth_idx328 backward_start_idx = self._orth_idx329 for idx in range(forward_start_idx, new_orth_idx):330 matrix = tf.reshape(self._tensors[idx],331 (self._ext_bond_dims[idx] * self._phys_dims[idx],332 self._ext_bond_dims[idx + 1]))333 if MPS.CANO_DECOMP == 'QR':334 q, r = tf.linalg.qr(matrix)335 else:336 u, s, v = self.truncated_svd(matrix=matrix,337 cutoff=self._cutoff,338 max_bond_dim=self._max_bond_dim)339 q, r = u, s @ v340 self._set_bond_dim(idx, q.shape[1])341 self._tensors[idx] = tf.reshape(q, (self._ext_bond_dims[idx],342 self._phys_dims[idx],343 self._ext_bond_dims[idx + 1]))344 self._tensors[idx + 1] = tf.tensordot(r,345 self._tensors[idx + 1],346 axes=[1, 0])347 for idx in range(backward_start_idx, new_orth_idx, -1):348 matrix = tf.transpose(tf.reshape(self._tensors[idx],349 (self._ext_bond_dims[idx],350 self._ext_bond_dims[idx + 1] * self._phys_dims[idx])))351 if MPS.CANO_DECOMP == 'QR':352 q_t, r_t = tf.linalg.qr(matrix)353 else:354 u, s, v = self.truncated_svd(matrix=matrix,355 cutoff=self._cutoff,356 max_bond_dim=self._max_bond_dim)357 q_t, r_t = u, s @ v358 q = tf.transpose(q_t)359 r = tf.transpose(r_t)360 self._set_bond_dim(idx - 1, q.shape[0])361 self._tensors[idx] = tf.reshape(q, (self._ext_bond_dims[idx],362 self._phys_dims[idx],363 self._ext_bond_dims[idx + 
1]))364 self._tensors[idx - 1] = tf.tensordot(self._tensors[idx - 1],365 r,366 axes=[-1, 0])367 self._orth_idx = new_orth_idx368 def cut_bond_dims(self,369 *,370 svd_backend: str = DEFAULT_SVD_BACKEND,371 backprop_backend: str = DEFAULT_BACKPROP_BACKEND):372 for bond_idx, bond_dim in enumerate(self._bond_dims):373 if bond_dim > self._max_bond_dim:374 ldx, rdx = bond_idx, bond_idx + 1375 self._canonicalise(ldx)376 bond_tensor = tf.einsum('iaj,jbk->iabk',377 self._tensors[ldx],378 self._tensors[rdx])379 # Calculate external bond dimensions of left and right matrices380 left_dim = self._ext_bond_dims[ldx] * self._phys_dims[ldx]381 right_dim = self._ext_bond_dims[rdx + 1] * self._phys_dims[rdx]382 bond_tensor = tf.reshape(bond_tensor, (left_dim, right_dim))383 u, s, v = trunc_svd(matrix=bond_tensor,384 max_bond_dim=self._max_bond_dim,385 cutoff=self._cutoff,386 svd_backend=svd_backend,387 backprop_backend=backprop_backend)388 self._set_bond_dim(ldx, u.shape[1])389 ltensor = tf.matmul(u, tf.linalg.diag(s))390 rtensor = tf.linalg.adjoint(v)391 self._tensors[ldx] = tf.reshape(ltensor, (self._ext_bond_dims[ldx],392 self._phys_dims[ldx],393 self._ext_bond_dims[ldx + 1]))394 self._tensors[rdx] = tf.reshape(rtensor, (self._ext_bond_dims[rdx],395 self._phys_dims[rdx],396 self._ext_bond_dims[rdx + 1]))397 def swap_adjacent(self, fdx, tdx):398 """399 Swaps neighbouring physical indices fdx and tdx.400 Places the orthogonality center in the tdx-th tensor.401 :param fdx:402 :param tdx:403 :return:404 """405 assert (0 <= fdx) and (fdx < self._visible_num)406 assert (0 <= tdx) and (tdx < self._visible_num)407 assert abs(fdx - tdx) == 1408 (ldx, rdx) = (min(fdx, tdx), max(fdx, tdx))409 if self._orth_idx is None:410 self._canonicalise(ldx)411 elif self._orth_idx < ldx:412 self._canonicalise(ldx)413 elif self._orth_idx > rdx:414 self._canonicalise(rdx)415 # Calculate merged tensor and swap its physical axes416 contracted_tensor = tf.einsum('iaj,jbk->ibak',417 self._tensors[ldx],418 self._tensors[rdx])419 # Calculate external bond dimensions of left and right matrices420 left_dim = self._ext_bond_dims[ldx] * self._phys_dims[rdx]421 right_dim = self._ext_bond_dims[rdx + 1] * self._phys_dims[ldx]422 contracted_tensor = tf.reshape(contracted_tensor,423 (left_dim, right_dim))424 u, s, v_t = self.truncated_svd(matrix=contracted_tensor,425 cutoff=self._cutoff,426 max_bond_dim=self._max_bond_dim)427 self._set_bond_dim(ldx, u.shape[1])428 if fdx < tdx:429 ltensor = u430 rtensor = tf.matmul(s, v_t)431 else:432 ltensor = tf.matmul(u, s)433 rtensor = v_t434 self._orth_idx = tdx435 self._tensors[ldx] = tf.reshape(ltensor, (self._ext_bond_dims[ldx],436 self._phys_dims[rdx],437 self._ext_bond_dims[ldx + 1]))438 self._tensors[rdx] = tf.reshape(rtensor, (self._ext_bond_dims[rdx],439 self._phys_dims[ldx],440 self._ext_bond_dims[rdx + 1]))441 self._phys_dims[ldx] = self._tensors[ldx].shape[1]442 self._phys_dims[rdx] = self._tensors[rdx].shape[1]443 self._cur_to_old[fdx], self._cur_to_old[tdx] = self._cur_to_old[tdx], self._cur_to_old[fdx]444 def move(self, fdx, tdx):445 assert (0 <= fdx) and (fdx < self.rank)446 assert (0 <= tdx) and (tdx < self.rank)447 if fdx < tdx:448 for idx in range(fdx, tdx):449 self.swap_adjacent(idx, idx + 1)450 elif fdx > tdx:451 for idx in range(fdx, tdx, -1):452 self.swap_adjacent(idx, idx - 1)453 else:454 self._canonicalise(tdx)455 def old_to_cur(self):456 return {self._cur_to_old[cur_idx]: cur_idx for cur_idx in self._cur_to_old}457 def move_to_tail(self, fdx):458 if fdx < 0:459 raise 
ValueError(f"move_to_tail(): fdx should be larger than 0, fdx = {fdx}")460 if fdx == self._visible_num - 1:461 self._canonicalise(new_orth_idx=self._visible_num - 1)462 return463 for idx in range(fdx, self._visible_num - 1):464 self.swap_adjacent(idx, idx + 1)465 self._canonicalise(new_orth_idx=self._visible_num - 1)466 def merge_adjacent(self, idx, jdx):467 assert (0 <= idx) and (idx < self.rank)468 assert (0 <= jdx) and (jdx < self.rank)469 assert abs(idx - jdx) == 1470 ldx, rdx = min(idx, jdx), max(idx, jdx)471 #self._canonicalise(ldx)472 tensor = tf.tensordot(self._tensors[ldx],473 self._tensors[rdx],474 axes=[-1, 0])475 tensor = tf.reshape(tensor, (self._ext_bond_dims[ldx],476 self._phys_dims[ldx] * self._phys_dims[rdx],477 self._ext_bond_dims[rdx + 1]))478 self._visible_num -= 1479 self._phys_dims[ldx] = self._phys_dims[ldx] * self._phys_dims[rdx]480 self._phys_dims.pop(rdx)481 self._bond_dims.pop(ldx)482 self._ext_bond_dims.pop(rdx)483 self._tensors.pop(rdx)484 self._tensors[ldx] = tensor485 #self._canonicalise(ldx)486 if self._orth_idx is not None:487 self._orth_idx = self._orth_idx - 1 if self._orth_idx > ldx else self._orth_idx488 @staticmethod489 def cut_phys_dim_slow(*,490 ltensor: MPS,491 ldx: int,492 rtensor: MPS,493 rdx: int,494 max_phys_dim: int = DEFAULT_MAX_PHYS_DIM,495 **kwargs):496 assert ltensor._dtype == rtensor._dtype497 assert ltensor._idx_dtype == rtensor._idx_dtype498 assert ltensor._max_bond_dim == rtensor._max_bond_dim499 assert ltensor._cutoff == rtensor._cutoff500 assert ltensor._phys_dims[ldx] == rtensor._phys_dims[rdx]501 assert max_phys_dim is not None502 ltensor._canonicalise(ldx)503 rtensor._canonicalise(rdx)504 contracted_tensor = tf.einsum(f'iaj,kal->ijkl',505 ltensor._tensors[ldx],506 rtensor._tensors[rdx])507 contracted_tensor = tf.reshape(contracted_tensor, (ltensor._ext_bond_dims[ldx]508 * ltensor._ext_bond_dims[ldx + 1],509 rtensor._ext_bond_dims[rdx]510 * rtensor._ext_bond_dims[rdx + 1]))511 u, s, v_t = MPS.truncated_svd(matrix=contracted_tensor,512 cutoff=ltensor._cutoff,513 max_bond_dim=max_phys_dim)514 sqrt_s = tf.sqrt(s)515 new_phys_dim = s.shape[1]516 lmatrix = u @ sqrt_s517 rmatrix = sqrt_s @ v_t518 ltensor._tensors[ldx] = tf.transpose(tf.reshape(lmatrix, (ltensor._ext_bond_dims[ldx],519 ltensor._ext_bond_dims[ldx + 1],520 new_phys_dim)),521 perm=(0, 2, 1))522 ltensor._phys_dims[ldx] = new_phys_dim523 rtensor._tensors[rdx] = tf.transpose(tf.reshape(rmatrix, (new_phys_dim,524 rtensor._ext_bond_dims[rdx],525 rtensor._ext_bond_dims[rdx + 1])),526 perm=(1, 0, 2))527 rtensor._phys_dims[rdx] = new_phys_dim528 @staticmethod529 def cut_phys_dim_fast(*,530 ltensor: MPS,531 ldx: int,532 rtensor: MPS,533 rdx: int,534 max_phys_dim: int = DEFAULT_MAX_PHYS_DIM,535 **kwargs):536 assert ltensor._dtype == rtensor._dtype537 assert ltensor._idx_dtype == rtensor._idx_dtype538 assert ltensor._max_bond_dim == rtensor._max_bond_dim539 assert ltensor._cutoff == rtensor._cutoff540 assert ltensor._phys_dims[ldx] == rtensor._phys_dims[rdx]541 assert max_phys_dim is not None542 ltensor._canonicalise(ldx)543 rtensor._canonicalise(rdx)544 lfull_matrix = tf.reshape(tf.transpose(ltensor._tensors[ldx],545 perm=(0, 2, 1)),546 (ltensor._ext_bond_dims[ldx] * ltensor._ext_bond_dims[ldx + 1],547 ltensor._phys_dims[ldx]))548 rfull_matrix = tf.reshape(tf.transpose(rtensor._tensors[rdx],549 perm=(1, 0, 2)),550 (rtensor._phys_dims[rdx],551 rtensor._ext_bond_dims[rdx] * rtensor._ext_bond_dims[rdx + 1]))552 projector = Projector(a_full=lfull_matrix, b_full=rfull_matrix)553 
ltrunc_matrix, rtrunc_matrix = projector.project(max_bond_dim=max_phys_dim,554 tol=1e-5,555 cutoff=ltensor._cutoff)556 new_phys_dim = ltrunc_matrix.shape[1]557 ltensor._tensors[ldx] = tf.transpose(tf.reshape(ltrunc_matrix, (ltensor._ext_bond_dims[ldx],558 ltensor._ext_bond_dims[ldx + 1],559 new_phys_dim)),560 perm=(0, 2, 1))561 ltensor._phys_dims[ldx] = new_phys_dim562 rtensor._tensors[rdx] = tf.transpose(tf.reshape(rtrunc_matrix, (new_phys_dim,563 rtensor._ext_bond_dims[rdx],564 rtensor._ext_bond_dims[rdx + 1])),565 perm=(1, 0, 2))566 rtensor._phys_dims[rdx] = new_phys_dim567 @staticmethod568 def cut_phys_dim(*,569 ltensor: MPS,570 ldx: int,571 rtensor: MPS,572 rdx: int,573 max_phys_dim: int = DEFAULT_MAX_PHYS_DIM,574 **kwargs):575 if MPS.OPERATION_MODE == 'FAST':576 MPS.cut_phys_dim_fast(ltensor=ltensor,577 ldx=ldx,578 rtensor=rtensor,579 rdx=rdx,580 max_phys_dim=max_phys_dim)581 elif MPS.OPERATION_MODE == 'SLOW':582 MPS.cut_phys_dim_slow(ltensor=ltensor,583 ldx=ldx,584 rtensor=rtensor,585 rdx=rdx,586 max_phys_dim=max_phys_dim)587 else:588 raise ValueError(f'Wrong MPS operation mode: {MPS.OPERATION_MODE}')589 @staticmethod590 def contract_by_idx(ltensor: MPS,591 ldx: int,592 rtensor: MPS,593 rdx: int,594 name: str = None,595 new_orth_idx: int = None) -> Tuple[MPS, tf.Tensor]:596 """597 Contracts two MPS by one physical index and returns the MPS representation of the598 resulting tensor (all calculations are performed in MPS representations, no full tensors599 are obtained at any point). If orth_idx is not specified (is None), sets orthonogality600 center position to `lmps._visible_num - 2`.601 :param ltensor:602 :param ldx:603 :param rtensor:604 :param rdx:605 :param name:606 :param new_orth_idx:607 :return:608 """609 assert ltensor._dtype == rtensor._dtype610 assert ltensor._idx_dtype == rtensor._idx_dtype611 assert ltensor._max_bond_dim == rtensor._max_bond_dim612 assert ltensor._cutoff == rtensor._cutoff613 assert ltensor._phys_dims[ldx] == rtensor._phys_dims[rdx]614 # Align MPSes so that left and right caps are contracted615 ltensor.move(ldx, ltensor.rank - 1)616 rtensor.move(rdx, 0)617 bond_tensor = tf.matmul(tf.reshape(ltensor._tensors[-1],618 (ltensor._ext_bond_dims[-2],619 ltensor._phys_dims[-1])),620 tf.reshape(rtensor._tensors[0],621 (rtensor._phys_dims[0],622 rtensor._ext_bond_dims[1])))623 tensors = ltensor._tensors[:-1] + rtensor._tensors[1:]624 if ltensor.rank > 1:625 tensors[ltensor.rank - 2] = tf.einsum('iaj,jk->iak',626 tensors[ltensor.rank - 2],627 bond_tensor)628 bond_dims = ltensor._bond_dims[:-1] + rtensor._bond_dims629 elif rtensor.rank > 1:630 tensors[0] = tf.einsum('ij,jak->iak',631 bond_tensor,632 tensors[0])633 bond_dims = rtensor._bond_dims[1:]634 else:635 assert bond_tensor.shape == (1, 1)636 tensors.append(bond_tensor)637 bond_dims = []638 norm = None639 if ltensor.rank > 1:640 norm = tf.norm(tensors[ltensor.rank - 2])641 tensors[ltensor.rank - 2] = tf.divide(tensors[ltensor.rank - 2], norm)642 else:643 norm = tf.norm(tensors[0])644 tensors[0] = tf.divide(tensors[0], norm)645 name = f'({ltensor._name}_{ldx}-{rtensor._name}_{rdx})' if name is None else name646 visible_num = (ltensor.rank - 1) + (rtensor.rank - 1)647 phys_dims = ltensor._phys_dims[:-1] + rtensor._phys_dims[1:]648 return MPS(name=name,649 visible_num=visible_num,650 phys_dims=phys_dims,651 bond_dims=bond_dims,652 given_orth_idx=ltensor.rank - 2 if ltensor.rank > 1 else 0,653 new_orth_idx=new_orth_idx if new_orth_idx is not None else visible_num - 1,654 max_bond_dim=ltensor._max_bond_dim,655 
cutoff=ltensor._cutoff,656 dtype=ltensor._dtype,657 idx_dtype=ltensor._idx_dtype,658 tensors=tensors), norm659 @staticmethod660 def create_diag(*,661 name: str = None,662 rank: int = -1,663 diag: tf.Tensor = None,664 dtype=None,665 max_bond_dim: int = DEFAULT_MAX_BOND_DIM,666 cutoff: float = DEFAULT_CUTOFF) -> MPS:667 assert len(diag.shape) == 1668 tensors = list()669 if rank == 1:670 tensors.append(tf.expand_dims(tf.expand_dims(diag, axis=0), axis=-1))671 else:672 tensors.append(tf.expand_dims(custom_diag(2,673 diag,674 dtype=dtype),675 axis=0))676 for idx in range(1, rank - 1):677 tensors.append(custom_diag(3,678 tf.ones(diag.shape[0]),679 dtype=dtype))680 tensors.append(tf.expand_dims(custom_diag(2,681 tf.ones(diag.shape[0]),682 dtype=dtype),683 axis=-1))684 return MPS(name=name,685 visible_num=rank,686 phys_dims=diag.shape[0],687 bond_dims=diag.shape[0],688 given_orth_idx=0,689 dtype=dtype if dtype is not None else diag.dtype,690 max_bond_dim=max_bond_dim,691 cutoff=cutoff if cutoff is not None else cutoff,692 tensors=tensors)693 @classmethod694 def calc_inversion_num(cls,695 *,696 perm: Tuple[int, ...] = None) -> int:697 """698 Calculates the number of inversions in a permutation of n integers ranging from 0 to (n-1).699 :param perm:700 :return:701 """702 result = 0703 for idx in range(len(perm) - 1):704 for jdx in range(idx, len(perm)):705 if perm[idx] > perm[jdx]:706 result += 1707 return result708 @classmethod709 def calc_positions(cls,710 *,711 perm: Tuple[int, ...] = None) -> Tuple[int, ...]:712 """713 Calculates such array positions that perm[positions[idx]] = idx714 :param perm:715 :return:716 """717 return tuple([tup[0] for tup in sorted(enumerate(perm), key=lambda tup: tup[1])])718 @classmethod719 def calc_contraction_perm(cls,720 *,721 lindices: Tuple[int, ...] = None,722 rindices: Tuple[int, ...] = None) -> Tuple[int, ...]:723 """724 lindices and rindices are lists of indices connecting two MPSes, which are being contracted.725 Thus, len(lindices) == len(rindices) =: idx_num.726 This function calculates a permutation required to resolve all crosses after indices727 are sorted in each MPS (in the left one indices are sorted in the ascending order, in the728 right one indices are sorted in the descending order).729 For example, if730 lindices = (0, 2, 3, 8, 5, 6),731 rindices = (7, 6, 5, 4, 2, 0),732 then summed pairs are:733 ((0, 7), (2, 6), (3, 5), (8, 4), (5, 2), (6, 0)).734 We sort pairs according to the rdx and substitute rdx to a range (0, idx_num - 1)735 sort by rdx: ((6, 0), (5, 2), (8, 4), (3, 5), (2, 6), (0, 7))736 substitute: ((6, 0), (5, 1), (8, 2), (3, 3), (2, 4), (0, 5))737 Now, we sort pairs in descending order of ldx and readout the permutation from second element738 of each couple739 sort by -ldx: ((8, 2), (6, 0), (5, 1), (3, 3), (2, 4), (0, 5))740 perm: (2, 0, 1, 3, 4, 5)741 :param lindices:742 :param rindices:743 :return:744 """745 assert len(lindices) == len(rindices)746 summed_pairs = list(zip(lindices, rindices))747 summed_pairs = sorted(summed_pairs, key=lambda tup: tup[1])748 for pos, idx_pair in enumerate(summed_pairs):749 summed_pairs[pos] = idx_pair[0], pos750 summed_pairs = sorted(summed_pairs, key=lambda tup: -tup[0])751 return tuple([tup[1] for tup in summed_pairs])752 @classmethod753 def calc_min_swap_num(cls,754 *,755 ltensor: MPS = None,756 lindices: Tuple[int, ...] = None,757 rtensor: MPS = None,758 rindices: Tuple[int, ...] 
= None) -> int:759 """760 Calculates the minimum required number of swaps to align to MPSes given their relative order761 and all connected indices.762 :param ltensor:763 :param lindices:764 :param rtensor:765 :param rindices:766 :return:767 """768 shift_swap_num = 0769 for pos, ldx in enumerate(sorted(lindices)[::-1]):770 shift_swap_num += (ltensor.rank - 1) - ldx - pos771 for pos, rdx in enumerate(sorted(rindices)):772 shift_swap_num += rdx - pos773 perm = cls.calc_contraction_perm(lindices=lindices, rindices=rindices)774 return shift_swap_num + cls.calc_inversion_num(perm=perm)775 @classmethod776 def resolve_crosses(cls,777 *,778 ltensor: MPS = None,779 lindices: Tuple[int, ...] = None,780 rtensor: MPS = None,781 rindices: Tuple[int, ...] = None):782 assert ltensor._dtype == rtensor._dtype783 assert ltensor._idx_dtype == rtensor._idx_dtype784 assert ltensor._max_bond_dim == rtensor._max_bond_dim785 assert ltensor._cutoff == rtensor._cutoff786 assert len(lindices) == len(rindices)787 idx_num = len(lindices)788 # Check that lindices are idx_num rightmost indices of ltensor789 assert np.all(np.asarray(sorted(lindices)) == np.arange(ltensor.rank - idx_num,790 ltensor.rank))791 # Check that rindices are idx_num leftmost indices of rtensor792 assert np.all(np.asarray(sorted(rindices)) == np.arange(0, idx_num))793 # Check that all indices have same dimension794 assert np.all(np.asarray(ltensor.shape)[np.asarray(lindices)]795 == np.asarray(rtensor.shape)[np.asarray(rindices)])796 perm = cls.calc_contraction_perm(lindices=lindices,797 rindices=rindices)798 for perm_idx in range(idx_num):799 positions = cls.calc_positions(perm=perm)800 fdx = positions[perm_idx]801 offset_fdx = ltensor.rank - 1 - fdx802 tdx = perm_idx803 offset_tdx = ltensor.rank - 1 - tdx804 # Permute actual indices805 ltensor.move(fdx=offset_fdx, tdx=offset_tdx)806 # Permute helper indices807 perm = np.asarray(perm)808 if fdx < tdx:809 perm[fdx:tdx] = perm[(fdx + 1):(tdx + 1)]810 else:811 perm[(tdx + 1):(fdx + 1)] = perm[tdx:fdx]812 perm[tdx] = tdx813 @classmethod814 def contract_by_indices(cls,815 *,816 lmps: MPS = None,817 lindices: Tuple[int, ...] = None,818 rmps: MPS = None,819 rindices: Tuple[int, ...] 
= None,820 name: str = None,821 new_orth_idx: int = None) -> Tuple[MPS, tf.Tensor, Dict[int, int], Dict[int, int]]:822 assert lmps._dtype == rmps._dtype823 assert lmps._idx_dtype == rmps._idx_dtype824 assert lmps._max_bond_dim == rmps._max_bond_dim825 assert lmps._cutoff == rmps._cutoff826 assert len(lindices) == len(rindices)827 idx_num = len(lindices)828 # Check that all indices have same dimension829 assert np.all(np.asarray(lmps.shape)[np.asarray(lindices)]830 == np.asarray(rmps.shape)[np.asarray(rindices)])831 lmps._cur_to_old = {ldx: ldx for ldx in range(lmps.rank)}832 rmps._cur_to_old = {rdx: rdx for rdx in range(rmps.rank)}833 summed_pairs = list(zip(lindices, rindices))834 # Group all coupled indices of the left MPS at its right edge835 for pos, (ldx, rdx) in enumerate(sorted(summed_pairs, key=lambda tup: tup[0])[::-1]):836 lmps.move(ldx, lmps.rank - 1 - pos)837 summed_pairs[pos] = (lmps.rank - 1 - pos, rdx)838 for pos, (ldx, rdx) in enumerate(sorted(summed_pairs, key=lambda tup: tup[1])):839 rmps.move(rdx, pos)840 summed_pairs[pos] = (ldx, pos)841 shifted_lindices = tuple([tup[0] for tup in summed_pairs])842 shifted_rindices = tuple([tup[1] for tup in summed_pairs])843 cls.resolve_crosses(ltensor=lmps,844 lindices=shifted_lindices,845 rtensor=rmps,846 rindices=shifted_rindices)847 #TODO: Test if the following canonicalisation is required848 lmps._canonicalise(lmps.rank - 1)849 rmps._canonicalise(0)850 # TODO: Calculate bond tensor in a very straightforward way851 bond_tensor = tf.eye(num_rows=lmps._tensors[lmps.rank - 1].shape[2],852 num_columns=rmps._tensors[0].shape[0],853 dtype=lmps._dtype)854 for idx in range(idx_num):855 ldx = lmps.rank - 1 - idx856 rdx = idx857 assert lmps._tensors[ldx].shape[2] == bond_tensor.shape[0]858 bond_tensor = tf.tensordot(lmps._tensors[ldx],859 bond_tensor,860 axes=[[2], [0]])861 assert bond_tensor.shape[1] == rmps._tensors[rdx].shape[1]862 assert bond_tensor.shape[2] == rmps._tensors[rdx].shape[0]863 bond_tensor = tf.tensordot(bond_tensor,864 rmps._tensors[rdx],865 axes=[[1, 2], [1, 0]])866 # TODO: Create a new MPS867 tensors = lmps._tensors[:-idx_num] + rmps._tensors[idx_num:]868 if lmps.rank > idx_num:869 tensors[lmps.rank - 1 - idx_num] = tf.einsum('iaj,jk->iak',870 tensors[lmps.rank - 1 - idx_num],871 bond_tensor)872 bond_dims = lmps._bond_dims[:-idx_num] + rmps._bond_dims[idx_num - 1:]873 elif rmps.rank > idx_num:874 tensors[0] = tf.einsum('ij,jak->iak',875 bond_tensor,876 tensors[0])877 bond_dims = rmps._bond_dims[idx_num:]878 else:879 assert bond_tensor.shape == (1, 1)880 tensors.append(bond_tensor)881 bond_dims = []882 norm = None883 if lmps.rank > idx_num:884 norm = tf.norm(tensors[lmps.rank - 1 - idx_num])885 tensors[lmps.rank - 1 - idx_num] = tf.divide(tensors[lmps.rank - 1 - idx_num], norm)886 else:887 norm = tf.norm(tensors[0])888 tensors[0] = tf.divide(tensors[0], norm)889 name = f'({lmps._name}_{lindices}-{rmps._name}_{rindices})' if name is None else name890 visible_num = (lmps.rank - idx_num) + (rmps.rank - idx_num)891 phys_dims = lmps._phys_dims[:-idx_num] + rmps._phys_dims[idx_num:]892 return MPS(name=name,893 visible_num=visible_num,894 phys_dims=phys_dims,895 bond_dims=bond_dims,896 given_orth_idx=lmps.rank - 1 - idx_num if lmps.rank > idx_num else 0,897 new_orth_idx=new_orth_idx if new_orth_idx is not None else None,898 max_bond_dim=lmps._max_bond_dim,899 cutoff=lmps._cutoff,900 dtype=lmps._dtype,901 idx_dtype=lmps._idx_dtype,...
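The mps.py listing above is a package module (it uses relative imports such as ..constants), so it cannot be run in isolation. As a hedged illustration of the core idea behind MPS.from_tensor and truncated_svd, the following is a minimal stand-alone NumPy sketch of the reshape-and-truncated-SVD sweep that factorises a dense tensor into 3-way MPS cores; the function name dense_to_mps and its parameters are illustrative and are not part of the package's API.

import numpy as np

def dense_to_mps(tensor, max_bond_dim=None, cutoff=1e-12):
    """Illustrative sketch of the sweep performed by MPS.from_tensor:
    split a dense tensor into 3-way cores by repeated reshape + truncated SVD."""
    phys_dims = list(tensor.shape)
    n = len(phys_dims)
    cores, left_dim = [], 1
    mat = tensor.reshape(left_dim * phys_dims[0], -1)
    for idx in range(n - 1):
        u, s, v_h = np.linalg.svd(mat, full_matrices=False)
        keep = max(1, int(np.sum(s > cutoff)))       # drop singular values below the cutoff
        if max_bond_dim is not None:
            keep = min(keep, max_bond_dim)           # enforce the maximum bond dimension
        cores.append(u[:, :keep].reshape(left_dim, phys_dims[idx], keep))
        mat = np.diag(s[:keep]) @ v_h[:keep, :]      # carry the remainder to the right
        left_dim = keep
        if idx + 1 < n - 1:
            mat = mat.reshape(left_dim * phys_dims[idx + 1], -1)
    cores.append(mat.reshape(left_dim, phys_dims[-1], 1))
    return cores

# Quick check: with no truncation, contracting the cores reproduces the tensor.
t = np.random.rand(2, 3, 2, 3)
cores = dense_to_mps(t)
recon = cores[0]
for c in cores[1:]:
    recon = np.tensordot(recon, c, axes=[[-1], [0]])
recon = recon.squeeze(axis=(0, -1))
print(np.allclose(recon, t))  # expected: True for this small full-rank example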
Source: lammps_init_v2.py
1import numpy as np2import pbc_utils as pbc3import math4import os5class input_config: 6 def __init__(self, xbox, ybox, zbox):7 self.natoms = 08 self.nbonds = 09 self.nmasses = 010 self.ndihedrals = 011 self.nimpropers = 012 self.masses = []13 self.ang_types = []14 self.bond_types = []15 self.bonds = np.array([], dtype=np.int64).reshape(0,4)16 self.nbond_types = 017 self.nangles = 018 self.nang_types = 019 self.x = np.array([], dtype=np.int64).reshape(0,6)20 self.RESID = np.zeros((0, 3), 'd')21 self.L = np.zeros(3, 'd')22 self.L[0] = float(xbox)23 self.L[1] = float(ybox)24 self.L[2] = float(zbox)25 self.lo = -(self.L)/226 self.hi = (self.L)/227 self.xlo = self.lo[0]28 self.ylo = self.lo[1]29 self.zlo = self.lo[2]30 self.xhi = self.hi[0]31 self.yhi = self.hi[1]32 self.zhi = self.hi[2]33 self.np_list = np.array([], dtype=np.int64).reshape(0,4)34 self.num_chns = 035 self.periodic = False36 37 def __add_particle_type(self, part_type):38 if ( not part_type in self.masses and not part_type == None ):39 self.masses.append(part_type)40 self.nmasses += 141 def __add_bond_type(self, bond_type):42 if ( not bond_type in self.bond_types and not bond_type == None):43 self.bond_types.append(bond_type)44 self.nbond_types+= 145 def __update_particle_count(self, count_new_atoms):46 self.natoms += count_new_atoms47 def __update_chain_count(self, count_new_chains):48 self.num_chns += count_new_chains 49 def __add_bond_check_bond_overlap(self,loc_array, index, monomer_increment, Lbond, rmin, old_index = None, rad = None):50 if old_index == None:51 old_index = index - 152 theta = 2 * np.pi * np.random.random_sample()53 phi = np.pi * np.random.random_sample()54 dx = Lbond * np.sin(phi) * np.cos(theta)55 dy = Lbond * np.sin(phi) * np.sin(theta)56 dz = Lbond * np.cos(theta)57 xprev = loc_array[old_index,3]58 yprev = loc_array[old_index,4]59 zprev = loc_array[old_index,5]60 61 restriction = True62 while restriction:63 theta = 2 * np.pi * np.random.random_sample()64 phi = np.pi * np.random.random_sample()65 dx = Lbond * np.sin(phi) * np.cos(theta)66 dy = Lbond * np.sin(phi) * np.sin(theta)67 dz = Lbond * np.cos(phi)68 xx = xprev + dx69 yy = yprev + dy70 zz = zprev + dz71 new_loc = np.array([xx, yy, zz])72 Restriction = False73 # if (self.np_list.size > 0 ):74 # if (rad is None or rad < 0):75 # rad = 076 # checking_distances = np.linalg.norm(self.np_list[:,0:3] - new_loc, axis = 1) - (np_list[:,3] + rad )77 # if checking_distances.min() < 0:78 # Restriction = True79 # else: 80 # if (rad > 0):81 # np.append(temp_locations, np.array([np.append(new_loc, rad)]))82 # Restriction = False83 if (Restriction is False):84 Restriction = True85 if np.abs(zz) < self.L[2]/2. 
:86 if monomer_increment == 1:87 restriction = False88 else:89 xpp = loc_array[index-2,3]90 ypp = loc_array[index-2,4]91 zpp = loc_array[index-2,5]92 dxp = xx - xpp93 dyp = yy - ypp94 dzp = zz - zpp95 rpsq = dxp*dxp+dyp*dyp+dzp*dzp96 rp = np.sqrt(rpsq)97 if rp > rmin:98 restriction = False99 100 if self.periodic == True:101 if xx > self.xhi:102 xx -= self.L[0]103 if yy > self.yhi:104 yy -= self.L[1]105 if xx < self.xlo:106 xx += self.L[0]107 if yy < self.ylo:108 yy += self.L[1]109 loc_array[index,3] = xx110 loc_array[index,4] = yy111 loc_array[index,5] = zz112 113 def add_diblock_rho0(self, part1, part2, frac, chl, rho0, Lbond, bond_type, rmin = 0.0):114 num_chns = int(self.L[0] * self.L[1] * self.L[2] * rho0/chl)115 self.add_diblock(part1, part2, frac, chl, num_chns, Lbond, bond_type, rmin)116 def add_diblock(self, part1, part2, frac, chl, num_chns, Lbond,bond_type, rmin = 0.0, rad = None):117 self.__add_particle_type(part1)118 self.__add_particle_type(part2)119 if (chl > 1): 120 self.__add_bond_type(bond_type)121 # resid = self.natoms + 1122 ns_loc = chl * num_chns123 xloc = np.zeros((ns_loc, 6), 'd')124 nbonds_loc = num_chns * (chl - 1)125 bond_loc = np.empty((nbonds_loc,4), int)126 atom_id = self.natoms127 molecule_len = chl 128 self.__update_particle_count(molecule_len*num_chns)129 chn_id = self.num_chns130 self.num_chns += num_chns131 bond_count = 0132 for i_ch in range(num_chns):133 for i_monomer in range(chl):134 atom_id += 1135 tmp_index = i_ch * molecule_len + i_monomer136 if float(i_monomer)/float(chl) < frac:137 xloc[tmp_index,2] = part1138 else:139 xloc[tmp_index,2] = part2140 xloc[tmp_index, 0] = atom_id141 xloc[tmp_index, 1] = chn_id + i_ch142 if i_monomer == 0:143 xloc[tmp_index, 3] = self.xlo + np.random.random_sample() * self.L[0]144 xloc[tmp_index, 4] = self.ylo + np.random.random_sample() * self.L[1]145 xloc[tmp_index, 5] = self.zlo + np.random.random_sample() * self.L[2]146 else:147 bond_loc[bond_count, 0] = self.nbonds148 bond_loc[bond_count, 1] = bond_type149 bond_loc[bond_count, 2] = atom_id150 bond_loc[bond_count, 3] = atom_id - 1 151 bond_count += 1152 self.nbonds += 1153 self.__add_bond_check_bond_overlap(xloc, tmp_index, i_monomer, Lbond, rmin)154 self.x = np.concatenate([self.x, xloc])155 self.bonds = np.vstack([self.bonds, bond_loc])156 def add_homopolymer(self, part, chl, num_chns, Lbond, bond_type):157 self.add_diblock(part, part, 1.0, chl, num_chns, Lbond, bond_type)158 def add_np(self, part, num_part, radius):159 self.add_diblock(part, part, 1.0, chl, num_chns, Lbond=0, bond_type=None, rad = radius)160 def add_homopolymer_rho0(self, part, chl, rho0, Lbond, bond_type):161 num_chns = int(self.L[0] * self.L[1] * self.L[2] * rho0/chl)162 self.add_diblock(part, part, 1.0, chl, num_chns, Lbond, bond_type)163 164 def add_comb_rho0(self, bb_part1, Nb,Ns, rho0, ss_pt1, back_bond, bb_part2=None, frac_bb=1, ss_pt2=None,165 frac_side=1.0, Lbond=1.0, freq=1,166 back_side_bond=None, side_bond=None, rmin = 0.0):167 num_chns = int(self.L[0] * self.L[1] * self.L[2] * rho0 / (Nb + math.ceil(float(Nb)/freq ) * Ns))168 self.add_comb(bb_part1, Nb, Ns, num_chns, ss_pt1, back_bond, bb_part2=bb_part2, frac_bb=frac_bb, ss_pt2=ss_pt2,169 frac_side = frac_side, Lbond = Lbond, freq = freq, 170 back_side_bond = back_side_bond, side_bond = side_bond, rmin = rmin)171 def add_comb(self, bb_part1, Nb,Ns, num_chns, ss_pt1, back_bond, bb_part2=None, frac_bb=1, ss_pt2=None,172 frac_side=1.0, Lbond=1.0, freq=1,173 back_side_bond=None, side_bond=None, rmin = 0.0):174 
self.__add_particle_type(bb_part1)175 self.__add_particle_type(bb_part2)176 self.__add_particle_type(ss_pt1)177 self.__add_particle_type(ss_pt2)178 179 self.__add_bond_type(back_bond)180 self.__add_bond_type(back_side_bond)181 self.__add_bond_type(side_bond)182 if side_bond == None:183 side_bond = back_bond184 if back_side_bond == None:185 back_side_bond = back_bond186 # resid = self.natoms + 1187 ns_loc = (Nb + Ns * Nb//freq) * num_chns188 xloc = np.zeros((ns_loc, 6), 'd')189 old_natoms = self.natoms190 nbonds_loc = num_chns * ( (Nb - 1) + Nb//freq * (Ns) )191 bond_loc = np.empty((nbonds_loc,4), int)192 atom_id = self.natoms193 molecule_len = Nb + Ns * Nb//freq194 self.__update_particle_count( molecule_len * num_chns)195 chn_id = self.num_chns196 self.num_chns += num_chns197 bond_count = 0198 for i_ch in range(num_chns):199 for i_monomer in range(Nb):200 atom_id += 1201 tmp_index = i_ch * molecule_len + i_monomer202 if float(i_monomer)/float(Nb) < frac_bb:203 xloc[i_ch*molecule_len+i_monomer,2] = bb_part1204 else:205 xloc[i_ch*molecule_len+i_monomer,2] = bb_part2206 xloc[tmp_index,0] = atom_id207 xloc[tmp_index,1] = chn_id + i_ch # molecule id 208 if i_monomer == 0:209 xloc[tmp_index,3] = self.xlo + np.random.random_sample() * self.L[0]210 xloc[tmp_index,4] = self.ylo + np.random.random_sample() * self.L[1]211 xloc[tmp_index,5] = self.zlo + np.random.random_sample() * self.L[2]212 else:213 bond_loc[bond_count, 0] = self.nbonds214 bond_loc[bond_count, 1] = back_bond 215 bond_loc[bond_count, 2] = atom_id - 1216 bond_loc[bond_count, 3] = atom_id 217 bond_count += 1218 self.nbonds += 1219 self.__add_bond_check_bond_overlap(xloc, tmp_index, i_monomer, Lbond, rmin)220 for i_monomer in range(Nb):221 for i_side in range(Ns): 222 atom_id += 1223 tmp_index = i_ch * molecule_len + Nb + i_monomer // freq * Ns + i_side224 indbb = i_ch * molecule_len + i_monomer + 1225 xloc[tmp_index,0] = atom_id 226 xloc[tmp_index,1] = chn_id + i_ch # molecule id 227 if float(i_side)/float(Ns) < frac_side:228 xloc[tmp_index,2] = ss_pt1229 else:230 xloc[tmp_index,2] = ss_pt2231 if i_side == 0:232 bond_loc[bond_count, 0] = self.nbonds233 bond_loc[bond_count, 1] = back_side_bond 234 bond_loc[bond_count, 2] = indbb + old_natoms235 # bond_loc[bond_count, 3] = tmp_index 236 bond_loc[bond_count, 3] = atom_id237 bond_count += 1238 self.nbonds += 1239 self.__add_bond_check_bond_overlap(xloc, tmp_index, i_side+1, Lbond, rmin, old_index = indbb-1 )240 else:241 bond_loc[bond_count, 0] = self.nbonds242 bond_loc[bond_count, 1] = side_bond 243 bond_loc[bond_count, 2] = atom_id- 1244 bond_loc[bond_count, 3] = atom_id245 bond_count += 1246 self.nbonds += 1247 self.__add_bond_check_bond_overlap(xloc, tmp_index, i_side+1, Lbond, rmin)248 self.x = np.concatenate([self.x, xloc])249 self.bonds = np.vstack([self.bonds, bond_loc])250 def add_simple_ABA_rho0(self,part1, part2, fracA, chl, rho0, Lbond=1.0, bond_type=1, rmin = 0.0):251 self.add_triblock(part1, part2, part1, fracA, 1-2*fracA, chl, int(self.L[0] * self.L[1] * self.L[2] * rho0/chl), Lbond = Lbond, 252 bond_type12 = bond_type, bond_type23= bond_type,rmin=rmin)253 def add_simple_ABA(self,part1, part2, fracA, chl, num_chns, Lbond=1.0, bond_type=1, rmin = 0.0):254 self.add_triblock(part1, part2, part1, fracA, 1-2*fracA, chl, num_chns, Lbond = Lbond, 255 bond_type12 = bond_type, bond_type23= bond_type,rmin=rmin)256 def add_triblock_rho0(self,part1, part2, part3, frac1, frac2, chl, rho0, Lbond=1.0, bond_type12=1, bond_type23=1, rmin = 0.0):257 self.add_triblock(part1, part2, part3, 
frac1, frac2, chl, int(self.L[0] * self.L[1] * self.L[2] * rho0/chl), Lbond = Lbond, 258 bond_type12 = bond_type12, bond_type23= bond_type23, rmin=rmin)259 def add_triblock(self, part1, part2, part3, frac1, frac2, chl, num_chns, Lbond=1.0,bond_type12=1, bond_type23=1, rmin = 0.0):260 self.__add_particle_type(part1)261 self.__add_particle_type(part2)262 self.__add_particle_type(part3)263 self.__add_bond_type(bond_type12)264 self.__add_bond_type(bond_type23)265 ns_loc = chl * num_chns266 xloc = np.zeros((ns_loc, 6), 'd')267 nbonds_loc = num_chns * (chl - 1)268 bond_loc = np.empty((nbonds_loc,4), int)269 270 molecule_len = chl271 atom_id = self.natoms272 self.natoms += chl * num_chns273 chn_id = self.num_chns274 self.num_chns += chl275 bond_count = 0276 for i_ch in range(num_chns):277 for i_monomer in range(chl):278 atom_id += 1279 tmp_index = i_ch * molecule_len + i_monomer280 f_along = float(i_monomer)/float(chl) 281 if f_along < frac1:282 xloc[tmp_index,2] = part1283 elif f_along < frac1+frac2:284 xloc[tmp_index,2] = part2285 else:286 xloc[tmp_index,2] = part3287 xloc[tmp_index,0] = atom_id 288 xloc[tmp_index,1] = chn_id + i_ch289 if i_monomer == 0:290 xloc[tmp_index,3] = self.xlo + np.random.random_sample() * self.L[0]291 xloc[tmp_index,4] = self.ylo + np.random.random_sample() * self.L[1]292 xloc[tmp_index,5] = self.zlo + np.random.random_sample() * self.L[2]293 else:294 if f_along >= frac1 + frac2:295 bndtyp = bond_type23296 else: 297 bndtyp = bond_type12298 bond_loc[bond_count, 0] = self.nbonds299 bond_loc[bond_count, 1] = bndtyp300 bond_loc[bond_count, 2] = atom_id301 bond_loc[bond_count, 3] = atom_id - 1 302 bond_count += 1303 self.nbonds += 1304 self.__add_bond_check_bond_overlap(xloc, tmp_index, i_monomer, Lbond, rmin)305 self.x = np.concatenate([self.x, xloc])306 self.bonds = np.vstack([self.bonds, bond_loc])307 def write(self, output):308 path = (os.path.dirname(os.path.abspath(output))) 309 os.makedirs(path, exist_ok=True)310 otp = open(output, 'w')311 otp.write("Generated by Chris' code\n\n")312 313 line = "%d atoms\n" % (self.natoms )314 otp.write(line)315 line = "%d bonds\n" % len(self.bonds)316 otp.write(line)317 line = "%d angles\n" % (self.nangles)318 otp.write(line)319 # line = "%d dihedrals\n" % (self.ndihedrals)320 # otp.write(line)321 # line = "%d impropers\n" % (self.ndihedrals)322 # otp.write(line)323 line = "\n" 324 otp.write(line)325 line = "%d atom types\n" % len(self.masses)326 otp.write(line)327 line = "%d bond types\n" % len(self.bond_types)328 otp.write(line)329 line = "%d angle types\n" % self.nang_types330 otp.write(line)331 # line = "%d dihedral types\n" % self.ndihedrals332 # otp.write(line)333 # line = "%d improper types\n" % self.nimpropers334 # otp.write(line)335 line = "\n" 336 otp.write(line)337 line = '%f %f xlo xhi\n' % (self.lo[0], self.hi[0])338 otp.write(line)339 line = '%f %f ylo yhi\n' % (self.lo[1], self.hi[1])340 otp.write(line)341 line = '%f %f zlo zhi\n\n' % (self.lo[2], self.hi[2])342 otp.write(line)343 344 if len(self.masses) > 0 :345 otp.write("Masses \n\n")346 for i, val in enumerate(self.masses):347 line = "%d 1.000\n" % (val)348 otp.write(line)349 otp.write("\nAtoms \n\n")350 for i, val in enumerate(self.x):351 line = "{:d} {:d} {:d} {:f} {:f} {:f}\n" 352 idx,m,t,x,y,z = val353 otp.write(line.format(int(idx),int(m),int(t),x,y,z))354 if len(self.bonds) > 0 :355 otp.write("\nBonds \n\n")356 for i, val in enumerate(self.bonds):357 line = ' '.join(map(str, val))358 otp.write(line + "\n")359 def add_diblock_angle(self, part1, part2, frac, 
chl, num_chns, Lbond,bond_type,angle_type = None, rmin = 0.0):360 self.__add_particle_type(part1)361 self.__add_particle_type(part2)362 self.__add_bond_type(bond_type)363 resid = self.natoms + 1364 ns_loc = chl * num_chns365 xloc = np.zeros((ns_loc, 6), 'd')366 # bond_loc = np.zeros((0, 4), 'd')367 # bond_loc = np.([], dtype=np.float).reshape(0,4)368 nbonds_loc = num_chns * (chl - 1)369 bond_loc = np.empty((nbonds_loc,4), int)370 nangles_loc = num_chns * (chl -2 )371 bond_loc = np.empty((nangles_loc,4), int)372 # self.nbonds 373 natoms = self.natoms374 self.natoms += chl * num_chns375 chn_id = self.num_chns376 self.num_chns += chl377 bond_count = 0378 if not angle_type == None:379 if ( not angle_type in self.ang_types):380 self.ang_types.append(part2)381 for i_ch in range(num_chns):382 for i_monomer in range(chl):383 natoms += 1384 if float(i_monomer)/float(chl) < frac:385 xloc[i_ch*chl+i_monomer,2] = part1386 else:387 xloc[i_ch*chl+i_monomer,2] = part2388 xloc[i_ch*chl+i_monomer,0] = natoms389 xloc[i_ch*chl+i_monomer,1] = chn_id + i_ch390 if i_monomer == 0:391 xloc[i_ch*chl,3] = self.xlo + np.random.random_sample() * self.L[0]392 xloc[i_ch*chl,4] = self.ylo + np.random.random_sample() * self.L[1]393 xloc[i_ch*chl,5] = self.zlo + np.random.random_sample() * self.L[2]394 else:395 bond_loc[bond_count, 0] = self.nbonds396 bond_loc[bond_count, 1] = bond_type397 bond_loc[bond_count, 2] = natoms398 bond_loc[bond_count, 3] = natoms - 1 399 bond_count += 1400 self.nbonds += 1401 theta = 2 * np.pi * np.random.random_sample()402 phi = np.pi * np.random.random_sample()403 dx = Lbond * np.sin(phi) * np.cos(theta)404 dy = Lbond * np.sin(phi) * np.sin(theta)405 dz = Lbond * np.cos(theta)406 xprev = xloc[i_ch*chl+i_monomer-1,3]407 yprev = xloc[i_ch*chl+i_monomer-1,4]408 zprev = xloc[i_ch*chl+i_monomer-1,5]409 410 restriction = True411 while restriction:412 theta = 2 * np.pi * np.random.random_sample()413 phi = np.pi * np.random.random_sample()414 dx = Lbond * np.sin(phi) * np.cos(theta)415 dy = Lbond * np.sin(phi) * np.sin(theta)416 dz = Lbond * np.cos(phi)417 xx = xprev + dx418 yy = yprev + dy419 zz = zprev + dz420 if np.abs(zz) < self.L[2]/2. :421 if i_monomer == 1:422 restriction = False423 else:424 xpp = xloc[i_ch*chl+i_monomer-2,3]425 ypp = xloc[i_ch*chl+i_monomer-2,4]426 zpp = xloc[i_ch*chl+i_monomer-2,5]427 dxp = xx - xpp428 dyp = yy - ypp429 dzp = zz - zpp430 rpsq = dxp*dxp+dyp*dyp+dzp*dzp431 rp = np.sqrt(rpsq)432 if rp > rmin:433 restriction = False434 435 if self.periodic == True:436 if xx > self.xhi:437 xx -= self.L[0]438 if yy > self.yhi:439 yy -= self.L[1]440 if xx < self.xlo:441 xx += self.L[0]442 if yy < self.ylo:443 yy += self.L[1]444 xloc[i_ch*chl+i_monomer,3] = xx445 xloc[i_ch*chl+i_monomer,4] = yy446 xloc[i_ch*chl+i_monomer,5] = zz447 self.x = np.concatenate([self.x, xloc])...
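Assuming lammps_init_v2.py (and its pbc_utils dependency) is importable from the working directory, a minimal usage sketch of the input_config class shown above might look like the following; the bead types, density, chain length and output path are illustrative values, not ones taken from the source.

# Hypothetical usage of input_config from the listing above.
from lammps_init_v2 import input_config

# 20 x 20 x 20 simulation box centred on the origin
config = input_config(20.0, 20.0, 20.0)

# Fill to monomer density rho0 = 3.0 with symmetric diblocks of length 20:
# type-1 beads for the first half of each chain, type-2 for the second,
# bond length 1.0, bond type 1, no minimum next-nearest-neighbour distance.
config.add_diblock_rho0(part1=1, part2=2, frac=0.5, chl=20,
                        rho0=3.0, Lbond=1.0, bond_type=1, rmin=0.0)

# Write a LAMMPS data file (the output directory is created if missing).
config.write('data/diblock.data')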
Source: calculate_eta.py
import numpy as np
from functools import lru_cache
from scipy.integrate import cumtrapz
from algebra import sthDerivativeOff, evaluateFunction
from optimization import range_memoize
from H_operator import Hi

N_POINTS = 1000

def _discountFactors(f, F, tRangeForBond):
    return np.exp( -Hi(f, F, tRangeForBond) )

def _bondPeriodsAndTRangeForBond(Fik,tSpan):
    bondPeriods = int(np.nonzero(Fik)[0][-1]) + 1
    return (bondPeriods,tSpan[:bondPeriods])

def _onlyPositiveSegmentForFunction(func,x):
    result= evaluateFunction(func, x)
    return np.clip(result,0,None)

############################IVP APPROACH############################
# def _pDerivativeOfEtaK(t, Fik, f, F, p, tSpan):
#     bondPeriods, tRangeForBond = _bondPeriodsAndTRangeForBond(Fik, tSpan)
#     discountFactors = _discountFactors(f, F, tRangeForBond)
#     diffedF = _evaluateFunction(_sthDerivativeOff(1, F), f(tRangeForBond))
#     integralFunc=np.power(_onlyPositiveSegmentForFunction(lambda u:(t-u),tRangeForBond),(p-1))/np.math.factorial(p-1)
#     integral=cumtrapz( diffedF * integralFunc, tRangeForBond, initial=0)
#
#     return -np.sum(discountFactors * Fik[:bondPeriods] * integral)
# def _sDerivativeOfEtaKIn0(Fik, f, F, s, tSpan):
#     bondPeriods, tRangeForBond = _bondPeriodsAndTRangeForBond(Fik, tSpan)
#     discountFactors = _discountFactors(f, F, tRangeForBond)
#     diffedF = _evaluateFunction(_sthDerivativeOff(1, F), f(tRangeForBond))
#     integralFunc = np.power(tRangeForBond,s)/ np.math.factorial(s)
#     integral = cumtrapz(diffedF * integralFunc, tRangeForBond, initial=0)
#
#     return np.sum(discountFactors * Fik[:bondPeriods] * integral)
# def _getODEfun(Fik, f, F, p, tSpan):
#     func = lambda t:_pDerivativeOfEtaK(t,Fik,f,F,p,tSpan)
#     def odefun(func,x,y):
#         result=np.zeros(y.shape)
#         for i in range(y.shape[0]-1):
#             result[i]=y[i+1]
#         result[-1]=func(x)
#         return result
#     return partial(odefun,func)
# def _getY0(Fik, f, F, p, tSpan):
#     func = lambda s: _sDerivativeOfEtaKIn0(Fik, f, F, s, tSpan)
#     return [func(s) for s in range(p)]
# def _eta_k(Fik, f, F, p, tSpan):
#     result=solve_ivp(_getODEfun(Fik,f,F,p,tSpan),(tSpan[0],tSpan[-1]),_getY0(Fik, f, F, p, tSpan),dense_output=True)
#     return (result.t,result.y[0])
# def eta_k(Fik, f, F, p, tSpan):
#     x,y=_eta_k(Fik, f, F, p, tSpan)
#     return UnivariateSpline(x,y)

############################INTEGRAL APPROACH############################
def _inner_sum(t, tau, p):
    inner_sum = np.sum([((t*tau) ** s) / (np.math.factorial(s))**2 for s in range(p)])
    return inner_sum

@lru_cache(maxsize=None)
def _inner_integral(t, tau, p, Tk):
    u=np.linspace(0,Tk,N_POINTS)
    integralFunc=_onlyPositiveSegmentForFunction(lambda x:(t-x),u)*_onlyPositiveSegmentForFunction(lambda x:(tau-x),u)
    return np.trapz( np.power(integralFunc,p-1),u)/(np.math.factorial((p-1))**2)

@range_memoize(4)
def _outter_integral(F,f,t,p,tRangeForBond,tSpan):
    diffedF = sthDerivativeOff(1, F)
    inner_term = np.vectorize( lambda tau: _inner_sum(t, tau, p)+_inner_integral(t,tau,p,tSpan[-1]) )
    return cumtrapz(evaluateFunction(diffedF, f(tRangeForBond)) * inner_term(tRangeForBond), tRangeForBond, initial=0)

def eta_k(t, Fik, f, F, p, tSpan):
    '''
    :param t: t where I want to evaluate eta_k
    :param Fik: Array of payments of kth bond
    :param f: f function iteration(what we want to solve)
    :param F: function F which transforms f
    :param p: Number of derivative
    :param tSpan: Time vector
    :return: eta for kth bond evaluated in t
    '''
    bondPeriods,tRangeForBond=_bondPeriodsAndTRangeForBond(Fik,tSpan)
    discountFactors=_discountFactors(f, F, tRangeForBond)
    outterIntegral=_outter_integral(F, f, t, p, tRangeForBond,tSpan)
    ...
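eta_k relies on the local algebra, optimization and H_operator modules, so it is not runnable on its own. The sketch below recomputes, with plain NumPy only, the self-contained kernel that _inner_sum and _inner_integral evaluate (a truncated series plus a clipped-product integral); the name inner_kernel and the sample arguments are illustrative, not part of the source module.

import numpy as np
from math import factorial

def inner_kernel(t, tau, p, Tk, n_points=1000):
    """Series + integral kernel corresponding to _inner_sum + _inner_integral."""
    # Truncated series: sum over s < p of (t*tau)^s / (s!)^2
    series = sum(((t * tau) ** s) / factorial(s) ** 2 for s in range(p))
    # Integral over [0, Tk] of [(t-u)_+ (tau-u)_+]^(p-1) du / ((p-1)!)^2
    u = np.linspace(0.0, Tk, n_points)
    integrand = (np.clip(t - u, 0, None) * np.clip(tau - u, 0, None)) ** (p - 1)
    integral = np.trapz(integrand, u) / factorial(p - 1) ** 2
    return series + integral

# Sample evaluation with illustrative arguments.
print(inner_kernel(1.5, 2.0, p=2, Tk=5.0))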