repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---
cassiofragadantas/torch | [
"0db832121a82eef34cb75f6006825836cae379ac"
] | [
"tltorch/factorized_tensors/tensorized_matrices.py"
] | [
"import math\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nimport tensorly as tl\ntl.set_backend('pytorch')\nfrom tensorly import tenalg\nfrom tensorly.decomposition import parafac, tucker, tensor_train, tensor_train_matrix\n\nfrom .core import TensorizedMatrix\nfrom ..utils.parameter_list import FactorList\n\n# Author: Jean Kossaifi\n# License: BSD 3 clause\n\n\ndef _ensure_tuple(value):\n \"\"\"Returns a tuple if `value` isn't one already\"\"\"\n if isinstance(value, int):\n if value == 1:\n return ()\n else:\n return (value, )\n elif isinstance(value, tuple):\n if value == (1,):\n return ()\n return tuple(value)\n else:\n return tuple(value)\n\n\nclass CPMatrix(TensorizedMatrix, name='CP'):\n \"\"\"Tensorized Matrix in CP Form\n\n \"\"\"\n def __init__(self, weights, factors, tensorized_row_shape, tensorized_column_shape, rank=None, n_matrices=()):\n super().__init__()\n if rank is None:\n _, self.rank = tl.cp_tensor._validate_cp_tensor((weights, factors))\n else:\n self.rank = rank\n self.shape = (np.prod(tensorized_row_shape), np.prod(tensorized_column_shape))\n self.tensorized_shape = tensorized_row_shape + tensorized_column_shape\n self.tensorized_row_shape = tensorized_row_shape\n self.tensorized_column_shape = tensorized_column_shape\n \n self.n_matrices = _ensure_tuple(n_matrices)\n self.order = len(factors)\n self.weights = weights\n self.factors = factors\n\n @classmethod\n def new(cls, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n tensor_shape = n_matrices + tensorized_row_shape + tensorized_column_shape\n rank = tl.cp_tensor.validate_cp_rank(tensor_shape, rank)\n\n # Register the parameters\n weights = nn.Parameter(torch.Tensor(rank))\n # Avoid the issues with ParameterList\n factors = [nn.Parameter(torch.Tensor(s, rank)) for s in tensor_shape]\n\n return cls(weights, factors, tensorized_row_shape, tensorized_column_shape, rank=rank, n_matrices=n_matrices)\n \n @classmethod\n def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), init='random', **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n rank = tl.cp_tensor.validate_cp_rank(n_matrices + tensorized_row_shape + tensorized_column_shape, rank)\n\n with torch.no_grad():\n weights, factors = parafac(tensor, rank, **kwargs)\n weights = nn.Parameter(weights)\n factors = [nn.Parameter(f) for f in factors]\n\n return cls(weights, factors, tensorized_row_shape, tensorized_column_shape, rank, n_matrices)\n\n @classmethod\n def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):\n if matrix.ndim > 2:\n n_matrices = _ensure_tuple(tl.shape(matrix)[:-2])\n else:\n n_matrices = ()\n\n tensor = matrix.reshape((*n_matrices, *tensorized_row_shape, *tensorized_column_shape))\n return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices, **kwargs)\n\n def init_from_tensor(self, tensor, **kwargs):\n with torch.no_grad():\n weights, factors = parafac(tensor, self.rank, **kwargs)\n self.weights = nn.Parameter(weights)\n self.factors = FactorList([nn.Parameter(f) for f in factors])\n return self\n\n def init_from_matrix(self, matrix, **kwargs):\n tensor = matrix.reshape((*self.n_matrices, *self.tensorized_row_shape, *self.tensorized_column_shape))\n return self.init_from_tensor(tensor, **kwargs)\n\n\n @property\n def decomposition(self):\n return self.weights, self.factors\n\n def to_tensor(self):\n return 
tl.cp_to_tensor(self.decomposition)\n\n def normal_(self, mean=0, std=1):\n super().normal_(mean, std)\n std_factors = (std/math.sqrt(self.rank))**(1/self.order)\n\n with torch.no_grad():\n self.weights.fill_(1)\n for factor in self.factors:\n factor.data.normal_(0, std_factors)\n return self\n \n def __getitem__(self, indices):\n if isinstance(indices, int):\n # Select one dimension of one mode\n mixing_factor, *factors = self.factors\n weights = self.weights*mixing_factor[indices, :]\n return self.__class__(weights, factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n\n elif isinstance(indices, slice):\n # Index part of a factor\n mixing_factor, *factors = self.factors\n factors = [mixing_factor[indices], *factors]\n weights = self.weights\n return self.__class__(weights, factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n \n else:\n # Index multiple dimensions\n factors = self.factors\n index_factors = []\n weights = self.weights\n for index in indices:\n if index is Ellipsis:\n raise ValueError(f'Ellipsis is not yet supported, yet got indices={indices} which contains one.')\n\n mixing_factor, *factors = factors\n if isinstance(index, int):\n if factors or index_factors:\n weights = weights*mixing_factor[index, :]\n else:\n # No factors left\n return tl.sum(weights*mixing_factor[index, :])\n else:\n index_factors.append(mixing_factor[index])\n\n return self.__class__(weights, index_factors+factors, self.shape, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[len(indices):])\n\n\nclass TuckerMatrix(TensorizedMatrix, name='Tucker'):\n \"\"\"Tensorized Matrix in Tucker Form\n\n \"\"\"\n def __init__(self, core, factors, tensorized_row_shape, tensorized_column_shape, rank=None, n_matrices=()):\n super().__init__()\n if rank is None:\n _, self.rank = tl.tucker_tensor._validate_tucker_tensor((core, factors))\n else:\n self.rank = rank\n self.order = self.n_factors = len(factors)\n self.shape = (np.prod(tensorized_row_shape), np.prod(tensorized_column_shape))\n self.tensorized_row_shape = tensorized_row_shape\n self.tensorized_column_shape = tensorized_column_shape\n\n self.n_matrices = _ensure_tuple(n_matrices)\n\n setattr(self, 'core', core)\n self.factors = FactorList(factors)\n \n @classmethod\n def new(cls, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n full_shape = n_matrices + tensorized_row_shape + tensorized_column_shape\n rank = tl.tucker_tensor.validate_tucker_rank(full_shape, rank)\n\n core = nn.Parameter(torch.Tensor(*rank))\n factors = [nn.Parameter(torch.Tensor(s, r)) for (s, r) in zip(full_shape, rank)]\n return cls(core, factors, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices)\n \n @classmethod\n def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n rank = tl.tucker_tensor.validate_tucker_rank(n_matrices + tensorized_row_shape + tensorized_column_shape, rank)\n\n with torch.no_grad():\n core, factors = tucker(tensor, rank, **kwargs)\n \n return cls(nn.Parameter(core), [nn.Parameter(f) for f in factors], tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices)\n \n @classmethod\n def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):\n if matrix.ndim > 2:\n n_matrices = 
_ensure_tuple(tl.shape(matrix)[:-2])\n else:\n n_matrices = ()\n\n tensor = matrix.reshape((*n_matrices, *tensorized_row_shape, *tensorized_column_shape))\n return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices, **kwargs)\n\n def init_from_tensor(self, tensor, init='svd', **kwargs):\n with torch.no_grad():\n core, factors = tucker(tensor, self.rank, **kwargs)\n \n self.core = nn.Parameter(core)\n self.factors = FactorList([nn.Parameter(f) for f in factors])\n\n return self\n\n def init_from_matrix(self, matrix, **kwargs):\n tensor = matrix.reshape((*self.n_matrices, *self.tensorized_row_shape, *self.tensorized_column_shape))\n return self.init_from_tensor(tensor, **kwargs)\n\n @property\n def decomposition(self):\n return self.core, self.factors\n\n def to_tensor(self):\n return tl.tucker_to_tensor(self.decomposition)\n \n def normal_(self, mean=0, std=1):\n if mean != 0:\n raise ValueError(f'Currently only mean=0 is supported, but got mean={mean}')\n \n r = np.prod([math.sqrt(r) for r in self.rank])\n std_factors = (std/r)**(1/(self.order+1))\n \n with torch.no_grad():\n self.core.data.normal_(0, std_factors)\n for factor in self.factors:\n factor.data.normal_(0, std_factors)\n return self\n\n def __getitem__(self, indices):\n if isinstance(indices, int):\n # Select one dimension of one mode\n mixing_factor, *factors = self.factors\n core = tenalg.mode_dot(self.core, mixing_factor[indices, :], 0)\n return self.__class__(core, factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n\n elif isinstance(indices, slice):\n mixing_factor, *factors = self.factors\n factors = [mixing_factor[indices], *factors]\n return self.__class__(self.core, factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n \n else:\n # Index multiple dimensions\n modes = []\n factors = []\n factors_contract = []\n for i, (index, factor) in enumerate(zip(indices, self.factors)):\n if index is Ellipsis:\n raise ValueError(f'Ellipsis is not yet supported, yet got indices={indices}, indices[{i}]={index}.')\n if isinstance(index, int):\n modes.append(i)\n factors_contract.append(factor[index])\n else:\n factors.append(factor[index])\n\n core = tenalg.multi_mode_dot(self.core, factors_contract, modes=modes)\n factors = factors + self.factors[i+1:]\n\n if factors:\n return self.__class__(core, factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[len(indices):])\n\n # Fully contracted tensor\n return core\n\n\nclass TTTensorized(TensorizedMatrix, name='TT'):\n \"\"\"Tensorized Matrix in Tensor-Train (MPS) Form\n\n Notes\n -----\n It may be preferable to use TTMatrix instead\n\n See Also\n --------\n TTMatrix\n \"\"\"\n def __init__(self, factors, tensorized_row_shape, tensorized_column_shape, rank=None, n_matrices=()):\n super().__init__()\n if rank is None:\n _, self.rank = tl.tt_tensor._validate_tt_tensor(factors)\n else:\n self.rank = rank\n self.order = self.n_factors = len(factors)\n self.shape = (np.prod(tensorized_row_shape), np.prod(tensorized_column_shape))\n self.tensorized_row_shape = tensorized_row_shape\n self.tensorized_column_shape = tensorized_column_shape\n\n self.n_matrices = _ensure_tuple(n_matrices)\n self.factors = FactorList(factors)\n \n @classmethod\n def new(cls, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n full_shape = n_matrices + 
tensorized_row_shape + tensorized_column_shape\n rank = tl.tt_tensor.validate_tt_rank(full_shape, rank)\n\n # Avoid the issues with ParameterList\n factors = [nn.Parameter(torch.Tensor(rank[i], s, rank[i+1])) for i, s in enumerate(full_shape)]\n\n return cls(factors, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices)\n \n @classmethod\n def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape, rank='same', **kwargs):\n full_shape = tensorized_row_shape + tensorized_column_shape\n n_matrices = _ensure_tuple(tensor.shape[:-len(full_shape)])\n rank = tl.tt_tensor.validate_tt_rank(n_matrices + full_shape, rank)\n\n with torch.no_grad():\n factors = tensor_train(tensor, rank, **kwargs)\n \n return cls([nn.Parameter(f) for f in factors], tensorized_row_shape, tensorized_column_shape, rank=rank, n_matrices=n_matrices)\n \n @classmethod\n def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):\n if matrix.ndim > 2:\n n_matrices = _ensure_tuple(tl.shape(matrix)[:-2])\n else:\n n_matrices=(),\n tensor = matrix.reshape((*n_matrices, *tensorized_row_shape, *tensorized_column_shape))\n return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, **kwargs)\n\n def init_from_tensor(self, tensor, **kwargs):\n with torch.no_grad():\n factors = tensor_train(tensor, self.rank, **kwargs)\n \n self.factors = FactorList([nn.Parameter(f) for f in factors])\n return self\n\n def init_from_matrix(self, matrix, **kwargs):\n tensor = matrix.reshape((*self.n_matrices, *self.tensorized_row_shape, *self.tensorized_column_shape))\n return self.init_from_tensor(tensor, **kwargs)\n\n @property\n def decomposition(self):\n return self.factors\n\n def to_tensor(self):\n return tl.tt_to_tensor(self.decomposition)\n\n def normal_(self, mean=0, std=1):\n if mean != 0:\n raise ValueError(f'Currently only mean=0 is supported, but got mean={mean}')\n\n r = np.prod(self.rank)\n std_factors = (std/r)**(1/self.order)\n with torch.no_grad():\n for factor in self.factors:\n factor.data.normal_(0, std_factors)\n return self\n\n def __getitem__(self, indices):\n if isinstance(indices, int):\n # Select one dimension of one mode\n factor, next_factor, *factors = self.factors\n next_factor = tenalg.mode_dot(next_factor, factor[:, indices, :].squeeze(1), 0)\n return self.__class__([next_factor, *factors], self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n \n elif isinstance(indices, slice):\n mixing_factor, *factors = self.factors\n factors = [mixing_factor[:, indices], *factors]\n return self.__class__(factors, self.tensorized_row_shape, \n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n\n else:\n factors = []\n all_contracted = True\n for i, index in enumerate(indices):\n if index is Ellipsis:\n raise ValueError(f'Ellipsis is not yet supported, yet got indices={indices}, indices[{i}]={index}.')\n if isinstance(index, int):\n if i:\n factor = tenalg.mode_dot(factor, self.factors[i][:, index, :].T, -1)\n else:\n factor = self.factors[i][:, index, :]\n else:\n if i:\n if all_contracted:\n factor = tenalg.mode_dot(self.factors[i][:, index, :], factor, 0)\n else:\n factors.append(factor)\n factor = self.factors[i][:, index, :]\n else:\n factor = self.factors[i][:, index, :]\n all_contracted = False\n\n # We have contracted all cores, so have a 2D matrix\n if factor.ndim == 2:\n if self.order == (i+1):\n # No factors left\n return factor.squeeze()\n else:\n next_factor, *factors = 
self.factors[i+1:]\n factor = tenalg.mode_dot(next_factor, factor, 0)\n return self.__class__([factor, *factors], self.tensorized_row_shape, \n self.tensorized_column_shape,\n n_matrices=self.n_matrices[len(indices):])\n else:\n return self.__class__([*factors, factor, *self.factors[i+1:]], self.tensorized_row_shape, \n self.tensorized_column_shape,\n n_matrices=self.n_matrices[len(indices):])\n\n\nclass TTMatrix(TensorizedMatrix, name='TTM'):\n \"\"\"Tensorized Matrix in the Tensor-Train Matrix (MPO) Form\n \"\"\"\n def __init__(self, factors, tensorized_row_shape, tensorized_column_shape, rank=None, n_matrices=1):\n super().__init__()\n\n if rank is None:\n _, self.rank = tl.tt_matrix._validate_tt_matrix(factors)\n \n self.tensorized_row_shape = tensorized_row_shape\n self.tensorized_column_shape = tensorized_column_shape\n self.tensorized_shape = tensorized_row_shape + tensorized_column_shape\n self.shape = (np.prod(tensorized_row_shape), np.prod(tensorized_column_shape))\n self.order = len(tensorized_row_shape)\n\n self.factors = FactorList(factors)\n self.rank = rank\n self.n_matrices = _ensure_tuple(n_matrices)\n\n @classmethod\n def new(cls, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n shape = tensorized_row_shape + tensorized_column_shape\n rank = tl.tt_matrix.validate_tt_matrix_rank(shape, rank)\n\n if not n_matrices:\n factors = [nn.Parameter(torch.Tensor(rank[i], tensorized_row_shape[i], tensorized_column_shape[i], rank[i + 1]))\\\n for i in range(len(tensorized_row_shape))]\n elif len(n_matrices) == 1:\n factors = [nn.Parameter(torch.Tensor(n_matrices[0], rank[i], tensorized_row_shape[i], tensorized_column_shape[i], rank[i + 1]))\\\n for i in range(len(tensorized_row_shape))]\n else:\n raise ValueError(f'Currently a single dimension is supported for n_matrices, it should an integer (by default, 1) but got n_matrices={n_matrices}.')\n \n return cls(factors, tensorized_row_shape, tensorized_column_shape, rank=rank, n_matrices=n_matrices)\n \n @classmethod\n def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n rank = tl.tt_matrix.validate_tt_matrix_rank(tensorized_row_shape + tensorized_column_shape, rank)\n\n if n_matrices == ():\n with torch.no_grad():\n factors = tensor_train_matrix(tensor, rank, **kwargs)\n factors = [nn.Parameter(f) for f in factors]\n\n else:\n factors = [torch.zeros(n_matrices[0], rank[i], tensorized_row_shape[i], tensorized_column_shape[i], rank[i + 1])\\\n for i in range(len(tensorized_row_shape))]\n for i in range(n_matrices[0]):\n with torch.no_grad():\n factors_i = tensor_train_matrix(tensor[i], rank, **kwargs)\n print(factors_i)\n for j, factor in enumerate(factors_i):\n factors[j][i, ...] 
= factor\n factors = [nn.Parameter(f) for f in factors]\n return cls(factors, tensorized_row_shape, tensorized_column_shape, rank, n_matrices)\n\n @classmethod\n def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):\n if matrix.ndim > 2:\n n_matrices = _ensure_tuple(tl.shape(matrix)[:-2])\n else:\n n_matrices = ()\n tensor = matrix.reshape((*n_matrices, *tensorized_row_shape, *tensorized_column_shape))\n return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices, **kwargs)\n\n def init_from_tensor(self, tensor, **kwargs):\n if self.n_matrices == ():\n with torch.no_grad():\n factors = tensor_train_matrix(tensor, self.rank, **kwargs)\n factors = [nn.Parameter(f) for f in factors]\n\n else:\n factors = [torch.zeros(self.n_matrices[0], self.rank[i], self.tensorized_row_shape[i], self.tensorized_column_shape[i], self.rank[i + 1])\\\n for i in range(len(self.tensorized_row_shape))]\n for i in range(self.n_matrices[0]):\n with torch.no_grad():\n factors_i = tensor_train_matrix(tensor[i], self.rank, **kwargs)\n print(factors_i)\n for j, factor in enumerate(factors_i):\n factors[j][i, ...] = factor\n factors = [nn.Parameter(f) for f in factors]\n \n self.factors = FactorList(factors)\n return self\n\n def init_from_matrix(self, matrix, **kwargs):\n tensor = matrix.reshape((*self.n_matrices, *self.tensorized_row_shape, *self.tensorized_column_shape))\n return self.init_from_tensor(tensor, **kwargs)\n\n @property\n def decomposition(self):\n return self.factors\n\n def to_tensor(self):\n if not self.n_matrices:\n return tl.tt_matrix_to_tensor(self.decomposition)\n else:\n ten = tl.tt_matrix_to_tensor(self[0].decomposition)\n res = torch.zeros(*self.n_matrices, *ten.shape)\n res[0, ...] = ten\n for i in range(1, self.n_matrices[0]):\n res[i, ...] = tl.tt_matrix_to_tensor(self[i].decomposition)\n return res\n\n def normal_(self, mean=0, std=1):\n if mean != 0:\n raise ValueError(f'Currently only mean=0 is supported, but got mean={mean}')\n\n r = np.prod(self.rank)\n std_factors = (std/r)**(1/self.order)\n with torch.no_grad():\n for factor in self.factors:\n factor.data.normal_(0, std_factors)\n return self\n\n def to_matrix(self):\n if not self.n_matrices:\n return tl.tt_matrix_to_matrix(self.decomposition)\n else:\n res = torch.zeros(*(self.n_matrices + self.shape))\n for i in range(self.n_matrices[0]):\n res[i, ...] = tl.tt_matrix_to_matrix(self[i].decomposition)\n return res\n\n def __getitem__(self, indices):\n if not isinstance(indices, int) or not self.n_matrices:\n raise ValueError(f'Currently only indexing over n_matrices is supported for TTMatrices.')\n\n return self.__class__([f[indices, ...] for f in self.factors],\n self.tensorized_row_shape, self.tensorized_column_shape, self.rank, self.n_matrices[1:])\n\n def __torch_function__(self, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n\n args = [t.to_matrix() if hasattr(t, 'to_matrix') else t for t in args]\n return func(*args, **kwargs)\n\n### Auxiliary functions for SuKro factorization\n\ndef rearrange(D,n,m,n_matrices=()):\n# Input matrix D is of size (n_matrices x prod(n) x prod(m))\n# Output tensor R_D is of size (n_matrices x n[0]m[0] x n[1]m[1] x ... 
x n[-1]m[-1])\n assert len(n)==len(m)\n #Handle n_matrices\n tosqueeze = ()\n if n_matrices == ():\n D = torch.unsqueeze(D,0)\n tosqueeze = True\n n_matrices = (1,)\n\n # Main recursion\n if len(n)==1: # Base case: vectorizes a given block.\n return D.reshape(n_matrices + (-1,))\n else:\n # Block sizes\n n_rows = np.prod(n[1:])\n n_cols = np.prod(m[1:])\n\n # Go over each block of the matrix. Then recursively go over all sub-blocks in the block.\n for i1 in range(n[0]):\n for j1 in range(m[0]):\n # Reorders the block and concatenates the results\n res = rearrange(D[:,i1*n_rows:(i1+1)*n_rows, j1*n_cols:(j1+1)*n_cols], n[1:], m[1:], n_matrices)\n res = torch.unsqueeze(res,1)\n R_D = tl.concatenate((R_D,res), axis=1) if (i1,j1) != (0,0) else res\n\n return torch.squeeze(R_D,0) if tosqueeze else R_D\n\ndef rearrange_inv(R_D,n,m,n_matrices=()):\n# Input tensor R_D is of size (n_matrices x n[0]m[0] x n[1]m[1] x ... x n[-1]m[-1])\n# Output matrix D is of size (n_matrices x prod(n) x prod(m))\n assert len(n)==len(m)\n #Handle n_matrices\n tosqueeze = ()\n if n_matrices == ():\n R_D = torch.unsqueeze(R_D,0)\n tosqueeze = True\n n_matrices = (1,)\n\n # Main recursion\n if len(n)==1: # Base case: unvectorizes a given block.\n return R_D.reshape(n_matrices+n+m)\n else:\n # Go over each block of the tensor. Then recursively go over all fibers in the slice.\n for i1 in range(n[0]):\n for j1 in range(m[0]):\n # Reorders the block and concatenates the results\n res = rearrange_inv(R_D[:,i1*m[0] + j1], n[1:], m[1:], n_matrices)\n D_i = tl.concatenate((D_i, res), axis=2) if j1 != 0 else res\n D = tl.concatenate((D,D_i), axis=1) if i1 != 0 else D_i\n return torch.squeeze(D,0) if tosqueeze else D\n\n\n\nclass SuKroMatrix(TensorizedMatrix, name='SuKro'):\n \"\"\"Tensorized Matrix in SuKro (sum of Kroneckers) Form.\n\n Matrix is tensorized with a particular entries rearrangement. 
Then CP factorization is applied.\n \"\"\"\n def __init__(self, weights, factors, tensorized_row_shape, tensorized_column_shape, rank=None, n_matrices=()):\n super().__init__()\n if rank is None:\n _, self.rank = tl.cp_tensor._validate_cp_tensor((weights, factors))\n else:\n self.rank = rank\n self.shape = (np.prod(tensorized_row_shape), np.prod(tensorized_column_shape))\n self.tensorized_shape = tensorized_row_shape + tensorized_column_shape\n self.tensorized_row_shape = tensorized_row_shape\n self.tensorized_column_shape = tensorized_column_shape\n\n self.n_matrices = _ensure_tuple(n_matrices)\n self.order = len(factors)\n self.weights = weights\n self.factors = factors\n\n @classmethod\n def new(cls, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), **kwargs):\n n_matrices = _ensure_tuple(n_matrices)\n # Shape of the rearrange tensor is (n[0]m[0]x...x n[-1]m[-1]) not (n[0]x...x n[-1] x m[0]x...x m[-1])\n tensor_shape = n_matrices + tuple([n*m for n, m in zip(tensorized_row_shape, tensorized_column_shape)])\n rank = tl.cp_tensor.validate_cp_rank(tensor_shape, rank)\n if len(tensor_shape)==2: # rank cannot exceed matrix dimensions when using SVD\n rank = min(rank, max(tensor_shape))\n\n # Register the parameters\n weights = nn.Parameter(torch.Tensor(rank))\n # Avoid the issues with ParameterList\n factors = [nn.Parameter(torch.Tensor(s, rank)) for s in tensor_shape]\n\n return cls(weights, factors, tensorized_row_shape, tensorized_column_shape, rank=rank, n_matrices=n_matrices)\n\n @classmethod\n def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=(), init='random', **kwargs):\n # tensor is supposed to be already rearranged\n n_matrices = _ensure_tuple(n_matrices)\n rank = tl.cp_tensor.validate_cp_rank(n_matrices + tensorized_row_shape + tensorized_column_shape, rank)\n\n with torch.no_grad():\n weights, factors = parafac(tensor, rank, **kwargs)\n weights = nn.Parameter(weights)\n factors = [nn.Parameter(f) for f in factors]\n\n return cls(weights, factors, tensorized_row_shape, tensorized_column_shape, rank, n_matrices)\n\n @classmethod\n def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):\n if matrix.ndim > 2:\n n_matrices = _ensure_tuple(tl.shape(matrix)[:-2])\n else:\n n_matrices = ()\n\n tensor = rearrange(matrix, tensorized_row_shape, tensorized_column_shape, n_matrices)\n return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices, **kwargs)\n\n def init_from_tensor(self, tensor, **kwargs):\n #tensor is supposed to be already rearranged\n with torch.no_grad():\n weights, factors = parafac(tensor, self.rank, **kwargs)\n self.weights = nn.Parameter(weights)\n self.factors = FactorList([nn.Parameter(f) for f in factors])\n return self\n\n def init_from_matrix(self, matrix, **kwargs):\n tensor = rearrange(matrix, self.tensorized_row_shape, self.tensorized_column_shape, self.n_matrices)\n return self.init_from_tensor(tensor, **kwargs)\n\n\n @property\n def decomposition(self):\n return self.weights, self.factors\n\n def to_tensor(self):\n return tl.cp_to_tensor(self.decomposition)\n\n def to_matrix(self):\n # Create sukro_to_matrix in tensorly, similar to tt_matrix_to_matrix or cp_to_tensor ?\n # There are two ways to implement this:\n # 1) Inverse rearrangement on the resulting CP tensor (implemented below)\n # 2) Matricizing columns of the cp factors and taking the Kronecker product\n tensor = tl.cp_to_tensor(self.decomposition)\n return 
rearrange_inv(tensor, self.tensorized_row_shape, self.tensorized_column_shape, self.n_matrices)\n\n\n def normal_(self, mean=0, std=1):\n super().normal_(mean, std)\n std_factors = (std/math.sqrt(self.rank))**(1/self.order)\n\n with torch.no_grad():\n self.weights.fill_(1)\n for factor in self.factors:\n factor.data.normal_(0, std_factors)\n return self\n\n def __getitem__(self, indices):\n if isinstance(indices, int):\n # Select one dimension of one mode\n mixing_factor, *factors = self.factors\n weights = self.weights*mixing_factor[indices, :]\n return self.__class__(weights, factors, self.tensorized_row_shape,\n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n\n elif isinstance(indices, slice):\n # Index part of a factor\n mixing_factor, *factors = self.factors\n factors = [mixing_factor[indices], *factors]\n weights = self.weights\n return self.__class__(weights, factors, self.tensorized_row_shape,\n self.tensorized_column_shape, n_matrices=self.n_matrices[1:])\n\n else:\n # Index multiple dimensions\n factors = self.factors\n index_factors = []\n weights = self.weights\n for index in indices:\n if index is Ellipsis:\n raise ValueError(f'Ellipsis is not yet supported, yet got indices={indices} which contains one.')\n\n mixing_factor, *factors = factors\n if isinstance(index, int):\n if factors or index_factors:\n weights = weights*mixing_factor[index, :]\n else:\n # No factors left\n return tl.sum(weights*mixing_factor[index, :])\n else:\n index_factors.append(mixing_factor[index])\n\n return self.__class__(weights, index_factors+factors, self.shape, self.tensorized_row_shape,\n self.tensorized_column_shape, n_matrices=self.n_matrices[len(indices):])"
] | [
[
"torch.nn.Parameter",
"torch.Tensor",
"torch.zeros",
"torch.unsqueeze",
"torch.no_grad",
"numpy.prod",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kfoerderer/ANN-based-surrogates | [
"aef0eca9e969858e47babfc73a15c04262285e6b"
] | [
"modules/neuralnetwork/layer.py"
] | [
"import torch\nimport torch.nn as nn\n\nclass DebugLayer(nn.Module):\n\n def __init__(self, depth):\n super().__init__()\n\n self.depth = depth\n\n def forward(self, input):\n print('nn, DebugLayer %d, %s'%(self.depth, str(input.size())))\n return input\n\nclass SkipConnection(nn.Module):\n \"\"\"\n Module for skipping other modules.\n \n The first time called, it passes through the input and saves the reference. Any subsequent time the stored reference is appended to the input.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n self.store = None\n \n def forward(self, x): \n if self.store is None:\n self.store = x.clone()\n return x \n\n result = torch.cat((x, self.store), dim=-1)\n self.store = None\n \n return result"
] | [
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oscarramos2001/Oscar-Marino-Ramos | [
"c05e497b467aab4572f3578f1b9068d4585106d2"
] | [
"tema2/mountain_car_qlearner.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 10:27:43 2018\n\n@author: juangabriel\n\"\"\"\n\nimport gym\nimport numpy as np\n\n\n# EPISILON_MIN : vamos aprendiendo, mientras el incremento de aprendizaje sea superior a dicho valor\n# MAX_NUM_EPISONES : número máximo de iteraciones que estamos dispuestos a realizar\n# STEPS_PER_EPISODE: número máximo de pasos a realizar en cada episodio\n# ALPHA: ratio de aprendizaje del agente \n# GAMMA: factor de descuento del agente\n# NUM_DISCRETE_BINS: número de divisiones en el caso de discretizar el espacio de estados continuo. \n\nMAX_NUM_EPISODES = 50000\nSTEPS_PER_EPISODE = 200\nEPSILON_MIN = 0.005\nmax_num_steps = MAX_NUM_EPISODES * STEPS_PER_EPISODE\nEPSILON_DECAY = 500 * EPSILON_MIN / max_num_steps\nALPHA = 0.05\nGAMMA = 0.98\nNUM_DISCRETE_BINS = 30\n\n# QLearner Class\n# __init__(self, environment)\n# discretize(self, obs) [-2,2] -> [-2,-1], [-1,0], [0,1], [1,2]\n# get_action(self, obs)\n# learn(self, obs, action, reward, next_obs)\n\n\nclass QLearner(object):\n def __init__(self, environment):\n self.obs_shape = environment.observation_space.shape\n self.obs_high = environment.observation_space.high\n self.obs_low = environment.observation_space.low\n self.obs_bins = NUM_DISCRETE_BINS\n self.bin_width = (self.obs_high-self.obs_low)/self.obs_bins\n \n self.action_shape = environment.action_space.n\n self.Q = np.zeros((self.obs_bins+1, self.obs_bins+1, self.action_shape)) #matriz de 31 x 31 x 3\n self.alpha = ALPHA\n self.gamma = GAMMA\n self.epsilon = 1.0\n \n def discretize(self, obs):\n return tuple(((obs-self.obs_low)/self.bin_width).astype(int))\n \n def get_action(self, obs):\n discrete_obs = self.discretize(obs)\n # Selección de la acción en base a Epsilon-Greedy\n if self.epsilon > EPSILON_MIN:\n self.epsilon -= EPSILON_DECAY\n if np.random.random() > self.epsilon: #Con probabilidad 1-epsilon, elegimos la mejor posible\n return np.argmax(self.Q[discrete_obs])\n else:\n return np.random.choice([a for a in range(self.action_shape)])#Con probabilidad epsilon, elegimos una al azar\n \n \n def learn(self, obs, action, reward, next_obs):\n discrete_obs = self.discretize(obs)\n discrete_next_obs = self.discretize(next_obs)\n self.Q[discrete_obs][action] += self.alpha*(reward + self.gamma * np.max(self.Q[discrete_next_obs]) - self.Q[discrete_obs][action])\n \n \n## Método para entrenar a nuestro agente\ndef train(agent, environment):\n best_reward = -float('inf')\n for episode in range(MAX_NUM_EPISODES):\n done = False\n obs = environment.reset()\n total_reward = 0.0\n while not done:\n action = agent.get_action(obs)# Acción elegida según la ecuación de Q-LEarning\n next_obs, reward, done, info = environment.step(action)\n agent.learn(obs, action, reward, next_obs)\n obs = next_obs\n total_reward += reward\n if total_reward > best_reward:\n best_reward = total_reward\n print(\"EPisodio número {} con recompensa: {}, mejor recompensa: {}, epsilon: {}\".format(episode, total_reward, best_reward, agent.epsilon))\n \n ## De todas las políticas de entrenamiento que hemos obtenido devolvemos la mejor de todas\n return np.argmax(agent.Q, axis = 2)\n \n \ndef test(agent, environment, policy):\n done = False\n obs = environment.reset()\n total_reward = 0.0\n while not done:\n action = policy[agent.discretize(obs)] #acción que dictamina la política que hemos entrenado\n next_obs, reward, done, info = environment.step(action)\n obs = next_obs\n total_reward += reward\n return total_reward\n\nif __name__ == \"__main__\":\n 
environment = gym.make(\"MountainCar-v0\")\n agent = QLearner(environment)\n learned_policy = train(agent, environment)\n monitor_path = \"./monitor_output\"\n environment = gym.wrappers.Monitor(environment, monitor_path, force = True)\n for _ in range(1000):\n test(agent, environment, learned_policy)\n environment.close()\n"
] | [
[
"numpy.random.random",
"numpy.max",
"numpy.argmax",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
takemiyamakoto/mpltools | [
"5235bc6b2522abf28b5fac13ca8ff3a55434b99c"
] | [
"examples/plot_all_styles.py"
] | [
"\"\"\"\nSave test plots for all styles defined in `mpltools.style`.\n\nNote that `test_artists_plot` calls `matplotlib.pyplot.tight_layout` so subplot\nspacing is not tested for this plot.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport os.path as pth\n\nimport numpy as np\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom mpltools import style\n\n\nPATH = pth.abspath(pth.dirname(__file__))\n\nTEST_DIRS = ('test_artists_png', 'test_artists_pdf',\n 'test_simple_png', 'test_simple_pdf')\nfor d in TEST_DIRS:\n test_dir = pth.join(PATH, d)\n if not pth.exists(test_dir):\n os.mkdir(test_dir)\n\n\ndef test_artists_plot():\n fig, axes = plt.subplots(2, 2)\n axes = axes.ravel()\n\n x = np.linspace(0, 1)\n axes[0].plot(x, np.sin(2*np.pi*x), label='line')\n c = plt.Circle((0.25, 0), radius=0.1, label='patch')\n axes[0].add_patch(c)\n axes[0].grid()\n axes[0].legend()\n\n img = axes[1].imshow(np.random.random(size=(20, 20)))\n axes[1].set_title('image')\n\n ncolors = len(plt.rcParams['axes.color_cycle'])\n phi = np.linspace(0, 2*np.pi, ncolors + 1)[:-1]\n for p in phi:\n axes[2].plot(x, np.sin(2*np.pi*x + p))\n axes[2].set_title('color cycle')\n\n axes[3].text(0, 0, 'hello world')\n axes[3].set_xlabel('x-label')\n axes[3].set_ylabel('y-label')\n axes[3].set_title('title')\n\n try:\n fig.tight_layout()\n except AttributeError:\n pass\n # `colorbar` should be called after `tight_layout`.\n fig.colorbar(img, ax=axes[1])\n return fig\n\ndef test_simple_plot():\n fig, ax = plt.subplots()\n\n ax.plot([0, 1])\n ax.set_xlabel('x-label')\n ax.set_ylabel('y-label')\n ax.set_title('title')\n\n return fig\n\n\n# Only show styles defined by package, not by user.\nbase_styles = list(style.baselib.keys())\nfor sty in base_styles:\n # reset matplotlib defaults before applying new style\n plt.rcdefaults()\n\n style.use(sty, use_baselib=True)\n print(\"Plotting tests for '%s' style\" % sty)\n\n fig = test_artists_plot()\n fig.savefig(pth.join(PATH, 'test_artists_png', sty + '.png'))\n fig.savefig(pth.join(PATH, 'test_artists_pdf', sty + '.pdf'))\n\n fig = test_simple_plot()\n fig.savefig(pth.join(PATH, 'test_simple_png', sty + '.png'))\n fig.savefig(pth.join(PATH, 'test_simple_pdf', sty + '.pdf'))\n\n"
] | [
[
"numpy.random.random",
"numpy.linspace",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.sin",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.rcdefaults"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gibsramen/q2-convexhull | [
"6f9228f286535589509f7313ff39f1a29d6dcbdf"
] | [
"q2_convexhull/convexhull.py"
] | [
"# ----------------------------------------------------------------------------\n# Copyright (c) 2022--, convex-hull development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport pandas as pd\nfrom scipy.spatial import ConvexHull\nfrom skbio import OrdinationResults\nfrom q2_convexhull._defaults import (DEFAULT_N_DIMENSIONS)\nfrom warnings import warn\n\n\ndef validate(metadata, pcoa, individual_id_column):\n\n try:\n meta = metadata.loc[list(pcoa.samples.index)]\n except KeyError:\n raise KeyError('PCoA result indeces do not match metadata.')\n if individual_id_column not in metadata.columns:\n raise ValueError(f'Unique column id {individual_id_column}'\n f'not found in metadata columns.')\n if len(pcoa.samples.columns) < 2:\n raise ValueError(f'PCoA result has too few dimensions: '\n f'({len(pcoa.samples.columns)})')\n\n return meta\n\n\ndef convex_hull(metadata: pd.DataFrame,\n pcoa: OrdinationResults,\n individual_id_column: str,\n number_of_dimensions: int = DEFAULT_N_DIMENSIONS) \\\n -> (pd.DataFrame):\n \"\"\" Computes Convex Hull of a set of samples with multiple\n timepoints for each sample.\n\n Parameters\n ----------\n metadata: pd.DataFrame\n Metadata table associated with PCoA results.\n\n pcoa: skbio.OrdinationResults\n PCoA result.\n\n individual_id_column: str\n Unique subject identifier column in `metadata`. Must\n be unique to each subject. Can be repeated for\n multiple time points.\n\n number_of_dimensions: int (Default 3)\n Number of dimensions along which to calculate the\n convex hull volume and area.\n\n Returns\n -------\n pandas.DataFrame\n Data frame with unique ID, convex hull volume,\n and convex hull area. Columns are\n `column`, convexhull_volume, convexhull_area.\n\n Raises\n ------\n TypeError, ValueError\n If inputs are of incorrect type. If column ID not\n found in metadata.\n \"\"\"\n\n if number_of_dimensions > 3:\n warn_message = (f'Number of dimensions {number_of_dimensions} '\n f'not supported. Setting to default (3).')\n warn(warn_message, Warning)\n number_of_dimensions = 3\n\n if len(pcoa.samples.columns):\n warn_message = (f'PCoA result has {len(pcoa.samples.columns)} '\n f\"dimensions. Truncating to 3 PC's\")\n warn(warn_message, Warning)\n pcoa = OrdinationResults(pcoa.short_method_name,\n pcoa.long_method_name,\n pcoa.eigvals[:3],\n pcoa.samples[pcoa.samples.columns[:3]])\n meta = validate(metadata, pcoa, individual_id_column)\n hulls = []\n for person, group in meta.groupby(individual_id_column):\n n_timepts = len(group)\n if n_timepts <= number_of_dimensions:\n warn_message = (f'Number of timepoints less than '\n f'number of dimensions.'\n f'Skipping individual {person}')\n warn(warn_message, Warning)\n continue\n coords = pcoa.samples.loc[group.index].values[:, :number_of_dimensions]\n c_hull = ConvexHull(coords)\n hulls.append([person, c_hull.volume, c_hull.area])\n hulls = pd.DataFrame(hulls, columns=[individual_id_column,\n 'convexhull_volume',\n 'convexhull_area'])\n return hulls\n"
] | [
[
"pandas.DataFrame",
"scipy.spatial.ConvexHull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mjoshi07/AutoPano | [
"1d60ac1c717ccdaae40c129959fed5a27533d225"
] | [
"Phase2/Code/Test_unsupervised.py"
] | [
"\"\"\"\nCMSC733 Spring 2022: Classical and Deep Learning Approaches for Geometric Computer Vision\nProject1: MyAutoPano: Phase 2\n\nAuthor(s):\nMayank Joshi\nMasters student in Robotics,\nUniversity of Maryland, College Park\n\nAdithya Gaurav Singh\nMasters student in Robotics,\nUniversity of Maryland, College Park\n\"\"\"\n\nfrom Misc.MiscUtils import *\nfrom Misc.DataUtils import *\nfrom Network.Unsupervised_Network import *\nimport numpy as np\n\n# Don't generate pyc codes\nsys.dont_write_bytecode = True\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\ndef load_test_data(folder_name, files_in_dir, points_list, NumTestSamples):\n\n patch_pairs = []\n corners1 = []\n patches2 = []\n images1 = []\n\n for n in range(NumTestSamples):\n\n index = n\n patch1_name = folder_name + os.sep + \"PA/\" + files_in_dir[index, 0]\n patch1 = cv2.imread(patch1_name, cv2.IMREAD_GRAYSCALE)\n\n patch2_name = folder_name + os.sep + \"PB/\" + files_in_dir[index, 0]\n patch2 = cv2.imread(patch2_name, cv2.IMREAD_GRAYSCALE)\n\n image1_name = folder_name + os.sep + \"IA/\" + files_in_dir[index, 0]\n image1 = cv2.imread(image1_name, cv2.IMREAD_GRAYSCALE)\n\n if patch1 is None or patch2 is None:\n continue\n\n patch1 = np.float32(patch1)\n patch2 = np.float32(patch2)\n image1 = np.float32(image1)\n\n patch_pair = np.dstack((patch1, patch2))\n corner1 = points_list[index, :, :, 0]\n\n patch_pairs.append(patch_pair)\n corners1.append(corner1)\n patches2.append(patch2.reshape(128, 128, 1))\n\n images1.append(image1.reshape(image1.shape[0], image1.shape[1], 1))\n\n patch_indices = getPatchIndices(np.array(corners1))\n return np.array(patch_pairs), np.array(corners1), np.array(patches2), np.array(images1), patch_indices\n\n\ndef inference(img_pairs_PH, corners_PH, img2_PH, img1_PH,patch_idx_PH, Model_Path, Base_Path, all_files, corners_list, Save_Path, Num_Samples):\n\n _, H_batches = unsupervised_HomographyNet(img_pairs_PH, corners_PH, img1_PH, patch_idx_PH, Num_Samples)\n\n Saver = tf.train.Saver()\n with tf.Session() as sess:\n Saver.restore(sess, Model_Path)\n print('Number of parameters in this model are %d ' % np.sum([np.prod(x.get_shape().as_list()) for x in tf.trainable_variables()]))\n\n img_pairs, corner1, img2, img1, img_idx = load_test_data(Base_Path, all_files, corners_list, Num_Samples)\n feed_dict = {img_pairs_PH: img_pairs, corners_PH: corner1, img2_PH: img2, img1_PH: img1, patch_idx_PH: img_idx}\n\n H_pred = sess.run(H_batches, feed_dict)\n np.save(os.path.join(Save_Path, 'H_Pred.npy'), H_pred)\n\n\ndef run_unsupervised(ModelPath, BasePath, SavePath, NumTestSamples):\n\n if not os.path.exists(SavePath):\n print(SavePath)\n os.makedirs(SavePath)\n\n all_files, SaveCheckPoint, ImageSize, _, _ = SetupAll(BasePath)\n\n MaxSamplesForTest = 100\n if NumTestSamples > MaxSamplesForTest:\n print(\"Can Test for only atmax of 100 samples, setting test sample size to 100\")\n NumTestSamples = 100\n\n print(\"Images for Testing: \", NumTestSamples)\n corners_list = np.load(BasePath+'/corners.npy')\n\n corners_PH = tf.placeholder(tf.float32, shape=(MaxSamplesForTest, 4, 2))\n img_pair_PH = tf.placeholder(tf.float32, shape=(MaxSamplesForTest, 128, 128, 2))\n img2_PH = tf.placeholder(tf.float32, shape=(MaxSamplesForTest, 128, 128, 1))\n img1_PH = tf.placeholder(tf.float32, shape=(MaxSamplesForTest, 240, 320, 1))\n patch_idx_PH = tf.placeholder(tf.int32, shape=(MaxSamplesForTest, 128, 128, 2))\n\n 
inference(img_pair_PH, corners_PH, img2_PH, img1_PH, patch_idx_PH, ModelPath, BasePath, all_files, corners_list, SavePath, MaxSamplesForTest)\n\n rand_i = np.random.randint(0, MaxSamplesForTest, size=NumTestSamples)\n for i in rand_i:\n comparison = draw(i, BasePath, SavePath)\n print(\"Processing image : \" + str(i + 1) + 'a.jpg')\n cv2.imwrite(SavePath+'//' + str(i)+'.png', comparison)\n print('Check Results/unsupervised folder..')\n\n"
] | [
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.trainable_variables",
"numpy.dstack",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"numpy.float32",
"numpy.load",
"numpy.array",
"tensorflow.compat.v1.train.Saver",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mahitha2001/DSA-Questions | [
"f8970f3489fff9c79a67c58ac26e5923436ec652"
] | [
"Matrix/Python/Largest_Rectangle_Of_0.py"
] | [
"'''\r\n@Author 28Pollux28\r\nIn order to calculate the largest rectangle position and area in the matrix, we will first transform the matrix into an\r\narray of histogram (where each number tells how many consecutive zeros (the zero of the row is included) are above the\r\n number we are looking at). For example, the matrix :\r\n[[0,1,0,1],\r\n[1,1,0,0],\r\n[0,0,1,0],\r\n[1,1,1,0]]\r\nbecomes :\r\n[[1,0,1,0],\r\n[0,0,2,1]\r\n[1,1,0,2],\r\n[0,0,0,3]]\r\n\r\nThen for each histogram, we will calculate what I called L and R :\r\nL represent the minimum index i such as 0<=i<=x and histogram[j]>=histogram[x] for j such as i<=j<=x\r\nThe same process goes for R\r\nThe idea is to calculate for each index x the width of the rectangle of height histogram[x]. This is given by :\r\nwidth = D[x]-G[x]+1\r\nThen width*histogram[x] gives the area of the rectangle.\r\nBy iterating on the whole matrix, we can find the largest rectangle.\r\n\r\nAs for complexity, the create_histogram_tab is a O(n^2), and the largest_rectangle_histogram is a O(n),\r\ngiving us an optimal complexity of O(n^2)\r\n'''\r\n\r\nimport numpy as np\r\n\r\nn = 8\r\n\r\n\r\ndef main():\r\n # Construction of the matrix\r\n matrix = np.zeros((n, n))\r\n for i in range(n):\r\n for j in range(n):\r\n matrix[i, j] = np.random.randint(0, 2)\r\n\r\n max_area, bottomX, bottomY, width, height = all_zeros_rectangle(matrix)\r\n print(\"The bottom-left corner of the largest rectangle is at (\" + str(int(bottomX)) + \",\" + str(int(bottomY)) + \")\")\r\n print(\"It is \" + str(int(width)) + \" wide and \" + str(int(height)) + \" high for a total area of \" + str(\r\n int(max_area)))\r\n\r\n\r\ndef create_histogram_tab(matrix): # O(n^2)\r\n col = np.zeros((n, n))\r\n for y in range(n):\r\n c = 0\r\n for x in range(n):\r\n if matrix[x, y] != 0:\r\n c = 0\r\n else:\r\n c += 1\r\n col[x, y] = c\r\n return col\r\n\r\n\r\ndef all_zeros_rectangle(matrix):\r\n max_area = 0\r\n column = create_histogram_tab(matrix) # O(n^2)\r\n rectX = 0\r\n rectY = 0\r\n rectDX = 0\r\n rectDY = 0\r\n for y in range(n):\r\n histo = [column[y, x] for x in range(n)]\r\n area, rectx, rectdx, rectdy = largest_rectangle_histogram(histo) # O(n)\r\n if area > max_area:\r\n max_area = area\r\n rectX = rectx\r\n rectY = y\r\n rectDX = rectdx\r\n rectDY = rectdy\r\n return max_area, rectX, rectY, rectDX, rectDY\r\n\r\n\r\ndef calculateL(histogram):\r\n L = []\r\n for x in range(n):\r\n i = x - 1\r\n while i >= 0 and histogram[i] >= histogram[x]:\r\n i = L[i] - 1\r\n L.append(i + 1)\r\n return L\r\n\r\n\r\ndef calculateR(histogram):\r\n R = []\r\n for x in range(n - 1, -1, -1):\r\n i = x + 1\r\n while i <= n - 1 and histogram[i] >= histogram[x]:\r\n i = R[n - 1 - i] + 1\r\n R.append(i - 1)\r\n # swap order of R\r\n R.reverse()\r\n return R\r\n\r\n\r\ndef largest_rectangle_histogram(histo):\r\n L = calculateL(histo) # O(n)\r\n R = calculateR(histo) # O(n)\r\n max_area = 0\r\n xRect = 0\r\n dx = 0\r\n dy = 0\r\n for x in range(n):\r\n x_ = R[x] - L[x] + 1\r\n histo_x_ = histo[x]\r\n area = histo_x_ * (x_)\r\n if area > max_area:\r\n max_area = area\r\n xRect = L[x]\r\n dx = x_\r\n dy = histo_x_\r\n\r\n return max_area, xRect, dx, dy\r\n\r\n\r\nif __name__ == '__main__':\r\n n = int(input(\"Enter matrix size :\"))\r\n main()\r\n"
] | [
[
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kewitz/mestrado | [
"04af87b6533cb26fcfc9a6579d691e5d019ac9dc"
] | [
"Eletromagnetismo Computacional I/FDTD2D.yee .py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThe MIT License (MIT)\nCopyright (c) 2014 Leonardo Kewitz\n\nSimulação do erro de dispersão do FDTD quando não considerado o passo mágico em\num impulso gaussiano modulado em seno.\n\nCreated on Wed May 28 11:11:30 2014\n@author: leo\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport mpl_toolkits.mplot3d.axes3d as p3\n\nfrom src.Yee2D.fastYee import FYee\n\ndef gauss(k, t, fdtd):\n\twidth = (2*np.power(fdtd.tal,2))\n\tomega = 6*np.pi*fdtd.fop\n\tfunc = lambda t: np.exp(-np.power(t-2*fdtd.t0,2) / width)\n\tfdtd.Ez[k,1,:] = func(t)\n\na = FYee()\na.setFreq(2.4E9)\n\na.bound['Ez'][0,:] = 0\na.bound['Ez'][-1,:] = 0\na.bound['Ez'][20:50+1,40:60+1] = 0\n\na.bound['Hx'][0,:] = 0\na.bound['Hx'][-1,:] = 0\na.bound['Hx'][:,0] = 0\na.bound['Hx'][:,-1] = 0\na.bound['Hx'][20,40:60+1] = 0\na.bound['Hx'][50,40:60+1] = 0\n\na.bound['Hy'][20:50+1,40] = 0\na.bound['Hy'][20:50+1,60] = 0\n\na.run(gauss,t=3000)\n\n#%%Plot\nfig = plt.figure()\nims = []\n\n\nfor k in a.Ez[::15,:,:]:\n im = plt.imshow(k)\n ims.append([im])\n\nani = animation.ArtistAnimation(fig, ims, interval=30, blit=True, repeat_delay=0)\n\nplt.show()\n\n#%% Save Plot\n#Writer = animation.writers['mencoder_file']\n#writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)\n#ani.save('img.mp4', writer=writer)"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.animation.ArtistAnimation",
"numpy.power",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samuelfu/Visualization | [
"f960aa8045d9aaeeaec73e0277093f46ac49a422"
] | [
"multipleRegression.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.stats import linregress\n\nfile = 'data.xlsx'\nspreadsheet = pd.ExcelFile(file)\ndf = spreadsheet.parse(spreadsheet.sheet_names[0])\ntarget = spreadsheet.parse(spreadsheet.sheet_names[1])\n\nregressor = LinearRegression()\nregressor.fit(df,target)\n\n# plot\nfig = plt.figure()\n\nplt.plot(df['beauty'], target['course_eval'], linestyle='none', marker='o')\nresult = []\nfor index, row in df.iterrows():\n result.append((regressor.intercept_ + regressor.coef_[0][0]* row['minority'] \\\n + regressor.coef_[0][1] * row['age'] \\\n + regressor.coef_[0][2] * row['female'] \\\n + regressor.coef_[0][3] * row['onecredit'] \\\n + regressor.coef_[0][4] * row['beauty'] \\\n + regressor.coef_[0][5] * row['intro'] \\\n + regressor.coef_[0][6] * row['nnenglish']).tolist())\nresult = sum(result,[])\n\nplt.plot( df['beauty'], pd.DataFrame(result,columns=['course_eval']) )\n\nfig.suptitle('Beauty of Teacher and Course Ratings', fontsize=20)\nplt.xlabel('Beauty', fontsize=18)\nplt.ylabel('Course Evaluation', fontsize=16)\n\nplt.savefig('multipleRegression.png')\n"
] | [
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"sklearn.linear_model.LinearRegression",
"pandas.ExcelFile",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pasindubawantha/sherlock-framework | [
"92d64fbc86256a61c6b00b7ca9eb0a17634c7446"
] | [
"scripts/model_runner_CDD_CNN.py"
] | [
"import json\nimport pandas\nimport math\nimport sys\nimport os\nimport numpy as np\nimport re\nimport shutil\nimport helpers\nimport confusion_metrics\n\n# args\nsherlock = \"../debug/src/SherlockCDDCNN\"\ninput_dir = \"../data/nab_tuned/\"\n# input_dir = \"../data/test/\"\ninput_summary_file = \"../data/nab_tuned_summary.csv\"\noutput_dir = \"../data/nab_tuned_results_CDD_CNN/\"\n# output_dir = \"../data/test_results/\"\nmodel_summary_file = \"../data/sherlock-CDD__CNN_list.csv\"\nnan_folder = \"../data/nab_nan/CDD_CNN/\"\nk\nmax_training_ratio = 0.15\nprediction_training_ratio_fraction = 0.75\nthreshold_max_multipler = 2\nmax_training_ratio_buffer = 0.95\n\ninput_summary = pandas.read_csv(input_summary_file, index_col=\"file\")\n\nmodel_summary = []\nmodel_summary.append(\"file,mse,TP,FP,FN,TN,parameters,threshold_parameters,no_of_anomalies,first_label,length,first_label_ratio\")\nhelpers.sherlock_dump_summary(model_summary, model_summary_file)\n\nreg_x = re.compile(r'\\.(csv)')\ncsv_input_files = []\nfor path, dnames, fnames in os.walk(input_dir):\n csv_input_files.extend([os.path.join(path, f) for f in fnames if reg_x.search(f)])\n\ncsv_input_files.sort()\n\ntry:\n shutil.rmtree(output_dir)\n shutil.rmtree(nan_folder)\nexcept OSError:\n print(\"No previous \", output_dir)\n\nos.mkdir(output_dir)\nos.mkdir(nan_folder)\n\nfile_count = 0\nfor f in csv_input_files:\n print(\"####### Running Sherlock : \" + f)\n fconfig = f[:-3] + \"json\"\n fout = output_dir + f.split('/')[-1]\n\n shutil.copyfile(fconfig, output_dir + fconfig.split('/')[-1])\n\n # Training ratio\n dataframe = pandas.read_csv(f)\n value = np.array(dataframe['value'])\n timestamp = np.array(dataframe['timestamp'])\n label = np.array(dataframe['label'])\n\n jsonf = open(fconfig, \"r\")\n jsond = json.load(jsonf)\n sequance_length = int(jsond['prediction_model']['model']['CNN']['matWidth']) * int(jsond['prediction_model']['model']['CNN']['matHeight'])\n\n fname = f.split(\"/\")[-1]\n first_label_ratio = input_summary['first_label_ratio'][fname]\n\n ## Prediction training ratio\n if first_label_ratio < max_training_ratio:\n prediction_training_ratio = first_label_ratio*max_training_ratio_buffer*prediction_training_ratio_fraction\n else:\n prediction_training_ratio = max_training_ratio*prediction_training_ratio_fraction\n \n if int(prediction_training_ratio*len(value)) +3 < sequance_length:\n prediction_training_ratio = float(sequance_length + 3) / float(len(value))\n \n ## Threshold training ratio\n if first_label_ratio < max_training_ratio:\n total_training_length = int(first_label_ratio * max_training_ratio_buffer * len(value))\n else:\n total_training_length = int(max_training_ratio * len(value)) ## TODO Fix training ratio issue \n \n threshold_training_count = total_training_length - (int(prediction_training_ratio*len(value)))\n\n if threshold_training_count <= 0:\n print(\"Cant train threshold !! (lstmcnn input window)sequance length is too large\")\n model_summary.append(fname+\",-,-,-,-,-,-,Cant train threshold !! 
(lstmcnn input window)sequance length is too large,\"+str(input_summary['no_of_anomalies'][fname])+\",\"+str(input_summary['first_label'][fname])+\",\"+str(input_summary['length'][fname])+\",\"+str(input_summary['first_label_ratio'][fname]))\n helpers.sherlock_dump_summary(model_summary, model_summary_file)\n continue\n\n threshold_training_ratio = float(threshold_training_count) / float(len(value) - (int(prediction_training_ratio*len(value))))\n\n args = f+\" \"+fout+\" \"+fconfig+\" \"+str(prediction_training_ratio)+\" \"+str(threshold_training_ratio)+\" \"+str(threshold_max_multipler)\n\n print(sherlock + \" \" + args)\n task_output = os.popen(sherlock + \" \" + args).read()\n print(task_output)\n\n print(\"####### Post processing : \" + f)\n\n # TODO Read the fout and calculate mse,TP,FP,FN,TN,\n dataframe = pandas.read_csv(fout)\n prediction_training = np.array(dataframe['prediction_training'])\n prediction = np.array(dataframe['prediction'])\n warp_distance = np.array(dataframe['warp_distance'])\n threshold_training = np.array(dataframe['threshold_training'])\n distance_threshold = np.array(dataframe['distance_threshold'])\n positive_detection = np.array(dataframe['positive_detection'])\n\n ## Check if its a nan generating file\n if np.isnan(prediction[-1]):\n print(\"file with nan\")\n shutil.copyfile(fconfig, nan_folder + fconfig.split('/')[-1])\n shutil.copyfile(f, nan_folder + f.split('/')[-1])\n model_summary.append(fname+\"nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan\")\n helpers.sherlock_dump_summary(model_summary, model_summary_file)\n continue\n \n prediction_training_count = 0\n for i in prediction_training:\n if i == 0:\n break\n prediction_training_count += 1\n\n mse = helpers.MSE(value[prediction_training_count:], prediction[prediction_training_count:])\n\n metrics = confusion_metrics.confusion_metrics(label=label, positive_detection=positive_detection, prediction_training=prediction_training, threshold_training=threshold_training)\n metrics.calculate_metrics()\n\n TP = metrics.get_TP()\n TN = metrics.get_TN()\n FP = metrics.get_FP()\n FN = metrics.get_FN()\n\n # Getting Params to write\n\n ## prediction params \n prediction_params = \"lstmWeight=\"+str(jsond['prediction_model']['model']['lstmW'])+\";\"\n prediction_params += \"cnnWeight=\"+str(jsond['prediction_model']['model']['cnnW'])+\";\"\n prediction_params += \"lstmCells=\"+str(jsond['prediction_model']['model']['LSTM']['memCells'])+\";\"\n prediction_params += \"CL1filters=\"+str(jsond['prediction_model']['model']['CNN']['ConvolutionLayers'][0]['filters'])+\";\"\n prediction_params += \"CL1kernal_size=\"+str(jsond['prediction_model']['model']['CNN']['ConvolutionLayers'][0]['filterSize'])+\";\"\n prediction_params += \"CL1strides=\"+str(jsond['prediction_model']['model']['CNN']['ConvolutionLayers'][0]['stride'])+\";\"\n prediction_params += \"PL1pool_size=\"+str(1)+\";\"\n prediction_params += \"CNNDL1units=\"+str(jsond['prediction_model']['model']['CNN']['FullyConnectedLayers'][0]['outputs'])+\";\"\n prediction_params += \"CNNDL2units=\"+str(jsond['prediction_model']['model']['CNN']['FullyConnectedLayers'][1]['outputs'])+\";\"\n prediction_params += \"CNNDL3units=\"+str(jsond['prediction_model']['model']['CNN']['FullyConnectedLayers'][2]['outputs'])+\";\"\n prediction_params += \"epochs=\"+str(jsond['prediction_model']['trainingIterations'])+\";\"\n prediction_params += \"sequance_length=\"+str(sequance_length)+\";\"\n prediction_params += \"training_ratio=\"+str(prediction_training_ratio)\n\n ## threshold params 
\n threshold_params = \"comparision_window_size=\"+str(jsond['dtw_window'])+\";\"\n threshold_params += \"threshold_max_multipler=\"+str(threshold_max_multipler)+\";\"\n threshold_params += \"training_ratio=\"+str(threshold_training_ratio)+\";\"\n\n ## Update model summary\n model_summary_row = fname+\",\"\n model_summary_row += str(mse)+\",\"\n model_summary_row += str(TP)+\",\"\n model_summary_row += str(FP)+\",\"\n model_summary_row += str(FN)+\",\"\n model_summary_row += str(TN)+\",\"\n model_summary_row += prediction_params+\",\"\n model_summary_row += threshold_params+\",\"\n model_summary_row += str(input_summary['no_of_anomalies'][fname])+\",\"\n model_summary_row += str(input_summary['first_label'][fname])+\",\"\n model_summary_row += str(input_summary['length'][fname])+\",\"\n model_summary_row += str(input_summary['first_label_ratio'][fname])\n model_summary.append(model_summary_row)\n helpers.sherlock_dump_summary(model_summary, model_summary_file)\n\n file_count += 1\n print(\"+++++++++++++++ Processed \" +str(file_count) + \" of \" + str(len(csv_input_files)))\n\nprint(\"Done !!\")"
] | [
[
"numpy.isnan",
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
umitanuki/chainer | [
"225c56b233e684ff4855451d2af4c2fb66915f21"
] | [
"chainer/utils/conv.py"
] | [
"import numpy\nimport six\n\nfrom chainer import cuda\n\n\ndef get_conv_outsize(size, k, s, p, cover_all=False):\n if cover_all:\n return (size + p * 2 - k + s - 1) // s + 1\n else:\n return (size + p * 2 - k) // s + 1\n\n\ndef im2col_cpu(img, kh, kw, sy, sx, ph, pw, pval=0, cover_all=False):\n n, c, h, w = img.shape\n out_h = get_conv_outsize(h, kh, sy, ph, cover_all)\n out_w = get_conv_outsize(w, kw, sx, pw, cover_all)\n\n img = numpy.pad(img,\n ((0, 0), (0, 0), (ph, ph + sy - 1), (pw, pw + sx - 1)),\n mode='constant', constant_values=(pval,))\n col = numpy.ndarray((n, c, kh, kw, out_h, out_w), dtype=img.dtype)\n\n for i in six.moves.range(kh):\n i_lim = i + sy * out_h\n for j in six.moves.range(kw):\n j_lim = j + sx * out_w\n col[:, :, i, j, :, :] = img[:, :, i:i_lim:sy, j:j_lim:sx]\n\n return col\n\n\ndef im2col_gpu(img, kh, kw, sy, sx, ph, pw, cover_all=False):\n n, c, h, w = img.shape\n out_h = get_conv_outsize(h, kh, sy, ph, cover_all)\n out_w = get_conv_outsize(w, kw, sx, pw, cover_all)\n\n col = cuda.empty((n, c, kh, kw, out_h, out_w), dtype=img.dtype)\n cuda.elementwise(\n 'raw T img, int32 h, int32 w, int32 out_h, int32 out_w,'\n 'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw',\n 'T col',\n '''\n int c0 = i / (kh * kw * out_h * out_w);\n int ky = i / (kw * out_h * out_w) % kh;\n int kx = i / (out_h * out_w) % kw;\n int out_y = i / out_w % out_h;\n int out_x = i % out_w;\n\n int in_y = ky + out_y * sy - ph;\n int in_x = kx + out_x * sx - pw;\n if (in_y >= 0 && in_y < h && in_x >= 0 && in_x < w) {\n col = img[in_x + w * (in_y + h * c0)];\n } else {\n col = 0;\n }\n ''',\n 'im2col')(img, h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, col)\n return col\n\n\ndef col2im_cpu(col, sy, sx, ph, pw, h, w):\n n, c, kh, kw, out_h, out_w = col.shape\n\n img = numpy.zeros((n, c, h + 2 * ph + sy - 1, w + 2 * pw + sx - 1),\n dtype=col.dtype)\n for i in six.moves.range(kh):\n i_lim = i + sy * out_h\n for j in six.moves.range(kw):\n j_lim = j + sx * out_w\n img[:, :, i:i_lim:sy, j:j_lim:sx] += col[:, :, i, j, :, :]\n\n return img[:, :, ph:h + ph, pw:w + pw]\n\n\ndef col2im_gpu(col, sy, sx, ph, pw, h, w):\n n, c, kh, kw, out_h, out_w = col.shape\n\n img = cuda.empty((n, c, h, w), dtype=col.dtype)\n cuda.elementwise(\n 'raw T col, int32 h, int32 w, int32 out_h, int32 out_w,'\n 'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw',\n 'T img',\n '''\n int c0 = i / (h * w);\n int y = i / w % h + ph;\n int x = i % w + pw;\n\n int out_y_0 = max(0, (y - kh + sy) / sy);\n int out_y_1 = min(out_h, (y + sy) / sy);\n int out_x_0 = max(0, (x - kw + sx) / sx);\n int out_x_1 = min(out_w, (x + sx) / sx);\n\n T val = 0;\n for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {\n int ky = y - out_y * sy;\n for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {\n int kx = x - out_x * sx;\n int k = out_y + out_h * (kx + kw * (ky + kh * c0));\n val += col[out_x + out_w * k];\n }\n }\n img = val;\n ''',\n 'col2im')(col, h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, img)\n return img\n"
] | [
[
"numpy.zeros",
"numpy.pad",
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
polceanum/data.augmentation | [
"d47d93f20bca453bfda94e5cd714399fd35a6287"
] | [
"scripts/dataClean.py"
] | [
"# script: Data cleaner. Reads video files and generates pickles of cropped objects.\n# author: Mihai Polceanu\n\nimport cv2\nimport numpy as np\nimport os\nimport sys\nimport pickle\nimport argparse\n\ndef processImage(img):\n # the following two lines define the range of colors that is THROWN AWAY\n # change these values if you use a different background in the videos\n\n lower_red = np.array([100,100,100])\n upper_red = np.array([240,240,240])\n\n # obtain mask given the range\n mask = cv2.inRange(img, lower_red, upper_red)\n\n # retain regions given by mask\n res = cv2.bitwise_and(img,img, mask= mask)\n\n # remove regions calculated above\n res = img-res\n\n # result is the image regions that are NOT in the defined interval\n return res, cv2.bitwise_not(mask)\n\ndef cropObject(img, mask):\n # given that colors have been well selected, search for margins\n\n top = -1\n for i in range(img.shape[0]):\n if np.sum(mask[i,:]) > 0:\n top = i\n break\n\n bottom = -1\n for i in range(img.shape[0]):\n if np.sum(mask[img.shape[0]-i-1,:]) > 0:\n bottom = img.shape[0]-i-1\n break\n\n left = -1\n for j in range(img.shape[1]):\n if np.sum(mask[:,j]) > 0:\n left = j\n break\n\n right = -1\n for j in range(img.shape[1]):\n if np.sum(mask[:,img.shape[1]-j-1]) > 0:\n right = img.shape[1]-j-1\n break\n\n # result is the cropped region, given the found margins\n return img[top:bottom, left:right, :], mask[top:bottom, left:right]\n\n\nprint('OpenCV version', cv2.__version__)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Data cleaner.')\n parser.add_argument('--skip', metavar='N', type=int, default=1, help='keep one in every N frames, default 1')\n parser.add_argument('--maxCropSize', metavar='C', type=int, default=480, help='max crop size (width or height), default 480')\n parser.add_argument('--display', metavar='D', type=int, default=1, help='show cropped objects (0==False, 1==True), default 1')\n parser.add_argument('--verbose', metavar='V', type=int, default=1, help='output text to console (0==False, 1==True), default 1')\n args = parser.parse_args()\n \n # safety check to avoid replacing previous data !\n if not os.path.exists('../generated_crop_data/'):\n # generate folders\n os.makedirs('../generated_crop_data/')\n\n # load all files in the video folder\n for root, dirs, files in os.walk(\"../dataset_videos\"): \n for filename in files:\n if args.verbose:\n print('Processing file', filename)\n\n # start video capture\n vidcap = cv2.VideoCapture(\"../dataset_videos/\"+filename)\n\n # read first frame\n success,image = vidcap.read()\n\n # label is the first string before '_'\n label = filename.split(\"_\")[0]\n\n if args.verbose:\n print('Video data shape', image.shape)\n\n count = 0\n success = True\n\n cropList = []\n while success:\n skip = args.skip\n\n if count % skip == 0:\n if args.verbose:\n print('frame',count)\n\n img, mask = processImage(image)\n\n kernel1 = np.ones((4,4), np.uint8)\n kernel2 = np.ones((7,7), np.uint8)\n\n mask = cv2.erode(mask, kernel1, iterations=2)\n\n mask = cv2.dilate(mask, kernel2, iterations=2)\n\n crop, cmask = cropObject(img, mask)\n\n maxSize = max(crop.shape[0], crop.shape[1])\n\n #ratio = 480.0/maxSize # all crops have max size (width or height) of 480 px\n ratio = float(args.maxCropSize)/maxSize # all crops have maxCropSize (width or height)\n\n small_crop = cv2.resize(crop, (int(crop.shape[1]*ratio), int(crop.shape[0]*ratio)))\n small_cmask = cv2.resize(cmask, (int(cmask.shape[1]*ratio), int(cmask.shape[0]*ratio)))\n\n 
cropList.append(small_crop)\n\n if args.display:\n cv2.imshow(\"Cropped object\", small_crop)\n cv2.waitKey(1)\n\n #read next frame\n success,image = vidcap.read()\n count += 1\n\n # save pickle with crops\n with open('../generated_crop_data/'+filename+'_cropped.pickle', 'wb') as handle:\n pickle.dump(cropList, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n print('---------------------------------------------------------------')\n print('| SANITY CHECK: ../generated_crop_data folder already exists! |')\n print('| Please move or remove it to generate new data. |')\n print('---------------------------------------------------------------')"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
benlazarine/GARD | [
"567bdcf59a258bdf981b395f24a5af669da628b3"
] | [
"helpers/erai/convert.py"
] | [
"import numpy as np\nfrom bunch import Bunch\n\nR=8.3144621 # J/mol/K\ncp=29.19 # J/mol/K =1.012 J/g/K\ng=9.81 # m/s^2\n\ndef subset_pressure_levels(output_data):\n levels = [0,6,11,15,20,26,28,31]\n\n output_data.u = output_data.u[:,levels,:,:]\n output_data.v = output_data.v[:,levels,:,:]\n output_data.p = output_data.p[:,levels,:,:]\n output_data.z = output_data.z[:,levels,:,:]\n output_data.t = output_data.t[:,levels,:,:]\n output_data.qv = output_data.qv[:,levels,:,:]\n output_data.rh = output_data.rh[:,levels,:,:]\n\n\n\n# gard_atm_var=[\"u\",\"v\",\"gph\",\"t\",\"qv\",\"ln_p_sfc\",\"cloud\",\"ice\",\"sigma\"]\ndef convert_atm(data):\n output_data=Bunch()\n output_data.u = data.u[np.newaxis, ::-1,::-1,:] # m/s\n output_data.v = data.v[np.newaxis, ::-1,::-1,:] # m/s\n output_data.hgt = data.gph[-1,::-1,:]/g # (m^2/s^2) / (m/s^2) = m\n\n # calculate pressure in Pa from ln(sfc_press) and hybrid sigma coordinates\n output_data.p = np.zeros((output_data.u.shape))\n nlevels = len(data.Plev)\n for i in range(nlevels):\n output_data.p[0,i,:,:] = data.Plev[nlevels - i - 1]\n\n output_data.z = data.gph[np.newaxis,::-1,::-1,:]/9.81 # m\n output_data.t = data.t[np.newaxis,::-1,::-1,:] # K\n output_data.qv = data.qv[np.newaxis,::-1,::-1,:] # kg/kg\n output_data.rh = data.rh[np.newaxis,::-1,::-1,:] # kg/kg\n\n subset_pressure_levels(output_data)\n\n return output_data\n\n# gard_sfc_var=[\"sensible_heat\",\"latent_heat\",\"hgt_98\",\"PBL_height\"]\ndef convert_sfc(data):\n\n dt = 3.0 * 60.0 * 60.0\n output_data=Bunch()\n\n output_data.precip_total = data.precip_total[np.newaxis,::-1,:] # kg/m^2\n output_data.precip_conv = data.precip_conv[np.newaxis,::-1,:] # kg/m^2\n output_data.sensible_heat = data.sensible_heat[np.newaxis,::-1,:]/dt # W/m^2\n output_data.latent_heat = data.latent_heat[np.newaxis,::-1,:]/dt # W/m^2\n output_data.sfc_hgt = data.hgt_98[::-1,:]/g # (m^2/s^2) / (m/s^2) = m\n output_data.PBL_height = data.PBL_height[np.newaxis,::-1,:] # m\n output_data.tskin = data.tskin[np.newaxis,::-1,:] # K\n output_data.tmax = data.tmax[np.newaxis,::-1,:] # K\n output_data.tmin = data.tmin[np.newaxis,::-1,:] # K\n output_data.sw = data.sw[np.newaxis,::-1,:] / dt # convert from Joules to W /m^2\n output_data.lw = data.lw[np.newaxis,::-1,:] / dt # convert from Joules to W /m^2\n\n return output_data\n\ndef era2gard(data):\n output_data=Bunch()\n atm=convert_atm(data.atm)\n sfc=convert_sfc(data.sfc)\n\n for k in atm.keys():\n output_data[k]=atm[k]\n for k in sfc.keys():\n output_data[k]=sfc[k]\n\n return output_data\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
khalednakhleh/vq_algorithms | [
"2b3a2954c3cf4b72f9a9b6c5d739c873e4d6f999"
] | [
"main.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 16:37:55 2018\n\nName: khalednakhleh\n\"\"\"\nimport numpy as np\nfrom vq_algorithms import Lloyd_max, LBG\nimport time\n\n#####################################################################\n\"\"\"\nScalar quantization of a 1-D input source vector x. \n\nLloyd-max algorithm was used to find the optimal quantization points \nand intervals that best minimize the quantization error rate. \n\nThe script file starts with an inital set of intervals. Algorithm implementation\ncan be found in the lloyd_max.py file.\n\"\"\"\n#####################################################################\n\ndef main():\n \n # seed for random number generation \n np.random.seed(1996) \n # number of samples for random variable\n n = 10 ** 6\n # number of iterations\n iterations = 200\n # input vector's dimension\n dim = 1\n # number of quantization intervals\n m = 10\n # variance for Gaussian distributed values \n variance = 1 \n # mean for Gaussian distributed values \n mean = 0 \n # sample values\n x = variance * np.random.randn(dim, n) + mean\n \n#####################################################################\n \n if dim == 1:\n start = time.time() # Starting timer\n model = Lloyd_max(m, x, iterations) # Initalizing the algorithm\n end = time.time() # Ending timer \n else:\n start = time.time() # Starting timer\n model = LBG(m, x, iterations) # Initalizing the algorithm\n end = time.time() # Ending timer \n\n \n # Printing the results and elapsed time\n print(\"\\nFinal centroids values: \\n\\n\" + str(model.centroids))\n print(\"\\nFinal intervals values: \\n\\n\" + str(model.intervals))\n print(\"\\n\\nTotal run time: \" + str(round(end - start, 3)) + \" seconds.\\n\")\n\nif __name__ == \"__main__\":\n main()\n\n"
] | [
[
"numpy.random.randn",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YujiaBao/Predict-then-Interpolate | [
"50ab39de5ff70217d47a08a146a0e7b60f317b8e"
] | [
"src/models/embedding/resnet.py"
] | [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision.models import resnet50\n\n\nclass Resnet50(nn.Module):\n def __init__(self):\n super(Resnet50, self).__init__()\n resnet = resnet50(pretrained=True, progress=True)\n modules=list(resnet.children())[:-1]\n self.main = nn.Sequential(*modules)\n self.out_dim = resnet.fc.in_features\n\n def forward(self, x):\n x = self.main(x)\n\n return x.squeeze(-1).squeeze(-1)\n"
] | [
[
"torch.nn.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kjang96/flow-1 | [
"92b16feee3ee62cc176aa047dbb89d3b164b972a"
] | [
"flow/envs/green_wave_env.py"
] | [
"\"\"\"Environments for scenarios with traffic lights.\n\nThese environments are used to train traffic lights to regulate traffic flow\nthrough an n x m grid.\n\"\"\"\n\nimport numpy as np\nimport re\n\nfrom gym.spaces.box import Box\nfrom gym.spaces.discrete import Discrete\nfrom gym.spaces.tuple_space import Tuple\n\nfrom flow.core import rewards\nfrom flow.envs.base_env import Env\n\nADDITIONAL_ENV_PARAMS = {\n # minimum switch time for each traffic light (in seconds)\n \"switch_time\": 2.0,\n # whether the traffic lights should be actuated by sumo or RL\n # options are \"controlled\" and \"actuated\"\n \"tl_type\": \"controlled\",\n # determines whether the action space is meant to be discrete or continuous\n \"discrete\": False,\n}\n\nADDITIONAL_PO_ENV_PARAMS = {\n # num of vehicles the agent can observe on each incoming edge\n \"num_observed\": 2,\n # velocity to use in reward functions\n \"target_velocity\": 30,\n}\n\n\nclass TrafficLightGridEnv(Env):\n \"\"\"Environment used to train traffic lights.\n\n Required from env_params:\n\n * switch_time: minimum time a light must be constant before\n it switches (in seconds).\n Earlier RL commands are ignored.\n * tl_type: whether the traffic lights should be actuated by sumo or RL,\n options are respectively \"actuated\" and \"controlled\"\n * discrete: determines whether the action space is meant to be discrete or\n continuous\n\n States\n An observation is the distance of each vehicle to its intersection, a\n number uniquely identifying which edge the vehicle is on, and the speed\n of the vehicle.\n\n Actions\n The action space consist of a list of float variables ranging from 0-1\n specifying whether a traffic light is supposed to switch or not. The\n actions are sent to the traffic light in the grid from left to right\n and then top to bottom.\n\n Rewards\n The reward is the negative per vehicle delay minus a penalty for\n switching traffic lights\n\n Termination\n A rollout is terminated once the time horizon is reached.\n\n Additional\n Vehicles are rerouted to the start of their original routes once they\n reach the end of the network in order to ensure a constant number of\n vehicles.\n\n Attributes\n ----------\n grid_array : dict\n Array containing information on the grid, such as the length of roads,\n row_num, col_num, number of initial cars\n rows : int\n Number of rows in this grid scenario\n cols : int\n Number of columns in this grid scenario\n num_traffic_lights : int\n Number of intersection in this grid scenario\n tl_type : str\n Type of traffic lights, either 'actuated' or 'static'\n steps : int\n Horizon of this experiment, see EnvParams.horion\n obs_var_labels : dict\n Referenced in the visualizer. Tells the visualizer which\n metrics to track\n node_mapping : dict\n Dictionary mapping intersections / nodes (nomenclature is used\n interchangeably here) to the edges that are leading to said\n intersection / node\n last_change : np array [num_traffic_lights]x1 np array\n Multi-dimensional array keeping track, in timesteps, of how much time\n has passed since the last change to yellow for each traffic light\n direction : np array [num_traffic_lights]x1 np array\n Multi-dimensional array keeping track of which direction in traffic\n light is flowing. 0 indicates flow from top to bottom, and\n 1 indicates flow from left to right\n currently_yellow : np array [num_traffic_lights]x1 np array\n Multi-dimensional array keeping track of whether or not each traffic\n light is currently yellow. 
1 if yellow, 0 if not\n min_switch_time : np array [num_traffic_lights]x1 np array\n The minimum time in timesteps that a light can be yellow. Serves\n as a lower bound\n discrete : bool\n Indicates whether or not the action space is discrete. See below for\n more information:\n https://github.com/openai/gym/blob/master/gym/spaces/discrete.py\n \"\"\"\n\n def __init__(self, env_params, sim_params, scenario, simulator='traci'):\n\n for p in ADDITIONAL_ENV_PARAMS.keys():\n if p not in env_params.additional_params:\n raise KeyError(\n 'Environment parameter \"{}\" not supplied'.format(p))\n\n self.grid_array = scenario.net_params.additional_params[\"grid_array\"]\n self.rows = self.grid_array[\"row_num\"]\n self.cols = self.grid_array[\"col_num\"]\n # self.num_observed = self.grid_array.get(\"num_observed\", 3)\n self.num_traffic_lights = self.rows * self.cols\n self.tl_type = env_params.additional_params.get('tl_type')\n\n super().__init__(env_params, sim_params, scenario, simulator)\n\n # Saving env variables for plotting\n self.steps = env_params.horizon\n self.obs_var_labels = {\n 'edges': np.zeros((self.steps, self.k.vehicle.num_vehicles)),\n 'velocities': np.zeros((self.steps, self.k.vehicle.num_vehicles)),\n 'positions': np.zeros((self.steps, self.k.vehicle.num_vehicles))\n }\n\n # Keeps track of the last time the traffic lights in an intersection\n # were allowed to change (the last time the lights were allowed to\n # change from a red-green state to a red-yellow state.)\n self.last_change = np.zeros((self.rows * self.cols, 1))\n # Keeps track of the direction of the intersection (the direction that\n # is currently being allowed to flow. 0 indicates flow from top to\n # bottom, and 1 indicates flow from left to right.)\n self.direction = np.zeros((self.rows * self.cols, 1))\n # Value of 1 indicates that the intersection is in a red-yellow state.\n # value 0 indicates that the intersection is in a red-green state.\n self.currently_yellow = np.zeros((self.rows * self.cols, 1))\n\n # when this hits min_switch_time we change from yellow to red\n # the second column indicates the direction that is currently being\n # allowed to flow. 
0 is flowing top to bottom, 1 is left to right\n # For third column, 0 signifies yellow and 1 green or red\n self.min_switch_time = env_params.additional_params[\"switch_time\"]\n\n if self.tl_type != \"actuated\":\n for i in range(self.rows * self.cols):\n self.k.traffic_light.set_state(\n node_id='center' + str(i), state=\"GrGr\")\n self.currently_yellow[i] = 0\n\n # # Additional Information for Plotting\n # self.edge_mapping = {\"top\": [], \"bot\": [], \"right\": [], \"left\": []}\n # for i, veh_id in enumerate(self.k.vehicle.get_ids()):\n # edge = self.k.vehicle.get_edge(veh_id)\n # for key in self.edge_mapping:\n # if key in edge:\n # self.edge_mapping[key].append(i)\n # break\n\n # check whether the action space is meant to be discrete or continuous\n self.discrete = env_params.additional_params.get(\"discrete\", False)\n\n @property\n def action_space(self):\n \"\"\"See class definition.\"\"\"\n if self.discrete:\n return Discrete(2 ** self.num_traffic_lights)\n else:\n return Box(\n low=-1,\n high=1,\n shape=(self.num_traffic_lights,),\n dtype=np.float32)\n\n @property\n def observation_space(self):\n \"\"\"See class definition.\"\"\"\n speed = Box(\n low=0,\n high=1,\n shape=(self.initial_vehicles.num_vehicles,),\n dtype=np.float32)\n dist_to_intersec = Box(\n low=0.,\n high=np.inf,\n shape=(self.initial_vehicles.num_vehicles,),\n dtype=np.float32)\n edge_num = Box(\n low=0.,\n high=1,\n shape=(self.initial_vehicles.num_vehicles,),\n dtype=np.float32)\n traffic_lights = Box(\n low=0.,\n high=1,\n shape=(3 * self.rows * self.cols,),\n dtype=np.float32)\n return Tuple((speed, dist_to_intersec, edge_num, traffic_lights))\n\n def get_state(self):\n \"\"\"See class definition.\"\"\"\n # compute the normalizers\n grid_array = self.net_params.additional_params[\"grid_array\"]\n max_dist = max(grid_array[\"short_length\"],\n grid_array[\"long_length\"],\n grid_array[\"inner_length\"])\n\n # get the state arrays\n speeds = [\n self.k.vehicle.get_speed(veh_id) / self.k.scenario.max_speed()\n for veh_id in self.k.vehicle.get_ids()\n ]\n dist_to_intersec = [\n self.get_distance_to_intersection(veh_id) / max_dist\n for veh_id in self.k.vehicle.get_ids()\n ]\n edges = [\n self._convert_edge(self.k.vehicle.get_edge(veh_id)) /\n (self.k.scenario.network.num_edges - 1)\n for veh_id in self.k.vehicle.get_ids()\n ]\n\n state = [\n speeds, dist_to_intersec, edges,\n self.last_change.flatten().tolist(),\n self.direction.flatten().tolist(),\n self.currently_yellow.flatten().tolist()\n ]\n return np.array(state)\n\n def _apply_rl_actions(self, rl_actions):\n \"\"\"See class definition.\"\"\"\n # check if the action space is discrete\n if self.discrete:\n # convert single value to list of 0's and 1's\n rl_mask = [int(x) for x in list('{0:0b}'.format(rl_actions))]\n rl_mask = [0] * (self.num_traffic_lights - len(rl_mask)) + rl_mask\n else:\n # convert values less than 0.0 to zero and above to 1. 
0's indicate\n # that we should not switch the direction\n rl_mask = rl_actions > 0.0\n\n for i, action in enumerate(rl_mask):\n if self.currently_yellow[i] == 1: # currently yellow\n self.last_change[i] += self.sim_step\n # Check if our timer has exceeded the yellow phase, meaning it\n # should switch to red\n if self.last_change[i] >= self.min_switch_time:\n if self.direction[i] == 0:\n self.k.traffic_light.set_state(\n node_id='center{}'.format(i),\n state=\"GrGr\")\n else:\n self.k.traffic_light.set_state(\n node_id='center{}'.format(i),\n state='rGrG')\n self.currently_yellow[i] = 0\n else:\n if action:\n if self.direction[i] == 0:\n self.k.traffic_light.set_state(\n node_id='center{}'.format(i),\n state='yryr')\n else:\n self.k.traffic_light.set_state(\n node_id='center{}'.format(i),\n state='ryry')\n self.last_change[i] = 0.0\n self.direction[i] = not self.direction[i]\n self.currently_yellow[i] = 1\n\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"See class definition.\"\"\"\n return - rewards.min_delay_unscaled(self) \\\n - rewards.boolean_action_penalty(rl_actions >= 0.5, gain=1.0)\n\n # ===============================\n # ============ UTILS ============\n # ===============================\n\n def get_distance_to_intersection(self, veh_ids):\n \"\"\"Determine the distance from a vehicle to its next intersection.\n\n Parameters\n ----------\n veh_ids : str or str list\n vehicle(s) identifier(s)\n\n Returns\n -------\n float (or float list)\n distance to closest intersection\n \"\"\"\n if isinstance(veh_ids, list):\n return [self.get_distance_to_intersection(veh_id)\n for veh_id in veh_ids]\n return self.find_intersection_dist(veh_ids)\n\n def find_intersection_dist(self, veh_id):\n \"\"\"Return distance from intersection.\n\n Return the distance from the vehicle's current position to the position\n of the node it is heading toward.\n \"\"\"\n edge_id = self.k.vehicle.get_edge(veh_id)\n # FIXME this might not be the best way of handling this\n if edge_id == \"\":\n return -10\n if 'center' in edge_id:\n return 0\n edge_len = self.k.scenario.edge_length(edge_id)\n relative_pos = self.k.vehicle.get_position(veh_id)\n dist = edge_len - relative_pos\n return dist\n\n def _convert_edge(self, edges):\n \"\"\"Convert the string edge to a number.\n\n Start at the bottom left vertical edge and going right and then up, so\n the bottom left vertical edge is zero, the right edge beside it is 1.\n\n The numbers are assigned along the lowest column, then the lowest row,\n then the second lowest column, etc. 
Left goes before right, top goes\n before bottom.\n\n The values are zero indexed.\n\n Parameters\n ----------\n edges : list of str or str\n name of the edge(s)\n\n Returns\n -------\n list of int or int\n a number uniquely identifying each edge\n \"\"\"\n if isinstance(edges, list):\n return [self._split_edge(edge) for edge in edges]\n else:\n return self._split_edge(edges)\n\n def _split_edge(self, edge):\n \"\"\"Act as utility function for convert_edge.\"\"\"\n if edge:\n if edge[0] == \":\": # center\n center_index = int(edge.split(\"center\")[1][0])\n base = ((self.cols + 1) * self.rows * 2) \\\n + ((self.rows + 1) * self.cols * 2)\n return base + center_index + 1\n else:\n pattern = re.compile(r\"[a-zA-Z]+\")\n edge_type = pattern.match(edge).group()\n edge = edge.split(edge_type)[1].split('_')\n row_index, col_index = [int(x) for x in edge]\n if edge_type in ['bot', 'top']:\n rows_below = 2 * (self.cols + 1) * row_index\n cols_below = 2 * (self.cols * (row_index + 1))\n edge_num = rows_below + cols_below + 2 * col_index + 1\n return edge_num if edge_type == 'bot' else edge_num + 1\n if edge_type in ['left', 'right']:\n rows_below = 2 * (self.cols + 1) * row_index\n cols_below = 2 * (self.cols * row_index)\n edge_num = rows_below + cols_below + 2 * col_index + 1\n return edge_num if edge_type == 'left' else edge_num + 1\n else:\n return 0\n\n def _get_relative_node(self, agent_id, direction):\n \"\"\"Yield node number of traffic light agent in a given direction.\n\n For example, the nodes in a grid with 2 rows and 3 columns are\n indexed as follows:\n\n | | |\n --- 3 --- 4 --- 5 ---\n | | |\n --- 0 --- 1 --- 2 ---\n | | |\n\n See flow.scenarios.grid for more information.\n\n Example of function usage:\n - Seeking the \"top\" direction to \":center0\" would return 3.\n - Seeking the \"bottom\" direction to \":center0\" would return -1.\n\n :param agent_id: agent id of the form \":center#\"\n :param direction: top, bottom, left, right\n :return: node number\n \"\"\"\n ID_IDX = 1\n agent_id_num = int(agent_id.split(\"center\")[ID_IDX])\n if direction == \"top\":\n node = agent_id_num + self.cols\n if node >= self.cols * self.rows:\n node = -1\n elif direction == \"bottom\":\n node = agent_id_num - self.cols\n if node < 0:\n node = -1\n elif direction == \"left\":\n if agent_id_num % self.cols == 0:\n node = -1\n else:\n node = agent_id_num - 1\n elif direction == \"right\":\n if agent_id_num % self.cols == self.cols - 1:\n node = -1\n else:\n node = agent_id_num + 1\n else:\n raise NotImplementedError\n\n return node\n\n def additional_command(self):\n \"\"\"See parent class.\n\n Used to insert vehicles that are on the exit edge and place them\n back on their entrance edge.\n \"\"\"\n for veh_id in self.k.vehicle.get_ids():\n self._reroute_if_final_edge(veh_id)\n\n def _reroute_if_final_edge(self, veh_id):\n \"\"\"Reroute vehicle associated with veh_id.\n\n Checks if an edge is the final edge. 
If it is return the route it\n should start off at.\n \"\"\"\n edge = self.k.vehicle.get_edge(veh_id)\n if edge == \"\":\n return\n if edge[0] == \":\": # center edge\n return\n pattern = re.compile(r\"[a-zA-Z]+\")\n edge_type = pattern.match(edge).group()\n edge = edge.split(edge_type)[1].split('_')\n row_index, col_index = [int(x) for x in edge]\n\n # find the route that we're going to place the vehicle on if we are\n # going to remove it\n route_id = None\n if edge_type == 'bot' and col_index == self.cols:\n route_id = \"bot{}_0\".format(row_index)\n elif edge_type == 'top' and col_index == 0:\n route_id = \"top{}_{}\".format(row_index, self.cols)\n elif edge_type == 'left' and row_index == 0:\n route_id = \"left{}_{}\".format(self.rows, col_index)\n elif edge_type == 'right' and row_index == self.rows:\n route_id = \"right0_{}\".format(col_index)\n\n if route_id is not None:\n type_id = self.k.vehicle.get_type(veh_id)\n lane_index = self.k.vehicle.get_lane(veh_id)\n # remove the vehicle\n self.k.vehicle.remove(veh_id)\n # reintroduce it at the start of the network\n self.k.vehicle.add(\n veh_id=veh_id,\n edge=route_id,\n type_id=str(type_id),\n lane=str(lane_index),\n pos=\"0\",\n speed=\"max\")\n\n def get_closest_to_intersection(self, edges, num_closest, padding=False):\n \"\"\"Return the IDs of the vehicles that are closest to an intersection.\n\n For each edge in edges, return the IDs (veh_id) of the num_closest\n vehicles in edge that are closest to an intersection (the intersection\n they are heading towards).\n\n This function performs no check on whether or not edges are going\n towards an intersection or not, it just gets the vehicles that are\n closest to the end of their edges.\n\n If there are less than num_closest vehicles on an edge, the function\n performs padding by adding empty strings \"\" instead of vehicle ids if\n the padding parameter is set to True.\n\n Parameters\n ----------\n edges : str | str list\n ID of an edge or list of edge IDs.\n num_closest : int (> 0)\n Number of vehicles to consider on each edge.\n padding : bool (default False)\n If there are less than num_closest vehicles on an edge, perform\n padding by adding empty strings \"\" instead of vehicle ids if the\n padding parameter is set to True (note: leaving padding to False\n while passing a list of several edges as parameter can lead to\n information loss since you will not know which edge, if any,\n contains less than num_closest vehicles).\n\n Usage\n -----\n For example, consider the following network, composed of 4 edges\n whose ids are \"edge0\", \"edge1\", \"edge2\" and \"edge3\", the numbers\n being vehicles all headed towards intersection x. 
The ID of the vehicle\n with number n is \"veh{n}\" (edge \"veh0\", \"veh1\"...).\n\n edge1\n | |\n | 7 |\n | 8 |\n -------------| |-------------\n edge0 1 2 3 4 5 6 x edge2\n -------------| |-------------\n | 9 |\n | 10|\n | 11|\n edge3\n\n And consider the following example calls on the previous network:\n\n >>> get_closest_to_intersection(\"edge0\", 4)\n [\"veh6\", \"veh5\", \"veh4\", \"veh3\"]\n\n >>> get_closest_to_intersection(\"edge0\", 8)\n [\"veh6\", \"veh5\", \"veh4\", \"veh3\", \"veh2\", \"veh1\"]\n\n >>> get_closest_to_intersection(\"edge0\", 8, padding=True)\n [\"veh6\", \"veh5\", \"veh4\", \"veh3\", \"veh2\", \"veh1\", \"\", \"\"]\n\n >>> get_closest_to_intersection([\"edge0\", \"edge1\", \"edge2\", \"edge3\"],\n 3, padding=True)\n [\"veh6\", \"veh5\", \"veh4\", \"veh8\", \"veh7\", \"\", \"\", \"\", \"\", \"veh9\",\n \"veh10\", \"veh11\"]\n\n Returns\n -------\n str list\n If n is the number of edges given as parameters, then the returned\n list contains n * num_closest vehicle IDs.\n\n Raises\n ------\n ValueError\n if num_closest <= 0\n \"\"\"\n if num_closest <= 0:\n raise ValueError(\"Function get_closest_to_intersection called with\"\n \"parameter num_closest={}, but num_closest should\"\n \"be positive\".format(num_closest))\n\n if isinstance(edges, list):\n ids = [self.get_closest_to_intersection(edge, num_closest)\n for edge in edges]\n # flatten the list and return it\n return [veh_id for sublist in ids for veh_id in sublist]\n\n # get the ids of all the vehicles on the edge 'edges' ordered by\n # increasing distance to end of edge (intersection)\n veh_ids_ordered = sorted(self.k.vehicle.get_ids_by_edge(edges),\n key=self.get_distance_to_intersection)\n\n # return the ids of the num_closest vehicles closest to the\n # intersection, potentially with \"\"-padding.\n pad_lst = [\"\"] * (num_closest - len(veh_ids_ordered))\n return veh_ids_ordered[:num_closest] + (pad_lst if padding else [])\n\n\nclass PO_TrafficLightGridEnv(TrafficLightGridEnv):\n \"\"\"Environment used to train traffic lights.\n\n Required from env_params:\n\n * switch_time: minimum switch time for each traffic light (in seconds).\n Earlier RL commands are ignored.\n * num_observed: number of vehicles nearest each intersection that is\n observed in the state space; defaults to 2\n\n States\n An observation is the number of observed vehicles in each intersection\n closest to the traffic lights, a number uniquely identifying which\n edge the vehicle is on, and the speed of the vehicle.\n\n Actions\n The action space consist of a list of float variables ranging from 0-1\n specifying whether a traffic light is supposed to switch or not. 
The\n actions are sent to the traffic light in the grid from left to right\n and then top to bottom.\n\n Rewards\n The reward is the delay of each vehicle minus a penalty for switching\n traffic lights\n\n Termination\n A rollout is terminated once the time horizon is reached.\n\n Additional\n Vehicles are rerouted to the start of their original routes once they\n reach the end of the network in order to ensure a constant number of\n vehicles.\n\n \"\"\"\n\n def __init__(self, env_params, sim_params, scenario, simulator='traci'):\n super().__init__(env_params, sim_params, scenario, simulator)\n\n for p in ADDITIONAL_PO_ENV_PARAMS.keys():\n if p not in env_params.additional_params:\n raise KeyError(\n 'Environment parameter \"{}\" not supplied'.format(p))\n\n # number of vehicles nearest each intersection that is observed in the\n # state space; defaults to 2\n self.num_observed = env_params.additional_params.get(\"num_observed\", 2)\n\n # used during visualization\n self.observed_ids = []\n\n @property\n def observation_space(self):\n \"\"\"State space that is partially observed.\n\n Velocities, distance to intersections, edge number (for nearby\n vehicles) from each direction, edge information, and traffic light\n state.\n \"\"\"\n tl_box = Box(\n low=0.,\n high=1,\n shape=(3 * 4 * self.num_observed * self.num_traffic_lights +\n 2 * len(self.k.scenario.get_edge_list()) +\n 3 * self.num_traffic_lights,),\n dtype=np.float32)\n return tl_box\n\n def get_state(self):\n \"\"\"See parent class.\n\n Returns self.num_observed number of vehicles closest to each traffic\n light and for each vehicle its velocity, distance to intersection,\n edge_number traffic light state. This is partially observed\n \"\"\"\n speeds = []\n dist_to_intersec = []\n edge_number = []\n max_speed = max(\n self.k.scenario.speed_limit(edge)\n for edge in self.k.scenario.get_edge_list())\n grid_array = self.net_params.additional_params[\"grid_array\"]\n max_dist = max(grid_array[\"short_length\"], grid_array[\"long_length\"],\n grid_array[\"inner_length\"])\n all_observed_ids = []\n\n for _, edges in self.scenario.node_mapping:\n for edge in edges:\n observed_ids = \\\n self.get_closest_to_intersection(edge, self.num_observed)\n all_observed_ids += observed_ids\n\n # check which edges we have so we can always pad in the right\n # positions\n speeds += [\n self.k.vehicle.get_speed(veh_id) / max_speed\n for veh_id in observed_ids\n ]\n dist_to_intersec += [\n (self.k.scenario.edge_length(\n self.k.vehicle.get_edge(veh_id)) -\n self.k.vehicle.get_position(veh_id)) / max_dist\n for veh_id in observed_ids\n ]\n edge_number += \\\n [self._convert_edge(self.k.vehicle.get_edge(veh_id)) /\n (self.k.scenario.network.num_edges - 1)\n for veh_id in observed_ids]\n\n if len(observed_ids) < self.num_observed:\n diff = self.num_observed - len(observed_ids)\n speeds += [0] * diff\n dist_to_intersec += [0] * diff\n edge_number += [0] * diff\n\n # now add in the density and average velocity on the edges\n density = []\n velocity_avg = []\n for edge in self.k.scenario.get_edge_list():\n ids = self.k.vehicle.get_ids_by_edge(edge)\n if len(ids) > 0:\n # TODO(cathywu) Why is there a 5 here?\n density += [5 * len(ids) / self.k.scenario.edge_length(edge)]\n velocity_avg += [np.mean(\n [self.k.vehicle.get_speed(veh_id) for veh_id in\n ids]) / max_speed]\n else:\n density += [0]\n velocity_avg += [0]\n self.observed_ids = all_observed_ids\n return np.array(\n np.concatenate([\n speeds, dist_to_intersec, edge_number, density, velocity_avg,\n 
self.last_change.flatten().tolist(),\n self.direction.flatten().tolist(),\n self.currently_yellow.flatten().tolist()\n ]))\n\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"See class definition.\"\"\"\n if self.env_params.evaluate:\n return - rewards.min_delay_unscaled(self)\n else:\n return (- rewards.min_delay_unscaled(self) +\n rewards.penalize_standstill(self, gain=0.2))\n\n def additional_command(self):\n \"\"\"See class definition.\"\"\"\n # specify observed vehicles\n [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids]\n\n\nclass GreenWaveTestEnv(TrafficLightGridEnv):\n \"\"\"\n Class for use in testing.\n\n This class overrides RL methods of green wave so we can test\n construction without needing to specify RL methods\n \"\"\"\n\n def _apply_rl_actions(self, rl_actions):\n \"\"\"See class definition.\"\"\"\n pass\n\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"No return, for testing purposes.\"\"\"\n return 0\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ecacikgoz97/Probing | [
"5df8f9fedeffdd2c6f9328b6ff47e36adca49dbb"
] | [
"pos_tagging/probe_train7.py"
] | [
"# -----------------------------------------------------------\n# Date: 2021/12/19 \n# Author: Muge Kural\n# Description: Trainer of surface form pos tagging probe, saves the results under ./results directory.\n# -----------------------------------------------------------\n\nimport sys, argparse, random, torch, json, matplotlib, os\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch import optim\nfrom common.utils import *\nfrom data.data import build_data, log_data\nfrom models.gpt3 import GPT3\nfrom common.vocab import VocabEntry\nfrom probe import MiniGPT_Probe, MiniGPT_Probe2\nmatplotlib.use('Agg')\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nlocal_set = False\nif local_set == True:\n working_path = \"/Users/emrecanacikgoz/Desktop/\"\nelse:\n working_path = \"/kuacc/users/eacikgoz17/\" \n\ndef test(batches, mode, args):\n epoch_loss = 0; epoch_acc = 0; epoch_num_instances = 0\n numbatches = len(batches)\n indices = list(range(numbatches))\n for i, idx in enumerate(indices):\n # (batchsize, t)\n surf, surfpos = batches[idx] \n loss, acc = args.model.probe_loss(surf, surfpos)\n epoch_num_instances += surf.size(0)\n epoch_loss += loss.item()\n epoch_acc += acc\n nll = epoch_loss / numbatches\n acc = epoch_acc / epoch_num_instances\n args.logger.write('%s --- avg_loss: %.4f, acc: %.4f \\n' % (mode, nll, acc))\n return nll, acc\n\ndef train(data, args):\n trnbatches, valbatches, tstbatches = data\n opt = optim.Adam(filter(lambda p: p.requires_grad, args.model.parameters()), lr=args.lr)\n scheduler = ReduceLROnPlateau(opt, 'min', verbose=1, factor=0.5)\n for name, prm in args.model.named_parameters():\n args.logger.write('\\n'+name+', '+str(prm.shape) + ': '+ str(prm.requires_grad))\n numbatches = len(trnbatches)\n indices = list(range(numbatches))\n random.seed(0)\n best_loss = 1e4\n trn_loss_values = []; trn_acc_values = []\n val_loss_values = []; val_acc_values = []\n for epc in range(args.epochs):\n epoch_loss = 0; epoch_acc = 0; epoch_num_instances = 0\n random.shuffle(indices) # this breaks continuity if there is\n for i, idx in enumerate(indices):\n args.model.zero_grad() \n # (batchsize, t)\n surf, surfpos = trnbatches[idx]\n loss, acc = args.model.probe_loss(surf, surfpos)\n loss.backward()\n opt.step()\n epoch_num_instances += surf.size(0) \n epoch_loss += loss.item()\n epoch_acc += acc\n nll = epoch_loss / numbatches\n acc = epoch_acc / epoch_num_instances\n trn_loss_values.append(nll)\n trn_acc_values.append(acc)\n args.logger.write('\\nepoch: %.1d avg_loss: %.4f, acc: %.4f \\n' % (epc, nll, acc))\n # VAL\n args.model.eval()\n with torch.no_grad():\n nll, acc = test(valbatches, \"val\", args)\n val_loss_values.append(nll)\n val_acc_values.append(acc)\n scheduler.step(nll)\n if nll < best_loss:\n args.logger.write('update best loss \\n')\n best_loss = nll\n torch.save(args.model.state_dict(), args.save_path)\n args.model.train()\n plot_curves(args.task, args.mname, args.fig, args.axs[0], trn_loss_values, val_loss_values, args.plt_style, 'loss')\n plot_curves(args.task, args.mname, args.fig, args.axs[1], trn_acc_values, val_acc_values, args.plt_style, 'acc')\n \n\n# CONFIG\nparser = argparse.ArgumentParser(description='')\nargs = parser.parse_args()\nargs.device = device \nargs.mname = 'MiniGPT_500epochs_lr001_scheduler' \nmodel_path = working_path + 'NLP/EXPERIMENTS/exp14/charlm_miniGPT/results/50000_instances500epochs.pt'\nmodel_vocab = working_path + 
'NLP/EXPERIMENTS/exp14/charlm_miniGPT/results/surf_vocab.json'\n\n# training\nargs.batchsize = 32; args.epochs = 500\nargs.opt= 'Adam'; args.lr = 0.001\nargs.task = 'surf2surfpos'\nargs.seq_to_no_pad = 'surface'\n\n# data\nwith open(model_vocab) as f:\n word2id = json.load(f)\n surf_vocab = VocabEntry(word2id)\nargs.trndata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.trn.txt' \nargs.valdata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.val.txt'\nargs.tstdata = working_path + 'NLP/Probing/pos_tagging/data/surfpos.uniquesurfs.val.txt' \nargs.maxtrnsize = 57769; args.maxvalsize = 10000; args.maxtstsize = 10000\nrawdata, batches, vocab = build_data(args, surf_vocab)\n_, surfpos_vocab = vocab\ntrndata, vlddata, tstdata = rawdata\nargs.trnsize , args.valsize, args.tstsize = len(trndata), len(vlddata), len(tstdata)\n\n# model\nnum_layers=3\nembed_dim=128\nnum_heads=16\nblock_size=128\nembedding_dropout_rate=0.15 \nattention_dropout_rate=0.15\nresidual_dropout_rate=0.15\nexpand_ratio = 4\nargs.pretrained_model = GPT3(vocab=surf_vocab,\n num_layers=num_layers,\n embed_dim=embed_dim,\n num_heads=num_heads,\n block_size=block_size,\n embedding_dropout_rate=embedding_dropout_rate,\n attention_dropout_rate=attention_dropout_rate,\n residual_dropout_rate=residual_dropout_rate,\n expand_ratio=expand_ratio\n )\nargs.pretrained_model.load_state_dict(torch.load(model_path))\nargs.embed = embed_dim\nargs.model = MiniGPT_Probe(args, surfpos_vocab)\nprint(args.model)\nfor param in args.model.token_embedding.parameters():\n param.requires_grad = False\nfor param in args.model.decoder1.parameters():\n param.requires_grad = False\nfor param in args.model.decoder2.parameters():\n param.requires_grad = False\nfor param in args.model.decoder3.parameters():\n param.requires_grad = False\nargs.model.to(args.device)\nprint(args.model)\n\n# logging\nargs.modelname = working_path + 'NLP/Probing/pos_tagging/results/'+args.mname+'/'+str(len(trndata))+'_instances/'\ntry:\n os.makedirs(args.modelname)\n print(\"Directory \" , args.modelname , \" Created \") \nexcept FileExistsError:\n print(\"Directory \" , args.modelname , \" already exists\")\nargs.save_path = args.modelname + str(args.epochs)+'epochs.pt'\nargs.log_path = args.modelname + str(args.epochs)+'epochs.log'\nargs.fig_path = args.modelname + str(args.epochs)+'epochs.png'\nargs.logger = Logger(args.log_path)\nwith open(args.modelname+'/surf_vocab.json', 'w') as f:\n f.write(json.dumps(surf_vocab.word2id))\nwith open(args.modelname+'/surfpos_vocab.json', 'w') as f:\n f.write(json.dumps(surfpos_vocab.word2id))\nargs.logger.write('\\nnumber of params: %d \\n' % count_parameters(args.model))\nargs.logger.write(args)\nargs.logger.write('\\n')\n\n# plotting\nargs.fig, args.axs = plt.subplots(2, sharex=True)\nargs.plt_style = pstyle = '-'\n\n# run\ntrain(batches, args)\nplt.savefig(args.fig_path)\n\n\n "
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"torch.no_grad",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wakepon21/Gasyori100knock | [
"b9c92bfda9d2288d2b4742c2fb67719ee0ab338b"
] | [
"Question_11_20/answers_py/answer_14.py"
] | [
"import cv2\nimport numpy as np\n\n# Gray scale\ndef BGR2GRAY(img):\n\tb = img[:, :, 0].copy()\n\tg = img[:, :, 1].copy()\n\tr = img[:, :, 2].copy()\n\n\t# Gray scale\n\tout = 0.2126 * r + 0.7152 * g + 0.0722 * b\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n# different filter\ndef different_filter(img, K_size=3):\n\tif len(img.shape) == 3:\n\t\tH, W, C = img.shape\n\telse:\n\t\timg = np.expand_dims(img, axis=-1)\n\t\tH, W, C = img.shape\n\n\t# Zero padding\n\tpad = K_size // 2\n\tout = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)\n\tout[pad: pad + H, pad: pad + W] = gray.copy().astype(np.float)\n\ttmp = out.copy()\n\n\tout_v = out.copy()\n\tout_h = out.copy()\n\n\t# vertical kernel\n\tKv = [[0., -1., 0.],[0., 1., 0.],[0., 0., 0.]]\n\t# horizontal kernel\n\tKh = [[0., 0., 0.],[-1., 1., 0.], [0., 0., 0.]]\n\n\t# filtering\n\tfor y in range(H):\n\t\tfor x in range(W):\n\t\t\tout_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))\n\t\t\tout_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))\n\n\tout_v = np.clip(out_v, 0, 255)\n\tout_h = np.clip(out_h, 0, 255)\n\n\tout_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)\n\tout_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)\n\n\treturn out_v, out_h\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float)\n\n# grayscale\ngray = BGR2GRAY(img)\n\n# different filtering\nout_v, out_h = different_filter(gray, K_size=3)\n\n\n\n# Save result\ncv2.imwrite(\"out_v.jpg\", out_v)\ncv2.imshow(\"result_v\", out_v)\nwhile cv2.waitKey(100) != 27:# loop if not get ESC\n if cv2.getWindowProperty('result_v',cv2.WND_PROP_VISIBLE) <= 0:\n break\ncv2.destroyWindow('result_v')\n\ncv2.imwrite(\"out_h.jpg\", out_h)\ncv2.imshow(\"result_h\", out_h)\n# loop if not get ESC or click x\nwhile cv2.waitKey(100) != 27:\n if cv2.getWindowProperty('result_h',cv2.WND_PROP_VISIBLE) <= 0:\n break\ncv2.destroyWindow('result_h')\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.expand_dims",
"numpy.zeros",
"numpy.sum",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MukundVarmaT/tf-lightning | [
"bdd5493cbb99c3eb1b12979745dacd20be62c51d"
] | [
"tf_lightning/trainer/training_loop.py"
] | [
"\"\"\"\n\n@author: vasudevgupta\n\"\"\"\nfrom pathlib import Path\nimport tensorflow as tf\n\nfrom tf_lightning.callbacks import Callback\nfrom tf_lightning.loggers import WandbLogger\nfrom tf_lightning.trainer.checkpointer import Checkpointer\nfrom tf_lightning.trainer.precision_training import PrecisionTraining\nfrom tf_lightning.trainer.distributed_training import DistributedTraining\n\n\nclass TrainingLoop(Checkpointer, PrecisionTraining, DistributedTraining):\n\n start_epoch = 1\n epochs = 10\n\n # these arguments are valid only for defalt wandb\n # wandb related arguments\n project_name = 'tf-lightning-project'\n config = None\n sync_tensorboard = False\n save_code = None\n\n log_dir = 'logs'\n\n # Related to mixed-precision based training\n policy_name = 'mixed_float16'\n\n # running on only 1 batch for only 1 epoch, No ckpts will be saved\n fast_dev_run = False\n\n def __init__(self):\n\n # You can override Callback class and customize methods\n callbacks = Callback()\n\n # Wandb is supported by default\n self.lit_logger = WandbLogger(project_name=project_name,\n config=config,\n log_dir=Path(\n lightning_base_dir, log_dir),\n sync_tensorboard=sync_tensorboard,\n save_code=save_code)\n\n Checkpointer.__init__()\n PrecisionTraining.__init__(self.policy_name)\n\n def fit(self, lit_module):\n\n # adding methods of lightning-module to trainer\n self.integrate_train_step(lit_module)\n\n if self.enable_precision_training:\n self.wrap_mixed_precision_optimizer()\n self._wrapped_train_step = self._wrapper_precision_train_step\n\n self.wrap_tf_function()\n\n def train(self, tr_dataset, val_dataset):\n\n if bool(self.callbacks):\n self.callbacks.on_train_begin()\n\n for epoch in range(self.start_epoch, 1+self.epochs):\n\n if bool(self.callbacks):\n self.callbacks.on_epoch_begin(epoch)\n\n batch_idx = tf.constant(0)\n\n for batch in tr_dataset:\n if bool(self.callbacks):\n self.callbacks.on_batch_begin(batch_idx)\n\n batch_idx += tf.constant(1)\n\n tr_result = self.wrapped_train_step(batch, batch_idx)\n\n val_result = self.evaluate(val_dataset)\n\n # logging stuff defined in training_step\n if bool(tr_result.log) and (not self.fast_dev_run):\n self.lit_logger.log(tr_result.log)\n\n # logging stuff defined in val_step\n if bool(val_result.log) and (not self.fast_dev_run):\n self.lit_logger.log(val_result.log)\n\n if bool(self.callbacks):\n step_metrics = self.callbacks.on_batch_end(\n batch_idx, tr_result['loss'], val_result['loss'])\n\n if self.save_every_ckpt:\n self.manager.save()\n\n if bool(self.callbacks):\n tr_result = self.evaluate(tr_dataset)\n epoch_metrics = self.callbacks.on_epoch_end(\n epoch, tr_result['loss'], val_result['loss'])\n\n if bool(self.callbacks):\n self.callbacks.on_train_end()\n\n if self.save_only_final_ckpts:\n self.manager.save()\n\n return\n\n def evaluate(self, val_dataset):\n # called inside train method\n batch_idx = tf.constant(0)\n\n for batch in val_dataset:\n\n batch_idx += tf.constant(1)\n\n val_result = self.val_step(batch, batch_idx, optimizer_idx=0)\n\n return val_result\n\n def wrap_tf_function(self):\n # wrapping inside tf.function\n self.wrapped_train_step = tf.function(self._wrapped_train_step)\n self.val_step = tf.function(self.val_step)\n\n def _wrapper_train_step(self, batch, batch_idx):\n # this method is called inside wrap_tf_function\n \"\"\"\n This method is simply wrapping everything:\n - forward propogation\n - backward propogation\n - parameters update\n Overwrite this method if necessary else you need not really take care of anything\n 
\"\"\"\n for optimizer_idx in self.opt_indices:\n\n result = self.training_step(batch, batch_idx, optimizer_idx)\n\n grads = self.backward(\n result['loss'], result['trainable_variables'], batch_idx, optimizer_idx)\n\n self.optimizer_step(grads, trainable_variables,\n batch_idx, optimizer_idx)\n\n return result\n\n def integrate_train_step(self, lit_module):\n # this will run only once\n # called inside fit method\n\n self.training_step = lit_module.training_step\n self.val_step = lit_module.val_step\n\n optimizer, opt_indices = self._get_optimizer(lit_module)\n self.opt_indices = opt_indices\n for i in opt_indices:\n setattr(self, f\"optimizer_{i}\", optimizer[i])\n\n self.backward = lit_module.backward\n\n self.optimizer_step = lit_module.optimizer_step\n\n def _get_optimizer(self, lit_module):\n # called inside integrate_train_step method\n optimizers = lit_module.configure_optimizers()\n\n if isinstance(optimizers, tf.keras.optimizers.Optimizer):\n opt_indices = [0]\n\n elif isinstance(optimizers, (tuple, list)):\n opt_indices = list(range(len(optimizers)))\n\n return optimizers, opt_indices\n"
] | [
[
"tensorflow.function",
"tensorflow.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
aditya02acharya/TypingAgent | [
"34c5230be72c3878942457a6e44b7078fbd08ea0"
] | [
"src/supervisor/supervisor_agent.py"
] | [
"import csv\nimport tqdm\nimport random\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom os import path\nfrom datetime import datetime\n\nimport chainer\nimport chainerrl\nimport chainer.links as L\nfrom chainerrl import misc\nimport chainer.functions as F\nfrom chainer import serializers\nfrom chainerrl.agents import PPO\nfrom chainer.backends import cuda\nfrom chainerrl import experiments\nfrom chainer.backends import cuda\n\nfrom src.abstract.agent import Agent\nfrom src.visualise.visualise import visualise_agent\nfrom src.supervisor.supervisor_agent_environment import SupervisorEnvironment\nfrom src.supervisor.supervisor_agent_two_finger_env import SupervisorEnvironment_\n\n\nclass SupervisorAgent(Agent):\n\n def __init__(self, layout_config, agent_params, train, finger_two, verbose=False):\n self.logger = logging.getLogger(__name__)\n\n self.layout_config = layout_config\n self.agent_params = agent_params\n self.train_model = train\n self.finger_two = finger_two\n self.verbose = verbose\n\n if finger_two:\n self.env = SupervisorEnvironment_(self.layout_config, self.agent_params, self.train_model)\n else:\n self.env = SupervisorEnvironment(self.layout_config, self.agent_params, self.train_model)\n\n optimizer_name = 'Adam' if agent_params is None else agent_params['supervisor']['optimizer_name']\n lr = 0.001 if agent_params is None else agent_params['supervisor']['learning_rate']\n n_units = 512 if agent_params is None else int(agent_params['supervisor']['n_units'])\n device_id = 0 if agent_params is None else int(agent_params['supervisor']['device_id'])\n pre_load = False if agent_params is None else bool(agent_params['supervisor']['pre_load'])\n self.gpu = True if agent_params is None else bool(agent_params['supervisor']['gpu'])\n self.save_path = path.join('data', 'models', 'supervisor') if agent_params is None \\\n else agent_params['supervisor']['save_path']\n self.episodes = 1000000 if agent_params is None else int(agent_params['supervisor']['episodes'])\n self.log_interval = 1000 if agent_params is None else int(agent_params['supervisor']['log_interval'])\n self.log_filename = agent_params['supervisor']['log_file']\n\n winit_last = chainer.initializers.LeCunNormal(1e-2)\n\n self.model = chainer.Sequential(\n L.Linear(None, n_units),\n F.relu,\n L.Linear(None, n_units),\n F.relu,\n chainerrl.links.Branched(\n chainer.Sequential(\n L.Linear(None, self.env.action_space.n, initialW=winit_last),\n chainerrl.distribution.SoftmaxDistribution,\n ),\n L.Linear(None, 1)\n )\n )\n\n if pre_load:\n serializers.load_npz(path.join(self.save_path, 'best', 'model.npz'), self.model)\n\n if self.gpu:\n self.model.to_gpu(device_id)\n\n if optimizer_name == 'Adam':\n self.optimizer = chainer.optimizers.Adam(alpha=lr)\n elif optimizer_name == 'RMSprop':\n self.optimizer = chainer.optimizers.RMSprop(lr=lr)\n else:\n self.optimizer = chainer.optimizers.MomentumSGD(lr=lr)\n\n self.optimizer.setup(self.model)\n\n self.optimizer.add_hook(chainer.optimizer.GradientClipping(1.0))\n\n phi = lambda x: x.astype(np.float32, copy=False)\n\n self.agent = PPO(\n self.model,\n self.optimizer,\n phi=phi,\n update_interval=1000,\n standardize_advantages=True,\n entropy_coef=1e-2,\n recurrent=False,\n )\n\n if train:\n chainer.config.train = True\n if self.verbose:\n self.pbar = tqdm.tqdm(total=self.episodes, ascii=True, bar_format='{l_bar}{n}, {remaining}\\n')\n else:\n self.pbar = tqdm.tqdm(total=self.episodes)\n else:\n chainer.config.train = False\n self.agent.act_deterministically = False\n\n def 
train(self, episodes):\n \"\"\"\n Trains the model for given number of episodes.\n \"\"\"\n\n progress_bar = ProgressBar(self.pbar, episodes)\n\n experiments.train_agent_with_evaluation(\n self.agent, self.env,\n steps=episodes, # Train the agent for 2000 steps\n eval_n_steps=None, # We evaluate for episodes, not time\n eval_n_episodes=10, # 10 episodes are sampled for each evaluation\n train_max_episode_len=100, # Maximum length of each episode\n eval_interval=self.log_interval, # Evaluate the agent after every 1000 steps\n step_hooks=[progress_bar], # add hooks\n logger=self.logger,\n outdir=self.save_path) # Save everything to 'supervisor' directory\n\n def evaluate(self, sentence, batch, n_users, **kwargs):\n \"\"\"\n Function to evaluate trained agent.\n :param sentence: sentence to type.\n :param batch: run evaluation in batch mode.\n :param n_users: number of users to simulate.\n \"\"\"\n\n done = False\n if not (sentence == \"\" or sentence is None):\n self.env.sentences = [sentence]\n self.env.sentences_bkp = [sentence]\n\n if batch:\n sentence_agg_data = [[\"sentence.id\", \"agent.id\", \"target.sentence\", \"wpm\", \"lev.distance\",\n \"gaze.shift\", \"bs\", \"immediate.bs\", \"delayed.bs\",\n \"gaze.keyboard.ratio\", \"fix.count\", \"finger.travel\", \"iki\", \"correct.error\",\n \"uncorrected.error\", \"fix.duration\", \"chunk.length\"]]\n if self.verbose:\n iter = tqdm.tqdm(iterable=range(n_users), ascii=True,\n bar_format='{l_bar}{n}, {remaining}\\n')\n else:\n iter = tqdm.tqdm(range(n_users))\n for i in iter:\n\n if self.finger_two:\n self.env = SupervisorEnvironment_(self.layout_config, self.agent_params, self.train_model)\n else:\n self.env = SupervisorEnvironment(self.layout_config, self.agent_params, self.train_model)\n self.env.agent_id = i\n\n # reinitialise random seed.\n np.random.seed(datetime.now().microsecond)\n random.seed(datetime.now().microsecond)\n\n while len(self.env.sentences) > 0:\n state = self.env.reset()\n done = False\n while not done:\n action = self.agent.act(state)\n state, reward, done, info = self.env.step(action)\n\n sentence_agg_data += self.env.sentence_test_data\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_sentence_test.csv\"), \"w\", newline=\"\",\n encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(sentence_agg_data)\n\n if not self.finger_two:\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_Vision_Viz.csv\"), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.env.eye_viz_log)\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_Finger_Viz.csv\"), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.env.finger_viz_log)\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_Typing_Viz.csv\"), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.env.typing_viz_log)\n\n else:\n self.env.sentence_test_data.append([\"sentence.id\", \"agent.id\", \"target.sentence\", \"wpm\", \"lev.distance\",\n \"gaze.shift\", \"bs\", \"immediate.bs\", \"delayed.bs\",\n \"gaze.keyboard.ratio\", \"fix.count\", \"finger.travel\", \"iki\",\n \"correct.error\",\n \"uncorrected.error\", \"fix.duration\", \"chunk.length\"])\n state = self.env.reset()\n while not done:\n action = self.agent.act(state)\n state, reward, done, info = self.env.step(action)\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_vision_test.csv\"), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n 
writer.writerows(self.env.eye_test_data)\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_finger_test.csv\"), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.env.finger_test_data)\n\n with open(path.join(\"data\", \"output\", \"SupervisorAgent_sentence_test.csv\"), \"w\", newline=\"\",\n encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(self.env.sentence_test_data)\n\n # TODO: This is from legacy code. Need to update.\n visualise_agent(True, True, path.join(\"data\", \"output\", \"SupervisorAgent_vision_test.csv\"),\n path.join(\"data\", \"output\", \"SupervisorAgent_finger_test.csv\"),\n path.join(\"data\", \"output\", \"SupervisorAgent.mp4\"))\n\n self.save_senetence_agg_data(path.join(\"data\", \"output\", \"SupervisorAgent_sentence_test.csv\"))\n self.save_user_agg_data(path.join(\"data\", \"output\", \"SupervisorAgent_sentence_test.csv\"))\n\n def save_senetence_agg_data(self, filename):\n \"\"\"\n generates sentence level aggregate data.\n :param filename: raw data file path.\n \"\"\"\n data = pd.read_csv(filename, sep=',', encoding='utf-8')\n data = data.groupby(\"target.sentence\").agg(['mean', 'std'])\n data.to_csv(path.join(\"data\", \"output\", \"SupervisorAgent_sentence_aggregate.csv\"), encoding='utf-8')\n\n def save_user_agg_data(self, filename):\n \"\"\"\n generates user level aggregate data.\n :param filename: raw data file path.\n \"\"\"\n data = pd.read_csv(filename, sep=',', encoding='utf-8')\n data = data.groupby(\"agent.id\").agg(['mean', 'std'])\n data.to_csv(path.join(\"data\", \"output\", \"SupervisorAgent_user_aggregate.csv\"), encoding='utf-8')\n\n\nclass ProgressBar(chainerrl.experiments.hooks.StepHook):\n \"\"\"\n Hook class to update progress bar.\n \"\"\"\n\n def __init__(self, pbar, max_length):\n self.pbar = pbar\n self.max = max_length\n\n def __call__(self, env, agent, step):\n self.pbar.update()\n if self.max <= step:\n self.pbar.close()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
iwyoo/TPS_STN-tensorflow | [
"e7bed913a644629042c9dbd141b15b5792f0b1e6"
] | [
"test.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nfrom TPS_STN import TPS_STN\n\nimg = np.array(Image.open(\"original.png\"))\nout_size = list(img.shape)\nshape = [1]+out_size+[1]\n\nnx = 2\nny = 2\n\nv = np.array([\n [0.2, 0.2],\n [0.4, 0.4],\n [0.6, 0.6],\n [0.8, 0.8]])\n\np = tf.constant(v.reshape([1, nx*ny, 2]), dtype=tf.float32)\nt_img = tf.constant(img.reshape(shape), dtype=tf.float32)\nt_img = TPS_STN(t_img, nx, ny, p, out_size)\n\nwith tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n img1 = sess.run(t_img)\n Image.fromarray(np.uint8(img1.reshape(out_size))).save(\"transformed.png\") \n"
] | [
[
"tensorflow.initialize_all_variables",
"numpy.array",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
bol-edu/xilinx-acc-2021_Submission | [
"1f212212d12e3be9638f92f3e7953dc81f89606d"
] | [
"test_toolkit/pcap_gen.py"
] | [
"from cgi import print_exception\nfrom util.graph import Edge, Graph, createGraph, isNegCycleBellmanFord\nfrom util.pcap import PcapGen\nimport numpy as np\nimport pandas as pd\nimport argparse\n\n\n# 18 total\nexch_index2id = np.array([[1, 3], [3, 1], [0, 4], [4, 0], [3, 2], [2, 3], [1, 2], [2, 1], [\n 0, 2], [2, 0], [3, 0], [0, 3], [1, 0], [0, 1], [1, 4], [4, 1], [4, 2], [2, 4]], dtype=int)\n# security id from 1024 to 18432\nid2secid = np.array([(i+1)*1024 for i in range(18)])\n\n\ndef random_data_gen(require_arb, no_arb):\n rates = 10**((np.random.random(18) - 0.5)*8) # 0.0001 - 10000\n if(require_arb):\n while(not check_cycle(rates)):\n rates = 10**((np.random.random(18) - 0.5)*8)\n elif(no_arb):\n while(check_cycle(rates)):\n rates = 10**((np.random.random(18) - 0.5)*8)\n time_stamp_start = 1643350419136975104\n # each packet is separated about 0.1 sec\n timpe_stamps = (np.arange(0, 18)*10**8 + (np.random.random(18) - 0.5)\n * 10**7).astype(int) + time_stamp_start\n # bid, ask ,bid, ask ....\n entry_type = np.array([48, 49]*9)\n d = {\n \"Timestamp\": timpe_stamps,\n \"MDEntryType\": entry_type,\n \"SecurityID\": np.array(id2secid),\n # price times divides by 10000000 after input into AAT\n \"MDEntryPx\": (rates*10000000).astype(int),\n }\n df = pd.DataFrame(d)\n\n # Print rates for pricingEngine module test\n print(\"\"\"# OrderBookResponse\n# The only differences between responses\n# are `bidPrice` and `askPrice`.\n# `symbolIndex` should be different, but\n# in our testbench they are set to dummy numbers.\n# Make sure '#' at the beginning of each line\n# of the comment is followed by at least one space.\n# Remember to add new line at the end of the file\n# and make sure no empty lines in the middle of the file.\n# `responseCount` is the integer in first line.\"\"\")\n print(9)\n for i in range(9):\n print(rates[i], rates[i+1])\n\n return df, rates\n\n\ndef check_cycle(rates):\n # fixed with 5 currencies and 18 exchange pairs\n V = 5 # Number of vertices in graph\n E = 18 # Number of edges in graph\n graph = createGraph(V, E)\n logged_rates = np.log(rates)\n for i in range(18):\n graph.edge[i].src = exch_index2id[i][0]\n graph.edge[i].dest = exch_index2id[i][0]\n graph.edge[i].weight = -logged_rates[i]\n return isNegCycleBellmanFord(graph, 0)\n\n\ndef parse_arg():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(required=True, dest=\"selected_sub\")\n parser_csv = subparsers.add_parser('r', help='read a exsisting csv file')\n parser_csv.add_argument(\n '-c', '--csv', help=\"path of csv file\", required=True)\n parser_random = subparsers.add_parser('g', help='generate random data')\n required_arbitrage_group = parser_random.add_mutually_exclusive_group()\n required_arbitrage_group.add_argument(\n '--no_arb', action=\"store_true\", help=\"required the output data to have no arbitrage route\")\n required_arbitrage_group.add_argument(\n '--req_arb', action=\"store_true\", help=\"required the output data to have at least one arbitrage route\")\n parser_random.add_argument(\n '--output_csv', help=\"also output the csv file of generated data to a designated path\")\n parser.add_argument(\n '-o', '--output', help=\"output path and name of pcap, default: cme_input_gen.pcap\", default='cme_input_gen.pcap')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_arg()\n data, rates = None, None\n if(args.selected_sub == 'r'):\n #read in csv\n data = pd.read_csv(args.csv)\n elif(args.selected_sub == 'g'):\n data, rates = 
random_data_gen(args.req_arb, args.no_arb)\n import sys\n if(check_cycle(rates)):\n print(\"The generated Data has a cycle.\", file=sys.stderr)\n else:\n print(\"The generated Data don't have a cycle.\", file=sys.stderr)\n else:\n exit()\n pcap_gen = PcapGen(pcap_path=args.output)\n # write the pcap file\n for index, row in data.iterrows():\n pcap_gen.construct_payload(\n row['Timestamp'], row['MDEntryType'], row['SecurityID'], row['MDEntryPx'])\n pcap_gen.write_pcap()\n # output csv\n if(vars(args).get(\"output_csv\") != None):\n data.to_csv(args.output_csv, index=False)\n"
] | [
[
"numpy.log",
"pandas.read_csv",
"numpy.random.random",
"numpy.arange",
"pandas.DataFrame",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
NunchakusLei/OpenCV_examples | [
"74974ecd5dae096863fa8915135bfbcfd20ed9a4"
] | [
"ModifyImages/load_float_image.py"
] | [
"import cv2, numpy as np\nimport struct\nimport sys\n\ndef load_float_image(filename):\n # open file\n file = open(filename, \"rb\")\n\n # load info\n value = file.read(4)\n width = struct.unpack('i', value)[0]\n value = file.read(4)\n height = struct.unpack('i', value)[0]\n\n z_image = np.zeros((height, width))\n for y in range(height):\n for x in range(width):\n value = file.read(4)\n a_pixel = struct.unpack('f', value)[0]\n z_image[y][x] = a_pixel\n\n return z_image\n\n\n\nif __name__ == \"__main__\":\n filename = \"32FC1.bin\"\n filename = sys.argv[1]\n depth_image = load_float_image(filename)\n print(depth_image)\n depth_image = 255. * (depth_image / 4.)\n cv2.imshow(\"test\", depth_image.astype(np.uint8))\n cv2.waitKey()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
skmatti/streamz | [
"47917b93f5d1ee6716f4c99f235b791cd55f117d"
] | [
"streamz/dataframe/tests/test_cudf.py"
] | [
"\"\"\"\nTests for cudf DataFrame\nAll these tests are taken from test_dataframes module in the same folder.\nSome of these tests pass with cudf as they are, and others are marked xfail\nwhere a pandas like method is not implemented yet in cudf.\nBut these tests should pass as cudf implement more pandas like methods.\n\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\nimport pytest\nimport operator\nfrom dask.dataframe.utils import assert_eq\nfrom distributed import Client\n\nfrom streamz import Stream\nfrom streamz.dask import DaskStream\nfrom streamz.dataframe import DataFrame, Series, DataFrames, Aggregation\n\ncudf = pytest.importorskip(\"cudf\")\n\n\[email protected](scope=\"module\")\ndef client():\n client = Client(processes=False, asynchronous=False)\n try:\n yield client\n finally:\n client.close()\n\n\[email protected](params=['core', 'dask'])\ndef stream(request, client): # flake8: noqa\n if request.param == 'core':\n return Stream()\n else:\n return DaskStream()\n\n\ndef test_identity(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n L = sdf.stream.gather().sink_to_list()\n\n sdf.emit(df)\n\n assert L[0] is df\n assert list(sdf.example.columns) == ['x', 'y']\n\n x = sdf.x\n assert isinstance(x, Series)\n L2 = x.stream.gather().sink_to_list()\n assert not L2\n\n sdf.emit(df)\n assert isinstance(L2[0], cudf.Series)\n assert assert_eq(L2[0], df.x)\n\n\ndef test_dtype(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n assert str(sdf.dtypes) == str(df.dtypes)\n assert sdf.x.dtype == df.x.dtype\n assert sdf.index.dtype == df.index.dtype\n\n\ndef test_attributes():\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df)\n\n assert 'x' in dir(sdf)\n assert 'z' not in dir(sdf)\n\n sdf.x\n with pytest.raises(AttributeError):\n sdf.z\n\n\ndef test_exceptions(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n with pytest.raises(TypeError):\n sdf.emit(1)\n\n with pytest.raises(IndexError):\n sdf.emit(cudf.DataFrame())\n\n\[email protected]('func', [\n pytest.param(lambda x: x.sum(),\n marks=pytest.mark.xfail(\n reason=\"'Series' object does not support item assignment\")),\n pytest.param(lambda x: x.mean(),\n marks=pytest.mark.xfail(\n reason=\"'Series' object does not support item assignment\")),\n lambda x: x.count(),\n pytest.param(lambda x: x.size,\n marks=pytest.mark.xfail(reason=\"Not implemented\"))\n])\ndef test_reductions(stream, func):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n for example in [df, df.iloc[:0]]:\n sdf = DataFrame(example=example, stream=stream)\n\n df_out = func(sdf).stream.gather().sink_to_list()\n\n x = sdf.x\n x_out = func(x).stream.gather().sink_to_list()\n\n sdf.emit(df)\n sdf.emit(df)\n\n assert_eq(df_out[-1], func(cudf.concat([df, df])))\n assert_eq(x_out[-1], func(cudf.concat([df, df]).x))\n\n\[email protected]('op', [\n operator.add,\n operator.and_,\n operator.eq,\n operator.floordiv,\n operator.ge,\n operator.gt,\n operator.le,\n operator.lshift,\n operator.lt,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.or_,\n operator.pow,\n operator.rshift,\n operator.sub,\n operator.truediv,\n operator.xor,\n])\[email protected]('getter', [lambda df: df, lambda df: df.x])\ndef test_binary_operators(op, getter, stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n try:\n left = 
op(getter(df), 2)\n right = op(2, getter(df))\n except Exception:\n return\n\n a = DataFrame(example=df, stream=stream)\n li = op(getter(a), 2).stream.gather().sink_to_list()\n r = op(2, getter(a)).stream.gather().sink_to_list()\n\n a.emit(df)\n\n assert_eq(li[0], left)\n assert_eq(r[0], right)\n\n\[email protected]('op', [\n operator.abs,\n operator.inv,\n operator.invert,\n operator.neg,\n lambda x: x.map(lambda x: x + 1),\n lambda x: x.reset_index(),\n lambda x: x.astype(float),\n])\[email protected]('getter', [lambda df: df, lambda df: df.x])\ndef test_unary_operators(op, getter):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n try:\n expected = op(getter(df))\n except Exception:\n return\n\n a = DataFrame(example=df)\n b = op(getter(a)).stream.sink_to_list()\n\n a.emit(df)\n\n assert_eq(b[0], expected)\n\n\[email protected]('func', [\n lambda df: df.query('x > 1 and x < 4'),\n pytest.param(lambda df: df.x.value_counts().nlargest(2),\n marks=pytest.mark.xfail(reason=\"`cudf.Series.add` is'nt implemented\"))\n])\ndef test_dataframe_simple(func):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n expected = func(df)\n\n a = DataFrame(example=df)\n L = func(a).stream.sink_to_list()\n\n a.emit(df)\n\n assert_eq(L[0], expected)\n\n\ndef test_set_index():\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n a = DataFrame(example=df)\n\n b = a.set_index('x').stream.sink_to_list()\n a.emit(df)\n assert_eq(b[0], df.set_index('x'))\n\n b = a.set_index(a.y + 1).stream.sink_to_list()\n a.emit(df)\n assert_eq(b[0], df.set_index(df.y + 1))\n\n\ndef test_binary_stream_operators(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n\n expected = df.x + df.y\n\n a = DataFrame(example=df, stream=stream)\n b = (a.x + a.y).stream.gather().sink_to_list()\n\n a.emit(df)\n\n assert_eq(b[0], expected)\n\n\ndef test_index(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n a = DataFrame(example=df, stream=stream)\n b = a.index + 5\n L = b.stream.gather().sink_to_list()\n\n a.emit(df)\n a.emit(df)\n\n assert_eq(L[0], df.index + 5)\n assert_eq(L[1], df.index + 5)\n\n\ndef test_pair_arithmetic(stream):\n df = cudf.DataFrame({'x': list(range(10)), 'y': [1] * 10})\n\n a = DataFrame(example=df.iloc[:0], stream=stream)\n L = ((a.x + a.y) * 2).stream.gather().sink_to_list()\n\n a.emit(df.iloc[:5])\n a.emit(df.iloc[5:])\n\n assert len(L) == 2\n assert_eq(cudf.concat(L), (df.x + df.y) * 2)\n\n\ndef test_getitem(stream):\n df = cudf.DataFrame({'x': list(range(10)), 'y': [1] * 10})\n\n a = DataFrame(example=df.iloc[:0], stream=stream)\n L = a[a.x > 4].stream.gather().sink_to_list()\n\n a.emit(df.iloc[:5])\n a.emit(df.iloc[5:])\n\n assert len(L) == 2\n assert_eq(cudf.concat(L), df[df.x > 4])\n\n\[email protected](reason=\"`cudf.DataFrame.add` is not implemented\")\[email protected]('agg', [\n lambda x: x.sum(),\n lambda x: x.mean(),\n lambda x: x.count(),\n lambda x: x.var(ddof=1),\n lambda x: x.std(),\n pytest.param(lambda x: x.var(ddof=0),\n marks=pytest.mark.xfail(reason=\"unknown\"))\n])\[email protected]('grouper', [lambda a: a.x % 3,\n lambda a: 'x',\n lambda a: a.index % 2,\n lambda a: ['x']])\[email protected]('indexer', [lambda g: g.y,\n lambda g: g,\n lambda g: g[['y']],\n pytest.param(lambda g: g[['x', 'y']],\n marks=pytest.mark.xfail(\n reason=\"Indexer column matches grouper\"))\n ])\ndef test_groupby_aggregate(agg, grouper, indexer, stream):\n df = cudf.DataFrame({'x': (np.arange(10) // 2).astype(float), 'y': [1.0, 2.0] * 5})\n\n a = 
DataFrame(example=df.iloc[:0], stream=stream)\n\n def f(x):\n return agg(indexer(x.groupby(grouper(x))))\n\n L = f(a).stream.gather().sink_to_list()\n\n a.emit(df.iloc[:3])\n a.emit(df.iloc[3:7])\n a.emit(df.iloc[7:])\n\n first = df.iloc[:3]\n assert assert_eq(L[0], f(first))\n assert assert_eq(L[-1], f(df))\n\n\[email protected](reason=\"`cudf.Series.add` is not implemented\")\ndef test_value_counts(stream):\n s = cudf.Series([1, 2, 1])\n\n a = Series(example=s, stream=stream)\n\n b = a.value_counts()\n assert b._stream_type == 'updating'\n result = b.stream.gather().sink_to_list()\n\n a.emit(s)\n a.emit(s)\n\n assert_eq(result[-1], cudf.concat([s, s]).value_counts())\n\n\[email protected](reason=\"'Series' object does not support item assignment\")\ndef test_setitem(stream):\n df = cudf.DataFrame({'x': list(range(10)), 'y': [1] * 10})\n\n sdf = DataFrame(example=df.iloc[:0], stream=stream)\n stream = sdf.stream\n\n sdf['z'] = sdf['x'] * 2\n sdf['a'] = 10\n sdf[['c', 'd']] = sdf[['x', 'y']]\n\n L = sdf.mean().stream.gather().sink_to_list()\n\n stream.emit(df.iloc[:3])\n stream.emit(df.iloc[3:7])\n stream.emit(df.iloc[7:])\n\n df['z'] = df['x'] * 2\n df['a'] = 10\n df[['c', 'd']] = df[['x', 'y']]\n\n assert_eq(L[-1], df.mean())\n\n\ndef test_setitem_overwrites(stream):\n df = cudf.DataFrame({'x': list(range(10))})\n sdf = DataFrame(example=df.iloc[:0], stream=stream)\n stream = sdf.stream\n\n sdf['x'] = sdf['x'] * 2\n\n L = sdf.stream.gather().sink_to_list()\n\n stream.emit(df.iloc[:3])\n stream.emit(df.iloc[3:7])\n stream.emit(df.iloc[7:])\n\n assert_eq(L[-1], df.iloc[7:] * 2)\n\n\ndef test_stream_to_dataframe(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n source = stream\n L = source.to_dataframe(example=df).x.sum().stream.gather().sink_to_list()\n\n source.emit(df)\n source.emit(df)\n source.emit(df)\n\n assert L == [6, 12, 18]\n\n\ndef test_to_frame(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n assert sdf.to_frame() is sdf\n\n a = sdf.x.to_frame()\n assert isinstance(a, DataFrame)\n assert list(a.columns) == ['x']\n\n\ndef test_instantiate_with_dict(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n sdf2 = DataFrame({'a': sdf.x, 'b': sdf.x * 2,\n 'c': sdf.y % 2})\n L = sdf2.stream.gather().sink_to_list()\n assert len(sdf2.columns) == 3\n\n sdf.emit(df)\n sdf.emit(df)\n\n assert len(L) == 2\n for x in L:\n assert_eq(x[['a', 'b', 'c']],\n cudf.DataFrame({'a': df.x, 'b': df.x * 2, 'c': df.y % 2}))\n\n\[email protected]('op', ['cumsum', 'cummax', 'cumprod', 'cummin'])\[email protected]('getter', [lambda df: df, lambda df: df.x])\ndef test_cumulative_aggregations(op, getter, stream):\n df = cudf.DataFrame({'x': list(range(10)), 'y': [1] * 10})\n expected = getattr(getter(df), op)()\n\n sdf = DataFrame(example=df, stream=stream)\n\n L = getattr(getter(sdf), op)().stream.gather().sink_to_list()\n\n for i in range(0, 10, 3):\n sdf.emit(df.iloc[i: i + 3])\n sdf.emit(df.iloc[:0])\n\n assert len(L) > 1\n\n assert_eq(cudf.concat(L), expected)\n\n\ndef test_display(stream):\n pytest.importorskip('ipywidgets')\n pytest.importorskip('IPython')\n\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n s = sdf.x.sum()\n\n s._ipython_display_()\n\n\ndef test_tail(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n L = 
sdf.tail(2).stream.gather().sink_to_list()\n\n sdf.emit(df)\n sdf.emit(df)\n\n assert_eq(L[0], df.tail(2))\n assert_eq(L[1], df.tail(2))\n\n\ndef test_example_type_error_message():\n try:\n DataFrame(example=[123])\n except Exception as e:\n assert 'DataFrame' in str(e)\n assert '[123]' in str(e)\n\n\ndef test_dataframes(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrames(example=df, stream=stream)\n L = sdf.x.sum().stream.gather().sink_to_list()\n\n sdf.emit(df)\n sdf.emit(df)\n\n assert L == [6, 6]\n\n\ndef test_aggregate_updating(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n\n assert sdf.x.sum()._stream_type == 'updating'\n assert (sdf.x.sum() + 1)._stream_type == 'updating'\n\n\ndef test_window_sum(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n L = sdf.window(n=4).x.sum().stream.gather().sink_to_list()\n\n sdf.emit(df)\n assert L == [6]\n sdf.emit(df)\n assert L == [6, 9]\n sdf.emit(df)\n assert L == [6, 9, 9]\n\n\[email protected](reason=\"'Series' object does not support item assignment\")\ndef test_window_sum_dataframe(stream):\n df = cudf.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})\n sdf = DataFrame(example=df, stream=stream)\n L = sdf.window(n=4).sum().stream.gather().sink_to_list()\n\n sdf.emit(df)\n assert_eq(L[0], cudf.Series([6, 15], index=['x', 'y']))\n sdf.emit(df)\n assert_eq(L[0], cudf.Series([6, 15], index=['x', 'y']))\n assert_eq(L[1], cudf.Series([9, 21], index=['x', 'y']))\n sdf.emit(df)\n assert_eq(L[0], cudf.Series([6, 15], index=['x', 'y']))\n assert_eq(L[1], cudf.Series([9, 21], index=['x', 'y']))\n assert_eq(L[2], cudf.Series([9, 21], index=['x', 'y']))\n\n\[email protected]('func', [\n lambda x: x.sum(),\n lambda x: x.mean(),\n lambda x: x.count(),\n lambda x: x.var(ddof=1),\n lambda x: x.std(ddof=1),\n lambda x: x.var(ddof=0),\n])\[email protected]('n', [2, 4])\[email protected]('getter', [\n lambda df: df.x,\n])\ndef test_windowing_n(func, n, getter):\n df = cudf.DataFrame({'x': list(range(10)), 'y': [1, 2] * 5})\n\n sdf = DataFrame(example=df)\n L = func(getter(sdf).window(n=n)).stream.gather().sink_to_list()\n\n for i in range(0, 10, 3):\n sdf.emit(df.iloc[i: i + 3])\n sdf.emit(df.iloc[:0])\n\n assert len(L) == 5\n\n assert_eq(L[0], func(getter(df).iloc[max(0, 3 - n): 3]))\n assert_eq(L[-1], func(getter(df).iloc[len(df) - n:]))\n\n\ndef test_window_full():\n df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})\n\n sdf = DataFrame(example=df)\n\n L = sdf.window(n=4).apply(lambda x: x).stream.sink_to_list()\n\n sdf.emit(df.iloc[:3])\n sdf.emit(df.iloc[3:8])\n sdf.emit(df.iloc[8:])\n\n assert_eq(L[0], df.iloc[:3])\n assert_eq(L[1], df.iloc[4:8])\n assert_eq(L[2], df.iloc[-4:])\n\n\ndef test_custom_aggregation():\n df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})\n\n class Custom(Aggregation):\n def initial(self, new):\n return 0\n\n def on_new(self, state, new):\n return state + 1, state\n\n def on_old(self, state, new):\n return state - 100, state\n\n sdf = DataFrame(example=df)\n L = sdf.aggregate(Custom()).stream.sink_to_list()\n\n sdf.emit(df)\n sdf.emit(df)\n sdf.emit(df)\n\n assert L == [0, 1, 2]\n\n sdf = DataFrame(example=df)\n L = sdf.window(n=5).aggregate(Custom()).stream.sink_to_list()\n\n sdf.emit(df)\n sdf.emit(df)\n sdf.emit(df)\n\n assert L == [1, -198, -397]\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pedromanrique/tensorflow-tts | [
"473eb717503454fa2eabadefd7bcd2459673f6f2"
] | [
"tensorflow_tts/models/tacotron2.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2020 The Tacotron-2 Authors, Minh Nguyen (@dathudeptrai), Eren Gölge (@erogol) and Jae Yoo (@jaeyoo)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tacotron-2 Modules.\"\"\"\n\nimport collections\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.seq2seq import Sampler\nfrom tensorflow_addons.seq2seq import BahdanauAttention\n# TODO: once https://github.com/tensorflow/addons/pull/1964 is fixed,\n# uncomment this line.\n# from tensorflow_addons.seq2seq import dynamic_decode\nfrom tensorflow_addons.seq2seq import Decoder\nfrom tensorflow_tts.utils import dynamic_decode\n\n\ndef get_initializer(initializer_range=0.02):\n \"\"\"Creates a `tf.initializers.truncated_normal` with the given range.\n Args:\n initializer_range: float, initializer range for stddev.\n Returns:\n TruncatedNormal initializer with stddev = `initializer_range`.\n \"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf\n\n\ndef gelu_new(x):\n \"\"\"Smoother gaussian Error Linear Unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef swish(x):\n \"\"\"Swish activation function.\"\"\"\n return x * tf.sigmoid(x)\n\n\ndef mish(x):\n return x * tf.math.tanh(tf.math.softplus(x))\n\n\nACT2FN = {\n \"identity\": tf.keras.layers.Activation(\"linear\"),\n \"tanh\": tf.keras.layers.Activation(\"tanh\"),\n \"gelu\": tf.keras.layers.Activation(gelu),\n \"relu\": tf.keras.activations.relu,\n \"swish\": tf.keras.layers.Activation(swish),\n \"gelu_new\": tf.keras.layers.Activation(gelu_new),\n \"mish\": tf.keras.layers.Activation(mish),\n}\n\n\nclass TFTacotronConvBatchNorm(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 Convolutional Batchnorm module.\"\"\"\n\n def __init__(\n self, filters, kernel_size, dropout_rate, activation=None, name_idx=None\n ):\n super().__init__()\n self.conv1d = tf.keras.layers.Conv1D(\n filters,\n kernel_size,\n kernel_initializer=get_initializer(0.02),\n padding=\"same\",\n name=\"conv_._{}\".format(name_idx),\n )\n self.norm = tf.keras.layers.BatchNormalization(\n axis=-1, name=\"batch_norm_._{}\".format(name_idx)\n )\n self.dropout = tf.keras.layers.Dropout(\n rate=dropout_rate, name=\"dropout_._{}\".format(name_idx)\n )\n self.act = ACT2FN[activation]\n\n def call(self, inputs, training=False):\n outputs = self.conv1d(inputs)\n outputs = self.norm(outputs, training=training)\n outputs = self.act(outputs)\n outputs = self.dropout(outputs, training=training)\n return outputs\n\n\nclass TFTacotronEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct character/phoneme/positional/speaker embeddings.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n self.embedding_hidden_size = config.embedding_hidden_size\n self.initializer_range = 
config.initializer_range\n self.config = config\n\n if config.n_speakers > 1:\n self.speaker_embeddings = tf.keras.layers.Embedding(\n config.n_speakers,\n config.embedding_hidden_size,\n embeddings_initializer=get_initializer(self.initializer_range),\n name=\"speaker_embeddings\",\n )\n self.speaker_fc = tf.keras.layers.Dense(\n units=config.embedding_hidden_size, name=\"speaker_fc\"\n )\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.embedding_dropout_prob)\n\n def build(self, input_shape):\n \"\"\"Build shared character/phoneme embedding layers.\"\"\"\n with tf.name_scope(\"character_embeddings\"):\n self.character_embeddings = self.add_weight(\n \"weight\",\n shape=[self.vocab_size, self.embedding_hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n super().build(input_shape)\n\n def call(self, inputs, training=False):\n \"\"\"Get character embeddings of inputs.\n Args:\n 1. character, Tensor (int32) shape [batch_size, length].\n 2. speaker_id, Tensor (int32) shape [batch_size]\n Returns:\n Tensor (float32) shape [batch_size, length, embedding_size].\n \"\"\"\n return self._embedding(inputs, training=training)\n\n def _embedding(self, inputs, training=False):\n \"\"\"Applies embedding based on inputs tensor.\"\"\"\n input_ids, speaker_ids = inputs\n\n # create embeddings\n inputs_embeds = tf.gather(self.character_embeddings, input_ids)\n embeddings = inputs_embeds\n\n if self.config.n_speakers > 1:\n speaker_embeddings = self.speaker_embeddings(speaker_ids)\n speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))\n # extended speaker embeddings\n extended_speaker_features = speaker_features[:, tf.newaxis, :]\n # sum all embedding\n embeddings += extended_speaker_features\n\n # apply layer-norm and dropout for embeddings.\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings, training=training)\n\n return embeddings\n\n\nclass TFTacotronEncoderConvs(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 Encoder Convolutional Batchnorm module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_batch_norm = []\n for i in range(config.n_conv_encoder):\n conv = TFTacotronConvBatchNorm(\n filters=config.encoder_conv_filters,\n kernel_size=config.encoder_conv_kernel_sizes,\n activation=config.encoder_conv_activation,\n dropout_rate=config.encoder_conv_dropout_rate,\n name_idx=i,\n )\n self.conv_batch_norm.append(conv)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n outputs = inputs\n for conv in self.conv_batch_norm:\n outputs = conv(outputs, training=training)\n return outputs\n\n\nclass TFTacotronEncoder(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 Encoder.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.embeddings = TFTacotronEmbeddings(config, name=\"embeddings\")\n self.convbn = TFTacotronEncoderConvs(config, name=\"conv_batch_norm\")\n self.bilstm = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(\n units=config.encoder_lstm_units, return_sequences=True\n ),\n name=\"bilstm\",\n )\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n input_ids, speaker_ids, input_mask = inputs\n\n # create embedding and mask them since we sum\n # speaker embedding to all character embedding.\n input_embeddings = self.embeddings([input_ids, speaker_ids], 
training=training)\n\n # pass embeddings to convolution batch norm\n conv_outputs = self.convbn(input_embeddings, training=training)\n\n # bi-lstm.\n outputs = self.bilstm(conv_outputs, mask=input_mask)\n\n return outputs\n\n\nclass TrainingSampler(Sampler):\n \"\"\"Training sampler for Seq2Seq training.\"\"\"\n\n def __init__(\n self, config,\n ):\n super().__init__()\n self.config = config\n # create schedule factor.\n # the input of a next decoder cell is calculated by formular:\n # next_inputs = ratio * prev_groundtruth_outputs + (1.0 - ratio) * prev_predicted_outputs.\n self._ratio = tf.constant(1.0, dtype=tf.float32)\n self._reduction_factor = self.config.reduction_factor\n\n def setup_target(self, targets, mel_lengths):\n \"\"\"Setup ground-truth mel outputs for decoder.\"\"\"\n self.mel_lengths = mel_lengths\n self.set_batch_size(tf.shape(targets)[0])\n self.targets = targets[\n :, self._reduction_factor - 1 :: self._reduction_factor, :\n ]\n self.max_lengths = tf.tile([tf.shape(self.targets)[1]], [self._batch_size])\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def sample_ids_shape(self):\n return tf.TensorShape([])\n\n @property\n def sample_ids_dtype(self):\n return tf.int32\n\n @property\n def reduction_factor(self):\n return self._reduction_factor\n\n def initialize(self):\n \"\"\"Return (Finished, next_inputs).\"\"\"\n return (\n tf.tile([False], [self._batch_size]),\n tf.tile([[0.0]], [self._batch_size, self.config.n_mels]),\n )\n\n def sample(self, time, outputs, state):\n return tf.tile([0], [self._batch_size])\n\n def next_inputs(self, time, outputs, state, sample_ids, **kwargs):\n finished = time + 1 >= self.max_lengths\n next_inputs = (\n self._ratio * self.targets[:, time, :]\n + (1.0 - self._ratio) * outputs[:, -self.config.n_mels :]\n )\n next_state = state\n return (finished, next_inputs, next_state)\n\n def set_batch_size(self, batch_size):\n self._batch_size = batch_size\n\n\nclass TestingSampler(TrainingSampler):\n \"\"\"Testing sampler for Seq2Seq training.\"\"\"\n\n def __init__(\n self, config,\n ):\n super().__init__(config)\n\n def next_inputs(self, time, outputs, state, sample_ids, **kwargs):\n stop_token_prediction = kwargs.get(\"stop_token_prediction\")\n stop_token_prediction = tf.nn.sigmoid(stop_token_prediction)\n finished = tf.cast(tf.round(stop_token_prediction), tf.bool)\n finished = tf.reduce_all(finished)\n next_inputs = outputs[:, -self.config.n_mels :]\n next_state = state\n return (finished, next_inputs, next_state)\n\n\nclass TFTacotronLocationSensitiveAttention(BahdanauAttention):\n \"\"\"Tacotron-2 Location Sensitive Attention module.\"\"\"\n\n def __init__(\n self,\n config,\n memory,\n mask_encoder=True,\n memory_sequence_length=None,\n is_cumulate=True,\n ):\n \"\"\"Init variables.\"\"\"\n memory_length = memory_sequence_length if (mask_encoder is True) else None\n super().__init__(\n units=config.attention_dim,\n memory=memory,\n memory_sequence_length=memory_length,\n probability_fn=\"softmax\",\n name=\"LocationSensitiveAttention\",\n )\n self.location_convolution = tf.keras.layers.Conv1D(\n filters=config.attention_filters,\n kernel_size=config.attention_kernel,\n padding=\"same\",\n use_bias=False,\n name=\"location_conv\",\n )\n self.location_layer = tf.keras.layers.Dense(\n units=config.attention_dim, use_bias=False, name=\"location_layer\"\n )\n\n self.v = tf.keras.layers.Dense(1, use_bias=True, name=\"scores_attention\")\n self.config = config\n self.is_cumulate = is_cumulate\n self.use_window = 
False\n\n def setup_window(self, win_front=2, win_back=4):\n self.win_front = tf.constant(win_front, tf.int32)\n self.win_back = tf.constant(win_back, tf.int32)\n\n self._indices = tf.expand_dims(tf.range(tf.shape(self.keys)[1]), 0)\n self._indices = tf.tile(\n self._indices, [tf.shape(self.keys)[0], 1]\n ) # [batch_size, max_time]\n\n self.use_window = True\n\n def _compute_window_mask(self, max_alignments):\n \"\"\"Compute window mask for inference.\n Args:\n max_alignments (int): [batch_size]\n \"\"\"\n expanded_max_alignments = tf.expand_dims(max_alignments, 1) # [batch_size, 1]\n low = expanded_max_alignments - self.win_front\n high = expanded_max_alignments + self.win_back\n mlow = tf.cast((self._indices < low), tf.float32)\n mhigh = tf.cast((self._indices > high), tf.float32)\n mask = mlow + mhigh\n return mask # [batch_size, max_length]\n\n def __call__(self, inputs, training=False):\n query, state, prev_max_alignments = inputs\n\n processed_query = self.query_layer(query) if self.query_layer else query\n processed_query = tf.expand_dims(processed_query, 1)\n\n expanded_alignments = tf.expand_dims(state, axis=2)\n f = self.location_convolution(expanded_alignments)\n processed_location_features = self.location_layer(f)\n\n energy = self._location_sensitive_score(\n processed_query, processed_location_features, self.keys\n )\n\n # mask energy on inference steps.\n if self.use_window is True:\n window_mask = self._compute_window_mask(prev_max_alignments)\n energy = energy + window_mask * -1e20\n\n alignments = self.probability_fn(energy, state)\n\n if self.is_cumulate:\n state = alignments + state\n else:\n state = alignments\n\n expanded_alignments = tf.expand_dims(alignments, 2)\n context = tf.reduce_sum(expanded_alignments * self.values, 1)\n\n return context, alignments, state\n\n def _location_sensitive_score(self, W_query, W_fil, W_keys):\n \"\"\"Calculate location sensitive energy.\"\"\"\n return tf.squeeze(self.v(tf.nn.tanh(W_keys + W_query + W_fil)), -1)\n\n def get_initial_state(self, batch_size, size):\n \"\"\"Get initial alignments.\"\"\"\n return tf.zeros(shape=[batch_size, size], dtype=tf.float32)\n\n def get_initial_context(self, batch_size):\n \"\"\"Get initial attention.\"\"\"\n return tf.zeros(\n shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32\n )\n\n\nclass TFTacotronPrenet(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 prenet.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.prenet_dense = [\n tf.keras.layers.Dense(\n units=config.prenet_units,\n activation=ACT2FN[config.prenet_activation],\n name=\"dense_._{}\".format(i),\n )\n for i in range(config.n_prenet_layers)\n ]\n self.dropout = tf.keras.layers.Dropout(\n rate=config.prenet_dropout_rate, name=\"dropout\"\n )\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n outputs = inputs\n for layer in self.prenet_dense:\n outputs = layer(outputs)\n outputs = self.dropout(outputs, training=True)\n return outputs\n\n\nclass TFTacotronPostnet(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 postnet.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_batch_norm = []\n for i in range(config.n_conv_postnet):\n conv = TFTacotronConvBatchNorm(\n filters=config.postnet_conv_filters,\n kernel_size=config.postnet_conv_kernel_sizes,\n dropout_rate=config.postnet_dropout_rate,\n activation=\"identity\" if i + 1 == config.n_conv_postnet else \"tanh\",\n 
name_idx=i,\n )\n self.conv_batch_norm.append(conv)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n outputs = inputs\n for _, conv in enumerate(self.conv_batch_norm):\n outputs = conv(outputs, training=training)\n return outputs\n\n\nTFTacotronDecoderCellState = collections.namedtuple(\n \"TFTacotronDecoderCellState\",\n [\n \"attention_lstm_state\",\n \"decoder_lstms_state\",\n \"context\",\n \"time\",\n \"state\",\n \"alignment_history\",\n \"max_alignments\",\n ],\n)\n\nTFDecoderOutput = collections.namedtuple(\n \"TFDecoderOutput\", (\"mel_output\", \"token_output\", \"sample_id\")\n)\n\n\nclass TFTacotronDecoderCell(tf.keras.layers.AbstractRNNCell):\n \"\"\"Tacotron-2 custom decoder cell.\"\"\"\n\n def __init__(self,\n config,\n training,\n enable_tflite_convertible = False,\n **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.training = training\n self.enable_tflite_convertible = enable_tflite_convertible\n self.prenet = TFTacotronPrenet(config, name=\"prenet\")\n\n # define lstm cell on decoder.\n # TODO(@dathudeptrai) switch to zone-out lstm.\n self.attention_lstm = tf.keras.layers.LSTMCell(\n units=config.decoder_lstm_units, name=\"attention_lstm_cell\"\n )\n lstm_cells = []\n for i in range(config.n_lstm_decoder):\n lstm_cell = tf.keras.layers.LSTMCell(\n units=config.decoder_lstm_units, name=\"lstm_cell_._{}\".format(i)\n )\n lstm_cells.append(lstm_cell)\n self.decoder_lstms = tf.keras.layers.StackedRNNCells(\n lstm_cells, name=\"decoder_lstms\"\n )\n\n # define attention layer.\n if config.attention_type == \"lsa\":\n # create location-sensitive attention.\n self.attention_layer = TFTacotronLocationSensitiveAttention(\n config,\n memory=None,\n mask_encoder=True,\n memory_sequence_length=None,\n is_cumulate=True,\n )\n else:\n raise ValueError(\"Only lsa (location-sensitive attention) is supported\")\n\n # frame, stop projection layer.\n self.frame_projection = tf.keras.layers.Dense(\n units=config.n_mels * config.reduction_factor, name=\"frame_projection\"\n )\n self.stop_projection = tf.keras.layers.Dense(\n units=config.reduction_factor, name=\"stop_projection\"\n )\n\n self.config = config\n\n def set_alignment_size(self, alignment_size):\n self.alignment_size = alignment_size\n\n @property\n def output_size(self):\n \"\"\"Return output (mel) size.\"\"\"\n return self.frame_projection.units\n\n @property\n def state_size(self):\n \"\"\"Return hidden state size.\"\"\"\n return TFTacotronDecoderCellState(\n attention_lstm_state=self.attention_lstm.state_size,\n decoder_lstms_state=self.decoder_lstms.state_size,\n time=tf.TensorShape([]),\n attention=self.config.attention_dim,\n state=self.alignment_size,\n alignment_history=(),\n max_alignments=tf.TensorShape([1]),\n )\n\n def get_initial_state(self, batch_size):\n \"\"\"Get initial states.\"\"\"\n initial_attention_lstm_cell_states = self.attention_lstm.get_initial_state(\n None, batch_size, dtype=tf.float32\n )\n initial_decoder_lstms_cell_states = self.decoder_lstms.get_initial_state(\n None, batch_size, dtype=tf.float32\n )\n initial_context = tf.zeros(\n shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32\n )\n initial_state = self.attention_layer.get_initial_state(\n batch_size, size=self.alignment_size\n )\n if self.enable_tflite_convertible:\n initial_alignment_history = ()\n else:\n initial_alignment_history = tf.TensorArray(\n dtype=tf.float32, size=0, dynamic_size=True\n )\n return TFTacotronDecoderCellState(\n 
attention_lstm_state=initial_attention_lstm_cell_states,\n decoder_lstms_state=initial_decoder_lstms_cell_states,\n time=tf.zeros([], dtype=tf.int32),\n context=initial_context,\n state=initial_state,\n alignment_history=initial_alignment_history,\n max_alignments=tf.zeros([batch_size], dtype=tf.int32),\n )\n\n def call(self, inputs, states):\n \"\"\"Call logic.\"\"\"\n decoder_input = inputs\n\n # 1. apply prenet for decoder_input.\n prenet_out = self.prenet(\n decoder_input, training=self.training\n ) # [batch_size, dim]\n\n # 2. concat prenet_out and prev context vector\n # then use it as input of attention lstm layer.\n attention_lstm_input = tf.concat([prenet_out, states.context], axis=-1)\n attention_lstm_output, next_attention_lstm_state = self.attention_lstm(\n attention_lstm_input, states.attention_lstm_state\n )\n\n # 3. compute context, alignment and cumulative alignment.\n prev_state = states.state\n if not self.enable_tflite_convertible:\n prev_alignment_history = states.alignment_history\n prev_max_alignments = states.max_alignments\n context, alignments, state = self.attention_layer(\n [attention_lstm_output, prev_state, prev_max_alignments],\n training=self.training,\n )\n\n # 4. run decoder lstm(s)\n decoder_lstms_input = tf.concat([attention_lstm_output, context], axis=-1)\n decoder_lstms_output, next_decoder_lstms_state = self.decoder_lstms(\n decoder_lstms_input, states.decoder_lstms_state\n )\n\n # 5. compute frame feature and stop token.\n projection_inputs = tf.concat([decoder_lstms_output, context], axis=-1)\n decoder_outputs = self.frame_projection(projection_inputs)\n\n stop_inputs = tf.concat([decoder_lstms_output, decoder_outputs], axis=-1)\n stop_tokens = self.stop_projection(stop_inputs)\n\n # 6. save alignment history to visualize.\n if self.enable_tflite_convertible:\n alignment_history = ()\n else:\n alignment_history = prev_alignment_history.write(states.time,\n alignments)\n\n # 7. 
return new states.\n new_states = TFTacotronDecoderCellState(\n attention_lstm_state=next_attention_lstm_state,\n decoder_lstms_state=next_decoder_lstms_state,\n time=states.time + 1,\n context=context,\n state=state,\n alignment_history=alignment_history,\n max_alignments=tf.argmax(alignments, -1, output_type=tf.int32),\n )\n\n return (decoder_outputs, stop_tokens), new_states\n\n\nclass TFTacotronDecoder(Decoder):\n \"\"\"Tacotron-2 Decoder.\"\"\"\n\n def __init__(self,\n decoder_cell,\n decoder_sampler,\n output_layer=None,\n enable_tflite_convertible=False):\n \"\"\"Initial variables.\"\"\"\n self.cell = decoder_cell\n self.sampler = decoder_sampler\n self.output_layer = output_layer\n self.enable_tflite_convertible = enable_tflite_convertible\n\n def setup_decoder_init_state(self, decoder_init_state):\n self.initial_state = decoder_init_state\n\n def initialize(self, **kwargs):\n return self.sampler.initialize() + (self.initial_state,)\n\n @property\n def output_size(self):\n return TFDecoderOutput(\n mel_output=tf.nest.map_structure(\n lambda shape: tf.TensorShape(shape), self.cell.output_size\n ),\n token_output=tf.TensorShape(self.sampler.reduction_factor),\n sample_id=tf.TensorShape([1]) \\\n if self.enable_tflite_convertible \\\n else self.sampler.sample_ids_shape # tf.TensorShape([])\n )\n\n @property\n def output_dtype(self):\n return TFDecoderOutput(tf.float32, tf.float32, self.sampler.sample_ids_dtype)\n\n @property\n def batch_size(self):\n return self.sampler._batch_size\n\n def step(self, time, inputs, state, training=False):\n (mel_outputs, stop_tokens), cell_state = self.cell(\n inputs, state, training=training\n )\n if self.output_layer is not None:\n mel_outputs = self.output_layer(mel_outputs)\n sample_ids = self.sampler.sample(\n time=time, outputs=mel_outputs, state=cell_state\n )\n (finished, next_inputs, next_state) = self.sampler.next_inputs(\n time=time,\n outputs=mel_outputs,\n state=cell_state,\n sample_ids=sample_ids,\n stop_token_prediction=stop_tokens,\n )\n\n outputs = TFDecoderOutput(mel_outputs, stop_tokens, sample_ids)\n return (outputs, next_state, next_inputs, finished)\n\n\nclass TFTacotron2(tf.keras.Model):\n \"\"\"Tensorflow tacotron-2 model.\"\"\"\n\n def __init__(self, config, training, enable_tflite_convertible = False, **kwargs):\n \"\"\"Initalize tacotron-2 layers.\"\"\"\n super().__init__(self, **kwargs)\n self.encoder = TFTacotronEncoder(config, name=\"encoder\")\n self.decoder_cell = TFTacotronDecoderCell(\n config, training=training, name=\"decoder_cell\",\n enable_tflite_convertible = enable_tflite_convertible\n )\n self.decoder = TFTacotronDecoder(\n self.decoder_cell,\n TrainingSampler(config) if training is True else TestingSampler(config),\n enable_tflite_convertible = enable_tflite_convertible\n )\n self.postnet = TFTacotronPostnet(config, name=\"post_net\")\n self.post_projection = tf.keras.layers.Dense(\n units=config.n_mels, name=\"residual_projection\"\n )\n\n self.config = config\n self.use_window_mask = False\n self.maximum_iterations = 4000\n self.enable_tflite_convertible = enable_tflite_convertible\n\n def setup_window(self, win_front, win_back):\n \"\"\"Call only for inference.\"\"\"\n self.use_window_mask = True\n self.win_front = win_front\n self.win_back = win_back\n\n def setup_maximum_iterations(self, maximum_iterations):\n \"\"\"Call only for inference.\"\"\"\n self.maximum_iterations = maximum_iterations\n\n def _build(self):\n input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])\n input_lengths = np.array([9])\n 
speaker_ids = np.array([0])\n mel_outputs = np.random.normal(size=(1, 50, 80)).astype(np.float32)\n mel_lengths = np.array([50])\n self(\n input_ids,\n input_lengths,\n speaker_ids,\n mel_outputs,\n mel_lengths,\n 10,\n training=True,\n )\n\n @tf.function(experimental_relax_shapes=True)\n def call(\n self,\n input_ids,\n input_lengths,\n speaker_ids,\n mel_outputs,\n mel_lengths,\n maximum_iterations=2000,\n use_window_mask=False,\n win_front=2,\n win_back=3,\n training=False,\n ):\n \"\"\"Call logic.\"\"\"\n # create input-mask based on input_lengths\n input_mask = tf.sequence_mask(\n input_lengths,\n maxlen=tf.reduce_max(input_lengths),\n name=\"input_sequence_masks\",\n )\n\n # Encoder Step.\n encoder_hidden_states = self.encoder(\n [input_ids, speaker_ids, input_mask], training=training\n )\n\n batch_size = tf.shape(encoder_hidden_states)[0]\n alignment_size = tf.shape(encoder_hidden_states)[1]\n\n # Setup some initial placeholders for decoder step. Include:\n # 1. mel_outputs, mel_lengths for teacher forcing mode.\n # 2. alignment_size for attention size.\n # 3. initial state for decoder cell.\n # 4. memory (encoder hidden state) for attention mechanism.\n self.decoder.sampler.setup_target(targets=mel_outputs, mel_lengths=mel_lengths)\n self.decoder.cell.set_alignment_size(alignment_size)\n self.decoder.setup_decoder_init_state(\n self.decoder.cell.get_initial_state(batch_size)\n )\n self.decoder.cell.attention_layer.setup_memory(\n memory=encoder_hidden_states,\n memory_sequence_length=input_lengths, # use for mask attention.\n )\n if use_window_mask:\n self.decoder.cell.attention_layer.setup_window(\n win_front=win_front, win_back=win_back\n )\n\n # run decode step.\n (\n (frames_prediction, stop_token_prediction, _),\n final_decoder_state,\n _,\n ) = dynamic_decode(self.decoder,\n maximum_iterations=maximum_iterations,\n enable_tflite_convertible=self.enable_tflite_convertible)\n\n decoder_output = tf.reshape(\n frames_prediction, [batch_size, -1, self.config.n_mels]\n )\n stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])\n\n residual = self.postnet(decoder_output, training=training)\n residual_projection = self.post_projection(residual)\n\n mel_outputs = decoder_output + residual_projection\n\n if self.enable_tflite_convertible:\n mask = tf.math.not_equal(\n tf.cast(tf.reduce_sum(tf.abs(decoder_output), axis=-1),\n dtype=tf.int32),\n 0)\n decoder_output = tf.expand_dims(\n tf.boolean_mask(decoder_output, mask), axis=0)\n mel_outputs = tf.expand_dims(\n tf.boolean_mask(mel_outputs, mask), axis=0)\n alignment_history = ()\n else:\n alignment_history = tf.transpose(\n final_decoder_state.alignment_history.stack(), [1, 2, 0]\n )\n\n return decoder_output, mel_outputs, stop_token_prediction, alignment_history\n\n @tf.function(\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec([None, None], dtype=tf.int32),\n tf.TensorSpec([None,], dtype=tf.int32),\n tf.TensorSpec([None,], dtype=tf.int32),\n ],\n )\n def inference(self, input_ids, input_lengths, speaker_ids):\n \"\"\"Call logic.\"\"\"\n # create input-mask based on input_lengths\n input_mask = tf.sequence_mask(\n input_lengths,\n maxlen=tf.reduce_max(input_lengths),\n name=\"input_sequence_masks\",\n )\n\n # Encoder Step.\n encoder_hidden_states = self.encoder(\n [input_ids, speaker_ids, input_mask], training=False\n )\n\n batch_size = tf.shape(encoder_hidden_states)[0]\n alignment_size = tf.shape(encoder_hidden_states)[1]\n\n # Setup some initial placeholders for decoder step. 
Include:\n # 1. batch_size for inference.\n # 2. alignment_size for attention size.\n # 3. initial state for decoder cell.\n # 4. memory (encoder hidden state) for attention mechanism.\n # 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)\n self.decoder.sampler.set_batch_size(batch_size)\n self.decoder.cell.set_alignment_size(alignment_size)\n self.decoder.setup_decoder_init_state(\n self.decoder.cell.get_initial_state(batch_size)\n )\n self.decoder.cell.attention_layer.setup_memory(\n memory=encoder_hidden_states,\n memory_sequence_length=input_lengths, # use for mask attention.\n )\n if self.use_window_mask:\n self.decoder.cell.attention_layer.setup_window(\n win_front=self.win_front, win_back=self.win_back\n )\n\n # run decode step.\n (\n (frames_prediction, stop_token_prediction, _),\n final_decoder_state,\n _,\n ) = dynamic_decode(self.decoder, maximum_iterations=self.maximum_iterations)\n\n decoder_output = tf.reshape(\n frames_prediction, [batch_size, -1, self.config.n_mels]\n )\n stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])\n\n residual = self.postnet(decoder_output, training=False)\n residual_projection = self.post_projection(residual)\n\n mel_outputs = decoder_output + residual_projection\n\n return decoder_output, mel_outputs, stop_token_prediction, alignment_history\n\n @tf.function(\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec([1, None], dtype=tf.int32),\n tf.TensorSpec([1,], dtype=tf.int32),\n tf.TensorSpec([1,], dtype=tf.int32),\n ],\n )\n def inference_tflite(self, input_ids, input_lengths, speaker_ids):\n \"\"\"Call logic.\"\"\"\n # create input-mask based on input_lengths\n input_mask = tf.sequence_mask(\n input_lengths,\n maxlen=tf.reduce_max(input_lengths),\n name=\"input_sequence_masks\",\n )\n\n # Encoder Step.\n encoder_hidden_states = self.encoder(\n [input_ids, speaker_ids, input_mask], training=False\n )\n\n batch_size = tf.shape(encoder_hidden_states)[0]\n alignment_size = tf.shape(encoder_hidden_states)[1]\n\n # Setup some initial placeholders for decoder step. Include:\n # 1. batch_size for inference.\n # 2. alignment_size for attention size.\n # 3. initial state for decoder cell.\n # 4. memory (encoder hidden state) for attention mechanism.\n # 5. window front/back to solve long sentence synthesize problems. 
(call after setup memory.)\n self.decoder.sampler.set_batch_size(batch_size)\n self.decoder.cell.set_alignment_size(alignment_size)\n self.decoder.setup_decoder_init_state(\n self.decoder.cell.get_initial_state(batch_size)\n )\n self.decoder.cell.attention_layer.setup_memory(\n memory=encoder_hidden_states,\n memory_sequence_length=input_lengths, # use for mask attention.\n )\n if self.use_window_mask:\n self.decoder.cell.attention_layer.setup_window(\n win_front=self.win_front, win_back=self.win_back\n )\n\n # run decode step.\n (\n (frames_prediction, stop_token_prediction, _),\n final_decoder_state,\n _,\n ) = dynamic_decode(self.decoder,\n maximum_iterations=self.maximum_iterations,\n enable_tflite_convertible=self.enable_tflite_convertible)\n\n decoder_output = tf.reshape(\n frames_prediction, [batch_size, -1, self.config.n_mels]\n )\n stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])\n\n residual = self.postnet(decoder_output, training=False)\n residual_projection = self.post_projection(residual)\n\n mel_outputs = decoder_output + residual_projection\n\n if self.enable_tflite_convertible:\n mask = tf.math.not_equal(\n tf.cast(tf.reduce_sum(tf.abs(decoder_output), axis=-1),\n dtype=tf.int32),\n 0)\n decoder_output = tf.expand_dims(\n tf.boolean_mask(decoder_output, mask), axis=0)\n mel_outputs = tf.expand_dims(\n tf.boolean_mask(mel_outputs, mask), axis=0)\n alignment_history = ()\n else:\n alignment_history = tf.transpose(\n final_decoder_state.alignment_history.stack(), [1, 2, 0]\n )\n\n return decoder_output, mel_outputs, stop_token_prediction, alignment_history\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.layers.StackedRNNCells",
"tensorflow.boolean_mask",
"tensorflow.gather",
"tensorflow.math.softplus",
"tensorflow.name_scope",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.layers.LSTMCell",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.TensorShape",
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"tensorflow.TensorArray",
"tensorflow.keras.layers.Dense",
"tensorflow.pow",
"tensorflow.nn.tanh",
"tensorflow.function",
"numpy.array",
"tensorflow.round",
"tensorflow.reduce_max",
"tensorflow.math.sqrt",
"tensorflow.keras.layers.Activation",
"tensorflow.constant",
"tensorflow.keras.layers.Conv1D",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"numpy.random.normal",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dropout",
"tensorflow.reduce_all",
"tensorflow.TensorSpec",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
ConnorPeng/PathPlanning | [
"28afd52864cb53b53fe2bbd876c2f22b6a4363d7"
] | [
"Sampling_based_Planning/rrt_2D/env.py"
] | [
"\"\"\"\nEnvironment for rrt_2D\n@author: huiming zhou\n\"\"\"\nimport numpy as np\nimport binvox_rw\nfrom matplotlib import pyplot as plt\n\n\n\nclass Env:\n def __init__(self):\n self.x_range = (0, 1000)\n self.y_range = (0, 1000)\n self.obs_boundary = self.obs_boundary()\n self.obs_circle = self.obs_circle()\n self.obs_rectangle = self.obs_rectangle()\n\n @staticmethod\n def obs_boundary():\n obs_boundary = [\n # [0, 0, 1, 30],\n # [0, 30, 50, 1],\n # [1, 0, 50, 1],\n # [50, 1, 1, 30]\n ]\n return obs_boundary\n\n @staticmethod\n def obs_rectangle():\n obs_rectangle = [\n # [14, 12, 8, 2],\n # [18, 22, 8, 3],\n # [26, 7, 2, 12],\n # [32, 14, 10, 2]\n ]\n return obs_rectangle\n\n @staticmethod\n def obs_circle():\n \n \n obs_cir = [\n # [7, 12, 3],\n # [46, 20, 2],\n # [15, 5, 2],\n # [37, 7, 3],\n # [37, 23, 3]\n \n ]\n img_array = np.load('map.npy')\n #print(img_array)\n for i in range (500):\n for j in range (500):\n if img_array[i][j] == 1:\n obs_cir.append([i,j,1])\n # for i in range(300):\n # if (z[i] > 100):\n # xPos = x[i]\n # yPos = y[i]\n # obs_cir.append([xPos, yPos, 1])\n \n\n return obs_cir\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Homeronius/ASL | [
"7cfd489c1603b94bce1e7f40c081896f7a69a77d"
] | [
"helper_scripts/plot_performance_alt.py"
] | [
"from os import path\nimport argparse\nfrom typing import Type\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom math import log2\n\nAMD = r\" $\\bf{AMD\\ Ryzen\\ 7\\ 4800H\\ (Zen\\ 2),\\ 2.9GHz}$\"\nINTEL = r\" $\\bf{Intel\\ i5-7300U\\ (Kaby\\ Lake),\\ 2.6GHz}$\"\n\n\ndef format_func(value, tick_number):\n # find exponent for tick\n return r\"$2^{{ {:} }}$\".format(int(log2(value)))\n\n\ndef read_dataset(path, system=\"intel\"):\n data = np.loadtxt(open(path, \"rb\"), delimiter=\",\", skiprows=1)\n if system == \"intel\":\n return data[:, 0], data[:, 1], data[:, 6], data[:, 2], data[:, 3], data[:, 10]\n else:\n return data[:, 0], data[:, 1], data[:, 6], data[:, 2], data[:, 3], data[:, 11]\n\n\ndef main(args):\n datadir = path.relpath(args.data_path)\n if not path.exists(datadir):\n raise Exception(\"directory {} does not exist\".format(datadir))\n\n sns.set_theme()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n system_name = INTEL if args.system == \"intel\" else AMD\n title = (\n r\"$\\bf{Performance\\ on}$\" if args.metric == \"fp/c\" else r\"$\\bf{Runtime\\ on}$\"\n )\n\n title += system_name\n if args.metric == \"fp/c\":\n title += \"\\n [flops/cycle]\"\n elif args.metric == \"time\":\n title += \"\\n [seconds]\"\n elif args.metric == \"cycles\":\n title += \"\\n [\" + r\"$\\times 10^9$\" + \" cycles]\"\n ax.set_title(title, loc=\"left\")\n\n mpts_mode = False\n\n freq = 2.6e9 if args.system == \"intel\" else 2.9e9\n for file in args.files[0]:\n fpath = path.join(datadir, file)\n N, D, flops, cycles, time, mpts = read_dataset(fpath, args.system)\n\n # hack hack hack\n mpts_mode = all(elem == N[0] for elem in N) and all(elem == D[0] for elem in D)\n dim_mode = not mpts_mode and all(elem == N[0] for elem in N)\n # flops *= 1e-9\n if args.metric == \"fp/c\":\n y = np.divide(flops, cycles)\n elif args.metric == \"time\":\n y = cycles / freq\n elif args.metric == \"cycles\":\n y = cycles / 1e9\n else:\n raise TypeError(\"unsupported metric to plot\")\n\n if not mpts_mode and not dim_mode:\n N = N[2:]\n y = y[2:]\n\n X_axis = []\n if mpts_mode:\n X_axis = mpts\n elif dim_mode:\n X_axis = D\n else:\n X_axis = N\n\n if args.x_scale == \"linear\":\n (line,) = ax.plot(X_axis, y, linestyle=\"-\", marker=\"o\")\n ax.xaxis.set_major_locator(plt.MultipleLocator(2048))\n # ax.xaxis.set_major_locator(plt.LogLocator(base=2, subs=[16]))\n # ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n # ax.set_xlim(right=17000)\n elif args.x_scale == \"log\":\n (line,) = ax.semilogx(X_axis, y, linestyle=\"-\", marker=\"o\", base=2)\n else:\n raise TypeError(\"unsupported scale for x-axis\")\n\n line.set_label(path.splitext(file)[0])\n\n ax.set_xlabel(\"k\" if mpts_mode else (\"d\" if dim_mode else \"n\"))\n # ax.set_ylabel('flops/cycle')\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n if args.save_path is None:\n plt.show()\n else:\n fig.savefig(args.save_path, bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--system\", type=str, default=\"intel\", choices=[\"intel\", \"amd\"])\n parser.add_argument(\n \"--data-path\",\n type=str,\n default=\"data/timings/\",\n help=\"relative path to where timings are stored\",\n )\n parser.add_argument(\n \"--files\",\n type=str,\n nargs=\"+\",\n required=True,\n action=\"append\",\n help=\"name of one or more .csv performance measurements\",\n )\n parser.add_argument(\n \"--save-path\",\n type=str,\n default=None,\n help=\"path or filename to 
store plot to\",\n )\n parser.add_argument(\n \"--metric\",\n type=str,\n default=\"fp/c\",\n help=\"which metric to plot on y axis\",\n choices=[\"fp/c\", \"cycles\", \"time\"],\n )\n parser.add_argument(\n \"--x-scale\",\n type=str,\n default=\"log\",\n help=\"scaling of x-axis\",\n choices=[\"log\", \"linear\"],\n )\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.MultipleLocator",
"numpy.divide",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siddsach/OpenNRE-PyTorch | [
"7b38a12b7920e5313cd7c3500cab70c53e4e290f"
] | [
"networks/classifier.py"
] | [
"import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nclass Classifier(nn.Module):\n def __init__(self, params):\n super(Classifier, self).__init__()\n self.params = params\n self.loss = nn.CrossEntropyLoss()\n def forward(self, logits, label):\n loss = self.loss(logits, label)\n _, output = torch.max(logits, dim = 1)\n return loss, output.data\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mraggi/DifferentialEvolutionPytorch | [
"f316da5de439a4b2cb6bef50b2daf3e9af803347"
] | [
"differential_evolution.py"
] | [
"import torch\nfrom progress_bar import progress_bar\nfrom helpers import *\n\ndef individual2population(f):\n return lambda P : torch.stack([f(p) for p in P])\n\nclass DifferentialEvolver:\n def __init__(self, f, \n initial_pop = None, # In next version, I'll remove the option of giving pop_size, dims and num_populations instead of initial_population\n pop_size=50, dim = (1,), # ignored if initial_pop is given\n num_populations=1, # If initial_pop is given, then num_populations must divide initial_pop.shape[0]\n proj_to_domain = lambda x : x, \n f_for_individuals = False, proj_for_individuals = None,\n maximize = False,\n use_cuda = False,\n prob_choosing_method = 'automatic', # either 'randint', 'multinomial' or 'automatic'\n chromosome_replacement_dimension = None # None means that every single number could be replaced independently from others.\n # 0 means the whole individual is either replaced or not (stupid!)\n # 1 means every component of the individual is either replaced or not, etc.\n ):\n \n if isinstance(dim,int): dim = (dim,)\n \n if initial_pop is None: P = torch.randn(pop_size*num_populations, *dim)\n else: P = initial_pop\n \n self.pop_size, *self.dim = P.shape\n self.num_populations = num_populations\n assert(self.pop_size%self.num_populations == 0)\n block_size = self.pop_size//self.num_populations\n \n if proj_for_individuals is None: proj_for_individuals = f_for_individuals\n\n if f_for_individuals: f = individual2population(f)\n if proj_for_individuals: proj_to_domain = individual2population(proj_to_domain)\n \n if use_cuda: P = P.cuda()\n \n P = proj_to_domain(P)\n\n self.use_randint = (prob_choosing_method in ['randint', 'random', 'rand_int'])\n \n if prob_choosing_method in ['automatic', 'auto', None]: self.use_randint = (block_size >= 100)\n \n if self.use_randint:\n n = self.pop_size\n s = self.num_populations\n b = n//s\n if s == 1: \n self._rand_indices = lambda : torch.randint(0,n,(3,n),device=P.device)\n else: \n S = torch.arange(s,device=P.device).repeat_interleave(b)[None].contiguous()\n self._rand_indices = lambda : S + torch.randint(0,b,(3,n),device=P.device)\n else:\n self.idx_prob = get_block_eye(block_size,self.num_populations).to(P)\n \n self.f = f if not maximize else (lambda x: -f(x)) \n self.cost = self.f(P).squeeze()\n self.P = P\n self.proj_to_domain = proj_to_domain\n self.maximize = maximize\n \n self._dims_1 = tuple([self.pop_size] + [1 for _ in self.dim])\n \n crp = chromosome_replacement_dimension\n if crp is None: crp = len(self.dim)\n self._crossp_dims = tuple([self.pop_size] + [d for d in self.dim[:crp]] + [1 for _ in self.dim[crp:]])\n \n def _cross_pollination(self, crossp):\n return (torch.rand(self._crossp_dims, device=self.P.device) < crossp).to(self.P)\n \n def shuffle(self):\n I = torch.randperm(self.P.shape[0], device=self.P.device)\n self.P = self.P[I]\n self.cost = self.cost[I]\n \n def step(self, mut=0.8, crossp=0.7):\n A,B,C = self._get_ABC()\n \n mutants = A + mut*(B - C)\n \n T = self._cross_pollination(crossp)\n \n candidates = self.proj_to_domain(T*mutants + (1-T)*self.P)\n f_candidates = self.f(candidates).squeeze()\n \n should_replace = (f_candidates <= self.cost)\n \n self.cost = torch.where(should_replace,f_candidates,self.cost)\n \n # adjust dimensions for broadcasting\n S = should_replace.to(self.P).view(*self._dims_1) \n \n self.P = S*candidates + (1-S)*self.P\n \n def best(self):\n best_cost, best_index = torch.min(self.cost, dim=0)\n if self.maximize:\n best_cost *= -1\n \n return best_cost.item(), 
self.P[best_index]\n \n def _get_ABC(self):\n I = self._rand_indices() if self.use_randint else torch.multinomial(self.idx_prob,3).T\n return self.P[I]\n \n \ndef optimize(f, initial_pop = None, \n pop_size=20, dim = (1,), \n num_populations=1, shuffles = 0,\n mut=0.8, crossp=0.7, \n epochs=1000,\n proj_to_domain = lambda x : x, \n f_for_individuals = False, proj_for_individuals = None, \n maximize = False,\n use_cuda = False,\n prob_choosing_method = 'automatic',\n chromosome_replacement_dimension = 1\n ):\n \n if num_populations == 1: shuffles = 0 # no point in shuffling otherwise!!\n \n D = DifferentialEvolver(f=f, \n initial_pop=initial_pop,\n pop_size=pop_size, dim = dim, \n num_populations=num_populations,\n proj_to_domain = proj_to_domain, \n f_for_individuals = f_for_individuals, \n proj_for_individuals = proj_for_individuals,\n maximize=maximize,\n use_cuda=use_cuda,\n prob_choosing_method=prob_choosing_method,\n chromosome_replacement_dimension = chromosome_replacement_dimension\n )\n if isinstance(epochs, int): epochs = range(epochs)\n mut, crossp = tofunc(mut), tofunc(crossp)\n \n pbar = progress_bar(epochs)\n \n test_each = 20\n \n try:\n remaining_before_test = test_each+1\n \n i = 0\n shuffles_so_far = 0\n \n for _ in pbar:\n remaining_before_test -= 1\n D.step(mut=mut(), crossp=crossp())\n \n i += 1\n progress = i/pbar.total\n \n if progress > (shuffles_so_far+1)/(shuffles+1):\n shuffles_so_far += 1\n D.shuffle()\n \n if remaining_before_test == 0:\n remaining_before_test = test_each\n best_cost, _ = D.best()\n pbar.comment = f\"| best cost = {best_cost:.4f}\"\n \n except KeyboardInterrupt:\n print(\"Interrupting! Returning best found so far\")\n \n return D.best()\n"
] | [
[
"torch.randint",
"torch.randperm",
"torch.randn",
"torch.min",
"torch.multinomial",
"torch.rand",
"torch.where",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leonardozcm/ASFM-Net-Review | [
"2584d2d098c760e559d3f632b72b9ad9881c59d5"
] | [
"core/test_baseline.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: XP\n\nimport logging\nimport torch\nfrom models.modelutils import fps_subsample\nimport utils.data_loaders\nimport utils.helpers\nfrom tqdm import tqdm\nfrom utils.average_meter import AverageMeter\nfrom utils.metrics import Metrics\nfrom utils.loss_utils import chamfer_sqrt\nfrom models.pcn import AutoEncoder as Model\n\n\ndef test_baseline(cfg, epoch_idx=-1, test_data_loader=None, test_writer=None, model=None):\n # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use\n torch.backends.cudnn.benchmark = True\n\n if test_data_loader is None:\n # Set up data loader\n dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](\n cfg)\n test_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(\n utils.data_loaders.DatasetSubset.TEST),\n batch_size=1,\n num_workers=cfg.CONST.NUM_WORKERS,\n collate_fn=utils.data_loaders.collate_fn,\n pin_memory=True,\n shuffle=False)\n\n # Setup networks and initialize networks\n if model is None:\n model = Model()\n if torch.cuda.is_available():\n model = torch.nn.DataParallel(model).cuda()\n\n logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))\n checkpoint = torch.load(cfg.CONST.WEIGHTS)\n model.load_state_dict(checkpoint['model'])\n\n # Switch models to evaluation mode\n model.eval()\n\n n_samples = len(test_data_loader)\n test_losses = AverageMeter(\n ['cd_fine'])\n test_metrics = AverageMeter(Metrics.names())\n category_metrics = dict()\n\n # Testing loop\n with tqdm(test_data_loader) as t:\n for model_idx, (taxonomy_id, model_id, data) in enumerate(t):\n taxonomy_id = taxonomy_id[0] if isinstance(\n taxonomy_id[0], str) else taxonomy_id[0].item()\n model_id = model_id[0]\n\n with torch.no_grad():\n for k, v in data.items():\n data[k] = utils.helpers.var_or_cuda(v)\n\n # partial = data['partial_cloud']\n gt = data['gtcloud']\n\n # downsample gt to 2048\n gt = fps_subsample(gt, cfg.NETWORK.NUM_GT_POINTS)\n input_pl = gt\n\n # preprocess transpose\n input_pl = input_pl.permute(0, 2, 1)\n\n v, _, y_detail = model(input_pl)\n\n y_detail = y_detail.permute(0, 2, 1)\n\n loss_fine = chamfer_sqrt(gt, y_detail)\n # print(gt.shape, \" \", y_detail.shape)\n\n cd_fine = loss_fine.item() * 1e3\n\n _metrics = [loss_fine]\n test_losses.update([cd_fine])\n\n test_metrics.update(_metrics)\n if taxonomy_id not in category_metrics:\n category_metrics[taxonomy_id] = AverageMeter(\n Metrics.names())\n category_metrics[taxonomy_id].update(_metrics)\n\n t.set_description('Test[%d/%d] Taxonomy = %s Sample = %s Losses = %s Metrics = %s' %\n (model_idx + 1, n_samples, taxonomy_id, model_id, ['%.4f' % l for l in test_losses.val()\n ], ['%.4f' % m for m in _metrics]))\n\n # Print testing results\n print('============================ TEST RESULTS ============================')\n print('Taxonomy', end='\\t')\n print('#Sample', end='\\t')\n for metric in test_metrics.items:\n print(metric, end='\\t')\n print()\n\n for taxonomy_id in category_metrics:\n print(taxonomy_id, end='\\t')\n print(category_metrics[taxonomy_id].count(0), end='\\t')\n for value in category_metrics[taxonomy_id].avg():\n print('%.4f' % value, end='\\t')\n print()\n\n print('Overall', end='\\t\\t\\t')\n for value in test_metrics.avg():\n print('%.4f' % value, end='\\t')\n print('\\n')\n\n print('Epoch ', epoch_idx, end='\\t')\n for value in test_losses.avg():\n print('%.4f' % value, end='\\t')\n print('\\n')\n\n # Add testing results to TensorBoard\n if test_writer is not None:\n 
test_writer.add_scalar('Loss/Epoch/cd_fine',\n test_losses.avg(0), epoch_idx)\n for i, metric in enumerate(test_metrics.items):\n test_writer.add_scalar('Metric/%s' %\n metric, test_metrics.avg(i), epoch_idx)\n\n return test_losses.avg(0)\n"
] | [
[
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiaow2/orofacial_pipeline | [
"fdfac5d1a2dd780f017966dc353f77eda1d21b93"
] | [
"pipeline/util.py"
] | [
"import numpy as np\n\nfrom . import (experiment, psth, ephys)\n\n\ndef _get_trial_event_times(events, units, trial_cond_name):\n \"\"\"\n Get median event start times from all unit-trials from the specified \"trial_cond_name\" and \"units\" - aligned to GO CUE\n For trials with multiple events of the same type, use the one occurred last\n :param events: list of events\n \"\"\"\n events = list(events) + ['go']\n\n tr_OI = (psth.TrialCondition().get_trials(trial_cond_name) & units).proj()\n tr_events = {}\n for eve in events:\n if eve not in tr_events:\n tr_events[eve] = (tr_OI.aggr(\n experiment.TrialEvent & {'trial_event_type': eve}, trial_event_id='max(trial_event_id)')\n * experiment.TrialEvent).fetch('trial_event_time', order_by='trial')\n\n present_events, event_starts = [], []\n for etype, etime in tr_events.items():\n if etype in events[:-1]:\n present_events.append(etype)\n event_starts.append(np.nanmedian(etime.astype(float) - tr_events[\"go\"].astype(float)))\n\n return np.array(present_events), np.array(event_starts)\n\n\ndef _get_stim_onset_time(units, trial_cond_name):\n stim_onsets = (experiment.PhotostimEvent.proj('photostim_event_time')\n * (experiment.TrialEvent & 'trial_event_type=\"go\"').proj(go_time='trial_event_time')\n & psth.TrialCondition().get_trials(trial_cond_name) & units).proj(\n stim_onset_from_go='photostim_event_time - go_time').fetch('stim_onset_from_go')\n return np.nanmean(stim_onsets.astype(float))\n\n\ndef _get_units_hemisphere(units):\n \"\"\"\n Return the hemisphere (\"left\" or \"right\") that the specified units belong to,\n based on the targeted insertion location - \"ephys.ProbeInsertion.InsertionLocation\"\n :param units: either a list of unit_keys or a query of the ephys.Unit table\n :return: \"left\" or \"right\"\n \"\"\"\n ml_locations = np.unique((ephys.ProbeInsertion.InsertionLocation & units).fetch('ml_location'))\n if len(ml_locations) == 0:\n raise Exception('No ProbeInsertion.InsertionLocation available')\n if (ml_locations > 0).any() and (ml_locations < 0).any():\n raise ValueError('The specified units belongs to both hemispheres...')\n if (ml_locations > 0).all():\n return 'right'\n elif (ml_locations < 0).all():\n return 'left'\n else:\n assert (ml_locations == 0).all() # sanity check\n raise ValueError('Ambiguous hemisphere: ML locations are all 0...')\n\n\ndef _get_clustering_method(probe_insertion):\n \"\"\"\n Return the \"clustering_method\" used to estimate the all the units for the provided \"probe_insertion\"\n :param probe_insertion: an \"ephys.ProbeInsertion\" key\n :return: clustering_method\n \"\"\"\n clustering_methods = (ephys.ClusteringMethod & (ephys.Unit & probe_insertion)).fetch('clustering_method')\n if len(clustering_methods) == 1:\n return clustering_methods[0]\n else:\n raise ValueError(f'Found multiple clustering methods: {clustering_methods}')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pauldmccarthy/fslpy | [
"4ef642f362486f42628a1c81dcdd8ea6b92a5faf"
] | [
"tests/test_transform/test_nonlinear.py"
] | [
"#!/usr/bin/env python\n\nimport itertools as it\nimport os.path as op\n\nimport numpy as np\n\nimport fsl.data.image as fslimage\nimport fsl.utils.image.resample as resample\nimport fsl.utils.image.roi as roi\nimport fsl.transform.affine as affine\nimport fsl.transform.nonlinear as nonlinear\nimport fsl.transform.fnirt as fnirt\n\n\ndatadir = op.join(op.dirname(__file__), 'testdata')\n\n\ndef _random_image():\n vx, vy, vz = np.random.randint(10, 50, 3)\n dx, dy, dz = np.random.randint( 1, 10, 3)\n data = (np.random.random((vx, vy, vz)) - 0.5) * 10\n aff = affine.compose(\n (dx, dy, dz),\n np.random.randint(1, 100, 3),\n np.random.random(3) * np.pi / 2)\n\n return fslimage.Image(data, xform=aff)\n\n\ndef _random_field():\n\n src = _random_image()\n vx, vy, vz = np.random.randint(10, 50, 3)\n dx, dy, dz = np.random.randint( 1, 10, 3)\n\n field = (np.random.random((vx, vy, vz, 3)) - 0.5) * 10\n aff = affine.compose(\n (dx, dy, dz),\n np.random.randint(1, 100, 3),\n np.random.random(3) * np.pi / 2)\n\n return nonlinear.DeformationField(field, src=src, xform=aff)\n\n\ndef _affine_field(src, ref, xform, srcSpace, refSpace, shape=None, fv2w=None):\n\n if shape is None: shape = ref.shape[:3]\n if fv2w is None: fv2w = ref.getAffine('voxel', 'world')\n\n rx, ry, rz = np.meshgrid(np.arange(shape[0]),\n np.arange(shape[1]),\n np.arange(shape[2]), indexing='ij')\n\n rvoxels = np.vstack((rx.flatten(), ry.flatten(), rz.flatten())).T\n f2r = affine.concat(ref.getAffine('world', refSpace), fv2w)\n rcoords = affine.transform(rvoxels, f2r)\n scoords = affine.transform(rcoords, xform)\n\n field = np.zeros(list(shape[:3]) + [3])\n field[:] = (scoords - rcoords).reshape(*it.chain(shape, [3]))\n field = nonlinear.DeformationField(field, src, ref,\n srcSpace=srcSpace,\n refSpace=refSpace,\n xform=fv2w,\n header=ref.header,\n defType='relative')\n return field\n\n\ndef _random_affine_field():\n\n src = _random_image()\n ref = _random_image()\n\n # our test field just encodes an affine\n xform = affine.compose(\n np.random.randint(2, 5, 3),\n np.random.randint(1, 10, 3),\n np.random.random(3))\n\n rx, ry, rz = np.meshgrid(np.arange(ref.shape[0]),\n np.arange(ref.shape[1]),\n np.arange(ref.shape[2]), indexing='ij')\n\n rvoxels = np.vstack((rx.flatten(), ry.flatten(), rz.flatten())).T\n rcoords = affine.transform(rvoxels, ref.voxToScaledVoxMat)\n scoords = affine.transform(rcoords, xform)\n\n field = np.zeros(list(ref.shape[:3]) + [3])\n field[:] = (scoords - rcoords).reshape(*it.chain(ref.shape, [3]))\n field = nonlinear.DeformationField(field, src, ref,\n header=ref.header,\n defType='relative')\n return field, xform\n\n\ndef _field_coords(field):\n vx, vy, vz = field.shape[ :3]\n coords = np.meshgrid(np.arange(vx),\n np.arange(vy),\n np.arange(vz), indexing='ij')\n coords = np.array(coords).transpose((1, 2, 3, 0))\n return affine.transform(\n coords.reshape(-1, 3),\n field.getAffine('voxel', 'fsl')).reshape(field.shape)\n\n\ndef test_detectDeformationType():\n relfield = _random_field()\n coords = _field_coords(relfield)\n absfield = nonlinear.DeformationField(\n relfield.data + coords,\n src=relfield.src,\n xform=relfield.voxToWorldMat)\n\n assert nonlinear.detectDeformationType(relfield) == 'relative'\n assert nonlinear.detectDeformationType(absfield) == 'absolute'\n\n\ndef test_convertDeformationType():\n\n relfield = _random_field()\n coords = _field_coords(relfield)\n absfield = nonlinear.DeformationField(\n relfield.data + coords,\n src=relfield.src,\n xform=relfield.voxToWorldMat)\n\n gotconvrel1 = 
nonlinear.convertDeformationType(relfield)\n gotconvabs1 = nonlinear.convertDeformationType(absfield)\n gotconvrel2 = nonlinear.convertDeformationType(relfield, 'absolute')\n gotconvabs2 = nonlinear.convertDeformationType(absfield, 'relative')\n\n tol = dict(atol=1e-3, rtol=1e-3)\n\n assert np.all(np.isclose(gotconvrel1, absfield.data, **tol))\n assert np.all(np.isclose(gotconvabs1, relfield.data, **tol))\n assert np.all(np.isclose(gotconvrel2, absfield.data, **tol))\n assert np.all(np.isclose(gotconvabs2, relfield.data, **tol))\n\n\ndef test_convertDeformationSpace():\n\n basefield, xform = _random_affine_field()\n src = basefield.src\n ref = basefield.ref\n\n # generate reference fsl->fsl coordinate mappings\n\n # For each combination of srcspace->tospace\n # Generate random coordinates, check that\n # displacements are correct\n spaces = ['fsl', 'voxel', 'world']\n spaces = list(it.combinations_with_replacement(spaces, 2))\n spaces = spaces + [(r, s) for s, r in spaces]\n spaces = list(set(spaces))\n\n for from_, to in spaces:\n\n refcoords = [np.random.randint(0, basefield.shape[0], 5),\n np.random.randint(0, basefield.shape[1], 5),\n np.random.randint(0, basefield.shape[2], 5)]\n refcoords = np.array(refcoords, dtype=int).T\n refcoords = affine.transform(refcoords, ref.voxToScaledVoxMat)\n srccoords = basefield.transform(refcoords)\n\n field = nonlinear.convertDeformationSpace(basefield, from_, to)\n premat = ref.getAffine('fsl', from_)\n postmat = src.getAffine('fsl', to)\n\n input = affine.transform(refcoords, premat)\n expect = affine.transform(srccoords, postmat)\n\n got = field.transform(input)\n enan = np.isnan(expect)\n gnan = np.isnan(got)\n\n assert np.all(np.isclose(enan, gnan))\n assert np.all(np.isclose(expect[~enan], got[~gnan]))\n\n\ndef test_DeformationField_transform():\n\n relfield, xform = _random_affine_field()\n src = relfield.src\n ref = relfield.ref\n\n rx, ry, rz = np.meshgrid(np.arange(ref.shape[0]),\n np.arange(ref.shape[1]),\n np.arange(ref.shape[2]), indexing='ij')\n rvoxels = np.vstack((rx.flatten(), ry.flatten(), rz.flatten())).T\n rcoords = affine.transform(rvoxels, ref.voxToScaledVoxMat)\n scoords = affine.transform(rcoords, xform)\n svoxels = affine.transform(scoords, src.scaledVoxToVoxMat)\n\n absfield = np.zeros(list(ref.shape[:3]) + [3])\n absfield[:] = scoords.reshape(*it.chain(ref.shape, [3]))\n absfield = nonlinear.DeformationField(absfield, src, ref,\n header=ref.header,\n defType='absolute')\n\n got = relfield.transform(rcoords)\n assert np.all(np.isclose(got, scoords))\n got = absfield.transform(rcoords)\n assert np.all(np.isclose(got, scoords))\n\n # test single set of coords\n got = absfield.transform(rcoords[0])\n assert np.all(np.isclose(got, scoords[0]))\n\n got = relfield.transform(rvoxels, from_='voxel', to='voxel')\n assert np.all(np.isclose(got, svoxels))\n got = absfield.transform(rvoxels, from_='voxel', to='voxel')\n assert np.all(np.isclose(got, svoxels))\n\n # test out of bounds are returned as nan\n rvoxels = np.array([[-1, -1, -1],\n [ 0, 0, 0]])\n rcoords = affine.transform(rvoxels, ref.voxToScaledVoxMat)\n scoords = affine.transform(rcoords, xform)\n svoxels = affine.transform(scoords, src.scaledVoxToVoxMat)\n\n got = relfield.transform(rcoords)\n assert np.all(np.isnan(got[0, :]))\n assert np.all(np.isclose(got[1, :], scoords[1, :]))\n got = absfield.transform(rcoords)\n assert np.all(np.isnan(got[0, :]))\n assert np.all(np.isclose(got[1, :], scoords[1, :]))\n\n\ndef test_CoefficientField_displacements():\n\n nldir = 
op.join(datadir, 'nonlinear')\n src = op.join(nldir, 'src.nii.gz')\n ref = op.join(nldir, 'ref.nii.gz')\n cf = op.join(nldir, 'coefficientfield.nii.gz')\n df = op.join(nldir, 'displacementfield_no_premat.nii.gz')\n\n src = fslimage.Image(src)\n ref = fslimage.Image(ref)\n cf = fnirt.readFnirt(cf, src, ref)\n df = fnirt.readFnirt(df, src, ref)\n\n ix, iy, iz = ref.shape[:3]\n x, y, z = np.meshgrid(np.arange(ix),\n np.arange(iy),\n np.arange(iz), indexing='ij')\n x = x.flatten()\n y = y.flatten()\n z = z.flatten()\n xyz = np.vstack((x, y, z)).T\n\n disps = cf.displacements(xyz)\n disps = disps.reshape(df.shape)\n\n tol = dict(atol=1e-5, rtol=1e-5)\n assert np.all(np.isclose(disps, df.data, **tol))\n\n\ndef test_CoefficientField_transform():\n nldir = op.join(datadir, 'nonlinear')\n src = op.join(nldir, 'src.nii.gz')\n ref = op.join(nldir, 'ref.nii.gz')\n cf = op.join(nldir, 'coefficientfield.nii.gz')\n df = op.join(nldir, 'displacementfield.nii.gz')\n dfnp = op.join(nldir, 'displacementfield_no_premat.nii.gz')\n\n src = fslimage.Image(src)\n ref = fslimage.Image(ref)\n cf = fnirt.readFnirt(cf, src, ref)\n df = fnirt.readFnirt(df, src, ref)\n dfnp = fnirt.readFnirt(dfnp, src, ref)\n\n spaces = ['fsl', 'voxel', 'world']\n spaces = list(it.combinations_with_replacement(spaces, 2))\n spaces = spaces + [(r, s) for s, r in spaces]\n spaces = list(set(spaces))\n\n rx, ry, rz = np.meshgrid(np.arange(ref.shape[0]),\n np.arange(ref.shape[1]),\n np.arange(ref.shape[2]), indexing='ij')\n rvoxels = np.vstack((rx.flatten(), ry.flatten(), rz.flatten())).T\n\n refcoords = {\n 'voxel' : rvoxels,\n 'fsl' : affine.transform(rvoxels, ref.getAffine('voxel', 'fsl')),\n 'world' : affine.transform(rvoxels, ref.getAffine('voxel', 'world'))\n }\n\n srccoords = refcoords['fsl'] + df.data.reshape(-1, 3)\n srccoords = {\n 'voxel' : affine.transform(srccoords, src.getAffine('fsl', 'voxel')),\n 'fsl' : srccoords,\n 'world' : affine.transform(srccoords, src.getAffine('fsl', 'world'))\n }\n\n srccoordsnp = refcoords['fsl'] + dfnp.data.reshape(-1, 3)\n srccoordsnp = {\n 'voxel' : affine.transform(srccoordsnp, src.getAffine('fsl', 'voxel')),\n 'fsl' : srccoordsnp,\n 'world' : affine.transform(srccoordsnp, src.getAffine('fsl', 'world'))\n }\n\n tol = dict(atol=1e-5, rtol=1e-5)\n for srcspace, refspace in spaces:\n got = cf.transform(refcoords[refspace], refspace, srcspace)\n gotnp = cf.transform(refcoords[refspace], refspace, srcspace,\n premat=False)\n assert np.all(np.isclose(got, srccoords[ srcspace], **tol))\n assert np.all(np.isclose(gotnp, srccoordsnp[srcspace], **tol))\n\n\ndef test_coefficientField_transform_altref():\n\n # test coordinates (manually determined).\n # original ref image is 2mm isotropic,\n # resampled is 1mm. 
Each tuple contains:\n #\n # (src, ref2mm, ref1mm)\n coords = [\n ((18.414, 26.579, 25.599), (11, 19, 11), (22, 38, 22)),\n ((14.727, 22.480, 20.340), ( 8, 17, 8), (16, 34, 16)),\n ((19.932, 75.616, 27.747), (11, 45, 5), (22, 90, 10))\n ]\n\n nldir = op.join(datadir, 'nonlinear')\n src = op.join(nldir, 'src.nii.gz')\n ref = op.join(nldir, 'ref.nii.gz')\n cf = op.join(nldir, 'coefficientfield.nii.gz')\n\n src = fslimage.Image(src)\n ref2mm = fslimage.Image(ref)\n ref1mm = ref2mm.adjust((1, 1, 1))\n cfref2mm = fnirt.readFnirt(cf, src, ref2mm)\n cfref1mm = fnirt.readFnirt(cf, src, ref1mm)\n\n for srcc, ref2mmc, ref1mmc in coords:\n ref2mmc = cfref2mm.transform(ref2mmc, 'voxel', 'voxel')\n ref1mmc = cfref1mm.transform(ref1mmc, 'voxel', 'voxel')\n\n assert np.all(np.isclose(ref2mmc, srcc, 1e-4))\n assert np.all(np.isclose(ref1mmc, srcc, 1e-4))\n\n\ndef test_coefficientFieldToDeformationField():\n\n nldir = op.join(datadir, 'nonlinear')\n src = op.join(nldir, 'src.nii.gz')\n ref = op.join(nldir, 'ref.nii.gz')\n cf = op.join(nldir, 'coefficientfield.nii.gz')\n df = op.join(nldir, 'displacementfield.nii.gz')\n dfnp = op.join(nldir, 'displacementfield_no_premat.nii.gz')\n\n src = fslimage.Image(src)\n ref = fslimage.Image(ref)\n cf = fnirt.readFnirt(cf, src, ref)\n rdf = fnirt.readFnirt(df, src, ref)\n rdfnp = fnirt.readFnirt(dfnp, src, ref)\n adf = nonlinear.convertDeformationType(rdf)\n adfnp = nonlinear.convertDeformationType(rdfnp)\n\n rcnv = nonlinear.coefficientFieldToDeformationField(cf)\n acnv = nonlinear.coefficientFieldToDeformationField(cf,\n defType='absolute')\n acnvnp = nonlinear.coefficientFieldToDeformationField(cf,\n defType='absolute',\n premat=False)\n rcnvnp = nonlinear.coefficientFieldToDeformationField(cf,\n premat=False)\n\n tol = dict(atol=1e-5, rtol=1e-5)\n assert np.all(np.isclose(rcnv.data, rdf .data, **tol))\n assert np.all(np.isclose(acnv.data, adf .data, **tol))\n assert np.all(np.isclose(rcnvnp.data, rdfnp.data, **tol))\n assert np.all(np.isclose(acnvnp.data, adfnp.data, **tol))\n\n\ndef test_applyDeformation():\n\n src2ref = affine.compose(\n np.random.randint(2, 5, 3),\n np.random.randint(1, 10, 3),\n np.random.random(3))\n ref2src = affine.invert(src2ref)\n\n srcdata = np.random.randint(1, 65536, (10, 10, 10))\n refdata = np.random.randint(1, 65536, (10, 10, 10))\n\n src = fslimage.Image(srcdata)\n ref = fslimage.Image(refdata, xform=src2ref)\n field = _affine_field(src, ref, ref2src, 'world', 'world')\n\n expect, xf = resample.resampleToReference(\n src, ref, matrix=src2ref, order=1, mode='nearest')\n result = nonlinear.applyDeformation(\n src, field, order=1, mode='nearest')\n\n assert np.all(np.isclose(expect, result))\n\n\ndef test_applyDeformation_altsrc():\n\n src2ref = affine.compose(\n np.random.randint(2, 5, 3),\n np.random.randint(1, 10, 3),\n [0, 0, 0])\n ref2src = affine.invert(src2ref)\n\n srcdata = np.random.randint(1, 65536, (10, 10, 10))\n refdata = np.random.randint(1, 65536, (10, 10, 10))\n\n src = fslimage.Image(srcdata)\n ref = fslimage.Image(refdata, xform=src2ref)\n field = _affine_field(src, ref, ref2src, 'world', 'world')\n\n # First try a down-sampled version\n # of the original source image\n altsrc, xf = resample.resample(src, (5, 5, 5), origin='corner')\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest')\n assert np.all(np.isclose(expect, 
result))\n\n # Now try a down-sampled ROI\n # of the original source image\n altsrc = roi.roi(src, [(2, 9), (2, 9), (2, 9)])\n altsrc, xf = resample.resample(altsrc, (4, 4, 4))\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest')\n assert np.all(np.isclose(expect, result))\n\n # down-sampled and offset ROI\n # of the original source image\n altsrc = roi.roi(src, [(-5, 8), (-5, 8), (-5, 8)])\n altsrc, xf = resample.resample(altsrc, (6, 6, 6))\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest')\n assert np.all(np.isclose(expect, result))\n\n\ndef test_applyDeformation_premat():\n\n src2ref = affine.compose(\n np.random.randint(2, 5, 3),\n np.random.randint(1, 10, 3),\n [0, 0, 0])\n ref2src = affine.invert(src2ref)\n\n srcdata = np.random.randint(1, 65536, (10, 10, 10))\n refdata = np.random.randint(1, 65536, (10, 10, 10))\n\n src = fslimage.Image(srcdata)\n ref = fslimage.Image(refdata, xform=src2ref)\n field = _affine_field(src, ref, ref2src, 'world', 'world')\n\n # First try a down-sampled version\n # of the original source image\n altsrc, xf = resample.resample(src, (5, 5, 5), origin='corner')\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n premat = affine.concat(src .getAffine('world', 'voxel'),\n altsrc.getAffine('voxel', 'world'))\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest', premat=premat)\n assert np.all(np.isclose(expect, result))\n\n # Now try a down-sampled ROI\n # of the original source image\n altsrc = roi.roi(src, [(2, 9), (2, 9), (2, 9)])\n altsrc, xf = resample.resample(altsrc, (4, 4, 4))\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n premat = affine.concat(src .getAffine('world', 'voxel'),\n altsrc.getAffine('voxel', 'world'))\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest', premat=premat)\n assert np.all(np.isclose(expect, result))\n\n # down-sampled and offset ROI\n # of the original source image\n altsrc = roi.roi(src, [(-5, 8), (-5, 8), (-5, 8)])\n altsrc, xf = resample.resample(altsrc, (6, 6, 6))\n altsrc = fslimage.Image(altsrc, xform=xf, header=src.header)\n expect, xf = resample.resampleToReference(\n altsrc, ref, matrix=src2ref, order=1, mode='nearest')\n premat = affine.concat(src .getAffine('world', 'voxel'),\n altsrc.getAffine('voxel', 'world'))\n result = nonlinear.applyDeformation(\n altsrc, field, order=1, mode='nearest', premat=premat)\n assert np.all(np.isclose(expect, result))\n\n\ndef test_applyDeformation_altref():\n src2ref = affine.compose(\n np.random.randint(2, 5, 3),\n np.random.randint(1, 10, 3),\n np.random.random(3))\n ref2src = affine.invert(src2ref)\n\n srcdata = np.random.randint(1, 65536, (10, 10, 10))\n refdata = np.random.randint(1, 65536, (10, 10, 10))\n\n src = fslimage.Image(srcdata)\n ref = fslimage.Image(refdata, xform=src2ref)\n field = _affine_field(src, ref, ref2src, 'world', 'world')\n\n altrefxform = affine.concat(\n src2ref,\n affine.scaleOffsetXform([1, 
1, 1], [5, 0, 0]))\n\n altref = fslimage.Image(refdata, xform=altrefxform)\n\n expect, xf = resample.resampleToReference(\n src, altref, matrix=src2ref, order=1, mode='constant', cval=0)\n result = nonlinear.applyDeformation(\n src, field, ref=altref, order=1, mode='constant', cval=0)\n\n # boundary voxels can get truncated\n # (4 is the altref-ref overlap boundary)\n expect[4, :, :] = 0\n result[4, :, :] = 0\n expect = expect[1:-1, 1:-1, 1:-1]\n result = result[1:-1, 1:-1, 1:-1]\n\n assert np.all(np.isclose(expect, result))\n\n\n# test when reference/field\n# are not voxel-aligned\ndef test_applyDeformation_worldAligned():\n refv2w = affine.scaleOffsetXform([1, 1, 1], [10, 10, 10])\n fieldv2w = affine.scaleOffsetXform([2, 2, 2], [10.5, 10.5, 10.5])\n src2ref = refv2w\n ref2src = affine.invert(src2ref)\n\n srcdata = np.random.randint(1, 65536, (10, 10, 10))\n\n src = fslimage.Image(srcdata)\n ref = fslimage.Image(srcdata, xform=src2ref)\n field = _affine_field(src, ref, ref2src, 'world', 'world',\n shape=(5, 5, 5), fv2w=fieldv2w)\n\n field = nonlinear.DeformationField(\n nonlinear.convertDeformationType(field, 'absolute'),\n header=field.header,\n src=src,\n ref=ref,\n srcSpace='world',\n refSpace='world',\n defType='absolute')\n\n expect, xf = resample.resampleToReference(\n src, ref, matrix=src2ref, order=1, mode='constant', cval=0)\n result = nonlinear.applyDeformation(\n src, field, order=1, mode='constant', cval=0)\n\n expect = expect[1:-1, 1:-1, 1:-1]\n result = result[1:-1, 1:-1, 1:-1]\n\n assert np.all(np.isclose(expect, result))\n"
] | [
[
"numpy.random.random",
"numpy.isnan",
"numpy.arange",
"numpy.random.randint",
"numpy.array",
"numpy.vstack",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sauceda/deepqmc | [
"36ae8cf5bcb1ae723d00dd34fa2b023662f90a24"
] | [
"tests/test_det.py"
] | [
"import pytest\nimport torch\n\nfrom deepqmc import torchext\n\n\[email protected]\ndef xs():\n return torch.randn(10, 4, 4).double().requires_grad_()\n\n\ndef test_1st_deriv(xs):\n assert torch.autograd.gradcheck(torchext.bdet, xs)\n\n\ndef test_2nd_deriv(xs):\n assert torch.autograd.gradgradcheck(torchext.bdet, xs)\n\n\ndef test_3rd_deriv(xs):\n def func(xs):\n ys = torchext.bdet(xs)\n (dys,) = torch.autograd.grad(ys, xs, torch.ones_like(ys), create_graph=True)\n (ddys,) = torch.autograd.grad(dys, xs, torch.ones_like(xs), create_graph=True)\n return (ddys ** 2).sum()\n\n assert torch.autograd.gradcheck(func, xs)\n"
] | [
[
"torch.randn",
"torch.autograd.gradgradcheck",
"torch.autograd.gradcheck",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
misialq/qiime2 | [
"6d8932eda130d4a9356f977fece2e252c135d0b9"
] | [
"qiime2/sdk/action.py"
] | [
"# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2021, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport abc\nimport concurrent.futures\nimport inspect\nimport tempfile\nimport textwrap\nimport itertools\n\nimport decorator\n\nimport qiime2.sdk\nimport qiime2.core.type as qtype\nimport qiime2.core.archive as archive\nfrom qiime2.core.util import LateBindingAttribute, DropFirstParameter, tuplize\n\n\ndef _subprocess_apply(action, args, kwargs):\n # Preprocess input artifacts as we've got pickled clones which shouldn't\n # self-destruct.\n for arg in itertools.chain(args, kwargs.values()):\n if isinstance(arg, qiime2.sdk.Artifact):\n # We can't rely on the subprocess preventing atexit hooks as the\n # destructor is also called when the artifact goes out of scope\n # (which happens).\n arg._destructor.detach()\n\n results = action(*args, **kwargs)\n for r in results:\n # The destructor doesn't keep its detatched state when sent back to the\n # main process. Something about the context-manager from ctx seems to\n # cause a GC of the artifacts before the process actually ends, so we\n # do need to detach these. The specifics are not understood.\n r._destructor.detach()\n return results\n\n\nclass Action(metaclass=abc.ABCMeta):\n \"\"\"QIIME 2 Action\"\"\"\n type = 'action'\n _ProvCaptureCls = archive.ActionProvenanceCapture\n\n __call__ = LateBindingAttribute('_dynamic_call')\n asynchronous = LateBindingAttribute('_dynamic_async')\n\n # Converts a callable's signature into its wrapper's signature (i.e.\n # converts the \"view API\" signature into the \"artifact API\" signature).\n # Accepts a callable as input and returns a callable as output with\n # converted signature.\n @abc.abstractmethod\n def _callable_sig_converter_(self, callable):\n raise NotImplementedError\n\n # Executes a callable on the provided `view_args`, wrapping and returning\n # the callable's outputs. In other words, executes the \"view API\", wrapping\n # and returning the outputs as the \"artifact API\". `view_args` is a dict\n # mapping parameter name to unwrapped value (i.e. view). `view_args`\n # contains an entry for each parameter accepted by the wrapper. It is the\n # executor's responsibility to perform any additional transformations on\n # these parameters, or provide extra parameters, in order to execute the\n # callable. `output_types` is an OrderedDict mapping output name to QIIME\n # type (e.g. 
semantic type).\n @abc.abstractmethod\n def _callable_executor_(self, scope, view_args, output_types):\n raise NotImplementedError\n\n # Private constructor\n @classmethod\n def _init(cls, callable, signature, plugin_id, name, description,\n citations, deprecated, examples):\n \"\"\"\n\n Parameters\n ----------\n callable : callable\n signature : qiime2.core.type.Signature\n plugin_id : str\n name : str\n Human-readable name for this action.\n description : str\n Human-readable description for this action.\n\n \"\"\"\n self = cls.__new__(cls)\n self.__init(callable, signature, plugin_id, name, description,\n citations, deprecated, examples)\n return self\n\n # This \"extra private\" constructor is necessary because `Action` objects\n # can be initialized from a static (classmethod) context or on an\n # existing instance (see `_init` and `__setstate__`, respectively).\n def __init(self, callable, signature, plugin_id, name, description,\n citations, deprecated, examples):\n self._callable = callable\n self.signature = signature\n self.plugin_id = plugin_id\n self.name = name\n self.description = description\n self.citations = citations\n self.deprecated = deprecated\n self.examples = examples\n\n self.id = callable.__name__\n self._dynamic_call = self._get_callable_wrapper()\n self._dynamic_async = self._get_async_wrapper()\n\n def __init__(self):\n raise NotImplementedError(\n \"%s constructor is private.\" % self.__class__.__name__)\n\n @property\n def source(self):\n \"\"\"\n The source code for the action's callable.\n\n Returns\n -------\n str\n The source code of this action's callable formatted as Markdown\n text.\n\n \"\"\"\n try:\n source = inspect.getsource(self._callable)\n except OSError:\n raise TypeError(\n \"Cannot retrieve source code for callable %r\" %\n self._callable.__name__)\n return markdown_source_template % {'source': source}\n\n def get_import_path(self, include_self=True):\n path = f'qiime2.plugins.{self.plugin_id}.{self.type}s'\n if include_self:\n path += f'.{self.id}'\n return path\n\n def __repr__(self):\n return \"<%s %s>\" % (self.type, self.get_import_path())\n\n def __getstate__(self):\n return {\n 'callable': self._callable,\n 'signature': self.signature,\n 'plugin_id': self.plugin_id,\n 'name': self.name,\n 'description': self.description,\n 'citations': self.citations,\n 'deprecated': self.deprecated,\n 'examples': self.examples,\n }\n\n def __setstate__(self, state):\n self.__init(**state)\n\n def _bind(self, context_factory):\n \"\"\"Bind an action to a Context factory, returning a decorated function.\n\n This is a very primitive API and should be used primarily by the\n framework and very advanced interfaces which need deep control over\n the calling semantics of pipelines and garbage collection.\n\n The basic idea behind this is outlined as follows:\n\n Every action is defined as an *instance* that a plugin constructs.\n This means that `self` represents the internal details as to what\n the action is. If you need to associate additional state with the\n *application* of an action, you cannot mutate `self` without\n changing all future applications. So there needs to be an\n additional instance variable that can serve as the state of a given\n application. We call this a Context object. It is also important\n that each application of an action has *independent* state, so\n providing an instance of Context won't work. We need a factory.\n\n Parameterizing the context is necessary because it is possible for\n an action to call other actions. 
The details need to be coordinated\n behind the scenes to the user, so we can parameterize the behavior\n by providing different context factories to `bind` at different\n points in the \"call stack\".\n\n \"\"\"\n def bound_callable(*args, **kwargs):\n # This function's signature is rewritten below using\n # `decorator.decorator`. When the signature is rewritten,\n # args[0] is the function whose signature was used to rewrite\n # this function's signature.\n args = args[1:]\n ctx = context_factory()\n # Set up a scope under which we can track destructable references\n # if something goes wrong, the __exit__ handler of this context\n # manager will clean up. (It also cleans up when things go right)\n with ctx as scope:\n provenance = self._ProvCaptureCls(\n self.type, self.plugin_id, self.id)\n scope.add_reference(provenance)\n\n # Collate user arguments\n user_input = {name: value for value, name in\n zip(args, self.signature.signature_order)}\n user_input.update(kwargs)\n\n # Type management\n self.signature.check_types(**user_input)\n output_types = self.signature.solve_output(**user_input)\n callable_args = {}\n\n # Record parameters\n for name, spec in self.signature.parameters.items():\n parameter = callable_args[name] = user_input[name]\n provenance.add_parameter(name, spec.qiime_type, parameter)\n\n # Record and transform inputs\n for name, spec in self.signature.inputs.items():\n artifact = user_input[name]\n provenance.add_input(name, artifact)\n if artifact is None:\n callable_args[name] = None\n elif spec.has_view_type():\n recorder = provenance.transformation_recorder(name)\n if qtype.is_collection_type(spec.qiime_type):\n # Always put in a list. Sometimes the view isn't\n # hashable, which isn't relevant, but would break\n # a Set[SomeType].\n callable_args[name] = [\n a._view(spec.view_type, recorder)\n for a in user_input[name]]\n else:\n callable_args[name] = artifact._view(\n spec.view_type, recorder)\n else:\n callable_args[name] = artifact\n\n if self.deprecated:\n with qiime2.core.util.warning() as warn:\n warn(self._build_deprecation_message(),\n FutureWarning)\n\n # Execute\n outputs = self._callable_executor_(scope, callable_args,\n output_types, provenance)\n\n if len(outputs) != len(self.signature.outputs):\n raise ValueError(\n \"Number of callable outputs must match number of \"\n \"outputs defined in signature: %d != %d\" %\n (len(outputs), len(self.signature.outputs)))\n\n # Wrap in a Results object mapping output name to value so\n # users have access to outputs by name or position.\n return qiime2.sdk.Results(self.signature.outputs.keys(),\n outputs)\n\n bound_callable = self._rewrite_wrapper_signature(bound_callable)\n self._set_wrapper_properties(bound_callable)\n self._set_wrapper_name(bound_callable, self.id)\n return bound_callable\n\n def _get_callable_wrapper(self):\n # This is a \"root\" level invocation (not a nested call within a\n # pipeline), so no special factory is needed.\n callable_wrapper = self._bind(qiime2.sdk.Context)\n self._set_wrapper_name(callable_wrapper, '__call__')\n return callable_wrapper\n\n def _get_async_wrapper(self):\n def async_wrapper(*args, **kwargs):\n # TODO handle this better in the future, but stop the massive error\n # caused by MacOSX asynchronous runs for now.\n try:\n import matplotlib as plt\n if plt.rcParams['backend'].lower() == 'macosx':\n raise EnvironmentError(backend_error_template %\n plt.matplotlib_fname())\n except ImportError:\n pass\n\n # This function's signature is rewritten below using\n # 
`decorator.decorator`. When the signature is rewritten, args[0]\n # is the function whose signature was used to rewrite this\n # function's signature.\n args = args[1:]\n\n pool = concurrent.futures.ProcessPoolExecutor(max_workers=1)\n future = pool.submit(_subprocess_apply, self, args, kwargs)\n # TODO: pool.shutdown(wait=False) caused the child process to\n # hang unrecoverably. This seems to be a bug in Python 3.7\n # It's probably best to gut concurrent.futures entirely, so we're\n # ignoring the resource leakage for the moment.\n return future\n\n async_wrapper = self._rewrite_wrapper_signature(async_wrapper)\n self._set_wrapper_properties(async_wrapper)\n self._set_wrapper_name(async_wrapper, 'asynchronous')\n return async_wrapper\n\n def _rewrite_wrapper_signature(self, wrapper):\n # Convert the callable's signature into the wrapper's signature and set\n # it on the wrapper.\n return decorator.decorator(\n wrapper, self._callable_sig_converter_(self._callable))\n\n def _set_wrapper_name(self, wrapper, name):\n wrapper.__name__ = wrapper.__qualname__ = name\n\n def _set_wrapper_properties(self, wrapper):\n wrapper.__module__ = self.get_import_path(include_self=False)\n wrapper.__doc__ = self._build_numpydoc()\n wrapper.__annotations__ = self._build_annotations()\n # This is necessary so that `inspect` doesn't display the wrapped\n # function's annotations (the annotations apply to the \"view API\" and\n # not the \"artifact API\").\n del wrapper.__wrapped__\n\n def _build_annotations(self):\n annotations = {}\n for name, spec in self.signature.signature_order.items():\n annotations[name] = spec.qiime_type\n\n output = []\n for spec in self.signature.outputs.values():\n output.append(spec.qiime_type)\n output = tuple(output)\n\n annotations[\"return\"] = output\n\n return annotations\n\n def _build_numpydoc(self):\n numpydoc = []\n numpydoc.append(textwrap.fill(self.name, width=75))\n if self.deprecated:\n base_msg = textwrap.indent(\n textwrap.fill(self._build_deprecation_message(), width=72),\n ' ')\n numpydoc.append('.. 
deprecated::\\n' + base_msg)\n numpydoc.append(textwrap.fill(self.description, width=75))\n\n sig = self.signature\n parameters = self._build_section(\"Parameters\", sig.signature_order)\n returns = self._build_section(\"Returns\", sig.outputs)\n\n # TODO: include Usage-rendered examples here\n\n for section in (parameters, returns):\n if section:\n numpydoc.append(section)\n\n return '\\n\\n'.join(numpydoc) + '\\n'\n\n def _build_section(self, header, iterable):\n section = []\n\n if iterable:\n section.append(header)\n section.append('-'*len(header))\n for key, value in iterable.items():\n variable_line = (\n \"{item} : {type}\".format(item=key, type=value.qiime_type))\n if value.has_default():\n variable_line += \", optional\"\n section.append(variable_line)\n if value.has_description():\n section.append(textwrap.indent(textwrap.fill(\n str(value.description), width=71), ' '))\n\n return '\\n'.join(section).strip()\n\n def _build_deprecation_message(self):\n return (f'This {self.type.title()} is deprecated and will be removed '\n 'in a future version of this plugin.')\n\n\nclass Method(Action):\n \"\"\"QIIME 2 Method\"\"\"\n\n type = 'method'\n\n # Abstract method implementations:\n\n def _callable_sig_converter_(self, callable):\n # No conversion necessary.\n return callable\n\n def _callable_executor_(self, scope, view_args, output_types, provenance):\n output_views = self._callable(**view_args)\n output_views = tuplize(output_views)\n\n # TODO this won't work if the user has annotated their \"view API\" to\n # return a `typing.Tuple` with some number of components. Python will\n # return a tuple when there are multiple return values, and this length\n # check will fail because the tuple as a whole should be matched up to\n # a single output type instead of its components. 
This is an edgecase\n # due to how Python handles multiple returns, and can be worked around\n # by using something like `typing.List` instead.\n if len(output_views) != len(output_types):\n raise TypeError(\n \"Number of output views must match number of output \"\n \"semantic types: %d != %d\"\n % (len(output_views), len(output_types)))\n\n output_artifacts = []\n for output_view, (name, spec) in zip(output_views,\n output_types.items()):\n if type(output_view) is not spec.view_type:\n raise TypeError(\n \"Expected output view type %r, received %r\" %\n (spec.view_type.__name__, type(output_view).__name__))\n\n prov = provenance.fork(name)\n scope.add_reference(prov)\n\n artifact = qiime2.sdk.Artifact._from_view(\n spec.qiime_type, output_view, spec.view_type, prov)\n scope.add_parent_reference(artifact)\n\n output_artifacts.append(artifact)\n\n return tuple(output_artifacts)\n\n @classmethod\n def _init(cls, callable, inputs, parameters, outputs, plugin_id, name,\n description, input_descriptions, parameter_descriptions,\n output_descriptions, citations, deprecated, examples):\n signature = qtype.MethodSignature(callable, inputs, parameters,\n outputs, input_descriptions,\n parameter_descriptions,\n output_descriptions)\n return super()._init(callable, signature, plugin_id, name, description,\n citations, deprecated, examples)\n\n\nclass Visualizer(Action):\n \"\"\"QIIME 2 Visualizer\"\"\"\n\n type = 'visualizer'\n\n # Abstract method implementations:\n\n def _callable_sig_converter_(self, callable):\n return DropFirstParameter.from_function(callable)\n\n def _callable_executor_(self, scope, view_args, output_types, provenance):\n # TODO use qiime2.plugin.OutPath when it exists, and update visualizers\n # to work with OutPath instead of str. Visualization._from_data_dir\n # will also need to be updated to support OutPath instead of str.\n with tempfile.TemporaryDirectory(prefix='qiime2-temp-') as temp_dir:\n ret_val = self._callable(output_dir=temp_dir, **view_args)\n if ret_val is not None:\n raise TypeError(\n \"Visualizer %r should not return anything. \"\n \"Received %r as a return value.\" % (self, ret_val))\n provenance.output_name = 'visualization'\n viz = qiime2.sdk.Visualization._from_data_dir(temp_dir,\n provenance)\n scope.add_parent_reference(viz)\n\n return (viz,)\n\n @classmethod\n def _init(cls, callable, inputs, parameters, plugin_id, name, description,\n input_descriptions, parameter_descriptions, citations,\n deprecated, examples):\n signature = qtype.VisualizerSignature(callable, inputs, parameters,\n input_descriptions,\n parameter_descriptions)\n return super()._init(callable, signature, plugin_id, name, description,\n citations, deprecated, examples)\n\n\nclass Pipeline(Action):\n \"\"\"QIIME 2 Pipeline\"\"\"\n type = 'pipeline'\n _ProvCaptureCls = archive.PipelineProvenanceCapture\n\n def _callable_sig_converter_(self, callable):\n return DropFirstParameter.from_function(callable)\n\n def _callable_executor_(self, scope, view_args, output_types, provenance):\n outputs = self._callable(scope.ctx, **view_args)\n outputs = tuplize(outputs)\n\n for output in outputs:\n if not isinstance(output, qiime2.sdk.Result):\n raise TypeError(\"Pipelines must return `Result` objects, \"\n \"not %s\" % (type(output), ))\n\n # This condition *is* tested by the caller of _callable_executor_, but\n # the kinds of errors a plugin developer see will make more sense if\n # this check happens before the subtype check. 
Otherwise forgetting an\n # output would more likely error as a wrong type, which while correct,\n # isn't root of the problem.\n if len(outputs) != len(output_types):\n raise TypeError(\n \"Number of outputs must match number of output \"\n \"semantic types: %d != %d\"\n % (len(outputs), len(output_types)))\n\n results = []\n for output, (name, spec) in zip(outputs, output_types.items()):\n if not (output.type <= spec.qiime_type):\n raise TypeError(\n \"Expected output type %r, received %r\" %\n (spec.qiime_type, output.type))\n prov = provenance.fork(name, output)\n scope.add_reference(prov)\n\n aliased_result = output._alias(prov)\n scope.add_parent_reference(aliased_result)\n\n results.append(aliased_result)\n\n return tuple(results)\n\n @classmethod\n def _init(cls, callable, inputs, parameters, outputs, plugin_id, name,\n description, input_descriptions, parameter_descriptions,\n output_descriptions, citations, deprecated, examples):\n signature = qtype.PipelineSignature(callable, inputs, parameters,\n outputs, input_descriptions,\n parameter_descriptions,\n output_descriptions)\n return super()._init(callable, signature, plugin_id, name, description,\n citations, deprecated, examples)\n\n\nmarkdown_source_template = \"\"\"\n```python\n%(source)s\n```\n\"\"\"\n\n# TODO add unit test for callables raising this\nbackend_error_template = \"\"\"\nYour current matplotlib backend (MacOSX) does not work with asynchronous calls.\nA recommended backend is Agg, and can be changed by modifying your\nmatplotlibrc \"backend\" parameter, which can be found at: \\n\\n %s\n\"\"\"\n"
] | [
[
"matplotlib.matplotlib_fname"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
webclinic017/hyperdrive | [
"3c930336efc1e46947548c30d48bb6ac1d6392f9"
] | [
"test/test_History.py"
] | [
"import sys\r\nimport numpy as np\r\nimport pandas as pd\r\nsys.path.append('hyperdrive')\r\nfrom History import Historian # noqa autopep8\r\n\r\n\r\nhist = Historian()\r\nls = [np.nan, True, True, np.nan, False, np.nan, np.nan, np.nan, True]\r\nfs = [True, True, True, True, False, False, False, False, True]\r\nunfilled_fs = [True, None, None, None,\r\n False, None, None, None, True]\r\nns = [True, True, True, True, False, False, False, True, True]\r\nunfilled_ns = [True, None, None, None,\r\n False, None, None, True, True]\r\narr = np.array(ls)\r\ntest_ffill = np.array(fs)\r\ntest_nfill = np.array(ns)\r\n\r\n\r\nclose = np.array([3, 2, 5, 1, 100, 75, 50, 25, 1])\r\n\r\ntotal = 100\r\nmajority = 80\r\nminority = total - majority\r\ndata = np.arange(total)\r\nX = pd.DataFrame({'i': data, 'j': data})\r\ny = np.array([True] * majority + [False] * minority)\r\n\r\n\r\nclass TestHistorian:\r\n def test_buy_and_hold(self):\r\n stats = hist.buy_and_hold(close).stats()\r\n assert 'Sortino Ratio' in stats\r\n\r\n def test_create_portfolio(self):\r\n stats = hist.create_portfolio(close, test_ffill).stats()\r\n assert 'Sortino Ratio' in stats\r\n\r\n def test_fill(self):\r\n ffill = hist.fill(arr)\r\n assert np.array_equal(ffill, test_ffill)\r\n nfill = hist.fill(arr, 'nearest')\r\n assert np.array_equal(nfill, test_nfill)\r\n\r\n def test_unfill(self):\r\n hist.unfill(fs) == unfilled_fs\r\n hist.unfill(ns) == unfilled_ns\r\n\r\n def test_get_optimal_signals(self):\r\n f_signals = hist.get_optimal_signals(close, n=2, method='ffill')\r\n assert np.array_equal(f_signals, test_ffill)\r\n n_signals = hist.get_optimal_signals(close, n=2, method='nfill')\r\n assert np.array_equal(n_signals, test_nfill)\r\n\r\n def test_generate_random(self):\r\n strats = hist.generate_random(close, num=100)\r\n assert 0 < len(strats) <= 25\r\n\r\n def test_undersample(self):\r\n y_train = hist.undersample(X, y)[2]\r\n assert np.mean(y_train) == 0.5\r\n\r\n def test_run_classifiers(self):\r\n X_train, X_test, y_train, y_test = hist.undersample(X, y)[:4]\r\n clfs = hist.run_classifiers(X_train, X_test, y_train, y_test)\r\n for _, clf in clfs:\r\n assert 'score' in clf\r\n"
] | [
[
"numpy.array_equal",
"numpy.arange",
"pandas.DataFrame",
"numpy.mean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
samharnew/moneytrack | [
"6c798623ea93229e04fda69deea82bca6d5273c1"
] | [
"test/test_moneyframe.py"
] | [
"import logging\nimport unittest\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\n\nfrom moneytrack import BalanceTransfers, BalanceUpdates, MoneyFrame\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass TestMoneyFrame(unittest.TestCase):\n\n def test_get_daily_balances(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [],\n \"to_account_key\": [],\n \"amount\": [],\n \"date\": [],\n })\n\n balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [\"1\", \"1\", \"1\", \"1\", \"1\"],\n \"balance\": [100.0, 200.0, 220.0, 220.0, 200.0],\n \"date\": [\"2019-01-01\", \"2019-01-03\", \"2019-01-04\", \"2019-01-06\", \"2019-01-08\"],\n })\n\n hist = MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans,\n balance_updates=balance_updates, account_key=\"1\")\n daily_bals = hist.get_daily_balance()\n exp = pd.DataFrame({\n \"date\": pd.to_datetime([\"2019-01-01\", \"2019-01-02\", \"2019-01-03\", \"2019-01-04\", \"2019-01-05\", \"2019-01-06\",\n \"2019-01-07\", \"2019-01-08\"]),\n \"balance\": [100.0, 141.421356, 200.0, 220.0, 220.0, 220.0, 209.761770, 200.0],\n }).set_index(\"date\")\n\n np.testing.assert_equal(exp.index.values, daily_bals.index.values)\n np.testing.assert_array_almost_equal(exp[\"balance\"].values, daily_bals.values)\n\n def test_from_fixed_rate(self):\n dah = MoneyFrame.from_fixed_rate(days=5, start_bal=100.0, ayr_prcnt=5.0)\n self.assertTrue(len(dah) == 5)\n self.assertTrue(dah.max_date() == datetime.today().date())\n\n dah = MoneyFrame.from_fixed_rate(days=(\"2020-01-01\", \"2020-01-05\"), start_bal=100.0, ayr_prcnt=5.0)\n self.assertTrue(len(dah) == 5)\n\n def test_slice(self):\n dah = MoneyFrame.from_fixed_rate(days=(\"2020-01-01\", \"2020-01-05\"), start_bal=100.0, ayr_prcnt=5.0)\n self.assertTrue(len(dah[2]) == 1)\n self.assertTrue(len(dah[2:3]) == 1)\n self.assertTrue(len(dah[2:4]) == 2)\n self.assertTrue(len(dah[\"2020-01-01\"]) == 1)\n self.assertTrue(len(dah[\"2020-01-01\":\"2020-01-03\"]) == 3)\n\n def test_get_daily_balances_with_trans(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [\"1\", \"2\", \"1\", \"2\", \"1\", \"2\"],\n \"to_account_key\": [\"2\", \"1\", \"3\", \"1\", \"2\", \"2\"],\n \"amount\": [10.0, 5.0, 4.0, 15.0, 5.0, 100],\n \"date\": [\"2019-01-03\", \"2019-01-03\", \"2019-01-03\", \"2019-01-04\", \"2019-01-05\", \"2019-01-01\"],\n })\n\n balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [\"1\", \"1\"],\n \"balance\": [100.0, 200.0],\n \"date\": [\"2019-01-01\", \"2019-01-05\"],\n })\n\n hist = MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans,\n balance_updates=balance_updates, account_key=\"1\")\n daily_bals = hist.get_daily_balance()\n print(daily_bals)\n mult = 0.18904171541952763 + 1.0\n exp = pd.DataFrame({\n \"date\": pd.to_datetime([\"2019-01-01\", \"2019-01-02\", \"2019-01-03\", \"2019-01-04\", \"2019-01-05\"]),\n \"balance\": [\n 100.0,\n 100.0 * mult,\n 100.0 * mult * mult - 9.0,\n (100.0 * mult * mult - 9.0) * mult + 15.0,\n ((100.0 * mult * mult - 9.0) * mult + 15.0) * mult - 5.0\n ],\n }).set_index(\"date\")\n\n np.testing.assert_equal(exp.index.values, daily_bals.index.values)\n np.testing.assert_array_almost_equal(exp[\"balance\"].values, daily_bals.values)\n\n def test_get_daily_balances_edge_case_1(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [\"0\", \"0\"],\n \"to_account_key\": [\"1\", \"1\"],\n \"amount\": [90.0, 90.0],\n \"date\": [\"2019-01-03\", \"2018-12-30\"],\n })\n\n 
balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [],\n \"balance\": [],\n \"date\": [],\n })\n hist = MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans,\n balance_updates=balance_updates, account_key=\"1\")\n daily_bals = hist.get_daily_balance()\n exp = pd.DataFrame({\n \"date\": pd.to_datetime(\n [\"2018-12-29\", \"2018-12-30\", \"2018-12-31\", \"2019-01-01\", \"2019-01-02\", \"2019-01-03\"]),\n \"balance\": [0.0, 90.0, 90.0, 90.0, 90.0, 180.0],\n }).set_index(\"date\")\n\n np.testing.assert_equal(exp.index.values, daily_bals.index.values)\n np.testing.assert_array_almost_equal(exp[\"balance\"].values, daily_bals.values)\n\n def test_get_daily_balances_with_early_trans(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [\"0\", \"0\"],\n \"to_account_key\": [\"1\", \"1\"],\n \"amount\": [90.0, 90.0],\n \"date\": [\"2019-01-03\", \"2018-12-30\"],\n })\n\n balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [\"1\", \"1\"],\n \"balance\": [100.0, 200.0],\n \"date\": [\"2019-01-01\", \"2019-01-05\"],\n })\n\n hist = MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans,\n balance_updates=balance_updates, account_key=\"1\")\n daily_bals = hist.get_daily_balance()\n mult = 1.05409255338945984\n mult2 = 1.01689832725085294\n exp = pd.DataFrame({\n \"date\": pd.to_datetime([\"2018-12-29\", \"2018-12-30\", \"2018-12-31\", \"2019-01-01\", \"2019-01-02\",\n \"2019-01-03\", \"2019-01-04\", \"2019-01-05\"]),\n \"balance\": [\n 0.0, # 29\n 90.0, # 30\n 90.0 * mult, # 31\n 90.0 * mult * mult, # 01\n 100.0 * mult2, # 02\n 100.0 * mult2 * mult2 + 90.0, # 03\n (100.0 * mult2 * mult2 + 90.0) * mult2, # 04\n (100.0 * mult2 * mult2 + 90.0) * mult2 * mult2, # 05\n ],\n }).set_index(\"date\")\n\n np.testing.assert_equal(exp.index.values, daily_bals.index.values)\n np.testing.assert_array_almost_equal(exp[\"balance\"].values, daily_bals.values)\n\n def test_daily_interest_amounts(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [\"0\", \"0\"],\n \"to_account_key\": [\"1\", \"1\"],\n \"amount\": [90.0, 90.0],\n \"date\": [\"2019-01-03\", \"2018-12-30\"],\n })\n\n balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [\"1\", \"1\"],\n \"balance\": [100.0, 200.0],\n \"date\": [\"2019-01-01\", \"2019-01-05\"],\n })\n\n exp = pd.DataFrame({\n \"date\": pd.to_datetime([\"2018-12-29\", \"2018-12-30\", \"2018-12-31\", \"2019-01-01\", \"2019-01-02\",\n \"2019-01-03\", \"2019-01-04\", \"2019-01-05\"]),\n \"interest\": [0.0, 0.0, 4.868330, 10.0 - 4.868330, 1.689833, 193.408221 - 90.0 - 101.689833,\n 196.676496 - 193.408221,\n 200.0 - 196.676496],\n }).set_index(\"date\")[\"interest\"]\n\n hist = MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans,\n balance_updates=balance_updates, account_key=\"1\")\n print(hist.get_daily_balance())\n print(hist.get_daily_transfers())\n\n daily_interest = hist.get_daily_interest()\n print(daily_interest)\n\n np.testing.assert_equal(exp.index.values, daily_interest.index.values)\n np.testing.assert_array_almost_equal(exp.values, daily_interest.values, 4)\n\n def test_daily_summary(self):\n bal_trans = BalanceTransfers.from_dict({\n \"from_account_key\": [\"0\", \"0\"],\n \"to_account_key\": [\"1\", \"1\"],\n \"amount\": [90.0, 90.0],\n \"date\": [\"2019-01-03\", \"2018-12-30\"],\n })\n\n balance_updates = BalanceUpdates.from_dict({\n \"account_key\": [\"1\", \"1\"],\n \"balance\": [100.0, 200.0],\n \"date\": [\"2019-01-01\", \"2019-01-05\"],\n })\n\n hist = 
MoneyFrame.from_updates_and_transfers(balance_transfers=bal_trans, balance_updates=balance_updates,\n account_key=\"1\")\n print(hist.to_df())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.testing.assert_equal",
"pandas.to_datetime",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
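The expected balances in the first test of this record (for example 141.421356 on 2019-01-02, sitting between the 100.0 and 200.0 balance updates) follow from compounding at a constant daily rate between consecutive updates, i.e. geometric interpolation. A minimal sketch of that calculation with plain numpy; the helper name `interpolate_balances` is illustrative and not part of moneytrack:

```python
import numpy as np

def interpolate_balances(start_bal, end_bal, n_days):
    """Balances obtained by compounding at a constant daily rate.

    Day 0 holds start_bal and day n_days holds end_bal; the days in
    between grow geometrically, which reproduces the expected values
    in the test (100 -> 141.421356 -> 200 over two days).
    """
    daily_rate = (end_bal / start_bal) ** (1.0 / n_days)
    return start_bal * daily_rate ** np.arange(n_days + 1)

print(interpolate_balances(100.0, 200.0, 2))  # [100. 141.42135624 200.]
```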
Arpan-29/Machine-Learning | [
"3e1e32bcc77884f8ecb964324a1ab2d74badff98"
] | [
"Hyperparameter Tuning/Gradient Checking/gc_utils.py"
] | [
"import numpy as np\n\ndef sigmoid(x):\n\n s = 1/(1+np.exp(-x))\n \n return s\n\ndef relu(x):\n\n s = np.maximum(0,x)\n \n return s\n\ndef dictionary_to_vector(parameters):\n\n keys = []\n count = 0\n for key in [\"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"]:\n \n # flatten parameter\n new_vector = np.reshape(parameters[key], (-1,1))\n keys = keys + [key]*new_vector.shape[0]\n \n if count == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n count = count + 1\n\n return theta, keys\n\ndef vector_to_dictionary(theta):\n\n parameters = {}\n parameters[\"W1\"] = theta[:20].reshape((5,4))\n parameters[\"b1\"] = theta[20:25].reshape((5,1))\n parameters[\"W2\"] = theta[25:40].reshape((3,5))\n parameters[\"b2\"] = theta[40:43].reshape((3,1))\n parameters[\"W3\"] = theta[43:46].reshape((1,3))\n parameters[\"b3\"] = theta[46:47].reshape((1,1))\n\n return parameters\n\ndef gradients_to_vector(gradients):\n\n count = 0\n for key in [\"dW1\", \"db1\", \"dW2\", \"db2\", \"dW3\", \"db3\"]:\n\n new_vector = np.reshape(gradients[key], (-1,1))\n \n if count == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n count = count + 1\n\n return theta"
] | [
[
"numpy.reshape",
"numpy.exp",
"numpy.maximum",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
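The vector/dictionary helpers in this record are the plumbing for a centred-difference gradient check. A small self-contained sketch of that check on a toy quadratic cost; the cost function and names below are illustrative, not taken from the repository:

```python
import numpy as np

def numerical_gradient(cost, theta, eps=1e-7):
    """Centred-difference approximation of d cost / d theta."""
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        plus, minus = theta.copy(), theta.copy()
        plus[i] += eps
        minus[i] -= eps
        grad[i] = (cost(plus) - cost(minus)) / (2 * eps)
    return grad

cost = lambda t: np.sum(t ** 2)          # toy cost with analytic gradient 2*t
theta = np.array([1.0, -2.0, 0.5])
approx = numerical_gradient(cost, theta)
exact = 2 * theta
# relative difference should be tiny when the analytic gradient is correct
print(np.linalg.norm(approx - exact) / (np.linalg.norm(approx) + np.linalg.norm(exact)))
```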
CedricVallee/pythonFinancialAnalyst | [
"64c562134de7801aeef3981f4ef4ac5d5b5fd70b"
] | [
"FinancialAnalystV2/main.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 05 2016\r\nAuthor: Cedric Vallee\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport pandas as pd\r\nimport csv\r\nimport sklearn\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom nltk.corpus import stopwords\r\nfrom collections import defaultdict\r\nimport Matcher as matcher\r\nimport Validator as validator\r\n\r\n# Function to create a list \"dataset\" with 3 columns: 'MD&A_Text','Filename','Sentiment'\r\ndef get_dataset(path):\r\n dataset=[]\r\n for filename in os.listdir(path):\r\n if filename.endswith(\"pos\"): \r\n t = open(path + filename,\"r\").read() \r\n dataset.append([re.sub('[^a-zA-Z]+', ' ', t), re.sub(r\"(?:_pos)$\",'', filename), \"pos\"]) \r\n elif filename.endswith(\"neg\"): \r\n t = open(path + filename,\"r\").read()\r\n dataset.append([re.sub('[^a-zA-Z]+', ' ', t), re.sub(r\"(?:_neg)$\",'', filename), \"neg\"]) \r\n return dataset\r\n\r\n### Main function\r\n\r\n# FEATURE 1 - Match with the McDonald Dictionary\r\ndataset = get_dataset(\"../mdatest/\")\r\ndico = matcher.get_dico(dataset) # dico is a column with the matching scores of the MDAs versus the Finance Dictionary \r\ndf=pd.DataFrame(dataset)\r\ndf[3] = pd.Series(dico)\r\ndf.columns = ['MD&A_Text','Filename','Actual','MatchDico'] \r\n\r\n# FEATURE 2 and 3 - Match with the Compustat financial data to get the indices 'delta_sales' and 'delta_at'\r\ncompustat = pd.read_csv('compustat_filenames.csv', sep=',')\r\nde = compustat['delta_sale']\r\ndt = compustat['delta_at']\r\nds = pd.merge(df, compustat, left_on='Filename', right_on='Filename')\r\n\r\n\r\n# We split the global matrix \"result\" into a training and a testing set\r\ntrain, test = validator.split(ds,0.5)\r\n\r\n# We fit a Random Forest model \t (n_estimators default=10, min_samples_leaf default=1)\r\nrf = RandomForestClassifier(n_estimators=100)\r\nrf.fit(train[3].reshape(-1, 1), train[2].reshape(-1, 1))\r\npredictions = rf.predict(test[3].reshape(-1, 1))\r\ntest[5] = pd.Series(predictions, index=test.index)\r\n\r\ntest.columns = ['MD&A_Text','Filename','Actual','MatchDico','WordCrisis','Predicted'] \r\nprint(test)\r\n\r\ntab = pd.crosstab(test['Actual'], test['Predicted'], rownames=['Actual'], colnames=['Predicted'], margins=True) # Print confusion matrix\r\nprint(tab)\r\nprint(classification_report(test['Actual'], test['Predicted'])) # Print accuracy, precision, recall, F measure"
] | [
[
"pandas.crosstab",
"pandas.merge",
"pandas.read_csv",
"pandas.Series",
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
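The script in this record calls `train[3].reshape(-1, 1)`, which relies on pandas columns exposing a `reshape` method; the pandas versions listed for the record no longer provide that on `Series`. A hedged sketch of the same single-feature random-forest fit that converts to numpy explicitly (column names and toy data are illustrative):

```python
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

train = pd.DataFrame({"MatchDico": [0.1, 0.4, 0.3, 0.8],
                      "Actual": ["neg", "pos", "neg", "pos"]})
test = pd.DataFrame({"MatchDico": [0.2, 0.7]})

X_train = train["MatchDico"].to_numpy().reshape(-1, 1)  # explicit 2-D feature matrix
y_train = train["Actual"].to_numpy()                    # 1-D labels, no reshape needed

rf = RandomForestClassifier(n_estimators=100, random_state=0)
rf.fit(X_train, y_train)
print(rf.predict(test["MatchDico"].to_numpy().reshape(-1, 1)))
```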
vishalbelsare/pymanopt | [
"ca14b6d1c2cc9adb1f6a5561bfcc7047c267c60f"
] | [
"examples/low_rank_psd_matrix_approximation.py"
] | [
"import autograd.numpy as np\nimport tensorflow as tf\nimport theano.tensor as T\nimport torch\nfrom examples._tools import ExampleRunner\nfrom numpy import linalg as la, random as rnd\n\nimport pymanopt\nfrom pymanopt.manifolds import PSDFixedRank\nfrom pymanopt.solvers import TrustRegions\n\nSUPPORTED_BACKENDS = (\n \"Autograd\", \"Callable\", \"PyTorch\", \"TensorFlow\", \"Theano\"\n)\n\n\ndef create_cost_egrad_ehess(backend, A, rank):\n num_rows = A.shape[0]\n egrad = ehess = None\n\n if backend == \"Autograd\":\n @pymanopt.function.Autograd\n def cost(Y):\n return np.linalg.norm(Y @ Y.T - A, \"fro\") ** 2\n elif backend == \"Callable\":\n @pymanopt.function.Callable\n def cost(Y):\n return la.norm(Y @ Y.T - A, \"fro\") ** 2\n\n @pymanopt.function.Callable\n def egrad(Y):\n return 4 * (Y @ Y.T - A) @ Y\n\n @pymanopt.function.Callable\n def ehess(Y, U):\n return 4 * ((Y @ U.T + U @ Y.T) @ Y + (Y @ Y.T - A) @ U)\n elif backend == \"PyTorch\":\n A_ = torch.from_numpy(A)\n\n @pymanopt.function.PyTorch\n def cost(Y):\n X = torch.matmul(Y, torch.transpose(Y, 1, 0))\n return torch.norm(X - A_) ** 2\n elif backend == \"TensorFlow\":\n Y = tf.Variable(tf.zeros((num_rows, rank), dtype=np.float64), name=\"Y\")\n\n @pymanopt.function.TensorFlow\n def cost(Y):\n X = tf.matmul(Y, tf.transpose(Y))\n return tf.norm(X - A) ** 2\n elif backend == \"Theano\":\n Y = T.matrix()\n\n @pymanopt.function.Theano(Y)\n def cost(Y):\n return T.sum((T.dot(Y, Y.T) - A) ** 2)\n else:\n raise ValueError(\"Unsupported backend '{:s}'\".format(backend))\n\n return cost, egrad, ehess\n\n\ndef run(backend=SUPPORTED_BACKENDS[0], quiet=True):\n num_rows = 1000\n rank = 5\n low_rank_factor = rnd.randn(num_rows, rank)\n matrix = low_rank_factor @ low_rank_factor.T\n\n cost, egrad, ehess = create_cost_egrad_ehess(backend, matrix, rank)\n manifold = PSDFixedRank(num_rows, rank)\n problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)\n if quiet:\n problem.verbosity = 0\n\n solver = TrustRegions(maxiter=500, minstepsize=1e-6)\n low_rank_factor_estimate = solver.solve(problem)\n\n if quiet:\n return\n\n print(\"Rank of target matrix:\", la.matrix_rank(matrix))\n matrix_estimate = low_rank_factor_estimate @ low_rank_factor_estimate.T\n print(\"Frobenius norm error of low-rank estimate:\",\n la.norm(matrix - matrix_estimate))\n\n\nif __name__ == \"__main__\":\n runner = ExampleRunner(run, \"Low-rank PSD matrix approximation\",\n SUPPORTED_BACKENDS)\n runner.run()\n"
] | [
[
"tensorflow.norm",
"torch.transpose",
"torch.norm",
"numpy.linalg.matrix_rank",
"tensorflow.transpose",
"tensorflow.zeros",
"torch.from_numpy",
"numpy.linalg.norm",
"numpy.random.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
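The Callable backend in this record supplies the Euclidean gradient egrad(Y) = 4 (Y Yᵀ − A) Y for the cost ‖Y Yᵀ − A‖²_F. A quick finite-difference sanity check of that formula with plain numpy, independent of pymanopt; the array sizes and random seed are arbitrary:

```python
import numpy as np

rng = np.random.default_rng(0)
n, k = 6, 2
B = rng.standard_normal((n, k))
A = B @ B.T                                  # symmetric low-rank target
Y = rng.standard_normal((n, k))

cost = lambda Y: np.linalg.norm(Y @ Y.T - A, "fro") ** 2
egrad = 4 * (Y @ Y.T - A) @ Y

# directional derivative along a random direction vs. <egrad, U>
U = rng.standard_normal((n, k))
eps = 1e-6
fd = (cost(Y + eps * U) - cost(Y - eps * U)) / (2 * eps)
print(fd, np.sum(egrad * U))  # the two numbers should agree to several digits
```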
yasokada/ADDA_pySpherepts_171217 | [
"ae1688c788811c616c0594f4d2c1eeca7643b797"
] | [
"test_use_PAZ_pySpherepts_171217.py"
] | [
"import numpy as np\nimport polarAzimuthCalc_171209 as pac\nimport getIcosNodes_171126 as gIN\nimport sys\n\n'''\nDec. 17, 2017\nThis script requires the setup of [pySpherepts_171126] package shown at\n https://github.com/yasokada/pySpherepts_171126\nfor [getIcosNodes_171126]\n'''\n\n'''\nv0.1 Dec. 17, 2017\n - add test_pySpherepts_IcosNodes()\n'''\n\n\ndef test_pySpherepts_IcosNodes():\n xs, tris = gIN.getIcosNodes(4,0)\n pvecs = xs\n for elem in pvecs:\n beta_rad = pac.calc_beta_rad(elem)\n gamma_rad = pac.calc_gamma_rad(elem)\n beta_deg = np.rad2deg(beta_rad)\n gamma_deg = np.rad2deg(gamma_rad)\n\n fmt = \"{0} \\tbeta:{1:6.1f} gamma:{2:6.1f}\"\n msg = fmt.format(elem, beta_deg, gamma_deg)\n print(msg)\n\n\nif __name__ == '__main__':\n test_pySpherepts_IcosNodes()"
] | [
[
"numpy.rad2deg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
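The `polarAzimuthCalc_171209` helpers used in this record are not included here. Under the common convention that beta is the polar angle measured from the +z axis and gamma the azimuth in the x-y plane, they can be approximated as in the sketch below; this is a guess at the convention, not the repository's actual implementation:

```python
import numpy as np

def calc_beta_rad(v):
    """Polar angle of a 3-vector, measured from the +z axis (assumed convention)."""
    x, y, z = v
    return np.arccos(z / np.linalg.norm(v))

def calc_gamma_rad(v):
    """Azimuthal angle in the x-y plane, in (-pi, pi] (assumed convention)."""
    x, y, z = v
    return np.arctan2(y, x)

v = np.array([0.0, 0.0, 1.0])  # e.g. the north-pole node of the icosahedral set
print(np.rad2deg(calc_beta_rad(v)), np.rad2deg(calc_gamma_rad(v)))  # 0.0 0.0
```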
IvanKosik/retinal-fundus-models | [
"a2135f0ef9a70b403aa9bcff91de511f3f635296"
] | [
"src/bsmu/retinal_fundus/models/utils/csv.py"
] | [
"import random\nfrom pathlib import Path\n\nimport pandas as pd\n\n\ndef generate_train_valid_csv(image_dir: Path, mask_dir: Path,\n train_csv_path: Path, valid_csv_path: Path,\n filter_predicate=lambda file_name: True, train_part: float = 0.75):\n data_file_names = []\n for image_path in image_dir.iterdir():\n if not filter_predicate(image_path.name):\n continue\n\n mask_path = mask_dir / image_path.name\n if mask_path.exists():\n data_file_names.append(image_path.name)\n else:\n print('WARNING: no mask', mask_path)\n\n train_file_names = random.sample(data_file_names, int(train_part * len(data_file_names)))\n valid_file_names = [file_name for file_name in data_file_names if file_name not in train_file_names]\n\n COLUMNS = ['file_names']\n train_data_frame = pd.DataFrame(data=train_file_names, columns=COLUMNS)\n train_data_frame.to_csv(str(train_csv_path), index=False)\n\n valid_data_frame = pd.DataFrame(data=valid_file_names, columns=COLUMNS)\n valid_data_frame.to_csv(str(valid_csv_path), index=False)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jer-green/triage-bandit-sandbox | [
"18a824a4af736dff2246f4d86dd1d7104874b044"
] | [
"triage_bandit_sandbox/triage_models_sandbox/triage_data_transformers/label_encoders.py"
] | [
"from typing import Optional\nimport numpy as np\nfrom scipy.stats import mode\n\nfrom babylon_pgm.triage_models.triage_decisions import (\n TRIAGE_MODEL_DECISION_TYPE,\n TriageModelDecisionDefault,\n)\n\nfrom ..interfaces import PreparedDataTransformer, PreparedData\n\n\ndef _check_correct_decisions_type(correct_decisions):\n assert isinstance(correct_decisions, list)\n assert isinstance(correct_decisions[0], list)\n check_flag = False\n for t in TRIAGE_MODEL_DECISION_TYPE.__args__:\n if isinstance(correct_decisions[0][0], t):\n check_flag = True\n break\n if check_flag is False:\n raise AssertionError(\"Type not recognized.\")\n\nclass MultiClassEncoder(PreparedDataTransformer):\n \"\"\"\n Encode correct triage decisions into labels for multi-class classifiers.\n The mode of doctors' outcomes is used as the correct decision.\n \"\"\"\n def __init__(self):\n pass\n\n def fit(self, dataset: PreparedData):\n pass\n\n def transform(\n self, dataset: PreparedData, train: Optional[bool] = True\n ) -> PreparedData:\n \"\"\"\n Transform a PreparedData object into a PreparedData object, where\n correct_decision are encoded as labels for a multi-class classifier.\n\n :param dataset: Data to be transformed.\n :param train: Whether the data are used to train the model.\n :return: PreparedData object.\n \"\"\"\n if dataset.correct_decisions is not None:\n _check_correct_decisions_type(dataset.correct_decisions)\n y = np.zeros(len(dataset),)\n for i, decisions in enumerate(dataset.correct_decisions):\n y[i] = mode([d.index for d in decisions])[0][0]\n dataset.correct_decisions = y.astype(int)\n return dataset\n\n def fit_transform(\n self, dataset: PreparedData, train: Optional[bool] = True\n ) -> PreparedData:\n \"\"\"\n Transform a PreparedData object into a PreparedData object, where\n correct_decision are encoded as labels for a multi-class classifier.\n\n :param dataset: Data to be transformed.\n :param train: Whether the data are used to train the model.\n :return: PreparedData object.\n \"\"\"\n return self.transform(dataset)\n\n\nclass MultiLabelEncoder(PreparedDataTransformer):\n \"\"\"\n Encode correct triage decisions into labels for multi-label classifiers.\n All doctor's outcomes are considered as valid decisions.\n \"\"\"\n def __init__(\n self,\n triage_decisions: Optional[TRIAGE_MODEL_DECISION_TYPE] = TriageModelDecisionDefault,\n ):\n self._triage_decisions = triage_decisions\n self.num_classes = len(self._triage_decisions)\n\n def fit(self, dataset: PreparedData):\n pass\n\n def transform(\n self, dataset: PreparedData, train: Optional[bool] = True\n ) -> PreparedData:\n \"\"\"\n Transform a PreparedData object into a PreparedData object, where\n correct_decision are encoded as labels for a multi-label classifier.\n\n :param dataset: Data to be transformed.\n :param train: Whether the data are used to train the model.\n :return: PreparedData object.\n \"\"\"\n if dataset.correct_decisions is not None:\n _check_correct_decisions_type(dataset.correct_decisions)\n y = np.zeros((len(dataset), self.num_classes))\n for i, decisions in enumerate(dataset.correct_decisions):\n y[i, [d.index for d in decisions]] = 1\n dataset.correct_decisions = y\n return dataset\n\n def fit_transform(\n self, dataset: PreparedData, train: Optional[bool] = True\n ) -> PreparedData:\n \"\"\"\n Transform a PreparedData object into a PreparedData object, where\n correct_decision are encoded as labels for a multi-label classifier.\n\n :param dataset: Data to be transformed.\n :param train: Whether the data are 
used to train the model.\n :return: PreparedData object.\n \"\"\"\n return self.transform(dataset)"
] | [
[
"scipy.stats.mode"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
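`MultiClassEncoder` in this record takes the modal doctor decision via `mode([...])[0][0]`, an indexing pattern tied to older scipy releases where `scipy.stats.mode` returned arrays. A small sketch of the same reduction with the standard library, which does not depend on that return shape; the decision indices below are made up for illustration:

```python
from collections import Counter

def modal_index(decision_indices):
    """Most common decision index; ties are broken by first occurrence."""
    return Counter(decision_indices).most_common(1)[0][0]

print(modal_index([2, 3, 3, 1, 3]))  # 3
print(modal_index([0, 1]))           # 0 (tie -> first seen)
```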
jksr/cemba_data | [
"c796c33a2fd262b2ef893df1951a90b8d0ba9289"
] | [
"cemba_data/mapping/stats/utilities.py"
] | [
"import pathlib\nfrom collections import defaultdict\nfrom pysam import TabixFile\nimport pandas as pd\n\nfrom ...utilities import parse_mc_pattern\n\n\ndef parse_trim_fastq_stats(stat_path):\n # example trim fastq stats\n \"\"\"\nstatus\tin_reads\tin_bp\ttoo_short\ttoo_long\ttoo_many_n\tout_reads\tw/adapters\tqualtrim_bp\tout_bp\n0\tOK\t1490\t213724\t0\t0\t0\t1490\t4\t0\t213712\n1\tstatus\tin_reads\tin_bp\ttoo_short\ttoo_long\ttoo_many_n\tout_reads\tw/adapters\tqualtrim_bp\tout_bp\n2\tOK\t1490\t213712\t0\t0\t0\t1482\t0\t1300\t182546\n\"\"\"\n *cell_id, read_type = pathlib.Path(stat_path).name.split('.')[0].split('-')\n cell_id = '-'.join(cell_id)\n trim_stats = pd.read_csv(stat_path, sep='\\t')\n trim_stats = trim_stats.iloc[[0, 2], :].reset_index() # skip the duplicated title row\n\n data = pd.Series({\n f'{read_type}InputReads': trim_stats['in_reads'][0],\n f'{read_type}InputReadsBP': trim_stats['in_bp'][0],\n f'{read_type}WithAdapters': trim_stats['w/adapters'][0],\n f'{read_type}QualTrimBP': trim_stats['qualtrim_bp'][1],\n f'{read_type}TrimmedReads': trim_stats['out_reads'][1],\n f'{read_type}TrimmedReadsBP': trim_stats['out_bp'][1],\n f'{read_type}TrimmedReadsRate': int(trim_stats['out_reads'][1]) / int(trim_stats['in_reads'][0])\n }, name=cell_id)\n return data\n\n\ndef parse_trim_fastq_stats_mct(stat_path):\n *cell_id, read_type = pathlib.Path(stat_path).name.split('.')[0].split('-')\n cell_id = '-'.join(cell_id)\n with open(stat_path) as f:\n lines = f.readlines()\n adapter_str = ''.join(lines[:-2])\n trim_lines = lines[-2:]\n\n # adapter counts\n total_dict = {}\n for line in adapter_str.replace('===\\n\\n', '; ').replace('=== Adapter ', 'Adapter: ').split('\\n'):\n if line.startswith('=== Summary'):\n total_dict[f'{read_type}InputReads'] = int(line.strip('\\n').split(' ')[-1].replace(',', ''))\n if line.startswith('Adapter: '):\n line_list = line.split('; ')\n line_dict = {}\n for l in line_list:\n k, v = l.split(': ')\n line_dict[k] = v\n name = line_dict.pop('Adapter').strip(' ')\n total_dict[f'{read_type}With{name}'] = int(line_dict['Trimmed'][:-5])\n data = pd.Series(total_dict, name=cell_id)\n\n # add trimmed counts, the last two rows in tsv format, the same as normal mc\n trim_data = pd.DataFrame([\n line.strip('\\n').split('\\t') for line in trim_lines\n ]).T.set_index(0)[1]\n data[f'{read_type}QualTrimBP'] = int(trim_data['qualtrim_bp'])\n data[f'{read_type}TrimmedReads'] = int(trim_data['out_reads'])\n data[f'{read_type}TrimmedReadsBP'] = int(trim_data['out_bp'])\n data[f'{read_type}TrimmedReadsRate'] = data[f'{read_type}TrimmedReads'] / data[f'{read_type}InputReads']\n return data\n\n\ndef parse_bismark_report(stat_path):\n \"\"\"\n parse bismark output\n \"\"\"\n *cell_id, read_type = pathlib.Path(stat_path).name.split('.')[0].split('-')\n cell_id = '-'.join(cell_id)\n term_dict = {\n 'Number of alignments with a unique best hit from the different alignments': f'{read_type}UniqueMappedReads',\n 'Mapping efficiency': f'{read_type}MappingRate',\n 'Sequences with no alignments under any condition': f'{read_type}UnmappedReads',\n 'Sequences did not map uniquely': f'{read_type}UnuniqueMappedReads',\n 'CT/CT': f'{read_type}OT',\n 'CT/GA': f'{read_type}OB',\n 'GA/CT': f'{read_type}CTOT',\n 'GA/GA': f'{read_type}CTOB',\n \"Total number of C's analysed\": f'{read_type}TotalC',\n 'C methylated in CpG context': f'{read_type}TotalmCGRate',\n 'C methylated in CHG context': f'{read_type}TotalmCHGRate',\n 'C methylated in CHH context': f'{read_type}TotalmCHHRate'}\n\n with 
open(stat_path) as rep:\n report_dict = {}\n for line in rep:\n try:\n start, rest = line.split(':')\n except ValueError:\n continue # more or less than 2 after split\n try:\n report_dict[term_dict[start]] = rest.strip().split('\\t')[0].strip('%')\n except KeyError:\n pass\n return pd.Series(report_dict, name=cell_id)\n\n\ndef parse_deduplicate_stat(stat_path):\n *cell_id, read_type = pathlib.Path(stat_path).name.split('.')[0].split('-')\n cell_id = '-'.join(cell_id)\n try:\n dedup_result_series = pd.read_csv(stat_path, comment='#', sep='\\t').T[0]\n rename_dict = {\n 'UNPAIRED_READS_EXAMINED': f'{read_type}MAPQFilteredReads',\n 'UNPAIRED_READ_DUPLICATES': f'{read_type}DuplicatedReads',\n 'PERCENT_DUPLICATION': f'{read_type}DuplicationRate'\n }\n dedup_result_series = dedup_result_series.loc[rename_dict.keys()].rename(rename_dict)\n\n dedup_result_series[f'{read_type}FinalBismarkReads'] = dedup_result_series[f'{read_type}MAPQFilteredReads'] - \\\n dedup_result_series[f'{read_type}DuplicatedReads']\n dedup_result_series.name = cell_id\n except pd.errors.EmptyDataError:\n # if a BAM file is empty, picard matrix is also empty\n dedup_result_series = pd.Series({f'{read_type}MAPQFilteredReads': 0,\n f'{read_type}DuplicatedReads': 0,\n f'{read_type}FinalBismarkReads': 0,\n f'{read_type}DuplicationRate': 0}, name=cell_id)\n return dedup_result_series\n\n\ndef generate_allc_stats(output_dir, config):\n output_dir = pathlib.Path(output_dir).absolute()\n allc_list = list(output_dir.glob('allc/*tsv.gz'))\n allc_stats_dict = {p.name.split('.')[0]: p for p in output_dir.glob('allc/*count.csv')}\n\n patterns = config['mc_stat_feature'].split(' ')\n patterns_alias = config['mc_stat_alias'].split(' ')\n pattern_translate = {k: v for k, v in zip(patterns, patterns_alias)}\n\n # real all cell stats\n total_stats = []\n for cell_id, path in allc_stats_dict.items():\n allc_stat = pd.read_csv(path, index_col=0)\n allc_stat['cell_id'] = cell_id\n total_stats.append(allc_stat)\n total_stats = pd.concat(total_stats)\n cell_genome_cov = pd.Series(total_stats.set_index('cell_id')['genome_cov'].to_dict())\n # aggregate into patterns\n cell_records = []\n for pattern in pattern_translate.keys():\n contexts = parse_mc_pattern(pattern)\n pattern_stats = total_stats[total_stats.index.isin(contexts)]\n cell_level_data = pattern_stats.groupby('cell_id')[['mc', 'cov']].sum()\n cell_level_data['frac'] = cell_level_data['mc'] / cell_level_data['cov']\n\n # prettify col name\n _pattern = pattern_translate[pattern]\n cell_level_data = cell_level_data.rename(\n columns={'frac': f'{_pattern}Frac',\n 'mc': f'{_pattern}mC',\n 'cov': f'{_pattern}Cov'})\n cell_records.append(cell_level_data)\n final_df = pd.concat(cell_records, axis=1, sort=True).reindex(allc_stats_dict.keys())\n final_df['GenomeCov'] = cell_genome_cov\n\n # add lambda DNA mCY fraction and coverage\n lambda_frac = get_allc_lambda_frac(allc_list, config['num_upstr_bases'])\n for col, data in lambda_frac.items():\n final_df[col] = data\n\n final_df.index.name = 'cell_id'\n return final_df\n\n\ndef get_allc_lambda_frac(allc_list, num_upstr_bases):\n num_upstr_bases = int(num_upstr_bases)\n records = {}\n for path in allc_list:\n mc_counts = defaultdict(int)\n cov_counts = defaultdict(int)\n with TabixFile(str(path)) as allc:\n cell = pathlib.Path(path).name.split('.')[0]\n try:\n for line in allc.fetch('chrL'):\n chrom, pos, strand, context, mc, cov, _ = line.split('\\t')\n # this will lead to only four contexts: CA, CC, CT, CG\n context = 
context[num_upstr_bases:num_upstr_bases + 2]\n mc_counts[context] += int(mc)\n cov_counts[context] += int(cov)\n df = pd.DataFrame({'mc': pd.Series(mc_counts), 'cov': pd.Series(cov_counts)})\n df = df.reindex(['CG', 'CC', 'CT', 'CA']).fillna(0) # reindex to make all four context exist\n cy_cov = df.loc['CT', 'cov'] + df.loc['CC', 'cov']\n if cy_cov > 0:\n cy_frac = (df.loc['CT', 'mc'] + df.loc['CC', 'mc']) / cy_cov\n else:\n cy_frac = 0\n records[cell] = {'LambdaCYFrac': cy_frac, 'LambdaCYCov': cy_cov}\n except ValueError:\n # no chrL lines\n records[cell] = {'LambdaCYFrac': 0, 'LambdaCYCov': 0}\n records = pd.DataFrame(records).T\n return records\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
sshepherd637/aiida-core | [
"99fd841f33a5c2afa6a0c808c5e6ef9eff73a9df"
] | [
"aiida/orm/nodes/data/array/projection.py"
] | [
"# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n\"\"\"Data plugin to represet arrays of projected wavefunction components.\"\"\"\nimport copy\nimport numpy as np\n\nfrom aiida.common import exceptions\nfrom aiida.plugins import OrbitalFactory\n\nfrom ..orbital import OrbitalData\nfrom .array import ArrayData\nfrom .bands import BandsData\n\n__all__ = ('ProjectionData',)\n\n\nclass ProjectionData(OrbitalData, ArrayData):\n \"\"\"\n A class to handle arrays of projected wavefunction data. That is projections\n of a orbitals, usually an atomic-hydrogen orbital, onto a\n given bloch wavefunction, the bloch wavefunction being indexed by\n s, n, and k. E.g. the elements are the projections described as\n < orbital | Bloch wavefunction (s,n,k) >\n \"\"\"\n\n def _check_projections_bands(self, projection_array):\n \"\"\"\n Checks to make sure that a reference bandsdata is already set, and that\n projection_array is of the same shape of the bands data\n\n :param projwfc_arrays: nk x nb x nwfc array, to be\n checked against bands\n\n :raise: AttributeError if energy is not already set\n :raise: AttributeError if input_array is not of same shape as\n dos_energy\n \"\"\"\n try:\n shape_bands = np.shape(self.get_reference_bandsdata())\n except AttributeError:\n raise exceptions.ValidationError('Bands must be set first, then projwfc')\n # The [0:2] is so that each array, and not collection of arrays\n # is used to make the comparison\n if np.shape(projection_array) != shape_bands:\n raise AttributeError('These arrays are not the same shape as' ' the bands')\n\n def set_reference_bandsdata(self, value):\n \"\"\"\n Sets a reference bandsdata, creates a uuid link between this data\n object and a bandsdata object, must be set before any projection arrays\n\n :param value: a BandsData instance, a uuid or a pk\n :raise: exceptions.NotExistent if there was no BandsData associated with uuid or pk\n \"\"\"\n from aiida.orm import load_node\n\n if isinstance(value, BandsData):\n uuid = value.uuid\n else:\n try:\n pk = int(value)\n bands = load_node(pk=pk)\n uuid = bands.uuid\n except ValueError:\n uuid = str(value)\n try:\n bands = load_node(uuid=uuid)\n uuid = bands.uuid\n except Exception: # pylint: disable=bare-except\n raise exceptions.NotExistent(\n 'The value passed to '\n 'set_reference_bandsdata was not '\n 'associated to any bandsdata'\n )\n\n self.set_attribute('reference_bandsdata_uuid', uuid)\n\n def get_reference_bandsdata(self):\n \"\"\"\n Returns the reference BandsData, using the set uuid via\n set_reference_bandsdata\n\n :return: a BandsData instance\n :raise AttributeError: if the bandsdata has not been set yet\n :raise exceptions.NotExistent: if the bandsdata uuid did not retrieve bandsdata\n \"\"\"\n from aiida.orm import load_node\n try:\n uuid = self.get_attribute('reference_bandsdata_uuid')\n except AttributeError:\n raise AttributeError('BandsData has not been set for this instance')\n try:\n bands = load_node(uuid=uuid)\n except exceptions.NotExistent:\n raise exceptions.NotExistent('The bands referenced to this class have not been found in this 
database.')\n return bands\n\n def _find_orbitals_and_indices(self, **kwargs):\n \"\"\"\n Finds all the orbitals and their indicies associated with kwargs\n essential for retrieving the other indexed array parameters\n\n :param kwargs: kwargs that can call orbitals as in get_orbitals()\n :return: retrieve_indexes, list of indicicies of orbitals corresponding\n to the kwargs\n :return: all_orbitals, list of orbitals to which the indexes correspond\n \"\"\"\n selected_orbitals = self.get_orbitals(**kwargs)\n selected_orb_dicts = [orb.get_orbital_dict() for orb in selected_orbitals]\n all_orbitals = self.get_orbitals()\n all_orb_dicts = [orb.get_orbital_dict() for orb in all_orbitals]\n retrieve_indices = [i for i in range(len(all_orb_dicts)) if all_orb_dicts[i] in selected_orb_dicts]\n return retrieve_indices, all_orbitals\n\n def get_pdos(self, **kwargs):\n \"\"\"\n Retrieves all the pdos arrays corresponding to the input kwargs\n\n :param kwargs: inputs describing the orbitals associated with the pdos\n arrays\n :return: a list of tuples containing the orbital, energy array and pdos\n array associated with all orbitals that correspond to kwargs\n\n \"\"\"\n retrieve_indices, all_orbitals = self._find_orbitals_and_indices(**kwargs)\n out_list = [(\n all_orbitals[i], self.get_array(f'pdos_{self._from_index_to_arrayname(i)}'),\n self.get_array(f'energy_{self._from_index_to_arrayname(i)}')\n ) for i in retrieve_indices]\n return out_list\n\n def get_projections(self, **kwargs):\n \"\"\"\n Retrieves all the pdos arrays corresponding to the input kwargs\n\n :param kwargs: inputs describing the orbitals associated with the pdos\n arrays\n :return: a list of tuples containing the orbital, and projection arrays\n associated with all orbitals that correspond to kwargs\n\n \"\"\"\n retrieve_indices, all_orbitals = self._find_orbitals_and_indices(**kwargs)\n out_list = [\n (all_orbitals[i], self.get_array(f'proj_{self._from_index_to_arrayname(i)}')) for i in retrieve_indices\n ]\n return out_list\n\n @staticmethod\n def _from_index_to_arrayname(index):\n \"\"\"\n Used internally to determine the array names.\n \"\"\"\n return f'array_{index}'\n\n def set_projectiondata(\n self,\n list_of_orbitals,\n list_of_projections=None,\n list_of_energy=None,\n list_of_pdos=None,\n tags=None,\n bands_check=True\n ):\n \"\"\"\n Stores the projwfc_array using the projwfc_label, after validating both.\n\n :param list_of_orbitals: list of orbitals, of class orbital data.\n They should be the ones up on which the\n projection array corresponds with.\n\n :param list_of_projections: list of arrays of projections of a atomic\n wavefunctions onto bloch wavefunctions. Since the\n projection is for every bloch wavefunction which\n can be specified by its spin (if used), band, and\n kpoint the dimensions must be\n nspin x nbands x nkpoints for the projwfc array.\n Or nbands x nkpoints if spin is not used.\n\n :param energy_axis: list of energy axis for the list_of_pdos\n\n :param list_of_pdos: a list of projected density of states for the\n atomic wavefunctions, units in states/eV\n\n :param tags: A list of tags, not supported currently.\n\n :param bands_check: if false, skips checks of whether the bands has\n been already set, and whether the sizes match. 
For\n use in parsers, where the BandsData has not yet\n been stored and therefore get_reference_bandsdata\n cannot be called\n \"\"\"\n\n # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements\n\n def single_to_list(item):\n \"\"\"\n Checks if the item is a list or tuple, and converts it to a list\n if it is not already a list or tuple\n\n :param item: an object which may or may not be a list or tuple\n :return: item_list: the input item unchanged if list or tuple and\n [item] otherwise\n \"\"\"\n if isinstance(item, (list, tuple)):\n return item\n\n return [item]\n\n def array_list_checker(array_list, array_name, orb_length):\n \"\"\"\n Does basic checks over everything in the array_list. Makes sure that\n all the arrays are np.ndarray floats, that the length is same as\n required_length, raises exception using array_name if there is\n a failure\n \"\"\"\n if not all(isinstance(_, np.ndarray) for _ in array_list):\n raise exceptions.ValidationError(f'{array_name} was not composed entirely of ndarrays')\n if len(array_list) != orb_length:\n raise exceptions.ValidationError(f'{array_name} did not have the same length as the list of orbitals')\n\n ##############\n list_of_orbitals = single_to_list(list_of_orbitals)\n list_of_orbitals = copy.deepcopy(list_of_orbitals)\n\n # validates the input data\n if not list_of_pdos and not list_of_projections:\n raise exceptions.ValidationError('Must set either pdos or projections')\n if bool(list_of_energy) != bool(list_of_pdos):\n raise exceptions.ValidationError('list_of_pdos and list_of_energy must always be set together')\n\n orb_length = len(list_of_orbitals)\n\n # verifies and sets the orbital dicts\n list_of_orbital_dicts = []\n for i, _ in enumerate(list_of_orbitals):\n this_orbital = list_of_orbitals[i]\n orbital_dict = this_orbital.get_orbital_dict()\n try:\n orbital_type = orbital_dict.pop('_orbital_type')\n except KeyError:\n raise exceptions.ValidationError(f'No _orbital_type key found in dictionary: {orbital_dict}')\n cls = OrbitalFactory(orbital_type)\n test_orbital = cls(**orbital_dict)\n list_of_orbital_dicts.append(test_orbital.get_orbital_dict())\n self.set_attribute('orbital_dicts', list_of_orbital_dicts)\n\n # verifies and sets the projections\n if list_of_projections:\n list_of_projections = single_to_list(list_of_projections)\n array_list_checker(list_of_projections, 'projections', orb_length)\n for i, _ in enumerate(list_of_projections):\n this_projection = list_of_projections[i]\n array_name = self._from_index_to_arrayname(i)\n if bands_check:\n self._check_projections_bands(this_projection)\n self.set_array(f'proj_{array_name}', this_projection)\n\n # verifies and sets both pdos and energy\n if list_of_pdos:\n list_of_pdos = single_to_list(list_of_pdos)\n list_of_energy = single_to_list(list_of_energy)\n array_list_checker(list_of_pdos, 'pdos', orb_length)\n array_list_checker(list_of_energy, 'energy', orb_length)\n for i, _ in enumerate(list_of_pdos):\n this_pdos = list_of_pdos[i]\n this_energy = list_of_energy[i]\n array_name = self._from_index_to_arrayname(i)\n if bands_check:\n self._check_projections_bands(this_projection)\n self.set_array(f'pdos_{array_name}', this_pdos)\n self.set_array(f'energy_{array_name}', this_energy)\n\n # verifies and sets the tags\n if tags is not None:\n try:\n if len(tags) != len(list_of_orbitals):\n raise exceptions.ValidationError('must set as many tags as projections')\n except IndexError:\n return exceptions.ValidationError('tags must be a list')\n\n if 
not all(isinstance(_, str) for _ in tags):\n raise exceptions.ValidationError('Tags must set a list of strings')\n self.set_attribute('tags', tags)\n\n def set_orbitals(self, **kwargs): # pylint: disable=arguments-differ\n \"\"\"\n This method is inherited from OrbitalData, but is blocked here.\n If used will raise a NotImplementedError\n \"\"\"\n raise NotImplementedError(\n 'You cannot set orbitals using this class!'\n ' This class is for setting orbitals and '\n ' projections only!'\n )\n"
] | [
[
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
waq1129/GP-RNN_UAI2019 | [
"2d223bdda962e8571b10b80ba17fb4031cd97bd4"
] | [
"funs.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport edward as ed\nfrom edward.models import Normal, MultivariateNormalTriL\nfrom tensorflow.contrib import slim\n\n\ndef normal_cell(hprev, zt, H):\n return tf.ones(H)\n\ndef ar1_cell(hprev, zt, name=None, reuse=False):\n return zt\n\ndef rnn_cell(hprev, zt, name=None, reuse=False):\n \"\"\"basic RNN returning next hidden state at a specific timestep.\"\"\"\n nin = zt.shape[-1].value\n nout = hprev.shape[-1].value\n with tf.variable_scope(name, default_name=\"rnn\", values=[hprev, zt], reuse=reuse):\n wz = get_variable_wrap(\"kernel/input\", [nin, nout], dtype=tf.float32, \n initializer=tf.random_normal_initializer(0, 0.01))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout],dtype=tf.float32, \n initializer=tf.random_normal_initializer(0, 0.01))\n bh = get_variable_wrap(\"bias\", [nout], dtype=tf.float32, \n initializer=tf.random_normal_initializer(0, 0.01))\n \n return tf.tanh(ed.dot(hprev, wh) + ed.dot(zt, wz) + bh)\n \ndef lstm_cell(x, h, c, name=None, reuse=False):\n \"\"\"LSTM returning hidden state and content cell at a specific timestep.\"\"\"\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c\n\ndef lstm_cell_1(x, h, c, name=None, reuse=False):\n \"\"\"LSTM returning hidden state and content cell at a specific timestep.\"\"\"\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm_1\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c\n\ndef lstm_cell_2(x, h, c, name=None, reuse=False):\n \"\"\"LSTM returning hidden state and content cell at a specific timestep.\"\"\"\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm_2\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = 
tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c\n\ndef encode_z(hprev, L, name=None, reuse=False):\n # input: hprev should change to [#batch, dim] \n #hprev = tf.expand_dims(hprev, 0)\n #hidden_dim = 15\n #with tf.variable_scope(\"prior\"):\n # prior = fc_act(hprev, hidden_dim, act=tf.nn.relu, name=\"fc_prior\")\n #with tf.variable_scope(\"prior_mu\"):\n # prior_mu = fc_act(prior, L, name=\"fc_prior_mu\")\n #with tf.variable_scope(\"prior_sigma\"):\n # prior_sigma = fc_act(prior, L, act=tf.nn.softplus, name=\"fc_prior_sigma\")\n #zt = Normal(loc=tf.squeeze(prior_mu, 0), scale = tf.squeeze(prior_sigma, 0))\n \n #AR1 cell using difussion process: z_t = z_t-1 + eta\n #zt = Normal(hprev, 0.1)\n \n # NN for encoding ht -> mu_zt, sigma_zt\n H = hprev.shape[0]\n \n with tf.variable_scope(name, default_name=\"encode_z\", reuse=reuse):\n Whz_mean = get_variable_wrap(\"Wmean\", [H, L], dtype=tf.float32, \n initializer=tf.constant_initializer(0.0))\n bhz_mean = get_variable_wrap(\"bmean\", [L], dtype=tf.float32, \n initializer=tf.constant_initializer(0.0))\n Whz_cov = get_variable_wrap(\"Wvar\", [H, L], dtype=tf.float32, \n initializer=tf.constant_initializer(0.0))\n bhz_cov = get_variable_wrap(\"bvar\", [L], dtype=tf.float32, \n initializer=tf.constant_initializer(0.0))\n \n #Whz_mean = tf.Variable(np.zeros([H, L]), dtype=tf.float32)\n #bhz_mean = tf.Variable(np.zeros(L), dtype=tf.float32) \n #Whz_cov = tf.Variable(np.zeros([H, L]), dtype=tf.float32) \n #bhz_cov = tf.Variable(np.zeros(L), dtype=tf.float32)\n \n zt = Normal(loc=ed.dot(hprev, Whz_mean) + bhz_mean, \n scale=tf.nn.softplus(ed.dot(hprev, Whz_cov) + bhz_cov))\n return zt\n\ndef encode_z_ar1(hprev, L):\n H = hprev.shape[0]\n var = tf.Variable(np.ones([H]), dtype=tf.float32)\n zt = Normal(hprev, var)\n return zt\n\ndef get_variable_wrap(*args, **kwargs):\n try:\n return tf.get_variable(*args, **kwargs)\n except ValueError:\n tf.get_variable_scope().reuse_variables()\n return tf.get_variable(*args, **kwargs)\n\ndef fc_act(x, next_layer_size, act=None, name=\"fc\"):\n nbatches = x.get_shape()[0]\n prev_layer_size = x.get_shape()[1]\n with tf.name_scope(\"fc\"):\n w = get_variable_wrap(\"weights\", [prev_layer_size, next_layer_size], \n dtype=tf.float, initializer=tf.random_normal_initializer())\n b = get_variable_wrap(\"bias\", [next_layer_size], \n dtype=tf.float32, initializer=tf.constant_initializer(0.1))\n o = tf.add(tf.matmul(x, w), b)\n if act: return act(o)\n else: return o\n\ndef neural_network(z, dim_out):\n \"\"\"neural network model for mapping\"\"\"\n hidden_dim = 15\n net1 = slim.fully_connected(z, hidden_dim, activation_fn=None)\n net2 = slim.fully_connected(net1, dim_out, activation_fn=tf.tanh)\n return net2\n\ndef compute_optimal_rotation(L, L_true, scale=True):\n \"\"\"Find a rotation matrix R such that F_inf.dot(R) ~= F_true\"\"\"\n from scipy.linalg import orthogonal_procrustes\n R = orthogonal_procrustes(L, L_true)[0]\n\n if scale:\n Lp = L.dot(R)\n s = (L_true*Lp).sum() / (Lp*Lp).sum()\n return R*s\n else:\n return R\n \ndef match_z(x,z):\n cp = np.corrcoef(x.T,z.T)[0,1]\n cn = np.corrcoef(-x.T,z.T)[0,1]\n if cp<cn:\n return -x\n else:\n return x\n\ndef dyn_lorenz(T, dt=0.01):\n\n stepCnt = T\n \n def lorenz(x, y, z, s=10, r=28, b=2.667):\n x_dot = s*(y - x)\n y_dot = r*x - y - x*z\n z_dot = x*y - b*z\n return x_dot, y_dot, z_dot\n \n # Need one more for the initial values\n xs = np.empty((stepCnt + 1,))\n ys = np.empty((stepCnt + 1,))\n zs = 
np.empty((stepCnt + 1,))\n\n # Setting initial values\n xs[0], ys[0], zs[0] = (0., 1., 1.05)\n\n # Stepping through \"time\".\n for i in range(stepCnt):\n # Derivatives of the X, Y, Z state\n x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])\n xs[i + 1] = xs[i] + (x_dot * dt)\n ys[i + 1] = ys[i] + (y_dot * dt)\n zs[i + 1] = zs[i] + (z_dot * dt)\n \n z = np.zeros((T, 3))\n z[:,0] = xs[:-1]\n z[:,1] = ys[:-1]\n z[:,2] = zs[:-1]\n return z\n \ndef dyn_sine(T, N, L):\n x = np.arange(T)\n z_true = np.zeros((N,L))\n z_true[:,0] = 0.5 * np.cos(2 * np.pi * x / 300 + np.pi)\n z_true[:,1] = 0.5 * np.cos(2 * np.pi * x / 25 + 1/3 * np.pi)\n z_true[:,2] = 0.5 * np.cos(2 * np.pi * x / 25 + 2/3 * np.pi)\n return z_true\n\ndef map_linear(z, L, D):\n Wz_true = np.random.normal(0, 1,[L,D])\n bz_true = np.random.normal(0, 1,[D])\n mu = np.dot(z, Wz_true)\n x = np.random.normal(mu, 0.1)\n return x\n\ndef map_sine(z, L, D):\n Wz_true = np.random.normal(0, 1,[L,D])\n bz_true = np.random.normal(0, 1,[D])\n mu = np.dot(z, Wz_true)\n x = np.random.normal(np.sin(mu), 0.1)\n return x\n\ndef map_tanh(z, L, D):\n Wz_true = np.random.normal(0, 1,[L,D])\n bz_true = np.random.normal(0, 1,[D])\n mu = np.dot(z, Wz_true)\n x = np.tanh(mu)\n return x\n\n# define kernel \ndef kernel_fun(X, X2=None, lengthscale=1.0, variance=1.0, name=None):\n from tensorflow.python.ops import control_flow_ops\n \n lengthscale = tf.convert_to_tensor(lengthscale)\n variance = tf.convert_to_tensor(variance)\n dependencies = [tf.assert_positive(lengthscale), tf.assert_positive(variance)]\n lengthscale = control_flow_ops.with_dependencies(dependencies, lengthscale)\n variance = control_flow_ops.with_dependencies(dependencies, variance)\n\n X = tf.convert_to_tensor(X)\n X = X / lengthscale\n Xs = tf.reduce_sum(tf.square(X), 1)\n if X2 is None:\n X2 = X\n X2s = Xs\n else:\n X2 = tf.convert_to_tensor(X2)\n X2 = X2 / lengthscale\n X2s = tf.reduce_sum(tf.square(X2), 1)\n\n r = tf.reshape(Xs, [-1, 1]) + tf.reshape(X2s, [1, -1]) - \\\n 2 * tf.matmul(X, X2, transpose_b=True)\n \n output = {\n 'rbf': lambda r: variance * tf.exp(-r / 2),\n 'matern32': lambda r: variance * (1. + np.sqrt(3.) * tf.sqrt(r + 1e-6)) * \\\n tf.exp(-np.sqrt(3.) * tf.sqrt(r + 1e-6)),\n 'matern52': lambda r: variance * (1. + np.sqrt(5.) * tf.sqrt(r + 1e-6) + \\\n 5./3. * (r + 1e-6)) * tf.exp(-np.sqrt(5.) * tf.sqrt(r + 1e-6)),\n 'cosine': lambda r: variance * tf.cos(tf.sqrt(r + 1e-6))\n }[name](r)\n return output"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.dot",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.orthogonal_initializer",
"tensorflow.tanh",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"numpy.arange",
"numpy.sin",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.random_normal_initializer",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.exp",
"tensorflow.get_variable_scope",
"tensorflow.contrib.slim.fully_connected",
"numpy.corrcoef",
"tensorflow.split",
"scipy.linalg.orthogonal_procrustes",
"numpy.tanh",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.ones",
"numpy.cos",
"numpy.ones",
"tensorflow.constant_initializer",
"numpy.random.normal",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.assert_positive",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.15"
],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
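`kernel_fun` in this record builds the pairwise squared-distance matrix r in TensorFlow and then applies one of several stationary kernels. A plain-numpy sketch of its 'rbf' branch, handy for checking the TensorFlow output on small inputs; shapes and hyperparameters are arbitrary:

```python
import numpy as np

def rbf_kernel(X, X2=None, lengthscale=1.0, variance=1.0):
    """K[i, j] = variance * exp(-||x_i - x2_j||^2 / (2 * lengthscale^2))."""
    X = np.asarray(X, dtype=float) / lengthscale
    X2 = X if X2 is None else np.asarray(X2, dtype=float) / lengthscale
    sq_dists = (
        np.sum(X ** 2, axis=1)[:, None]
        + np.sum(X2 ** 2, axis=1)[None, :]
        - 2.0 * X @ X2.T
    )
    return variance * np.exp(-sq_dists / 2.0)

X = np.random.randn(5, 3)
K = rbf_kernel(X, lengthscale=2.0, variance=0.5)
print(K.shape, np.allclose(np.diag(K), 0.5))  # (5, 5) True: zero distance on the diagonal
```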
rBatt/mystock | [
"0944b4e46d1720fa47126d95f9b5a857276c8e38"
] | [
"tests/test_stockdata.py"
] | [
"import os\nimport re\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nimport unittest\nfrom unittest.mock import patch\n\nfrom mystock.stockdata import StockData\n\nclass TestApiCall(unittest.TestCase):\n\n def setUp(self):\n self.input_patch = patch('mystock.apicall.input')\n self.input_mock = self.input_patch.start()\n self.input_mock.return_value = \"testKey\"\n\n opts = {\n \"date_from\": \"2019-12-05\"\n , \"date_to\": \"2019-12-06\"\n }\n self.data1 = StockData(options=opts)\n\n self.execute_patch = patch('mystock.stockdata.ApiCall.execute_call')\n self.execute_mock = self.execute_patch.start()\n\n test_out_dict = {\n 'close': {\n ('VOO', '2019-12-05'): '286.64',\n ('VOO', '2019-12-06'): '289.19',\n ('VTI', '2019-12-05'): '158.88',\n ('VTI', '2019-12-06'): '160.29'},\n 'high': {\n ('VOO', '2019-12-05'): '286.83',\n ('VOO', '2019-12-06'): '289.65',\n ('VTI', '2019-12-05'): '159.00',\n ('VTI', '2019-12-06'): '160.54'},\n 'low': {\n ('VOO', '2019-12-05'): '285.29',\n ('VOO', '2019-12-06'): '288.55',\n ('VTI', '2019-12-05'): '158.18',\n ('VTI', '2019-12-06'): '159.89'},\n 'open': {\n ('VOO', '2019-12-05'): '286.83',\n ('VOO', '2019-12-06'): '288.58',\n ('VTI', '2019-12-05'): '159.00',\n ('VTI', '2019-12-06'): '159.98'},\n 'volume': {\n ('VOO', '2019-12-05'): '1597255',\n ('VOO', '2019-12-06'): '1609493',\n ('VTI', '2019-12-05'): '1858086',\n ('VTI', '2019-12-06'): '2021049'}\n }\n test_out = pd.DataFrame(test_out_dict)\n test_out.index.names = ['symbol', 'date']\n test_out = test_out.reorder_levels(['date','symbol'])\n self.execute_mock.return_value = test_out\n\n def tearDown(self):\n self.input_patch.stop()\n\n def test_get_data(self):\n test_dat = self.data1.get_data(tckr='asdf')\n self.assertIsNone(assert_frame_equal(test_dat, self.execute_mock()))\n test_dat2 = self.data1.get_data(tckr=['asdf', 'asdff'])\n self.assertIsNone(assert_frame_equal(test_dat2, pd.concat([self.execute_mock()]*2)))\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jiangwei1995910/cnocr | [
"e685db43622916b127728e0a714fef81036b8caa"
] | [
"cnocr/data_utils/data_iter.py"
] | [
"from __future__ import print_function\n\nimport os\nfrom PIL import Image\nimport numpy as np\nimport mxnet as mx\nimport random\n\nfrom .multiproc_data import MPData\n\n\nclass SimpleBatch(object):\n def __init__(self, data_names, data, label_names=list(), label=list()):\n self._data = data\n self._label = label\n self._data_names = data_names\n self._label_names = label_names\n\n self.pad = 0\n self.index = None # TODO: what is index?\n\n @property\n def data(self):\n return self._data\n\n @property\n def label(self):\n return self._label\n\n @property\n def data_names(self):\n return self._data_names\n\n @property\n def label_names(self):\n return self._label_names\n\n @property\n def provide_data(self):\n return [(n, x.shape) for n, x in zip(self._data_names, self._data)]\n\n @property\n def provide_label(self):\n return [(n, x.shape) for n, x in zip(self._label_names, self._label)]\n\n\n# class ImageIter(mx.io.DataIter):\n#\n# \"\"\"\n# Iterator class for generating captcha image data\n# \"\"\"\n# def __init__(self, data_root, data_list, batch_size, data_shape, num_label, name=None):\n# \"\"\"\n# Parameters\n# ----------\n# data_root: str\n# root directory of images\n# data_list: str\n# a .txt file stores the image name and corresponding labels for each line\n# batch_size: int\n# name: str\n# \"\"\"\n# super(ImageIter, self).__init__()\n# self.batch_size = batch_size\n# self.data_shape = data_shape\n# self.num_label = num_label\n#\n# self.data_root = data_root\n# self.dataset_lst_file = open(data_list)\n#\n# self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))]\n# self.provide_label = [('label', (self.batch_size, self.num_label))]\n# self.name = name\n#\n# def __iter__(self):\n# data = []\n# label = []\n# cnt = 0\n# for m_line in self.dataset_lst_file:\n# img_lst = m_line.strip().split(' ')\n# img_path = os.path.join(self.data_root, img_lst[0])\n#\n# cnt += 1\n# img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')\n# img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0]))\n# data.append(img)\n#\n# ret = np.zeros(self.num_label, int)\n# for idx in range(1, len(img_lst)):\n# ret[idx-1] = int(img_lst[idx])\n#\n# label.append(ret)\n# if cnt % self.batch_size == 0:\n# data_all = [mx.nd.array(data)]\n# label_all = [mx.nd.array(label)]\n# data_names = ['data']\n# label_names = ['label']\n# data.clear()\n# label.clear()\n# yield SimpleBatch(data_names, data_all, label_names, label_all)\n# continue\n#\n#\n# def reset(self):\n# if self.dataset_lst_file.seekable():\n# self.dataset_lst_file.seek(0)\n\nclass ImageIterLstm(mx.io.DataIter):\n\n \"\"\"\n Iterator class for generating captcha image data\n \"\"\"\n\n def __init__(self, data_root, data_list, batch_size, data_shape, num_label, lstm_init_states, name=None):\n \"\"\"\n Parameters\n ----------\n data_root: str\n root directory of images\n data_list: str\n a .txt file stores the image name and corresponding labels for each line\n batch_size: int\n name: str\n \"\"\"\n super(ImageIterLstm, self).__init__()\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.num_label = num_label\n\n self.init_states = lstm_init_states\n self.init_state_arrays = [mx.nd.zeros(x[1]) for x in lstm_init_states]\n\n self.data_root = data_root\n self.dataset_lines = open(data_list).readlines()\n\n self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))] + lstm_init_states\n self.provide_label = [('label', (self.batch_size, self.num_label))]\n self.name = 
name\n\n def __iter__(self):\n init_state_names = [x[0] for x in self.init_states]\n data = []\n label = []\n cnt = 0\n for m_line in self.dataset_lines:\n img_lst = m_line.strip().split(' ')\n img_path = os.path.join(self.data_root, img_lst[0])\n\n cnt += 1\n img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')\n img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0])) # res: [1, height, width]\n data.append(img)\n\n ret = np.zeros(self.num_label, int)\n for idx in range(1, len(img_lst)):\n ret[idx - 1] = int(img_lst[idx])\n\n label.append(ret)\n if cnt % self.batch_size == 0:\n data_all = [mx.nd.array(data)] + self.init_state_arrays\n label_all = [mx.nd.array(label)]\n data_names = ['data'] + init_state_names\n label_names = ['label']\n data = []\n label = []\n yield SimpleBatch(data_names, data_all, label_names, label_all)\n continue\n\n def reset(self):\n # if self.dataset_lst_file.seekable():\n # self.dataset_lst_file.seek(0)\n random.shuffle(self.dataset_lines)\n\n\nclass MPOcrImages(object):\n \"\"\"\n Handles multi-process Chinese OCR image generation\n \"\"\"\n def __init__(self, data_root, data_list, data_shape, num_label, num_processes, max_queue_size):\n \"\"\"\n\n Parameters\n ----------\n data_shape: [width, height]\n num_processes: int\n Number of processes to spawn\n max_queue_size: int\n Maximum images in queue before processes wait\n \"\"\"\n self.data_shape = data_shape\n self.num_label = num_label\n\n self.data_root = data_root\n self.dataset_lines = open(data_list).readlines()\n\n self.mp_data = MPData(num_processes, max_queue_size, self._gen_sample)\n\n def _gen_sample(self):\n m_line = random.choice(self.dataset_lines)\n img_lst = m_line.strip().split(' ')\n img_path = os.path.join(self.data_root, img_lst[0])\n\n img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')\n img = np.array(img)\n # print(img.shape)\n img = np.transpose(img, (1, 0)) # res: [1, width, height]\n # if len(img.shape) == 2:\n # img = np.expand_dims(np.transpose(img, (1, 0)), axis=0) # res: [1, width, height]\n\n labels = np.zeros(self.num_label, int)\n for idx in range(1, len(img_lst)):\n labels[idx - 1] = int(img_lst[idx])\n\n return img, labels\n\n @property\n def size(self):\n return len(self.dataset_lines)\n\n @property\n def shape(self):\n return self.data_shape\n\n def start(self):\n \"\"\"\n Starts the processes\n \"\"\"\n self.mp_data.start()\n\n def get(self):\n \"\"\"\n Get an image from the queue\n\n Returns\n -------\n np.ndarray\n A captcha image, normalized to [0, 1]\n \"\"\"\n return self.mp_data.get()\n\n def reset(self):\n \"\"\"\n Resets the generator by stopping all processes\n \"\"\"\n self.mp_data.reset()\n\n\nclass OCRIter(mx.io.DataIter):\n \"\"\"\n Iterator class for generating captcha image data\n \"\"\"\n def __init__(self, count, batch_size, lstm_init_states, captcha, num_label, name):\n \"\"\"\n Parameters\n ----------\n count: int\n Number of batches to produce for one epoch\n batch_size: int\n lstm_init_states: list of tuple(str, tuple)\n A list of tuples with [0] name and [1] shape of each LSTM init state\n captcha MPCaptcha\n Captcha image generator. 
Can be MPCaptcha or any other class providing .shape and .get() interface\n name: str\n \"\"\"\n super(OCRIter, self).__init__()\n self.batch_size = batch_size\n self.count = count if count > 0 else captcha.size // batch_size\n self.init_states = lstm_init_states\n self.init_state_arrays = [mx.nd.zeros(x[1]) for x in lstm_init_states]\n data_shape = captcha.shape\n self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))] + lstm_init_states\n self.provide_label = [('label', (self.batch_size, num_label))]\n self.mp_captcha = captcha\n self.name = name\n\n def __iter__(self):\n init_state_names = [x[0] for x in self.init_states]\n for k in range(self.count):\n data = []\n label = []\n for i in range(self.batch_size):\n img, labels = self.mp_captcha.get()\n # print(img.shape)\n img = np.expand_dims(np.transpose(img, (1, 0)), axis=0) # size: [1, height, width]\n # import pdb; pdb.set_trace()\n data.append(img)\n label.append(labels)\n data_all = [mx.nd.array(data)] + self.init_state_arrays\n label_all = [mx.nd.array(label)]\n data_names = ['data'] + init_state_names\n label_names = ['label']\n\n data_batch = SimpleBatch(data_names, data_all, label_names, label_all)\n yield data_batch\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Edelbert/tf2-mobile-2d-single-pose-estimation | [
"a6961b2c12e8edfd9b7c4e87d0925c046ff7b673"
] | [
"data_loader/dataset_prepare.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time : 18-3-6 3:20 PM\n# @Author : [email protected]\n# @FileName: data_prepare.py\n# @Software: PyCharm\n# @updated by Jaewook Kang 20181010 for tf-tiny-pose-estimation\n\n\nimport numpy as np\nimport cv2\nimport struct\nimport math\n\n\nclass CocoMetadata:\n\n @staticmethod\n def parse_float(four_np):\n assert len(four_np) == 4\n return struct.unpack('<f', bytes(four_np))[0]\n\n @staticmethod\n def parse_floats(four_nps, adjust=0):\n assert len(four_nps) % 4 == 0\n return [(CocoMetadata.parse_float(four_nps[x * 4:x * 4 + 4]) + adjust) for x in range(len(four_nps) // 4)]\n\n def __init__(self, idx, img_path, img_meta, keypoint_infos, number_of_heatmap, sigma, dataset_name: str = 'COCO'):\n self.idx = idx\n self.img = self.read_image(img_path)\n self.sigma = sigma\n\n self.height = int(img_meta['height'])\n self.width = int(img_meta['width'])\n self.number_of_heatmap = number_of_heatmap\n\n joint_list = []\n #print(keypoint_infos)\n for keypoint_info in keypoint_infos:\n if keypoint_info.get('num_keypoints', 0) == 0:\n continue\n\n kp = np.array(keypoint_info['keypoints'])\n xs = kp[0::3]\n ys = kp[1::3]\n vs = kp[2::3]\n\n joint_list.append([(x, y) if v >= 1 else (-1000, -1000) for x, y, v in zip(xs, ys, vs)])\n\n self.joint_list = []\n # transform = list(zip(\n # [1, 2, 3, 4],\n # [1, 2, 3, 4]\n # )) # receipt\n # transform = list(zip(\n # [1, 2, 4, 6, 8, 3, 5, 7, 10, 12, 14, 9, 11, 13],\n # [1, 2, 4, 6, 8, 3, 5, 7, 10, 12, 14, 9, 11, 13]\n # ))\n # transform = list(zip(\n # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],\n # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\n # )) # ai challenge\n if dataset_name == 'COCO':\n transform = list(zip(\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]\n )) # coco\n elif dataset_name == 'MHP':\n transform = list(zip(\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n )) # coco\n else:\n raise RuntimeError(f'Unknown dataset {dataset_name}')\n\n \n for prev_joint in joint_list:\n new_joint = []\n for idx1, idx2 in transform:\n j1 = prev_joint[idx1 - 1]\n j2 = prev_joint[idx2 - 1]\n if j1[0] <= 0 or j1[1] <= 0 or j2[0] <= 0 or j2[1] <= 0:\n new_joint.append((-1000, -1000))\n else:\n new_joint.append((((j1[0] + j2[0]) / 2), ((j1[1] + j2[1]) / 2)))\n # background\n # new_joint.append((-1000, -1000))\n self.joint_list.append(new_joint)\n \n\n def get_heatmap(self, target_size):\n\n heatmap = np.zeros((self.number_of_heatmap, self.height, self.width), dtype=np.float32)\n\n for joints in self.joint_list:\n for idx, point in enumerate(joints):\n #print('point fo heatmap', idx, point)\n if point[0] < 0 or point[1] < 0:\n continue\n CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)\n\n heatmap = heatmap.transpose((1, 2, 0))\n\n # background\n # heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)\n\n #print(heatmap)\n if target_size:\n # print(heatmap.shape, \"->\", target_size)\n #print('get heatma[p', heatmap.shape, np.sum(heatmap), target_size)\n heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)\n #print(heatmap.shape)\n return heatmap.astype(np.float16)\n\n @staticmethod\n # the below function actually made heatmap\n def put_heatmap(heatmap, plane_idx, center, sigma):\n center_x, center_y = center\n _, height, width = heatmap.shape[:3]\n\n th = 1.6052\n delta = math.sqrt(th * 2)\n\n x0 = int(max(0, center_x - delta * sigma))\n y0 = 
int(max(0, center_y - delta * sigma))\n\n x1 = int(min(width, center_x + delta * sigma))\n y1 = int(min(height, center_y + delta * sigma))\n\n # gaussian filter\n for y in range(y0, y1):\n for x in range(x0, x1):\n d = (x - center_x) ** 2 + (y - center_y) ** 2\n exp = d / 2.0 / sigma / sigma\n if exp > th:\n continue\n heatmap[plane_idx][y][x] = max(heatmap[plane_idx][y][x], math.exp(-exp))\n heatmap[plane_idx][y][x] = min(heatmap[plane_idx][y][x], 1.0)\n\n def read_image(self, img_path):\n #print(img_path)\n img_str = open(img_path, \"rb\").read()\n #print(img_path)\n if not img_str:\n print(\"image not read, path=%s\" % img_path)\n nparr = np.fromstring(img_str, np.uint8)\n return cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n"
] | [
[
"numpy.array",
"numpy.fromstring",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gjmulder/gluon-ts | [
"cb5b257e7dc38a3da2b07a1c9be07a5e58653105"
] | [
"src/gluonts/trainer/_base.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nimport logging\nimport os\nimport tempfile\nimport time\nimport uuid\nfrom typing import Any, List, NamedTuple, Optional, Union\n\n# Third-party imports\nimport mxnet as mx\nimport mxnet.autograd as autograd\nimport mxnet.gluon.nn as nn\nimport numpy as np\n\n# First-party imports\nfrom gluonts.core.component import get_mxnet_context, validated\nfrom gluonts.core.exception import GluonTSDataError\nfrom gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader\nfrom gluonts.support.util import HybridContext\nfrom gluonts.gluonts_tqdm import tqdm\n\n# Relative imports\nfrom . import learning_rate_scheduler as lrs\n\nlogger = logging.getLogger(\"trainer\")\n\nMODEL_ARTIFACT_FILE_NAME = \"model\"\nSTATE_ARTIFACT_FILE_NAME = \"state\"\n\n# make the IDE happy: mx.py does not explicitly import autograd\nmx.autograd = autograd\n\n\ndef check_loss_finite(val: float) -> None:\n if not np.isfinite(val):\n raise GluonTSDataError(\n \"Encountered invalid loss value! Try reducing the learning rate \"\n \"or try a different likelihood.\"\n )\n\n\ndef loss_value(loss: mx.metric.Loss) -> float:\n return loss.get_name_value()[0][1]\n\n\nclass BestEpochInfo(NamedTuple):\n params_path: str\n epoch_no: int\n metric_value: float\n\n\nclass Trainer:\n r\"\"\"\n A trainer specifies how a network is going to be trained.\n\n A trainer is mainly defined by two sets of parameters. The first one determines the number of examples\n that the network will be trained on (`epochs`, `num_batches_per_epoch` and `batch_size`), while the\n second one specifies how the gradient updates are performed (`learning_rate`, `learning_rate_decay_factor`,\n `patience`, `minimum_learning_rate`, `clip_gradient` and `weight_decay`).\n\n Parameters\n ----------\n ctx\n epochs\n Number of epochs that the network will train (default: 1).\n batch_size\n Number of examples in each batch (default: 32).\n num_batches_per_epoch\n Number of batches at each epoch (default: 100).\n learning_rate\n Initial learning rate (default: :math:`10^{-3}`).\n learning_rate_decay_factor\n Factor (between 0 and 1) by which to decrease the learning rate (default: 0.5).\n patience\n The patience to observe before reducing the learning rate, nonnegative integer (default: 10).\n minimum_learning_rate\n Lower bound for the learning rate (default: :math:`5\\cdot 10^{-5}`).\n clip_gradient\n Maximum value of gradient. The gradient is clipped if it is too large (default: 10).\n weight_decay\n The weight decay (or L2 regularization) coefficient. 
Modifies objective by adding a penalty for having\n large weights (default :math:`10^{-8}`).\n init\n Initializer of the weights of the network (default: \"xavier\").\n hybridize\n \"\"\"\n\n @validated()\n def __init__(\n self,\n ctx: Optional[mx.Context] = None,\n epochs: int = 100,\n batch_size: int = 32,\n num_batches_per_epoch: int = 50,\n learning_rate: float = 1e-3,\n learning_rate_decay_factor: float = 0.5,\n patience: int = 10,\n minimum_learning_rate: float = 5e-5,\n clip_gradient: float = 10.0,\n weight_decay: float = 1e-8,\n init: Union[str, mx.initializer.Initializer] = \"xavier\",\n hybridize: bool = True,\n ) -> None:\n\n assert (\n 0 <= epochs < float(\"inf\")\n ), \"The value of `epochs` should be >= 0\"\n assert 0 < batch_size, \"The value of `batch_size` should be > 0\"\n assert (\n 0 < num_batches_per_epoch\n ), \"The value of `num_batches_per_epoch` should be > 0\"\n assert (\n 0 < learning_rate < float(\"inf\")\n ), \"The value of `learning_rate` should be > 0\"\n assert (\n 0 <= learning_rate_decay_factor < 1\n ), \"The value of `learning_rate_decay_factor` should be in the [0, 1) range\"\n assert 0 <= patience, \"The value of `patience` should be >= 0\"\n assert (\n 0 <= minimum_learning_rate\n ), \"The value of `minimum_learning_rate` should be >= 0\"\n assert 0 < clip_gradient, \"The value of `clip_gradient` should be > 0\"\n assert 0 <= weight_decay, \"The value of `weight_decay` should be => 0\"\n\n self.epochs = epochs\n self.batch_size = batch_size\n self.num_batches_per_epoch = num_batches_per_epoch\n self.learning_rate = learning_rate\n self.learning_rate_decay_factor = learning_rate_decay_factor\n self.patience = patience\n self.minimum_learning_rate = minimum_learning_rate\n self.clip_gradient = clip_gradient\n self.weight_decay = weight_decay\n self.init = init\n self.hybridize = hybridize\n self.ctx = ctx if ctx is not None else get_mxnet_context()\n self.halt = False\n\n def set_halt(self, signum: int, stack_frame: Any) -> None:\n logging.info(\"Received signal: {}\".format(signum))\n self.halt = True\n\n def count_model_params(self, net: nn.HybridBlock) -> int:\n params = net.collect_params()\n num_params = 0\n for p in params:\n v = params[p]\n num_params += np.prod(v.shape)\n return num_params\n\n def __call__(\n self,\n net: nn.HybridBlock,\n input_names: List[str],\n train_iter: TrainDataLoader,\n validation_iter: Optional[ValidationDataLoader] = None,\n ) -> None: # TODO: we may want to return some training information here\n is_validation_available = validation_iter is not None\n self.halt = False\n\n with tempfile.TemporaryDirectory(\n prefix=\"gluonts-trainer-temp-\"\n ) as gluonts_temp:\n\n def base_path() -> str:\n return os.path.join(\n gluonts_temp,\n \"{}_{}\".format(STATE_ARTIFACT_FILE_NAME, uuid.uuid4()),\n )\n\n logging.info(\"Start model training\")\n\n net.initialize(ctx=self.ctx, init=self.init)\n\n with HybridContext(\n net=net,\n hybridize=self.hybridize,\n static_alloc=True,\n static_shape=True,\n ):\n batch_size = train_iter.batch_size\n\n best_epoch_info = BestEpochInfo(\n params_path=\"%s-%s.params\" % (base_path(), \"init\"),\n epoch_no=-1,\n metric_value=np.Inf,\n )\n\n lr_scheduler = lrs.MetricAttentiveScheduler(\n objective=\"min\",\n patience=self.patience,\n decay_factor=self.learning_rate_decay_factor,\n min_lr=self.minimum_learning_rate,\n )\n\n optimizer = mx.optimizer.Adam(\n learning_rate=self.learning_rate,\n lr_scheduler=lr_scheduler,\n wd=self.weight_decay,\n clip_gradient=self.clip_gradient,\n )\n\n trainer = 
mx.gluon.Trainer(\n net.collect_params(),\n optimizer=optimizer,\n kvstore=\"device\", # FIXME: initialize properly\n )\n\n def loop(\n epoch_no, batch_iter, is_training: bool = True\n ) -> mx.metric.Loss:\n tic = time.time()\n\n epoch_loss = mx.metric.Loss()\n\n with tqdm(batch_iter) as it:\n for batch_no, data_entry in enumerate(it, start=1):\n if self.halt:\n break\n\n inputs = [data_entry[k] for k in input_names]\n\n with mx.autograd.record():\n output = net(*inputs)\n\n # network can returns several outputs, the first being always the loss\n # when having multiple outputs, the forward returns a list in the case of hybrid and a\n # tuple otherwise\n # we may wrap network outputs in the future to avoid this type check\n if isinstance(output, (list, tuple)):\n loss = output[0]\n else:\n loss = output\n\n if is_training:\n loss.backward()\n trainer.step(batch_size)\n\n epoch_loss.update(None, preds=loss)\n it.set_postfix(\n ordered_dict={\n (\"\" if is_training else \"validation_\")\n + \"avg_epoch_loss\": loss_value(epoch_loss)\n },\n refresh=False,\n )\n # print out parameters of the network at the first pass\n if batch_no == 1 and epoch_no == 0:\n net_name = type(net).__name__\n num_model_param = self.count_model_params(net)\n logging.info(\n f\"Number of parameters in {net_name}: {num_model_param}\"\n )\n # mark epoch end time and log time cost of current epoch\n toc = time.time()\n if (epoch_no % 16 == 0):\n logging.info(\n \"Epoch[%d] Elapsed time %.3f seconds\",\n epoch_no,\n (toc - tic),\n )\n\n # check and log epoch loss\n check_loss_finite(loss_value(epoch_loss))\n\n if (epoch_no % 16 == 0):\n logging.info(\n \"Epoch[%d] Evaluation metric '%s'=%f\",\n epoch_no,\n (\"\" if is_training else \"validation_\") + \"epoch_loss\",\n loss_value(epoch_loss),\n )\n return epoch_loss\n\n for epoch_no in range(self.epochs):\n if self.halt:\n logging.info(\n f\"Epoch[{epoch_no}] Interrupting training\"\n )\n break\n\n curr_lr = trainer.learning_rate\n\n if (epoch_no % 16 == 0):\n logging.info(\n f\"Epoch[{epoch_no}] Learning rate is {curr_lr}\"\n )\n\n epoch_loss = loop(epoch_no, train_iter)\n if is_validation_available:\n epoch_loss = loop(\n epoch_no, validation_iter, is_training=False\n )\n\n lr_scheduler.step(loss_value(epoch_loss))\n\n if loss_value(epoch_loss) < best_epoch_info.metric_value:\n best_epoch_info = BestEpochInfo(\n params_path=\"%s-%04d.params\"\n % (base_path(), epoch_no),\n epoch_no=epoch_no,\n metric_value=loss_value(epoch_loss),\n )\n net.save_parameters(\n best_epoch_info.params_path\n ) # TODO: handle possible exception\n\n if not trainer.learning_rate == curr_lr:\n logging.info(\n f\"Loading parameters from best epoch \"\n f\"({best_epoch_info.epoch_no})\"\n )\n net.load_parameters(\n best_epoch_info.params_path, self.ctx\n )\n\n logging.info(\n f\"Loading parameters from best epoch \"\n f\"({best_epoch_info.epoch_no})\"\n )\n net.load_parameters(best_epoch_info.params_path, self.ctx)\n\n logging.info(\"Final loss: %.4f occurred at epoch %d\" % (best_epoch_info.metric_value, best_epoch_info.epoch_no))\n\n # save net parameters\n net.save_parameters(best_epoch_info.params_path)\n\n logging.getLogger().info(\"End model training\")\n"
] | [
[
"numpy.prod",
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KushGabani/ml4a-guides | [
"d71b61a99c417b9ace3404420b37d22f6da06153"
] | [
"ml4a/utils/latents.py"
] | [
"import numpy as np\n\n\ndef interpolation_walk(endpoints, num_frames_per=30, loop=False):\n z1, z2 = endpoints[:-1], endpoints[1:]\n if loop:\n z1.append(endpoints[-1])\n z2.append(endpoints[0])\n z1, z2 = endpoints[:-1], endpoints[1:]\n if loop:\n z1.append(endpoints[-1])\n z2.append(endpoints[0])\n Z = np.concatenate([np.linspace(z_from, z_to, num_frames_per+1, axis=0)[:-1, :] \n for z_from, z_to in zip(z1, z2)], axis=0)\n Z = np.squeeze(Z)\n return Z\n "
] | [
[
"numpy.squeeze",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
patrickherring-TRI/beep | [
"7d3e8745f54ae86dcc4ac5679656dd70457ddd10"
] | [
"beep/utils/splice.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) 2019 Toyota Research Institute\n\n\"\"\"Script for joining together two Maccor files that correspond to the same run\nbut data has been split between two files. The column increment function makes sure\nthat certain columns are monotonically increasing across the two files. The metadata\nline from the first file is used in the final output file\n\nUsage:\n splice.py [options]\n splice.py (-h | --help)\n\nOptions:\n -h --help Show this screen\n --version Show version\n\n\"\"\"\nimport pandas as pd\nfrom beep import StringIO\nfrom beep import TEST_FILE_DIR\nimport os\n\n\nclass MaccorSplice:\n def __init__(self, input_1, input_2, output):\n \"\"\"\n Args:\n input_1 (str): Filename corresponding to first file.\n input_2 (str): Filename corresponding to second file.\n output (str): Filename of output file.\n \"\"\"\n self.input_1 = input_1\n self.input_2 = input_2\n self.output = output\n\n def read_maccor_file(self, filename):\n \"\"\"\n Loads Maccor file and returns meta data line and data frame containing\n file data.\n\n Args:\n filename (str): path to file.\n\n Returns:\n str: first line of maccor file containing meta data.\n pandas.DataFrame: data frame with file data.\n\n \"\"\"\n with open(filename) as f:\n lines = f.readlines()\n metadata_line = lines[0]\n data_lines = lines[1:]\n\n # Parse data\n data_text = '\\n'.join(data_lines)\n tsv_part = pd.read_csv(StringIO(data_text), delimiter=\"\\t\")\n return metadata_line, tsv_part\n\n def write_maccor_file(self, metadata_line, dataframe, output):\n \"\"\"\n Writes data and meta data into a Maccor file.\n\n Args:\n metadata_line (str): line containing meta data.\n dataframe (pandas.DataFrame): content data.\n output (str): output file name.\n output (str): output file name.\n \"\"\"\n with open(output, 'w') as write_tsv:\n write_tsv.writelines(metadata_line)\n write_tsv.write(dataframe.to_csv(sep='\\t', index=False))\n\n def column_increment(self, data_1, data_2):\n \"\"\"\n Special increment logic.\n\n Args:\n data_1 (pandas.DataFrame):\n data_2 (pandas.DataFrame):\n\n Returns:\n pandas.DataFrame: data_1 transformed (incremented)\n pandas.DataFrame: data_2 transformed (incremented)\n \"\"\"\n columns_to_update = ['Rec#', 'Cyc#', 'Test (Sec)', 'Loop1', 'Loop2', 'Loop3', 'Loop4']\n for column in columns_to_update:\n if data_2[column].iloc[0] < data_1[column].iloc[-1]:\n data_2[column] = data_2[column] + data_1[column].iloc[-1]\n\n return data_1, data_2\n\n def splice_operation(self, data_1, data_2):\n \"\"\"\n Concatenates two data frames.\n\n Args:\n data_1 (pandas.DataFrame):\n data_2 (pandas.DataFrame):\n\n Returns:\n pandas.DataFrame: concatenated data frame.\n\n \"\"\"\n data_final = pd.concat([data_1, data_2])\n return data_final\n\n def run_splice(self):\n \"\"\"\n Reads two input maccor files. 
Concatenates the respective data frames.\n Writes to a new Maccor file.\n \"\"\"\n\n metadata_line_1, data_1 = self.read_maccor_file(self.input_1)\n metadata_line_2, data_2 = self.read_maccor_file(self.input_2)\n data_1, data_2 = self.column_increment(data_1, data_2)\n data_final = self.splice_operation(data_1, data_2)\n self.write_maccor_file(metadata_line_1, data_final, self.output)\n\n\nif __name__ == \"__main__\":\n filename_part_1 = '/Users/patrickherring/Downloads/xTESLADIAG_000038.078'\n filename_part_2 = '/Users/patrickherring/Downloads/xTESLADIAG_000038con.078'\n test = '/Users/patrickherring/Downloads/xTESLADIAG_000038test.078'\n output = '/Users/patrickherring/Downloads/xTESLADIAG_000038joined.078'\n\n filename_part_1 = os.path.join(TEST_FILE_DIR, \"xTESLADIAG_000038.078\")\n filename_part_2 = os.path.join(TEST_FILE_DIR, \"xTESLADIAG_000038con.078\")\n output = os.path.join(TEST_FILE_DIR, \"xTESLADIAG_000038joined.078\")\n\n splicer = MaccorSplice(filename_part_1, filename_part_2, output)\n splicer.run_splice()\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
youhanamikhaiel/BigGANs_testy | [
"3bf1cc5976a6b310ea7c44f32cb5f5170cb95609"
] | [
"BigGANOriginal/train_fns.py"
] | [
"''' train_fns.py\nFunctions for the main loop of training different conditional image models\n'''\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport os\n\nimport utils\nimport losses\n\n\n# Dummy training function for debugging\ndef dummy_training_function():\n def train(x, y):\n return {}\n return train\n\n\ndef GAN_training_function(G, D, GD, z_, y_, ema, state_dict, config):\n def train(x, y, w):\n G.optim.zero_grad()\n D.optim.zero_grad()\n # How many chunks to split x and y into?\n x = torch.split(x, config['batch_size'])\n y = torch.split(y, config['batch_size'])\n w = torch.Tensor(w).to('cuda')\n w = ((w-0.5)/0.5)+0.5\n wt = torch.split(w, config['batch_size'])\n counter = 0\n \n # Optionally toggle D and G's \"require_grad\"\n if config['toggle_grads']:\n utils.toggle_grad(D, True)\n utils.toggle_grad(G, False)\n \n for step_index in range(config['num_D_steps']):\n # If accumulating gradients, loop multiple times before an optimizer step\n D.optim.zero_grad()\n for accumulation_index in range(config['num_D_accumulations']):\n z_.sample_()\n y_.sample_()\n D_fake, D_real = GD(z_[:config['batch_size']], y_[:config['batch_size']], \n x[counter], y[counter], train_G=False, \n split_D=config['split_D'])\n \n # Compute components of D's loss, average them, and divide by \n # the number of gradient accumulations\n D_loss_real, D_loss_fake = losses.discriminator_loss(D_fake, D_real*wt[counter])\n D_loss = (D_loss_real + D_loss_fake) / float(config['num_D_accumulations'])\n D_loss.backward()\n counter += 1\n \n # Optionally apply ortho reg in D\n if config['D_ortho'] > 0.0:\n # Debug print to indicate we're using ortho reg in D.\n print('using modified ortho reg in D')\n utils.ortho(D, config['D_ortho'])\n \n D.optim.step()\n \n # Optionally toggle \"requires_grad\"\n if config['toggle_grads']:\n utils.toggle_grad(D, False)\n utils.toggle_grad(G, True)\n \n # Zero G's gradients by default before training G, for safety\n G.optim.zero_grad()\n \n # If accumulating gradients, loop multiple times\n for accumulation_index in range(config['num_G_accumulations']): \n z_.sample_()\n y_.sample_()\n D_fake = GD(z_, y_, train_G=True, split_D=config['split_D'])\n G_loss = losses.generator_loss(D_fake*w) / float(config['num_G_accumulations'])\n G_loss.backward()\n \n # Optionally apply modified ortho reg in G\n if config['G_ortho'] > 0.0:\n print('using modified ortho reg in G') # Debug print to indicate we're using ortho reg in G\n # Don't ortho reg shared, it makes no sense. Really we should blacklist any embeddings for this\n utils.ortho(G, config['G_ortho'], \n blacklist=[param for param in G.shared.parameters()])\n G.optim.step()\n \n # If we have an ema, update it, regardless of if we test with it or not\n if config['ema']:\n ema.update(state_dict['itr'])\n \n out = {'G_loss': float(G_loss.item()), \n 'D_loss_real': float(D_loss_real.item()),\n 'D_loss_fake': float(D_loss_fake.item())}\n # Return G's loss and the components of D's loss.\n return out\n return train\n \n''' This function takes in the model, saves the weights (multiple copies if \n requested), and prepares sample sheets: one consisting of samples given\n a fixed noise seed (to show how the model evolves throughout training),\n a set of full conditional sample sheets, and a set of interp sheets. 
'''\ndef save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y, \n state_dict, config, experiment_name):\n utils.save_weights(G, D, state_dict, config['weights_root'],\n experiment_name, None, G_ema if config['ema'] else None)\n # Save an additional copy to mitigate accidental corruption if process\n # is killed during a save (it's happened to me before -.-)\n if config['num_save_copies'] > 0:\n utils.save_weights(G, D, state_dict, config['weights_root'],\n experiment_name,\n 'copy%d' % state_dict['save_num'],\n G_ema if config['ema'] else None)\n state_dict['save_num'] = (state_dict['save_num'] + 1 ) % config['num_save_copies']\n \n # Use EMA G for samples or non-EMA?\n which_G = G_ema if config['ema'] and config['use_ema'] else G\n \n # Accumulate standing statistics?\n if config['accumulate_stats']:\n utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,\n z_, y_, config['n_classes'],\n config['num_standing_accumulations'])\n \n # Save a random sample sheet with fixed z and y \n with torch.no_grad():\n if config['parallel']:\n fixed_Gz = nn.parallel.data_parallel(which_G, (fixed_z, which_G.shared(fixed_y)))\n else:\n fixed_Gz = which_G(fixed_z, which_G.shared(fixed_y))\n if not os.path.isdir('%s/%s' % (config['samples_root'], experiment_name)):\n os.mkdir('%s/%s' % (config['samples_root'], experiment_name))\n image_filename = '%s/%s/fixed_samples%d.jpg' % (config['samples_root'], \n experiment_name,\n state_dict['itr'])\n torchvision.utils.save_image(fixed_Gz.float().cpu(), image_filename,\n nrow=int(fixed_Gz.shape[0] **0.5), normalize=True)\n # For now, every time we save, also save sample sheets\n utils.sample_sheet(which_G,\n classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']],\n num_classes=config['n_classes'],\n samples_per_class=10, parallel=config['parallel'],\n samples_root=config['samples_root'],\n experiment_name=experiment_name,\n folder_number=state_dict['itr'],\n z_=z_)\n # Also save interp sheets\n for fix_z, fix_y in zip([False, False, True], [False, True, False]):\n utils.interp_sheet(which_G,\n num_per_sheet=16,\n num_midpoints=8,\n num_classes=config['n_classes'],\n parallel=config['parallel'],\n samples_root=config['samples_root'],\n experiment_name=experiment_name,\n folder_number=state_dict['itr'],\n sheet_number=0,\n fix_z=fix_z, fix_y=fix_y, device='cuda')\n\n\n \n''' This function runs the inception metrics code, checks if the results\n are an improvement over the previous best (either in IS or FID, \n user-specified), logs the results, and saves a best_ copy if it's an \n improvement. '''\ndef test(G, D, G_ema, z_, y_, state_dict, config, sample, get_inception_metrics,\n experiment_name, test_log):\n print('Gathering inception metrics...')\n if config['accumulate_stats']:\n utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,\n z_, y_, config['n_classes'],\n config['num_standing_accumulations'])\n IS_mean, IS_std, FID = get_inception_metrics(sample, \n config['num_inception_images'],\n num_splits=10)\n print('Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID))\n # If improved over previous best metric, save approrpiate copy\n if ((config['which_best'] == 'IS' and IS_mean > state_dict['best_IS'])\n or (config['which_best'] == 'FID' and FID < state_dict['best_FID'])):\n print('%s improved over previous best, saving checkpoint...' 
% config['which_best'])\n utils.save_weights(G, D, state_dict, config['weights_root'],\n experiment_name, 'best%d' % state_dict['save_best_num'],\n G_ema if config['ema'] else None)\n state_dict['save_best_num'] = (state_dict['save_best_num'] + 1 ) % config['num_best_copies']\n state_dict['best_IS'] = max(state_dict['best_IS'], IS_mean)\n state_dict['best_FID'] = min(state_dict['best_FID'], FID)\n # Log results to file\n test_log.log(itr=int(state_dict['itr']), IS_mean=float(IS_mean),\n IS_std=float(IS_std), FID=float(FID))\n"
] | [
[
"torch.no_grad",
"torch.Tensor",
"torch.split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OpenSourceDocs/magenta | [
"11ee0dbe9d2dce7160af7db737d7a21920cacc40"
] | [
"magenta/models/onsets_frames_transcription/onsets_frames_transcription_transcribe.py"
] | [
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transcribe a recording of piano audio.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom magenta.models.onsets_frames_transcription import configs\nfrom magenta.models.onsets_frames_transcription import constants\nfrom magenta.models.onsets_frames_transcription import data\nfrom magenta.models.onsets_frames_transcription import split_audio_and_label_data\nfrom magenta.models.onsets_frames_transcription import train_util\nfrom magenta.music import midi_io\nfrom magenta.music import sequences_lib\nfrom magenta.protobuf import music_pb2\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('config', 'onsets_frames',\n 'Name of the config to use.')\ntf.app.flags.DEFINE_string('model_dir', None,\n 'Path to look for acoustic checkpoints.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', None,\n 'Filename of the checkpoint to use. If not specified, will use the latest '\n 'checkpoint')\ntf.app.flags.DEFINE_string(\n 'hparams',\n '',\n 'A comma-separated list of `name=value` hyperparameter values.')\ntf.app.flags.DEFINE_float(\n 'frame_threshold', 0.5,\n 'Threshold to use when sampling from the acoustic model.')\ntf.app.flags.DEFINE_float(\n 'onset_threshold', 0.5,\n 'Threshold to use when sampling from the acoustic model.')\ntf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged: '\n 'DEBUG, INFO, WARN, ERROR, or FATAL.')\n\n\ndef create_example(filename):\n \"\"\"Processes an audio file into an Example proto.\"\"\"\n wav_data = tf.gfile.Open(filename, 'rb').read()\n example_list = list(\n split_audio_and_label_data.process_record(\n wav_data=wav_data,\n ns=music_pb2.NoteSequence(),\n example_id=filename,\n min_length=0,\n max_length=-1,\n allow_empty_notesequence=True))\n assert len(example_list) == 1\n return example_list[0].SerializeToString()\n\n\ndef transcribe_audio(prediction, hparams, frame_threshold, onset_threshold):\n \"\"\"Transcribes an audio file.\"\"\"\n frame_predictions = prediction['frame_probs_flat'] > frame_threshold\n onset_predictions = prediction['onset_probs_flat'] > onset_threshold\n velocity_values = prediction['velocity_values_flat']\n\n sequence_prediction = sequences_lib.pianoroll_to_note_sequence(\n frame_predictions,\n frames_per_second=data.hparams_frames_per_second(hparams),\n min_duration_ms=0,\n min_midi_pitch=constants.MIN_MIDI_PITCH,\n onset_predictions=onset_predictions,\n velocity_values=velocity_values)\n\n return sequence_prediction\n\n\ndef main(argv):\n tf.logging.set_verbosity(FLAGS.log)\n\n config = configs.CONFIG_MAP[FLAGS.config]\n hparams = config.hparams\n # For this script, default to not using cudnn.\n hparams.use_cudnn = False\n hparams.parse(FLAGS.hparams)\n hparams.batch_size = 1\n hparams.truncated_length_secs = 0\n\n with tf.Graph().as_default():\n examples = tf.placeholder(tf.string, 
[None])\n\n dataset = data.provide_batch(\n examples=examples,\n preprocess_examples=True,\n hparams=hparams,\n is_training=False)\n\n estimator = train_util.create_estimator(config.model_fn,\n os.path.expanduser(FLAGS.model_dir),\n hparams)\n\n iterator = dataset.make_initializable_iterator()\n next_record = iterator.get_next()\n\n with tf.Session() as sess:\n sess.run([\n tf.initializers.global_variables(),\n tf.initializers.local_variables()\n ])\n\n for filename in argv[1:]:\n tf.logging.info('Starting transcription for %s...', filename)\n\n # The reason we bounce between two Dataset objects is so we can use\n # the data processing functionality in data.py without having to\n # construct all the Example protos in memory ahead of time or create\n # a temporary tfrecord file.\n tf.logging.info('Processing file...')\n sess.run(iterator.initializer, {examples: [create_example(filename)]})\n\n def input_fn(params):\n del params\n return tf.data.Dataset.from_tensors(sess.run(next_record))\n\n tf.logging.info('Running inference...')\n checkpoint_path = None\n if FLAGS.checkpoint_path:\n checkpoint_path = os.path.expanduser(FLAGS.checkpoint_path)\n prediction_list = list(\n estimator.predict(\n input_fn,\n checkpoint_path=checkpoint_path,\n yield_single_examples=False))\n assert len(prediction_list) == 1\n\n sequence_prediction = transcribe_audio(prediction_list[0], hparams,\n FLAGS.frame_threshold,\n FLAGS.onset_threshold)\n\n midi_filename = filename + '.midi'\n midi_io.sequence_proto_to_midi_file(sequence_prediction, midi_filename)\n\n tf.logging.info('Transcription written to %s.', midi_filename)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\nif __name__ == '__main__':\n console_entry_point()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.initializers.local_variables",
"tensorflow.gfile.Open",
"tensorflow.placeholder",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.Session",
"tensorflow.logging.info",
"tensorflow.initializers.global_variables",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
isunjin/pytext | [
"898c41c4881f30480cf744ea524993339df7213a"
] | [
"pytext/models/embeddings/scriptable_embedding_list.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom inspect import signature\nfrom typing import Dict, Iterable, List, Tuple\n\nimport torch\nfrom pytext.models.embeddings import EmbeddingBase\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass ScriptableEmbeddingList(EmbeddingBase):\n \"\"\"\n\n This class is a Torchscript-friendly version of\n pytext.models.embeddings.EmbeddingList. The main differences are that it\n requires input arguments to be passed in as a list of Tensors, since\n Torchscript does not allow variable arguments, and that it only supports\n concat mode, since Torchscript does not support return value variance.\n\n \"\"\"\n\n # Different embedding classes in PyText take different numbers of input\n # arguments. Therefore, we want to allow users of this class to simply pass\n # in a list of tensors for each embedding. Unfortunately, Torchscript\n # requires that we know the signature of the function we're calling before\n # .forward() is called. These classes and adapter method allow us to\n # provide a flexible interface to callers of this class while satisfying\n # Torchscript.\n\n # N.B.: it may be possible to generate these classes dynamically, but given\n # that we currently only need two of them it seems better to define them\n # here so we get clean stack traces, autocomplete, etc.\n\n class Wrapper1(torch.nn.Module):\n def __init__(self, embedding: EmbeddingBase):\n super().__init__()\n self._embedding = embedding\n\n def forward(self, xs: List[torch.Tensor]):\n return self._embedding(xs[0])\n\n class Wrapper3(torch.nn.Module):\n def __init__(self, embedding: EmbeddingBase):\n super().__init__()\n self._embedding = embedding\n\n def forward(self, xs: List[torch.Tensor]):\n return self._embedding(xs[0], xs[1], xs[2])\n\n @staticmethod\n def _adapt_embedding(embedding: torch.nn.Module) -> torch.nn.Module:\n param_count = len(signature(embedding.forward).parameters)\n\n if param_count == 1:\n return ScriptableEmbeddingList.Wrapper1(embedding)\n elif param_count == 3:\n return ScriptableEmbeddingList.Wrapper3(embedding)\n raise AssertionError(\n f\"Unsupported parameter count {param_count}. If a new embedding \"\n \"class has been added, you will need to add support in this class.\"\n )\n\n def __init__(self, embeddings: Iterable[EmbeddingBase]):\n EmbeddingBase.__init__(self, 0)\n embeddings = list(filter(None, embeddings))\n self.num_emb_modules = sum(emb.num_emb_modules for emb in embeddings)\n embeddings_list: List[EmbeddingBase] = []\n input_start_indices: List[int] = []\n start = 0\n embedding_dim = 0\n for emb in embeddings:\n if emb.embedding_dim > 0:\n embeddings_list.append(emb)\n input_start_indices.append(start)\n embedding_dim += emb.embedding_dim\n start += emb.num_emb_modules\n self.embeddings_list = torch.nn.ModuleList(\n map(ScriptableEmbeddingList._adapt_embedding, embeddings_list)\n )\n self.input_start_indices: Tuple[int] = tuple(input_start_indices)\n assert len(self.embeddings_list) > 0, \"must have at least 1 sub embedding\"\n self.embedding_dim = embedding_dim\n\n def forward(self, emb_input: List[List[torch.Tensor]]) -> torch.Tensor:\n \"\"\"\n Get embeddings from all sub-embeddings and either concatenate them\n into one Tensor or return them in a tuple.\n\n Args:\n emb_input (type): Sequence of token level embeddings to combine.\n The inputs should match the size of configured embeddings. 
Each\n of them is a List of Tensors.\n\n Returns:\n torch.Tensor: a Tensor is returned by concatenating all embeddings.\n\n \"\"\"\n # tokens dim: (bsz, max_seq_len) -> (bsz, max_seq_len, dim) OR\n # (bsz, max_num_sen, max_seq_len) -> (bsz, max_num_sen, max_seq_len, dim)\n # for seqnn\n if self.num_emb_modules != len(emb_input):\n raise Exception(\n f\"expecting {self.num_emb_modules} embeddings, \"\n + f\"but got {len(emb_input)} input\"\n )\n tensors = []\n for emb, start in zip(self.embeddings_list, self.input_start_indices):\n tensors.append(emb(emb_input[start]))\n\n return torch.cat(tensors, 2)\n\n def get_param_groups_for_optimizer(self) -> List[Dict[str, torch.nn.Parameter]]:\n \"\"\"\n Organize child embedding parameters into param_groups (or layers), so the\n optimizer and / or schedulers can have custom behavior per layer. The\n param_groups from each child embedding are aligned at the first (lowest)\n param_group.\n \"\"\"\n param_groups: List[Dict[str, torch.nn.Parameter]] = []\n\n for module_name, embedding_module in self.embeddings_list.named_children():\n child_params = embedding_module.get_param_groups_for_optimizer()\n\n for i, child_param_group in enumerate(child_params):\n if i >= len(param_groups):\n param_groups.append({})\n\n for param_name, param in child_param_group.items():\n param_name = \"%s.%s\" % (module_name, param_name)\n param_groups[i][param_name] = param\n\n return param_groups\n\n def visualize(self, summary_writer: SummaryWriter):\n for child in self:\n child.visualize(summary_writer)\n"
] | [
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
basedrhys/detectron2 | [
"979cf435cb302df46c1e07def5b07816216d971a"
] | [
"detectron2/engine/defaults.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\"\"\"\nThis file contains components with some default boilerplate logic user may need\nin training / testing. They will not work for everyone, but many users may find them useful.\n\nThe behavior of functions/classes in this file is subject to change,\nsince they are meant to represent the \"common default behavior\" people need in their projects.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom collections import OrderedDict\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom fvcore.nn.precise_bn import get_bn_modules\nfrom torch.nn.parallel import DistributedDataParallel\nfrom shutil import copyfile\nimport tempfile\n\nimport detectron2.data.transforms as T\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.data import (\n MetadataCatalog,\n build_detection_test_loader,\n build_detection_train_loader,\n)\nfrom detectron2.evaluation import (\n DatasetEvaluator,\n inference_on_dataset,\n print_csv_format,\n verify_results,\n)\nfrom detectron2.modeling import build_model\nfrom detectron2.solver import build_lr_scheduler, build_optimizer\nfrom detectron2.utils import comm\nfrom detectron2.utils.collect_env import collect_env_info\nfrom detectron2.utils.env import seed_all_rng\nfrom detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter\nfrom detectron2.utils.logger import setup_logger\n\nfrom . import hooks\nfrom .train_loop import SimpleTrainer\n\nimport wandb\n\n__all__ = [\"default_argument_parser\", \"default_setup\", \"DefaultPredictor\", \"DefaultTrainer\"]\n\nfrom detectron2.evaluation import COCOEvaluator\n\ndef default_argument_parser(epilog=None):\n \"\"\"\n Create a parser with some common arguments used by detectron2 users.\n\n Args:\n epilog (str): epilog passed to ArgumentParser describing the usage.\n\n Returns:\n argparse.ArgumentParser:\n \"\"\"\n parser = argparse.ArgumentParser(\n epilog=epilog\n or f\"\"\"\nExamples:\n\nRun on single machine:\n $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth\n\nRun on multiple machines:\n (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]\n (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]\n\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\"--config-file\", default=\"\", metavar=\"FILE\", help=\"path to config file\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"whether to attempt to resume from the checkpoint directory\",\n )\n parser.add_argument(\"--eval-only\", action=\"store_true\", help=\"perform evaluation only\")\n parser.add_argument(\"--num-gpus\", type=int, default=1, help=\"number of gpus *per machine*\")\n parser.add_argument(\"--num-machines\", type=int, default=1, help=\"total number of machines\")\n parser.add_argument(\n \"--machine-rank\", type=int, default=0, help=\"the rank of this machine (unique per machine)\"\n )\n\n # PyTorch still may leave orphan processes in multi-gpu training.\n # Therefore we use a deterministic way to obtain port,\n # so that users are aware of orphan processes by seeing the port occupied.\n port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != \"win32\" else 1) % 2 ** 14\n parser.add_argument(\n \"--dist-url\",\n default=\"tcp://127.0.0.1:{}\".format(port),\n help=\"initialization URL for pytorch 
distributed backend. See \"\n \"https://pytorch.org/docs/stable/distributed.html for details.\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef default_setup(cfg, args):\n \"\"\"\n Perform some basic common setups at the beginning of a job, including:\n\n 1. Set up the detectron2 logger\n 2. Log basic information about environment, cmdline arguments, and config\n 3. Backup the config to the output directory\n\n Args:\n cfg (CfgNode): the full config to be used\n args (argparse.NameSpace): the command line arguments to be logged\n \"\"\"\n output_dir = cfg.OUTPUT_DIR\n if comm.is_main_process() and output_dir:\n PathManager.mkdirs(output_dir)\n\n rank = comm.get_rank()\n setup_logger(output_dir, distributed_rank=rank, name=\"fvcore\")\n logger = setup_logger(output_dir, distributed_rank=rank)\n\n logger.info(\"Rank of current process: {}. World size: {}\".format(rank, comm.get_world_size()))\n logger.info(\"Environment info:\\n\" + collect_env_info())\n\n logger.info(\"Command line arguments: \" + str(args))\n if hasattr(args, \"config_file\") and args.config_file != \"\":\n logger.info(\n \"Contents of args.config_file={}:\\n{}\".format(\n args.config_file, PathManager.open(args.config_file, \"r\").read()\n )\n )\n\n logger.info(\"Running with full config:\\n{}\".format(cfg))\n if comm.is_main_process() and output_dir:\n # Note: some of our scripts may expect the existence of\n # config.yaml in output directory\n path = os.path.join(output_dir, \"config.yaml\")\n with PathManager.open(path, \"w\") as f:\n f.write(cfg.dump())\n logger.info(\"Full config saved to {}\".format(path))\n\n # make sure each worker has a different, yet deterministic seed if specified\n seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)\n\n # cudnn benchmark has large overhead. It shouldn't be used considering the small size of\n # typical validation set.\n if not (hasattr(args, \"eval_only\") and args.eval_only):\n torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK\n\n\nclass DefaultPredictor:\n \"\"\"\n Create a simple end-to-end predictor with the given config that runs on\n single device for a single input image.\n\n Compared to using the model directly, this class does the following additions:\n\n 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.\n 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.\n 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.\n 4. 
Take one input image and produce a single output, instead of a batch.\n\n If you'd like to do anything more fancy, please refer to its source code\n as examples to build and use the model manually.\n\n Attributes:\n metadata (Metadata): the metadata of the underlying dataset, obtained from\n cfg.DATASETS.TEST.\n\n Examples:\n ::\n pred = DefaultPredictor(cfg)\n inputs = cv2.imread(\"input.jpg\")\n outputs = pred(inputs)\n \"\"\"\n\n def __init__(self, cfg):\n self.cfg = cfg.clone() # cfg can be modified by model\n self.model = build_model(self.cfg)\n self.model.eval()\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])\n\n checkpointer = DetectionCheckpointer(self.model)\n checkpointer.load(cfg.MODEL.WEIGHTS)\n\n self.aug = T.ResizeShortestEdge(\n [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST\n )\n\n self.input_format = cfg.INPUT.FORMAT\n assert self.input_format in [\"RGB\", \"BGR\"], self.input_format\n\n def __call__(self, original_image):\n \"\"\"\n Args:\n original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n\n Returns:\n predictions (dict):\n the output of the model for one image only.\n See :doc:`/tutorials/models` for details about the format.\n \"\"\"\n with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258\n # Apply pre-processing to image.\n if self.input_format == \"RGB\":\n # whether the model expects BGR inputs or RGB\n original_image = original_image[:, :, ::-1]\n height, width = original_image.shape[:2]\n image = self.aug.get_transform(original_image).apply_image(original_image)\n image = torch.as_tensor(image.astype(\"float32\").transpose(2, 0, 1))\n\n inputs = {\"image\": image, \"height\": height, \"width\": width}\n predictions = self.model([inputs])[0]\n return predictions\n\n\nclass DefaultTrainer(SimpleTrainer):\n \"\"\"\n A trainer with default training logic.\n It is a subclass of `SimpleTrainer` which instantiates everything needed from the\n config. It does the following:\n\n 1. Create model, optimizer, scheduler, dataloader from the given config.\n 2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when\n `resume_or_load` is called.\n 3. Register a few common hooks defined by the config.\n\n It is created to simplify the **standard model training workflow** and reduce code boilerplate\n for users who only need the standard training workflow, with standard features.\n It means this class makes *many assumptions* about your training logic that\n may easily become invalid in a new research. In fact, any assumptions beyond those made in the\n :class:`SimpleTrainer` are too much for research.\n\n The code of this class has been annotated about restrictive assumptions it mades.\n When they do not work for you, you're encouraged to:\n\n 1. Overwrite methods of this class, OR:\n 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and\n nothing else. You can then add your own hooks if needed. OR:\n 3. 
Write your own training loop similar to `tools/plain_train_net.py`.\n\n Also note that the behavior of this class, like other functions/classes in\n this file, is not stable, since it is meant to represent the \"common default behavior\".\n It is only guaranteed to work well with the standard models and training workflow in detectron2.\n To obtain more stable behavior, write your own training logic with other public APIs.\n\n Examples:\n ::\n trainer = DefaultTrainer(cfg)\n trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS\n trainer.train()\n\n Attributes:\n scheduler:\n checkpointer (DetectionCheckpointer):\n cfg (CfgNode):\n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"\n Args:\n cfg (CfgNode):\n \"\"\"\n logger = logging.getLogger(\"detectron2\")\n if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2\n setup_logger()\n cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())\n # Assume these objects must be constructed in this order.\n model = self.build_model(cfg)\n optimizer = self.build_optimizer(cfg, model)\n data_loader = self.build_train_loader(cfg)\n\n # For training, wrap with DDP. But don't need this for inference.\n if comm.get_world_size() > 1:\n model = DistributedDataParallel(\n model, device_ids=[comm.get_local_rank()], broadcast_buffers=False\n )\n super().__init__(model, data_loader, optimizer)\n\n self.scheduler = self.build_lr_scheduler(cfg, optimizer)\n # Assume no other objects need to be checkpointed.\n # We can later make it checkpoint the stateful hooks\n self.checkpointer = DetectionCheckpointer(\n # Assume you want to save checkpoints together with logs/statistics\n model,\n cfg.OUTPUT_DIR,\n optimizer=optimizer,\n scheduler=self.scheduler,\n )\n self.start_iter = 0\n self.max_iter = cfg.SOLVER.MAX_ITER\n self.cfg = cfg\n\n self.best_ap = -1\n\n self.register_hooks(self.build_hooks())\n\n def resume_or_load(self, resume=True):\n \"\"\"\n If `resume==True`, and last checkpoint exists, resume from it, load all checkpointables\n (eg. 
optimizer and scheduler) and update iteration counter.\n\n Otherwise, load the model specified by the config (skip all checkpointables) and start from\n the first iteration.\n\n Args:\n resume (bool): whether to do resume or not\n \"\"\"\n checkpoint = self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)\n if resume and self.checkpointer.has_checkpoint():\n self.start_iter = checkpoint.get(\"iteration\", -1) + 1\n # The checkpoint stores the training iteration that just finished, thus we start\n # at the next iteration (or iter zero if there's no checkpoint).\n\n def build_hooks(self):\n \"\"\"\n Build a list of default hooks, including timing, evaluation,\n checkpointing, lr scheduling, precise BN, writing events.\n\n Returns:\n list[HookBase]:\n \"\"\"\n cfg = self.cfg.clone()\n cfg.defrost()\n cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN\n\n ret = [\n hooks.IterationTimer(),\n hooks.LRScheduler(self.optimizer, self.scheduler),\n hooks.PreciseBN(\n # Run at the same freq as (but before) evaluation.\n cfg.TEST.EVAL_PERIOD,\n self.model,\n # Build a new data loader to not affect training\n self.build_train_loader(cfg),\n cfg.TEST.PRECISE_BN.NUM_ITER,\n )\n if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)\n else None,\n ]\n\n # Do PreciseBN before checkpointer, because it updates the model and need to\n # be saved by checkpointer.\n # This is not always the best: if checkpointing has a different frequency,\n # some checkpoints may have more precise statistics than others.\n if comm.is_main_process():\n ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))\n\n def test_and_save_results():\n evaluators = [COCOEvaluator(test_set, cfg, False, output_dir=\"./output/\") for test_set in cfg.DATASETS.TEST]\n self._last_eval_results = self.test(self.cfg, self.model, evaluators=evaluators)\n wandb.log({\n \"AP\": self._last_eval_results['bbox']['AP'],\n \"AP50\": self._last_eval_results['bbox']['AP50'],\n \"AP75\": self._last_eval_results['bbox']['AP75']},\n step=self.iter)\n\n ap = self._last_eval_results['bbox']['AP']\n if ap > self.best_ap:\n print(\"New best model, saving to WANDB\")\n model_path = os.path.join(cfg.OUTPUT_DIR, 'model_{}.pth'.format(str(self.iter).zfill(7)))\n tmp_model_path = os.path.join(tempfile.gettempdir(), 'model_best_iter-{}_ap-{}.pth'.format(self.iter, round(ap, 3)))\n copyfile(model_path, tmp_model_path)\n # Save the model to WANDB\n wandb.save(tmp_model_path)\n self.best_ap = ap\n elif abs(ap - self.best_ap) < 1:\n print(\"Fairly similar AP, saving anyway\")\n model_path = os.path.join(cfg.OUTPUT_DIR, 'model_{}.pth'.format(str(self.iter).zfill(7)))\n tmp_model_path = os.path.join(tempfile.gettempdir(), 'model_iter-{}_ap-{}.pth'.format(self.iter, round(ap, 3)))\n copyfile(model_path, tmp_model_path)\n # Save the model to WANDB\n wandb.save(tmp_model_path)\n\n print(\"TESTING FINISHED\")\n print(self._last_eval_results)\n print()\n return self._last_eval_results\n\n # Do evaluation after checkpointer, because then if it fails,\n # we can use the saved checkpoint to debug.\n ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))\n\n if comm.is_main_process():\n # run writers in the end, so that evaluation metrics are written\n ret.append(hooks.PeriodicWriter(self.build_writers()))\n return ret\n\n def build_writers(self):\n \"\"\"\n Build a list of writers to be used. 
By default it contains\n writers that write metrics to the screen,\n a json file, and a tensorboard event file respectively.\n If you'd like a different list of writers, you can overwrite it in\n your trainer.\n\n Returns:\n list[EventWriter]: a list of :class:`EventWriter` objects.\n\n It is now implemented by:\n ::\n return [\n CommonMetricPrinter(self.max_iter),\n JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, \"metrics.json\")),\n TensorboardXWriter(self.cfg.OUTPUT_DIR),\n ]\n\n \"\"\"\n # Assume the default print/log frequency.\n return [\n # It may not always print what you want to see, since it prints \"common\" metrics only.\n CommonMetricPrinter(self.max_iter),\n JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, \"metrics.json\")),\n TensorboardXWriter(self.cfg.OUTPUT_DIR),\n ]\n\n def wandb_config(self):\n w_config = wandb.config\n cfg = self.cfg\n w_config.imgs_per_batch = cfg.SOLVER.IMS_PER_BATCH\n w_config.batch_size = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE\n w_config.learning_rate = cfg.SOLVER.BASE_LR\n w_config.max_iter = cfg.SOLVER.MAX_ITER\n w_config.eval_period = cfg.TEST.EVAL_PERIOD\n w_config.model_weights = cfg.MODEL.WEIGHTS\n\n def train(self):\n \"\"\"\n Run training.\n\n Returns:\n OrderedDict of results, if evaluation is enabled. Otherwise None.\n \"\"\"\n self.wandb_config()\n super().train(self.start_iter, self.max_iter)\n if hasattr(self, \"_last_eval_results\") and comm.is_main_process():\n verify_results(self.cfg, self._last_eval_results)\n return self._last_eval_results\n\n @classmethod\n def build_model(cls, cfg):\n \"\"\"\n Returns:\n torch.nn.Module:\n\n It now calls :func:`detectron2.modeling.build_model`.\n Overwrite it if you'd like a different model.\n \"\"\"\n model = build_model(cfg)\n logger = logging.getLogger(__name__)\n logger.info(\"Model:\\n{}\".format(model))\n return model\n\n @classmethod\n def build_optimizer(cls, cfg, model):\n \"\"\"\n Returns:\n torch.optim.Optimizer:\n\n It now calls :func:`detectron2.solver.build_optimizer`.\n Overwrite it if you'd like a different optimizer.\n \"\"\"\n return build_optimizer(cfg, model)\n\n @classmethod\n def build_lr_scheduler(cls, cfg, optimizer):\n \"\"\"\n It now calls :func:`detectron2.solver.build_lr_scheduler`.\n Overwrite it if you'd like a different scheduler.\n \"\"\"\n return build_lr_scheduler(cfg, optimizer)\n\n @classmethod\n def build_train_loader(cls, cfg):\n \"\"\"\n Returns:\n iterable\n\n It now calls :func:`detectron2.data.build_detection_train_loader`.\n Overwrite it if you'd like a different data loader.\n \"\"\"\n return build_detection_train_loader(cfg)\n\n @classmethod\n def build_test_loader(cls, cfg, dataset_name):\n \"\"\"\n Returns:\n iterable\n\n It now calls :func:`detectron2.data.build_detection_test_loader`.\n Overwrite it if you'd like a different data loader.\n \"\"\"\n return build_detection_test_loader(cfg, dataset_name)\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name):\n \"\"\"\n Returns:\n DatasetEvaluator or None\n\n It is not implemented by default.\n \"\"\"\n raise NotImplementedError(\n \"\"\"\nIf you want DefaultTrainer to automatically run evaluation,\nplease implement `build_evaluator()` in subclasses (see train_net.py for example).\nAlternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).\n\"\"\"\n )\n\n @classmethod\n def test(cls, cfg, model, evaluators=None):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (nn.Module):\n evaluators (list[DatasetEvaluator] or None): if None, will call\n 
:meth:`build_evaluator`. Otherwise, must have the same length as\n `cfg.DATASETS.TEST`.\n\n Returns:\n dict: a dict of result metrics\n \"\"\"\n logger = logging.getLogger(__name__)\n if isinstance(evaluators, DatasetEvaluator):\n evaluators = [evaluators]\n if evaluators is not None:\n assert len(cfg.DATASETS.TEST) == len(evaluators), \"{} != {}\".format(\n len(cfg.DATASETS.TEST), len(evaluators)\n )\n\n results = OrderedDict()\n for idx, dataset_name in enumerate(cfg.DATASETS.TEST):\n data_loader = cls.build_test_loader(cfg, dataset_name)\n # When evaluators are passed in as arguments,\n # implicitly assume that evaluators can be created before data_loader.\n if evaluators is not None:\n evaluator = evaluators[idx]\n else:\n try:\n evaluator = cls.build_evaluator(cfg, dataset_name)\n except NotImplementedError:\n logger.warn(\n \"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, \"\n \"or implement its `build_evaluator` method.\"\n )\n results[dataset_name] = {}\n continue\n results_i = inference_on_dataset(model, data_loader, evaluator)\n results[dataset_name] = results_i\n if comm.is_main_process():\n assert isinstance(\n results_i, dict\n ), \"Evaluator must return a dict on the main process. Got {} instead.\".format(\n results_i\n )\n logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n print_csv_format(results_i)\n\n if len(results) == 1:\n results = list(results.values())[0]\n return results\n\n @staticmethod\n def auto_scale_workers(cfg, num_workers: int):\n \"\"\"\n When the config is defined for certain number of workers (according to\n ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of\n workers currently in use, returns a new cfg where the total batch size\n is scaled so that the per-GPU batch size stays the same as the\n original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.\n\n Other config options are also scaled accordingly:\n * training steps and warmup steps are scaled inverse proportionally.\n * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.\n\n It returns the original config if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.\n\n Returns:\n CfgNode: a new config\n \"\"\"\n old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE\n if old_world_size == 0 or old_world_size == num_workers:\n return cfg\n cfg = cfg.clone()\n frozen = cfg.is_frozen()\n cfg.defrost()\n\n assert (\n cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0\n ), \"Invalid REFERENCE_WORLD_SIZE in config!\"\n scale = num_workers / old_world_size\n bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))\n lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale\n max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))\n warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))\n cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)\n cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))\n cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant\n logger = logging.getLogger(__name__)\n logger.info(\n f\"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, \"\n f\"max_iter={max_iter}, warmup={warmup_iter}.\"\n )\n\n if frozen:\n cfg.freeze()\n return cfg\n"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahmedABDELFATTAH1/GradesAutoFiller | [
"b850f073d4270be559fbfd9518785aaf70017b0f"
] | [
"GradeAutofiller/DMv2/DigitsModule.py"
] | [
"import pickle\nfrom commonfunctions import *\nfrom skimage.feature import hog\nfrom sklearn.preprocessing import normalize\nimport skimage\nfrom skimage.morphology import binary_erosion, binary_dilation, binary_closing,skeletonize, thin,erosion,dilation\nfrom skimage.transform import rotate\nfrom skimage.measure import find_contours\n\nmodel = pickle.load(open('hognn_model.sav', 'rb'))\n\ndef hog_features(X, imgshape=(28, 28), pixels_per_cell=(6, 6)):\n features = []\n for row in X:\n img = row.reshape(imgshape)\n img_feature = hog(img, orientations=8, pixels_per_cell=pixels_per_cell, cells_per_block=(2, 2))\n features.append(img_feature)\n return np.array(features)\n\ndef predict_img(img):\n img = rgb2gray(img)\n img = (img.reshape(1,-1))\n Xhog = hog_features(img)\n Xhog = normalize(Xhog)\n Y = (model.predict(Xhog))\n return (np.argmax(Y))\n\ndef digits_locv2(img,X=28,Y=28):\n \"\"\"\n Localization of the digits , it separates each digit into a fixed size output\n Arugments :\n -- img : numpy array\n Returns\n -- digits : Array of fixed size matrices for each digit .\n \"\"\"\n img = rgb2gray(img) \n\n X = int(X)\n Y = int(X)\n \n Xh = int(X/2)\n Yh = int(Y/2)\n if(img.shape[0]>28):\n img = skimage.transform.resize(img, (28, img.shape[1]))\n io.imshow(img)\n img = img != 0\n #img = thin(img)\n labeled_array = measure.label(img)\n n = np.amax(labeled_array)\n digits = {}\n h,w = img.shape\n for i in range (1 , n+1):\n \n digit = labeled_array == i\n white_pixels = np.array(np.where(digit == 1))\n Ymin,Xmin = white_pixels[:,0]\n Ymax,Xmax = white_pixels[:,-1]\n shaped_digit = np.zeros([X,Y])\n \n Sx = max(0,int(Ymin-3))\n Fx = min(Ymax+3,h)\n Dx = Fx-Sx\n \n Sy = max(0,int(Xmin-3))\n Fy = min(w,Xmax+3)\n Dy = Fy-Sy\n \n digit = (digit[Sx:Fx+1,Sy:Fy+1])\n if (digit.shape[1] > 28):\n digit = skimage.transform.resize(digit, (digit.shape[0],28))\n \n shaped_digit[ :digit.shape[0] ,:digit.shape[1]] = digit\n \n digits[Xmin]=shaped_digit\n output = []\n for i in sorted (digits):\n output.append(1 - digits[i])\n return output\n \ndef digits_locv1(img,X=28,Y=28):\n \"\"\"\n Localization of the digits , it separates each digit into a fixed size output\n Arugments :\n -- img : numpy array\n Returns\n -- digits : Array of fixed size matrices for each digit .\n \"\"\"\n X = int(X/2)\n Y = int(Y/2)\n img = rgb2gray(img) \n img = rotate(img,270,resize=True)\n img_hist = histogram(img, nbins=2)\n # Checking the image`s background must be black and digits be white\n # Negative Transformation in case of white (objects) is more than black (background)\n if ( img_hist[0][0] < img_hist[0][1] ):\n img = 1 - img \n \n digits = []\n # Find contours for each digit has its own contour\n contours = find_contours(img, 0.7,fully_connected='high',positive_orientation='high')\n for n, contour in enumerate(contours):\n \n #print(len(contour))\n if(len(contour) < 50) :\n continue\n Ymax = np.amax(contour[:, 0])\n Ymin = np.amin(contour[:, 0])\n Xmax = np.amax(contour[:, 1])\n Xmin = np.amin(contour[:, 1])\n digit_seg = ([img[int(Ymin): int(Ymax)+1, int(Xmin): int(Xmax)+1]])\n digit = np.zeros([X*2,Y*2])\n h,w = np.array(digit_seg[0]).shape\n if(h > 28 or w>28):\n continue\n digit[X-int((h+1)/2):X+int(h/2) ,Y-int((w+1)/2):Y+int(w/2) , ] = digit_seg[0]\n digit = rotate(digit,90,resize=True)\n digit = erosion(digit)\n digit = dilation(digit)\n digits.append(digit)\n \n return digits\n \"\"\"\n Localization of the digits , it separates each digit into a fixed size output\n Arugments :\n -- img : numpy array\n Returns\n -- 
digits : Array of fixed size matrices for each digit .\n \"\"\"\n X = int(X/2)\n Y = int(Y/2)\n img = rgb2gray(img) \n img = rotate(img,270,resize=True)\n img_hist = histogram(img, nbins=2)\n # Checking the image`s background must be black and digits be white\n # Negative Transformation in case of white (objects) is more than black (background)\n if ( img_hist[0][0] < img_hist[0][1] ):\n img = 1 - img \n \n digits = []\n # Find contours for each digit has its own contour\n contours = find_contours(img, 0.7,fully_connected='high',positive_orientation='high')\n for n, contour in enumerate(contours):\n \n # print(len(contour))\n if(len(contour) < 40) :\n continue\n Ymax = np.amax(contour[:, 0])\n Ymin = np.amin(contour[:, 0])\n Xmax = np.amax(contour[:, 1])\n Xmin = np.amin(contour[:, 1])\n digit_seg = ([img[int(Ymin): int(Ymax)+1, int(Xmin): int(Xmax)+1]])\n digit = np.zeros([X*2,Y*2])\n h,w = np.array(digit_seg[0]).shape\n if(h > 28 or w>28):\n continue\n digit[X-int((h+1)/2):X+int(h/2) ,Y-int((w+1)/2):Y+int(w/2) , ] = digit_seg[0]\n digit = rotate(digit,90,resize=True)\n digit = erosion(digit)\n digit = dilation(digit)\n digits.append(digit)\n \n return digits\n \ndef rec_cell(img):\n digit_strv1 = \"\"\n digit_strv2 = \"\"\n img = rgb2gray(img)\n NotThin = (np.amax(thin(img)-img))\n \n imgv1 = np.copy(img)\n imgv1 = erosion(imgv1)\n imgv1 = binary_dilation(imgv1)\n digitsv1 = digits_locv1(imgv1)\n #show_images(digitsv1)\n \n imgv2 = np.copy(img)\n imgv2 = rgb2gray(imgv2)\n imgv2 = erosion(imgv2)\n digitsv2 = digits_locv1(imgv2)\n #show_images(digitsv2)\n for dig in digitsv1:\n digit_strv1 += str(predict_img(dig))\n \n for dig in digitsv2:\n digit_strv2 += str(predict_img(dig))\n \n # compare the string length to 0 (the original compared an int to \"\", which is always True)\n if(len(digit_strv1) != 0 and NotThin != 0):\n return digit_strv1\n else:\n return digit_strv2\n digit_strv1 = \"\"\n digit_strv2 = \"\"\n \n imgv1 = np.copy(img)\n imgv1 = erosion(imgv1)\n imgv1 = binary_dilation(imgv1)\n digitsv1 = digits_locv1(imgv1)\n \n imgv2 = np.copy(img)\n imgv2 = rgb2gray(imgv2)\n imgv2 = erosion(imgv2)\n digitsv2 = digits_locv1(imgv2)\n \n for dig in digitsv1:\n digit_strv1 += str(predict_img(dig))\n \n for dig in digitsv2:\n digit_strv2 += str(predict_img(dig))\n \n if(len(digit_strv1) != 0):\n return digit_strv1\n else:\n return digit_strv2"
] | [
[
"sklearn.preprocessing.normalize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jenniferbrennan/regmod | [
"8e1a9d6970b32449bab6f7897c42a82a9a59651b"
] | [
"tests/test_function.py"
] | [
"\"\"\"\nTest function module\n\"\"\"\nimport numpy as np\nimport pytest\nfrom regmod.function import fun_dict\n\n\ndef ad_dfun(fun, x, eps=1e-16):\n return fun(x + eps*1j).imag/eps\n\n\[email protected](\"x\", np.random.randn(3))\ndef test_identity(x):\n fun = fun_dict[\"identity\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n\n\[email protected](\"x\", np.random.randn(3))\ndef test_quad(x):\n fun = fun_dict[\"quad\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n\n\[email protected](\"x\", np.random.randn(3))\ndef test_exp(x):\n fun = fun_dict[\"exp\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n\n\[email protected](\"x\", np.random.randn(3))\ndef test_expit(x):\n fun = fun_dict[\"expit\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n\n\[email protected](\"x\", np.random.rand(3) + 0.1)\ndef test_log(x):\n fun = fun_dict[\"log\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n\n\[email protected](\"x\", 0.8*np.random.rand(3) + 0.1)\ndef test_logit(x):\n fun = fun_dict[\"logit\"]\n assert np.isclose(fun.dfun(x), ad_dfun(fun.fun, x))\n assert np.isclose(fun.d2fun(x), ad_dfun(fun.dfun, x))\n"
] | [
[
"numpy.random.randn",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dongwuuu/Cell-calssification | [
"c80a9b41d42e3a4d36dc6a6e1f910ae0b467ed1f"
] | [
"train.py"
] | [
"import argparse\nimport os\nimport copy\nimport random\nimport shutil\nimport time\nimport csv\nfrom random import shuffle\nimport warnings\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\nwarnings.filterwarnings(\"ignore\")\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, models\nfrom tensorboardX import SummaryWriter\nfrom sklearn.metrics import roc_auc_score, f1_score, recall_score, confusion_matrix, precision_score, accuracy_score, roc_curve, auc\n\n\nparser = argparse.ArgumentParser(description='PyTorch Training')\nparser.add_argument('data', metavar='DIR', help='path to dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run')\nparser.add_argument('-b', '--batch_size', default=64, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning_rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\nparser.add_argument('--wd', '--weight_decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-e', '--evaluate', default=False, type=bool, help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')\nparser.add_argument('--arch', metavar='ARCH', default='googlenet')\nparser.add_argument('--image_size', type=int, default=299)\nparser.add_argument('--model_path', default=\"./model/model.pth\", type=str)\n\n\ndef draw_roc(ground_truth, p_proba, args):\n fpr,tpr,threshold = roc_curve(ground_truth, p_proba)\n roc_auc = auc(fpr,tpr)\n\n plt.figure()\n plt.figure(figsize=(10,10))\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.3f)' % roc_auc) ###假正率为横坐标,真正率为纵坐标做曲线\n plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic example - ' + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1])\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./result/\" + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1] + \".png\")\n\n\ndef get_dataloader(args):\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize(600),\n transforms.RandomRotation(degrees=15),\n transforms.RandomResizedCrop(args.image_size),\n # transforms.ColorJitter(brightness=(0, 36), contrast=(0, 10), saturation=(0, 25), hue=(-0.5, 0.5)),\n # transforms.RandomCrop((244, 244)),\n transforms.RandomHorizontalFlip(0.5),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n \"val\": transforms.Compose([\n transforms.Resize((args.image_size, args.image_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n }\n\n image_datasets = {x: 
datasets.ImageFolder(os.path.join(args.data, x), data_transforms[x]) for x in [\"train\", \"val\"]}\n dataloader_dict = {x: DataLoader(image_datasets[x], shuffle=True, batch_size=args.batch_size, num_workers=args.workers) for x in [\"train\", \"val\"]}\n return dataloader_dict\n\n\ndef train(args):\n dataloaders = get_dataloader(args)\n if args.arch == \"inception_v3\":\n model = models.__dict__[args.arch](pretrained=True)\n else:\n model = models.__dict__[args.arch](aux_logits=False, transform_input=False, pretrained=True)\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 2)\n model.cuda()\n\n save_model = copy.deepcopy(model)\n\n train_writer = SummaryWriter(\"./run/train\")\n val_writer = SummaryWriter(\"./run/val\")\n\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n # optimizer = optim.Adam(model.parameters(), lr=args.lr)\n criterion = nn.CrossEntropyLoss()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.\n best_epoch = 0\n for epoch in range(args.epochs):\n print(\"Epoch {}/{}\".format(epoch + 1, args.epochs))\n print(\"-\" * 10)\n\n for phase in [\"train\", \"val\"]:\n running_loss = 0.\n running_corrects = 0.\n ground_truth = []\n pred_prob = []\n bin_pred = []\n\n if phase == \"train\":\n model.train()\n else:\n model.eval()\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n with torch.autograd.set_grad_enabled(phase == \"train\"):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n softmax_outputs = F.softmax(outputs, 1)\n pred_prob.extend(list(softmax_outputs.cpu().data.numpy()[:, 1]))\n ground_truth.extend(list(labels.cpu().data.numpy()))\n\n _, preds = torch.max(outputs, 1)\n bin_pred.extend(list(preds.cpu().data.numpy()))\n\n if phase == \"train\":\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds.view(-1) == labels.view(-1)).item()\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects / len(dataloaders[phase].dataset)\n recall = recall_score(ground_truth, bin_pred)\n precision = precision_score(ground_truth, bin_pred)\n auc = roc_auc_score(ground_truth, pred_prob)\n f1 = f1_score(ground_truth, bin_pred)\n\n print(\"{} Loss: {} Acc: {:0.4f} AUC: {:0.4f} Sensitivity: {:0.4f} Precision: {:0.4f} F1: {:0.4f}\".format(\n phase,\n epoch_loss,\n epoch_acc,\n auc,\n recall,\n precision,\n f1))\n\n if phase == \"train\":\n train_writer.add_scalar('Loss', epoch_loss, global_step=epoch)\n train_writer.add_scalar(\"ACC\", epoch_acc, global_step=epoch)\n train_writer.add_scalar(\"AUC\", auc, global_step=epoch)\n train_writer.add_scalar(\"Sensitivity\", recall, global_step=epoch)\n train_writer.add_scalar(\"Precision\", precision, global_step=epoch)\n train_writer.add_scalar(\"F1\", f1, global_step=epoch)\n else:\n if epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n best_epoch = epoch + 1\n\n val_writer.add_scalar(\"Loss\", epoch_loss, global_step=epoch)\n val_writer.add_scalar(\"ACC\", epoch_acc, global_step=epoch)\n val_writer.add_scalar(\"AUC\", auc, global_step=epoch)\n val_writer.add_scalar(\"Sensitivity\", recall, global_step=epoch)\n val_writer.add_scalar(\"Precision\", precision, global_step=epoch)\n val_writer.add_scalar(\"F1\", f1, global_step=epoch)\n print()\n\n if epoch % 10 == 0:\n save_model.load_state_dict(best_model_wts)\n torch.save(save_model, 
\"./model/\" + args.arch + \"_\" + args.data.split(\"/\")[-1] + \".pth\")\n\n train_writer.close()\n val_writer.close()\n model.load_state_dict(best_model_wts)\n print(best_epoch, \" model saved\")\n return model\n\n\ndef evaluate(model, args):\n data_dir = args.data\n model.cuda()\n model.eval()\n transform = transforms.Compose([\n transforms.Resize((args.image_size, args.image_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n res = []\n labels = []\n ground_truth = []\n postive_prob = []\n \n for root, dirs, names in os.walk(data_dir):\n for name in names:\n img_path = os.path.join(root, name)\n img = Image.open(img_path)\n img = transform(img)\n img = img.unsqueeze(0)\n img = img.cuda()\n\n with torch.no_grad():\n py = model(img)\n softmax_output = F.softmax(py, 1).cpu().data.numpy()\n np, pp = softmax_output[0][0], softmax_output[0][1]\n postive_prob.append(pp)\n if np >= 0.5:\n label = 0\n else:\n label = 1\n labels.append(label)\n # print(img_path.split(\"/\")[-1], label, np, pp)\n if \"non-cancer\" in img_path:\n ground_truth.append(0)\n res.append([img_path.split(\"/\")[-1], np, pp, 0, label])\n else:\n ground_truth.append(1)\n res.append([img_path.split(\"/\")[-1], np, pp, 1, label])\n\n acc = accuracy_score(ground_truth, labels)\n recall = recall_score(ground_truth, labels)\n auc_score = roc_auc_score(ground_truth, postive_prob)\n f1 = f1_score(ground_truth, labels)\n\n result_file_path = \"./result/acc-\" + str(acc) + \"-recall-\" + str(recall) + \"-auc-\" + str(auc_score) + \"-f1-\" + str(f1) + \"-\" + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1] + \".csv\"\n with open (result_file_path, \"w\", newline='') as f:\n f_csv = csv.writer(f)\n f_csv.writerow([\"img\", \"non-cancer prob\", \"cancer prob\", \"ground truth\", \"predict label\"])\n f_csv.writerows(res)\n\n # draw_roc(ground_truth, postive_prob, args)\n fpr,tpr,threshold = roc_curve(ground_truth, postive_prob)\n roc_auc = auc(fpr,tpr)\n\n plt.figure()\n plt.figure(figsize=(10,10))\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.3f)' % roc_auc) ###假正率为横坐标,真正率为纵坐标做曲线\n plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic example - ' + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1])\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./result/\" + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1] + \".png\")\n\n # res = pd.DataFrame(res, columns=[\"img\", \"non-cancer prob\", \"cancer prob\", \"ground truth\", \"predict label\"])\n # res.to_csv(\"./result/acc-\" + str(acc) + \"-recall-\" + str(recall) + \"-auc-\" + str(auc) + \"-f1-\" + str(f1) + \"-\" + args.data.split(\"/\")[-2] + \"-\" + args.data.split(\"/\")[-1] +\"-\" + args.model_path.split(\"/\")[-1] + \".csv\", index=False)\n\n\n\ndef main():\n args = parser.parse_args()\n if not args.evaluate:\n model = train(args)\n torch.save(model, \"./model/\" + args.arch + \"_\" + args.data.split(\"/\")[-1] + \".pth\")\n else:\n evaluate(torch.load(args.model_path), args)\n\n\nif __name__==\"__main__\":\n main()"
] | [
[
"matplotlib.pyplot.legend",
"sklearn.metrics.roc_auc_score",
"torch.nn.functional.softmax",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.plot",
"torch.no_grad",
"sklearn.metrics.f1_score",
"torch.nn.CrossEntropyLoss",
"torch.autograd.set_grad_enabled",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"sklearn.metrics.precision_score",
"sklearn.metrics.roc_curve",
"torch.nn.Linear",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
richardbaihe/conversation | [
"ba85518a1ac2a57988188fc5f2b8fe42e1facf64"
] | [
"source/modules/decoders/rnn_decoder.py"
] | [
"#!/usr/bin/env python\n################################################################################\n#\n# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved\n#\n################################################################################\n\"\"\"\nFile: source/decoders/rnn_decoder.py\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom source.modules.attention import Attention\nfrom source.modules.decoders.state import DecoderState\nfrom source.utils.misc import Pack\nfrom source.utils.misc import sequence_mask\n\n\nclass RNNDecoder(nn.Module):\n \"\"\"\n A GRU recurrent neural network decoder.\n \"\"\"\n def __init__(self,\n input_size,\n hidden_size,\n output_size,\n embedder=None,\n num_layers=1,\n attn_mode=None,\n attn_hidden_size=None,\n memory_size=None,\n feature_size=None,\n dropout=0.0):\n super(RNNDecoder, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.embedder = embedder\n self.num_layers = num_layers\n self.attn_mode = None if attn_mode == 'none' else attn_mode\n self.attn_hidden_size = attn_hidden_size or hidden_size // 2\n self.memory_size = memory_size or hidden_size\n self.feature_size = feature_size\n self.dropout = dropout\n\n self.rnn_input_size = self.input_size\n self.out_input_size = self.hidden_size\n\n if self.feature_size is not None:\n self.rnn_input_size += self.feature_size\n\n if self.attn_mode is not None:\n self.attention = Attention(query_size=self.hidden_size,\n memory_size=self.memory_size,\n hidden_size=self.attn_hidden_size,\n mode=self.attn_mode,\n project=False)\n self.rnn_input_size += self.memory_size\n self.out_input_size += self.memory_size\n\n self.rnn = nn.GRU(input_size=self.rnn_input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n dropout=self.dropout if self.num_layers > 1 else 0,\n batch_first=True)\n\n if self.out_input_size > self.hidden_size:\n self.output_layer = nn.Sequential(\n nn.Dropout(p=self.dropout),\n nn.Linear(self.out_input_size, self.hidden_size),\n nn.Linear(self.hidden_size, self.output_size),\n nn.LogSoftmax(dim=-1),\n )\n else:\n self.output_layer = nn.Sequential(\n nn.Dropout(p=self.dropout),\n nn.Linear(self.out_input_size, self.output_size),\n nn.LogSoftmax(dim=-1),\n )\n\n def initialize_state(self,\n hidden,\n feature=None,\n attn_memory=None,\n attn_mask=None,\n memory_lengths=None):\n \"\"\"\n initialize_state\n \"\"\"\n if self.feature_size is not None:\n assert feature is not None\n\n if self.attn_mode is not None:\n assert attn_memory is not None\n\n if memory_lengths is not None and attn_mask is None:\n max_len = attn_memory.size(1)\n attn_mask = sequence_mask(memory_lengths, max_len).eq(0)\n\n init_state = DecoderState(\n hidden=hidden,\n feature=feature,\n attn_memory=attn_memory,\n attn_mask=attn_mask,\n )\n return init_state\n\n def decode(self, input, state, is_training=False):\n \"\"\"\n decode\n \"\"\"\n hidden = state.hidden\n rnn_input_list = []\n out_input_list = []\n output = Pack()\n\n if self.embedder is not None:\n input = self.embedder(input)\n\n # shape: (batch_size, 1, input_size)\n input = input.unsqueeze(1)\n rnn_input_list.append(input)\n\n if self.feature_size is not None:\n feature = state.feature.unsqueeze(1)\n rnn_input_list.append(feature)\n\n if self.attn_mode is not None:\n attn_memory = state.attn_memory\n attn_mask = state.attn_mask\n query = hidden[-1].unsqueeze(1)\n weighted_context, attn = self.attention(query=query,\n memory=attn_memory,\n mask=attn_mask)\n 
rnn_input_list.append(weighted_context)\n out_input_list.append(weighted_context)\n output.add(attn=attn)\n\n rnn_input = torch.cat(rnn_input_list, dim=-1)\n rnn_output, new_hidden = self.rnn(rnn_input, hidden)\n out_input_list.append(rnn_output)\n\n out_input = torch.cat(out_input_list, dim=-1)\n state.hidden = new_hidden\n\n if is_training:\n return out_input, state, output\n else:\n log_prob = self.output_layer(out_input)\n return log_prob, state, output\n\n def forward(self, inputs, state):\n \"\"\"\n forward\n \"\"\"\n inputs, lengths = inputs\n batch_size, max_len = inputs.size()\n\n out_inputs = inputs.new_zeros(\n size=(batch_size, max_len, self.out_input_size),\n dtype=torch.float)\n\n # sort by lengths\n sorted_lengths, indices = lengths.sort(descending=True)\n inputs = inputs.index_select(0, indices)\n state = state.index_select(indices)\n\n # number of valid input (i.e. not padding index) in each time step\n num_valid_list = sequence_mask(sorted_lengths).int().sum(dim=0)\n\n for i, num_valid in enumerate(num_valid_list):\n dec_input = inputs[:num_valid, i]\n valid_state = state.slice_select(num_valid)\n out_input, valid_state, _ = self.decode(\n dec_input, valid_state, is_training=True)\n state.hidden[:, :num_valid] = valid_state.hidden\n out_inputs[:num_valid, i] = out_input.squeeze(1)\n\n # Resort\n _, inv_indices = indices.sort()\n state = state.index_select(inv_indices)\n out_inputs = out_inputs.index_select(0, inv_indices)\n\n log_probs = self.output_layer(out_inputs)\n return log_probs, state\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.LogSoftmax",
"torch.cat",
"torch.nn.GRU",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rentainhe/glasses | [
"34300a76985c7fc643094fa8d617114926a0ee75"
] | [
"test/test_RegNet.py"
] | [
"from functools import partial\n\nimport torch\nfrom glasses.models.classification.regnet import *\nfrom glasses.models.classification.resnet import ResNetShorcutD, ResNetStemC\n\n\ndef test_regnet():\n x = torch.rand(1, 3, 224, 224)\n with torch.no_grad():\n model = RegNet.regnetx_002()\n pred = model(x)\n assert pred.shape[-1] == 1000\n n_classes = 10\n model = RegNet.regnetx_002(n_classes=n_classes).eval()\n pred = model(x)\n assert pred.shape[-1] == n_classes\n model = RegNet.regnetx_002(block=RegNetYBotteneckBlock)\n pred = model(x) \n assert pred.shape[-1] == 1000\n # change the steam\n model = RegNet.regnetx_002(stem=ResNetStemC)\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnetx_002(block=partial(RegNetYBotteneckBlock, shortcut=ResNetShorcutD))\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnetx_004().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnetx_006().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnetx_008().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnetx_016().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnety_002().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnety_004().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnety_006().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnety_008().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n model = RegNet.regnety_016().eval()\n pred = model(x)\n assert pred.shape[-1] == 1000\n\ndef test_regnet_scaler():\n depths, widths, groups_width = RegNetScaler()(w_0 = 24, w_a = 24.48, w_m = 2.54, group_w = 16, depth = 22 )\n assert depths == [1, 2, 7, 12]\n assert widths == [32, 64, 160, 384]\n assert groups_width == 16\n"
] | [
[
"torch.no_grad",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pabloe4993/foolbox | [
"2daabba8355afce9dfbec3de8d71dadadcfbd10b"
] | [
"foolbox/attacks/iterative_projected_gradient.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom abc import abstractmethod\nimport logging\nimport warnings\n\nfrom .base import Attack\nfrom .base import call_decorator\nfrom .. import distances\nfrom ..utils import crossentropy\nfrom .. import nprng\n\n\nclass IterativeProjectedGradientBaseAttack(Attack):\n \"\"\"Base class for iterative (projected) gradient attacks.\n\n Concrete subclasses should implement __call__, _gradient\n and _clip_perturbation.\n\n TODO: add support for other loss-functions, e.g. the CW loss function,\n see https://github.com/MadryLab/mnist_challenge/blob/master/pgd_attack.py\n \"\"\"\n\n @abstractmethod\n def _gradient(self, a, x, class_, strict=True):\n raise NotImplementedError\n\n @abstractmethod\n def _clip_perturbation(self, a, noise, epsilon):\n raise NotImplementedError\n\n @abstractmethod\n def _check_distance(self, a):\n raise NotImplementedError\n\n def _get_mode_and_class(self, a):\n # determine if the attack is targeted or not\n target_class = a.target_class()\n targeted = target_class is not None\n\n if targeted:\n class_ = target_class\n else:\n class_ = a.original_class\n return targeted, class_\n\n def _run(self, a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early):\n if not a.has_gradient():\n warnings.warn('applied gradient-based attack to model that'\n ' does not provide gradients')\n return\n\n self._check_distance(a)\n\n targeted, class_ = self._get_mode_and_class(a)\n\n if binary_search:\n if isinstance(binary_search, bool):\n k = 20\n else:\n k = int(binary_search)\n return self._run_binary_search(\n a, epsilon, stepsize, iterations,\n random_start, targeted, class_, return_early, k=k)\n else:\n return self._run_one(\n a, epsilon, stepsize, iterations,\n random_start, targeted, class_, return_early)\n\n def _run_binary_search(self, a, epsilon, stepsize, iterations,\n random_start, targeted, class_, return_early, k):\n\n factor = stepsize / epsilon\n\n def try_epsilon(epsilon):\n stepsize = factor * epsilon\n return self._run_one(\n a, epsilon, stepsize, iterations,\n random_start, targeted, class_, return_early)\n\n for i in range(k):\n if try_epsilon(epsilon):\n logging.info('successful for eps = {}'.format(epsilon))\n break\n logging.info('not successful for eps = {}'.format(epsilon))\n epsilon = epsilon * 1.5\n else:\n logging.warning('exponential search failed')\n return\n\n bad = 0\n good = epsilon\n\n for i in range(k):\n epsilon = (good + bad) / 2\n if try_epsilon(epsilon):\n good = epsilon\n logging.info('successful for eps = {}'.format(epsilon))\n else:\n bad = epsilon\n logging.info('not successful for eps = {}'.format(epsilon))\n\n def _run_one(self, a, epsilon, stepsize, iterations,\n random_start, targeted, class_, return_early):\n min_, max_ = a.bounds()\n s = max_ - min_\n\n original = a.unperturbed.copy()\n\n if random_start:\n # using uniform noise even if the perturbation clipping uses\n # a different norm because cleverhans does it the same way\n noise = nprng.uniform(\n -epsilon * s, epsilon * s, original.shape).astype(\n original.dtype)\n x = original + self._clip_perturbation(a, noise, epsilon)\n strict = False # because we don't enforce the bounds here\n else:\n x = original\n strict = True\n\n success = False\n for _ in range(iterations):\n gradient = self._gradient(a, x, class_, strict=strict)\n # non-strict only for the first call and\n # only if random_start is True\n strict = True\n if targeted:\n gradient = -gradient\n\n # untargeted: gradient ascent on cross-entropy to 
original class\n # targeted: gradient descent on cross-entropy to target class\n x = x + stepsize * gradient\n\n x = original + self._clip_perturbation(a, x - original, epsilon)\n\n x = np.clip(x, min_, max_)\n\n logits, is_adversarial = a.forward_one(x)\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n if targeted:\n ce = crossentropy(a.original_class, logits)\n logging.debug('crossentropy to {} is {}'.format(\n a.original_class, ce))\n ce = crossentropy(class_, logits)\n logging.debug('crossentropy to {} is {}'.format(class_, ce))\n if is_adversarial:\n if return_early:\n return True\n else:\n success = True\n return success\n\n\nclass LinfinityGradientMixin(object):\n def _gradient(self, a, x, class_, strict=True):\n gradient = a.gradient_one(x, class_, strict=strict)\n gradient = np.sign(gradient)\n min_, max_ = a.bounds()\n gradient = (max_ - min_) * gradient\n return gradient\n\n\nclass L1GradientMixin(object):\n def _gradient(self, a, x, class_, strict=True):\n gradient = a.gradient_one(x, class_, strict=strict)\n # using mean to make range of epsilons comparable to Linf\n gradient = gradient / np.mean(np.abs(gradient))\n min_, max_ = a.bounds()\n gradient = (max_ - min_) * gradient\n return gradient\n\n\nclass L2GradientMixin(object):\n def _gradient(self, a, x, class_, strict=True):\n gradient = a.gradient_one(x, class_, strict=strict)\n # using mean to make range of epsilons comparable to Linf\n gradient = gradient / np.sqrt(np.mean(np.square(gradient)))\n min_, max_ = a.bounds()\n gradient = (max_ - min_) * gradient\n return gradient\n\n\nclass LinfinityClippingMixin(object):\n def _clip_perturbation(self, a, perturbation, epsilon):\n min_, max_ = a.bounds()\n s = max_ - min_\n clipped = np.clip(perturbation, -epsilon * s, epsilon * s)\n return clipped\n\n\nclass L1ClippingMixin(object):\n def _clip_perturbation(self, a, perturbation, epsilon):\n # using mean to make range of epsilons comparable to Linf\n norm = np.mean(np.abs(perturbation))\n norm = max(1e-12, norm) # avoid divsion by zero\n min_, max_ = a.bounds()\n s = max_ - min_\n # clipping, i.e. only decreasing norm\n factor = min(1, epsilon * s / norm)\n return perturbation * factor\n\n\nclass L2ClippingMixin(object):\n def _clip_perturbation(self, a, perturbation, epsilon):\n # using mean to make range of epsilons comparable to Linf\n norm = np.sqrt(np.mean(np.square(perturbation)))\n norm = max(1e-12, norm) # avoid divsion by zero\n min_, max_ = a.bounds()\n s = max_ - min_\n # clipping, i.e. 
only decreasing norm\n factor = min(1, epsilon * s / norm)\n return perturbation * factor\n\n\nclass LinfinityDistanceCheckMixin(object):\n def _check_distance(self, a):\n if not isinstance(a.distance, distances.Linfinity):\n logging.warning('Running an attack that tries to minimize the'\n ' Linfinity norm of the perturbation without'\n ' specifying foolbox.distances.Linfinity as'\n ' the distance metric might lead to suboptimal'\n ' results.')\n\n\nclass L1DistanceCheckMixin(object):\n def _check_distance(self, a):\n if not isinstance(a.distance, distances.MAE):\n logging.warning('Running an attack that tries to minimize the'\n ' L1 norm of the perturbation without'\n ' specifying foolbox.distances.MAE as'\n ' the distance metric might lead to suboptimal'\n ' results.')\n\n\nclass L2DistanceCheckMixin(object):\n def _check_distance(self, a):\n if not isinstance(a.distance, distances.MSE):\n logging.warning('Running an attack that tries to minimize the'\n ' L2 norm of the perturbation without'\n ' specifying foolbox.distances.MSE as'\n ' the distance metric might lead to suboptimal'\n ' results.')\n\n\nclass LinfinityBasicIterativeAttack(\n LinfinityGradientMixin,\n LinfinityClippingMixin,\n LinfinityDistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"The Basic Iterative Method introduced in [1]_.\n\n This attack is also known as Projected Gradient\n Descent (PGD) (without random start) or FGMS^k.\n\n References\n ----------\n .. [1] Alexey Kurakin, Ian Goodfellow, Samy Bengio,\n \"Adversarial examples in the physical world\",\n https://arxiv.org/abs/1607.02533\n\n .. seealso:: :class:`ProjectedGradientDescentAttack`\n\n \"\"\"\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.05,\n iterations=10,\n random_start=False,\n return_early=True):\n\n \"\"\"Simple iterative gradient-based attack known as\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool or int\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. 
If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nBasicIterativeMethod = LinfinityBasicIterativeAttack\nBIM = BasicIterativeMethod\n\n\nclass L1BasicIterativeAttack(\n L1GradientMixin,\n L1ClippingMixin,\n L1DistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"Modified version of the Basic Iterative Method\n that minimizes the L1 distance.\n\n .. seealso:: :class:`LinfinityBasicIterativeAttack`\n\n \"\"\"\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.05,\n iterations=10,\n random_start=False,\n return_early=True):\n\n \"\"\"Simple iterative gradient-based attack known as\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool or int\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nclass L2BasicIterativeAttack(\n L2GradientMixin,\n L2ClippingMixin,\n L2DistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"Modified version of the Basic Iterative Method\n that minimizes the L2 distance.\n\n .. 
seealso:: :class:`LinfinityBasicIterativeAttack`\n\n \"\"\"\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.05,\n iterations=10,\n random_start=False,\n return_early=True):\n\n \"\"\"Simple iterative gradient-based attack known as\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool or int\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nclass ProjectedGradientDescentAttack(\n LinfinityGradientMixin,\n LinfinityClippingMixin,\n LinfinityDistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"The Projected Gradient Descent Attack\n introduced in [1]_ without random start.\n\n When used without a random start, this attack\n is also known as Basic Iterative Method (BIM)\n or FGSM^k.\n\n References\n ----------\n .. [1] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt,\n Dimitris Tsipras, Adrian Vladu, \"Towards Deep Learning\n Models Resistant to Adversarial Attacks\",\n https://arxiv.org/abs/1706.06083\n\n .. seealso::\n\n :class:`LinfinityBasicIterativeAttack` and\n :class:`RandomStartProjectedGradientDescentAttack`\n\n \"\"\"\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.01,\n iterations=40,\n random_start=False,\n return_early=True):\n\n \"\"\"Simple iterative gradient-based attack known as\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. 
Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool or int\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nProjectedGradientDescent = ProjectedGradientDescentAttack\nPGD = ProjectedGradientDescent\n\n\nclass RandomStartProjectedGradientDescentAttack(\n LinfinityGradientMixin,\n LinfinityClippingMixin,\n LinfinityDistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"The Projected Gradient Descent Attack\n introduced in [1]_ with random start.\n\n References\n ----------\n .. [1] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt,\n Dimitris Tsipras, Adrian Vladu, \"Towards Deep Learning\n Models Resistant to Adversarial Attacks\",\n https://arxiv.org/abs/1706.06083\n\n .. seealso:: :class:`ProjectedGradientDescentAttack`\n\n \"\"\"\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.01,\n iterations=40,\n random_start=True,\n return_early=True):\n\n \"\"\"Simple iterative gradient-based attack known as\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool or int\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. 
If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nRandomProjectedGradientDescent = RandomStartProjectedGradientDescentAttack\nRandomPGD = RandomProjectedGradientDescent\n\n\nclass MomentumIterativeAttack(\n LinfinityClippingMixin,\n LinfinityDistanceCheckMixin,\n IterativeProjectedGradientBaseAttack):\n\n \"\"\"The Momentum Iterative Method attack\n introduced in [1]_. It's like the Basic\n Iterative Method or Projected Gradient\n Descent except that it uses momentum.\n\n References\n ----------\n .. [1] Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su,\n Jun Zhu, Xiaolin Hu, Jianguo Li, \"Boosting Adversarial\n Attacks with Momentum\",\n https://arxiv.org/abs/1710.06081\n\n \"\"\"\n\n def _gradient(self, a, x, class_, strict=True):\n # get current gradient\n gradient = a.gradient_one(x, class_, strict=strict)\n gradient = gradient / max(1e-12, np.mean(np.abs(gradient)))\n\n # combine with history of gradient as new history\n self._momentum_history = \\\n self._decay_factor * self._momentum_history + gradient\n\n # use history\n gradient = self._momentum_history\n gradient = np.sign(gradient)\n min_, max_ = a.bounds()\n gradient = (max_ - min_) * gradient\n return gradient\n\n def _run_one(self, *args, **kwargs):\n # reset momentum history every time we restart\n # gradient descent\n self._momentum_history = 0\n return super(MomentumIterativeAttack, self)._run_one(*args, **kwargs)\n\n @call_decorator\n def __call__(self, input_or_adv, label=None, unpack=True,\n binary_search=True,\n epsilon=0.3,\n stepsize=0.06,\n iterations=10,\n decay_factor=1.0,\n random_start=False,\n return_early=True):\n\n \"\"\"Momentum-based iterative gradient attack known as\n Momentum Iterative Method.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n binary_search : bool\n Whether to perform a binary search over epsilon and stepsize,\n keeping their ratio constant and using their values to start\n the search. 
If False, hyperparameters are not optimized.\n Can also be an integer, specifying the number of binary\n search steps (default 20).\n epsilon : float\n Limit on the perturbation size; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n stepsize : float\n Step size for gradient descent; if binary_search is True,\n this value is only for initialization and automatically\n adapted.\n iterations : int\n Number of iterations for each gradient descent run.\n decay_factor : float\n Decay factor used by the momentum term.\n random_start : bool\n Start the attack from a random point rather than from the\n original input.\n return_early : bool\n Whether an individual gradient descent run should stop as\n soon as an adversarial is found.\n \"\"\"\n a = input_or_adv\n del input_or_adv\n del label\n del unpack\n\n assert epsilon > 0\n\n self._decay_factor = decay_factor\n\n self._run(a, binary_search,\n epsilon, stepsize, iterations,\n random_start, return_early)\n\n\nMomentumIterativeMethod = MomentumIterativeAttack\n"
] | [
[
"numpy.sign",
"numpy.square",
"numpy.abs",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Combustion-Transport-Analysis/Virial-Coefficient-Data-for-Combustion-Species | [
"035c74ac23b30aefef9ee91065b61b62c9fc3ca4"
] | [
"databaseExp.py"
] | [
"# -*- coding: utf-8 -*- \n\n# Headers for Python\nimport pandas as pd\nimport numpy as np\n\n# FUNCTIONS NEEDED IN THIS DATABASE\ndef BerrCalc(Bvalues, DataQuality):\n # determine the error class as defined by Dymond & Smith, 1980\n # class I: estimated precision < 2% or < 1 cm^3 mol^-1, whichever is greater\n # class II: estimated precision < 10% or < 15 cm^3 mol^-1, whichever is greater\n # class III: estimated precision > 10% or > 15 cm^3 mol^-1, whichever is greater\n if (DataQuality == 1):\n percentError = 0.02;\n cm3mol1Error = 1;\n elif (DataQuality == 2):\n percentError = 0.10;\n cm3mol1Error = 15;\n elif (DataQuality == 3):\n percentError = 0.20;\n cm3mol1Error = 30;\n # initialize B error vector\n BerrReturn = np.zeros(len(Bvalues));\n \n # determine the error and assign it to the vector, element by element\n for ii in range(len(Bvalues)):\n percentEstimate = abs(Bvalues[ii])*percentError;\n cm3mol1Estimate = cm3mol1Error;\n BerrReturn[ii] = max(percentEstimate, cm3mol1Estimate);\n return BerrReturn\n\ndef Bcalc(T, sigma, epsilon, mu, calcMethod):\n # Calculate the second coefficient of the virial equation of state using\n # Lennard Jones / Stockmayer parameters\n # T in Kelvin, sigma in Angstroms, epsilon in Kelvin, mu in Debyes\n # Only one method (calcMethod = \"Inf\") works with confidence right now, and\n # that method involves a full integration of the intermolecular potential \n \n # basic definitions needed in this function\n N_A = 6.022140E23 # Avagadro's number\n k_B = 1.38064852E-23 # Boltzmann constant\n epsilon_0 = 8.8541878176E-12 # permittivity of free space \n pi = np.pi # pi\n \n if (calcMethod == \"Inf\"):\n # use the full integration of the potential to infinity (or in this case 100 angstroms)\n # This can apply to either a Lennard-Jones fluid or a Stockmayer fluid\n # However, using the Stockmayer expression with a permanent dipole results in erraneously low values of \n # B, so for now, mu = 0.0\n # \n # Set up a grid from 0 to 100 Angstroms\n radius = np.linspace(0.0001, 100.0001, 10000)\n # non-dimensionalize the grid by the collision diameter, sigma\n r_star = np.array(radius/sigma)\n # nondimensionalize the dipole moment by the well depth and collision diameter\n # take the maximum value of the nondimensional dipole moment, defined by Kee (pp 496, 2003)\n # the electric constant must be in here to be truly nondimensional, but then disagrees with tabulated results \n # The 1.0E-18 is to convert to statC, the 1.0E7 is convert to ergs, and the 1.0E-8 is to convert to cm \n delta_max = (((mu*1.0E-18)**2.0))/(2.0*epsilon*k_B*(1.0E7)*((sigma*1.0E-8)**3.0))\n constantConvert = 1.0/(4*pi*epsilon_0*8.998E9) # divide delta_max by this number to convert for statC\n # as a sanity check, this should give something close to unity \n\n # create the integral \n integral_expression = np.array((r_star**2.0)*(np.exp(-4.0*(r_star**(-12.0) - r_star**(-6.0) - (delta_max*constantConvert)*(r_star**(-3.0)))*(epsilon/T)) -1))\n B_result = 0.6022140*(-2.0*pi*(sigma**3.0))*np.trapz(integral_expression, x=r_star)\n return B_result\n\n# to do: function that determines virial coefficient from PVT data...?\n\n# Enter data for methane, CH4\n\n# original index in compilation: 3\nspeciesName = [\"CH4\"]\ndataRef = [\"F.A. Freeth and T.T.H. Verschoyle, Proc. R. Soc. 
A130 453 (1931)\"]\ndataRefID = [\"10.1098/rspa.1931.0016\"]\n# 4-term fit of PV data at 293.15 K, \ndataClass = [\"class I\"]\ndataT = [np.array([273.15, 293.15])]\ndataB = [np.array([-53.91, -48.68])]\ndataBerr = [BerrCalc(dataB[0], 1)]\n\n# original index in compilation: 4\nspeciesName.append(\"CH4\")\ndataRef.append(\"A. Michels and G.W. Nederbragt, Physica 2 1000 (1935)\")\ndataRefID.append(\"10.1016/S0031-8914(35)90186-0\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-54.07, -43.38, -34.72, -27.87, -21.74, -16.09, -11.46]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 5\nspeciesName.append(\"CH4\")\ndataRef.append(\"A. Michels and G.W. Nederbragt, Physica 3 569 (1936)\")\ndataRefID.append(\"10.1016/S0031-8914(36)80363-2\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-53.86, -43.34, -34.62, -27.73, -21.58, -16.36, -11.62]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 6\nspeciesName.append(\"CH4\")\ndataRef.append(\"J.A. Beattie and W.H. Stockmayer, J. chem Phys. 10 473 (1942)\")\ndataRefID.append(\"10.1063/1.1723750\")\ndataClass.append(\"class I\")\ndataT.append(np.array([423.15, 448.15, 473.15, 498.15, 523.15, 548.15, 573.15]))\ndataB.append(np.array([-11.4, -7.5, -4.0, -0.9, 1.9, 4.5, 6.8]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 8\nspeciesName.append(\"CH4\")\ndataRef.append(\"Eizo Kanda, Sc. Rep. Res. Insts Tohoku Univ. Ser. A1 157 (1949)\")\ndataRefID.append(\"http://ci.nii.ac.jp/naid/110004636624/\")\n# values of B taken from Chem. Abstr. 45 5993b (1951). Calculated by the\n# author from other thermodynamic measurements\ndataClass.append(\"class II\")\ndataT.append(np.array([150, 200, 250, 300, 350, 400, 450]))\ndataB.append(np.array([-169.1, -100.1, -63.14, -43.32, -26.80, -15.33, -3.91]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 9\n\nspeciesName.append(\"CH4\")\ndataRef.append(\"S.D. Hamann, J.A. Lambert, and R.B. Thomas, Aust. J. Chem. 8 149 (1955)\")\ndataRefID.append(\"10.1071/CH9550149\")\ndataClass.append(\"class I\")\ndataT.append(np.array([303.15, 323.15, 333.15, 343.15, 363.15, 383.15]))\ndataB.append(np.array([-38.2, -35.2, -33.9, -28.5, -22.7, -19.7]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 10a\nspeciesName.append(\"CH4\")\ndataRef.append(\"H.W. Schamp, Jr., E.A. Mason, A.C.B. Richardson, and A. Altman, Physics Fluids 1 329 (1958)\")\ndataRefID.append(\"10.1063/1.1705891\")\ndataClass.append(\"class I\")\n# 3 term fit of PV data\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-53.43, -43.03, -34.42, -27.29, -21.26, -15.99, -11.41]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 10b\nspeciesName.append(\"CH4\")\ndataRef.append(\"H.W. Schamp, Jr., E.A. Mason, A.C.B. Richardson, and A. Altman, Physics Fluids 1 329 (1958)\")\ndataRefID.append(\"10.1063/1.1705891\")\ndataClass.append(\"class I\")\n# 4 term fit of PV data\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-53.62, -43.26, -34.58, -27.45, -21.26, -15.93, -11.24]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 11\nspeciesName.append(\"CH4\")\ndataRef.append(\"G. Thomes and R. 
van Steenwinkel, Nature, Lond. 187 229 (1960)\")\ndataRefID.append(\"10.1038/187229a0\")\ndataClass.append(\"class II\")\ndataT.append(np.array([108.45, 108.45, 125.2, 125.2, 149.1, 149.1, 186.4, 186.4, 223.6, 223.6, 249.3, 249.3]))\ndataB.append(np.array([-364.99, -361.54, -267.97, -268.92, -188.04, -187.64, -126.10, -126.20, \\\n -82.62, -82.69, -68.53, -68.38]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 12\nspeciesName.append(\"CH4\")\ndataRef.append(\"J.A. Huff and T.M. Reed, J. chem. Engng Data 8 306 (1963)\")\ndataRefID.append(\"10.1021/je60018a010\")\n# Values of B originally given by R.D. Gunn, M.S. Thesis, University of California (Berkeley) (1958)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.2, 444.3, 447.6, 510.9]))\ndataB.append(np.array([-54.1, -8.1, -3.6, 0.0]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 13\nspeciesName.append(\"CH4\")\ndataRef.append(\"W.H. Mueller, T.W. Leland, Jr., and R. Kobayashi, AIChE. J. 7 267 (1961)\")\ndataRefID.append(\"10.1002/aic.690070220\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([144.28, 172.05, 199.83, 227.60, 255.38, 283.16]))\ndataB.append(np.array([-221.0, -153.5, -107.8, -81.7, -63.3, -49.1]))\ndataBerr.append(np.array([2, 1, 2, 1, 1, 2]))\n\n# original index in compilation: 15\nspeciesName.append(\"CH4\")\ndataRef.append(\"D.R. Douslin, R.H. Harrison, R.T. Moore, and J.P. McCullough, J. chem. Engng Data 9 358 (1964)\")\ndataRefID.append(\"10.1021/j100870a021\")\n# Errors in B less than 0.2\n# These values are quoted by D.R. Douslin, Progress in international\n# research in thermodynamic and transport properties, ASME, (1962) p 135,\n# where a short discussion of errors is given \ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 298.15, 303.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15, \\\n 498.15, 523.15, 548.15, 573.15, 598.15, 623.15]))\ndataB.append(np.array([-53.35, -42.82, -40.91, -34.23, -27.06, -21.00, -15.87, -11.40, -7.56, -4.16, \\\n -1.16, 1.49, 3.89, 5.98, 7.88, 9.66]))\ndataBerr.append(np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]))\n\n# original index in compilation: 17\nspeciesName.append(\"CH4\")\ndataRef.append(\"A.E. Hoover, I. Nagata, T.W. Leland, Jr., and R. Kobayashi, J. chem. phys. 48 2633 (1968)\")\ndataRefID.append(\"10.1063/1.1669494\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([131.93, 191.06, 200.00, 215.00, 240.00, 273.15]))\ndataB.append(np.array([-224, -116.31, -106.12, -92.59, -72.72, -53.28]))\ndataBerr.append(np.multiply(np.array([0.09, 0.01, 0.01, 0.004, 0.003, 0.002]), -1.*dataB[-1]))\n\n# original index in compilation: 18\nspeciesName.append(\"CH4\")\ndataRef.append(\"M.A. Byrne, M.R. Jones, and L.A.K. Staveley, Trans. Faraday Soc. 64 1747 (1968)\")\ndataRefID.append(\"10.1039/TF9686401747\")\ndataClass.append(\"class I\")\ndataT.append(np.array([110.83, 112.43, 114.45, 116.79, 121.25, 128.84, 136.75, 148.28, 162.29, 178.41, \\\n 202.49, 221.10, 243.80, 273.17]))\ndataB.append(np.array([-330.1, -319.9, -307.8, -295.5, -274.5, -244.3, -218.9, -187.7, -158.4, -132.2, \\\n -103.4, -85.8, -70.3, -53.7]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 19\nspeciesName.append(\"CH4\")\ndataRef.append(\"R.N. Lichtenthaler and K. Schafer, Ber. (dtsch.) Bunsenges. phys. Chem. 
73 42 (1969)\")\ndataRefID.append(\"10.1002/bbpc.19690730110\")\n# estimated absolute error in B +/- 1\ndataClass.append(\"N/A\")\ndataT.append(np.array([288.2, 296.0, 303.2, 313.2, 323.1]))\ndataB.append(np.array([-46.20, -43.13, -40.40, -37.00, -33.80]))\ndataBerr.append(np.array([1, 1, 1, 1, 1]))\n\n# original index in compilation: 22a\nspeciesName.append(\"CH4\")\ndataRef.append(\"R.C. Lee and W.C. Edminster, AIChE. J. 16 1047 (1970)\")\ndataRefID.append(\"10.1002/aic.690160631\")\ndataClass.append(\"N/A\")\n# slope intercept calculations\ndataT.append(np.array([298.15, 323.15, 348.15]))\ndataB.append(np.array([-42.88, -33.22, -26.54]))\ndataBerr.append(np.array([1.5, 1.0, 1.1]))\n\n# original index in compilation: 22b\nspeciesName.append(\"CH4\")\ndataRef.append(\"R.C. Lee and W.C. Edminster, AIChE. J. 16 1047 (1970)\")\ndataRefID.append(\"10.1002/aic.690160631\")\ndataClass.append(\"N/A\")\n# curve fit\ndataT.append(np.array([298.15, 323.15, 348.15]))\ndataB.append(np.array([-42.70, -33.46, -25.69]))\ndataBerr.append(np.array([2.3, 0.3, 0.9]))\n\n# original index in compilation: 24\nspeciesName.append(\"CH4\")\ndataRef.append(\"K. Strein, R.N. Lichtenthaler, B. Schramm, and Kl. Schafer, Ber. (dtsch.) Bunsenges. phys. Chem. 75 1308 (1971)\")\ndataRefID.append(\"10.1002/bbpc.197100009\")\ndataClass.append(\"N/A\")\n# estimated accuracy of +/- 1\ndataT.append(np.array([296.1, 308.0, 333.5, 353.8, 374.0, 393.9, 413.8, 434.0, 453.4, 473.5, 493.0, 511.1]))\ndataB.append(np.array([-44.5, -39.5, -29.8, -23.8, -19.4, -15.4, -11.5, -8.0, -5.0, -3.0, -1.0, 0.5]))\ndataBerr.append(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n\n# original index in compilation: 25\nspeciesName.append(\"CH4\")\ndataRef.append(\"T.K. Bose, J.S. Sochanski, and R.H. Cole, J. chem. Phys. 57 3592 (1972)\")\ndataRefID.append(\"10.1063/1.1678813\")\ndataClass.append(\"N/A\")\n# B derived from low pressure dielectric measurements\ndataT.append(np.array([279.8, 322.5, 373.4]))\ndataB.append(np.array([-52.9, -35.8, -21.8]))\ndataBerr.append(np.array([1.4, 2.3, 1.0]))\n\n# original index in compilation: 26\nspeciesName.append(\"CH4\")\ndataRef.append(\"D.R. Roe, PhD thesis, University of London (1972)\")\ndataRefID.append(\"N/A\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([155.89, 167.67, 181.86, 192.64, 204.61, 218.87, 234.05, 248.54, 263.08, 291.41]))\ndataB.append(np.array([-167.95, -146.55, -125.70, -112.85, -100.15, -87.15, -75.90, -66.50, -58.35, -45.50]))\ndataBerr.append(np.array([0.60, 0.40, 0.40, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20]))\n\n# original index in compilation: 27\nspeciesName.append(\"CH4\")\ndataRef.append(\"G.A. Pope, P.S. Chappelear and R. Kobayashi, J. chem. Phys. 59 423 (1973)\")\ndataRefID.append(\"10.1063/1.1679822\")\n# burnett method\ndataClass.append(\"N/A\")\ndataT.append(np.array([126.584, 135.994, 147.583, 158.909, 173.485, 191.097]))\ndataB.append(np.array([-242.27, -215.20, -185.00, -161.51, -137.63, -114.29]))\ndataBerr.append(np.array([0.72, 0.12, 0.94, 0.23, 0.15, 0.25]))\n\n# original index in compilation: 28\nspeciesName.append(\"CH4\")\ndataRef.append(\"J. Bellm, W. Reineke, K. Schafer, and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 
78 282 (1974)\")\ndataRefID.append(\"10.1002/bbpc.19740780312\")\n# estimated accuracy is +/- 2\ndataClass.append(\"N/A\")\ndataT.append(np.array([300, 320, 340, 370, 400, 430, 460, 490, 520, 550]))\ndataB.append(np.array([-42.8, -35.2, -28.9, -21.3, -15.0, -9.6, -5.3, -2.0, 0.6, 2.5]))\ndataBerr.append(np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))\n\n# original index in compilation: 29\nspeciesName.append(\"CH4\")\ndataRef.append(\"R. Hahn, K. Schafer, and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 78 287 (1974)\")\ndataRefID.append(\"10.1002/bbpc.19740780313\")\n# B dtermined using B (296 K) = -44.5\n# quoted accuracy is +/-2 \ndataClass.append(\"N/A\")\ndataT.append(np.array([200.5, 231.2, 251.5, 273.0]))\ndataB.append(np.array([-106, -79.6, -65.2, -54.7]))\ndataBerr.append(np.array([2, 2, 2, 2]))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 92-hae/kle\nspeciesName.append(\"CH4\")\ndataRef.append(\"Haendel, G.; Kleinrahm, R.; Wagner, W.; J. Chem. Thermodyn. 24 (1992) 685\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([160, 180, 186, 189, 190.55, 193, 196, 200, 207, 220, \\\n 240, 260]))\ndataB.append(np.array([-160.37, -128.45, -120.61, -116.93, -115.08, -112.26, -108.91, -104.66, -97.71, -86.30, \\\n -71.76, -59.98]))\ndataBerr.append(np.array([0.50, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, \\\n 0.30, 0.30]))\n\n# original index in 2002 compilation: 94-tru\nspeciesName.append(\"CH4\")\ndataRef.append(\"Trusler, J. P. M.; J. Chem. Thermodyn. 26 (1994) 751\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([200, 225, 240, 250, 275, 300, 325, 350, 375]))\ndataB.append(np.array([-104.7, -82.20, -71.60, -65.40, -52.40, -41.90, -33.40, -26.40, -20.40]))\ndataBerr.append(np.array([0.30, 0.30, 0.28, 0.25, 0.20, 0.20, 0.15, 0.10, 0.10]))\n\n# original index in 2002 compilation: 88-mic/sch\nspeciesName.append(\"CH4\")\ndataRef.append(\"Michels, J. P. J.; Schouten, J. A.; Jaeschke, M.; Int. J. Thermophys. 9 (1988) 985\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.0, 273.0, 298.0, 298.0]))\ndataB.append(np.array([-53.37, -53.41, -42.77, -42.88]))\ndataBerr.append(np.array([0.80, 0.80, 0.80, 0.80]))\n\n# original index in 2002 compilation: 79-tra/was\nspeciesName.append(\"CH4\")\ndataRef.append(\"Trappeniers, N. J.; Wassenaar, T.; Abels, J. C.; Physica A: (Amsterdam). 98 (1979) 289\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 285.65, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-53.20, -47.58, -42.60, -33.90, -26.70, -20.70, -15.50, -11.10]))\ndataBerr.append(np.array([1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]))\n\n# original index in 2002 compilation: 70-hol\nspeciesName.append(\"CH4\")\ndataRef.append(\"Holleran, E. M.; J. Chem. Thermodyn. 
2 (1970) 779\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.16, 398.17, 423.18, 473.21, 498.23, \\\n 523.25, 548.26, 573.15, 598.29, 623.30, 648.31, 673.31]))\ndataB.append(np.array([-42.87, -34.26, -27.11, -21.10, -15.97, -11.54, -4.31, -1.31, \\\n 1.36, 3.75, 5.90, 7.86, 9.64, 11.25, 12.75]))\ndataBerr.append(np.array([0.10, 0.10, 0.08, 0.08, 0.08, 0.10, 0.15, 0.15, 0.15, 0.15, \\\n 0.10, 0.10, 0.08, 0.08, 0.08]))\n\n# original index in 2002 compilation: 96-hou/hol\nspeciesName.append(\"CH4\")\ndataRef.append(\"Hou, H.; Holste, J. C.; Hall, K. R.; Marsh, K. N.; Gammon, B. E.; J. Chem. Eng. Data. 41 (1996) 344\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([300, 320]))\ndataB.append(np.array([-42.43, -35.32]))\ndataBerr.append(np.array([0.01, 0.02]))\n\n# Enter data for oxygen, O2\n\n# original index in compilation: 1\nspeciesName.append(\"O2\")\ndataRef.append(\"H.A. Kuypers and H. Kamerlingh Onnes, Archs neerl. Sci. 6 227 (1923)\")\ndataRefID.append(\"N/A\")\n# % 3-term fit of PV data \ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 293.15]))\ndataB.append(np.array([-21.43, -16.76]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 2\nspeciesName.append(\"O2\")\ndataRef.append(\"L. Holborn and J. Otto, Z. Phys. 33 1 (1925)(*)\")\ndataRefID.append(\"10.1007/BF01328287\")\n# 5-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 323.15, 373.15]))\ndataB.append(np.array([-22.14, -10.81, -3.46]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 3\nspeciesName.append(\"O2\")\ndataRef.append(\"G.P. Nijhoff and W.H. Keesom, Communs phys. lab. Univ. Leiden 179b (1925)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00015241.pdf\")\ndataClass.append(\"class II\")\ndataT.append(np.array([120.60, 127.77, 137.87, 148.21, 154.58, 156.15, 157.15, 159.22, 163.17, 170.67, \\\n 193.16, 233.15]))\ndataB.append(np.array([-134.83, -121.76, -104.42, -90.40, -82.65, -81.38, -79.77, -78.24, -73.72, \\\n -67.51, -51.23, -32.98]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 4\nspeciesName.append(\"O2\")\ndataRef.append(\"G.A. Bottemley, D.S. Massie, and R. Whytlaw-Gray, Proc. R. Soc. A200 201 (1950)\")\ndataRefID.append(\"10.1098/rspa.1950.0012\")\ndataClass.append(\"class II\")\ndataT.append(np.array([295.21]))\ndataB.append(np.array([-15.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 5\nspeciesName.append(\"O2\")\ndataRef.append(\"D. White, J.-H. Hu, and H.L. Johnston, J. chem Phys. 21 1149 (1953)\")\ndataRefID.append(\"10.1063/1.1699153\")\n# values determined using a constant volume gas thermometer \ndataClass.append(\"class II\")\ndataT.append(np.array([80, 90, 100, 125, 150, 175, 200, 250]))\ndataB.append(np.array([-339, -237, -170, -112, -94, -62, -42, -35]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 6a\nspeciesName.append(\"O2\")\ndataRef.append(\"A. Michels, H.W. Schamp, and W. de Graaff, Physica. 20 1209 (1954)\")\ndataRefID.append(\"10.1016/S0031-8914(54)80265-5\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15]))\ndataB.append(np.array([-21.89, -16.24, -11.62]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 6b\nspeciesName.append(\"O2\")\ndataRef.append(\"A. Michels, H.W. Schamp, and W. 
de Graaff, Physica. 20 1209 (1954)\")\ndataRefID.append(\"10.1016/S0031-8914(54)80265-5\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15]))\ndataB.append(np.array([-21.80, -16.50, -11.91]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 7\nspeciesName.append(\"O2\")\ndataRef.append(\"T.L. Cottrell, R.A. Hamilton, and R.P. Taubinger, Trans. Faraday Soc. 52 1310 (1956)\")\ndataRefID.append(\"10.1039/TF9565201310\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([303, 333, 363]))\ndataB.append(np.array([-16.6, -7.3, -2.5]))\ndataBerr.append(np.array([5.1, 2.0, 2.3]))\n\n# original index in compilation: 8\nspeciesName.append(\"O2\")\ndataRef.append(\"R.A.H. Pool, G. Saville, T.M. Herrington, B.D.C. Shields, L.A.K. Staveley, Trans. Faraday Soc. 58 1692 (1962)\")\ndataRefID.append(\"10.1039/TF9625801692\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([90]))\ndataB.append(np.array([-245]))\ndataBerr.append(np.array([2]))\n\n# original index in compilation: 9\nspeciesName.append(\"O2\")\ndataRef.append(\"L.A. Weber, J. Res. natn. Bur. Stand. 74A 93 (1970)\")\ndataRefID.append(\"https://ntrs.nasa.gov/search.jsp?R=19700046840\")\n# 3-term fit of PV data at densities up to 0.006709 mol cm^-3\ndataClass.append(\"N/A\")\ndataT.append(np.array([85, 90, 95, 100, 105, 110, 115, 120, 125, 130, 135, \\\n 140, 145, 150, 155, 160, 165, 170, 175, 180, 185, 190, 195, 200, 205, \\\n 210, 215, 220, 225, 230, 235, 240, 245, 250, 255, 260, 265, 270, 275, \\\n 280, 285, 290, 295, 300]))\ndataB.append(np.array([-267.78, -240.67, -217.51, -197.54, -180.20, -165.05, \\\n -151.71, -139.91, -129.41, -120.02, -111.59, -103.98, -97.08, -90.81, \\\n -85.09, -79.84, -75.02, -70.58, -66.48, -62.67, -59.14, -55.85, -52.77, \\\n -49.89, -47.20, -44.66, -42.27, -40.02, -37.90, -35.89, -33.98, -32.17, \\\n -30.45, -28.81, -27.25, -25.77, -24.34, -22.98, -21.68, -20.44, -19.24, \\\n -18.09, -16.98, -15.92]))\ndataBerr.append(np.array([30, 30, 30, 15, 15, 15, 15, 5, 5, 5, 5, 1, 1, 0.25, \\\n 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3]))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 86-eli/hoa\nspeciesName.append(\"O2\")\ndataRef.append(\"Elias, E.; Hoang, N.; Sommer, J.; Schramm, B.; Ber. Bunsen-Ges. Phys. Chem. 90 (1986) 342\")\ndataRefID.append(\"10.1002/bbpc.19860900406\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([77.30, 87.20, 90.10]))\ndataB.append(np.array([-338.0, -253.0, -239.0]))\ndataBerr.append(np.array([6, 5, 5]))\n\n# original index in 2002 compilation: 84-wag/ewe\nspeciesName.append(\"O2\")\ndataRef.append(\"Wagner, W.; Ewers, J.; Schmidt, R.; Cryogenics. 
24 (1984) 37.\")\ndataRefID.append(\"10.1016/0011-2275(84)90056-0\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, \\\n 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 320, 340, 360, 380, 400, 420, \\\n 440, 460, 480, 500]))\ndataB.append(np.array([-269.0, -217.6, -183.8, -157.5, -136.1, -118.2, -103.3, -90.6, \\\n -79.8, -70.6, -62.7, -55.9, -49.9, -44.6, -39.9, -35.8, -32.1, -28.7, -25.7, \\\n -22.9, -20.4, -18.1, -15.9, -12.1, -8.7, -5.8, -3.1, -0.7, 1.5, 3.4, 5.3, 7.0, 8.6]))\ndataBerr.append(np.array([20.0, 15.0, 10.0, 8.0, 4.0, 2.0, 1.0, 0.3, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.3]))\n\n# original index in 2002 compilation: 88-fos/nat\nspeciesName.append(\"O2\")\ndataRef.append(\"Fostiropoulos, K.; Natour, G.; Sommer, J.; Schramm, B.; Ber. Bunsen-Ges. Phys. Chem. 92 (1988) 925\")\ndataRefID.append(\"10.1002/bbpc.198800221\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([290, 296.20, 300, 303, 320, 353, 403, 455, 476]))\ndataB.append(np.array([-17.3, -16.4, -15.5, -16.0, -12.0, -6.0, 0.2, 5.5, 6.6]))\ndataBerr.append(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))\n\n# Data entry for N2\n# original index in compilation: 1\nspeciesName.append(\"N2\")\ndataRef.append(\"H. Kamerlingh Onnes and A.T. van Urk, Communs phys. Lab. Univ. Leiden 169d, e(1924)\")\ndataRefID.append(\"N/A\")\ndataClass.append(\"class I\")\ndataT.append(np.array([126.83, 128.69, 131.62, 141.88, 151.96, 170.90, 192.05, 222.89, 249.53, 273.15, 293.15]))\ndataB.append(np.array([-101.8, -98.3, -94.5, -80.7, -69.83, -52.53, -39.12, -24.79, -16.73, -9.19, -5.48]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 2\nspeciesName.append(\"N2\")\ndataRef.append(\"L. Holborn and J. Otto, Z. Phys. 33 1 (1925)\")\ndataRefID.append(\"10.1007/BF01328287\")\n# 5-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([143.15, 173.15, 223.15, 273.15, 323.15, 373.15, 423.15, 473.15, 573.15, 673.15]))\ndataB.append(np.array([-79.79, -51.86, -26.38, -10.34, -0.26, 6.14, 11.53, 15.34, 20.64, 23.51]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 3\nspeciesName.append(\"N2\")\ndataRef.append(\"T.T.H. Verschoyle, Proc. R. Soc. A111 552 (1926)\")\ndataRefID.append(\"10.1098/rspa.1926.0081\")\n# 3-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 293.15]))\ndataB.append(np.array([-11.11, -6.27]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 6\nspeciesName.append(\"N2\")\ndataRef.append(\"J. Otto, A. Michels, and H. Wouters, Phys. Z. 35 97 (1934)\")\ndataRefID.append(\"N/A\")\n# 6-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-10.14, -4.87, -0.50, 3.25, 6.23, 9.04, 11.37]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 7\nspeciesName.append(\"N2\")\ndataRef.append(\"A. Michels, H. Wouters, and J. de Boer, Physica,'s Grav. 1 587 (1934). (See also Physica,'s Grav. 
3 585 (1936))\")\ndataRefID.append(\"10.1016/S0031-8914(34)80245-5\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-10.27, -4.71, -0.28, 3.20, 6.56, 9.45, 12.29]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 10\nspeciesName.append(\"N2\")\ndataRef.append(\"A. Michels, R.J. Lunbeck, and G.J. Wolkers, Physica,'s Grav. 17 801 (1951)\")\ndataRefID.append(\"10.1016/0031-8914(51)90105-X\")\n# 8-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-10.05, -4.46, -0.25, 3.38, 6.50, 9.21, 11.51]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 11a\nspeciesName.append(\"N2\")\ndataRef.append(\"D. White, J.-H. Hu, and H.L. Johnston, J. chem. Phys. 21 1149 (1953)\")\ndataRefID.append(\"10.1063/1.1699153\")\n# values of B determined using constant volume gas thermometer\ndataClass.append(\"class II\")\ndataT.append(np.array([80, 85, 90, 95, 100, 110, 125, 150, 175, 200, 250]))\ndataB.append(np.array([-265, -247, -213, -174, -158, -142, -90, -56, -29, -34, -23]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 11b\nspeciesName.append(\"N2\")\ndataRef.append(\"D. White, J.-H. Hu, and H.L. Johnston, J. chem. Phys. 21 1149 (1953)\")\ndataRefID.append(\"10.1063/1.1699153\")\n# the following obtained from gas density measurements, these results are considered more accurate\ndataClass.append(\"class II\")\ndataT.append(np.array([80, 85, 90, 95, 100, 110, 125, 150, 175, 200, 250]))\ndataB.append(np.array([-251, -223, -199, -178, -161, -134, -105, -71, -50, -35, -16]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 12\nspeciesName.append(\"N2\")\ndataRef.append(\"W.C. Pfefferle, Jr., J.A. Goff, and J.G. Miller, J. chem Phys. 23 509 (1955)\")\ndataRefID.append(\"10.1063/1.1742020\")\n# burnett method\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.15, 303.15, 303.15]))\ndataB.append(np.array([-4.17, -4.13, -4.17]))\ndataBerr.append(np.array([0.06, 0.08, 0.04]))\n\n# original index in compilation: 13\nspeciesName.append(\"N2\")\ndataRef.append(\"A. van Ittereek, H. Lambert, and G. Forres, Appl. scient. Res. 6A 15 (1956)\")\ndataRefID.append(\"10.1007/BF03184684\")\n# values of B calculated from velocity of sound measurements\ndataClass.append(\"class II\")\ndataT.append(np.array([70, 80, 90, 100, 110, 120, 130, 140, 150]))\ndataB.append(np.array([-306, -239, -193, -159, -134, -113, -97, -83, -72]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 14\nspeciesName.append(\"N2\")\ndataRef.append(\"J.A. Huff and T.M. Reed, J. chem. Engng Data 8 306 (1963)\")\ndataRefID.append(\"10.1021/je60018a010\")\n# Values of B originally given by R.D. Gunn, M.S. Thesis, University of California (Berkeley) (1958)\ndataClass.append(\"class I\")\ndataT.append(np.array([277.6, 298.2, 310.9, 323.2, 348.2, 373.2, 398.2, 427.6, 444.3, 460.9, 477.6, 510.9]))\ndataB.append(np.array([-8.5, -4.84, -2.0, -0.52, 3.31, 6.19, 9.05, 11.6, 13.1, 14.2, 15.4, 17.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 15\nspeciesName.append(\"N2\")\ndataRef.append(\"R.A.H. Pool, G. Saville, T.M. Herrington, B.D.C. Shields, L.A.K. Staveley, Trans. Faraday Soc. 
58 1692 (1962)\")\ndataRefID.append(\"10.1039/TF9625801692\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([90]))\ndataB.append(np.array([-201]))\ndataBerr.append(np.array([2]))\n\n# original index in compilation: 16\nspeciesName.append(\"N2\")\ndataRef.append(\"R.J. Witonzky and J.G. Miller, J. Am. chem. Soc. 85 282 (1963)\")\ndataRefID.append(\"10.1021/ja00886a007\")\ndataClass.append(\"class I\")\ndataT.append(np.array([448.15, 523.15, 598.15, 673.15, 748.15]))\ndataB.append(np.array([14.26, 18.32, 20.80, 23.41, 24.73]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 17\nspeciesName.append(\"N2\")\ndataRef.append(\"F.B. Canfield, T.W. Leland, and R. Kobayashi, Adv. Cryogen. Engng 8 146 (1963)\")\ndataRefID.append(\"N/A\")\n# Burnett method\ndataClass.append(\"class I\")\ndataT.append(np.array([133.15, 143.14, 158.15, 183.15, 223.13, 273.15]))\ndataB.append(np.array([-91.95, -79.56, -63.50, -45.35, -25.17, -9.70]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 18\nspeciesName.append(\"N2\")\ndataRef.append(\"A.E. Hoover, F.B. Canfield, R. Kobayashi, and T.W. Leland, Jr., J. chem Engng Data 9 568 (1964)\")\ndataRefID.append(\"10.1021/je60023a030\")\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 223.13, 183.15, 158.15, 143.14, 133.15]))\ndataB.append(np.array([-10.56, -26.05, -45.15, -64.14, -76.59, -91.99]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 19\nspeciesName.append(\"N2\")\ndataRef.append(\"R.W. Crain, Jr. and R.E. Sonntag, Adv. cryogen. Engng 11 379 (1966)\")\ndataRefID.append(\"10.1007/978-1-4757-0522-5_42\")\ndataClass.append(\"class I\")\ndataT.append(np.array([143.15, 163.15, 203.15, 273.15]))\ndataB.append(np.array([-79.45, -59.42, -33.85, -10.26]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 21\nspeciesName.append(\"N2\")\ndataRef.append(\"P.S. Ku and B.F. Dodge, J. chem Engng Data 12 158 (1967)\")\ndataRefID.append(\"10.1021/je60033a001\")\n# 3 term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([311.65, 373.15]))\ndataB.append(np.array([-2.73, 5.97]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in compilation: 23\nspeciesName.append(\"N2\")\ndataRef.append(\"K.R. Hall and F.B. Canfield, Physica. 47 219 (1970)\")\ndataRefID.append(\"10.1016/0031-8914(70)90281-8\")\n# Burnett method\ndataClass.append(\"class II\")\ndataT.append(np.array([103.15, 113.15]))\ndataB.append(np.array([-148.46, -117.78]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in compilation: 25\nspeciesName.append(\"N2\")\ndataRef.append(\"D.R. Roe, PhD thesis, University of London (1972)\")\ndataRefID.append(\"N/A\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([155.89, 181.86, 192.64, 204.61, 204.61, 218.87, 218.87, 234.05, 248.54, \\\n 248.54, 263.08, 276.94, 291.41]))\ndataB.append(np.array([-65.95, -45.85, -39.60, -33.60, -33.50, -27.30, -27.30, -21.60, -17.00, \\\n -16.90, -12.95, -9.40, -6.20]))\ndataBerr.append(np.array([0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20]))\n\n# original index in compilation: 26\nspeciesName.append(\"N2\")\ndataRef.append(\"G. Pocock and C.J. Wormald, J.C.S. 
Faraday I 71 705 (1975)\")\ndataRefID.append(\"10.1039/F19757100705\")\n# B values calculated from measured isothermal Joule-Thomsen coefficients\n# errors in B estimated at 1% below 130 K, 1 above 130 K\ndataClass.append(\"class I\")\ndataT.append(np.array([75, 80, 85, 90, 95, 100, 110, 120, 130, 140, 150, 175, 200, 225, 250, 275, \\\n 300, 350, 400, 450, 500, 600, 700]))\ndataB.append(np.array([-302, -264, -233, -207, -187, -169, -140, -118, -100, -86.2, -74.3, -52.0, \\\n -36.4, -25.0, -16.3, -9.6, -4.2, 3.8, 9.5, 13.6, 16.7, 21.2, 24.1]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 92-ewi/tru-1\nspeciesName.append(\"N2\")\ndataRef.append(\"Ewing, M. B.; Trusler, J. P. M.; Physica A: (Amsterdam). 184 (1992) 415\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([75, 80, 90, 100, 110, 125, 150, 200, 250, 300, 400, 500, 600, 700]))\ndataB.append(np.array([-276.1, -243.9, -195.0, -159.8, -133.3, -104.0, -71.5, -35.6, -16.3, -4.5, 9.2, 16.6, 21.1, 24.1]))\ndataBerr.append(np.array([0.5, 0.5, 0.4, 0.3, 0.3, 0.4, 0.4, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]))\n\n# original index in 2002 compilation: 79-sch/geh-1\nspeciesName.append(\"N2\")\ndataRef.append(\"Schramm, B.; Gehrmann, R.; J. Chem. Soc., Faraday Trans. 1. 75 (1979) 479\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([77.30, 87.20, 90.10]))\ndataB.append(np.array([-278.0, -222.0, -204.0]))\ndataBerr.append(np.array([7.0, 7.0, 7.0]))\n\n# original index in 2002 compilation: 91-sch/eli\nspeciesName.append(\"N2\")\ndataRef.append(\"Schramm, B.; Elias, E.; Kern, L.; Natour, G.; Schmitt, A.; Weber, C.; Ber. Bunsen-Ges. Phys. Chem. 95 (1991) 615\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([77.30, 77.30, 77.30]))\ndataB.append(np.array([-266.0, -265.0, -264.0]))\ndataBerr.append(np.array([4.0, 4.0, 4.0]))\n\n# original index in 2002 compilation: 80-sch/geh\nspeciesName.append(\"N2\")\ndataRef.append(\"Schmiedel, H.; Gehrmann, R.; Schramm, B.; Ber. Bunsen-Ges. Phys. Chem. 84 (1980) 721\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([213.0, 223.0, 242.0, 262.0, 276.0, 295.0, 330.0, 365.0, 400.0, 425.0, 450.0, 475.0]))\ndataB.append(np.array([-29.5, -25.5, -19.0, -13.3, -9.8, -5.7, 0.4, 5.2, 9.0, 11.6, 13.4, 15.1]))\ndataBerr.append(np.array([2.0, 2.0, 2.0, 2.0, 1.5, 1.5, 1.0, 1.5, 1.5, 2.0, 2.0, 2.0]))\n\n# original index in 2002 compilation: 92-zha/sch\nspeciesName.append(\"N2\")\ndataRef.append(\"Zhang, W.; Schouten, J. A.; Hinze, H. M.; Jaeschke, M.; J. Chem. Eng. Data. 37 (1992) 114\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([269.3, 273.15, 290.0, 293.15, 310.0, 313.15, 323.15, 330.0, 350.0, 353.15]))\ndataB.append(np.array([-11.2, -10.2, -6.5, -5.9, -2.7, -2.1, -0.4, 0.6, 3.6, 3.9]))\ndataBerr.append(np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]))\n\n# original index in 2002 compilation: 95-esp/lem\nspeciesName.append(\"N2\")\ndataRef.append(\"Esper, G.; Lemming, W.; Beckermann, W.; Kohler, F.; Fluid Phase Equilib. 
105 (1995) 173\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([249.97, 269.83, 289.64, 310.23, 331.88, 350.81]))\ndataB.append(np.array([-15.5, -10.3, -5.9, -2.1, 1.3, 3.9]))\ndataBerr.append(np.array([2.0, 1.0, 0.6, 0.2, 0.1, 0.2]))\n\n# original index in 2002 compilation: 91-bou/moo\nspeciesName.append(\"N2\")\ndataRef.append(\"Boushehri, A.; Moosavipour, S. H.; Iran. J. Chem. Eng. 10 (1991) 35\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([293.15, 313.15, 333.15]))\ndataB.append(np.array([-6.0, -2.8, 3.0]))\ndataBerr.append(np.array([0.4, 0.2, 0.2]))\n\n# original index in 2002 compilation: 88-dus/kle\nspeciesName.append(\"N2\")\ndataRef.append(\"Duschek, W.; Kleinrahm, R.; Wagner, W.; Jaeschke, M.; J. Chem. Thermodyn. 20 (1988) 1069\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 283.15, 293.15, 303.15, 313.15, 323.15]))\ndataB.append(np.array([-10.3, -8.0, -5.9, -4.0, -2.2, -0.6]))\ndataBerr.append(np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]))\n\n# original index in 2002 compilation: 89-bru/hwa\nspeciesName.append(\"N2\")\ndataRef.append(\"Brugge, H. B.; Hwang, C.-A.; Rogers, W. J.; Holste, J. C.; Hall, K. R.; Lemming, W.; Esper, G. J.; Marsh, K. N.; Gammon, B. E.; Physica A: (Amsterdam). 156 (1989) 382\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([300.0, 320.0]))\ndataB.append(np.array([-4.5, -1.2]))\ndataBerr.append(np.array([0.1, 0.1]))\n\n# original index in 2002 compilation: 88-pat/jof\nspeciesName.append(\"N2\")\ndataRef.append(\"Patel, M. R.; Joffrion, L. L.; Eubank, P. T.; AIChE J. 34 (1988) 1229\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([103.15, 113.15, 303.15]))\ndataB.append(np.array([-148.6, -118.5, -4.2]))\ndataBerr.append(np.array([1.5, 1.5, 1.5]))\n\n# original index in 2002 compilation: 58-bot/rem\nspeciesName.append(\"N2\")\ndataRef.append(\"Bottomley, G. A.; Remmington, T. A.; J. Chem. Soc. (1958) 3800\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([295.20, 308.20]))\ndataB.append(np.array([-5.3, -2.9]))\ndataBerr.append(np.array([0.1, 0.1]))\n \n# original index in 2002 compilation: 58-bro/raw\nspeciesName.append(\"N2\")\ndataRef.append(\"Brooks, G. L.; Raw, C. J. G.; Trans. Faraday Soc. 54 (1958) 972\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.20]))\ndataB.append(np.array([-4.0]))\ndataBerr.append(np.array([20]))\n\n# original index in 2002 compilation: 57-kra/mil\nspeciesName.append(\"N2\")\ndataRef.append(\"Kramer, G. M.; Miller, J. G.; J. Phys. Chem. 61 (1957) 785\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.15]))\ndataB.append(np.array([-3.5]))\ndataBerr.append(np.array([0.3]))\n\n# original index in 2002 compilation: 84-izu\nspeciesName.append(\"N2\")\ndataRef.append(\"Izuchi, M.; Keiryo Kenkyusho Hokoku. 33 (1984) 191\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([290.0, 300.0]))\ndataB.append(np.array([-6.9, -4.9]))\ndataBerr.append(np.array([0.1, 0.1]))\n\n# original index in 2002 compilation: 91-lop/roz\nspeciesName.append(\"N2\")\ndataRef.append(\"Lopatinskii, E. S.; Rozhnov, M. S.; Zhdanov, V. I.; Parnovskii, S. L.; Kudrya, Y. N.; Zh. Fiz. Khim. 
65 (1991) 2060\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([293.15, 293.15]))\ndataB.append(np.array([-5.5, -5.4]))\ndataBerr.append(np.array([3.3, 3.3]))\n\n# H2 data input \n\n# original index in 1980 compilation: 1\nspeciesName.append(\"H2\")\ndataRef.append(\"H. Kamerlingh Onnes and C. Braak, Communs phys. Lab. Univ. Leiden 100a (1907)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00013658.pdf\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([55.74, 60.33, 68.45, 77.88, 90.34, 109.01, 133.27, 169.58]))\ndataB.append(np.array([-26.95, -22.60, -15.50, -9.69, -5.39, 0.41, 5.14, 8.82]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\nspeciesName.append(\"H2\")\ndataRef.append(\"H. Kamerlingh Onnes and C. Braak, Communs phys. Lab. Univ. Leiden 100b (1907)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00013659.pdf\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 373.15]))\ndataB.append(np.array([13.02, 14.17]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"H2\")\ndataRef.append(\"W.J. de Haas and H. Kamerlingh Onnes, Communs phys. Lab. Univ. Leiden 127c (1912)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00012986.pdf\")\n# comment \ndataClass.append(\"class II\")\ndataT.append(np.array([15.89, 17.69, 20.52]))\ndataB.append(np.array([-244.9, -168.6, -143.0]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"H2\")\ndataRef.append(\"L. Holborn, Annalen d. Phys. 63 674 (1920)\")\ndataRefID.append(\"10.1002/andp.19203682305\")\n# 3-term fit of PV data (P range 20 - 100 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 293.15, 323.15, 373.15]))\ndataB.append(np.array([14.00, 14.47, 15.17, 15.58]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"H2\")\ndataRef.append(\"L. Holborn, and J. Otto, Z. Phys. 33 1 (1925); 38 359 (1926)\")\ndataRefID.append(\"10.1007/BF01328287\")\n# 5-term fit of PV data (P series; terms in P^0, P^1, P^2, P^4, and P^6; max pressure 100 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([65.25, 90.15, 123.15, 173.15, 223.15, 273.15, 323.15, 373.15, 473.15]))\ndataB.append(np.array([-18.36, -5.54, 2.95, 9.16, 12.10, 14.00, 15.17, 15.58, 15.71]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"H2\")\ndataRef.append(\"F.P.G.A.J. van Agt and H.K. Onnes, Communs phys. Lab. Univ. Leiden 176b (1925)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00015209.pdf\")\ndataClass.append(\"class II\")\ndataT.append(np.array([14.57, 15.71, 16.72, 18.23, 18.29, 20.60, 20.62, 69.93, 90.30]))\ndataB.append(np.array([-245.6, -216.8, -194.0, -170.5, -170.1, -141.4, -140.2, -17.01, -8.16]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"H2\")\ndataRef.append(\"T.T.H. Verschoyle, Proc. R. Soc. A111 552 (1926)\")\ndataRefID.append(\"10.1098/rspa.1926.0081\")\n# 3-term fit of PV data. Maximum pressure 210 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 293.15]))\ndataB.append(np.array([14.05, 14.59]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7\nspeciesName.append(\"H2\")\ndataRef.append(\"C.W. Gibby, C.C. Tanner, and I. Masson, Proc. R. Soc. 
A122 283 (1928)\")\ndataRefID.append(\"10.1098/rspa.1929.0020\")\n# 2-term fit of PV data, maximum pressure 125 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.53, 398.35, 423.25, 448.15]))\ndataB.append(np.array([14.71, 15.05, 15.39, 15.54, 15.74, 15.54, 15.41]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"H2\")\ndataRef.append(\"G.P. Nijhoff and W.H. Keesom, Communs phys. Lab. Univ. Leiden 188d (1928)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00015596.pdf\")\ndataClass.append(\"class II\")\ndataT.append(np.array([273.16, 293.16, 373.16]))\ndataB.append(np.array([13.58, 14.16, 15.39]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"H2\")\ndataRef.append(\"G.P. Nijhoff and W.H. Keesom, Communs phys. Lab. Univ. Leiden 188e (1928)\")\ndataRefID.append(\"http://www.dwc.knaw.nl/DL/publications/PU00015597.pdf\")\ndataClass.append(\"class II\")\ndataT.append(np.array([24.84, 31.82, 36.60, 41.64, 47.62]))\ndataB.append(np.array([-108.62, -76.48, -57.05, -45.55, -34.54]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 10\nspeciesName.append(\"H2\")\ndataRef.append(\"G.A. Scott, Proc. R. Soc. A125 330 (1929)\")\ndataRefID.append(\"10.1098/rspa.1929.0171\")\ndataClass.append(\"class II\")\n# 3-term fit of PV data. Max press. 170 atm\ndataT.append(np.array([298.15]))\ndataB.append(np.array([14.60]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 11\nspeciesName.append(\"H2\")\ndataRef.append(\"C.C. Tanner and I. Masson, Proc. R. Soc A126 268 (1930)\")\ndataRefID.append(\"10.1098/rspa.1930.0007\")\ndataClass.append(\"class I\")\n# 2-term fit of PV data (3-term fit at 298 K). Max press 126 atm\ndataT.append(np.array([298.15, 323.15, 348.15, 373.15, 398.15, 423.15, 447.15]))\ndataB.append(np.array([14.71, 15.25, 15.45, 15.63, 15.81, 15.95, 15.92]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 13\nspeciesName.append(\"H2\")\ndataRef.append(\"D.T.A. Townend and L.A. Bhatt, Proc. R. Soc. A134 502 (1932)\")\ndataRefID.append(\"10.1098/rspa.1931.0210\")\ndataClass.append(\"class I\")\n# 4-term fit of PV data (P series). Max pressure 600 atm\ndataT.append(np.array([273.15, 298.15]))\ndataB.append(np.array([14.64, 13.65]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 14\nspeciesName.append(\"H2\")\ndataRef.append(\"E.A. Long and O.L.I. Brown, J. Am. chem. Soc. 59 1922 (1937)\")\ndataRefID.append(\"10.1021/ja01289a039\")\ndataClass.append(\"class II\")\n# double constant-volume gas thermometer\ndataT.append(np.array([20.87, 24.11, 27.65, 32.43, 37.08, 41.64, 46.45]))\ndataB.append(np.array([-136.5, -110.3, -90.2, -70.1, -56.0, -45.6, -37.2]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 16a\nspeciesName.append(\"H2\")\ndataRef.append(\"A. Michels and M. Goudeket, Physica. 8 347 (1941)\")\ndataRefID.append(\"10.1016/S0031-8914(41)90076-9\")\ndataClass.append(\"class I\")\n# (a) 3-term fit of PV data max pressure 50 atm\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([13.71, 13.81, 14.21, 14.76, 14.72, 14.94, 15.10]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16b\nspeciesName.append(\"H2\")\ndataRef.append(\"A. Michels and M. Goudeket, Physica. 
8 347 (1941)\")\ndataRefID.append(\"10.1016/S0031-8914(41)90076-9\")\ndataClass.append(\"class I\")\n# (b) 4-term fit of PV data. max press. 50 atm\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([13.50, 13.58, 13.86, 14.14, 14.52, 14.34, 14.50]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16c\nspeciesName.append(\"H2\")\ndataRef.append(\"A. Michels and M. Goudeket, Physica. 8 347 (1941)\")\ndataRefID.append(\"10.1016/S0031-8914(41)90076-9\")\ndataClass.append(\"class I\")\n# (c) 3-term fit of PV data. max press 230 atm\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([13.81, 14.12, 14.55, 14.90, 15.16, 15.37, 15.59]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 20\nspeciesName.append(\"H2\")\ndataRef.append(\"T.L. Cottrell, R.A. Hamilton, and R.P. Taubinger, Trans. Faraday Soc. 52 1310 (1956)\")\ndataRefID.append(\"10.1039/TF9565201310\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([303.2]))\ndataB.append(np.array([14.1]))\ndataBerr.append(np.array([1.7]))\n\n# original index in 1980 compilation: 21\nspeciesName.append(\"H2\")\ndataRef.append(\"J.J.M. Beenakker, F.H. Varekamp, and A. Van Itterbeek, Physica 25 9 (1959)\")\ndataRefID.append(\"10.1016/S0031-8914(59)90626-3\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([20.4]))\ndataB.append(np.array([-152]))\ndataBerr.append(np.array([1]))\n\n# original index in 1980 compilation: 22\nspeciesName.append(\"H2\")\ndataRef.append(\"A. Michels, W. de Graff, T. Wassenaar, J.M.H. Levelt, and P. Louwerse, Physica. 25 25 (1959)\")\ndataRefID.append(\"10.1016/S0031-8914(59)90713-X\")\ndataClass.append(\"class I\")\n# PVT data 6-term fit in density series. P range 5 - 3000 atm\ndataT.append(np.array([98.15, 103.15, 113.15, 123.15, 138.15, 153.15, 173.15, \\\n 198.15, 223.15, 248.15, 273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-2.99, -1.60, -0.80, 2.68, 5.03, 6.98, 8.93, 10.79, 12.05,\\\n 13.03, 13.74, 14.37, 14.92, 15.38, 15.67, 15.86, 16.08]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 23\nspeciesName.append(\"H2\")\ndataRef.append(\"F.H. Varekamp and J.J.M. Beenakker, Physica 25 889 (1959)\")\ndataRefID.append(\"10.1016/0031-8914(59)90010-2\")\ndataClass.append(\"class II\")\n# differential method - the non-ideality of the gas is comprared with that of helium\ndataT.append(np.array([14, 15, 16, 17, 18, 19, 20, 21]))\ndataB.append(np.array([-255, -232, -212, -196, -181, -168, -156, -146]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 24a\nspeciesName.append(\"H2\")\ndataRef.append(\"H.F.P. Knaap, M. Knoester, C.M. Knobler, and J.J.M. Beenakker, Physica. 28 21 (1962)\")\ndataRefID.append(\"10.1016/0031-8914(62)90088-5\")\ndataClass.append(\"class I\")\n# low pressure differential method\ndataT.append(np.array([20.47, 20.53, 20.58, 34.46, 36.21, 39.17, 39.36]))\ndataB.append(np.array([-151.3, -150.8, -150.2, -72.4, -67.2, -59.6, -59.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 24b\nspeciesName.append(\"H2\")\ndataRef.append(\"H.F.P. Knaap, M. Knoester, C.M. Knobler, and J.J.M. Beenakker, Physica. 
28 21 (1962)\")\ndataRefID.append(\"10.1016/0031-8914(62)90088-5\")\ndataClass.append(\"class II\")\n# The authors adopt the value B = -149.7 at 20.4 K, and give the following recommended values for B \ndataT.append(np.array([14, 15, 16, 17, 18, 19, 20, 21, 25, 30, 35, 40, 45, 50, 55, 60, 65]))\ndataB.append(np.array([-253, -229, -210, -193, -178, -165, -154, -144, -113, -85.8, -68.6, -55.6, -45.3, -37.2, -30.0, -23.7, -18.4]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 25\nspeciesName.append(\"H2\")\ndataRef.append(\"Z.E.H.A. El Hadi, J.A. Dorrepaal, and M. Durieux, Physica. 41 320 (1969)\")\ndataRefID.append(\"10.1016/0031-8914(69)90121-9\")\ndataClass.append(\"class II\")\n# comment \ndataT.append(np.array([19.26, 20.37, 21.40, 21.71, 22.19, 22.69, 23.26]))\ndataB.append(np.array([-158.5, -145.3, -134.0, -131.2, -126.3, -121.7, -117.3]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 27\nspeciesName.append(\"H2\")\ndataRef.append(\"B. Schramm and H. Schmiedel (unpublished) (1979)\")\ndataRefID.append(\"N/A\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([295, 350, 400, 450]))\ndataB.append(np.array([14.5, 15.0, 15.3, 15.4]))\ndataBerr.append(np.array([4, 4, 4, 4]))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 91-lop/roz\nspeciesName.append(\"H2\")\ndataRef.append(\"Lopatinskii, E. S.; Rozhnov, M. S.; Zhdanov, V. I.; Parnovskii, S. L.; Kudrya, Y. N.; Zh. Fiz. Khim. 65 (1991) 2060\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([293.15, 293.15]))\ndataB.append(np.array([15.4, 15.4]))\ndataBerr.append(np.array([21.0, 21.0]))\n\n# original index in 2002 compilation: 91-sch/eli\nspeciesName.append(\"H2\")\ndataRef.append(\"Schramm, B.; Elias, E.; Kern, L.; Natour, G.; Schmitt, A.; Weber, C.; Ber. Bunsen-Ges. Phys. Chem. 95 (1991) 615\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([296.15]))\ndataB.append(np.array([14.1]))\ndataBerr.append(np.array([0.5]))\n\n# original index in 2002 compilation: 77-mih/sag\nspeciesName.append(\"H2\")\ndataRef.append(\"Mihara, S.; Sagara, H.; Arai, Y.; Saito, S.; J. Chem. Eng. Jpn. 10 (1977) 395\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([298.15, 323.15, 348.15]))\ndataB.append(np.array([14.2, 14.7, 15.3]))\ndataBerr.append(np.array([0.5, 0.5, 0.5]))\n\n# original index in 2002 compilation: 80-per/sch\nspeciesName.append(\"H2\")\ndataRef.append(\"Perez, S.; Schmiedel, H.; Schramm, B.; Z. Phys. Chem. (Munich). 123 (1980) 35\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\n# comment\ndataT.append(np.array([300, 350, 400, 450, 500]))\ndataB.append(np.array([14.3, 15.0, 15.4, 15.6, 15.6]))\ndataBerr.append(np.array([2.0, 2.0, 2.0, 2.0, 2.0]))\n\n# data for CO\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"CO\")\ndataRef.append(\"G.A. Scott, Proc. R. Soc. A125 330 (1929)\")\ndataRefID.append(\"10.1098/rspa.1929.0171\")\n# 3-term fit of PV data, max press 170 atm \ndataClass.append(\"class I\")\ndataT.append(np.array([298.15]))\ndataB.append(np.array([-9.84]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"CO\")\ndataRef.append(\"D.T.A. Townend and L.A. Bhatt, Proc. R. Soc. A134 502 (1932)\")\ndataRefID.append(\"10.1098/rspa.1931.0210\")\n# 4-term fit of PV data. 
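# (Illustrative aside, not part of the original compilation.) Once all of the
# entries in this file have been appended, the parallel lists can be gathered into
# a single tidy table; the helper below is a sketch, and its name and column
# labels are only examples.
def build_dataframe():
    # one row per (reference, temperature) point, carrying B and its assigned error
    records = []
    for name, ref, refID, cls, T_arr, B_arr, Berr_arr in zip(
            speciesName, dataRef, dataRefID, dataClass, dataT, dataB, dataBerr):
        for T_i, B_i, Berr_i in zip(T_arr, B_arr, Berr_arr):
            records.append({'species': name, 'reference': ref, 'refID': refID,
                            'class': cls, 'T_K': T_i, 'B_cm3mol': B_i,
                            'Berr_cm3mol': Berr_i})
    return pd.DataFrame(records)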
Max press 600 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15]))\ndataB.append(np.array([-16.44, -9.98]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"CO\")\ndataRef.append(\"G.A. Bottomley, D.S. Massie, and R. Whytlaw-Gray, Proc. R. Soc. A200 201 (1950)\")\ndataRefID.append(\"10.1098/rspa.1950.0012\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([295.21]))\ndataB.append(np.array([-8.43]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5a\nspeciesName.append(\"CO\")\ndataRef.append(\"A. Michels, J.M. Lupton, T. Wassenaar, and W. de Graaff, Physica. 18 121 (1952)\")\ndataRefID.append(\"10.1016/S0031-8914(52)80275-7\")\n# 3 term fit to PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-14.19, -8.28, -3.40, 0.90, 4.49, 7.52, 10.04]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5b\nspeciesName.append(\"CO\")\ndataRef.append(\"A. Michels, J.M. Lupton, T. Wassenaar, and W. de Graaff, Physica. 18 121 (1952)\")\ndataRefID.append(\"10.1016/S0031-8914(52)80275-7\")\n# 8 term fit to PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-13.65, -7.95, -3.29, 1.06, 4.57, 7.67, 10.16]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"CO\")\ndataRef.append(\"V. Mathot, L.A.K. Staveley, J.A. Young, and N.G. Parsonage, Trans. Faraday Soc. 52 1488 (1956)\")\ndataRefID.append(\"10.1063/1.1742360\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([90.67]))\ndataB.append(np.array([-233]))\ndataBerr.append(np.array([5]))\n\n# original index in 1980 compilation: 7\nspeciesName.append(\"CO\")\ndataRef.append(\"J.F. Connolly, Physics Fluids 7 1023 (1964)\")\ndataRefID.append(\"10.1063/1.1711317\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([323.2, 423.2, 473.2, 513.2, 573.2]))\ndataB.append(np.array([-3.7, 9.6, 14.5, 17.3, 20.5]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 9\nspeciesName.append(\"CO\")\ndataRef.append(\"B. Schramm and H. Schmiedel (unpublished) (1979)\")\ndataRefID.append(\"N/A\")\n# Estimated eror in B is +/- 3\ndataClass.append(\"N/A\")\ndataT.append(np.array([295, 330, 365, 400, 425, 450, 475]))\ndataB.append(np.array([-9.0, -4.3, 0.3, 5.0, 7.5, 9.6, 11.0]))\ndataBerr.append(np.array([3, 3, 3, 3, 3, 3, 3]))\n\n# original index in 1980 compilation: 10\nspeciesName.append(\"CO\")\ndataRef.append(\"B. Schramm and R. Gehrmann (unpublished) (1979)\")\ndataRefID.append(\"N/A\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([213, 223, 242, 262, 272, 295]))\ndataB.append(np.array([-35.0, -30.5, -22.8, -16.5, -13.0, -9.0]))\ndataBerr.append(np.array([6, 6, 6, 6, 6, 6]))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 83-goo-1\nspeciesName.append(\"CO\")\ndataRef.append(\"Goodwin, R. D.; Cryogenics. 
23 (1983) 403\")\ndataRefID.append(\"10.1016/0011-2275(83)90209-6\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([120, 140, 160, 180, 200, 220, 240, 260, 280, 295, 300, 330, 400, 450]))\ndataB.append(np.array([-127.8, -94.7, -71.5, -54.8, -42, -32.3, -24.3, -17.5, -12.0, -9.0, -7.2, -4.3, 5.0, 9.6]))\ndataBerr.append(np.array([0.4, 0.5, 0.6, 0.7, 0.8, 0.2, 0.3, 0.3, 0.4, 0.4, 0.4, 0.5, 0.6, 0.6]))\n\n# need to dig up further references: 63-mul/kir-1, 80-sch/geh, 82-sch/mue, 83-goo-1(set- 2, set-3), 86-eli/hoa, 87-bar/cal, 91-bou/moo, 91-sch/eli, 96-vat/sch\n\n# data for Ar\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"Ar\")\ndataRef.append(\"H. Kamerlingh Onnes and C.A. Crommelin, Communs phys. Lab. Univ. Leiden 188b (1910)\")\ndataRefID.append(\"TO DO\")\n# 6-term fit of PV data (V series; terms in V^0, V^-1, V^-2, V^-4, V^-6, V^-8). \n# (Values above 200 K are probably high)\n# Maximum pressure 60 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([151.94, 152.91, 153.96, 156.53, 157.29, 159.35, 163.27, 170.64, 186.10, 215.43, 273.15, 293.54]))\ndataB.append(np.array([-82.53, -82.17, -81.19, -79.01, -79.26, -75.70, -72.23, -65.09, -53.85, -37.03, -16.56, -12.55]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"Ar\")\ndataRef.append(\"L. Holborn and J. Otto, Z. Phys. 33 1 (1925)\")\ndataRefID.append(\"TO DO\")\n# 5-term fit of PV data (P series; terms in P^0, P^1, P^2, P^4, P^6). \n# Maximum pressure 100 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([173.15, 223.15, 273.15, 323.15, 373.15, 423.15, 473.15, 573.15, 673.15]))\ndataB.append(np.array([-64.32, -37.78, -22.08, -11.02, -4.29, 1.16, 4.67, 11.22, 15.29]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"Ar\")\ndataRef.append(\"C.C. Tanner and I. Masson, Proc. R. Soc. A126 268 (1930)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P series). \n# Maximum pressure 126 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.15, 398.15, 423.15, 447.15]))\ndataB.append(np.array([-16.35, -11.49, -7.48, -4.10, -0.72, 2.17, 3.72]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4a\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, Hub. Wijker, and Hk. Wijker, Physia, 15 627 (1949)\")\ndataRefID.append(\"TO DO\")\n# 7-term fit of PV data (P-> 2900 atm). \ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-21.13, -15.49, -11.06, -7.14, -3.89, -1.08, 1.42]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4b\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, Hub. Wijker, and Hk. Wijker, Physia, 15 627 (1949)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P-> 80 atm). \ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-21.45, -15.76, -11.24, -7.25, -4.00, -1.18, 1.38]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"Ar\")\ndataRef.append(\"E. Whalley, Y. Lupien, and W.G. Schneider, Can. J. Chem. 31 722 (1953)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P series) up to 873.15 K, 2-term fit at higher T. Standard dev. 0.2 for B. 
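# (Illustrative aside, not part of the original compilation.) Entries like the one
# below quote a single uncertainty for every temperature; a constant error vector
# can equivalently be built with np.full rather than written out element by
# element. The helper name is only an example; the explicit arrays used in the
# original entries are kept unchanged throughout this file.
def constant_error(T_values, err):
    # e.g. constant_error(dataT[-1], 0.2) once the temperatures have been appended
    return np.full(len(T_values), float(err))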
\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 323.15, 373.15, 423.15, 473.15, 573.15, 673.15, 773.15, 873.15]))\ndataB.append(np.array([-22.41, -11.20, -4.34, 1.01, 5.28, 10.77, 15.74, 17.76, 19.48]))\ndataBerr.append(np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"Ar\")\ndataRef.append(\"T.L. Cottrell, R.A. Hamilton, and R.P. Taubinger, Trans. Faraday Soc. 52 1310 (1956)\")\ndataRefID.append(\"TO DO\")\n# comment \ndataClass.append(\"class I\")\ndataT.append(np.array([303.2, 333.2, 363.2]))\ndataB.append(np.array([-13.2, -9.3, -4.3]))\ndataBerr.append(np.array([1.8, 1.3, 2.2]))\n\n# original index in 1980 compilation: 7a\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, J.M. Levelt, and W. de Graff, Physica, 24 659 (1958)\")\ndataRefID.append(\"TO DO\")\n# 7-term fit of PV data (P->1000 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([248.15, 223.15, 203.15, 188.15, 173.15, 163.15]))\ndataB.append(np.array([-28.25, -37.09, -45.99, -54.27, -64.24, -72.87]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7b\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, J.M. Levelt, and W. de Graff, Physica, 24 659 (1958)\")\ndataRefID.append(\"TO DO\")\n# 4-term fit of PV data (P->80 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([150.65, 148.15]))\ndataB.append(np.array([-85.58, -88.29]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7c\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, J.M. Levelt, and W. de Graff, Physica, 24 659 (1958)\")\ndataRefID.append(\"TO DO\")\n# 8-term fit of PV data (P->350 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([153.15]))\ndataB.append(np.array([-82.54]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7d\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Michels, J.M. Levelt, and W. de Graff, Physica, 24 659 (1958)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P->50 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([248.15, 223.15, 203.15, 188.15, 173.15, 163.15, 153.15, 150.65, 148.15, 143.15, 138.15, 133.15]))\ndataB.append(np.array([-28.57, -37.43, -46.52, -54.83, -65.21, -73.25, -82.97, -85.63, -88.45, -94.42, -100.88, -107.98]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"Ar\")\ndataRef.append(\"A. Lecocq, J. Rech. Cent. Nat. Rech. Scient. 50 55 (1960)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit to PV data \ndataClass.append(\"class I\")\ndataT.append(np.array([573.16, 673.16, 773.16, 923.16, 1073.16, 1223.16]))\ndataB.append(np.array([9.79, 13.10, 16.01, 18.85, 19.93, 21.05]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 10\nspeciesName.append(\"Ar\")\ndataRef.append(\"R.A.H. Pool, G. Saville, T.M. Herrington, B.D.C. Shields, and L.A.K. Staveley, Trans. Faraday Soc. 58 1692 (1962)\")\ndataRefID.append(\"TO DO\")\n# comment \ndataClass.append(\"N/A\")\ndataT.append(np.array([90]))\ndataB.append(np.array([-231]))\ndataBerr.append(np.array([2]))\n\n# original index in 1980 compilation: 11\nspeciesName.append(\"Ar\")\ndataRef.append(\"B.E.F. Fender and G.D. Halsey, Jr., J. chem. Phys. 36 1881 (1962)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P less than 1 atm). 
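# (Illustrative aside, not part of the original compilation.) With many argon data
# sets in this file, it is convenient to pool every tabulated (T, B, Berr) point
# for one species and sort the result by temperature; the sketch below does that,
# and its name is only an example.
def collect_species(name):
    idx = [i for i, s in enumerate(speciesName) if s == name]
    T_all = np.concatenate([dataT[i] for i in idx])
    B_all = np.concatenate([dataB[i] for i in idx])
    Berr_all = np.concatenate([dataBerr[i] for i in idx])
    order = np.argsort(T_all)
    # e.g. collect_species('Ar') returns all argon points appended so far
    return T_all[order], B_all[order], Berr_all[order]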
Max err in B is +/- 1.5% \ndataClass.append(\"N/A\")\ndataT.append(np.array([84.79, 88.34, 92.30, 95.06, 101.40, 102.01, 105.51, 108.15, 113.32, 117.50, 123.99]))\ndataB.append(np.array([-249.34, -229.89, -211.79, -200.87, -178.73, -177.65, -166.06, -160.27, -149.58, \\\n -140.58, -127.99]))\ndataBerr.append(0.015*abs(dataB[-1]))\n\n# original index in 1980 compilation: 12\nspeciesName.append(\"Ar\")\ndataRef.append(\"J.H. Dymond and E.B. Smith (unpublished) (1962)\")\ndataRefID.append(\"N/A\")\n# estimated accuracy of B +/- 2\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 298.15, 323.15]))\ndataB.append(np.array([-23.1, -16.5, -10.9]))\ndataBerr.append(np.array([2, 2, 2]))\n\n# original index in 1980 compilation: 13\nspeciesName.append(\"Ar\")\ndataRef.append(\"R.W. Crain, Jr., and R.E. Sonntag, Adv. cryogen. Engng 11 379 (1966)\")\ndataRefID.append(\"TO DO\")\n# Burnett method; max. press. 500 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([143.15, 163.15, 203.15, 273.15]))\ndataB.append(np.array([-94.69, -73.20, -46.35, -21.18]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 14\nspeciesName.append(\"Ar\")\ndataRef.append(\"R.D. Weir, I. Wynn Jones, J.S. Rowlinson, and G. Saville, Trans. Faraday Soc. 63 1320 (1967)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data. Errors +/-10 at 80 K decreasing to +/-3 for T 82-85 K and +/-1 at high T\ndataClass.append(\"N/A\")\ndataT.append(np.array([80.43, 81.95, 84.23, 87.12, 88.85, 92.78, 94.75, 97.65, 102.08, 105.89, 108.07, \\\n 120.0, 129.56, 144.60, 146.05, 157.41, 190.52]))\ndataB.append(np.array([-285.1, -272.9, -268.5, -245.9, -235.9, -216.9, -207.3, -194.8, -179.1, -167.8, \\\n -161.5, -134.3, -114.9, -93.18, -92.08, -80.34, -52.85]))\ndataBerr.append(np.array([10, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 15\nspeciesName.append(\"Ar\")\ndataRef.append(\"N.K. Kalfoglou and J.G. Miller, J. phys. Chem., Ithaca 71 1256 (1967)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([303.2, 373.2, 473.2, 573.2, 673.2, 773.2]))\ndataB.append(np.array([-15.05, -4.10, 5.05, 10.77, 14.25, 17.07]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16\nspeciesName.append(\"Ar\")\ndataRef.append(\"M.A. Byrne, M.R. Jones, abd L.A.K. Staveley, Trans. Faraday Soc. 64 1747 (1968)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([84.03, 85.96, 88.94, 89.57, 93.59, 97.69, 102.79, 107.93, 113.97, 122.38, 124.70, \\\n 130.96, 140.04, 159.72, 179.85, 209.94, 241.04, 271.39]))\ndataB.append(np.array([-264.3, -250.2, -233.5, -228.2, -209.7, -193.6, -175.8, -161.1, -146.0, -127.3, \\\n -123.5, -112.7, -99.1, -77.1, -60.7, -43.7, -30.9, -21.9]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 18\nspeciesName.append(\"Ar\")\ndataRef.append(\"A.L. Blancett, K.R. Hall, and F.B. Canfield, Physica, 47 75 (1970)\")\ndataRefID.append(\"TO DO\")\n# Burnett method. Max press. 700 atm\ndataClass.append(\"N/A\")\ndataT.append(np.array([223.15, 273.15, 323.15]))\ndataB.append(np.array([-37.30, -20.90, -10.82]))\ndataBerr.append(np.array([0.25, 0.09, 0.07]))\n\n# original index in 1980 compilation: 19\nspeciesName.append(\"Ar\")\ndataRef.append(\"R.N. Lichtenthaler and K Schäfer, Ber. (dtsch.) Bunsenges, phys. Chem. 
73 42 (1969)\")\ndataRefID.append(\"TO DO\")\n# Estimated absolute error in B +/- 1\ndataClass.append(\"N/A\")\ndataT.append(np.array([288.2, 296.0, 303.2, 313.2, 323.1]))\ndataB.append(np.array([-17.95, -16.07, -14.69, -12.82, -11.10]))\ndataBerr.append(np.array([1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 21\nspeciesName.append(\"Ar\")\ndataRef.append(\"T.K. Bose and R.H. Cole, J. chem. Phys. 52 140 (1970)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([322.85]))\ndataB.append(np.array([-15.8]))\ndataBerr.append(np.array([1.0]))\n\n# original index in 1980 compilation: 22\nspeciesName.append(\"Ar\")\ndataRef.append(\"J.A. Provine and F.B. Canfield, Physica, 52 79 (1971)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([143.15, 158.15, 183.15]))\ndataB.append(np.array([-94.04, -77.87, -56.48]))\ndataBerr.append(np.array([0.56, 0.52, 0.18]))\n\n# original index in 1980 compilation: 23\nspeciesName.append(\"Ar\")\ndataRef.append(\"J. Osborne, Ph.D. thesis, University of London (1972)\")\ndataRefID.append(\"N/A\")\n# Burnett method, P -> 800 bar. Max. estimated err is +/- 1 in B \ndataClass.append(\"N/A\")\ndataT.append(np.array([300, 443, 478, 533, 585, 635, 684, 731, 777, 831, 876, 924, 975, 1024]))\ndataB.append(np.array([-15.8, 4.2, 6.7, 10.2, 11.6, 14.4, 16.1, 16.0, 17.5, 19.1, 19.8, 21.0, 22.3, 23.2]))\ndataBerr.append(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 24\nspeciesName.append(\"Ar\")\ndataRef.append(\"G.A. Pope, P.S. Chappelear and R. Kobayashi, J. chem. Phys. 59 423 (1973)\")\ndataRefID.append(\"TO DO\")\n# Burnett method\ndataClass.append(\"N/A\")\ndataT.append(np.array([101.202, 116.421, 138.224]))\ndataB.append(np.array([-176.03, -138.28, -101.05]))\ndataBerr.append(np.array([0.65, 0.05, 0.09]))\n\n# original index in 1980 compilation: 25\nspeciesName.append(\"Ar\")\ndataRef.append(\"J. Bellm, W. Reineke, K. Schäfer, and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 78 282 (1974)\")\ndataRefID.append(\"TO DO\")\n# estimated accuracy of B is +/- 2\ndataClass.append(\"N/A\")\ndataT.append(np.array([300, 320, 340, 370, 400, 430, 460, 490, 520, 550]))\ndataB.append(np.array([-15.4, -12.1, -8.9, -4.6, -1.0, 1.9, 4.2, 5.8, 7.0, 7.8]))\ndataBerr.append(np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))\n\n# original index in 1980 compilation: 26\nspeciesName.append(\"Ar\")\ndataRef.append(\"R. Hahn, K. Schäfer, and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 78 287 (1974)\")\ndataRefID.append(\"TO DO\")\n# B values determined assuming B(296 K) = -16.2\n# estimated accuracy of B is +/- 2\ndataClass.append(\"N/A\")\ndataT.append(np.array([200.5, 210.9, 231.5, 251.5, 273.2]))\ndataB.append(np.array([-47.7, -42.4, -34.1, -27.4, -21.6]))\ndataBerr.append(np.array([2, 2, 2, 2, 2]))\n\n# original index in 1980 compilation: 27\nspeciesName.append(\"Ar\")\ndataRef.append(\"B. Schramm and U. Hebgen, Chem. phys. Letters 29 137 (1974)\")\ndataRefID.append(\"TO DO\")\n# Values measured relative to B(T) for neon and B(296) for neon and argon\ndataClass.append(\"class II\")\ndataT.append(np.array([77.3, 87.2, 90.2]))\ndataB.append(np.array([-303, -239, -224]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 28\nspeciesName.append(\"Ar\")\ndataRef.append(\"J. Santafe, J.S. Urieta and C.G. Losa, Chem. phys. 
18 341 (1976)\")\ndataRefID.append(\"TO DO\")\n# Compressibility measurements\n# estimated accuracy of B is +/- 3\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.2, 283.2, 293.2, 303.2, 313.2, 323.2]))\ndataB.append(np.array([-21.6, -18.6, -16.4, -14.3, -12.5, -11.0]))\ndataBerr.append(np.array([3, 3, 3, 3, 3, 3]))\n\n# original index in 1980 compilation: 29\nspeciesName.append(\"Ar\")\ndataRef.append(\"H.-P. Rentschler and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 81 319 (1977)\")\ndataRefID.append(\"TO DO\")\n# Compressibility measurements\n# estimated accuracy of B is +/- 4\ndataClass.append(\"N/A\")\ndataT.append(np.array([326, 416, 485, 553, 620, 713]))\ndataB.append(np.array([-10.9, 0.6, 7.0, 10.4, 12.5, 15.9]))\ndataBerr.append(np.array([4, 4, 4, 4, 4, 4]))\n\n# original index in 1980 compilation: 30\nspeciesName.append(\"Ar\")\ndataRef.append(\"B. Schramm, H. Schmiedel, R. Gehrmann, and R. Bartl, Ber. (dtsch.) Bunsenges. phys. Chem. 81 316 (1977)\")\ndataRefID.append(\"TO DO\")\n# Compressibility measurements\n# estimated accuracy of B is +/- 4\ndataClass.append(\"N/A\")\ndataT.append(np.array([202.5, 217.9, 233.1, 264.2, 295.2, 332.8, 367.0, 401.9, 431.3, 466.2, 499.9]))\ndataB.append(np.array([-47.3, -39.0, -32.9, -22.7, -15.8, -8.8, -5.3, -0.8, 1.8, 4.0, 6.0]))\ndataBerr.append(np.array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]))\n\n# New data from 2002 compilation begin here\n\n# original index in 2002 compilation: 92-ewi/tru-1\nspeciesName.append(\"Ar\")\ndataRef.append(\"Ewing, M. B.; Trusler, J. P. M.; Physica A: (Amsterdam). 184 (1992) 415\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([75, 80, 90, 100, 110, 125, 150, 200, 250, 300, 400, 500, 600, 700]))\ndataB.append(np.array([-313.8, -276.7, -221.4, -182.1, -152.9, -121.0, -86.0, -47.9, -27.7, \\\n -15.2, -0.9, 7.0, 11.8, 15.1]))\ndataBerr.append(np.array([1.0, 1.0, 0.8, 0.8, 0.6, 0.4, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]))\n\n# original index in 2002 compilation: 94-gil/kle-1\nspeciesName.append(\"Ar\")\ndataRef.append(\"Gilgen, R.; Kleinrahm, R.; Wagner, W.; J. Chem. Thermodyn. 26 (1994) 383.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([110, 120, 130, 135, 140, 143, 146, 148, 150.7, 153.0, 155, 157, 160, \\\n 165, 170, 175, 180, 190, 200, 220, 250, 265, 280, 295, 310, 325, 340]))\ndataB.append(np.array([-152.7, -130.7, -113.0, -105.4, -98.5, -94.7, -91.0, -88.7, -85.7, \\\n -83.2, -81.2, -79.2, -76.3, -71.8, -67.6, -63.8, -60.2, -53.7, -48.0, -38.5, -27.7, -23.4, \\\n -19.6, -16.3, -13.3, -10.7, -8.3]))\ndataBerr.append(np.array([1.3, 0.6, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3]))\n\n# original index in 2002 compilation: 96-est/tru\nspeciesName.append(\"Ar\")\ndataRef.append(\"Estrada-Alexanders, A. F.; Trusler, J. P. M.; Int. J. Thermophys. 17 (1996) 1325\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([110, 120, 130, 140, 145, 150, 155, 160, 175, 190, 205, 220, 250, 295, 355, 400, 450]))\ndataB.append(np.array([-153.6, -130.9, -112.9, -97.2, -92.2, -86.7, -81.4, -76.4, -63.8, -53.7, -45.5, -38.6, -27.9, -16.4, -6.4, -1.1, 3.3]))\ndataBerr.append(np.array([0.5, 0.4, 0.4, 0.3, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1]))\n\n# Data for hydrogen cyanide, HCN\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"HCN\")\ndataRef.append(\"T.L. Cottrell, I.M. 
MacFarlane, and A.W. Read, Trans. Faraday Soc. 61 1632 (1965)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.15, 348.15]))\ndataB.append(np.array([-1332, -765]))\ndataBerr.append(np.array([100, 75]))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"HCN\")\ndataRef.append(\"W.A. Felsing and G.W. Drake, J. Am. chem. Soc. 58 1714 (1936)\")\ndataRefID.append(\"TO DO\")\n# Calculated from PVT data by T.L. Cottrell, I.M. MacFarlane, and A.W. Read, Trans. Faraday Soc. 61 1632 (1965)\ndataClass.append(\"class III\")\ndataT.append(np.array([303.15, 343.15, 383.15]))\ndataB.append(np.array([-1602, -811, -507]))\ndataBerr.append(BerrCalc(dataB[-1], 3))\n\n# additional data from 2002 compilation starts here\n\n# original index in 2002 compilation: 65-cot/mac\nspeciesName.append(\"HCN\")\ndataRef.append(\"T.L. Cottrell, I.M. MacFarlane, and A.W. Read, Trans. Faraday Soc. 61 1632 (1965)\")\ndataRefID.append(\"TO DO\")\n# Additional calculations and estimates not included in previous data entries\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.15, 343.15, 348.15, 383.15]))\ndataB.append(np.array([-1564, -989, -940, -672]))\ndataBerr.append(np.array([250, 200, 200, 150]))\n\n# Data for methanol, CH3OH\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"J.D. Lambert, G.A.H. Roberts, J.S. Rowlinson, and V.J. Wilkinson, Proc. R. Soc. A196 113 (1949)\")\ndataRefID.append(\"TO DO\")\n# Max. press. 600 Torr. Accuracy +/-50\ndataClass.append(\"N/A\")\ndataT.append(np.array([319.3, 329.8, 335.0, 340.0, 340.1, 345.5, 350.8, 350.9, 351.4, 351.4, 360.6, \\\n 378.9, 392.8, 404.8]))\ndataB.append(np.array([-1424.3, -1316.0, -1251.1, -1189.1, -1172.6, -1093.4, -1064.7, -1045.8, -1038.6, \\\n -1022.8, -933.3, -730.2, -626.6, -525.8]))\n#dataBerr = [np.array([50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50])]\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"C.B. Kretschmer and R. Wiebe, J. Am. chem. Soc. 76 2579 (1954)\")\ndataRefID.append(\"TO DO\")\n# Low pressure vapour density measurement. Data fit to 3-term P-series (P^0, P^1, P^3). \n# Uncertainty in B is 5 at 373.15 K and above, but increases to 100 at 313.15 K\n# This set is suspect and I may not include it in the end\ndataClass.append(\"N/A\")\ndataT.append(np.array([313.15, 333.15, 353.15, 373.15, 393.15]))\ndataB.append(np.array([-1463, -926, -701, -543, -433]))\ndataBerr.append(np.array([100, 100, 100, 5, 5]))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"O.R. Fox, J. Morcillo, and A. Mendez, An. R. Soc. esp. Fis. Quim. 17B 23 (1954)\")\ndataRefID.append(\"TO DO\")\n# Values read from diagram\ndataClass.append(\"class II\")\ndataT.append(np.array([350.0, 371.0, 402.0, 424.2, 448.7, 474.1]))\ndataB.append(np.array([-900, -660, -470, -400, -370, -320]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"G.A. Bottomley and T.H. Spurling, Aust. J. Chem. 20 1789 (1967)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit. max pressure 155 Torr\ndataClass.append(\"class II\")\ndataT.append(np.array([323.2, 333.2, 348.2, 373.2, 398.2, 423.2]))\ndataB.append(np.array([-1144, -1033, -886, -691, -546, -412]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"D.H. 
Knoebel and W.C. Edmister, J. chem. Engng Data 13 312 (1968)\")\ndataRefID.append(\"TO DO\")\n# Low pressure PVT measurements\ndataClass.append(\"class II\")\ndataT.append(np.array([313.2, 333.2, 353.2, 373.2]))\ndataB.append(np.array([-2079, -1079, -752, -542]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"G.S. Kell and G.E. McLaurin, J. chem. Phys. 51 4345 (1969)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([423.16, 423.16, 448.16, 448.16, 473.16, 473.16, 473.16, 473.16, 498.16, \\\n 498.16, 523.16, 523.16, 573.16, 573.16]))\ndataB.append(np.array([-323, -318, -258.8, -258.2, -220.5, -220.0, -216.9, -218.1, -181.2, -180.7, \\\n -156.6, -155.9, -113.1, -117.2]))\ndataBerr.append(np.array([2, 2, 0.7, 0.4, 0.5, 1.3, 0.2, 0.3, 0.4, 0.5, 0.4, 0.1, 0.4, 0.4]))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"A.P. Kudchadker and P.T. Eubank, J. chem. Engng. Data 15 7 (1970)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15]))\ndataB.append(np.array([-2075, -1185, -737, -535, -413, -321, -251, -185]))\ndataBerr.append(np.array([104, 53, 30, 19, 15, 10, 8, 6]))\n\n# data from 2002 compilation begin here\n\n# original index in 2002 compilation: 92-boy/ewi \nspeciesName.append(\"CH3OH\")\ndataRef.append(\"Boyes, S. J.; Ewing, M. B.; Goodwin, A. R. H.; J. Chem. Thermodyn. 24 (1992) 1151\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([310, 315, 320, 330, 340, 360]))\ndataB.append(np.array([-1483, -1349, -1230, -1031, -873, -643]))\ndataBerr.append(np.array([60, 50, 45, 40, 35, 30]))\n\n# original index in 2002 compilation: 92-mas/dug\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"Massucci, M.; Du'Gay, A. P.; Diaz-Laviada, A. M.; Wormald, C. J.; J. Chem. Soc., Faraday Trans. 1. 88 (1992) 427\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([338.2, 343.2, 348.2, 353.2, 363.2, 373.2, 398.2, 423.2]))\ndataB.append(np.array([-884, -822, -767, -718, -631, -560, -427, -337]))\ndataBerr.append(np.array([17, 16, 15, 14, 12, 11, 9, 7]))\n\n# original index in 2002 compilation: 97-bic/hen\nspeciesName.append(\"CH3OH\")\ndataRef.append(\"Bich, E.; Hendl, H.; Vogel, E.; Fluid Phase Equil. 133 (1997) 129\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([373, 400, 422, 450, 474, 493, 528, 583, 628]))\ndataB.append(np.array([-504.2, -380.2, -307.1, -263.1, -202.8, -173.7, -142.2, -94.6, -75.5]))\ndataBerr.append(np.array([20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0]))\n\n# to do: get Further references: 29-euc/mey, 50-lam/sta, 63-eve/mun, 69-zub/bag, 74-mil/min, 79-pat, 84-bic/pie, 84-ohg/han, 89-abu/ver-1, 89-olf/sch, 91-ker/var, 92-boy/ewi(280 K – 305 K), 93-bar/mar, 93-sha/naz-2\n\n# Data for carbon dioxide, CO2\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"CO2\")\ndataRef.append(\"A. Luduc and P. Sacerdote, C. r. hebd. Seanc. Acad. Sci., Paris 125 297 (1897)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([289.2]))\ndataB.append(np.array([-124.8]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"CO2\")\ndataRef.append(\"Lord Rayleigh, Phil. Trans. R. Soc. 
A204 351 (1905)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([288.2]))\ndataB.append(np.array([-131.9]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"CO2\")\ndataRef.append(\"A. Michels and C. Michels, Proc. R. Soc. A153 201 (1935)\")\ndataRefID.append(\"TO DO\")\n# 4-term fit to PV data (P -> 240 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.20, 303.05, 304.19, 305.23, 313.25, 322.86, 348.41, 372.92, \\\n 398.46, 412.98, 418.20, 423.29]))\ndataB.append(np.array([-151.18, -123.56, -119.45, -118.37, -117.29, -110.83, -103.52, -86.68, -73.68, \\\n -62.20, -55.76, -54.02, -52.23]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"CO2\")\ndataRef.append(\"W. Cawood and H.S. Patterson, J. chem. Soc. 619 (1933)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([273.15, 294.15, 304.15]))\ndataB.append(np.array([-148.6, -127.0, -120.8]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 4a\nspeciesName.append(\"CO2\")\ndataRef.append(\"W. Cawood and H.S. Patterson, Phil. Trans. R. Soc. A236 77 (1937)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([294.15]))\ndataB.append(np.array([-126.0]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"CO2\")\ndataRef.append(\"K. Schafer, Z. phys. Chem. B36 85 (1937)\")\ndataRefID.append(\"TO DO\")\n# 2-term fit of PV data. Max. press. less than 1 atm. Uncertainty in B +/- 2%\ndataClass.append(\"N/A\")\ndataT.append(np.array([203.83, 206.63, 207.72, 209.03, 210.12, 211.60, 223.75, 225.63, \\\n 226.47, 229.96, 230.93, 231.79, 233.34, 235.05, 244.93, 273.15]))\ndataB.append(np.array([-330, -316, -313, -302, -300, -286, -226, -229, -225, -212, -216, \\\n -213, -210, -198, -175, -142]))\ndataBerr.append(0.02*abs(dataB[-1]))\n\n# original index in 1980 compilation: 7\nspeciesName.append(\"CO2\")\ndataRef.append(\"G.A. Bottomley, D.S. Massie, and R. Whytlaw-Gray, Proc. R. Soc. A200 201 (1950)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([295.21]))\ndataB.append(np.array([-125.2]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"CO2\")\ndataRef.append(\"K.E. MacCormack and W.G. Schneider, J. chem. Phys. 18 1269 (1950)\")\ndataRefID.append(\"TO DO\")\n# Also J. chem. Phys. 19 849 (1951)\n# 4-term fit of PV data (P series). Max press. 50 atm. Accuracy of B +/- 0.5 to 1% at 273.15, +/- 1.5% at 473.15 K\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 323.15, 373.15]))\ndataB.append(np.array([-156.36, -102.63, -71.85]))\ndataBerr.append(0.01*abs(dataB[-1]))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"CO2\")\ndataRef.append(\"K.E. MacCormack and W.G. Schneider, J. chem. Phys. 19 849 (1951)\")\ndataRefID.append(\"TO DO\")\n# Also J. chem. Phys. 18 1269 (1950)\n# 4-term fit of PV data (P series). Max press. 50 atm. 
Accuracy of B +/- 0.5 to 1% at 273.15, +/- 1.5% at 473.15 K\ndataClass.append(\"N/A\")\ndataT.append(np.array([423.15, 473.15, 573.15, 673.15, 773.15, 873.15]))\ndataB.append(np.array([-50.59, -34.08, -13.48, -1.58, 6.05, 12.11]))\ndataBerr.append(0.015*abs(dataB[-1]))\n\n# original index in 1980 compilation: 9\nspeciesName.append(\"CO2\")\ndataRef.append(\"W.C. Pfefferle, Jr., J.A. Goff, and J.G. Miller, J. chem. Phys. 23 509 (1955)\")\ndataRefID.append(\"TO DO\")\n# Burnett method, max. press. 55 atm\ndataClass.append(\"class II\")\ndataT.append(np.array([303.15, 303.15]))\ndataB.append(np.array([-117.7, -117.9]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 10\nspeciesName.append(\"CO2\")\ndataRef.append(\"T.L. Cottrell, R.A. Hamilton, and R.P. Taubinger, Trans. Faraday Soc. 52 1310 (1956)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([303.15, 333.15, 363.15]))\ndataB.append(np.array([-116.4, -96.7, -75.9]))\ndataBerr.append(np.array([4.6, 4.6, 4.0]))\n\n# original index in 1980 compilation: 11\nspeciesName.append(\"CO2\")\ndataRef.append(\"D. Cook, Can. J. Chem. 35 268 (1957)\")\ndataRefID.append(\"TO DO\")\n# 2-term fit of PV data, max. press. 2.5 atm\ndataClass.append(\"N/A\")\ndataT.append(np.array([213.2, 223.2, 233.2, 248.2, 273.2, 303.2]))\ndataB.append(np.array([-310, -302, -266, -204, -168, -127]))\ndataBerr.append(np.array([7, 15, 11, 12, 8, 13]))\n\n# original index in 1980 compilation: 12\nspeciesName.append(\"CO2\")\ndataRef.append(\"A. Perez Masia and M. Diaz Pena, An. R. Soc. esp. Fis. Quim. 54B 661 (1958)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([298.15, 303.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-125.7, -119.3, -104.3, -85.1, -73.9, -59.4, -52.6]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 13\nspeciesName.append(\"CO2\")\ndataRef.append(\"J.A. Huff and T.M. Reed, J. chem. Engng Data 8 306 (1963)\")\ndataRefID.append(\"TO DO\")\n# data also in R.D. Gunn, M.S. Thesis, University of California, Berkeley (1958)\ndataClass.append(\"class I\")\ndataT.append(np.array([298.2, 310.9, 323.2, 344.3, 377.6, 398.2, 410.9, 444.3, 477.5, 510.9]))\ndataB.append(np.array([-124.6, -112.7, -103.0, -88.8, -70.7, -61.2, -56.5, -44.6, -34.9, -26.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 14\nspeciesName.append(\"CO2\")\ndataRef.append(\"J.H. Dymond and E.B. Smith (unpublished) (1962)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15]))\ndataB.append(np.array([-127]))\ndataBerr.append(np.array([5]))\n\n# original index in 1980 compilation: 15\nspeciesName.append(\"CO2\")\ndataRef.append(\"E.G. Butcher and R.S. Dadson, Proc. R. Soc. A277 448 (1964)\")\ndataRefID.append(\"TO DO\")\n# 4-term fit of PV data\ndataClass.append(\"class I\")\ndataT.append(np.array([262.65, 273.15, 283.15, 299.65, 309.65, 323.15, 333.15, 343.15, 353.15, \\\n 363.15, 373.15, 423.15, 473.15]))\ndataB.append(np.array([-159.9, -147.4, -136.7, -120.5, -111.3, -100.7, -93.9, -87.1, -80.9, \\\n -75.3, -69.5, -46.3, -29.1]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16\nspeciesName.append(\"CO2\")\ndataRef.append(\"M.P. Vukalovich and Ya. F. Masalov, Teploenergetika 13 5, 58 (1966)\")\ndataRefID.append(\"TO DO\")\n# Also in Heat Pwr. Engng, Was. 
13 (5), 73 (1966)\n# 3-term fit of PV data. P range 5 - 200 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([423.2, 473.2, 523.2, 573.2, 623.2, 673.2, 723.2, 773.2]))\ndataB.append(np.array([-52.4, -36.7, -25.1, -16.3, -9.4, -3.7, 0.9, 4.8]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 17\nspeciesName.append(\"CO2\")\ndataRef.append(\"P.S. Ku and B.F. Dodge, J. chem. Engng Data 12 158 (1967)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data\ndataClass.append(\"class II\")\ndataT.append(np.array([373.15]))\ndataB.append(np.array([-77.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 18\nspeciesName.append(\"CO2\")\ndataRef.append(\"A. Sass, B.F. Dodge, and R.H. Bretton, J. chem. Engng Data 12 168 (1967)\")\ndataRefID.append(\"TO DO\")\n# 7-term fit of PV data (P series). P range 8 - 500 atm\ndataClass.append(\"class I\")\ndataT.append(np.array([348.15, 373.15, 398.15]))\ndataB.append(np.array([-81.8, -69.8, -61.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 19\nspeciesName.append(\"CO2\")\ndataRef.append(\"R.S. Dadson, E.J. Evans, and J.H. King, Proc. phys. Soc. 92 1115 (1967)\")\ndataRefID.append(\"TO DO\")\n# Determined using Piezometer method and used to confirm earlier results obtained by the series-expansion method\ndataClass.append(\"class I\")\ndataT.append(np.array([263.2, 273.2, 293.2, 298.2, 313.2, 333.2, 353.2, 373.2, 398.2]))\ndataB.append(np.array([-159.8, -147.4, -125.6, -121.8, -108.4, -93.2, -80.9, -70.2, -58.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 20\nspeciesName.append(\"CO2\")\ndataRef.append(\"R.N. Lichtenthaler and K. Schäfer, Ber. (dtsch.) Bunsenges. phys. Chem. 73 42 (1969)\")\ndataRefID.append(\"TO DO\")\n# Estimated absolute error in B +/- 1\ndataClass.append(\"N/A\")\ndataT.append(np.array([288.2, 296.0, 303.2, 313.2, 323.1]))\ndataB.append(np.array([-137.1, -129.1, -122.2, -112.0, -103.1]))\ndataBerr.append(np.array([1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 21\nspeciesName.append(\"CO2\")\ndataRef.append(\"T.K. Bose and R.H. Cole, J. chem. Phys. 52 140 (1970)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([322.85]))\ndataB.append(np.array([-109.3]))\ndataBerr.append(np.array([4.4]))\n\n# original index in 1980 compilation: 22\nspeciesName.append(\"CO2\")\ndataRef.append(\"M. Waxman, H.A. Davis, and J.R. Hastings, Proc. Sixth Symp. Thermophys. Props., A.S.M.E., New York, 245 (1973)\")\ndataRefID.append(\"TO DO\")\n# Burnett method, reliability of B values given as +/- 0.3\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-150.07, -102.25, -85.42, -71.85, -60.69, -51.41]))\ndataBerr.append(np.array([0.3, 0.3, 0.3, 0.3, 0.3, 0.3]))\n\n# original index in 1980 compilation: 23\nspeciesName.append(\"CO2\")\ndataRef.append(\"S. Angus, B. Armstrong, and K.M. 
de Reuck, International THermodynamic Tables of the Fluid State Carbon Dioxide, Pergamon Press, Oxford (1976)\")\ndataRefID.append(\"TO DO\")\n# B estimated to be accuratte to +/- 2 above 260 K, but the uncertainty increases at lower temperatures\ndataClass.append(\"N/A\")\ndataT.append(np.array([220, 230, 240, 250, 260, 270, 273.15, 280, 290, 298.15, 300, 310, \\\n 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450, 460, 470, 480, \\\n 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 640, 650, \\\n 660, 670, 680, 690, 700, 710, 720, 730, 740, 750, 760, 770, 780, 790, 800, 810, 820, \\\n 830, 840, 850, 860, 870, 880, 890, 900, 910, 920, 930, 940, 950, 960, 970, 980, 990, \\\n 1000, 1010, 1020, 1030, 1040, 1050, 1060, 1070, 1080, 1090, 1100]))\ndataB.append(np.array([-248.2, -223.3, -202.4, -184.5, -169.0, -155.4, -151.4, -143.3, -132.5, -124.5, -122.7, -113.9, \\\n -105.8, -98.5, -91.7, -85.5, -79.7, -74.4, -69.5, -64.8, -60.5, -56.5, -52.8, -49.3, -45.9, -42.8, -39.9, -37.2, -34.6, \\\n -32.1, -29.8, -27.6, -25.5, -23.5, -21.6, -19.9, -18.2, -16.5, -15.0, -13.5, -12.1, -10.8, -9.5, -8.3, -7.1, -6.0, \\\n -5.0, -3.9, -2.9, -2.0, -1.1, -0.2, 0.6, 1.4, 2.2, 2.9, 3.7, 4.4, 5.0, 5.7, 6.3, 6.9, 7.5, \\\n 8.0, 8.6, 9.1, 9.6, 10.1, 10.6, 11.0, 11.5, 11.9, 12.3, 12.7, 13.1, 13.5, 13.9, 14.2, 14.6, 14.9, \\\n 15.3, 15.6, 15.9, 16.2, 16.5, 16.8, 17.1, 17.3, 17.6, 17.9, 18.1]))\ndataBerr.append(np.array([10, 10, 10, 10, 10, 2, 2, 2, 2, 2, 2, 2, \\\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, \\\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, \\\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, \\\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, \\\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))\n\n# original index in 1980 compilation: 24\nspeciesName.append(\"CO2\")\ndataRef.append(\"B. Schramm and R. Gehrmann (unpublished) (1979)\")\ndataRefID.append(\"TO DO\")\n# Estimated error in B is +/- 6 \ndataClass.append(\"N/A\")\ndataT.append(np.array([213, 223, 242, 262, 276, 295]))\ndataB.append(np.array([-245.9, -221.3, -183.5, -155.9, -141.0, -126.0]))\ndataBerr.append(np.array([6, 6, 6, 6, 6, 6]))\n\n# original index in 1980 compilation: 25\nspeciesName.append(\"CO2\")\ndataRef.append(\"B. Schramm and H. Schmiedel (unpublished) (1979)\")\ndataRefID.append(\"TO DO\")\n# Estimated error in B is +/- 4\ndataClass.append(\"N/A\")\ndataT.append(np.array([295, 330, 365, 400, 425, 450, 475]))\ndataB.append(np.array([-126.0, -98.0, -76.8, -58.0, -47.5, -40.7, -34.6]))\ndataBerr.append(np.array([4, 4, 4, 4, 4, 4, 4]))\n\n# data from 2002 compilation starts here\n\n# original index in 2002 compilation: 87-hol/hal\nspeciesName.append(\"CO2\")\ndataRef.append(\"Holste, J. C.; Hall, K. R.; Eubank, P. T.; Esper, G.; Watson, M. Q.; Warowny, W.; Bailey, D. M.; Young, J. G.; Bellomy, M. T.; J. Chem. Thermodyn. 19 (1987) 1233.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([217, 220, 230, 240, 250, 260, 270, 280, 290, 298.15, 300, 320, \\\n 323.15, 348.15, 373.15, 398.15, 423.15, 448.15]))\ndataB.append(np.array([-255.4, -247.5, -223.7, -202.8, -184.8, -168.9, -155.1, -142.1, \\\n -131.6, -123.2, -121.7, -104.7, -102.0, -85.2, -71.6, -60.5, -51.3, -43.5]))\ndataBerr.append(np.array([1.0, 1.0, 0.5, 0.4, 0.3, 0.3, 0.3, 0.3, 0.3, 0.2, 0.2, 0.2,\n 0.2, 0.2, 0.2, 0.1, 0.1, 0.1]))\n\n# original index in 2002 compilation: 90-dus/kle\nspeciesName.append(\"CO2\")\ndataRef.append(\"Duschek, W.; Kleinrahm, R.; Wagner, W.; J. Chem. 
Thermodyn. 22 (1990) 827.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([220, 240, 260, 280, 300, 320, 340]))\ndataB.append(np.array([-247.5, -202.1, -168.3, -142.1, -121.4, -104.5, -90.6]))\ndataBerr.append(np.array([1.0, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]))\n\n# original index in 2002 compilation: 87-jae\nspeciesName.append(\"CO2\")\ndataRef.append(\"Jaeschke, M.; Int. J. Thermophys. 8 (1987) 81.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.20, 293.20, 313.20, 333.20, 353.20]))\ndataB.append(np.array([-150.4, -128.0, -109.8, -95.0, -83.1]))\ndataBerr.append(np.array([0.5, 0.5, 0.5, 0.5, 0.5]))\n\n# original index in 2002 compilation: 90-glo\nspeciesName.append(\"CO2\")\ndataRef.append(\"Glowka, S.; Pol. J. Chem. 64 (1990) 699\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([304.19, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-117.7, -102.2, -85.4, -72.0, -60.6, -51.3]))\ndataBerr.append(np.array([1.0, 0.9, 0.8, 0.7, 0.7, 0.6]))\n\n# to do: get further references: 50-lam/sta, 63-mul/kir-1, 70-tim/kob, 80-hol/wat, 80-kat/ohg, 80-sch/geh, 81- ben/bie, 82-ohg/nak-1, 82-sch/mue, 84-ohg/sak, 86-eli/hoa, 87-mal/nat, 88-pat/jof, 91-lop/roz, 91-sch/eli 92-web.\n\n### Data for water, H2O\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"H2O\")\ndataRef.append(\"F.G. Keyes, L.B. Smith, and H.T. Gerry, Proc. Am. Acad. Arts Sci. 70 319 (1936)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([473.15, 523.15, 573.15, 623.15, 673.15, 723.15]))\ndataB.append(np.array([-196.7, -145.3, -112.0, -89.1, -72.6, -59.9]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"H2O\")\ndataRef.append(\"S.C. Collins and F.G. Keyes, Proc. Am. Acad. Arts Sci. 72 283 (1938)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([323.15, 373.15, 423.15]))\ndataB.append(np.array([-838.3, -451.0, -283.5]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"H2O\")\ndataRef.append(\"G.S. Kell, G.E. McLaurin, and E. Whalley, Advances in Thermophysical Properties at Extreme Temperatures and Pressures, p. 104, Lafayette, Indiana, 1965. Am. Soc. mec. Engrs., New York\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([523.2, 523.2, 623.2, 623.2, 723.2, 723.2]))\ndataB.append(np.array([-150.8, -148.8, -91.2, -92.3, -59.93, -59.96]))\ndataBerr.append(np.array([1, 1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"H2O\")\ndataRef.append(\"M.P. Vukalovich, M.S. Trakhrengerts, and G.A. Spiridonov, Teploenergetika 14 7 65 (1967)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([353.15, 373.15, 423.15, 473.15, 523.15, 573.15, 623.15, \\\n 673.15, 723.15, 773.15, 823.15, 873.15, 923.15, 973.15, 1023.15, 1073.15, 1123.15, 1173.15]))\ndataB.append(np.array([-844.4, -453.6, -283.3, -196.1, -145.4, -112.9, -90.2, -72.4, -60.6, \\\n -50.4, -42.0, -35.2, -29.4, -24.6, -20.5, -17.0, -14.1, -11.6]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"H2O\")\ndataRef.append(\"G.S. Kell, G.E. McLaurin, and E. Whalley, J.chem. Phys. 
48 3805 (1968)\")\ndataRefID.append(\"TO DO\")\n# The following values of the virial coefficients were determined by the authoes by analysis of their isothermal\n# compressibility data. \n# Standard errors are quoted in array below, but multiplied by 7 to correspond with estimated uncertainty \n# by Dymond & Smith (1980). \ndataClass.append(\"N/A\")\ndataT.append(np.array([423.13, 423.14, 448.12, 448.12, 473.17, 473.18, 473.12, 498.16, \\\n 498.16, 523.17, 523.18, 548.18, 548.18, 548.19, 573.14, 573.14, 598.17, 598.17, 623.15, 623.14, \\\n 648.12, 648.16, 673.15, 673.16, 698.21, 698.21, 723.19, 723.19]))\ndataB.append(np.array([-334, -326, -263, -264, -209, -209, -215, -178.4, \\\n -181.7, -152.5, -151.8, -133.2, -133.0, -133.3, -117.9, -117.1, -103.5, -103.6, -92.38, -91.64, \\\n -81.78, -82.30, -73.47, -73.26, -65.92, -65.74, -59.36, -59.25]))\ndataBerr.append(7*np.array([2, 2, 2, 1, 2, 6, 3, 1.3, \\\n 3.0, 0.2, 0.3, 0.2, 0.1, 0.1, 0.3, 0.1, 0.1, 0.1, 0.27, 0.11, \\\n 0.09, 0.23, 0.07, 0.09, 0.1, 0.08, 0.06, 0.09]))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"H2O\")\ndataRef.append(\"E.J. Le Fevre, M.R. Nightingale, and J.W. Rose, J. mech. Eng. Sci. 17 243 (1975)\")\ndataRefID.append(\"TO DO\")\n# The following B values are calculated from the correlation given\ndataClass.append(\"class III\")\ndataT.append(np.array([293.15, 303.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15, 498.15, 548.15, \\\n 598.15, 648.15, 698.15, 748.15, 798.15, 848.15, 898.15, 948.15, 998.15, 1048.15, 1098.15, 1148.15, 1198.15, 1248.15]))\ndataB.append(np.array([-1251.5, -1073.3, -812.2, -599.2, -459.8, -364.0, -295.4, -244.6, -206.0, -175.8, -132.4, \\\n -102.9, -82.0, -66.4, -54.5, -45.1, -37.5, -31.3, -26.2, -21.8, -18.1, -14.9, -12.1, -9.7, -7.6]))\ndataBerr.append(BerrCalc(dataB[-1], 3))\n\n# data from 2002 compilation start here\n\n# original index in 2002 compilation: 88-eub/jof\nspeciesName.append(\"H2O\")\ndataRef.append(\"Eubank, P. T.; Joffrion, L. L.; Patel, M. R.; Warowny, W.; J. Chem. Thermodyn. 20 (1988) 1009\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([348.15, 360.65, 373.15, 373.15, 373.15, 385.65, 398.15, 398.15, 398.15, 410.65, \\\n 423.15, 423.15, 423.15, 435.65, 448.15, 448.15, 448.15, 448.15, \\\n 460.65, 460.65, 473.15, 473.15, 473.15, 473.15, 485.65, 485.65, 498.15, 498.15, 498.15, 498.15, \\\n 523.15, 548.15, 573.15, 598.15, 623.15]))\ndataB.append(np.array([-590.3, -515.0, -484.5, -452.7, -451.0, -400.9, -360.0, -357.6, -356.8, -320.9, \\\n -301.7, -289.9, -288.1, -263.1, -250.3, -248.9, -240.0, -240.6, \\\n -220.3, -221.2, -206.6, -200.1, -203.3, -203.0, -188.0, -188.1, -176.7, -176.7, -174.4, -174.2, \\\n -151.8, -133.0, -117.3, -103.4, -97.1]))\ndataBerr.append(np.array([15.0, 10.0, 40.0, 8.0, 17.7, 6.7, 38.0, 5.4, 13.4, 4.4, \\\n 35.0, 3.5, 10.3, 2.7, 30.0, 26.0, 2.1, 3.6, \\\n 1.8, 2.4, 25.0, 20.0, 1.6, 1.9, 1.4, 1.9, 20.0, 16.0, 1.2, 1.5, \\\n 1.3, 1.0, 0.9, 0.8, 0.7]))\n\n# original index in 2002 compilation: 87-hen/bic\nspeciesName.append(\"H2O\")\ndataRef.append(\"Hendl, H.; Bich, E.; Vogel, E.; J. Chem. Thermodyn. 
29 (1997) 765.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([380.56, 392.34, 404.47, 425.25, 447.24, 470.94, 483.56, 503.01, 503.01, 524.03]))\ndataB.append(np.array([-374.0, -350.3, -321.2, -267.0, -227.9, -186.8, -172.8, -157.7, -151.1, -136.8]))\ndataBerr.append(np.array([80.0, 80.0, 75.0, 75.0, 70.0, 70.0, 65.0, 65.0, 60.0, 60.0]))\n\n# original index in 2002 compilation: 89-kel/mcl\nspeciesName.append(\"H2O\")\ndataRef.append(\"Kell, G. S.; McLaurin, G. E.; Whalley, E.; Proc. R. Soc. London, A. 425 (1989) 49\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([423.15, 423.15, 448.15, 448.15, 473.15, 473.15, 498.15, 498.15, 523.15, \\\n 523.15, 548.15, 548.15, 573.15, 573.15, 573.15, 598.15, 598.15, 623.15, 623.15, 648.15, \\\n 648.15, 673.15, 673.15, 698.15, 698.15, 723.15, 723.15, 748.15, 748.15, 773.15, 773.15]))\ndataB.append(np.array([-274.0, -276.0, -240.0, -239.0, -200.9, -200.6, -171.1, -172.0, -149.7, \\\n -149.9, -130.8, -130.8, -115.8, -115.9, -115.6, -102.0, -102.0, -91.4, -91.2, -82.0, -81.8, \\\n -73.4, -73.5, -66.7, -66.4, -60.1, -60.0, -54.7, -54.6, -49.9, -49.8]))\ndataBerr.append(np.array([6.0, 6.0, 5.0, 5.0, 1.0, 1.0, 0.5, 0.5, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2]))\n\n# to do: get data from additional references: 52-mcc/pen, 73-tre/boc, 74-lan/lei, 75-boh/geb, 76-lie/cle, 91-yam/kaw, 93-abd/baz, 93-abd/baz-1, 96-abd/baz\n\n### Data for ethane, C2H6\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"C2H6\")\ndataRef.append(\"A. Eucken and A. Parts, Z. Phys. Chem. B20 184 (1933)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([191.86, 193.65, 201.61, 201.17, 211.33, 213.34, 222.58, 224.50, \\\n 233.41, 236.67, 244.70, 247.00, 257.09, 259.03, 273.20]))\ndataB.append(np.array([-498, -487, -446, -443, -404, -395, -360, -354, \\\n -325, -316, -293, -287, -262, -258, -227.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"C2H6\")\ndataRef.append(\"J.O. Hirshfelder, F.T. McClure, and I.F. Weeks, J. chem. Phys. 10 201 (1942)\")\ndataRefID.append(\"TO DO\")\n# Calculated from PVT data, max. press. 200 atm originally by J.A. Beattie, C. Hadlock, and N. Poffennberger,\n# J. chem. Phys. 3 93 (1935)\ndataClass.append(\"class I\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15, 498.15, 523.15]))\ndataB.append(np.array([-191, -160, -139, -122, -108, -94, -86, -77, -71, -60]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"C2H6\")\ndataRef.append(\"S.D. Hamann and W.J. Mcmanamey, Trans. Faraday Soc. 49 149 (1953)\")\ndataRefID.append(\"TO DO\")\n# Calculated from PVT data, max. press. 100 atm originally by A. Michels and G.W. Nederbragt, Physica, 6 656 (1939)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15]))\ndataB.append(np.array([-223, -187, -157]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"C2H6\")\ndataRef.append(\"S.D. Hamann and W.J. Mcmanamey, Trans. Faraday Soc. 49 149 (1953)\")\ndataRefID.append(\"TO DO\")\n# Calculated from PVT data, max. press. 700 atm originally by H.H. Reamer, R.H. Olds, B.H. Sage, and W.N. Lacey, \n# Ind. Engng Chem ind. 
Edn 36 956 (1944)\ndataClass.append(\"class I\")\ndataT.append(np.array([310.94, 344.27, 377.60, 410.94, 444.27, 477.60, 510.94]))\ndataB.append(np.array([-164.9, -132.5, -110.0, -90.4, -74.2, -59.9, -47.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"C2H6\")\ndataRef.append(\"J.D. Lambert, G.A.H. Roberts, J.S. Rowlinson, and V.J. Wilkinson, Proc. R. Soc. A196 113 (1949)\")\ndataRefID.append(\"TO DO\")\n# Accuracy B +/- 50\ndataClass.append(\"N/A\")\ndataT.append(np.array([291.95, 319.25, 329.45, 341.35, 350.75]))\ndataB.append(np.array([-220, -190, -140, -170, -140]))\ndataBerr.append(np.array([50, 50, 50, 50, 50]))\n\n# original index in 1980 compilation: 6\nspeciesName.append(\"C2H6\")\ndataRef.append(\"S.D. Hamann and W.J. McManamey, Trans. Faraday Soc. 49 149 (1953)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([303.15, 323.15, 323.15, 333.15, 343.15, 348.15, 358.15, \\\n 363.15, 373.15, 383.15, 393.15, 398.15, 413.15, 423.15]))\ndataB.append(np.array([-175.8, -146.4, -144.8, -139.9, -135.3, -125.0, -121.5, \\\n -115.8, -111.0, -105.3, -98.3, -103.3, -89.1, -83.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 7a\nspeciesName.append(\"C2H6\")\ndataRef.append(\"A. Michels, W. van Straaten, and J. Dawson, Physica, 20 17 (1954)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data (P -> 80 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.14, 322.75, 347.65, 372.52, 397.84, 422.70]))\ndataB.append(np.array([-221.46, -185.61, -156.91, -133.29, -114.06, -97.72, -83.91]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7b\nspeciesName.append(\"C2H6\")\ndataRef.append(\"A. Michels, W. van Straaten, and J. Dawson, Physica, 20 17 (1954)\")\ndataRefID.append(\"TO DO\")\n# 5-term fit of PV data (P -> 200 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.2, 298.2, 323.2, 348.2, 373.2, 398.2, 423.2]))\ndataB.append(np.array([-221.46, -184.65, -157.67, -134.92, -115.45, -99.28, -84.92]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"C2H6\")\ndataRef.append(\"J.A. Huff and T.M. Reed, J. chem. Engng Data 8 306 (1963)\")\ndataRefID.append(\"TO DO\")\n# from data originally in R.D. Gunn, M.S. Thesis, University of California, Berkeley (1958)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.2, 298.2, 323.2, 377.6, 410.9, 444.3, 477.6, 510.9]))\ndataB.append(np.array([-222.2, -186.9, -157.5, -109.4, -89.6, -74.0, -61.6, -51.0]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 9\nspeciesName.append(\"C2H6\")\ndataRef.append(\"M. Rigby, J.H. Dymond, and E.B. Smith (unpublished) (1963)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 293.15, 308.15, 323.15]))\ndataB.append(np.array([-221, -191, -170, -152]))\ndataBerr.append(np.array([4, 4, 4, 4]))\n\n# original index in 1980 compilation: 11\nspeciesName.append(\"C2H6\")\ndataRef.append(\"A.E. Hoover, I. Nagata, T.W. Leland, Jr., and R. Kobayashi\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([215, 240, 273.15]))\ndataB.append(np.array([-340.63, -276.5, -223.41]))\ndataBerr.append(np.array([3.4063, 1.106, 0.22341]))\n\n# original index in 1980 compilation: 12\nspeciesName.append(\"C2H6\")\ndataRef.append(\"R.N. Lichtenthaler and K. 
Schäfer, Ber. (dtsch.) Bunsenges. phys. Chem. 73 42 (1969)\")\ndataRefID.append(\"TO DO\")\n# Estimated absolute error in B +/- 1\ndataClass.append(\"N/A\")\ndataT.append(np.array([288.2, 296.0, 303.2, 313.2, 323.1]))\ndataB.append(np.array([-203.3, -191.5, -181.5, -168.4, -156.1]))\ndataBerr.append(np.array([1, 1, 1, 1, 1]))\n\n# original index in 1980 compilation: 13\nspeciesName.append(\"C2H6\")\ndataRef.append(\"K. Strein, R.N. Lichtenthaler, B. Schramm, and Kl. Schäfer, Ber. (dtsch.) Bunsenges. phys. Chem. 75 1308 (1971)\")\ndataRefID.append(\"TO DO\")\n# Estimated accuracy of B is +/- 1%\ndataClass.append(\"N/A\")\ndataT.append(np.array([296.1, 307.6, 333.6, 353.4, 373.7, 394.2, 413.6, 433.8, 453.6, 473.8, 493.3]))\ndataB.append(np.array([-188.0, -172.0, -144.3, -126.2, -111.9, -98.7, -88.1, -78.4, -69.1, -62.6, -54.1]))\ndataBerr.append(0.01*abs(dataB[-1]))\n\n# original index in 1980 compilation: 15\nspeciesName.append(\"C2H6\")\ndataRef.append(\"D.R. Douslin and R.H. Harrison, J. chem. Thermodyn. 5 491 (1973)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 303.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15, \\\n 473.15, 498.15, 523.15, 548.15, 573.15, 598.15, 623.15]))\ndataB.append(np.array([-222.2, -185.8, -179.4, -156.7, -133.0, -113.6, -97.3, -83.6, -71.7, \\\n -61.5, -52.4, -44.5, -37.3, -30.9, -25.0, -19.6]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16\nspeciesName.append(\"C2H6\")\ndataRef.append(\"G.A. Pope, P.S. Chappelear, and R. Kobayashi, J. chem. Phys. 59 423 (1973)\")\ndataRefID.append(\"TO DO\")\n# Burnett method\ndataClass.append(\"N/A\")\ndataT.append(np.array([209.534, 238.769, 254.807, 273.150, 306.062]))\ndataB.append(np.array([-368.66, -287.05, -252.05, -219.38, -175.27]))\ndataBerr.append(np.array([4.59, 0.26, 0.99, 0.09, 0.56]))\n\n# original index in 1980 compilation: 17\nspeciesName.append(\"C2H6\")\ndataRef.append(\"R. Hahn, K. Schäfer, and B. Schramm, Ber. (dtsch.) Bunsenges. phys. Chem. 78 287 (1974)\")\ndataRefID.append(\"TO DO\")\n# B determined assuming B (296 K) = -188, quoted accuracy of B is +/- 2\ndataClass.append(\"N/A\")\ndataT.append(np.array([199.4, 210.8, 231.4, 251.2]))\ndataB.append(np.array([-422, -374, -313, -262]))\ndataBerr.append(np.array([2, 2, 2, 2]))\n\n# original index in 1980 compilation: 18\nspeciesName.append(\"C2H6\")\ndataRef.append(\"K. Schäfer, B. Schramm, and J.S.U. Navarro, Z. phys. Chem. Frankf. Ausg. 93 203 (1974)\")\ndataRefID.append(\"TO DO\")\n# Values of B obtained relative to the value at 296 K\ndataClass.append(\"class II\")\ndataT.append(np.array([296, 353.2, 392.8, 432.1, 472.3, 510.6]))\ndataB.append(np.array([-192, -132.6, -104.3, -85.0, -68.0, -54.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 21\nspeciesName.append(\"C2H6\")\ndataRef.append(\"H. Mansoorian, K.R. Hall, and P.T. Eubank, Proc. Seventh Symp. Thermophys. Props., Am. Soc. Mech. Engrs., New York, 456 (1977)\")\ndataRefID.append(\"TO DO\")\n# Burnett method, P -> 135 atm \ndataClass.append(\"N/A\")\ndataT.append(np.array([323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15]))\ndataB.append(np.array([-156.1, -132.4, -111.4, -98.0, -85.7, -72.4, -62.4]))\ndataBerr.append(np.array([1.5, 1.5, 1.5, 1.4, 1.3, 1.2, 1.1]))\n\n# data from 2002 compilation start here\n\n# original index in 2002 compilation: 94-est/tru\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Estrada-Alexanders, A. F.; Trusler, J. P. 
M.; IChemE Res. Event, Two-Day Symp., Vol. 2, Inst. Chem. Eng.: Rugby, UK., 670, (1994)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([200, 225, 250, 275, 300, 325, 350, 375]))\ndataB.append(np.array([-419.2, -328.4, -264.9, -218.0, -182.1, -153.8, -130.8, -111.9]))\ndataBerr.append(np.array([4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0]))\n\n# original index in 2002 compilation: 95-esp/lem\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Esper, G.; Lemming, W.; Beckermann, W.; Kohler, F.; Fluid Phase Equilib. 105 (1995) 173\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([223.41, 239.77, 259.87, 280.45, 300.48, 312.45, 330.17, 350.76]))\ndataB.append(np.array([-322.5, -281.0, -240.2, -206.8, -180.2, -166.6, -148.7, -130.9]))\ndataBerr.append(np.array([15.0, 8.0, 3.0, 2.0, 2.0, 2.0, 4.0, 6.0]))\n\n# original index in 2002 compilation: 88-hae/ker\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Haeusler, H.; Kerl, K.; Int. J. Thermophys. 9 (1988) 117\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([256.71, 256.71, 269.19, 269.19, 282.84, 282.84, \\\n 298.32, 298.32, 298.35, 298.35, 305.45, 305.45, 322.96, 322.96]))\ndataB.append(np.array([-258.6, -259.0, -214.0, -214.0, -198.7, -201.0, \\\n -184.4, -184.0, -184.4, -181.0, -177.2, -177.0, -155.1, -155.3]))\ndataBerr.append(np.array([9.0, 6.0, 3.0, 2.0, 3.0, 6.0, \\\n 3.0, 2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 1.0]))\n\n# original index in 2002 compilation: 87-jae\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Jaeschke, M.; Int. J. Thermophys. 8 (1987) 81.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.20, 293.20, 313.20, 333.20, 356.20]))\ndataB.append(np.array([-221.4, -190.9, -166.3, -146.1, -128.8]))\ndataBerr.append(np.array([1.0, 1.0, 1.0, 1.0, 1.0]))\n\n# original index in 2002 compilation: 74-pom/spu\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Pompe, A.; Spurling, T. H.; Commonwealth Scientific & Indust. Res. Org. Div. of App. Organic Chemistry Technical Paper No. 
1, CSIRO, Melbourne Aust., 42pp, (1974)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 273.16, 273.16, 294.27, 298.15, 298.16, \\\n 298.16, 298.16, 299.83, 303.15, 305.38, 310.94, 310.94, 311.10, 316.49, 323.15, 323.16, \\\n 323.16, 323.16, 327.60, 344.27, 344.27, 344.49, 348.15, 348.16, 348.16, \\\n 360.94, 372.38, 373.15, 373.16, 373.16, 377.60, 377.60, 394.05, 394.27, \\\n 398.15, 398.16, 398.16, 410.94, 423.15, 423.16, 423.16, 444.27, \\\n 448.15, 448.16, 473.15, 473.16, 477.60, 498.15, 498.16, 510.94, 523.15, 523.16, \\\n 548.15, 573.15, 598.15, 623.15]))\ndataB.append(np.array([-222.0, -222.2, -222.8, -204.0, -185.4, -190.5, \\\n -184.9, -187.1, -195.0, -178.9, -185.3, -180.1, -165.8, -171.4, -176.3, -156.2, -159.3, \\\n -157.8, -158.3, -162.8, -144.2, -134.2, -137.0, -132.5, -136.9, -135.0, \\\n -125.0, -107.1, -113.1, -118.6, -115.5, -110.9, -109.8, -99.6, -93.0, \\\n -96.8, -102.3, -99.7, -89.9, -83.1, -88.6, -85.5, -73.0, \\\n -71.1, -78.0, -60.7, -71.1, -58.7, -51.7, -63.6, -46.2, -43.5, -56.5, \\\n -36.3, -29.6, -23.4, -17.9]))\ndataBerr.append(np.array([11.1, 2.0, 2.0, 20.0, 1.0, 10.0, \\\n 2.0, 2.0, 20.0, 1.0, 15.0, 15.0, 10.0, 15.0, 10.0, 1.0, 10.0, \\\n 1.0, 1.0, 10.0, 10.0, 10.0, 15.0, 1.0, 10.0, 1.0, \\\n 10.0, 10.0, 1.0, 10.0, 1.0, 10.0, 10.0, 10.0, 10.0,\n 0.5, 10.0, 1.0, 8.0, 0.5, 10.0, 1.0, 5.0, \\\n 0.5, 10.0, 0.5, 10.0, 5.0, 0.5, 10.0, 5.0, 0.5, 10.0, \\\n 0.4, 0.4, 0.4, 0.4]))\n\n# original index in 2002 compilation: 92-bel/big\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Bell, T. N.; Bignell, C. M.; Dunlop, P. J.; Physica A: (Amsterdam). 181 (1992) 221\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([290.0, 300.0, 310.0]))\ndataB.append(np.array([-195.7, -182.6, -168.9]))\ndataBerr.append(np.array([2.0, 2.0, 2.0]))\n\n# original index in 2002 compilation: 80-kat/ohg\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Katayama, T.; Ohgaki, K.; Ohmori, H.; J. Chem. Eng. Jpn. 13 (1980) 257.\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 298.15, 298.15, 298.15, 298.15]))\ndataB.append(np.array([-185.8, -185.7, -185.8, -185.7, -185.7]))\ndataBerr.append(np.array([1.5, 1.5, 0.5, 0.5, 1.0]))\n\n# original index in 2002 compilation: 93-sta/hou\nspeciesName.append(\"C2H6\")\ndataRef.append(\"St.-Arnaud, J. M.; Hourri, A.; Bose, T. K.; Ingrain, D.; High Temp. - High Pressures. 25 (1993) 301\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15]))\ndataB.append(np.array([-184.2]))\ndataBerr.append(np.array([1.7]))\n\n# original index in 2002 compilation: 81-fin/rae\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Findeisen, R.; Raetzsch, M. T.; Z. Phys. Chem. (Leipzig). 262 (1981) 919\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.20, 323.20, 348.20, 373.20, 398.20]))\ndataB.append(np.array([-183.6, -154.2, -131.3, -111.1, -94.9]))\ndataBerr.append(np.array([3.8, 3.5, 3.3, 3.1, 3.0]))\n\n# original index in 2002 compilation: 84-ker/hae\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Kerl, K.; Haeusler, H.; Ber. Bunsen-Ges. Phys. Chem. 
88 (1984) 992\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([299.30, 301.39, 324.05, 345.03, 365.46]))\ndataB.append(np.array([-190.0, -190.0, -157.0, -132.0, -117.0]))\ndataBerr.append(np.array([1.0, 2.0, 3.0, 4.0, 4.0]))\n\n# original index in 2002 compilation: 96-hou/hol\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Hou, H.; Holste, J. C.; Hall, K. R.; Marsh, K. N.; Gammon, B. E.; J. Chem. Eng. Data. 41 (1996) 344\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([300.0, 320.0]))\ndataB.append(np.array([-182.7, -159.3]))\ndataBerr.append(np.array([1.0, 1.0]))\n\n# original index in 2002 compilation: 73-bes/rob-1\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Besserer, G. J.; Robinson, D. B.; J. Chem. Eng. Data. 18 (1973) 137\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([311.10, 344.49, 372.38, 394.05]))\ndataB.append(np.array([-171.4, -137.0, -107.1, -99.6]))\ndataBerr.append(np.array([17.0, 14.0, 11.0, 10.0]))\n\n# original index in 2002 compilation: 92-web\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Weber, L. A.; Int. J. Thermophys. 13 (1992) 1011\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([320.0]))\ndataB.append(np.array([-159.4]))\ndataBerr.append(np.array([0.2]))\n\n# original index in 2002 compilation: 81-man/hal\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Mansoorian, H.; Hall, K. R.; Holste, J. C.; Eubank, P. T.; J. Chem. Thermodyn. 13 (1981) 1001\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([323.15, 348.15, 373.15, 398.15, 423.15, 448.15, 473.15]))\ndataB.append(np.array([-156.3, -132.8, -113.5, -97.3, -83.9, -72.0, -61.8]))\ndataBerr.append(np.array([0.1, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1]))\n\n# original index in 2002 compilation: 84-ohg/kan\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Ohgaki, K.; Kano, Y.; Katayama, T.; J. Chem. Eng. Jpn. 17 (1984) 543\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([423.15]))\ndataB.append(np.array([-86.3]))\ndataBerr.append(np.array([0.5]))\n\n# original index in 2002 compilation: 91-lop/roz\nspeciesName.append(\"C2H6\")\ndataRef.append(\"Lopatinskii, E. S.; Rozhnov, M. S.; Zhdanov, V. I.; Parnovskii, S. L.; Kudrya, Y. N.; Zh. Fiz. Khim. 65 (1991) 2060\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([293.15, 293.15, 293.15]))\ndataB.append(np.array([-191.1, -194.1, -194.1]))\ndataBerr.append(np.array([5.0, 5.0, 6.0]))\n
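\n# ---------------------------------------------------------------------------\n# Illustrative usage sketch (not part of the compilation itself): the lists\n# speciesName, dataRef, dataRefID, dataClass, dataT, dataB, and dataBerr above\n# are parallel, with one entry per data set.  A minimal, hedged example of how\n# all points for one species could be gathered into single temperature-sorted\n# arrays is given below; the helper name collect_species is hypothetical, and\n# the block is left commented out so it does not affect the data loading.\n#\n# def collect_species(name):\n#     \"\"\"Concatenate T, B, and B uncertainties over all data sets of a species.\"\"\"\n#     idx = [i for i, s in enumerate(speciesName) if s == name]\n#     T = np.concatenate([dataT[i] for i in idx])\n#     B = np.concatenate([dataB[i] for i in idx])\n#     Berr = np.concatenate([np.broadcast_to(dataBerr[i], dataT[i].shape) for i in idx])\n#     order = np.argsort(T)\n#     return T[order], B[order], Berr[order]\n#\n# T_C2H6, B_C2H6, Berr_C2H6 = collect_species(\"C2H6\")\n# ---------------------------------------------------------------------------\n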
\n\n### Data for acetylene, C2H2\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"C2H2\")\ndataRef.append(\"K. Schafer, Z. phys. Chem. B36 85 (1937)\")\ndataRefID.append(\"TO DO\")\n# estimated error +/- 2%, but a large discrepancy exists in early data and so a large error is applied\ndataClass.append(\"N/A\")\ndataT.append(np.array([199.63, 201.66, 203.09, 205.04, 206.98, 209.56, 211.71, 218.15, 219.42, \\\n 221.45, 222.90, 224.21, 225.98, 230.57, 232.96, 235.68, 237.07, 237.64, 238.92, 240.24, \\\n 242.71, 245.27, 248.96, 273.15]))\ndataB.append(np.array([-572, -566, -550, -532, -518, -503, -479, -454, -446, -436, -426, -414, \\\n -406, -390, -381, -369, -361, -358, -352, -343, -334, -328, -320, -258]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"C2H2\")\ndataRef.append(\"G.A. Bottomley, C.G. Reeves, and G.H.F. Seiflow, Nature, Lond. 182 596 (1958)\")\ndataRefID.append(\"TO DO\")\n# 2-term fit. Maximum pressure 230 torr. Accuracy +/- 1, but a large discrepancy exists in early data and so a large error is applied\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.2, 293.2, 313.2]))\ndataB.append(np.array([-191, -158, -133]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# data from 2002 compilation begin here\n# This is the best estimate of Dymond et al. (2002) based on the available data\nspeciesName.append(\"C2H2\")\ndataRef.append(\"J.H. Dymond, K.N. Marsh, R.C. Wilhoit, K.C. Wong, C2 Organic Compounds, Second Virial Coefficients, in: Virial Coefficients Pure Gases, Springer-Verlag, Berlin/Heidelberg, 2002: pp. 111–149\")\ndataRefID.append(\"10.1007/10693952_5\")\n# NOT AN ORIGINAL EXPERIMENT BUT AN ESTIMATE; MORE DATA ARE NEEDED \ndataClass.append(\"N/A\")\ndataT.append(np.array([200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310]))\ndataB.append(np.array([-595, -501, -425, -362, -310, -268, -233, -204, -181, -162, -147, -135]))\ndataBerr.append(np.array([30, 30, 30, 30, 25, 25, 10, 10, 5, 5, 5, 5]))\n\n### Data for ethanol, C2H5OH\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"P.A. Hanks and J.D. Lambert, (unpublished) (1951)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([321.7, 331.9, 336.9, 343.4, 344.1, 351.3, 360.9, 371.0, 381.7, 399.4]))\ndataB.append(np.array([-2731, -1988, -1687, -1697, -1357, -1325, -1154, -926, -675, -523]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"C.B. Kretschmer and R. Wiebe, J. Am. chem. Soc. 76 2579 (1954)\")\ndataRefID.append(\"TO DO\")\n# Low-pressure vapour density method. Data fit to 3-term P series (P^0, P^1, P^3). \n# Uncertainty in B is +/- 5 at 373.15 K and above, but increases to about +/- 100 at 313.15 K\ndataClass.append(\"N/A\")\ndataT.append(np.array([313.15, 333.15, 353.15, 373.15, 393.15]))\ndataB.append(np.array([-2134, -1285, -938, -723, -578]))\ndataBerr.append(np.array([100, 100, 100, 5, 5]))\n\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"D.H. Knoebel and W.C. Edmister, J. chem. Engng Data 13 312 (1968)\")\ndataRefID.append(\"TO DO\")\n# Low pressure PVT measurements\ndataClass.append(\"N/A\")\ndataT.append(np.array([333.2, 353.2, 373.2]))\ndataB.append(np.array([-1522, -941, -687]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# data from 2002 compilation begin here\n\n# original index in 2002 compilation: 73-mar/bai\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Markuzin, N. P.; Baidin, V. N.; Vestn. Leningr. Univ. Fiz. Khim. 2 (1973) 77\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([308.15]))\ndataB.append(np.array([-2210]))\ndataBerr.append(np.array([66]))\n\n# original index in 2002 compilation: 92-mas/von\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Massucci, M.; Von Kralik, M. M.; Wormald, C. J.; J. Chem. Soc., Faraday Trans. 
88 (1992) 985\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([333.90, 338.10, 343.90, 353.90, 363.80, 393.30, 412.20]))\ndataB.append(np.array([-1266, -1185, -1086, -942, -826, -586, -484]))\ndataBerr.append(np.array([50, 50, 45, 40, 35, 35, 30]))\n\n# original index in 2002 compilation: 84-bic/ram\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Bich, E.; Ramsdorf, M.; Opel, G.; Z. Phys. Chem. (Leipzig). 265 (1984) 401\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([348.34, 360.91, 365.27, 370.32, 376.57, 383.94, 389.60, \\\n 390.07, 395.80, 412.52, 413.82, 415.31, 415.36, 439.24, 433.81, \\\n 443.92, 450.43, 469.66, 477.19, 486.40, 486.60, 502.63, 506.97, 517.33, 546.40, 551.42]))\ndataB.append(np.array([-1051, -866, -832, -736, -702, -630, -603, \\\n -596, -552, -466, -463, -463, -467, -375, -356, \\\n -355, -337, -286, -278, -257, -258, -230, -224, -211, -175, -165]))\ndataBerr.append(np.array([42, 36, 35, 32, 31, 29, 28, \\\n 28, 27, 24, 24, 24, 24, 21, 21, \\\n 21, 20, 19, 18, 18, 18, 17, 17, 16, 24, 15]))\n\n# original index in 2002 compilation: 89-abu/ver-1\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Abusleme, J. A.; Vera, J. H.; Fluid Phase Equilib. 45 (1989) 287\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([343.15, 343.15, 353.15, 353.15, 363.15, 363.15]))\ndataB.append(np.array([-1336, -1331, -1021, -1023, -878, -863]))\ndataBerr.append(np.array([37, 37, 30, 31, 28, 27]))\n\n# original index in 2002 compilation: 54-foz/mor\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Foz Gazulla, O. R.; Morcilla, J.; Perez Masia, A.; Mendez, A.; An. R. Soc. Esp. Fis. Quim., Ser. B. 50 (1954) 23\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([360, 371, 402, 424, 449, 474]))\ndataB.append(np.array([-990, -860, -600, -510, -440, -370]))\ndataBerr.append(np.array([100, 90, 60, 50, 40, 40]))\n\n# original index in 2002 compilation: 84-wil/lin\nspeciesName.append(\"C2H5OH\")\ndataRef.append(\"Wilson, K. S.; Lindley, D. D.; Kay, W. B.; Hershey, H. C.; J. Chem. Eng. Data. 29 (1984) 243\")\ndataRefID.append(\"TO DO\")\n# Lcomment\ndataClass.append(\"N/A\")\ndataT.append(np.array([373.07, 388.17, 398.15, 400.81, 423.15, 448.15, 473.15]))\ndataB.append(np.array([-965, -625, -569, -583, -378, -280, -196]))\ndataBerr.append(np.array([202, 86, 27, 75, 22, 24, 67]))\n\n### Data for ethylene (ethene), C2H4\n\n# original index in 1980 compilation: 1\nspeciesName.append(\"C2H4\")\ndataRef.append(\"C.A. Crommelin and H.G. Watts, Communs phys. Lab. Univ. Leiden 189c (1927)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit. max. press. 40 atm\ndataClass.append(\"class II\")\ndataT.append(np.array([271.80, 273.06, 283.33, 293.34]))\ndataB.append(np.array([-176.34, -170.09, -165.40, -152.54]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 2\nspeciesName.append(\"C2H4\")\ndataRef.append(\"A. Eucken and A. Parts, Z. Phys. 
Chem B20 184 (1933)\")\ndataRefID.append(\"TO DO\")\n# pressure less than 100 Torr\ndataClass.append(\"class II\")\ndataT.append(np.array([181.13, 191.32, 193.01, 201.88, 203.46, 209.92, 212.22, 220.13, 223.12, \\\n 230.30, 232.38, 240.87, 242.86, 250.57, 252.99, 262.08, 273.20]))\ndataB.append(np.array([-428, -381, -374, -339, -333, -312, -305, -280, -275, \\\n -255, -250, -232, -229, -212, -207, -192, -176]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"C2H4\")\ndataRef.append(\"W. Cawood and H.S. Patterson, J. chem. Soc. 619 (1933)\")\ndataRefID.append(\"TO DO\")\n# max. press. 4 atm; \ndataClass.append(\"class II\")\ndataT.append(np.array([273.15, 294.15]))\ndataB.append(np.array([-162.9, -153.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 3\nspeciesName.append(\"C2H4\")\ndataRef.append(\"W. Cawood and H.S. Patterson, Phil. Trans. R. Soc. A236 77 (1937)\")\ndataRefID.append(\"TO DO\")\n# max. press. 4 atm\ndataClass.append(\"class II\")\ndataT.append(np.array([294.15]))\ndataB.append(np.array([-147.7]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 4\nspeciesName.append(\"C2H4\")\ndataRef.append(\"A. Michels, J. Gruyter, and F. Niesen, Physica, 3 346 (1936)\")\ndataRefID.append(\"TO DO\")\n# 5-term fit to PV data (P -> 80 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-162.39, -135.79, -113.65, -95.48, -80.57, -69.20, -59.58]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 5\nspeciesName.append(\"C2H4\")\ndataRef.append(\"E.E. Roper, J. phys. Chem., Ithaca 44 835 (1940)\")\ndataRefID.append(\"TO DO\")\n# max. press. 1072 torr\ndataClass.append(\"class II\")\ndataT.append(np.array([198.77, 223.22, 223.23, 273.15, 273.15, 343.10]))\ndataB.append(np.array([-314.6, -250.9, -250.5, -181.1, -170.7, -109.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 6a\nspeciesName.append(\"C2H4\")\ndataRef.append(\"R.B. Bird, E.L. Spotz, and J.O. Hirschfelder, J. chem. Phys. 18 1395 (1950)\")\ndataRefID.append(\"TO DO\")\n# Values of B given by authors listed, but work performed by A. Michels and M. Geldermans, Physica, 9 967 (1942)\n# 4-term fit of PV data (P -> 80 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-167.84, -140.33, -117.97, -99.74, -84.92, -72.34, -62.29]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 6b\nspeciesName.append(\"C2H4\")\ndataRef.append(\"R.B. Bird, E.L. Spotz, and J.O. Hirschfelder, J. chem. Phys. 18 1395 (1950)\")\ndataRefID.append(\"TO DO\")\n# Values of B given by authors listed, but work performed by A. Michels and M. Geldermans, Physica, 9 967 (1942)\n# 4-term fit of PV data (P -> 170 atm)\ndataClass.append(\"class I\")\ndataT.append(np.array([298.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-140.55, -118.07, -99.90, -85.11, -72.61, -62.07]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 7\nspeciesName.append(\"C2H4\")\ndataRef.append(\"G.A. Bottomley, Miss D.S. Massie, and R. Whytlaw-Gray, Proc. R. Soc. 
A200 210 (1950)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class II\")\ndataT.append(np.array([295.51]))\ndataB.append(np.array([-147.0]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 8\nspeciesName.append(\"C2H4\")\ndataRef.append(\"H.M. Ashton and E.S. Halberstadt, Proc. R. Soc. A245 373 (1958)\")\ndataRefID.append(\"TO DO\")\n# values obtained from refractive index mesurements, errors not less than +/- 4\ndataClass.append(\"N/A\")\ndataT.append(np.array([299.8, 323.4, 337.0]))\ndataB.append(np.array([-145, -124, -108]))\ndataBerr.append(np.array([4, 4, 4]))\n\n# original index in 1980 compilation: 9\nspeciesName.append(\"C2H4\")\ndataRef.append(\"D. McA. Mason and B.E. Eakin, J. chem. Engng Data 6 499 (1961)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([288.70]))\ndataB.append(np.array([-142.1]))\ndataBerr.append(np.array([3]))\n\n# original index in 1980 compilation: 10\nspeciesName.append(\"C2H4\")\ndataRef.append(\"E.G. Butcher and R.S. Dadson, Proc. R. Soc. A277 448 (1964)\")\ndataRefID.append(\"TO DO\")\n# 4-term fit, errors +/- 0.3 in B\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 273.15, 283.15, 299.65, 313.15, 323.15, 333.15, 343.15, 353.15, \\\n 363.15, 373.15, 423.15, 473.15]))\ndataB.append(np.array([-172.8, -161.4, -150.9, -134.8, -123.3, -115.1, -108.0, -101.2, -95.0, \\\n -88.6, -83.2, -59.8, -42.9]))\ndataBerr.append(np.array([0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, \\\n 0.3, 0.3, 0.3, 0.3]))\n\n# original index in 1980 compilation: 11\nspeciesName.append(\"C2H4\")\ndataRef.append(\"W. Thomas and M. Zander, Z. agnew. Phys. 20 417 (1966)\")\ndataRefID.append(\"TO DO\")\n# 3-term fir, max. press. 21 atm \ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 283.15, 293.15, 303.15, 313.15, 323.15]))\ndataB.append(np.array([-156.3, -144.9, -136.7, -128.5, -120.7, -113.6]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 12\nspeciesName.append(\"C2H4\")\ndataRef.append(\"A. Sass, B.F. Dodge, and R.H. Bretton, J. chem. Engng Data 12 168 (1967)\")\ndataRefID.append(\"TO DO\")\n# P range 8-500 atm, first point is 9-term fit to PV data in rho-series, second point is 7-term fit\ndataClass.append(\"class II\")\ndataT.append(np.array([313.15, 373.15]))\ndataB.append(np.array([-116.1, -84.9]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 13\nspeciesName.append(\"C2H4\")\ndataRef.append(\"P.S. Ku and B.F. Dodge, J. chem. Engng Data 12 158 (1967)\")\ndataRefID.append(\"TO DO\")\n# 3-term fit of PV data, P values up to 100 atm \ndataClass.append(\"class II\")\ndataT.append(np.array([373.15]))\ndataB.append(np.array([-87.5]))\ndataBerr.append(BerrCalc(dataB[-1], 2))\n\n# original index in 1980 compilation: 14\nspeciesName.append(\"C2H4\")\ndataRef.append(\"S. Angus, B. 
Armstrong, and K.M de Reuck, International Thermodynamic Tables of the Fluid State, Ethylene,1972, Buttersorths, London 1974\")\ndataRefID.append(\"TO DO\")\n# error not given, but assumed class I\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 275, 280, 285, 290, 295, 298.15, 300, \\\n 305, 310, 315, 320, 325, 330, 335, 340, 345, 350, 355, 360, 365, \\\n 370, 375, 380, 385, 390, 395, 400, 405, 410, 415, 420, 425]))\ndataB.append(np.array([-168.7, -166.4, -160.4, -154.5, -148.9, -143.5, -140.2, -138.3, \\\n -133.4, -128.7, -124.2, -119.9, -115.8, -112.0, -108.3, -104.8, -101.4, -98.2, -95.1, -92.2, -89.4, \\\n -86.6, -84.0, -81.5, -79.0, -76.6, -74.3, -72.0, -69.8, -67.6, -65.5, -63.3, -61.3]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 16a\nspeciesName.append(\"C2H4\")\ndataRef.append(\"R.C. Lee and W.C. Edmister, A.I.Ch.E. Jl. 16 1047 (1970)\")\ndataRefID.append(\"TO DO\")\n# slope-intercept calcualtions\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 323.15, 348.15]))\ndataB.append(np.array([-145.60, -120.40, -100.80]))\ndataBerr.append(np.array([4.8, 1.3, 1.1]))\n\n# original index in 1980 compilation: 16b\nspeciesName.append(\"C2H4\")\ndataRef.append(\"R.C. Lee and W.C. Edmister, A.I.Ch.E. Jl. 16 1047 (1970)\")\ndataRefID.append(\"TO DO\")\n# curve fit\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 323.15, 348.15]))\ndataB.append(np.array([-152.50, -122.10, -101.50]))\ndataBerr.append(np.array([5.0, 2.4, 0.8]))\n\n# original index in 1980 compilation: 17\nspeciesName.append(\"C2H4\")\ndataRef.append(\"W. Göpel and T. Dorfmüller, Z. phys. Chem. Frankf. Ausg. 82 58 (1972)\")\ndataRefID.append(\"TO DO\")\n# standard deviations given for B\ndataClass.append(\"N/A\")\ndataT.append(np.array([199.7, 209.2, 224.0, 233.2, 248.2, 273.2, 296.7, 343.1]))\ndataB.append(np.array([-317.43, -288.51, -250.52, -233.01, -206.54, -173.06, -147.31, -109.73]))\ndataBerr.append(np.array([0.32, 0.34, 0.36, 0.37, 0.40, 0.42, 0.46, 0.46]))\n\n# original index in 1980 compilation: 18a\nspeciesName.append(\"C2H4\")\ndataRef.append(\"N.J. Trappeniers, T. Wassenaar, and G.J. Wolkers, Physica 82A 305 (1975)\")\ndataRefID.append(\"TO DO\")\n# 5-term fit to PVT data\ndataClass.append(\"class I\")\ndataT.append(np.array([273.15, 279.15]))\ndataB.append(np.array([-168.3, -160.8]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 18b\nspeciesName.append(\"C2H4\")\ndataRef.append(\"N.J. Trappeniers, T. Wassenaar, and G.J. Wolkers, Physica 82A 305 (1975)\")\ndataRefID.append(\"TO DO\")\n# 9-term fit to PVT data\ndataClass.append(\"class I\")\ndataT.append(np.array([283.65, 285.65, 289.15, 293.15, 298.15, 303.15, 323.15, 348.15]))\ndataB.append(np.array([-154.6, -152.3, -148.4, -144.2, -139.3, -134.5, -117.0, -100.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 18c\nspeciesName.append(\"C2H4\")\ndataRef.append(\"N.J. Trappeniers, T. Wassenaar, and G.J. Wolkers, Physica 82A 305 (1975)\")\ndataRefID.append(\"TO DO\")\n# 6-term fit to PVT data\ndataClass.append(\"class I\")\ndataT.append(np.array([373.15, 398.15, 423.15]))\ndataB.append(np.array([-84.9, -72.1, -61.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 19\nspeciesName.append(\"C2H4\")\ndataRef.append(\"D.R. Douslin and R.H. Harrison, J. chem. Thermodyn. 
8 301 (1976)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"class I\")\ndataT.append(np.array([238.15, 243.15, 248.15, 253.15, 258.15, 263.15, 268.15, 273.15, 278.15, 282.35, 283.15, \\\n 288.15, 293.15, 298.15, 303.15, 323.15, 348.15, 373.15, 398.15, 423.15, 448.15]))\ndataB.append(np.array([-220.9, -212.0, -203.5, -195.5, -188.1, -180.9, -174.1, -167.6, -161.6, -156.7, -155.7, \\\n -150.3, -144.9, -139.8, -135.0, -117.7, -99.7, -84.8, -72.3, -61.6, -52.4]))\ndataBerr.append(BerrCalc(dataB[-1], 1))\n\n# original index in 1980 compilation: 20\nspeciesName.append(\"C2H4\")\ndataRef.append(\"J.W. Lee, Ph.D. thesis, University of London (1976)\")\ndataRefID.append(\"TO DO\")\n# estimated uncertainty in B is +/- 1.5\ndataClass.append(\"N/A\")\ndataT.append(np.array([243.6, 247.1, 251.1, 254.3, 258.9, 266.6, 270.4, 273.8, 278.8, 283.1, 288.8, 292.6]))\ndataB.append(np.array([-212.7, -206.5, -199.9, -194.9, -188.1, -177.5, -172.6, -168.3, -162.2, -157.2, -150.7, -146.5]))\ndataBerr.append(np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]))\n\n# data from 2002 compilation begin here\n\n# original index in 2002 compilation: 81-lev/has\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Levelt Sengers, J. M. H.; Hastings, J. R.; Int. J. Thermophys. 2 (1981) 269\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([223.15, 233.15, 243.15, 253.15, 263.15, 273.15]))\ndataB.append(np.array([-249.2, -233.0, -211.6, -194.7, -180.6, -167.5]))\ndataBerr.append(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))\n\n# original index in 2002 compilation: 88-hae/ker\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Haeusler, H.; Kerl, K.; Int. J. Thermophys. 9 (1988) 117\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([256.97, 256.97, 268.87, 268.87, 282.60, 282.60, 298.32, 298.32, 318.04, 318.04]))\ndataB.append(np.array([-202.8, -207.2, -186.2, -187.6, -155.7, -152.8, -139.7, -142.2, -123.1, -123.2]))\ndataBerr.append(np.array([4.0, 2.6, 4.0, 4.0, 8.0, 3.0, 4.0, 1.3, 6.0, 6.0]))\n\n# original index in 2002 compilation: 79-wax/dav\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Waxman, M.; Davis, H. A.; Adv. Chem. Ser. 182 (1979) 285\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.15, 298.15, 323.15, 348.16, 373.16, 398.16, 423.17, 448.17]))\ndataB.append(np.array([-167.7, -139.8, -117.6, -99.6, -84.6, -72.1, -61.3, -52.2]))\ndataBerr.append(np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]))\n\n# original index in 2002 compilation: 74-pom/spu\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Pompe, A.; Spurling, T. H.; Commonwealth Scientific & Indust. Res. Org. Div. of App. Organic Chemistry Technical Paper No. 1, CSIRO, Melbourne Aust., 42pp, (1974)\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([273.16, 298.16, 313.16, 323.16, 348.16, 373.16, 373.16, 373.16, 398.16, 423.16]))\ndataB.append(np.array([-166.4, -138.3, -120.5, -116.9, -99.0, -83.6, -83.8, -84.6, -72.2, -61.0]))\ndataBerr.append(np.array([3.0, 2.5, 6.0, 2.0, 2.0, 4.0, 4.0, 2.0, 1.5, 1.0]))\n\n# original index in 2002 compilation: 90-ach/bos\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Achtermann, H. J.; Bose, T. K.; Magnus, G.; Int. J. Thermophys. 
11 (1990) 133\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([283.15, 288.15, 293.15, 298.15, 303.00, 323.15, 348.15, 373.15]))\ndataB.append(np.array([-155.8, -150.3, -145.3, -140.2, -135.2, -118.0, -100.0, -85.1]))\ndataBerr.append(np.array([2.1, 2.0, 2.0, 1.9, 1.9, 1.7, 1.5, 1.4]))\n\n# original index in 2002 compilation: 93-mce/fan\nspeciesName.append(\"C2H4\")\ndataRef.append(\"McElroy, P. J.; Fang, J.; J. Chem. Eng. Data. 38 (1993) 410\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([283.15, 293.15, 303.15, 313.15, 323.15, 333.15]))\ndataB.append(np.array([-155.5, -144.6, -134.7, -126.0, -117.9, -109.7]))\ndataBerr.append(np.array([8.8, 8.2, 7.7, 7.3, 6.9, 6.5]))\n\n# original index in 2002 compilation: 92-bel/big\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Bell, T. N.; Bignell, C. M.; Dunlop, P. J.; Physica A: (Amsterdam). 181 (1992) 221\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([290.0, 300.0, 310.0]))\ndataB.append(np.array([-147.1, -136.5, -126.5]))\ndataBerr.append(np.array([1.5, 1.5, 1.5]))\n\n# original index in 2002 compilation: 91-lop/roz\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Lopatinskii, E. S.; Rozhnov, M. S.; Zhdanov, V. I.; Parnovskii, S. L.; Kudrya, Y. N.; Zh. Fiz. Khim. 65 (1991) 2060\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([293.15, 293.15]))\ndataB.append(np.array([-144.6, -146.3]))\ndataBerr.append(np.array([3.4, 3.5]))\n\n# original index in 2002 compilation: 80-pra/vis\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Prasad, D. H. L.; Viswanath, D. S.; J. Chem. Eng. Data. 25 (1980) 374\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([293.15, 323.15, 348.15, 373.15, 398.15, 423.15]))\ndataB.append(np.array([-140.2, -118.5, -99.9, -84.4, -71.1, -61.2]))\ndataBerr.append(np.array([2.4, 2.2, 2.0, 1.8, 1.7, 1.6]))\n\n# original index in 2002 compilation: 81-ohg/miz\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Ohgaki, K.; Mizuhaya, T.; Katayama, T.; J. Chem. Eng. Jpn. 14 (1981) 71\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.15, 298.15]))\ndataB.append(np.array([-140.6, -140.4]))\ndataBerr.append(np.array([0.4, 0.5]))\n\n# original index in 2002 compilation: 83-leh/ran\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Lehmann, J.; Rank, V.; Opel, G.; Z. Phys. Chem. (Leipzig). 264 (1983) 836\")\ndataRefID.append(\"TO DO\")\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.18, 323.28, 348.17, 373.12]))\ndataB.append(np.array([-147.6, -121.0, -102.8, -82.1]))\ndataBerr.append(np.array([15.0, 13.0, 12.0, 10.0]))\n\n# original index in 2002 compilation: 81-fin/rae\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Findeisen, R.; Raetzsch, M. T.; Z. Phys. Chem. (Leipzig). 262 (1981) 919\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([298.20, 323.20, 348.20, 373.20, 398.20]))\ndataB.append(np.array([-138.9, -115.2, -98.4, -82.8, -71.4]))\ndataBerr.append(np.array([3.4, 3.2, 3.0, 2.8, 2.7]))\n\n# original index in 2002 compilation: 81-mol-1\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Mollerup, J. M.; J. Chem. Thermodyn. 
17 (1985) 489\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([310.0]))\ndataB.append(np.array([-128.5]))\ndataBerr.append(np.array([0.6]))\n\n# original index in 2002 compilation: 84-ohg/sak\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Ohgaki, K.; Sakai, N.; Kano, Y.; Katayama, T.; J. Chem. Eng. Jpn. 17 (1984) 545\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([423.15, 473.15]))\ndataB.append(np.array([-61.9, -43.5]))\ndataBerr.append(np.array([0.5, 0.3]))\n\n# original index in 2002 compilation: 82-ohg/nak-1\nspeciesName.append(\"C2H4\")\ndataRef.append(\"Ohgaki, K.; Nakamura, Y.; Ariyasu, H.; Katayama, T.; J. Chem. Eng. Jpn. 15 (1982) 85\")\ndataRefID.append(\"TO DO\")\n# comment\ndataClass.append(\"N/A\")\ndataT.append(np.array([398.15, 398.15]))\ndataB.append(np.array([-72.0, -72.0]))\ndataBerr.append(np.array([0.3, 0.3]))\n\n\n\n"
] | [
[
"numpy.exp",
"numpy.array",
"numpy.trapz",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
msarrafj/LimiterDG | [
"afac2f4d5cd0312009f88e8eaf3cc6d273dcad5a"
] | [
"Codes/Pressure_driven/Homogen/NIPGvsUpwind/Limiter/flux_limiter.py"
] | [
"from firedrake import *\nimport numpy as np\nimport math,sys\nimport time as tm\nfrom matplotlib import pyplot as plt\nfrom Limiter.hsign import *\nnp.set_printoptions(precision=9)\nfrom warnings import *\n\nclass get_flux(SignFlip):\n def __init__(self,mesh,fluxes):\n self.mesh = mesh\n self.fluxes = fluxes\n\n def apply(self):\n hsign,cell_map,TwoCell,boundary_facet = SignFlip(self.mesh).HSIGN()\n #========================;\n # Step 0: Define H_{E,E'}:\n #========================;\n flux_val= self.fluxes.vector().array()\n row , col = np.shape(cell_map)\n old_FLUX = np.zeros((row,col))\n for irow in range(row):\n for icol in range(col):\n old_FLUX[irow,icol] = flux_val[cell_map[irow,icol]]\n # print('Old flux:\\n',old_FLUX)\n FLUX = np.multiply(old_FLUX,hsign)\n return FLUX\n\n\nclass flux_limiter(SignFlip):\n def __init__(self,mesh,sol_0,sol,fluxes,solMax,solMin,dt):\n # def __init__(self,mesh,sol_0,sol,wells_avg,q_I,q_P,fluxes,solMax,solMin,dt):\n self.mesh = mesh\n self.sol_0 = sol_0\n self.sol = sol\n # self.wells_avg = wells_avg.vector().array()\n # self.q_I = q_I.vector().array()\n # self.q_P = q_P.vector().array()\n self.fluxes = fluxes\n # self.rPAvg_0 = rPAvg_0.vector().array()\n # self.rPAvg = rPAvg.vector().array()\n self.solMax = solMax\n self.solMin = solMin\n self.dt = dt\n\n\n # def wells_update(self,s):\n # fw = lambda t:\\\n # ( ((t-0.15)/0.7) * ((t-0.15)/0.7) * 1./(5e-4) )\\\n # /( ( ((t-0.15)/0.7) * ((t-0.15)/0.7) * 1./(5e-4) ) +\\\n # ( (1-((t-0.15)/0.7)) *(1-((t-0.15)/0.7) ) * 1./(2e-3) ))\n # return (1000* self.q_I - 1000 * fw(s) * self.q_P) \n # # self.wells_update(s) is when we want to use this function in this class\n # # forexample self.wells_update(solAvg)\n\n\n def apply(self):\n hsign,cell_map,TwoCell,boundary_facet = SignFlip(self.mesh).HSIGN()\n # print('Hsign', signing)\n # print('cell_map\\n', cell_map)\n # print('TwoCell', TwoCell)\n # print('boundary_facet', boundary_facet)\n\n #========================;\n # Step 0: Define H_{E,E'}:\n #========================;\n flux_val= self.fluxes.vector().array()\n row , col = np.shape(cell_map)\n old_FLUX = np.zeros((row,col))\n for irow in range(row):\n for icol in range(col):\n old_FLUX[irow,icol] = flux_val[cell_map[irow,icol]]\n # print('Old flux:\\n',old_FLUX)\n FLUX = np.multiply(old_FLUX,hsign)\n # print('original FLUX:\\n',FLUX)\n\n\n #========================================;\n # Calculate element correction factors:\n #========================================;\n\n # container to store alphaEplus and alphaEminu\n Vdg0 = FunctionSpace(self.mesh, \"DG\", 0)\n V0 = Function(Vdg0)\n # print(type(self.solMax))\n deltaBound = 10*np.finfo(self.solMax).eps #Arithmetic perturbations may violate the bounds.\n epsBound = np.finfo(self.solMax).eps\n # print('delatabound',deltaBound)\n area = V0.interpolate(CellVolume(self.mesh)).vector().array()\n # print(area)\n solAvgDG0_0 = V0.interpolate(self.sol_0).vector().array() \n # print('sol DG0 avg previous time step:\\n',solAvgDG0_0)\n # print('sol DG0 avg previous time step: min:%f,max:%f'%(solAvgDG0_0.min(),solAvgDG0_0.max()))\n\n # assert (np.all((solAvgDG0_0 <= self.solMax+deltaBound ) & (solAvgDG0_0 >= self.solMin-deltaBound) ) )\\\n # ,\"solAvg of previous time step is not between min and max!\"\n\n #==================;\n # Start iteration ;\n #==================;\n maxIter = 10000\n epsFLUX = 1e-6\n epsDelta = 1e-6\n solAvg = solAvgDG0_0 # limiter solution from last time step s0 on DG0 space\n for numIter in range(maxIter):\n # print('numIter of 
Flux-limiter',numIter)\n \n # Save suppressed fluxes.\n FLUX_0 = FLUX\n alphaEplus= np.zeros((row,1))\n # alphaEplus = min(1, g.areaT .* max(0, (umax - deltaBound) - lowOrderMeans) ./ (-sum( supprFluxes .* (supprFluxes < 0), 2) * tau + eps) );\n\n # Qplus = (area * np.maximum(0,np.subtract((self.solMax-deltaBound),solAvg)) )\n # incompressible patch test\n Qplus = area*np.maximum(0, (0.2)*\\\n np.subtract((self.solMax-deltaBound),solAvg)) \n \n # Qplus = (area *(0.2 * 1000)* np.maximum(0,np.subtract((self.solMax-deltaBound),solAvg)) )\n # incompressible well problem\n # Qplus = area*np.maximum(0, (0.2*1000)*\\\n # np.subtract((self.solMax-deltaBound),solAvg)-self.wells_avg*self.dt) \n # Qplus = area*np.maximum(0, (0.2*1000)*\\\n # np.subtract((self.solMax-deltaBound),solAvg)+\\\n # self.wells_update(solAvg)*self.dt) \n\n # compressible patch test\n # Qplus = area * np.maximum(0,np.subtract\\\n # ((self.rPAvg * self.solMax-deltaBound),self.rPAvg_0 * solAvg)) \n # print('Qplus:\\n',Qplus)\n assert (np.all(Qplus >= 0))\\\n ,\"Qplus is less than zero!\"\n Pplus = -1 *np.where(FLUX<0,FLUX,0).sum(axis=1) * self.dt + epsBound\n # print('Pplus:\\n',Pplus)\n alphaEplus = np.minimum(1,np.divide(Qplus,Pplus))\n # print('alphaEplus at iteration %d:\\n'%numIter,alphaEplus)\n # if alphaEplus is 1 means that no limiting is needed. (100% of unlimited flux is allowed\n # without introducing a mean-value overshoot); If alphaEplus is 0 this means that:\n # means Qplus = 0 which means no mass is allowed to be stored in E without introducing\n # mean-value overshoot.\n \n # incompressible patch test\n Qminus = area *(np.minimum(0,(0.2)*\\\n np.subtract((self.solMin+deltaBound),solAvg )))\n #\n # Qminus = (area * np.maximum(0,np.subtract(solAvg,self.solMin+deltaBound) ))\n # Qminus = (area *(0.2*1000)* np.maximum(0,np.subtract(solAvg,self.solMin+deltaBound) ))\n # incompressible well problem\n # Qminus = area *(np.minimum(0,(0.2*1000)*\\\n # np.subtract((self.solMin+deltaBound),solAvg)-self.wells_avg*self.dt) )\n # Qminus = area *(np.minimum(0,(0.2*1000)*\\\n # np.subtract((self.solMin+deltaBound),solAvg ) -\\\n # self.wells_update(solAvg)*self.dt) )\n\n # compressible patch test\n # Qminus = area * np.minimum(0,np.subtract\\\n # ((self.rPAvg * self.solMin+deltaBound),self.rPAvg_0 * solAvg ))\n #\n # Qminus = (area * np.subtract(solAvg,(self.solMin-deltaBound) ) )\n # Qminus = (area * np.subtract(solAvg,(self.solMin) ) )\n # assert (np.all(Qminus >= 0))\\\n # ,\"Qminus is less than zero!\"\n assert (np.all(Qminus <= 0))\\\n ,\"Qminus is greater than zero!\"\n # Qminus = (area * np.subtract(solAvg,(self.solMin+deltaBound)) )\n # print('Qminus:\\n',Qminus)\n # Pminus = 1 *np.where(FLUX>0,FLUX,0).sum(axis=1) * self.dt + epsBound\n Pminus = -1 *np.where(FLUX>0,FLUX,0).sum(axis=1) * self.dt - epsBound\n # print('Pminus:\\n',Pminus)\n alphaEminus = np.minimum(1,np.divide(Qminus,Pminus))\n # print('alphaEminus at iteration %d:\\n' %numIter,alphaEminus)\n # alphaEminus shows the percentage of howmuch of mass (Pminus) is allowd to exit element E\n # if alphaEminus = 1 no limiting is needed. 
and alphaEminus = 0 \n # it means that no mass is allowed to exit and hence 100% of flux should be limited.\n # np.savetxt('Qminus.out',Qminus)\n\n #============================================;\n # Compute edge correction factors alpha_E,E' :\n #============================================;\n # met1_Start = tm.time()\n alphaEface = np.ones((row,col))\n for irow in range(row):\n for icol in range(col):\n facet = cell_map[irow,icol]\n # Handling boundary terms\n if facet in boundary_facet:\n if FLUX[irow,icol] < 0:\n alphaEface[irow,icol] = alphaEplus[irow] \n\n elif FLUX[irow,icol] > 0:\n alphaEface[irow,icol] = alphaEminus[irow] \n\n\n # Handling interior edges\n else:\n if FLUX[irow,icol] < 0:\n b0 = TwoCell[facet] # cellID of irow and the opposite cell\n oppCell_ID = int(b0[np.where( b0 != irow )]) # includes only oppoiste cell ID\n alphaEface[irow,icol] = np.minimum(alphaEplus[irow] , alphaEminus[oppCell_ID])\n\n elif FLUX[irow,icol] > 0:\n b0 = TwoCell[facet] # cellID of irow and the opposite cell\n oppCell_ID = int(b0[np.where( b0 != irow )]) # includes only oppoiste cell ID\n alphaEface[irow,icol] = np.minimum(alphaEminus[irow] , alphaEplus[oppCell_ID])\n\n\n # met1_End = tm.time()\n # print('alphaEface at numIter %d:\\n'%numIter,alphaEface,type(alphaEface))\n\n # met2_Start = tm.time()\n # # use list comprehension in python which is very fast\n # alphaEface_fast=[alphaEplus[irow]*np.where(FLUX[irow,icol]<0,1,0) +\n # alphaEminus[irow]*np.where(FLUX[irow,icol]>0,1,0)\n # if cell_map[irow,icol] in boundary_facet\n # else np.minimum(alphaEplus[irow] ,\n # alphaEminus[ int(TwoCell[cell_map[irow,icol]][np.where( TwoCell[cell_map[irow,icol]] != irow )]) ])\n # *np.where(FLUX[irow,icol]<0,1,0) + np.minimum(alphaEminus[irow] ,\n # alphaEplus[ int(TwoCell[cell_map[irow,icol]][np.where( TwoCell[cell_map[irow,icol]] != irow )]) ])\n # *np.where(FLUX[irow,icol]>0,1,0) \n # for irow in range(row)\n # for icol in range(col)\n # ]\n\n # alphaEface_fast = np.asarray(alphaEface_fast).reshape((row,col))\n # met2_End = tm.time()\n\n # print('alphaEface_fast:\\n',alphaEface_fast,type(alphaEface_fast))\n # # print('For-loop took: %f and comprehension took: %f'%(met1_End-met1_Start,met2_End-met2_Start))\n # # Comment: loop-comprehension(even withour asarray conversion and reshaping) \n # # for some reason takes more time than for-loop!\n #\n # Verify that all correction factors are within [0,1].\n assert (np.all((alphaEface <= 1 ) & (alphaEface >= 0) ) )\\\n ,\"alphaEface are not between 0 and 1!\"\n #=========================================;\n # Compute the updated solution and fluxes ;\n #=========================================;\n # Incompressible patch test\n solAvg = solAvg - self.dt/area * \\\n (1./(0.2))* np.multiply(alphaEface,FLUX).sum(axis=1)\n # solAvg = solAvg - self.dt/area * \\\n # (1./(0.2*1000))* np.multiply(alphaEface,FLUX).sum(axis=1)\n # incompressible well problem\n # solAvg = solAvg - self.dt/area * \\\n # (1./(0.2*1000))* np.multiply(alphaEface,FLUX).sum(axis=1) + \\\n # self.dt * (1./(0.2*1000)) * self.wells_avg\n # solAvg = solAvg - self.dt/area * \\\n # (1./(0.2*1000))* np.multiply(alphaEface,FLUX).sum(axis=1) - \\\n # self.dt * (1./(0.2*1000)) * self.wells_update(solAvg)\n \n # compressible patch test\n # solAvg = (self.rPAvg_0/self.rPAvg) * solAvg - self.dt/area * \\\n # (1./self.rPAvg)* np.multiply(alphaEface,FLUX).sum(axis=1)\n # print(\"rpAvg_0/rPAvg\\n\",self.rPAvg_0/self.rPAvg)\n # solAvg = solAvg - self.dt/area * \\\n # (1./self.rPAvg)* 
np.multiply(alphaEface,FLUX).sum(axis=1)\n # solAvg = solAvg - self.dt/area * \\\n # (1./self.rPAvg_0)* np.multiply(alphaEface,FLUX).sum(axis=1)\n # print('alphaEface',alphaEface[150])\n # last one seems to work the better than others \n\n # print('solAvgUpdated at iteration %d is:\\n'%numIter,solAvg)\n # print(alphaEface*FLUX)\n FLUX = FLUX * np.subtract(1.,alphaEface)\n # print('FLUX0 at iteration %d is:\\n'%numIter,FLUX_0)\n # print('updatedFLUX at iteration %d is:\\n'%numIter,FLUX)\n\n #=========================;\n # Check stopping criteria ;\n #=========================;\n # Compute new errors:\n # Criterion 1 \n # method 1(the maximum absolute row sum)\n # normFLUX = np.linalg.norm(FLUX,np.inf)\n # print('normFLUX',normFLUX)\n #\n #method 2 \n normFLUX = np.abs(FLUX).max()\n # print('normFLUX',normFLUX)\n \n # Criterion 2\n # method 1\n # normDelta = np.linalg.norm(np.subtract(FLUX_0,FLUX),np.inf)\n # method 2\n normDelta = np.abs(np.subtract(FLUX_0,FLUX)).max()\n # print('normDelta',normDelta)\n\n # Check stopping criteria.\n # if (normFLUX < epsFLUX)|(normDelta < epsDelta) :\n # if (normFLUX < epsFLUX)&(normDelta < epsDelta) :\n if normFLUX < epsFLUX :\n flag = 0\n break\n elif normDelta < epsDelta:\n flag = 1;\n break\n elif numIter == maxIter:\n flag = 2;\n break\n\n # print('Flux-limiter converged in %d iterations'%numIter)\n # print('normFLUX',normFLUX)\n # print('normDelta',normDelta)\n # print('Exit flag is:', flag)\n\n\n #==================================;\n # Compute new reconstructed values ;\n #==================================;\n V = self.sol.function_space()\n sol_value = self.sol.vector().array()\n # print('sol_value_old:\\n',sol_value)\n solPost = solAvg\n solAvgDG_current = V0.interpolate(self.sol).vector().array() # unlimited solution current step\n # print('sol_value current time (not limited)',solAvgDG_current)\n Diff = solPost-solAvgDG_current\n # print('solPost - solAvgDG_current:\\n',Diff)\n sol_cell_map = V.cell_node_map().values \n # print('sol_cell_map',sol_cell_map)\n row , col = np.shape(sol_cell_map)\n # Add Diff to our nodal solution\n for irow in range(row):\n for icol in range(col):\n sol_value[sol_cell_map[irow,icol]] = sol_value[sol_cell_map[irow,icol]] + Diff[irow]\n\n # print('sol_value_new:\\n',sol_value)\n u_sol = Function(V)\n u_sol.vector().set_local(sol_value)\n # solFinal = interpolate(u_sol,V)\n\n u_solAvgDG0 = V0.interpolate(u_sol).vector().array() \n print('limited sol avg after FL:\\n min:%f,max:%f'%(u_solAvgDG0.min(),u_solAvgDG0.max()))\n # print('u_sol DG0 avg:\\n',solAvgDG0)\n \n # assert (np.all((u_solAvgDG0 <= (self.solMax+1e-7) ) & (u_solAvgDG0 >= (self.solMin-1e-7) ) ) )\\\n # ,\"constructed avgs are not inbound\"\n \n if np.all((u_solAvgDG0 < self.solMax ) & (u_solAvgDG0 > self.solMin)):\n warn('WARNING* averages of constructed sol are not in the range of [solMin,solMax] ')\n\n return u_sol,numIter\n\n"
] | [
[
"numpy.minimum",
"numpy.abs",
"numpy.multiply",
"numpy.set_printoptions",
"numpy.subtract",
"numpy.finfo",
"numpy.all",
"numpy.ones",
"numpy.shape",
"numpy.zeros",
"numpy.where",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vd1371/GIAMS | [
"dd6551f344b8d0377131d4496846eb5d03b6189c"
] | [
"Optimizer/BruteForce.py"
] | [
"#Loading dependencies\nimport time\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nfrom multiprocessing import Queue, Process\nfrom itertools import product\nfrom ._objectives import LCASolution\nfrom ._objectives import _eval_sol\n\nclass BruteForce:\n\n\tdef __init__(self, **params):\n\n\t\t# Objective_function is an instance of lca\n\t\tlca = params.pop('lca')\n\t\tself.lca = lca\n\n\t\t# Gettign one instance the objective function\n\t\tself.lca_ref = lca()\n\n\t\t# lca has network, directory, log, and \n\t\tself.directory = self.lca_ref.directory\n\t\tself.log = self.lca_ref.log\n\n\t\tasset_mrr_shape = self.lca_ref.network.assets[0].mrr_model.mrr.shape\n\t\tn_assets = len(self.lca_ref.network.assets)\n\n\t\t# It will be used to reshape the solution to 1d and original shape\n\t\tself.solut_shape = (n_assets, asset_mrr_shape[0], asset_mrr_shape[1])\n\t\t# It will be used for producing all combination of n binaries\n\t\t# It equals to number of assets x number of elements x number of decision making step\n\t\t# .. in the future\n\t\tself.dimension = n_assets * asset_mrr_shape[0] * asset_mrr_shape[1]\n\n\tdef _solut_to_1d_shape(self, solut):\n\t\treturn solut.reshape(-1)\n\n\tdef _solut_to_original_shape(self, solut):\n\t\treturn np.array(solut).reshape(self.solut_shape)\n\n\tdef set_hyperparameters(self, **params):\n\n\t\tself.optimization_type = params.pop('optimzition_type', 'max')\n\t\tself.n_jobs = params.pop('n_jobs', 1)\n\n\t\tself.log.info((f\"BruteForce is started. \\n\"\n\t\t\t\t\tf\"Optimization type: {self.optimization_type} \\n\"\n\t\t\t\t\tf\"n_jobs: {self.n_jobs} \\n\"\n\t\t\t\t\t))\n\n\t\tif self.optimization_type == 'min':\n\t\t\tself.sorting_order = False\n\t\telif self.optimization_type == 'max':\n\t\t\tself.sorting_order = True\n\n\tdef _mrr_generator(self):\n\t\t'''Naive mrr generator for brute foce algorithm\n\t\t\n\t\tFuture development: Develop a smarter one\n\t\t'''\n\t\t# Getting all possible combination of the binary array for MRR\n\t\tall_combinations = product([0, 1], repeat = self.dimension)\n\n\t\tfor mrr in all_combinations:\n\t\t\tyield mrr\n\n\tdef _possible_solution(self, solution_que):\n\n\t\toptimization_start = time.time()\n\t\tfor i, mrr in enumerate(self._mrr_generator()):\n\n\t\t\twhile solution_que.qsize() > 100:\n\t\t\t\tpass\n\n\t\t\t# Creating a new solution\n\t\t\tsolut = self._solut_to_original_shape(mrr)\n\t\t\tnew_sol = LCASolution(lca = self.lca,\n\t\t\t\t\t\t\t\tsolut = solut,\n\t\t\t\t\t\t\t\tobj_func = self.lca_ref.network.objective)\n\n\t\t\t# printing the progress\n\t\t\tif i % 100 == 0:\n\t\t\t\tprint (f\"{i}/{2**self.dimension} solutions are suggested so far in \"\n\t\t\t\t\t\tf\"{time.time() - optimization_start:.2f} seconds\")\n\t\t\t\n\t\t\t# Checking if the generated mrr is valid\n\t\t\tif new_sol.is_valid():\n\t\t\t\tsolution_que.put(new_sol)\n\n\tdef optimize_parallel(self, verbose = 1):\n\n\t\t# Queue for generated mrrs\n\t\tsolution_que = Queue()\n\t\tsolution_generator = Process(target = self._possible_solution, args = (solution_que,))\n\t\tsolution_generator.start()\n\n\t\t# Creaing a pool of workers for analysis\n\t\tanalysis_que = Queue()\n\t\tworkers_pool = []\n\t\tfor i in range(self.n_jobs - 1):\n\t\t\tworker = Process(target = _eval_sol_q, args = (solution_que, analysis_que,))\n\t\t\tworker.start()\n\t\t\tworkers_pool.append(worker)\n\t\tprint (\"workers started analysis...\")\n\n\t\t# Getting the results and saving them\n\t\tfirst = True\n\t\tstart = time.time()\n\t\twhile True:\n\t\t\twhile 
not analysis_que.empty():\n\t\t\t\tsolut = analysis_que.get()\n\n\t\t\t\tif first:\n\t\t\t\t\tbest_solution = solut\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\tif solut.value > best_solution.value:\n\t\t\t\t\t\tbest_solution = solut\n\t\t\t\t\t\tif verbose == 1:\n\t\t\t\t\t\t\tprint (f'A better solution is found\\n{best_solution}')\n\t\t\t\t\t\tself.log.info(f\"BruteForce: Best solution so far: {best_solution} \\n\")\n\n\t\t\t\t# To check the last time any analysis has been produced\n\t\t\t\tstart = time.time()\n\n\t\t\t# If after a certain amount of time, there is nothing in the\n\t\t\t# ... analysis queue, then probably no more solutions will be\n\t\t\t# ... produced\n\t\t\tif time.time() - start > 120:\n\t\t\t\tans = input('more than 2 minutes but no new results, terminate? (y/n) :')\n\t\t\t\t\n\t\t\t\tif ans.lower() == \"y\":\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# Refresh time counter\n\t\t\t\t\tstart = time.time()\n\n\t\t# Joining all processes\n\t\tsolution_generator.join()\n\t\tfor worker in workers_pool:\n\t\t\tworker.join()\n\t\tprint (\"Done\")\n\n\tdef optimize_linear(self, verbose = 1):\n\n\t\toptimization_start = time.time()\n\t\t# Initialize the inactivity timer; otherwise the check below raises a\n\t\t# ... NameError if no valid solution has been found yet\n\t\tstart = time.time()\n\n\t\tfirst = True\n\t\tfor i, mrr in enumerate(self._mrr_generator()):\n\n\t\t\tif i % 100 == 0:\n\t\t\t\tprint (f\"{i}/{2**self.dimension} solutions are analyzed so far in \"\n\t\t\t\t\t\tf\"{time.time() - optimization_start:.2f} seconds\")\n\n\t\t\t# Creating a new solution\n\t\t\tsolut = self._solut_to_original_shape(mrr)\n\t\t\tsolut = LCASolution(lca = self.lca,\n\t\t\t\t\t\t\t\tsolut = solut,\n\t\t\t\t\t\t\t\tobj_func = self.lca_ref.network.objective)\n\t\t\t\n\t\t\t# Checking if the generated mrr is valid\n\t\t\tif solut.is_valid():\n\t\t\t\t# To keep track of the time of the last valid solution\n\t\t\t\tstart = time.time()\n\t\t\t\t# Evaluate the solution\n\t\t\t\tsolut.evaluate()\n\n\t\t\t\tif first:\n\t\t\t\t\tbest_solution = solut\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\tif solut.value > best_solution.value:\n\t\t\t\t\t\tbest_solution = solut\n\t\t\t\t\t\tif verbose == 1:\n\t\t\t\t\t\t\tprint (f'A better solution is found\\n{best_solution}')\n\t\t\t\t\t\tself.log.info(f\"BruteForce: Best solution so far: {best_solution} \\n\")\n\n\t\t\tif time.time() - start > 120:\n\t\t\t\tans = input('more than 2 minutes but no new results, terminate? (y/n) :')\n\t\t\t\t\n\t\t\t\tif ans.lower() == \"y\":\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# Refresh time counter\n\t\t\t\t\tstart = time.time()\n\t\tprint (\"Done\")\n\n\tdef optimize(self, verbose = 1):\n\t\t'''Conducting the optimization'''\n\t\tif self.n_jobs == 1:\n\t\t\tself.optimize_linear(verbose = verbose)\n\n\t\telse:\n\t\t\tself.optimize_parallel(verbose = verbose)\n\ndef _eval_sol_q(solution_que, analysis_que):\n\t'''Producer-consumer function\n\n\tsolution queue: a queue of possible solutions\n\tanalysis queue: a queue of analyzed solutions\n\t'''\n\twhile True:\n\t\twhile not solution_que.empty():\n\t\t\tsolut = solution_que.get()\n\t\t\tsolut.evaluate()\n\t\t\tanalysis_que.put(solut)\n\n\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xmnlab/bodoai-examples | [
"621e17356004031300a56288c7c27e47b6c247cc"
] | [
"bodoai_examples/udacity_course_ud188/intro_neural_networks/gradient_descent/main.py"
] | [
"\"\"\"\noriginal code available at:\n https://github.com/udacity/deep-learning-v2-pytorch/\n tree/master/intro-neural-networks/gradient-descent\n\n\nImplementing the Gradient Descent Algorithm\n\nIn this lab, we'll implement the basic functions of the Gradient Descent\nalgorithm to find the boundary in a small dataset. First, we'll start with\nsome functions that will help us plot and visualize the data.\n\n\n\"\"\"\nimport time\n\nimport bodo\nimport numpy as np\nimport pandas as pd\n\n# local\nfrom bodoai_examples.utils import bd_zip\n\n\ndef setup():\n url = (\n 'https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/'\n 'master/intro-neural-networks/gradient-descent/data.csv'\n )\n data = pd.read_csv(url, header=None)\n data.to_csv('/tmp/data.csv', header=None, index=None)\n\n data_check = pd.read_csv(url, header=None)\n pd.testing.assert_frame_equal(data, data_check)\n\n # duplicate data just for benchmark propose\n dfs = []\n for i in range(100):\n dfs.append(data)\n data = pd.concat(dfs).reset_index(drop=True)\n\n data.to_csv('/tmp/data_10k.csv', header=None, index=None)\n\n\n# NON BODO AI FUNCTIONS\n\n\ndef read_data():\n \"\"\"\n Return X (features) and y (target) data.\n\n Returns\n -------\n tuple[numpy.ndarray, numpy.ndarray]\n \"\"\"\n data = pd.read_csv('/tmp/data_10k.csv', header=None)\n\n X = np.array(data[[0, 1]])\n y = np.array(data[2])\n return X, y\n\n\n# Activation (sigmoid) function\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef output_formula(features, weights, bias):\n return sigmoid(np.dot(features, weights) + bias)\n\n\ndef error_formula(y, output):\n return -y * np.log(output) - (1 - y) * np.log(1 - output)\n\n\ndef update_weights(x, y, weights, bias, learnrate):\n output = output_formula(x, weights, bias)\n d_error = y - output\n weights += learnrate * d_error * x\n bias += learnrate * d_error\n return weights, bias\n\n\ndef train(features, targets, epochs, learnrate, graph_lines=False):\n \"\"\"\n Training function\n\n This function will help us iterate the gradient descent algorithm through\n all the data, for a number of epochs. 
It will also plot the data, and\n some of the boundary lines obtained as we run the algorithm.\n\n\n Parameters\n ----------\n features : numpy.ndarray\n targets : numpy.ndarray\n epochs : int\n learnrate : float\n graph_lines : bool, optional\n by default False\n \"\"\"\n errors = []\n n_records, n_features = features.shape\n last_loss = 99999999999.9\n weights = np.random.normal(0.0, 1 / n_features ** 0.5, n_features)\n bias = 0\n\n for e in range(epochs):\n for x, y in zip(features, targets):\n output = output_formula(x, weights, bias)\n error = error_formula(y, output) # noqa: F841\n weights, bias = update_weights(x, y, weights, bias, learnrate)\n\n # Printing out the log-loss error on the training set\n out = output_formula(features, weights, bias)\n loss = np.mean(error_formula(targets, out))\n errors.append(loss)\n if e % (epochs / 10) == 0:\n print(\"\\n========== Epoch\", e, \"==========\")\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n predictions = out > 0.5\n accuracy = np.mean((predictions == targets).astype(int))\n print(\"Accuracy: \", accuracy)\n # if graph_lines and e % (epochs / 100) == 0:\n # print(-weights[0]/weights[1], -bias/weights[1])\n\n\n# BODO AI FUNCTIONS\n\n\[email protected]\ndef bd_read_data():\n \"\"\"\n Return X (features) and y (target) data.\n\n Returns\n -------\n tuple[numpy.ndarray, numpy.ndarray]\n \"\"\"\n data = pd.read_csv('/tmp/data_10k.csv', header=None)\n\n X = data[['0', '1']].values\n y = data['2'].values\n return X, y\n\n\n# Activation (sigmoid) function\[email protected]\ndef bd_sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\[email protected]\ndef bd_output_formula(features, weights, bias):\n return bd_sigmoid(np.dot(features, weights) + bias)\n\n\[email protected]\ndef bd_error_formula(y, output):\n return -y * np.log(output) - (1 - y) * np.log(1 - output)\n\n\[email protected]\ndef bd_update_weights(x, y, weights, bias, learnrate):\n output = bd_output_formula(x, weights, bias)\n d_error = y - output\n weights += learnrate * d_error * x\n bias += learnrate * d_error\n return weights, bias\n\n\[email protected]\ndef bd_train(features, targets, epochs, learnrate, graph_lines=False):\n \"\"\"\n Training function\n\n This function will help us iterate the gradient descent algorithm through\n all the data, for a number of epochs. 
It will also plot the data, and\n some of the boundary lines obtained as we run the algorithm.\n\n\n Parameters\n ----------\n features : numpy.ndarray\n targets : numpy.ndarray\n epochs : int\n learnrate : float\n graph_lines : bool, optional\n by default False\n \"\"\"\n errors = []\n n_records, n_features = features.shape\n last_loss = 99999999999.9\n weights = np.random.normal(0.0, 1 / n_features ** 0.5, n_features)\n bias = 0\n\n for e in range(epochs):\n for x, y in bd_zip(features, targets):\n output = bd_output_formula(x, weights, bias)\n error = bd_error_formula(y, output) # noqa: F841\n weights, bias = bd_update_weights(x, y, weights, bias, learnrate)\n\n # Printing out the log-loss error on the training set\n out = bd_output_formula(features, weights, bias)\n loss = np.mean(bd_error_formula(targets, out))\n errors.append(loss)\n if e % (epochs / 10) == 0:\n print(\"\\n========== Epoch\", e, \"==========\")\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n predictions = out > 0.5\n accuracy = np.mean((predictions == targets).astype(int))\n print(\"Accuracy: \", accuracy)\n # if graph_lines and e % (epochs / 100) == 0:\n # print(-weights[0]/weights[1], -bias/weights[1])\n\n\ndef main():\n setup()\n\n # benchmark for NON bodo ai training\n if bodo.get_rank() == 0:\n t0 = time.time()\n\n np.random.seed(44)\n epochs = 100\n learnrate = 0.01\n\n X, y = read_data()\n print(\n '\\n\\nNON bodoai training, X.shape:', X.shape, ', y.shape:', y.shape\n )\n\n train(X, y, epochs, learnrate, True)\n\n print('\\n\\nTime for NON bodoai training:', time.time() - t0, 's\\n\\n')\n\n print('=' * 80, '\\n\\n')\n\n # benchmark for bodo ai training\n t0 = time.time()\n\n np.random.seed(44)\n epochs = 100\n learnrate = 0.01\n\n bd_X, bd_y = bd_read_data()\n\n print(\n '\\n\\nbodoai training, X.shape:', bd_X.shape, ', y.shape:', bd_y.shape\n )\n\n bd_train(bd_X, bd_y, epochs, learnrate, True)\n\n print('\\n\\nTime for bodoai training:', time.time() - t0, 's\\n\\n')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.dot",
"pandas.concat",
"pandas.read_csv",
"numpy.log",
"numpy.random.seed",
"numpy.random.normal",
"pandas.testing.assert_frame_equal",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jrderek/Deployment- | [
"eff84d3141136138154d26567d04d61dd76ee842"
] | [
"Employee_Retention_Prediction-main/apps/ingestion/load_validate.py"
] | [
"import json\nfrom os import listdir\nimport shutil\nimport pandas as pd\nfrom datetime import datetime\nimport os\nfrom apps.database.database_operation import DatabaseOperation\nfrom apps.core.logger import Logger\n\nclass LoadValidate:\n\t\"\"\"\n\t*****************************************************************************\n\t*\n\t* filename: LoadValidate.py\n\t* version: 1.0\n\t* author: CODESTUDIO\n\t* creation date: 05-MAY-2020\n\t*\n\t* change history:\n\t*\n\t* who when version change (include bug# if apply)\n\t* ---------- ----------- ------- ------------------------------\n\t*\n\t*\n\t* description: Class to load, validate and transform the data\n\t*\n\t****************************************************************************\n\t\"\"\"\n\n\tdef __init__(self, run_id, data_path, mode):\n\t\tself.run_id = run_id\n\t\tself.data_path = data_path\n\t\tself.logger = Logger(self.run_id, 'LoadValidate', mode)\n\t\tself.dbOperation = DatabaseOperation(self.run_id, self.data_path, mode)\n\n\n\tdef values_from_schema(self, schema_file):\n\t\ttry:\n\t\t\tself.logger.info('Start of Reading values from Schema...')\n\t\t\twith open('apps/database/'+schema_file+'.json', 'r') as f:\n\t\t\t\tdic = json.load(f)\n\t\t\t\tf.close()\n\t\t\tcolumn_names = dic['ColName']\n\t\t\tnumber_of_columns = dic['NumberofColumns']\n\t\t\tself.logger.info('End of Reading values from Schema...')\n\t\texcept ValueError:\n\t\t\tself.logger.exception('ValueError raised while Reading values from Schema')\n\t\t\traise ValueError\n\t\texcept KeyError:\n\t\t\tself.logger.exception('ValueError raised while Reading values from Schema')\n\t\t\traise ValueError\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Reading values From Schema: %s' % e)\n\t\t\traise e\n\t\treturn column_names, number_of_columns\n\n\tdef validate_column_length(self,number_of_columns):\n\t\t\"\"\"\n\t\t* method: validate_column_length\n\t\t* description: method to validates the number of columns in the csv files\n\t\t* return: none\n\t\t*\n\t\t* Parameters\n\t\t* NumberofColumns:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.logger.info('Start of Validating Column Length...')\n\t\t\tfor file in listdir(self.data_path):\n\t\t\t\tcsv = pd.read_csv(self.data_path + '/' + file)\n\t\t\t\tif csv.shape[1] == number_of_columns:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tshutil.move(self.data_path + '/' + file, self.data_path + '_rejects')\n\t\t\t\t\tself.logger.info(\"Invalid Columns Length :: %s\" % file)\n\n\t\t\tself.logger.info('End of Validating Column Length...')\n\n\t\texcept OSError:\n\t\t\tself.logger.exception('OSError raised while Validating Column Length')\n\t\t\traise OSError\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Validating Column Length: %s' % e)\n\t\t\traise e\n\n\tdef validate_missing_values(self):\n\t\t\"\"\"\n\t\t* method: validate_missing_values\n\t\t* description: method to validates if any column in the csv file has all values missing.\n\t\t* If all the values are missing, the file is not suitable for processing. 
It is to be moved to the rejects folder\n\t\t* return: none\n\t\t*\n\t\t* who when version change (include bug# if apply)\n\t\t* ---------- ----------- ------- ------------------------------\n\t\t*\n\t\t* Parameters\n\t\t* none:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.logger.info('Start of Validating Missing Values...')\n\t\t\tfor file in listdir(self.data_path):\n\t\t\t\tcsv = pd.read_csv(self.data_path + '/' + file)\n\t\t\t\tcount = 0\n\t\t\t\tfor columns in csv:\n\t\t\t\t\tif (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tshutil.move(self.data_path + '/' + file, self.data_path + '_rejects')\n\t\t\t\t\t\tself.logger.info(\"All Missing Values in Column :: %s\" % file)\n\t\t\t\t\t\tbreak\n\t\t\tself.logger.info('End of Validating Missing Values...')\n\n\t\texcept OSError:\n\t\t\tself.logger.exception('OSError raised while Validating Missing Values')\n\t\t\traise OSError\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Validating Missing Values: %s' % e)\n\t\t\traise e\n\n\n\tdef replace_missing_values(self):\n\t\t\"\"\"\n\t\t* method: replace_missing_values\n\t\t* description: method to replace the missing values in columns with \"NULL\"\n\t\t* return: none\n\t\t*\n\t\t* who when version change (include bug# if apply)\n\t\t* ---------- ----------- ------- ------------------------------\n\t\t*\n\t\t* Parameters\n\t\t* none:\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tself.logger.info('Start of Replacing Missing Values with NULL...')\n\t\t\tonly_files = [f for f in listdir(self.data_path)]\n\t\t\tfor file in only_files:\n\t\t\t\tcsv = pd.read_csv(self.data_path + \"/\" + file)\n\t\t\t\tcsv.fillna('NULL', inplace=True)\n\t\t\t\tcsv.to_csv(self.data_path + \"/\" + file, index=None, header=True)\n\t\t\t\tself.logger.info('%s: File Transformed successfully!!' 
% file)\n\t\t\tself.logger.info('End of Replacing Missing Values with NULL...')\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Replacing Missing Values with NULL: %s' % e)\n\n\n\tdef archive_old_files(self):\n\t\t\"\"\"\n\t\t* method: archive_old_files\n\t\t* description: method to archive old rejected, validation, processed and result files\n\t\t* return: none\n\t\t*\n\t\t* who when version change\n\t\t* ---------- ----------- ------- ----------------------------\n\t\t*\n\t\t* Parameters\n\t\t* none:\n\t\t\"\"\"\n\t\tnow = datetime.now()\n\t\tdate = now.date()\n\t\ttime = now.strftime(\"%H%M%S\")\n\t\ttry:\n\t\t\tself.logger.info('Start of Archiving Old Rejected Files...')\n\t\t\tsource = self.data_path + '_rejects/'\n\t\t\tif os.path.isdir(source):\n\t\t\t\tpath = self.data_path + '_rejects/'\n\t\t\t\tif not os.path.isdir(path):\n\t\t\t\t\tos.makedirs(path)\n\t\t\t\tdest = path + '/rejects_' + str(date) + \"_\" + str(time)\n\t\t\t\tfiles = os.listdir(source)\n\t\t\t\tfor f in files:\n\t\t\t\t\tif not os.path.isdir(dest):\n\t\t\t\t\t\tos.makedirs(dest)\n\t\t\t\t\tif f not in os.listdir(dest):\n\t\t\t\t\t\tshutil.move(source + f, dest)\n\n\t\t\tself.logger.info('End of Archiving Old Rejected Files...')\n\n\t\t\tself.logger.info('Start of Archiving Old Validation Files...')\n\t\t\tsource = self.data_path + '_validation/'\n\t\t\tif os.path.isdir(source):\n\t\t\t\tpath = self.data_path + '_archive'\n\t\t\t\tif not os.path.isdir(path):\n\t\t\t\t\tos.makedirs(path)\n\t\t\t\tdest = path + '/validation_' + str(date) + \"_\" + str(time)\n\t\t\t\tfiles = os.listdir(source)\n\t\t\t\tfor f in files:\n\t\t\t\t\tif not os.path.isdir(dest):\n\t\t\t\t\t\tos.makedirs(dest)\n\t\t\t\t\tif f not in os.listdir(dest):\n\t\t\t\t\t\tshutil.move(source + f, dest)\n\n\t\t\tself.logger.info('End of Archiving Old Validation Files...')\n\n\t\t\tself.logger.info('Start of Archiving Old Processed Files...')\n\t\t\tsource = self.data_path + '_processed/'\n\t\t\tif os.path.isdir(source):\n\t\t\t\tpath = self.data_path + '_archive'\n\t\t\t\tif not os.path.isdir(path):\n\t\t\t\t\tos.makedirs(path)\n\t\t\t\tdest = path + '/processed_' + str(date) + \"_\" + str(time)\n\t\t\t\tfiles = os.listdir(source)\n\t\t\t\tfor f in files:\n\t\t\t\t\tif not os.path.isdir(dest):\n\t\t\t\t\t\tos.makedirs(dest)\n\t\t\t\t\tif f not in os.listdir(dest):\n\t\t\t\t\t\tshutil.move(source + f, dest)\n\n\t\t\tself.logger.info('End of Archiving Old Processed Files...')\n\n\t\t\tself.logger.info('Start of Archiving Old Result Files...')\n\t\t\tsource = self.data_path + '_results/'\n\t\t\tif os.path.isdir(source):\n\t\t\t\tpath = self.data_path + '_archive'\n\t\t\t\tif not os.path.isdir(path):\n\t\t\t\t\tos.makedirs(path)\n\t\t\t\tdest = path + '/results_' + str(date) + \"_\" + str(time)\n\t\t\t\tfiles = os.listdir(source)\n\t\t\t\tfor f in files:\n\t\t\t\t\tif not os.path.isdir(dest):\n\t\t\t\t\t\tos.makedirs(dest)\n\t\t\t\t\tif f not in os.listdir(dest):\n\t\t\t\t\t\tshutil.move(source + f, dest)\n\n\t\t\tself.logger.info('End of Archiving Old Result Files...')\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Archiving Old Files: %s' % e)\n\t\t\traise e\n\n\tdef move_processed_files(self):\n\t\t\"\"\"\n\t\t* method: move_processed_files\n\t\t* description: method to move processed files\n\t\t* return: none\n\t\t*\n\t\t* who when version change (include bug# if apply)\n\t\t* ---------- ----------- ------- ------------------------------\n\t\t*\n\t\t* Parameters\n\t\t* 
none:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.logger.info('Start of Moving Processed Files...')\n\t\t\tfor file in listdir(self.data_path):\n\t\t\t\tshutil.move(self.data_path + '/' + file, self.data_path + '_processed')\n\t\t\t\tself.logger.info(\"Moved the already processed file %s\" % file)\n\n\t\t\tself.logger.info('End of Moving Processed Files...')\n\t\texcept Exception as e:\n\t\t\tself.logger.exception('Exception raised while Moving Processed Files: %s' % e)\n\t\t\traise e\n\n\tdef validate_trainset(self):\n\t\t\"\"\"\n\t\t* method: validate\n\t\t* description: method to validate the data\n\t\t* return: none\n\t\t*\n\t\t* who when version change\n\t\t* ---------- ----------- ------- ------------------------------\n\t\t*\n\t\t* Parameters\n\t\t* none:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.logger.info('Start of Data Load, validation and transformation')\n\t\t\t# archive old files\n\t\t\tself.archive_old_files()\n\t\t\t# extracting values from training schema\n\t\t\tcolumn_names, number_of_columns = self.values_from_schema('schema_train')\n\t\t\t# validating column length in the file\n\t\t\tself.validate_column_length(number_of_columns)\n\t\t\t# validating if any column has all values missing\n\t\t\tself.validate_missing_values()\n\t\t\t# replacing blanks in the csv file with \"Null\" values\n\t\t\tself.replace_missing_values()\n\t\t\t# create database with given name, if present open the connection. Create table with columns given in schema\n\t\t\tself.dbOperation.create_table('training','training_raw_data_t',column_names)\n\t\t\t# insert csv files in the table\n\t\t\tself.dbOperation.insert_data('training','training_raw_data_t')\n\t\t\t# export data in table to csv file\n\t\t\tself.dbOperation.export_csv('training','training_raw_data_t')\n\t\t\t# move processed files\n\t\t\tself.move_processed_files()\n\t\t\tself.logger.info('End of Data Load, validation and transformation')\n\t\texcept Exception:\n\t\t\tself.logger.exception('Unsuccessful End of Data Load, validation and transformation')\n\t\t\traise Exception\n\n\tdef validate_predictset(self):\n\t\t\"\"\"\n\t\t* method: validate\n\t\t* description: method to validate the predict data\n\t\t* return: none\n\t\t*\n\t\t* Parameters\n\t\t* none:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.logger.info('Start of Data Load, validation and transformation')\n\t\t\t# archive old rejected files\n\t\t\tself.archive_old_files()\n\t\t\t# extracting values from schema\n\t\t\tcolumn_names, number_of_columns = self.values_from_schema('schema_predict')\n\t\t\t# validating column length in the file\n\t\t\tself.validate_column_length(number_of_columns)\n\t\t\t# validating if any column has all values missing\n\t\t\tself.validate_missing_values()\n\t\t\t# replacing blanks in the csv file with \"Null\" values\n\t\t\tself.replace_missing_values()\n\t\t\t# create database with given name, if present open the connection! Create table with columns given in schema\n\t\t\tself.dbOperation.create_table('prediction','prediction_raw_data_t', column_names)\n\t\t\t# insert csv files in the table\n\t\t\tself.dbOperation.insert_data('prediction','prediction_raw_data_t')\n\t\t\t# export data in table to csv file\n\t\t\tself.dbOperation.export_csv('prediction','prediction_raw_data_t')\n\t\t\t# move processed files\n\t\t\tself.move_processed_files()\n\t\t\tself.logger.info('End of Data Load, validation and transformation')\n\t\texcept Exception:\n\t\t\tself.logger.exception('Unsuccessful End of Data Load, validation and transformation')\n\t\t\traise Exception\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
victorgmlyra/CameraCalib | [
"5485872e39e054c4aa6733484fe500a065f909bc"
] | [
"calibration.py"
] | [
"'''\r\n File name: calibration.py\r\n Author: Victor Lyra\r\n Date created: 17/02/2020\r\n Date last modified: 20/25/2013\r\n Python Version: 3.7\r\n'''\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport glob, os\r\n\r\n# VARIABLES\r\nframes_from_video = True # If true => Extract frames from a video file\r\nundistort = True # If true => Undistort all calibration images\r\ncamera_name = 'note10' # Name directories, video and output file\r\nextension = 'mp4' # Video Extension\r\nvideo = '' # If empty => videos/{camera_name}.mp4\r\nskip_frames = 25 # Number of frames to skip in video\r\nboard_size = (6, 5) # Chess Board Ratio\r\n\r\n\r\ndef video_to_frames(skip_frames, video):\r\n print('Extracting frames from video...')\r\n directory = 'images/' + camera_name\r\n try:\r\n os.mkdir(directory)\r\n except os.error:\r\n print('{} folder already exists.'.format(directory))\r\n\r\n if video == '':\r\n video = 'videos/{}.{}'.format(camera_name, extension)\r\n video = cv2.VideoCapture(video)\r\n\r\n ret, frame = video.read()\r\n num_frame = 0\r\n image_name = 0\r\n\r\n while ret:\r\n if(num_frame % skip_frames == 0):\r\n cv2.imwrite(directory + '/{:03d}.jpg'.format(image_name), frame)\r\n image_name += 1\r\n\r\n num_frame += 1\r\n ret, frame = video.read()\r\n\r\n\r\n# termination criteria\r\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\nobjp = np.zeros((board_size[1]*board_size[0],3), np.float32)\r\nobjp[:,:2] = np.mgrid[0:board_size[0],0:board_size[1]].T.reshape(-1,2)\r\n\r\n# Arrays to store object points and image points from all the images.\r\nobjpoints = [] # 3d point in real world space\r\nimgpoints = [] # 2d points in image plane.\r\n\r\nif frames_from_video:\r\n video_to_frames(skip_frames, video)\r\n\r\nimages = glob.glob('images/{}/*.jpg'.format(camera_name))\r\nimages.sort()\r\n\r\nfor fname in images:\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\n # Find the chess board corners\r\n ret, corners = cv2.findChessboardCorners(gray, board_size, None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n if ret == True:\r\n objpoints.append(objp)\r\n\r\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n\r\n # Draw and display the corners\r\n img = cv2.drawChessboardCorners(img, board_size, corners2,ret)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(200)\r\n\r\ncv2.destroyAllWindows()\r\n\r\nprint('Getting Calibration Values...')\r\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n\r\n# Calibration file\r\nprint('Writing Calibration File...')\r\ndistortion = ''\r\nfor k in dist[0]:\r\n distortion += '{0:.5f}'.format(k) + ' '\r\nwith open('calibration/' + camera_name + '.txt', 'w') as calibfile:\r\n calibfile.write('Intrinsic Matrix:\\n')\r\n for i in range(3):\r\n for j in range(3):\r\n calibfile.write('{0:.5f} '.format(mtx[i, j]))\r\n calibfile.write('\\n')\r\n calibfile.write('\\nDistortion Coefficients:\\n')\r\n calibfile.write(distortion)\r\n\r\nif undistort:\r\n print('Undistorting Images...')\r\n directory = 'undistorted/{}'.format(camera_name)\r\n try:\r\n os.mkdir(directory)\r\n except os.error:\r\n print('{} folder already exists.'.format(directory))\r\n\r\n for i, fname in enumerate(images):\r\n img = cv2.imread(fname)\r\n h, w = img.shape[:2]\r\n newcameramtx, 
roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\r\n\r\n # undistort\r\n dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\r\n # crop the image\r\n # x, y, w, h = roi\r\n # dst = dst[y:y+h, x:x+w]\r\n cv2.imwrite(directory + '/undist{:03d}.png'.format(i), dst)\r\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TBGAnsell/Protocol | [
"90e2da93a95f9fc81a9606a33a4c4e22f5532e97"
] | [
"lipidens/test_PyLipID_cutoffs/test_PyLipID_cutoffs.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nScript to plot the probability distribution of protein-lipid distances and test a range of lower and upper cutoff combinations.\n\nAuthor: Wanling Song, Modified: T. Bertie Ansell\n\n\"\"\"\n#%matplotlib inline\nimport seaborn as sns\nimport os\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylipid\nfrom pylipid.api import LipidInteraction\nprint(pylipid.__version__)\n\nfrom itertools import product\nimport mdtraj as md\nfrom pylipid.util import get_traj_info, check_dir\nimport matplotlib.ticker as ticker\nfrom itertools import product\nimport shutil\nimport pickle\n\ndef get_lipids(bilayer):\n \"\"\"\n Extract lipid names from inputted bilayer.\n \"\"\"\n if bilayer !=None:\n lip_list=list(set(re.findall(r'\\w[A-Z0-9]{2,}', bilayer)))\n elif bilayer==None:\n bilayer=str(input(\"\\nEnter lipid names for analysis seperated by space e.g. POPC DOPC: \"))\n lip_list=list(set(re.findall(r'\\w[A-Z0-9]{2,}', bilayer)))\n else:\n print(\"Bilayer not found, please define the bilayer composition\")\n exit()\n print(\"Lipids to test:\", lip_list)\n return lip_list\n\ndef load_traj(path):\n \"\"\"\n Load single trajectory used for obtaining lipid probability distribution.\n \"\"\"\n xtc_def=\"md_stride.xtc\"\n top_def=\"md_stride_firstframe.gro\"\n\n print(\"\\nLoading processed trajectories for cutoff testing:\")\n try:\n if os.path.isfile(\"{}/run1/{}\".format(path, xtc_def)):\n trajfile = \"{}/run1/{}\".format(path, xtc_def)\n else:\n print(\"\\n{}/run1/{} not found.\\nHave you processed the CG trajectories?\".format(path, xtc_def))\n r_alt=str(input(\"Do you wish to define alternative 'xtc' file name? (y/n): \"))\n if r_alt==\"y\":\n xtc_def=str(input(\"\\nDefine alternative 'xtc' file name: \"))\n if os.path.isfile(\"{}/run1/{}\".format(path, xtc_def)):\n trajfile=\"{}/run1/{}\".format(path, xtc_def)\n else:\n print(\"\\n{}/run1/{} not found.\".format(path, xtc_def))\n exit()\n elif r_alt==\"n\":\n print(\"Re-run protocol to process CG trajectories\")\n exit()\n else:\n print(\"INVALID: must enter y/n\")\n exit()\n if os.path.isfile(\"{}/run1/{}\".format(path, top_def)):\n topfile = \"{}/run1/{}\".format(path, top_def)\n else:\n print(\"\\n{}/run1/{} not found.\\nHave you processed the CG trajectories?\".format(path, top_def))\n r_alt=str(input(\"Do you wish to define alternative topology file? 
(y/n): \"))\n if r_alt==\"y\":\n top_def=str(input(\"\\nDefine alternative topology file name: \"))\n if os.path.isfile(\"{}/run1/{}\".format(path, top_def)):\n topfile = \"{}/run1/{}\".format(path, top_def)\n else:\n print(\"\\n{}/run1/{} not found.\".format(path, top_def))\n exit()\n elif r_alt==\"n\":\n print(\"Re-run protocol to process CG trajectories\")\n exit()\n else:\n print(\"INVALID: must enter y/n\")\n exit()\n traj = md.load(trajfile, top=topfile)\n except Exception as e:\n print(e)\n exit()\n return traj\n\ndef set_lipid(path, lipid):\n \"\"\"\n Establish save directory of cutoff test data.\n \"\"\"\n save_dir = \"{}/PyLipID_cutoff_test_{}\".format(path, lipid)\n fig_dir = check_dir(save_dir, \"Figures\", print_info=False)\n return fig_dir\n\ndef plot_minimum_distances(distances, times, title, fn):\n \"\"\"\n Plot the per residue minimum distance to individual lipids.\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(3, 2.5))\n ax.plot(times, distances)\n ax.set_xlabel(r\"Time ($\\mu$s)\")\n ax.set_ylabel(\"Minimum distances (nm)\")\n ax.set_title(title)\n ax.set_ylim(0, 1.0)\n sns.despine(top=True, right=True)\n plt.tight_layout()\n plt.savefig(fn,format='pdf', dpi=200)\n plt.close()\n return\n\ndef compute_minimum_distance(traj, lipid, fig_dir, nprot, lipid_atoms=None,\n contact_frames=10, distance_threshold=0.65):\n \"\"\"\n Obtain minimum distances of specified lipid to each residue in the protein if contact comes within distance_threshold\n for longer than the number of contact_frames.\n \"\"\"\n DIST_CONTACT_ALL = []\n traj_info, _, _ = get_traj_info(traj, lipid, lipid_atoms=lipid_atoms)\n for protein_idx in np.arange(nprot, dtype=int):\n for residue_idx, residue_atom_indices in enumerate(\n traj_info[\"protein_residue_atomid_list\"][protein_idx]):\n dist_matrix = np.array([np.min(\n md.compute_distances(traj, np.array(list(product(residue_atom_indices, lipid_atom_indices)))),\n axis=1) for lipid_atom_indices in traj_info[\"lipid_residue_atomid_list\"]])\n # plot distances\n for lipid_idx in np.arange(len(dist_matrix)):\n if sum(dist_matrix[lipid_idx] < distance_threshold) >= contact_frames:\n DIST_CONTACT_ALL.append(dist_matrix[lipid_idx])\n plot_minimum_distances(dist_matrix[lipid_idx], traj.time/1000000.0,\n \"{}-{}{}\".format(traj_info[\"residue_list\"][residue_idx], lipid, lipid_idx),\n \"{}/dist_{}_{}{}.pdf\".format(fig_dir, traj_info[\"residue_list\"][residue_idx], lipid, lipid_idx))\n\n distance_set = np.concatenate(DIST_CONTACT_ALL)\n return distance_set\n\ndef plot_PDF(distance_set, num_of_bins, fn, lipid):\n \"\"\"\n Plot the probability distribution of minimum lipid distances.\n \"\"\"\n fig, ax = plt.subplots(1,1, figsize=(4,3))\n ax.hist(distance_set, bins=num_of_bins, density=True, color='lightcoral')\n ax.set_xlim(0, 1.0)\n ax.set_xlabel(\"Minimum distance (nm)\")\n ax.set_ylabel(\"Probablity Density\")\n ax.set_title(lipid)\n ax.xaxis.set_major_locator(ticker.MultipleLocator(0.2))\n ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.05))\n sns.despine(top=True, right=True)\n plt.tight_layout()\n plt.savefig(fn,format='pdf', dpi=200)\n return\n\ndef test_cutoffs(cutoff_list, trajfile_list, topfile_list, lipid, lipid_atoms, nprot=1,\n stride=1, save_dir=None, timeunit=\"us\"):\n \"\"\"\n Perform exhastive cutoff testing by caluclating the number of binding sites, average duration and number of contacting residues\n for each pair of cutoffs in the cutoff list.\n \"\"\"\n num_of_binding_sites = {}\n duration_avgs = {}\n num_of_contacting_residues = {}\n for 
cutoffs in cutoff_list:\n print(\"\\n Testing cutoff pair:\", cutoffs, \"\\n\")\n li = LipidInteraction(trajfile_list, topfile_list=topfile_list, cutoffs=cutoffs, lipid=lipid,\n lipid_atoms=lipid_atoms, nprot=1, timeunit=timeunit,\n save_dir=save_dir, stride=stride)\n li.collect_residue_contacts()\n li.compute_residue_duration()\n li.compute_binding_nodes(print_data=False) # switch print and write to False for cleaner output.\n num_of_binding_sites[cutoffs] = len(li.node_list)\n duration_avgs[cutoffs] = li.dataset[\"Duration\"].mean()\n num_of_contacting_residues[cutoffs] = sum(li.dataset[\"Duration\"]>0)\n shutil.rmtree(li.save_dir)\n return num_of_binding_sites, duration_avgs, num_of_contacting_residues\n\n\ndef exhaustive_search_setup(path, lower_cutoff, upper_cutoff, replicates):\n \"\"\"\n Obtain list of cutoff pairs to use for exhastive cutoff testing using user specified lower and upper cutoff lists.\n Load all coarse-grain trajectories to test.\n \"\"\"\n print(\"\\nInitiating exhastive cutoff search:\\n\")\n print(\"Lower cutoffs to test:\", lower_cutoff)\n print(\"Upper cutoffs to test:\", upper_cutoff, \"\\n\")\n cutoff_list = list(product(lower_cutoff, upper_cutoff))\n trajfile_list=[]\n topfile_list=[]\n\n xtc_def=\"md_stride.xtc\"\n top_def=\"md_stride_firstframe.gro\"\n\n for n in range(1,replicates+1):\n try:\n if os.path.isfile(\"{}/run{}/{}\".format(path, n, xtc_def)):\n trajfile=\"{}/run{}/{}\".format(path, n, xtc_def)\n else:\n print(\"\\n{}/run{}/{} not found.\".format(path, n, xtc_def))\n xtc_def=str(input(\"Define alternative 'xtc' file name: \"))\n if os.path.isfile(\"{}/run{}/{}\".format(path, n, xtc_def)):\n trajfile=\"{}/run{}/{}\".format(path, n, xtc_def)\n else:\n print(\"\\n{}/run{}/{} not found.\".format(path, n, xtc_def))\n exit()\n if os.path.isfile(\"{}/run{}/{}\".format(path, n, top_def)):\n topfile=\"{}/run{}/{}\".format(path, n, top_def)\n else:\n print(\"\\n{}/run{}/{} not found.\".format(path, n, top_def))\n top_def=str(input(\"Define alternative topology file name: \"))\n if os.path.isfile(\"{}/run{}/{}\".format(path, n, top_def)):\n topfile=\"{}/run{}/{}\".format(path, n, top_def)\n else:\n print(\"\\n{}/run{}/{} not found.\".format(path, n, top_def))\n exit()\n \n trajfile_list.append(trajfile)\n topfile_list.append(topfile)\n except Exception as e:\n print(e)\n exit()\n\n print(\"List of trajectories to test:\", trajfile_list, \"\\n\")\n return cutoff_list, trajfile_list, topfile_list\n\ndef ex_data_process(path, lipid, num_of_binding_sites, duration_avgs, num_of_contacting_residues, cutoff_list):\n \"\"\"\n Process output data from PyLipID (num_binding_sites, duration_avgs, num_of_contacting_residues). 
Save in pickle format.\n \"\"\"\n test_data = {\"num_of_binding_sites\": num_of_binding_sites,\n \"duration_avgs\": duration_avgs,\n \"num_of_contacting_residues\": num_of_contacting_residues,\n \"test_cutoff_list\": cutoff_list}\n with open(f\"{path}/PyLipID_cutoff_test_{lipid}/test_cutoff_data_{lipid}.pickle\", \"wb\") as f:\n pickle.dump(test_data, f, 2)\n\ndef graph(cutoff_list, metric_values, ylabel, title, fn):\n \"\"\"\n Plot the data from the exhastive cutoff testing.\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(len(cutoff_list)*0.42, 3.6))\n ax.scatter(np.arange(len(cutoff_list)), metric_values, s=50, color='lightcoral')\n ax.set_xticks(np.arange(len(cutoff_list)))\n ax.set_xticklabels(cutoff_list, rotation=45, ha='right')\n ax.set_xlabel(\"Dual cutoff\")\n ax.set_ylabel(ylabel)\n ax.set_title(title)\n sns.despine(top=True, right=True)\n plt.tight_layout()\n plt.savefig(fn, format='pdf',dpi=200)\n return\n"
] | [
[
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cheng-tan/estimators | [
"993c0805e8ae3fb018ec42901d83b45d39c80c62"
] | [
"ips_snips.py"
] | [
"import math\nfrom scipy.stats import beta\n\n\nclass Estimator:\n def __init__(self):\n ############################### Aggregates quantities ######################################\n #\n # 'n': IPS of numerator\n # 'N': total number of samples in bin from log (IPS = n/N)\n # 'd': IPS of denominator (SNIPS = n/d)\n # 'Ne': number of samples in bin when off-policy agrees with log policy\n # 'c': max abs. value of numerator's items (needed for Clopper-Pearson confidence intervals)\n # 'SoS': sum of squares of numerator's items (needed for Gaussian confidence intervals)\n #\n #################################################################################################\n\n self.data = {'n':0.,'N':0,'d':0.,'Ne':0,'c':0.,'SoS':0}\n\n def add_example(self, p_log, r, p_pred, count=1):\n self.data['N'] += count\n if p_pred > 0:\n p_over_p = p_pred/p_log\n self.data['d'] += p_over_p*count\n self.data['Ne'] += count\n if r != 0:\n self.data['n'] += r*p_over_p*count\n self.data['c'] = max(self.data['c'], r*p_over_p)\n self.data['SoS'] += ((r*p_over_p)**2)*count\n\n def get_estimate(self, type):\n if self.data['N'] == 0:\n raise('Error: No data point added')\n\n if type == 'ips':\n return self.data['n']/self.data['N']\n elif type == 'snips':\n if self.data['d'] != 0:\n return self.data['n']/self.data['d']\n else:\n return 0\n else:\n raise('Error: Incorrect estimator type {}. Supported options are ips or snips'.format(type))\n\n\n def get_interval(self, type, alpha=0.05):\n bounds = []\n num = self.data['n']\n den = self.data['N']\n maxWeightedCost = self.data['c']\n SoS = self.data['SoS']\n\n if type == \"clopper-pearson\":\n if maxWeightedCost > 0.0:\n successes = num / maxWeightedCost\n n = den / maxWeightedCost\n bounds.append(beta.ppf(alpha / 2, successes, n - successes + 1))\n bounds.append(beta.ppf(1 - alpha / 2, successes + 1, n - successes))\n elif type == \"gaussian\":\n if SoS > 0.0 and den > 1:\n zGaussianCdf = {\n 0.25: 1.15,\n 0.1: 1.645,\n 0.05: 1.96\n }\n\n variance = (SoS - num * num / den) / (den - 1)\n gaussDelta = zGaussianCdf[alpha] * math.sqrt(variance/den)\n bounds.append(num / den - gaussDelta)\n bounds.append(num / den + gaussDelta)\n\n if not bounds:\n bounds = [0, 0]\n return bounds\n"
] | [
[
"scipy.stats.beta.ppf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
olavosamp/kaggle-real-or-not | [
"fffd0716301362af8da403e86cae6b0d54cf5955"
] | [
"libs/train.py"
] | [
"import time\nimport uuid\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics\nfrom tqdm import tqdm\n\nimport libs.commons as commons\nimport libs.models as models\n\n\nclass EarlyStop:\n '''Early stopping monitor for loss minimization'''\n def __init__(self, patience=8, tol=1e-4):\n self.counter = 0\n self.patience = patience\n self.tol = tol\n self.best_loss = 999\n\n def step(self, loss):\n if self.best_loss - loss > self.tol:\n self.counter = 0\n self.best_loss = loss\n else:\n self.counter += 1\n return self.check_early_stop()\n\n def check_early_stop(self):\n if self.counter >= self.patience:\n return True # Stop\n return False # Do not stop\n\n\nclass MetricTracker:\n '''Helper for tracking metrics by epoch'''\n def __init__(self, metrics=[], threshold=0.5):\n self.tracked_metrics = metrics\n self.columns = [\"epoch\", \"phase\", \"loss\"] + metrics\n self.results_df = pd.DataFrame(columns=self.columns)\n self.threshold = threshold\n self.time_start = None\n\n @staticmethod\n def calculate_accuracy(target, prediction, num_samples):\n correct = target == prediction\n return np.sum(correct) / num_samples\n\n @staticmethod\n def calculate_f1_score(target, prediction):\n return sklearn.metrics.f1_score(target, prediction)\n\n @staticmethod\n def calculate_roc_auc(target, confidence):\n return sklearn.metrics.roc_auc_score(target, confidence)\n\n def epoch_start(self):\n if self.time_start is None:\n self.time_start = time.time()\n return None\n elapsed = time.time() - self.time_start\n self.time_start = None\n return elapsed\n\n def epoch_end(self, epoch, phase, target, confidence, loss, num_samples):\n prediction = confidence > self.threshold\n loss = loss / num_samples\n\n epoch_results = {\"epoch\": epoch, \"phase\": phase, \"loss\": loss}\n if \"accuracy\" in self.tracked_metrics:\n epoch_results[\"accuracy\"] = self.calculate_accuracy(target, prediction, num_samples)\n if \"f1_score\" in self.tracked_metrics:\n epoch_results[\"f1_score\"] = self.calculate_f1_score(target, prediction)\n if \"roc_auc\" in self.tracked_metrics:\n epoch_results[\"roc_auc\"] = self.calculate_roc_auc(target, confidence)\n if \"seconds\" in self.tracked_metrics:\n elapsed = self.epoch_start()\n if elapsed is None:\n raise ValueError\n epoch_results[\"seconds\"] = elapsed\n self.results_df = self.results_df.append(epoch_results, sort=False, ignore_index=True)\n\n def last_result(self, phase):\n last_index = self.results_df.query('phase == @phase').index[-1]\n return self.results_df.loc[last_index, :]\n\n def save_results(self, path, verbose=True):\n commons.create_folder(Path(path).parent)\n self.results_df.to_csv(path, index=False)\n if verbose:\n print(f\"\\nSaved results to\\n{path}\")\n\n def print_results(self, phase, result=None):\n if result is None:\n result = self.last_result(phase)\n elif not hasattr(result, \"shape\"):\n raise ValueError(\"Result must be either: \\'last\\' or a Series-like object.\")\n\n self.time_string = time.strftime(\"%H:%M:%S\", time.gmtime(result[\"seconds\"]))\n print(\"Epoch complete in \", self.time_string)\n print(f\"{phase} loss: {result['loss']:.4f}\")\n if \"accuracy\" in self.tracked_metrics:\n print(f\"{phase} accuracy: {result['accuracy']*100:.2f}%\")\n if \"f1_score\" in self.tracked_metrics:\n print(f\"{phase} F1: {result['f1_score']:.4f}\")\n if \"roc_auc\" in self.tracked_metrics:\n print(f\"{phase} AUC: {result['roc_auc']:.4f}\")\n\n\ndef predict(model, dataloader, device=models.device, threshold=None, 
use_metadata=True):\n '''\n Performs inference on the input data using a Pytorch model.\n model: model object\n Model which will perform inference on the data. Must support\n inference through syntax\n result = model(input)\n data: collection of model inputs\n A collection, such as a list, of model inputs. Each element of data\n must match the required arguments of model.\n\n threshold: None or float (optional)\n Decision threshold to evaluate model outputs.\n \n If None, result will be an array of floats representing model confidence\n for each input.\n \n If float, result will be an array of zeros and ones. Each input will be\n converted to one if model confidence is greater than threshold and zero\n if lesser.\n\n Returns:\n result: array of float\n '''\n model.to(device)\n model.eval()\n softmax = torch.nn.Softmax(dim=1)\n result_list = []\n for image, metadata in tqdm(dataloader):\n image = image.to(device)\n metadata = metadata.to(device)\n\n output = model(image)\n\n confidence = softmax(output).detach().cpu().numpy()[:, 1]\n result_list.append(confidence)\n\n return result_list\n\n\ndef train_feedforward_net(model, dataset, batch_size, optimizer, scheduler, num_epochs,\n loss_balance=True, identifier=None, device=models.device):\n print(\"\\nUsing device: \", device)\n device_params = {\"device\": device, \"dtype\": torch.float64}\n model.to(**device_params)\n\n tracked_metrics = [\"accuracy\", \"f1_score\", \"roc_auc\", \"seconds\"]\n early_stop = EarlyStop(tol=1e-5, patience=8)\n metrics = MetricTracker(metrics=tracked_metrics)\n\n # Create unique identifier for this experiment.\n if identifier is None:\n identifier = str(uuid.uuid4())\n else:\n identifier = str(identifier) + \"_\" + str(uuid.uuid4())\n phase_list = (\"train\", \"val\")\n\n # Setup experiment paths\n experiment_dir = Path(commons.experiments_path) / str(identifier)\n weights_folder = experiment_dir / \"weights\"\n commons.create_folder(weights_folder)\n\n # Instantiate loss and softmax.\n if loss_balance:\n weight = [1.0, dataset[\"train\"].imbalance_ratio()]\n weight = torch.tensor(weight).to(**device_params)\n cross_entropy_loss = torch.nn.CrossEntropyLoss(weight=weight)\n else:\n cross_entropy_loss = torch.nn.CrossEntropyLoss()\n softmax = torch.nn.Softmax(dim=1)\n\n # Define data loaders.\n data_loader = {phase: torch.utils.data.DataLoader(\n dataset[phase], batch_size=batch_size, shuffle=True, num_workers=4) for phase in phase_list}\n\n i = 0\n while i <= num_epochs and not early_stop.check_early_stop():\n print(\"\\nEpoch: {}/{}\".format(i+1, num_epochs))\n phase_loss = 0\n for phase in phase_list:\n print(\"\\n{} phase: \".format(str(phase).capitalize()))\n\n # Set model to training or evalution mode according to the phase.\n if phase == \"train\":\n model.train()\n else:\n model.eval()\n\n batch_target = []\n batch_confidence = []\n metrics.epoch_start()\n # Iterate over the dataset.\n for entry, target in tqdm(data_loader[phase]):\n # Update epoch target list to compute AUC(ROC) later.\n batch_target.append(target.numpy())\n\n # Load samples to device.\n entry = entry.to(**device_params)\n target = target.to(device, dtype=torch.int64)\n\n # Set gradients to zero.\n optimizer.zero_grad()\n\n # Calculate gradients only in the training phase.\n with torch.set_grad_enabled(phase==\"train\"):\n output = model(entry)\n loss = cross_entropy_loss(output, target)\n confidence = softmax(output).detach().cpu().numpy()[:, 1]\n\n # Backward gradients and update weights if training.\n if phase==\"train\":\n 
loss.backward()\n optimizer.step()\n\n # Update epoch loss and epoch confidence list.\n phase_loss += loss.item()\n batch_confidence.append(confidence)\n\n if phase == \"train\":\n scheduler.step()\n\n # Compute epoch loss, accuracy and AUC(ROC).\n num_samples = len(dataset[phase])\n batch_target = np.concatenate(batch_target, axis=0)\n batch_confidence = np.concatenate(batch_confidence, axis=0) # List of batch confidences\n metrics.epoch_end(i+1, phase, batch_target, batch_confidence, phase_loss, num_samples)\n\n metrics.print_results(phase)\n\n # Save best weights\n if early_stop.counter == 0: # Implies a new best validation loss\n weights_path = weights_folder / \"ffnet_epoch_{}_{}.pth\".format(i+1, identifier)\n torch.save(model.state_dict(), weights_path)\n\n i += 1\n early_stop.step(metrics.last_result('val')[\"loss\"])\n\n if len(metrics.results_df) < 1:\n print(\"No results obtained\")\n return\n\n best_id = metrics.results_df.query(\"phase == 'val'\")[\"loss\"].idxmin()\n best_epoch = metrics.results_df.loc[best_id, \"epoch\"]\n best_result = metrics.results_df.query(\"epoch == @best_epoch & phase == 'val'\").iloc[0,:]\n print(\"\\nBest epoch: \", best_epoch)\n metrics.print_results('val', result=best_result)\n\n # Save results from all epochs\n results_path = experiment_dir / \"epoch_{}_results.csv\".format(i+1)\n metrics.save_results(results_path)\n return results_path.parent\n\n\ndef train_model(model, dataset, batch_size, optimizer, scheduler, num_epochs, loss_balance=True,\n identifier=None, freeze_conv=False):\n # Create unique identifier for this experiment.\n if identifier is None:\n identifier = str(uuid.uuid4())\n else:\n identifier = str(identifier) + \"_\" + str(uuid.uuid4())\n phase_list = (\"train\", \"val\")\n\n # Setup experiment paths\n experiment_dir = Path(commons.experiments_path) / str(identifier)\n weights_folder = experiment_dir / \"weights\"\n commons.create_folder(weights_folder)\n\n freeze_convolutional_resnet(model, freeze_conv)\n\n print(\"Using device: \", device)\n\n # Instantiate loss and softmax.\n if loss_balance:\n weight = [1.0, dataset[\"train\"].imbalance_ratio()]\n weight = torch.tensor(weight).to(device)\n cross_entropy_loss = torch.nn.CrossEntropyLoss(weight=weight)\n else:\n cross_entropy_loss = torch.nn.CrossEntropyLoss()\n softmax = torch.nn.Softmax(dim=1)\n\n # Define data loaders.\n data_loader = {x: torch.utils.data.DataLoader(dataset[x],\n batch_size=batch_size, shuffle=True, num_workers=4)\n for x in phase_list}\n\n # Measures that will be computed later.\n tracked_metrics = [\"epoch\", \"phase\", \"loss\", \"accuracy\", \"auc\", \"seconds\"]\n epoch_auc = {x: np.zeros(num_epochs) for x in phase_list}\n epoch_loss = {x: np.zeros(num_epochs) for x in phase_list}\n epoch_accuracy = {x: np.zeros(num_epochs) for x in phase_list}\n results_df = pd.DataFrame()\n\n for i in range(num_epochs):\n print(\"\\nEpoch: {}/{}\".format(i+1, num_epochs))\n results_dict = {metric: [] for metric in tracked_metrics}\n for phase in phase_list:\n print(\"\\n{} phase: \".format(str(phase).capitalize()))\n\n # Set model to training or evalution mode according to the phase.\n if phase == \"train\":\n model.train()\n else:\n model.eval()\n\n epoch_target = []\n epoch_confidence = []\n epoch_seconds = time.time()\n # Iterate over the dataset.\n for entry, target in tqdm(data_loader[phase]):\n # Update epoch target list to compute AUC(ROC) later.\n epoch_target.append(target.numpy())\n\n # Load samples to device.\n entry = entry.to(device)\n target = 
target.to(device)\n\n # Set gradients to zero.\n optimizer.zero_grad()\n\n # Calculate gradients only in the training phase.\n with torch.set_grad_enabled(phase==\"train\"):\n output = model(entry)\n loss = cross_entropy_loss(output, target)\n confidence = softmax(output).detach().cpu().numpy()[:, 1]\n\n # Backward gradients and update weights if training.\n if phase==\"train\":\n loss.backward()\n optimizer.step()\n\n # Update epoch loss and epoch confidence list.\n epoch_loss[phase][i] += loss.item() * image.size(0)\n epoch_confidence.append(confidence)\n\n if phase == \"train\":\n scheduler.step()\n\n # Compute epoch loss, accuracy and AUC(ROC).\n sample_number = len(dataset[phase])\n epoch_target = np.concatenate(epoch_target, axis=0)\n epoch_confidence = np.concatenate(epoch_confidence, axis=0) # List of batch confidences\n epoch_loss[phase][i] /= sample_number\n epoch_correct = epoch_target == (epoch_confidence > 0.5)\n epoch_accuracy[phase][i] = (epoch_correct.sum() / sample_number)\n epoch_auc[phase][i] = sklearn.metrics.roc_auc_score(epoch_target,\n epoch_confidence)\n epoch_seconds = time.time() - epoch_seconds\n\n time_string = time.strftime(\"%H:%M:%S\", time.gmtime(epoch_seconds))\n print(\"Epoch complete in \", time_string)\n print(\"{} loss: {:.4f}\".format(phase, epoch_loss[phase][i]))\n print(\"{} accuracy: {:.4f}\".format(phase, epoch_accuracy[phase][i]))\n print(\"{} area under ROC curve: {:.4f}\".format(phase, epoch_auc[phase][i]))\n\n # Collect metrics in a dictionary\n results_dict[\"epoch\"].append(i+1) # Epochs start at 1\n results_dict[\"phase\"].append(phase)\n results_dict[\"loss\"].append(epoch_loss[phase][i])\n results_dict[\"accuracy\"].append(epoch_accuracy[phase][i])\n results_dict[\"auc\"].append(epoch_auc[phase][i])\n results_dict[\"seconds\"].append(epoch_seconds)\n\n # Save metrics to DataFrame\n results_df = results_df.append(pd.DataFrame(results_dict), sort=False, ignore_index=True)\n\n # Save model\n weights_path = weights_folder / \"resnet18_epoch_{}_{}.pth\".format(i+1, identifier)\n results_path = experiment_dir / \"epoch_{}_results.csv\".format(i+1)\n torch.save(model.state_dict(), weights_path)\n results_df.to_csv(results_path, index=False)\n\n return results_path.parent\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.tensor",
"numpy.concatenate",
"torch.set_grad_enabled",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BenevolentAI/benevolentai-dat | [
"a68c2a438bb5b9651e4e0cb72cdbe02738cc88a9",
"a68c2a438bb5b9651e4e0cb72cdbe02738cc88a9"
] | [
"diversity_analysis_tool/clean_ipums_script.py",
"diversity_analysis_tool/cli.py"
] | [
"import pandas as pd\nimport numpy as np\n\n\n# Converted raw IPUMS data set in .csv to cleaned dataframe\n# Cleaned dataframe is an example input to diversity_analysis_tool\ndef rename_by_code(df):\n # Sex\n df.loc[(df.SEX == 1), \"SEX\"] = \"Male\"\n df.loc[(df.SEX == 2), \"SEX\"] = \"Female\"\n\n # Race\n df.loc[(df.RACE == 1), \"RACE\"] = \"White\"\n df.loc[(df.RACE == 2), \"RACE\"] = \"Black/African American/Negro\"\n df.loc[(df.RACE == 3), \"RACE\"] = \"American Indian or Alaska Native\"\n df.loc[(df.RACE == 4), \"RACE\"] = \"Chinese\"\n df.loc[(df.RACE == 5), \"RACE\"] = \"Japanese\"\n df.loc[(df.RACE == 6), \"RACE\"] = \"Other Asian or Pacific Islander\"\n df.loc[(df.RACE == 7), \"RACE\"] = \"Other race, nec\"\n df.loc[(df.RACE == 8), \"RACE\"] = \"Two major races\"\n df.loc[(df.RACE == 9), \"RACE\"] = \"Three or more major races\"\n\n # Keeping the level for ordering in plots\n df[\"SES_LEVEL\"] = df.EDUC\n\n # Education\n df.loc[(df.EDUC == 0), \"EDUC\"] = \"N/A or no schooling\"\n df.loc[(df.EDUC == 1), \"EDUC\"] = \"Nursery school to grade 4\"\n df.loc[(df.EDUC == 2), \"EDUC\"] = \"Grade 5, 6, 7, or 8\"\n df.loc[(df.EDUC == 3), \"EDUC\"] = \"Grade 9\"\n df.loc[(df.EDUC == 4), \"EDUC\"] = \"Grade 10\"\n df.loc[(df.EDUC == 5), \"EDUC\"] = \"Grade 11\"\n df.loc[(df.EDUC == 6), \"EDUC\"] = \"Grade 12\"\n df.loc[(df.EDUC == 7), \"EDUC\"] = \"1 year of college\"\n df.loc[(df.EDUC == 8), \"EDUC\"] = \"2 years of college\"\n df.loc[(df.EDUC == 9), \"EDUC\"] = \"3 years of college\"\n df.loc[(df.EDUC == 10), \"EDUC\"] = \"4 years of college\"\n df.loc[(df.EDUC == 11), \"EDUC\"] = \"5+ years of college\"\n\n return df\n\n\ndef clean_ipums(filename, outputfile, save_to_file=True):\n df = pd.read_csv(filename)\n\n # ses: either inctot or educ could represent socioeconomic status\n subset_cols = [\"YEAR\", \"SEX\", \"AGE\", \"RACE\", \"EDUC\"]\n df = df[subset_cols]\n df = rename_by_code(df)\n # lower case columm names\n df.columns = map(str.lower, df.columns)\n\n if save_to_file:\n df.to_csv(outputfile, index=False)\n\n\nif __name__ == \"__main__\":\n ipums_filename = \"../input/usa_00004.csv\"\n outfilename = \"../input/ipums_test_cleaned.csv\"\n clean_ipums(ipums_filename, outfilename, save_to_file=True)\n",
"import argparse\nimport logging\nimport os\nimport pandas as pd\nfrom diversity_analysis_tool.diversity import AssessDiversity, transform_ses_order\n\nlogger = logging.getLogger(\"diversity_analysis_tool.main\")\nlogger.setLevel(logging.INFO)\n\n\ndef main():\n \"\"\"\n Command line entry point for assessing diversity in data\n \"\"\"\n parser = argparse.ArgumentParser(description=\"assess the diversity of your data\")\n parser.add_argument(\n \"input_data\",\n type=str,\n help=\"Path to the csv file containing the data you want to assess.\",\n )\n parser.add_argument(\n \"output_dir\", type=str, help=\"Path to a directory where results will stored.\"\n )\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Increase logging verbosity.\"\n )\n args = parser.parse_args()\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n if not os.path.isfile(args.input_data):\n logger.error(f\"{args.input_data} is not a valid path to a file\")\n exit(1)\n if not os.path.isdir(args.output_dir):\n logger.error(f\"{args.output_dir} does not exist, creating directory\")\n\n data_df = pd.read_csv(args.input_data)\n\n logger.debug(\n \"Converted data to pandas data frame. Creating AssessDiversity instance\"\n )\n # if there is a column describing the ses levels in the data frame, order the levels accordingly\n assess_diversity = AssessDiversity(None, None, None, transform_ses_order)\n\n assess_diversity.create_diversity_analysis_report(\n data_df,\n 5,\n \"age\",\n \"sex\",\n \"ethnicity\",\n \"race\",\n \"educ\",\n \"is_deceased\",\n args.output_dir,\n )\n logger.info(\"Assessment complete. See {} for results\".format(args.output_dir))\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kevinddchen/sudoku | [
"135b47ffcfade88628acabacd0a2c916da98a9e4"
] | [
"tests/test_sudoku.py"
] | [
"from time import time\nfrom typing import Iterator\n\nimport numpy as np\n\nfrom sudoku.solve import Puzzle, solve\n\n# ------------------------------------------------------------------------------\n\n'''\nTest sudoku solver on 100 puzzles taken from\nhttps://projecteuler.net/problem=96.\n\nUsage:\n `python test_sudoku.py`\n'''\n\n# ------------------------------------------------------------------------------\n\ndef read_puzzles(filename: str = 'assets/raw_puzzles.txt') -> Iterator[np.ndarray]:\n '''Read sudoku puzzles from file.'''\n with open(filename) as f:\n while f.readline(): ## Simultaneously checks for EOF and removes header\n arr = []\n for _ in range(9):\n x = f.readline()\n x = x.strip('\\n')\n x = [int(c) for c in x]\n arr.append(x)\n yield np.array(arr, dtype=np.uint8)\n\n# ------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n S = 0 ## Keep track of sum of the 3-digit number in the top-left corner of each puzzle\n t = time()\n\n for i, arr in enumerate(read_puzzles()):\n puzzle = Puzzle.fromArray(arr)\n puzzle = solve(puzzle)\n assert puzzle.is_solved(), f'Grid {i+1} not solved.'\n\n S += puzzle._grid[0, 0]*100 + puzzle._grid[0, 1]*10 + puzzle._grid[0, 2]\n\n assert S == 24702, f'Sum {S} does not match expected value.'\n\n print('Test passed.')\n print(f'Total time: {time()-t:.3f} sec.')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mevtorres/astrotools | [
"42047837ef59f646a7519e82562bcafd5ad0d5f0"
] | [
"spectral_cube/tests/data/make_test_cubes.py"
] | [
"\"\"\"\nCreates several 4D fits images with a single stokes axis,\nin various transpositions\n\"\"\"\n\nfrom astropy.io import fits\nfrom astropy import wcs\nimport numpy as np\n\nHEADER_FILENAME = 'header_jybeam.hdr'\n\ndef transpose(d, h, axes):\n d = d.transpose(np.argsort(axes))\n h2 = h.copy()\n\n for i in range(len(axes)):\n for key in ['NAXIS', 'CDELT', 'CRPIX', 'CRVAL', 'CTYPE', 'CUNIT']:\n h2['%s%i' % (key, i + 1)] = h['%s%i' % (key, axes[i] + 1)]\n\n return d, h2\n\n\nif __name__ == \"__main__\":\n np.random.seed(42)\n\n beams = np.recarray(4, dtype=[('BMAJ', '>f4'), ('BMIN', '>f4'),\n ('BPA', '>f4'), ('CHAN', '>i4'),\n ('POL', '>i4')])\n beams['BMAJ'] = [0.4,0.3,0.3,0.4] # arcseconds\n beams['BMIN'] = [0.1,0.2,0.2,0.1]\n beams['BPA'] = [0,45,60,30] # degrees\n beams['CHAN'] = [0,1,2,3]\n beams['POL'] = [0,0,0,0]\n beams = fits.BinTableHDU(beams)\n\n # Single Stokes\n h = fits.header.Header.fromtextfile(HEADER_FILENAME)\n h['BUNIT'] = 'K' # Kelvins are a valid unit, JY/BEAM are not: they should be tested separately\n h['NAXIS1'] = 2\n h['NAXIS2'] = 3\n h['NAXIS3'] = 4\n h['NAXIS4'] = 1\n d = np.random.random((1, 2, 3, 4))\n\n fits.writeto('advs.fits', d, h, overwrite=True)\n\n d, h = transpose(d, h, [1, 2, 3, 0])\n fits.writeto('dvsa.fits', d, h, overwrite=True)\n\n d, h = transpose(d, h, [1, 2, 3, 0])\n fits.writeto('vsad.fits', d, h, overwrite=True)\n\n d, h = transpose(d, h, [1, 2, 3, 0])\n fits.writeto('sadv.fits', d, h, overwrite=True)\n\n d, h = transpose(d, h, [0, 2, 1, 3])\n fits.writeto('sdav.fits', d, h, overwrite=True)\n\n del h['BMAJ'], h['BMIN'], h['BPA']\n # want 4 spectral channels\n d = np.random.random((4, 3, 2, 1))\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('sdav_beams.fits', overwrite=True)\n\n\n # 3D files\n h = fits.header.Header.fromtextfile(HEADER_FILENAME)\n h['BUNIT'] = 'K' # Kelvins are a valid unit, JY/BEAM are not: they should be tested separately\n h['NAXIS1'] = 2\n h['NAXIS2'] = 3\n h['NAXIS3'] = 4\n h['NAXIS'] = 3\n for k in list(h.keys()):\n if k.endswith('4'):\n del h[k]\n\n d = np.random.random((4, 3, 2))\n fits.writeto('adv.fits', d, h, overwrite=True)\n\n h['BUNIT'] = 'JY/BEAM'\n fits.writeto('adv_JYBEAM_upper.fits', d, h, overwrite=True)\n h['BUNIT'] = 'Jy/beam'\n fits.writeto('adv_Jybeam_lower.fits', d, h, overwrite=True)\n h['BUNIT'] = ' Jy / beam '\n fits.writeto('adv_Jybeam_whitespace.fits', d, h, overwrite=True)\n\n bmaj, bmin, bpa = h['BMAJ'], h['BMIN'], h['BPA']\n del h['BMAJ'], h['BMIN'], h['BPA']\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('adv_beams.fits', overwrite=True)\n\n\n h['BUNIT'] = 'K'\n h['BMAJ'] = bmaj\n h['BMIN'] = bmin\n h['BPA'] = bpa\n d, h = transpose(d, h, [2, 0, 1])\n fits.writeto('vad.fits', d, h, overwrite=True)\n\n d, h = transpose(d, h, [2, 1, 0])\n fits.writeto('vda.fits', d, h, overwrite=True)\n\n h['BUNIT'] = 'JY/BEAM'\n fits.writeto('vda_JYBEAM_upper.fits', d, h, overwrite=True)\n h['BUNIT'] = 'Jy/beam'\n fits.writeto('vda_Jybeam_lower.fits', d, h, overwrite=True)\n h['BUNIT'] = ' Jy / beam '\n fits.writeto('vda_Jybeam_whitespace.fits', d, h, overwrite=True)\n\n del h['BMAJ'], h['BMIN'], h['BPA']\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('vda_beams.fits', overwrite=True)\n\n # make a version with spatial pixels\n h = fits.header.Header.fromtextfile(HEADER_FILENAME)\n for k in list(h.keys()):\n if k.endswith('4'):\n del h[k]\n h['BUNIT'] = 'K' # Kelvins are a valid unit, JY/BEAM are not: 
they should be tested separately\n d = np.arange(2*5*5).reshape((2,5,5))\n fits.writeto('255.fits', d, h, overwrite=True)\n\n # test cube for convolution, regridding\n d = np.zeros([2,5,5], dtype='float')\n d[0,2,2] = 1.0\n fits.writeto('255_delta.fits', d, h, overwrite=True)\n\n d = np.zeros([4,5,5], dtype='float')\n d[:,2,2] = 1.0\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('455_delta_beams.fits', overwrite=True)\n\n d = np.zeros([5,2,2], dtype='float')\n d[2,:,:] = 1.0\n fits.writeto('522_delta.fits', d, h, overwrite=True)\n\n beams = np.recarray(5, dtype=[('BMAJ', '>f4'), ('BMIN', '>f4'),\n ('BPA', '>f4'), ('CHAN', '>i4'),\n ('POL', '>i4')])\n beams['BMAJ'] = [0.1,0.2,0.3,0.4,0.5] # arcseconds\n beams['BMIN'] = [0.5,0.4,0.3,0.2,0.1]\n beams['BPA'] = [0,45,60,30,0] # degrees\n beams['CHAN'] = [0,0,0,0,0]\n beams['POL'] = [0,0,0,0,0]\n beams = fits.BinTableHDU(beams)\n\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('522_delta_beams.fits', overwrite=True)\n\n # Make a 2D spatial version\n h = fits.header.Header.fromtextfile(HEADER_FILENAME)\n for k in list(h.keys()):\n if k.endswith('4') or k.endswith('3'):\n del h[k]\n h['BUNIT'] = 'K'\n d = np.arange(5 * 5).reshape((5, 5))\n fits.writeto('55.fits', d, h, overwrite=True)\n\n # test cube for convolution, regridding\n d = np.zeros([5, 5], dtype='float')\n d[2, 2] = 1.0\n fits.writeto('55_delta.fits', d, h, overwrite=True)\n \n # oneD spectra\n d = np.arange(5, dtype='float')\n h = wcs.WCS(fits.Header.fromtextfile(HEADER_FILENAME)).sub([wcs.WCSSUB_SPECTRAL]).to_header()\n fits.writeto('5_spectral.fits', d, h, overwrite=True)\n\n hdul = fits.HDUList([fits.PrimaryHDU(data=d, header=h),\n beams])\n hdul.writeto('5_spectral_beams.fits', overwrite=True)\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.arange",
"numpy.argsort",
"numpy.recarray",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ReyhaneAskari/sngan.pytorch | [
"d9edf36cadd0835aeed2bb7f73115d04d5a76edd"
] | [
"functions.py"
] | [
"# -*- coding: utf-8 -*-\n# @Date : 2019-07-25\n# @Author : Xinyu Gong ([email protected])\n# @Link : None\n# @Version : 0.0\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchvision.utils import make_grid\nfrom imageio import imsave\nfrom tqdm import tqdm\nfrom copy import deepcopy\nimport logging\nfrom itertools import chain\n\nfrom utils.inception_score import get_inception_score\nfrom utils.fid_score import calculate_fid_given_paths\nfrom torch.nn.utils import parameters_to_vector\nfrom utils.optim import parameters_grad_to_vector\nfrom utils.fid_score_pytorch import calculate_fid\nfrom pathlib import Path\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,\n writer_dict, schedulers=None):\n writer = writer_dict['writer']\n gen_step = 0\n\n # train mode\n gen_net = gen_net.train()\n dis_net = dis_net.train()\n\n dis_params_flatten = parameters_to_vector(dis_net.parameters())\n gen_params_flatten = parameters_to_vector(gen_net.parameters())\n\n if args.optimizer == 'sLead_Adam':\n # just to fill-up the grad buffers\n imgs = iter(train_loader).__next__()[0]\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n fake_imgs = gen_net(z)\n fake_validity = dis_net(fake_imgs)\n d_loss = torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))\n g_loss = -torch.mean(fake_validity)\n (0.0 * d_loss).backward(create_graph=True)\n (0.0 * g_loss).backward(create_graph=True)\n\n for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n\n # Adversarial ground truths\n real_imgs = imgs.type(torch.cuda.FloatTensor)\n\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n real_validity = dis_net(real_imgs)\n fake_imgs = gen_net(z)\n assert fake_imgs.size() == real_imgs.size()\n fake_validity = dis_net(fake_imgs)\n\n # cal loss\n d_loss = torch.mean(\n nn.ReLU(inplace=True)(1.0 - real_validity)) + torch.mean(\n nn.ReLU(inplace=True)(1 + fake_validity))\n\n if args.optimizer == 'Adam':\n dis_optimizer.zero_grad()\n d_loss.backward()\n dis_optimizer.step()\n elif args.optimizer == 'sLead_Adam':\n if global_steps % args.n_critic == 0:\n gradsD = torch.autograd.grad(\n outputs=d_loss, inputs=(dis_net.parameters()),\n create_graph=True)\n for p, g in zip(dis_net.parameters(), gradsD):\n p.grad = g\n gen_params_flatten_prev = gen_params_flatten + 0.0\n gen_params_flatten = parameters_to_vector(gen_net.parameters()) + 0.0\n grad_gen_params_flatten = parameters_grad_to_vector(gen_net.parameters())\n delta_gen_params_flatten = gen_params_flatten - gen_params_flatten_prev\n vjp_dis = torch.autograd.grad(\n grad_gen_params_flatten, dis_net.parameters(),\n grad_outputs=delta_gen_params_flatten)\n dis_optimizer.step(vjps=vjp_dis)\n else:\n # do regular adam\n dis_optimizer.zero_grad()\n d_loss.backward()\n dis_optimizer.step()\n\n writer.add_scalar('d_loss', d_loss.item(), global_steps)\n\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n # cal loss\n gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n gen_imgs = gen_net(gen_z)\n fake_validity = dis_net(gen_imgs)\n g_loss = -torch.mean(fake_validity)\n\n if args.optimizer == 'Adam':\n 
gen_optimizer.zero_grad()\n g_loss.backward()\n gen_optimizer.step()\n\n elif args.optimizer == 'sLead_Adam':\n gradsG = torch.autograd.grad(\n outputs=g_loss, inputs=(gen_net.parameters()),\n create_graph=True)\n for p, g in zip(gen_net.parameters(), gradsG):\n p.grad = g\n\n dis_params_flatten_prev = dis_params_flatten + 0.0\n dis_params_flatten = parameters_to_vector(dis_net.parameters()) + 0.0\n grad_dis_params_flatten = parameters_grad_to_vector(dis_net.parameters())\n delta_dis_params_flatten = dis_params_flatten - dis_params_flatten_prev\n vjp_gen = torch.autograd.grad(\n grad_dis_params_flatten, gen_net.parameters(),\n grad_outputs=delta_dis_params_flatten)\n\n gen_optimizer.step(vjps=vjp_gen)\n\n # adjust learning rate\n if schedulers:\n gen_scheduler, dis_scheduler = schedulers\n g_lr = gen_scheduler.step(global_steps)\n d_lr = dis_scheduler.step(global_steps)\n writer.add_scalar('LR/g_lr', g_lr, global_steps)\n writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n # moving average weight\n for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n avg_p.mul_(0.999).add_(0.001, p.data)\n\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n\n # verbose\n if gen_step and iter_idx % args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n\n writer_dict['train_global_steps'] = global_steps + 1\n\n\ndef validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict, train_loader, epoch):\n gen_net = gen_net.eval()\n global_steps = writer_dict['valid_global_steps']\n gen_net = gen_net.eval()\n eval_iter = args.num_eval_imgs // args.eval_batch_size\n\n # skip IS\n inception_score = 0\n\n # compute FID\n sample_list = []\n for i in range(eval_iter):\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))\n samples = gen_net(z)\n sample_list.append(samples.data.cpu().numpy())\n\n new_sample_list = list(chain.from_iterable(sample_list))\n fake_image_np = np.concatenate([img[None] for img in new_sample_list], 0)\n\n real_image_np = []\n for i, (images, _) in enumerate(train_loader):\n real_image_np += [images.data.numpy()]\n batch_size = real_image_np[0].shape[0]\n if len(real_image_np) * batch_size >= fake_image_np.shape[0]:\n break\n real_image_np = np.concatenate(real_image_np, 0)[:fake_image_np.shape[0]]\n fid_score = calculate_fid(real_image_np, fake_image_np, batch_size=300)\n var_fid = fid_score[0][2]\n fid = round(fid_score[0][1], 3)\n print('------------------------fid_score------------------------')\n print(fid_score)\n\n # Generate a batch of images\n sample_dir = os.path.join(args.path_helper['sample_path'], 'sample_dir')\n Path(sample_dir).mkdir(exist_ok=True)\n\n sample_imgs = gen_net(fixed_z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0)\n img_grid = make_grid(sample_imgs, nrow=5).to('cpu', torch.uint8).numpy()\n file_name = os.path.join(sample_dir, f'epoch_{epoch}_fid_{fid}.png')\n imsave(file_name, img_grid.swapaxes(0, 1).swapaxes(1, 2))\n\n writer_dict['valid_global_steps'] = global_steps + 1\n return inception_score, fid\n\n\nclass LinearLrDecay(object):\n def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):\n\n assert start_lr > end_lr\n self.optimizer = optimizer\n self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)\n self.decay_start_step = decay_start_step\n self.decay_end_step = decay_end_step\n self.start_lr = 
start_lr\n self.end_lr = end_lr\n\n def step(self, current_step):\n if current_step <= self.decay_start_step:\n lr = self.start_lr\n elif current_step >= self.decay_end_step:\n lr = self.end_lr\n else:\n lr = self.start_lr - self.delta * (current_step - self.decay_start_step)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef load_params(model, new_param):\n for p, new_p in zip(model.parameters(), new_param):\n p.data.copy_(new_p)\n\n\ndef copy_params(model):\n flatten = deepcopy(list(p.data for p in model.parameters()))\n return flatten\n"
] | [
[
"numpy.concatenate",
"torch.mean",
"numpy.random.normal",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ultravideo/eye-tracking-framework | [
"505f3f5481a8c716dc5245ceb3e52c0f96a642d6"
] | [
"source/detect_outliers.py"
] | [
"from sklearn.neighbors import LocalOutlierFactor\n\n\ndef detect_outliers(points, k=10):\n \"\"\"\n Detect outliers in given time series.\n Use LocalOutlierFactor to for detection.\n Returns an array of indexes of outlying points.\n \"\"\"\n outlier_indices = []\n\n clf = LocalOutlierFactor(n_neighbors=k)\n pred = clf.fit_predict(points)\n\n for idx, i in enumerate(pred):\n if i == -1:\n outlier_indices.append(idx)\n\n return outlier_indices\n"
] | [
[
"sklearn.neighbors.LocalOutlierFactor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
benj252/python-control | [
"47f00d336a61c5780a8960dd14fac01889590614"
] | [
"control/xferfcn.py"
] | [
"\"\"\"xferfcn.py\n\nTransfer function representation and functions.\n\nThis file contains the TransferFunction class and also functions\nthat operate on transfer functions. This is the primary representation\nfor the python-control library.\n\"\"\"\n\n# Python 3 compatibility (needs to go here)\nfrom __future__ import print_function\nfrom __future__ import division\n\n\"\"\"Copyright (c) 2010 by California Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the California Institute of Technology nor\n the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior\n written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\nFOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH\nOR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\nUSE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n\nAuthor: Richard M. Murray\nDate: 24 May 09\nRevised: Kevin K. Chen, Dec 10\n\n$Id$\n\n\"\"\"\n\n# External function declarations\nimport numpy as np\nfrom numpy import angle, array, empty, finfo, ndarray, ones, \\\n polyadd, polymul, polyval, roots, sqrt, zeros, squeeze, exp, pi, \\\n where, delete, real, poly, nonzero\nimport scipy as sp\nfrom scipy.signal import lti, tf2zpk, zpk2tf, cont2discrete\nfrom copy import deepcopy\nfrom warnings import warn\nfrom itertools import chain\nfrom re import sub\nfrom .lti import LTI, timebaseEqual, timebase, isdtime\nfrom . import config\n\n__all__ = ['TransferFunction', 'tf', 'ss2tf', 'tfdata']\n\n\n# Define module default parameter values\n_xferfcn_defaults = {\n 'xferfcn.default_dt': None}\n\nclass TransferFunction(LTI):\n\n \"\"\"TransferFunction(num, den[, dt])\n\n A class for representing transfer functions\n\n The TransferFunction class is used to represent systems in transfer\n function form.\n\n The main data members are 'num' and 'den', which are 2-D lists of arrays\n containing MIMO numerator and denominator coefficients. For example,\n\n >>> num[2][5] = numpy.array([1., 4., 8.])\n\n means that the numerator of the transfer function from the 6th input to the\n 3rd output is set to s^2 + 4s + 8.\n\n Discrete-time transfer functions are implemented by using the 'dt'\n instance variable and setting it to something other than 'None'. If 'dt'\n has a non-zero value, then it must match whenever two transfer functions\n are combined. If 'dt' is set to True, the system will be treated as a\n discrete time system with unspecified sampling time. 
The default value of\n 'dt' is None and can be changed by changing the value of\n ``control.config.defaults['xferfcn.default_dt']``.\n\n The TransferFunction class defines two constants ``s`` and ``z`` that\n represent the differentiation and delay operators in continuous and\n discrete time. These can be used to create variables that allow algebraic\n creation of transfer functions. For example,\n\n >>> s = TransferFunction.s\n >>> G = (s + 1)/(s**2 + 2*s + 1)\n\n \"\"\"\n def __init__(self, *args):\n \"\"\"TransferFunction(num, den[, dt])\n\n Construct a transfer function.\n\n The default constructor is TransferFunction(num, den), where num and\n den are lists of lists of arrays containing polynomial coefficients.\n To create a discrete time transfer funtion, use TransferFunction(num,\n den, dt) where 'dt' is the sampling time (or True for unspecified\n sampling time). To call the copy constructor, call\n TransferFunction(sys), where sys is a TransferFunction object\n (continuous or discrete).\n\n \"\"\"\n args = deepcopy(args)\n if len(args) == 2:\n # The user provided a numerator and a denominator.\n (num, den) = args\n dt = config.defaults['xferfcn.default_dt']\n elif len(args) == 3:\n # Discrete time transfer function\n (num, den, dt) = args\n elif len(args) == 1:\n # Use the copy constructor.\n if not isinstance(args[0], TransferFunction):\n raise TypeError(\"The one-argument constructor can only take \\\n in a TransferFunction object. Received %s.\"\n % type(args[0]))\n num = args[0].num\n den = args[0].den\n # TODO: not sure this can ever happen since dt is always present\n try:\n dt = args[0].dt\n except NameError: # pragma: no coverage\n dt = config.defaults['xferfcn.default_dt']\n else:\n raise ValueError(\"Needs 1, 2 or 3 arguments; received %i.\"\n % len(args))\n\n num = _clean_part(num)\n den = _clean_part(den)\n\n inputs = len(num[0])\n outputs = len(num)\n\n # Make sure numerator and denominator matrices have consistent sizes\n if inputs != len(den[0]):\n raise ValueError(\n \"The numerator has %i input(s), but the denominator has \"\n \"%i input(s).\" % (inputs, len(den[0])))\n if outputs != len(den):\n raise ValueError(\n \"The numerator has %i output(s), but the denominator has \"\n \"%i output(s).\" % (outputs, len(den)))\n\n # Additional checks/updates on structure of the transfer function\n for i in range(outputs):\n # Make sure that each row has the same number of columns\n if len(num[i]) != inputs:\n raise ValueError(\n \"Row 0 of the numerator matrix has %i elements, but row \"\n \"%i has %i.\" % (inputs, i, len(num[i])))\n if len(den[i]) != inputs:\n raise ValueError(\n \"Row 0 of the denominator matrix has %i elements, but row \"\n \"%i has %i.\" % (inputs, i, len(den[i])))\n\n # Check for zeros in numerator or denominator\n # TODO: Right now these checks are only done during construction.\n # It might be worthwhile to think of a way to perform checks if the\n # user modifies the transfer function after construction.\n for j in range(inputs):\n # Check that we don't have any zero denominators.\n zeroden = True\n for k in den[i][j]:\n if k:\n zeroden = False\n break\n if zeroden:\n raise ValueError(\n \"Input %i, output %i has a zero denominator.\"\n % (j + 1, i + 1))\n\n # If we have zero numerators, set the denominator to 1.\n zeronum = True\n for k in num[i][j]:\n if k:\n zeronum = False\n break\n if zeronum:\n den[i][j] = ones(1)\n\n LTI.__init__(self, inputs, outputs, dt)\n self.num = num\n self.den = den\n\n self._truncatecoeff()\n\n def __call__(self, 
s):\n \"\"\"Evaluate the system's transfer function for a complex variable\n\n For a SISO transfer function, returns the value of the\n transfer function. For a MIMO transfer fuction, returns a\n matrix of values evaluated at complex variable s.\"\"\"\n\n if self.issiso():\n # return a scalar\n return self.horner(s)[0][0]\n else:\n # return a matrix\n return self.horner(s)\n\n def _truncatecoeff(self):\n \"\"\"Remove extraneous zero coefficients from num and den.\n\n Check every element of the numerator and denominator matrices, and\n truncate leading zeros. For instance, running self._truncatecoeff()\n will reduce self.num = [[[0, 0, 1, 2]]] to [[[1, 2]]].\n\n \"\"\"\n\n # Beware: this is a shallow copy. This should be okay.\n data = [self.num, self.den]\n for p in range(len(data)):\n for i in range(self.outputs):\n for j in range(self.inputs):\n # Find the first nontrivial coefficient.\n nonzero = None\n for k in range(data[p][i][j].size):\n if data[p][i][j][k]:\n nonzero = k\n break\n\n if nonzero is None:\n # The array is all zeros.\n data[p][i][j] = zeros(1)\n else:\n # Truncate the trivial coefficients.\n data[p][i][j] = data[p][i][j][nonzero:]\n [self.num, self.den] = data\n\n def __str__(self, var=None):\n \"\"\"String representation of the transfer function.\"\"\"\n\n mimo = self.inputs > 1 or self.outputs > 1\n if var is None:\n # TODO: replace with standard calls to lti functions\n var = 's' if self.dt is None or self.dt == 0 else 'z'\n outstr = \"\"\n\n for i in range(self.inputs):\n for j in range(self.outputs):\n if mimo:\n outstr += \"\\nInput %i to output %i:\" % (i + 1, j + 1)\n\n # Convert the numerator and denominator polynomials to strings.\n numstr = _tf_polynomial_to_string(self.num[j][i], var=var)\n denstr = _tf_polynomial_to_string(self.den[j][i], var=var)\n\n # Figure out the length of the separating line\n dashcount = max(len(numstr), len(denstr))\n dashes = '-' * dashcount\n\n # Center the numerator or denominator\n if len(numstr) < dashcount:\n numstr = ' ' * ((dashcount - len(numstr)) // 2) + numstr\n if len(denstr) < dashcount:\n denstr = ' ' * ((dashcount - len(denstr)) // 2) + denstr\n\n outstr += \"\\n\" + numstr + \"\\n\" + dashes + \"\\n\" + denstr + \"\\n\"\n\n # See if this is a discrete time system with specific sampling time\n if not (self.dt is None) and type(self.dt) != bool and self.dt > 0:\n # TODO: replace with standard calls to lti functions\n outstr += \"\\ndt = \" + self.dt.__str__() + \"\\n\"\n\n return outstr\n\n # represent to implement a re-loadable version\n def __repr__(self):\n \"\"\"Print transfer function in loadable form\"\"\"\n if self.issiso():\n return \"TransferFunction({num}, {den}{dt})\".format(\n num=self.num[0][0].__repr__(), den=self.den[0][0].__repr__(),\n dt=(isdtime(self, strict=True) and ', {}'.format(self.dt)) or '')\n else:\n return \"TransferFunction({num}, {den}{dt})\".format(\n num=self.num.__repr__(), den=self.den.__repr__(),\n dt=(isdtime(self, strict=True) and ', {}'.format(self.dt)) or '')\n\n def _repr_latex_(self, var=None):\n \"\"\"LaTeX representation of transfer function, for Jupyter notebook\"\"\"\n\n mimo = self.inputs > 1 or self.outputs > 1\n\n if var is None:\n # ! 
TODO: replace with standard calls to lti functions\n var = 's' if self.dt is None or self.dt == 0 else 'z'\n\n out = ['$$']\n\n if mimo:\n out.append(r\"\\begin{bmatrix}\")\n\n for i in range(self.outputs):\n for j in range(self.inputs):\n # Convert the numerator and denominator polynomials to strings.\n numstr = _tf_polynomial_to_string(self.num[i][j], var=var)\n denstr = _tf_polynomial_to_string(self.den[i][j], var=var)\n\n numstr = _tf_string_to_latex(numstr, var=var)\n denstr = _tf_string_to_latex(denstr, var=var)\n\n out += [r\"\\frac{\", numstr, \"}{\", denstr, \"}\"]\n\n if mimo and j < self.outputs - 1:\n out.append(\"&\")\n\n if mimo:\n out.append(r\"\\\\\")\n\n if mimo:\n out.append(r\" \\end{bmatrix}\")\n\n # See if this is a discrete time system with specific sampling time\n if not (self.dt is None) and type(self.dt) != bool and self.dt > 0:\n out += [r\"\\quad dt = \", str(self.dt)]\n\n out.append(\"$$\")\n\n return ''.join(out)\n\n def __neg__(self):\n \"\"\"Negate a transfer function.\"\"\"\n\n num = deepcopy(self.num)\n for i in range(self.outputs):\n for j in range(self.inputs):\n num[i][j] *= -1\n\n return TransferFunction(num, self.den, self.dt)\n\n def __add__(self, other):\n \"\"\"Add two LTI objects (parallel connection).\"\"\"\n from .statesp import StateSpace\n\n # Convert the second argument to a transfer function.\n if isinstance(other, StateSpace):\n other = _convert_to_transfer_function(other)\n elif not isinstance(other, TransferFunction):\n other = _convert_to_transfer_function(other, inputs=self.inputs,\n outputs=self.outputs)\n\n # Check that the input-output sizes are consistent.\n if self.inputs != other.inputs:\n raise ValueError(\n \"The first summand has %i input(s), but the second has %i.\"\n % (self.inputs, other.inputs))\n if self.outputs != other.outputs:\n raise ValueError(\n \"The first summand has %i output(s), but the second has %i.\"\n % (self.outputs, other.outputs))\n\n # Figure out the sampling time to use\n if self.dt is None and other.dt is not None:\n dt = other.dt # use dt from second argument\n elif (other.dt is None and self.dt is not None) or \\\n (timebaseEqual(self, other)):\n dt = self.dt # use dt from first argument\n else:\n raise ValueError(\"Systems have different sampling times\")\n\n # Preallocate the numerator and denominator of the sum.\n num = [[[] for j in range(self.inputs)] for i in range(self.outputs)]\n den = [[[] for j in range(self.inputs)] for i in range(self.outputs)]\n\n for i in range(self.outputs):\n for j in range(self.inputs):\n num[i][j], den[i][j] = _add_siso(\n self.num[i][j], self.den[i][j],\n other.num[i][j], other.den[i][j])\n\n return TransferFunction(num, den, dt)\n\n def __radd__(self, other):\n \"\"\"Right add two LTI objects (parallel connection).\"\"\"\n return self + other\n\n def __sub__(self, other):\n \"\"\"Subtract two LTI objects.\"\"\"\n return self + (-other)\n\n def __rsub__(self, other):\n \"\"\"Right subtract two LTI objects.\"\"\"\n return other + (-self)\n\n def __mul__(self, other):\n \"\"\"Multiply two LTI objects (serial connection).\"\"\"\n # Convert the second argument to a transfer function.\n if isinstance(other, (int, float, complex, np.number)):\n other = _convert_to_transfer_function(other, inputs=self.inputs,\n outputs=self.inputs)\n else:\n other = _convert_to_transfer_function(other)\n\n # Check that the input-output sizes are consistent.\n if self.inputs != other.outputs:\n raise ValueError(\n \"C = A * B: A has %i column(s) (input(s)), but B has %i \"\n 
\"row(s)\\n(output(s)).\" % (self.inputs, other.outputs))\n\n inputs = other.inputs\n outputs = self.outputs\n\n # Figure out the sampling time to use\n if self.dt is None and other.dt is not None:\n dt = other.dt # use dt from second argument\n elif (other.dt is None and self.dt is not None) or \\\n (self.dt == other.dt):\n dt = self.dt # use dt from first argument\n else:\n raise ValueError(\"Systems have different sampling times\")\n\n # Preallocate the numerator and denominator of the sum.\n num = [[[0] for j in range(inputs)] for i in range(outputs)]\n den = [[[1] for j in range(inputs)] for i in range(outputs)]\n\n # Temporary storage for the summands needed to find the (i, j)th\n # element of the product.\n num_summand = [[] for k in range(self.inputs)]\n den_summand = [[] for k in range(self.inputs)]\n\n # Multiply & add.\n for row in range(outputs):\n for col in range(inputs):\n for k in range(self.inputs):\n num_summand[k] = polymul(\n self.num[row][k], other.num[k][col])\n den_summand[k] = polymul(\n self.den[row][k], other.den[k][col])\n num[row][col], den[row][col] = _add_siso(\n num[row][col], den[row][col],\n num_summand[k], den_summand[k])\n\n return TransferFunction(num, den, dt)\n\n def __rmul__(self, other):\n \"\"\"Right multiply two LTI objects (serial connection).\"\"\"\n\n # Convert the second argument to a transfer function.\n if isinstance(other, (int, float, complex, np.number)):\n other = _convert_to_transfer_function(other, inputs=self.inputs,\n outputs=self.inputs)\n else:\n other = _convert_to_transfer_function(other)\n\n # Check that the input-output sizes are consistent.\n if other.inputs != self.outputs:\n raise ValueError(\n \"C = A * B: A has %i column(s) (input(s)), but B has %i \"\n \"row(s)\\n(output(s)).\" % (other.inputs, self.outputs))\n\n inputs = self.inputs\n outputs = other.outputs\n\n # Figure out the sampling time to use\n if self.dt is None and other.dt is not None:\n dt = other.dt # use dt from second argument\n elif (other.dt is None and self.dt is not None) \\\n or (self.dt == other.dt):\n dt = self.dt # use dt from first argument\n else:\n raise ValueError(\"Systems have different sampling times\")\n\n # Preallocate the numerator and denominator of the sum.\n num = [[[0] for j in range(inputs)] for i in range(outputs)]\n den = [[[1] for j in range(inputs)] for i in range(outputs)]\n\n # Temporary storage for the summands needed to find the\n # (i, j)th element\n # of the product.\n num_summand = [[] for k in range(other.inputs)]\n den_summand = [[] for k in range(other.inputs)]\n\n for i in range(outputs): # Iterate through rows of product.\n for j in range(inputs): # Iterate through columns of product.\n for k in range(other.inputs): # Multiply & add.\n num_summand[k] = polymul(other.num[i][k], self.num[k][j])\n den_summand[k] = polymul(other.den[i][k], self.den[k][j])\n num[i][j], den[i][j] = _add_siso(\n num[i][j], den[i][j],\n num_summand[k], den_summand[k])\n\n return TransferFunction(num, den, dt)\n\n # TODO: Division of MIMO transfer function objects is not written yet.\n def __truediv__(self, other):\n \"\"\"Divide two LTI objects.\"\"\"\n\n if isinstance(other, (int, float, complex, np.number)):\n other = _convert_to_transfer_function(\n other, inputs=self.inputs,\n outputs=self.inputs)\n else:\n other = _convert_to_transfer_function(other)\n\n if (self.inputs > 1 or self.outputs > 1 or\n other.inputs > 1 or other.outputs > 1):\n raise NotImplementedError(\n \"TransferFunction.__truediv__ is currently \\\n implemented only for 
SISO systems.\")\n\n # Figure out the sampling time to use\n if self.dt is None and other.dt is not None:\n dt = other.dt # use dt from second argument\n elif (other.dt is None and self.dt is not None) or \\\n (self.dt == other.dt):\n dt = self.dt # use dt from first argument\n else:\n raise ValueError(\"Systems have different sampling times\")\n\n num = polymul(self.num[0][0], other.den[0][0])\n den = polymul(self.den[0][0], other.num[0][0])\n\n return TransferFunction(num, den, dt)\n\n # TODO: Remove when transition to python3 complete\n def __div__(self, other):\n return TransferFunction.__truediv__(self, other)\n\n # TODO: Division of MIMO transfer function objects is not written yet.\n def __rtruediv__(self, other):\n \"\"\"Right divide two LTI objects.\"\"\"\n if isinstance(other, (int, float, complex, np.number)):\n other = _convert_to_transfer_function(\n other, inputs=self.inputs,\n outputs=self.inputs)\n else:\n other = _convert_to_transfer_function(other)\n\n if (self.inputs > 1 or self.outputs > 1 or\n other.inputs > 1 or other.outputs > 1):\n raise NotImplementedError(\n \"TransferFunction.__rtruediv__ is currently implemented only \"\n \"for SISO systems.\")\n\n return other / self\n\n # TODO: Remove when transition to python3 complete\n def __rdiv__(self, other):\n return TransferFunction.__rtruediv__(self, other)\n\n def __pow__(self, other):\n if not type(other) == int:\n raise ValueError(\"Exponent must be an integer\")\n if other == 0:\n return TransferFunction([1], [1]) # unity\n if other > 0:\n return self * (self**(other - 1))\n if other < 0:\n return (TransferFunction([1], [1]) / self) * (self**(other + 1))\n\n def __getitem__(self, key):\n key1, key2 = key\n\n # pre-process\n if isinstance(key1, int):\n key1 = slice(key1, key1 + 1, 1)\n if isinstance(key2, int):\n key2 = slice(key2, key2 + 1, 1)\n # dim1\n start1, stop1, step1 = key1.start, key1.stop, key1.step\n if step1 is None:\n step1 = 1\n if start1 is None:\n start1 = 0\n if stop1 is None:\n stop1 = len(self.num)\n # dim1\n start2, stop2, step2 = key2.start, key2.stop, key2.step\n if step2 is None:\n step2 = 1\n if start2 is None:\n start2 = 0\n if stop2 is None:\n stop2 = len(self.num[0])\n\n num = []\n den = []\n for i in range(start1, stop1, step1):\n num_i = []\n den_i = []\n for j in range(start2, stop2, step2):\n num_i.append(self.num[i][j])\n den_i.append(self.den[i][j])\n num.append(num_i)\n den.append(den_i)\n if self.isctime():\n return TransferFunction(num, den)\n else:\n return TransferFunction(num, den, self.dt)\n\n def evalfr(self, omega):\n \"\"\"Evaluate a transfer function at a single angular frequency.\n\n self._evalfr(omega) returns the value of the transfer function\n matrix with input value s = i * omega.\n\n \"\"\"\n warn(\"TransferFunction.evalfr(omega) will be deprecated in a \"\n \"future release of python-control; use evalfr(sys, omega*1j) \"\n \"instead\", PendingDeprecationWarning)\n return self._evalfr(omega)\n\n def _evalfr(self, omega):\n \"\"\"Evaluate a transfer function at a single angular frequency.\"\"\"\n # TODO: implement for discrete time systems\n if isdtime(self, strict=True):\n # Convert the frequency to discrete time\n dt = timebase(self)\n s = exp(1.j * omega * dt)\n if np.any(omega * dt > pi):\n warn(\"_evalfr: frequency evaluation above Nyquist frequency\")\n else:\n s = 1.j * omega\n\n return self.horner(s)\n\n def horner(self, s):\n \"\"\"Evaluate the systems's transfer function for a complex variable\n\n Returns a matrix of values evaluated at complex variable 
s.\n \"\"\"\n\n # Preallocate the output.\n if getattr(s, '__iter__', False):\n out = empty((self.outputs, self.inputs, len(s)), dtype=complex)\n else:\n out = empty((self.outputs, self.inputs), dtype=complex)\n\n for i in range(self.outputs):\n for j in range(self.inputs):\n out[i][j] = (polyval(self.num[i][j], s) /\n polyval(self.den[i][j], s))\n\n return out\n\n def freqresp(self, omega):\n \"\"\"Evaluate the transfer function at a list of angular frequencies.\n\n Reports the frequency response of the system,\n\n G(j*omega) = mag*exp(j*phase)\n\n for continuous time. For discrete time systems, the response is\n evaluated around the unit circle such that\n\n G(exp(j*omega*dt)) = mag*exp(j*phase).\n\n Parameters\n ----------\n omega : array_like\n A list of frequencies in radians/sec at which the system should be\n evaluated. The list can be either a python list or a numpy array\n and will be sorted before evaluation.\n\n Returns\n -------\n mag : (self.outputs, self.inputs, len(omega)) ndarray\n The magnitude (absolute value, not dB or log10) of the system\n frequency response.\n phase : (self.outputs, self.inputs, len(omega)) ndarray\n The wrapped phase in radians of the system frequency response.\n omega : ndarray or list or tuple\n The list of sorted frequencies at which the response was\n evaluated.\n \"\"\"\n # Preallocate outputs.\n numfreq = len(omega)\n mag = empty((self.outputs, self.inputs, numfreq))\n phase = empty((self.outputs, self.inputs, numfreq))\n\n # Figure out the frequencies\n omega.sort()\n if isdtime(self, strict=True):\n dt = timebase(self)\n slist = np.array([exp(1.j * w * dt) for w in omega])\n if max(omega) * dt > pi:\n warn(\"freqresp: frequency evaluation above Nyquist frequency\")\n else:\n slist = np.array([1j * w for w in omega])\n\n # Compute frequency response for each input/output pair\n for i in range(self.outputs):\n for j in range(self.inputs):\n fresp = (polyval(self.num[i][j], slist) /\n polyval(self.den[i][j], slist))\n mag[i, j, :] = abs(fresp)\n phase[i, j, :] = angle(fresp)\n\n return mag, phase, omega\n\n def pole(self):\n \"\"\"Compute the poles of a transfer function.\"\"\"\n _, den, denorder = self._common_den(allow_nonproper=True)\n rts = []\n for d, o in zip(den, denorder):\n rts.extend(roots(d[:o + 1]))\n return np.array(rts)\n\n def zero(self):\n \"\"\"Compute the zeros of a transfer function.\"\"\"\n if self.inputs > 1 or self.outputs > 1:\n raise NotImplementedError(\n \"TransferFunction.zero is currently only implemented \"\n \"for SISO systems.\")\n else:\n # for now, just give zeros of a SISO tf\n return roots(self.num[0][0])\n\n def feedback(self, other=1, sign=-1):\n \"\"\"Feedback interconnection between two LTI objects.\"\"\"\n other = _convert_to_transfer_function(other)\n\n if (self.inputs > 1 or self.outputs > 1 or\n other.inputs > 1 or other.outputs > 1):\n # TODO: MIMO feedback\n raise NotImplementedError(\n \"TransferFunction.feedback is currently only implemented \"\n \"for SISO functions.\")\n\n # Figure out the sampling time to use\n if self.dt is None and other.dt is not None:\n dt = other.dt # use dt from second argument\n elif (other.dt is None and self.dt is not None) or \\\n (self.dt == other.dt):\n dt = self.dt # use dt from first argument\n else:\n raise ValueError(\"Systems have different sampling times\")\n\n num1 = self.num[0][0]\n den1 = self.den[0][0]\n num2 = other.num[0][0]\n den2 = other.den[0][0]\n\n num = polymul(num1, den2)\n den = polyadd(polymul(den2, den1), -sign * polymul(num2, num1))\n\n return 
TransferFunction(num, den, dt)\n\n # For MIMO or SISO systems, the analytic expression is\n # self / (1 - sign * other * self)\n # But this does not work correctly because the state size will be too\n # large.\n\n def minreal(self, tol=None):\n \"\"\"Remove cancelling pole/zero pairs from a transfer function\"\"\"\n # based on octave minreal\n\n # default accuracy\n from sys import float_info\n sqrt_eps = sqrt(float_info.epsilon)\n\n # pre-allocate arrays\n num = [[[] for j in range(self.inputs)] for i in range(self.outputs)]\n den = [[[] for j in range(self.inputs)] for i in range(self.outputs)]\n\n for i in range(self.outputs):\n for j in range(self.inputs):\n\n # split up in zeros, poles and gain\n newzeros = []\n zeros = roots(self.num[i][j])\n poles = roots(self.den[i][j])\n gain = self.num[i][j][0] / self.den[i][j][0]\n\n # check all zeros\n for z in zeros:\n t = tol or \\\n 1000 * max(float_info.epsilon, abs(z) * sqrt_eps)\n idx = where(abs(z - poles) < t)[0]\n if len(idx):\n # cancel this zero against one of the poles\n poles = delete(poles, idx[0])\n else:\n # keep this zero\n newzeros.append(z)\n\n # poly([]) returns a scalar, but we always want a 1d array\n num[i][j] = np.atleast_1d(gain * real(poly(newzeros)))\n den[i][j] = np.atleast_1d(real(poly(poles)))\n\n # end result\n return TransferFunction(num, den, self.dt)\n\n def returnScipySignalLTI(self):\n \"\"\"Return a list of a list of :class:`scipy.signal.lti` objects.\n\n For instance,\n\n >>> out = tfobject.returnScipySignalLTI()\n >>> out[3][5]\n\n is a class:`scipy.signal.lti` object corresponding to the\n transfer function from the 6th input to the 4th output.\n\n \"\"\"\n\n # TODO: implement for discrete time systems\n if self.dt != 0 and self.dt is not None:\n raise NotImplementedError(\"Function not \\\n implemented in discrete time\")\n\n # Preallocate the output.\n out = [[[] for j in range(self.inputs)] for i in range(self.outputs)]\n\n for i in range(self.outputs):\n for j in range(self.inputs):\n out[i][j] = lti(self.num[i][j], self.den[i][j])\n\n return out\n\n def _common_den(self, imag_tol=None, allow_nonproper=False):\n \"\"\"\n Compute MIMO common denominators; return them and adjusted numerators.\n\n This function computes the denominators per input containing all\n the poles of sys.den, and reports it as the array den. The\n output numerator array num is modified to use the common\n denominator for this input/column; the coefficient arrays are also\n padded with zeros to be the same size for all num/den.\n\n Parameters\n ----------\n imag_tol: float\n Threshold for the imaginary part of a root to use in detecting\n complex poles\n\n allow_nonproper : boolean\n Do not enforce proper transfer functions\n\n Returns\n -------\n num: array\n n by n by kd where n = max(sys.outputs,sys.inputs)\n kd = max(denorder)+1\n Multi-dimensional array of numerator coefficients. num[i,j]\n gives the numerator coefficient array for the ith output and jth\n input; padded for use in td04ad ('C' option); matches the\n denorder order; highest coefficient starts on the left.\n If allow_nonproper=True and the order of a numerator exceeds the\n order of the common denominator, num will be returned as None\n\n den: array\n sys.inputs by kd\n Multi-dimensional array of coefficients for common denominator\n polynomial, one row per input. The array is prepared for use in\n slycot td04ad, the first element is the highest-order polynomial\n coefficient of s, matching the order in denorder. 
If denorder <\n number of columns in den, the den is padded with zeros.\n\n denorder: array of int, orders of den, one per input\n\n\n\n Examples\n --------\n >>> num, den, denorder = sys._common_den()\n\n \"\"\"\n\n # Machine precision for floats.\n eps = finfo(float).eps\n real_tol = sqrt(eps * self.inputs * self.outputs)\n\n # The tolerance to use in deciding if a pole is complex\n if (imag_tol is None):\n imag_tol = 2 * real_tol\n\n # A list to keep track of cumulative poles found as we scan\n # self.den[..][..]\n poles = [[] for j in range(self.inputs)]\n\n # RvP, new implementation 180526, issue #194\n # BG, modification, issue #343, PR #354\n\n # pre-calculate the poles for all num, den\n # has zeros, poles, gain, list for pole indices not in den,\n # number of poles known at the time analyzed\n\n # do not calculate minreal. Rory's hint .minreal()\n poleset = []\n for i in range(self.outputs):\n poleset.append([])\n for j in range(self.inputs):\n if abs(self.num[i][j]).max() <= eps:\n poleset[-1].append([array([], dtype=float),\n roots(self.den[i][j]), 0.0, [], 0])\n else:\n z, p, k = tf2zpk(self.num[i][j], self.den[i][j])\n poleset[-1].append([z, p, k, [], 0])\n\n # collect all individual poles\n for j in range(self.inputs):\n for i in range(self.outputs):\n currentpoles = poleset[i][j][1]\n nothave = ones(currentpoles.shape, dtype=bool)\n for ip, p in enumerate(poles[j]):\n collect = (np.isclose(currentpoles.real, p.real,\n atol=real_tol) &\n np.isclose(currentpoles.imag, p.imag,\n atol=imag_tol) &\n nothave)\n if np.any(collect):\n # mark first found pole as already collected\n nothave[nonzero(collect)[0][0]] = False\n else:\n # remember id of pole not in tf\n poleset[i][j][3].append(ip)\n for h, c in zip(nothave, currentpoles):\n if h:\n if abs(c.imag) < imag_tol:\n c = c.real\n poles[j].append(c)\n # remember how many poles now known\n poleset[i][j][4] = len(poles[j])\n\n # figure out maximum number of poles, for sizing the den\n maxindex = max([len(p) for p in poles])\n den = zeros((self.inputs, maxindex + 1), dtype=float)\n num = zeros((max(1, self.outputs, self.inputs),\n max(1, self.outputs, self.inputs),\n maxindex + 1),\n dtype=float)\n denorder = zeros((self.inputs,), dtype=int)\n\n havenonproper = False\n\n for j in range(self.inputs):\n if not len(poles[j]):\n # no poles matching this input; only one or more gains\n den[j, 0] = 1.0\n for i in range(self.outputs):\n num[i, j, 0] = poleset[i][j][2]\n else:\n # create the denominator matching this input\n # coefficients should be padded on right, ending at maxindex\n maxindex = len(poles[j])\n den[j, :maxindex+1] = poly(poles[j])\n denorder[j] = maxindex\n\n # now create the numerator, also padded on the right\n for i in range(self.outputs):\n # start with the current set of zeros for this output\n nwzeros = list(poleset[i][j][0])\n # add all poles not found in the original denominator,\n # and the ones later added from other denominators\n for ip in chain(poleset[i][j][3],\n range(poleset[i][j][4], maxindex)):\n nwzeros.append(poles[j][ip])\n\n numpoly = poleset[i][j][2] * np.atleast_1d(poly(nwzeros))\n\n # td04ad expects a proper transfer function. If the\n # numerater has a higher order than the denominator, the\n # padding will fail\n if len(numpoly) > maxindex + 1:\n if allow_nonproper:\n havenonproper = True\n break\n raise ValueError(\n self.__str__() +\n \"is not a proper transfer function. 
\"\n \"The degree of the numerators must not exceed \"\n \"the degree of the denominators.\")\n\n # numerator polynomial should be padded on left and right\n # ending at maxindex to line up with what td04ad expects.\n num[i, j, maxindex+1-len(numpoly):maxindex+1] = numpoly\n # print(num[i, j])\n\n if havenonproper:\n num = None\n\n return num, den, denorder\n\n def sample(self, Ts, method='zoh', alpha=None, prewarp_frequency=None):\n \"\"\"Convert a continuous-time system to discrete time\n\n Creates a discrete-time system from a continuous-time system by\n sampling. Multiple methods of conversion are supported.\n\n Parameters\n ----------\n Ts : float\n Sampling period\n method : {\"gbt\", \"bilinear\", \"euler\", \"backward_diff\",\n \"zoh\", \"matched\"}\n Method to use for sampling:\n\n * gbt: generalized bilinear transformation\n * bilinear: Tustin's approximation (\"gbt\" with alpha=0.5)\n * euler: Euler (or forward difference) method (\"gbt\" with alpha=0)\n * backward_diff: Backwards difference (\"gbt\" with alpha=1.0)\n * zoh: zero-order hold (default)\n\n alpha : float within [0, 1]\n The generalized bilinear transformation weighting parameter, which\n should only be specified with method=\"gbt\", and is ignored\n otherwise.\n\n prewarp_frequency : float within [0, infinity)\n The frequency [rad/s] at which to match with the input continuous-\n time system's magnitude and phase (the gain=1 crossover frequency,\n for example). Should only be specified with method='bilinear' or\n 'gbt' with alpha=0.5 and ignored otherwise.\n\n Returns\n -------\n sysd : StateSpace system\n Discrete time system, with sampling rate Ts\n\n Notes\n -----\n 1. Available only for SISO systems\n\n 2. Uses :func:`scipy.signal.cont2discrete`\n\n Examples\n --------\n >>> sys = TransferFunction(1, [1,1])\n >>> sysd = sys.sample(0.5, method='bilinear')\n\n \"\"\"\n if not self.isctime():\n raise ValueError(\"System must be continuous time system\")\n if not self.issiso():\n raise NotImplementedError(\"MIMO implementation not available\")\n if method == \"matched\":\n return _c2d_matched(self, Ts)\n sys = (self.num[0][0], self.den[0][0])\n if (method=='bilinear' or (method=='gbt' and alpha==0.5)) and \\\n prewarp_frequency is not None:\n Twarp = 2*np.tan(prewarp_frequency*Ts/2)/prewarp_frequency\n else:\n Twarp = Ts\n numd, dend, _ = cont2discrete(sys, Twarp, method, alpha)\n return TransferFunction(numd[0, :], dend, Ts)\n\n def dcgain(self):\n \"\"\"Return the zero-frequency (or DC) gain\n\n For a continous-time transfer function G(s), the DC gain is G(0)\n For a discrete-time transfer function G(z), the DC gain is G(1)\n\n Returns\n -------\n gain : ndarray\n The zero-frequency gain\n \"\"\"\n if self.isctime():\n return self._dcgain_cont()\n else:\n return self(1)\n\n def _dcgain_cont(self):\n \"\"\"_dcgain_cont() -> DC gain as matrix or scalar\n\n Special cased evaluation at 0 for continuous-time systems.\"\"\"\n gain = np.empty((self.outputs, self.inputs), dtype=float)\n for i in range(self.outputs):\n for j in range(self.inputs):\n num = self.num[i][j][-1]\n den = self.den[i][j][-1]\n if den:\n gain[i][j] = num / den\n else:\n if num:\n # numerator nonzero: infinite gain\n gain[i][j] = np.inf\n else:\n # numerator is zero too: give up\n gain[i][j] = np.nan\n return np.squeeze(gain)\n\n def is_static_gain(self):\n \"\"\"returns True if and only if all of the numerator and denominator \n polynomials of the (possibly MIMO) transfer function are zeroth order, \n that is, if the system has no dynamics. 
\"\"\"\n for list_of_polys in self.num, self.den: \n for row in list_of_polys:\n for poly in row:\n if len(poly) > 1: \n return False\n return True\n \n# c2d function contributed by Benjamin White, Oct 2012\ndef _c2d_matched(sysC, Ts):\n # Pole-zero match method of continuous to discrete time conversion\n szeros, spoles, sgain = tf2zpk(sysC.num[0][0], sysC.den[0][0])\n zzeros = [0] * len(szeros)\n zpoles = [0] * len(spoles)\n pregainnum = [0] * len(szeros)\n pregainden = [0] * len(spoles)\n for idx, s in enumerate(szeros):\n sTs = s * Ts\n z = exp(sTs)\n zzeros[idx] = z\n pregainnum[idx] = 1 - z\n for idx, s in enumerate(spoles):\n sTs = s * Ts\n z = exp(sTs)\n zpoles[idx] = z\n pregainden[idx] = 1 - z\n zgain = np.multiply.reduce(pregainnum) / np.multiply.reduce(pregainden)\n gain = sgain / zgain\n sysDnum, sysDden = zpk2tf(zzeros, zpoles, gain)\n return TransferFunction(sysDnum, sysDden, Ts)\n\n# Utility function to convert a transfer function polynomial to a string\n# Borrowed from poly1d library\n\n\ndef _tf_polynomial_to_string(coeffs, var='s'):\n \"\"\"Convert a transfer function polynomial to a string\"\"\"\n\n thestr = \"0\"\n\n # Compute the number of coefficients\n N = len(coeffs) - 1\n\n for k in range(len(coeffs)):\n coefstr = '%.4g' % abs(coeffs[k])\n power = (N - k)\n if power == 0:\n if coefstr != '0':\n newstr = '%s' % (coefstr,)\n else:\n if k == 0:\n newstr = '0'\n else:\n newstr = ''\n elif power == 1:\n if coefstr == '0':\n newstr = ''\n elif coefstr == '1':\n newstr = var\n else:\n newstr = '%s %s' % (coefstr, var)\n else:\n if coefstr == '0':\n newstr = ''\n elif coefstr == '1':\n newstr = '%s^%d' % (var, power,)\n else:\n newstr = '%s %s^%d' % (coefstr, var, power)\n\n if k > 0:\n if newstr != '':\n if coeffs[k] < 0:\n thestr = \"%s - %s\" % (thestr, newstr)\n else:\n thestr = \"%s + %s\" % (thestr, newstr)\n elif (k == 0) and (newstr != '') and (coeffs[k] < 0):\n thestr = \"-%s\" % (newstr,)\n else:\n thestr = newstr\n return thestr\n\n\ndef _tf_string_to_latex(thestr, var='s'):\n \"\"\" make sure to superscript all digits in a polynomial string\n and convert float coefficients in scientific notation\n to prettier LaTeX representation \"\"\"\n # TODO: make the multiplication sign configurable\n expmul = r' \\\\times'\n thestr = sub(var + r'\\^(\\d{2,})', var + r'^{\\1}', thestr)\n thestr = sub(r'[eE]\\+0*(\\d+)', expmul + r' 10^{\\1}', thestr)\n thestr = sub(r'[eE]\\-0*(\\d+)', expmul + r' 10^{-\\1}', thestr)\n return thestr\n\n\ndef _add_siso(num1, den1, num2, den2):\n \"\"\"Return num/den = num1/den1 + num2/den2.\n\n Each numerator and denominator is a list of polynomial coefficients.\n\n \"\"\"\n\n num = polyadd(polymul(num1, den2), polymul(num2, den1))\n den = polymul(den1, den2)\n\n return num, den\n\n\ndef _convert_to_transfer_function(sys, **kw):\n \"\"\"Convert a system to transfer function form (if needed).\n\n If sys is already a transfer function, then it is returned. If sys is a\n state space object, then it is converted to a transfer function and\n returned. If sys is a scalar, then the number of inputs and outputs can be\n specified manually, as in:\n\n >>> sys = _convert_to_transfer_function(3.) 
# Assumes inputs = outputs = 1\n >>> sys = _convert_to_transfer_function(1., inputs=3, outputs=2)\n\n In the latter example, sys's matrix transfer function is [[1., 1., 1.]\n [1., 1., 1.]].\n\n If sys is an array-like type, then it is converted to a constant-gain\n transfer function.\n\n >>> sys = _convert_to_transfer_function([[1., 0.], [2., 3.]])\n\n In this example, the numerator matrix will be\n [[[1.0], [0.0]], [[2.0], [3.0]]]\n and the denominator matrix [[[1.0], [1.0]], [[1.0], [1.0]]]\n\n \"\"\"\n from .statesp import StateSpace\n\n if isinstance(sys, TransferFunction):\n if len(kw):\n raise TypeError(\"If sys is a TransferFunction, \" +\n \"_convertToTransferFunction cannot take keywords.\")\n\n return sys\n elif isinstance(sys, StateSpace):\n\n if 0 == sys.states:\n # Slycot doesn't like static SS->TF conversion, so handle\n # it first. Can't join this with the no-Slycot branch,\n # since that doesn't handle general MIMO systems\n num = [[[sys.D[i, j]] for j in range(sys.inputs)]\n for i in range(sys.outputs)]\n den = [[[1.] for j in range(sys.inputs)]\n for i in range(sys.outputs)]\n else:\n try:\n from slycot import tb04ad\n if len(kw):\n raise TypeError(\n \"If sys is a StateSpace, \" +\n \"_convertToTransferFunction cannot take keywords.\")\n\n # Use Slycot to make the transformation\n # Make sure to convert system matrices to numpy arrays\n tfout = tb04ad(\n sys.states, sys.inputs, sys.outputs, array(sys.A),\n array(sys.B), array(sys.C), array(sys.D), tol1=0.0)\n\n # Preallocate outputs.\n num = [[[] for j in range(sys.inputs)]\n for i in range(sys.outputs)]\n den = [[[] for j in range(sys.inputs)]\n for i in range(sys.outputs)]\n\n for i in range(sys.outputs):\n for j in range(sys.inputs):\n num[i][j] = list(tfout[6][i, j, :])\n # Each transfer function matrix row\n # has a common denominator.\n den[i][j] = list(tfout[5][i, :])\n\n except ImportError:\n # If slycot is not available, use signal.lti (SISO only)\n if sys.inputs != 1 or sys.outputs != 1:\n raise TypeError(\"No support for MIMO without slycot.\")\n\n # Do the conversion using sp.signal.ss2tf\n # Note that this returns a 2D array for the numerator\n num, den = sp.signal.ss2tf(sys.A, sys.B, sys.C, sys.D)\n num = squeeze(num) # Convert to 1D array\n den = squeeze(den) # Probably not needed\n\n return TransferFunction(num, den, sys.dt)\n\n elif isinstance(sys, (int, float, complex, np.number)):\n if \"inputs\" in kw:\n inputs = kw[\"inputs\"]\n else:\n inputs = 1\n if \"outputs\" in kw:\n outputs = kw[\"outputs\"]\n else:\n outputs = 1\n\n num = [[[sys] for j in range(inputs)] for i in range(outputs)]\n den = [[[1] for j in range(inputs)] for i in range(outputs)]\n\n return TransferFunction(num, den)\n\n # If this is array-like, try to create a constant feedthrough\n try:\n D = array(sys)\n outputs, inputs = D.shape\n num = [[[D[i, j]] for j in range(inputs)] for i in range(outputs)]\n den = [[[1] for j in range(inputs)] for i in range(outputs)]\n return TransferFunction(num, den)\n except Exception as e:\n print(\"Failure to assume argument is matrix-like in\"\n \" _convertToTransferFunction, result %s\" % e)\n\n raise TypeError(\"Can't convert given type to TransferFunction system.\")\n\n\ndef tf(*args):\n \"\"\"tf(num, den[, dt])\n\n Create a transfer function system. Can create MIMO systems.\n\n The function accepts either 1, 2, or 3 parameters:\n\n ``tf(sys)``\n Convert a linear system into transfer function form. 
Always creates\n a new system, even if sys is already a TransferFunction object.\n\n ``tf(num, den)``\n Create a transfer function system from its numerator and denominator\n polynomial coefficients.\n\n If `num` and `den` are 1D array_like objects, the function creates a\n SISO system.\n\n To create a MIMO system, `num` and `den` need to be 2D nested lists\n of array_like objects. (A 3 dimensional data structure in total.)\n (For details see note below.)\n\n ``tf(num, den, dt)``\n Create a discrete time transfer function system; dt can either be a\n positive number indicating the sampling time or 'True' if no\n specific timebase is given.\n\n ``tf('s')`` or ``tf('z')``\n Create a transfer function representing the differential operator\n ('s') or delay operator ('z').\n\n Parameters\n ----------\n sys: LTI (StateSpace or TransferFunction)\n A linear system\n num: array_like, or list of list of array_like\n Polynomial coefficients of the numerator\n den: array_like, or list of list of array_like\n Polynomial coefficients of the denominator\n\n Returns\n -------\n out: :class:`TransferFunction`\n The new linear system\n\n Raises\n ------\n ValueError\n if `num` and `den` have invalid or unequal dimensions\n TypeError\n if `num` or `den` are of incorrect type\n\n See Also\n --------\n TransferFunction\n ss\n ss2tf\n tf2ss\n\n Notes\n -----\n ``num[i][j]`` contains the polynomial coefficients of the numerator\n for the transfer function from the (j+1)st input to the (i+1)st output.\n ``den[i][j]`` works the same way.\n\n The list ``[2, 3, 4]`` denotes the polynomial :math:`2s^2 + 3s + 4`.\n\n The special forms ``tf('s')`` and ``tf('z')`` can be used to create\n transfer functions for differentiation and unit delays.\n\n Examples\n --------\n >>> # Create a MIMO transfer function object\n >>> # The transfer function from the 2nd input to the 1st output is\n >>> # (3s + 4) / (6s^2 + 5s + 4).\n >>> num = [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]\n >>> den = [[[9., 8., 7.], [6., 5., 4.]], [[3., 2., 1.], [-1., -2., -3.]]]\n >>> sys1 = tf(num, den)\n\n >>> # Create a variable 's' to allow algebra operations for SISO systems\n >>> s = tf('s')\n >>> G = (s + 1)/(s**2 + 2*s + 1)\n\n >>> # Convert a StateSpace to a TransferFunction object.\n >>> sys_ss = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> sys2 = tf(sys1)\n\n \"\"\"\n\n if len(args) == 2 or len(args) == 3:\n return TransferFunction(*args)\n elif len(args) == 1:\n # Look for special cases defining differential/delay operator\n if args[0] == 's':\n return TransferFunction.s\n elif args[0] == 'z':\n return TransferFunction.z\n\n from .statesp import StateSpace\n sys = args[0]\n if isinstance(sys, StateSpace):\n return ss2tf(sys)\n elif isinstance(sys, TransferFunction):\n return deepcopy(sys)\n else:\n raise TypeError(\"tf(sys): sys must be a StateSpace or \"\n \"TransferFunction object. It is %s.\" % type(sys))\n else:\n raise ValueError(\"Needs 1 or 2 arguments; received %i.\" % len(args))\n\n\ndef ss2tf(*args):\n \"\"\"ss2tf(sys)\n\n Transform a state space system to a transfer function.\n\n The function accepts either 1 or 4 parameters:\n\n ``ss2tf(sys)``\n Convert a linear system into space system form. 
Always creates a\n new system, even if sys is already a StateSpace object.\n\n ``ss2tf(A, B, C, D)``\n Create a state space system from the matrices of its state and\n output equations.\n\n For details see: :func:`ss`\n\n Parameters\n ----------\n sys: StateSpace\n A linear system\n A: array_like or string\n System matrix\n B: array_like or string\n Control matrix\n C: array_like or string\n Output matrix\n D: array_like or string\n Feedthrough matrix\n\n Returns\n -------\n out: TransferFunction\n New linear system in transfer function form\n\n Raises\n ------\n ValueError\n if matrix sizes are not self-consistent, or if an invalid number of\n arguments is passed in\n TypeError\n if `sys` is not a StateSpace object\n\n See Also\n --------\n tf\n ss\n tf2ss\n\n Examples\n --------\n >>> A = [[1., -2], [3, -4]]\n >>> B = [[5.], [7]]\n >>> C = [[6., 8]]\n >>> D = [[9.]]\n >>> sys1 = ss2tf(A, B, C, D)\n\n >>> sys_ss = ss(A, B, C, D)\n >>> sys2 = ss2tf(sys_ss)\n\n \"\"\"\n\n from .statesp import StateSpace\n if len(args) == 4 or len(args) == 5:\n # Assume we were given the A, B, C, D matrix and (optional) dt\n return _convert_to_transfer_function(StateSpace(*args))\n\n elif len(args) == 1:\n sys = args[0]\n if isinstance(sys, StateSpace):\n return _convert_to_transfer_function(sys)\n else:\n raise TypeError(\n \"ss2tf(sys): sys must be a StateSpace object. It is %s.\"\n % type(sys))\n else:\n raise ValueError(\"Needs 1 or 4 arguments; received %i.\" % len(args))\n\n\ndef tfdata(sys):\n \"\"\"\n Return transfer function data objects for a system\n\n Parameters\n ----------\n sys: LTI (StateSpace, or TransferFunction)\n LTI system whose data will be returned\n\n Returns\n -------\n (num, den): numerator and denominator arrays\n Transfer function coefficients (SISO only)\n \"\"\"\n tf = _convert_to_transfer_function(sys)\n\n return tf.num, tf.den\n\n\ndef _clean_part(data):\n \"\"\"\n Return a valid, cleaned up numerator or denominator\n for the TransferFunction class.\n\n Parameters\n ----------\n data: numerator or denominator of a transfer function.\n\n Returns\n -------\n data: list of lists of ndarrays, with int converted to float\n \"\"\"\n valid_types = (int, float, complex, np.number)\n valid_collection = (list, tuple, ndarray)\n\n if (isinstance(data, valid_types) or\n (isinstance(data, ndarray) and data.ndim == 0)):\n # Data is a scalar (including 0d ndarray)\n data = [[array([data])]]\n elif (isinstance(data, ndarray) and data.ndim == 3 and\n isinstance(data[0, 0, 0], valid_types)):\n data = [[array(data[i, j])\n for j in range(data.shape[1])]\n for i in range(data.shape[0])]\n elif (isinstance(data, valid_collection) and\n all([isinstance(d, valid_types) for d in data])):\n data = [[array(data)]]\n elif (isinstance(data, (list, tuple)) and\n isinstance(data[0], (list, tuple)) and\n (isinstance(data[0][0], valid_collection) and\n all([isinstance(d, valid_types) for d in data[0][0]]))):\n data = list(data)\n for j in range(len(data)):\n data[j] = list(data[j])\n for k in range(len(data[j])):\n data[j][k] = array(data[j][k])\n else:\n # If the user passed in anything else, then it's unclear what\n # the meaning is.\n raise TypeError(\n \"The numerator and denominator inputs must be scalars or vectors \"\n \"(for\\nSISO), or lists of lists of vectors (for SISO or MIMO).\")\n\n # Check for coefficients that are ints and convert to floats\n for i in range(len(data)):\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n if isinstance(data[i][j][k], (int, np.int)):\n 
data[i][j][k] = float(data[i][j][k])\n\n return data\n\n\n# Define constants to represent differentiation, unit delay\nTransferFunction.s = TransferFunction([1, 0], [1], 0)\nTransferFunction.z = TransferFunction([1, 0], [1], True)\n"
] | [
[
"numpy.sqrt",
"numpy.squeeze",
"numpy.any",
"scipy.signal.zpk2tf",
"numpy.exp",
"numpy.polyval",
"numpy.finfo",
"numpy.roots",
"scipy.signal.cont2discrete",
"numpy.isclose",
"numpy.nonzero",
"numpy.polymul",
"scipy.signal.tf2zpk",
"numpy.tan",
"numpy.delete",
"numpy.poly",
"scipy.signal.ss2tf",
"numpy.array",
"numpy.multiply.reduce",
"numpy.ones",
"scipy.signal.lti",
"numpy.angle",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
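
Aside (not part of the stored record): the xferfcn.py code field above defines the python-control TransferFunction class and the tf/ss2tf helpers. A minimal usage sketch follows; it assumes the python-control package is installed and importable as `control`, and it only exercises calls whose signatures appear in the record itself.

import numpy as np
from control import TransferFunction, tf

# Algebraic construction via the differentiation operator defined at the
# bottom of xferfcn.py (TransferFunction.s)
s = TransferFunction.s
G = (s + 1) / (s**2 + 2 * s + 1)      # SISO system (s + 1) / (s^2 + 2 s + 1)

# Equivalent construction from numerator/denominator coefficient lists
G2 = tf([1, 1], [1, 2, 1])

# Poles and frequency response, via the pole() and freqresp() methods above
print(G.pole())                       # two poles at s = -1
mag, phase, omega = G.freqresp(np.logspace(-2, 2, 50))

# Zero-order-hold discretization with sampling period Ts = 0.5 s
Gd = G.sample(0.5, method='zoh')
print(Gd.dt)                          # 0.5
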
naavis/halosim-proto | [
"a0e89097a79e0dcbd76e8607f3e10224e0c5be9c"
] | [
"halosim/crystals.py"
] | [
"import numpy as np\n\n\ndef generate_hexagonal_prototype_crystal():\n \"\"\"\n Generate a hexagonal prototype crystal with a c/a ratio of 1.0 and c axis parallel to the y coordinate axis.\n :return: 3xn array of vertex coordinates and nx3 array of vertex indices to indicate each triangle.\n \"\"\"\n angles = 2.0 * np.pi * np.arange(0.0, 1.0, 1.0 / 6.0)\n vertices = np.array([np.cos(angles), np.sin(angles)])\n height = 1.0 # This should make c/a ration 1.0 by default\n xs = np.tile(vertices[0, :], 2)\n ys = np.concatenate((np.tile(height, 6), np.tile(-height, 6)))\n zs = np.tile(vertices[1, :], 2)\n\n triangles = np.zeros((20, 3), np.int)\n # Top basal face\n triangles[0] = [0, 1, 2]\n triangles[1] = [0, 2, 5]\n triangles[2] = [2, 3, 5]\n triangles[3] = [3, 4, 5]\n # Bottom basal face\n triangles[4] = [6, 8, 7]\n triangles[5] = [6, 11, 8]\n triangles[6] = [8, 11, 9]\n triangles[7] = [9, 11, 10]\n # Prism faces\n triangles[8] = [0, 5, 11]\n triangles[9] = [0, 11, 6]\n triangles[10] = [4, 10, 5]\n triangles[11] = [5, 10, 11]\n triangles[12] = [3, 9, 4]\n triangles[13] = [4, 9, 10]\n triangles[14] = [2, 8, 3]\n triangles[15] = [3, 8, 9]\n triangles[16] = [1, 7, 2]\n triangles[17] = [2, 7, 8]\n triangles[18] = [0, 6, 1]\n triangles[19] = [1, 6, 7]\n\n return np.array([xs, ys, zs]), triangles\n\n\ndef generate_hexagonal_crystal(rot_a, rot_a_std, rot_b, rot_b_std, c_a_ratio, c_a_ratio_std):\n \"\"\"\n Generate a hexagonal crystal with given rotation properties and c/a ratio.\n :param rot_a: Rotation around a axis.\n :param rot_a_std: Standard deviation of a axis rotation.\n :param rot_b: Rotation around b axis.\n :param rot_b_std: Standard deviation of b axis rotation.\n :param c_a_ratio: c/a ratio of crystal.\n :param c_a_ratio_std: Standard deviation of c/a ratio.\n :return:\n \"\"\"\n vertices, triangles = generate_hexagonal_prototype_crystal()\n\n vertices[1, :] = (c_a_ratio + c_a_ratio_std * np.random.randn()) * vertices[1, :]\n vertices /= vertices.max()\n\n # Calculate areas\n areas = np.zeros((triangles.shape[0],))\n # Basal faces\n big_basal_area = 0.5 * np.sqrt(3.0)\n small_basal_area = 0.5 * (1.5 * np.sqrt(3.0) - 2.0 * big_basal_area)\n areas[0] = small_basal_area\n areas[1] = big_basal_area\n areas[2] = big_basal_area\n areas[3] = small_basal_area\n areas[4] = small_basal_area\n areas[5] = big_basal_area\n areas[6] = big_basal_area\n areas[7] = small_basal_area\n # Prism faces\n areas[8:] = 0.5 * c_a_ratio\n\n # Make sure areas sum to 1.0 for easier probability calculations.\n areas /= np.sum(areas)\n\n # Rotate vertices of crystal\n rotate_a = rot_a + rot_a_std * np.random.randn()\n rotate_b = rot_b + rot_b_std * np.random.randn()\n rotate_c = 2.0 * np.pi * np.random.rand()\n vertices = rotate(vertices, rotate_a, rotate_b, rotate_c)\n\n # Calculate normals of crystal\n normals = get_normals(vertices, triangles)\n\n return vertices, triangles, normals, areas\n\n\ndef rotate(vertices, rotate_a, rotate_b, rotate_c):\n \"\"\"\n Rotate vertices of a crystal.\n :param vertices: 3xn ndarray of coordinates.\n :param rotate_a: Rotation angle around crystal a-axis.\n :param rotate_b: Rotation angle around crystal b-axis.\n :param rotate_c: Rotation angle around crystal c-axis.\n :return: Modified vertex coordinates as 3xn array.\n \"\"\"\n rasin = np.sin(rotate_a)\n racos = np.cos(rotate_a)\n vertices = np.array([\n vertices[0, :],\n racos * vertices[1, :] - rasin * vertices[2, :],\n rasin * vertices[1, :] + racos * vertices[2, :]\n ])\n\n rbsin = np.sin(rotate_b)\n rbcos = np.cos(rotate_b)\n 
vertices = np.array([\n rbcos * vertices[0, :] - rbsin * vertices[1, :],\n rbsin * vertices[0, :] + rbcos * vertices[1, :],\n vertices[2, :]\n ])\n\n rcsin = np.sin(rotate_c)\n rccos = np.cos(rotate_c)\n vertices = np.array([\n rccos * vertices[0, :] - rcsin * vertices[2, :],\n vertices[1, :],\n rcsin * vertices[0, :] + rccos * vertices[2, :]\n ])\n\n return vertices\n\n\ndef get_normals(vertices, triangles):\n \"\"\"\n Calculate normals for each triangle in crystal.\n :param vertices: 3xn array of vertex coordinates.\n :param triangles: nx3 list of vertex indices.\n :return: 3xn array of normalized normal vectors.\n \"\"\"\n normals = []\n for triangle in triangles:\n vertices_in_triangle = vertices[:, triangle]\n normal = np.cross(vertices_in_triangle[:, 2] - vertices_in_triangle[:, 0], vertices_in_triangle[:, 1] - vertices_in_triangle[:, 0])\n normal /= np.linalg.norm(normal)\n normals.append(normal)\n normals = np.array(normals).T\n return normals\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.cos",
"numpy.tile",
"numpy.sin",
"numpy.linalg.norm",
"numpy.random.randn",
"numpy.random.rand",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
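
Aside (not part of the stored record): the crystals.py code field above generates randomly oriented hexagonal prism geometry. The sketch below calls generate_hexagonal_crystal and checks the shapes documented in its docstrings; it assumes the file is importable as halosim.crystals, and note that the stored code uses the removed np.int alias, so it runs unmodified only on NumPy versions that still provide it.

import numpy as np
from halosim.crystals import generate_hexagonal_crystal

np.random.seed(0)
vertices, triangles, normals, areas = generate_hexagonal_crystal(
    rot_a=0.0, rot_a_std=0.1,        # rotation about the a-axis: mean, std (radians)
    rot_b=0.0, rot_b_std=0.1,        # rotation about the b-axis: mean, std
    c_a_ratio=1.0, c_a_ratio_std=0.05)

print(vertices.shape)                # (3, 12): 12 prism vertices as columns
print(triangles.shape)               # (20, 3): vertex indices per triangle
print(normals.shape)                 # (3, 20): unit normal per triangle
print(round(float(areas.sum()), 6))  # 1.0 -- face areas are normalized to sum to one
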
felixmzd/Conceptors.jl | [
"6673ee497cb6351f8f84cec81a48b54e5e3591a5"
] | [
"lorenz/reservoir_lorenz.py"
] | [
"from lorenz import functions\nimport numpy as np\n\n\nclass Reservoir:\n\n def __init__(self, N=500, alpha=400, NetSR=0.6, bias_scale=0.4, inp_scale=1.2, conn=None):\n\n self.N = N\n self.alpha = alpha\n self.NetSR = NetSR\n self.bias_scale = bias_scale\n self.inp_scale = inp_scale\n if not conn:\n self.conn = 10.0 / self.N\n else:\n self.conn = conn\n\n self.W_raw = self.NetSR * functions.IntWeights(self.N, self.N, self.conn)\n self.W_bias = self.bias_scale * np.random.randn(self.N)\n\n def run(self, patterns, t_learn=2000, t_wash=500, TyA_wout=0.01, TyA_wload=0.0001,\n gradient_load=False, gradient_c=False, load=True, gradient_window=1, c_adapt_rate=0.01, gradient_cut=2.0):\n\n self.patterns = patterns\n self.t_learn = t_learn\n self.t_wash = t_wash\n self.TyA_wout = TyA_wout\n self.TyA_wload = TyA_wload\n self.gradient_load = gradient_load\n self.gradient_c = gradient_c\n self.gradien_cut = gradient_cut\n self.c_adapt_rate = c_adapt_rate\n self.n_patterns = len(self.patterns)\n\n if type(self.patterns[0]) == np.ndarray:\n self.n_ip_dim = len(patterns[0][0])\n else:\n if type(self.patterns[0](0)) == np.float64:\n self.n_ip_dim = 1\n else:\n self.n_ip_dim = len(self.patterns[0](0))\n\n self.W_in = self.inp_scale * np.random.randn(self.N, self.n_ip_dim)\n\n self.C = []\n\n self.TrainArgs = np.zeros([self.N, self.n_patterns * self.t_learn])\n self.TrainOldArgs = np.zeros([self.N, self.n_patterns * self.t_learn])\n TrainOuts = np.zeros([self.n_ip_dim, self.n_patterns * self.t_learn])\n I = np.eye(self.N)\n\n for i, p in zip(range(self.n_patterns), self.patterns):\n\n x = np.zeros([self.N])\n xOld = np.zeros([self.N])\n xColl = np.zeros([self.N, self.t_learn])\n xOldColl = np.zeros([self.N, self.t_learn])\n uColl = np.zeros([self.n_ip_dim, self.t_learn])\n Cc = np.zeros([self.N, self.N])\n\n for t in range(self.t_learn + self.t_wash):\n if not type(p) == np.ndarray:\n u = np.reshape(p(t), self.n_ip_dim)\n else:\n u = p[t]\n\n xOld = x\n\n x = np.tanh(np.dot(self.W_raw, x) + np.dot(self.W_in, u) + self.W_bias)\n\n if gradient_c:\n\n grad = x - np.dot(Cc, x)\n norm = np.linalg.norm(grad)\n if (norm > self.gradien_cut):\n grad = self.gradien_cut / norm * grad\n Cc = Cc + self.c_adapt_rate * (np.outer(grad, x.T) - (self.alpha ** -2) * Cc)\n\n if (t >= self.t_wash):\n xColl[:, t - self.t_wash] = x\n xOldColl[:, t - self.t_wash] = xOld\n uColl[:, t - self.t_wash] = u\n\n if not gradient_c:\n\n R = np.dot(xColl, np.transpose(xColl)) / self.t_learn\n U, S, V = np.linalg.svd(R, full_matrices=True)\n S = np.diag(S)\n S = (np.dot(S, np.linalg.inv(S + (self.alpha ** -2) * I)))\n self.C.append(np.dot(U, np.dot(S, U.T)))\n\n else:\n\n self.C.append(Cc)\n\n self.TrainArgs[:, i * self.t_learn:(i + 1) * self.t_learn] = xColl\n self.TrainOldArgs[:, i * self.t_learn:(i + 1) * self.t_learn] = xOldColl\n TrainOuts[:, i * self.t_learn:(i + 1) * self.t_learn] = uColl\n\n if load:\n \"\"\" Output Training \"\"\"\n\n self.W_out = functions.RidgeWout(self.TrainArgs, TrainOuts, self.TyA_wout)\n self.NRMSE_readout = functions.NRMSE(np.dot(self.W_out, self.TrainArgs), TrainOuts);\n print(\"NRMSE for training of output connections: \")\n print(self.NRMSE_readout)\n\n \"\"\" Loading \"\"\"\n\n W_bias_rep = np.tile(self.W_bias, (self.n_patterns * self.t_learn, 1)).T\n W_targets = (np.arctanh(self.TrainArgs) - W_bias_rep)\n self.W = functions.RidgeWload(self.TrainOldArgs, W_targets, self.TyA_wload)\n self.NRMSE_load = functions.NRMSE(np.dot(self.W, self.TrainOldArgs), W_targets)\n print(\"NRMSE avg. 
per neuron for re-learning of connecivity matrix (loading): \")\n print(np.mean(self.NRMSE_load))\n\n def recall(self, t_recall=200):\n\n self.Y_recalls = []\n self.t_recall = t_recall\n\n for i in range(self.n_patterns):\n\n Cc = self.C[i]\n\n # start with a random state\n # x = 0.5*np.random.randn(self.N)\n\n # start from the last network state from training\n x = self.TrainArgs[:, -1]\n y_recall = np.zeros([self.t_recall, self.n_ip_dim])\n\n for t in range(self.t_recall):\n x = np.dot(Cc, np.tanh(np.dot(self.W, x) + self.W_bias))\n y = np.dot(self.W_out, x)\n y_recall[t] = y\n\n self.Y_recalls.append(y_recall)\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.svd",
"numpy.arctanh",
"numpy.linalg.inv",
"numpy.eye",
"numpy.tile",
"numpy.linalg.norm",
"numpy.random.randn",
"numpy.mean",
"numpy.transpose",
"numpy.outer",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
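
Aside (not part of the stored record): the reservoir_lorenz.py code field above implements a conceptor-style echo state network. The sketch below stores and recalls two sinusoidal patterns; it assumes the file and its lorenz.functions helpers are importable as a lorenz package, matching the record's own import line, and the printed shapes follow from the run() and recall() code shown above.

import numpy as np
from lorenz.reservoir_lorenz import Reservoir

# Patterns may be callables t -> float; run() then infers n_ip_dim = 1
patterns = [lambda t: np.sin(2 * np.pi * t / 20.0),
            lambda t: np.sin(2 * np.pi * t / 31.0)]

np.random.seed(0)
res = Reservoir(N=200, alpha=10)             # smaller network/aperture than the defaults (500, 400)
res.run(patterns, t_learn=1000, t_wash=200)  # fits W_out and reloads the recurrent weights W
res.recall(t_recall=300)                     # autonomous run under each stored conceptor

print(len(res.Y_recalls))                    # 2 -- one recall per pattern
print(res.Y_recalls[0].shape)                # (300, 1)
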
uafgeotools/array_processing | [
"9ad464588aa162d36c9a90677784d6a8d7d4e6e8"
] | [
"array_processing/tools/array_characterization.py"
] | [
"import numpy as np\nfrom scipy import optimize\nfrom scipy.special import gammainc\nfrom fastkml import kml\n\n\ndef arraySig(rij, kmax, sigLevel, p=0.9, velLims=(0.27, 0.36), NgridV=100,\n NgridTh=100, NgridK=100):\n r\"\"\"\n Estimate 2-D array uncertainties in trace velocity and back-azimuth, and\n calculate impulse response.\n\n Args:\n rij: Coordinates (km) of sensors as eastings & northings in a\n ``(2, N)`` array\n kmax (float): Impulse response will be calculated over the range\n [-`kmax`, `kmax`] in :math:`k`-space (1/km)\n sigLevel (float): Variance in time delays (s), typically\n :math:`\\sigma_\\tau`\n p (float): Confidence limit in uncertainty estimates\n velLims (tuple): Range of trace velocities (km/s) to estimate\n uncertainty over. A single value can be used, but the by default a\n range is used\n NgridV (int): Number of velocities to estimate uncertainties in range\n `velLims`\n NgridTh (int): Number of angles to estimate uncertainties in range\n :math:`[0^\\circ, 360^\\circ]`\n NgridK (int): Number of :math:`k`-space coordinates to calculate in\n each dimension\n\n Returns:\n tuple: Tuple containing:\n\n - **sigV** – Uncertainties in trace velocity (°) as a function of trace\n velocity and back-azimuth as ``(NgridTh, NgridV)`` array\n - **sigTh** – Uncertainties in trace velocity (km/s) as a function of\n trace velocity and back-azimuth as ``(NgridTh, NgridV)`` array\n - **impResp** – Impulse response over grid as ``(NgridK, NgridK)``\n array\n - **vel** – Vector of trace velocities (km/s) for axis in\n ``(NgridV, )`` array\n - **th** – Vector of back azimuths (°) for axis in ``(NgridTh, )``\n array\n - **kvec** – Vector wavenumbers for axes in :math:`k`-space in\n ``(NgridK, )`` array\n \"\"\"\n\n # calculate uncertainties\n # preliminaries\n dij = co_array(rij)\n th = np.linspace(0, 360 * (1 - 1 / NgridTh), NgridTh) / 180 * np.pi\n if len(velLims) == 1:\n vel = velLims\n else:\n vel = np.linspace(velLims[0], velLims[1], NgridV)\n Th, Vel = np.meshgrid(th, vel)\n S1 = np.sin(Th) / Vel\n S2 = np.cos(Th) / Vel\n sigTh = np.zeros(Th.shape)\n sigV = sigTh.copy()\n # single-pass calcs\n # calculate eigenvalues/vectors of design matrix (one-time shot)\n C = [email protected]\n cii, Ve = np.linalg.eig(C)\n thEigR = np.arctan2(Ve[1, 0], Ve[0, 0])\n R = np.array([[np.cos(thEigR), np.sin(thEigR)],\n [-np.sin(thEigR), np.cos(thEigR)]])\n # calculate chi2 for desired confidence level\n x2 = chi2(2, 1-p)\n sigS = sigLevel / np.sqrt(cii)\n # prep for loop\n a = np.sqrt(x2) * sigS[0]\n b = np.sqrt(x2) * sigS[1]\n N, M = Th.shape\n\n # froot loops\n for n in range(N):\n for m in range(M):\n # calculate elliptical extrema\n So = R @ [[S1[n, m]], [S2[n, m]]]\n eExtrm, eVec = rthEllipse(a, b, So[0][0], So[1][0])\n # rotate & recalculate\n eVec = eVec @ R\n # fix up angle calcs\n sigTh[n, m] = np.abs(np.diff(\n (np.arctan2(eVec[2:, 1], eVec[2:, 0]) * 180 / np.pi - 360)\n % 360))\n if sigTh[n, m] > 180:\n sigTh[n, m] = np.abs(sigTh[n, m] - 360)\n sigV[n, m] = np.abs(np.diff(1 / eExtrm[:2]))\n\n # prepare impulse response\n impResp, kvec = impulseResp(dij, kmax, NgridK)\n\n return sigV, sigTh, impResp, vel, th / np.pi * 180, kvec\n\n\ndef impulseResp(dij, kmax, NgridK):\n r\"\"\"\n Calculate impulse response of a 2-D array.\n\n Args:\n dij: Coordinates of co-array of ``N`` sensors in a ``(2, (N*N-1)/2)``\n array\n kmax (float): Impulse response will be calculated over the range\n [-`kmax`, `kmax`] in :math:`k`-space\n NgridK (int): Number of :math:`k`-space coordinates to calculate in\n 
each dimension\n\n Returns:\n tuple: Tuple containing:\n\n - **d** – Impulse response over grid as ``(NgridK, NgridK)`` array\n - **kvec** - Vector wavenumbers for axes in :math:`k`-space in\n ``(NgridK, )`` array\n \"\"\"\n\n # pre-allocate grid for :math:`k`-space\n kvec = np.linspace(-kmax, kmax, NgridK)\n Kx, Ky = np.meshgrid(kvec, kvec)\n N = dij.shape[1]\n K = np.vstack((Ky.flatten(), Kx.flatten())).T\n d = 2 * np.cos(K @ dij)\n # last term adds in fact that cos(0)==1 for ignored self-delay terms\n d = np.reshape(np.sum(d, axis=1), (NgridK, NgridK))\n + (1 + np.sqrt(1 + 8 * N)) / 2\n\n return d, kvec\n\n\ndef rthEllipse(a, b, x0, y0):\n r\"\"\"\n Calculate angles subtending, and extremal distances to, a\n coordinate-aligned ellipse from the origin.\n\n Args:\n a (float): Semi-major axis of ellipse\n b (float): Semi-minor axis of ellipse\n x0 (float): Horizontal center of ellipse\n y0 (float): Vertical center of ellipse\n\n Returns:\n tuple: Tuple containing:\n\n - **eExtrm** – Extremal parameters in ``(4, )`` array as\n\n .. code-block:: none\n\n [min distance, max distance, min angle (degrees), max angle (degrees)]\n\n - **eVec** – Coordinates of extremal points on ellipse in ``(4, 2)``\n array as\n\n .. code-block:: none\n\n [[x min dist., y min dist.],\n [x max dist., y max dist.],\n [x max angle tangency, y max angle tangency],\n [x min angle tangency, y min angle tangency]]\n \"\"\"\n\n # set constants\n A = 2/a**2\n B = 2*x0/a**2\n C = 2/b**2\n D = 2*y0/b**2\n E = (B*x0+D*y0)/2-1\n F = C-A\n G = A/2\n H = C/2\n eExtrm = np.zeros((4,))\n eVec = np.zeros((4, 2))\n eps = np.finfo(np.float64).eps\n\n # some tolerances for numerical errors\n circTol = 1e8 # is it circular to better than circTol*eps?\n zeroTol = 1e4 # is center along a coord. axis to better than zeroTol*eps?\n magTol = 1e-5 # is a sol'n within ellipse*(1+magTol) (maginification)\n\n # pursue circular or elliptical solutions\n if np.abs(F) <= circTol * eps:\n # circle\n cent = np.sqrt(x0 ** 2 + y0 ** 2)\n eExtrm[0:2] = cent + np.array([-a, a])\n eVec[0:2, :] = np.array([\n [x0-a*x0/cent, y0-a*y0/cent],\n [x0+a*x0/cent, y0+a*y0/cent]])\n else:\n # ellipse\n # check for trivial distance sol'n\n if np.abs(y0) < zeroTol * eps:\n eExtrm[0:2] = x0 + np.array([-a, a])\n eVec[0:2, :] = np.vstack((eExtrm[0:2], [0, 0])).T\n elif np.abs(x0) < zeroTol * eps:\n eExtrm[0:2] = y0 + np.array([-b, b])\n eVec[0:2, :] = np.vstack(([0, 0], eExtrm[0:2])).T\n else:\n # use dual solutions of quartics to find best, real-valued results\n # solve quartic for y\n fy = F**2*H\n y = quarticEqn(-D*F*(2*H+F)/fy,\n (B**2*(G+F)+E*F**2+D**2*(H+2*F))/fy,\n -D*(B**2+2*E*F+D**2)/fy, (D**2*E)/fy)\n y = np.array([y[i] for i in list(np.where(y == np.real(y))[0])])\n xy = B*y / (D-F*y)\n # solve quartic for x\n fx = F**2*G\n x = quarticEqn(B*F*(2*G-F)/fx, (B**2*(G-2*F)+E*F**2+D**2*(H-F))/fx,\n B*(2*E*F-B**2-D**2)/fx, (B**2*E)/fx)\n x = np.array([x[i] for i in list(np.where(x == np.real(x))[0])])\n yx = D*x / (F*x+B)\n # combine both approaches\n distE = np.hstack(\n (np.sqrt(x ** 2 + yx ** 2), np.sqrt(xy ** 2 + y ** 2)))\n # trap real, but bogus sol's (esp. 
near Th = 180)\n distEidx = np.where(\n (distE <= np.sqrt(x0 ** 2 + y0 ** 2)\n + np.max([a, b]) * (1 + magTol))\n & (distE >= np.sqrt(x0 ** 2 + y0 ** 2)\n - np.max([a, b]) * (1 + magTol)))\n coords = np.hstack(((x, yx), (xy, y))).T\n coords = coords[distEidx, :][0]\n distE = distE[distEidx]\n eExtrm[0:2] = [distE.min(), distE.max()]\n eVec[0:2, :] = np.vstack(\n (coords[np.where(distE == distE.min()), :][0][0],\n coords[np.where(distE == distE.max()), :][0][0]))\n # angles subtended\n if x0 < 0:\n x0 = -x0\n y = -np.array(quadraticEqn(D ** 2 + B ** 2 * H / G, 4 * D * E,\n 4 * E ** 2 - B ** 2 * E / G))\n x = -np.sqrt(E / G - H / G * y ** 2)\n else:\n y = -np.array(quadraticEqn(D ** 2 + B ** 2 * H / G, 4 * D * E,\n 4 * E ** 2 - B ** 2 * E / G))\n x = np.sqrt(E / G - H / G * y ** 2)\n eVec[2:, :] = np.vstack((x, y)).T\n # various quadrant fixes\n if x0 == 0 or np.abs(x0) - a < 0:\n eVec[2, 0] = -eVec[2, 0]\n eExtrm[2:] = np.sort(np.arctan2(eVec[2:, 1], eVec[2:, 0]) / np.pi * 180)\n\n return eExtrm, eVec\n\n\ndef co_array(rij):\n r\"\"\"\n Form co-array coordinates for given array coordinates.\n\n Args:\n rij: ``(d, n)`` array; ``n`` sensor coordinates as [northing, easting,\n {elevation}] column vectors in ``d`` dimensions\n\n Returns:\n ``(d, n(n-1)//2)`` co-array, coordinates of the sensor pairing\n separations\n \"\"\"\n\n idx = [(i, j) for i in range(rij.shape[1]-1)\n for j in range(i+1, rij.shape[1])]\n\n return rij[:, [i[0] for i in idx]] - rij[:, [j[1] for j in idx]]\n\n\ndef chi2(nu, alpha, funcTol=1e-10):\n r\"\"\"\n Calculate value of a :math:`\\chi^2` such that a :math:`\\nu`-dimensional\n confidence ellipsoid encloses a fraction :math:`1 - \\alpha` of normally\n distributed variable.\n\n Args:\n nu (int): Degrees of freedom (typically embedding dimension of\n variable)\n alpha (float): Confidence interval such that :math:`\\alpha \\in [0, 1]`\n funcTol (float): Optimization function evaluation tolerance for\n :math:`\\nu \\ne 2`\n\n Returns:\n float: Value of a :math:`\\chi^2` enclosing :math:`1 - \\alpha`\n confidence region\n \"\"\"\n\n if nu == 2:\n # this shorthand owing to Ken Arnoult\n return -2 * np.log(alpha)\n else:\n # but just in case we end up with a nu != 2 situation\n gammaTest = lambda X2test: np.abs(gammainc(nu / 2,\n X2test / 2) - (1-alpha))\n return optimize.fmin(func=gammaTest, x0=1, ftol=funcTol, disp=False)\n\n\ndef cubicEqn(a, b, c):\n r\"\"\"\n Roots of cubic equation in the form :math:`x^3 + ax^2 + bx + c = 0`.\n\n Args:\n a (int or float): Scalar coefficient of cubic equation, can be\n complex\n b (int or float): Same as above\n c (int or float): Same as above\n\n Returns:\n list: Roots of cubic equation in standard form\n\n See Also:\n :func:`numpy.roots` — Generic polynomial root finder\n\n Notes:\n Relatively stable solutions, with some tweaks by Dr. Z,\n per algorithm of Numerical Recipes 2nd ed., :math:`\\S` 5.6. 
Even\n :func:`numpy.roots` can have some (minor) issues; e.g.,\n :math:`x^3 - 5x^2 + 8x - 4 = 0`.\n \"\"\"\n\n Q = a*a/9 - b/3\n R = (3*c - a*b)/6 + a*a*a/27\n Q3 = Q*Q*Q\n R2 = R*R\n ao3 = a/3\n\n # Q & R are real\n if np.isreal([a, b, c]).all():\n # 3 real roots\n if R2 < Q3:\n sqQ = -2 * np.sqrt(Q)\n theta = np.arccos(R / np.sqrt(Q3))\n # This solution first published in 1615 by Viète!\n x = [sqQ * np.cos(theta / 3) - ao3,\n sqQ * np.cos((theta + 2 * np.pi) / 3) - ao3,\n sqQ * np.cos((theta - 2 * np.pi) / 3) - ao3]\n # Q & R real, but 1 real, 2 complex roots\n else:\n # this is req'd since np.sign(0) = 0\n if R != 0:\n A = -np.sign(R) * (np.abs(R) + np.sqrt(R2 - Q3)) ** (1 / 3)\n else:\n A = -np.sqrt(-Q3) ** (1 / 3)\n if A == 0:\n B = 0\n else:\n B = Q/A\n # one real root & two conjugate complex ones\n x = [\n (A+B) - ao3,\n -.5 * (A+B) + 1j * np.sqrt(3) / 2 * (A - B) - ao3,\n -.5 * (A+B) - 1j * np.sqrt(3) / 2 * (A - B) - ao3]\n # Q & R complex, so also 1 real, 2 complex roots\n else:\n sqR2mQ3 = np.sqrt(R2 - Q3)\n if np.real(np.conj(R) * sqR2mQ3) >= 0:\n A = -(R+sqR2mQ3)**(1/3)\n else:\n A = -(R-sqR2mQ3)**(1/3)\n if A == 0:\n B = 0\n else:\n B = Q/A\n # one real root & two conjugate complex ones\n x = [\n (A+B) - ao3,\n -.5 * (A+B) + 1j * np.sqrt(3) / 2 * (A - B) - ao3,\n -.5 * (A+B) - 1j * np.sqrt(3) / 2 * (A - B) - ao3\n ]\n # parse real and/or int roots for tidy output\n for k in range(0, 3):\n if np.real(x[k]) == x[k]:\n x[k] = float(np.real(x[k]))\n if int(x[k]) == x[k]:\n x[k] = int(x[k])\n return x\n\n\ndef quadraticEqn(a, b, c):\n r\"\"\"\n Roots of quadratic equation in the form :math:`ax^2 + bx + c = 0`.\n\n Args:\n a (int or float): Scalar coefficient of quadratic equation, can be\n complex\n b (int or float): Same as above\n c (int or float): Same as above\n\n Returns:\n list: Roots of quadratic equation in standard form\n\n See Also:\n :func:`numpy.roots` — Generic polynomial root finder\n\n Notes:\n Stable solutions, even for :math:`b^2 >> ac` or complex coefficients,\n per algorithm of Numerical Recipes 2nd ed., :math:`\\S` 5.6.\n \"\"\"\n\n # real coefficient branch\n if np.isreal([a, b, c]).all():\n # note np.sqrt(-1) = nan, so force complex argument\n if b:\n # std. sub-branch\n q = -0.5*(b + np.sign(b) * np.sqrt(np.complex(b * b - 4 * a * c)))\n else:\n # b = 0 sub-branch\n q = -np.sqrt(np.complex(-a * c))\n # complex coefficient branch\n else:\n if np.real(np.conj(b) * np.sqrt(b * b - 4 * a * c)) >= 0:\n q = -0.5*(b + np.sqrt(b * b - 4 * a * c))\n else:\n q = -0.5*(b - np.sqrt(b * b - 4 * a * c))\n # stable root solution\n x = [q/a, c/q]\n # parse real and/or int roots for tidy output\n for k in 0, 1:\n if np.real(x[k]) == x[k]:\n x[k] = float(np.real(x[k]))\n if int(x[k]) == x[k]:\n x[k] = int(x[k])\n return x\n\n\ndef quarticEqn(a, b, c, d):\n r\"\"\"\n Roots of quartic equation in the form :math:`x^4 + ax^3 + bx^2 +\n cx + d = 0`.\n\n Args:\n a (int or float): Scalar coefficient of quartic equation, can be\n complex\n b (int or float): Same as above\n c (int or float): Same as above\n d (int or float): Same as above\n\n Returns:\n list: Roots of quartic equation in standard form\n\n See Also:\n :func:`numpy.roots` — Generic polynomial root finder\n\n Notes:\n Stable solutions per algorithm of CRC Std. 
Mathematical Tables, 29th\n ed.\n \"\"\"\n\n # find *any* root of resolvent cubic\n a2 = a*a\n y = cubicEqn(-b, a*c - 4*d, (4*b - a2)*d - c*c)\n y = y[0]\n # find R\n R = np.sqrt(a2 / 4 - (1 + 0j) * b + y) # force complex in sqrt\n foo = 3*a2/4 - R*R - 2*b\n if R != 0:\n # R is already complex.\n D = np.sqrt(foo + (a * b - 2 * c - a2 * a / 4) / R)\n E = np.sqrt(foo - (a * b - 2 * c - a2 * a / 4) / R) # ...\n else:\n sqrtTerm = 2 * np.sqrt(y * y - (4 + 0j) * d) # force complex in sqrt\n D = np.sqrt(foo + sqrtTerm)\n E = np.sqrt(foo - sqrtTerm)\n x = [-a/4 + R/2 + D/2,\n -a/4 + R/2 - D/2,\n -a/4 - R/2 + E/2,\n -a/4 - R/2 - E/2]\n # parse real and/or int roots for tidy output\n for k in range(0, 4):\n if np.real(x[k]) == x[k]:\n x[k] = float(np.real(x[k]))\n if int(x[k]) == x[k]:\n x[k] = int(x[k])\n\n return x\n\n\ndef read_kml(kml_file):\n r\"\"\"Parse an array KML file into a list of element latitudes and longitudes.\n\n KML file must contain a single folder containing the array element points.\n\n Args:\n kml_file (str): Full path to input KML file (extension ``.kml``)\n\n Returns:\n tuple: ``(latlist, lonlist)`` for input to :func:`~array_processing.algorithms.helpers.getrij`\n \"\"\"\n\n # Read in KML file\n k = kml.KML()\n with open(kml_file, mode='rb') as f:\n k.from_string(f.read())\n\n # Extract coordinates\n elements = list(list(list(k.features())[0].features())[0].features())\n lonlist = [element.geometry.x for element in elements]\n latlist = [element.geometry.y for element in elements]\n\n return latlist, lonlist\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"scipy.special.gammainc",
"numpy.arctan2",
"numpy.max",
"scipy.optimize.fmin",
"numpy.hstack",
"numpy.linalg.eig",
"numpy.sin",
"numpy.finfo",
"numpy.real",
"numpy.diff",
"numpy.zeros",
"numpy.log",
"numpy.array",
"numpy.isreal",
"numpy.meshgrid",
"numpy.sum",
"numpy.abs",
"numpy.conj",
"numpy.cos",
"numpy.sign",
"numpy.complex",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
FlunderMayFly/RoboND_Rover_Project | [
"9c5f8dd6a538b57f26ef3feeff953c7b5f0aaef1"
] | [
"code/decision.py"
] | [
"import numpy as np\nimport pdb\n\n\n# This is where you can build a decision tree for determining throttle, brake and steer \n# commands based on the output of the perception_step() function\ndef decision_step(Rover):\n\n # Implement conditionals to decide what to do given perception data\n # Here you're all set up with some basic functionality but you'll need to\n # improve on this decision tree to do a good job of navigating autonomously!\n\n # Example:\n # Check if we have vision data to make decisions with\n if Rover.nav_angles is not None:\n # Check for Rover.mode status\n if Rover.mode == 'forward': \n # Check the extent of navigable terrain\n if len(Rover.nav_angles) >= Rover.stop_forward: \n # If mode is forward, navigable terrain looks good \n # and velocity is below max, then throttle \n if Rover.vel < Rover.max_vel:\n # Set throttle value to throttle setting\n Rover.throttle = Rover.throttle_set\n else: # Else coast\n Rover.throttle = 0\n Rover.brake = 0\n # Set steering to average angle clipped to the range +/- 15\n Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n # If there's a lack of navigable terrain pixels then go to 'stop' mode\n elif len(Rover.nav_angles) < Rover.stop_forward:\n # Set mode to \"stop\" and hit the brakes!\n Rover.throttle = 0\n # Set brake to stored brake value\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n Rover.mode = 'stop'\n\n # If we're already in \"stop\" mode then make different decisions\n elif Rover.mode == 'stop':\n # If we're in stop mode but still moving keep braking\n if Rover.vel > 0.2:\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n # If we're not moving (vel < 0.2) then do something else\n elif Rover.vel <= 0.2:\n # Now we're stopped and we have vision data to see if there's a path forward\n if len(Rover.nav_angles) < Rover.go_forward:\n Rover.throttle = 0\n # Release the brake to allow turning\n Rover.brake = 0\n # Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning\n Rover.steer = -15 # Could be more clever here about which way to turn\n # If we're stopped but see sufficient navigable terrain in front then go!\n if len(Rover.nav_angles) >= Rover.go_forward:\n # Set throttle back to stored value\n Rover.throttle = Rover.throttle_set\n # Release the brake\n Rover.brake = 0\n # Set steer to mean angle\n Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n Rover.mode = 'forward'\n # Just to make the rover do something \n # even if no modifications have been made to the code\n else:\n Rover.throttle = Rover.throttle_set\n Rover.steer = 0\n Rover.brake = 0\n \n # If we are in the near of a sample we want to stop to go into picking mode\n # if Rover.near_sample:\n # Rover.throttle = 0\n # Rover.brake = Rover.brake_set\n # Rover.steer = 0\n\n # If in a state where want to pickup a rock send pickup command\n if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n Rover.send_pickup = True\n\n # print(str(Rover.near_sample))\n \n return Rover\n\n"
] | [
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bage79/transformer-evolution-bage | [
"715bdf61421dc19e21fb0f66bfa4b564305987f8"
] | [
"gpt/train.py"
] | [
"import argparse\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport wandb\nfrom torch.nn.parallel import DistributedDataParallel\nfrom tqdm import tqdm, trange\n\nimport config as cfg\nimport optimization as optim\nfrom vocab import load_vocab\nfrom . import data\nfrom . import model as gpt\n\n\ndef set_seed(args):\n \"\"\" random seed \"\"\"\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef init_process_group(rank, world_size):\n \"\"\" init_process_group \"\"\"\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\n\ndef destroy_process_group():\n \"\"\" destroy_process_group \"\"\"\n dist.destroy_process_group()\n\n\ndef eval_epoch(config, rank, model, data_loader):\n \"\"\" 모델 epoch 평가 \"\"\"\n matchs = []\n model.eval()\n\n n_word_total = 0\n n_correct_total = 0\n with tqdm(total=len(data_loader), desc=f\"Valid({rank})\") as pbar:\n for i, value in enumerate(data_loader):\n labels, dec_inputs = map(lambda v: v.to(config.device), value)\n\n outputs = model(dec_inputs)\n logits_cls = outputs[1]\n _, indices = logits_cls.max(1)\n\n match = torch.eq(indices, labels).detach()\n matchs.extend(match.cpu())\n accuracy = np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0\n\n pbar.update(1)\n pbar.set_postfix_str(f\"Acc: {accuracy:.3f}\")\n return np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0\n\n\ndef train_epoch(config, rank, epoch, model, criterion_lm, criterion_cls, optimizer, scheduler, train_loader):\n \"\"\" 모델 epoch 학습 \"\"\"\n losses = []\n model.train()\n\n with tqdm(total=len(train_loader), desc=f\"Train({rank}) {epoch}\") as pbar:\n for i, value in enumerate(train_loader):\n labels, dec_inputs = map(lambda v: v.to(config.device), value)\n labels_lm = dec_inputs[:, 1:].contiguous()\n\n optimizer.zero_grad()\n outputs = model(dec_inputs)\n logits_lm, logits_cls = outputs[0], outputs[1]\n\n if 0 < config.lm:\n loss_lm = criterion_lm(logits_lm.view(-1, logits_lm.size(2)), labels_lm.view(-1))\n loss_cls = criterion_cls(logits_cls, labels)\n loss = loss_lm + loss_cls\n else:\n loss_cls = criterion_cls(logits_cls, labels)\n loss = loss_cls\n\n loss_val = loss_cls.item()\n losses.append(loss_val)\n\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n pbar.update(1)\n pbar.set_postfix_str(f\"Loss: {loss_val:.3f} ({np.mean(losses):.3f})\")\n return np.mean(losses)\n\n\ndef train_model(rank, world_size, args):\n \"\"\" 모델 학습 \"\"\"\n if 1 < args.n_gpu:\n init_process_group(rank, world_size)\n master = (world_size == 0 or rank % world_size == 0)\n if master and args.wandb:\n wandb.init(project=\"transformer-evolution-bage\")\n\n vocab = load_vocab(args.vocab)\n\n config = cfg.Config.load(args.config)\n config.lm = args.lm\n config.n_enc_vocab, config.n_dec_vocab = len(vocab), len(vocab)\n config.device = torch.device(f\"cuda:{rank}\" if torch.cuda.is_available() else \"cpu\")\n print(config)\n\n best_epoch, best_loss, best_score = 0, 0, 0\n model = gpt.MovieClassification(config)\n if os.path.isfile(args.save):\n best_epoch, best_loss, best_score = model.load(args.save)\n print(f\"rank: {rank} load state dict from: {args.save}\")\n elif os.path.isfile(args.pretrain):\n epoch, loss = model.gpt.load(args.pretrain)\n print(f\"rank: {rank} load pretrain from: {args.pretrain}, epoch={epoch}, 
loss={loss}\")\n if 1 < args.n_gpu:\n model.to(config.device)\n model = DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)\n else:\n model.to(config.device)\n if master and args.wandb:\n wandb.watch(model)\n\n criterion_lm = torch.nn.CrossEntropyLoss(ignore_index=config.i_pad, reduction='mean')\n criterion_cls = torch.nn.CrossEntropyLoss()\n\n train_loader, train_sampler = data.build_data_loader(vocab, args.train, args, shuffle=True)\n test_loader, _ = data.build_data_loader(vocab, args.test, args, shuffle=False)\n\n t_total = len(train_loader) * args.epoch\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = optim.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n\n offset = best_epoch\n for step in trange(args.epoch, desc=\"Epoch\"):\n if train_sampler:\n train_sampler.set_epoch(step)\n epoch = step + offset\n\n loss = train_epoch(config, rank, epoch, model, criterion_lm, criterion_cls, optimizer, scheduler, train_loader)\n score = eval_epoch(config, rank, model, test_loader)\n if master and args.wandb:\n wandb.log({\"loss\": loss, \"accuracy\": score})\n\n if master and best_score < score:\n best_epoch, best_loss, best_score = epoch, loss, score\n if isinstance(model, DistributedDataParallel):\n model.module.save(best_epoch, best_loss, best_score, args.save)\n else:\n model.save(best_epoch, best_loss, best_score, args.save)\n print(f\">>>> rank: {rank} save model to {args.save}, epoch={best_epoch}, loss={best_loss:.3f}, socre={best_score:.3f}\")\n\n if 1 < args.n_gpu:\n destroy_process_group()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", default=\"config_half.json\", type=str, required=False,\n help=\"config file\")\n parser.add_argument(\"--lm\", default=0.0, type=float, required=False,\n help=\"language model loss rate\")\n parser.add_argument(\"--vocab\", default=\"../kowiki.model\", type=str, required=False,\n help=\"vocab file\")\n parser.add_argument(\"--train\", default=\"../data/ratings_train.json\", type=str, required=False,\n help=\"input train file\")\n parser.add_argument(\"--test\", default=\"../data/ratings_test.json\", type=str, required=False,\n help=\"input test file\")\n parser.add_argument(\"--save\", default=\"save_best.pth\", type=str, required=False,\n help=\"save file\")\n parser.add_argument(\"--pretrain\", default=\"save_pretrain.pth\", type=str, required=False,\n help=\"pretrain file\")\n parser.add_argument(\"--epoch\", default=20, type=int, required=False,\n help=\"epoch\")\n parser.add_argument(\"--batch\", default=512, type=int, required=False,\n help=\"batch\")\n parser.add_argument(\"--gpu\", default=None, type=int, required=False,\n help=\"GPU id to use.\")\n parser.add_argument('--seed', type=int, default=42, required=False,\n help=\"random seed for initialization\")\n parser.add_argument('--weight_decay', type=float, default=0, required=False,\n help=\"weight decay\")\n parser.add_argument('--learning_rate', type=float, default=5e-5, required=False,\n help=\"learning rate\")\n parser.add_argument('--adam_epsilon', type=float, 
default=1e-8, required=False,\n help=\"adam epsilon\")\n parser.add_argument('--warmup_steps', type=float, default=0, required=False,\n help=\"warmup steps\")\n args = parser.parse_args()\n\n if torch.cuda.is_available():\n args.n_gpu = torch.cuda.device_count() if args.gpu is None else 1\n else:\n args.n_gpu = 0\n set_seed(args)\n\n if 1 < args.n_gpu:\n mp.spawn(train_model,\n args=(args.n_gpu, args),\n nprocs=args.n_gpu,\n join=True)\n else:\n train_model(0 if args.gpu is None else args.gpu, args.n_gpu, args)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.multiprocessing.spawn",
"torch.eq",
"torch.manual_seed",
"numpy.mean",
"torch.cuda.is_available",
"torch.distributed.destroy_process_group",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"numpy.sum",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liangzhenduo0608/Paddle | [
"23a4f54b73305f43c469fd7444310a74b9d49b67"
] | [
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport astor\nimport gast\nimport inspect\nimport numpy as np\nimport textwrap\nimport unittest\n\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator\nfrom paddle.fluid.dygraph.jit import declarative\nfrom paddle.fluid.dygraph.nn import Linear\n\nfrom ifelse_simple_func import dyfunc_with_if_else\n\nnp.random.seed(0)\n\n\n# TODO(Aurelius): Currently, `declarative` don't support decorate the function\n# that contains layers with initialized operation, like `fc = linear(10, 3)`.\n# Because initialized ops will be added into program and be executed many times.\n# The parameters are assumed to initialized outside of the function.\ndef simple_func(x, weight_numpy):\n x = fluid.dygraph.to_variable(x)\n w = fluid.dygraph.to_variable(weight_numpy)\n y = fluid.layers.matmul(x, w)\n z = fluid.layers.mean(y)\n return z\n\n\n@declarative\ndef decorated_simple_func(x, weight_numpy):\n x = fluid.dygraph.to_variable(x)\n w = fluid.dygraph.to_variable(weight_numpy)\n y = fluid.layers.matmul(x, w)\n z = fluid.layers.mean(y)\n return z\n\n\ndef get_source_code(func):\n raw_code = inspect.getsource(func)\n code = textwrap.dedent(raw_code)\n root = gast.parse(code)\n source_code = astor.to_source(gast.gast_to_ast(root))\n return source_code\n\n\nclass StaticCode1():\n # TODO: Transform return statement\n def dyfunc_with_if_else(x_v, label=None):\n __return_1 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=False)\n __return_0 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=False)\n __return_value_0 = fluid.layers.fill_constant(\n shape=[1], dtype='float64', value=0.0)\n\n def true_fn_0(x_v):\n x_v = x_v - 1\n return x_v\n\n def false_fn_0(x_v):\n x_v = x_v + 1\n return x_v\n\n x_v = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n fluid.layers.mean(x_v)[0] > 5, true_fn_0, false_fn_0, (x_v, ),\n (x_v, ), (x_v, ))\n\n def true_fn_1(__return_0, __return_value_0, label, x_v):\n loss = fluid.layers.cross_entropy(x_v, label)\n __return_0 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=True)\n __return_value_0 = loss\n return __return_0, __return_value_0\n\n def false_fn_1(__return_0, __return_value_0):\n return __return_0, __return_value_0\n\n __return_0, __return_value_0 = (\n fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n label is not None, true_fn_1, false_fn_1,\n (__return_0, __return_value_0, label, x_v),\n (__return_0, __return_value_0), (__return_0, __return_value_0)))\n\n def true_fn_2(__return_1, __return_value_0, x_v):\n __return_1 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=True)\n __return_value_0 = x_v\n return __return_1, __return_value_0\n\n def false_fn_2(__return_1, __return_value_0):\n return __return_1, __return_value_0\n\n __return_1, __return_value_0 = (\n 
fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n fluid.dygraph.dygraph_to_static.convert_operators.\n convert_logical_not(__return_0), true_fn_2, false_fn_2,\n (__return_1, __return_value_0, x_v),\n (__return_1, __return_value_0), (__return_1, __return_value_0)))\n return __return_value_0\n\n\nclass StaticCode2():\n # TODO: Transform return statement\n def dyfunc_with_if_else(x_v, label=None):\n __return_3 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=False)\n __return_2 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=False)\n __return_value_1 = fluid.layers.fill_constant(\n shape=[1], dtype='float64', value=0.0)\n\n def true_fn_3(x_v):\n x_v = x_v - 1\n return x_v\n\n def false_fn_3(x_v):\n x_v = x_v + 1\n return x_v\n\n x_v = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n fluid.layers.mean(x_v)[0] > 5, true_fn_3, false_fn_3, (x_v, ),\n (x_v, ), (x_v, ))\n\n def true_fn_4(__return_2, __return_value_1, label, x_v):\n loss = fluid.layers.cross_entropy(x_v, label)\n __return_2 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=True)\n __return_value_1 = loss\n return __return_2, __return_value_1\n\n def false_fn_4(__return_2, __return_value_1):\n return __return_2, __return_value_1\n\n __return_2, __return_value_1 = (\n fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n label is not None, true_fn_4, false_fn_4,\n (__return_2, __return_value_1, label, x_v),\n (__return_2, __return_value_1), (__return_2, __return_value_1)))\n\n def true_fn_5(__return_3, __return_value_1, x_v):\n __return_3 = fluid.layers.fill_constant(\n shape=[1], dtype='bool', value=True)\n __return_value_1 = x_v\n return __return_3, __return_value_1\n\n def false_fn_5(__return_3, __return_value_1):\n return __return_3, __return_value_1\n\n __return_3, __return_value_1 = (\n fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(\n fluid.dygraph.dygraph_to_static.convert_operators.\n convert_logical_not(__return_2), true_fn_5, false_fn_5,\n (__return_3, __return_value_1, x_v),\n (__return_3, __return_value_1), (__return_3, __return_value_1)))\n return __return_value_1\n\n\nclass NetWithError(fluid.dygraph.layers.Layer):\n @declarative\n def forward(self, x):\n linear = fluid.dygraph.Linear(32, 64)\n y = linear(x)\n return y\n\n\nclass TestDygraphToStaticCode(unittest.TestCase):\n def setUp(self):\n # set to print all string diff when assertEqual fails\n self.maxDiff = None\n\n def test_decorator(self):\n program_translator = ProgramTranslator()\n code = program_translator.get_code(dyfunc_with_if_else)\n answer = get_source_code(StaticCode1.dyfunc_with_if_else)\n self.assertEqual(answer, code)\n\n def test_program_translator(self):\n answer = get_source_code(StaticCode2.dyfunc_with_if_else)\n program_translator = ProgramTranslator()\n code = program_translator.get_code(dyfunc_with_if_else)\n self.assertEqual(answer, code)\n\n\nclass TestEnableDeclarative(unittest.TestCase):\n def setUp(self):\n self.x = np.random.randn(30, 10, 32).astype('float32')\n self.weight = np.random.randn(32, 64).astype('float32')\n self.program_translator = ProgramTranslator()\n\n def test_raise_error(self):\n with fluid.dygraph.guard():\n self.program_translator.enable(True)\n net = NetWithError()\n with self.assertRaises(ValueError):\n net(fluid.dygraph.to_variable(self.x))\n\n def test_enable_disable_get_output(self):\n self.program_translator.enable(True)\n with fluid.dygraph.guard():\n static_output = 
self.program_translator.get_output(\n simple_func, self.x, self.weight)\n\n self.program_translator.enable(False)\n with fluid.dygraph.guard():\n dygraph_output = self.program_translator.get_output(\n simple_func, self.x, self.weight)\n self.assertTrue(\n np.allclose(\n static_output.numpy(), dygraph_output.numpy(), atol=1e-4))\n\n def test_enable_disable_get_func(self):\n\n self.program_translator.enable(True)\n with fluid.dygraph.guard():\n static_func = self.program_translator.get_func(simple_func)\n self.assertTrue(callable(static_func))\n static_output = static_func(self.x, self.weight)\n self.assertTrue(isinstance(static_output, fluid.Variable))\n\n self.program_translator.enable(False)\n with fluid.dygraph.guard():\n dygraph_func = self.program_translator.get_func(simple_func)\n self.assertTrue(callable(dygraph_func))\n dygraph_output = dygraph_func(self.x, self.weight)\n self.assertTrue(isinstance(dygraph_output, fluid.core.VarBase))\n\n def test_enable_disable_get_program(self):\n\n self.program_translator.enable(True)\n static_output = self.program_translator.get_program(simple_func, self.x,\n self.weight)\n self.assertTrue(isinstance(static_output, tuple))\n self.assertEqual(len(static_output), 4)\n self.assertTrue(isinstance(static_output[0], fluid.Program))\n self.assertTrue(isinstance(static_output[1], fluid.Program))\n # Check all inputs and outputs are Variable\n for var in static_output[2]:\n self.assertTrue(isinstance(var, fluid.Variable))\n\n for var in static_output[3]:\n self.assertTrue(isinstance(var, fluid.Variable))\n\n self.program_translator.enable(False)\n with fluid.dygraph.guard():\n dygraph_output = self.program_translator.get_program(\n simple_func, self.x, self.weight)\n self.assertTrue(isinstance(dygraph_output, fluid.core.VarBase))\n\n def test_enable_disable_declarative(self):\n\n self.program_translator.enable(True)\n with fluid.dygraph.guard():\n static_output = decorated_simple_func(self.x, self.weight)\n\n self.program_translator.enable(False)\n with fluid.dygraph.guard():\n dygraph_output = decorated_simple_func(self.x, self.weight)\n self.assertTrue(\n np.allclose(\n static_output.numpy(), dygraph_output.numpy(), atol=1e-4))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.randn",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Anaphory/100woerterbuecher | [
"d70c9077a18e1cae9506a9713edf78e9972d7143"
] | [
"05-Needleman-Wunsch/needlemanwunsch.py"
] | [
"#!/usr/bin/python\n\n\"\"\"Needleman-Wunsch algorithm for sequence alignment, and application\nto NorthEuraLex data.\"\"\"\n\nimport pandas\nimport itertools\n\n\ndef needleman_wunsch(sequence1, sequence2):\n \"\"\"Generate an alignment between sequence1 and sequence2.\n\n Use Needleman and Wunsch's dynamic programming algorithm to create\n an alignment between sequence1 and sequence2, and return the\n corresponding edit distance.\n\n \"\"\"\n data = [[None for _ in range(len(sequence1) + 1)]\n for _ in range(len(sequence2) + 1)]\n traceback = [[None for _ in range(len(sequence1) + 1)]\n for _ in range(len(sequence2) + 1)]\n\n for top in range(len(sequence1) + 1):\n data[0][top] = top\n traceback[0][top] = \"←\"\n for left in range(len(sequence2) + 1):\n data[left][0] = left\n traceback[left][0] = \"↑\"\n\n for column in range(len(sequence1)):\n for row in range(len(sequence2)):\n topleft = data[row][column]\n top = data[row][column + 1]\n left = data[row + 1][column]\n\n replace_cost = 0 if sequence1[column] == sequence2[row] else 1\n\n from_topleft = topleft + replace_cost\n from_top = top + 1\n from_left = left + 1\n\n if from_topleft < from_top and from_topleft < from_left:\n direction = \"↖\"\n cell = from_topleft\n elif from_top < from_left:\n direction = \"↑\"\n cell = from_top\n else:\n direction = \"←\"\n cell = from_left\n data[row + 1][column + 1] = cell\n traceback[row + 1][column + 1] = direction\n\n distance = data[-1][-1]\n column = len(sequence1)\n row = len(sequence2)\n alignment = []\n while row > 0 or column > 0:\n if traceback[row][column] == \"↖\":\n alignment.insert(0, (sequence1[column - 1], sequence2[row - 1]))\n row = row - 1\n column = column - 1\n elif traceback[row][column] == \"←\":\n alignment.insert(0, (sequence1[column - 1], None))\n column = column - 1\n elif traceback[row][column] == \"↑\":\n alignment.insert(0, (None, sequence2[row - 1]))\n row = row - 1\n else:\n raise RuntimeError\n\n return distance, alignment\n\n\ndef all_pairs():\n data = pandas.read_csv(\n \"../Aehnlichkeitsmatrix/beispieldaten_northeuralex.tsv\",\n keep_default_na=False,\n sep=\"\\t\")\n for concept, block in data.groupby(\"gloss\"):\n for _, group in block.groupby(\"class\"):\n if len(group) > 1:\n for (index1, data1), (index2, data2) in itertools.combinations(\n group.iterrows(), 2):\n yield needleman_wunsch(data1[\"IPA\"], data2[\"IPA\"])[1]\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
weiziyoung/instacart | [
"5da75e6a033859c3394e4e651331aafb002f161c",
"5da75e6a033859c3394e4e651331aafb002f161c"
] | [
"appendix/902_reorder.py",
"py_feature/215_onb_diff.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 30 23:28:19 2017\n\n@author: konodera\n\nnohup python -u 902_reorder.py > LOG/_xgb_item.txt &\n\n\n\"\"\"\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport numpy as np\nimport gc\nimport xgboost as xgb\nimport utils\n\nutils.start(__file__)\n\n\n\n# setting\nOUTF = '../output/sub/apdx/seq2dec.p'\nLOOP = 2\nESR = 40\n\n#seed = np.random.randint(99999)\nseed = 71\n\nnp.random.seed(seed)\n\nvalid_size = 0.05\n\n\n# XGB param\nnround = 10000\n#nround = 10\n\nparam = {'max_depth':10, \n 'eta':0.02,\n 'colsample_bytree':0.4,\n 'subsample':0.75,\n 'silent':1,\n 'nthread':27,\n 'eval_metric':'logloss',\n 'objective':'binary:logistic',\n 'tree_method':'hist'\n }\n\nprint(\"\"\"#==== print param ======\"\"\")\nprint('OUTF:', OUTF)\nprint('seed:', seed)\n\n#==============================================================================\n# prepare\n#==============================================================================\ntrain = utils.read_pickles('../feature/{}/all_apdx'.format('trainT-0'))\n\n# f317 obj into int\n#col = [c for c in train.columns if 'seq2' in c and not '_df' in c]\n#train[col] = train[col].astype(np.float32)\n\ny_train = train['y']\nX_train = train.drop('y', axis=1)\ndel train\ngc.collect()\n\n# drop id\ncol = [c for c in X_train.columns if '_id' in c] + ['is_train']\ncol.remove('user_id')\nprint('drop1',col)\nX_train.drop(col, axis=1, inplace=True) # keep user_id\n\n# drop obj\ncol = X_train.dtypes[X_train.dtypes=='object'].index.tolist()+['seq2dec_r0_df2']\nprint('drop2',col)\nX_train.drop(col, axis=1, inplace=True)\n\nX_train.fillna(-1, inplace=1)\n\n#==============================================================================\n# SPLIT!\nprint('split by user')\n#==============================================================================\ntrain_user = X_train[['user_id']].drop_duplicates()\n\ndef split_build_valid():\n \n train_user['is_valid'] = np.random.choice([0,1], size=len(train_user), \n p=[1-valid_size, valid_size])\n valid_n = train_user['is_valid'].sum()\n build_n = (train_user.shape[0] - valid_n)\n \n print('build user:{}, valid user:{}'.format(build_n, valid_n))\n valid_user = train_user[train_user['is_valid']==1].user_id\n is_valid = X_train.user_id.isin(valid_user)\n \n dbuild = xgb.DMatrix(X_train[~is_valid].drop('user_id', axis=1), y_train[~is_valid])\n dvalid = xgb.DMatrix(X_train[is_valid].drop('user_id', axis=1), label=y_train[is_valid])\n watchlist = [(dbuild, 'build'),(dvalid, 'valid')]\n \n print('FINAL SHAPE')\n print('dbuild.shape:{} dvalid.shape:{}\\n'.format((dbuild.num_row(), dbuild.num_col()),\n (dvalid.num_row(), dvalid.num_col())))\n\n return dbuild, dvalid, watchlist\n\n#==============================================================================\nprint('hold out')\n#==============================================================================\n\n# hold out\nmodels = []\nfor i in range(LOOP):\n print('LOOP',i)\n dbuild, dvalid, watchlist = split_build_valid()\n \n if i==0:\n col_train = dbuild.feature_names\n \n model = xgb.train(param, dbuild, nround, watchlist,\n early_stopping_rounds=ESR, verbose_eval=5)\n models.append(model)\n# model.save_model('../output/model/{}/xgb_item_{}.model'.format(DATE, i))\n # VALID\n valid_yhat = model.predict(dvalid)\n print('Valid Mean:', np.mean(valid_yhat))\n del dbuild, dvalid, watchlist\n gc.collect()\n\ndel train_user, X_train, 
y_train\ngc.collect()\n\n#==============================================================================\nprint('test')\n#==============================================================================\ntest = utils.read_pickles('../feature/{}/all_apdx'.format('test')).fillna(-1)\n\n# f317 obj into int\n#col = [c for c in test.columns if 'seq2' in c and not '_df' in c]\n#test[col] = test[col].astype(np.float32)\n\n\nsub_test = test[['order_id', 'product_id']]\n\ndtest = xgb.DMatrix(test[col_train])\nsub_test['yhat'] = 0\nfor model in models:\n sub_test['yhat'] += model.predict(dtest)\nsub_test['yhat'] /= LOOP\nprint('Test Mean:', sub_test['yhat'].mean())\n\nsub_test.to_pickle(OUTF)\n\n\n#==============================================================================\nutils.end(__file__)\n\n\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 5 22:36:10 2017\n\n@author: konodera\n\n\n\"\"\"\n\nimport pandas as pd\nimport gc\nimport numpy as np\nfrom collections import defaultdict\nfrom scipy.stats import skew\nimport utils\nutils.start(__file__)\n\n#==============================================================================\n# load\n#==============================================================================\n\ncol = ['product_id', 'user_id', 'order_number', 'order_number_rev']\nlog = utils.read_pickles('../input/mk/log', col).sort_values(col[:3])\n\n\"\"\"\n1 1 1\n1 1 2\n1 1 4\n1 2 3\n1 2 4\n2 2 5\n\"\"\"\n#==============================================================================\n# def\n#==============================================================================\ndef make(T):\n \"\"\"\n T = 0\n folder = 'trainT-0'\n \"\"\"\n if T==-1:\n folder = 'test'\n else:\n folder = 'trainT-'+str(T)\n \n log_ = log[log.order_number_rev>T]\n log_['user_max_onb'] = log_.groupby('user_id').order_number.transform(np.max)\n \n item_min = defaultdict(int)\n item_mean = defaultdict(int)\n item_median = defaultdict(int)\n item_max = defaultdict(int)\n item_std = defaultdict(int)\n item_skew = defaultdict(int)\n \n pid_bk = uid_bk = onb_bk = None\n diff = []\n \n for pid, uid, onb, max_onb in log_[['product_id', 'user_id', 'order_number', 'user_max_onb']].values:\n \n if pid==pid_bk and uid==uid_bk:\n diff.append(onb-onb_bk)\n \"\"\"\n pattern would be like:\n onb -> diff \n 1111 1,2,3,4 -> [1,1,1]\n 11101 1,2,3,5 -> [1,1,2]\n 111 1,2,3 -> [1,1]\n 1101 1,2,4 -> [1,2]\n 1011 1,3,4 -> [2,1]\n \"\"\"\n \n elif pid==pid_bk and uid!=uid_bk:\n pass\n elif pid!=pid_bk:\n if len(diff)>0:\n item_min[pid] = np.min(diff)\n item_mean[pid] = np.mean(diff)\n item_median[pid] = np.median(diff)\n item_max[pid] = np.max(diff)\n item_std[pid] = np.std(diff)\n item_skew[pid] = skew(diff)\n diff = []\n \n pid_bk = pid\n uid_bk = uid\n onb_bk = onb\n \n item_min = pd.DataFrame.from_dict(item_min, orient='index').reset_index()\n item_min.columns = ['product_id', 'item_onb_diff_min']\n item_mean = pd.DataFrame.from_dict(item_mean, orient='index').reset_index()\n item_mean.columns = ['product_id', 'item_onb_diff_mean']\n item_median = pd.DataFrame.from_dict(item_median, orient='index').reset_index()\n item_median.columns = ['product_id', 'item_onb_diff_median']\n item_max = pd.DataFrame.from_dict(item_max, orient='index').reset_index()\n item_max.columns = ['product_id', 'item_onb_diff_max']\n item_std = pd.DataFrame.from_dict(item_std, orient='index').reset_index()\n item_std.columns = ['product_id', 'item_onb_diff_std']\n item_skew = pd.DataFrame.from_dict(item_skew, orient='index').reset_index()\n item_skew.columns = ['product_id', 'item_onb_diff_skew']\n \n df1 = pd.merge(item_min, item_mean, on='product_id', how='outer')\n df2 = pd.merge(item_median, item_max, on='product_id', how='outer')\n df3 = pd.merge(item_std, item_skew, on='product_id', how='outer')\n \n df = pd.merge(pd.merge(df1, df2, on='product_id', how='outer'), \n df3, on='product_id', how='outer')\n \n df.fillna(-99, inplace=True)\n df.to_pickle('../feature/{}/f215_product.p'.format(folder))\n \n\n#==============================================================================\n# main\n#==============================================================================\nmake(0)\nmake(1)\nmake(2)\n\nmake(-1)\n\n\n#==============================================================================\nutils.end(__file__)\n\n"
] | [
[
"numpy.mean",
"numpy.random.seed"
],
[
"pandas.merge",
"numpy.min",
"numpy.median",
"numpy.max",
"numpy.std",
"numpy.mean",
"pandas.DataFrame.from_dict",
"scipy.stats.skew"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
manoelhortaribeiro/HatefulUsersTwitter | [
"84a5f58f1b0d8de932c635488564c10184b2840d"
] | [
"preprocessing/5_get_diffusion_graph.py"
] | [
"import networkx as nx\nimport numpy as np\n\ninitial_belief = 1\nk = 2\n\nnp.random.seed(1)\ngraph = nx.read_graphml(\"../data/preprocessing/users_infected.graphml\")\n\nslur_nodes = list(nx.get_node_attributes(graph, \"slur\"))\nother_nodes = list(set(graph.nodes()).difference(set(slur_nodes)))\nnode_list = slur_nodes + other_nodes\n\ntransition_matrix = nx.adjacency_matrix(graph, nodelist=node_list).asfptype()\nn = transition_matrix.shape[0]\n\nfor i in range(n):\n total = transition_matrix[i, :].sum()\n if total != 0:\n transition_matrix[i, :] = transition_matrix[i, :] / total\n\n\nbeliefs = np.zeros(len(node_list))\nbeliefs[:len(slur_nodes)] = initial_belief\n\nfor _ in range(k):\n out = transition_matrix.dot(beliefs)\n beliefs = out\n\n\nfinal_beliefs_dict = dict()\nfor node, belief in zip(node_list, beliefs):\n final_beliefs_dict[node] = float(belief)\n\nnx.set_node_attributes(graph, name=\"diffusion_slur\", values=final_beliefs_dict)\nnx.write_graphml(graph, \"../data/preprocessing/users_infected_diffusion.graphml\".format(k))\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
x893/openmv | [
"ec968f4988fbd3baf8c9b97b8f51ac0ea942496f"
] | [
"usr/openmv.py"
] | [
"#!/usr/bin/env python2\n# This file is part of the OpenMV project.\n# Copyright (c) 2013/2014 Ibrahim Abdelkader <[email protected]>\n# This work is licensed under the MIT license, see the file LICENSE for details.\n#\n# Openmv module.\n\nimport struct\nimport sys,time\nimport serial\nimport platform\nimport numpy as np\nfrom PIL import Image\n\n__serial = None\n__FB_HDR_SIZE =12\n\n# USB Debug commands\n__USBDBG_CMD = 48\n__USBDBG_FW_VERSION = 0x80\n__USBDBG_FRAME_SIZE = 0x81\n__USBDBG_FRAME_DUMP = 0x82\n__USBDBG_FRAME_UPDATE = 0x04\n__USBDBG_SCRIPT_EXEC = 0x05\n__USBDBG_SCRIPT_STOP = 0x06\n__USBDBG_SCRIPT_SAVE = 0x07\n__USBDBG_SCRIPT_RUNNING = 0x87\n__USBDBG_TEMPLATE_SAVE = 0x08\n__USBDBG_DESCRIPTOR_SAVE= 0x09\n__USBDBG_ATTR_READ = 0x8A\n__USBDBG_ATTR_WRITE = 0x0B\n__USBDBG_SYS_RESET = 0x0C\n__USBDBG_SYS_BOOT = 0x0D\n__USBDBG_JPEG_ENABLE = 0x0E\n__USBDBG_TX_BUF_LEN = 0x8E\n__USBDBG_TX_BUF = 0x8F\n\nATTR_CONTRAST =0\nATTR_BRIGHTNESS =1\nATTR_SATURATION =2\nATTR_GAINCEILING=3\n\n__BOOTLDR_START = 0xABCD0001\n__BOOTLDR_RESET = 0xABCD0002\n__BOOTLDR_ERASE = 0xABCD0004\n__BOOTLDR_WRITE = 0xABCD0008\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n # open CDC port\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\ndef disconnect():\n global __serial\n try:\n if (__serial):\n __serial.close()\n __serial = None\n except:\n pass\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\ndef fb_size():\n # read fb header\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FRAME_SIZE, __FB_HDR_SIZE))\n return struct.unpack(\"III\", __serial.read(12))\n\ndef fb_dump():\n size = fb_size()\n\n if (not size[0]):\n # frame not ready\n return None\n\n if (size[2] > 2): #JPEG\n num_bytes = size[2]\n else:\n num_bytes = size[0]*size[1]*size[2]\n\n # read fb data\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FRAME_DUMP, num_bytes))\n buff = __serial.read(num_bytes)\n\n if size[2] == 1: # Grayscale\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2: # RGB565\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 0xF800) >>11)*255.0/31.0).astype(np.uint8)\n g = (((arr & 0x07E0) >>5) *255.0/63.0).astype(np.uint8)\n b = (((arr & 0x001F) >>0) *255.0/31.0).astype(np.uint8)\n buff = np.column_stack((r,g,b))\n else: # JPEG\n try:\n buff = np.asarray(Image.frombuffer(\"RGB\", size[0:2], buff, \"jpeg\", \"RGB\", \"\"))\n except Exception as e:\n print (\"JPEG decode error (%s)\"%(e))\n return None\n\n if (buff.size != (size[0]*size[1]*3)):\n return None\n\n return (size[0], size[1], buff.reshape((size[1], size[0], 3)))\n\ndef fb_update():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FRAME_UPDATE, 0))\n\ndef exec_script(buf):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_EXEC, len(buf)))\n __serial.write(buf)\n\ndef stop_script():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\ndef script_running():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack(\"I\", __serial.read(4))[0]\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack(\"IIII\", x, y, w, h) + path\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE, len(buf)))\n __serial.write(buf)\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack(\"HHHH\", x, y, w, h) + path\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n 
__serial.write(buf)\n\ndef set_attr(attr, value):\n __serial.write(struct.pack(\"<BBIhh\", __USBDBG_CMD, __USBDBG_ATTR_WRITE, 0, attr, value))\n\ndef get_attr(attr):\n __serial.write(struct.pack(\"<BBIh\", __USBDBG_CMD, __USBDBG_ATTR_READ, 1, attr))\n return __serial.read(1)\n\ndef reset():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\ndef bootloader_start():\n __serial.write(struct.pack(\"<I\", __BOOTLDR_START))\n return struct.unpack(\"I\", __serial.read(4))[0] == __BOOTLDR_START\n\ndef bootloader_reset():\n __serial.write(struct.pack(\"<I\", __BOOTLDR_RESET))\n\ndef flash_erase(sector):\n __serial.write(struct.pack(\"<II\", __BOOTLDR_ERASE, sector))\n\ndef flash_write(buf):\n __serial.write(struct.pack(\"<I\", __BOOTLDR_WRITE) + buf)\n\ndef tx_buf_len():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack(\"I\", __serial.read(4))[0]\n\ndef tx_buf(bytes):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\ndef fw_version():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack(\"III\", __serial.read(12))\n\ndef enable_jpeg(enable):\n __serial.write(struct.pack(\"<BBIH\", __USBDBG_CMD, __USBDBG_JPEG_ENABLE, 0, enable))\n\nif __name__ == '__main__':\n if len(sys.argv)!= 2:\n print ('usage: openmv.py <script>')\n sys.exit(1)\n with open(sys.argv[1], 'r') as fin:\n buf = fin.read()\n\n s = serial.Serial(\"/dev/openmvcam\", 921600, timeout=0.3)\n init(s)\n exec_script(buf)\n tx_len = tx_buf_len()\n if (tx_len):\n print(tx_buf(tx_len))\n s.close()\n"
] | [
[
"numpy.fromstring",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shree-gade/CrypTen | [
"09f636827a07291f50a4ba62a6d4274a70265b4d"
] | [
"crypten/common/functions/approximations.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport crypten\nimport torch\nfrom crypten.config import cfg\n\n\n__all__ = [\n \"exp\",\n \"log\",\n \"reciprocal\",\n \"inv_sqrt\",\n \"sqrt\",\n \"_eix\",\n \"cossin\",\n \"cos\",\n \"sin\",\n \"sigmoid\",\n \"tanh\",\n \"erf\",\n \"softmax\",\n \"log_softmax\",\n]\n\n\n# Iterative methods:\ndef exp(self):\n r\"\"\"Approximates the exponential function using a limit approximation:\n\n .. math::\n\n exp(x) = \\lim_{n \\\\rightarrow \\\\infty} (1 + x / n) ^ n\n\n Here we compute exp by choosing n = 2 ** d for some large d equal to\n `iterations`. We then compute (1 + x / n) once and square `d` times.\n\n Set the number of iterations for the limit approximation with\n config.exp_iterations.\n \"\"\" # noqa: W605\n iters = cfg.functions.exp_iterations\n\n result = 1 + self.div(2 ** iters)\n for _ in range(iters):\n result = result.square()\n return result\n\n\ndef log(self, input_in_01=False):\n r\"\"\"\n Approximates the natural logarithm using 8th order modified\n Householder iterations. This approximation is accurate within 2% relative\n error on [0.0001, 250].\n\n Iterations are computed by: :math:`h = 1 - x * exp(-y_n)`\n\n .. math::\n\n y_{n+1} = y_n - \\sum_k^{order}\\frac{h^k}{k}\n\n Args:\n input_in_01 (bool) : Allows a user to indicate that the input is in the domain [0, 1],\n causing the function optimize for this domain. This is useful for computing\n log-probabilities for entropy functions.\n\n We shift the domain of convergence by a constant :math:`a` using the following identity:\n\n .. math::\n\n \\ln{u} = \\ln {au} - \\ln{a}\n\n Since the domain of convergence for CrypTen's log() function is approximately [1e-4, 1e2],\n we can set :math:`a=100`.\n\n Configuration parameters:\n iterations (int): number of Householder iterations for the approximation\n exp_iterations (int): number of iterations for limit approximation of exp\n order (int): number of polynomial terms used (order of Householder approx)\n \"\"\"\n if input_in_01:\n return log(self.mul(100)) - 4.605170\n\n # Initialization to a decent estimate (found by qualitative inspection):\n # ln(x) = x/120 - 20exp(-2x - 1.0) + 3.0\n iterations = cfg.functions.log_iterations\n exp_iterations = cfg.functions.log_exp_iterations\n order = cfg.functions.log_order\n\n term1 = self.div(120)\n term2 = exp(self.mul(2).add(1.0).neg()).mul(20)\n y = term1 - term2 + 3.0\n\n # 8th order Householder iterations\n with cfg.temp_override({\"functions.exp_iterations\": exp_iterations}):\n for _ in range(iterations):\n h = 1 - self * exp(-y)\n y -= h.polynomial([1 / (i + 1) for i in range(order)])\n return y\n\n\ndef reciprocal(self, input_in_01=False):\n r\"\"\"\n Args:\n input_in_01 (bool) : Allows a user to indicate that the input is in the range [0, 1],\n causing the function optimize for this range. This is useful for improving\n the accuracy of functions on probabilities (e.g. 
entropy functions).\n\n Methods:\n 'NR' : `Newton-Raphson`_ method computes the reciprocal using iterations\n of :math:`x_{i+1} = (2x_i - self * x_i^2)` and uses\n :math:`3*exp(1 - 2x) + 0.003` as an initial guess by default\n\n 'log' : Computes the reciprocal of the input from the observation that:\n :math:`x^{-1} = exp(-log(x))`\n\n Configuration params:\n reciprocal_method (str): One of 'NR' or 'log'.\n reciprocal_nr_iters (int): determines the number of Newton-Raphson iterations to run\n for the `NR` method\n reciprocal_log_iters (int): determines the number of Householder\n iterations to run when computing logarithms for the `log` method\n reciprocal_all_pos (bool): determines whether all elements of the\n input are known to be positive, which optimizes the step of\n computing the sign of the input.\n reciprocal_initial (tensor): sets the initial value for the\n Newton-Raphson method. By default, this will be set to :math:\n `3*exp(-(x-.5)) + 0.003` as this allows the method to converge over\n a fairly large domain\n\n .. _Newton-Raphson:\n https://en.wikipedia.org/wiki/Newton%27s_method\n \"\"\"\n pos_override = {\"functions.reciprocal_all_pos\": True}\n if input_in_01:\n with cfg.temp_override(pos_override):\n rec = reciprocal(self.mul(64)).mul(64)\n return rec\n\n # Get config options\n method = cfg.functions.reciprocal_method\n all_pos = cfg.functions.reciprocal_all_pos\n initial = cfg.functions.reciprocal_initial\n\n if not all_pos:\n sgn = self.sign()\n pos = sgn * self\n with cfg.temp_override(pos_override):\n return sgn * reciprocal(pos)\n\n if method == \"NR\":\n nr_iters = cfg.functions.reciprocal_nr_iters\n if initial is None:\n # Initialization to a decent estimate (found by qualitative inspection):\n # 1/x = 3exp(1 - 2x) + 0.003\n result = 3 * (1 - 2 * self).exp() + 0.003\n else:\n result = initial\n for _ in range(nr_iters):\n if hasattr(result, \"square\"):\n result += result - result.square().mul_(self)\n else:\n result = 2 * result - result * result * self\n return result\n elif method == \"log\":\n log_iters = cfg.functions.reciprocal_log_iters\n with cfg.temp_override({\"functions.log_iters\": log_iters}):\n return exp(-log(self))\n else:\n raise ValueError(f\"Invalid method {method} given for reciprocal function\")\n\n\ndef inv_sqrt(self):\n r\"\"\"\n Computes the inverse square root of the input using the Newton-Raphson method.\n\n Configuration params:\n sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run.\n sqrt_nr_initial (tensor): sets the initial value for the Newton-Raphson iterations.\n By default, this will be set to allow the method to converge over a\n fairly large domain.\n\n .. _Newton-Raphson:\n https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method\n \"\"\"\n initial = cfg.functions.sqrt_nr_initial\n iters = cfg.functions.sqrt_nr_iters\n\n # Initialize using decent approximation\n if initial is None:\n y = exp(self.div(2).add(0.2).neg()).mul(2.2).add(0.2)\n y -= self.div(1024)\n else:\n y = initial\n\n # Newton Raphson iterations for inverse square root\n for _ in range(iters):\n y = y.mul_(3 - self * y.square()).div_(2)\n return y\n\n\ndef sqrt(self):\n r\"\"\"\n Computes the square root of the input by computing its inverse square root using\n the Newton-Raphson method and multiplying by the input.\n\n Configuration params:\n sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run\n sqrt_initial (tensor): sets the initial value for the inverse square root\n Newton-Raphson iterations. 
By default, this will be set to allow convergence\n over a fairly large domain.\n\n .. _Newton-Raphson:\n https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method\n \"\"\"\n return inv_sqrt(self).mul_(self)\n\n\ndef _eix(self):\n r\"\"\"Computes e^(i * self) where i is the imaginary unit.\n Returns (Re{e^(i * self)}, Im{e^(i * self)} = cos(self), sin(self)\n \"\"\"\n iterations = cfg.functions.trig_iterations\n\n re = 1\n im = self.div(2 ** iterations)\n\n # First iteration uses knowledge that `re` is public and = 1\n re -= im.square()\n im *= 2\n\n # Compute (a + bi)^2 -> (a^2 - b^2) + (2ab)i `iterations` times\n for _ in range(iterations - 1):\n a2 = re.square()\n b2 = im.square()\n im = im.mul_(re)\n im._tensor *= 2\n re = a2 - b2\n\n return re, im\n\n\ndef cossin(self):\n r\"\"\"Computes cosine and sine of input via exp(i * x).\n\n Args:\n iterations (int): for approximating exp(i * x)\n \"\"\"\n return self._eix()\n\n\ndef cos(self):\n r\"\"\"Computes the cosine of the input using cos(x) = Re{exp(i * x)}\n\n Args:\n iterations (int): for approximating exp(i * x)\n \"\"\"\n return cossin(self)[0]\n\n\ndef sin(self):\n r\"\"\"Computes the sine of the input using sin(x) = Im{exp(i * x)}\n\n Args:\n iterations (int): for approximating exp(i * x)\n \"\"\"\n return cossin(self)[1]\n\n\n# Logistic Functions\ndef sigmoid(self):\n r\"\"\"Computes the sigmoid function using the following definition\n\n .. math::\n \\sigma(x) = (1 + e^{-x})^{-1}\n\n If a valid method is given, this function will compute sigmoid\n using that method:\n\n \"chebyshev\" - computes tanh via Chebyshev approximation with\n truncation and uses the identity:\n\n .. math::\n \\sigma(x) = \\frac{1}{2}tanh(\\frac{x}{2}) + \\frac{1}{2}\n\n \"reciprocal\" - computes sigmoid using :math:`1 + e^{-x}` and computing\n the reciprocal\n\n \"\"\" # noqa: W605\n method = cfg.functions.sigmoid_tanh_method\n\n if method == \"chebyshev\":\n tanh_approx = tanh(self.div(2))\n return tanh_approx.div(2) + 0.5\n elif method == \"reciprocal\":\n ltz = self._ltz()\n sign = 1 - 2 * ltz\n\n pos_input = self.mul(sign)\n denominator = pos_input.neg().exp().add(1)\n\n # TODO: Set these with configurable parameters\n with cfg.temp_override(\n {\n \"functions.exp_iterations\": 9,\n \"functions.reciprocal_nr_iters\": 3,\n \"functions.reciprocal_all_pos\": True,\n \"functions.reciprocal_initial\": 0.75,\n }\n ):\n pos_output = denominator.reciprocal()\n\n result = pos_output.where(1 - ltz, 1 - pos_output)\n # TODO: Support addition with different encoder scales\n # result = pos_output + ltz - 2 * pos_output * ltz\n return result\n else:\n raise ValueError(f\"Unrecognized method {method} for sigmoid\")\n\n\ndef tanh(self):\n r\"\"\"Computes the hyperbolic tangent function using the identity\n\n .. math::\n tanh(x) = 2\\sigma(2x) - 1\n\n If a valid method is given, this function will compute tanh using that method:\n\n \"chebyshev\" - computes tanh via Chebyshev approximation with truncation.\n\n .. 
math::\n tanh(x) = \\sum_{j=1}^terms c_{2j - 1} P_{2j - 1} (x / maxval)\n\n where c_i is the ith Chebyshev series coefficient and P_i is ith polynomial.\n The approximation is truncated to +/-1 outside [-1, 1].\n\n Args:\n terms (int): highest degree of Chebyshev polynomials.\n Must be even and at least 6.\n \"\"\"\n method = cfg.functions.sigmoid_tanh_method\n\n if method == \"reciprocal\":\n return self.mul(2).sigmoid().mul(2).sub(1)\n elif method == \"chebyshev\":\n terms = cfg.functions.sigmoid_tanh_terms\n coeffs = crypten.common.util.chebyshev_series(torch.tanh, 1, terms)[1::2]\n tanh_polys = _chebyshev_polynomials(self, terms)\n tanh_polys_flipped = (\n tanh_polys.unsqueeze(dim=-1).transpose(0, -1).squeeze(dim=0)\n )\n out = tanh_polys_flipped.matmul(coeffs)\n\n # truncate outside [-maxval, maxval]\n return out.hardtanh()\n else:\n raise ValueError(f\"Unrecognized method {method} for tanh\")\n\n\ndef _chebyshev_polynomials(self, terms):\n r\"\"\"Evaluates odd degree Chebyshev polynomials at x\n\n Chebyshev Polynomials of the first kind are defined as\n\n .. math::\n P_0(x) = 1, P_1(x) = x, P_n(x) = 2 P_{n - 1}(x) - P_{n-2}(x)\n\n Args:\n self (MPCTensor): input at which polynomials are evaluated\n terms (int): highest degree of Chebyshev polynomials.\n Must be even and at least 6.\n Returns:\n MPCTensor of polynomials evaluated at self of shape `(terms, *self)`\n \"\"\"\n if terms % 2 != 0 or terms < 6:\n raise ValueError(\"Chebyshev terms must be even and >= 6\")\n\n polynomials = [self.clone()]\n y = 4 * self.square() - 2\n z = y - 1\n polynomials.append(z.mul(self))\n\n for k in range(2, terms // 2):\n next_polynomial = y * polynomials[k - 1] - polynomials[k - 2]\n polynomials.append(next_polynomial)\n\n return crypten.stack(polynomials)\n\n\ndef erf(tensor):\n r\"\"\"\n Approximates the error function of the input tensor using a Taylor approximation.\n \"\"\"\n iters = cfg.functions.erf_iterations\n\n output = tensor.clone()\n for n in range(1, iters + 1):\n multiplier = ((-1) ** n) / (math.factorial(n) * (2 * n + 1))\n output = output.add(tensor.pos_pow(2 * n + 1).mul(multiplier))\n return output.mul(2.0 / math.sqrt(math.pi))\n # NOTE: This approximation is not unstable for large tensor values.\n\n\ndef softmax(self, dim, **kwargs):\n r\"\"\"Compute the softmax of a tensor's elements along a given dimension\"\"\"\n # 0-d case\n if self.dim() == 0:\n assert dim == 0, \"Improper dim argument\"\n return self.new(torch.ones_like((self.data)))\n\n if self.size(dim) == 1:\n return self.new(torch.ones_like(self.data))\n\n maximum_value = self.max(dim, keepdim=True)[0]\n logits = self - maximum_value\n numerator = logits.exp()\n with cfg.temp_override({\"functions.reciprocal_all_pos\": True}):\n inv_denominator = numerator.sum(dim, keepdim=True).reciprocal()\n return numerator * inv_denominator\n\n\ndef log_softmax(self, dim, **kwargs):\n r\"\"\"Applies a softmax followed by a logarithm.\n While mathematically equivalent to log(softmax(x)), doing these two\n operations separately is slower, and numerically unstable. 
This function\n uses an alternative formulation to compute the output and gradient correctly.\n \"\"\"\n # 0-d case\n if self.dim() == 0:\n assert dim == 0, \"Improper dim argument\"\n return self.new(torch.zeros((), device=self.device))\n\n if self.size(dim) == 1:\n return self.new(torch.zeros_like(self.data))\n\n maximum_value = self.max(dim, keepdim=True)[0]\n logits = self - maximum_value\n normalize_term = exp(logits).sum(dim, keepdim=True)\n result = logits - normalize_term.log()\n return result\n"
] | [
[
"torch.zeros_like",
"torch.ones_like",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
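The approximations file in the row above computes 1/x with a Newton-Raphson iteration on encrypted tensors. Below is a minimal plain-PyTorch sketch of the same iteration, stripped of the cfg/secret-sharing machinery; the initial guess `3*exp(1 - 2x) + 0.003` and the update rule are taken from that file, while the iteration count and test values are illustrative.

```python
import torch

def reciprocal_nr(x: torch.Tensor, iters: int = 10) -> torch.Tensor:
    """Approximate 1/x for x > 0 via Newton-Raphson: y_{i+1} = y_i * (2 - x * y_i)."""
    # Initial guess used in the file above: 1/x ~= 3*exp(1 - 2x) + 0.003
    y = 3 * torch.exp(1 - 2 * x) + 0.003
    for _ in range(iters):
        y = 2 * y - x * y * y   # equivalently y * (2 - x * y)
    return y

x = torch.tensor([0.1, 1.0, 5.0, 50.0])
print(reciprocal_nr(x))   # approximately tensor([10.0000, 1.0000, 0.2000, 0.0200])
print(1 / x)              # exact reference
```

The quadratic convergence of the update means roughly 10 iterations cover a fairly wide positive domain, which is why the file guards negative inputs by multiplying with the sign first.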
sschlenkrich/HybridMonteCarlo | [
"72f54aa4bcd742430462b27b72d70369c01f9ac4"
] | [
"hybmc/models/HybridModel.py"
] | [
"#!/usr/bin/python\n\nimport numpy as np\nfrom hybmc.models.StochasticProcess import StochasticProcess\n\n\nclass HybridModel(StochasticProcess):\n\n # Python constructor\n def __init__(self, \n domAlias, # name of our domestic (numeraire) currency\n domRatesModel, # domestic rates model specifies numeraire\n forAliases, # list of foreign currencies (all relative to dom currency)\n forAssetModels, # list of foreign asset models\n forRatesModels, # list of foreign rates models\n correlations ): # np.array of instantanous correlations\n #\n self.domAlias = domAlias \n self.domRatesModel = domRatesModel\n self.forAliases = forAliases \n self.forAssetModels = forAssetModels \n self.forRatesModels = forRatesModels \n self.correlations = correlations\n # add sanity checks here\n # we need to know the model index for a given alias\n self.index = { self.forAliases[k] : k for k in range(len(self.forAliases)) }\n # manage model indices in state variable\n self._size = self.domRatesModel.size()\n self._factors = self.domRatesModel.factors()\n self.modelsStartIdx = [ ]\n lastModelIdx = domRatesModel.size()\n for assetModel, ratesModel in zip(self.forAssetModels,self.forRatesModels):\n self._size += (assetModel.size() + ratesModel.size())\n self._factors += (assetModel.factors() + ratesModel.factors())\n self.modelsStartIdx.append(lastModelIdx)\n self.modelsStartIdx.append(lastModelIdx + assetModel.size())\n lastModelIdx += (assetModel.size() + ratesModel.size())\n # check correlation matrix properties here\n if self.correlations is not None: # None is interpreted as identity\n self.L = np.linalg.cholesky(self.correlations)\n else:\n self.L = None\n # per default we do not apply hybrid adjuster\n self.hybAdjTimes = None\n\n def size(self):\n return self._size\n\n def factors(self):\n return self._factors\n\n def initialValues(self):\n initialValueList = [ self.domRatesModel.initialValues() ]\n for assetModel, ratesModel in zip(self.forAssetModels,self.forRatesModels):\n initialValueList.append(assetModel.initialValues())\n initialValueList.append(ratesModel.initialValues())\n return np.concatenate(initialValueList)\n\n def evolve(self, t0, X0, dt, dW, X1):\n if self.L is not None:\n dZ = self.L.dot(dW)\n else:\n dZ = dW\n if not self.forAliases: # take a short cut\n self.domRatesModel.evolve(t0, X0, dt, dZ, X1)\n return\n # evolve domestic rates\n domSize = self.domRatesModel.size() # shorten expressions\n self.domRatesModel.evolve(t0, X0[:domSize], dt, dZ[:self.domRatesModel.factors()], X1[:domSize])\n # we need the domestic drift r_d\n r_d = self.domRatesModel.shortRateOverPeriod(t0, dt, X0[:domSize], X1[:domSize])\n # now we iterate over foreign models\n corrStartIdx = self.domRatesModel.factors() # we need to keep track of our sub-correlations\n for k, alias in enumerate(self.forAliases):\n # carefully collect Brownian increments\n dw_asset = dZ[corrStartIdx : \\\n corrStartIdx + self.forAssetModels[k].factors()]\n dw_rates = dZ[corrStartIdx + self.forAssetModels[k].factors() : \\\n corrStartIdx + self.forAssetModels[k].factors() + self.forRatesModels[k].factors()]\n # we need the starting point states for evolution, y0 (asset), x0 (rates)\n y0 = X0[ self.modelsStartIdx[2 * k] : \\\n self.modelsStartIdx[2 * k] + self.forAssetModels[k].size() ]\n x0 = X0[ self.modelsStartIdx[2 * k + 1] : \\\n self.modelsStartIdx[2 * k + 1] + self.forRatesModels[k].size() ]\n # Quanto adjustment\n # we use the model-independent implementation to allow for credit hybrid components\n # todo: capture case 
self.correlations = None\n qAdj = self.correlations[corrStartIdx,\n corrStartIdx + self.forAssetModels[k].factors() : \n corrStartIdx + self.forAssetModels[k].factors() + self.forRatesModels[k].factors()]\n # we want to modify qAdj vector w/o changing the correlation\n qAdj = np.array(qAdj)\n # we need to extend the input state for our asset mode to account for drift and adjuster\n y0 = np.concatenate([ y0, np.array([0.0, 0.0]) ]) # maybe better use append here, but check view/copy\n y0[-1] = self.hybridVolAdjuster(k, t0)\n assetVol = self.forAssetModels[k].volatility(t0, y0)\n qAdj *= (assetVol*np.sqrt(dt))\n dw_rates = dw_rates - qAdj # create a new vector\n # evolve foreign rates\n x1 = X1[ self.modelsStartIdx[2 * k + 1] : \\\n self.modelsStartIdx[2 * k + 1] + self.forRatesModels[k].size() ]\n self.forRatesModels[k].evolve(t0, x0, dt, dw_rates, x1)\n # calculate FX drift, volAdjuster and extend input state\n r_f = self.forRatesModels[k].shortRateOverPeriod(t0, dt, x0, x1)\n y0[-2] = r_d - r_f # FX drift\n # finally we can evolve FX\n y1 = X1[ self.modelsStartIdx[2 * k] : \\\n self.modelsStartIdx[2 * k] + self.forAssetModels[k].size() ]\n self.forAssetModels[k].evolve(t0, y0, dt, dw_asset, y1)\n # no need to copy results coz we work on views of X1\n # but we need to update stochastic factor index\n corrStartIdx += (self.forAssetModels[k].factors() + self.forRatesModels[k].factors())\n return\n\n # interface for payoff calculation\n\n def numeraire(self, t, X):\n return self.domRatesModel.numeraire(t,X[:self.domRatesModel.size()])\n \n def asset(self, t, X, alias):\n if alias is None or alias==self.domAlias:\n return 1.0\n k = self.index[alias] # this should throw an exception if alias is unknown\n y = X[ self.modelsStartIdx[2 * k] : self.modelsStartIdx[2 * k] + self.forAssetModels[k].size() ]\n return self.forAssetModels[k].asset(t, y, alias)\n\n def zeroBond(self, t, T, X, alias):\n if alias is None or alias==self.domAlias:\n x = X[:self.domRatesModel.size()]\n return self.domRatesModel.zeroBond(t, T, x, alias)\n k = self.index[alias] # this should throw an exception if alias is unknown\n x = X[ self.modelsStartIdx[2 * k + 1] : \\\n self.modelsStartIdx[2 * k + 1] + self.forRatesModels[k].size() ]\n return self.forRatesModels[k].zeroBond(t, T, x, alias)\n\n # keep track of components in hybrid model\n\n def stateAliases(self):\n aliases = [ self.domAlias + '_' + stateAlias for stateAlias in self.domRatesModel.stateAliases() ]\n for k, alias in enumerate(self.forAliases):\n aliases += [ alias + '_' + stateAlias for stateAlias in self.forAssetModels[k].stateAliases() ]\n aliases += [ alias + '_' + stateAlias for stateAlias in self.forRatesModels[k].stateAliases() ]\n return aliases\n\n def factorAliases(self):\n aliases = [ self.domAlias + '_' + factorAlias for factorAlias in self.domRatesModel.factorAliases() ]\n for k, alias in enumerate(self.forAliases):\n aliases += [ alias + '_' + factorAlias for factorAlias in self.forAssetModels[k].factorAliases() ]\n aliases += [ alias + '_' + factorAlias for factorAlias in self.forRatesModels[k].factorAliases() ]\n return aliases\n \n # add hybrid vol adjuster methodology here\n # we calculate term structures of local volatility, correspondingeffective hybrid volatility\n # and an adjuster based on an additive shift\n\n def hybridVolAdjuster(self, forIdx, t):\n if self.hybAdjTimes is None:\n return 0.0 # default\n # linear interpolation with constant extrapolation\n # maybe better use scipy interpolation with linear extraplation\n return 
np.interp(t,self.hybAdjTimes,self.hybVolAdj[forIdx],self.hybVolAdj[forIdx,0],self.hybVolAdj[forIdx,-1])\n\n def recalculateHybridVolAdjuster(self, hybAdjTimes = None):\n if hybAdjTimes is not None: # if we don't supply time grid we want to keep the current grid\n self.hybAdjTimes = hybAdjTimes\n if self.hybAdjTimes.shape[0]==0: # patological case, do nothing\n return\n # we need to check for consistent times again\n if not self.hybAdjTimes[0]==0.0:\n raise ValueError('HybridModel: hybAdjTimes_[0]==0.0 required.')\n for k in range(1,self.hybAdjTimes.shape[0]):\n if not self.hybAdjTimes[k] > self.hybAdjTimes[k-1]:\n raise ValueError('HybridModel: hybAdjTimes_[k]>hybAdjTimes_[k-1] required.')\n # initialise \n self.localVol = np.zeros([len(self.forAliases),len(self.hybAdjTimes)])\n self.hybrdVol = np.ones([len(self.forAliases),len(self.hybAdjTimes)]) # 1.0 is required for p calculation\n self.hybVolAdj = np.zeros([len(self.forAliases),len(self.hybAdjTimes)])\n S0 = np.array([ m.asset(0.0, m.initialValues(), None) for m in self.forAssetModels ])\n # calculate vols at zero\n for i in range(len(S0)):\n y0 = np.concatenate([ self.forAssetModels[i].initialValues(), [0.0, 0.0]]) # this is asset model-dependent\n self.localVol[i,0] = self.forAssetModels[i].volatility(0.0, y0)\n self.hybrdVol[i,0] = self.localVol[i,0]\n if len(self.hybAdjTimes)==1: # nothing else to do\n return \n # now we start with the actual methodology...\n corrStartIdx = self.domRatesModel.factors() \n for i in range(len(S0)):\n # we collect all relevant correlations\n # recall, \n # Y0 is domestic rates model\n # X1 is asset (or FX) model\n # Y1 is forign rates model\n #\n # domestic rates vs FX, ASSUME vol-FX correlation is zero\n rhoY0X1 = self.correlations[ :self.domRatesModel.factors(), corrStartIdx] # \n # foreign rates vs FX, ASSUME vol-FX correlation is zero\n rhoX1Y1 = self.correlations[ corrStartIdx,\n corrStartIdx + self.forAssetModels[i].factors() :\n corrStartIdx + self.forAssetModels[i].factors() + self.forRatesModels[i].factors() ]\n # rates vs rates, ASSUME all vol-... 
correlation are zero\n rhoY0Y1 = self.correlations[ :self.domRatesModel.factors(),\n corrStartIdx + self.forAssetModels[i].factors() :\n corrStartIdx + self.forAssetModels[i].factors() + self.forRatesModels[i].factors() ]\n # update stochastic factor index\n corrStartIdx += (self.forAssetModels[i].factors() + self.forRatesModels[i].factors())\n # bootstrap over adjuster times\n for k, T in list(enumerate(self.hybAdjTimes))[1:]:\n # ATM forward and effective local volatility\n dfDom = self.domRatesModel.zeroBond(0.0, T, self.domRatesModel.initialValues(), None)\n dfFor = self.forRatesModels[i].zeroBond(0.0, T, self.forRatesModels[i].initialValues(), None)\n S = S0[i] * dfFor / dfDom # maybe it's worth to save S for debugging\n y = np.zeros(self.forAssetModels[i].size() + 2) # this is asset model-dependent\n y[0] = np.log(S / S0[i])\n self.localVol[i,k] = self.forAssetModels[i].volatility(self.hybAdjTimes[k], y)\n # calculate derivative of hybrid variance\n hPrime = np.zeros(k+1)\n for j, t in enumerate(self.hybAdjTimes[:k+1]):\n sigmaP0 = self.domRatesModel.zeroBondVolatility(t, T)\n sigmaP0Prime = self.domRatesModel.zeroBondVolatilityPrime(t, T)\n sigmaP1 = self.forRatesModels[i].zeroBondVolatility(t, T)\n sigmaP1Prime = self.forRatesModels[i].zeroBondVolatilityPrime(t, T)\n #\n sigma0 = sigmaP0 - rhoY0Y1.dot(sigmaP1) + self.hybrdVol[i,j]*rhoY0X1 # bootstrapping enters here\n sum0 = sigmaP0Prime.dot(sigma0)\n #\n sigma1 = sigmaP1 - sigmaP0.dot(rhoY0Y1) - self.hybrdVol[i,j]*rhoX1Y1 # bootstrapping enters here\n sum1 = sigma1.dot(sigmaP1Prime)\n # collect terms and finish\n hPrime[j] = 2.0*(sum0 + sum1)\n p = 0.5 * hPrime[k] * (self.hybAdjTimes[k] - self.hybAdjTimes[k - 1])\n q = 0.5 * hPrime[k-1] * (self.hybAdjTimes[k] - self.hybAdjTimes[k - 1])\n for j in range(1,k):\n q += 0.5 * (hPrime[j - 1] + hPrime[j]) * (self.hybAdjTimes[j] - self.hybAdjTimes[j - 1])\n # let's see if this works...\n root2 = p*p / 4.0 - q + self.localVol[i,k] * self.localVol[i,k]\n if not root2 >= 0.0:\n raise ValueError('HybridModel: root2>=0.0 required.')\n self.hybrdVol[i,k] = -p / 2.0 + np.sqrt(root2)\n if not self.hybrdVol[i,k] > 0.0:\n raise ValueError('HybridModel: hybrdVol[i,k]>0.0 required.')\n # maybe we should add some more safety checks here...\n self.hybVolAdj[i,k] = self.hybrdVol[i,k] - self.localVol[i,k]\n"
] | [
[
"numpy.log",
"numpy.sqrt",
"numpy.concatenate",
"numpy.interp",
"numpy.linalg.cholesky",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
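HybridModel.py correlates the otherwise independent Brownian increments of its component models through a Cholesky factor (`self.L = np.linalg.cholesky(self.correlations)`, then `dZ = L.dot(dW)`). A self-contained sketch of just that step, with an assumed 3-factor correlation matrix chosen purely for illustration:

```python
import numpy as np

# Hypothetical instantaneous correlation matrix for three risk factors
corr = np.array([
    [1.0, 0.3, 0.1],
    [0.3, 1.0, 0.2],
    [0.1, 0.2, 1.0],
])
L = np.linalg.cholesky(corr)          # lower-triangular factor, corr = L @ L.T

rng = np.random.default_rng(42)
dt = 1.0 / 252
dW = rng.standard_normal((3, 100_000)) * np.sqrt(dt)   # independent increments
dZ = L @ dW                                            # correlated increments

# Empirical correlation of the rows of dZ should be close to `corr`
print(np.round(np.corrcoef(dZ), 2))
```

Passing `correlations=None` in the model skips this step entirely, which is equivalent to using the identity matrix as `corr`.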
anonymous-dense-submission/DensE | [
"34f604d9e2f4e57b3acecc2e3da7da99863255e0"
] | [
"codes/model.py"
] | [
"#!/usr/bin/python3\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom sklearn.metrics import average_precision_score\n\nfrom torch.utils.data import DataLoader\n\nfrom dataloader import TestDataset\n\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nfrom numpy.random import RandomState\n\nclass DensEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, \n entity_embedding_has_mod=False, relation_embedding_has_mod=False):\n super(DensEModel, self).__init__()\n self.model_name = model_name\n self.nentity = nentity\n self.nrelation = nrelation\n self.hidden_dim = hidden_dim\n self.epsilon = 1.2\n self.rel_high_bound = 2.0\n \n self.use_abs_norm = True\n self.allow_minus_mod = True\n self.use_entity_phase = False\n self.use_real_part = False\n \n self.criterion = 'he'\n \n if self.criterion == 'glorot':\n mod_range = 1. / np.sqrt(2 * (self.hidden_dim + self.hidden_dim))\n elif self.criterion == 'he':\n mod_range = 1. / np.sqrt(2 * self.hidden_dim)\n \n if self.allow_minus_mod:\n self.embedding_range = nn.Parameter(\n torch.Tensor([mod_range * 2.]), \n requires_grad=False\n )\n else:\n self.embedding_range = nn.Parameter(\n torch.Tensor([mod_range]), \n requires_grad=False\n )\n \n self.gamma1 = nn.Parameter(\n torch.Tensor([(self.rel_high_bound + self.epsilon) * mod_range * self.hidden_dim]), \n requires_grad=False\n )\n \n self.gamma = nn.Parameter(\n torch.Tensor([gamma]), \n requires_grad=False\n )\n \n self.unit_mod = nn.Parameter(\n torch.Tensor([1.]), \n requires_grad=False\n )\n \n self.zero_ent_phase = nn.Parameter(\n torch.Tensor([0.]), \n requires_grad=False\n )\n\n self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))\n self.entity_embedding_has_mod = entity_embedding_has_mod\n self.relation_embedding_has_mod = relation_embedding_has_mod\n \n self.entity_x = nn.Embedding(self.nentity, self.hidden_dim)\n self.entity_y = nn.Embedding(self.nentity, self.hidden_dim) \n self.entity_z = nn.Embedding(self.nentity, self.hidden_dim)\n \n self.relation_w = nn.Embedding(self.nrelation, self.hidden_dim)\n self.relation_x = nn.Embedding(self.nrelation, self.hidden_dim)\n self.relation_y = nn.Embedding(self.nrelation, self.hidden_dim)\n self.relation_z = nn.Embedding(self.nrelation, self.hidden_dim)\n \n self.init_weights()\n \n #Do not forget to modify this line when you add a new model in the \"forward\" function\n if model_name not in ['DensE']:\n raise ValueError('model %s not supported' % model_name)\n if self.use_real_part:\n try:\n assert(self.use_abs_norm == True)\n except:\n raise ValueError('use_abs_norm should be true if you only use real part')\n if (not self.entity_embedding_has_mod) and self.relation_embedding_has_mod:\n raise ValueError('when relation has mod, entity must have mod')\n \n \n def init_weights(self):\n\n rel_w, rel_x, rel_y, rel_z = self.relation_init(self.nrelation, self.hidden_dim)\n rel_w, rel_x, rel_y, rel_z = torch.from_numpy(rel_w), torch.from_numpy(rel_x), torch.from_numpy(rel_y), torch.from_numpy(rel_z)\n self.relation_w.weight.data = rel_w.type_as(self.relation_w.weight.data)\n self.relation_x.weight.data = rel_x.type_as(self.relation_x.weight.data)\n self.relation_y.weight.data = rel_y.type_as(self.relation_y.weight.data)\n self.relation_z.weight.data = 
rel_z.type_as(self.relation_z.weight.data)\n \n e_x, e_y, e_z = self.entity_init(self.nentity, self.hidden_dim)\n e_x, e_y, e_z = torch.from_numpy(e_x), torch.from_numpy(e_y), torch.from_numpy(e_z)\n self.entity_x.weight.data = e_x.type_as(self.entity_x.weight.data)\n self.entity_y.weight.data = e_y.type_as(self.entity_y.weight.data)\n self.entity_z.weight.data = e_z.type_as(self.entity_z.weight.data)\n \n# def relation_init(self, n_entries, features, criterion='he'):\n# fan_in = features\n# fan_out = features\n \n# if criterion == 'glorot':\n# s = 1. / np.sqrt(2 * (fan_in + fan_out))\n# elif criterion == 'he':\n# s = 1. / np.sqrt(2 * fan_in)\n# else:\n# raise ValueError('Invalid criterion: ', criterion)\n \n# print('INFO: init rel_mod is: ', s)\n\n# kernel_shape = (n_entries, features)\n \n# rel_mod = np.random.uniform(low=-s, high=s, size=kernel_shape)\n# rotate_phase = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=kernel_shape)\n# theta = np.random.uniform(low=0, high=np.pi, size=kernel_shape)\n# phi = np.random.uniform(low=0, high=2*np.pi, size=kernel_shape)\n \n# rel_w = rel_mod * np.cos(rotate_phase/2)\n# rel_x = rel_mod * np.sin(rotate_phase/2) * np.sin(theta) * np.cos(phi)\n# rel_y = rel_mod * np.sin(rotate_phase/2) * np.sin(theta) * np.sin(phi)\n# rel_z = rel_mod * np.sin(rotate_phase/2) * np.cos(theta)\n\n# return rel_w, rel_x, rel_y, rel_z\n\n def relation_init(self, n_entries, features, criterion='he'):\n fan_in = features\n fan_out = features\n \n if criterion == 'glorot':\n s = 1. / np.sqrt(2 * (fan_in + fan_out))\n elif criterion == 'he':\n s = 1. / np.sqrt(2 * fan_in)\n else:\n raise ValueError('Invalid criterion: ', criterion)\n \n print('INFO: init rel_mod is: ', s)\n\n kernel_shape = (n_entries, features)\n \n rel_w = np.random.uniform(low=-s, high=s, size=kernel_shape)\n rel_x = np.random.uniform(low=-s, high=s, size=kernel_shape)\n rel_y = np.random.uniform(low=-s, high=s, size=kernel_shape)\n rel_z = np.random.uniform(low=-s, high=s, size=kernel_shape)\n\n return rel_w, rel_x, rel_y, rel_z\n \n def entity_init(self, n_entries, features, criterion='he'):\n fan_in = features\n fan_out = features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2 * (fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2 * fan_in)\n else:\n raise ValueError('Invalid criterion: ', criterion)\n \n print('INFO: init x, y, z is: ', s)\n\n # rng = RandomState(456)\n kernel_shape = (n_entries, features)\n \n x = np.random.uniform(low=-s, high=s, size=kernel_shape)\n y = np.random.uniform(low=-s, high=s, size=kernel_shape)\n z = np.random.uniform(low=-s, high=s, size=kernel_shape)\n\n return x, y, z\n \n def forward(self, sample, mode='single'):\n '''\n Forward function that calculate the score of a batch of triples.\n In the 'single' mode, sample is a batch of triple.\n In the 'head-batch' or 'tail-batch' mode, sample consists two part.\n The first part is usually the positive sample.\n And the second part is the entities in the negative samples.\n Because negative samples and positive samples usually share two elements \n in their triple ((head, relation) or (relation, tail)).\n '''\n\n if mode == 'single':\n # batch_size, negative_sample_size = sample.size(0), 1\n \n head_x = self.entity_x(sample[:, 0]).unsqueeze(1)\n head_y = self.entity_y(sample[:, 0]).unsqueeze(1)\n head_z = self.entity_z(sample[:, 0]).unsqueeze(1)\n \n tail_x = self.entity_x(sample[:, 2]).unsqueeze(1)\n tail_y = self.entity_y(sample[:, 2]).unsqueeze(1)\n tail_z = self.entity_z(sample[:, 2]).unsqueeze(1)\n \n rel_w = self.relation_w(sample[:, 1]).unsqueeze(1)\n rel_x = self.relation_x(sample[:, 1]).unsqueeze(1)\n rel_y = self.relation_y(sample[:, 1]).unsqueeze(1)\n rel_z = self.relation_z(sample[:, 1]).unsqueeze(1)\n \n elif mode == 'head-batch':\n tail_part, head_part = sample\n # batch_size, negative_sample_size = head_part.size(0), head_part.size(1)\n \n head_x = self.entity_x(head_part)\n head_y = self.entity_y(head_part)\n head_z = self.entity_z(head_part)\n \n tail_x = self.entity_x(tail_part[:, 2]).unsqueeze(1)\n tail_y = self.entity_y(tail_part[:, 2]).unsqueeze(1)\n tail_z = self.entity_z(tail_part[:, 2]).unsqueeze(1)\n \n rel_w = self.relation_w(tail_part[:, 1]).unsqueeze(1)\n rel_x = self.relation_x(tail_part[:, 1]).unsqueeze(1)\n rel_y = self.relation_y(tail_part[:, 1]).unsqueeze(1)\n rel_z = self.relation_z(tail_part[:, 1]).unsqueeze(1)\n \n elif mode == 'tail-batch':\n head_part, tail_part = sample\n # batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)\n \n head_x = self.entity_x(head_part[:, 0]).unsqueeze(1)\n head_y = self.entity_y(head_part[:, 0]).unsqueeze(1)\n head_z = self.entity_z(head_part[:, 0]).unsqueeze(1)\n \n tail_x = self.entity_x(tail_part)\n tail_y = self.entity_y(tail_part)\n tail_z = self.entity_z(tail_part)\n \n rel_w = self.relation_w(head_part[:, 1]).unsqueeze(1)\n rel_x = self.relation_x(head_part[:, 1]).unsqueeze(1)\n rel_y = self.relation_y(head_part[:, 1]).unsqueeze(1)\n rel_z = self.relation_z(head_part[:, 1]).unsqueeze(1)\n else:\n raise ValueError('mode %s not supported' % mode)\n \n model_func = {\n 'DensE': self.DensE\n }\n \n if self.model_name in model_func:\n score = model_func[self.model_name](head_x, head_y, head_z, \n rel_w, rel_x, rel_y, rel_z, \n tail_x, tail_y, tail_z, \n mode)\n else:\n raise ValueError('model %s not supported' % self.model_name)\n \n return score\n\n def DensE(self, head_x, head_y, head_z, \n rel_w, rel_x, rel_y, rel_z, \n tail_x, tail_y, tail_z, \n mode):\n pi = 3.14159265358979323846\n assert(self.use_entity_phase == False)\n assert(self.use_real_part == False)\n \n denominator = torch.sqrt(rel_w ** 2 + rel_x ** 2 + rel_y ** 2 + rel_z ** 2)\n w = rel_w / denominator\n x = rel_x / denominator\n y = rel_y / denominator\n z = rel_z / 
denominator\n \n compute_tail_x = (1 - 2*y*y - 2*z*z) * head_x + (2*x*y - 2*z*w) * head_y + (2*x*z + 2*y*w) * head_z\n compute_tail_y = (2*x*y + 2*z*w) * head_x + (1 - 2*x*x - 2*z*z) * head_y + (2*y*z - 2*x*w) * head_z\n compute_tail_z = (2*x*z - 2*y*w) * head_x + (2*y*z + 2*x*w) * head_y + (1 - 2*x*x - 2*y*y) * head_z\n \n if self.relation_embedding_has_mod:\n compute_tail_x = denominator * compute_tail_x\n compute_tail_y = denominator * compute_tail_y\n compute_tail_z = denominator * compute_tail_z\n \n delta_x = (compute_tail_x - tail_x)\n delta_y = (compute_tail_y - tail_y)\n delta_z = (compute_tail_z - tail_z)\n \n score1 = torch.stack([delta_x, delta_y, delta_z], dim = 0)\n score1 = score1.norm(dim = 0)\n \n x = -x\n y = -y\n z = -z\n compute_head_x = (1 - 2*y*y - 2*z*z) * tail_x + (2*x*y - 2*z*w) * tail_y + (2*x*z + 2*y*w) * tail_z\n compute_head_y = (2*x*y + 2*z*w) * tail_x + (1 - 2*x*x - 2*z*z) * tail_y + (2*y*z - 2*x*w) * tail_z\n compute_head_z = (2*x*z - 2*y*w) * tail_x + (2*y*z + 2*x*w) * tail_y + (1 - 2*x*x - 2*y*y) * tail_z\n \n if self.relation_embedding_has_mod:\n compute_head_x = compute_head_x / denominator\n compute_head_y = compute_head_y / denominator\n compute_head_z = compute_head_z / denominator\n \n delta_x2 = (compute_head_x - head_x)\n delta_y2 = (compute_head_y - head_y)\n delta_z2 = (compute_head_z - head_z)\n \n score2 = torch.stack([delta_x2, delta_y2, delta_z2], dim = 0)\n score2 = score2.norm(dim = 0) \n \n score1 = score1.mean(dim=2)\n score2 = score2.mean(dim=2)\n\n# score1 = score1.sum(dim=2)\n# score2 = score2.sum(dim=2)\n \n score = (score1 + score2) / 2\n \n score = self.gamma.item() - score\n \n return score, score1, score2, torch.abs(delta_x)\n\n @staticmethod\n def train_step(model, optimizer, train_iterator, step, args):\n '''\n A single train step. 
Apply back-propation and return the loss\n '''\n\n model.train()\n\n optimizer.zero_grad()\n\n positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)\n\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n subsampling_weight = subsampling_weight.cuda()\n\n negative_score, head_mod, tail_mod, rel_mod = model((positive_sample, negative_sample), mode=mode) # 全是负样本分数 shape: batch_size, neg_size\n \n if step % 500 == 0:\n print(negative_score.mean(), head_mod.mean(), tail_mod.mean(), rel_mod.mean())\n\n if args.negative_adversarial_sampling:\n #In self-adversarial sampling, we do not apply back-propagation on the sampling weight\n negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim = 1).detach() \n * F.logsigmoid(-negative_score)).sum(dim = 1)\n else:\n negative_score = F.logsigmoid(-negative_score).mean(dim = 1)\n\n positive_score, head_mod, tail_mod, rel_mod = model(positive_sample) # 正样本分数 shape: batch_size, 1 \n\n if step % 500 == 0:\n print(positive_score.mean(), head_mod.mean(), tail_mod.mean(), rel_mod.mean())\n\n positive_score = F.logsigmoid(positive_score).squeeze(dim = 1)\n\n if args.uni_weight:\n positive_sample_loss = - positive_score.mean()\n negative_sample_loss = - negative_score.mean()\n else:\n positive_sample_loss = - (subsampling_weight * positive_score).sum()/subsampling_weight.sum()\n negative_sample_loss = - (subsampling_weight * negative_score).sum()/subsampling_weight.sum()\n\n loss = (positive_sample_loss + negative_sample_loss)/2\n \n if args.regularization != 0.0:\n #Use L3 regularization for ComplEx and DistMult\n regularization = args.regularization * (\n model.entity_x.weight.data.norm(p = 3)**3 + \n model.entity_y.weight.data.norm(p = 3)**3 + \n model.entity_z.weight.data.norm(p = 3)**3 \n ) / args.batch_size\n\n loss = loss + regularization\n regularization_log = {'regularization': regularization.item()}\n else:\n regularization_log = {}\n \n loss.backward()\n\n optimizer.step()\n\n log = {\n **regularization_log,\n 'positive_sample_loss': positive_sample_loss.item(),\n 'negative_sample_loss': negative_sample_loss.item(),\n 'loss': loss.item(),\n# 'train_hit1': train_hit1\n }\n\n return log\n \n @staticmethod\n def test_step(model, test_triples, all_true_triples, args):\n '''\n Evaluate the model on test or valid datasets\n '''\n \n model.eval()\n \n if args.countries:\n #Countries S* datasets are evaluated on AUC-PR\n #Process test data for AUC-PR evaluation\n sample = list()\n y_true = list()\n for head, relation, tail in test_triples:\n for candidate_region in args.regions:\n y_true.append(1 if candidate_region == tail else 0)\n sample.append((head, relation, candidate_region))\n\n sample = torch.LongTensor(sample)\n if args.cuda:\n sample = sample.cuda()\n\n with torch.no_grad():\n y_score = model(sample).squeeze(1).cpu().numpy()\n\n y_true = np.array(y_true)\n\n #average_precision_score is the same as auc_pr\n auc_pr = average_precision_score(y_true, y_score)\n\n metrics = {'auc_pr': auc_pr}\n \n else:\n #Otherwise use standard (filtered) MRR, MR, HITS@1, HITS@3, and HITS@10 metrics\n #Prepare dataloader for evaluation\n test_dataloader_head = DataLoader(\n TestDataset(\n test_triples, \n all_true_triples, \n args.nentity, \n args.nrelation/2, \n 'head-batch'\n ), \n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2), \n collate_fn=TestDataset.collate_fn\n )\n\n test_dataloader_tail = DataLoader(\n TestDataset(\n test_triples, \n 
all_true_triples, \n args.nentity, \n args.nrelation/2, \n 'tail-batch'\n ), \n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2), \n collate_fn=TestDataset.collate_fn\n )\n \n test_dataset_list = [test_dataloader_head, test_dataloader_tail]\n \n logs = []\n\n step = 0\n total_steps = sum([len(dataset) for dataset in test_dataset_list])\n\n with torch.no_grad():\n for test_dataset in test_dataset_list:\n for positive_sample, negative_sample, filter_bias, mode in test_dataset:\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n filter_bias = filter_bias.cuda()\n\n batch_size = positive_sample.size(0)\n\n score, head_mod, tail_mod, rel_mod = model((positive_sample, negative_sample), mode)\n# print(filter_bias, filter_bias.shape, filter_bias.sum())\n score += filter_bias\n\n #Explicitly sort all the entities to ensure that there is no test exposure bias\n argsort = torch.argsort(score, dim = 1, descending=True)\n\n if mode == 'head-batch':\n positive_arg = positive_sample[:, 0]\n elif mode == 'tail-batch':\n positive_arg = positive_sample[:, 2]\n else:\n raise ValueError('mode %s not supported' % mode)\n\n for i in range(batch_size):\n #Notice that argsort is not ranking\n ranking = (argsort[i, :] == positive_arg[i]).nonzero()\n assert ranking.size(0) == 1\n\n #ranking + 1 is the true ranking used in evaluation metrics\n ranking = 1 + ranking.item()\n logs.append({\n 'MRR': 1.0/ranking,\n 'MR': float(ranking),\n 'HITS@1': 1.0 if ranking <= 1 else 0.0,\n 'HITS@3': 1.0 if ranking <= 3 else 0.0,\n 'HITS@10': 1.0 if ranking <= 10 else 0.0,\n })\n\n if step % args.test_log_steps == 0:\n logging.info('Evaluating the model... (%d/%d)' % (step, total_steps))\n\n step += 1\n\n metrics = {}\n for metric in logs[0].keys():\n metrics[metric] = sum([log[metric] for log in logs])/len(logs)\n\n return metrics\n \n \n "
] | [
[
"torch.abs",
"torch.LongTensor",
"torch.nn.functional.softmax",
"numpy.sqrt",
"torch.Tensor",
"torch.sqrt",
"torch.nn.functional.logsigmoid",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.no_grad",
"sklearn.metrics.average_precision_score",
"torch.stack",
"numpy.random.uniform",
"torch.argsort",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
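The DensE score in the row above rotates each (x, y, z) component of a head embedding by a unit quaternion (w, x, y, z) before measuring the distance to the tail. A simplified NumPy sketch of that rotation for a single 3-D vector and a single quaternion, using the same rotation-matrix entries as `model.py` (the quaternion and embedding values here are made up):

```python
import numpy as np

def quaternion_rotate(head, w, x, y, z):
    """Rotate vectors of shape (..., 3) by the unit quaternion (w, x, y, z)."""
    hx, hy, hz = head[..., 0], head[..., 1], head[..., 2]
    rx = (1 - 2*y*y - 2*z*z) * hx + (2*x*y - 2*z*w) * hy + (2*x*z + 2*y*w) * hz
    ry = (2*x*y + 2*z*w) * hx + (1 - 2*x*x - 2*z*z) * hy + (2*y*z - 2*x*w) * hz
    rz = (2*x*z - 2*y*w) * hx + (2*y*z + 2*x*w) * hy + (1 - 2*x*x - 2*y*y) * hz
    return np.stack([rx, ry, rz], axis=-1)

# Normalise an arbitrary quaternion (as the model does), then rotate a toy head vector
q = np.array([0.9, 0.1, 0.3, 0.2])
w, x, y, z = q / np.linalg.norm(q)
head = np.array([[1.0, 0.0, 0.0]])
rotated = quaternion_rotate(head, w, x, y, z)
print(rotated, np.linalg.norm(rotated))   # norm is preserved by a unit quaternion
```

In the model itself each hidden dimension carries its own quaternion, and dropping the normalisation (`relation_embedding_has_mod=True`) additionally scales the rotated vector by the quaternion modulus.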
brianafischer/P3_Implement_SLAM | [
"935d7f069514c347cd9939a1cb8e76840dca2c71"
] | [
"helpers.py"
] | [
"from robot_class import robot\nfrom math import *\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# --------\n# this helper function displays the world that a robot is in\n# it assumes the world is a square grid of some given size\n# and that landmarks is a list of landmark positions(an optional argument)\ndef display_world(world_size, position, landmarks=None):\n \n # using seaborn, set background grid to gray\n sns.set_style(\"dark\")\n\n # Plot grid of values\n world_grid = np.zeros((world_size+1, world_size+1))\n\n # Set minor axes in between the labels\n ax=plt.gca()\n cols = world_size+1\n rows = world_size+1\n\n ax.set_xticks([x for x in range(1,cols)],minor=True )\n ax.set_yticks([y for y in range(1,rows)],minor=True)\n \n # Plot grid on minor axes in gray (width = 1)\n plt.grid(which='minor',ls='-',lw=1, color='white')\n \n # Plot grid on major axes in larger width\n plt.grid(which='major',ls='-',lw=2, color='white')\n \n # Create an 'o' character that represents the robot\n # ha = horizontal alignment, va = vertical\n ax.text(position[0], position[1], 'o', ha='center', va='center', color='r', fontsize=30)\n \n # Draw landmarks if they exists\n if(landmarks is not None):\n # loop through all path indices and draw a dot (unless it's at the car's location)\n for pos in landmarks:\n if(pos != position):\n ax.text(pos[0], pos[1], 'x', ha='center', va='center', color='purple', fontsize=20)\n \n # Display final result\n plt.show()\n\n \n# --------\n# this routine makes the robot data\n# the data is a list of measurements and movements: [measurements, [dx, dy]]\n# collected over a specified number of time steps, N\n#\ndef make_data(N, num_landmarks, world_size, measurement_range, motion_noise, \n measurement_noise, distance):\n\n\n # check if data has been made\n complete = False\n\n while not complete:\n\n data = []\n\n # make robot and landmarks\n r = robot(world_size, measurement_range, motion_noise, measurement_noise)\n r.make_landmarks(num_landmarks)\n seen = [False for row in range(num_landmarks)]\n \n # guess an initial motion\n orientation = random.random() * 2.0 * pi\n dx = cos(orientation) * distance\n dy = sin(orientation) * distance\n \n for k in range(N-1):\n \n # collect sensor measurements in a list, Z\n Z = r.sense()\n\n # check off all landmarks that were observed \n for i in range(len(Z)):\n seen[Z[i][0]] = True\n \n # move\n while not r.move(dx, dy):\n # if we'd be leaving the robot world, pick instead a new direction\n orientation = random.random() * 2.0 * pi\n dx = cos(orientation) * distance\n dy = sin(orientation) * distance\n\n # collect/memorize all sensor and motion data\n data.append([Z, [dx, dy]])\n\n # we are done when all landmarks were observed; otherwise re-run\n complete = (sum(seen) == num_landmarks)\n\n print(' ')\n print('Landmarks: ', r.landmarks)\n print(r)\n\n\n return r, data"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.grid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
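`helpers.py` ties its grid-world display to `robot_class` and seaborn. A stripped-down sketch of the same grid-and-markers plot using only matplotlib, with invented positions, in case one wants the visualisation without those dependencies:

```python
import matplotlib.pyplot as plt

def display_world(world_size, position, landmarks=None):
    """Draw a square grid world with the robot ('o') and landmarks ('x')."""
    fig, ax = plt.subplots()
    ax.set_xlim(0, world_size)
    ax.set_ylim(0, world_size)
    ax.set_xticks(list(range(world_size + 1)))
    ax.set_yticks(list(range(world_size + 1)))
    ax.grid(True, ls='-', lw=1, color='lightgray')
    ax.text(position[0], position[1], 'o', ha='center', va='center',
            color='r', fontsize=30)
    for pos in (landmarks or []):
        ax.text(pos[0], pos[1], 'x', ha='center', va='center',
                color='purple', fontsize=20)
    plt.show()

display_world(10, (5.0, 5.0), landmarks=[(2, 7), (8, 3)])
```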
JosiahCraw/Python-Comms-Utils-ENEL422- | [
"d9f3be4adcfed198ee0d331f956b7341796c0797"
] | [
"comms_utils/plot.py"
] | [
"import numpy as np\nfrom scipy import special\nfrom typing import List\nimport matplotlib.pyplot as plt\nfrom comms_utils.ak import AK\nfrom comms_utils.pulse import Pulse\nfrom comms_utils.signal import Signal\nfrom comms_utils.comb import Comb\nimport comms_utils.threaded as threaded\nimport comms_utils.decode as decode\n\ndef eye_diagram(signal: Signal, pulse: Pulse, clock_comb: List[float], num_periods:\n int=3, plot_sample_lines: bool=False, title: str=None, pgf_plot: str=None):\n corrected_clock = pulse.apply_conv_delay(len(signal), clock_comb)\n start_index = corrected_clock.index(1.0)\n finish_index = len(corrected_clock) - corrected_clock[::-1].index(1.0)\n sig_data, sig_time = signal.get_data()\n count = 0\n \n clock_edges = np.where(np.array(corrected_clock, dtype=float)==1.0)[0]\n last_clock = clock_edges[0]\n period = np.array(sig_time[last_clock:clock_edges[num_periods]]) - sig_time[last_clock]\n if plot_sample_lines == True:\n for i in range(1, num_periods):\n time = sig_time[clock_edges[i]]-sig_time[last_clock]\n plt.axvline(x=time, color='r', linestyle='--')\n for i in clock_edges[1:]:\n count += 1\n if count == num_periods:\n plt.plot(period, sig_data[last_clock:i])\n last_clock = i\n count = 0\n if title != None:\n plt.title(title)\n else:\n plt.title(\"{}-PAM Eye Diagram\".format(signal.get_levels()))\n plt.xlabel(\"Time\")\n plt.ylabel(\"Amplitude\")\n if pgf_plot != None:\n plt.savefig(pgf_plot)\n else:\n plt.show()\n\n\ndef calc_errors(y: List[float], ak: AK, original_bin: np.ndarray,\n comb: Comb, signal: Signal, pulse: Pulse, db: float):\n signal.add_noise(db)\n recv_sig = signal.convolve(pulse)\n delayed_clock = pulse.apply_conv_delay(len(recv_sig), comb.get_clock_comb())\n\n decoded_data = decode.decode_pam(recv_sig*delayed_clock, ak.get_levels())\n bit_array = np.array(list(decoded_data), dtype=int)\n bit_errors = np.sum(bit_array != original_bin)\n bit_error_rate = bit_errors / len(ak)\n y.append(bit_error_rate)\n signal.remove_noise()\n\n\ndef analytical_bit_error(db_array: List[float], ak: AK, title: str=None):\n numerical = list()\n for db in db_array:\n db_num = 10 ** (db/10)\n numerical.append(0.5*special.erfc(np.sqrt(db_num)))\n plt.plot(db_array, numerical, '-b')\n plt.yscale(\"log\")\n plt.grid(True, which='both')\n plt.xlabel(\"$E_b/N_0$ (dB)\")\n plt.ylabel(\"BER\")\n if title == None:\n plt.title(\"{}-PAM Bit Error Rate\".format(ak.get_levels()))\n else:\n plt.title(title)\n plt.show()\n\n\ndef bit_errors(signal: Signal, comb: Comb, pulse: Pulse, db_array: List[float], title: str=None, \n numerical_line: bool=False, pgf_plot: str=None, threading: bool=False):\n ak = comb.get_ak()\n original_bin = decode.decode_pam(ak.get_data(), ak.get_levels())\n original_bin = np.array(list(original_bin), dtype=int)\n if signal.get_snr_db() != None:\n signal.remove_noise()\n print(\"Removed initial noise from signal\")\n y = list()\n numerical = list()\n if threading == True:\n threaded.calc_errors_threaded(db_array.copy(), y, ak, original_bin,\n comb, signal)\n else:\n for db in db_array:\n if numerical_line == True:\n db_num = 10 ** (db/10)\n numerical.append(0.5*special.erfc(np.sqrt(db_num)))\n calc_errors(y, ak, original_bin, comb, signal, pulse, db)\n plt.plot(db_array, y, 'o-b')\n if numerical_line == True:\n plt.plot(db_array, numerical, '--r')\n plt.yscale(\"log\")\n plt.grid(True, which='both')\n plt.xlabel(\"$E_b/N_0$ (dB)\")\n plt.ylabel(\"BER\")\n \n if title != None:\n plt.title(title)\n else:\n plt.title(\"{}-PAM Bit Error 
Rate\".format(ak.get_levels()))\n if pgf_plot != None:\n plt.savefig(pgf_plot)\n else:\n plt.show()\n \n\n\n\n"
] | [
[
"matplotlib.pyplot.axvline",
"numpy.sqrt",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
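`plot.py` compares simulated bit error rates against the analytical binary-PAM curve `0.5 * erfc(sqrt(Eb/N0))`. A standalone sketch of just that analytical curve, with the dB-to-linear conversion and axis labels following the file above; the dB range is an arbitrary choice:

```python
import numpy as np
from scipy import special
import matplotlib.pyplot as plt

eb_n0_db = np.arange(0, 11)                    # Eb/N0 in dB
eb_n0 = 10 ** (eb_n0_db / 10)                  # dB -> linear scale
ber = 0.5 * special.erfc(np.sqrt(eb_n0))       # analytical BER for binary PAM

plt.semilogy(eb_n0_db, ber, '-b')
plt.grid(True, which='both')
plt.xlabel(r"$E_b/N_0$ (dB)")
plt.ylabel("BER")
plt.title("Analytical 2-PAM bit error rate")
plt.show()
```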
AnsonYanxin/pytorch-YOLOv4 | [
"f5122d600d3d7f7a8ff07b12dc2967e92c3f0586"
] | [
"demo_onnx.py"
] | [
"import sys\nimport onnx\nimport os\nimport argparse\nimport numpy as np\nimport cv2\nimport onnxruntime\nfrom tool.utils import *\nfrom tool.darknet2onnx import *\n\n\ndef main(cfg_file, weight_file, image_path, batch_size):\n\n # Transform to onnx as specified batch size\n fransform_to_onnx(cfg_file, weight_file, batch_size)\n # Transform to onnx for demo\n onnx_path_demo = fransform_to_onnx(cfg_file, weight_file, 1)\n\n session = onnxruntime.InferenceSession(onnx_path_demo)\n # session = onnx.load(onnx_path)\n print(\"The model expects input shape: \", session.get_inputs()[0].shape)\n\n image_src = cv2.imread(image_path)\n detect(session, image_src)\n\n\n\ndef detect(session, image_src):\n IN_IMAGE_H = session.get_inputs()[0].shape[2]\n IN_IMAGE_W = session.get_inputs()[0].shape[3]\n\n # Input\n resized = cv2.resize(image_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR)\n img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)\n img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)\n img_in = np.expand_dims(img_in, axis=0)\n img_in /= 255.0\n print(\"Shape of the network input: \", img_in.shape)\n\n # Compute\n input_name = session.get_inputs()[0].name\n # output, output_exist = session.run(['decoder.output_conv', 'lane_exist.linear2'], {\"input.1\": image_np})\n\n # print(img_in)\n\n outputs = session.run(None, {input_name: img_in})\n\n '''\n print(len(outputs))\n print(outputs[0].shape)\n print(outputs[1].shape)\n print(outputs[2].shape)\n print(outputs[3].shape)\n print(outputs[4].shape)\n print(outputs[5].shape)\n print(outputs[6].shape)\n print(outputs[7].shape)\n print(outputs[8].shape)\n '''\n\n outputs = [\n [outputs[0],outputs[1],outputs[2]],\n [outputs[3],outputs[4],outputs[5]],\n [outputs[6],outputs[7],outputs[8]]\n ]\n\n # print(outputs[2])\n\n num_classes = 80\n boxes = post_processing(img_in, 0.5, num_classes, 0.4, outputs)\n\n if num_classes == 20:\n namesfile = 'data/voc.names'\n elif num_classes == 80:\n namesfile = 'data/coco.names'\n else:\n namesfile = 'data/names'\n\n class_names = load_class_names(namesfile)\n plot_boxes_cv2(image_src, boxes, savename='predictions_onnx.jpg', class_names=class_names)\n\n\n\nif __name__ == '__main__':\n print(\"Converting to onnx and running demo ...\")\n if len(sys.argv) == 5:\n cfg_file = sys.argv[1]\n weight_file = sys.argv[2]\n image_path = sys.argv[3]\n batch_size = int(sys.argv[4])\n main(cfg_file, weight_file, image_path, batch_size)\n else:\n print('Please run this way:\\n')\n print(' python demo_onnx.py <cfgFile> <weightFile> <imageFile> <batchSize>')\n"
] | [
[
"numpy.expand_dims",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
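`demo_onnx.py`'s `detect()` prepares the image for ONNX Runtime by resizing, converting BGR to RGB, reordering HWC to CHW, scaling to [0, 1], and adding a batch axis. A minimal sketch of that preprocessing in isolation; the 608x608 input size is an assumption here, whereas the script reads the real size from the session's input shape:

```python
import cv2
import numpy as np

def preprocess(image_bgr: np.ndarray, in_h: int = 608, in_w: int = 608) -> np.ndarray:
    """Turn an OpenCV BGR image into a (1, 3, H, W) float32 tensor in [0, 1]."""
    resized = cv2.resize(image_bgr, (in_w, in_h), interpolation=cv2.INTER_LINEAR)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    chw = np.transpose(rgb, (2, 0, 1)).astype(np.float32)   # HWC -> CHW
    batched = np.expand_dims(chw, axis=0) / 255.0           # add batch dim, scale
    return batched

dummy = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for cv2.imread(...)
print(preprocess(dummy).shape)                     # (1, 3, 608, 608)
```

The resulting array is what gets fed to `session.run(None, {input_name: img_in})` in the script.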
os-simopt/WRFtamer | [
"3646441c5438118c455f54d336547b1822c3076c"
] | [
"wrftamer/plotting/load_and_prepare.py"
] | [
"import datetime as dt\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\n\n\n########################################################################################################################\n# Load Data\n########################################################################################################################\n\n\ndef load_obs_data(obs_data: dict, obs: str, dataset: str, **kwargs):\n \"\"\"\n This function just loads observations from a single location and stores everything in the obs_data dict.\n \"\"\"\n\n from wrftamer.wrfplotter_classes import Timeseries\n\n if obs == '' or dataset is None:\n return\n\n try:\n dtstart, dtend = kwargs[\"obs_load_from_to\"]\n except KeyError:\n ttp = kwargs[\"time_to_plot\"]\n dtstart = dt.datetime(ttp.year, 1, 1)\n dtend = dt.datetime(ttp.year + 1, 1, 1)\n\n ts = Timeseries(dataset)\n ts.read_cfconform_data(dtstart, dtend, calc_pt=True)\n\n if \"station_name\" in ts.data.dims:\n for stat in ts.data.station_name.values:\n tmp = ts.data.sel({\"station_name\": stat})\n if tmp[\"station_name\"].values == obs:\n obs_data[obs] = tmp\n break\n else:\n obs_data[obs] = ts.data\n\n\ndef load_mod_data(mod_data: dict, exp_name: str, verbose=False, **kwargs):\n \"\"\"\n This function just loads model data from a single location and stores everything in the mod_data dict.\n \"\"\"\n\n from wrftamer.main import project\n\n try:\n proj_name = kwargs[\"proj_name\"]\n except KeyError:\n proj_name = None\n\n dom = kwargs[\"dom\"]\n ave_window = kwargs[\"AveChoice_WRF\"]\n\n if ave_window in [0, \"raw\"]:\n prefix = \"raw\"\n else:\n prefix = \"Ave\" + str(ave_window) + \"Min\"\n\n proj = project(proj_name)\n workdir = proj.get_workdir(exp_name)\n list_of_locs = proj.exp_list_tslocs(exp_name, verbose=False)\n\n file2load = workdir / f\"out/{prefix}_tslist_{dom}.nc\"\n\n if verbose:\n print(\"Searching for file \", file2load)\n\n if file2load.is_file():\n tmp_xa = xr.open_dataset(file2load)\n\n tslist_data = dict()\n for dim in range(0, tmp_xa.dims[\"station_name\"]):\n tmp = tmp_xa.isel(station_name=dim)\n\n loc = str(tmp.station_name.values)\n\n if loc in list_of_locs:\n tslist_data[loc] = tmp\n\n if bool(tslist_data):\n mod_data[exp_name] = tslist_data\n else:\n print(\"No data for Experiment \", exp_name)\n pass # do not add empty dicts\n else:\n print(\"No data for Experiment\", exp_name)\n\n\ndef load_all_obs_data(dataset, **kwargs):\n from wrftamer.wrfplotter_classes import Timeseries\n\n if dataset is None:\n return\n\n try:\n dtstart, dtend = kwargs[\"obs_load_from_to\"]\n except KeyError:\n ttp = kwargs[\"time_to_plot\"]\n\n dtstart = dt.datetime(ttp.year, 1, 1)\n dtend = dt.datetime(ttp.year + 1, 1, 1)\n\n ts = Timeseries(dataset)\n\n use_dask = kwargs.get(\"use_dask\", True)\n\n ts.read_cfconform_data(dtstart, dtend, calc_pt=True, use_dask=use_dask)\n\n return ts.data\n\n\ndef load_all_mod_data(**kwargs):\n \"\"\"\n Loads all data (all experiments in <list_of_exps>) for all locations and concats the data to a single dataset.\n\n Args:\n **kwargs:\n\n Returns:\n\n \"\"\"\n\n from wrftamer.main import project\n\n try:\n proj_name = kwargs[\"proj_name\"]\n except KeyError:\n proj_name = None\n\n dom = kwargs[\"dom\"]\n ave_window = kwargs.get(\"AveChoice_WRF\", None)\n pred_window = kwargs.get(\"Prediction_Range\", None)\n\n if ave_window in [0, \"raw\"]:\n prefix = \"raw\"\n else:\n prefix = \"Ave\" + str(ave_window) + \"Min\"\n\n proj = project(proj_name)\n all_xa = []\n for exp_name in kwargs[\"Expvec\"]:\n\n workdir = 
proj.get_workdir(exp_name)\n\n if pred_window is None:\n file2load = f\"{workdir}/out/{prefix}_tslist_{dom}.nc\"\n else:\n file2load = f\"{workdir}/out/{pred_window}_{dom}.nc\"\n\n tmp_xa = xr.open_dataset(file2load)\n all_xa.append(tmp_xa)\n tmp_xa.close()\n\n all_xa = xr.concat(all_xa, dim=\"time\")\n\n return all_xa\n\n\n########################################################################################################################\n# Data Preparation\n########################################################################################################################\ndef get_limits_and_labels(plottype: str, var: str, data=None, map_data=None, units=None, description=None):\n infos = dict()\n infos[\"plottype\"] = plottype\n infos[\"var\"] = var\n\n if plottype == \"Profiles\":\n\n infos[\"ylim\"] = [0, np.nanmax([item.ALT.max() for item in data])]\n infos[\"xlim\"] = [\n np.nanmin([item.iloc[:, 1].min() for item in data]),\n np.nanmax([item.iloc[:, 1].max() for item in data]),\n ]\n\n infos[\"xlabel\"] = f\"{description} ({units})\"\n infos[\"ylabel\"] = \"z (m)\"\n infos[\"title\"] = \"\"\n infos[\"font_size\"] = 15\n\n elif plottype == \"Timeseries\":\n\n infos[\"ylim\"] = [\n np.floor(np.nanmin(data.min())),\n np.ceil(np.nanmax(data.max())),\n ]\n infos[\"tlim\"] = [data.index.min(), data.index.max()]\n\n infos[\"xlabel\"] = \"time (UTC)\"\n infos[\"ylabel\"] = f\"{description} ({units})\"\n infos[\"title\"] = \"\"\n infos[\"font_size\"] = 15\n\n elif plottype == 'Histogram':\n infos[\"xlabel\"] = f\"{description} ({units})\"\n infos[\"ylabel\"] = f\"Count\"\n infos[\"title\"] = \"\"\n infos[\"font_size\"] = 15\n\n elif plottype == 'Windrose':\n\n wsp_data, dir_data = data\n infos[\"wspmax\"] = np.ceil(wsp_data.max().max())\n\n infos[\"xlabel\"] = \"\"\n infos[\"ylabel\"] = \"\"\n infos[\"title\"] = \"\"\n infos[\"font_size\"] = 15\n\n elif plottype == \"Obs vs Mod\":\n\n vmin = np.floor(np.nanmin(data.min()))\n vmax = np.ceil(np.nanmax(data.max()))\n\n infos[\"ylim\"] = [vmin, vmax]\n infos[\"xlim\"] = [vmin, vmax]\n\n infos[\"xlabel\"] = f\"Observation ({units})\"\n infos[\"ylabel\"] = f\"Model ({units})\"\n infos[\"title\"] = \"\"\n infos[\"font_size\"] = 15\n\n elif plottype == \"zt-Plot\":\n\n infos[\"clim\"] = [np.floor(data.values.min()), np.ceil(data.values.max())]\n infos[\"ylim\"] = [\n np.floor(data.indexes[\"ALT\"].min()),\n np.ceil(data.indexes[\"ALT\"].max()),\n ]\n infos[\"tlim\"] = [data.indexes[\"time\"].min(), data.indexes[\"time\"].max()]\n\n infos[\"xlabel\"] = \"time (UTC)\"\n infos[\"ylabel\"] = f\"z ({data.ALT.units})\"\n infos[\"title\"] = f\"{data.long_name} ({data.units})\"\n infos[\"font_size\"] = 15\n\n elif plottype in [\"Map\", \"MapSequence\"]:\n\n vmin, vmax = np.floor(map_data.values.min()), np.ceil(map_data.values.max())\n cmapname = \"viridis\" # standard colormap\n\n if map_data.name in [\"DIR\", \"dir\", \"dd\"]:\n vmin, vmax = 0, 360\n cmapname = \"hsv\"\n elif map_data.name in [\"HGT\", \"hgt\", \"terrain\"]:\n vmin = int(25 * round(float(vmin) / 25.0))\n vmax = int(25 * round(float(vmax) / 25.0))\n cmapname = \"terrain\"\n elif map_data.name == \"LU_INDEX\":\n vmin, vmax = 1, 3\n cmapname = \"jet\"\n\n infos[\"clim\"] = [vmin, vmax]\n infos[\"xlim\"] = [map_data.XLONG.values.min(), map_data.XLONG.values.max()]\n infos[\"ylim\"] = [map_data.XLAT.values.min(), map_data.XLAT.values.max()]\n infos[\"xlabel\"] = \"longitude (°)\"\n infos[\"ylabel\"] = \"latitude (°)\"\n infos[\n \"title\"\n ] = f\"{map_data.description} 
({map_data.units}) at model level {map_data.model_level}\"\n infos[\"font_size\"] = 15\n infos[\"ticks\"] = np.linspace(vmin, vmax, 10)\n infos[\"cmapname\"] = cmapname\n\n return infos\n\n\ndef prep_profile_data(obs_data, mod_data, infos: dict, verbose=False) -> (list, str, str):\n \"\"\"\n Takes the data coming from my classes, selects the right data and puts everyting in a list.\n In this proj_name, I cannot concat everything into a single dataframe, because the the Z-vectors are different.\n\n obs_data: dict of observations\n mod_data: dict of model data\n \"\"\"\n\n anemometer = infos[\"anemometer\"]\n var = infos[\"var\"]\n loc = infos[\"loc\"]\n time_to_plot = infos[\"time_to_plot\"]\n ttp = np.datetime64(time_to_plot)\n expvec = infos[\"Expvec\"]\n obsvec = infos[\"Obsvec\"]\n\n # change a few parameter Names to fit the\n translator = {\n \"WSP\": {\"Sonic\": \"_USA\", \"Analog\": \"_CUP\"},\n \"DIR\": {\"Sonic\": \"_USA\", \"Analog\": \"_VANE\"},\n }\n if var in [\"WSP\", \"DIR\"]:\n device = translator.get(var, \"\").get(anemometer, \"\")\n else:\n device = \"\"\n\n data2plot = []\n for obs in obsvec:\n zvec, data = [], []\n myobs = obs_data[obs]\n myobs = myobs.where(myobs.time == ttp, drop=True)\n\n for key in myobs.keys():\n if var in key and \"std\" not in key:\n if device == \"\":\n if (\n \"CUP\" not in key\n and \"USA\" not in key\n and \"VANE\" not in key\n and key.startswith(var + \"_\")\n ):\n zvec.append(float(key.split(\"_\")[1]))\n data.append(myobs[key].values[0])\n\n elif device in key:\n zvec.append(float(key.rsplit(\"_\", 1)[1]))\n data.append(myobs[key].values[0])\n units = myobs[key].units\n description = var # (standard_name contains height)\n\n df = pd.DataFrame({\"ALT\": zvec, loc: data})\n data2plot.append(df)\n\n for exp in expvec:\n try:\n mymod = mod_data[exp][loc]\n mymod = mymod.where(mymod.time == ttp, drop=True)\n mymod = mymod.to_dataframe()\n mymod = mymod.set_index(\"ALT\")\n mymod = pd.DataFrame(mymod[var])\n mymod = mymod.rename(columns={var: exp})\n mymod = mymod.reset_index()\n data2plot.append(mymod)\n\n units = mod_data[exp][loc][var].units\n description = mod_data[exp][loc][var].standard_name\n\n except KeyError:\n if verbose:\n print(f\"No data found for experiment {exp}\")\n except IndexError:\n if verbose:\n print(f\"No data found at this time: {time_to_plot}\")\n\n return data2plot, units, description\n\n\ndef prep_zt_data(mod_data, infos: dict) -> xr.Dataset:\n var = infos[\"var\"]\n loc = infos[\"loc\"]\n\n key = list(mod_data.keys())[0]\n data2plot = mod_data[key][loc]\n\n # new_z\n new_z = mod_data[key][loc][\"ALT\"].mean(axis=0, keep_attrs=True)\n data2plot = data2plot.drop_vars(\"ALT\")\n data2plot = data2plot.rename_vars({\"model_level\": \"ALT\"})\n\n data2plot[\"ALT\"] = new_z\n data2plot = data2plot.swap_dims({\"model_level\": \"ALT\"})\n data2plot = data2plot.drop_vars(\"model_level\")\n\n return data2plot[var]\n\n\ndef prep_ts_data(obs_data, mod_data, infos: dict, verbose=False) -> (pd.DataFrame, str, str):\n \"\"\"\n Takes the data coming from my classes, selects the right data and concats\n everything into a single dataframe for easy plotting with hvplot.\n\n obs_data: dict of observations\n mod_data: dict of model data\n expvec: list of experiments to plot\n obsvec: list of observations to plot\n loc: location of the time series\n var: variable to plot\n lev: level at which the varialbe is valid\n anemometer: device used for observation\n \"\"\"\n\n anemometer = infos[\"anemometer\"]\n loc = infos[\"loc\"]\n expvec = 
infos[\"Expvec\"]\n obsvec = infos[\"Obsvec\"]\n var = infos[\"var\"]\n lev = infos[\"lev\"]\n\n if len(mod_data) > 0:\n tmp_t = mod_data[list(mod_data.keys())[0]][loc].time.values\n tlim1 = pd.Timestamp(tmp_t.min())\n tlim2 = pd.Timestamp(tmp_t.max())\n\n # change a few parameter Names to fit the name of the obs-files\n translator = {\n \"WSP\": {\"Sonic\": \"_USA\", \"Analog\": \"_CUP\"},\n \"DIR\": {\"Sonic\": \"_USA\", \"Analog\": \"_VANE\"},\n }\n if var in [\"WSP\", \"DIR\"]:\n device = translator.get(var, \"\").get(anemometer, \"\")\n else:\n device = \"\"\n\n if var == 'PRES':\n description = \"air pressure\"\n var = f'P_{lev}'\n else:\n description = var\n\n all_df = []\n\n for obs in obsvec:\n try:\n myobs = obs_data[obs].to_dataframe()\n\n try: # Two different naming conventions exist right now. In the future, reduces to one.\n tmp = myobs[var]\n units = obs_data[obs][var].units\n except KeyError:\n tmp = myobs[f\"{var}{device}_{lev}\"]\n units = obs_data[obs][f\"{var}{device}_{lev}\"].units\n\n tmp = tmp.rename(obs)\n\n except KeyError:\n tmp = pd.DataFrame()\n tmp.name = \"Data Missing\"\n units = \"\"\n description = \"No Data\"\n\n if len(mod_data) > 0:\n tmp = tmp[tlim1:tlim2]\n\n all_df.append(tmp)\n\n for exp in expvec:\n try:\n\n mymod = mod_data[exp][loc]\n units = mymod[var].units\n description = mymod[var].standard_name\n\n # interpolate all data to desired level\n zvec = mymod.ALT[0, :].values\n idx = (np.abs(zvec - float(lev))).argmin()\n xa1 = mymod.isel(model_level=idx)\n xa2 = mymod.isel(model_level=idx + 1)\n w1 = abs(zvec[idx] - float(lev)) / abs(zvec[idx + 1] - zvec[idx])\n w2 = abs(zvec[idx + 1] - float(lev)) / abs(zvec[idx + 1] - zvec[idx])\n mymod = xa1 * w2 + xa2 * w1\n\n if 'DIR' in mymod and 'U' in mymod and 'V' in mymod:\n dd = 180.0 / np.pi * np.arctan2(-mymod['U'].values, -mymod['V'].values)\n dd = np.mod(dd, 360)\n mymod['DIR'].values = dd\n\n mymod = mymod.to_dataframe()\n\n tmp = mymod[var]\n tmp = tmp.rename(exp)\n all_df.append(tmp)\n\n except KeyError:\n if verbose:\n print(f\"No Data for Experiment {exp}\")\n\n if len(all_df) > 0:\n data2plot = pd.concat(all_df, axis=1)\n else:\n data2plot = pd.DataFrame(\n {\n \"time\": [dt.datetime(1970, 1, 1), dt.datetime(2020, 1, 1)],\n \"data\": [np.nan, np.nan],\n }\n )\n data2plot = data2plot.set_index(\"time\")\n\n return data2plot, units, description\n\n\ndef prep_windrose_data(obs_data, mod_data, infos: dict, verbose=False) -> ((pd.DataFrame, pd.DataFrame), str, str):\n \"\"\"\n Takes the data coming from my classes, selects the right data and concats\n everything into a single dataframe for easy plotting with hvplot.\n\n obs_data: dict of observations\n mod_data: dict of model data\n expvec: list of experiments to plot\n obsvec: list of observations to plot\n loc: location of the time series\n var: variable to plot\n lev: level at which the varialbe is valid\n anemometer: device used for observation\n \"\"\"\n\n infos2 = infos.copy()\n infos2['var'] = 'DIR'\n\n anemometer = infos[\"anemometer\"]\n if anemometer == 'Sonic':\n device2 = '_USA'\n else:\n device2 = '_VANE'\n\n obsvec = infos[\"Obsvec\"]\n obs = obsvec[0]\n lev = infos[\"lev\"] # this is for WSP\n\n if len(obs_data) > 0:\n # get lev2, which is the closest level to WSP.\n possible_levs2 = []\n varnames = list(obs_data[obs].variables)\n for var in varnames:\n if var.startswith(f'DIR{device2}'):\n possible_levs2.append(int(var.split('_')[-1]))\n\n try: # Capture fail if varname is not of the form DIR_{device}_{level}\n idx = 
np.argmin(abs(np.asarray(possible_levs2) - int(lev)))\n lev2 = str(possible_levs2[idx])\n except ValueError:\n lev2 = lev\n\n infos2['lev'] = lev2\n\n wsp_data, disc, disc = prep_ts_data(obs_data, mod_data, infos, verbose=verbose)\n dir_data, disc, disc = prep_ts_data(obs_data, mod_data, infos2, verbose=verbose)\n\n units = 'm s-1, degree'\n description = 'wind speed and wind direction'\n\n return (wsp_data, dir_data), units, description\n"
] | [
[
"pandas.concat",
"numpy.linspace",
"numpy.asarray",
"pandas.DataFrame",
"numpy.datetime64",
"numpy.arctan2",
"numpy.mod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
gcroci2/pytorch-lightning | [
"eb648855110c604c547d04884f9352e8c4d81785"
] | [
"pytorch_lightning/utilities/apply_func.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport operator\nfrom abc import ABC\nfrom collections.abc import Mapping, Sequence\nfrom copy import copy\nfrom functools import partial\nfrom typing import Any, Callable, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _compare_version, _TORCHTEXT_AVAILABLE\n\nif _TORCHTEXT_AVAILABLE:\n if _compare_version(\"torchtext\", operator.ge, \"0.9.0\"):\n from torchtext.legacy.data import Batch\n else:\n from torchtext.data import Batch\nelse:\n Batch = type(None)\n\n\ndef to_dtype_tensor(value, dtype: torch.dtype = None, device: torch.device = None):\n if device is None:\n raise MisconfigurationException(\"device (torch.device) should be provided.\")\n return torch.tensor(value, dtype=dtype, device=device)\n\n\ndef from_numpy(value, device: torch.device = None):\n if device is None:\n raise MisconfigurationException(\"device (torch.device) should be provided.\")\n return torch.from_numpy(value).to(device)\n\n\nCONVERSION_DTYPES = [\n # bool -> uint8 as bool -> torch.bool triggers RuntimeError: Unsupported data type for NCCL process group\n (bool, partial(to_dtype_tensor, dtype=torch.uint8)),\n (int, partial(to_dtype_tensor, dtype=torch.int)),\n (float, partial(to_dtype_tensor, dtype=torch.float)),\n (np.ndarray, from_numpy),\n]\n\n\ndef apply_to_collection(\n data: Any,\n dtype: Union[type, tuple],\n function: Callable,\n *args,\n wrong_dtype: Optional[Union[type, tuple]] = None,\n **kwargs\n) -> Any:\n \"\"\"\n Recursively applies a function to all elements of a certain dtype.\n\n Args:\n data: the collection to apply the function to\n dtype: the given function will be applied to all elements of this dtype\n function: the function to apply\n *args: positional arguments (will be forwarded to calls of ``function``)\n wrong_dtype: the given function won't be applied if this type is specified and the given collections is of\n the :attr:`wrong_type` even if it is of type :attr`dtype`\n **kwargs: keyword arguments (will be forwarded to calls of ``function``)\n\n Returns:\n the resulting collection\n \"\"\"\n elem_type = type(data)\n\n # Breaking condition\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, **kwargs)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n return elem_type({\n k: apply_to_collection(v, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)\n for k, v in data.items()\n })\n\n if isinstance(data, tuple) and hasattr(data, '_fields'): # named tuple\n return elem_type(\n *(apply_to_collection(d, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs) for d in data)\n )\n\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type([\n apply_to_collection(d, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs) for d in data\n 
])\n\n # data is neither of dtype, nor a collection\n return data\n\n\nclass TransferableDataType(ABC):\n \"\"\"\n A custom type for data that can be moved to a torch device via `.to(...)`.\n Example:\n >>> isinstance(dict, TransferableDataType)\n False\n >>> isinstance(torch.rand(2, 3), TransferableDataType)\n True\n >>> class CustomObject:\n ... def __init__(self):\n ... self.x = torch.rand(2, 2)\n ... def to(self, device):\n ... self.x = self.x.to(device)\n ... return self\n >>> isinstance(CustomObject(), TransferableDataType)\n True\n \"\"\"\n\n @classmethod\n def __subclasshook__(cls, subclass):\n if cls is TransferableDataType:\n to = getattr(subclass, \"to\", None)\n return callable(to)\n return NotImplemented\n\n\ndef move_data_to_device(batch: Any, device: torch.device):\n \"\"\"\n Transfers a collection of data to the given device. Any object that defines a method\n ``to(device)`` will be moved and all other objects in the collection will be left untouched.\n\n Args:\n batch: A tensor or collection of tensors or anything that has a method `.to(...)`.\n See :func:`apply_to_collection` for a list of supported collection types.\n device: The device to which the data should be moved\n\n Return:\n the same collection but with all contained tensors residing on the new device.\n\n See Also:\n - :meth:`torch.Tensor.to`\n - :class:`torch.device`\n \"\"\"\n\n def batch_to(data):\n # try to move torchtext data first\n if _TORCHTEXT_AVAILABLE and isinstance(data, Batch):\n\n # Shallow copy because each Batch has a reference to Dataset which contains all examples\n device_data = copy(data)\n for field, field_value in data.dataset.fields.items():\n if field_value is None:\n continue\n device_field = move_data_to_device(getattr(data, field), device)\n setattr(device_data, field, device_field)\n return device_data\n\n kwargs = dict(non_blocking=True) if isinstance(data, torch.Tensor) else {}\n return data.to(device, **kwargs)\n\n dtype = (TransferableDataType, Batch) if _TORCHTEXT_AVAILABLE else TransferableDataType\n return apply_to_collection(batch, dtype=dtype, function=batch_to)\n\n\ndef convert_to_tensors(data, device: torch.device = None):\n if device is None:\n raise MisconfigurationException(\"device (torch.device) should be provided.\")\n\n for src_dtype, conversion_func in CONVERSION_DTYPES:\n data = apply_to_collection(data, src_dtype, partial(conversion_func, device=device))\n\n def _move_to_device_and_make_contiguous(t: torch.Tensor, device: torch.device):\n return t.to(device).contiguous()\n\n data = apply_to_collection(data, torch.Tensor, partial(_move_to_device_and_make_contiguous, device=device))\n return data\n"
] | [
[
"torch.from_numpy",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stephen2run/EcoDataLearn | [
"eff8c86586077a495666bcfddd1adb5abc56eefe"
] | [
"src/example/data_visualization.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# visulaize the important characteristics of the dataset\nimport matplotlib.pyplot as plt\n\n# step 1: download the data\ndataframe_all = pd.read_csv(\"https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv\")\nnum_rows = dataframe_all.shape[0]\n\n# step 2: remove useless data\n# count the number of missing elements (NaN) in each column\ncounter_nan = dataframe_all.isnull().sum()\ncounter_without_nan = counter_nan[counter_nan==0]\n# remove the columns with missing elements\ndataframe_all = dataframe_all[counter_without_nan.keys()]\n# remove the first 7 columns which contain no discriminative information\ndataframe_all = dataframe_all.ix[:,7:]\n# the list of columns (the last column is the class label)\ncolumns = dataframe_all.columns\nprint(columns)\n\n# step 3: get features (x) and scale the features\n# get x and convert it to numpy array\nx = dataframe_all.ix[:,:-1].values\nstandard_scaler = StandardScaler()\nx_std = standard_scaler.fit_transform(x)\n\n# step 4: get class labels y and then encode it into number\n# get class label data\ny = dataframe_all.ix[:,-1].values\n# encode the class label\nclass_labels = np.unique(y)\nlabel_encoder = LabelEncoder()\ny = label_encoder.fit_transform(y)\n\n# step 5: split the data into training set and test set\ntest_percentage = 0.1\nx_train, x_test, y_train, y_test = train_test_split(x_std, y, test_size = test_percentage, random_state = 0)\n\n# t-distributed Stochastic Neighbor Embedding (t-SNE) visualization\nfrom sklearn.manifold import TSNE\ntsne = TSNE(n_components=2, random_state=0)\nx_test_2d = tsne.fit_transform(x_test)\n\n# scatter plot the sample points among 5 classes\nmarkers=('s', 'd', 'o', '^', 'v')\ncolor_map = {0:'red', 1:'blue', 2:'lightgreen', 3:'purple', 4:'cyan'}\nplt.figure()\nfor idx, cl in enumerate(np.unique(y_test)):\n plt.scatter(x=x_test_2d[y_test==cl,0], y=x_test_2d[y_test==cl,1], c=color_map[idx], marker=markers[idx], label=cl)\nplt.xlabel('X in t-SNE')\nplt.ylabel('Y in t-SNE')\nplt.legend(loc='upper left')\nplt.title('t-SNE visualization of test data')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.cross_validation.train_test_split",
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.pyplot.scatter",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gquarles/qhost-bot | [
"402716555b887489382e3ce82af45017c2a548bb"
] | [
"plot.py"
] | [
"import matplotlib.pyplot as plt\nimport discord\nimport os\nimport psutil\nimport json\nimport urllib.request\n\nTOKEN = 'NTUwODU1MTUxODkyNjkyOTky.D1ovfA._13Nmqjkh01I_Q9b8I_wPX9mtBA'\nclient = discord.Client()\ndirectory = \"D:\\servers\\sapphire\\isle\"\nplayersDir = 'D:\\servers\\sapphire\\isle\\TheIsle\\Saved\\Databases\\Survival\\Players\\\\'\ndinosDir = 'D:\\qhost-bot\\dinos\\\\'\n\ndef killServer():\n os.system('taskkill /F /FI \"WindowTitle eq Administrator: qHost Isle Server\" /T')\n\ndef startServer():\n killServer()\n os.system('start cmd /c ' + directory + '\\start.bat')\n\ndef updateServer():\n os.system(directory + '\\\\update.bat')\n\ndef getRam():\n islePID = 0\n memoryUse = 0\n process = filter(lambda p: p.name() == \"TheIsleServer-Win64-Shipping.exe\", psutil.process_iter())\n for i in process:\n islePID = psutil.Process(i.pid)\n if islePID != 0:\n memoryUse = islePID.memory_info()[0]/2.**30\n return memoryUse\n\ndef getCpu():\n islePID = 0\n cpuUse = 0\n process = filter(lambda p: p.name() == \"TheIsleServer-Win64-Shipping.exe\", psutil.process_iter())\n for i in process:\n islePID = psutil.Process(i.pid)\n if islePID != 0:\n cpuUse = islePID.cpu_percent(interval=1) / psutil.cpu_count()\n cpuUse = round(cpuUse)\n return cpuUse\n\ndef getOnline():\n islePID = 0\n process = filter(lambda p: p.name() == \"TheIsleServer-Win64-Shipping.exe\", psutil.process_iter())\n for i in process:\n islePID = psutil.Process(i.pid)\n if islePID == 0:\n return False\n else:\n return True\n\ndef getPlayer(steamID):\n if os.path.isfile(playersDir + steamID + \".json\"):\n with open(playersDir + steamID + \".json\") as f:\n data = json.load(f)\n return(data)\n f.close()\n else:\n return False\n\ndef changePlayer(steamID, key, value):\n player = getPlayer(steamID)\n if os.path.isfile(playersDir + steamID + \".json\"):\n player[key] = value\n with open(playersDir + steamID + \".json\", 'w') as f:\n json.dump(player, f)\n f.close()\n return True\n else:\n return False\n\ndef verifyExists(steamID):\n if not os.path.isfile(playersDir + steamID + \".json\"):\n with open(dinosDir + 'default.json') as f:\n default = json.load(f)\n f.close()\n with open(playersDir + steamID + \".json\", 'w') as f:\n json.dump(default, f)\n f.close()\n\ndef loadDino(dino, steamID):\n verifyExists(steamID)\n if not os.path.isfile(dinosDir + dino + '.json'):\n return False\n else:\n with open(dinosDir + dino + '.json') as f:\n dinoFile = json.load(f)\n f.close()\n with open(playersDir + steamID + \".json\", 'w') as f:\n json.dump(dinoFile, f)\n f.close()\n return True\n\ndef saveDino(dino, steamID):\n verifyExists(steamID)\n with open(playersDir + steamID + \".json\") as f:\n dinoFile = json.load(f)\n f.close()\n with open(dinosDir + dino + '.json', 'w') as f:\n json.dump(dinoFile, f)\n f.close()\n return True\n\n\ndef getPlayers():\n with open('points.json') as json_file:\n players = json.load(json_file)\n return players\n\ndef savePlayers(players):\n with open('points.json', 'w') as json_file: \n json.dump(players, json_file)\n\ndef checkSteamID(steamID, players):\n for ply in players:\n if steamID == ply['steamID']:\n verifyExists(steamID)\n return ply\n return False\n\ndef checkDiscordID(discordID, players):\n for ply in players:\n if discordID == ply['discordID']:\n verifyExists(ply['steamID'])\n return ply\n return False\n\ndef hasPoints(discordID, points):\n players = getPlayers()\n ply = checkDiscordID(discordID, players)\n points = int(points)\n plyPoints = int(ply['points'])\n if plyPoints - points < 1:\n return 
False\n else:\n return True\n\ndef subtractPoints(discordID, points):\n players = getPlayers()\n ply = checkDiscordID(discordID, players)\n if ply['points'] == 'admin':\n return True\n if hasPoints(discordID, points):\n players = getPlayers()\n ply = checkDiscordID(discordID, players)\n points = int(points)\n plyPoints = int(ply['points'])\n plyPoints = plyPoints - points\n plyPoints = str(plyPoints)\n ply['points'] = plyPoints\n savePlayers(players)\n return True\n else:\n return False\n\ndef checkDinoName(name):\n for dino, dinoName in dinoNameDict.items():\n if name == dino:\n return dinoName\n return False\n\ndef checkDinoPrice(name):\n for dinoName, dinoPrice in dinoPriceDict.items():\n if name == dinoName:\n return dinoPrice\n return False\n\ndef loadWarnings():\n with open('warnings.json') as f:\n return json.load(f)\n\n\ndef saveWarnings(warnings):\n warningsFile = open('warnings.json', 'w')\n warningsJSON = json.dumps(warnings)\n warningsJSON = json.loads(warningsJSON)\n warningsFile.write(json.dumps(warningsJSON, indent=4, sort_keys=True))\n warningsFile.close()\n\ndef getWarnings(discordID):\n warnings = loadWarnings()\n if discordID in warnings:\n return warnings[discordID]\n else:\n return False\n\ndef addWarning(discordID, warning):\n warnings = loadWarnings()\n if getWarnings(discordID):\n warns = warnings[discordID]\n warns.append(warning)\n warnings[discordID] = warns\n saveWarnings(warnings)\n else:\n warns = []\n warns.append(warning)\n warnings[discordID] = warns\n saveWarnings(warnings)\n\ndinoNameDict = {\n 'spino': 'Spino',\n 'rex': 'RexAdultS',\n 'giga': 'GigaAdultS',\n 'acro': 'Acro',\n 'alberto': 'Albert',\n 'allo': 'AlloAdultS',\n 'carno': 'CarnoAdultS',\n 'sucho': 'SuchoAdultS',\n 'bary': 'Bary',\n 'cerato': 'CeratoAdultS',\n 'dilo': 'DiloAdultS',\n 'utah': 'UtahAdultS',\n 'austro': 'Austro',\n 'herrera': 'Herrera',\n 'shant': 'Shant',\n 'cama': 'Camara',\n 'trike': 'TrikeAdultS',\n 'para': 'ParaAdultS',\n 'maia': 'MaiaAdultS',\n 'dryo': 'Dryo',\n 'anky': 'Anky',\n 'pachy': 'Pachy',\n 'stego': 'Stego',\n 'ava': 'Ava',\n 'theri': 'Theri',\n 'pue': 'Puerta',\n 'gali': 'GalliAdultS',\n 'diablo': 'DiabloAdultS'\n}\n\ndinoPriceDict = {\n 'Spino': '250',\n 'RexAdultS': '300',\n 'GigaAdultS': '300',\n 'Acro': '30',\n 'Albert': '24',\n 'AlloAdultS': '18',\n 'CarnoAdultS': '16',\n 'SuchoAdultS': '22',\n 'Bary': '14',\n 'CeratoAdultS': '20',\n 'DiloAdultS': '14',\n 'UtahAdultS': '26',\n 'Austro': '6',\n 'Herrera': '6',\n 'Shant': '60',\n 'Camara': '180',\n 'TrikeAdultS': '36',\n 'ParaAdultS': '22',\n 'MaiaAdultS': '14',\n 'Dryo': '4',\n 'Anky': '22',\n 'Pachy': '10',\n 'Stego': '22',\n 'Ava': '8',\n 'Theri': '26',\n 'Puerta': '250',\n 'GalliAdultS': '12',\n 'DiabloAdultS': '18'\n}\n\nplayers = getPlayers()\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content.startswith('!channel'):\n msg = str(message.channel)\n msg = msg + ' {0.author.mention}'.format(message)\n await client.send_message(message.channel, msg)\n\n if str(message.channel) == 'sapphire-isle' or str(message.channel) == 'server-control': #main-server-box\n if message.content.startswith('!help') or message.content.startswith('!commands'):\n msg = 'Please visit the server-box-control-guide text channel'\n msg = msg + ''.format(message)\n await client.send_message(message.channel, msg)\n if message.content.startswith('!start'):\n killServer()\n startServer()\n msg = '{0.author.mention} | Server Started'.format(message)\n await 
client.send_message(message.channel, msg)\n elif message.content.startswith('!stop'):\n killServer()\n msg = '{0.author.mention} | Server Stopped'.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!update'):\n killServer()\n msg = '{0.author.mention} | Server Update Started'.format(message)\n await client.send_message(message.channel, msg)\n updateServer()\n msg = '{0.author.mention} | Server Update Finished'.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!restart'):\n killServer()\n startServer()\n msg = '{0.author.mention} | Server Restarted'.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!status'):\n ram = round(getRam())\n cpu = getCpu()\n cpu = str(cpu)\n ram = str(ram)\n online = getOnline()\n if online:\n msg = '{0.author.mention} | RAM Usage: ' + ram + ' GB | CPU Usage: ' + cpu + '% | Server is ONLINE'\n else:\n msg = '{0.author.mention} | RAM Usage: 0 GB | 0% CPU | Server is OFFLINE'\n \n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!pointplot'):\n with open('points.json') as json_file:\n players = json.load(json_file)\n points = []\n\n for player in players:\n if not player['points'] == 'admin':\n point = int(player['points'])\n points.append(point)\n plt.clf()\n plt.plot(points)\n \n plt.savefig('plot1.png')\n await client.send_file(message.channel, 'plot1.png')\n elif message.content.startswith('!pointdist'):\n plt.clf()\n\n with open('points.json') as json_file:\n players = json.load(json_file)\n\n points = []\n\n for player in players:\n if not player['points'] == 'admin':\n point = int(player['points'])\n points.append(point)\n\n points.sort()\n plt.plot(points)\n plt.xticks([])\n plt.ylabel('Points')\n plt.xlabel('Players')\n plt.savefig('plot.png')\n await client.send_file(message.channel, 'plot.png')\n elif message.content.startswith('!pointhist'):\n with open('points.json') as json_file:\n players = json.load(json_file)\n\n points = []\n\n for player in players:\n if not player['points'] == 'admin':\n point = int(player['points'])\n points.append(point)\n plt.clf()\n plt.hist(points)\n plt.ylabel('Points')\n plt.xlabel('Player Accounts')\n plt.savefig('plot2.png')\n await client.send_file(message.channel, 'plot2.png')\n elif message.content.startswith('!stats'):\n pDir = 'D:\\servers\\sapphire\\isle\\TheIsle\\Saved\\Databases\\Survival\\Players'\n onlyfiles = next(os.walk(pDir))[2]\n joinedCount = str(len(onlyfiles))\n players = getPlayers()\n plyCount = 0\n for ply in players:\n plyCount = plyCount + 1\n registerRate = str(round(plyCount / int(joinedCount) * 100))\n msg = 'Registered Discord Users: **' + str(plyCount) + '**\\nUnique Players To Join Server: **' + joinedCount + '**\\n'\n msg = msg + 'That is a register rate of **' + registerRate + '**%'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n \n elif message.content.startswith('!getplayer'):\n command = message.content\n command = command.split()\n player = getPlayer(command[1])\n if player:\n msg = command[1]\n msg = msg + '```'\n for key, value in player.items():\n msg = msg + str(key) + ' : ' + str(value) + '\\n'\n msg = msg + '```'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Player file does not exist at ' + command[1]\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif 
message.content.startswith('!edit'):\n command = message.content\n command = command.split()\n if os.path.isfile(playersDir + command[1] + \".json\"):\n if changePlayer(command[1], command[2], command[3]):\n steamID = str(command[1])\n msg = '{0.author.mention} | '\n msg = msg + 'Player file **'\n msg = msg + steamID\n msg = msg + '** edited'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Player file does not exist at ' + command[1]\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!grow'):\n command = message.content\n command = command.split()\n if len(command) < 3:\n growth = '1.0'\n else:\n growth = str(command[2])\n\n if os.path.isfile(playersDir + command[1] + \".json\"):\n if changePlayer(command[1], \"Growth\", growth):\n changePlayer(command[1], \"Hunger\", \"99999\")\n changePlayer(command[1], \"Thirst\", \"99999\")\n changePlayer(command[1], \"Stamina\", \"99999\")\n changePlayer(command[1], \"Health\", \"99999\")\n changePlayer(command[1], \"UnlockedCharacters\", \"\")\n\n steamID = str(command[1])\n msg = '{0.author.mention} | '\n msg = msg + 'Player **'\n msg = msg + steamID\n msg = msg + '** growth set to **' + growth + '**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Player file does not exist at ' + command[1]\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!dino'):\n command = message.content\n command = command.split()\n if os.path.isfile(playersDir + command[1] + \".json\"):\n dino = str(command[2])\n if changePlayer(command[1], \"CharacterClass\", dino):\n if changePlayer(command[1], \"UnlockedCharacters\", \"\"):\n steamID = str(command[1])\n msg = '{0.author.mention} | '\n msg = msg + 'Player **'\n msg = msg + steamID\n msg = msg + '** dino set to **' + dino + '**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Player file does not exist at ' + command[1]\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!download'):\n command = message.content\n command = command.split()\n msg = '{0.author.mention} | Starting download of player file ' + str(command[1])\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n urllib.request.urlretrieve('ftp://JuicyJuiceNV:[email protected]:8821/144.48.104.226_14010/TheIsle/Saved/Databases/Survival/Players/' + str(command[1]) + '.json', playersDir + str(command[1]) + '.json')\n player = getPlayer(command[1])\n if player:\n msg = '{0.author.mention} | Download finished. 
Now displaying '\n msg = msg + str(command[1])\n msg = msg + '```'\n for key, value in player.items():\n msg = msg + str(key) + ' : ' + str(value) + '\\n'\n msg = msg + '```'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!upload'):\n command = message.content\n command = command.split()\n msg = '{0.author.mention} | Starting upload of player file ' + str(command[1])\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n f = open(\"up.ftp\", \"w\")\n f.write('open 144.48.104.226 8821\\n')\n f.write('JuicyJuiceNV\\n')\n f.write('JuIcEJuiCy$4567\\n')\n f.write('cd /144.48.104.226_14010/TheIsle/Saved/Databases/Survival/Players\\n')\n f.write('put D:\\servers\\sapphire\\isle\\TheIsle\\Saved\\Databases\\Survival\\Players\\\\' + str(command[1]) + '.json\\n')\n f.write('disconnect\\n')\n f.write('quit')\n f.close()\n os.system('ftp -i -s:up.ftp')\n msg = '{0.author.mention} | Finished uploading player file ' + str(command[1])\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!wipe'):\n msg = '{0.author.mention} | Initating Wipe...'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n os.system('ftp -i -s:new.ftp')\n msg = '{0.author.mention} | Wiped all player files from Queens Isle '\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n\n\n elif str(message.channel) == 'admin-commands' or str(message.channel) == 'server-administration': #server-administration\n command = message.content\n command = command.split()\n players = getPlayers()\n if message.content.startswith('!points'):\n if len(command) == 1:\n plyID = message.author.id\n elif len(command) == 2:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = 0\n players = getPlayers()\n found = checkDiscordID(plyID, players)\n if found:\n msg = '<@' + plyID + '>'\n msg = msg + ', you have **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n found = 1\n if not found:\n msg = '<@' + plyID + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!register'):\n if not len(command) == 2:\n msg = 'Please use !register **SteamID64**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = message.author.id\n steamID = command[1]\n found = False\n for ply in players:\n if steamID == ply['steamID']:\n found = True\n msg = 'SteamID already registered to <@' + ply['discordID'] + '>.' \n await client.send_message(message.channel, msg)\n if not found:\n for ply in players:\n if plyID == ply['discordID']:\n found = True\n msg = '<@' + ply['discordID'] + '>, you already have an account with the SteamID **' + ply['steamID'] + '**.' 
\n await client.send_message(message.channel, msg)\n if not found:\n newPlayer = {\n 'discordID': plyID,\n 'steamID': steamID,\n 'points': '0'\n }\n players.insert(0, newPlayer)\n savePlayers(players)\n msg = '<@' + plyID + '>, You have successfully registered as SteamID 64 **' + steamID + '**.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!gender') or message.content.startswith('!transition'):\n if not len(command) == 3:\n msg = 'Please use !gender **@discordName male/female**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n gender = False\n if command[2].lower() == 'male' or command[2].lower() == 'm':\n gender = 'False'\n elif command[2].lower() == 'female' or command[2].lower() == 'f':\n gender = 'True'\n else:\n msg = 'Please use !gender **@discordName male/female**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if gender:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n ply = checkDiscordID(plyID, players)\n steamID = ply['steamID']\n if changePlayer(steamID, \"bGender\", gender):\n msg = str(command[1]) + ' Gender set to ' + gender\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n\n\n elif message.content.startswith('!grow'):\n growth = False\n if len(command) == 2:\n growth = '1.0'\n elif len(command) == 3:\n growth = str(command[2])\n else:\n msg = 'Please use !grow **@discordName** **growthLevel**(optional)'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if growth:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n if changePlayer(steamID, \"Growth\", growth):\n changePlayer(steamID, \"Hunger\", \"99999\")\n changePlayer(steamID, \"Thirst\", \"99999\")\n changePlayer(steamID, \"Stamina\", \"99999\")\n changePlayer(steamID, \"Health\", \"99999\")\n changePlayer(steamID, \"UnlockedCharacters\", \"\")\n msg = str(command[1]) + ' Growth set to ' + growth\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!skin'):\n if not len(command) == 9:\n msg = 'Please use !skin **@discordName** **1 2 3 4 5 6 7**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n if changePlayer(steamID, \"SkinPaletteSection1\", command[2]):\n if changePlayer(steamID, \"SkinPaletteSection2\", command[3]):\n if changePlayer(steamID, \"SkinPaletteSection3\", command[4]):\n if changePlayer(steamID, \"SkinPaletteSection4\", command[5]):\n if changePlayer(steamID, \"SkinPaletteSection6\", command[7]):\n if changePlayer(steamID, \"SkinPaletteVariation\", command[8]):\n msg = str(command[1]) + ' Skin edited.'\n msg = msg.format(message)\n await client.send_message(message.channel, 
msg)\n elif message.content.startswith('!warn'):\n if len(command) < 3:\n msg = 'Please use !warn **@discordName** **warning**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n players = getPlayers()\n\n if not checkDiscordID(plyID, players):\n msg = 'Could not find player ' + command[1]\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n \n warning = ''\n\n for i in range(2, len(command)):\n warning = warning + command[i] + ' '\n \n addWarning(plyID, warning)\n msg = 'Warned ' + command[1] + ' for ' + warning\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n\n elif message.content.startswith('!dino'):\n if not len(command) == 3:\n msg = 'Please use !dino **@discordName** **DinoName**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n dino = str(command[2])\n if changePlayer(steamID, \"CharacterClass\", dino):\n if changePlayer(steamID, \"UnlockedCharacters\", \"\"):\n msg = str(command[1]) + ' Dino set to ' + dino\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!lookup') or message.content.startswith('!getplayer'):\n if not len(command) == 2:\n msg = 'Please use !lookup **@discordName** OR !lookup **SteamID**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n if command[1][0] == '<':\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n warnings = getWarnings(plyID)\n warnString = 'None'\n if warnings:\n warnString = '```'\n for warning in warnings:\n warnString = warnString + warning + '\\n'\n warnString = warnString + '```'\n\n found = False\n for ply in players:\n if plyID == ply['discordID']:\n verifyExists(ply['steamID'])\n found = True\n player = getPlayer(ply['steamID'])\n msg = plyID = str(command[1]) + ' has the SteamID **' + ply['steamID'] + '** and has **' + ply['points'] + '** <:fossil:556472990460805138>\\n profile: \thttp://steamcommunity.com/profiles/' + ply['steamID'] + '\\n' \n msg = msg + 'Warnings: ' + warnString\n msg = msg + '```'\n for key, value in player.items():\n msg = msg + str(key) + ' : ' + str(value) + '\\n'\n msg = msg + '```'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if not found:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = str(command[1])\n found = False\n for ply in players:\n if steamID == ply['steamID']:\n found = True\n player = getPlayer(ply['steamID'])\n\n warnings = getWarnings(ply['discordID'])\n warnString = 'None'\n if warnings:\n warnString = '```'\n for warning in warnings:\n warnString = warnString + warning + '\\n'\n warnString = warnString + '```'\n\n msg = '<@' + ply['discordID'] + '> has the SteamID **' + ply['steamID'] + '** and has **' + ply['points'] + '** 
<:fossil:556472990460805138>\\n profile: http://steamcommunity.com/profiles/' + ply['steamID'] + '\\n' \n msg = msg + 'Warnings: ' + warnString\n msg = msg + '```'\n for key, value in player.items():\n msg = msg + str(key) + ' : ' + str(value) + '\\n'\n msg = msg + '```'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if not found:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!bring'):\n if not len(command) == 2:\n msg = 'Please use !bring **@discordName**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n admin = checkDiscordID(message.author.id, players)\n adminSteamID = admin['steamID']\n verifyExists(adminSteamID)\n adminFile = getPlayer(adminSteamID)\n location = str(adminFile['Location_Isle_V3'])\n\n verifyExists(ply['steamID'])\n plyFile = getPlayer(ply['steamID'])\n changePlayer(ply['steamID'], 'Location_Isle_V3', location)\n\n msg = str(command[1]) + ' Brought to <@' + message.author.id + '>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n\n\n\n elif message.content.startswith('!save'):\n if not len(command) == 3:\n msg = 'Please use !save **@discordName** **Name_Of_Save**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n saveDino(command[2], steamID)\n msg = str(command[1]) + ' Saved to ' + str(command[2])\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!load'):\n if not len(command) == 3:\n msg = 'Please use !load **@discordName** **Name_Of_Save**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n if loadDino(command[2], steamID):\n msg = str(command[1]) + ' Loaded dino file ' + str(command[2])\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = str(command[2]) + ' File does not exist.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n \n\n elif message.content.startswith('!edit'):\n if not len(command) == 4:\n msg = 'Please use !edit **@discordName** **Trait** **NewValue**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = 
checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n steamID = ply['steamID']\n if changePlayer(steamID, command[2], command[3]):\n msg = 'Player file <@' + ply['discordID'] + '> ' + command[2] + ' is now ' + command[3] + '.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!assign'):\n if not len(command) == 3:\n msg = 'Please use !assign **@discordName SteamID64**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n steamID = str(command[2])\n\n found = checkSteamID(steamID, players)\n if found:\n msg = 'SteamID already assigned to <@' + found['discordID'] + '>.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n found = False\n for ply in players:\n if plyID == ply['discordID']:\n found = True\n ply['steamID'] = steamID\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has the SteamID **' + ply['steamID'] + '**.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if not found:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!setpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n found['points'] = command[2]\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !setpoints **@discordName amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!addpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n found['points'] = str(int(found['points']) + int(command[2]))\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !addpoints **@discordName amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!removepoints') or message.content.startswith('!yoinkpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n if int(found['points']) - int(command[2]) > 0:\n found['points'] = str(int(found['points']) - int(command[2]))\n savePlayers(players)\n msg = 
plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = str(command[1]) + ' does not have enough <:fossil:556472990460805138> for that transaction.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !removepoints **@discordName amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif str(message.channel) == 'sapphire-isle-pointshop-user' or str(message.channel) == 'shop-points': #shop-points\n command = message.content\n command = command.split()\n players = getPlayers()\n if message.content.startswith('!points'):\n if len(command) == 1:\n plyID = message.author.id\n elif len(command) == 2:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = 0\n for ply in players:\n if ply['discordID'] == plyID:\n msg = '<@' + plyID + '>'\n msg = msg + ', you have **' + ply['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n found = 1\n if not found:\n msg = '<@' + plyID + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!register'):\n if not len(command) == 2:\n msg = 'Please use !register **SteamID64**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = message.author.id\n steamID = command[1]\n found = False\n for ply in players:\n if steamID == ply['steamID']:\n found = True\n msg = 'SteamID already registered to <@' + ply['discordID'] + '>.' \n await client.send_message(message.channel, msg)\n if not found:\n for ply in players:\n if plyID == ply['discordID']:\n found = True\n msg = '<@' + ply['discordID'] + '>, you already have an account with the SteamID **' + ply['steamID'] + '**.' 
\n await client.send_message(message.channel, msg)\n if not found:\n newPlayer = {\n 'discordID': plyID,\n 'steamID': steamID,\n 'points': '250'\n }\n players.insert(0, newPlayer)\n savePlayers(players)\n msg = '<@' + plyID + '>, You have successfully registered as SteamID 64 **' + steamID + '**.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!transfer'):\n if not len(command) == 3:\n msg = 'Please use !transfer **@username amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n if not checkDiscordID(message.author.id, players):\n msg = '<@' + plyID + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n initPly = checkDiscordID(message.author.id, players)\n if int(initPly['points']) - int(command[2]) < 0:\n msg = '<@' + message.author.id + '>' + ', you do not have enough <:fossil:556472990460805138> for this transaction.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n initPly['points'] = str(int(initPly['points']) - int(command[2]))\n ply['points'] = str(int(ply['points']) + int(command[2]))\n savePlayers(players)\n msg = '<@' + message.author.id + '>' + ', you have transfered **' + command[2] + '** <:fossil:556472990460805138> to <@' + ply['discordID'] + '>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif str(message.channel) == 'test-qbot' or str(message.channel) == 'member-registration': #member-registration\n command = message.content\n command = command.split()\n players = getPlayers()\n if message.content.startswith('!register'):\n if not len(command) == 2:\n msg = 'Please use !register **SteamID64**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = message.author.id\n steamID = command[1]\n found = False\n for ply in players:\n if steamID == ply['steamID']:\n found = True\n msg = 'SteamID already registered to <@' + ply['discordID'] + '>.' \n await client.send_message(message.channel, msg)\n if not found:\n for ply in players:\n if plyID == ply['discordID']:\n found = True\n msg = '<@' + ply['discordID'] + '>, you already have an account with the SteamID **' + ply['steamID'] + '**.' 
\n await client.send_message(message.channel, msg)\n role = discord.utils.get(message.server.roles, name='Member')\n await client.add_roles(message.author, role)\n if not found:\n newPlayer = {\n 'discordID': plyID,\n 'steamID': steamID,\n 'points': '250'\n }\n players.insert(0, newPlayer)\n savePlayers(players)\n role = discord.utils.get(message.server.roles, name='Member')\n await client.add_roles(message.author, role)\n msg = '<@' + plyID + '>, You have successfully registered as SteamID 64 **' + steamID + '**.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif str(message.channel) == 'purchasing-chat': #purchasing chat\n command = message.content\n command = command.split()\n players = getPlayers()\n if not checkDiscordID(message.author.id, players):\n msg = '<@' + message.author.id + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n if message.content.startswith('!points'):\n if len(command) == 1:\n plyID = message.author.id\n elif len(command) == 2:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = 0\n for ply in players:\n if ply['discordID'] == plyID:\n msg = '<@' + plyID + '>'\n msg = msg + ', you have **' + ply['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n found = 1\n if not found:\n msg = '<@' + plyID + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!purchase'):\n if len(command) == 1:\n command.append('')\n if command[1] == 'gender' or command[1] == 'genderswap':\n plyID = message.author.id\n ply = checkDiscordID(plyID, players)\n if not len(command) == 2:\n msg = 'Please use !purchase gender'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n if subtractPoints(plyID, 10):\n verifyExists(ply['steamID'])\n plyFile = getPlayer(ply['steamID'])\n gender = str(plyFile['bGender'])\n if gender == 'False':\n gender = 'True'\n pGender = 'Female'\n elif gender == 'True':\n gender = 'False'\n pGender = 'Male'\n else:\n msg = '<@' + message.author.id + '>' + ', ERROR READING PLAYER FILE @GRIFF#6889'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if changePlayer(ply['steamID'], 'bGender', gender):\n players = getPlayers()\n ply = checkDiscordID(plyID, players)\n msg = '<@' + message.author.id + '>' + ', You have purchased a gender swap to **' + pGender + '**. 
You now have **' + ply['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = '<@' + message.author.id + '>' + ', you do not have enough <:fossil:556472990460805138> for this transaction'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n plyID = message.author.id\n ply = checkDiscordID(plyID, players)\n verifyExists(ply['steamID'])\n plyFile = getPlayer(ply['steamID'])\n if not len(command) == 2:\n msg = 'Please use !purchase dinoname'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n dino = checkDinoName(command[1].lower())\n if dino:\n if subtractPoints(plyID, checkDinoPrice(dino)):\n plyFile = getPlayer(ply['steamID'])\n if changePlayer(ply['steamID'], 'CharacterClass', dino):\n changePlayer(ply['steamID'], 'UnlockedCharacters', '')\n changePlayer(ply['steamID'], 'Growth', '1.0')\n changePlayer(ply['steamID'], \"Hunger\", \"99999\")\n changePlayer(ply['steamID'], \"Thirst\", \"99999\")\n changePlayer(ply['steamID'], \"Stamina\", \"99999\")\n changePlayer(ply['steamID'], \"Health\", \"99999\")\n players = getPlayers()\n ply = checkDiscordID(plyID, players)\n msg = '<@' + message.author.id + '>' + ', you have purchased **' + dino + '** You now have **' + ply['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n else:\n msg = '<@' + message.author.id + '>' + ', you do not have enough <:fossil:556472990460805138> for this transaction'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n else:\n msg = 'Invalid dino: **' + command[1] + '** please do !prices for a list of things you can purchase.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n return\n elif message.content.startswith('!list') or message.content.startswith('!prices'):\n msg = 'Currently available purchases:\\n'\n dinos = []\n prices = []\n for key, value in dinoNameDict.items():\n dinos.append(key)\n for key, value in dinoPriceDict.items():\n prices.append(value)\n msg = msg + '\\n 10 <:fossil:556472990460805138> | !purchase gender'\n for i in range(0, len(dinos)):\n extra = ''\n if int(prices[i]) < 100:\n extra = ' '\n if int(prices[i]) < 10:\n extra = ' '\n msg = msg + '\\n ' + prices[i] + extra + ' <:fossil:556472990460805138> | !purchase ' + dinos[i]\n msg = msg.format(message)\n await client.send_message(message.channel, msg) \n elif str(message.channel) == 'event-points':\n command = message.content\n command = command.split()\n players = getPlayers()\n if message.content.startswith('!points'):\n if len(command) == 1:\n plyID = message.author.id\n elif len(command) == 2:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = 0\n for ply in players:\n if ply['discordID'] == plyID:\n msg = '<@' + plyID + '>'\n msg = msg + ', you have **' + ply['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n found = 1\n if not found:\n msg = '<@' + plyID + '>'\n msg = msg + ', you do not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!bring'):\n if not len(command) == 2:\n msg = 'Please use !bring **@discordName**'\n msg = msg.format(message)\n await 
client.send_message(message.channel, msg)\n else:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n ply = checkDiscordID(plyID, players)\n if not ply:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n admin = checkDiscordID(message.author.id, players)\n adminSteamID = admin['steamID']\n verifyExists(adminSteamID)\n adminFile = getPlayer(adminSteamID)\n location = str(adminFile['Location_Isle_V3'])\n\n verifyExists(ply['steamID'])\n plyFile = getPlayer(ply['steamID'])\n changePlayer(ply['steamID'], 'Location_Isle_V3', location)\n\n msg = str(command[1]) + ' Brought to <@' + message.author.id + '>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!removepoints') or message.content.startswith('!yoinkpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n if int(found['points']) - int(command[2]) > 0:\n found['points'] = str(int(found['points']) - int(command[2]))\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = str(command[1]) + ' does not have enough <:fossil:556472990460805138> for that transaction.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !removepoints **@discordName amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!addpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n found['points'] = str(int(found['points']) + int(command[2]))\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !addpoints **@discordName amount**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n elif message.content.startswith('!setpoints'):\n if len(command) == 3:\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n found = checkDiscordID(plyID, players)\n if found:\n found['points'] = command[2]\n savePlayers(players)\n msg = plyID = str(command[1]) + ' now has **' + found['points'] + '** <:fossil:556472990460805138>'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = plyID = str(command[1]) + ' does not have a pointshop account'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n msg = 'Please use !setpoints **@discordName amount**'\n msg = msg.format(message)\n await 
client.send_message(message.channel, msg)\n elif message.content.startswith('!lookup') or message.content.startswith('!getplayer'):\n if not len(command) == 2:\n msg = 'Please use !lookup **@discordName** OR !lookup **SteamID**'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n else:\n if command[1][0] == '<':\n plyID = str(command[1])\n plyID = plyID[1:]\n plyID = plyID[1:]\n plyID = plyID[:-1]\n if plyID[0] == '!':\n plyID = plyID[1:]\n\n found = False\n for ply in players:\n if plyID == ply['discordID']:\n verifyExists(ply['steamID'])\n found = True\n player = getPlayer(ply['steamID'])\n msg = plyID = str(command[1]) + ' has the SteamID **' + ply['steamID'] + '** and has **' + ply['points'] + '** <:fossil:556472990460805138>\\n profile: \thttp://steamcommunity.com/profiles/' + ply['steamID'] + '\\n' \n msg = msg + '```'\n for key, value in player.items():\n msg = msg + str(key) + ' : ' + str(value) + '\\n'\n msg = msg + '```'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n if not found:\n msg = str(command[1]) + ' Does not have a pointshop account.'\n msg = msg.format(message)\n await client.send_message(message.channel, msg)\n\[email protected]\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run(TOKEN)"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
StannisZhou/mixed_hmc | [
"7925b3290c9ba692a2afbe06e102ea88a7cea511"
] | [
"scripts/correlated_topic_models/generate_samples_ap_data.py"
] | [
"import os\nimport tempfile\n\nimport numpy as np\n\nimport joblib\nimport sacred\nfrom momentum.correlated_topic_models.dhmc import draw_samples_dhmc\nfrom momentum.correlated_topic_models.gibbs import draw_samples_gibbs\nfrom momentum.correlated_topic_models.hmc_within_gibbs import \\\n draw_samples_hmc_within_gibbs\nfrom momentum.correlated_topic_models.mixed_hmc import draw_samples_mixed_hmc\nfrom momentum.correlated_topic_models.pymc3 import draw_samples_pymc3\nfrom momentum.diagnostics.ess import get_min_ess\nfrom sacred.observers import FileStorageObserver\n\nlog_folder = os.path.expanduser('~/logs/correlated_topic_models_results')\nex = sacred.Experiment('correlated_topic_models_generate_samples')\nex.observers.append(FileStorageObserver.create(log_folder))\n\n\[email protected]\ndef config():\n method = 'pymc3'\n n_warm_up_samples = int(1e3)\n n_samples = int(4e3)\n epsilon = 4\n # epsilon = [3, 4]\n L_multiplier = 80\n L = [40, 50]\n total_travel_time = 600\n n_discrete_to_update = 1\n n_chains = 96\n n_short_to_exclude = 20\n n_long_to_exclude = 400\n n_documents = 20\n use_efficient_proposal = True\n mode = 'RW'\n\n\[email protected]\ndef run(\n n_short_to_exclude,\n n_long_to_exclude,\n n_documents,\n method,\n n_warm_up_samples,\n n_samples,\n n_chains,\n epsilon,\n L_multiplier,\n L,\n total_travel_time,\n n_discrete_to_update,\n use_efficient_proposal,\n mode,\n):\n # Generate temp folder\n temp_folder = tempfile.TemporaryDirectory()\n temp_folder_name = temp_folder.name\n # Load data\n data_fname = 'ap_data.joblib'\n ap_data = joblib.load(data_fname)\n documents, K, mu, Sigma, beta = (\n ap_data['documents'],\n ap_data['K'],\n ap_data['mu'],\n ap_data['Sigma'],\n ap_data['beta'],\n )\n document_lengths = np.array([len(w) for w in documents])\n shortest_indices = np.argsort(document_lengths)\n indices = shortest_indices[\n np.floor(\n np.linspace(\n n_short_to_exclude, len(documents) - n_long_to_exclude, n_documents\n )\n ).astype(np.int32)\n ]\n ex.add_artifact(data_fname)\n for ind in indices:\n print(\n 'Working on document {} of length {}, method {}'.format(\n ind, len(documents[ind]), method\n )\n )\n if method == 'gibbs':\n output = joblib.Parallel(n_jobs=joblib.cpu_count())(\n joblib.delayed(draw_samples_gibbs)(\n n_warm_up_samples + n_samples,\n documents[ind],\n mu,\n Sigma,\n beta,\n use_efficient_proposal=use_efficient_proposal,\n )\n for _ in range(n_chains)\n )\n z_samples, eta_samples = list(zip(*output))\n z_samples, eta_samples = (np.stack(z_samples), np.stack(eta_samples))\n results = {\n 'eta': eta_samples,\n 'z': z_samples,\n 'n_warm_up_samples': n_warm_up_samples,\n }\n print(\n 'Mean: {}'.format(\n np.mean(\n eta_samples[:, n_warm_up_samples:].reshape((-1, K - 1)), axis=0\n )\n )\n )\n for ess_method in ['mean', 'bulk', 'tail']:\n print(\n 'Min ess {}: '.format(ess_method),\n get_min_ess(eta_samples[:, n_warm_up_samples:], method=ess_method),\n )\n elif method == 'pymc3':\n z_samples, eta_samples, accept_array = draw_samples_pymc3(\n n_warm_up_samples,\n n_samples,\n documents[ind],\n mu,\n Sigma,\n beta,\n n_chains=n_chains,\n )\n z_samples = z_samples.reshape((n_chains, -1, len(documents[ind])))\n eta_samples = eta_samples.reshape((n_chains, -1, K - 1))\n accept_array = accept_array.reshape((n_chains, -1))\n acceptance_rate = np.mean(accept_array, axis=1)\n print(\n 'Filtering out {} pathological chains out of {}'.format(\n np.sum(acceptance_rate <= 0.4), len(acceptance_rate)\n )\n )\n z_samples, eta_samples, accept_array = (\n 
z_samples[acceptance_rate > 0.4],\n eta_samples[acceptance_rate > 0.4],\n accept_array[acceptance_rate > 0.4],\n )\n print(np.mean(accept_array))\n results = {\n 'eta': eta_samples,\n 'z': z_samples,\n 'accept_array': accept_array,\n 'n_warm_up_samples': 0,\n }\n print('Mean: {}'.format(np.mean(eta_samples.reshape((-1, K - 1)), axis=0)))\n for ess_method in ['mean', 'bulk', 'tail']:\n print(\n 'Min ess {}: '.format(ess_method),\n get_min_ess(eta_samples, method=ess_method),\n )\n elif method == 'mixed_hmc':\n L = len(documents[ind]) * L_multiplier\n adaptive_step_size = np.array(np.diag(Sigma))\n adaptive_step_size /= np.sum(adaptive_step_size)\n output = joblib.Parallel(n_jobs=joblib.cpu_count())(\n joblib.delayed(draw_samples_mixed_hmc)(\n n_samples=n_warm_up_samples + n_samples,\n w=documents[ind],\n mu=mu,\n Sigma=Sigma,\n beta=beta,\n epsilon=epsilon,\n total_travel_time=total_travel_time,\n L=L,\n n_discrete_to_update=n_discrete_to_update,\n progbar=False,\n adaptive_step_size=adaptive_step_size,\n mode=mode,\n )\n for _ in range(n_chains)\n )\n z_samples, eta_samples, accept_array = list(zip(*output))\n z_samples, eta_samples, accept_array = (\n np.stack(z_samples),\n np.stack(eta_samples),\n np.stack(accept_array),\n )\n acceptance_rate = np.mean(accept_array, axis=1)\n print(\n 'Filtering out {} pathological chains'.format(\n np.sum(acceptance_rate <= 0.4)\n )\n )\n z_samples, eta_samples, accept_array = (\n z_samples[acceptance_rate > 0.4],\n eta_samples[acceptance_rate > 0.4],\n accept_array[acceptance_rate > 0.4],\n )\n print(np.mean(accept_array))\n results = {\n 'eta': eta_samples,\n 'z': z_samples,\n 'accept_array': accept_array,\n 'n_warm_up_samples': n_warm_up_samples,\n }\n print(\n 'Mean: {}'.format(\n np.mean(\n eta_samples[:, n_warm_up_samples:].reshape((-1, K - 1)), axis=0\n )\n )\n )\n for ess_method in ['mean', 'bulk', 'tail']:\n print(\n 'Min ess {}: '.format(ess_method),\n get_min_ess(eta_samples[:, n_warm_up_samples:], method=ess_method),\n )\n elif method == 'hmc_within_gibbs':\n output = joblib.Parallel(n_jobs=joblib.cpu_count())(\n joblib.delayed(draw_samples_hmc_within_gibbs)(\n n_samples=n_warm_up_samples + n_samples,\n w=documents[ind],\n mu=mu,\n Sigma=Sigma,\n beta=beta,\n epsilon=epsilon,\n L=L,\n )\n for _ in range(n_chains)\n )\n z_samples, eta_samples, accept_array = list(zip(*output))\n z_samples, eta_samples, accept_array = (\n np.stack(z_samples),\n np.stack(eta_samples),\n np.stack(accept_array),\n )\n acceptance_rate = np.mean(accept_array, axis=1)\n print(\n 'Filtering out {} pathological chains'.format(\n np.sum(acceptance_rate <= 0.4)\n )\n )\n z_samples, eta_samples, accept_array = (\n z_samples[acceptance_rate > 0.4],\n eta_samples[acceptance_rate > 0.4],\n accept_array[acceptance_rate > 0.4],\n )\n print(np.mean(accept_array))\n results = {\n 'eta': eta_samples,\n 'z': z_samples,\n 'accept_array': accept_array,\n 'n_warm_up_samples': n_warm_up_samples,\n }\n print(\n 'Mean: {}'.format(\n np.mean(\n eta_samples[:, n_warm_up_samples:].reshape((-1, K - 1)), axis=0\n )\n )\n )\n for ess_method in ['mean', 'bulk', 'tail']:\n print(\n 'Min ess {}: '.format(ess_method),\n get_min_ess(eta_samples[:, n_warm_up_samples:], method=ess_method),\n )\n elif method == 'dhmc':\n adaptive_step_size = np.array(np.diag(Sigma))\n adaptive_step_size /= np.sum(adaptive_step_size)\n output = joblib.Parallel(n_jobs=joblib.cpu_count())(\n joblib.delayed(draw_samples_dhmc)(\n n_samples=n_warm_up_samples + n_samples,\n w=documents[ind],\n mu=mu,\n Sigma=Sigma,\n 
beta=beta,\n epsilon=epsilon,\n L=L,\n progbar=False,\n adaptive_step_size=adaptive_step_size,\n )\n for _ in range(n_chains)\n )\n z_samples, eta_samples, accept_array = list(zip(*output))\n z_samples, eta_samples, accept_array = (\n np.stack(z_samples),\n np.stack(eta_samples),\n np.stack(accept_array),\n )\n acceptance_rate = np.mean(accept_array, axis=1)\n print(\n 'Filtering out {} pathological chains'.format(\n np.sum(acceptance_rate <= 0.4)\n )\n )\n z_samples, eta_samples, accept_array = (\n z_samples[acceptance_rate > 0.4],\n eta_samples[acceptance_rate > 0.4],\n accept_array[acceptance_rate > 0.4],\n )\n print(np.mean(accept_array))\n results = {\n 'eta': eta_samples,\n 'z': z_samples,\n 'accept_array': accept_array,\n 'n_warm_up_samples': n_warm_up_samples,\n }\n print(\n 'Mean: {}'.format(\n np.mean(\n eta_samples[:, n_warm_up_samples:].reshape((-1, K - 1)), axis=0\n )\n )\n )\n for ess_method in ['mean', 'bulk', 'tail']:\n print(\n 'Min ess {}: '.format(ess_method),\n get_min_ess(eta_samples[:, n_warm_up_samples:], method=ess_method),\n )\n else:\n raise ValueError('Unsupported method {}'.format(method))\n\n results_fname = '{}/results_document_{}.joblib'.format(temp_folder_name, ind)\n joblib.dump(results, results_fname)\n ex.add_artifact(results_fname)\n\n temp_folder.cleanup()\n\n\n# Gibbs experiments\nex.run(config_updates={'method': 'gibbs'})\n# PyMC3 experiments\nex.run(config_updates={'method': 'pymc3'})\n# mixed HMC experiments\nex.run(config_updates={'method': 'mixed_hmc', 'mode': 'gibbs'})\n# HMC-within-Gibbs experiments\nfor epsilon in [0.1, 0.2, 0.3]:\n for L in [400, 500, 600, 700, 800]:\n print('Working on hmc_within_gibbs, epsilon: {}, L: {}'.format(epsilon, L))\n ex.run(\n config_updates={'method': 'hmc_within_gibbs', 'epsilon': epsilon, 'L': L}\n )\n"
] | [
[
"numpy.diag",
"numpy.stack",
"numpy.mean",
"numpy.argsort",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zer0sh0t/wall_e | [
"be98c1517dcd481d177ab1bbe71312003fddbce0"
] | [
"wall_e/core.py"
] | [
"import math\nimport copy\nimport sympy as sp\nfrom functools import partial\nfrom scipy.optimize import minimize\n\n# import matplotlib.pyplot as plt\n# from matplotlib.animation import FuncAnimation\n\npprint = lambda inp: sp.pprint(inp)\n\nclass Robot():\n def __init__(self, name, type_, dh_params, masses, lengths, dimensions):\n '''\n format of inputs:\n type_(str): l - linear, r - rotary\n dh_params(list[list]): [[theta_0, d_0, alpha_0, a_0], [theta_1, d_1, alpha_1, a_1], ...]\n masses(list): [mass_0, mass_1, ..., mass_n]\n lengths(list): [length_0, length_1, ..., length_n]\n dimensions(list[list]): [[radius_0], [radius_1], ..., [radius_n]] for joints of circular cross-sections\n : [[a_0, b_0], [a_1, b_1], ..., [a_n, b_n]] for joints of rectangular cross-sections\n where a is the length and b is the breadth of the rectangular cross-section\n : [[radius_0], [a_1, b_1], [radius_2], ...] of course, it can be a combo of both\n '''\n self.name = name\n self.type = type_\n self.dh_params = dh_params\n self.masses = masses\n self.lengths = lengths\n self.dimensions = dimensions\n self.t = sp.symbols('t', real=True) # time\n self.clear_cache()\n\n # clear the cache if you want to call solve_fk() for the second with different set of dh_params\n def clear_cache(self):\n self.forward_params, self.forward_mats = [], []\n self.vel_params, self.acc_params = [], []\n self.tau_exprs = []\n\n def _screw_zx(self, i):\n if self.type[i] == 'r':\n theta = sp.Function(f'theta{i}')(self.t)\n vel = theta.diff(self.t)\n acc = vel.diff(self.t)\n d, alpha, a = sp.symbols(f'd{i} alpha{i} a{i}', real=True)\n elif self.type[i] == 'l':\n d = sp.Function(f'd{i}')(self.t)\n vel = d.diff(self.t)\n acc = vel.diff(self.t)\n theta, alpha, a = sp.symbols(f'theta{i} alpha{i} a{i}', real=True)\n\n ct, st, ca, sa = sp.cos(theta), sp.sin(theta), sp.cos(alpha), sp.sin(alpha)\n screw_mat = sp.Matrix([\n [ct, -st*ca, sa*st, a*ct],\n [st, ca*ct, -sa*ct, a*st],\n [0, sa, ca, d],\n [0, 0, 0, 1]\n ])\n return screw_mat, theta, d, alpha, a, vel, acc\n\n # forward kinematics\n def solve_fk(self, dh_params=None):\n if dh_params == None:\n dh_params = copy.deepcopy(self.dh_params)\n\n if len(self.forward_mats) == 0:\n mat_expr = sp.eye(4)\n for i in range(len(dh_params)):\n screw_mat_expr, theta, d, alpha, a, vel, acc = self._screw_zx(i)\n mat_expr = mat_expr * screw_mat_expr\n self.forward_params.append([theta, d, alpha, a])\n self.vel_params.append(vel)\n self.acc_params.append(acc)\n self.forward_mats.append(mat_expr)\n\n subs_dict = {}\n for p, dhp in zip(self.forward_params, dh_params):\n for p_, dhp_ in zip(p, dhp):\n subs_dict[p_] = dhp_\n \n final_mat_expr = self.forward_mats[-1]\n final_mat = final_mat_expr.subs(subs_dict)\n return final_mat_expr, final_mat\n \n def _cost_fn_ik(self, initial_guess, end_pos, dh_params):\n j = 0\n for i in range(len(dh_params)):\n if self.type[i] == 'r':\n dh_params[i][0] = initial_guess[j]\n j += 1 \n \n fk_mat = self.solve_fk(dh_params)[1]\n qx, qy, qz = float(fk_mat[0, 3]), float(fk_mat[1, 3]), float(fk_mat[2, 3])\n qx_req, qy_req, qz_req = end_pos\n cost = ((qx_req - qx) ** 2 + (qy_req - qy) ** 2 + (qz_req - qz) ** 2) ** 0.5\n return cost\n\n # inverse kinematics\n def solve_ik(self, end_pos, tol=None):\n if self.type.count('l') == len(self.dh_params):\n raise ValueError('can\\'t optimize joint angles of linear joints')\n else:\n dh_params = copy.deepcopy(self.dh_params)\n initial_guess = []\n for i in range(len(dh_params)):\n if self.type[i] == 'r':\n initial_guess.append(dh_params[i][0])\n 
cost_fn = partial(self._cost_fn_ik, end_pos=end_pos, dh_params=dh_params)\n\n if tol == None:\n tol = 3e-4\n result = minimize(cost_fn, initial_guess, tol=tol)\n optim_thetas = result.x\n\n j = 0\n final_theta_vals = []\n for i in range(len(dh_params)):\n if self.type[i] == 'r':\n theta = optim_thetas[j] \n dh_params[i][0] = theta\n final_theta_vals.append(theta)\n j += 1\n else:\n final_theta_vals.append(dh_params[i][0])\n \n qz_req = end_pos[2]\n fk_mat = self.solve_fk(dh_params)[1]\n return final_theta_vals, fk_mat\n\n def _get_q_idx(self, i):\n if self.type[i] == 'r':\n q_idx = 0\n elif self.type[i] == 'l':\n q_idx = 1\n return q_idx\n\n def _U_ij(self, i, j):\n q_idx = self._get_q_idx(j)\n U_ij = self.forward_mats[i].diff(self.forward_params[j][q_idx])\n return U_ij\n\n def _U_ijk(self, i, j, k):\n U_ij = self._U_ij(i, j)\n q_idx = self._get_q_idx(k)\n U_ijk = U_ij.diff(self.forward_params[k][q_idx])\n return U_ijk\n\n def _D_ic(self, i, c, n, Js):\n D_ic = 0\n lower_bound = max(i, c)\n for j in range(lower_bound, n):\n U_jc = self._U_ij(j, c)\n J_j = Js[j]\n U_ji_T = self._U_ij(j, i).T\n D_ic += sp.Trace(U_jc * J_j * U_ji_T).rewrite(sp.Sum)\n return D_ic\n\n def _inertial_force(self, i, c, n, Js):\n D_ic = self._D_ic(i, c, n, Js)\n q_c_ddot = self.acc_params[c]\n inertial_force = D_ic * q_c_ddot\n return inertial_force\n\n def _h_icd(self, i, c, d, n, Js):\n h_icd = 0\n lower_bound = max(i, c, d)\n for j in range(lower_bound, n):\n U_jcd = self._U_ijk(j, c, d)\n J_j = Js[j]\n U_ji_T = self._U_ij(j, i).T\n h_icd += sp.Trace(U_jcd * J_j * U_ji_T).rewrite(sp.Sum)\n return h_icd\n\n def _coriolis_force(self, i, c, d, n, Js):\n h_icd = self._h_icd(i, c, d, n, Js)\n q_c_dot, q_d_dot = self.vel_params[c], self.vel_params[d]\n coriolis_force = h_icd * q_c_dot * q_d_dot\n return coriolis_force\n\n def _gravitational_force(self, i, n, masses, g, rs):\n gravitational_force = 0\n for j in range(i, n):\n m_j = masses[j]\n U_ji = self._U_ij(j, i)\n jj_r = rs[j]\n val = - m_j * g * U_ji * jj_r\n gravitational_force += val[0, 0]\n return gravitational_force\n \n # inverse dynamics\n def solve_id(self, thetas, vels, accs, ds=None):\n '''\n source: https://www.youtube.com/playlist?list=PLbRMhDVUMngcdUbBySzyzcPiFTYWr4rV_ lecture 24 to 29\n\n format of inputs:\n thetas(list): [theta_0, theta_1, ..., theta_n]\n vels(list): [vel_0, vel_1, ..., vel_n]\n accs(list): [acc_0, acc_1, ..., acc_n]\n lengths(list): [length_0, length_1, ..., length_n]\n\n formulation of inverse dynamics:\n n: number of joints\n J: interia tensor\n if rectangular_joint: m: mass of the joint, l: length of the joint, b: breadth of the joint\n if cicular_joint: m: mass of the joint, r: radius of the joint\n T: transformation matrix\n q: angle(if rotary) or offset(if linear)\n ii_r: position vector of ith frame wrt ith frame\n ii_v: velocity vector of ith frame wrt ith frame\n tau: torque(if rotary) or force(if linear)\n\n if rectangular_joint:\n J = [\n [m*(a**2)/12, 0, 0, 0],\n [0, m*(l**2)/3, 0, -m*l/2],\n [0, 0, m*(b**2)/12, 0],\n [0, -m*(l**2)/2, 0, m]\n ] \n elif circular_joint:\n J = [\n [m*(l**2)/3, 0, 0, -m*l/2],\n [0, m*(r**2)/4, 0, 0],\n [0, 0, m*(r**2)/4, 0],\n [-m*l/2, 0, 0, m]\n ]\n\n g = [0, -9.81, 0, 0].T\n ii_r = [-L_i/2, 0, 0, 1]\n \n U_ij = d_i0_T / dq_j\n U_ijk = dU_ij / dq_k\n i0_v = sum_j_1_i(U_ij * q_j_dot) * ii_r # no need to compute this\n\n tau = sum_c_1_n(D_ic * q_c_ddot) + sum_c_1_n(sum_d_1_n(h_icd * q_c_dot * q_d_dot)) + C_i; i = 1 to n\n\n D_ic = sum_j_max(i, c)_n(Tr(U_jc * J_j * U_ji.T)); i, c = 1 to n # 
inertia term\n h_icd = sum_j_max(i, c, d)_n(Tr(U_jcd * J_j * U_ji.T)); i, c, d = 1 to n # coriolis and centrifugal term\n C_i = sum_j_i_n(- m_j * g * U_ji * jj_r); i = 1 to n # gravity term\n '''\n \n dh_params = copy.deepcopy(self.dh_params)\n lengths = copy.deepcopy(self.lengths)\n if ds == None:\n ds = [dhp[1] for dhp in self.dh_params]\n\n if len(self.tau_exprs) == 0:\n Js = []\n for m, l, d in zip(self.masses, lengths, self.dimensions):\n if len(d) == 1: # circular cross-section\n r = d[0]\n J = sp.Matrix([\n [m*(l**2)/3, 0, 0, -m*l/2],\n [0, m*(r**2)/4, 0, 0],\n [0, 0, m*(r**2)/4, 0],\n [-m*l/2, 0, 0, m]\n ])\n elif len(d) == 2: # rectangular cross-section\n a, b = d\n J = sp.Matrix([\n [m*(a**2)/12, 0, 0, 0],\n [0, m*(l**2)/3, 0, -m*l/2],\n [0, 0, m*(b**2)/12, 0],\n [0, -m*(l**2)/2, 0, m]\n ])\n Js.append(J)\n\n g = sp.Matrix([0, -9.81, 0, 0]).T\n rs = []\n for l in lengths:\n r = sp.Matrix([-l/2, 0, 0, 1])\n rs.append(r)\n\n n = len(dh_params)\n for i in range(n):\n inertial_force = 0\n coriolis_force = 0\n for c in range(n):\n inertial_force += self._inertial_force(i, c, n, Js)\n for d in range(n):\n coriolis_force += self._coriolis_force(i, c, d, n, Js)\n\n gravitational_force = self._gravitational_force(i, n, self.masses, g, rs) # C_i\n tau_expr = inertial_force + coriolis_force + gravitational_force\n self.tau_exprs.append(tau_expr)\n \n '''\n order of substitution: a, alpha, acc, vel, theta, d\n values of higher order derivatives are substituted first and then the values of the lower ones,\n this is done to make sure that the lower order derivative values are not overwritten by the higher order ones\n to better understand this phenemenon, run this block of code:\n\n t = sp.symbols('t', real=True)\n theta_expr = sp.Function('theta')(t)\n vel_expr, acc_expr = theta_expr.diff(t), theta_expr.diff(t, 2)\n\n theta, vel, acc = 10, 20, 30\n eq = theta_expr + vel_expr + acc_expr # the expected value of this equation is 60\n value_0 = eq.subs(theta_expr, theta).subs(vel_expr, vel). 
subs(acc_expr, acc)\n value_1 = eq.subs(acc_expr, acc).subs(vel_expr, vel).subs(theta_expr, theta)\n print(f'value obtained by substituting in this order: theta, val, acc: {value_0}')\n print(f'value obtained by substituting in this order: acc, val, theta: {value_1}')\n '''\n\n alpha_a_dict = {}\n for p, dhp in zip(self.forward_params, dh_params):\n for i in range(len(p)):\n if i == 0:\n pass\n elif i == 1:\n pass\n else:\n alpha_a_dict[p[i]] = dhp[i]\n\n for i in range(len(self.tau_exprs)):\n tau_expr = self.tau_exprs[i]\n actual_tau_expr = tau_expr.subs(alpha_a_dict)\n self.tau_exprs[i] = actual_tau_expr\n\n acc_dict = {}\n vel_dict = {}\n theta_dict = {}\n d_dict = {}\n for p, t, ds in zip(self.forward_params, thetas, ds):\n for i in range(len(p)):\n if i == 0:\n theta_dict[p[i]] = t\n elif i == 1:\n d_dict[p[i]] = ds\n else:\n pass\n for vp, v in zip(self.vel_params, vels):\n vel_dict[vp] = v\n for ap, a in zip(self.acc_params, accs):\n acc_dict[ap] = a\n\n taus = []\n for tau_expr in self.tau_exprs:\n tau = tau_expr.subs(acc_dict).subs(vel_dict).subs(theta_dict).subs(d_dict)\n taus.append(tau)\n return self.tau_exprs, taus\n\n def _get_vals(self, list_):\n n = len(self.dh_params)\n thetas, ds, vels, accs = [], [], [], []\n j = 0\n for i in range(len(list_)):\n val = list_[i]\n if 0 <= i < n:\n if self.type[j] == 'r':\n thetas.append(val) # this value will be optimized\n ds.append(self.dh_params[j][1]) # whereas this value won't be optimized as this joint can't change it's length ##################\n elif self.type[j] == 'l':\n ds.append(val) # this value will be optimized\n thetas.append(self.dh_params[j][0]) # whereas this value won't be optimized as this joint can't change its angle\n j += 1\n elif n <= i < 2*n:\n vels.append(val)\n else:\n accs.append(val)\n return thetas, ds, vels, accs\n\n def _cost_fn_fd(self, initial_guess, taus_req):\n thetas, ds, vels, accs = self._get_vals(initial_guess)\n taus = self.solve_id(thetas, vels, accs, ds)[1]\n cost = 0\n for tr, t in zip(taus_req, taus):\n cost += (tr - t) ** 2\n cost = cost ** 0.5\n return cost\n\n # forward dynamics\n def solve_fd(self, taus_req, tol=None):\n initial_guess = [0 for _ in range(3*len(self.dh_params))]\n cost_fn = partial(self._cost_fn_fd, taus_req=taus_req)\n if tol == None:\n tol = 3e-4\n\n result = minimize(cost_fn, initial_guess, tol=tol)\n optim_vals = result.x\n thetas, ds, vels, accs = self._get_vals(optim_vals)\n taus = self.solve_id(thetas, vels, accs, ds)[1]\n optim_vals = {'thetas': thetas, 'offsets(d)': ds, 'vels': vels, 'accs': accs}\n return optim_vals, taus\n\n def __repr__(self):\n return f'Robot(name={self.name}, type={self.type})'\n \n# experimental stuff \n#########################################\n# def _get_pts(self, s_pt, length, angle, d, init_z=0):\n# x, y, z = s_pt\n# end_x = x + length * math.cos(angle)\n# end_y = y + length * math.sin(angle)\n# end_z = z + d + init_z\n# offset_pts = (end_x, end_y, z + init_z)\n# end_pts = (end_x, end_y, end_z)\n# return offset_pts, end_pts\n# \n# def _render(self, angles, ds, init_z, reinit):\n# if angles == None:\n# angles = [dhp[0] for dhp in self.dh_params]\n# if ds == None:\n# ds = [dhp[1] for dhp in self.dh_params]\n# lengths = copy.deepcopy(self.lengths)\n# \n# if reinit:\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# else:\n# fig = self.fig\n# ax = self.ax\n# ax.clear()\n# ax.set_title(f'{self.name}')\n# \n# s_pt = (0, 0, 0)\n# for i in range(len(angles)):\n# if i != 0:\n# angle = angles[i] + angles[i-1]\n# iz = 0\n# c = 
'r'\n# else:\n# angle = angles[i]\n# iz = init_z\n# c = 'm' # base\n# \n# o_pt, e_pt = self._get_pts(s_pt, lengths[i], angle, ds[i], iz)\n# ax.scatter(s_pt[2], s_pt[1], zs=s_pt[0], c=c) # current point\n# ax.scatter(o_pt[2], o_pt[1], zs=o_pt[0], c='r') # offset point\n# \n# # s_pt -> o_pt -> e_pt\n# ax.plot([s_pt[2], o_pt[2]], [s_pt[1], o_pt[1]], zs=[s_pt[0], o_pt[0]], c='b')\n# ax.plot([o_pt[2], e_pt[2]], [o_pt[1], e_pt[1]], zs=[o_pt[0], e_pt[0]], c='b')\n# s_pt = e_pt\n# ax.scatter(s_pt[2], s_pt[1], zs=s_pt[0], c='g') # end effector\n# \n# # render the robot at home position provided in the dh paramters\n# def render(self, angles=None, ds=None, init_z=0):\n# self._render(angles, ds, init_z, True)\n# plt.show()\n# \n# def move(self, end_pos=None, thetas=None, ds=None, ret=False):\n# '''\n# moves the robot to the given position/angle\n# provide either end_pos or thetas\n# \n# end_pos(tuple): (x_final, y_final, z_final)\n# thetas(list): [theta_0, theta_1, ...]\n# optional:\n# ds(list): [d_0, d_1, ...]\n# '''\n# if len(self.forward_mats) == 0:\n# _, _ = self.solve_fk() # collect cache\n# \n# if end_pos != None: # get angles using inveres kinematics\n# thetas, _ = self.solve_ik(end_pos)\n# elif thetas != None:\n# pass # we've got everything we need\n# else:\n# raise ValueError('please specify either the end position or the joint angles!!')\n# \n# if ds == None:\n# home_z = sum([dhp[1] for dhp in self.dh_params])\n# else:\n# home_z = sum(ds)\n# \n# # if end_pos[2] > home_z:\n# # init_z = end_pos[2] - home_z\n# # else:\n# # init_z = - (abs(end_pos[2]) - home_z)\n# init_z = end_pos[2] - home_z\n# \n# if ret:\n# return thetas, init_z\n# else:\n# self.render(angles=thetas, ds=ds, init_z=init_z)\n# \n# def _cubic_fn(self, q_i, q_f, ts, t):\n# val = q_i + (3 * (q_f - q_i) / (ts**2)) * (t**2) - (2 * (q_f - q_i) / (ts**3)) * (t**3)\n# return val\n# \n# def _cubic_dot_fn(self, q_i, q_f, vel_i, vel_f, ts, t):\n# val = q_i + vel_i * t + ((3 * (q_f - q_i) / (ts**2)) - (2 * vel_i / ts) - (vel_f / ts)) * (t**2) + \\\n# ((- 2 * (q_f - q_i) / (ts**3)) + ((vel_f + vel_i) / (ts**2))) * (t**3)\n# return val\n# \n# def _fifth_fn(self, th_i, th_f, vel_i, vel_f, acc_i, acc_f, ts, t):\n# val = th_i + vel_i * t + (acc_i * (t**2) / 2) + ((20 * (th_f - th_i) - (8 * vel_f + 12 * vel_i) * ts - \\\n# (3 * acc_i - acc_f) * (ts**2)) / 2 * (ts**3)) * (t**3) + ((30 * (th_i - th_f) + (14 * vel_f + 16 * vel_i) * ts + \\\n# (3 * acc_i - 2 * acc_f) * (ts**2)) / 2 * (ts**4)) * (t**4)+ ((12 * (th_f - th_i) - 6 * (vel_f + vel_i) * ts - \\\n# (acc_i - acc_f) * (ts**2)) / 2 * (ts**5)) * (t**5) \n# return val\n# \n# def _create_anim(self):\n# self.fig = plt.figure()\n# self.ax = self.fig.add_subplot(111, projection='3d')\n# \n# def _anim_fn(self, i, thetas_across_time, ds_across_time, init_z):\n# z = self._cubic_fn(0, init_z, len(thetas_across_time)-1, i) # smooth translation towards the end z plane\n# self._render(angles=thetas_across_time[i], ds=ds_across_time[i], init_z=z, reinit=False) \n# \n# def move_traj(self, ts, end_pos=None, final_angles=None, final_ds=None, fn='cubic', final_vels=None, final_accs=None):\n# '''\n# plots the trajectory of the robot\n# provide either end_pos or final_angles\n# \n# ts(int): time taken by the robot to reach end position/final angles\n# end_pos(tuple): (x_final, y_final, z_final)\n# final_angles(list): [theta_0, theta_1, ...]\n# \n# fn(str): 'cubic', 'cubic_dot' or 'fifth'\n# 'cubic' - use this if final_vels == None\n# 'cubic_dot' - use this if final_vels != None but final_accs == None\n# 'fifth' 
- use this if final_vels != None and final_accs != None\n# \n# optional:\n# final_ds(list): [d_0, d_1, ...] # for linear joints\n# final_vels(list): [vel_0, vel_1, ...]\n# final_accs(list): [acc_0, acc_1, ...]\n# '''\n# if fn not in ['cubic', 'cubic_dot', 'fifth']:\n# raise ValueError('please input a valid trajectory function!!')\n# \n# init_angles = [dhp[0] for dhp in self.dh_params]\n# init_ds = [dhp[1] for dhp in self.dh_params]\n# if end_pos != None:\n# final_angles, init_z = self.move(end_pos, ret=True)\n# \n# if fn != 'cubic':\n# vel_i = 0\n# if fn == 'fifth':\n# acc_i = 0\n# \n# thetas_across_time = []\n# ds_across_time = []\n# for t in range(ts):\n# thetas = []\n# ds = []\n# \n# for i in range(len(self.dh_params)):\n# if self.type[i] == 'r':\n# q_i, q_f = init_angles[i], final_angles[i]\n# elif self.type[i] == 'l':\n# q_i = init_ds[i]\n# if final_ds != None:\n# q_f = final_ds[i]\n# else:\n# q_f = q_i\n# \n# if fn != 'cubic':\n# vel_f = final_vels[i]\n# if fn == 'fifth':\n# acc_f = final_accs[i]\n# \n# if q_f != q_i: # happens only when the linear joint's offset is not changed across time\n# if fn == 'cubic':\n# val = self._cubic_fn(q_i, q_f, ts-1, t)\n# elif fn == 'cubic_dot':\n# val = self._cubic_dot_fn(q_i, q_f, vel_i, vel_f, ts-1, t)\n# elif fn == 'fifth':\n# val = self._fifth_fn(q_i, q_f, vel_i, vel_f, acc_i, acc_f, ts-1, t)\n# else:\n# val = q_i\n# \n# if self.type[i] == 'r':\n# thetas.append(val)\n# ds.append(self.dh_params[i][1])\n# elif self.type[i] == 'l':\n# ds.append(val)\n# thetas.append(self.dh_params[i][0])\n# \n# thetas_across_time.append(thetas)\n# ds_across_time.append(ds)\n# \n# self._create_anim()\n# anim_fn = partial(self._anim_fn, thetas_across_time=thetas_across_time, ds_across_time=ds_across_time, init_z=init_z)\n# anim = FuncAnimation(self.fig, anim_fn, frames=ts, repeat=False)\n# plt.show()\n"
] | [
[
"scipy.optimize.minimize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
formermagic/git-t5 | [
"5ee27d1be72988986f03659bd79fc2f9680e5372"
] | [
"git_t5/core/data_module.py"
] | [
"import copy\nimport typing\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union\n\nimport datasets as hfds\nimport jax\nimport numpy as np\nfrom git_t5.core import HFDatasetConfig, LocalDatasetConfig, MultitaskDatasetConfig\nfrom git_t5.data import (\n DataCollatorForT5MLM,\n Dataset,\n T5Dataset,\n T5MultitaskDataset,\n compute_input_and_target_lengths,\n prepare_dataset,\n)\nfrom git_t5.utils import resolve_object, stack_mappings\nfrom omegaconf import MISSING\nfrom torch.utils import data\nfrom transformers import AutoTokenizer, PreTrainedTokenizerBase\nfrom transformers.tokenization_utils_base import VERY_LARGE_INTEGER\n\nif TYPE_CHECKING:\n from git_t5.cli.train_model import Config\n from git_t5.core.trainer import T5Trainer\nelse:\n Config = Any\n T5Trainer = Any\n\nT = typing.TypeVar(\"T\")\n\n\ndef tokenize_fn(\n tokenizer: PreTrainedTokenizerBase,\n column: str,\n) -> Callable[..., Dict[str, Union[List[List[int]], np.ndarray]]]:\n def wrap_fn(\n examples: Dict[str, List[str]]\n ) -> Dict[str, Union[List[List[int]], np.ndarray]]:\n return tokenizer(\n examples[column],\n truncation=False,\n return_attention_mask=False,\n ) # type: ignore\n\n return wrap_fn\n\n\ndef select_subset(\n dataset: hfds.Dataset,\n size: Union[float, int],\n seed: Optional[int] = None,\n) -> hfds.Dataset:\n num_samples: int\n if isinstance(size, int) or size > 1:\n num_samples = min(int(size), len(dataset))\n else:\n num_samples = int(len(dataset) * size)\n\n rng = np.random.default_rng(seed)\n indices = rng.integers(0, len(dataset), (num_samples,))\n\n return dataset.select(indices)\n\n\ndef _collate_fn(samples: List[T]) -> T:\n assert len(samples) == 1\n return samples[0]\n\n\n@dataclass\nclass DataModuleConfig:\n pass\n\n\n@dataclass\nclass T5DataModuleConfig(DataModuleConfig):\n validation_size: float = 0.05\n max_sequence_length: Optional[int] = None\n train_batch_size: int = 8\n valid_batch_size: int = 8\n num_proc: Optional[int] = None\n num_workers: Optional[int] = None\n mlm_probability: float = 0.15\n mean_noise_span_length: float = 3.0\n decoder_start_token_id: int = MISSING\n limit_train_size: float = 1.0\n limit_valid_size: float = 1.0\n\n\n@dataclass\nclass T5DataModule:\n config: Config\n datasets: Dict[str, Dataset]\n tokenizer: PreTrainedTokenizerBase\n data_collator: DataCollatorForT5MLM\n max_sequence_length: int\n input_length: int\n target_length: int\n trainer: Optional[T5Trainer] = None\n\n @classmethod\n def from_config(cls, config: Config) -> \"T5DataModule\":\n tokenizer = cls.load_tokenizer(config)\n max_sequence_length = config.data.max_sequence_length or VERY_LARGE_INTEGER\n max_sequence_length = min(max_sequence_length, tokenizer.model_max_length)\n\n input_length, target_length = compute_input_and_target_lengths(\n max_sequence_length,\n noise_density=config.data.mlm_probability,\n mean_noise_span_length=config.data.mean_noise_span_length,\n extra_tokens_per_span_inputs=1,\n extra_tokens_per_span_targets=1,\n )\n\n eos_token_id = tokenizer.eos_token_id\n pad_token_id = tokenizer.pad_token_id\n sentinel_token_id = tokenizer.convert_tokens_to_ids(\"<extra_id_0>\") # type: ignore\n if eos_token_id is None:\n raise ValueError(\"Tokenizer must have an existing `eos_token_id` value.\")\n if pad_token_id is None:\n raise ValueError(\"Tokenizer must have an existing `pad_token_id` value.\")\n if sentinel_token_id is None:\n raise ValueError(\"Tokenizer must have an existing `eos_token_id` value.\")\n\n data_collator = 
DataCollatorForT5MLM(\n tokenizer=tokenizer,\n noise_density=config.data.mlm_probability,\n mean_noise_span_length=config.data.mean_noise_span_length,\n input_length=max_sequence_length,\n target_length=target_length,\n eos_token_id=eos_token_id,\n pad_token_id=pad_token_id,\n sentinel_token_id=sentinel_token_id,\n decoder_start_token_id=config.data.decoder_start_token_id,\n )\n\n datasets = cls.load_datasets(config, tokenizer, input_length)\n datasets = stack_mappings(datasets)\n datasets = {\n \"train\": cls.get_dataset(\n datasets[\"train\"],\n batch_size=config.data.train_batch_size,\n collate_fn=data_collator,\n shuffle=True,\n drop_last=True,\n seed=config.training.seed,\n ),\n \"valid\": cls.get_dataset(\n datasets[\"validation\"],\n batch_size=config.data.valid_batch_size,\n collate_fn=data_collator,\n shuffle=False,\n drop_last=True,\n seed=None,\n ),\n }\n\n return T5DataModule(\n config,\n datasets=datasets,\n tokenizer=tokenizer,\n data_collator=data_collator,\n max_sequence_length=max_sequence_length,\n input_length=input_length,\n target_length=target_length,\n )\n\n def train_dataloader(self) -> data.DataLoader:\n num_workers = self.config.data.num_workers or 0\n return data.DataLoader(\n self.datasets[\"train\"],\n collate_fn=_collate_fn,\n num_workers=num_workers,\n )\n\n def valid_dataloader(self) -> data.DataLoader:\n num_workers = self.config.data.num_workers or 0\n return data.DataLoader(\n self.datasets[\"valid\"],\n collate_fn=_collate_fn,\n num_workers=num_workers,\n )\n\n @classmethod\n def get_dataset(\n cls,\n datasets: List[hfds.Dataset],\n batch_size: int,\n collate_fn: typing.Callable[..., Dict[str, np.ndarray]],\n shuffle: bool,\n drop_last: bool,\n seed: Optional[int],\n ) -> Dataset:\n batch_size = batch_size * jax.device_count()\n\n if len(datasets) == 1:\n dataset = T5Dataset(\n datasets[0],\n batch_size=batch_size,\n collate_fn=collate_fn,\n shuffle=shuffle,\n drop_last=drop_last,\n seed=seed,\n )\n else:\n dataset = T5MultitaskDataset(\n datasets,\n batch_size=batch_size,\n collate_fn=collate_fn,\n shuffle=shuffle,\n drop_last=drop_last,\n seed=seed,\n )\n\n return dataset\n\n @classmethod\n def prepare_dataset(\n cls,\n dataset: hfds.DatasetDict,\n config: Config,\n tokenizer: PreTrainedTokenizerBase,\n input_length: int,\n ) -> hfds.DatasetDict:\n if config.dataset.column_name is None:\n raise ValueError(\n \"You must provide a `column_name` to specify which column of the dataset to use.\"\n )\n\n dataset = prepare_dataset(\n dataset,\n tokenize_fn(tokenizer, config.dataset.column_name),\n input_length=input_length,\n batch_size=128,\n load_from_cache_file=not config.training.overwrite_cache,\n num_proc=config.data.num_proc,\n )\n\n # limit preprocessed dataset if needed\n dataset = cls.limit_dataset(dataset, config)\n\n return dataset\n\n @classmethod\n def load_datasets(\n cls,\n config: Config,\n tokenizer: PreTrainedTokenizerBase,\n input_length: int,\n ) -> List[Dict[str, hfds.Dataset]]:\n dataset_config = resolve_object(config.dataset)\n datasets: List[Dict[str, hfds.Dataset]] = []\n if isinstance(dataset_config, MultitaskDatasetConfig):\n dataset_config.resolve()\n for _, dataset_config in dataset_config.tasks.items():\n config = copy.deepcopy(config)\n config.dataset = dataset_config\n dataset = cls.load_dataset(config)\n dataset = cls.prepare_dataset(dataset, config, tokenizer, input_length)\n datasets.append(dataset)\n else:\n dataset = cls.load_dataset(config)\n dataset = cls.prepare_dataset(dataset, config, tokenizer, input_length)\n 
datasets.append(dataset)\n\n return datasets\n\n @classmethod\n def load_dataset(cls, config: Config) -> hfds.DatasetDict:\n dataset_config = resolve_object(config.dataset)\n if isinstance(dataset_config, LocalDatasetConfig):\n dataset = hfds.load_from_disk(dataset_config.dataset_path)\n if not isinstance(dataset, hfds.DatasetDict):\n dataset = hfds.DatasetDict(train=dataset)\n\n if \"validation\" not in dataset.keys():\n dataset = dataset[\"train\"].train_test_split(\n test_size=config.data.validation_size,\n load_from_cache_file=not config.training.overwrite_cache,\n )\n dataset[\"validation\"] = dataset.pop(\"test\")\n elif isinstance(dataset_config, HFDatasetConfig):\n dataset = hfds.load_dataset(\n dataset_config.dataset_name,\n dataset_config.dataset_config,\n cache_dir=config.training.cache_dir,\n )\n\n if not isinstance(dataset, hfds.DatasetDict):\n dataset = hfds.DatasetDict(train=dataset)\n\n if \"validation\" not in dataset.keys():\n valid_percentage = int(config.data.validation_size * 100)\n dataset[\"validation\"] = hfds.load_dataset(\n dataset_config.dataset_name,\n dataset_config.dataset_config,\n split=f\"train[:{valid_percentage}%]\",\n cache_dir=config.training.cache_dir,\n )\n dataset[\"train\"] = hfds.load_dataset(\n dataset_config.dataset_name,\n dataset_config.dataset_config,\n split=f\"train[{valid_percentage}%:]\",\n cache_dir=config.training.cache_dir,\n )\n else:\n raise ValueError(\"Unknown dataset type provided.\")\n\n # limit loaded dataset if needed\n dataset = cls.limit_dataset(dataset, config)\n\n return dataset\n\n @classmethod\n def limit_dataset(\n cls,\n dataset: hfds.DatasetDict,\n config: Config,\n ) -> hfds.DatasetDict:\n if config.data.limit_train_size != 1:\n dataset[\"train\"] = select_subset(\n dataset[\"train\"],\n config.data.limit_train_size,\n config.training.seed,\n )\n\n if config.data.limit_valid_size != 1:\n dataset[\"validation\"] = select_subset(\n dataset[\"validation\"],\n config.data.limit_valid_size,\n config.training.seed,\n )\n\n return dataset\n\n @classmethod\n def load_tokenizer(cls, config: Config) -> PreTrainedTokenizerBase:\n tokenizer = AutoTokenizer.from_pretrained(\n config.tokenizer.tokenizer_path,\n use_fast=config.tokenizer.use_fast,\n cache_dir=config.training.cache_dir,\n )\n\n return tokenizer\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ISISNeutronMuon/SScanSS-2 | [
"aa70107c8771e0ed0c1202a683ad94de84dff411"
] | [
"sscanss/core/scene/renderer.py"
] | [
"import ctypes\nimport numpy as np\nfrom OpenGL import GL, error\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom .camera import Camera, world_to_screen, screen_to_world\nfrom .node import Node, InstanceRenderNode, BatchRenderNode\nfrom .scene import Scene\nfrom .shader import DefaultShader, GouraudShader\nfrom ..geometry.colour import Colour\nfrom ..math.matrix import Matrix44\nfrom ..math.vector import Vector3\nfrom ..util.misc import Attributes\nfrom ...config import settings\n\n\nclass OpenGLRenderer(QtWidgets.QOpenGLWidget):\n \"\"\"Provides OpenGL widget for draw 3D scene for the sample setup and instrument\n\n :param parent: main window instance\n :type parent: MainWindow\n \"\"\"\n pick_added = QtCore.pyqtSignal(object, object)\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n\n self.scene = Scene()\n self.show_bounding_box = False\n self.show_coordinate_frame = True\n self.picks = []\n self.picking = False\n self.default_font = QtGui.QFont(\"Times\", 10)\n self.error = False\n self.custom_error_handler = None\n self.shader_programs = {}\n\n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n\n def cleanup(self):\n self.makeCurrent()\n del self.scene\n for key in self.shader_programs.keys():\n self.shader_programs[key].destroy()\n self.doneCurrent()\n\n @property\n def picking(self):\n return self._picking\n\n @picking.setter\n def picking(self, value):\n \"\"\"Enables/Disables point picking\n\n :param value: indicates if point picking is enabled\n :type value: bool\n \"\"\"\n self._picking = value\n if value:\n self.setCursor(QtCore.Qt.CrossCursor)\n else:\n self.setCursor(QtCore.Qt.ArrowCursor)\n\n def initializeGL(self):\n try:\n GL.glClearColor(*Colour.white())\n GL.glColor4f(*Colour.black())\n\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glDisable(GL.GL_CULL_FACE)\n GL.glEnable(GL.GL_MULTISAMPLE)\n\n number_of_lights = self.initLights()\n # Create and compile GLSL shaders program\n self.shader_programs['mesh'] = GouraudShader(number_of_lights)\n self.shader_programs['default'] = DefaultShader()\n\n self.context().aboutToBeDestroyed.connect(self.cleanup)\n\n except error.GLError:\n self.parent.showMessage('An error occurred during OpenGL initialization. 
'\n 'The minimum OpenGL requirement for this software is version 2.0.\\n\\n'\n 'This error may be caused by:\\n'\n '* A missing or faulty graphics driver installation.\\n'\n '* Accessing SScanSS 2 from a remote connection with GPU rendering disabled.\\n\\n'\n 'The software will be closed now.'\n )\n raise\n\n def initLights(self):\n \"\"\"Sets up light properties\"\"\"\n ambient = [0.0, 0.0, 0.0, 1.0]\n diffuse = [0.5, 0.5, 0.5, 1.0]\n specular = [0.2, 0.2, 0.2, 1.0]\n\n # set up light direction\n front = [0.0, 0.0, 1.0, 0.0]\n back = [0.0, 0.0, -1.0, 0.0]\n left = [-1.0, 0.0, 0.0, 0.0]\n right = [1.0, 0.0, 0.0, 0.0]\n top = [0.0, 1.0, 0.0, 0.0]\n bottom = [0.0, -1.0, 0.0, 0.0]\n directions = {GL.GL_LIGHT0: front, GL.GL_LIGHT1: back, GL.GL_LIGHT2: left,\n GL.GL_LIGHT3: right, GL.GL_LIGHT4: top, GL.GL_LIGHT5: bottom}\n\n for light, direction in directions.items():\n GL.glLightfv(light, GL.GL_AMBIENT, ambient)\n GL.glLightfv(light, GL.GL_DIFFUSE, diffuse)\n GL.glLightfv(light, GL.GL_SPECULAR, specular)\n GL.glLightfv(light, GL.GL_POSITION, direction)\n\n GL.glEnable(light)\n\n GL.glEnable(GL.GL_LIGHTING)\n\n return len(directions)\n\n def resizeGL(self, width, height):\n GL.glViewport(0, 0, width, height)\n GL.glMatrixMode(GL.GL_PROJECTION)\n self.scene.camera.aspect = width / height\n GL.glLoadTransposeMatrixf(self.scene.camera.projection)\n\n def paintGL(self):\n if self.scene.invalid:\n if self.error:\n return\n\n self.error = True\n\n if self.custom_error_handler is not None:\n self.custom_error_handler()\n self.scene.camera.reset()\n return\n\n self.error = False\n\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadTransposeMatrixf(self.scene.camera.projection)\n\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadTransposeMatrixf(self.scene.camera.model_view)\n\n if self.show_coordinate_frame:\n self.renderAxis()\n\n for node in self.scene.nodes:\n self.recursiveDraw(node)\n\n if self.show_bounding_box:\n self.renderBoundingBox()\n\n if self.picks:\n self.renderPicks()\n\n def recursiveDraw(self, node):\n \"\"\"Recursive renders node from the scene with its children\n\n :param node: node\n :type: Node\n \"\"\"\n if not node.visible:\n return\n\n GL.glPushMatrix()\n GL.glPushAttrib(GL.GL_CURRENT_BIT)\n GL.glMultTransposeMatrixf(node.transform)\n\n mode = Node.RenderMode.Solid if node.render_mode is None else node.render_mode\n\n if mode == Node.RenderMode.Solid:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n elif mode == Node.RenderMode.Wireframe:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n else:\n GL.glDepthMask(GL.GL_FALSE)\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_ZERO, GL.GL_SRC_COLOR)\n\n self.draw(node)\n\n # reset OpenGL State\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n GL.glDepthMask(GL.GL_TRUE)\n GL.glDisable(GL.GL_BLEND)\n\n for child in node.children:\n self.recursiveDraw(child)\n\n GL.glPopAttrib()\n GL.glPopMatrix()\n\n def draw(self, node):\n \"\"\"Renders a leaf node (node with no child) from the scene\n\n :param node: leaf node\n :type node: Node\n \"\"\"\n program = self.shader_programs['default']\n if node.vertices.size > 0 and node.indices.size > 0:\n if node.normals.size > 0:\n program = self.shader_programs['mesh']\n\n program.bind()\n node.buffer.bind()\n\n primitive = GL.GL_TRIANGLES if node.render_primitive == Node.RenderPrimitive.Triangles else GL.GL_LINES\n\n if isinstance(node, InstanceRenderNode):\n self.drawInstanced(node, primitive)\n elif isinstance(node, 
BatchRenderNode):\n self.drawBatch(node, primitive)\n else:\n self.drawNode(node, primitive)\n\n node.buffer.release()\n program.release()\n\n def drawNode(self, node, primitive):\n \"\"\"Renders a leaf node (node with no child) from the scene\n\n :param node: leaf node\n :type node: Node\n :param primitive: OpenGL primitive to render\n :type primitive: OpenGL.constant.IntConstant\n \"\"\"\n node.buffer.bind()\n if node.selected:\n GL.glColor4f(*settings.value(settings.Key.Selected_Colour))\n else:\n GL.glColor4f(*node.colour.rgbaf)\n\n if node.outlined:\n self.drawOutline(primitive, node.indices)\n\n GL.glDrawElements(primitive, node.buffer.count, GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))\n\n def drawInstanced(self, node, primitive):\n \"\"\"Renders a instanced node from the scene\n\n :param node: leaf node\n :type node: InstanceRenderNode\n :param primitive: OpenGL primitive to render\n :type primitive: OpenGL.constant.IntConstant\n \"\"\"\n for index, transform in enumerate(node.per_object_transform):\n GL.glPushMatrix()\n GL.glMultTransposeMatrixf(transform)\n if node.selected[index]:\n GL.glColor4f(*settings.value(settings.Key.Selected_Colour))\n else:\n GL.glColor4f(*node.per_object_colour[index].rgbaf)\n\n if node.outlined[index]:\n self.drawOutline(primitive, node.buffer.count)\n\n GL.glDrawElements(primitive, node.buffer.count, GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))\n GL.glPopMatrix()\n\n def drawBatch(self, node, primitive):\n \"\"\"Renders a batch node from the scene\n\n :param node: leaf node\n :type node: BatchRenderNode\n :param primitive: OpenGL primitive to render\n :type primitive: OpenGL.constant.IntConstant\n \"\"\"\n start = 0\n for index, end in enumerate(node.batch_offsets):\n if node.selected[index]:\n GL.glColor4f(*settings.value(settings.Key.Selected_Colour))\n else:\n GL.glColor4f(*node.per_object_colour[index].rgbaf)\n GL.glPushMatrix()\n t = Matrix44.identity() if not node.per_object_transform else node.per_object_transform[index]\n GL.glMultTransposeMatrixf(t)\n\n count = end - start\n offset = start * node.vertices.itemsize\n\n if node.outlined[index]:\n self.drawOutline(primitive, count, offset)\n\n GL.glDrawElements(primitive, count, GL.GL_UNSIGNED_INT, ctypes.c_void_p(offset))\n\n GL.glPopMatrix()\n start = end\n\n def drawOutline(self, primitive, count, offset=0):\n \"\"\"Renders the red outline of the bound vertex array\n\n :param primitive: OpenGL primitive to render\n :type primitive: OpenGL.constant.IntConstant\n :param count: number of elements in array to draw\n :type count: int\n :param offset: start index in vertex array\n :type offset: int\n \"\"\"\n old_colour = GL.glGetDoublev(GL.GL_CURRENT_COLOR)\n old_line_width = GL.glGetInteger(GL.GL_LINE_WIDTH)\n polygon_mode = GL.glGetIntegerv(GL.GL_POLYGON_MODE)\n GL.glColor3f(1, 0, 0)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n GL.glLineWidth(3)\n GL.glCullFace(GL.GL_FRONT)\n GL.glEnable(GL.GL_CULL_FACE)\n # First Pass\n GL.glDrawElements(primitive, count, GL.GL_UNSIGNED_INT, ctypes.c_void_p(offset))\n\n GL.glColor4dv(old_colour)\n GL.glLineWidth(old_line_width)\n GL.glDisable(GL.GL_CULL_FACE)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, polygon_mode[0])\n\n def pickEvent(self, event):\n \"\"\"Custom event for point picking\n\n :param event: mouse event\n :type event: QtGui.QMouseEvent\n \"\"\"\n if event.buttons() != QtCore.Qt.LeftButton:\n return\n\n point = event.pos()\n v1, valid1 = self.unproject(point.x(), point.y(), 0.0)\n v2, valid2 = self.unproject(point.x(), point.y(), 1.0)\n if not 
valid1 or not valid2:\n return\n self.pick_added.emit(v1, v2)\n\n def renderPicks(self):\n \"\"\"Renders picked points in the scene\"\"\"\n size = settings.value(settings.Key.Measurement_Size)\n\n node = InstanceRenderNode(len(self.picks))\n node.render_mode = Node.RenderMode.Solid\n node.render_primitive = Node.RenderPrimitive.Lines\n\n vertices = np.array([[-size, 0., 0.], [size, 0., 0.],\n [0., -size, 0.], [0., size, 0.],\n [0., 0., -size], [0., 0., size]], dtype=np.float32)\n indices = np.array([0, 1, 2, 3, 4, 5], dtype=np.uint32)\n node.vertices = vertices\n node.indices = indices\n for index, pick in enumerate(self.picks):\n point, selected = pick\n node.selected[index] = selected\n node.per_object_colour[index] = Colour(0.9, 0.4, 0.4)\n node.per_object_transform[index] = Matrix44.fromTranslation(point)\n\n node.buildVertexBuffer()\n self.draw(node)\n\n def mousePressEvent(self, event):\n if self.picking:\n self.pickEvent(event)\n else:\n self.scene.camera.mode = Camera.Projection.Perspective\n self.last_pos = event.pos()\n\n def mouseMoveEvent(self, event):\n if self.picking:\n return\n\n translation_speed = 0.001\n\n if event.buttons() == QtCore.Qt.LeftButton:\n p1 = (self.last_pos.x() / self.width() * 2, self.last_pos.y() / self.height() * 2)\n p2 = (event.x() / self.width() * 2, event.y() / self.height() * 2)\n self.scene.camera.rotate(p1, p2)\n\n elif event.buttons() == QtCore.Qt.RightButton:\n dx = event.x() - self.last_pos.x()\n dy = event.y() - self.last_pos.y()\n x_offset = -dx * translation_speed\n y_offset = -dy * translation_speed\n self.scene.camera.pan(x_offset, y_offset)\n\n self.last_pos = event.pos()\n self.update()\n\n def showCoordinateFrame(self, state):\n \"\"\"Sets visibility of the coordinate frame in the widget\n\n :param state: indicates if the coordinate frame should be visible\n :type state: bool\n \"\"\"\n self.show_coordinate_frame = state\n self.update()\n\n def showBoundingBox(self, state):\n \"\"\"Sets visibility of the sample bounding box frame in the widget\n\n :param state: indicates if the bounding box should be visible\n :type state: bool\n \"\"\"\n self.show_bounding_box = state\n self.update()\n\n def wheelEvent(self, event):\n zoom_scale = 0.05\n delta = 0.0\n num_degrees = event.angleDelta() / 8\n if not num_degrees.isNull():\n delta = num_degrees.y() / 15\n\n self.scene.camera.zoom(delta * zoom_scale)\n self.update()\n\n def loadScene(self, scene, zoom_to_fit=True):\n \"\"\"Loads a scene into the widget and adjust the camera\n\n :param scene: sample or instrument scene\n :type scene: Scene\n :param zoom_to_fit: indicates that the scene should be zoomed to fit window\n :type zoom_to_fit: bool\n \"\"\"\n self.scene = scene\n self.scene.camera.aspect = self.width() / self.height()\n\n if not self.scene.isEmpty():\n bounding_box = self.scene.bounding_box\n if zoom_to_fit:\n self.scene.camera.zoomToFit(bounding_box.center, bounding_box.radius)\n else:\n self.scene.camera.updateView(bounding_box.center, bounding_box.radius)\n\n self.update()\n\n def unproject(self, x, y, z):\n \"\"\"Converts point in screen coordinate to point in world coordinate\n\n :param x: x coordinate\n :type x: float\n :param y: y coordinate\n :type y: float\n :param z: z coordinate\n :type z: float\n :return: point in screen coordinates and flag indicating the new point is valid\n :rtype: Tuple[Vector3, bool]\n \"\"\"\n y = self.height() - y # invert y to match screen coordinate\n screen_point = Vector3([x, y, z])\n model_view = self.scene.camera.model_view\n projection = 
self.scene.camera.projection\n\n world_point, valid = screen_to_world(screen_point, model_view, projection, self.width(), self.height())\n return world_point, valid\n\n def project(self, x, y, z):\n \"\"\"Converts point in world coordinate to point in screen coordinate\n\n :param x: x coordinate\n :type x: float\n :param y: y coordinate\n :type y: float\n :param z: z coordinate\n :type z: float\n :return: point in screen coordinates and flag indicating the new point is valid\n :rtype: Tuple[Vector3, bool]\n \"\"\"\n world_point = Vector3([x, y, z])\n model_view = self.scene.camera.model_view\n projection = self.scene.camera.projection\n\n screen_point, valid = world_to_screen(world_point, model_view, projection, self.width(), self.height())\n screen_point.y = self.height() - screen_point.y # invert y to match screen coordinate\n return screen_point, valid\n\n def renderBoundingBox(self):\n \"\"\"Draws the axis aligned bounding box of the sample\"\"\"\n if Attributes.Sample not in self.scene:\n return\n\n bounding_box = self.scene[Attributes.Sample].bounding_box\n max_x, max_y, max_z = bounding_box.max\n min_x, min_y, min_z = bounding_box.min\n\n node = Node()\n node.render_mode = Node.RenderMode.Solid\n node.render_primitive = Node.RenderPrimitive.Lines\n\n node.vertices = np.array([[min_x, min_y, min_z],\n [min_x, max_y, min_z],\n [max_x, min_y, min_z],\n [max_x, max_y, min_z],\n [min_x, min_y, max_z],\n [min_x, max_y, max_z],\n [max_x, min_y, max_z],\n [max_x, max_y, max_z]], dtype=np.float32)\n node.indices = np.array([0, 1, 1, 3, 3, 2, 2, 0,\n 4, 5, 5, 7, 7, 6, 6, 4,\n 0, 4, 1, 5, 2, 6, 3, 7], dtype=np.uint32)\n node.colour = Colour(0.9, 0.4, 0.4)\n node.buildVertexBuffer()\n self.draw(node)\n\n def renderAxis(self):\n \"\"\"Draws the X, Y and Z axis lines and centre point\"\"\"\n if self.scene.isEmpty():\n return\n\n scale = self.scene.bounding_box.radius\n\n node = BatchRenderNode(3)\n node.render_mode = Node.RenderMode.Solid\n node.render_primitive = Node.RenderPrimitive.Lines\n node.vertices = np.array([[0.0, 0.0, 0.0], [scale, 0.0, 0.0],\n [0.0, 0.0, 0.0], [0.0, scale, 0.0],\n [0.0, 0.0, 0.0], [0.0, 0.0, scale]], dtype=np.float32)\n\n node.indices = np.array([0, 1, 2, 3, 4, 5], dtype=np.uint32)\n node.per_object_colour = [Colour(1.0, 0.0, 0.0), Colour(0.0, 1.0, 0.0), Colour(0.0, 0.0, 1.0)]\n node.batch_offsets = [2, 4, 6]\n node.buildVertexBuffer()\n\n GL.glEnable(GL.GL_DEPTH_CLAMP)\n GL.glDepthFunc(GL.GL_LEQUAL)\n self.draw(node)\n GL.glDisable(GL.GL_DEPTH_CLAMP)\n GL.glDepthFunc(GL.GL_LESS)\n\n origin, ok = self.project(0., 0., 0.)\n if not ok:\n return\n\n GL.glPushAttrib(GL.GL_ALL_ATTRIB_BITS)\n painter = QtGui.QPainter(self)\n painter.setPen(QtGui.QColor.fromRgbF(0.5, 0.5, 0.5))\n painter.setFont(self.default_font)\n\n # draw origin\n painter.drawEllipse(QtCore.QPointF(origin.x, origin.y), 10, 10)\n\n axes = [(1, 0, 0, 'X'), (0, 1, 0, 'Y'), (0, 0, 1, 'Z')]\n\n for x, y, z, label in axes:\n painter.setPen(QtGui.QColor.fromRgbF(x, y, z))\n\n x *= scale * 1.01\n y *= scale * 1.01\n z *= scale * 1.01\n\n text_pos, ok = self.project(x, y, z)\n if not ok:\n continue\n\n # Render text\n painter.drawText(QtCore.QPointF(*text_pos[:2]), label)\n\n painter.end()\n GL.glPopAttrib()\n\n def viewFrom(self, direction):\n \"\"\"Changes view direction of scene camera\n\n :param direction: direction to view from\n :type direction: Direction\n \"\"\"\n self.scene.camera.mode = Camera.Projection.Orthographic\n self.scene.camera.viewFrom(direction)\n self.update()\n\n def resetCamera(self):\n 
\"\"\"Resets scene camera\"\"\"\n self.scene.camera.reset()\n self.update()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |