ext | sha | content
---|---|---
py | b40cd9ab5a8e769a8919c9113e320169f56bbb76 | from .custom import CustomDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
from .utils import to_tensor, random_scale, show_ann, get_dataset
from .concat_dataset import ConcatDataset
from .repeat_dataset import RepeatDataset
from .extra_aug import ExtraAugmentation
# 'MyDataset' is listed in __all__ below; the module name used here is an
# assumption, since the corresponding import was missing from this snapshot.
from .my_dataset import MyDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
'ExtraAugmentation', 'WIDERFaceDataset', 'MyDataset'
]
|
py | b40cd9fae2faeeefb4599f465616d26cca8041a1 | """
Contains possible interactions with the Galaxy Datasets
"""
import logging
import os
import shlex
import time
import warnings
from typing import (
Any,
Dict,
List,
Optional,
Union,
)
from urllib.parse import urljoin
import bioblend
from bioblend import TimeoutException
from bioblend.galaxy.client import Client
log = logging.getLogger(__name__)
TERMINAL_STATES = {'ok', 'empty', 'error', 'discarded', 'failed_metadata'}
# Non-terminal states are: 'new', 'upload', 'queued', 'running', 'paused', 'setting_metadata'
class DatasetClient(Client):
module = 'datasets'
def __init__(self, galaxy_instance):
super().__init__(galaxy_instance)
def show_dataset(self, dataset_id, deleted=False, hda_ldda='hda'):
"""
Get details about a given dataset. This can be a history or a library dataset.
:type dataset_id: str
:param dataset_id: Encoded dataset ID
:type deleted: bool
:param deleted: Whether to return results for a deleted dataset
:type hda_ldda: str
:param hda_ldda: Whether to show a history dataset ('hda' - the default) or library
dataset ('ldda').
:rtype: dict
:return: Information about the HDA or LDDA
"""
params = dict(
hda_ldda=hda_ldda,
)
return self._get(id=dataset_id, deleted=deleted, params=params)
def _initiate_download(self, dataset_id: str, stream_content: bool,
require_ok_state: bool = True, maxwait: float = 12000):
dataset = self.wait_for_dataset(dataset_id, maxwait=maxwait, check=False)
        if dataset['state'] != 'ok':
message = f"Dataset state is not 'ok'. Dataset id: {dataset_id}, current state: {dataset['state']}"
if require_ok_state:
raise DatasetStateException(message)
else:
warnings.warn(message, DatasetStateWarning)
file_ext = dataset.get('file_ext')
# Resort to 'data' when Galaxy returns an empty or temporary extension
if not file_ext or file_ext == 'auto' or file_ext == '_sniff_':
file_ext = 'data'
# The preferred download URL is
# '/api/histories/<history_id>/contents/<dataset_id>/display?to_ext=<dataset_ext>'
# since the old URL:
# '/dataset/<dataset_id>/display?to_ext=<dataset_ext>'
# does not work when using REMOTE_USER with access disabled to
# everything but /api without auth
download_url = dataset['download_url'] + '?to_ext=' + file_ext
url = urljoin(self.gi.base_url, download_url)
r = self.gi.make_get_request(url, stream=stream_content)
r.raise_for_status()
return dataset, file_ext, r
def download_dataset(self, dataset_id, file_path=None, use_default_filename=True,
require_ok_state=True, maxwait=12000):
"""
Download a dataset to file or in memory. If the dataset state is not
'ok', a ``DatasetStateException`` will be thrown, unless ``require_ok_state=False``.
:type dataset_id: str
:param dataset_id: Encoded dataset ID
:type file_path: str
:param file_path: If this argument is provided, the dataset will be streamed to disk
at that path (should be a directory if ``use_default_filename=True``).
If the file_path argument is not provided, the dataset content is loaded into memory
and returned by the method (Memory consumption may be heavy as the entire file
will be in memory).
:type use_default_filename: bool
:param use_default_filename: If ``True``, the exported
file will be saved as ``file_path/%s``,
where ``%s`` is the dataset name.
If ``False``, ``file_path`` is assumed to
contain the full file path including the filename.
:type require_ok_state: bool
:param require_ok_state: If ``False``, datasets will be downloaded even if not in an 'ok' state,
issuing a ``DatasetStateWarning`` rather than raising a ``DatasetStateException``.
:type maxwait: float
:param maxwait: Total time (in seconds) to wait for the dataset state to
become terminal. If the dataset state is not terminal within this
time, a ``DatasetTimeoutException`` will be thrown.
:rtype: bytes or str
:return: If a ``file_path`` argument is not provided, returns the file
content. Otherwise returns the local path of the downloaded file.
"""
dataset, file_ext, r = self._initiate_download(
dataset_id,
stream_content=file_path is not None,
require_ok_state=require_ok_state,
maxwait=maxwait
)
if file_path is None:
if 'content-length' in r.headers and len(r.content) != int(r.headers['content-length']):
log.warning("Transferred content size does not match content-length header (%s != %s)", len(r.content), r.headers['content-length'])
return r.content
else:
if use_default_filename:
                # Build a usable filename
filename = dataset['name'] + '.' + file_ext
# Now try to get a better filename from the response headers
# We expect tokens 'filename' '=' to be followed by the quoted filename
if 'content-disposition' in r.headers:
tokens = list(shlex.shlex(r.headers['content-disposition'], posix=True))
try:
header_filepath = tokens[tokens.index('filename') + 2]
filename = os.path.basename(header_filepath)
except (ValueError, IndexError):
pass
file_local_path = os.path.join(file_path, filename)
else:
file_local_path = file_path
with open(file_local_path, 'wb') as fp:
for chunk in r.iter_content(chunk_size=bioblend.CHUNK_SIZE):
if chunk:
fp.write(chunk)
# Return location file was saved to
return file_local_path
def get_datasets(
self,
limit: int = 500,
offset: int = 0,
name: Optional[str] = None,
extension: Optional[Union[str, List[str]]] = None,
state: Optional[Union[str, List[str]]] = None,
visible: Optional[bool] = None,
deleted: Optional[bool] = None,
purged: Optional[bool] = None,
tool_id: Optional[str] = None,
tag: Optional[str] = None,
history_id: Optional[str] = None,
        create_time_min: Optional[str] = None,
        create_time_max: Optional[str] = None,
        update_time_min: Optional[str] = None,
        update_time_max: Optional[str] = None,
order: str = 'create_time-dsc',
) -> List[dict]:
"""
Get the latest datasets, or select another subset by specifying optional
arguments for filtering (e.g. a history ID).
Since the number of datasets may be very large, ``limit`` and ``offset``
parameters are required to specify the desired range.
If the user is an admin, this will return datasets for all the users,
otherwise only for the current user.
:type limit: int
:param limit: Maximum number of datasets to return.
:type offset: int
:param offset: Return datasets starting from this specified position.
For example, if ``limit`` is set to 100 and ``offset`` to 200,
datasets 200-299 will be returned.
:type name: str
:param name: Dataset name to filter on.
:type extension: str or list of str
:param extension: Dataset extension (or list of extensions) to filter on.
:type state: str or list of str
:param state: Dataset state (or list of states) to filter on.
:type visible: bool
:param visible: Optionally filter datasets by their ``visible`` attribute.
:type deleted: bool
:param deleted: Optionally filter datasets by their ``deleted`` attribute.
:type purged: bool
:param purged: Optionally filter datasets by their ``purged`` attribute.
:type tool_id: str
:param tool_id: Tool ID to filter on.
:type tag: str
:param tag: Dataset tag to filter on.
:type history_id: str
:param history_id: Encoded history ID to filter on.
:type create_time_min: str
:param create_time_min: Show only datasets created after the provided
time and date, which should be formatted as ``YYYY-MM-DDTHH-MM-SS``.
:type create_time_max: str
:param create_time_max: Show only datasets created before the provided
time and date, which should be formatted as ``YYYY-MM-DDTHH-MM-SS``.
:type update_time_min: str
:param update_time_min: Show only datasets last updated after the provided
time and date, which should be formatted as ``YYYY-MM-DDTHH-MM-SS``.
:type update_time_max: str
:param update_time_max: Show only datasets last updated before the provided
time and date, which should be formatted as ``YYYY-MM-DDTHH-MM-SS``.
:type order: str
:param order: One or more of the following attributes for ordering datasets:
``create_time`` (default), ``extension``, ``hid``, ``history_id``, ``name``,
``update_time``. Optionally, ``-asc`` or ``-dsc`` (default) can be appended
for ascending and descending order respectively. Multiple attributes can be
stacked as a comma-separated list of values, e.g. ``create_time-asc,hid-dsc``.
:rtype: list
        :return: A list of datasets
"""
params: Dict[str, Any] = {
'limit': limit,
'offset': offset,
'order': order,
}
if history_id:
params['history_id'] = history_id
q: List[str] = []
qv: List[Any] = []
if name:
q.append('name')
qv.append(name)
if state:
op, val = self._param_to_filter(state)
q.append(f'state-{op}')
qv.append(val)
if extension:
op, val = self._param_to_filter(extension)
q.append(f'extension-{op}')
qv.append(val)
if visible is not None:
q.append('visible')
qv.append(str(visible))
if deleted is not None:
q.append('deleted')
qv.append(str(deleted))
if purged is not None:
q.append('purged')
qv.append(str(purged))
if tool_id is not None:
q.append('tool_id')
qv.append(str(tool_id))
if tag is not None:
q.append('tag')
qv.append(str(tag))
if create_time_min:
q.append('create_time-ge')
qv.append(create_time_min)
if create_time_max:
q.append('create_time-le')
qv.append(create_time_max)
if update_time_min:
q.append('update_time-ge')
qv.append(update_time_min)
if update_time_max:
q.append('update_time-le')
qv.append(update_time_max)
params['q'] = q
params['qv'] = qv
return self._get(params=params)
def _param_to_filter(self, param):
if type(param) is str:
return 'eq', param
if type(param) is list:
if len(param) == 1:
return 'eq', param.pop()
return 'in', ','.join(param)
raise Exception("Filter param is not of type ``str`` or ``list``")
def publish_dataset(self, dataset_id: str, published: bool = False):
"""
Make a dataset publicly available or private. For more fine-grained control (assigning different
permissions to specific roles), use the ``update_permissions()`` method.
:type dataset_id: str
:param dataset_id: dataset ID
:type published: bool
:param published: Whether to make the dataset published (``True``) or private (``False``).
:rtype: dict
:return: Current roles for all available permission types.
.. note::
This method can only be used with Galaxy ``release_19.05`` or later.
"""
payload: Dict[str, Any] = {
'action': 'remove_restrictions' if published else 'make_private'
}
url = self._make_url(dataset_id) + '/permissions'
        return self.gi.datasets._put(url=url, payload=payload)
def update_permissions(self, dataset_id: str, access_ids: Optional[list] = None,
manage_ids: Optional[list] = None, modify_ids: Optional[list] = None):
"""
Set access, manage or modify permissions for a dataset to a list of roles.
:type dataset_id: str
:param dataset_id: dataset ID
:type access_ids: list
:param access_ids: role IDs which should have access permissions for the dataset.
:type manage_ids: list
:param manage_ids: role IDs which should have manage permissions for the dataset.
:type modify_ids: list
:param modify_ids: role IDs which should have modify permissions for the dataset.
:rtype: dict
:return: Current roles for all available permission types.
.. note::
This method can only be used with Galaxy ``release_19.05`` or later.
"""
payload: Dict[str, Any] = {
'action': 'set_permissions'
}
if access_ids:
payload['access'] = access_ids
if manage_ids:
payload['manage'] = manage_ids
if modify_ids:
payload['modify'] = modify_ids
url = self._make_url(dataset_id) + '/permissions'
        return self.gi.datasets._put(url=url, payload=payload)
def wait_for_dataset(self, dataset_id, maxwait=12000, interval=3, check=True):
"""
Wait until a dataset is in a terminal state.
:type dataset_id: str
:param dataset_id: dataset ID
:type maxwait: float
:param maxwait: Total time (in seconds) to wait for the dataset state to
become terminal. If the dataset state is not terminal within this
time, a ``DatasetTimeoutException`` will be raised.
:type interval: float
:param interval: Time (in seconds) to wait between 2 consecutive checks.
:type check: bool
:param check: Whether to check if the dataset terminal state is 'ok'.
:rtype: dict
:return: Details of the given dataset.
"""
assert maxwait >= 0
assert interval > 0
time_left = maxwait
while True:
dataset = self.show_dataset(dataset_id)
state = dataset['state']
if state in TERMINAL_STATES:
if check and state != 'ok':
raise Exception(f"Dataset {dataset_id} is in terminal state {state}")
return dataset
if time_left > 0:
log.info(f"Dataset {dataset_id} is in non-terminal state {state}. Will wait {time_left} more s")
time.sleep(min(time_left, interval))
time_left -= interval
else:
raise DatasetTimeoutException(f"Dataset {dataset_id} is still in non-terminal state {state} after {maxwait} s")
class DatasetStateException(Exception):
pass
class DatasetStateWarning(Warning):
pass
class DatasetTimeoutException(TimeoutException):
pass
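# --- Example usage (a hedged sketch, not part of bioblend itself) ---
# The DatasetClient above is normally reached through a GalaxyInstance as
# ``gi.datasets``. The server URL, API key and the assumption that at least
# one dataset exists are placeholders for illustration only.
if __name__ == '__main__':
    from bioblend.galaxy import GalaxyInstance
    gi = GalaxyInstance(url='https://galaxy.example.org', key='YOUR_API_KEY')
    recent = gi.datasets.get_datasets(limit=5)                # newest datasets first
    if recent:
        details = gi.datasets.show_dataset(recent[0]['id'])   # full record for one dataset
        print(details['name'], details['state'])
        # Stream the dataset to the current directory using its default filename
        gi.datasets.download_dataset(recent[0]['id'], file_path='.', use_default_filename=True)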
|
py | b40cdc49b2d271b02a31de85d72a343dc19b6af1 | import json
from corehq.apps.api.resources.auth import LoginAndDomainAuthentication
from corehq.apps.locations.models import SQLLocation
from corehq.apps.locations.resources.v0_1 import LocationResource, get_location_or_not_exist
from corehq.util.quickcache import quickcache
@quickcache(['project.name', 'show_administrative'], timeout=10)
def _user_locations_ids(project, show_administrative):
locations = SQLLocation.by_domain(project.name)
if show_administrative == 'False':
locations = locations.filter(location_type__administrative=True)
# admins and users not assigned to a location can see and edit everything
return locations.values_list('location_id', flat=True)
class EWSLocationResource(LocationResource):
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
project = getattr(bundle.request, 'project', self.domain_obj(domain))
parent_id = bundle.request.GET.get('parent_id', None)
include_inactive = json.loads(bundle.request.GET.get('include_inactive', 'false'))
show_administrative = bundle.request.GET.get('show_administrative', False)
viewable = _user_locations_ids(project, show_administrative)
if not parent_id:
locs = SQLLocation.root_locations(domain, include_inactive)
else:
parent = get_location_or_not_exist(parent_id, domain)
locs = parent.sql_location.child_locations(include_inactive)
return [child for child in locs if child.location_id in viewable]
class Meta(LocationResource.Meta):
authentication = LoginAndDomainAuthentication(allow_session_auth=True)
resource_name = 'ews_location'
|
py | b40cdcde047e479ad06c1cae7437c883f0c874ef | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
    namestr = names.tobytes()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
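# --- Example usage (a hedged sketch; assumes a Linux host, since these helpers
# read /proc, and enough permissions to inspect the target process) ---
if __name__ == '__main__':
    print(all_interfaces())              # e.g. [(b'lo', '127.0.0.1'), (b'eth0', '10.0.0.5')]
    print(addr_to_hex('127.0.0.1'))      # '7f000001'
    print(test_ipv6_local())             # True if IPv6 localhost is reachable
    print(get_bind_addrs(os.getpid()))   # (host, port) tuples this process is listening on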
|
py | b40cdd243e48c54c2ceaa380f0cdc9af4a93768f | version_number="0.3.141592" ### Change the number, not the name
|
py | b40cdd82cc63c1d4e4c474c8dd6028ad307d8a92 | """
Module for implementation of multiscale mesh and CoarseVolumes objects functionalities
"""
import time
import pdb
import configparser as cp
from . finescaleMesh import FineScaleMesh
from ..msCoarseningLib import algoritmo
#from msCoarseningLib.configManager import readConfig
from . meshComponents import MoabVariable, MeshEntities
from . mscorePymoab import MsCoreMoab
from . meshComponentsMS import MultiscaleMeshEntities, MoabVariableMS, MeshEntitiesMS
import numpy as np
from math import pi, sqrt
from pymoab import core, types, rng, topo_util
print('Initializing Finescale Mesh for Multiscale Methods')
class FineScaleMeshMS(FineScaleMesh):
def __init__(self,mesh_file, dim = 3):
super().__init__(mesh_file,dim)
self.partition = self.init_partition()
self.coarse_volumes = [CoarseVolume(self.core, self.dim, i, self.partition[:] == i) for i in range(self.partition[:].max()+1 )]
print("Creating object general")
self.general = MultiscaleMeshEntities(self.core,self.coarse_volumes)
        for i, el in enumerate(self.coarse_volumes):
el(i,self.general)
def init_entities(self):
self.nodes = MeshEntitiesMS(self.core, entity_type = "node")
self.edges = MeshEntitiesMS(self.core, entity_type = "edges")
self.faces = MeshEntitiesMS(self.core, entity_type = "faces")
if self.dim == 3:
self.volumes = MeshEntitiesMS(self.core, entity_type = "volumes")
def init_variables(self):
#self.alma = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="int", name_tag="alma")
#self.ama = MoabVariableMS(self.core,data_size=1,var_type= "faces", data_format="float", name_tag="ama",data_density="sparse")
#self.arma = MoabVariableMS(self.core,data_size=3,var_type= "edges", data_format="float", name_tag="arma", data_density="sparse")
self.permeability = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="int", name_tag="permeability")
self.pressure = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="float", name_tag="pressure")
self.erro = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="float", name_tag="erro")
def init_partition(self):
config = self.read_config()
particionador_type = config.get("Particionador","algoritmo")
if particionador_type != '0':
if self.dim == 3:
partition = MoabVariable(self.core,data_size=1,var_type= "volumes", data_format="int", name_tag="Partition",
data_density="sparse")
name_function = "scheme" + particionador_type
key = "Coarsening_" + particionador_type + "_Input"
specific_attributes = config.items(key)
used_attributes = []
for at in specific_attributes:
used_attributes.append(float(at[1]))
[partition[:],coarse_center] = getattr(algoritmo, name_function)(self.volumes.center[:],
len(self), self.rx, self.ry, self.rz,*used_attributes)
elif self.dim == 2:
partition = MoabVariable(self.core,data_size=1,var_type= "faces", data_format="int", name_tag="Partition",
data_density="sparse")
name_function = "scheme" + particionador_type
key = "Coarsening_" + particionador_type + "_Input"
specific_attributes = config.items(key)
used_attributes = []
for at in specific_attributes:
used_attributes.append(float(at[1]))
                [partition[:], coarse_center] = getattr(algoritmo, name_function)(self.faces.center[:],
len(self), self.rx, self.ry, self.rz,*used_attributes)
return partition
def init_partition_parallel(self):
if self.dim == 3:
partition = MoabVariable(self.core,data_size=1,var_type= "volumes", data_format="int", name_tag="Parallel",
data_density="sparse")
# partition[:]
# [partition[:],coarse_center] = getattr(msCoarseningLib.algoritmo, name_function)(self.volumes.center[:],
# len(self), self.rx, self.ry, self.rz,*used_attributes)
elif self.dim == 2:
partition = MoabVariable(self.core,data_size=1,var_type= "faces", data_format="int", name_tag="Parallel", data_density="sparse")
return partition
def read_config(self, config_input="msCoarse.ini"):
config_file = cp.ConfigParser()
config_file.read(config_input)
return config_file
class CoarseVolume(FineScaleMeshMS):
def __init__(self, father_core, dim, i, coarse_vec):
self.dim = dim
self.level = father_core.level + 1
self.coarse_num = i
print("Level {0} - Volume {1}".format(self.level,self.coarse_num))
self.core = MsCoreMoab(father_core, i, coarse_vec)
self.init_entities()
self.init_variables()
self.init_coarse_variables()
self.macro_dim()
def init_variables(self):
pass
def __call__(self,i,general):
self.nodes.enhance(i,general)
self.edges.enhance(i,general)
self.faces.enhance(i,general)
if self.dim == 3:
self.volumes.enhance(i,general)
pass
def init_coarse_variables(self):
#self.lama = MoabVariableMS(self.core,data_size=1,var_type= "faces", data_format="int", name_tag="lama", level=self.level, coarse_num=self.coarse_num)
self.pressure_coarse = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="float", name_tag="pressure_coarse", level=self.level, coarse_num=self.coarse_num)
self.permeability_coarse = MoabVariableMS(self.core,data_size=1,var_type= "volumes", data_format="float", name_tag="permeability_coarse", level=self.level, coarse_num=self.coarse_num)
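# --- Example usage (a hedged sketch): the mesh file name is hypothetical and a
# 'msCoarse.ini' partitioning config is assumed to exist in the working directory ---
if __name__ == '__main__':
    M = FineScaleMeshMS('mesh_file.h5m', dim=3)
    print('coarse volumes created:', len(M.coarse_volumes))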
|
py | b40cdf5cb7a31b7006b332321c4db22a6a909b20 | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import re
import shutil
import string
import tempfile
from streamalert.apps import StreamAlertApp
from streamalert.shared import CLUSTERED_FUNCTIONS, config, metrics
from streamalert.shared.logger import get_logger
from streamalert_cli.terraform import TERRAFORM_FILES_PATH
from streamalert_cli.apps.helpers import save_app_auth_info
from streamalert_cli.helpers import continue_prompt
DEFAULT_CONFIG_PATH = 'conf'
LOGGER = get_logger(__name__)
class CLIConfig:
"""A class to load, modify, and display the StreamAlertCLI Config"""
def __init__(self, config_path, extra_terraform_files=None, build_directory=None):
self.config_path = config_path
self.config = config.load_config(config_path)
self._terraform_files = extra_terraform_files or []
self.build_directory = self._setup_build_directory(build_directory)
def __repr__(self):
return str(self.config)
def __getitem__(self, key):
return self.config[key]
def __setitem__(self, key, new_value):
self.config.__setitem__(key, new_value)
self.write()
def get(self, key, default=None):
"""Lookup a value based on its key"""
return self.config.get(key, default)
def keys(self):
"""Config keys"""
return list(self.config.keys())
def clusters(self):
"""Return list of cluster configuration keys"""
return list(self.config['clusters'].keys())
@property
def terraform_files(self):
"""Return set of terraform files to include with this deployment"""
return set(self._terraform_files).union(
self.config['global']['general'].get('terraform_files', [])
)
def _copy_terraform_files(self, directory):
"""Copy all packaged terraform files and terraform files provided by the user to temp
Args:
config (CLIConfig): Loaded StreamAlert config
"""
shutil.copytree(TERRAFORM_FILES_PATH, directory)
# Copy any additional user provided terraform files to temp
for item in self.terraform_files:
shutil.copy2(item, directory)
LOGGER.info('Copied Terraform configuration to \'%s\'', directory)
def _setup_build_directory(self, directory):
"""Create the directory to be used for building infrastructure
Args:
directory (str): Optional path to directory to create
Returns:
str: Path to directory that will be used
"""
if not directory:
temp_dir = tempfile.TemporaryDirectory(prefix='streamalert_build-')
directory = temp_dir.name
# Calling cleanup here to remove this directory so shutil can recreate it
# Without calling this here, an exception is raised when tempfile garbage collects
temp_dir.cleanup()
if os.path.exists(directory):
shutil.rmtree(directory) # shutil.copytree in python3.7 cannot handle existing dir
self._copy_terraform_files(directory)
return directory
def set_prefix(self, prefix):
"""Set the Org Prefix in Global settings"""
if not isinstance(prefix, str):
LOGGER.error('Invalid prefix type, must be string')
return False
acceptable_chars = set([*string.digits, *string.ascii_lowercase])
if not set(prefix).issubset(acceptable_chars):
LOGGER.error('Prefix must contain only lowercase letters and numbers')
return False
self.config['global']['account']['prefix'] = prefix
self.write()
LOGGER.info('Prefix successfully configured')
return True
def set_aws_account_id(self, aws_account_id):
"""Set the AWS Account ID in Global settings"""
if not re.search(r'\A\d{12}\Z', aws_account_id):
LOGGER.error('Invalid AWS Account ID, must be 12 digits long')
return False
self.config['global']['account']['aws_account_id'] = aws_account_id
self.write()
LOGGER.info('AWS Account ID successfully configured')
return True
def toggle_rule_staging(self, enabled):
"""Toggle rule staging on or off
Args:
enabled (bool): False if disabling rule staging, true if enabling
"""
print('Setting rule staging enabled setting to: {}'.format(enabled))
self.config['global']['infrastructure']['rule_staging']['enabled'] = enabled
self.write()
def toggle_metrics(self, *lambda_functions, **kwargs):
"""Toggle CloudWatch metric logging and filter creation
Args:
            enabled (bool): True if enabling metrics, False if disabling them
clusters (list): Clusters to enable or disable metrics on
lambda_functions (list): Which lambda functions to enable or disable
metrics on (rule, alert, or athena)
"""
enabled = kwargs.get('enabled', False)
clusters = kwargs.get('clusters', [])
for function in lambda_functions:
function_config = '{}_config'.format(function)
if function not in CLUSTERED_FUNCTIONS:
if function_config not in self.config['lambda']:
self.config['lambda'][function_config] = {}
self.config['lambda'][function_config]['enable_custom_metrics'] = enabled
else:
# Classifier - toggle for each cluster
for cluster in clusters:
self.config['clusters'][cluster][function_config]['enable_custom_metrics'] = (
enabled
)
self.write()
@staticmethod
def _add_metric_alarm_config(alarm_info, current_alarms):
"""Helper function to add the metric alarm to the respective config
Args:
alarm_info (dict): All the necessary values needed to add a CloudWatch
metric alarm
current_alarms (dict): All of the current metric alarms from the config
Returns:
dict: The new metric alarms dictionary with the added metric alarm
"""
# Some keys that come from the argparse options can be omitted
omitted_keys = {'debug', 'alarm_name', 'command', 'clusters', 'function'}
current_alarms[alarm_info['alarm_name']] = {
key: value
for key, value in alarm_info.items() if key not in omitted_keys
}
return current_alarms
def _alarm_exists(self, alarm_name):
"""Check if this alarm name is already used somewhere. CloudWatch alarm
names must be unique to an AWS account
Args:
alarm_name (str): The name of the alarm being created
Returns:
            bool: True if the alarm name is already present in the config
"""
message = ('CloudWatch metric alarm names must be unique '
'within each AWS account. Please remove this alarm '
'so it can be updated or choose another name.')
funcs = {metrics.CLASSIFIER_FUNCTION_NAME}
for func in funcs:
func_config = '{}_config'.format(func)
for cluster, cluster_config in self.config['clusters'].items():
func_alarms = cluster_config[func_config].get('custom_metric_alarms', {})
if alarm_name in func_alarms:
LOGGER.error('An alarm with name \'%s\' already exists in the '
'\'conf/clusters/%s.json\' cluster. %s', alarm_name, cluster,
message)
return True
for func, global_lambda_config in self.config['lambda'].items():
if alarm_name in global_lambda_config.get('custom_metric_alarms', {}):
LOGGER.error('An alarm with name \'%s\' already exists in the '
'\'conf/lambda.json\' in function config \'%s\'. %s',
alarm_name, func, message)
return True
return False
def _clusters_with_metrics_enabled(self, function):
function_config = '{}_config'.format(function)
return {
cluster
for cluster, cluster_config in self.config['clusters'].items()
if (self.config['clusters'][cluster][function_config].get('enable_custom_metrics'))
}
def _add_cluster_metric_alarm(self, alarm_info):
"""Add a metric alarm that corresponds to a predefined metrics for clusters
Args:
alarm_info (dict): All the necessary values needed to add a CloudWatch
metric alarm.
"""
function_name = alarm_info['function']
# Go over each of the clusters and see if enable_metrics == True and prompt
# the user to toggle metrics on if this is False
config_name = '{}_config'.format(function_name)
for cluster in alarm_info['clusters']:
function_config = (
self.config['clusters'][cluster][config_name])
if not function_config.get('enable_custom_metrics'):
prompt = ('Metrics are not currently enabled for the \'{}\' function '
'within the \'{}\' cluster. Would you like to enable metrics '
'for this cluster?'.format(function_name, cluster))
if continue_prompt(message=prompt):
self.toggle_metrics(function_name, enabled=True, clusters=[cluster])
elif not continue_prompt(message='Would you still like to add this alarm '
'even though metrics are disabled?'):
continue
metric_alarms = function_config.get('custom_metric_alarms', {})
# Format the metric name for the cluster based metric
# Prepend a prefix for this function and append the cluster name
alarm_settings = alarm_info.copy()
alarm_settings['metric_name'] = '{}-{}-{}'.format(
metrics.FUNC_PREFIXES[function_name],
alarm_settings['metric_name'],
cluster.upper()
)
function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
alarm_settings,
metric_alarms
)
LOGGER.info('Successfully added \'%s\' metric alarm for the \'%s\' '
'function to \'conf/clusters/%s.json\'.',
alarm_settings['alarm_name'], function_name, cluster)
return True
def _add_global_metric_alarm(self, alarm_info):
"""Add a metric alarm that corresponds to a predefined metrics globally
Args:
alarm_info (dict): All the necessary values needed to add a CloudWatch
metric alarm
"""
function_name = alarm_info['function']
func_config_name = '{}_config'.format(function_name)
# Check if metrics are not enabled, and ask the user if they would like to enable them
if func_config_name not in self.config['lambda']:
self.config['lambda'][func_config_name] = {}
function_config = self.config['lambda'][func_config_name]
if function_name in CLUSTERED_FUNCTIONS:
if not self._clusters_with_metrics_enabled(function_name):
prompt = (
'Metrics are not currently enabled for the \'{}\' function '
'within any cluster. Creating an alarm will have no effect '
'until metrics are enabled for this function in at least one '
'cluster. Would you still like to continue?'.format(function_name)
)
if not continue_prompt(message=prompt):
return False
else:
if not function_config.get('enable_custom_metrics'):
prompt = (
'Metrics are not currently enabled for the \'{}\' function. '
'Would you like to enable metrics for this function?'
).format(function_name)
if continue_prompt(message=prompt):
self.toggle_metrics(function_name, enabled=True)
elif not continue_prompt(message='Would you still like to add this alarm '
'even though metrics are disabled?'):
return False
metric_alarms = function_config.get('custom_metric_alarms', {})
# Format the metric name for the aggregate metric
alarm_settings = alarm_info.copy()
alarm_settings['metric_name'] = '{}-{}'.format(
metrics.FUNC_PREFIXES[function_name],
alarm_settings['metric_name']
)
function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
alarm_settings,
metric_alarms
)
LOGGER.info('Successfully added \'%s\' metric alarm to '
'\'conf/lambda.json\'.', alarm_settings['alarm_name'])
return True
def add_metric_alarm(self, alarm_info):
"""Add a metric alarm that corresponds to a predefined metrics
Args:
alarm_info (dict): All the necessary values needed to add a CloudWatch
metric alarm
"""
# Check to see if an alarm with this name already exists
if self._alarm_exists(alarm_info['alarm_name']):
return False
# Get the current metrics for each function
current_metrics = metrics.MetricLogger.get_available_metrics()[alarm_info['function']]
        if alarm_info['metric_name'] not in current_metrics:
LOGGER.error(
'Metric name \'%s\' not defined for function \'%s\'',
alarm_info['metric_name'],
alarm_info['function']
)
return False
if 'clusters' in alarm_info:
self._add_cluster_metric_alarm(alarm_info)
else:
if not self._add_global_metric_alarm(alarm_info):
return False
self.write()
return True
def add_app(self, func_name, app_info):
"""Add a configuration for a new streamalert app integration function
        Args:
            func_name (str): Name under which this app function is keyed in the cluster config
app_info (dict): The necessary values needed to begin configuring
a new app integration
Returns:
bool: False if errors occurred, True otherwise
"""
exists, prompt_for_auth, overwrite = False, True, False
app = StreamAlertApp.get_app(app_info['type'])
cluster_name = app_info['cluster']
app_name = app_info['app_name']
# Check to see if there is an existing configuration for this app integration
cluster_config = self.config['clusters'][cluster_name]
if func_name in cluster_config['modules'].get('streamalert_apps', {}):
prompt = ('An app with the name \'{}\' is already configured for cluster '
'\'{}\'. Would you like to update the existing app\'s configuration'
'?'.format(app_name, cluster_name))
exists = True
# Return if the user is not deliberately updating an existing config
if not continue_prompt(message=prompt):
return
prompt = ('Would you also like to update the authentication information for '
'app integration with name \'{}\'?'.format(app_name))
# If this is true, we shouldn't prompt again to warn about overwriting
prompt_for_auth = overwrite = continue_prompt(message=prompt)
if prompt_for_auth and not save_app_auth_info(app, app_info, func_name, overwrite):
return False
apps_config = cluster_config['modules'].get('streamalert_apps', {})
if not exists:
# Save a default app settings to the config for new apps
new_app_config = {
'app_name': app_info['app_name'],
'concurrency_limit': 2,
'log_level': 'info',
'log_retention_days': 14,
'memory': app_info['memory'],
'metric_alarms': {
'errors': {
'enabled': True,
'evaluation_periods': 1,
'period_secs': 120
}
},
'schedule_expression': app_info['schedule_expression'],
'timeout': app_info['timeout'],
'type': app_info['type']
}
apps_config[func_name] = new_app_config
else:
# Allow for updating certain attributes for the app without overwriting
# current parts of the configuration
updated_app_config = {
'memory': app_info['memory'],
'schedule_expression': app_info['schedule_expression'],
'timeout': app_info['timeout']
}
apps_config[func_name].update(updated_app_config)
cluster_config['modules']['streamalert_apps'] = apps_config
# Add this service to the sources for this app integration
# The `streamalert_app` is purposely singular here
app_sources = self.config['clusters'][cluster_name]['data_sources'].get(
'streamalert_app', {}
)
app_sources[func_name] = [app.service()]
self.config['clusters'][cluster_name]['data_sources']['streamalert_app'] = app_sources
LOGGER.info('Successfully added \'%s\' app integration to \'conf/clusters/%s.json\' '
'for service \'%s\'.', app_info['app_name'], app_info['cluster'],
app_info['type'])
self.write()
return True
def add_threat_intel(self, threat_intel_info):
"""Add Threat Intel configure to config
Args:
threat_intel_info (dict): Settings to enable Threat Intel from commandline.
"""
prefix = self.config['global']['account']['prefix']
if 'threat_intel' not in self.config:
self.config['threat_intel'] = {}
self.config['threat_intel']['enabled'] = threat_intel_info['enable']
table_name = threat_intel_info.get('dynamodb_table_name')
if table_name:
self.config['threat_intel']['dynamodb_table_name'] = table_name
elif not self.config['threat_intel'].get('dynamodb_table_name'):
# set default dynamodb table name if one does not exist
self.config['threat_intel']['dynamodb_table_name'] = (
'{}_streamalert_threat_intel_downloader'.format(prefix)
)
self.write()
LOGGER.info('Threat Intel configuration successfully created')
def add_threat_intel_downloader(self, ti_downloader_info):
"""Add Threat Intel Downloader configure to config
Args:
ti_downloader_info (dict): Settings for Threat Intel Downloader Lambda
function, generated from commandline
"manage.py threat_intel_downloader enable"
Returns:
(bool): Return True if writing settings of Lambda function successfully.
"""
default_config = {
'autoscale': False,
'enabled': True,
'interval': 'rate(1 day)',
'log_level': 'info',
'memory': '128',
'timeout': '120',
'table_rcu': 10,
'table_wcu': 10,
'ioc_keys': [],
'ioc_filters': [],
'ioc_types': [],
'excluded_sub_types': [],
'max_read_capacity': 5,
'min_read_capacity': 5,
'target_utilization': 70
}
if 'threat_intel_downloader_config' in self.config['lambda']:
LOGGER.info('Threat Intel Downloader has been enabled. '
'Please edit config/lambda.json if you want to '
'change lambda function settings.')
return False
self.config['lambda']['threat_intel_downloader_config'] = default_config
# overwrite settings in conf/lambda.json for Threat Intel Downloader
for key, value in ti_downloader_info.items():
if key in self.config['lambda']['threat_intel_downloader_config']:
self.config['lambda']['threat_intel_downloader_config'][key] = value
self.write()
return True
@staticmethod
def _config_writer(path, data, sort=False):
with open(path, 'r+') as conf_file:
json.dump(data, conf_file, indent=2, separators=(',', ': '), sort_keys=sort)
conf_file.truncate()
def write(self):
"""Write the current config in memory to disk"""
# Write loaded configuration files
def format_path(parts):
return '{}.json'.format(os.path.join(*parts))
for config_key in self.config:
path_parts = [self.config_path, config_key]
if config_key == 'clusters':
# Write loaded cluster files
for cluster_key in self.config['clusters']:
parts = path_parts + [cluster_key]
self._config_writer(format_path(parts), self.config['clusters'][cluster_key])
elif config_key != 'logs':
self._config_writer(format_path(path_parts), self.config[config_key])
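# --- Example usage (a hedged sketch; assumes StreamAlert is installed and a
# valid 'conf/' directory with the expected JSON files is present) ---
if __name__ == '__main__':
    cli_config = CLIConfig(config_path='conf')
    cli_config.set_prefix('acmecorp')               # lowercase letters and digits only
    cli_config.set_aws_account_id('123456789012')   # must be exactly 12 digits
    print('clusters:', cli_config.clusters())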
|
py | b40cdfe8c289184c1263141a12061c395e2f5fa4 | """
Django settings for hydra_datastore_site project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a@&p(smfq!h=d79b=x%qfmguscq)4^h!*zlhn**dc((f&1i14g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hydra_datastore.apps.HydraDatastoreConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hydra_datastore_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hydra_datastore_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static") |
py | b40ce00badfd7fbcb86312353cb107cb16cac07c | """
Created on Oct 13, 2021
@author: Richard Christie
"""
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.node import Nodeset
from opencmiss.zinc.field import Field, FieldGroup
from opencmiss.zinc.scene import Scene
def get_scene_selection_group(scene: Scene, subelement_handling_mode=FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
"""
Get existing scene selection group of standard name.
:param scene: Zinc Scene to get selection group for.
:param subelement_handling_mode: Mode controlling how faces, lines and nodes are
automatically added or removed with higher dimensional elements.
:return: Existing selection group, or None.
"""
selection_group = scene.getSelectionField().castGroup()
if selection_group.isValid():
selection_group.setSubelementHandlingMode(subelement_handling_mode)
return selection_group
return None
selection_group_name = 'cmiss_selection'
def create_scene_selection_group(scene: Scene, subelement_handling_mode=FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
"""
Create empty, unmanaged scene selection group of standard name.
Should have already called get_selection_group with None returned.
Can discover orphaned group of that name.
:param scene: Zinc Scene to create selection for.
:param subelement_handling_mode: Mode controlling how faces, lines and nodes are
automatically added or removed with higher dimensional elements. Defaults to on/full.
:return: Selection group for scene.
"""
region = scene.getRegion()
fieldmodule = region.getFieldmodule()
with ChangeManager(fieldmodule):
selection_group = fieldmodule.findFieldByName(selection_group_name)
if selection_group.isValid():
selection_group = selection_group.castGroup()
if selection_group.isValid():
selection_group.clear()
selection_group.setManaged(False)
if not selection_group.isValid():
selection_group = fieldmodule.createFieldGroup()
selection_group.setName(selection_group_name)
selection_group.setSubelementHandlingMode(subelement_handling_mode)
scene.setSelectionField(selection_group)
return selection_group
def group_add_group_elements(group: FieldGroup, other_group: FieldGroup, highest_dimension_only=True):
"""
Add to group elements from other_group.
:param group: Zinc FieldGroup to modify.
:param other_group: Zinc FieldGroup to add elements from.
:param highest_dimension_only: If set (default), only add elements of
highest dimension present in other_group, otherwise add all dimensions.
"""
fieldmodule = group.getFieldmodule()
with ChangeManager(fieldmodule):
for dimension in range(3, 0, -1):
mesh = fieldmodule.findMeshByDimension(dimension)
other_element_group = other_group.getFieldElementGroup(mesh)
if other_element_group.isValid() and (other_element_group.getMeshGroup().getSize() > 0):
element_group = group.getFieldElementGroup(mesh)
if not element_group.isValid():
element_group = group.createFieldElementGroup(mesh)
mesh_group = element_group.getMeshGroup()
mesh_group.addElementsConditional(other_element_group)
if highest_dimension_only:
break
def group_add_group_nodes(group: FieldGroup, other_group: FieldGroup, nodeset: Nodeset):
"""
Add to group elements and/or nodes from other_group.
:param group: Zinc FieldGroup to modify.
:param other_group: Zinc FieldGroup to add nodes from.
:param nodeset: Nodeset to add nodes from.
"""
other_node_group = other_group.getFieldNodeGroup(nodeset)
if other_node_group.isValid() and (other_node_group.getNodesetGroup().getSize() > 0):
node_group = group.getFieldNodeGroup(nodeset)
if not node_group.isValid():
node_group = group.createFieldNodeGroup(nodeset)
nodeset_group = node_group.getNodesetGroup()
nodeset_group.addNodesConditional(other_group.getFieldNodeGroup(nodeset))
def field_is_managed_real_1_to_3_components(field_in: Field):
"""
Conditional function returning True if the field is real-valued
with up to 3 components, and is managed.
"""
return (field_in.getValueType() == Field.VALUE_TYPE_REAL) and \
(field_in.getNumberOfComponents() <= 3) and field_in.isManaged()
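# --- Example usage (a hedged sketch; assumes the OpenCMISS-Zinc Context API
# of the same release as the imports above) ---
if __name__ == '__main__':
    from opencmiss.zinc.context import Context
    context = Context('example')
    region = context.getDefaultRegion()
    scene = region.getScene()
    # Reuse an existing selection group if present, otherwise create one
    selection = get_scene_selection_group(scene) or create_scene_selection_group(scene)
    print('selection group:', selection.getName())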
|
py | b40ce04c01a0916e1e3d5a089a717a459b55db0e | a,b,c=map(int, input().split())
if a + b > c and b + c > a and a + c > b:
if a == b == c:
print(1)
elif a==b or b==c or a==c:
print(2)
else:
print(3)
else:
print(-1)
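# Worked examples (expected behaviour): "3 3 3" -> 1 (equilateral),
# "3 4 4" -> 2 (isosceles), "3 4 5" -> 3 (scalene), "1 2 9" -> -1 (not a triangle).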
|
py | b40ce050115b51a26aad69c9ef97372861a9f5fe | # The DE-ID format, or versions of it, is commonly used in medical
# deidentification. This core facility takes care of the central capabilities.
# Most DE-ID tags are like this:
#
# **EMAIL
#
# But some of them have additional content, like this:
#
# **DATE<5/7/09>
#
# Sometimes they're square brackets instead of angle brackets.
# There are also default forms for the content.
import re, random
from ReplacementEngine import *
from ReplacementEngine import _IDReplace
from ClearReplacementStrategy import *
#
# Rendering
#
import string
# This is complicated. It inherits some of its rendering behavior
# from the clear replacement strategy - and needs to override some
# of its methods, for person replacement - but otherwise, it's
# its own thing.
class DEIDStyleRenderingStrategy(ClearRenderingStrategy):
def __init__(self, engine):
ClearRenderingStrategy.__init__(self, engine)
self.lBracket, self.rBracket = engine.bracketPair
self.P_NAME_INDEX = 0
self.P_INIT_INDEX = 0
def Replace(self, pattern, **kw):
mName = pattern.__ctype__ + "Replace"
doIt = False
if hasattr(self, mName):
# This is ugly. I need to inherit some behavior
# from the ClearRenderingStrategy, but ONLY
# some of it. If the class the method is defined
# on isn't a child of THIS ROOT CLASS, then
# we need to punt. But the only way to find that out
# is to have a "new-style" class and search the
# __mro__ list. So I've made the parents new-style
# classes.
# I can't simply pick the methods that are defined
# here to let through, since children of this class
# might also define something.
for c in self.__class__.__mro__:
# It's gotta be in a local dictionary. hasattr()
# handles inheritance. If we pass DEIDStyleRenderingStrategy
# in the list, and we haven't found the entry,
# then we punt.
if c.__dict__.has_key(mName):
doIt = True
break
if c is DEIDStyleRenderingStrategy:
break
if doIt:
return getattr(self, mName)(pattern, **kw)
else:
return "**" + pattern.replacer.label
def _wrap(self, pattern, content):
return "**" + pattern.replacer.label + self.lBracket + content + self.rBracket
# People.
def _nextName(self):
s = string.uppercase[self.P_NAME_INDEX] * 3
self.P_NAME_INDEX = (self.P_NAME_INDEX + 1) % 26
return s
def _nextInit(self):
s = string.uppercase[self.P_INIT_INDEX]
self.P_INIT_INDEX = (self.P_INIT_INDEX + 1) % 26
return s
def _PERSONReplacementSeed(self, pattern):
# We need a first and a last name. We MIGHT need
# middle names.
return {"firstNameAlts": [self._nextName()],
"middleNames": None,
"lastName": self._nextName()}
def _getRSMiddleNames(self, seed, numNames):
if seed["middleNames"] is None:
seed["middleNames"] = []
while len(seed["middleNames"]) < numNames:
seed["middleNames"].append(self._nextName())
return seed["middleNames"][:numNames]
def PERSONReplace(self, pattern, **kw):
# Hm. What do we do here? Exactly what we
# do otherwise. We just need to make sure that
# the pattern is marked for all upper. And
# the user has to use the DEIDPersonCategory.
pattern.cap_status = ALL_UPPER
return self._wrap(pattern, ClearRenderingStrategy.PERSONReplace(self, pattern, **kw))
def AGEReplace(self, pattern, **kw):
# Presuming that we have some coherent age.
# If the upper bound and lower bound are not the
# same, then we have to pick some seed.
ageSeed = None
if pattern.ageUb == pattern.ageLb:
ageSeed = pattern.ageUb
elif int(pattern.ageUb) / 10 == int(pattern.ageLb) / 10:
# They're in the same decade.
ageSeed = pattern.ageLb
else:
            # pick a seed age anywhere within the known bounds (low, high)
            ageSeed = random.randint(pattern.ageLb, pattern.ageUb)
if ageSeed < 13:
return self._wrap(pattern, "birth-12")
elif ageSeed < 20:
return self._wrap(pattern, "in teens")
elif ageSeed > 89:
return self._wrap(pattern, "90+")
else:
return self._wrap(pattern, "in %d0s" % (int(ageSeed) / 10))
def DATEReplace(self, pattern, **kw):
return self._wrap(pattern, ClearRenderingStrategy.DATEReplace(self, pattern, **kw))
class DEIDStyleReplacementEngine(PIIReplacementEngine):
__rname__ = "clear -> DE-ID"
bracketPair = ("", "")
def createDigestionStrategy(self):
return ClearDigestionStrategy(self)
def createRenderingStrategy(self):
return DEIDStyleRenderingStrategy(self)
#
# Digestion
#
# We may have to do some date digestion, using the clear
# digester.
class DEIDStyleDigestionStrategy(DigestionStrategy):
def __init__(self, engine):
DigestionStrategy.__init__(self, engine)
self.deidPattern = engine.deidPattern
tags = engine.categories.keys()
self.patDict = {}
self.replPat = re.compile(self.deidPattern % "|".join(tags))
for tag in tags:
self.patDict[tag] = re.compile(("^" + self.deidPattern + "$") % tag)
self.dateDigester = None
def canCache(self, ctype):
return ctype in ["PERSON", "DATE", "AGE"]
def FindReplacedElements(self, s, tags):
return [(m.start(), m.end(), m.group(1)) for m in self.replPat.finditer(s)]
# We can get something out of names, ages, and dates.
# The name looks like this:
# **NAME<VVV>, **NAME<WWW Q. XXX>
# **NAME<SSS RRR QQQ PPP>
# Most of this is identical to PIIPersonCategory.Digest.
# Once we digest, the replacement should be identical to the parent,
# since we're working off the pattern.
INITPAT = re.compile("^[A-Z][.]?$")
def PERSONDigest(self, pat, seed):
p = self.patDict[pat.replacer.label]
m = p.match(seed)
name = m.group(3)
pat.cap_status = MIXED
# There will be no name extension.
pat.name_ext = ""
# Default is not to invert. Only invert
# if you find a reason to. Ditto one name.
pat.last_is_first = False
pat.one_name = False
toks = name.split()
if len(toks) == 1:
pat.one_name = True
middleNames = []
firstName = lastName = toks[0]
else:
firstName = toks[0]
lastName = toks[-1]
middleNames = toks[1:-1]
firstNameAlts = [firstName]
pat.mid_initials = []
for m in middleNames:
if self.INITPAT.match(m) is not None:
pat.mid_initials.append(True)
else:
pat.mid_initials.append(False)
# Finally, set the replacement keys.
# Any of the following can invoke the cache. Don't
# forget case insensitivity.
allKeys = [(None, lastName.upper())]
for firstName in firstNameAlts:
allKeys = allKeys + [(firstName.upper(), lastName.upper()),
(firstName.upper(), None)]
pat.setReplacementCacheKeys(allKeys)
# Possibilities: **AGE<in 30s> **AGE<birth-12> **AGE<in teens> **AGE<90+>
AGE_RE = re.compile("^in\s+(.*)s$")
def AGEDigest(self, pat, seed):
p = self.patDict[pat.replacer.label]
m = p.match(seed)
if m is not None:
age = m.group(3)
if age == "birth-12":
pat.ageLb = 0
pat.ageUb = 12
elif age == "in teens":
pat.ageLb = 13
pat.ageUb = 19
elif age == "90+":
pat.ageLb = 90
pat.ageUb = 120
else:
m = self.AGE_RE.match(age)
if m:
pat.ageLb = int(m.group(1))
pat.ageUb = pat.ageLb + 9
pat.spell = False
def DATEDigest(self, pat, seed):
p = self.patDict[pat.replacer.label]
m = p.match(seed)
if m is not None:
seed = m.group(3)
if self.dateDigester is None:
self.dateDigester = ClearDigestionStrategy(self.engine)
self.dateDigester.DATEDigest(pat, seed)
class DEIDStyleResynthesisEngine(PIIReplacementEngine):
__rname__ = "DE-ID -> clear"
deidPattern = None
def createDigestionStrategy(self):
return DEIDStyleDigestionStrategy(self)
def createRenderingStrategy(self):
return ClearRenderingStrategy(self)
|
py | b40ce0c81933ced9639b88c401488fc16913b1a0 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.enums",
marshal="google.ads.googleads.v4",
manifest={"MatchingFunctionOperatorEnum",},
)
class MatchingFunctionOperatorEnum(proto.Message):
r"""Container for enum describing matching function operator."""
class MatchingFunctionOperator(proto.Enum):
r"""Possible operators in a matching function."""
UNSPECIFIED = 0
UNKNOWN = 1
IN = 2
IDENTITY = 3
EQUALS = 4
AND = 5
CONTAINS_ANY = 6
__all__ = tuple(sorted(__protobuf__.manifest))
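# Usage sketch (illustrative comment, not part of the generated file): the nested
# proto-plus enum is addressed through its container message, e.g.
#   op = MatchingFunctionOperatorEnum.MatchingFunctionOperator.CONTAINS_ANY  # == 6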
|
py | b40ce1d028584d4016eea43ca4e4a8a1c273527b | """
Django settings for perftracker project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!5)es5!)c48g^1z-d#f5s=^d^x&i0ec-ec*k4fa&8z!&ly5)pp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# show several developer features (e.g. series analysis)
DEV_MODE = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'perftracker',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'request_logging.middleware.LoggingMiddleware',
]
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"django_auth_ldap.backend.LDAPBackend",
]
AUTH_LDAP_USER_ATTR_MAP = {
"first_name": "givenName",
"last_name": "sn",
"email": "mail",
}
ROOT_URLCONF = 'perftracker_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'perftracker_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
BOOTSTRAP3 = {
'css_url': '/static/bootstrap3/css/bootstrap.min.css',
'theme_url': '/static/bootstrap3/css/bootstrap-theme.min.css',
'javascript_in_head': True,
}
LOGGING = {
'disable_existing_loggers': False,
'version': 1,
'formatters': {
'verbose': {
'format': '{asctime} {levelname} {module} {process:d} {thread:d} {message}',
'style': '{',
},
'simple': {
'format': '{asctime} {levelname} {message}',
'style': '{',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': 'perftracker.log',
}
},
'loggers': {
'': {
'level': 'INFO',
'handlers': ['logfile'],
'propagate': False
},
'django.request': {
'handlers': ['logfile'],
'level': 'INFO', # change debug level as appropriate
'propagate': False,
},
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['logfile'],
'propagate': False,
},
'django_auth_ldap': {
'level': 'DEBUG',
'handlers': ['logfile'],
},
},
}
REQUEST_LOGGING_ENABLE_COLORIZE = False
REQUEST_LOGGING_MAX_BODY_LENGTH = 1024
ENV_NODE_DISPLAY_NAME_RE = ('', '') # passed to re.sub() to replace parts of HW names
curr_dir = os.path.abspath(os.path.dirname(__file__))
if os.path.exists(os.path.join(curr_dir, "settings_local.py")):
sys.path.append(curr_dir)
from settings_local import *
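# Illustrative sketch: a hypothetical settings_local.py placed next to this file would be
# star-imported above and can override any of the settings in this module, e.g.:
#
#   DEBUG = False
#   SECRET_KEY = 'replace-me-with-a-production-secret'
#   ALLOWED_HOSTS = ['perftracker.example.com']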
|
py | b40ce2f792884a84690ee9f9a4e91742ee4f440f | # The MIT License (MIT)
# Copyright (c) 2020-2021 CoML
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Rachid RIAD, Hadrien TITEUX, Léopold FAVRE
"""
####################
Continuum and corpus
####################
"""
import csv
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from dataclasses import dataclass
from functools import total_ordering
from pathlib import Path
from typing import Optional, Tuple, List, Union, TYPE_CHECKING, Generator
import cvxpy as cp
import numpy as np
from pyannote.core import Annotation, Segment, Timeline
from pyannote.database.util import load_rttm
from sortedcontainers import SortedDict, SortedSet
from typing_extensions import Literal
from .dissimilarity import AbstractDissimilarity
from .numba_utils import build_A
if TYPE_CHECKING:
from .alignment import UnitaryAlignment, Alignment, SoftAlignment
from .sampler import AbstractContinuumSampler, StatisticalContinuumSampler
CHUNK_SIZE = (10**6) // os.cpu_count()
# defining Annotator type
Annotator = str
PivotType = Literal["float_pivot", "int_pivot"]
PrecisionLevel = Literal["high", "medium", "low"]
# percentages for the precision
PRECISION_LEVEL = {
"high": 0.01,
"medium": 0.02,
"low": 0.1
}
@total_ordering
@dataclass(frozen=True, eq=True)
class Unit:
"""
Represents an annotated unit, e.g., a time segment and (optionally)
a text annotation. Can be sorted or used in a set. If two units share
the same time segment, they're sorted alphabetically using their
annotation. The `None` annotation is first in the "alphabet"
>>> new_unit = Unit(segment=Segment(17.5, 21.3), annotation='Verb')
>>> new_unit.segment.start, new_unit.segment.end
    (17.5, 21.3)
>>> new_unit.annotation
'Verb'
"""
segment: Segment
annotation: Optional[str] = None
def __lt__(self, other: 'Unit'):
if self.segment == other.segment:
if self.annotation is None:
return True
elif other.annotation is None:
return False
else:
return self.annotation < other.annotation
else:
return self.segment < other.segment
class Continuum:
"""
    Representation of a continuum, i.e. a set of annotated segments by multiple annotators.
    It is implemented as a dictionary of sets (all sorted):
``{'annotator1': {unit1, ...}, ...}``
"""
uri: str
_annotations: SortedDict
bound_inf: float
bound_sup: float
def __init__(self, uri: Optional[str] = None):
"""
Default constructor.
Parameters
----------
uri: optional str
name of annotated resource (e.g. audio or video file)
"""
self.uri = uri
# Structure {annotator -> SortedSet}
self._annotations: SortedDict = SortedDict()
self._categories: SortedSet = SortedSet()
self.bound_inf = 0.0
self.bound_sup = 0.0
self.best_window_size = np.inf
# Default best window size. Re-measure it with self.measure_best_window_size
@classmethod
def from_csv(cls,
path: Union[str, Path],
discard_invalid_rows=True,
delimiter: str = ","):
"""
        Load annotations from a CSV file, with structure
annotator, category, segment_start, segment_end.
.. warning::
The CSV file mustn't have any header
Parameters
----------
path: Path or str
Path to the CSV file storing annotations
discard_invalid_rows: bool
If set, every invalid row is ignored when parsing the file.
delimiter: str
CSV columns delimiter. Defaults to ','
Returns
-------
Continuum:
New continuum object loaded from the CSV
"""
if isinstance(path, str):
path = Path(path)
continuum = cls()
with open(path) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
for row in reader:
seg = Segment(float(row[2]), float(row[3]))
try:
continuum.add(row[0], seg, row[1])
except ValueError as e:
if discard_invalid_rows:
print(f"Discarded invalid segment : {str(e)}")
else:
raise e
return continuum
@classmethod
def from_rttm(cls, path: Union[str, Path]) -> 'Continuum':
"""
Load annotations from a RTTM file. The file name field will be used
as an annotation's annotator
Parameters
----------
path: Path or str
Path to the RTTM file storing annotations
Returns
-------
continuum : Continuum
New continuum object loaded from the RTTM file
"""
annotations = load_rttm(str(path))
continuum = cls()
for uri, annot in annotations.items():
continuum.add_annotation(uri, annot)
return continuum
def copy_flush(self) -> 'Continuum':
"""
Returns a copy of the continuum without any annotators/annotations, but with every other information
"""
continuum = Continuum(self.uri)
continuum.bound_inf, continuum.bound_sup = self.bound_inf, self.bound_sup
continuum.best_window_size = self.best_window_size
return continuum
def copy(self) -> 'Continuum':
"""
Makes a copy of the current continuum.
Returns
-------
continuum: Continuum
"""
continuum = Continuum(self.uri)
continuum._annotations = deepcopy(self._annotations)
continuum.bound_inf, continuum.bound_sup = self.bound_inf, self.bound_sup
continuum.best_window_size = self.best_window_size
return continuum
def __bool__(self):
"""Truthiness, basically tests for emptiness
>>> if continuum:
... # continuum is not empty
... else:
... # continuum is empty
"""
return not all(len(annotations) == 0 for annotations in self._annotations.values())
def __len__(self):
return len(self._annotations)
def __eq__(self, other: 'Continuum'):
"""Two continua are equal if and only if all their annotators and all
their units are strictly equal"""
if not isinstance(other, Continuum):
return False
if self.annotators != other.annotators:
return False
if self.num_units != other.num_units:
return False
for (my_annotator, my_unit), (other_annotator, other_unit) in zip(self, other):
if my_annotator != other_annotator:
return False
elif my_unit != other_unit:
return False
return True
def __ne__(self, other: 'Continuum'):
return not self == other
@property
def num_units(self) -> int:
"""Total number of units in the continuum."""
return sum(len(units) for units in self._annotations.values())
@property
def categories(self) -> SortedSet:
"""Returns the (alphabetically) sorted set of all the continuum's annotations's categories."""
return self._categories
@property
def category_weights(self) -> SortedDict:
"""
Returns a dictionary where the keys are the categories in the continuum, and a key's value
is the proportion of occurrence of the category in the continuum.
"""
weights = SortedDict()
nb_units = 0
for _, unit in self:
nb_units += 1
if unit.annotation not in weights:
weights[unit.annotation] = 1
else:
weights[unit.annotation] += 1
for annotation in weights.keys():
weights[annotation] /= nb_units
return weights
@property
def bounds(self) -> Tuple[float, float]:
"""Bounds of the continuum. Initially defined as (0, 0),
they grow as annotations are added."""
return self.bound_inf, self.bound_sup
@property
def num_annotators(self) -> int:
"""Number of annotators"""
return len(self._annotations)
@property
def avg_num_annotations_per_annotator(self) -> float:
"""Average number of annotated segments per annotator"""
return self.num_units / self.num_annotators
@property
def max_num_annotations_per_annotator(self):
"""The maximum number of annotated segments an annotator has
in this continuum"""
max_num_annotations_per_annotator = 0
for annotator in self._annotations:
max_num_annotations_per_annotator = np.max(
[max_num_annotations_per_annotator,
len(self[annotator])])
return max_num_annotations_per_annotator
@property
def avg_length_unit(self) -> float:
"""Mean of the annotated segments' durations"""
return sum(unit.segment.duration for _, unit in self) / self.num_units
def add_annotator(self, annotator: Annotator):
"""
Adds the annotator to the set, with no annotated segment. Does nothing if already present.
"""
if annotator not in self._annotations:
self._annotations[annotator] = SortedSet()
def add(self, annotator: Annotator, segment: Segment, annotation: Optional[str] = None):
"""
Add a segment to the continuum
Parameters
----------
annotator: Annotator (str)
The annotator that produced the added annotation
segment: `pyannote.core.Segment`
The segment for that annotation
annotation: optional str
That segment's annotation, if any.
"""
if segment.duration == 0.0:
raise ValueError("Tried adding segment of duration 0.0")
if annotator not in self._annotations:
self._annotations[annotator] = SortedSet()
if annotation is not None:
self._categories.add(annotation)
self._annotations[annotator].add(Unit(segment, annotation))
self.bound_inf = min(self.bound_inf, segment.start)
self.bound_sup = max(self.bound_sup, segment.end)
def add_annotation(self, annotator: Annotator, annotation: Annotation):
"""
Add a full pyannote annotation to the continuum.
Parameters
----------
annotator: Annotator (str)
A string id for the annotator who produced that annotation.
annotation: pyannote.core.Annotation
A pyannote `Annotation` object. If a label is present for a given
segment, it will be considered as that label's annotation.
"""
for segment, _, label in annotation.itertracks(yield_label=True):
self.add(annotator, segment, label)
def add_timeline(self, annotator: Annotator, timeline: Timeline):
"""
Add a full pyannote timeline to the continuum.
Parameters
----------
annotator: Annotator (str)
A string id for the annotator who produced that timeline.
timeline: `pyannote.core.Timeline`
A pyannote `Annotation` object. No annotation will be attached to
segments.
"""
for segment in timeline:
self.add(annotator, segment)
def reset_bounds(self):
"""
        Resets the bounds of the continuum (used in displaying and/or sampling) to the start of the
        leftmost annotation and the end of the rightmost annotation.
"""
self.bound_inf = min((next(iter(annotations)).segment.start for annotations in self._annotations.values() if annotations),
default=0.0)
self.bound_sup = max((next(reversed(annotations)).segment.end for annotations in self._annotations.values() if annotations),
default=0.0)
def add_textgrid(self,
annotator: Annotator,
tg_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add a textgrid file's content to the Continuum
Parameters
----------
annotator: Annotator (str)
A string id for the annotator who produced that TextGrid.
tg_path: `Path` or str
Path to the textgrid file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from textgrid import TextGrid, IntervalTier
tg = TextGrid.fromFile(str(tg_path))
for tier_name in tg.getNames():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
tier: IntervalTier = tg.getFirst(tier_name)
for interval in tier:
if not interval.mark:
continue
if use_tier_as_annotation:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
tier_name)
else:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
interval.mark)
def add_elan(self,
annotator: Annotator,
eaf_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add an Elan (.eaf) file's content to the Continuum
Parameters
----------
annotator: Annotator (str)
A string id for the annotator who produced that ELAN file.
eaf_path: `Path` or str
Path to the .eaf (ELAN) file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from pympi import Eaf
eaf = Eaf(eaf_path)
for tier_name in eaf.get_tier_names():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
for start, end, value in eaf.get_annotation_data_for_tier(tier_name):
if use_tier_as_annotation:
self.add(annotator, Segment(start, end), tier_name)
else:
self.add(annotator, Segment(start, end), value)
def merge(self, continuum: 'Continuum', in_place: bool = False) -> Optional['Continuum']:
"""
        Merge two continua together. Units from the same annotators
are also merged together (with the usual order of units).
Parameters
----------
continuum: Continuum
other continuum to merge into the current one.
in_place: bool
If set to true, the merge is done in place, and the current
continuum (self) is the one being modified. A new continuum
resulting in the merge is returned otherwise.
Returns
-------
Continuum, optional: Returns the merged copy if in_place is set to True.
"""
current_cont = self if in_place else self.copy()
for annotator in continuum.annotators:
# ensure all annotators are added to the continuum,
# even those who do not have any annotated Units
current_cont.add_annotator(annotator)
for annotator, unit in continuum:
current_cont.add(annotator, unit.segment, unit.annotation)
if not in_place:
return current_cont
def __add__(self, other: 'Continuum'):
"""
Same as a "not-in-place" merge.
Parameters
----------
other: Continuum
the continuum to merge into `self`
"""
return self.merge(other, in_place=False)
def __getitem__(self, keys: Union[str, Tuple[str, int]]) -> Union[SortedSet, Unit]:
"""Get the set of annotations from an annotator, or a specific annotation.
(Deep copies are returned to ensure some constraints cannot be violated)
>>> continuum['Alex']
SortedSet([Unit(segment=<Segment(2, 9)>, annotation='1'), Unit(segment=<Segment(11, 17)>, ...
>>> continuum['Alex', 0]
Unit(segment=<Segment(2, 9)>, annotation='1')
Parameters
----------
keys: Annotator or Annotator,int
Raises
------
KeyError
"""
try:
if isinstance(keys, str):
return deepcopy(self._annotations[keys])
else:
annotator, idx = keys
try:
return deepcopy(self._annotations[annotator][idx])
except IndexError:
raise IndexError(f'index {idx} of annotations by {annotator} is out of range')
except KeyError:
raise KeyError('key must be either Annotator (from the continuum) or (Annotator, int)')
def __iter__(self) -> Generator[Tuple[Annotator, Unit], None, None]:
"""
Iterates over (annotator, unit) tuples for every unit in the continuum.
"""
for annotator, annotations in self._annotations.items():
for unit in annotations:
yield annotator, unit
def iter_annotator(self, annotator: Annotator) -> Generator[Unit, None, None]:
"""
Iterates over the annotations of the given annotator.
Raises
------
KeyError
            If the annotator is not in this continuum.
"""
for unit in self._annotations[annotator]:
yield unit
def remove(self, annotator: Annotator, unit: Unit):
"""
Removes the given unit from the given annotator's annotations.
Keeps the bounds of the continuum as they are.
Raises
------
KeyError
if the unit is not from the annotator's annotations.
"""
annotations: SortedSet = self._annotations[annotator]
annotations.remove(unit)
@property
def annotators(self) -> SortedSet:
"""Returns a sorted set of the annotators in the Continuum
        >>> continuum.annotators
        SortedSet(['annot_ref', 'annotator_a', 'annotator_b'])
"""
return SortedSet(self._annotations.keys())
def iterunits(self, annotator: Annotator):
"""Iterate over units from the given annotator
(in chronological and alphabetical order if annotations are present)
>>> for unit in self.iterunits("Max"):
... # do something with the unit
"""
return iter(self._annotations[annotator])
def get_best_soft_alignment(self, dissimilarity: AbstractDissimilarity) -> 'SoftAlignment':
assert len(self.annotators) >= 2 and self, "Disorder cannot be computed with less than two annotators, or " \
"without annotations."
sizes = np.empty(self.num_annotators, dtype=np.int32)
for i, units in enumerate(self._annotations.values()):
sizes[i] = len(units)
disorders, possible_unitary_alignments = dissimilarity.valid_alignments(self)
# Definition of the integer linear program
n = len(disorders)
# Constraints matrix ("every unit must appear once and only once")
A = build_A(possible_unitary_alignments, sizes)
x = cp.Variable(shape=(n,), boolean=True)
try:
import cylp
cp.Problem(cp.Minimize(disorders.T @ x), [A @ x >= 1]).solve(solver=cp.CBC)
except (ImportError, cp.SolverError):
logging.warning("CBC solver not installed. Using GLPK.")
cp.Problem(cp.Minimize(disorders.T @ x), [A @ x >= 1]).solve(solver=cp.GLPK_MI)
assert x.value is not None, "The linear solver couldn't find an alignment with minimal disorder " \
"(likely because the amount of possible unitary alignments was too high)"
# compare with 0.9 as cvxpy returns 1.000 or small values i.e. 10e-14
chosen_alignments_ids, = np.where(x.value > 0.9)
chosen_alignments: np.ndarray = possible_unitary_alignments[chosen_alignments_ids]
alignments_disorders: np.ndarray = disorders[chosen_alignments_ids]
from .alignment import UnitaryAlignment, SoftAlignment
set_unitary_alignements = []
for alignment_id, alignment in enumerate(chosen_alignments):
u_align_tuple = []
for annotator_id, unit_id in enumerate(alignment):
annotator, units = self._annotations.peekitem(annotator_id)
try:
unit = units[unit_id]
u_align_tuple.append((annotator, unit))
except IndexError: # it's a "null unit"
u_align_tuple.append((annotator, None))
unitary_alignment = UnitaryAlignment(list(u_align_tuple))
unitary_alignment.disorder = alignments_disorders[alignment_id]
set_unitary_alignements.append(unitary_alignment)
return SoftAlignment(set_unitary_alignements,
continuum=self,
check_validity=False,
disorder=np.sum(alignments_disorders) / self.avg_num_annotations_per_annotator)
def get_first_window(self, dissimilarity: AbstractDissimilarity, w: int = 1) -> Tuple['Continuum', float]:
"""
Returns a tuple (continuum, x_limit), where :
- Before x_limit, there are the (w * nb_annotators) leftmost annotations
of the continuum.
- After x_limit, there are (approximately) all the annotations from the continuum
that have a dissimilarity lower than (delta_empty * nb_annotators) with the annotations
before x_limit.
"""
# Everything is converted to zippable lists. This is necessary for this method to have
# a simple and known complexity for choosing the most advantageous window size.
annotators = list(self.annotators)
annotations = list(self._annotations.values())
sizes = list(map(len, annotations))
indexes = [0] * len(annotators) # Indexes for "advancing" homogeneously in the continuum
smallest_unit = Unit(Segment(-np.inf, -np.inf), None)
window = Continuum()
for annotator in annotators:
window.add_annotator(annotator)
taken_units = 0
rightmost_unit = smallest_unit
to_take = min(float(np.sum(sizes)), w * self.num_annotators)
while taken_units < to_take: # At least (nb_annotators * w) units
x_limit = np.inf
for units, index, size in zip(annotations, indexes, sizes): # Taking the rightmost unit not already taken
if index >= size: # All annotations have been consumed
continue
unit = units[index]
x_limit = min(x_limit, unit.segment.end)
for i, (annotator, units, index, size) in enumerate(zip(annotators, annotations, indexes, sizes)):
if index >= size: # All annotations have been consumed
continue
unit = units[index] # Adding the units before x_limit. This will take between 1 and nb_annotator units.
if unit.segment.end <= x_limit:
window.add(annotator, unit.segment, unit.annotation)
rightmost_unit = max(unit, rightmost_unit) # Rightmost taken unit is kept
                    taken_units += 1  # for selection of additional units.
indexes[i] += 1
x_limit = window.bound_sup
        # Now we add the additional annotations, "reachable" from those already selected.
for annotator, units, index, size in zip(annotators, annotations, indexes, sizes):
while index < size:
unit = units[index]
if dissimilarity.d(rightmost_unit, unit) > dissimilarity.delta_empty * self.num_annotators:
break
window.add(annotator, unit.segment, unit.annotation)
index += 1
return window, x_limit
def get_fast_alignment(self, dissimilarity: AbstractDissimilarity, window_size: int) -> 'Alignment':
"""Returns an 'approximation' of the best alignment (Very likely to be the actual best alignment for
continua with limited overlapping)"""
from .alignment import Alignment
copy = self.copy()
unitary_alignments = []
disorders = []
while copy:
window, x_limit = copy.get_first_window(dissimilarity, window_size)
# Window contains each annotator's first annotations
# We retain only the leftmost unitary alignment in the best alignment of the window,
# as it is the most likely to be in the global best alignment
best_alignment = window.get_best_alignment(dissimilarity)
for chosen in best_alignment.take_until_limit(x_limit):
unitary_alignments.append(chosen)
disorders.append(chosen.disorder)
for annotator, unit in chosen.n_tuple:
if unit is not None:
copy.remove(annotator, unit) # Now we remove the units from the chosen alignment.
return Alignment(unitary_alignments,
self,
check_validity=False, # Validity has been thoroughly tested
disorder=np.sum(disorders) / self.avg_num_annotations_per_annotator)
def measure_best_window_size(self, dissimilarity: AbstractDissimilarity):
"""
        Sets the best window size for computing the fast-gamma of this continuum, by sampling
        the computational complexity function.
"""
smallest_window, _ = self.get_first_window(dissimilarity, 1)
smallest_window.get_best_alignment(dissimilarity)
s = smallest_window.max_num_annotations_per_annotator
n = int(self.avg_num_annotations_per_annotator)
p = int(self.num_annotators)
window_sizes = np.arange(1, max(2, self.max_num_annotations_per_annotator))
numba_factor = 1/20
def f(w):
return (n * p + # Copying the continuum
+ ((n - w) * p / 2 + 2 * p + (w + s * p) * p # getting first window
+ (n - w) * p / 2 + numba_factor * (w + s) ** p # getting best alignment
+ (w + s) * p * np.log2((w + s) * p) + w * p # getting the w leftmost alignments & adding them
) * (n / w))
# adding the log factorial corresponds to the emptying-the-continuum-unit-by-unit time.
logfactorials = np.log2(window_sizes)
for i in range(1, len(logfactorials)):
logfactorials[i] += logfactorials[i - 1]
times = f(window_sizes) + p * logfactorials
min_index = np.argmin(times)
        if times[min_index] < n * p + numba_factor * n**p:  # Check if fast-gamma is advantageous compared to gamma
self.best_window_size = window_sizes[min_index]
else:
logging.warning("Fast-gamma disadvantageous, using normal gamma.")
def get_best_alignment(self, dissimilarity: AbstractDissimilarity) -> 'Alignment':
"""
Returns the best alignment of the continuum for the given dissimilarity. This alignment comes
with the associated disorder, so you can obtain it in constant time with alignment.disorder.
Beware that the computational complexity of the algorithm is very high
:math:`(O(p_1 \\times p_2 \\times ... \\times p_n)` where :math:`p_i` is the number
of annotations of annotator :math:`i`).
Parameters
----------
dissimilarity: AbstractDissimilarity
the dissimilarity that will be used to compute unit-to-unit disorder.
"""
assert len(self.annotators) >= 2 and self, "Disorder cannot be computed with less than two annotators, or " \
"without annotations."
sizes = np.empty(self.num_annotators, dtype=np.int32)
for i, units in enumerate(self._annotations.values()):
sizes[i] = len(units)
disorders, possible_unitary_alignments = dissimilarity.valid_alignments(self)
# Definition of the integer linear program
n = len(disorders)
# Constraints matrix ("every unit must appear once and only once")
A = build_A(possible_unitary_alignments, sizes)
x = cp.Variable(shape=(n,), boolean=True)
try:
import cylp
cp.Problem(cp.Minimize(disorders.T @ x), [A @ x == 1]).solve(solver=cp.CBC)
except (ImportError, cp.SolverError):
logging.warning("CBC solver not installed. Using GLPK.")
matmul = A @ x
cp.Problem(cp.Minimize(disorders.T @ x), [1 <= matmul, matmul <= 1]).solve(solver=cp.GLPK_MI)
assert x.value is not None, "The linear solver couldn't find an alignment with minimal disorder " \
"(likely because the amount of possible unitary alignments was too high)"
# compare with 0.9 as cvxpy returns 1.000 or small values i.e. 10e-14
chosen_alignments_ids, = np.where(x.value > 0.9)
chosen_alignments: np.ndarray = possible_unitary_alignments[chosen_alignments_ids]
alignments_disorders: np.ndarray = disorders[chosen_alignments_ids]
from .alignment import UnitaryAlignment, Alignment
set_unitary_alignements = []
for alignment_id, alignment in enumerate(chosen_alignments):
u_align_tuple = []
for annotator_id, unit_id in enumerate(alignment):
annotator, units = self._annotations.peekitem(annotator_id)
try:
unit = units[unit_id]
u_align_tuple.append((annotator, unit))
except IndexError: # it's a "null unit"
u_align_tuple.append((annotator, None))
unitary_alignment = UnitaryAlignment(list(u_align_tuple))
unitary_alignment.disorder = alignments_disorders[alignment_id]
set_unitary_alignements.append(unitary_alignment)
return Alignment(set_unitary_alignements,
continuum=self,
# Validity of results from get_best_alignments have been thoroughly tested :
check_validity=False,
disorder=np.sum(alignments_disorders) / self.avg_num_annotations_per_annotator)
def compute_gamma(self,
dissimilarity: Optional['AbstractDissimilarity'] = None,
n_samples: int = 30,
precision_level: Optional[Union[float, PrecisionLevel]] = None,
ground_truth_annotators: Optional[SortedSet] = None,
sampler: 'AbstractContinuumSampler' = None,
fast: bool = False,
soft: bool = False) -> 'GammaResults':
"""
Parameters
----------
dissimilarity: AbstractDissimilarity, optional
dissimilarity instance. Used to compute the disorder between units. If not set, it defaults
to the combined categorical dissimilarity with parameters taken from the java implementation.
n_samples: optional int
number of random continuum sampled from this continuum used to
estimate the gamma measure
precision_level: optional float or "high", "medium", "low"
error percentage of the gamma estimation. If a literal
precision level is passed (e.g. "medium"), the corresponding numerical
            value will be used (high: 1%, medium: 2%, low: 10%)
ground_truth_annotators: SortedSet of str
            if set, the random continua will only be sampled from these
annotators. This should be used when you want to compare a prediction
against some ground truth annotation.
sampler: AbstractContinuumSampler
            Sampler object, which implements a sampling strategy for creating random continua used
to calculate the expected disorder. If not set, defaults to the Statistical continuum sampler
fast:
Sets the algorithm to the much faster fast-gamma. It's supposed to be less precise than the "canonical"
algorithm from Mathet 2015, but usually isn't.
Performance gains and precision are explained in the Performance section of the documentation.
soft:
            Activate soft-gamma, an alternative measure that uses a slightly different definition of an
alignment. For further information, please consult the 'Soft-Gamma' section of the documentation.
Incompatible with fast-gamma : raises an error if both 'fast' and 'soft' are set to True.
"""
from .dissimilarity import CombinedCategoricalDissimilarity
if dissimilarity is None:
dissimilarity = CombinedCategoricalDissimilarity()
if sampler is None:
from .sampler import StatisticalContinuumSampler
sampler = StatisticalContinuumSampler()
sampler.init_sampling(self, ground_truth_annotators)
job = _compute_best_alignment_job
if soft and fast:
raise NotImplementedError("Fast-gamma and Soft-gamma are not compatible with each other.")
if soft:
job = _compute_soft_alignment_job
# Multiprocessed computation of sample disorder
if fast:
job = _compute_fast_alignment_job
self.measure_best_window_size(dissimilarity)
# Multithreaded computation of sample disorder
with ThreadPoolExecutor(max_workers=os.cpu_count()) as p:
# Launching jobs
logging.info(f"Starting computation for the best alignment and a batch of {n_samples} random samples...")
best_alignment_task = p.submit(job,
*(dissimilarity, self))
result_pool = [
# Step one : computing the disorders of a batch of random samples from the continuum (done in parallel)
p.submit(job,
*(dissimilarity, sampler.sample_from_continuum))
for _ in range(n_samples)
]
chance_best_alignments: List[Alignment] = []
chance_disorders: List[float] = []
# Obtaining results
best_alignment = best_alignment_task.result()
logging.info("Best alignment obtained")
for i, result in enumerate(result_pool):
chance_best_alignments.append(result.result())
logging.info(f"finished computation of random sample dissimilarity {i + 1}/{n_samples}")
chance_disorders.append(chance_best_alignments[-1].disorder)
logging.info("done.")
if precision_level is not None:
if isinstance(precision_level, str):
precision_level = PRECISION_LEVEL[precision_level]
assert 0 < precision_level < 1.0
                # If the variation of the disorders of the samples is too high, others are generated.
# taken from subsection 5.3 of the original paper
# confidence at 95%, i.e., 1.96
variation_coeff = np.std(chance_disorders) / np.mean(chance_disorders)
confidence = 1.96
required_samples = np.ceil((variation_coeff * confidence / precision_level) ** 2).astype(np.int32)
if required_samples > n_samples:
logging.info(f"Computing second batch of {required_samples - n_samples} "
f"because variation was too high.")
result_pool = [
p.submit(job,
*(dissimilarity, sampler.sample_from_continuum))
for _ in range(required_samples - n_samples)
]
for i, result in enumerate(result_pool):
chance_best_alignments.append(result.result())
logging.info(f"finished computation of additionnal random sample dissimilarity "
f"{i + 1}/{required_samples - n_samples}")
logging.info("done.")
return GammaResults(
best_alignment=best_alignment,
chance_alignments=chance_best_alignments,
precision_level=precision_level,
dissimilarity=dissimilarity
)
def to_csv(self, path: Union[str, Path], delimiter=","):
if isinstance(path, str):
path = Path(path)
with open(path, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=delimiter)
for annotator, unit in self:
writer.writerow([annotator, unit.annotation,
unit.segment.start, unit.segment.end])
def _repr_png_(self):
"""IPython notebook support
See also
--------
:mod:`pygamma_agreement.notebook`
"""
from .notebook import repr_continuum
return repr_continuum(self)
@dataclass
class GammaResults:
"""
Gamma results object. Stores the information about a gamma measure computation,
used for getting the values of measures from the gamma family (gamma, gamma-cat and gamma-k).
"""
best_alignment: 'Alignment'
chance_alignments: List['Alignment']
dissimilarity: AbstractDissimilarity
precision_level: Optional[float] = None
@property
def n_samples(self):
"""Number of samples used for computation of the expected disorder."""
return len(self.chance_alignments)
@property
def alignments_nb(self):
"""Number of unitary alignments in the best alignment."""
return len(self.best_alignment.unitary_alignments)
@property
def observed_disorder(self) -> float:
"""Returns the disorder of the computed best alignment, i.e, the
observed disagreement."""
return self.best_alignment.disorder
@property
def expected_disorder(self) -> float:
"""Returns the expected disagreement for computed random samples, i.e.,
        the mean of the sampled continua's disorders"""
return float(np.mean([align.disorder for align in self.chance_alignments]))
@property
def approx_gamma_range(self):
"""Returns a tuple of the expected boundaries of the computed gamma,
obtained using the expected disagreement and the precision level"""
if self.precision_level is None:
raise ValueError("No precision level has been set, cannot compute"
"the gamma boundaries")
return (1 - self.observed_disorder / (self.expected_disorder *
(1 - self.precision_level)),
1 - self.observed_disorder / (self.expected_disorder *
(1 + self.precision_level)))
@property
def gamma(self) -> float:
"""Returns the gamma value"""
observed_disorder = self.observed_disorder
if observed_disorder == 0:
return 1
return 1 - observed_disorder / self.expected_disorder
@property
def gamma_cat(self) -> float:
"""Returns the gamma-cat value"""
with ThreadPoolExecutor(max_workers=os.cpu_count()) as p:
observed_disorder_job = p.submit(_compute_gamma_k_job,
*(self.dissimilarity, self.best_alignment, None))
chance_disorders_jobs = [
p.submit(_compute_gamma_k_job,
*(self.dissimilarity, alignment, None))
for alignment in self.chance_alignments
]
observed_disorder = observed_disorder_job.result()
if observed_disorder == 0:
return 1
expected_disorder = float(np.mean(np.array([job_res.result() for job_res in chance_disorders_jobs])))
if expected_disorder == 0:
return 0
return 1 - observed_disorder / expected_disorder
def gamma_k(self, category: str) -> float:
"""Returns the gamma-k value for the given category"""
with ThreadPoolExecutor(max_workers=os.cpu_count()) as p:
observed_disorder_job = p.submit(_compute_gamma_k_job,
*(self.dissimilarity, self.best_alignment, category))
chance_disorders_jobs = [
p.submit(_compute_gamma_k_job,
*(self.dissimilarity, alignment, category))
for alignment in self.chance_alignments
]
observed_disorder = observed_disorder_job.result()
if observed_disorder == 0:
return 1
expected_disorder = float(np.mean(np.array([job_res.result() for job_res in chance_disorders_jobs])))
return 1 - observed_disorder / expected_disorder
def _compute_best_alignment_job(dissimilarity: AbstractDissimilarity,
continuum: Continuum):
"""
    Function used to launch a multiprocessed job for calculating the best alignment of a continuum
using the given dissimilarity.
"""
return continuum.get_best_alignment(dissimilarity)
def _compute_fast_alignment_job(dissimilarity: AbstractDissimilarity,
continuum: Continuum):
"""
Function used to launch a multiprocessed job for calculating an approximation of
    the best alignment of a continuum, using the given dissimilarity.
"""
if continuum.best_window_size == np.inf: # window size is set to infinity when normal gamma is better.
return continuum.get_best_alignment(dissimilarity)
return continuum.get_fast_alignment(dissimilarity, continuum.best_window_size)
def _compute_soft_alignment_job(dissimilarity: AbstractDissimilarity,
continuum: Continuum):
return continuum.get_best_soft_alignment(dissimilarity)
def _compute_gamma_k_job(dissimilarity: AbstractDissimilarity,
alignment: 'Alignment',
category: Optional[str]):
return alignment.gamma_k_disorder(dissimilarity, category)
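# Minimal usage sketch (illustrative, not part of the library): build a small continuum by
# hand and compute the gamma agreement with the default dissimilarity. Annotator names,
# segments and categories below are made up.
if __name__ == "__main__":
    continuum = Continuum()
    continuum.add("annotator_a", Segment(0.0, 5.0), "Noun")
    continuum.add("annotator_a", Segment(6.0, 10.0), "Verb")
    continuum.add("annotator_b", Segment(0.5, 5.5), "Noun")
    continuum.add("annotator_b", Segment(6.2, 9.5), "Verb")
    gamma_results = continuum.compute_gamma(n_samples=10)
    print(f"gamma = {gamma_results.gamma:.3f}")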
|
py | b40ce3743d816cd595271bb6defeeed4e8c1dc67 | from loguru import logger
from chatbot import register_call
import global_variables
import yaml
import random
import os
from pykeepass import PyKeePass
from pynput.keyboard import Key, Listener, Controller as keyboard_controller
from fuzzywuzzy import fuzz
import json
import numpy as np
@register_call("getPassword")
def getPassword(session_id = "general", entry="none"):
cfg = None
    # Load the intent's own configuration file
config_path = os.path.join('intents','functions','password','config_password.yml')
with open(config_path, "r", encoding='utf-8') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    # Get the language from the global configuration file
LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
db_file = cfg['intent']['password']['db_file']
key_file = cfg['intent']['password']['key_file']
typed_pw = cfg['intent']['password'][LANGUAGE]['typed_pw']
db_file = os.path.join('intents','functions','password',db_file)
key_file = os.path.join('intents','functions','password',key_file)
if not os.path.exists(db_file):
return cfg['intent']['password'][LANGUAGE]['db_not_found']
if not os.path.exists(key_file):
return cfg['intent']['password'][LANGUAGE]['key_not_found']
UNKNOWN_ENTRY = random.choice(cfg['intent']['password'][LANGUAGE]['unknown_entry'])
UNKNOWN_ENTRY = UNKNOWN_ENTRY.format(entry)
NO_VOICE_MATCH = cfg['intent']['password'][LANGUAGE]['no_voice_match']
    # Could the intent's configuration file be loaded?
if cfg:
try:
kp = PyKeePass(os.path.abspath(db_file), keyfile=os.path.abspath(key_file))
except Exception as e:
return cfg['intent']['password'][LANGUAGE]['could_not_access_keystore']
        # Verify the speaker's voice
fp_entry = kp.find_entries(title='_fingerprint', first=True)
if fp_entry:
a = json.loads(fp_entry.notes)
b = global_variables.voice_assistant.current_speaker_fingerprint
nx = np.array(a)
ny = np.array(b)
cosDist = 1 - np.dot(nx, ny) / np.linalg.norm(nx) / np.linalg.norm(ny)
if (cosDist >= 0.3):
return NO_VOICE_MATCH
entries = kp.entries
for title in entries:
ratio = fuzz.ratio(title.title.lower(), entry.lower())
logger.info("Übereinstimmung von {} und {} ist {}%", title.title, entry, ratio)
if ratio > 70:
if (title):
keyboard = keyboard_controller()
keyboard.type(title.password)
return typed_pw.format(title.title)
return UNKNOWN_ENTRY
else:
logger.error("Konnte Konfigurationsdatei für Intent 'password' nicht laden.")
return
@register_call("getUsername")
def getUsername(session_id = "general", entry="none"):
cfg = None
    # Load the intent's own configuration file
config_path = os.path.join('intents','functions','password','config_password.yml')
with open(config_path, "r", encoding='utf-8') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    # Get the language from the global configuration file
LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
db_file = cfg['intent']['password']['db_file']
key_file = cfg['intent']['password']['key_file']
db_file = os.path.join('intents','functions','password',db_file)
key_file = os.path.join('intents','functions','password',key_file)
if not os.path.exists(db_file):
return cfg['intent']['password'][LANGUAGE]['db_not_found']
if not os.path.exists(key_file):
return cfg['intent']['password'][LANGUAGE]['key_not_found']
UNKNOWN_ENTRY = random.choice(cfg['intent']['password'][LANGUAGE]['unknown_entry'])
UNKNOWN_ENTRY = UNKNOWN_ENTRY.format(entry)
NO_VOICE_MATCH = cfg['intent']['password'][LANGUAGE]['no_voice_match']
    # Could the intent's configuration file be loaded?
if cfg:
try:
kp = PyKeePass(os.path.abspath(db_file), keyfile=os.path.abspath(key_file))
except Exception as e:
return cfg['intent']['password'][LANGUAGE]['could_not_access_keystore']
        # Verify the speaker's voice
fp_entry = kp.find_entries(title='_fingerprint', first=True)
if fp_entry:
a = json.loads(fp_entry.notes)
b = global_variables.voice_assistant.current_speaker_fingerprint
nx = np.array(a)
ny = np.array(b)
cosDist = 1 - np.dot(nx, ny) / np.linalg.norm(nx) / np.linalg.norm(ny)
if (cosDist >= 0.3):
return NO_VOICE_MATCH
entries = kp.entries
for title in entries:
ratio = fuzz.ratio(title.title.lower(), entry.lower())
logger.info("Übereinstimmung von {} und {} ist {}%", title.title, entry, ratio)
if ratio > 70:
if (title):
return title.username
return UNKNOWN_ENTRY
else:
logger.error("Konnte Konfigurationsdatei für Intent 'password' nicht laden.")
return "" |
py | b40ce468777774368e7510920f69894b1b81440e | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# General Gifti Input - Output to and from the filesystem
# Stephan Gerhard, October 2010
##############
import os
from . import parse_gifti_fast as gfp
def read(filename):
""" Load a Gifti image from a file
Parameters
----------
filename : string
The Gifti file to open, it has usually ending .gii
Returns
-------
img : GiftiImage
Returns a GiftiImage
"""
if not os.path.isfile(filename):
raise IOError("No such file or directory: '%s'" % filename)
return gfp.parse_gifti_file(filename)
def write(image, filename):
""" Save the current image to a new file
Parameters
----------
image : GiftiImage
A GiftiImage instance to store
filename : string
Filename to store the Gifti file to
Returns
-------
None
Notes
-----
The Gifti spec suggests using the following suffixes to your
filename when saving each specific type of data:
.gii
Generic GIFTI File
.coord.gii
Coordinates
.func.gii
Functional
.label.gii
Labels
.rgba.gii
RGB or RGBA
.shape.gii
Shape
.surf.gii
Surface
.tensor.gii
Tensors
.time.gii
Time Series
.topo.gii
Topology
"""
    with open(filename, 'w') as f:
        f.write(image.to_xml())
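# Usage sketch (the file names below are hypothetical):
#   img = read('lh.pial.surf.gii')
#   write(img, 'lh.pial.copy.surf.gii')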
|
py | b40ce46f2dc68e269d4bceb554e84180b6ee2fef | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""data preprocessing utilities"""
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str)
parser.add_argument('--label_path', type=str)
parser.add_argument('--data_config_path', type=str, default="./data_config.npz")
parser.add_argument('--save_data_path', default='./', help='directory to save the processed data')
opt = parser.parse_args()
def custom_normalize(dataset, mean=None, std=None):
""" custom normalization """
ori_shape = dataset.shape
dataset = dataset.reshape(ori_shape[0], -1)
dataset = np.transpose(dataset)
if mean is None:
mean = np.mean(dataset, axis=1)
std = np.std(dataset, axis=1)
std += (np.abs(std) < 0.0000001)
dataset = dataset - mean[:, None]
dataset = dataset / std[:, None]
dataset = np.transpose(dataset)
dataset = dataset.reshape(ori_shape)
return dataset, mean, std
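# Illustrative example of the normalization above (shapes are made up): an (N, ...) array
# is flattened to (N, F), standardized per feature across samples, and reshaped back.
# The returned mean/std can be reused to normalize a second split consistently:
#   demo = np.random.rand(8, 2, 3).astype(np.float32)
#   normed, mean, std = custom_normalize(demo)
#   normed_again, _, _ = custom_normalize(demo, mean, std)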
def generate_data():
"""generate dataset for s11 parameter prediction"""
data_input = np.load(opt.input_path)
if os.path.exists(opt.data_config_path):
data_config = np.load(opt.data_config_path)
mean = data_config["mean"]
std = data_config["std"]
data_input, mean, std = custom_normalize(data_input, mean, std)
else:
data_input, mean, std = custom_normalize(data_input)
data_label = np.load(opt.label_path)
print(data_input.shape)
print(data_label.shape)
data_input = data_input.transpose((0, 4, 1, 2, 3))
data_label[:, :] = np.log10(-data_label[:, :] + 1.0)
if os.path.exists(opt.data_config_path):
scale_s11 = data_config['scale_s11']
else:
scale_s11 = 0.5 * np.max(np.abs(data_label[:, :]))
data_label[:, :] = data_label[:, :] / scale_s11
np.savez(opt.data_config_path, scale_s11=scale_s11, mean=mean, std=std)
np.save(os.path.join(opt.save_data_path, 'data_input.npy'), data_input)
np.save(os.path.join(opt.save_data_path, 'data_label.npy'), data_label)
print("data saved in target path")
if __name__ == "__main__":
generate_data()
|
py | b40ce58eace512b784be8c80aaa1d50e7241f6ed | from django.conf.urls import url
from django.contrib import admin
from .test_smbackend import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home),
url(r'^send_mail/$', views.send_email, name="sendmail"),
url(r'^send_email_message/$', views.send_email_message, name="sendmailmessage"),
url(r'^send_multialternative/$', views.send_multialternative, name="sendmultialternative"),
url(r'^send_email_non_default_app/$', views.send_email_non_default_app, name="send_email_non_default_app"),
]
|
py | b40ce646a0b414d5ffba3ba8818cd1f22b3659ef | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import jsonschema
import lale.lib.lale
from lale.lib.lale import ConcatFeatures
from lale.lib.lale import NoOp
from lale.lib.lale import IdentityWrapper
from lale.lib.sklearn import LogisticRegression
from lale.lib.sklearn import TfidfVectorizer
from lale.lib.sklearn import NMF
import sklearn.datasets
class TestDatasetSchemas(unittest.TestCase):
@classmethod
def setUpClass(cls):
from sklearn.datasets import load_iris
irisArr = load_iris()
cls._irisArr = {'X': irisArr.data, 'y': irisArr.target}
from lale.datasets import sklearn_to_pandas
(train_X, train_y), (test_X, test_y) = sklearn_to_pandas.load_iris_df()
cls._irisDf = {'X': train_X, 'y': train_y}
(train_X, train_y), (test_X, test_y) = \
sklearn_to_pandas.digits_df()
cls._digits = {'X': train_X, 'y': train_y}
(train_X, train_y), (test_X, test_y) = \
sklearn_to_pandas.california_housing_df()
cls._housing = {'X': train_X, 'y': train_y}
from lale.datasets import openml
(train_X, train_y), (test_X, test_y) = openml.fetch(
'credit-g', 'classification', preprocess=False)
cls._creditG = {'X': train_X, 'y': train_y}
from lale.datasets import load_movie_review
train_X, train_y = load_movie_review()
cls._movies = {'X': train_X, 'y': train_y}
from lale.datasets.uci.uci_datasets import fetch_drugscom
train_X, train_y, test_X, test_y = fetch_drugscom()
cls._drugRev = {'X': train_X, 'y': train_y}
@classmethod
def tearDownClass(cls):
cls._irisArr = None
cls._irisDf = None
cls._digits = None
cls._housing = None
cls._creditG = None
cls._movies = None
cls._drugRev = None
def test_datasets_with_own_schemas(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema
for name in ['irisArr', 'irisDf', 'digits', 'housing', 'creditG', 'movies', 'drugRev']:
dataset = getattr(self, f'_{name}')
data_X, data_y = dataset['X'], dataset['y']
schema_X, schema_y = to_schema(data_X), to_schema(data_y)
validate_schema(data_X, schema_X, subsample_array=False)
validate_schema(data_y, schema_y, subsample_array=False)
def test_ndarray_to_schema(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema
all_X, all_y = self._irisArr['X'], self._irisArr['y']
assert not hasattr(all_X, 'json_schema')
all_X_schema = to_schema(all_X)
validate_schema(all_X, all_X_schema, subsample_array=False)
assert not hasattr(all_y, 'json_schema')
all_y_schema = to_schema(all_y)
validate_schema(all_y, all_y_schema, subsample_array=False)
all_X_expected = {
'type': 'array', 'minItems': 150, 'maxItems': 150,
'items': {
'type': 'array', 'minItems': 4, 'maxItems': 4,
'items': {'type': 'number'}}}
all_y_expected = {
'type': 'array', 'minItems': 150, 'maxItems': 150,
'items': {'type': 'integer'}}
self.maxDiff = None
self.assertEqual(all_X_schema, all_X_expected)
self.assertEqual(all_y_schema, all_y_expected)
def test_pandas_to_schema(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema
import pandas as pd
train_X, train_y = self._irisDf['X'], self._irisDf['y']
assert isinstance(train_X, pd.DataFrame)
assert not hasattr(train_X, 'json_schema')
train_X_schema = to_schema(train_X)
validate_schema(train_X, train_X_schema, subsample_array=False)
assert isinstance(train_y, pd.Series)
assert not hasattr(train_y, 'json_schema')
train_y_schema = to_schema(train_y)
validate_schema(train_y, train_y_schema, subsample_array=False)
train_X_expected = {
'type': 'array', 'minItems': 120, 'maxItems': 120,
'items': {
'type': 'array', 'minItems': 4, 'maxItems': 4,
'items': [
{'description': 'sepal length (cm)', 'type': 'number'},
{'description': 'sepal width (cm)', 'type': 'number'},
{'description': 'petal length (cm)', 'type': 'number'},
{'description': 'petal width (cm)', 'type': 'number'}]}}
train_y_expected = {
'type': 'array', 'minItems': 120, 'maxItems': 120,
'items': {'description': 'target', 'type': 'integer'}}
self.maxDiff = None
self.assertEqual(train_X_schema, train_X_expected)
self.assertEqual(train_y_schema, train_y_expected)
def test_arff_to_schema(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema
train_X, train_y = self._creditG['X'], self._creditG['y']
assert hasattr(train_X, 'json_schema')
train_X_schema = to_schema(train_X)
validate_schema(train_X, train_X_schema, subsample_array=False)
assert hasattr(train_y, 'json_schema')
train_y_schema = to_schema(train_y)
validate_schema(train_y, train_y_schema, subsample_array=False)
train_X_expected = {
'type': 'array', 'minItems': 670, 'maxItems': 670,
'items': {
'type': 'array', 'minItems': 20, 'maxItems': 20,
'items': [
{'description': 'checking_status', 'enum': [
'<0', '0<=X<200', '>=200', 'no checking']},
{'description': 'duration', 'type': 'number'},
{'description': 'credit_history', 'enum': [
'no credits/all paid', 'all paid',
'existing paid', 'delayed previously',
'critical/other existing credit']},
{'description': 'purpose', 'enum': [
'new car', 'used car', 'furniture/equipment',
'radio/tv', 'domestic appliance', 'repairs',
'education', 'vacation', 'retraining', 'business',
'other']},
{'description': 'credit_amount', 'type': 'number'},
{'description': 'savings_status', 'enum': [
'<100', '100<=X<500', '500<=X<1000', '>=1000',
'no known savings']},
{'description': 'employment', 'enum': [
'unemployed', '<1', '1<=X<4', '4<=X<7', '>=7']},
{'description': 'installment_commitment', 'type': 'number'},
{'description': 'personal_status', 'enum': [
'male div/sep', 'female div/dep/mar', 'male single',
'male mar/wid', 'female single']},
{'description': 'other_parties', 'enum': [
'none', 'co applicant', 'guarantor']},
{'description': 'residence_since', 'type': 'number'},
{'description': 'property_magnitude', 'enum': [
'real estate', 'life insurance', 'car',
'no known property']},
{'description': 'age', 'type': 'number'},
{'description': 'other_payment_plans', 'enum': [
'bank', 'stores', 'none']},
{'description': 'housing', 'enum': [
'rent', 'own', 'for free']},
{'description': 'existing_credits', 'type': 'number'},
{'description': 'job', 'enum': [
'unemp/unskilled non res', 'unskilled resident',
'skilled', 'high qualif/self emp/mgmt']},
{'description': 'num_dependents', 'type': 'number'},
{'description': 'own_telephone', 'enum': ['none', 'yes']},
{'description': 'foreign_worker', 'enum': ['yes', 'no']}]}}
train_y_expected = {
'type': 'array', 'minItems': 670, 'maxItems': 670,
'items': {'description': 'class', 'enum': [0, 1]}}
self.maxDiff = None
self.assertEqual(train_X_schema, train_X_expected)
self.assertEqual(train_y_schema, train_y_expected)
def test_keep_numbers(self):
from lale.datasets.data_schemas import to_schema
from lale.lib.lale import Project
train_X, train_y = self._creditG['X'], self._creditG['y']
trainable = Project(columns={'type': 'number'})
trained = trainable.fit(train_X)
transformed = trained.transform(train_X)
transformed_schema = to_schema(transformed)
transformed_expected = {
'type': 'array', 'minItems': 670, 'maxItems': 670,
'items': {
'type': 'array', 'minItems': 7, 'maxItems': 7,
'items': [
{'description': 'duration', 'type': 'number'},
{'description': 'credit_amount', 'type': 'number'},
{'description': 'installment_commitment', 'type': 'number'},
{'description': 'residence_since', 'type': 'number'},
{'description': 'age', 'type': 'number'},
{'description': 'existing_credits', 'type': 'number'},
{'description': 'num_dependents', 'type': 'number'}]}}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_keep_non_numbers(self):
from lale.datasets.data_schemas import to_schema
from lale.lib.lale import Project
train_X, train_y = self._creditG['X'], self._creditG['y']
trainable = Project(columns={'not': {'type': 'number'}})
trained = trainable.fit(train_X)
transformed = trained.transform(train_X)
transformed_schema = to_schema(transformed)
transformed_expected = {
'type': 'array', 'minItems': 670, 'maxItems': 670,
'items': {
'type': 'array', 'minItems': 13, 'maxItems': 13,
'items': [
{'description': 'checking_status', 'enum': [
'<0', '0<=X<200', '>=200', 'no checking']},
{'description': 'credit_history', 'enum': [
'no credits/all paid', 'all paid',
'existing paid', 'delayed previously',
'critical/other existing credit']},
{'description': 'purpose', 'enum': [
'new car', 'used car', 'furniture/equipment',
'radio/tv', 'domestic appliance', 'repairs',
'education', 'vacation', 'retraining', 'business',
'other']},
{'description': 'savings_status', 'enum': [
'<100', '100<=X<500', '500<=X<1000', '>=1000',
'no known savings']},
{'description': 'employment', 'enum': [
'unemployed', '<1', '1<=X<4', '4<=X<7', '>=7']},
{'description': 'personal_status', 'enum': [
'male div/sep', 'female div/dep/mar', 'male single',
'male mar/wid', 'female single']},
{'description': 'other_parties', 'enum': [
'none', 'co applicant', 'guarantor']},
{'description': 'property_magnitude', 'enum': [
'real estate', 'life insurance', 'car',
'no known property']},
{'description': 'other_payment_plans', 'enum': [
'bank', 'stores', 'none']},
{'description': 'housing', 'enum': [
'rent', 'own', 'for free']},
{'description': 'job', 'enum': [
'unemp/unskilled non res', 'unskilled resident',
'skilled', 'high qualif/self emp/mgmt']},
{'description': 'own_telephone', 'enum': ['none', 'yes']},
{'description': 'foreign_worker', 'enum': ['yes', 'no']}]}}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_input_schema_fit(self):
self.maxDiff = None
self.assertEqual(
LogisticRegression.input_schema_fit(),
LogisticRegression.get_schema('input_fit'))
self.assertEqual(
(NMF >> LogisticRegression).input_schema_fit(),
NMF.get_schema('input_fit'))
self.assertEqual(
IdentityWrapper(op=LogisticRegression).input_schema_fit(),
LogisticRegression.get_schema('input_fit'))
actual = (TfidfVectorizer | NMF).input_schema_fit()
expected = {
'anyOf': [
{ 'type': 'object',
'required': ['X'],
'additionalProperties': False,
'properties': {
'X': {
'anyOf': [
{ 'type': 'array', 'items': {'type': 'string'}},
{ 'type': 'array',
'items': {
'type': 'array',
'minItems': 1, 'maxItems': 1,
'items': {'type': 'string'}}}]},
'y': {}}},
{ 'type': 'object',
'required': ['X'],
'additionalProperties': False,
'properties': {
'X': {
'type': 'array',
'items': {
'type': 'array',
'items': {'type': 'number', 'minimum': 0.0}}},
'y': {}}}]}
self.assertEqual(actual, expected)
def test_transform_schema_NoOp(self):
from lale.datasets.data_schemas import to_schema
for ds in [self._irisArr, self._irisDf, self._digits, self._housing, self._creditG, self._movies, self._drugRev]:
s_input = to_schema(ds['X'])
s_output = NoOp.transform_schema(s_input)
self.assertIs(s_input, s_output)
def test_transform_schema_pipeline(self):
from lale.datasets.data_schemas import to_schema
pipeline = NMF >> LogisticRegression
input_schema = to_schema(self._digits['X'])
transformed_schema = pipeline.transform_schema(input_schema)
transformed_expected = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description':
'Probability of the sample for each class in the model.',
'type': 'array',
'items': {'type': 'array', 'items': {'type': 'number'}}}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_transform_schema_choice(self):
from lale.datasets.data_schemas import to_schema
choice = NMF | LogisticRegression
input_schema = to_schema(self._digits['X'])
transformed_schema = choice.transform_schema(input_schema)
transformed_expected = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'array',
'items': {'type': 'array', 'items': {'type': 'number'}}}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_transform_schema_higher_order(self):
from lale.datasets.data_schemas import to_schema
inner = LogisticRegression
outer = IdentityWrapper(op=LogisticRegression)
input_schema = to_schema(self._digits['X'])
transformed_inner = inner.transform_schema(input_schema)
transformed_outer = outer.transform_schema(input_schema)
self.maxDiff = None
self.assertEqual(transformed_inner, transformed_outer)
def test_transform_schema_Concat_irisArr(self):
from lale.datasets.data_schemas import to_schema
data_X, data_y = self._irisArr['X'], self._irisArr['y']
s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)
def check(s_actual, n_expected, s_expected):
assert s_actual['items']['minItems'] == n_expected, str(s_actual)
assert s_actual['items']['maxItems'] == n_expected, str(s_actual)
assert s_actual['items']['items'] == s_expected, str(s_actual)
s_out_X = ConcatFeatures.transform_schema({'items': [s_in_X]})
check(s_out_X, 4, {'type': 'number'})
s_out_y = ConcatFeatures.transform_schema({'items': [s_in_y]})
check(s_out_y, 1, {'type': 'integer'})
s_out_XX = ConcatFeatures.transform_schema({'items': [s_in_X, s_in_X]})
check(s_out_XX, 8, {'type': 'number'})
s_out_yy = ConcatFeatures.transform_schema({'items': [s_in_y, s_in_y]})
check(s_out_yy, 2, {'type': 'integer'})
s_out_Xy = ConcatFeatures.transform_schema({'items': [s_in_X, s_in_y]})
check(s_out_Xy, 5, {'type': 'number'})
s_out_XXX = ConcatFeatures.transform_schema({
'items': [s_in_X, s_in_X, s_in_X]})
check(s_out_XXX, 12, {'type': 'number'})
def test_transform_schema_Concat_irisDf(self):
from lale.datasets.data_schemas import to_schema
data_X, data_y = self._irisDf['X'], self._irisDf['y']
s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)
def check(s_actual, n_expected, s_expected):
assert s_actual['items']['minItems'] == n_expected, str(s_actual)
assert s_actual['items']['maxItems'] == n_expected, str(s_actual)
assert s_actual['items']['items'] == s_expected, str(s_actual)
s_out_X = ConcatFeatures.transform_schema({'items': [s_in_X]})
check(s_out_X, 4, {'type': 'number'})
s_out_y = ConcatFeatures.transform_schema({'items': [s_in_y]})
check(s_out_y, 1, {'description': 'target', 'type': 'integer'})
s_out_XX = ConcatFeatures.transform_schema({'items': [s_in_X, s_in_X]})
check(s_out_XX, 8, {'type': 'number'})
s_out_yy = ConcatFeatures.transform_schema({'items': [s_in_y, s_in_y]})
check(s_out_yy, 2, {'type': 'integer'})
s_out_Xy = ConcatFeatures.transform_schema({'items': [s_in_X, s_in_y]})
check(s_out_Xy, 5, {'type': 'number'})
s_out_XXX = ConcatFeatures.transform_schema({
'items': [s_in_X, s_in_X, s_in_X]})
check(s_out_XXX, 12, {'type': 'number'})
def test_lr_with_all_datasets(self):
should_succeed = ['irisArr', 'irisDf', 'digits', 'housing']
should_fail = ['creditG', 'movies', 'drugRev']
for name in should_succeed:
dataset = getattr(self, f'_{name}')
LogisticRegression.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f'_{name}')
with self.assertRaises(ValueError):
LogisticRegression.validate_schema(**dataset)
def test_project_with_all_datasets(self):
import lale.lib.lale
should_succeed = ['irisArr', 'irisDf', 'digits', 'housing', 'creditG', 'drugRev']
should_fail = ['movies']
for name in should_succeed:
dataset = getattr(self, f'_{name}')
lale.lib.lale.Project.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f'_{name}')
with self.assertRaises(ValueError):
lale.lib.lale.Project.validate_schema(**dataset)
def test_nmf_with_all_datasets(self):
should_succeed = ['digits']
should_fail = ['irisArr', 'irisDf', 'housing', 'creditG', 'movies', 'drugRev']
for name in should_succeed:
dataset = getattr(self, f'_{name}')
NMF.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f'_{name}')
with self.assertRaises(ValueError):
NMF.validate_schema(**dataset)
def test_tfidf_with_all_datasets(self):
should_succeed = ['movies']
should_fail = ['irisArr', 'irisDf', 'digits', 'housing', 'creditG', 'drugRev']
for name in should_succeed:
dataset = getattr(self, f'_{name}')
TfidfVectorizer.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f'_{name}')
with self.assertRaises(ValueError):
TfidfVectorizer.validate_schema(**dataset)
def test_decision_function_binary(self):
from lale.lib.lale import Project
train_X, train_y = self._creditG['X'], self._creditG['y']
trainable = Project(columns={'type': 'number'}) >> LogisticRegression()
trained = trainable.fit(train_X, train_y)
decisions = trained.decision_function(train_X)
class TestErrorMessages(unittest.TestCase):
def test_wrong_cont(self):
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(C=-1)
summary = cm.exception.message.split('\n')[0]
self.assertEqual(summary, "Invalid configuration for LogisticRegression(C=-1) due to invalid value C=-1.")
def test_wrong_cat(self):
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(solver='adam')
summary = cm.exception.message.split('\n')[0]
self.assertEqual(summary, "Invalid configuration for LogisticRegression(solver='adam') due to invalid value solver=adam.")
def test_unknown_arg(self):
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(activation='relu')
summary = cm.exception.message.split('\n')[0]
self.assertEqual(summary, "Invalid configuration for LogisticRegression(activation='relu') due to argument 'activation' was unexpected.")
def test_constraint(self):
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(solver='sag', penalty='l1')
summary = cm.exception.message.split('\n')[0]
self.assertEqual(summary, "Invalid configuration for LogisticRegression(solver='sag', penalty='l1') due to constraint the newton-cg, sag, and lbfgs solvers support only l2 penalties.")
class TestSchemaValidation(unittest.TestCase):
def test_any(self):
from lale.type_checking import is_subschema
num_schema = {'type': 'number'}
any_schema = {'laleType': 'Any'}
jsonschema.validate(42, num_schema)
jsonschema.validate(42, any_schema)
self.assertTrue(is_subschema(num_schema, any_schema))
self.assertTrue(is_subschema(any_schema, num_schema))
class TestWithScorer(unittest.TestCase):
def test_bare_array(self):
from lale.datasets.data_schemas import NDArrayWithSchema
from numpy import ndarray
import sklearn.metrics
X, y = sklearn.datasets.load_iris(return_X_y=True)
self.assertIsInstance(X, ndarray)
self.assertIsInstance(y, ndarray)
self.assertNotIsInstance(X, NDArrayWithSchema)
self.assertNotIsInstance(y, NDArrayWithSchema)
trainable = LogisticRegression()
trained = trainable.fit(X, y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score)
out = scorer(trained, X, y)
self.assertIsInstance(out, float)
self.assertNotIsInstance(out, NDArrayWithSchema)
|
py | b40ce6a1dd5eec9f0250533a5cecbfeefd088a9a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayResult',
'AwaitableGetVirtualNetworkGatewayResult',
'get_virtual_network_gateway',
]
@pulumi.output_type
class GetVirtualNetworkGatewayResult:
"""
A common class for general resource information.
"""
def __init__(__self__, active_active=None, bgp_settings=None, custom_routes=None, enable_bgp=None, enable_dns_forwarding=None, enable_private_ip_address=None, etag=None, gateway_default_site=None, gateway_type=None, id=None, inbound_dns_forwarding_endpoint=None, ip_configurations=None, location=None, name=None, provisioning_state=None, resource_guid=None, sku=None, tags=None, type=None, vpn_client_configuration=None, vpn_gateway_generation=None, vpn_type=None):
if active_active and not isinstance(active_active, bool):
raise TypeError("Expected argument 'active_active' to be a bool")
pulumi.set(__self__, "active_active", active_active)
if bgp_settings and not isinstance(bgp_settings, dict):
raise TypeError("Expected argument 'bgp_settings' to be a dict")
pulumi.set(__self__, "bgp_settings", bgp_settings)
if custom_routes and not isinstance(custom_routes, dict):
raise TypeError("Expected argument 'custom_routes' to be a dict")
pulumi.set(__self__, "custom_routes", custom_routes)
if enable_bgp and not isinstance(enable_bgp, bool):
raise TypeError("Expected argument 'enable_bgp' to be a bool")
pulumi.set(__self__, "enable_bgp", enable_bgp)
if enable_dns_forwarding and not isinstance(enable_dns_forwarding, bool):
raise TypeError("Expected argument 'enable_dns_forwarding' to be a bool")
pulumi.set(__self__, "enable_dns_forwarding", enable_dns_forwarding)
if enable_private_ip_address and not isinstance(enable_private_ip_address, bool):
raise TypeError("Expected argument 'enable_private_ip_address' to be a bool")
pulumi.set(__self__, "enable_private_ip_address", enable_private_ip_address)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if gateway_default_site and not isinstance(gateway_default_site, dict):
raise TypeError("Expected argument 'gateway_default_site' to be a dict")
pulumi.set(__self__, "gateway_default_site", gateway_default_site)
if gateway_type and not isinstance(gateway_type, str):
raise TypeError("Expected argument 'gateway_type' to be a str")
pulumi.set(__self__, "gateway_type", gateway_type)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inbound_dns_forwarding_endpoint and not isinstance(inbound_dns_forwarding_endpoint, str):
raise TypeError("Expected argument 'inbound_dns_forwarding_endpoint' to be a str")
pulumi.set(__self__, "inbound_dns_forwarding_endpoint", inbound_dns_forwarding_endpoint)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if vpn_client_configuration and not isinstance(vpn_client_configuration, dict):
raise TypeError("Expected argument 'vpn_client_configuration' to be a dict")
pulumi.set(__self__, "vpn_client_configuration", vpn_client_configuration)
if vpn_gateway_generation and not isinstance(vpn_gateway_generation, str):
raise TypeError("Expected argument 'vpn_gateway_generation' to be a str")
pulumi.set(__self__, "vpn_gateway_generation", vpn_gateway_generation)
if vpn_type and not isinstance(vpn_type, str):
raise TypeError("Expected argument 'vpn_type' to be a str")
pulumi.set(__self__, "vpn_type", vpn_type)
@property
@pulumi.getter(name="activeActive")
def active_active(self) -> Optional[bool]:
"""
ActiveActive flag.
"""
return pulumi.get(self, "active_active")
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Virtual network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter(name="customRoutes")
def custom_routes(self) -> Optional['outputs.AddressSpaceResponse']:
"""
The reference to the address space resource which represents the custom routes address space specified by the customer for virtual network gateway and VpnClient.
"""
return pulumi.get(self, "custom_routes")
@property
@pulumi.getter(name="enableBgp")
def enable_bgp(self) -> Optional[bool]:
"""
Whether BGP is enabled for this virtual network gateway or not.
"""
return pulumi.get(self, "enable_bgp")
@property
@pulumi.getter(name="enableDnsForwarding")
def enable_dns_forwarding(self) -> Optional[bool]:
"""
Whether dns forwarding is enabled or not.
"""
return pulumi.get(self, "enable_dns_forwarding")
@property
@pulumi.getter(name="enablePrivateIpAddress")
def enable_private_ip_address(self) -> Optional[bool]:
"""
Whether private IP needs to be enabled on this gateway for connections or not.
"""
return pulumi.get(self, "enable_private_ip_address")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayDefaultSite")
def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference to the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting.
"""
return pulumi.get(self, "gateway_default_site")
@property
@pulumi.getter(name="gatewayType")
def gateway_type(self) -> Optional[str]:
"""
The type of this virtual network gateway.
"""
return pulumi.get(self, "gateway_type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inboundDnsForwardingEndpoint")
def inbound_dns_forwarding_endpoint(self) -> str:
"""
The IP address allocated by the gateway to which dns requests can be sent.
"""
return pulumi.get(self, "inbound_dns_forwarding_endpoint")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']]:
"""
IP configurations for virtual network gateway.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the virtual network gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the virtual network gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']:
"""
The reference to the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vpnClientConfiguration")
def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']:
"""
The reference to the VpnClientConfiguration resource which represents the P2S VpnClient configurations.
"""
return pulumi.get(self, "vpn_client_configuration")
@property
@pulumi.getter(name="vpnGatewayGeneration")
def vpn_gateway_generation(self) -> Optional[str]:
"""
The generation for this VirtualNetworkGateway. Must be None if gatewayType is not VPN.
"""
return pulumi.get(self, "vpn_gateway_generation")
@property
@pulumi.getter(name="vpnType")
def vpn_type(self) -> Optional[str]:
"""
The type of this virtual network gateway.
"""
return pulumi.get(self, "vpn_type")
class AwaitableGetVirtualNetworkGatewayResult(GetVirtualNetworkGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayResult(
active_active=self.active_active,
bgp_settings=self.bgp_settings,
custom_routes=self.custom_routes,
enable_bgp=self.enable_bgp,
enable_dns_forwarding=self.enable_dns_forwarding,
enable_private_ip_address=self.enable_private_ip_address,
etag=self.etag,
gateway_default_site=self.gateway_default_site,
gateway_type=self.gateway_type,
id=self.id,
inbound_dns_forwarding_endpoint=self.inbound_dns_forwarding_endpoint,
ip_configurations=self.ip_configurations,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
sku=self.sku,
tags=self.tags,
type=self.type,
vpn_client_configuration=self.vpn_client_configuration,
vpn_gateway_generation=self.vpn_gateway_generation,
vpn_type=self.vpn_type)
def get_virtual_network_gateway(resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayResult:
"""
A common class for general resource information.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getVirtualNetworkGateway', __args__, opts=opts, typ=GetVirtualNetworkGatewayResult).value
return AwaitableGetVirtualNetworkGatewayResult(
active_active=__ret__.active_active,
bgp_settings=__ret__.bgp_settings,
custom_routes=__ret__.custom_routes,
enable_bgp=__ret__.enable_bgp,
enable_dns_forwarding=__ret__.enable_dns_forwarding,
enable_private_ip_address=__ret__.enable_private_ip_address,
etag=__ret__.etag,
gateway_default_site=__ret__.gateway_default_site,
gateway_type=__ret__.gateway_type,
id=__ret__.id,
inbound_dns_forwarding_endpoint=__ret__.inbound_dns_forwarding_endpoint,
ip_configurations=__ret__.ip_configurations,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
vpn_client_configuration=__ret__.vpn_client_configuration,
vpn_gateway_generation=__ret__.vpn_gateway_generation,
vpn_type=__ret__.vpn_type)
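# Hypothetical usage sketch (the resource-group and gateway names are assumptions);
# the returned object exposes the properties defined above:
#
#     gw = get_virtual_network_gateway(resource_group_name="example-rg",
#                                      virtual_network_gateway_name="example-vnet-gw")
#     pulumi.export("gatewayProvisioningState", gw.provisioning_state)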
|
py | b40ce74f935ae75b8bd41ce0ab3d556764f783fb | """
Codes for preprocessing real-world datasets used in the experiments
in the paper "Unbiased Recommender Learning from Missing-Not-At-Random Implicit Feedback".
"""
import codecs
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from scipy import sparse, stats
from sklearn.model_selection import train_test_split
def preprocess_dataset(threshold: int = 4) -> None:
"""Load and Preprocess datasets."""
# load dataset.
col = {0: 'user', 1: 'item', 2: 'rate'}
with codecs.open(f'../data/train.txt', 'r', 'utf-8', errors='ignore') as f:
data_train = pd.read_csv(f, delimiter='\t', header=None)
data_train.rename(columns=col, inplace=True)
with codecs.open(f'../data/test.txt', 'r', 'utf-8', errors='ignore') as f:
data_test = pd.read_csv(f, delimiter='\t', header=None)
data_test.rename(columns=col, inplace=True)
num_users, num_items = data_train.user.max(), data_train.item.max()
for _data in [data_train, data_test]:
_data.user, _data.item = _data.user - 1, _data.item - 1
        # binarize ratings.
_data.rate[_data.rate < threshold] = 0
_data.rate[_data.rate >= threshold] = 1
    # train-val-test split
train, test = data_train.values, data_test.values
train, val = train_test_split(train, test_size=0.1, random_state=12345)
# estimate pscore
_, item_freq = np.unique(train[train[:, 2] == 1, 1], return_counts=True)
pscore = (item_freq / item_freq.max()) ** 0.5
# only positive data
train = train[train[:, 2] == 1, :2]
val = val[val[:, 2] == 1, :2]
# creating training data
all_data = pd.DataFrame(
np.zeros((num_users, num_items))).stack().reset_index()
all_data = all_data.values[:, :2]
unlabeled_data = np.array(
list(set(map(tuple, all_data)) - set(map(tuple, train))), dtype=int)
train = np.r_[np.c_[train, np.ones(train.shape[0])],
np.c_[unlabeled_data, np.zeros(unlabeled_data.shape[0])]]
# save datasets
path = Path(f'../data/point')
path.mkdir(parents=True, exist_ok=True)
    np.save(str(path / 'train.npy'), arr=train.astype(int))
    np.save(str(path / 'val.npy'), arr=val.astype(int))
    np.save(str(path / 'test.npy'), arr=test.astype(int))
np.save(str(path / 'pscore.npy'), arr=pscore)
np.save(str(path / 'item_freq.npy'), arr=item_freq)
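# Minimal usage sketch, assuming ../data/train.txt and ../data/test.txt exist as
# tab-separated user/item/rating files as expected by preprocess_dataset above:
if __name__ == '__main__':
    preprocess_dataset(threshold=4)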
|
py | b40ce8a40616cad7b8c83ad97f619fe2f5ae90b2 | # faça um programa que leia um número real e mostre somente a parte inteira deste número.
import math
n = float(input('Digite um número real: '))
print('A parte inteira de {} é: {}'.format(n, (n // 1)))
print('A parte inteira de {} é: {}'.format(n, math.floor(n)))
print('A parte inteira de {} é: {}'.format(n, math.trunc(n)))
print('A parte inteira de {} é: {}'.format(n, int(n)))
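# Note: the four approaches only agree for non-negative n; for n = -3.7,
# n // 1 gives -4.0 and math.floor(n) gives -4, while math.trunc(n) and int(n) give -3.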
|
py | b40ce8c3314a2af30339a492924256696d5f9758 |
# Copyright Notice:
# Copyright 2018 Dell, Inc. All rights reserved.
# License: BSD License. For full license text see link: https://github.com/RedDrum-Redfish-Project/RedDrum-OpenBMC/LICENSE.txt
from .obmcDbusInterfaces import RdOpenBmcDbusInterfaces
from .obmcStaticConfig import RdOpenBmcStaticConfig
class RdOpenBmcDiscovery():
def __init__(self,rdr):
# got static Config Resource handle with the static IDs and config values
self.staticCfg=RdOpenBmcStaticConfig()
# get instance of DbusInterfaces APIs
self.dbus = RdOpenBmcDbusInterfaces(rdr)
# initialize discovery dicts
self.chassisDict={}
self.managersDict={}
self.systemsDict={}
self.fansDict={}
self.temperatureSensorsDict={}
self.powerSuppliesDict={}
self.voltageSensorsDict={}
self.powerControlDict={}
self.mgrNetworkProtocolDict={}
self.mgrEthernetDict={}
# --------------------------------------------------
def discoverResourcesPhase1(self, rdr):
#PHASE-1a:
rdr.logMsg("INFO","....discovery: running phase-1a. adding Base Manager (BMC) resource")
# Get BMC Manager info and create a Base Manager Entry for the BMC
mgrBaseInfo = self.dbus.discoverObmcMgrBaseInfo()
mgrEntry = self.makeMgrBaseEntry(self.staticCfg.mgrId, mgrBaseInfo )
if mgrEntry is not None:
self.managersDict[self.staticCfg.mgrId] = mgrEntry
#PHASE-1b:
rdr.logMsg("INFO","....discovery: running phase-1b. adding Base Chassis resource")
# Get Chassis info and create a Base Chassis Entry
chasBaseInfo = self.dbus.discoverObmcChassisBaseInfo()
chasEntry = self.makeChassisBaseEntry(self.staticCfg.chasId, chasBaseInfo)
if chasEntry is not None:
self.chassisDict[self.staticCfg.chasId] = chasEntry
#PHASE-1c:
rdr.logMsg("INFO","....discovery: running phase-1c. adding Base System resource")
# Get System info and create a Base System Entry
sysBaseInfo = self.dbus.discoverObmcSysBaseInfo()
sysEntry = self.makeSysBaseEntry(self.staticCfg.sysId, sysBaseInfo, chasBaseInfo)
if sysEntry is not None:
self.systemsDict[self.staticCfg.sysId] = sysEntry
#PHASE-1d:
rdr.logMsg("INFO","....discovery: running phase-1d. adding TempSensor resources")
# Get TempSensor info and create TempSensor entries for the chassisId
tempSensorInfo = self.dbus.discoverObmcTempSensorsInfo()
tempSensorEntry = self.makeTempSensorsEntry(self.staticCfg.chasId, tempSensorInfo)
if tempSensorEntry is not None:
self.temperatureSensorsDict[self.staticCfg.chasId] = tempSensorEntry
#PHASE-1e:
rdr.logMsg("INFO","....discovery: running phase-1e. adding Fan resources")
# Get Fans info and create Fan entries for the chassisId
fanInfo = self.dbus.discoverObmcFansInfo()
fansEntry = self.makeFansEntry(self.staticCfg.chasId, fanInfo)
if fansEntry is not None:
self.fansDict[self.staticCfg.chasId] = fansEntry
#PHASE-1f:
rdr.logMsg("INFO","....discovery: running phase-1f. adding VoltageSensor resources")
# Get VoltageSensor info and create VoltageSensor entries for the chassisId
voltageSensorInfo = self.dbus.discoverObmcVoltageSensorsInfo()
voltageSensorEntry = self.makeVoltageSensorsEntry(self.staticCfg.chasId, voltageSensorInfo)
if voltageSensorEntry is not None:
self.voltageSensorsDict[self.staticCfg.chasId] = voltageSensorEntry
#PHASE-1g:
rdr.logMsg("INFO","....discovery: running phase-1g. adding PowerSupply resources")
# Get PowerSupply info and create PowerSupply entries for the chassisId
powerSuppliesInfo = self.dbus.discoverObmcPowerSuppliesInfo()
powerSuppliesEntry = self.makePowerSuppliesEntry(self.staticCfg.chasId, powerSuppliesInfo)
if powerSuppliesEntry is not None:
self.powerSuppliesDict[self.staticCfg.chasId] = powerSuppliesEntry
#PHASE-1h:
rdr.logMsg("INFO","....discovery: running phase-1h. adding PowerControl resources")
# Get Power Control info and create PowerControl entries for the chassisId
powerControlInfo = self.dbus.discoverObmcPowerControlInfo()
powerControlEntry = self.makePowerControlEntry(self.staticCfg.chasId, powerControlInfo)
if powerControlEntry is not None:
self.powerControlDict[self.staticCfg.chasId] = powerControlEntry
#PHASE-1i:
rdr.logMsg("INFO","....discovery: running phase-1k. moving resources to the front-end cache databases")
# now set the front-end databases to what we have discovered here in the backend
# but note that at this point we are not saving these to the HDD cache
# initialize the chassis databases
# --point the front-end chassis databases at the backend dicts we just generated
rdr.logMsg("INFO","............discovery: setting chassis databases")
rdr.root.chassis.chassisDb=self.chassisDict
rdr.root.chassis.fansDb=self.fansDict
rdr.root.chassis.tempSensorsDb=self.temperatureSensorsDict
rdr.root.chassis.powerSuppliesDb=self.powerSuppliesDict
rdr.root.chassis.voltageSensorsDb=self.voltageSensorsDict
rdr.root.chassis.powerControlDb=self.powerControlDict
# --point the front-end managers databases at the backend dicts we just generated
rdr.logMsg("INFO","............discovery: setting managers database")
rdr.root.managers.managersDb=self.managersDict
# --point the front-end systems databases at the backend dicts we just generated
rdr.logMsg("INFO","............discovery: setting systems database")
rdr.root.systems.systemsDb=self.systemsDict
#PHASE-1j:
rdr.logMsg("INFO","....discovery: running phase-1.. initialize volatile Dicts")
# --initialize the Chassis volatileDicts
rdr.logMsg("INFO","............discovery: initializing Chassis VolatileDicts")
rdr.root.chassis.initializeChassisVolatileDict(rdr)
# --initialize the Managers volatileDicts
rdr.logMsg("INFO","............discovery: initializing Managers VolatileDicts")
rdr.root.managers.initializeManagersVolatileDict(rdr)
# --initialize the Systems volatileDict
rdr.logMsg("INFO","........system discovery: initializing Systems VolatileDict")
rdr.root.systems.initializeSystemsVolatileDict(rdr)
#PHASE-1k:
rdr.logMsg("INFO","....discovery: Phase1 complete")
return(0)
# Phase-2 discovery -- runs after Phase-1 discovery if no errors
# Generally this is used to startup hw-monitors on separate threads
# For initial OpenBMC integration, nothing to do here
def discoverResourcesPhase2(self, rdr):
# nothing to do in phasae2
return(0)
# --------------------------------------------------
# --------------------------------------------------
# helper function used by makeXXXEntry() methods below
# if info=None, then we set all properties to None (json null)
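    # Example (hypothetical values): loadPropsFromHwDiscovery(resp, ["SerialNumber"],
    #   {"SerialNumber": "ABC123"}) copies the value into resp, while a prop missing
    #   from info is stored as None so callers can rely on every key being present.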
def loadPropsFromHwDiscovery(self, resp, props, info ):
if info is None:
info={}
if (props is None) or (resp is None):
return(9)
for prop in props:
if prop in info:
resp[prop] = info[prop]
else:
resp[prop] = None
return(0)
# --------------------------------------------------
# --------------------------------------------------
# Create Base Chassis Db Entry for RedDrum OpenBMC
def makeChassisBaseEntry(self, chasId, chasBaseInfo):
resp=dict()
resp["Name"]="Base Server Chassis"
resp["ChassisType"]="RackMount"
resp["Description"]="Base Chassis Enclosure for RackMount Server"
resp["BaseNavigationProperties"]=["Thermal", "Power"]
resp["ComputerSystems"]=[self.staticCfg.sysId]
resp["Manufacturer"]=self.staticCfg.sysManufacturer
resp["Model"]=self.staticCfg.sysModel
resp["PartNumber"]=self.staticCfg.sysPartNumber
resp["ManagedBy"]=[self.staticCfg.mgrId]
#resp["ContainedBy"]= # not in baseServerProfile
resp["CooledBy"]=[self.staticCfg.chasId] # not in baseServerProfile
resp["PoweredBy"]=[self.staticCfg.chasId] # not in baseServerProfile
#resp["Oem"]={}
resp["Status"]={"State": "Enabled", "Health": "OK" } # set Status as static value--it never changes
resp["Volatile"]=["PowerState", "IndicatorLED"] # properties we update from dbus on each get.
# PowerState is required, IndicatorLED recommended in BaseSvrProfile
resp["Patchable"]=["IndicatorLED","AssetTag"] # properties that can be written. Recommended in BaseServerProfile
resp["AssetTag"]="" # init discoverytime asset tag to an empty string
# properties to get from Dbus interface during discovery
# FYI-baseServerProfile requires we include EITHER: PartNumber or SKU here-currently RedDrum uses PartNumber
propsFromHwDiscovery=["SerialNumber" ]
self.loadPropsFromHwDiscovery(resp, propsFromHwDiscovery, chasBaseInfo )
return(resp)
# --------------------------------------------------
# Create Base Manager Db Entry for RedDrum OpenBMC
def makeMgrBaseEntry(self, mgrId, mgrBaseInfo ):
resp=dict()
resp["Name"]="OpenBMC"
resp["Description"]="OpenBMC Baseboard Management Controller"
resp["ManagerType"]="BMC"
resp["Model"]=self.staticCfg.mgrModel
resp["ManagerInChassis"]=self.staticCfg.chasId # note: not required in BaseServerProfile
resp["ManagerForChassis"]=[self.staticCfg.chasId]
resp["ManagerForServers"]=[self.staticCfg.sysId]
resp["Status"]={"State": "Enabled", "Health": "OK" }
resp["SerialConsole"]= {"ServiceEnabled": self.staticCfg.serialConsoleEnabled,
"ConnectTypesSupported": self.staticCfg.serialConsoleConnectTypesSupported }
resp["CommandShell"] = {"ServiceEnabled": self.staticCfg.commandShellEnabled,
"ConnectTypesSupported": self.staticCfg.commandShellConnectTypesSupported }
resp["ActionsResetAllowableValues"]=["GracefulRestart","ForceRestart"]
#resp["BaseNavigationProperties"]=["NetworkProtocol","EthernetInterfaces","LogServices"] # required in BaseServerProfile
resp["BaseNavigationProperties"]=["NetworkProtocol","EthernetInterfaces"] # required in BaseServerProfile
resp["GetDateTimeFromOS"]=True
resp["GetServiceEntryPointUuidFrom"]="ServiceRoot" # ServiceRoot | UUID
# properties that can be written.
resp["Patchable"]=["DateTime", "DateTimeLocalOffset"] # Recommended in BaseServerProfile
# get these properties from Dbus discovery
propsFromHwDiscovery=["FirmwareVersion","UUID" ] #
self.loadPropsFromHwDiscovery(resp, propsFromHwDiscovery, mgrBaseInfo )
if ("UUID" not in resp) or (resp["UUID"] is None):
            # if we couldn't get UUID from dbus, use the one from serviceRoot
resp["GetUuidFromServiceRoot"]=True # we will try to read BMC UUID from dbus
if "UUID" in resp:
del resp["UUID"]
# ***ManagerNetworkProtocols
managerNetworkProtocols = {
"Name": "OpenBMC Network Protocols",
"HTTP": {"Port": 80, "ProtocolEnabled": True},
"HTTPS": {"Port": 443,"ProtocolEnabled": True },
"SSH": {"Port": 22, "ProtocolEnabled": True },
#"NTP": {}, # no NTP in BaseServer Profile
"HostName": "",
"FQDN": "",
"Status": {"State": "Enabled", "Health": "OK"}
}
resp["NetworkProtocols"]= managerNetworkProtocols
# *** EthernetInterfaces
ipv4info=[{"Address": None, "SubnetMask": None, "Gateway": None, "AddressOrigin": None}]
ethDeviceInfo = {
"Name": "", "SpeedMbps": None, "HostName": "", "FQDN": "", "LinkStatus": None,
"InterfaceEnabled": None, "FullDuplex": True, "AutoNeg": True,
"MACAddress": None, "PermanentMACAddress": None, "IPv4Addresses": ipv4info
}
resp["EthernetInterfaces"] = { "eth1": ethDeviceInfo }
return(resp)
# --------------------------------------------------
# Create System Db Entry for RedDrum OpenBMC
def makeSysBaseEntry(self, sysId, sysBaseInfo, chasBaseInfo):
resp=dict()
resp["Name"]="Computer System"
resp["Description"]="Computer System Base Resource"
resp["SystemType"]="Physical"
resp["Manufacturer"]=self.staticCfg.sysManufacturer
resp["Model"]=self.staticCfg.sysModel
resp["PartNumber"]=self.staticCfg.sysPartNumber
resp["AssetTag"]="" # init discoverytime asset tag to an empty string
resp["Volatile"]=["PowerState", "IndicatorLED"]
resp["ActionsResetAllowableValues"]=["On","ForceOff", "ForceRestart" ]
resp["Patchable"]=["IndicatorLED", "AssetTag" ]
resp["MemorySummary"]={"TotalSystemMemoryGiB": None }
resp["ProcessorSummary"]={"Count": None,"Model": None }
resp["Status"]={"State": "Enabled", "Health": "OK" } # set Status as static value--it never changes
resp["BootSourceVolatileProperties"]=["BootSourceOverrideEnabled","BootSourceOverrideTarget",
"BootSourceOverrideMode", "UefiTargetBootSourceOverride" ]
# Note: BootSourceOverrideMode is read recommended (not write)
resp["BootSourceAllowableValues"]=["None","Pxe","Hdd","BiosSetup"] #xg555
resp["Chassis"]=self.staticCfg.chasId
resp["ManagedBy"]=[ self.staticCfg.mgrId ]
# LogServices ReadRequirement Recc
# use the processor summary from discovered system Info
if "ProcessorSummary" in sysBaseInfo and "ProcessorSummary" in resp:
propsFromSysDiscovery=["Count", "Model"]
self.loadPropsFromHwDiscovery(resp["ProcessorSummary"], propsFromSysDiscovery,
sysBaseInfo["ProcessorSummary"] )
# use the Memory summary from discovered system Info
if "MemorySummary" in sysBaseInfo and "MemorySummary" in resp:
propsFromSysDiscovery=["TotalSystemMemoryGiB" ]
self.loadPropsFromHwDiscovery(resp["MemorySummary"], propsFromSysDiscovery,
sysBaseInfo["MemorySummary"] )
# use the SerialNumber discovered via Chassis Info
propsFromChasDiscovery=["SerialNumber"]
self.loadPropsFromHwDiscovery(resp, propsFromChasDiscovery, chasBaseInfo )
# add other System Properties
propsFromSysDiscovery=["BiosVersion", "UUID"]
self.loadPropsFromHwDiscovery(resp, propsFromSysDiscovery, sysBaseInfo )
if ("UUID" not in resp) or (resp["UUID"] is None):
            # if we couldn't get UUID from dbus via discoverObmcSysBaseInfo(), use the one from serviceRoot
resp["GetUuidFromServiceRoot"]=True # RedDrum will use the self-generated RedDrum UUID for the system UUID
if "UUID" in resp:
del resp["UUID"]
return(resp)
# --------------------------------------------------
# Create Temp Sensor Db Entries for RedDrum OpenBMC
# where discoverObmcTempSensorsInfo() returned a dict of temp sensors
# tempSensorInfo = { "Id":
# { "<tempSensorId0>": { "Name": <name>,"SensorNumber": <sn>,"PhysicalContext": <pc>,"Status": <status>,
# "MinReadingRange": <minrr>, "MaxReadingRange": <maxrr>,"AddRelatedItems": <related> } }
# { "<tempSensorId1>": { ... }
# }
def makeTempSensorsEntry(self, chasId, tempSensorInfo):
sensorPropsFromHw=["SensorNumber", "UpperThresholdNonCritical", "LowerThresholdNonCritical", "UpperThresholdCritical",
"LowerThresholdCritical", "MinReadingRange", "MaxReadingRange" ]
resp=dict()
if "Id" not in tempSensorInfo:
return(resp)
resp["Id"]={}
# create Intake/Inlet sensor
if "Intake" in tempSensorInfo["Id"]:
resp["Id"]["Intake"]={}
resp["Id"]["Intake"]["Name"] = "Intake Temp"
resp["Id"]["Intake"]["PhysicalContext"] = "Intake"
resp["Id"]["Intake"]["Volatile"] = [ "ReadingCelsius" ]
resp["Id"]["Intake"]["AddRelatedItems"] = ["System","Chassis"]
resp["Id"]["Intake"]["Status"] = {"State": "Enabled", "Health": "OK" }
self.loadPropsFromHwDiscovery(resp["Id"]["Intake"], sensorPropsFromHw, tempSensorInfo["Id"]["Intake"] )
resp["Id"]["Intake"]["Volatile"]=["ReadingCelsius"]
# create Board Temp sensor
if "Board" in tempSensorInfo["Id"]:
resp["Id"]["Board"]={}
resp["Id"]["Board"]["Name"] = "Board Temp"
resp["Id"]["Board"]["PhysicalContext"] = "SystemBoard"
resp["Id"]["Board"]["Volatile"] = [ "ReadingCelsius" ]
resp["Id"]["Board"]["AddRelatedItems"] = ["System","Chassis"]
resp["Id"]["Intake"]["Status"] = {"State": "Enabled", "Health": "OK" }
self.loadPropsFromHwDiscovery(resp["Id"]["Board"], sensorPropsFromHw, tempSensorInfo["Id"]["Board"] )
resp["Id"]["Board"]["Volatile"]=["ReadingCelsius"]
# create CPU1 Temp sensor
if "CPU1" in tempSensorInfo["Id"]:
resp["Id"]["CPU1"]={}
resp["Id"]["CPU1"]["Name"] = "CPU1 Temp"
resp["Id"]["CPU1"]["PhysicalContext"] = "CPU"
resp["Id"]["CPU1"]["Volatile"] = [ "ReadingCelsius" ]
resp["Id"]["CPU1"]["AddRelatedItems"] = ["System", "Processor" ]
resp["Id"]["Intake"]["Status"] = {"State": "Enabled", "Health": "OK" }
self.loadPropsFromHwDiscovery(resp["Id"]["CPU1"], sensorPropsFromHw, tempSensorInfo["Id"]["CPU1"] )
resp["Id"]["CPU1"]["Volatile"]=["ReadingCelsius"]
# create CPU2 Temp sensor
if "CPU2" in tempSensorInfo["Id"]:
resp["Id"]["CPU2"]={}
resp["Id"]["CPU2"]["Name"] = "CPU2 Temp"
resp["Id"]["CPU2"]["PhysicalContext"] = "CPU"
resp["Id"]["CPU2"]["Volatile"] = [ "ReadingCelsius" ]
resp["Id"]["CPU2"]["AddRelatedItems"] = ["System", "Processor" ]
resp["Id"]["Intake"]["Status"] = {"State": "Enabled", "Health": "OK" }
self.loadPropsFromHwDiscovery(resp["Id"]["CPU2"], sensorPropsFromHw, tempSensorInfo["Id"]["CPU2"] )
resp["Id"]["CPU2"]["Volatile"]=["ReadingCelsius"]
return(resp)
# --------------------------------------------------
# Create Fan Db Entries for RedDrum OpenBMC
# fansInfo={"MaxNumOfFans": <max>, "MinNumOfFans": <min>,
# "Id": "<fanId0>": # where "<fanId0>" is "0", "<fanId1>" is "1", etc
# { "ReadingUnits": "RPM", "MinReadingRange": 0, "MaxReadingRange": 4000, LowerThresholdCritical": 16 }
# "<vanId1>": { ... }
# "ReduncancyGroup": "0": {...}
def makeFansEntry(self, chasId, fanInfo):
        if fanInfo is None:
            fanInfo={}
        resp=dict()
resp["Id"]={}
resp["RedundancyGroup"]={}
if "Id" in fanInfo:
# first fill-in the array of all Fans as absent
for fanId in fanInfo["Id"]:
entryData={}
entryData["Volatile"]=["Reading" ]
entryData["ReadingUnits"]="RPM"
entryData["MinReadingRange"]=None
entryData["MaxReadingRange"]=None
entryData["LowerThresholdCritical"]=None
entryData["RedundancyGroup"]="0"
entryData["Name"]="Fan"+fanId
entryData["PhysicalContext"]="Backplane"
entryData["Status"]={"State": "Absent", "Health": None }
resp["Id"][fanId]=entryData
# iterate through the array of fan entries returned,
# and update entry w/ data returned from dbus
fanPropertyList=["MinReadingRange","MaxReadingRange","LowerThresholdCritical",]
for psuId in fanInfo["Id"]:
if "Status" in fanInfo["Id"][psuId]:
resp["Id"][fanId]["Status"]=fanInfo["Id"][fanId]["Status"]
for prop in fanPropertyList:
if prop in fanInfo["Id"][fanId]:
resp["Id"][fanId][prop]=fanInfo["Id"][fanId][prop]
# set all fans in same redundancy group "0"
# xg later if redundancy on/off is supported this will change
if True:
redundancyGroupId="0"
entryData={}
entryData["Name"]="Shared Chassis Fans"
entryData["Mode"]="N+m"
entryData["Status"]={"State": "Enabled", "Health": "OK" }
if "MinNumOfFans" in fanInfo:
entryData["MinNumNeeded"]=fanInfo["MinNumOfFans"]
else:
entryData["MinNumNeeded"]=None
if "MaxNumOfFans" in fanInfo:
entryData["MaxNumSupported"]= fanInfo["MaxNumOfFans"]
else:
entryData["MaxNumSupported"]=None
resp["RedundancyGroup"][redundancyGroupId]=entryData
return(resp)
# --------------------------------------------------
# Create Voltage Sensor Db Entry for RedDrum OpenBMC
# where discoverObmcVoltageSensorsInfo() returned a dict of voltage sensors
# voltageSensorInfo = { "Id":
# { "<voltageSensorId0>": { "Name": <name>,"SensorNumber": <sn>,"PhysicalContext": <pc>,"Status": <status>,
# "MinReadingRange": <minrr>, "MaxReadingRange": <maxrr>,"AddRelatedItems": <related> } }
# { "<voltageSensorId1>": { ... }
# }
def makeVoltageSensorsEntry(self, chasId, voltageSensorInfo):
if voltageSensorInfo is None:
return(None)
resp=dict()
propsFromHwDiscovery=["Name","SensorNumber","PhysicalContext","Status","MinReadingRange", "MaxReadingRange","AddRelatedItems"]
resp["Id"]={}
if "Id" in voltageSensorInfo:
for voltageSensorId in voltageSensorInfo["Id"]:
resp["Id"][voltageSensorId]={}
self.loadPropsFromHwDiscovery(resp["Id"][voltageSensorId], propsFromHwDiscovery, voltageSensorInfo["Id"][voltageSensorId] )
resp["Id"][voltageSensorId]["Volatile"]=["ReadingVolts"]
return(resp)
# --------------------------------------------------
# Create PowerSupply Db Entries for RedDrum OpenBMC
# powerSuppliesInfo={ "MaxNumOfPsus": <max>, "MinNumOfPsus": <min>,
# "Id": "<psuId0>":
# { "PsuNum": <psunum>, "PowerSupplyType": "<type>", "LineInputVoltageType": "<livt>",
# "PowerCapacityWatts": <watts>, "SerialNumber": "<sn>",
# "Status": {"State": "<status>", "Health": <health> }
# },
# <psuId1>: { ... }
# "RedundancyGroup": "0": { ... }
# }
def makePowerSuppliesEntry(self, chasId, powerSuppliesInfo):
if powerSuppliesInfo is None:
powerSuppliesInfo={}
resp=dict()
resp["Id"]={}
resp["RedundancyGroup"]={}
if "Id" in powerSuppliesInfo:
# first fill-in the array of all PowerSupplies as absent
for psuId in powerSuppliesInfo["Id"]:
entryData={}
entryData["Volatile"]=["LineInputVoltage", "LastPowerOutputWatts"]
entryData["RedundancyGroup"]="0"
entryData["Name"]="Psu"+psuId
entryData["PowerSupplyType"]=None
entryData["LineInputVoltageType"]=None
entryData["PowerCapacityWatts"]=None
entryData["SerialNumber"]=None
entryData["Status"]={"State": "Absent", "Health": None }
resp["Id"][psuId]=entryData
# iterate through the array of powerSupply entries returned,
# and update entry w/ data returned from dbus
psuPropertyList=["PowerSupplyType", "LineInputVoltageType", "PowerCapacityWatts", "SerialNumber"]
for psuId in powerSuppliesInfo["Id"]:
if "Status" in powerSuppliesInfo["Id"][psuId]:
resp["Id"][psuId]["Status"]=powerSuppliesInfo["Id"][psuId]["Status"]
for prop in psuPropertyList:
if prop in powerSuppliesInfo["Id"][psuId]:
resp["Id"][psuId][prop]=powerSuppliesInfo["Id"][psuId][prop]
# set all powersupplies in same redundancy group "0"
# xg later if redundancy on/off is supported this will change
if True:
redundancyGroupId="0"
entryData={}
entryData["Name"]="Shared PowerSupplies"
entryData["Mode"]="N+m"
entryData["Status"]={"State": "Enabled", "Health": "OK" }
if "MinNumOfPsus" in powerSuppliesInfo:
entryData["MinNumNeeded"]=powerSuppliesInfo["MinNumOfPsus"]
else:
entryData["MinNumNeeded"]=None
if "MaxNumOfPsus" in powerSuppliesInfo:
entryData["MaxNumSupported"]= powerSuppliesInfo["MaxNumOfPsus"]
else:
entryData["MaxNumSupported"]=None
resp["RedundancyGroup"][redundancyGroupId]=entryData
return(resp)
# --------------------------------------------------
# Create PowerControl Db Entries for RedDrum OpenBMC
def makePowerControlEntry(self, chasId, powerControlInfo):
# create the base MemberId-0 PowerControl Entry -- the MAIN server power control resource
resp=dict()
resp["Id"]={}
resp0=dict()
resp0["Name"]="Chassis_Power_Control"
resp0["PhysicalContext"]="Chassis"
resp0["Patchable"]=["LimitInWatts","LimitException"]
resp0["Volatile"]=["PowerConsumedWatts"]
resp0["LimitInWatts"]=None
resp0["LimitException"]=None
# set Base nonVol resources read from dBus
propsFromHwDiscovery=["PowerCapacityWatts" ]
self.loadPropsFromHwDiscovery(resp0, propsFromHwDiscovery, powerControlInfo )
# load the main power control resource into PowerControl Db as index "0"
resp["Id"]["0"] = resp0
return(resp)
# --------------------------------------------------
|
py | b40ce91c883b4fc53379eaa5d9fbac2aca0a8779 | from typing import List, Tuple
from numbers import Number, Real
from math import sqrt
from colour import Color
from xmastree.datatypes import Animation
from xmastree.datatypes.vector import Vector
from xmastree.mathutils.polynomial import Polynomial
from xmastree.animations.utils.pointanim import point_cycle_animation
from xmastree.transform import convert_vectors_to_complex
from xmastree.coloring import voronoi_coloring
def animate(positions: List[Vector]) -> Animation:
height = max(position.z for position in positions)
slope = sqrt(height * height + 1)
zs = convert_vectors_to_complex(positions)
zs = [z / slope for z in zs]
NEWTON_ITER_COUNT = 5
POINT_RANGE = 1
FRAME_RATE = 60
MOVE_TIME = FRAME_RATE * 3
STAGGER_TIME = 0
HALT_TIME = FRAME_RATE * 1
TRANSPOSE_TIME = MOVE_TIME + STAGGER_TIME
WAIT_TIME = TRANSPOSE_TIME + STAGGER_TIME
CYCLE_TIME = (MOVE_TIME + WAIT_TIME + HALT_TIME) * 2
FRAME_COUNT = CYCLE_TIME
point_args: List[Tuple[Real, Number]] = [
(0, 1),
(STAGGER_TIME, -1),
(TRANSPOSE_TIME, 1j),
(WAIT_TIME, -1j),
]
colors = list(map(Color, "#A91F03 #FDB850 #2D5052 #51BCE2".split()))
frames: Animation = []
for i in range(FRAME_COUNT):
centers = [
point_cycle_animation(i - d, MOVE_TIME, WAIT_TIME + HALT_TIME)
* z * POINT_RANGE
for d, z in point_args
]
f = Polynomial.from_roots(*centers)
df = f.d
newton_z: List[complex] = []
for z in zs:
for _ in range(NEWTON_ITER_COUNT):
z -= f(z) / df(z)
newton_z.append(z)
frame = voronoi_coloring(newton_z, centers, colors)
frames.append(frame)
return frames
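# Hypothetical usage sketch, assuming `positions` holds the tree's LED coordinates as
# xmastree Vector instances:
#
#     frames = animate(positions)   # one colour list per LED per frame, cycling at 60 fps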
|
py | b40ceaf17e3a660bed6f69a200bc2ca4c761746a | from fastai.vision.models import (
resnet50, resnet101, resnet152,
)
from fastai.vision import imagenet_stats
img_size = 224
class ResNet50(object):
def __init__(self):
self.base_arch = resnet50
def get_model_config(self):
return {
'base_arch': self.base_arch,
}
def get_img_stats(self):
return imagenet_stats
def get_img_size(self):
return img_size
class ResNet101(object):
def __init__(self):
self.base_arch = resnet101
def get_model_config(self):
return {
'base_arch': self.base_arch,
}
def get_img_stats(self):
return imagenet_stats
def get_img_size(self):
return img_size
class ResNet152(object):
def __init__(self):
self.base_arch = resnet152
def get_model_config(self):
return {
'base_arch': self.base_arch,
}
def get_img_stats(self):
return imagenet_stats
def get_img_size(self):
return img_size
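# Minimal usage sketch (illustrative): all three wrappers expose the same accessors,
# so calling code can swap architectures without special-casing.
if __name__ == '__main__':
    cfg = ResNet50()
    print(cfg.get_model_config()['base_arch'].__name__)  # -> 'resnet50'
    print(cfg.get_img_size(), cfg.get_img_stats())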
|
py | b40ceb78225e1e769a1669597eef52644c6576bd | class Dive(object):
@staticmethod
    def first_scenario(input_list_temp):
        depth = 0
        horizontal_pos = 0
        # parse each "direction count" instruction
for index in range(0, len(input_list_temp)):
current_direction, current_direction_count_str = input_list_temp[index].split()
current_direction_count = int(current_direction_count_str)
if current_direction == 'forward':
horizontal_pos += current_direction_count
elif current_direction == 'down':
depth += current_direction_count
elif current_direction == 'up':
depth -= current_direction_count
multiplied = depth * horizontal_pos
print(f'Scenario 1 output is {multiplied}')
@staticmethod
    def second_scenario(input_list_temp):
        depth = 0
        horizontal_pos = 0
        aim = 0
        # parse each "direction count" instruction
for index in range(0, len(input_list_temp)):
current_direction, current_direction_count_str = input_list_temp[index].split()
current_direction_count = int(current_direction_count_str)
if current_direction == 'forward':
horizontal_pos += current_direction_count
depth += aim * current_direction_count
elif current_direction == 'down':
aim += current_direction_count
elif current_direction == 'up':
aim -= current_direction_count
multiplied = depth * horizontal_pos
print(f'Scenario 2 output is {multiplied}')
if __name__ == '__main__':
input_string = input('Enter coordinates separated by a comma. NOTE for a long list of numbers, put them in a file and < it into the script with "python3 dive.py < file.txt".: \n')
input_list_temp = input_string.split(',')
Dive().first_scenario(input_list_temp)
Dive().second_scenario(input_list_temp) |
py | b40cec12ca488a1fe82b9cf8ed73b792792a7c8c | #!/usr/bin/env python3
from griddly import GymWrapper, gd
import numpy as np
import torch
import copy as cp
from tqdm import tqdm
from coevo import AgentNet, get_state, Individual, Canonical, AgentInd, EnvInd, define_action_space, play_one_game
#np.random.seed(0)
# random actions
def init_custom_env(pretty_mode=False, level=0, game="simple_maze"):
if pretty_mode:
env = GymWrapper(yaml_file=game+".yaml",
global_observer_type=gd.ObserverType.SPRITE_2D, player_observer_type=gd.ObserverType.SPRITE_2D, level=level)
else:
env = GymWrapper(yaml_file=game+".yaml", level=level)
return env
def test_agent_es():
envInd = EnvInd()
env = envInd.env
obs = env.reset()
n_action = env.action_space
agent = AgentNet(get_state(obs), n_action)
params = agent.get_params()
d = len(params)
es = Canonical(d)
assert es.n_pop > 0 # population size
for i in range(es.n_pop):
es.population.append(AgentInd(env))
pop = es.ask()
orig_pop = cp.copy(pop)
for i in pop:
i.play_game(envInd, render = False)
assert i.fitness >= -15
print("max = ", max(pop, key=lambda p: p.fitness))
es.tell(pop)
pop2 = es.ask()
assert len(orig_pop) == len(pop2)
for i in range(len(pop)):
assert np.any(orig_pop[i].genes != pop2[i].genes)
for i in pop2:
i.play_game(envInd)
print("max = ", max(pop2, key=lambda p: p.fitness))
def test_generations():
envInd = EnvInd()
env = envInd.env
obs = env.reset()
n_action = env.action_space
agent = AgentNet(get_state(obs), n_action)
params = agent.get_params()
d = len(params)
es = Canonical(d)
for i in range(es.n_pop):
es.population.append(AgentInd(env))
for i in range(3):
print("-------- Iteration ", i+1," --------")
pop = es.ask()
for i in pop:
i.play_game(envInd, render=False)
es.tell(pop)
es.log()
print("max = ", max(pop, key=lambda p: p.fitness))
#es.plot(data='mean')
def test_evolution_zelda():
envInd = EnvInd()
env = envInd.env
obs = env.reset()
agent = AgentNet(get_state(obs), env.action_space)
params = agent.get_params()
d = len(params)
es = Canonical(d)
assert es.n_pop > 0 # population size
for i in range(es.n_pop):
es.population.append(AgentInd(env=env))
pop = es.ask()
orig_pop = cp.copy(pop)
for i in pop:
i.play_game(envInd, render = False)
assert i.fitness <= AgentInd.nb_steps_max + 30
print("max = ", max(pop, key=lambda p: p.fitness))
es.tell(pop)
pop2 = es.ask()
assert len(orig_pop) == len(pop2)
for i in range(len(pop)):
assert np.any(orig_pop[i].genes != pop2[i].genes)
for i in pop2:
i.play_game(envInd)
def test_generation_zelda():
envInd = EnvInd()
env = envInd.env
#define_action_space(env)
obs = env.reset()
agent = AgentNet(get_state(obs), env.action_space)
params = agent.get_params()
d = len(params)
i=0
sum=0
best_ind = None
best_fitness = 0
for i in tqdm(range(1000)):
# print("-------- Iteration ", i+1," --------")
if (i==0):
envInd = EnvInd()
es = Canonical(d)
for j in range(es.n_pop):
es.population.append(AgentInd(env=envInd.env))
pop = es.ask()
for k in pop:
play_one_game(k, envInd)
k.compute_fitness()
es.tell(pop)
maximum = max(pop, key=lambda p: p.fitness)
if maximum.fitness > best_fitness:
best_fitness = maximum.fitness
best_ind = cp.copy(maximum.agent.state_dict())
sum = sum + maximum.fitness
print("max = ", maximum)
# for i in range(es.n_pop):
# torch.save(pop[i].agent.state_dict(), "save/last_agent"+str(i))
torch.save(best_ind, "save/best_agent_trained_normal_env")
print("Best fitness = ", best_fitness)
es.plot(data='max')
def test_save_agent():
env = init_custom_env(game="simple_zelda")
obs = env.reset()
agent = AgentNet(get_state(obs), env.action_space)
torch.save(agent.state_dict(), "test_save")
def test_load_agent(number=5):
envInd = EnvInd()
env = envInd.env
define_action_space(env)
obs = env.reset()
agent = AgentNet(get_state(obs), env.action_space)
agent.load_state_dict(torch.load("save/last_agent"+str(number)))
indiv = AgentInd(env=env, genes=agent.get_params())
indiv.play_game(envInd, render=False)
def load_best_agent():
envInd = EnvInd()
env = envInd.env
define_action_space(env)
obs = env.reset()
agent = AgentNet(get_state(obs), env.action_space)
agent.load_state_dict(torch.load("best_agent2"))
indiv = AgentInd(env=env, genes=agent.get_params())
indiv.play_game(envInd, render=False)
print(indiv.fitness)
#test_agent_es()
#test_generations()
#test_evolution_zelda()
test_generation_zelda()
#load_best_agent()
#test_save_agent()
#test_load_agent(30)
|
py | b40cec28bee0ce2f3ce5ab5d911d24eb31fdc266 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import aim.ext.transport.remote_tracking_pb2 as remote__tracking__pb2
class RemoteTrackingServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.get_resource = channel.unary_unary(
'/RemoteTrackingService/get_resource',
request_serializer=remote__tracking__pb2.ResourceRequest.SerializeToString,
response_deserializer=remote__tracking__pb2.ResourceResponse.FromString,
)
self.release_resource = channel.unary_unary(
'/RemoteTrackingService/release_resource',
request_serializer=remote__tracking__pb2.ReleaseResourceRequest.SerializeToString,
response_deserializer=remote__tracking__pb2.ReleaseResourceResponse.FromString,
)
self.run_instruction = channel.stream_stream(
'/RemoteTrackingService/run_instruction',
request_serializer=remote__tracking__pb2.InstructionRequest.SerializeToString,
response_deserializer=remote__tracking__pb2.InstructionResponse.FromString,
)
self.run_write_instructions = channel.stream_unary(
'/RemoteTrackingService/run_write_instructions',
request_serializer=remote__tracking__pb2.WriteInstructionsRequest.SerializeToString,
response_deserializer=remote__tracking__pb2.WriteInstructionsResponse.FromString,
)
class RemoteTrackingServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def get_resource(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def release_resource(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def run_instruction(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def run_write_instructions(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RemoteTrackingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'get_resource': grpc.unary_unary_rpc_method_handler(
servicer.get_resource,
request_deserializer=remote__tracking__pb2.ResourceRequest.FromString,
response_serializer=remote__tracking__pb2.ResourceResponse.SerializeToString,
),
'release_resource': grpc.unary_unary_rpc_method_handler(
servicer.release_resource,
request_deserializer=remote__tracking__pb2.ReleaseResourceRequest.FromString,
response_serializer=remote__tracking__pb2.ReleaseResourceResponse.SerializeToString,
),
'run_instruction': grpc.stream_stream_rpc_method_handler(
servicer.run_instruction,
request_deserializer=remote__tracking__pb2.InstructionRequest.FromString,
response_serializer=remote__tracking__pb2.InstructionResponse.SerializeToString,
),
'run_write_instructions': grpc.stream_unary_rpc_method_handler(
servicer.run_write_instructions,
request_deserializer=remote__tracking__pb2.WriteInstructionsRequest.FromString,
response_serializer=remote__tracking__pb2.WriteInstructionsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'RemoteTrackingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RemoteTrackingService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def get_resource(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RemoteTrackingService/get_resource',
remote__tracking__pb2.ResourceRequest.SerializeToString,
remote__tracking__pb2.ResourceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def release_resource(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RemoteTrackingService/release_resource',
remote__tracking__pb2.ReleaseResourceRequest.SerializeToString,
remote__tracking__pb2.ReleaseResourceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def run_instruction(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/RemoteTrackingService/run_instruction',
remote__tracking__pb2.InstructionRequest.SerializeToString,
remote__tracking__pb2.InstructionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def run_write_instructions(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/RemoteTrackingService/run_write_instructions',
remote__tracking__pb2.WriteInstructionsRequest.SerializeToString,
remote__tracking__pb2.WriteInstructionsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
py | b40cec6b4892bb4cce21c714d1132cc9250cf759 | import functools
import time
def timeit(ns=True):
r"""
9.1 Adding a wrapper to a function
You want to add a wrapper to a function to add extra processing (such as logging or timing).
>>> count_down.__name__
'count_down'
"""
if ns:
_get_time = time.time_ns
unit = 'ns'
else:
_get_time = time.time
unit = 's'
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
start = _get_time()
result = func(*args, **kwargs)
end = _get_time()
print("cost time: {}{}".format(end-start, unit))
return result
return inner
return wrapper
@timeit()
def count_down(n):
i = 0
while i < n:
i += 1
if __name__ == '__main__':
import doctest
doctest.testmod()
# count_down(10000000)
count_down(10)
|
py | b40cec9144497d2a1fdfef92b983ed218c7e801f | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow 2.0 Profiler for both Eager Mode and Graph Mode.
The profiler has two modes:
- Programmatic Mode: start(), stop() and the Profiler class. Profiling starts
when start() is called or a Profiler instance is created, and stops when
stop() is called or the Profiler instance is destroyed.
- On-demand Mode: start_profiler_server(). Profiling is performed whenever a
profiling request is received.
NOTE: Only one active profiler session is allowed. Use of simultaneous
Programmatic Mode and On-demand Mode is undefined and will likely fail.
NOTE: The Keras TensorBoard callback will automatically perform sampled
profiling. Before enabling customized profiling, set the callback flag
"profile_batches=[]" to disable automatic sampled profiling.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import threading
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_profiler = None
_profiler_lock = threading.Lock()
_run_num = 0
# This suffix should be kept in sync with kProfileEmptySuffix in
# tensorflow/core/profiler/rpc/client/capture_profile.cc.
_EVENT_FILE_SUFFIX = '.profile-empty'
class ProfilerAlreadyRunningError(Exception):
pass
class ProfilerNotRunningError(Exception):
pass
def start():
"""Start profiling.
Raises:
ProfilerAlreadyRunningError: If another profiling session is running.
"""
global _profiler
with _profiler_lock:
if _profiler is not None:
raise ProfilerAlreadyRunningError('Another profiler is running.')
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
_profiler = pywrap_tensorflow.TFE_NewProfiler(profiler_context)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
if not pywrap_tensorflow.TFE_ProfilerIsOk(_profiler):
logging.warning('Another profiler session is running which is probably '
'created by profiler server. Please avoid using profiler '
'server and profiler APIs at the same time.')
def stop():
"""Stop current profiling session and return its result.
Returns:
A binary string of tensorflow.tpu.Trace. User can write the string
to file for offline analysis by tensorboard.
Raises:
ProfilerNotRunningError: If there is no active profiling session.
"""
global _profiler
global _run_num
with _profiler_lock:
if _profiler is None:
raise ProfilerNotRunningError(
'Cannot stop profiling. No profiler is running.')
if context.default_execution_mode == context.EAGER_MODE:
context.async_wait()
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ProfilerSerializeToString(
_profiler,
buffer_)
result = pywrap_tensorflow.TF_GetBuffer(buffer_)
pywrap_tensorflow.TFE_DeleteProfiler(_profiler)
_profiler = None
_run_num += 1
return result
def maybe_create_event_file(logdir):
"""Create an empty event file if not already exists.
This event file indicates that we have a plugins/profile/ directory in the
current logdir.
Args:
logdir: log directory.
"""
for file_name in gfile.ListDirectory(logdir):
if file_name.endswith(_EVENT_FILE_SUFFIX):
return
# TODO(b/127330388): Use summary_ops_v2.create_file_writer instead.
event_writer = pywrap_tensorflow.EventsWriter(
compat.as_bytes(os.path.join(logdir, 'events')))
event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX))
def save(logdir, result):
"""Save profile result to TensorBoard logdir.
Args:
logdir: log directory read by TensorBoard.
result: profiling result returned by stop().
"""
plugin_dir = os.path.join(
logdir, 'plugins', 'profile',
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
gfile.MakeDirs(plugin_dir)
maybe_create_event_file(logdir)
with gfile.Open(os.path.join(plugin_dir, 'local.trace'), 'wb') as f:
f.write(result)
def start_profiler_server(port):
"""Start a profiler grpc server that listens to the given port.
The profiler server will keep the program running even after the training
finishes. Please shut down the server with CTRL-C. It can be used in both
eager mode and graph mode. The service is defined in
tensorflow/core/profiler/profiler_service.proto. Please use
tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture a traceable
file, following https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace
Args:
port: port profiler server listens to.
"""
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
pywrap_tensorflow.TFE_StartProfilerServer(profiler_context, port)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
class Profiler(object):
"""Context-manager eager profiler api.
Example usage:
```python
with Profiler("/path/to/logdir"):
# do some work
```
"""
def __init__(self, logdir):
self._logdir = logdir
def __enter__(self):
start()
def __exit__(self, typ, value, tb):
result = stop()
save(self._logdir, result)
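# A minimal usage sketch of the programmatic mode described in the module
# docstring: pair start()/stop() and persist the trace with save(). The logdir
# path below is an arbitrary placeholder, not a value required by the API.
if __name__ == '__main__':
  _example_logdir = '/tmp/tf_profiler_example'
  start()
  # ... run the TensorFlow computation to be profiled here ...
  _trace = stop()
  save(_example_logdir, _trace)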
|
py | b40ced6c14cdd6a56dfe81b26cf8c3a6ad05d5b8 | import base64
from pbx_gs_python_utils.utils.Files import Files
from osbot_aws.apis.S3 import S3
from osbot_aws.apis.Secrets import Secrets
from osbot_aws.apis.Lambda import load_dependency
def send_file_to_slack(file_path, title, bot_token, channel): # refactor into Slack_API class
load_dependency('requests') ; import requests
my_file = {
'file': ('/tmp/file.png', open(file_path, 'rb'), 'png')
}
payload = {
"filename" : '{0}.png'.format(title),
"token" : bot_token,
"channels" : [channel],
}
requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
return 'send png file: {0}'.format(title)
def run(event, context):
channel = event.get('channel')
png_data = event.get('png_data')
s3_bucket = event.get('s3_bucket')
s3_key = event.get('s3_key')
title = event.get('title')
team_id = event.get('team_id')
aws_secrets_id = event.get('aws_secrets_id')
if team_id == 'T7F3AUXGV': aws_secrets_id = 'slack-gs-bot' # hard coded values
if team_id == 'T0SDK1RA8': aws_secrets_id = 'slack-gsbot-for-pbx' # need to move to special function
bot_token = Secrets(aws_secrets_id).value()
if png_data:
#(fd, tmp_file) = tempfile.mkstemp('png')
tmp_file = Files.temp_file('.png')
with open(tmp_file, "wb") as fh:
fh.write(base64.decodebytes(png_data.encode()))
else:
if s3_bucket and s3_key:
tmp_file = S3().file_download_and_delete(s3_bucket, s3_key)
else:
return None
return send_file_to_slack(tmp_file, title, bot_token, channel)
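# A minimal invocation sketch for the handler above; every value in the event
# dict below is a placeholder (channel, bucket, key, title, team id and secret
# id are illustrative assumptions, not real identifiers).
if __name__ == '__main__':
    example_event = {
        'channel'       : 'C00000000',
        's3_bucket'     : 'example-bucket',
        's3_key'        : 'charts/example.png',
        'title'         : 'example-chart',
        'team_id'       : 'T00000000',
        'aws_secrets_id': 'example-slack-bot-secret',
    }
    print(run(example_event, None))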
|
py | b40ceda5e2a3b99d70ba1484e740268b1c502ea9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name, wrong-import-position
"""
Generate time histograms for each DOM from a photon raw data pickle file (as
extracted from a CLSim forward event simulation).
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'generate_histos',
'parse_args',
]
__author__ = 'P. Eller, J.L. Lanfranchi'
__license__ = '''Copyright 2017 Philipp Eller and Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from collections import OrderedDict
import cPickle as pickle
from os.path import abspath, dirname, expanduser, expandvars, isfile
import re
import sys
import numpy as np
if __name__ == '__main__' and __package__ is None:
RETRO_DIR = dirname(dirname(dirname(abspath(__file__))))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro import load_pickle
from retro.i3info.extract_gcd import extract_gcd
def generate_histos(
photons,
hole_ice_model,
t_max,
num_bins,
gcd=None,
include_rde=True,
include_noise=True,
outfile=None,
):
"""Generate time histograms from photons extracted from CLSim (repeated)
forward event simulations.
Parameters
----------
photons : string or mapping
hole_ice_model : string
Raw CLSim does not (currently) incorporate a hole ice model; this is a
modification to the angular acceptance of the photons that CLSim
returns, so it must be specified (and applied) post-hoc (e.g., in this
function).
t_max : float
Last edge in time binning (first edge is at 0), in units of ns.
num_bins : int
Number of time bins, which span from 0 to t_max.
gcd : str or None, optional
Path to GCD i3 or pkl file to get DOM coordinates, rde, and noise
(where the latter two only have an effect if `include_rde` and/or
`include_noise` are True). Regardless if this is specified, the code
will attempt to automatically figure out the GCD file used to produce
the table. If this succeeds and `gcd` is specified by the user, the
user's value is checked against that found in the data. If the user
does not specify `gcd`, the value found in the data is used. If neither
`gcd` is provided nor one can be found in the data, an error is raised.
include_rde : bool, optional
Whether to use relative DOM efficiencies (RDE) to scale the results per
DOM. RDE is included by default.
include_noise : bool, optional
Whether to add the noise floor for each DOM to the results. Noise is
included by default.
outfile : str or None, optional
If a string is specified, save the histos to a pickle file by the name
`outfile`. If not specified (or `None`), `histos` will not be written
to a file.
Returns
-------
histos : OrderedDict
Raises
------
ValueError
If `gcd` is specified but does not match a GCD file found in the data
ValueError
If `gcd` is not specified and no GCD can be found in the data
See also
--------
i3processing.sim
Perform the repeated simulation to get photons at DOMs. Generates an i3
file.
i3processing.extract_photon_info
Extract photon info (and pertinent metadata) from the i3 file produced
from the above.
retro_dom_pdfs
Produce distributions corresponding to the histograms made here, but
using Retro reco.
"""
photons_file_name = None
if isinstance(photons, basestring):
photons_file_name = photons
photons = load_pickle(photons_file_name)
dom_info = photons['doms']
bin_edges = np.linspace(0, t_max, num_bins + 1)
bin_widths = np.diff(bin_edges)
gcd_info = None
if isinstance(gcd, basestring):
exp_gcd = expanduser(expandvars(gcd))
if exp_gcd.endswith('.pkl'):
gcd_info = load_pickle(exp_gcd)
elif '.i3' in exp_gcd:
gcd_info = extract_gcd(exp_gcd)
else:
raise ValueError('No idea how to handle GCD file "{}"'.format(gcd))
if photons['gcd']:
try:
gcd_from_data = expanduser(expandvars(photons['gcd']))
if gcd_from_data.endswith('.pkl'):
gcd_info_from_data = load_pickle(gcd_from_data)
else:
gcd_info_from_data = extract_gcd(gcd_from_data)
except (AttributeError, KeyError, ValueError):
raise
#assert gcd_info is not None
else:
if gcd_info is None:
gcd_info = gcd_info_from_data
else:
pass
#if not np.all(gcd_info == gcd_info_from_data):
# print('WARNING: Using different GCD from the one used'
# ' during simulation!')
if gcd_info is None:
if photons_file_name is not None:
photons_err = ' filename "{}"'.format(photons_file_name)
raise ValueError(
'No GCD info could be found from arg `gcd`={} or in `photons`'
'{}'.format(gcd, photons_err)
)
rde = gcd_info['rde']
noise_rate_hz = gcd_info['noise']
mask = (rde == 0) | np.isnan(rde) | np.isinf(rde)
operational_doms = ~mask
rde = np.ma.masked_where(mask, rde)
quantum_efficiency = rde
histos = OrderedDict()
keep_gcd_keys = ['source_gcd_name', 'source_gcd_md5', 'source_gcd_i3_md5']
histos['gcd_info'] = OrderedDict([(k, gcd_info[k]) for k in keep_gcd_keys])
histos['include_rde'] = include_rde
histos['include_noise'] = include_noise
histos['bin_edges'] = bin_edges
histos['binning_spec'] = OrderedDict([
('domain', (0, t_max)),
('num_bins', num_bins),
('spacing', 'linear'),
('units', 'ns')
])
# Note the first number in the file is a number approximately equal (but
# greater than) the peak in the distribution, so is useless for us.
possible_paths = [
hole_ice_model,
'$I3_SRC/ice-models/resources/models/angsens/' + hole_ice_model,
'$I3_SRC/ice-models/resources/models/angsens/as.' + hole_ice_model,
'$I3_SRC/ice-models/resources/models/angsens_flasher/' + hole_ice_model,
'$I3_SRC/ice-models/resources/models/angsens_flasher/as.' + hole_ice_model,
]
coeffs_loaded = False
for path in possible_paths:
path = expanduser(expandvars(path))
if not isfile(path):
continue
try:
poly_coeffs = np.loadtxt(path)[1:]
except:
pass
else:
coeffs_loaded = True
break
if not coeffs_loaded:
raise ValueError('Could not load hole ice model at any of\n{}'
.format(possible_paths))
# We want coszen = -1 to correspond to upgoing particles, but angular
# sensitivity is given w.r.t. the DOM axis (which points "down" towards earth,
# and therefore is rotated 180-deg). So rotate the coszen polynomial about cz=0
# by negating the odd coefficients (coeffs are in ascending powers of "x").
flipped_coeffs = np.empty_like(poly_coeffs)
flipped_coeffs[0::2] = poly_coeffs[0::2]
flipped_coeffs[1::2] = -poly_coeffs[1::2]
angsens_poly = np.polynomial.Polynomial(flipped_coeffs, domain=(-1, 1))
# Attach the weights to the data
num_sims = photons['num_sims']
for data_dict in photons['doms'].values():
cz = data_dict['coszen']
try:
# Note that angular sensitivity will modify the total number of
# photons detected, and the poly is normalized as such already, so no
# normalization should be applied here.
angsens_wt = angsens_poly(cz)
except:
print(np.min(cz), np.max(cz))
raise
data_dict['weight'] = angsens_wt / num_sims
for k, array in data_dict.items():
data_dict[k] = array.astype(np.float32)
histos['results'] = results = OrderedDict()
for (string, dom), data in dom_info.items():
string_idx, dom_idx = string - 1, dom - 1
if not operational_doms[string_idx, dom_idx]:
continue
hist, _ = np.histogram(
data['time'],
bins=bin_edges,
weights=data['weight'],
normed=False
)
if include_rde:
hist *= quantum_efficiency[string_idx, dom_idx]
if include_noise:
hist += (noise_rate_hz[string_idx, dom_idx] / 1e9) * bin_widths
results[(string, dom)] = hist
if outfile is not None:
outfile = expanduser(expandvars(outfile))
print('Writing histos to\n"{}"'.format(outfile))
pickle.dump(
histos,
open(outfile, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL
)
return histos, dom_info
def parse_args(description=__doc__):
"""Parse command line arguments"""
parser = ArgumentParser(description=description)
parser.add_argument(
'--photons', required=True,
help='''Raw data pickle file containing photons'''
)
parser.add_argument(
'--hole-ice-model', required=True,
help='''Filepath to hole ice model to apply to the photons.'''
)
parser.add_argument(
'--t-max', type=int, required=True,
help='''Bin up to this maximum time (integer # of nanoseconds)'''
)
parser.add_argument(
'--num-bins', type=int, required=True,
help='''Number of bins to use for time histograms.'''
)
parser.add_argument(
'--gcd', default=None,
help='''GCD file used to obtaining relative DOM efficiencies
(RDE) and noise (if --include-noise flag is specified). This is only
necessary if one of those flags is set and if the GCD file cannot be
determined from the input file.'''
)
parser.add_argument(
'--include-rde', action='store_true',
help='''Include relative DOM efficiency corrections (per DOM) to
histograms (as obtained from GCD file).'''
)
parser.add_argument(
'--include-noise', action='store_true',
help='''Include noise offsets in histograms (as obtained from GCD
file).'''
)
parser.add_argument(
'--outfile', default=None,
help='''Filepath for storing histograms. If not specified, a default
name is derived from the --photons filename.'''
)
args = parser.parse_args()
# Construct the output filename if none is provided
if args.outfile is None:
args.outfile = re.sub(
r'_photons.pkl',
'_photon_histos_0-{:d}ns_{:d}bins.pkl'.format(
args.t_max, args.num_bins
),
args.photons
)
return args
if __name__ == '__main__':
histos, dom_info = generate_histos(**vars(parse_args())) # pylint: disable=invalid-name
|
py | b40cee025046f9218870432f38b35ea30f511164 | import logging
from data.database import TeamRole, validate_database_url
logger = logging.getLogger(__name__)
def check_health(app_config):
# Attempt to connect to the database first. If the DB is not responding,
# using the validate_database_url will timeout quickly, as opposed to
# making a normal connect which will just hang (thus breaking the health
# check).
try:
validate_database_url(app_config['DB_URI'], {}, connect_timeout=3)
except Exception as ex:
return (False, 'Could not connect to the database: %s' % ex.message)
# We will connect to the db, check that it contains some team role kinds
try:
okay = bool(list(TeamRole.select().limit(1)))
return (okay, 'Could not connect to the database' if not okay else None)
except Exception as ex:
return (False, 'Could not connect to the database: %s' % ex.message)
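# A minimal usage sketch: check_health only needs a mapping with a 'DB_URI'
# entry, so it can be called directly with the application's config dict.
# The URI below is a placeholder.
if __name__ == '__main__':
    healthy, message = check_health({'DB_URI': 'sqlite:///example.db'})
    print('healthy=%s message=%s' % (healthy, message))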
|
py | b40cf1c96e7a8275d6ee5996962d131979927137 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import os
from invoke import task, call
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LANGUAGES = ['fr']
I18N_ROOT = 'udata_ldap/translations'
I18N_DOMAIN = 'udata-ldap'
TO_CLEAN = ['build', 'dist', '**/*.pyc', 'reports']
def color(code):
'''A simple ANSI color wrapper factory'''
return lambda t: '\033[{0}{1}\033[0;m'.format(code, t)
green = color('1;32m')
red = color('1;31m')
blue = color('1;30m')
cyan = color('1;36m')
purple = color('1;35m')
white = color('1;39m')
def header(text):
'''Display an header'''
print(' '.join((blue('>>'), cyan(text))))
def info(text, *args, **kwargs):
'''Display informations'''
text = text.format(*args, **kwargs)
print(' '.join((purple('>>>'), text)))
def success(text):
'''Display a success message'''
print(' '.join((green('>>'), white(text))))
def error(text):
'''Display an error message'''
print(red('✘ {0}'.format(text)))
@task
def clean(ctx):
'''Cleanup all build artifacts'''
header(clean.__doc__)
with ctx.cd(ROOT):
for pattern in TO_CLEAN:
info('Removing {0}', pattern)
ctx.run('rm -rf {0}'.format(pattern))
@task
def test(ctx, report=False):
'''Run tests suite'''
cmd = 'pytest -v'
if report:
cmd = ' '.join((cmd, '--junitxml=reports/tests.xml'))
with ctx.cd(ROOT):
ctx.run(cmd, pty=True)
@task
def cover(ctx, html=False):
'''Run tests suite with coverage'''
cmd = 'pytest --cov udata_ldap --cov-report term'
if html:
cmd = ' '.join((cmd, '--cov-report html:reports/cover'))
with ctx.cd(ROOT):
ctx.run(cmd, pty=True)
@task
def qa(ctx):
'''Run a quality report'''
header(qa.__doc__)
with ctx.cd(ROOT):
info('Python Static Analysis')
flake8_results = ctx.run('flake8 udata_ldap', pty=True, warn=True)
if flake8_results.failed:
error('There is some lints to fix')
else:
success('No lint to fix')
info('Ensure PyPI can render README and CHANGELOG')
readme_results = ctx.run('python setup.py check -m -s', pty=True, warn=True, hide=True)
if readme_results.failed:
print(readme_results.stdout)
error('README and/or CHANGELOG is not renderable by PyPI')
else:
success('README and CHANGELOG are renderable by PyPI')
if flake8_results.failed or readme_results.failed:
error('Quality check failed')
exit(flake8_results.return_code or readme_results.return_code)
success('Quality check OK')
@task
def i18n(ctx, update=False):
'''Extract translatable strings'''
header(i18n.__doc__)
with ctx.cd(ROOT):
ctx.run('mkdir -p {}'.format(I18N_ROOT))
ctx.run('python setup.py extract_messages')
for lang in LANGUAGES:
pofile = os.path.join(I18N_ROOT, lang, 'LC_MESSAGES', '{}.po'.format(I18N_DOMAIN))
if not os.path.exists(pofile):
ctx.run('python setup.py init_catalog -l {}'.format(lang))
elif update:
ctx.run('python setup.py update_catalog -l {}'.format(lang))
success('Updated translations')
@task
def i18nc(ctx):
'''Compile translations'''
header(i18nc.__doc__)
with ctx.cd(ROOT):
ctx.run('python setup.py compile_catalog')
success('Compiled translations')
@task(i18nc)
def dist(ctx, buildno=None):
'''Package for distribution'''
header('Building a distributable package')
cmd = ['python setup.py']
if buildno:
cmd.append('egg_info -b {0}'.format(buildno))
cmd.append('bdist_wheel')
with ctx.cd(ROOT):
ctx.run(' '.join(cmd), pty=True)
success('Distribution is available in dist directory')
@task(clean, qa, call(test, report=True), dist, default=True)
def default(ctx):
'''Perform quality report, tests and packaging'''
pass
|
py | b40cf208b7724b43e3528c8028bd2ec165f65ea7 | import logging
import numpy as np
import rlberry.spaces as spaces
from rlberry.envs.finite import GridWorld
logger = logging.getLogger(__name__)
class FourRoom(GridWorld):
"""
GridWorld with four rooms.
Parameters
----------
reward_free : bool, default=False
If true, no rewards are given to the agent.
difficulty: int, {0, 1 or 2}
Difficulty 0: reward in one location
Difficulty 1: easy suboptimal reward, hard optimal reward
Difficulty 2: easy suboptimal reward, hard optimal reward,
negative rewards by default.
Note: this parameter is ignored if reward_free is True.
array_observation:
If true, the observations are converted to an array (x, y)
instead of a discrete index.
Notes
-----
The function env.sample() does not handle conversions to array states
when array_observation is True. Only the functions env.reset() and
env.step() are covered.
"""
name = "FourRoom"
def __init__(self, reward_free=False, difficulty=0, array_observation=False):
self.reward_free = reward_free
self.difficulty = difficulty
self.array_observation = array_observation
if difficulty not in [0, 1, 2]:
raise ValueError("FourRoom difficulty must be in [0, 1, 2]")
# Common parameters
nrows = 9
ncols = 9
start_coord = (0, 0)
terminal_states = ((8, 0),)
success_probability = 0.95
#
walls = ()
for ii in range(9):
if ii not in [2, 6]:
walls += ((ii, 4),)
for jj in range(9):
if jj != 7:
walls += ((4, jj),)
# Default reward according to the difficulty
if difficulty in [0, 1]:
default_reward = 0.0
elif difficulty == 2:
default_reward = -0.005
# Rewards according to the difficulty
if self.reward_free:
reward_at = {}
else:
if difficulty == 0:
reward_at = {(8, 0): 1.0}
elif difficulty in [1, 2]:
reward_at = {
(8, 0): 1.0,
(3, 3): 0.1,
}
# Init base class
GridWorld.__init__(
self,
nrows=nrows,
ncols=ncols,
start_coord=start_coord,
terminal_states=terminal_states,
success_probability=success_probability,
reward_at=reward_at,
walls=walls,
default_reward=default_reward,
)
# spaces
if self.array_observation:
self.observation_space = spaces.Box(0.0, 1.0, shape=(2,))
def _convert_index_to_float_coord(self, state_index):
yy, xx = self.index2coord[state_index]
# centering
xx = xx + 0.5
yy = yy + 0.5
# map to [0, 1]
xx = xx / self.ncols
yy = yy / self.nrows
return np.array([xx, yy])
def reset(self):
self.state = self.coord2index[self.start_coord]
state_to_return = self.state
if self.array_observation:
state_to_return = self._convert_index_to_float_coord(self.state)
return state_to_return
def step(self, action):
assert self.action_space.contains(action), "Invalid action!"
# save state for rendering
if self.is_render_enabled():
self.append_state_for_rendering(self.state)
# take step
next_state, reward, done, info = self.sample(self.state, action)
self.state = next_state
state_to_return = self.state
if self.array_observation:
state_to_return = self._convert_index_to_float_coord(self.state)
return state_to_return, reward, done, info
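# A minimal interaction sketch, assuming the usual rlberry finite-MDP API
# inherited from GridWorld (a discrete `action_space` with a sample() method).
if __name__ == "__main__":
    env = FourRoom(difficulty=1, array_observation=True)
    observation = env.reset()
    for _ in range(10):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()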
|
py | b40cf253ce2574ff6345d38321792570ba0b4eb5 | # vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Collection import Collection
from vas.shared.Resource import Resource
from vas.util.LinkUtils import LinkUtils
class NodeApplications(Collection):
"""Used to enumerate a node instance's applications
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(NodeApplications, self).__init__(client, location, 'applications', NodeApplication)
class NodeApplication(Resource):
"""An application on a node instance
:ivar str context_path: The application's context path
:ivar str host: The host the application will deploy its
revisions to
:ivar `vas.tc_server.Applications.Application` group_application: The application that this node application
is a member of
:ivar `vas.tc_server.NodeInstances.NodeInstance` instance: The node instance that contains the
application
:ivar str name: The application's name
:ivar `vas.tc_server.NodeRevisions.NodeRevisions` revisions: The application's revisions
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar str service: The service the application will deploy its
revisions to
"""
__group_application = None
__instance = None
__revisions = None
@property
def context_path(self):
return self.__context_path
@property
def host(self):
return self.__host
@property
def group_application(self):
self.__group_application = self.__group_application or Application(self._client,
self.__group_application_location)
return self.__group_application
@property
def instance(self):
self.__instance = self.__instance or NodeInstance(self._client, self.__instance_location)
return self.__instance
@property
def name(self):
return self.__name
@property
def revisions(self):
self.__revisions = self.__revisions or NodeRevisions(self._client, self.__revisions_location)
return self.__revisions
@property
def service(self):
return self.__service
def __init__(self, client, location):
super(NodeApplication, self).__init__(client, location)
self.__context_path = self._details['context-path']
self.__host = self._details['host']
self.__name = self._details['name']
self.__service = self._details['service']
self.__group_application_location = LinkUtils.get_link_href(self._details, 'group-application')
self.__instance_location = LinkUtils.get_link_href(self._details, 'node-instance')
self.__revisions_location = LinkUtils.get_link_href(self._details, 'node-revisions')
def __str__(self):
return "<{} name={} context_path={} service={} host={}>".format(self.__class__.__name__, self.__name,
self.__context_path, self.__service, self.__host)
from vas.tc_server.Applications import Application
from vas.tc_server.NodeInstances import NodeInstance
from vas.tc_server.NodeRevisions import NodeRevisions
|
py | b40cf2aa65872b98bcef527176aa4532ae65a1b4 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveNewIE
class TVANouvellesIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?tvanouvelles\.ca/videos/(?P<id>\d+)"
_TEST = {
"url": "http://www.tvanouvelles.ca/videos/5117035533001",
"info_dict": {
"id": "5117035533001",
"ext": "mp4",
"title": "L’industrie du taxi dénonce l’entente entre Québec et Uber: explications",
"description": "md5:479653b7c8cf115747bf5118066bd8b3",
"uploader_id": "1741764581",
"timestamp": 1473352030,
"upload_date": "20160908",
},
"add_ie": ["BrightcoveNew"],
}
BRIGHTCOVE_URL_TEMPLATE = (
"http://players.brightcove.net/1741764581/default_default/index.html?videoId=%s"
)
def _real_extract(self, url):
brightcove_id = self._match_id(url)
return self.url_result(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
BrightcoveNewIE.ie_key(),
brightcove_id,
)
class TVANouvellesArticleIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?tvanouvelles\.ca/(?:[^/]+/)+(?P<id>[^/?#&]+)"
_TEST = {
"url": "http://www.tvanouvelles.ca/2016/11/17/des-policiers-qui-ont-la-meche-un-peu-courte",
"info_dict": {
"id": "des-policiers-qui-ont-la-meche-un-peu-courte",
"title": "Des policiers qui ont «la mèche un peu courte»?",
"description": "md5:92d363c8eb0f0f030de9a4a84a90a3a0",
},
"playlist_mincount": 4,
}
@classmethod
def suitable(cls, url):
return (
False
if TVANouvellesIE.suitable(url)
else super(TVANouvellesArticleIE, cls).suitable(url)
)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
entries = [
self.url_result(
"http://www.tvanouvelles.ca/videos/%s" % mobj.group("id"),
ie=TVANouvellesIE.ie_key(),
video_id=mobj.group("id"),
)
for mobj in re.finditer(r'data-video-id=(["\'])?(?P<id>\d+)', webpage)
]
title = self._og_search_title(webpage, fatal=False)
description = self._og_search_description(webpage)
return self.playlist_result(entries, display_id, title, description)
|
py | b40cf3cc8221895e9a200d9c21cb33a69c86e76b |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'bcrypt',
'pyramid',
'pyramid_jinja2',
'pyramid_debugtoolbar',
'pyramid_tm',
'pyramid_ipython',
'ipython',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
]
tests_require = [
'WebTest >= 1.3.1', # py3 compat
'pytest', # includes virtualenv
'pytest-cov',
'tox',
]
setup(name='learning_journal_basic',
version='0.0',
description='learning_journal',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Regenal Grant',
author_email='[email protected]',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
extras_require={
'testing': tests_require,
},
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = learning_journal_basic:main
[console_scripts]
init_db = learning_journal.scripts_basic.initializedb:main
""",
) |
py | b40cf3f75489120b3f9fcc4971e0b3b7d5e6673c | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import shutil
import tempfile
import unittest
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from test.generic.utils import compare_model_state, compare_samples, compare_states
import torch
from classy_vision.dataset import build_dataset
from classy_vision.generic.distributed_util import is_distributed_training_run
from classy_vision.generic.util import get_checkpoint_dict
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook
from classy_vision.losses import ClassyLoss, build_loss, register_loss
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer
from classy_vision.tasks import ClassificationTask, build_task
from classy_vision.trainer import LocalTrainer
@register_loss("test_stateful_loss")
class TestStatefulLoss(ClassyLoss):
def __init__(self, in_plane):
super(TestStatefulLoss, self).__init__()
self.alpha = torch.nn.Parameter(torch.Tensor(in_plane, 2))
torch.nn.init.xavier_normal(self.alpha)
@classmethod
def from_config(cls, config) -> "TestStatefulLoss":
return cls(in_plane=config["in_plane"])
def forward(self, output, target):
value = output.matmul(self.alpha)
loss = torch.mean(torch.abs(value))
return loss
class TestClassificationTask(unittest.TestCase):
def _compare_model_state(self, model_state_1, model_state_2, check_heads=True):
compare_model_state(self, model_state_1, model_state_2, check_heads)
def _compare_samples(self, sample_1, sample_2):
compare_samples(self, sample_1, sample_2)
def _compare_states(self, state_1, state_2, check_heads=True):
compare_states(self, state_1, state_2)
def setUp(self):
# create a base directory to write checkpoints to
self.base_dir = tempfile.mkdtemp()
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def test_build_task(self):
config = get_test_task_config()
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
def test_hooks_config_builds_correctly(self):
config = get_test_task_config()
config["hooks"] = [{"name": "loss_lr_meter_logging"}]
task = build_task(config)
self.assertTrue(len(task.hooks) == 1)
self.assertTrue(isinstance(task.hooks[0], LossLrMeterLoggingHook))
def test_get_state(self):
config = get_test_task_config()
loss = build_loss(config["loss"])
task = (
ClassificationTask()
.set_num_epochs(1)
.set_loss(loss)
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
task.prepare()
task = build_task(config)
task.prepare()
def test_synchronize_losses_non_distributed(self):
"""
Tests that synchronize losses has no side effects in a non-distributed setting.
"""
test_config = get_fast_test_task_config()
task = build_task(test_config)
task.prepare()
old_losses = copy.deepcopy(task.losses)
task.synchronize_losses()
self.assertEqual(old_losses, task.losses)
def test_synchronize_losses_when_losses_empty(self):
config = get_fast_test_task_config()
task = build_task(config)
task.prepare()
task.set_use_gpu(torch.cuda.is_available())
# Losses should be empty when creating task
self.assertEqual(len(task.losses), 0)
task.synchronize_losses()
def test_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the train_steps
run the same way after loading from a checkpoint.
"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task_2 = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task.set_use_gpu(torch.cuda.is_available())
# prepare the tasks for the right device
task.prepare()
# test in both train and test mode
for _ in range(2):
task.advance_phase()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
task_2.prepare()
# task 2 should have the same state
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
# this tests that both states' iterators return the same samples
sample = next(task.get_data_iterator())
sample_2 = next(task_2.get_data_iterator())
self._compare_samples(sample, sample_2)
# test that the train step runs the same way on both states
# and the loss remains the same
task.train_step()
task_2.train_step()
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
def test_final_train_checkpoint(self):
"""Test that a train phase checkpoint with a where of 1.0 can be loaded"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks(
[CheckpointHook(self.base_dir, {}, phase_types=["train"])]
)
task_2 = build_task(config)
task.set_use_gpu(torch.cuda.is_available())
trainer = LocalTrainer()
trainer.train(task)
self.assertAlmostEqual(task.where, 1.0, delta=1e-3)
# set task_2's state as task's final train checkpoint
task_2.set_checkpoint(self.base_dir)
task_2.prepare()
# we should be able to train the task
trainer.train(task_2)
def test_test_only_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the
train_steps run the same way after loading from a training
task checkpoint on a test_only task.
"""
train_config = get_fast_test_task_config()
train_config["num_epochs"] = 10
test_config = get_fast_test_task_config()
test_config["test_only"] = True
train_task = build_task(train_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
# prepare the tasks for the right device
train_task.prepare()
# test in both train and test mode
trainer = LocalTrainer()
trainer.train(train_task)
# set task's state as task_2's checkpoint
test_only_task._set_checkpoint_dict(
get_checkpoint_dict(train_task, {}, deep_copy=True)
)
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect the phase idx to be different for a test only task
self.assertEqual(test_state["phase_idx"], -1)
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# train_phase_idx should -1
self.assertEqual(test_state["train_phase_idx"], -1)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_test_only_task(self):
"""
Tests the task in test mode by running train_steps
to make sure the train_steps run as expected on a
test_only task
"""
test_config = get_fast_test_task_config()
test_config["test_only"] = True
# delete train dataset
del test_config["dataset"]["train"]
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_train_only_task(self):
"""
Tests that the task runs when only a train dataset is specified.
"""
test_config = get_fast_test_task_config()
# delete the test dataset from the config
del test_config["dataset"]["test"]
task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
task.prepare()
# verify the the task can still be trained
trainer = LocalTrainer()
trainer.train(task)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_checkpointing_different_device(self):
config = get_fast_test_task_config()
task = build_task(config)
task_2 = build_task(config)
for use_gpu in [True, False]:
task.set_use_gpu(use_gpu)
task.prepare()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
# we should be able to run the trainer using state from a different device
trainer = LocalTrainer()
task_2.set_use_gpu(not use_gpu)
trainer.train(task_2)
@unittest.skipUnless(
is_distributed_training_run(), "This test needs a distributed run"
)
def test_get_classy_state_on_loss(self):
config = get_fast_test_task_config()
config["loss"] = {"name": "test_stateful_loss", "in_plane": 256}
task = build_task(config)
task.prepare()
self.assertIn("alpha", task.get_classy_state()["loss"])
|
py | b40cf49e5d3fe5fc7330f778023092094129dc72 | # -*- coding: utf-8 -*-
#
# IBM - Machine Learning with Python documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'IBM - Machine Learning with Python'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'IBM-Machine-Learning-with-Pythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'IBM-Machine-Learning-with-Python.tex',
u'IBM - Machine Learning with Python Documentation',
u"Jesus Santana", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'IBM-Machine-Learning-with-Python', u'IBM - Machine Learning with Python Documentation',
[u"Jesus Santana"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'IBM-Machine-Learning-with-Python', u'IBM - Machine Learning with Python Documentation',
u"Jesus Santana", 'IBM - Machine Learning with Python',
'This Machine Learning with Python course dives into the basics of machine learning using an approachable, and well-known, programming language.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | b40cf4bf9097b1c3f9894a18a9d816f0ed71e731 | #!/usr/bin/env python
from setuptools import setup
from codecs import open
from os import path
import vdf
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vdf',
version=vdf.__version__,
description='Library for working with Valve\'s VDF text format',
long_description=long_description,
url='https://github.com/ValvePython/vdf',
author='Rossen Georgiev',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords='valve keyvalue vdf tf2 dota2 csgo',
packages=['vdf'],
zip_safe=True,
)
|
py | b40cf4e17224f582e1020bb880262ee6970eeff4 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_user_with_email_successful(self):
"""Test that creating a new user with an email is successful"""
email = '[email protected]'
password = '125478963@#5$!QWERty'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_for_lowercase(self):
"""Test that the email for a new user is normalized to lowercase"""
email = '[email protected]'
user = get_user_model().objects.create_user(
email=email,
password='125478963@#5$!QWERty'
)
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'pass123')
def test_create_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
py | b40cf4faa7ad3336c8b4b862c934513fa6d34c3d | '''
Module implementing Brian's refractory mechanism.
'''
from brian2.units.fundamentalunits import Unit, DIMENSIONLESS
from brian2.units.allunits import second
from .equations import (Equations, SingleEquation, DIFFERENTIAL_EQUATION,
PARAMETER, Expression, BOOLEAN)
__all__ = ['add_refractoriness']
def check_identifier_refractory(identifier):
'''
Check that the identifier is not using a name reserved for the refractory
mechanism. The reserved names are `not_refractory`, `refractory`,
`refractory_until`.
Parameters
----------
identifier : str
The identifier to check.
Raises
------
    SyntaxError
If the identifier is a variable name used for the refractory mechanism.
'''
if identifier in ('not_refractory', 'refractory', 'refractory_until'):
        raise SyntaxError('The name "%s" is used in the refractory mechanism '
                          'and should not be used as a variable '
                          'name.' % identifier)
Equations.register_identifier_check(check_identifier_refractory)
def add_refractoriness(eqs):
'''
Extends a given set of equations with the refractory mechanism. New
parameters are added and differential equations with the "unless refractory"
flag are changed so that their right-hand side is 0 when the neuron is
refractory (by multiplication with the ``not_refractory`` variable).
Parameters
----------
eqs : `Equations`
The equations without refractory mechanism.
Returns
-------
new_eqs : `Equations`
New equations, with added parameters and changed differential
equations having the "unless refractory" flag.
'''
new_equations = []
# replace differential equations having the active flag
for eq in eqs.values():
if eq.type == DIFFERENTIAL_EQUATION and 'unless refractory' in eq.flags:
# the only case where we have to change anything
new_code = 'int(not_refractory)*(' + eq.expr.code + ')'
new_equations.append(SingleEquation(DIFFERENTIAL_EQUATION,
eq.varname, eq.dim,
expr=Expression(new_code),
flags=eq.flags))
else:
new_equations.append(eq)
# add new parameters
new_equations.append(SingleEquation(PARAMETER, 'not_refractory',
DIMENSIONLESS, var_type=BOOLEAN))
new_equations.append(SingleEquation(PARAMETER, 'lastspike', second.dim))
return Equations(new_equations)
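# Usage sketch (assumes brian2 is installed; 'tau' would be resolved from the
# NeuronGroup namespace at run time):
#     from brian2 import Equations
#     eqs = Equations('dv/dt = -v / tau : 1 (unless refractory)')
#     print(add_refractoriness(eqs))
# The result gains the 'not_refractory' and 'lastspike' parameters, and the dv/dt
# right-hand side is wrapped in int(not_refractory)*(...).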
|
py | b40cf53bad1866a1b1b0f28edf6c006301b4812f | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License 2.0;
"""ContactsService extends the GDataService for Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
# __author__ = 'dbrattli (Dag Brattli)'
import gdata
import gdata.calendar
import gdata.service
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
'/batch')
DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com'
'/m8/feeds/profiles/default/full/batch')
GDATA_VER_HEADER = 'GData-Version'
class Error(Exception):
pass
class RequestError(Error):
pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contacts service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com', additional_headers=None,
contact_list='default', **kwargs):
"""Creates a client for the Contacts service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'www.google.com'.
contact_list: string (optional) The name of the default contact list to
use when no URI is specified to the methods of the service.
Default value: 'default' (the logged in user's contact list).
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
self.contact_list = contact_list
gdata.service.GDataService.__init__(
self, email=email, password=password, service='cp', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
scheme=None):
"""Builds a feed URI.
Args:
kind: The type of feed to return, typically 'groups' or 'contacts'.
Default value: 'contacts'.
contact_list: The contact list to return a feed for.
Default value: self.contact_list.
projection: The projection to apply to the feed contents, for example
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
scheme: The URL scheme such as 'http' or 'https', None to return a
relative URI without hostname.
Returns:
A feed URI using the given kind, contact list, and projection.
Example: '/m8/feeds/contacts/default/full'.
"""
contact_list = contact_list or self.contact_list
if kind == 'profiles':
contact_list = 'domain/%s' % contact_list
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
def GetContactsFeed(self, uri=None):
uri = uri or self.GetFeedUri()
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
def GetContact(self, uri):
return self.Get(uri, converter=gdata.contacts.ContactEntryFromString)
def CreateContact(self, new_contact, insert_uri=None, url_params=None,
escape_params=True):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
the Atom Entry which will replace the contact which is
stored at the edit_url
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Put(updated_contact, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an contact with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'/m8/feeds/contacts/default/full/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def GetGroupsFeed(self, uri=None):
uri = uri or self.GetFeedUri('groups')
return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)
def CreateGroup(self, new_group, insert_uri=None, url_params=None,
escape_params=True):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def UpdateGroup(self, edit_uri, updated_group, url_params=None,
escape_params=True):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def DeleteGroup(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If media_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
    # handle in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
def GetPhoto(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
      contact_entry_or_url: a gdata.contacts.ContactEntry object or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url, converter=str)
else:
return None
def DeletePhoto(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
def GetProfilesFeed(self, uri=None):
"""Retrieves a feed containing all domain's profiles.
Args:
uri: string (optional) the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full
Returns:
On success, a ProfilesFeed containing the profiles.
On failure, raises a RequestError.
"""
uri = uri or self.GetFeedUri('profiles')
return self.Get(uri,
converter=gdata.contacts.ProfilesFeedFromString)
def GetProfile(self, uri):
"""Retrieves a domain's profile for the user.
Args:
uri: string the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full/username
Returns:
On success, a ProfileEntry containing the profile for the user.
On failure, raises a RequestError
"""
return self.Get(uri,
converter=gdata.contacts.ProfileEntryFromString)
def UpdateProfile(self, edit_uri, updated_profile, url_params=None,
escape_params=True):
"""Updates an existing profile.
Args:
edit_uri: string The edit link URI for the element being updated
updated_profile: string atom.Entry or subclass containing
the Atom Entry which will replace the profile which is
stored at the edit_url.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_params will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, raises a RequestError.
"""
return self.Put(updated_profile, self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params,
converter=gdata.contacts.ProfileEntryFromString)
def ExecuteBatch(self, batch_feed, url,
converter=gdata.contacts.ContactsFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.contacts.ContactFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: str The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is ContactsFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ContactsFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def ExecuteBatchProfiles(self, batch_feed, url,
converter=gdata.contacts.ProfilesFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: string The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is
gdata.profiles.ProfilesFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ProfilesFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def _CleanUri(self, uri):
"""Sanitizes a feed URI.
Args:
uri: The URI to sanitize, can be relative or absolute.
Returns:
The given URI without its http://server prefix, if any.
Keeps the leading slash of the URI.
"""
url_prefix = 'http://%s' % self.server
if uri.startswith(url_prefix):
uri = uri[len(url_prefix):]
return uri
class ContactsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None, group=None):
self.feed = feed or '/m8/feeds/contacts/default/full'
if group:
self._SetGroup(group)
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
def _GetGroup(self):
if 'group' in self:
return self['group']
else:
return None
def _SetGroup(self, group_id):
self['group'] = group_id
group = property(_GetGroup, _SetGroup,
doc='The group query parameter to find only contacts in this group')
class GroupsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/groups/default/full'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
class ProfilesQuery(gdata.service.Query):
"""Constructs a query object for the profiles feed."""
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/profiles/default/full'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
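# Usage sketch (hypothetical credentials; ProgrammaticLogin is inherited from
# gdata.service.GDataService):
#     client = ContactsService(email='user@example.com', password='secret',
#                              source='example-app')
#     client.ProgrammaticLogin()
#     feed = client.GetContactsFeed()
#     for entry in feed.entry:
#         print entry.title.text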
|
py | b40cf5aae3af1591e437c310f0f6942fce7f3846 | from django.shortcuts import render
from .models import Blog
# Create your views here.
def index(request):
blogs = Blog.objects.order_by('-date')
context = {'blogs': blogs}
return render(request, 'blog_app/index.html', context)
def blog(request, blog_id):
blog = Blog.objects.get(pk=blog_id)
context = {'blog': blog}
return render(request, 'blog_app/blog.html', context)
|
py | b40cf72be412fecc1aff2dc81ea773ed66f9a3e0 |
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class bpdata_train(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.bp = pd.read_csv(csv_file, names = ['sbp','dbp','mbp','cla'])
self.root_dir = root_dir
self.transform = transform
self.to_tensor = transforms.ToTensor()
def __len__(self):
return len(self.bp)
def __getitem__(self, idx):
file_name = os.path.join(self.root_dir,'check%d.csv'%(idx+1))
patient_file = pd.read_csv(file_name,names = ['ppg','ecg'])
#patient_file = patient_file[['ppg','ecg']]
# patient_file=np.asarray(patient_file)
patient_file = np.asarray(patient_file)
# print(img_as_np.shape)
#patient_file = self.to_tensor(img_as_np)
#print(patient_file.shape)
#print(self.bp)
sbp_list = self.bp['sbp']
dbp_list = self.bp['dbp']
mbp_list = self.bp['mbp']
class_list = self.bp['cla']
#print(sbp_list[1])
sbp_data = sbp_list[idx]
dbp_data = dbp_list[idx]
mbp_data = mbp_list[idx]
class_data = class_list[idx]
#print(class_data)
sample = {'file': patient_file, 'sbp': sbp_data,'dbp': dbp_data,'mbp': mbp_data}
if self.transform:
sample = self.transform(sample)
return patient_file,mbp_data, class_data
# bp_dataset = bpdata(csv_file='/home/jeyamariajose/Projects/dl/bp.csv',
# root_dir='/home/jeyamariajose/Projects/dl/data/')
# train_loader = torch.utils.data.DataLoader(dataset=bp_dataset,
# batch_size=batch_size,
# shuffle=True)
# for i,(data,label) in enumerate(train_loader):
# print(label)
class bpdata_test(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.bp = pd.read_csv(csv_file, names = ['sbp','dbp','mbp','cla'])
self.root_dir = root_dir
self.transform = transform
self.to_tensor = transforms.ToTensor()
def __len__(self):
return len(self.bp)
def __getitem__(self, idx):
file_name = os.path.join(self.root_dir,'check%d.csv'%(idx+4011))
patient_file = pd.read_csv(file_name,names = ['ppg','ecg'])
#patient_file = patient_file[['ppg','ecg']]
# patient_file=np.asarray(patient_file)
patient_file = np.asarray(patient_file)
# print(img_as_np.shape)
#patient_file = self.to_tensor(img_as_np)
#print(patient_file.shape)
#print(self.bp)
sbp_list = self.bp['sbp']
dbp_list = self.bp['dbp']
mbp_list = self.bp['mbp']
class_list = self.bp['cla']
#print(sbp_list[1])
sbp_data = sbp_list[idx]
dbp_data = dbp_list[idx]
mbp_data = mbp_list[idx]
class_data = class_list[idx]
sample = {'file': patient_file, 'sbp': sbp_data,'dbp': dbp_data,'mbp': mbp_data}
if self.transform:
sample = self.transform(sample)
return patient_file,mbp_data, class_data
class bpdata_val(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.bp = pd.read_csv(csv_file, names = ['sbp','dbp','mbp','cla'])
self.root_dir = root_dir
self.transform = transform
self.to_tensor = transforms.ToTensor()
def __len__(self):
return len(self.bp)
def __getitem__(self, idx):
file_name = os.path.join(self.root_dir,'check%d.csv'%(idx+4512))
patient_file = pd.read_csv(file_name,names = ['ppg','ecg'])
#patient_file = patient_file[['ppg','ecg']]
# patient_file=np.asarray(patient_file)
patient_file = np.asarray(patient_file)
# print(img_as_np.shape)
#patient_file = self.to_tensor(img_as_np)
#print(patient_file.shape)
#print(self.bp)
sbp_list = self.bp['sbp']
dbp_list = self.bp['dbp']
mbp_list = self.bp['mbp']
class_list = self.bp['cla']
#print(sbp_list[1])
sbp_data = sbp_list[idx]
dbp_data = dbp_list[idx]
mbp_data = mbp_list[idx]
class_data = class_list[idx]
sample = {'file': patient_file, 'sbp': sbp_data,'dbp': dbp_data,'mbp': mbp_data}
if self.transform:
sample = self.transform(sample)
return patient_file,mbp_data, class_data
|
py | b40cf75f1c526c04c9b5d282963c60aa5815b63f | import pygame
import random
pygame.init()
WIDTH, HEIGHT = (750, 750)
WHITE = (255, 255, 255)
BLACK = (20, 18, 18)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
bg = pygame.transform.scale(pygame.image.load("assets/background-black.png"), (WIDTH, HEIGHT))
blue_ship = pygame.image.load("assets/pixel_ship_blue_small.png")
green_ship = pygame.image.load("assets/pixel_ship_green_small.png")
red_ship = pygame.image.load("assets/pixel_ship_red_small.png")
yellow_ship = pygame.image.load("assets/pixel_ship_yellow.png")
yellow_laser = pygame.image.load("assets/pixel_laser_yellow.png")
red_laser = pygame.image.load("assets/pixel_laser_red.png")
green_laser = pygame.image.load("assets/pixel_laser_green.png")
blue_laser = pygame.image.load("assets/pixel_laser_blue.png")
background_music = pygame.mixer.Sound("assets/feel_the_beat_cropped.wav")
shoot_effect = pygame.mixer.Sound("assets/bullet.wav")
font = pygame.font.Font('freesansbold.ttf', 20)
bg_rect = bg.get_rect()
SCREEN = pygame.display.set_mode((WIDTH, HEIGHT))
SONG_END = pygame.USEREVENT + 1
pygame.display.set_caption('Starships')
enemies = []
clock = pygame.time.Clock()
enemy_images = [blue_ship, green_ship, red_ship]
enemy_lasers = [blue_laser, green_laser, red_laser]
level = 1
life = 3
score = 0
FPS = 60
lost = False
class Ship:
def __init__(self, xcord, ycord, health):
self.x = xcord
self.y = ycord
self.img = None
self.laser = None
self.lasers = []
self.health = health
self.VEL = 15
def draw(self, window):
window.blit(self.img, (self.x, self.y))
def shoot(self):
missile = Laser(self.x, self.y, self.laser)
pygame.mixer.Channel(1).play(missile.fire_sound)
self.lasers.append(missile)
def get_width(self):
return self.img.get_width()
def get_height(self):
return self.img.get_height()
class Player(Ship):
def __init__(self, xcord, ycord, health):
super().__init__(xcord, ycord, health)
self.img = yellow_ship
self.mask = pygame.mask.from_surface(self.img)
self.laser = yellow_laser
self.max_health = 100
def draw(self, window):
super().draw(window)
self.healthbar(window)
def move_lasers(self, objects):
global score
for missile in self.lasers:
for obj in objects:
if missile.collide(obj):
if missile in self.lasers:
self.lasers.remove(missile)
enemies.remove(obj)
score += obj.score
if missile.y < -50 and missile in self.lasers:
self.lasers.remove(missile)
missile.draw(SCREEN)
missile.move(missile.VEL)
def healthbar(self, window):
pygame.draw.rect(window, (255, 0, 0), (self.x, self.y + self.img.get_height() + 10, self.img.get_width(), 10))
pygame.draw.rect(window, (0, 255, 0), (
self.x, self.y + self.img.get_height() + 10, self.img.get_width() * (self.health / self.max_health), 10))
def hit(self, damage):
if self.health - damage > 0:
self.health -= damage
else:
self.health = 0
class Enemy(Ship):
def __init__(self, xcord, ycord, health, image, laser):
super().__init__(xcord, ycord, health)
self.img = image
self.mask = pygame.mask.from_surface(self.img)
self.laser = laser
self.VEL = 5
self.score = 10
self.damage_inflict = 10
def draw(self, window):
super().draw(window)
def move(self):
global life
self.y += self.VEL
        if self.y > HEIGHT:
enemies.remove(self)
lose_life(1)
def move_lasers(self, user):
for missile in self.lasers:
if missile.collide(user):
if missile in self.lasers:
self.lasers.remove(missile)
user.hit(self.damage_inflict)
            if missile.y > HEIGHT + 50 and missile in self.lasers:
self.lasers.remove(missile)
missile.draw(SCREEN)
missile.move(-missile.VEL)
class Laser:
def __init__(self, xcord, ycord, image):
self.x = xcord
self.y = ycord
self.img = image
self.mask = pygame.mask.from_surface(image)
self.lasers = []
self.VEL = 20
self.fire_sound = shoot_effect
def draw(self, window):
window.blit(self.img, (self.x, self.y))
def move(self, velocity):
self.y -= velocity
def collide(self, obj):
return collide(self, obj)
def lose_life(damage):
global life
if life - damage > 0:
life -= damage
else:
life = 0
def collide(object1, object2):
offset_x = object2.x - object1.x
offset_y = object2.y - object1.y
return object1.mask.overlap(object2.mask, (offset_x, offset_y)) is not None
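# Descriptive note: collide() compares the two sprites' pixel masks at the offset
# between their top-left corners, so hits follow visible pixels rather than bounding boxes.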
def create_enemies(level):
while len(enemies) < level * 5:
x = random.randint(100, WIDTH - 100)
y = random.randint(-200, -50)
image = random.choice(enemy_images)
laser = enemy_lasers[enemy_images.index(image)]
obj = Enemy(x, y, 100, image, laser)
enemies.append(obj)
def reset():
global level, life, score, lost, player, enemies
level = 1
life = 3
score = 0
lost = False
enemies = []
player = Player(375, 600, 100)
def main():
global level, score, life, player
player = Player(375, 600, 100)
pygame.mixer.Channel(0).play(background_music)
pygame.mixer.Channel(0).set_endevent(SONG_END)
background_music.set_volume(0.15)
shoot_effect.set_volume(0.05)
while True:
# Pygame system controls
########################
# Setting the frame rate of the game
clock.tick(FPS)
# Check for quit events in the game
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == SONG_END:
pygame.mixer.Channel(0).play(background_music)
# Check for keyed events in the game
####################################
keys = pygame.key.get_pressed()
# Check for user inputs validate boundary values
if keys[pygame.K_LEFT] and player.x > 0:
player.x -= player.VEL
if keys[pygame.K_RIGHT] and player.x + player.get_width() < WIDTH:
player.x += player.VEL
if keys[pygame.K_UP] and player.y > 0:
player.y -= player.VEL
if keys[pygame.K_DOWN] and player.y + player.get_height() < HEIGHT:
player.y += player.VEL
if keys[pygame.K_SPACE]:
player.shoot()
# Creation of the gui components
################################
lives = font.render(f"Lives: {life}", True, WHITE, BLACK)
levels = font.render(f"Level: {level}", True, WHITE, BLACK)
scores = font.render(f"Score: {score}", True, WHITE, BLACK)
create_enemies(level)
# Game mechanics for all the objects in the game
###############################################
# Game settings
if 0 <= score < 1000:
level = 1
elif 1000 <= score < 2000:
level = 2
else:
level = 3
        # Game-over conditions
if life == 0 or player.health == 0:
pygame.mixer.Channel(0).stop()
break
# Enemy mechanics
for enemy in enemies:
enemy.move()
if random.randint(0, 120) == 1:
enemy.shoot()
if collide(player, enemy):
player.hit(enemy.damage_inflict)
if enemy in enemies:
enemies.remove(enemy)
# Drawing all the objects to the screen
#######################################
# Background
SCREEN.blit(bg, (0, 0, WIDTH, HEIGHT))
# Lives stat
SCREEN.blit(lives, (10, 10))
SCREEN.blit(levels, (WIDTH - 100, 10))
SCREEN.blit(scores, (10, 40))
# Enemy ships
for enemy in enemies:
enemy.move_lasers(player)
enemy.draw(SCREEN)
# Player ship
player.move_lasers(enemies)
player.draw(SCREEN)
# Update the screen
pygame.display.update()
def main_menu():
title_font = pygame.font.SysFont("comicsans", 70)
while True:
SCREEN.blit(bg, bg_rect)
title = title_font.render(f"Press space to begin...", True, WHITE, BLACK)
SCREEN.blit(title, (WIDTH / 2 - title.get_width() / 2, 350))
pygame.display.update()
keys = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if keys[pygame.K_SPACE]:
reset()
main()
pygame.time.wait(1000)
main_menu()
|
py | b40cf7e38fc6f0990f47c2ed828cc6611df45796 | """
Problem name: AvoidRoads
Class: TCO '03 Semifinals 4, Division I Level One
Description: https://community.topcoder.com/stat?c=problem_statement&pm=1889
"""
from collections import defaultdict
def solve(args):
""" Solved as follows:
1) Store all road blocks in a dictionary
2) Initialize borders of the map:
2.1) NPaths in origin (0,0) is 1
2.2) NPaths for the first row (x=0) is p[0][y] = p[0][y-1]; but if
there is a block, then p[0][y] = 0
2.3) Same for the first column (y=0)
3) Traverse the map in increasing order of rows and columns.
NPaths = Paths from down + paths from left
        If there is a block on either of the two paths, the corresponding term
becomes 0
The solution is p[n][m]
"""
n, m, blocks = args
n += 1
m += 1
barriers = defaultdict(set)
for b in blocks:
coords = list(map(int, b.split()))
start = tuple(coords[:2])
end = tuple(coords[2:])
barriers[start].add(end)
barriers[end].add(start)
paths = []
for x in range(n):
paths.append([0]*m)
paths[0][0] = 1
# first column
for x in range(1, n):
paths[x][0] = 0 if (x-1, 0) in barriers[(x, 0)] else paths[x-1][0]
# first row
for y in range(1, m):
paths[0][y] = 0 if (0, y-1) in barriers[(0, y)] else paths[0][y-1]
# the rest
for x in range(1, n):
for y in range(1, m):
left_paths = 0 if (x, y-1) in barriers.get((x, y), set()) else paths[x][y-1]
down_paths = 0 if (x-1, y) in barriers.get((x, y), set()) else paths[x-1][y]
paths[x][y] = left_paths + down_paths
return paths[n-1][m-1]
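# Worked example: for the 1x1 grid with no blocks, paths[0][0] = 1, the first row and
# first column stay at 1, and paths[1][1] = paths[0][1] + paths[1][0] = 2, matching the
# ([1, 1, []], 2) test case below.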
if __name__ == "__main__":
test_cases = [([6, 6, ["0 0 0 1", "6 6 5 6"]], 252),
([1, 1, []], 2),
([35, 31, []], 6406484391866534976),
([2, 2, ["0 0 1 0", "1 2 2 2", "1 1 2 1"]], 0)
]
for index, case in enumerate(test_cases):
output = solve(case[0])
assert output == case[1], 'Case {} failed: {} != {}'.format(
index, output, case[1])
else:
print('All tests OK')
|
py | b40cf847ad881a1398261e0cc0a19b91264de141 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
@author: Mindaugas Greibus
'''
import glob,os, re
import sys
import subprocess
#reload(sys)
#sys.setdefaultencoding('utf-8')
train_repo_name="train"
test_repo_name="test"
wordSet = set([])
def processRepo(repo_name):
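    # Descriptive note: concatenates the per-corpus _<corpus>_<repo>.fileids and
    # _<corpus>_<repo>.transcription files found under ../target/ into single
    # liepa_<repo>.fileids and liepa_<repo>_sil.transcription files, collapsing
    # repeated whitespace in transcription lines.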
src_dir = "../"+repo_name+"_repo/"
with open("../target/liepa_"+repo_name+".fileids", "w") as outfile:
for corpus_dir in os.listdir(src_dir):
if(os.path.isdir(os.path.join(src_dir, corpus_dir))):
with open(src_dir + "/../target/_"+ corpus_dir+"_"+repo_name+".fileids", "r") as infile:
for line in infile:
outfile.write(line)
with open("../target/liepa_"+repo_name+"_sil.transcription", "w") as outfile:
for corpus_dir in os.listdir(src_dir):
if(os.path.isdir(os.path.join(src_dir, corpus_dir))):
with open(src_dir + "/../target/_"+ corpus_dir+"_"+repo_name+".transcription", "r") as infile:
for line in infile:
# line = line.decode("utf-8")
line = re.sub(r'\s{2,}',r" ",line)
# line = line.replace(u"\ufeff", "")
outfile.write(line)
if __name__ == "__main__":
processRepo(test_repo_name)
processRepo(train_repo_name)
|
py | b40cf8687c6d3f0c1d0ce09bc65ad8b0f76c5fc9 | import logging
from hashlib import md5
from typing import Optional
from great_expectations.util import load_class
logger = logging.getLogger(__name__)
class Anonymizer:
"""Anonymize string names in an optionally-consistent way."""
def __init__(self, salt=None):
if salt is not None and not isinstance(salt, str):
logger.error("invalid salt: must provide a string. Setting a random salt.")
salt = None
if salt is None:
import secrets
self._salt = secrets.token_hex(8)
else:
self._salt = salt
@property
def salt(self):
return self._salt
def anonymize(self, string_):
salted = self._salt + string_
return md5(salted.encode("utf-8")).hexdigest()
def anonymize_object_info(
self,
anonymized_info_dict,
ge_classes,
object_=None,
object_class=None,
object_config=None,
runtime_environment=None,
) -> dict:
assert (
object_ or object_class or object_config
), "Must pass either object_ or object_class or object_config."
if runtime_environment is None:
runtime_environment = {}
object_class_name: Optional[str] = None
try:
if object_class is None and object_ is not None:
object_class = object_.__class__
elif object_class is None and object_config is not None:
object_class_name = object_config.get("class_name")
object_module_name = object_config.get(
"module_name"
) or runtime_environment.get("module_name")
object_class = load_class(object_class_name, object_module_name)
object_class_name = object_class.__name__
for ge_class in ge_classes:
if issubclass(object_class, ge_class):
anonymized_info_dict["parent_class"] = ge_class.__name__
if not object_class == ge_class:
anonymized_info_dict["anonymized_class"] = self.anonymize(
object_class_name
)
break
if not anonymized_info_dict.get("parent_class"):
anonymized_info_dict["parent_class"] = "__not_recognized__"
anonymized_info_dict["anonymized_class"] = self.anonymize(
object_class_name
)
except AttributeError:
anonymized_info_dict["parent_class"] = "__not_recognized__"
anonymized_info_dict["anonymized_class"] = self.anonymize(object_class_name)
return anonymized_info_dict
@staticmethod
def _is_parent_class_recognized(
classes_to_check,
object_=None,
object_class=None,
object_config=None,
) -> Optional[str]:
"""
Check if the parent class is a subclass of any core GE class.
This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
Returns:
The name of the parent class found, or None if no parent class was found
"""
assert (
object_ or object_class or object_config
), "Must pass either object_ or object_class or object_config."
try:
if object_class is None and object_ is not None:
object_class = object_.__class__
elif object_class is None and object_config is not None:
object_class_name = object_config.get("class_name")
object_module_name = object_config.get("module_name")
object_class = load_class(object_class_name, object_module_name)
for class_to_check in classes_to_check:
if issubclass(object_class, class_to_check):
return class_to_check.__name__
return None
except AttributeError:
return None
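# Usage sketch: with a fixed salt the digests are reproducible across runs, which is
# what "optionally-consistent" means in the class docstring.
#     anonymizer = Anonymizer(salt="example-salt")
#     anonymizer.anonymize("my_datasource")  # same salt + input -> same md5 hex digest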
|
py | b40cf9448c22f65ffbfb177977c481e2c6e666ae | #!/usr/bin/env python3
"""
File: ode.py
Author: pdsherman
Date: May 2021
"""
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
dr = 0.05
wn = 5.5
y0 = 5.0
acdr = math.acos(dr)
sdr = math.sqrt(1.0 - dr**2)
wd = wn * sdr
a = dr*wn
alpha = 2*dr*wn
beta = wn**2.0
def y_actual(t):
n1 = math.sin(wd*t + acdr)
n2 = math.exp(-a*t)/sdr
return y0*n1*n2
def x_dot_equations(x):
return [x[1], -alpha*x[1]-beta*x[0]]
def print_line(s, x, f):
print("{:^5} | {:^9.4f} {:^9.4f} | {:^9.4f} {:^9.4f} |".format(s, x[0], x[1], f[0], f[1]))
def RK3(h, x0, print_flag = False):
A = [[0.0, 0.0], [0.5, 0.0], [-1.0, 2.0]]
B = [1./6.0, 2.0/3.0, 1.0/6.0]
return RK(h, x0, A, B, print_flag)
def RK4(h, x0, print_flag = False):
A = [[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.0, 1.0]]
B = [1./6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0]
return RK(h, x0, A, B, print_flag)
def RK_optimal(h, x0, print_flag = False):
A = [[0.0, 0.0, 0.0],
[0.4, 0.0, 0.0],
[0.29697761, 0.15875964, 0.0],
[0.21810040, -3.05096516, 3.83286476]]
B = [0.17476028, -0.55148066, 1.20553560, 0.17118478]
return RK(h, x0, A, B, print_flag)
def RK(h, x0, A, B, print_flag):
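    # Descriptive note: A is the Butcher matrix and B the stage weights. Stage i
    # evaluates the derivatives at x0 + h * sum_j(A[i][j] * f_j), and the step
    # returns x1 = x0 + h * sum_i(B[i] * f_i).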
n = len(x0)
x = [0.0] * n
x1 = [0.0] * n
f = []
if print_flag:
print("-"*51)
print("{:^5} | {:^9} {:^9} | {:^9} {:^9} |".format("Stage", "X0", "X1", "f0", "f1"))
print("-"*51)
for i in range(len(B)):
dx = [0.0] * n
for j in range(i):
for k in range(n):
dx[k] += A[i][j]*f[j][k]
for k in range(n):
x[k] = x0[k] + h*dx[k]
x_dot = x_dot_equations(x)
f.append(x_dot)
if print_flag:
print_line(i+1, x, f[-1])
if print_flag:
print("-"*51)
# Explicit Equation
dx = [0.0, 0.0]
for i in range(len(B)):
for k in range(n):
dx[k] += B[i]*f[i][k]
for k in range(n):
x1[k] = x0[k] + h*dx[k]
return x1
## ------------------------- ##
## -- SCRIPT START -- ##
## ------------------------- ##
if __name__ == "__main__":
plt.figure()
T_MAX = 10.0
t = np.linspace(0.0, T_MAX, 250)
# Actual solution
y = [y_actual(x) for x in t]
# Runga-Kutta
t_rk = [0.0]
x_rk3 = [y0, 0.0]
y_rk3 = [x_rk3[0]]
x_rk4 = [y0, 0.0]
y_rk4 = [x_rk4[0]]
x_rk_opt = [y0, 0.0]
y_rk_opt = [x_rk_opt[0]]
h = 0.08
while True:
if(t_rk[-1] + h > T_MAX):
break
t_rk.append(t_rk[-1] + h)
y_t = y_actual(t_rk[-1])
x_rk3 = RK3(h, x_rk3)
y_rk3.append(x_rk3[0])
x_rk4 = RK4(h, x_rk4)
y_rk4.append(x_rk4[0])
x_rk_opt = RK_optimal(h, x_rk_opt)
y_rk_opt.append(x_rk_opt[0])
print("\n")
print("3-stage: {:.7f}".format(y_rk3[-1]))
print("4-stage: {:.7f}".format(y_rk4[-1]))
print("Optimal: {:.7f}".format(y_rk_opt[-1]))
# Display
plt.plot(t, y)
plt.plot(t_rk, y_rk3, 'r-o', markersize=3)
plt.plot(t_rk, y_rk4, 'g-o', markersize=3)
plt.plot(t_rk, y_rk_opt, 'k-o', markersize=3)
plt.show()
|
py | b40cf98246a2042c5213b4d6d884c5f0a548e769 | # coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.keras as kr
if sys.version_info[0] > 2:
is_py3 = True
else:
reload(sys)
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
"""如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码"""
if not is_py3:
return word.encode(encoding)
else:
return word
def native_content(content):
if not is_py3:
return content.decode('utf-8')
else:
return content
def open_file(filename, mode='r'):
"""
    Common file helper, usable under both Python 2 and Python 3.
mode: 'r' or 'w' for read or write
"""
if is_py3:
return open(filename, mode, encoding='utf-8', errors='ignore')
else:
return open(filename, mode)
def read_file(filename):
"""读取文件数据"""
contents, labels = [], []
with open_file(filename) as f:
for line in f:
try:
label, content = line.strip().split('\t')
if content:
contents.append(list(native_content(content)))
labels.append(native_content(label))
except:
pass
return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
"""根据训练集构建词汇表,存储"""
data_train, _ = read_file(train_dir)
all_data = []
for content in data_train:
all_data.extend(content)
counter = Counter(all_data)
count_pairs = counter.most_common(vocab_size - 1)
words, _ = list(zip(*count_pairs))
    # Add a <PAD> token so that all texts can be padded to the same length
words = ['<PAD>'] + list(words)
open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
"""读取词汇表"""
# words = open_file(vocab_dir).read().strip().split('\n')
with open_file(vocab_dir) as fp:
        # Under Python 2, convert every value to unicode
words = [native_content(_.strip()) for _ in fp.readlines()]
word_to_id = dict(zip(words, range(len(words))))
return words, word_to_id
def read_category():
"""读取分类目录,固定"""
categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
categories = [native_content(x) for x in categories]
cat_to_id = dict(zip(categories, range(len(categories))))
return categories, cat_to_id
def to_words(content, words):
"""将id表示的内容转换为文字"""
return ''.join(words[x] for x in content)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
"""将文件转换为id表示"""
contents, labels = read_file(filename)
data_id, label_id = [], []
for i in range(len(contents)):
data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
label_id.append(cat_to_id[labels[i]])
    # Use pad_sequences provided by Keras to pad texts to a fixed length
x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # convert labels to one-hot representation
return x_pad, y_pad
def batch_iter(x, y, batch_size=64):
"""生成批次数据"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
indices = np.random.permutation(np.arange(data_len))
x_shuffle = x[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
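# Usage sketch (file paths are hypothetical; any tab-separated "label<TAB>content"
# files in the same format will do):
#     categories, cat_to_id = read_category()
#     words, word_to_id = read_vocab('data/cnews/cnews.vocab.txt')
#     x_train, y_train = process_file('data/cnews/cnews.train.txt', word_to_id, cat_to_id, 600)
#     for x_batch, y_batch in batch_iter(x_train, y_train, 64):
#         pass  # feed each batch to the model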
|
py | b40cf9a47363388992b045a84d236dd9ca52ae3c | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#########################################################################
# Run the scan tasks
import datetime
import os
import re
import yaml
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
# import sys
os.environ["DJANGO_SETTINGS_MODULE"] = 'admin.settings.local_cj'
import django
import time
django.setup()
from scanhosts.models import HostLoginifo
from scanhosts.util.nmap_all_server import NmapNet
from scanhosts.util.nmap_all_server import NmapDocker
from scanhosts.util.nmap_all_server import NmapKVM
from scanhosts.util.nmap_all_server import NmapVMX
from scanhosts.util.nmap_all_server import snmp_begin
from scanhosts.util.j_filter import FilterRules
from scanhosts.util.get_pv_relation import GetHostType
from detail.models import PhysicalServerInfo,ConnectionInfo,OtherMachineInfo,StatisticsRecord
from operations.models import MachineOperationsInfo
from scanhosts.util.nmap_all_server import NetDevLogin
from admin.settings.local_cj import BASE_DIR
import logging
logger = logging.getLogger("django")
from apps.detail.utils.machines import Machines
# def net_begin():
# '''
#     Start the network scan
# :return:
# '''
# nm = NmapNet(oid='1.3.6.1.2.1.1.5.0',Version=2)
# nm_res = nm.query()
# print "...................",nm_res
def main():
'''
    Read the configuration needed for the scans
:return:
'''
s_conf = yaml.load(open('conf/scanhosts.yaml'))
s_nets = s_conf['hostsinfo']['nets']
s_ports = s_conf['hostsinfo']['ports']
s_pass = s_conf['hostsinfo']['ssh_pass']
s_cmds = s_conf['hostsinfo']['syscmd_list']
s_keys = s_conf['hostsinfo']['ssh_key_file']
s_blacks = s_conf['hostsinfo']['black_list']
s_emails = s_conf['hostsinfo']['email_list']
n_sysname_oid = s_conf['netinfo']['sysname_oid']
n_sn_oid = s_conf['netinfo']['sn_oids']
n_commu = s_conf['netinfo']['community']
n_login_sw = s_conf['netinfo']['login_enable']
n_backup_sw = s_conf['netinfo']['backup_enable']
n_backup_sever = s_conf['netinfo']['tfp_server']
d_pass = s_conf['dockerinfo']['ssh_pass']
starttime = datetime.datetime.now()
'''
    Scan host information
'''
for nmap_type in s_nets:
unkown_list,key_not_login_list = snmp_begin(nmap_type,s_ports,s_pass,s_keys,s_cmds,s_blacks,s_emails)
'''
    Scan network device information
'''
nm = NmapNet(n_sysname_oid,n_sn_oid,n_commu)
if key_not_login_list:
for item in key_not_login_list:
is_net = nm.query(item)
if is_net[0] or is_net[1]:
HostLoginifo.objects.update_or_create(ip=item,hostname=is_net[0],sn=is_net[1],mathine_type="Network device")
else:
HostLoginifo.objects.update_or_create(ip=item,ssh_port=key_not_login_list[item][0],ssh_status=0)
other_sn = item.replace('.','')
ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
if not ob:
print(".........................OtherMachineInfo",item,other_sn)
OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"SSH端口存活,无法登录",oth_cab_id=1)
if unkown_list:
for item in unkown_list:
is_net = nm.query(item)
if is_net[0] or is_net[1]:
HostLoginifo.objects.update_or_create(ip=item,hostname=is_net,mathine_type="Network device")
else:
HostLoginifo.objects.update_or_create(ip=item,ssh_status=0)
other_sn = item.replace('.','')
ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
if not ob:
OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"IP存活,非Linux服务器",oth_cab_id=1)
# '''
    # Network device backup / login feature
# '''
# net_login_dct = {}
# with open("%s/conf/net_dev.pass"%BASE_DIR,'r') as f:
# for item in f.readlines():
# ip,username,passwd,en_passwd = re.split("\s+",item)[:4]
# net_login_dct[ip] = (username,passwd,en_passwd)
# if n_login_sw == "True":
# res = NetDevLogin(dev_ips=net_login_dct,backup_sw=n_backup_sw,back_server=n_backup_sever)
'''
    Rules: deduplicate host information and build the relation dict
'''
ft = FilterRules()
key_ip_dic = ft.run()
'''
    Sort out the relations between virtual servers and their host servers
'''
pv = GetHostType()
p_relate_dic = pv.get_host_type(key_ip_dic)
'''
    Update the host virtualization type mapping in the table
'''
ip_key_dic = {v:k for k,v in key_ip_dic.items()}
docker_p_list = p_relate_dic["docker-containerd"]
kvm_p_list = p_relate_dic["qemu-system-x86_64"]
vmware_p_list = p_relate_dic["vmx"]
for item in docker_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="1")
for item in kvm_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="0")
for item in vmware_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="2")
'''
    Scan the relations between Docker hosts and their containers
'''
ds = NmapDocker(s_cmds,d_pass,ip_key_dic)
ds.do_nmap(docker_p_list)
'''
    Scan the relations between KVM hosts and their guests
    '''
ks = NmapKVM(ip_key_dic)
ks.do_nmap(kvm_p_list)
'''
    Scan ESXi virtual machine configurations
'''
ne = NmapVMX(vmware_p_list,ip_key_dic)
ne.dosnmp()
'''
    Update the status table and the user info table
'''
c_sn_lst = [item.sn_key for item in ConnectionInfo.objects.all()]
o_sn_lst = [item.sn_key for item in OtherMachineInfo.objects.all()]
old_sn_list = [item.sn_key for item in MachineOperationsInfo.objects.all()]
new_sn_lst = c_sn_lst + o_sn_lst
diff_sn_lst = set(new_sn_lst + old_sn_list)
for item in diff_sn_lst:
try:
nsin = MachineOperationsInfo.objects.filter(sn_key=item)
if not nsin:
MachineOperationsInfo.objects.create(sn_key=item)
except Exception as e:
print("Error:SN:%s not insert into database,reason is:%s"%(item,e))
logger.error("Error:SN:%s not insert into database,reason is:%s"%(item,e))
'''
    Record the machine totals
'''
info_dic = Machines().get_all_count()
StatisticsRecord.objects.create(all_count=info_dic['all_c'],pyh_count=info_dic['pyh_c'],net_count=info_dic['net_c'],
other_count=info_dic['other_c'],vmx_count=info_dic['vmx_c'],kvm_count=info_dic['kvm_c'],docker_count=info_dic['docker_c'])
endtime = datetime.datetime.now()
totaltime = (endtime - starttime).seconds
logger.info("{Finish:Use time %s s}"%totaltime)
print("{Finish:Use time %s s}"%totaltime)
if __name__ == "__main__":
main() |
py | b40cfaa2a4b94a3c7a5ae1422b9dd8f5b9bdf032 | import jsonschema
from cliquet import resource, schema
from cliquet.errors import raise_invalid
from jsonschema import exceptions as jsonschema_exceptions
from pyramid.settings import asbool
from kinto.views import object_exists_or_404
class RecordSchema(schema.ResourceSchema):
class Options:
preserve_unknown = True
_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
@resource.register(name='record',
collection_path=_parent_path + '/records',
record_path=_parent_path + '/records/{{id}}')
class Record(resource.ProtectedResource):
mapping = RecordSchema()
schema_field = 'schema'
def __init__(self, *args, **kwargs):
super(Record, self).__init__(*args, **kwargs)
# Check if already fetched before (in batch).
collections = self.request.bound_data.setdefault('collections', {})
collection_uri = self.get_parent_id(self.request)
if collection_uri not in collections:
# Unknown yet, fetch from storage.
collection_parent_id = '/buckets/%s' % self.bucket_id
collection = object_exists_or_404(self.request,
collection_id='collection',
parent_id=collection_parent_id,
object_id=self.collection_id)
collections[collection_uri] = collection
self._collection = collections[collection_uri]
def get_parent_id(self, request):
self.bucket_id = request.matchdict['bucket_id']
self.collection_id = request.matchdict['collection_id']
return '/buckets/%s/collections/%s' % (self.bucket_id,
self.collection_id)
def is_known_field(self, field_name):
"""Without schema, any field is considered as known."""
return True
def process_record(self, new, old=None):
"""Validate records against collection schema, if any."""
new = super(Record, self).process_record(new, old)
schema = self._collection.get('schema')
settings = self.request.registry.settings
schema_validation = 'experimental_collection_schema_validation'
if not schema or not asbool(settings.get(schema_validation)):
return new
collection_timestamp = self._collection[self.collection.modified_field]
try:
jsonschema.validate(new, schema)
new[self.schema_field] = collection_timestamp
except jsonschema_exceptions.ValidationError as e:
field = e.path.pop() if e.path else e.validator_value.pop()
raise_invalid(self.request, name=field, description=e.message)
return new
def collection_get(self):
result = super(Record, self).collection_get()
self._handle_cache_expires(self.request.response)
return result
def get(self):
result = super(Record, self).get()
self._handle_cache_expires(self.request.response)
return result
def _handle_cache_expires(self, response):
"""If the parent collection defines a ``cache_expires`` attribute,
then cache-control response headers are sent.
.. note::
Those headers are also sent if the
``kinto.record_cache_expires_seconds`` setting is defined.
"""
cache_expires = self._collection.get('cache_expires')
if cache_expires is None:
by_bucket = 'kinto.%s_record_cache_expires_seconds' % (
self.bucket_id)
by_collection = '%s_%s_record_cache_expires_seconds' % (
self.bucket_id, self.collection_id)
settings = self.request.registry.settings
cache_expires = settings.get(by_collection,
settings.get(by_bucket))
if cache_expires is not None:
response.cache_expires(seconds=cache_expires)
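# Settings sketch (bucket and collection ids are hypothetical): following the lookups
# in _handle_cache_expires, cache headers for records can be enabled with e.g.
#     kinto.blog_record_cache_expires_seconds = 3600       # whole "blog" bucket
#     blog_articles_record_cache_expires_seconds = 3600    # only its "articles" collection
# unless the collection itself defines a "cache_expires" attribute, which takes precedence.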
|
py | b40cfbff54448fbd4163307df6d30ea3099068c2 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Use a regular expression to filter out the list titles from the web page. For testing,
# the site's HTML source was pasted directly into jike.txt.
import re
# Read the page source:
f = open('jike.txt','r')
html = f.read()
f.close()
# Match the lesson titles, e.g.: posColumn=2688.2">Python 类深入</a>
title = re.findall('<h2 class="lesson-info-h2">(.*?)posColumn=(.*?)">(.*?)</a>', html, re.S)
for each in title:
print each[2]
# i = 0
# for each in pic_url:
# print 'now downloding:' + each
# pic = requests.get(each)
# fp = open('pic\\' + str(i) + '.jpg' , 'wb')
# fp.write(pic.content)
# fp.close()
# i = i+1 |
py | b40cfc760f9646049ccc6157a40af7becf7e32b9 | # -*- coding: utf-8 -*-
import os
class Waypoints:
def __init__(self, plugin_dir):
self._route = []
self._notes = []
self._index = 0
self._save_route = os.path.join(plugin_dir, 'save_route.txt')
self._save_index = os.path.join(plugin_dir, 'save_index.txt')
if self.load(self._save_route):
self._load_index()
def __len__(self):
return len(self._route)
def clear(self):
self._route = []
self._notes = []
self._index = 0
if os.path.isfile(self._save_route):
os.remove(self._save_route)
if os.path.isfile(self._save_index):
os.remove(self._save_index)
def pos(self):
return 0 if len(self) == 0 else self._index + 1
def target(self):
return '' if len(self) == 0 else self._route[self._index]
def note(self):
return '' if len(self) == 0 else self._notes[self._index]
def has_next(self):
return self._index + 1 < len(self)
def has_prev(self):
return self._index > 0
def next(self):
if not self.has_next():
return False
self._index += 1
self.save()
return True
def prev(self):
if not self.has_prev():
return False
self._index -= 1
self.save()
return True
def reached(self, system):
if system.lower() != self.target().lower():
return False
return self.next()
def load(self, filename):
if len(filename) == 0:
return False
if not os.path.isfile(filename):
return False
self._route = []
self._notes = []
self._index = 0
try:
spansh = False
with open(filename, 'r') as f:
for line in f:
clean = line.rstrip(' \r\n').replace('"', '')
s = clean.replace('|', ',').split(',')
if len(s[0]) == 0 \
or s[0][0] == '#':
continue
if s[0] == 'System Name':
#"System Name","Jumps"
if len(s) == 2 and s[1] == 'Jumps':
spansh = True
#"System Name","Distance To Arrival","Distance Remaining","Neutron Star","Jumps"
if len(s) == 5 and s[4] == 'Jumps':
spansh = True
continue
self._route.append(s[0])
if not spansh and len(s) > 1:
self._notes.append(s[1])
else:
self._notes.append('')
except IOError:
print("Failed to read file {}".format(filename))
self._route = []
return False
return True
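    # Example route file (sketch) accepted by load(): one system per line, with an
    # optional note after '|' or ',':
    #     System Name
    #     Sol|refuel here
    #     Colonia
    # A Spansh plotter CSV with a "System Name","Jumps" header is also recognized; in
    # that case only the system names are kept and the notes are left empty.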
def _load_index(self):
try:
with open(self._save_index, 'r') as f:
self._index = int(f.readline())
except IOError:
print("Failed to read saved route index")
def save(self):
try:
with open(self._save_route, 'w') as f:
f.write('System Name\n')
for i in range(0, len(self._route)):
f.write(self._route[i])
f.write('|')
f.write(self._notes[i])
f.write('\n')
with open(self._save_index, 'w') as f:
f.write(str(self._index))
f.write('\n')
except IOError:
print("Failed to save current route")
|
py | b40cfcb041b83e825d783b10f707d2eae9fe59cf | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:KazuSilverTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_KSLVCOIND' not in vars():
ENABLE_KSLVCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "KSLVCOIND" not in os.environ:
os.environ["KSLVCOIND"] = BUILDDIR + '/src/kazusilverd' + EXEEXT
if "KSLVCOINCLI" not in os.environ:
os.environ["KSLVCOINCLI"] = BUILDDIR + '/src/kazusilver-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/kazusilver/kazusilver/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/kazusilver/kazusilver/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_KSLVCOIND == 1):
print("No rpc tests to run. Wallet, utils, and kazusilverd must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("WARNING: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
#Tests
testScripts = [
# longest test should go first, to favor running tests in parallel
# 'p2p-fullblocktest.py',
# 'walletbackup.py',
# 'bip68-112-113-p2p.py',
# 'wallet.py',
# 'wallet-hd.py',
# 'listtransactions.py',
# 'receivedby.py',
# 'mempool_resurrect_test.py',
# 'txn_doublespend.py --mineblock',
# 'txn_clone.py',
# 'getchaintips.py',
# 'rawtransactions.py',
# 'rest.py',
# 'mempool_spendcoinbase.py',
# 'mempool_reorg.py',
# 'mempool_limit.py',
# 'httpbasics.py',
# 'multi_rpc.py',
# 'zapwallettxes.py',
# 'proxy_test.py',
# 'merkle_blocks.py',
# 'fundrawtransaction.py',
# 'signrawtransactions.py',
# 'nodehandling.py',
# 'reindex.py',
# 'addressindex.py',
# 'timestampindex.py',
# 'spentindex.py',
# 'txindex.py',
# 'decodescript.py',
# 'blockchain.py',
# 'disablewallet.py',
# 'sendheaders.py',
# 'keypool.py',
# 'prioritise_transaction.py',
# 'invalidblockrequest.py',
# 'invalidtxrequest.py',
# 'abandonconflict.py',
# 'p2p-versionbits-warning.py',
# 'p2p-segwit.py',
# 'segwit.py',
# 'importprunedfunds.py',
# 'signmessages.py',
'cfund-donate.py',
'cfund-listproposals.py',
'cfund-paymentrequest-extract-funds.py',
'cfund-paymentrequest-state-accept.py',
'cfund-paymentrequest-state-expired.py',
'cfund-proposal-state-accept.py',
'cfund-proposal-state-expired.py',
'cfund-rawtx-create-proposal.py',
'cfund-rawtx-paymentrequest-create.py',
'cfund-rawtx-paymentrequest-vote.py',
'cfund-rawtx-proposal-vote.py',
'cfund-vote.py',
'reject-version-bit.py',
]
#if ENABLE_ZMQ:
# testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
'p2p-feefilter.py',
'pruning.py', # leave pruning last as it takes a REALLY long time
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print(stdout)
print('stderr:\n' if not stderr == '' else '', stderr)
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed=%s" % len(self.test_list)]
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc) = j
if proc.poll() is not None:
(stdout, stderr) = proc.communicate(timeout=3)
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `kazusilver-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
py | b40cfd07860f36850c378e25599b9c6687ad7158 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The BitcoinDX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import BitcoinDXTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet import MiniWallet
class MempoolCoinbaseTest(BitcoinDXTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
'-whitelist=noban@127.0.0.1',  # immediate tx relay
],
[]
]
def run_test(self):
wallet = MiniWallet(self.nodes[0])
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
self.log.info("Add 4 coinbase utxos to the miniwallet")
# Block 76 contains the first spendable coinbase txs.
first_block = 76
wallet.scan_blocks(start=first_block, num=4)
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_1
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_2 and spend_2_1
# 3. Indirect (coinbase and child both in chain) : spend_3 and spend_3_1
# Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [self.nodes[0].getblockhash(n) for n in range(first_block, first_block+4)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
utxo_1 = wallet.get_utxo(txid=coinbase_txids[1])
utxo_2 = wallet.get_utxo(txid=coinbase_txids[2])
utxo_3 = wallet.get_utxo(txid=coinbase_txids[3])
self.log.info("Create three transactions spending from coinbase utxos: spend_1, spend_2, spend_3")
spend_1 = wallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_1)
spend_2 = wallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_2)
spend_3 = wallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_3)
self.log.info("Create another transaction which is time-locked to two blocks in the future")
utxo = wallet.get_utxo(txid=coinbase_txids[0])
timelock_tx = wallet.create_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=utxo,
mempool_valid=False,
locktime=self.nodes[0].getblockcount() + 2
)['hex']
self.log.info("Check that the time-locked transaction is too immature to spend")
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
self.log.info("Broadcast and mine spend_2 and spend_3")
wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=spend_2['hex'])
wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=spend_3['hex'])
self.log.info("Generate a block")
self.nodes[0].generate(1)
self.log.info("Check that time-locked transaction is still too immature to spend")
assert_raises_rpc_error(-26, 'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
self.log.info("Create spend_2_1 and spend_3_1")
spend_2_utxo = wallet.get_utxo(txid=spend_2['txid'])
spend_2_1 = wallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=spend_2_utxo)
spend_3_utxo = wallet.get_utxo(txid=spend_3['txid'])
spend_3_1 = wallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=spend_3_utxo)
self.log.info("Broadcast and mine spend_3_1")
spend_3_1_id = self.nodes[0].sendrawtransaction(spend_3_1['hex'])
self.log.info("Generate a block")
last_block = self.nodes[0].generate(1)
# Sync blocks, so that peer 1 gets the block before timelock_tx
# Otherwise, peer 1 would put the timelock_tx in recentRejects
self.sync_all()
self.log.info("The time-locked transaction can now be spent")
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
self.log.info("Add spend_1 and spend_2_1 to the mempool")
spend_1_id = self.nodes[0].sendrawtransaction(spend_1['hex'])
spend_2_1_id = self.nodes[0].sendrawtransaction(spend_2_1['hex'])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_1_id, spend_2_1_id, timelock_tx_id})
self.sync_all()
self.log.info("invalidate the last block")
for node in self.nodes:
node.invalidateblock(last_block[0])
self.log.info("The time-locked transaction is now too immature and has been removed from the mempool")
self.log.info("spend_3_1 has been re-orged out of the chain and is back in the mempool")
assert_equal(set(self.nodes[0].getrawmempool()), {spend_1_id, spend_2_1_id, spend_3_1_id})
self.log.info("Use invalidateblock to re-org back and make all those coinbase spends immature/invalid")
b = self.nodes[0].getblockhash(first_block + 100)
for node in self.nodes:
node.invalidateblock(b)
self.log.info("Check that the mempool is empty")
assert_equal(set(self.nodes[0].getrawmempool()), set())
self.sync_all()
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
py | b40cfd4d1c2fdb6112a7bb776e25c15d983598ff | """
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import lldb
def fuzz_obj(obj):
obj.AddEvent(lldb.SBEvent())
obj.StartListeningForEvents(lldb.SBBroadcaster(), 0xffffffff)
obj.StopListeningForEvents(lldb.SBBroadcaster(), 0xffffffff)
event = lldb.SBEvent()
broadcaster = lldb.SBBroadcaster()
obj.WaitForEvent(5, event)
obj.WaitForEventForBroadcaster(5, broadcaster, event)
obj.WaitForEventForBroadcasterWithType(5, broadcaster, 0xffffffff, event)
obj.PeekAtNextEvent(event)
obj.PeekAtNextEventForBroadcaster(broadcaster, event)
obj.PeekAtNextEventForBroadcasterWithType(broadcaster, 0xffffffff, event)
obj.GetNextEvent(event)
obj.GetNextEventForBroadcaster(broadcaster, event)
obj.GetNextEventForBroadcasterWithType(broadcaster, 0xffffffff, event)
obj.HandleBroadcastEvent(event)
|
py | b40cfe35467f89f6023e699901f316f770119b21 | from drawer.src import main_gui
if __name__ == "__main__":
main_gui()
|
py | b40cfe4e7f1158d9715cf1c5817e1c11d76f4033 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 13:11:37 2020
@author: jonasg
"""
import pyaerocom as pya
dd = '/home/jonasg/MyPyaerocom/data/modeldata/OsloCTM3v1.01-met2010_AP3-CTRL/renamed'
reader = pya.io.ReadGridded('OsloCTM3v1.01-met2010_AP3-CTRL',
data_dir=dd)
print(reader)
data = reader.read_var('ec550aer', vert_which='ModelLevel')
print(data)
lats = (10, 20, 30)
lons = (10, 20, 30)
#stats = data.to_time_series(latitude=lats, longitude=lons,
# vert_scheme='profile')
arr = data.to_xarray()
subset = pya.helpers.extract_latlon_dataarray(arr, lats, lons,
lat_dimname=None,
lon_dimname=None, method='nearest',
new_index_name='latlon',
check_domain=True)
print(subset) |
py | b40cfe858b903562bbaa35ea5ba4c21718b45440 | '''
Copyright (c) 2011, Yahoo! Inc.
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of Yahoo! Inc. nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of Yahoo! Inc.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# Python implementation of Andoni's e2LSH. This version is fast because it
# uses Python hashes to implement the buckets. The numerics are handled
# by the numpy routine so this should be close to optimal in speed (although
# there is no control of the hash tables layout in memory.)
# This file implements the following classes
# lsh - the basic projection algorithm (on k-dimensional hash)
# index - a group of L lsh hashes
# TestDataClass - a generic class for handling the raw data
# To use
# Call this routine with the -histogram flag to create some random
# test data and to compute the nearest-neighbor distances
# Load the .distance file that is produced into Matlab and compute
# the d_nn and d_any histograms from the first and second columns
# of the .distance data.
# Use these histograms (and their bin positions) as input to the
# Matlab ComputeMPLSHParameters() routine.
# This gives you the optimum LSH parameters. You can use these
# values directly as parameters to this code.
# You can use the -ktest, -ltest and -wtest flags to test the
# parameters.
# Prerequisites: Python version 2.6 (2.5 might work) and NumPy
# By Malcolm Slaney, Yahoo! Research
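# Example workflow (an illustrative sketch added here, assuming this module is
# saved as lsh.py; the flags are the ones parsed in the __main__ section at the
# bottom of this file, and file names follow the 'testData%03d' convention used
# there):
#   python lsh.py -d 10 -create      # random data + exact NNs -> testData010.dat / .nn
#   python lsh.py -d 10 -histogram   # write testData010.distances for ComputeMPLSHParameters()
#   python lsh.py -d 10 -wtest -r 1  # recall vs. bin width w, multiprobe radius 1
#   python lsh.py -d 10 -ktest       # candidate count vs. number of projections k
#   python lsh.py -d 10 -ltest -k 10 # NN recall vs. number of tables l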
import random, numpy, pickle, os, operator, traceback, sys, math, time
import itertools # For Multiprobe
#######################################################################
# Note, the data is always a numpy array of size Dx1.
#######################################################################
# This class just implements k-projections into k integers
# (after quantization) and then reducing that integer vector
# into a T1 and T2 hash. Data can either be entered into a
# table, or retrieved.
class lsh:
'''This class implements one k-dimensional projection, the T1/T2 hashing
and stores the results in a table for later retrieval. Input parameters
are the bin width (w, floating point, or float('inf') to get binary LSH),
and the number of projections to compute for one table entry (k, an integer).'''
def __init__(self, w, k):
self.k = k # Number of projections
self.w = w # Bin width
self.projections = None
self.buckets = {}
# This only works for Python >= 2.6
def sizeof(self):
'''Return how much storage is needed for this object. In bytes
'''
return sys.getsizeof(self.buckets) + \
sys.getsizeof(self.projections) + \
sys.getsizeof(self)
# Create the random constants needed for the projections.
# Can't do this until we see some data, so we know the
# diementionality.
def CreateProjections(self, dim):
self.dim = dim
# print "CreateProjections: Creating projection matrix for %dx%d data." % (self.k, self.dim)
self.projections = numpy.random.randn(self.k, self.dim)
self.bias = numpy.random.rand(self.k, 1)
if 0:
print "Dim is", self.dim
print 'Projections:\n', self.projections
# print 'T1 hash:\n', self.t1hash
# print 'T2 hash:\n', self.t2hash
if 0:
# Write out the project data so we can check it's properties.
# Should be Gaussian with mean of zero and variance of 1.
fp = open('Projections.data', 'w')
for i in xrange(0,self.projections.shape[0]):
for j in xrange(0,self.projections.shape[1]):
fp.write('%g ' % self.projections[i,j])
fp.write('\n')
# Compute the t1 and t2 hashes for some data. Doing it this way
# instead of in a loop, as before, is 10x faster. Thanks to Anirban
# for pointing out the flaw. Not sure if the T2 hash is needed since
# our T1 hash is so strong.
debugFP = None
firstTimeCalculateHashes = False # Change to false to turn this off
infinity = float('inf') # Easy way to access this flag
def CalculateHashes(self, data):
'''Multiply the projection data (KxD) by some data (Dx1),
and quantize'''
#Sara: changed self.projections == None to self.projections is None
if self.projections is None:
self.CreateProjections(len(data))
bins = numpy.zeros((self.k,1), 'int')
if lsh.firstTimeCalculateHashes:
print 'data = ', numpy.transpose(data)
print 'bias = ', numpy.transpose(self.bias)
print 'projections = ',
for i in range(0, self.projections.shape[0]):
for j in range(0, self.projections.shape[1]):
print self.projections[i][j],
print
# print 't1Hash = ', self.t1hash
# print 't2Hash = ', self.t2hash
lsh.firstTimeCalculateHashes = False  # clear the class flag, not a throwaway local
print "Bin values:", self.bias + \
numpy.dot(self.projections, data)/self.w
print "Type of bins:", type(self.bias + \
numpy.dot(self.projections, data)/self.w)
if 0:
if lsh.debugFP == None:
print "Opening Projections file"
lsh.debugFP = open('Projections.data', 'w')
d = self.bias + numpy.dot(self.projections, data)/self.w
for i in xrange(0, len(d)):
lsh.debugFP.write('%g\n' % d[i])
lsh.debugFP.write('\n')
lsh.debugFP.flush()
if self.w == lsh.infinity:
# Binary LSH
bins[:] = (numpy.sign(numpy.dot(self.projections, data))+1)/2.0
else:
bins[:] = numpy.floor(self.bias + numpy.dot(self.projections, data)/self.w)
t1 = self.ListHash(bins)
t2 = self.ListHash(bins[::-1]) # Reverse data for second hash
return t1, t2
# Input: A Nx1 array (of integers)
# Output: A 28 bit hash value.
# From: http://stackoverflow.com/questions/2909106/
# python-whats-a-correct-and-good-way-to-implement-hash/2909572#2909572
def ListHash(self, d):
# return str(d).__hash__() # Good for testing, but not efficient
#Sara: changed d == None to d is None
if d is None or len(d) == 0:
return 0
# d = d.reshape((d.shape[0]*d.shape[1]))
value = d[0, 0] << 7
for i in d[:,0]:
value = (101*value + i)&0xfffffff
return value
# Just a debug version that returns the bins too.
def CalculateHashes2(self, data):
if self.projections is None:
print "CalculateHashes2: data.shape=%s, len(data)=%d" % (str(data.shape), len(data))
self.CreateProjections(len(data))
bins = numpy.zeros((self.k,1), 'int')
parray = numpy.dot(self.projections, data)
bins[:] = numpy.floor(parray/self.w + self.bias)
t1 = self.ListHash(bins)
t2 = self.ListHash(bins[::-1]) # Reverse data for second hash
# print self.projections, data, parray, bins
# sys.exit(1)
return t1, t2, bins, parray
# Return a bunch of hashes, depending on the level of multiprobe
# asked for. Each list entry contains T1, T2. This is a Python
# iterator... so call it in a for loop. Each iteration returns
# a bin ID (t1,t2)
# [Need to store bins in integer array so we don't convert to
# longs prematurely and get the wrong hash!]
def CalculateHashIterator(self, data, multiprobeRadius=0):
#Sara: changed self.projections == None to self.projections is None
if self.projections is None:
self.CreateProjections(len(data))
bins = numpy.zeros((self.k,1), 'int')
directVector = numpy.zeros((self.k,1), 'int')
newProbe = numpy.zeros((self.k,1), 'int')
if self.w == lsh.infinity:
points = numpy.dot(self.projections, data)
bins[:] = (numpy.sign(points)+1)/2.0
directVector[:] = -numpy.sign(bins-0.5)
else:
points = numpy.dot(self.projections, data)/self.w + self.bias
bins[:] = numpy.floor(points)
directVector[:] = numpy.sign(points-numpy.floor(points)-0.5)
t1 = self.ListHash(bins)
t2 = self.ListHash(bins[::-1])
yield (t1,t2)
if multiprobeRadius > 0:
# print "Multiprobe points:", points
# print "Multiprobe bin:", bins
# print "Multiprobe direct:", directVector
dimensions = range(self.k)
deltaVector = numpy.zeros((self.k, 1), 'int') # Preallocate
for r in range(1, multiprobeRadius+1):
# http://docs.python.org/library/itertools.html
for candidates in itertools.combinations(dimensions, r):
deltaVector *= 0 # Start Empty
deltaVector[list(candidates), 0] = 1 # Set some bits
newProbe[:] = bins + deltaVector*directVector # New probe
t1 = self.ListHash(newProbe)
t2 = self.ListHash(newProbe[::-1]) # Reverse data for second hash
# print "Multiprobe probe:",newProbe, t1, t2
yield (t1,t2)
# Put some data into the hash bucket for this LSH projection
def InsertIntoTable(self, id, data):
(t1, t2) = self.CalculateHashes(data)
if t1 not in self.buckets:
self.buckets[t1] = {t2: [id]}
else:
if t2 not in self.buckets[t1]:
self.buckets[t1][t2] = [id]
else:
self.buckets[t1][t2].append(id)
# Find some data in the hash bucket. Return all the ids
# that we find for this T1-T2 pair.
def FindXXObsolete(self, data):
(t1, t2) = self.CalculateHashes(data)
if t1 not in self.buckets:
return []
row = self.buckets[t1]
if t2 not in row:
return []
return row[t2]
#
def Find(self, data, multiprobeRadius=0):
'''Find the points that are close to the query data. Use multiprobe
to also look in nearby buckets.'''
res = []
for (t1,t2) in self.CalculateHashIterator(data, multiprobeRadius):
# print "Find t1:", t1
if t1 not in self.buckets:
continue
row = self.buckets[t1]
if t2 not in row:
continue
res += row[t2]
return res
# Create a dictionary showing all the buckets an ID appears in
def CreateDictionary(self, theDictionary, prefix):
for b in self.buckets: # Over all buckets
w = prefix + str(b)
for c in self.buckets[b]:# Over all T2 hashes
for i in self.buckets[b][c]:#Over ids
if not i in theDictionary:
theDictionary[i] = [w]
else:
theDictionary[i] += w
return theDictionary
# Print some stats for these lsh buckets
def StatsXXX(self):
maxCount = 0; sumCount = 0;
numCount = 0; bucketLens = [];
for b in self.buckets:
for c in self.buckets[b]:
l = len(self.buckets[b][c])
if l > maxCount:
maxCount = l
maxLoc = (b,c)
# print b,c,self.buckets[b][c]
sumCount += l
numCount += 1
bucketLens.append(l)
theValues = sorted(bucketLens)
med = theValues[(len(theValues)+1)/2-1]
print "Bucket Counts:"
print "\tTotal indexed points:", sumCount
print "\tT1 Buckets filled: %d/%d" % (len(self.buckets), 0)
print "\tT2 Buckets used: %d/%d" % (numCount, 0)
print "\tMaximum T2 chain length:", maxCount, "at", maxLoc
print "\tAverage T2 chain length:", float(sumCount)/numCount
print "\tMedian T2 chain length:", med
def HealthStats(self):
'''Count the number of points in each bucket (which is currently
a function of both T1 and T2)'''
maxCount = 0; numCount = 0; totalIndexPoints = 0;
for b in self.buckets:
for c in self.buckets[b]:
l = len(self.buckets[b][c])
if l > maxCount:
maxCount = l
maxLoc = (b,c)
# print b,c,self.buckets[b][c]
totalIndexPoints += l
numCount += 1
T1Buckets = len(self.buckets)
T2Buckets = numCount
T1T2BucketAverage = totalIndexPoints/float(numCount)
T1T2BucketMax = maxCount
return (T1Buckets, T2Buckets, T1T2BucketAverage, T1T2BucketMax)
# Get a list of all IDs that are contained in these hash buckets
def GetAllIndices(self):
theList = []
for b in self.buckets:
for c in self.buckets[b]:
theList += self.buckets[b][c]
return theList
# Put some data into the hash table, see how many collisions we get.
def Test(self, n):
self.buckets = {}
self.projections = None
d = numpy.array([.2,.3])
for i in range(0,n):
self.InsertIntoTable(i, d+i)
for i in range(0,n):
r = self.Find(d+i)
matches = sum(map(lambda x: x==i, r))
if matches == 0:
print "Couldn't find item", i
elif matches == 1:
pass
if len(r) > 1:
print "Found big bin for", i,":", r
# Put together several LSH projections to form an index. The only
# new parameter is the number of groups of projections (one LSH class
# object per group.)
class index:
def __init__(self, w, k, l):
self.k = k;
self.l = l
self.w = w
self.projections = []
self.myIDs = []
for i in range(0,l): # Create all LSH buckets
self.projections.append(lsh(w, k))
# Only works for Python > 2.6
def sizeof(self):
'''Return the sizeof this index in bytes.
'''
return sum(p.sizeof() for p in self.projections) + \
sys.getsizeof(self)
# Replace id we are given with a numerical id. Since we are going
# to use the ID in L tables, it is better to replace it here with
# an integer. We store the original ID in an array, and return it
# to the user when we do a find().
def AddIDToIndex(self, id):
if type(id) == int:
return id # Don't bother if already an int
self.myIDs.append(id)
return len(self.myIDs)-1
def FindID(self, id):
if type(id) != int or id < 0 or id >= len(self.myIDs):
return id
return self.myIDs[id]
# Insert some data into all LSH buckets
def InsertIntoTable(self, id, data):
intID = self.AddIDToIndex(id)
for p in self.projections:
p.InsertIntoTable(intID, data)
def FindXXObsolete(self, data):
'''Find some data in all the LSH buckets. Return a list of
data's id and bucket counts'''
items = [p.Find(data) for p in self.projections]
results = {}
for itemList in items:
for item in itemList:
if item in results: # Much faster without setdefault
results[item] += 1
else:
results[item] = 1
s = sorted(results.items(), key=operator.itemgetter(1), \
reverse=True)
return [(self.FindID(i),c) for (i,c) in s]
def Find(self, queryData, multiprobeR=0):
'''Find some data in all the LSH tables. Use Multiprobe, with
the given radius, to search neighboring buckets. Return a list of
results. Each result is a tuple consisting of the candidate ID
and the number of times it was found in the index.'''
results = {}
for p in self.projections:
ids = p.Find(queryData, multiprobeR)
# print "Got back these IDs from p.Find:", ids
for id in ids:
if id in results:
results[id] += 1
else:
results[id] = 1
s = sorted(results.items(), key=operator.itemgetter(1), \
reverse=True)
return [(self.FindID(i),c) for (i,c) in s]
def FindExact(self, queryData, GetData, multiprobeR=0):
'''Return a list of results sorted by their exact
distance from the query. GetData is a function that
returns the original data given its key. This function returns
a list of results, each result has the candidate ID and distance.'''
s = self.Find(queryData, multiprobeR)
# print "Intermediate results are:", s
d = map(lambda (id,count): (id,((GetData(id)-queryData)**2).sum(), \
count), s)
s = sorted(d, key=operator.itemgetter(1))
return [(self.FindID(i),d) for (i,d,c) in s]
# Put some data into the hash tables.
def Test(self, n):
d = numpy.array([.2,.3])
for i in range(0,n):
self.InsertIntoTable(i, d+i)
for i in range(0,n):
r = self.Find(d+i)
print r
# Print the statistics of each hash table.
def Stats(self):
for i in range(0, len(self.projections)):
p = self.projections[i]
print "Buckets", i,
p.StatsXXX()
# Get al the IDs that are part of this index. Just check one hash
def GetAllIndices(self):
if self.projections and len(self.projections) > 0:
p = self.projections[0]
return p.GetAllIndices()
return None
# Return the buckets (t1 and t2 hashes) associated with a data point
def GetBuckets(self, data):
b = []
for p in self.projections:
( t1, t2, bins, parray) = p.CalculateHashes2(data)
print "Bucket:", t1, t2, bins, parray
b += (t1, t2)
return b
#
def DictionaryPrefix(self, pc):
prefix = 'W'
prefixes = 'abcdefghijklmnopqrstuvwxyz'
while pc > 0: # Create unique ID for theis bucket
prefix += prefixes[pc%len(prefixes)]
pc /= len(prefixes)
return prefix
# Create a list ordered by ID listing which buckets are used for each ID
def CreateDictionary(self):
theDictionary = {}
pi = 0
for p in self.projections:
prefix = self.DictionaryPrefix(pi)
theDictionary = p.CreateDictionary(theDictionary,\
prefix)
pi += 1
return theDictionary
# Find the bucket ids that best correspond to this piece of data.
def FindBuckets(self, data):
theWords = []
pi = 0
for p in self.projections:
prefix = self.DictionaryPrefix(pi)
( t1, t2, bins, parray) = p.CalculateHashes2(data)
word = prefix + str(t1)
theWords += [word]
pi += 1
return theWords
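# Illustrative usage sketch for the index class (added for clarity, not part
# of the original module). Data points are Dx1 numpy column vectors; Find()
# returns (id, collision count) pairs and FindExact() re-ranks the candidates
# by true Euclidean distance. All parameter values here are placeholders.
def ExampleIndexUsage(numPoints=1000, dim=4):
    data = numpy.random.randn(dim, numPoints)
    ind = index(w=2.9, k=10, l=5)            # l separate k-projection tables
    for i in range(numPoints):
        ind.InsertIntoTable(i, data[:, i:i+1])
    query = data[:, 0:1]
    candidates = ind.Find(query, multiprobeR=1)
    exact = ind.FindExact(query, lambda id: data[:, id:id+1])
    return candidates, exact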
# Save an LSH index to a pickle file.
def SaveIndex(filename, ind):
try:
fp = open(filename, 'w')
pickle.dump(ind, fp)
fp.close()
statinfo = os.stat(filename,)
if statinfo:
print "Wrote out", statinfo.st_size, "bytes to", \
filename
except:
print "Couldn't pickle index to file", filename
traceback.print_exc(file=sys.stderr)
# Read an LSH index from a pickle file.
def LoadIndex(filename):
if type(filename) == str:
try:
fp = open(filename, 'r')
except:
print "Couldn't open %s to read LSH Index" % (filename)
return None
else:
fp = filename
try:
ind = pickle.load(fp)
fp.close()
return ind
except:
print "Couldn't read pickle file", filename
traceback.print_exc(file=sys.stderr)
class TestDataClass:
'''A bunch of routines used to generate data we can use to test
this LSH implementation.'''
def __init__(self):
self.myData = None
self.myIndex = None
self.nearestNeighbors = {} # A dictionary pointing to IDs
def LoadData(self, filename):
'''Load data from a flat file, one line per data point.'''
lineCount = 0
try:
fp = open(filename)
if fp:
for theLine in fp: # Count lines in file
if theLine == '':
break
lineCount += 1
dim = len(theLine.split()) # Allocate the storage array
self.myData = numpy.zeros((dim, lineCount))
fp.seek(0,0) # Go back to beginning of file
lineCount = 0
for theLine in fp: # Now load the data
data = [float(i) for i in theLine.split()]
self.myData[:,lineCount] = data
lineCount += 1
fp.close()
else:
print "Can't open %s to LoadData()" % filename
except:
print "Error loading data from %s in TestDataClass.LoadData()" \
% filename
traceback.print_exc(file=sys.stderr)
print "self.myData has %d lines and is:" % lineCount, self.myData
def SaveData(self, filename):
'''Save this data in a flat file. One line per data point.'''
numDims = self.NumDimensions()
try:
fp = open(filename, 'w')
if fp:
for i in xrange(0, self.NumPoints()):
data = self.RetrieveData(i).reshape(numDims)
fp.write(' '.join([str(d) for d in data]) + '\n')
fp.close()
return
except:
pass
sys.stderr.write("Can't write test data to %s\n" % filename)
def CreateIndex(self, w, k, l):
'''Create an index for the data we have in our database. Inputs are
the LSH parameters: w, k and l.'''
self.myIndex = index(w, k, l)
itemCount = 0
tic = time.clock()
for itemID in self.IterateKeys():
features = self.RetrieveData(itemID)
if features is not None:
self.myIndex.InsertIntoTable(itemID, features)
itemCount += 1
print "Finished indexing %d items in %g seconds." % \
(itemCount, time.clock()-tic)
sys.stdout.flush()
def RetrieveData(self, id):
'''Find a point in the array of data.'''
id = int(id) # Key in this base class is an int!
if id < self.myData.shape[1]:
return self.myData[:,id:id+1]
return None
def NumPoints(self):
'''How many data point are in this database?'''
return self.myData.shape[1]
def NumDimensions(self):
'''What is the dimensionality of the data?'''
return self.myData.shape[0]
def GetRandomQuery(self):
'''Pick a random query from the dataset. Return a key.'''
return random.randrange(0,self.NumPoints()) # Pick random query
def FindNearestNeighbors(self, count):
'''Exhaustive search for count nearest-neighbor results.
Save the results in a dictionary.'''
numPoints = self.NumPoints()
self.nearestNeighbors = {}
for i in xrange(0,count):
qid = self.GetRandomQuery() # Pick random query
qData = self.RetrieveData(qid) # Find it's data
nearestDistance2 = None
nearestIndex = None
for id2 in self.IterateKeys():
if qid != id2:
d2 = ((self.RetrieveData(id2)-qData)**2).sum()
if qid == -1: # Debugging
print qid, id2, qData, self.RetrieveData(id2), d2
if nearestDistance2 == None or d2 < nearestDistance2:
nearestDistance2 = d2
nearestIndex = id2
self.nearestNeighbors[qid] = \
(nearestIndex, math.sqrt(nearestDistance2))
if qid == -1:
print qid, nearestIndex, math.sqrt(nearestDistance2)
sys.stdout.flush()
def SaveNearestNeighbors(self, filename):
'''Save the nearest neighbor dictionary in a file. Each line
of the file contains the query key, the distance to the nearest
neighbor, and the NN key.'''
if filename.endswith('.gz'):
import gzip
fp = gzip.open(filename, 'w')
else:
fp = open(filename, 'w')
if fp:
for (query,(nn,dist)) in self.nearestNeighbors.items():
fp.write('%s %g %s\n' % (str(query), dist, str(nn)))
fp.close()
else:
print "Can't open %s to write nearest-neighbor data" % filename
def LoadNearestNeighbors(self, filename):
'''Load a file full of nearest neighbor data.'''
self.nearestNeighbors = {}
if filename.endswith('.gz'):
import gzip
fp = gzip.open(filename, 'r')
else:
fp = open(filename, 'r')
if fp:
print "Loading nearest-neighbor data from:", filename
for theLine in fp:
(k,d,nn) = theLine.split()
if type(self.myData) == numpy.ndarray: # Check for array indices
k = int(k)
nn = int(nn)
if k < self.NumPoints() and nn < self.NumPoints():
self.nearestNeighbors[k] = (nn,float(d))
elif k in self.myData and nn in self.myData: # dictionary index
self.nearestNeighbors[k] = (nn,float(d))
fp.close()
print " Loaded %d items into the nearest-neighbor dictionary." % len(self.nearestNeighbors)
else:
print "Can't open %s to read nearest neighbor data." % filename
def IterateKeys(self):
'''Iterate through all possible keys in the dataset.'''
for i in range(self.NumPoints()):
yield i
def FindMedian(self):
numDim = self.NumDimensions()
numPoints = self.NumPoints()
oneColumn = numpy.zeros((numPoints))
medians = numpy.zeros((numDim))
for d in xrange(numDim):
rowNumber = 0
for k in self.IterateKeys():
oneData = self.RetrieveData(k)
oneColumn[rowNumber] = oneData[d]
rowNumber += 1
m = numpy.median(oneColumn, overwrite_input=True)
medians[d] = m
return medians
def ComputeDistanceHistogram(self, fp = sys.stdout):
'''Calculate the nearest-neighbor and any-neighbor distance
histograms needed for the LSH Parameter Optimization. For
a number of random query points, print the distance to the
nearest neighbor, and to any random neighbor. This becomes
the input for the parameter optimization routine. Enhanced
to also print the NN binary projections.'''
numPoints = self.NumPoints()
# medians = self.FindMedian() # Not used now, but useful for binary quantization
print "Pulling %d items from the NearestNeighbors list for ComputeDistanceHistogram" % \
len(self.nearestNeighbors.items())
for (queryKey,(nnKey,nnDist)) in self.nearestNeighbors.items():
randKey = self.GetRandomQuery()
queryData = self.RetrieveData(queryKey)
nnData = self.RetrieveData(nnKey)
randData = self.RetrieveData(randKey)
if len(queryData) == 0 or len(nnData) == 0: # Missing, probably because of subsampling
print "Skipping %s/%s because data is missing." % (queryKey, nnKey)
continue
anyD2 = ((randData-queryData)**2).sum()
projection = numpy.random.randn(1, queryData.shape[0])
# print "projection:", projection.shape
# print "queryData:", queryData.shape
# print "nnData:", nnData.shape
# print "randData:", randData.shape
queryProj = numpy.sign(numpy.dot(projection, queryData))
nnProj = numpy.sign(numpy.dot(projection, nnData))
randProj = numpy.sign(numpy.dot(projection, randData))
# print 'CDH:', queryProj, nnProj, randProj
fp.write('%g %g %d %d\n' % \
(nnDist, math.sqrt(anyD2), \
queryProj==nnProj, queryProj==randProj))
fp.flush()
def ComputePnnPany(self, w, k, l, multiprobe=0):
'''Compute the probability of Pnn and Pany for a given index size.
Create the desired index, populate it with the data, and then measure
the NN and ANY neighbor retrieval rates.
Return
the pnn rate for one 1-dimensional index (l=1),
the pnn rate for an l-dimensional index,
the pany rate for one 1-dimensional index (l=1),
and the pany rate for an l-dimensional index
the CPU time per query (seconds)'''
numPoints = self.NumPoints()
numDims = self.NumDimensions()
self.CreateIndex(w, k, l) # Put data into new index
cnn = 0; cnnFull = 0
cany = 0; canyFull = 0
queryCount = 0 # Probe the index
totalQueryTime = 0
startRecallTestTime = time.clock()
# print "ComputePnnPany: Testing %d nearest neighbors." % len(self.nearestNeighbors.items())
for (queryKey,(nnKey,dist)) in self.nearestNeighbors.items():
queryData = self.RetrieveData(queryKey)
if queryData is None or len(queryData) == 0:
print "Can't find data for key %s" % str(queryKey)
sys.stdout.flush()
continue
startQueryTime = time.clock() # Measure CPU time
matches = self.myIndex.Find(queryData, multiprobe)
totalQueryTime += time.clock() - startQueryTime
for (m,c) in matches:
if nnKey == m: # See if NN was found!!!
cnn += c
cnnFull += 1
if m != queryKey: # Don't count the query
cany += c
canyFull += len(matches)-1 # Total candidates minus 1 for query
queryCount += 1
# Some debugging for k curve.. print individual results
# print "ComputePnnPany Debug:", w, k, l, len(matches), numPoints, cnn, cnnFull, cany, canyFull
recallTestTime = time.clock() - startRecallTestTime
print "Tested %d NN queries in %g seconds." % (queryCount, recallTestTime)
sys.stdout.flush()
if queryCount == 0:
queryCount = 1 # To prevent divide by zero
perQueryTime = totalQueryTime/queryCount
print "CPP:", cnn, cnnFull, cany, canyFull
print "CPP:", cnn/float(queryCount*l), cnnFull/float(queryCount), \
cany/float(queryCount*l*numPoints), canyFull/float(queryCount*numPoints), \
perQueryTime, numDims
return cnn/float(queryCount*l), cnnFull/float(queryCount), \
cany/float(queryCount*l*numPoints), canyFull/float(queryCount*numPoints), \
perQueryTime, numDims
def ComputePnnPanyCurve(self, wList = .291032, multiprobe=0):
if type(wList) == float or type(wList) == int:
wList = [wList*10**((i-10)/10.0) for i in range(0,21)]
for w in wList:
(pnn, pnnFull, pany, panyFull, queryTime, numDims) = self.ComputePnnPany(w, 1, 10, multiprobe)
if w == wList[0]:
print "# w pnn pany queryTime"
print "PnnPany:", w, multiprobe, pnn, pany, queryTime
sys.stdout.flush()
def ComputeKCurve(self, kList, w = .291032, r=0):
'''Compute the number of ANY neighbors as a function of
k. Should go down exponentially.'''
numPoints = self.NumPoints()
l = 10
for k in sorted(list(kList)):
(pnn, pnnFull, pany, panyFull, queryTime, numDims) = self.ComputePnnPany(w, k, l, r)
print w, k, l, r, pnn, pany, pany*numPoints, queryTime
sys.stdout.flush()
def ComputeLCurve(self, lList, w = 2.91032, k=10, r=0):
'''Compute the probability of nearest neighbors as a function
of l.'''
numPoints = self.NumPoints()
firstTime = True
for l in sorted(list(lList)):
(pnn, pnnFull, pany, panyFull, queryTime, numDims) = self.ComputePnnPany(w, k, l, r)
if firstTime:
print "# w k l r pnnFull, panyFull panyFull*N queryTime"
firstTime = False
print w, k, l, r, pnnFull, panyFull, panyFull*numPoints, queryTime
sys.stdout.flush()
class RandomTestData(TestDataClass):
'''Generate uniform random data points between -1 and 1.'''
def CreateData(self, numPoints, dim):
self.myData = (numpy.random.rand(dim, numPoints)-.5)*2.0
class HyperCubeTestData(TestDataClass):
'''Create a hypercube of data. All points are in the corners'''
def CreateData(self, numDim, noise = None):
numPoints = 2**numDim
self.myData = numpy.zeros((numDim, numPoints))
for i in range(0,numPoints):
for b in range(0,numDim):
if (2**b) & i:
self.myData[b, i] = 1.0
if noise != None:
self.myData += (numpy.random.rand(numDim, numPoints)-.5)*noise
class RegularTestData(TestDataClass):
'''Fill the 2-D test array with a regular grid of points between -1 and 1'''
def CreateData(self, numDivs):
self.myData = numpy.zeros((2, (2*numDivs+1)**2))
i = 0
for x in range(-numDivs, numDivs+1):
for y in range(-numDivs, numDivs+1):
self.myData[0, i] = x/float(numDivs)
self.myData[1, i] = y/float(numDivs)
i += 1
# Use Dimension Doubling to measure the dimensionality of a random
# set of data. Generate some data (either random Gaussian or a grid)
# Then count the number of points that fall within the given radius of this
# query.
def XXXTestDimensionality2():
binWidth = .5
if True:
numPoints = 100000
myTestData = TestDataClass(numPoints, 3)
else:
myTestData = RegularTestData(100)
numPoints = myTestData.NumPoints
k = 4; l = 2; N = 1000
myTestIndex = index(binWidth, k, l, N)
for i in range(0,numPoints):
myTestIndex.InsertIntoTable(i, myTestData.RetrieveData(i))
rBig = binWidth/8.0
rSmall = rBig/2.0
cBig = 0.0; cSmall = 0.0
for id in random.sample(ind.GetAllIndices(), 2):
qp = FindLSHTestData(id)
cBig += myTestIndex.CountInsideRadius(qp, myTestData.FindData, rBig)
cSmall += myTestIndex.CountInsideRadius(qp, myTestData.FindData, rSmall)
if cBig > cSmall and cSmall > 0:
dim = math.log(cBig/cSmall)/math.log(rBig/rSmall)
else:
dim = 0
print cBig, cSmall, dim
return ind
# Generate some 2-dimensional data, put it into an index and then
# show the points retrieved. This is all done as a function of number
# of projections per bucket, number of buckets to use for each index, and
# the number of LSH bucket (the T1 size). Write out the data so we can
# plot it (in Matlab)
def GraphicalTest(k, l, N):
numPoints = 1000
myTestData = TestDataClass(numPoints, 3)
ind = index(.1, k, l, N)
for i in range(0,numPoints):
ind.InsertIntoTable(i, myTestData.RetrieveData(i))
i = 42
r = ind.Find(data[i,:])
fp = open('lshtestpoints.txt','w')
for i in range(0,numPoints):
if i in r:
c = r[i]
else:
c = 0
fp.write("%g %g %d\n" % (data[i,0], data[i,1], c))
fp.close()
return r
def SimpleTest():
import time
dim = 250
numPoints = 10000
myTestData = RandomTestData()
myTestData.CreateData(numPoints,dim)
myTestIndex = index(w=.4, k=10, l=10)
startLoad = time.clock()
for id in myTestData.IterateKeys():
data = myTestData.RetrieveData(id)
myTestIndex.InsertIntoTable(id, data)
endLoad = time.clock()
print "Time to load %d points is %gs (%gms per point)" % \
(numPoints, endLoad-startLoad, (endLoad-startLoad)/numPoints*1000.0)
startRecall = time.clock()
resCount = 0
resFound = 0
for id in myTestData.IterateKeys():
query = myTestData.RetrieveData(id)
res = myTestIndex.Find(query)
if not res == None and len(res) > 0:
resFound += 1
if not res == None:
resCount += len(res)
endRecall = time.clock()
print "Time to recall %d points is %gs (%gms per point" % \
(numPoints, endRecall-startRecall, (endRecall-startRecall)/numPoints*1000.0)
print "Found a recall hit all but %d times, average results per query is %g" % \
(numPoints-resFound, resCount/float(numPoints))
def OutputAllProjections(myTestData, myTestIndex, filename):
'''Calculate and output all the projected data for an index.'''
lshProjector = myTestIndex.projections[0]
fp = open(filename, 'w')
for id in myTestData.IterateKeys():
d = myTestData.RetrieveData(id)
(t1, t2, bins, parray) = lshProjector.CalculateHashes2(d)
fp.write('%d %d %g %g\n' % (t1, t2, bins[0][0], parray[0][0]))
fp.close()
# Exact Optimization:
# For 100000 5-d data use: w=2.91032 and get 0.55401 hits per bin and 0.958216 nn.
# K=23.3372 L=2.70766 cost is 2.98756
# Expected statistics for optimal solution:
# Assuming K=23, L=3
# p_nn(w) is 0.958216
# p_any(w) is 0.55401
# Probability of finding NN for L=1: 0.374677
# Probability of finding ANY for L=1: 1.26154e-06
# Probability of finding NN for L=3: 0.75548
# Probability of finding ANY for L=3: 3.78462e-06
# Expected number of hits per query: 0.378462
'''
10-D data:
Mean of Python NN data is 0.601529 and std is 0.0840658.
Scaling all distances by 0.788576 for easier probability calcs.
Simple Approximation:
For 100000 5-d data use: w=4.17052 and get 0.548534 hits per bin and 0.885004 nn.
K=19.172 L=10.4033 cost is 20.8065
Expected statistics: for simple approximation
Assuming K=19, L=10
Probability of finding NN for L=1: 0.0981652
Probability of finding ANY for L=1: 1.10883e-05
Probability of finding NN for L=10: 0.644148
Probability of finding ANY for L=10: 0.000110878
Expected number of hits per query: 11.0878
Exact Optimization:
For 100000 5-d data use: w=4.26786 and get 0.556604 hits per bin and 0.887627 nn.
K=21.4938 L=12.9637 cost is 17.3645
Expected statistics for optimal solution:
Assuming K=21, L=13
p_nn(w) is 0.887627
p_any(w) is 0.556604
Probability of finding NN for L=1: 0.0818157
Probability of finding ANY for L=1: 4.53384e-06
Probability of finding NN for L=13: 0.670323
Probability of finding ANY for L=13: 5.89383e-05
Expected number of hits per query: 5.89383
'''
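# Illustrative helper (added for clarity, not part of the original module):
# the probabilities quoted in the notes above follow the standard LSH recall
# model. A query and its nearest neighbor land in the same bucket of one
# k-projection table with probability p**k, so with l independent tables the
# retrieval probability is 1 - (1 - p**k)**l. For example, p_nn=0.958216,
# k=23, l=3 gives ~0.3747 for a single table and ~0.7555 overall, matching
# the "Exact Optimization" block above.
def ExpectedRecall(p, k, l):
    pBucket = p**k                      # collision probability in one table
    return 1.0 - (1.0 - pBucket)**l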
if __name__ == '__main__':
defaultDims = 10
defaultW = 2.91032
defaultK = 10
defaultL = 1
defaultClosest = 1000
defaultMultiprobeRadius = 0
defaultFileName = 'testData'
cmdName = sys.argv.pop(0)
while len(sys.argv) > 0:
arg = sys.argv.pop(0).lower()
if arg == '-d':
arg = sys.argv.pop(0)
try:
defaultDims = int(arg)
defaultFileName = 'testData%03d' % defaultDims
except:
print "Couldn't parse new value for defaultDims: %s" % arg
print 'New default dimensions for test is', defaultDims
elif arg == '-f':
defaultFileName = sys.argv.pop(0)
print 'New file name is', defaultFileName
elif arg == '-k':
arg = sys.argv.pop(0)
try:
defaultK = int(arg)
except:
print "Couldn't parse new value for defaultK: %s" % arg
print 'New default k for test is', defaultK
elif arg == '-l':
arg = sys.argv.pop(0)
try:
defaultL = int(arg)
except:
print "Couldn't parse new value for defaultL: %s" % arg
print 'New default l for test is', defaultL
elif arg == '-c':
arg = sys.argv.pop(0)
try:
defaultClosest = int(arg)
except:
print "Couldn't parse new value for defaultClosest: %s" % arg
print 'New default number closest for test is', defaultClosest
elif arg == '-w':
arg = sys.argv.pop(0)
try:
defaultW = float(arg)
except:
print "Couldn't parse new value for w: %s" % arg
print 'New default W for test is', defaultW
elif arg == '-r':
arg = sys.argv.pop(0)
try:
defaultMultiprobeRadius = int(arg)
except:
print "Couldn't parse new value for multiprobeRadius: %s" % arg
print 'New default multiprobeRadius for test is', defaultMultiprobeRadius
elif arg == '-create': # Create some uniform random data and find NN
myTestData = RandomTestData()
myTestData.CreateData(100000, defaultDims)
myTestData.SaveData(defaultFileName + '.dat')
print "Finished creating random data. Now computing nearest neighbors..."
myTestData.FindNearestNeighbors(defaultClosest)
myTestData.SaveNearestNeighbors(defaultFileName + '.nn')
elif arg == '-histogram': # Calculate distance histograms
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
fp = open(defaultFileName + '.distances', 'w')
if fp:
myTestData.ComputeDistanceHistogram(fp)
fp.close()
else:
print "Can't open %s.distances to store NN data" % defaultFileName
elif arg == '-sanity':
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
print myTestData.RetrieveData(myTestData.GetRandomQuery())
print myTestData.RetrieveData(myTestData.GetRandomQuery())
elif arg == '-b': # Calculate bucket probabilities
random.seed(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
# ComputePnnPanyCurve(myData, [.291032])
myTestData.ComputePnnPanyCurve(defaultW)
elif arg == '-wtest': # Calculate bucket probabilities as a function of w
random.seed(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
wList = [defaultW*.5**-i for i in range(-10,10)]
# wList = [defaultW*.5**-i for i in range(-3,3)]
myTestData.ComputePnnPanyCurve(wList, defaultMultiprobeRadius)
elif arg == '-ktest': # Calculate bucket probabilities as a function of k
random.seed(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
# ComputePnnPanyCurve(myData, [.291032])
kList = [math.floor(math.sqrt(2)**k) for k in range(0,10)]
kList = [1,2,3,4,5,6,8,10,12,14,16,18,20,22,25,30,35,40]
myTestData.ComputeKCurve(kList, defaultW, defaultMultiprobeRadius)
elif arg == '-ltest': # Calculate bucket probabilities as a function of l
random.seed(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
# ComputePnnPanyCurve(myData, [.291032])
lList = [math.floor(math.sqrt(2)**k) for k in range(0,10)]
lList = [1,2,3,4,5,6,8,10,12,14,16,18,20,22,25,30]
myTestData.ComputeLCurve(lList, w=defaultW,
k=defaultK, r=defaultMultiprobeRadius)
elif arg == '-timing':
# sys.argv.pop(0)
timingModels = []
while len(sys.argv) > 0:
print "Parsing timing argument", sys.argv[0], len(sys.argv)
if sys.argv[0].startswith('-'):
break
try:
(w,k,l,r,rest) = sys.argv[0].strip().split(',', 5)
timingModels.append([float(w), int(k), int(l), int(r)])
except:
print "Couldn't parse %s. Need w,k,l,r" % sys.argv[0]
sys.argv.pop(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
for (w, k, l, r) in timingModels:
sys.stdout.flush()
(pnnL1, pnn, panyL1, pany, perQueryTime, numDims) = myTestData.ComputePnnPany(w, k, l, r)
print "Timing:", w, k, l, r, myTestData.NumPoints(), pnn, pany, perQueryTime*1000.0, numDims
elif arg == '-test': # Calculate bucket probabilities as a function of l
random.seed(0)
myTestData = TestDataClass()
myTestData.LoadData(defaultFileName + '.dat')
myTestData.LoadNearestNeighbors(defaultFileName + '.nn')
# ComputePnnPanyCurve(myData, [.291032])
myTestData.ComputeLCurve([defaultL], w=defaultW, k=defaultK)
else:
print '%s: Unknown test argument %s' % (cmdName, arg)
|
py | b40cff1929b75881ea0fc65a1c6d7f1c0156ec49 | import sys
import pandas as pd
import networkx as nx
import numpy as np
dataset = sys.argv[1]
noise_percent = sys.argv[2]
df = pd.read_csv(f"data/intermediate/{dataset}/features.csv", index_col=0)
deviations = df.std()
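# Per-feature standard deviations, kept for reference; the Gaussian noise
# added below is scaled only by scaling_factor (noise_percent / 100).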
scaling_factor = int(noise_percent)/100
df = df+scaling_factor*np.random.randn(*df.shape)
import sklearn.cluster
model = sklearn.cluster.OPTICS()
clusters = model.fit_predict(df)
clusters = pd.Series(clusters, index = df.index)
clusters.to_csv(f"data/processed/clusters/{dataset}/optics_noise_percent_{noise_percent}.csv", header = None)
|
py | b40cff31dd427624e369fb84e3811ea169c0ffe5 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDichromat(RPackage):
"""Collapse red-green or green-blue distinctions to simulate the effects of
different types of color-blindness."""
homepage = "https://cran.r-project.org/web/packages/dichromat/index.html"
url = "https://cran.r-project.org/src/contrib/dichromat_2.0-0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/dichromat"
version('2.0-0', '84e194ac95a69763d740947a7ee346a6')
|
py | b40cff40c9f0fba85e76d6a5a4823ff4a98b10d1 | # Generated by Django 2.1.5 on 2019-03-13 22:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_auto_20190313_2204'),
]
operations = [
migrations.AlterUniqueTogether(
name='inscription',
unique_together={('adherent', 'sport')},
),
]
|
py | b40cff537c4759321452ba3f3d08cb1bcdbf647a | """
__Seed builder__
AUTO_GENERATED (Read only)
Modify via builder
"""
from rest_framework import serializers
from app.models import User
from app.models import Team
from app.models import File
from seed.serializers.file import FileSerializer
class UserSerializer(serializers.ModelSerializer):
profile_image = FileSerializer(read_only=True)
team_ids = serializers.PrimaryKeyRelatedField(
many=True, source='teams', queryset=Team.objects.all(),
required=True, allow_null=False)
profile_image_id = serializers.PrimaryKeyRelatedField(
source='profile_image', queryset=File.objects.all(),
required=True, allow_null=False)
class Meta:
model = User
fields = (
'id',
'hash',
'username',
'first_name',
'last_name',
'email',
'is_active',
'profile_image',
'profile_image_id',
'team_ids',
) |
py | b40cffb8d80f2d46386cf040c9cfd16cd15c5d96 | from ipaddress import IPv6Address, ip_address, IPv4Address
from logging import root
import shutil
from ceres.util.default_root import get_coin_root_path
from ceres.util.ceres_config import get_farmer_name, get_coin_names
from ceres.util.path import mkdir
from ceres.util.config import initial_config_file, load_config
from pathlib import Path
import os
from pkg_resources import ensure_directory
from ceres.cmds.init_funcs import chia_init, copy_cert_files, create_all_ssl
def ceres_init(root_path: Path, coin: str="ceres", coins=None):
chia_init(root_path, coin)
create_ceres_coins_config(root_path)
if not coins:
coins = get_coin_names(root_path)
create_ceres_all_ca_path(root_path, coins)
ceres_generate_ssl_for_all_coins(root_path, coins)
coins_config_file = root_path / "config" / "coins_config.yaml"
if not coins_config_file.exists():
print(f"coins_config.yaml NOT Found, run ceres init first")
else:
create_config_for_every_coins(root_path, coins)
def create_config_for_every_coins(root_path: Path, coins: list):
coins_config_file = root_path / "config" / "coins_config.yaml"
if not coins_config_file.exists():
print(f"coins_config.yaml NOT Found, run ceres init first")
else:
all_coins_root_path = root_path / "all_coins"
if not all_coins_root_path.exists():
mkdir(all_coins_root_path)
coin_names = get_coin_names(root_path)
for coin in coin_names:
if coins and coin not in coins: continue
coin_root_path = get_coin_root_path(coin)
chia_init(coin_root_path, coin)
def create_ceres_all_ca_path(root_path: Path, coins):
all_ca_path = root_path / "all_ca"
if not all_ca_path.exists():
mkdir(all_ca_path)
coins = get_coin_names(root_path)
for coin in coins:
ca_path = all_ca_path / f"{coin}_ca"
if not ca_path.exists():
mkdir(ca_path)
print(f"Created ca directory: {ca_path}")
def create_ceres_coins_config(root_path: Path, filename: str="coins-config.yaml"):
ceres_all_coins_config_path = root_path / "config"
ceres_all_coins_config_file = ceres_all_coins_config_path / f"coins_config.yaml"
if ceres_all_coins_config_path.is_dir() and ceres_all_coins_config_file.exists():
print(
f"{filename} already exists"
)
return -1
mkdir(ceres_all_coins_config_path.parent)
ceres_all_coins_config_data = initial_config_file('ceres', filename)
with open(ceres_all_coins_config_file, "w") as f:
f.write(ceres_all_coins_config_data)
def ceres_generate_ssl_for_all_coins(root_path: Path, coins=None):
if not coins:
coins = get_coin_names(root_path)
all_ca_path = root_path / "all_ca"
for coin in coins:
create_certs = all_ca_path / f"{coin}_ca" / "ca"
if not create_certs.exists():
print(f"{create_certs} does not exist")
continue
# TODO: should check ssl before continue
coin_root_path = get_coin_root_path(coin)
# chia_init()
ca_dir: Path = coin_root_path / "config/ssl/ca"
if ca_dir.exists():
print(f"Deleting your OLD CA in {ca_dir}")
shutil.rmtree(ca_dir)
print(f"Copying your CA from {create_certs} to {ca_dir}")
copy_cert_files(create_certs, ca_dir)
create_all_ssl(coin, coin_root_path)
|
py | b40d00c7de28755a90acc1ed97a4779791e89a73 | # coding: utf-8
"""
EPIC API
REST API for interacting with EPIC (https://epic.zenotech.com) services. <br /> Please note this API is in BETA and does not yet contain all EPIC functionality. # noqa: E501
The version of the OpenAPI document: v2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from epiccore.configuration import Configuration
class InlineResponse2003(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'next': 'str',
'previous': 'str',
'results': 'list[File]'
}
attribute_map = {
'count': 'count',
'next': 'next',
'previous': 'previous',
'results': 'results'
}
def __init__(self, count=None, next=None, previous=None, results=None, local_vars_configuration=None): # noqa: E501
"""InlineResponse2003 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._next = None
self._previous = None
self._results = None
self.discriminator = None
self.count = count
self.next = next
self.previous = previous
self.results = results
@property
def count(self):
"""Gets the count of this InlineResponse2003. # noqa: E501
:return: The count of this InlineResponse2003. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this InlineResponse2003.
:param count: The count of this InlineResponse2003. # noqa: E501
:type count: int
"""
if self.local_vars_configuration.client_side_validation and count is None: # noqa: E501
raise ValueError("Invalid value for `count`, must not be `None`") # noqa: E501
self._count = count
@property
def next(self):
"""Gets the next of this InlineResponse2003. # noqa: E501
:return: The next of this InlineResponse2003. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this InlineResponse2003.
:param next: The next of this InlineResponse2003. # noqa: E501
:type next: str
"""
self._next = next
@property
def previous(self):
"""Gets the previous of this InlineResponse2003. # noqa: E501
:return: The previous of this InlineResponse2003. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this InlineResponse2003.
:param previous: The previous of this InlineResponse2003. # noqa: E501
:type previous: str
"""
self._previous = previous
@property
def results(self):
"""Gets the results of this InlineResponse2003. # noqa: E501
:return: The results of this InlineResponse2003. # noqa: E501
:rtype: list[File]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this InlineResponse2003.
:param results: The results of this InlineResponse2003. # noqa: E501
:type results: list[File]
"""
if self.local_vars_configuration.client_side_validation and results is None: # noqa: E501
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2003):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineResponse2003):
return True
return self.to_dict() != other.to_dict()
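# ----------------------------------------------------------------------------
# Usage sketch (not generated code): constructing a page object by hand, for
# example in a unit test. An empty result list is used purely for illustration.
if __name__ == "__main__":
    page = InlineResponse2003(count=0, next=None, previous=None, results=[])
    print(page.to_dict())  # {'count': 0, 'next': None, 'previous': None, 'results': []}
    print(page == InlineResponse2003(count=0, next=None, previous=None, results=[]))  # True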
|
py | b40d05540d0ac89e301710c8590cc1081c38f89e | # Copyright (c) 2022, Aakvatech Limited and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class ResultHistoryTable(Document):
pass
|
py | b40d062a7266f73bfed20a7156a734f2cd376cac | # Copyright 2021 Alibaba, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, time, os, json, logging, subprocess
import http.client
from google.protobuf.json_format import MessageToJson
from global_conf import GlobalConf
from log import *
class ServerUtils:
def __init__(self):
gf = GlobalConf()
self.http_port = gf.http_port()
self.grpc_port = gf.grpc_port()
self.mysql_port = gf.mysql_port()
# SIGKILL -9 | SIGUSR1 -10 | SIGUSR2 -12
def stop_proxima_be(self, signal='SIGUSR2'):
logging.info("Begin stop proxima be")
cmd = "lsof -i :%s | grep LISTEN | grep proxima | awk '{print $2}' | xargs kill -s %s" % (str(self.http_port), signal)
logging.info("stop cmd: %s", cmd)
ret = os.system(cmd)
if ret != 0:
logging.error("execute cmd %s failed.", cmd)
return False
time.sleep(5)
times = 30
cmd = "lsof -i :%s" % (str(self.http_port))
while times > 0:
try:
output = subprocess.check_output(cmd, shell=True)
logging.info("output: %s", output)
except:
break
time.sleep(5)
times -= 5
logging.info("End stop proxima be")
return True
def start_proxima_be(self):
logging.info("Begin start proxima be")
src_path = os.getenv('SRC_PATH')
it_dir = src_path + '/tests/integration/script'
build_dir = os.getenv('BUILD_DIR_NAME')
cmd = 'sh %s/start_proxima_be.sh %s %s' % (it_dir, build_dir, src_path)
logging.info(cmd)
ret = os.system(cmd)
times = 30
cmd = "lsof -i :%s" % (str(self.http_port))
while times > 0:
try:
output = subprocess.check_output(cmd, shell=True)
logging.info("output: %s", output)
break
except:
time.sleep(1)
times -= 1
logging.info("End start proxima be")
return ret
# SIGKILL -9 | SIGUSR1 -10 | SIGUSR2 -12
def stop_mysql_repo(self, signal='SIGUSR2'):
cmd = "ps auxwww | grep 'bin/mysql_repository' | grep -v grep | awk '{print $2}' | xargs kill -s %s" % (signal)
logging.info("stop cmd: %s", cmd)
ret = os.system(cmd)
if ret != 0:
logging.error("execute cmd %s failed.", cmd)
return False
time.sleep(1)
return True
def start_mysql_repo(self):
src_path = os.getenv('SRC_PATH')
it_dir = src_path + '/tests/integration/script'
build_dir = os.getenv('BUILD_DIR_NAME')
cmd = 'sh %s/start_repo.sh %s %s' % (it_dir, build_dir, src_path)
logging.info(cmd)
ret = os.system(cmd)
time.sleep(1)
return ret
def stop_mysql(self):
cmd = 'mysqladmin -u root -proot shutdown'
# cmd = "ps auxwww | grep mysqld | grep -v grep | awk '{print $2}' | xarg kill -9"
logging.info("stop cmd: %s", cmd)
ret = os.system(cmd)
if ret != 0:
logging.error("execute cmd %s failed.", cmd)
return False
time.sleep(1)
return True
def start_mysql(self):
src_path = os.getenv('SRC_PATH')
it_dir = src_path + '/tests/integration/script'
build_dir = os.getenv('BUILD_DIR_NAME')
cmd = 'sh %s/start_mysql.sh %s %s' % (it_dir, build_dir, src_path)
logging.info(cmd)
ret = os.system(cmd)
return ret
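# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): restart the
# proxima_be service the way the integration tests do. Assumes SRC_PATH and
# BUILD_DIR_NAME are set in the environment, as the start_* helpers expect.
if __name__ == '__main__':
    utils = ServerUtils()
    if utils.stop_proxima_be(signal='SIGUSR2'):
        utils.start_proxima_be()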
|
py | b40d0788e1e2db22df77acae83386bcb5d12b721 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework.tensor import register_tensor_op
from oneflow.compatible.single_client.nn.module import Module
class TypeAs(Module):
def __init__(self):
super().__init__()
def forward(self, input, target):
return input.to(dtype=target.dtype)
@register_tensor_op("type_as")
def type_as_op(input, target):
"""Returns this tensor cast to the type of the given tensor.
This is a no-op if the tensor is already of the correct type.
Args:
input (Tensor): the input tensor.
target (Tensor): the tensor which has the desired type.
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32)
>>> target = flow.Tensor(np.random.randn(4, 5, 6), dtype = flow.int32)
>>> input = input.type_as(target)
>>> input.dtype
oneflow.int32
"""
return TypeAs()(input, target)
class Long(Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.to(dtype=flow.int64)
@register_tensor_op("long")
def long_op(input):
"""`Tensor.long()` is equivalent to `Tensor.to(flow.int64)`. See to().
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32)
>>> input = input.long()
>>> input.dtype
oneflow.int64
"""
return Long()(input)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
py | b40d07e54e093f073c770f5436b42c5c15c83650 | import json
import pytest
from main import check_job_state
from data import Data, JobState
@pytest.mark.parametrize(
"filepath, result",
[
("data/succeeded.json", JobState.SUCCEEDED),
("data/failed.json", JobState.FAILED),
("data/queued.json", JobState.QUEUED),
("data/cancelled.json", JobState.CANCELLED),
],
)
def test_is_succeeded(filepath: str, result: JobState):
with open(filepath) as f:
json_payload = json.load(f)
data = Data(**json_payload)
job_state = check_job_state(data)
assert job_state == result
|
py | b40d0869dfcaaba3819c9a22366445504f49dadb | import torch
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, input_shape, outputs_count, hidden_count = 512):
super(Model, self).__init__()
self.device = "cpu"
self.layers = [
nn.Linear(input_shape[0] + outputs_count, hidden_count),
nn.ReLU(),
nn.Linear(hidden_count, hidden_count//2),
nn.ReLU(),
nn.Linear(hidden_count//2, 1)
]
torch.nn.init.xavier_uniform_(self.layers[0].weight)
torch.nn.init.xavier_uniform_(self.layers[2].weight)
torch.nn.init.uniform_(self.layers[4].weight, -0.003, 0.003)
self.model = nn.Sequential(*self.layers)
self.model.to(self.device)
print("model_critic")
print(self.model)
print("\n\n")
def forward(self, state, action):
x = torch.cat([state, action], dim = 1)
return self.model(x)
def save(self, path):
torch.save(self.model.state_dict(), path + "trained/model_critic.pt")
def load(self, path):
self.model.load_state_dict(torch.load(path + "trained/model_critic.pt", map_location = self.device))
self.model.eval()
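# ----------------------------------------------------------------------------
# Shape-check sketch (illustrative, not part of the original file): a critic
# for a 4-dimensional state and a 2-dimensional action maps a batch of
# (state, action) pairs to a single Q-value per sample. The sizes are arbitrary.
if __name__ == "__main__":
    critic = Model(input_shape=(4,), outputs_count=2)
    state = torch.randn(8, 4)
    action = torch.randn(8, 2)
    print(critic(state, action).shape)  # torch.Size([8, 1])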
|
py | b40d08834e0aa418e25528c111402f707897c791 | import scipy
import scipy.signal       # submodules used below must be imported explicitly;
import scipy.interpolate  # `import scipy` alone does not expose them
import numpy as np
import statsmodels.api as sm
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
class FrequencyPlot:
def __init__(
self,
sample_rate = 48000,
num_fft = 8192,
smoothing_amount = 0.002,
smoothen = True,
ymin = None
):
self.sample_rate = sample_rate
self.nyquist = self.sample_rate // 2
self.num_fft = num_fft
self.ymin = ymin
self.num_freqs = self.sample_rate // 2
self.num_bins = self.num_fft//2+1
self.make_freqs()
self.min_value = 1e-6
self.lin_log_oversampling = 1
self.smoothing_amount = smoothing_amount # 0 to 0.04 recommended
self.lowess_it = 1
self.lowess_delta = 0.001 # High saves computation time
self.smoothen = smoothen
def make_freqs(self):
self.freqs = np.linspace(start=0, stop=self.num_freqs, num=self.num_bins)
def make_from_signal(self, signal, filename='frequency_plot.png'):
self.frequency_plot_path = filename
color = ['r', 'm', 'g', 'y']
labels = ['1','2','3','4']
fig, ax = self.format()
if signal.ndim <= 1:
fig, ax = self.doit(signal, fig=fig, ax=ax)
else:
for sig, col, lab in zip(signal, color, labels):
print(col, sig.max())
fig, ax = self.doit(sig, col, lab, fig, ax)
ax.set_ylim(bottom=self.ymin or self.mag.max()-60, top=self.mag.max())
fig.savefig(self.frequency_plot_path)
def doit(self, signal, color='r', label=None, fig=None, ax=None):
self.linewidth = 0.6
mag = self.get_mag(signal)
if self.smoothen:
mag = self.smooth_exponentially(mag)
mag = self.get_logmag(mag)
self.mag = mag
ax.plot(self.freqs, mag, linewidth=self.linewidth, color=color, label=label)
return fig, ax
def make_from_matcheq(self, source, target, correction, signal, filename='frequency_plot.png'):
self.frequency_plot_path = filename
self.linewidth = 1
self.num_fft = (source.shape[0]-1) * 2
self.make_freqs()
result = self.get_mag(signal)
if self.smoothen:
source = self.smooth_exponentially(source)
target = self.smooth_exponentially(target)
result = self.smooth_exponentially(result)
#correction = self.smooth_exponentially(correction)
self.source = self.get_logmag(source)
self.target = self.get_logmag(target)
self.result = self.get_logmag(result)
self.correction = self.get_logmag(correction)
start = next(i for i, xn in enumerate(self.freqs) if xn >= 20)
stop = next(i for i, xn in enumerate(self.freqs) if xn >= 70)
self.correction_visual_adjust = np.mean(self.target[start:stop])
self.correction = self.correction + self.correction_visual_adjust
self.plot_frequency_curve_meq()
def get_mag(self, signal):
*_, stft = scipy.signal.stft(
signal,
fs = self.sample_rate,
window = 'boxcar',
nperseg = self.num_fft,
noverlap = 0,
boundary = None,
padded = False
)
mag = np.abs(stft).mean(axis=1)
return mag
def get_logmag(self, fft):
mag = np.abs(fft)
mag[mag == 0] = self.min_value
mag = 20 * np.log10(mag)
return mag
def smooth_lowess(self, array):
# LOWESS (Locally Weighted Scatterplot Smoothing)
result = sm.nonparametric.lowess(
endog = array,
exog = np.linspace(0, 1, len(array)),
frac = self.smoothing_amount, # The fraction of the data used when estimating each y-value.
it = self.lowess_it, # The number of residual-based reweightings to perform.
delta = self.lowess_delta # Distance within which to use linear-interpolation instead of weighted regression.
)[:, 1]
return result
def smooth_exponentially(self, arr):
grid_linear = self.sample_rate * 0.5 * np.linspace(
start = 0,
stop = 1,
num = self.num_bins
)
grid_logarithmic = self.sample_rate * 0.5 * np.logspace(
start = np.log10(4/self.num_fft),
stop = 0,
num = (self.num_fft//2) * self.lin_log_oversampling + 1
)
interpolator = scipy.interpolate.interp1d(
x = grid_linear,
y = arr,
kind = 'cubic'
)
arr_log = interpolator(grid_logarithmic)
arr_log_filtered = self.smooth_lowess(array=arr_log)
interpolator = scipy.interpolate.interp1d(
x = grid_logarithmic,
y = arr_log_filtered,
kind = 'cubic',
fill_value = 'extrapolate'
)
arr_filtered = interpolator(grid_linear)
arr_filtered[0] = self.min_value
arr_filtered[1] = arr[1]
return arr_filtered
def plot_frequency_curve(self):
fig, ax0 = self.format()
ax0.plot(self.freqs, self.mag, linewidth=self.linewidth)
#ax0.set_ylim(bottom=self.ymin or self.mag.max()-60, top=self.mag.max())
return fig, ax0
def plot_frequency_curve_meq(self):
fig, ax0 = self.format()
ax0.plot(self.freqs, self.source, linewidth=self.linewidth, label='source')
ax0.plot(self.freqs, self.target, linewidth=self.linewidth, label='target')
ax0.plot(self.freqs, self.correction, linewidth=self.linewidth, label='correction')
ax0.plot(self.freqs, self.result, linewidth=self.linewidth, label='result')
ax0.legend(['source', 'target', f'correction {int(self.correction_visual_adjust)} dB', 'result'], loc='best')#loc='lower left')
fig.savefig(self.frequency_plot_path)
def format(self):
fig, ax0 = plt.subplots(nrows=1, figsize=(16,8))#figsize=(7, 9.6))
ax0.set_xscale('log')
ax0.set_title('Frequency Curve')
formatter0 = EngFormatter(unit='Hz')
ax0.xaxis.set_major_formatter(formatter0)
ax0.set_xlabel('Frequency')
ax0.set_ylabel('dBFS')
ax0.set_xlim(left=20, right=self.nyquist)
ax0.set_xticks([20, 40, 70, 130, 250, 500, 1000, 2000, 4000, 8000, 16000, self.nyquist])
#ax0.set_ylim(bottom=-60, top=None)
        ax0.xaxis.grid(True, which='major') # minor major both
        ax0.yaxis.grid(True, which='major') # minor major both
plt.tight_layout()
return fig, ax0
if __name__ == '__main__':
None
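    # Illustrative smoke test (not part of the original file): plot the
    # spectrum of a 1 kHz sine at the default 48 kHz sample rate and write
    # frequency_plot.png next to this script.
    t = np.linspace(0, 1.0, 48000, endpoint=False)
    sine = 0.5 * np.sin(2 * np.pi * 1000 * t)
    FrequencyPlot(smoothen=False).make_from_signal(sine, filename='frequency_plot.png')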
|
py | b40d08cbf448731c58ff5ea1673bf8367ac73575 | import os
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from CaffeLoader import loadCaffemodel
import argparse
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument("-style_image", help="Style target image", default='examples/inputs/seated-nude.jpg')
parser.add_argument("-style_blend_weights", default=None)
parser.add_argument("-content_image", help="Content target image", default='examples/inputs/tubingen.jpg')
parser.add_argument("-image_size", help="Maximum height / width of generated image", type=int, default=512)
parser.add_argument("-gpu", help="Zero-indexed ID of the GPU to use; for CPU mode set -gpu = -1", type=int, default=0)
# Optimization options
parser.add_argument("-content_weight", type=float, default=5e0)
parser.add_argument("-style_weight", type=float, default=1e2)
parser.add_argument("-tv_weight", type=float, default=1e-3)
parser.add_argument("-num_iterations", type=int, default=1000)
parser.add_argument("-init", choices=['random', 'image'], default='random')
parser.add_argument("-init_image", default=None)
parser.add_argument("-optimizer", choices=['lbfgs', 'adam'], default='lbfgs')
parser.add_argument("-learning_rate", type=float, default=1e0)
parser.add_argument("-lbfgs_num_correction", type=int, default=0)
# Output options
parser.add_argument("-print_iter", type=int, default=50)
parser.add_argument("-save_iter", type=int, default=100)
parser.add_argument("-output_image", default='out.png')
# Other options
parser.add_argument("-style_scale", type=float, default=1.0)
parser.add_argument("-original_colors", type=int, choices=[0, 1], default=0)
parser.add_argument("-pooling", choices=['avg', 'max'], default='max')
parser.add_argument("-model_file", type=str, default='models/vgg19-d01eb7cb.pth')
parser.add_argument("-backend", choices=['nn', 'cudnn', 'mkl'], default='nn')
parser.add_argument("-cudnn_autotune", action='store_true')
parser.add_argument("-seed", type=int, default=-1)
parser.add_argument("-content_layers", help="layers for content", default='relu4_2')
parser.add_argument("-style_layers", help="layers for style", default='relu1_1,relu2_1,relu3_1,relu4_1,relu5_1')
params = parser.parse_args()
Image.MAX_IMAGE_PIXELS = 1000000000 # Support gigapixel images
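# Example invocation (illustrative; the script name and image paths are placeholders):
#   python neural_style.py -content_image examples/inputs/tubingen.jpg \
#       -style_image examples/inputs/seated-nude.jpg -image_size 512 \
#       -num_iterations 1000 -optimizer lbfgs -output_image out.png -gpu 0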
def main():
dtype = setup_gpu()
cnn, layerList = loadCaffemodel(params.model_file, params.pooling, params.gpu)
content_image = preprocess(params.content_image, params.image_size).type(dtype)
style_image_list = params.style_image.split(',')
style_images_caffe = []
for image in style_image_list:
style_size = int(params.image_size * params.style_scale)
img_caffe = preprocess(image, style_size).type(dtype)
style_images_caffe.append(img_caffe)
if params.init_image != None:
image_size = (content_image.size(2), content_image.size(3))
init_image = preprocess(params.init_image, image_size).type(dtype)
# Handle style blending weights for multiple style inputs
style_blend_weights = []
if params.style_blend_weights == None:
# Style blending not specified, so use equal weighting
for i in style_image_list:
style_blend_weights.append(1.0)
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = int(style_blend_weights[i])
else:
style_blend_weights = params.style_blend_weights.split(',')
assert len(style_blend_weights) == len(style_image_list), \
"-style_blend_weights and -style_images must have the same number of elements!"
# Normalize the style blending weights so they sum to 1
style_blend_sum = 0
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = float(style_blend_weights[i])
style_blend_sum = float(style_blend_sum) + style_blend_weights[i]
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = float(style_blend_weights[i]) / float(style_blend_sum)
content_layers = params.content_layers.split(',')
style_layers = params.style_layers.split(',')
# Set up the network, inserting style and content loss modules
cnn = copy.deepcopy(cnn)
content_losses, style_losses, tv_losses = [], [], []
next_content_idx, next_style_idx = 1, 1
net = nn.Sequential()
c, r = 0, 0
if params.tv_weight > 0:
tv_mod = TVLoss(params.tv_weight).type(dtype)
net.add_module(str(len(net)), tv_mod)
tv_losses.append(tv_mod)
for i, layer in enumerate(list(cnn), 1):
if next_content_idx <= len(content_layers) or next_style_idx <= len(style_layers):
if isinstance(layer, nn.Conv2d):
net.add_module(str(len(net)), layer)
if layerList['C'][c] in content_layers:
print("Setting up content layer " + str(i) + ": " + str(layerList['C'][c]))
loss_module = ContentLoss(params.content_weight)
net.add_module(str(len(net)), loss_module)
content_losses.append(loss_module)
if layerList['C'][c] in style_layers:
print("Setting up style layer " + str(i) + ": " + str(layerList['C'][c]))
loss_module = StyleLoss(params.style_weight)
net.add_module(str(len(net)), loss_module)
style_losses.append(loss_module)
c+=1
if isinstance(layer, nn.ReLU):
net.add_module(str(len(net)), layer)
if layerList['R'][r] in content_layers:
print("Setting up content layer " + str(i) + ": " + str(layerList['R'][r]))
loss_module = ContentLoss(params.content_weight)
net.add_module(str(len(net)), loss_module)
content_losses.append(loss_module)
next_content_idx += 1
if layerList['R'][r] in style_layers:
print("Setting up style layer " + str(i) + ": " + str(layerList['R'][r]))
loss_module = StyleLoss(params.style_weight)
net.add_module(str(len(net)), loss_module)
style_losses.append(loss_module)
next_style_idx += 1
r+=1
if isinstance(layer, nn.MaxPool2d) or isinstance(layer, nn.AvgPool2d):
net.add_module(str(len(net)), layer)
# Capture content targets
for i in content_losses:
i.mode = 'capture'
print("Capturing content targets")
print_torch(net)
net(content_image)
# Capture style targets
for i in content_losses:
i.mode = 'None'
for i, image in enumerate(style_images_caffe):
print("Capturing style target " + str(i+1))
for j in style_losses:
j.mode = 'capture'
j.blend_weight = style_blend_weights[i]
net(style_images_caffe[i])
# Set all loss modules to loss mode
for i in content_losses:
i.mode = 'loss'
for i in style_losses:
i.mode = 'loss'
# Freeze the network in order to prevent
# unnecessary gradient calculations
for param in net.parameters():
param.requires_grad = False
# Initialize the image
if params.seed >= 0:
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
torch.backends.cudnn.deterministic=True
if params.init == 'random':
B, C, H, W = content_image.size()
img = torch.randn(C, H, W).mul(0.001).unsqueeze(0).type(dtype)
elif params.init == 'image':
if params.init_image != None:
img = init_image.clone()
else:
img = content_image.clone()
img = nn.Parameter(img.type(dtype))
def maybe_print(t, loss):
if params.print_iter > 0 and t % params.print_iter == 0:
print("Iteration " + str(t) + " / "+ str(params.num_iterations))
for i, loss_module in enumerate(content_losses):
print(" Content " + str(i+1) + " loss: " + str(loss_module.loss.item()))
for i, loss_module in enumerate(style_losses):
print(" Style " + str(i+1) + " loss: " + str(loss_module.loss.item()))
print(" Total loss: " + str(loss.item()))
def maybe_save(t):
should_save = params.save_iter > 0 and t % params.save_iter == 0
should_save = should_save or t == params.num_iterations
if should_save:
output_filename, file_extension = os.path.splitext(params.output_image)
if t == params.num_iterations:
filename = output_filename + str(file_extension)
else:
filename = str(output_filename) + "_" + str(t) + str(file_extension)
disp = deprocess(img.clone())
# Maybe perform postprocessing for color-independent style transfer
if params.original_colors == 1:
disp = original_colors(deprocess(content_image.clone()), disp)
disp.save(str(filename))
# Function to evaluate loss and gradient. We run the net forward and
# backward to get the gradient, and sum up losses from the loss modules.
# optim.lbfgs internally handles iteration and calls this function many
# times, so we manually count the number of iterations to handle printing
# and saving intermediate results.
num_calls = [0]
def feval():
num_calls[0] += 1
optimizer.zero_grad()
net(img)
loss = 0
for mod in content_losses:
loss += mod.loss
for mod in style_losses:
loss += mod.loss
if params.tv_weight > 0:
for mod in tv_losses:
loss += mod.loss
loss.backward()
maybe_save(num_calls[0])
maybe_print(num_calls[0], loss)
return loss
optimizer, loopVal = setup_optimizer(img)
while num_calls[0] <= loopVal:
optimizer.step(feval)
# Configure the optimizer
def setup_optimizer(img):
if params.optimizer == 'lbfgs':
print("Running optimization with L-BFGS")
optim_state = {
'max_iter': params.num_iterations,
'tolerance_change': -1,
'tolerance_grad': -1,
}
if params.lbfgs_num_correction > 0:
optim_state['history_size'] = params.lbfgs_num_correction
optimizer = optim.LBFGS([img], **optim_state)
loopVal = 1
elif params.optimizer == 'adam':
print("Running optimization with ADAM")
optimizer = optim.Adam([img], lr = params.learning_rate)
loopVal = params.num_iterations - 1
return optimizer, loopVal
def setup_gpu():
if params.gpu > -1:
if params.backend == 'cudnn':
torch.backends.cudnn.enabled = True
if params.cudnn_autotune:
torch.backends.cudnn.benchmark = True
else:
torch.backends.cudnn.enabled = False
torch.cuda.set_device(params.gpu)
dtype = torch.cuda.FloatTensor
elif params.gpu == -1:
if params.backend =='mkl':
torch.backends.mkl.enabled = True
dtype = torch.FloatTensor
return dtype
# Preprocess an image before passing it to a model.
# We need to rescale from [0, 1] to [0, 255], convert from RGB to BGR,
# and subtract the mean pixel.
def preprocess(image_name, image_size):
image = Image.open(image_name).convert('RGB')
if type(image_size) is not tuple:
image_size = tuple([int((float(image_size) / max(image.size))*x) for x in (image.height, image.width)])
Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
rgb2bgr = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])])])
Normalize = transforms.Compose([transforms.Normalize(mean=[103.939, 116.779, 123.68], std=[1,1,1])])
tensor = Normalize(rgb2bgr(Loader(image) * 256)).unsqueeze(0)
return tensor
# Undo the above preprocessing.
def deprocess(output_tensor):
Normalize = transforms.Compose([transforms.Normalize(mean=[-103.939, -116.779, -123.68], std=[1,1,1])])
bgr2rgb = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])])])
output_tensor = bgr2rgb(Normalize(output_tensor.squeeze(0).cpu())) / 256
output_tensor.clamp_(0, 1)
Image2PIL = transforms.ToPILImage()
image = Image2PIL(output_tensor.cpu())
return image
# Combine the Y channel of the generated image and the UV/CbCr channels of the
# content image to perform color-independent style transfer.
def original_colors(content, generated):
content_channels = list(content.convert('YCbCr').split())
generated_channels = list(generated.convert('YCbCr').split())
content_channels[0] = generated_channels[0]
return Image.merge('YCbCr', content_channels).convert('RGB')
# Print like Lua/Torch7
def print_torch(net):
simplelist = ""
for i, layer in enumerate(net, 1):
simplelist = simplelist + "(" + str(i) + ") -> "
print("nn.Sequential ( \n [input -> " + simplelist + "output]")
def strip(x):
return str(x).replace(", ",',').replace("(",'').replace(")",'') + ", "
def n():
return " (" + str(i) + "): " + "nn." + str(l).split("(", 1)[0]
for i, l in enumerate(net, 1):
if "2d" in str(l):
ks, st, pd = strip(l.kernel_size), strip(l.stride), strip(l.padding)
if "Conv2d" in str(l):
ch = str(l.in_channels) + " -> " + str(l.out_channels)
print(n() + "(" + ch + ", " + (ks).replace(",",'x', 1) + st + pd.replace(", ",')'))
elif "Pool2d" in str(l):
st = st.replace(" ",' ') + st.replace(", ",')')
print(n() + "(" + ((ks).replace(",",'x' + ks, 1) + st).replace(", ",','))
else:
print(n())
print(")")
# Define an nn Module to compute content loss
class ContentLoss(nn.Module):
def __init__(self, strength):
super(ContentLoss, self).__init__()
self.strength = strength
self.crit = nn.MSELoss()
self.mode = 'None'
def forward(self, input):
if self.mode == 'loss':
self.loss = self.crit(input, self.target) * self.strength
elif self.mode == 'capture':
self.target = input.detach()
return input
class GramMatrix(nn.Module):
def forward(self, input):
B, C, H, W = input.size()
x_flat = input.view(C, H * W)
return torch.mm(x_flat, x_flat.t())
# Define an nn Module to compute style loss
class StyleLoss(nn.Module):
def __init__(self, strength):
super(StyleLoss, self).__init__()
self.target = torch.Tensor()
self.strength = strength
self.gram = GramMatrix()
self.crit = nn.MSELoss()
self.mode = 'None'
self.blend_weight = None
def forward(self, input):
self.G = self.gram(input)
self.G = self.G.div(input.nelement())
if self.mode == 'capture':
if self.blend_weight == None:
self.target = self.G.detach()
elif self.target.nelement() == 0:
self.target = self.G.detach().mul(self.blend_weight)
else:
self.target = self.target.add(self.blend_weight, self.G.detach())
elif self.mode == 'loss':
self.loss = self.strength * self.crit(self.G, self.target)
return input
class TVLoss(nn.Module):
def __init__(self, strength):
super(TVLoss, self).__init__()
self.strength = strength
def forward(self, input):
self.x_diff = input[:,:,1:,:] - input[:,:,:-1,:]
self.y_diff = input[:,:,:,1:] - input[:,:,:,:-1]
self.loss = self.strength * (torch.sum(torch.abs(self.x_diff)) + torch.sum(torch.abs(self.y_diff)))
return input
if __name__ == "__main__":
main() |
py | b40d0a6235d769287f7f77b88dd0bdd258c69dc9 | input = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
output = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
|
py | b40d0d1fd69ac4fa7ebff7a3991b72c24a146e3e | #!/usr/bin/python3
"""
CalPal: A calorie tracking app.
Written by Nhat Nguyen and Albert Ong.
CMPE 131
Last Revised by Nhat Nguyen: 24.27.2018
reader.py
A Python module that checks whether the submitted user information
matches the database.
TODO: Optimize reading from and writing to the database
"""
# Import pandas
import pandas as pd
def convert(df_column):
"""
Converts a DataFrame column to list
"""
data_list = []
for element in df_column:
data_list.append(element)
return data_list
def checkEmail(email):
"""
Checks if a given email is currently in the database.
"""
email_list = getUserDatabase()[2]
return email in email_list
def checkLogin(email, password):
"""
Validates the user email and password.
Args:
email (str): The email from the submitted form.
password (str): The password from the submitted form.
Returns:
        bool: True when the email and password match the database, False otherwise.
"""
# Retrieves the entire user database.
database = getUserDatabase()
    # Retrieves the list of emails and passwords.
email_list = database[2]
password_list = database[3]
# Checks if the inputted email exists in the database.
if email in email_list:
# Gets the index associated with the row of the user's data.
user_index = email_list.index(email)
# Returns True if the inputted email and password match
if email_list[user_index] == email and password_list[user_index] == password:
return True
# Returns False if the email is not in the database.
else:
return False
def getUserData(email):
"""
A function that returns a user's data given the user's email.
This is specifically used in login_redirect.
Returns a list of strings
"""
# Retrieves the datafile.
datafile = pd.read_excel(getUserDatabasePath(), sheet_name="Sheet1")
# Retrieves the index associated with the user's email.
index = datafile.loc[datafile["Email"] == email].index[0]
# A list that will eventually store all of ther user's data.
user_data = []
# Iterates though ever list in the database.
for data_list in getUserDatabase():
# When integers are read, they're actually read as int64 types.
# This converts them to conventional ints.
try:
add_data = int(data_list[index])
except ValueError:
add_data = data_list[index]
# Adds the piece of data to user_data.
user_data.append(add_data)
# Returns the user's data.
return user_data
def createUser(new_user_data):
"""
Creates a new users if the inputted email is unique.
new_user_data is formatted:
[fname,
lname,
email,
password,
gender,
birth_day,
birth_month,
birth_year,
height, weight,
calorie_goal]
"""
# Retrieves the inputted email.
email = new_user_data[2]
# Only creates a new user if the email is not currently
# in the database.
if not checkEmail(email):
# Adds the email and password to a dictionary.
password = new_user_data[3]
dict_email_password[email] = password
# A list that will eventually become the new database.
new_database = []
# Iterates through every column in the database.
for index, column in enumerate(getUserDatabase()):
# Appends the new user data to the column.
column.append(new_user_data[index])
# Appends the column, now with the new user data added, to
# the new database.
new_database.append(column)
# Writes the new database to user_database.xlsx
writeToUserDatabase(new_database)
def writeNewUserData(old_email, new_user_data):
"""
Writes a set of new user data to user_database.xlsx.
"""
# Retrieves the datafile.
datafile = pd.read_excel(getUserDatabasePath(), sheet_name="Sheet1")
# Retrieves the index associated with the user's previous email.
user_index = datafile.loc[datafile["Email"] == old_email].index[0]
# A list that will eventually store the updated user batabase.
updated_database = []
# Iterates through every column in the database.
for data_index, column in enumerate(getUserDatabase()):
# Replaces the value in the column at the given user's index
# with the new user data.
column[user_index] = new_user_data[data_index]
# Adds the column to the updated database.
updated_database.append(column)
# Writes the updated database to user_database.xlsx.
writeToUserDatabase(updated_database)
def getDatabase(database_path, columns):
"""
Returns a database given the path to the database file and a list
of the names of the columns
"""
# Create a Pandas dataframe from the excel file
df = pd.read_excel(database_path, sheet_name="Sheet1")
# A list that will store all the values in the database.
# This will be the final output.
database = []
# Uses a for loop to iterate each column of the database.
for column_name in columns:
# Retrieves the database column.
database_column = convert(df[column_name])
# Appends the column to the final output.
database.append(database_column)
# Returns the final output.
return database
def getUserDatabase():
"""
Returns the entire user database.
"""
user_data_columns = ("First Name",
"Last Name",
"Email",
"Password",
"Gender",
"Birth-day",
"Birth-month",
"Birth-year",
"Height",
"Weight",
"Calorie goal")
return getDatabase(getUserDatabasePath(), user_data_columns)
def writeToUserDatabase(new_database):
"""
Takes a list of columns and writes the columns to user_database.xlsx
This function is used in both createUser and writeNewUserData.
"""
# A dictionary that will store the column names as keys
# and a list of data as values.
dataframe_dict = {}
# Uses a for loop to interate through each column name
for column_index, column_name in enumerate(("First Name",
"Last Name",
"Email",
"Password",
"Gender",
"Birth-day",
"Birth-month",
"Birth-year",
"Height",
"Weight",
"Calorie goal")):
# Assigns the column name to the list in the new database.
dataframe_dict[column_name] = new_database[column_index]
# Creates a Pandas dataframe from the data.
df = pd.DataFrame(dataframe_dict)
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(getUserDatabasePath(), engine="xlsxwriter")
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
def getUserDatabasePath():
"""
Returns the path of the user database depending on whether or not this
file is being run on reader.py or app.py.
"""
if __name__ == "__main__":
database_path = "../../database/user_database.xlsx"
else:
database_path = "../database/user_database.xlsx"
return database_path
#=======================================================================
# Create a Pandas dataframe from the excel file
df = pd.read_excel(getUserDatabasePath(), sheet_name="Sheet1")
# Save columns as list
list_fname = convert(df["First Name"])
list_lname = convert(df["Last Name"])
list_email = convert(df["Email"])
list_password = convert(df["Password"])
list_gender = convert(df["Gender"])
list_birth_day = convert(df["Birth-day"])
list_birth_month = convert(df["Birth-month"])
list_birth_year = convert(df["Birth-year"])
list_height = convert(df["Height"])
list_weight = convert(df["Weight"])
list_calorie_goal = convert(df["Calorie goal"])
# Create a dictionary (KEY email: VALUE password) for user information
dict_email_password = {}
for i in range(len(list_email)):
dict_email_password[list_email[i]] = list_password[i]
if __name__ == "__main__":
fname = "John"
lname = "Doe"
email = "[email protected]"
password = "testing"
gender = "Female"
birth_day = 1
birth_month = 12
birth_year = 1998
height = 70
print(getUserData("[email protected]"))
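    # Illustrative only (commented out so this demo stays read-only):
    # createUser expects the 11-field list documented in its docstring; the
    # final two values (weight, calorie goal) below are placeholder numbers.
    # createUser([fname, lname, email, password, gender,
    #             birth_day, birth_month, birth_year, height, 70, 2000])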
|
py | b40d0e5958c38b87b1a9609cd37b1c16e85d96af | from detectron2.config.defaults import _C
from detectron2.config import CfgNode as CN
# ---------------------------------------------------------------------------- #
# Additional Configs
# ---------------------------------------------------------------------------- #
_C.MODEL.MOBILENET = False
_C.MODEL.BACKBONE.ANTI_ALIAS = False
_C.MODEL.RESNETS.DEFORM_INTERVAL = 1
_C.INPUT.HFLIP_TRAIN = True
_C.DEBUG = CN()
_C.DEBUG.OVERFIT_NUM_IMAGES = -1
# Global tag
_C.EXPERIMENT_NAME = "dafne"
# Automatic Mixed Precision
_C.SOLVER.AMP = CN({"ENABLED": False})
# Optimizer type: one of "sgd", "adam"
_C.SOLVER.OPTIMIZER = "sgd"
# Set area/width/height min
_C.INPUT.MIN_AREA = 10
_C.INPUT.MIN_SIDE = 2
# ---------------------------------------------------------------------------- #
# TOP Module Options
# ---------------------------------------------------------------------------- #
_C.MODEL.TOP_MODULE = CN()
_C.MODEL.TOP_MODULE.NAME = "" # Can be "conv"
_C.MODEL.TOP_MODULE.DIM = 16
# ---------------------------------------------------------------------------- #
# DAFNE Head
# ---------------------------------------------------------------------------- #
_C.MODEL.DAFNE = CN()
# This is the number of foreground classes.
_C.MODEL.DAFNE.NUM_CLASSES = 15
_C.MODEL.DAFNE.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
_C.MODEL.DAFNE.FPN_STRIDES = [8, 16, 32, 64, 128]
_C.MODEL.DAFNE.PRIOR_PROB = 0.01
_C.MODEL.DAFNE.INFERENCE_TH_TRAIN = 0.05
_C.MODEL.DAFNE.INFERENCE_TH_TEST = 0.05
_C.MODEL.DAFNE.NMS_TH = 0.1
_C.MODEL.DAFNE.PRE_NMS_TOPK_TRAIN = 2000
_C.MODEL.DAFNE.PRE_NMS_TOPK_TEST = 2000
_C.MODEL.DAFNE.POST_NMS_TOPK_TRAIN = 1000
_C.MODEL.DAFNE.POST_NMS_TOPK_TEST = 1000
_C.MODEL.DAFNE.TOP_LEVELS = 2
_C.MODEL.DAFNE.NORM = "GN" # Support GN or none
_C.MODEL.DAFNE.USE_SCALE = True
_C.MODEL.DAFNE.LOSS_SMOOTH_L1_BETA = 1.0 / 9.0 # Smooth L1 loss beta
_C.MODEL.DAFNE.ENABLE_LOSS_MODULATION = True # Use modulated loss
_C.MODEL.DAFNE.ENABLE_LOSS_LOG = True # Use modulated loss
_C.MODEL.DAFNE.SORT_CORNERS = True # Use the canonical representation for corners
_C.MODEL.DAFNE.SORT_CORNERS_DATALOADER = True # Use the canonical representation for corners
_C.MODEL.DAFNE.CENTERNESS = "oriented" # "Must be one of ["none", "plain", "oriented"]
_C.MODEL.DAFNE.CENTERNESS_ALPHA = 5 # Smoothing parameter used for pow(ctr', 1/alpha)
_C.MODEL.DAFNE.CENTERNESS_USE_IN_SCORE = True
# Must be one of ["direct", "iterative", "offset", "center-to-corner"]
_C.MODEL.DAFNE.CORNER_PREDICTION = "center-to-corner"
_C.MODEL.DAFNE.CORNER_TOWER_ON_CENTER_TOWER = True
_C.MODEL.DAFNE.MERGE_CORNER_CENTER_PRED = False
# Enable the assignment of different sizes for different feature levels
_C.MODEL.DAFNE.ENABLE_LEVEL_SIZE_FILTERING = True
_C.MODEL.DAFNE.ENABLE_IN_BOX_CHECK = True
_C.MODEL.DAFNE.ENABLE_FPN_STRIDE_NORM = True
# Multiply centerness before threshold
# This will affect the final performance by about 0.05 AP but save some time
_C.MODEL.DAFNE.THRESH_WITH_CTR = False
# If centereness should be on regression or classification branch (true: regression, false: classification)
_C.MODEL.DAFNE.CTR_ON_REG = True
# Focal loss parameters
_C.MODEL.DAFNE.LOSS_ALPHA = 0.25
_C.MODEL.DAFNE.LOSS_GAMMA = 2.0
_C.MODEL.DAFNE.SIZES_OF_INTEREST = [64, 128, 256, 512]
_C.MODEL.DAFNE.USE_RELU = True
_C.MODEL.DAFNE.USE_DEFORMABLE = False
# Loss lambdas
_C.MODEL.DAFNE.LOSS_LAMBDA = CN()
_C.MODEL.DAFNE.LOSS_LAMBDA_NORM = True # Normalize lambdas to sum up to 1
_C.MODEL.DAFNE.LOSS_LAMBDA.CORNERS = 1.0
_C.MODEL.DAFNE.LOSS_LAMBDA.BOX = 1.0
_C.MODEL.DAFNE.LOSS_LAMBDA.LTRB = 1.0
_C.MODEL.DAFNE.LOSS_LAMBDA.CTR = 1.0
_C.MODEL.DAFNE.LOSS_LAMBDA.CLS = 1.0
_C.MODEL.DAFNE.LOSS_LAMBDA.CENTER = 1.0
# the number of convolutions used in the cls and bbox tower
_C.MODEL.DAFNE.NUM_CLS_CONVS = 4
_C.MODEL.DAFNE.NUM_BOX_CONVS = 4
_C.MODEL.DAFNE.NUM_SHARE_CONVS = 0
_C.MODEL.DAFNE.CENTER_SAMPLE = True
_C.MODEL.DAFNE.CENTER_SAMPLE_ONLY = False
_C.MODEL.DAFNE.COMBINE_CENTER_SAMPLE = True
_C.MODEL.DAFNE.POS_RADIUS = 2.0
_C.MODEL.DAFNE.LOC_LOSS_TYPE = "smoothl1" # Can be iou, giou, smoothl1
_C.MODEL.DAFNE.YIELD_PROPOSAL = False
# Test Time Augmentation
_C.TEST.AUG.VFLIP = True
_C.TEST.AUG.HFLIP = True
# _C.TEST.AUG.ROTATION_ANGLES = (0, 90, 180, 270)
_C.TEST.AUG.ROTATION_ANGLES = ()
_C.TEST.NUM_PRED_VIS = 20
# IoU Threshold at test time
_C.TEST.IOU_TH = 0.5
# Rotation angles for training augmentation
_C.INPUT.ROTATION_AUG_ANGLES = [0.0, 90.0, 180.0, 270.0]
_C.INPUT.RESIZE_TYPE = "shortest-edge" # Can be one of ["shortest-edge", "both"]
_C.INPUT.RESIZE_HEIGHT_TRAIN = 0 # Only valid if RESIZE_TYPE=="both"
_C.INPUT.RESIZE_WIDTH_TRAIN = 0
_C.INPUT.RESIZE_HEIGHT_TEST = 0 # Only valid if RESIZE_TYPE=="both"
_C.INPUT.RESIZE_WIDTH_TEST = 0
# Can be one of "choice" or "range"
_C.INPUT.ROTATION_AUG_SAMPLE_STYLE = "choice"
# Enable color augmentation such as saturation, brightness etc
_C.INPUT.USE_COLOR_AUGMENTATIONS = False
_C.MODEL.META_ARCHITECTURE = "OneStageDetector"
_C.MODEL.BACKBONE.NAME = "build_dafne_resnet_fpn_backbone"
_C.MODEL.RESNETS.OUT_FEATURES = ["res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = ["res3", "res4", "res5"]
_C.MODEL.PROPOSAL_GENERATOR.NAME = "DAFNe"
_C.MODEL.DLA = CN()
_C.MODEL.DLA.NORM = "BN"
_C.MODEL.DLA.CONV_BODY = "DLA34"
# If true, dota 1.5 will be loaded with the same classes as dota 1.0
# to allow for training dota 1.0 in conjunction with the 1.5 annotations
_C.DATASETS.DOTA_REMOVE_CONTAINER_CRANE = False
_C.MODEL.CONVNEXT = CN()
_C.MODEL.CONVNEXT.SIZE = "base"
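# ----------------------------------------------------------------------------
# Usage sketch (illustrative; the YAML path below is a placeholder): downstream
# code would typically clone this extended default config and merge an
# experiment file on top of it.
#   cfg = _C.clone()
#   cfg.merge_from_file("configs/dota-1.0/dafne_r50.yaml")
#   cfg.freeze()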
|
py | b40d0e85b1d9e1e976bc43ea2cc49d4b41737fcb | #!/usr/bin/python3
import re
with open('input.txt', 'r') as f:
alldata = f.readlines()
f.close()
def part1(data):
total_1_4_7_8 = 0
for line in data:
cols = line.strip().split(' | ')
patterns = cols[0].strip().split(' ')
outputs = cols[1].strip().split(' ')
for output in outputs:
if len(output) == 2: # 1
total_1_4_7_8 += 1
elif len(output) == 4: # 4
total_1_4_7_8 += 1
elif len(output) == 3: # 7
total_1_4_7_8 += 1
elif len(output) == 7: # 8
total_1_4_7_8 += 1
return total_1_4_7_8
def fanout(word):
return sorted([char for char in word])
def read_7seg(wires):
code = ''.join(str(wire) for wire in wires)
if code == 'abcefg':
return 0
elif code == 'cf':
return 1
elif code == 'acdeg':
return 2
elif code == 'acdfg':
return 3
elif code == 'bcdf':
return 4
elif code == 'abdfg':
return 5
elif code == 'abdefg':
return 6
elif code == 'acf':
return 7
elif code == 'abcdefg':
return 8
elif code == 'abcdfg':
return 9
return -1
def write_7seg(number):
if number == 0:
return fanout('abcefg')
elif number == 1:
return fanout('cf')
elif number == 2:
return fanout('acdeg')
elif number == 3:
return fanout('acdfg')
elif number == 4:
return fanout('bcdf')
elif number == 5:
return fanout('abdfg')
elif number == 6:
return fanout('abdefg')
elif number == 7:
return fanout('acf')
elif number == 8:
return fanout('abcdefg')
elif number == 9:
return fanout('abcdfg')
return []
def is_valid_7seg(wires):
return read_7seg(wires) >= 0
def remap_wires(wirein, wiremap):
wireout = []
for wire in wirein:
wireout.append(wiremap[wire])
return sorted(wireout)
def get_wiremap(patterns):
# Initialize wire map
abcdefg = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
wiremap = {}
for wire in abcdefg:
wiremap[wire] = abcdefg.copy()
# Determine wire mappings
for pattern in patterns:
if len(pattern) == 2: # 1
for wirein in fanout(pattern):
for wireout in abcdefg:
if wireout not in write_7seg(1):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
for wirein in abcdefg:
if wirein not in fanout(pattern):
for wireout in write_7seg(1):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
elif len(pattern) == 4: # 4
for wirein in fanout(pattern):
for wireout in abcdefg:
if wireout not in write_7seg(4):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
for wirein in abcdefg:
if wirein not in fanout(pattern):
for wireout in write_7seg(4):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
elif len(pattern) == 3: # 7
for wirein in fanout(pattern):
for wireout in abcdefg:
if wireout not in write_7seg(7):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
for wirein in abcdefg:
if wirein not in fanout(pattern):
for wireout in write_7seg(7):
if wireout in wiremap[wirein]:
wiremap[wirein].remove(wireout)
for a_to in wiremap['a']:
for b_to in wiremap['b']:
for c_to in wiremap['c']:
for d_to in wiremap['d']:
for e_to in wiremap['e']:
for f_to in wiremap['f']:
for g_to in wiremap['g']:
candidate_map = {'a': a_to, 'b': b_to, 'c': c_to, 'd': d_to, 'e': e_to, 'f': f_to, 'g': g_to}
is_valid = True
for pattern in patterns:
is_valid &= is_valid_7seg(remap_wires(pattern, candidate_map))
if is_valid:
return candidate_map
def part2(data):
total = 0
for line in data:
# Parse notes
cols = line.strip().split(' | ')
patterns = cols[0].strip().split(' ')
outputs = cols[1].strip().split(' ')
wiremap = get_wiremap(patterns)
decoded_output = ''
for output in outputs:
decoded_output += str(read_7seg(remap_wires(fanout(output), wiremap)))
total += int(decoded_output)
return total
print(part1(alldata))
print(part2(alldata)) |
py | b40d10d7a7222b94483b4a1403a6b76a1fea4b53 | from app import create_app,db
from flask_script import Manager,Server
from app.models import User,Pitch
from flask_migrate import Migrate, MigrateCommand
#creating app instance
#app = create_app('development')
#app = create_app('test')
app = create_app('production')
migrate = Migrate(app,db)
manager = Manager(app)
manager.add_command('server',Server)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
'''Run the unit tests'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app=app,db=db,User=User,Pitch=Pitch)
if __name__ == '__main__':
manager.run() |
py | b40d10e8ee08da7ccf9b0c807e059fc77aaacbdd | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
- Replace xgettext -k_T -n
- support string like _T("xxx") and {_T string("xxxx")}
- generates message.po with the same syntax as regular xgettext
- translatable string sort may differ from regular xgettext
Copyright © 2005-2014 The Galette Team
This file is part of Galette (http://galette.tuxfamily.org).
Galette is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galette is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Galette. If not, see <http://www.gnu.org/licenses/>.
@author Didier Chevalier <[email protected]>
@author Johan Cwiklinski <[email protected]>
"""
import sys
import re
# pattern definition
translatable= re.compile("_T\((\"[^\"]*\")\)")
tpl_translatable= re.compile("_T\ string=(\"[^\"]*\")")
# constants string
startLoc = "#: "
nextLoc = " "
#
dico = {}
def location() :
return inputFileName + ":" + str(lineNum+1)
#
for inputFileName in sys.argv[1:] :
inFile=open(inputFileName)
lines = inFile.readlines()
inFile.close()
# get line
for lineNum, line in enumerate(lines) :
# search translatable strings
matchs = translatable.findall(line)
for match in matchs:
if dico.has_key(match):
if dico[match][-1:] == "\n":
dico[match] += startLoc + location()
else :
dico[match] += nextLoc + location() + "\n"
else :
dico[match] = startLoc + location()
tpl_matchs = tpl_translatable.findall(line)
for tpl_match in tpl_matchs:
if dico.has_key(tpl_match):
if dico[tpl_match][-1:] == "\n":
dico[tpl_match] += startLoc + location()
else :
dico[tpl_match] += nextLoc + location() + "\n"
else :
dico[tpl_match] = startLoc + location()
#
outFile = open("messages.po",'w')
for k, v in dico.iteritems():
outFile.write(v)
if v[-1:] != "\n" :
outFile.write("\n")
outFile.write("msgid " + k + "\nmsgstr \"\"\n\n")
outFile.close()
|
py | b40d1235fb3097f8a9b577891988a624a082c0ff | from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QComboBox, QLineEdit, QPushButton, QDesktopWidget, QCheckBox, QMessageBox, QPlainTextEdit
from PyQt5.QtGui import QPixmap, QIcon, QCursor
from PyQt5 import Qt
from PyQt5 import QtCore
import sys
import math
import random
import time
class InsertionSort(QWidget):
def __init__(self, minRange = 0, maxRange = 10000, uret = 1000):
super().__init__()
self.minRange = int(minRange)
self.maxRange = int(maxRange)
self.uret = int(uret)
self.setUI()
def setUI(self):
self.Sayilar = []
self.sayiUret(self.uret)
global count
self.count = 0
self.algorithm = QLabel(self)
self.algorithm.move(370,10)
self.algorithm.setText("Kullanılan Algoritma: Insertion Sort")
self.algorithm.setStyleSheet("color: white; font-family: Arial Black; font-size: 15px;")
self.label = QLabel(self)
self.label.setText("Kullanılan Dizi: ")
self.label.move(10,50)
self.label.setStyleSheet("color: white; font-family: Arial Black; font-size: 15px;")
self.kullanilanDizi = QPlainTextEdit(self)
self.kullanilanDizi.setGeometry(150,50,790,150)
self.kullanilanDizi.insertPlainText(str(self.Sayilar))
self.kullanilanDizi.setStyleSheet("background-color: #000066;border-style: outset;color: white;border-color: white;border-width: 2px;border-radius: 10px;font: bold 14px;")
self.kullanilanDizi.setReadOnly(True)
self.setWindowIcon(QIcon('img/logo.png'))
self.setWindowTitle("Sonuclar")
self.centerPoint = QDesktopWidget().availableGeometry().center()
self.setGeometry(self.centerPoint.x() - 475, self.centerPoint.y() - 225,950,450)
self.setMinimumSize(QSize(950,450))
self.setMaximumSize(QSize(950,450))
self.setStyleSheet("background-color: #000066")
self.hesapla()
def sayiUret(self, x):
for a in range(x):
EklenecekSayi = random.randint(self.minRange, self.maxRange)
self.Sayilar.append(EklenecekSayi)
def hesapla(self):
self.BaslamaZamani = time.time()
self.insertion_sort(self.Sayilar)
self.BitisZamani = time.time()
self.zaman = self.BitisZamani-self.BaslamaZamani
self.label2 = QLabel(self)
self.label2.setText("Sıralanmış Hali: ")
self.label2.move(10,210)
self.label2.setStyleSheet("color: white; font-family: Arial Black; font-size: 15px;")
self.siralananDizi = QPlainTextEdit(self)
self.siralananDizi.setGeometry(150,210,790,150)
self.siralananDizi.insertPlainText(str(self.Sayilar))
self.siralananDizi.setStyleSheet("background-color: #000066;border-style: outset;color: white;border-color: white;border-width: 2px;border-radius: 10px;font: bold 14px;")
self.siralananDizi.setReadOnly(True)
self.adimSayisi = QLabel(self)
self.adimSayisi.move(345,370)
self.adimSayisi.setText(f"Sıralama için yapılan işlem sayısı: {self.count}")
self.adimSayisi.setStyleSheet("color: white; font-family: Arial Black; font-size: 15px;")
self.sure = QLabel(self)
self.sure.move(325,390)
self.sure.setText("Algoritmanın çalışma süresi: {:.8f} saniye".format(self.zaman))
self.sure.setStyleSheet("color: white; font-family: Arial Black; font-size: 15px;")
def insertion_sort(self, Sayilar):
for index in range(1, len(Sayilar)):
self.count += 1
mevcutDeger = Sayilar[index]
pozisyon = index
while pozisyon > 0 and Sayilar[pozisyon - 1] > mevcutDeger:
self.count += 1
Sayilar[pozisyon] = Sayilar[pozisyon - 1]
pozisyon = pozisyon - 1
Sayilar[pozisyon] = mevcutDeger
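# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): the widget needs
# a running QApplication before it can be shown.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    pencere = InsertionSort(minRange=0, maxRange=10000, uret=1000)
    pencere.show()
    sys.exit(app.exec_())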
|
py | b40d1253cd6cb0072d89ba9864a778d0a6e0313a | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
from django.conf import settings
from django.conf.urls.static import static
from homepage.sitemaps import HomepageSitemap
from contact.sitemaps import ContactSitemap
from products.sitemaps import ProductSitemap, AllProductsSitemap
from django.contrib.sitemaps.views import sitemap
admin.site.site_header = 'Brand Name Website Administration'
admin.site.index_title = 'Administration Page'
admin.site.site_title = 'Brand Name Website Administration'
sitemaps = {
'homepage' : HomepageSitemap,
'contact' : ContactSitemap,
'product' : ProductSitemap,
'all_product' : AllProductsSitemap,
}
urlpatterns = [
path('', include('homepage.urls')),
path('admin/', admin.site.urls),
path('contact/', include('contact.urls')),
path('products/', include('products.urls')),
path('robots.txt', TemplateView.as_view(
template_name="robots.txt",
content_type="text/plain")
),
path('sitemap.xml', sitemap, {'sitemaps' : sitemaps}),
]
|
py | b40d12650e58191184f6ae48d0e0559ba5348022 | # pylama:ignore=W0611
from .CategoricalInputer import CategoricalInputer
from .NumericInputer import NumericInputer
|
py | b40d12fdc9bdeeff50fe6bd60e233f3727a3c0af | import subprocess
import musixmatch
import getKey
def getCurrentTrack():
currentTrack = subprocess.Popen(['osascript', 'getCurrentTrack.scpt'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return currentTrack.communicate()[0].decode('utf-8').strip().split('-')
def getLyrics():
return lyrics
currentTrack = getCurrentTrack()
currentInfo = {
"artist": currentTrack[0],
"track": currentTrack[1]
}
print(currentInfo)
musixmatch_key = getKey.musixmatch()[:-1]
# try:
# musixmatch
# chart = musixmatch.ws.track.chart.get(country='it', apikey=apikey)
# except musixmatch.api.Error, e:
# pass
|
py | b40d14127fb26ee06ce4f40a578acacfb8d63791 | import requests
import pyttsx3
from bs4 import BeautifulSoup
import json
engine = pyttsx3.init()
en_voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
engine.setProperty('voice', en_voice_id)
engine.setProperty('rate', 120)
engine.setProperty('volume', 0.9)
def print_headlines(response_text):
soup = BeautifulSoup(response_text, 'lxml')
headlines = soup.find_all(attrs={"itemprop": "headline"})
for headline in headlines:
data=headline.text
print(headline.text,"\n")
engine.say(data)
engine.runAndWait()
url = 'http://www.inshorts.com/en/read/technology'
response = requests.get(url)
print_headlines(response.text)
|
py | b40d142635679aac45f2b3e11e570e6e3899431e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from advntr.advntr_commands import genotype, view_model, add_model, del_model
from advntr import settings
from advntr import __version__
class CustomHelpFormatter(argparse.HelpFormatter):
def __init__(self, prog):
super(CustomHelpFormatter, self).__init__(prog, max_help_position=40, width=110)
def _format_action_invocation(self, action):
default = self._metavar_formatter(action, action.dest)
args_string = self._format_args(action, default)
return '/'.join(action.option_strings) + ' ' + args_string
def main():
description = '=======================================================\n' \
'adVNTR %s: Genopyting tool for VNTRs\n' \
'=======================================================\n' \
'Source code: https://github.com/mehrdadbakhtiari/adVNTR\n' \
'Instructions: http://advntr.readthedocs.io\n' \
'-------------------------------------------------------\n' % __version__
help = 'Command: genotype\tfind RU counts and mutations in VNTRs\n' \
' viewmodel\tview existing models in database\n' \
' addmodel\tadd custom VNTR to the database\n' \
' delmodel\tremove a model from database\n'
usage = '\r{}\nusage: %(prog)s <command> [options]\n\n\r{}\r{}'.format(description.ljust(len('usage:')), help, '\n')
parser = argparse.ArgumentParser(usage=usage, add_help=False)
subparsers = parser.add_subparsers(title='Commands', dest='command')
fmt = lambda prog: CustomHelpFormatter(prog)
genotype_parser = subparsers.add_parser('genotype', usage='advntr genotype [options]', formatter_class=fmt,
add_help=False)
genotype_io_group = genotype_parser.add_argument_group("Input/output options")
genotype_io_group.add_argument('-a', '--alignment_file', type=str, metavar='<file>',
help='alignment file in SAM/BAM/CRAM format')
genotype_io_group.add_argument('-r', '--reference_filename', type=str, metavar='<file>',
help='path to a FASTA-formatted reference file for CRAM files. It overrides filename'
' specified in header, which is normally used to find the reference')
genotype_io_group.add_argument('-f', '--fasta', type=str, metavar='<file>',
help='Fasta file containing raw reads',)
genotype_io_group.add_argument('-p', '--pacbio', action='store_true',
help='set this flag if input file contains PacBio reads instead of Illumina reads')
genotype_io_group.add_argument('-n', '--nanopore', action='store_true',
help='set this flag if input file contains Nanopore MinION reads instead of Illumina')
genotype_io_group.add_argument('-o', '--outfile', metavar='<file>', default=None,
help='file to write results. '
                                        'adVNTR writes output to stdout if outfile is not specified.')
outfmt_choices = ['text', 'bed']
genotype_io_group.add_argument('-of', '--outfmt', metavar='<format>', default='text', choices=outfmt_choices,
help='output format. Allowed values are {'+', '.join(outfmt_choices)+'} [%(default)s]')
genotype_algortihm_group = genotype_parser.add_argument_group("Algorithm options")
genotype_algortihm_group.add_argument('-fs', '--frameshift', action='store_true',
help='set this flag to search for frameshifts in VNTR instead of copy'
' number. Supported VNTR IDs: %s' % settings.FRAMESHIFT_VNTRS)
genotype_algortihm_group.add_argument('-e', '--expansion', action='store_true',
help='set this flag to determine long expansion from PCR-free data')
genotype_algortihm_group.add_argument('-c', '--coverage', type=float, metavar='<float>',
help='average sequencing coverage in PCR-free sequencing')
genotype_algortihm_group.add_argument('--haploid', action='store_true', default=False,
help='set this flag if the organism is haploid')
genotype_algortihm_group.add_argument('-naive', '--naive', action='store_true', default=False,
help='use naive approach for PacBio reads')
genotype_others_group = genotype_parser.add_argument_group("Other options")
genotype_others_group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
genotype_others_group.add_argument('--working_directory', type=str, metavar='<path>', default=None,
help='working directory for creating temporary files needed for computation')
genotype_others_group.add_argument('-m', '--models', type=str, metavar='<file>', default=None,
help='VNTR models file [%s]' % settings.ILLUMINA_DEFAULT_MODELS_FILE)
genotype_others_group.add_argument('-t', '--threads', type=int, metavar='<int>', default=4,
help='number of threads [%(default)s]')
genotype_others_group.add_argument('-u', '--update', action='store_true', default=False,
help='set this flag to iteratively update the model')
genotype_others_group.add_argument('-vid', '--vntr_id', type=str, metavar='<text>', default=None,
help='comma-separated list of VNTR IDs')
viewmodel_parser = subparsers.add_parser('viewmodel', usage='advntr viewmodel [options]', formatter_class=fmt)
viewmodel_parser.add_argument('-g', '--gene', type=str, default='', metavar='<text>',
help='comma-separated list of Gene Names')
viewmodel_parser.add_argument('-p', '--pattern', type=str, default=None, metavar='<text>',
help='repeating pattern of VNTR in forward (5\' to 3\') direction')
viewmodel_parser.add_argument('-m', '--models', type=str, default=None, metavar='<file>',
help='VNTR models file [%s]' % settings.ILLUMINA_DEFAULT_MODELS_FILE)
addmodel_parser = subparsers.add_parser('addmodel', usage='advntr addmodel [options]', formatter_class=fmt,
add_help=False)
addmodel_args_group = addmodel_parser.add_argument_group("Required arguments")
addmodel_other_group = addmodel_parser.add_argument_group("Other options")
addmodel_args_group.add_argument('-r', '--reference', type=str, default=None, metavar='<text>',
help='Reference genome')
addmodel_args_group.add_argument('-c', '--chromosome', type=str, default=None, metavar='<text>',
help='Chromosome (e.g. chr1)')
addmodel_args_group.add_argument('-p', '--pattern', type=str, default=None, metavar='<text>',
help='First repeating pattern of VNTR in forward (5\' to 3\') direction')
addmodel_args_group.add_argument('-s', '--start', type=int, default=None, metavar='<int>',
help='Start coordinate of VNTR in forward (5\' to 3\') direction')
addmodel_args_group.add_argument('-e', '--end', type=int, default=None, metavar='<int>',
help='End coordinate of VNTR in forward (5\' to 3\') direction')
addmodel_other_group.add_argument('-g', '--gene', type=str, default=None, metavar='<text>',
help='Gene name')
addmodel_other_group.add_argument('-a', '--annotation', type=str, default=None, metavar='<text>',
help='Annotation of VNTR region')
addmodel_other_group.add_argument('-m', '--models', type=str, default=None, metavar='<file>',
help='VNTR models file [%s]' % settings.ILLUMINA_DEFAULT_MODELS_FILE)
addmodel_other_group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
delmodel_parser = subparsers.add_parser('delmodel', usage='advntr delmodel [options]', formatter_class=fmt,
add_help=False)
delmodel_args_group = delmodel_parser.add_argument_group("Required arguments")
delmodel_other_group = delmodel_parser.add_argument_group("Other options")
delmodel_args_group.add_argument('-vid', '--vntr_id', type=str, metavar='<text>', default=None,
help='VNTR ID')
delmodel_other_group.add_argument('-m', '--models', type=str, default=None, metavar='<file>',
help='VNTR models file [%s]' % settings.ILLUMINA_DEFAULT_MODELS_FILE)
delmodel_other_group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
args = parser.parse_args()
if args.command == 'genotype':
genotype(args, genotype_parser)
elif args.command == 'viewmodel':
view_model(args, viewmodel_parser)
elif args.command == 'addmodel':
add_model(args, addmodel_parser)
elif args.command == 'delmodel':
del_model(args, delmodel_parser)
else:
parser.error('Please specify a valid command')
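# Example invocations (illustrative only; file names and values are hypothetical):
#   advntr genotype -a sample.bam -o genotypes.txt
#   advntr viewmodel -g GENE_NAME
#   advntr addmodel -r hg19 -c chr1 -p GGGCGG -s 10000 -e 10200
#   advntr delmodel -vid 25561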
if __name__ == '__main__':
main()
|
py | b40d143de6d4a5416d3b03933477b70b56c429df | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['IoTAddonArgs', 'IoTAddon']
@pulumi.input_type
class IoTAddonArgs:
def __init__(__self__, *,
device_name: pulumi.Input[str],
io_t_device_details: pulumi.Input['IoTDeviceInfoArgs'],
io_t_edge_device_details: pulumi.Input['IoTDeviceInfoArgs'],
kind: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
role_name: pulumi.Input[str],
addon_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a IoTAddon resource.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input['IoTDeviceInfoArgs'] io_t_device_details: IoT device metadata to which appliance needs to be connected.
:param pulumi.Input['IoTDeviceInfoArgs'] io_t_edge_device_details: IoT edge device to which the IoT Addon needs to be configured.
:param pulumi.Input[str] kind: Addon type.
Expected value is 'IotEdge'.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] role_name: The role name.
:param pulumi.Input[str] addon_name: The addon name.
"""
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "io_t_device_details", io_t_device_details)
pulumi.set(__self__, "io_t_edge_device_details", io_t_edge_device_details)
pulumi.set(__self__, "kind", 'IotEdge')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "role_name", role_name)
if addon_name is not None:
pulumi.set(__self__, "addon_name", addon_name)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
The device name.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="ioTDeviceDetails")
def io_t_device_details(self) -> pulumi.Input['IoTDeviceInfoArgs']:
"""
IoT device metadata to which appliance needs to be connected.
"""
return pulumi.get(self, "io_t_device_details")
@io_t_device_details.setter
def io_t_device_details(self, value: pulumi.Input['IoTDeviceInfoArgs']):
pulumi.set(self, "io_t_device_details", value)
@property
@pulumi.getter(name="ioTEdgeDeviceDetails")
def io_t_edge_device_details(self) -> pulumi.Input['IoTDeviceInfoArgs']:
"""
IoT edge device to which the IoT Addon needs to be configured.
"""
return pulumi.get(self, "io_t_edge_device_details")
@io_t_edge_device_details.setter
def io_t_edge_device_details(self, value: pulumi.Input['IoTDeviceInfoArgs']):
pulumi.set(self, "io_t_edge_device_details", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Addon type.
Expected value is 'IotEdge'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> pulumi.Input[str]:
"""
The role name.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: pulumi.Input[str]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter(name="addonName")
def addon_name(self) -> Optional[pulumi.Input[str]]:
"""
The addon name.
"""
return pulumi.get(self, "addon_name")
@addon_name.setter
def addon_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "addon_name", value)
class IoTAddon(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
addon_name: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
io_t_device_details: Optional[pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']]] = None,
io_t_edge_device_details: Optional[pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
IoT Addon.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] addon_name: The addon name.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']] io_t_device_details: IoT device metadata to which appliance needs to be connected.
:param pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']] io_t_edge_device_details: IoT edge device to which the IoT Addon needs to be configured.
:param pulumi.Input[str] kind: Addon type.
Expected value is 'IotEdge'.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] role_name: The role name.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IoTAddonArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
IoT Addon.
:param str resource_name: The name of the resource.
:param IoTAddonArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IoTAddonArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
addon_name: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
io_t_device_details: Optional[pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']]] = None,
io_t_edge_device_details: Optional[pulumi.Input[pulumi.InputType['IoTDeviceInfoArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IoTAddonArgs.__new__(IoTAddonArgs)
__props__.__dict__["addon_name"] = addon_name
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if io_t_device_details is None and not opts.urn:
raise TypeError("Missing required property 'io_t_device_details'")
__props__.__dict__["io_t_device_details"] = io_t_device_details
if io_t_edge_device_details is None and not opts.urn:
raise TypeError("Missing required property 'io_t_edge_device_details'")
__props__.__dict__["io_t_edge_device_details"] = io_t_edge_device_details
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'IotEdge'
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if role_name is None and not opts.urn:
raise TypeError("Missing required property 'role_name'")
__props__.__dict__["role_name"] = role_name
__props__.__dict__["host_platform"] = None
__props__.__dict__["host_platform_type"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["version"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:IoTAddon"), pulumi.Alias(type_="azure-native:databoxedge:IoTAddon"), pulumi.Alias(type_="azure-nextgen:databoxedge:IoTAddon"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:IoTAddon"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:IoTAddon"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:IoTAddon"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:IoTAddon"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:IoTAddon"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:IoTAddon"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:IoTAddon"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:IoTAddon")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IoTAddon, __self__).__init__(
'azure-native:databoxedge/v20201201:IoTAddon',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IoTAddon':
"""
Get an existing IoTAddon resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IoTAddonArgs.__new__(IoTAddonArgs)
__props__.__dict__["host_platform"] = None
__props__.__dict__["host_platform_type"] = None
__props__.__dict__["io_t_device_details"] = None
__props__.__dict__["io_t_edge_device_details"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["version"] = None
return IoTAddon(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostPlatform")
def host_platform(self) -> pulumi.Output[str]:
"""
Host OS supported by the IoT addon.
"""
return pulumi.get(self, "host_platform")
@property
@pulumi.getter(name="hostPlatformType")
def host_platform_type(self) -> pulumi.Output[str]:
"""
Platform where the runtime is hosted.
"""
return pulumi.get(self, "host_platform_type")
@property
@pulumi.getter(name="ioTDeviceDetails")
def io_t_device_details(self) -> pulumi.Output['outputs.IoTDeviceInfoResponse']:
"""
IoT device metadata to which appliance needs to be connected.
"""
return pulumi.get(self, "io_t_device_details")
@property
@pulumi.getter(name="ioTEdgeDeviceDetails")
def io_t_edge_device_details(self) -> pulumi.Output['outputs.IoTDeviceInfoResponse']:
"""
IoT edge device to which the IoT Addon needs to be configured.
"""
return pulumi.get(self, "io_t_edge_device_details")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Addon type.
Expected value is 'IotEdge'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Addon Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Addon type
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
"""
Version of IoT running on the appliance.
"""
return pulumi.get(self, "version")
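# Example usage (illustrative; resource names and argument values are
# hypothetical, and the IoTDeviceInfoArgs fields are elided):
#
#   addon = IoTAddon(
#       "example-addon",
#       device_name="example-device",
#       io_t_device_details=IoTDeviceInfoArgs(...),
#       io_t_edge_device_details=IoTDeviceInfoArgs(...),
#       kind="IotEdge",
#       resource_group_name="example-rg",
#       role_name="example-role",
#   )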
|
py | b40d143fe76ff7acfc47960a13825e96d201c3af | """# Mutable partial
This is analogous to `functools.partial`.
Examples
--------
Make sure you have run the [setup code](setup.md).
```python
def foo(*args, **kwargs):
\ print('args', args)
\ print('kwargs', kwargs)
\ return 0
model = MyModel()
model.mutable = partial(foo, 'hello world', goodbye='moon')
model.mutable()
```
Out:
```
args ('hello world',)
kwargs {'goodbye': 'moon'}
0
```
"""
from .mutable import Mutable
from .mutable_tuple import MutableTuple
from .mutable_dict import MutableDict
class partial(Mutable):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def __init__(self, func, *args, **kwargs):
super().__init__()
assert callable(func), 'First argument must be callable'
self._python_type = None
self.func, self.args, self.kwargs = (
func, MutableTuple(args), MutableDict(kwargs)
)
def __call__(self, *args, **kwargs):
if self.func is not None:
# kwargs is mutable dictionary, args is mutable list
kwargs_ = self.kwargs.unshell()
kwargs_.update(kwargs)
return self.func(*args, *self.args.unshell(), **kwargs_)
def __repr__(self):
args_str = ', '.join([i.__repr__() for i in self.args])
kwargs_str = ', '.join([
'{}={}'.format(key, val.__repr__())
for key, val in self.kwargs.items()
])
args_kwargs_str = ''
if args_str and kwargs_str:
args_kwargs_str = args_str + ', ' + kwargs_str
elif args_str and not kwargs_str:
args_kwargs_str = args_str
elif not args_str and kwargs_str:
args_kwargs_str = kwargs_str
return '<{}({})>'.format(self.func.__name__, args_kwargs_str)
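    # register exposes a decorated function as a factory on the class, so that
    # (for example) placing `@partial.register` above `def foo(...)` makes
    # `partial.foo('hello world', goodbye='moon')` equivalent to
    # `partial(foo, 'hello world', goodbye='moon')`.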
@classmethod
def register(cls, func):
def add_function(*args, **kwargs):
return cls(func, *args, **kwargs)
setattr(cls, func.__name__, add_function)
return func |
py | b40d1508c1b45f1a52aa55e10fa8f01d23ddd6d2 | from werkzeug.exceptions import ClientDisconnected
from elasticapm.conf import constants
from elasticapm.utils import compat, get_url_dict
from elasticapm.utils.wsgi import get_environ, get_headers
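# Helpers that assemble the request/response context dictionaries reported by
# the Elastic APM agent for a Werkzeug/Flask-style request object.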
def get_data_from_request(request, capture_body=False, capture_headers=True):
result = {
"env": dict(get_environ(request.environ)),
"method": request.method,
"socket": {"remote_address": request.environ.get("REMOTE_ADDR"), "encrypted": request.is_secure},
"cookies": request.cookies,
}
if capture_headers:
result["headers"] = dict(get_headers(request.environ))
if request.method in constants.HTTP_WITH_BODY:
body = None
if request.content_type == "application/x-www-form-urlencoded":
body = compat.multidict_to_dict(request.form)
elif request.content_type and request.content_type.startswith("multipart/form-data"):
body = compat.multidict_to_dict(request.form)
if request.files:
body["_files"] = {
field: val[0].filename if len(val) == 1 else [f.filename for f in val]
for field, val in compat.iterlists(request.files)
}
else:
try:
body = request.get_data(as_text=True)
except ClientDisconnected:
pass
if body is not None:
result["body"] = body if capture_body else "[REDACTED]"
result["url"] = get_url_dict(request.url)
return result
def get_data_from_response(response, capture_headers=True):
result = {}
if isinstance(getattr(response, "status_code", None), compat.integer_types):
result["status_code"] = response.status_code
if capture_headers and getattr(response, "headers", None):
headers = response.headers
result["headers"] = {key: ";".join(headers.getlist(key)) for key in compat.iterkeys(headers)}
return result
|
py | b40d167e0ebd4a5ed4c8562a8b0f036c3c9b065d | # -*- coding: utf-8 -*-
"""
Python runner for MDStudio library unit tests, run as:
::
python tests
"""
import os
import sys
import unittest
# Add modules in package to path so we can import them
modulepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, modulepath)
def module_test_suite():
"""
Run MDStudio_SMARTCyp module unit tests.
"""
loader = unittest.TestLoader()
print('Running MDStudio unittests')
testpath = os.path.join(os.path.dirname(__file__), 'api')
suite = loader.discover(testpath, pattern='test_*.py')
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(suite).wasSuccessful()
if __name__ == '__main__':
ret = module_test_suite()
sys.exit(not ret)
|
py | b40d1703eae62abbc8515f3d1920146fb113db2b | import math
from typing import Dict, List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import ROI_HEADS_REGISTRY
from adet.layers import conv_with_kaiming_uniform
from ..poolers import TopPooler
from .attn_predictor import ATTPredictor
class SeqConvs(nn.Module):
def __init__(self, conv_dim, roi_size):
super().__init__()
height = roi_size[0]
downsample_level = math.log2(height) - 2
assert math.isclose(downsample_level, int(downsample_level))
downsample_level = int(downsample_level)
conv_block = conv_with_kaiming_uniform(
norm="BN", activation=True)
convs = []
for i in range(downsample_level):
convs.append(conv_block(
conv_dim, conv_dim, 3, stride=(2, 1)))
convs.append(nn.Conv2d(conv_dim, conv_dim, kernel_size=(4, 1), bias=False))
self.convs = nn.Sequential(*convs)
def forward(self, x):
return self.convs(x)
class RNNPredictor(nn.Module):
def __init__(self, cfg):
super(RNNPredictor, self).__init__()
# fmt: off
self.voc_size = cfg.MODEL.BATEXT.VOC_SIZE
conv_dim = cfg.MODEL.BATEXT.CONV_DIM
roi_size = cfg.MODEL.BATEXT.POOLER_RESOLUTION
# fmt: on
self.convs = SeqConvs(conv_dim, roi_size)
self.rnn = nn.LSTM(conv_dim, conv_dim, num_layers=1, bidirectional=True)
self.clf = nn.Linear(conv_dim * 2, self.voc_size + 1)
self.recognition_loss_fn = build_recognition_loss_fn()
def forward(self, x, targets=None):
# check empty
if x.size(0) == 0:
return x.new_zeros((x.size(2), 0, self.voc_size))
x = self.convs(x).squeeze(dim=2) # NxCxW
x = x.permute(2, 0, 1) # WxNxC
x, _ = self.rnn(x)
preds = self.clf(x)
if self.training:
rec_loss = self.recognition_loss_fn(preds, targets, self.voc_size)
return preds, rec_loss
else:
# (W, N, C) -> (N, W, C)
_, preds = preds.permute(1, 0, 2).max(dim=-1)
return preds, None
### CoordConv
class MaskHead(nn.Module):
def __init__(self, cfg):
super(MaskHead, self).__init__()
conv_dim = cfg.MODEL.BATEXT.CONV_DIM
conv_block = conv_with_kaiming_uniform(
norm="BN", activation=True)
convs = []
convs.append(conv_block(258, conv_dim, 3, 1))
for i in range(3):
convs.append(conv_block(
conv_dim, conv_dim, 3, 1))
self.mask_convs = nn.Sequential(*convs)
def forward(self, features):
x_range = torch.linspace(-1, 1, features.shape[-1], device=features.device)
y_range = torch.linspace(-1, 1, features.shape[-2], device=features.device)
y, x = torch.meshgrid(y_range, x_range)
y = y.expand([features.shape[0], 1, -1, -1])
x = x.expand([features.shape[0], 1, -1, -1])
coord_feat = torch.cat([x, y], 1)
ins_features = torch.cat([features, coord_feat], dim=1)
mask_features = self.mask_convs(ins_features)
return mask_features
def build_recognizer(cfg, type):
if type == 'rnn':
return RNNPredictor(cfg)
if type == 'attn':
return ATTPredictor(cfg)
else:
raise NotImplementedError("{} is not a valid recognizer".format(type))
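# ctc_loss expects `preds` of shape (W, N, voc_size + 1) as produced by the
# recognizer above, and `targets` of shape (N, max_len) padded with `voc_size`;
# padding entries are stripped before the CTC loss is computed.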
def ctc_loss(preds, targets, voc_size):
# prepare targets
target_lengths = (targets != voc_size).long().sum(dim=-1)
trimmed_targets = [t[:l] for t, l in zip(targets, target_lengths)]
targets = torch.cat(trimmed_targets)
x = F.log_softmax(preds, dim=-1)
input_lengths = torch.full((x.size(1),), x.size(0), dtype=torch.long)
return F.ctc_loss(
x, targets, input_lengths, target_lengths,
blank=voc_size, zero_infinity=True
)
def build_recognition_loss_fn(rec_type="ctc"):
if rec_type == "ctc":
return ctc_loss
else:
raise NotImplementedError("{} is not a valid recognition loss".format(rec_type))
@ROI_HEADS_REGISTRY.register()
class TextHead(nn.Module):
"""
TextHead performs text region alignment and recognition.
It is a simplified ROIHeads, only ground truth RoIs are
used during training.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
"""
Args:
in_channels (int): number of channels of the input feature
"""
super(TextHead, self).__init__()
# fmt: off
pooler_resolution = cfg.MODEL.BATEXT.POOLER_RESOLUTION
pooler_scales = cfg.MODEL.BATEXT.POOLER_SCALES
sampling_ratio = cfg.MODEL.BATEXT.SAMPLING_RATIO
conv_dim = cfg.MODEL.BATEXT.CONV_DIM
num_conv = cfg.MODEL.BATEXT.NUM_CONV
canonical_size = cfg.MODEL.BATEXT.CANONICAL_SIZE
self.in_features = cfg.MODEL.BATEXT.IN_FEATURES
self.voc_size = cfg.MODEL.BATEXT.VOC_SIZE
recognizer = cfg.MODEL.BATEXT.RECOGNIZER
self.top_size = cfg.MODEL.TOP_MODULE.DIM
self.coordconv = cfg.MODEL.BATEXT.USE_COORDCONV
self.aet = cfg.MODEL.BATEXT.USE_AET
# fmt: on
self.pooler = TopPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="BezierAlign",
canonical_box_size=canonical_size,
canonical_level=3,
assign_crit="bezier")
conv_block = conv_with_kaiming_uniform(
norm="BN", activation=True)
tower = []
for i in range(num_conv):
tower.append(
conv_block(conv_dim, conv_dim, 3, 1))
self.tower = nn.Sequential(*tower)
if self.coordconv:
self.mask_head = MaskHead(cfg)
self.recognizer = build_recognizer(cfg, recognizer)
def forward(self, images, features, proposals, targets=None):
"""
see detectron2.modeling.ROIHeads
"""
del images
features = [features[f] for f in self.in_features]
if self.coordconv:
mask_features = []
for i in range(len(features)):
mask_feat = self.mask_head(features[i])
all_feat = mask_feat + features[i]
mask_features.append(all_feat)
features = mask_features
if self.training:
beziers = [p.beziers for p in targets]
if not self.aet:
targets = torch.cat([x.text for x in targets], dim=0)
else:
beziers2 = [p.top_feat for p in proposals]
for k in range(len(targets)):
rec_assign = [int(torch.argmin(torch.abs(beziers[k] - beziers2[k][i]).sum(dim=1))) for i in range(len(beziers2[k]))]
targets[k] = torch.cat([targets[k].text, targets[k].text[rec_assign]], dim = 0)
targets = torch.cat([x for x in targets], dim = 0)
cat_beziers = []
for ix in range(len(beziers)):
cat_beziers.append(cat((beziers[ix], beziers2[ix]), dim=0))
beziers = cat_beziers
else:
beziers = [p.top_feat for p in proposals]
bezier_features = self.pooler(features, beziers)
bezier_features = self.tower(bezier_features)
# TODO: move this part to recognizer
if self.training:
preds, rec_loss = self.recognizer(bezier_features, targets)
rec_loss *= 0.05
losses = {'rec_loss': rec_loss}
return None, losses
else:
if bezier_features.size(0) == 0:
for box in proposals:
box.beziers = box.top_feat
box.recs = box.top_feat
return proposals, {}
preds, _ = self.recognizer(bezier_features, targets)
start_ind = 0
for proposals_per_im in proposals:
end_ind = start_ind + len(proposals_per_im)
proposals_per_im.recs = preds[start_ind:end_ind]
proposals_per_im.beziers = proposals_per_im.top_feat
start_ind = end_ind
return proposals, {}
|
py | b40d178d5b5806fc230b14cdb7adb63c16775657 | import argparse
import os
import time
import cv2
import tensorflow as tf
from tensorflow.python.platform import gfile
import detect_and_align
import cognitive_face as CF
def predictFace(img, person_group_id=2, large_person_group_id=None, max_candidates_return=2, threshold=0.5):
faces = CF.face.detect(img)
res = CF.face.identify([faces[0]['faceId']], person_group_id, large_person_group_id, max_candidates_return, threshold)
return res
def load_model(model):
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Loading model filename: %s' % model_exp)
with gfile.FastGFile(model_exp, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
else:
raise ValueError('Specify model file, not directory!')
def main(args):
with tf.Graph().as_default():
with tf.Session() as sess:
# Setup models
load_model(args.model)
mtcnn = detect_and_align.create_mtcnn(sess, None)
cap = cv2.VideoCapture(0)
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
show_landmarks = False
show_bb = True
show_fps = True
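            # Keyboard controls handled below: 'q' quits, 'l' toggles landmarks,
            # 'b' toggles bounding boxes, 'f' toggles the FPS counter.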
while(True):
start = time.time()
_, frame = cap.read()
# Locate faces and landmarks in frame
face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)
if len(face_patches) > 0:
for bb, landmark in zip(padded_bounding_boxes, landmarks):
if show_bb:
cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
if show_landmarks:
for j in range(5):
size = 1
top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
else:
print('Couldn\'t find a face')
end = time.time()
seconds = end - start
fps = round(1 / seconds, 2)
if show_fps:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
elif key == ord('l'):
show_landmarks = not show_landmarks
elif key == ord('b'):
show_bb = not show_bb
elif key == ord('f'):
show_fps = not show_fps
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='Path to model protobuf (.pb) file')
main(parser.parse_args())
|
py | b40d1798f38f53ff5bbe4b9961a64f05b9fb1e35 | #!/usr/bin/python
import sys
import numpy as np
import pandas as pd
from . import AC_tools as AC
"""
Example processing of SMVGEAR prod/loss tags for the GEOS-Chem diagnostic (ND65). A variety
of functions for working with KPP/SMVGEAR tags are in AC_tools
(funcs4GESOSC/variables).
NOTES
---
- This code is for working with SMVGEAR diagnostics. For KPP P/L see
Tagged_GC_KPP_Mechanism4family.py
- details on the GEOS-Chem diagnostic are in the GEOS-Chem manual
(http://acmg.seas.harvard.edu/geos/doc/man/chapter_13.html)
"""
# --- Master debug setting
DEBUG = True
def main(trop_limit=True, res='4x5', debug=False):
"""
Get prod loss output for a family and print this to screen
"""
# --- Get family from Command line (and other vars)
wd = sys.argv[1]
spec = sys.argv[2]
# version?
ver = AC.iGEOSChem_ver(wd)
# --- Get all tags for this family (through dictionary route)
# ( e.g. 'PIOx', 'LIOx', 'P_Iy', 'L_Iy' )
nums, rxns, tags, Coe = AC.prod_loss_4_spec(wd, spec, ver=ver)
    # beautify reaction strings
    rxnstr_l = [''.join(i[4:]) for i in rxns]
    # only consider one tag per reaction and tagged reactions
try:
tags = [i[0] for i in tags] # just consider first tag
except:
print('WARNING! - attempting to process just tagged reactions')
detail_zip = list(zip(rxnstr_l, list(zip(nums, tags))))
untagged = [n for n, i in enumerate(tags) if (len(i) < 1)]
print(('Untagged reactions: ', [detail_zip[i] for i in untagged]))
tags = [i for n, i in enumerate(tags) if (n not in untagged)]
tags = [i[0] for i in tags] # just consider first tag
# tags.pop( tags.index('LR71') ) # rm tag for ClOO loss...
# --- Extract prod loss for these tracers
# get prod loss IDs
PDs = [AC.PLO3_to_PD(i, ver=ver, wd=wd, fp=True) for i in tags]
# extract en mass
fam_loss = AC.get_GC_output(wd, vars=['PORL_L_S__'+i for i in PDs],
trop_limit=trop_limit, r_list=True)
# print [ ( i.shape, i.sum() ) for i in fam_loss ]
# Get reference species for family ( e.g. so output is in X g of Y )
ref_spec = AC.get_ref_spec(spec)
    # get shared variable arrays
    s_area = AC.get_surface_area(res=res)[..., 0]  # m2 land map
    # convert to mass terms ( in g X )
    fam_loss = AC.convert_molec_cm3_s_2_g_X_s(ars=fam_loss,
        ref_spec=ref_spec, wd=wd, conbine_ars=False,
        rm_strat=True, month_eq=True)
print([i.shape for i in fam_loss])
# sum and convert to Gg
p_l = [i.sum() / 1E9 for i in fam_loss]
# --- print output as: reaction, magnitude, percent of family
pcent = [np.sum(i)/np.sum(p_l)*100 for i in p_l]
d = dict(list(zip(tags, list(zip(rxnstr_l, p_l, pcent)))))
df = pd.DataFrame(d).T
df.columns = ['rxn', 'Gg X', '% of total']
# sort
df = df.sort_values(['% of total'], ascending=False)
print(df)
if __name__ == "__main__":
main(debug=DEBUG)
|
py | b40d18a5c25bcd3a68da96c2453ce3c2c094828b | from typing import List
from pydantic import BaseModel
from rdkit import Chem
import openeye.oechem as oechem
import openeye.oeomega as oeomega
from copy import deepcopy
from typing_extensions import Literal
from dockstream.utils.dockstream_exceptions import LigandPreparationFailed
from dockstream.core.ligand_preparator import LigandPreparator, _LE
from dockstream.core.RDkit.RDkit_ligand_preparator import RDkitLigandPreparator
from dockstream.utils.translations.molecule_translator import MoleculeTranslator
from dockstream.utils.translations.translation import RDkitMolToOpenEyeMol
from dockstream.utils.enums.OpenEye_enums import OpenEyeLigandPreparationEnum
from dockstream.core.ligand.ligand import Ligand
_LP = OpenEyeLigandPreparationEnum()
class OpenEyeLigandPreparator(LigandPreparator, BaseModel):
type: Literal["OpenEye"] = "OpenEye"
class Config:
underscore_attrs_are_private = True
def __init__(self, **data):
super().__init__(**data)
def _initialize_ligands(self):
super()._initialize_ligands()
def _load_references(self):
references = []
for path in self.align.reference_paths:
mol_supplier = oechem.oemolistream()
# set the provided format
ref_format = self.align.reference_format.upper()
if ref_format == _LP.ALIGN_REFERENCE_FORMAT_SDF:
mol_supplier.SetFormat(oechem.OEFormat_SDF)
elif ref_format == _LP.ALIGN_REFERENCE_FORMAT_PDB:
mol_supplier.SetFormat(oechem.OEFormat_PDB)
else:
raise LigandPreparationFailed("Specified format not supported!")
if mol_supplier.open(path):
for mol in mol_supplier.GetOEMols():
references.append(oechem.OEMol(mol))
else:
oechem.OEThrow.Fatal("Unable to create specified output file.")
if len(references) == 0:
raise LigandPreparationFailed("No reference molecules could be loaded at path(s) specified.")
self._references = references
self._logger.log(f"Stored {len(references)} reference molecules.", _LE.DEBUG)
def _get_RDkit_aligner(self, conf, ligands):
return RDkitLigandPreparator(ligands=ligands, **conf)
def _smiles_to_molecules(self, ligands: List[Ligand]) -> List[Ligand]:
for lig in ligands:
lig_molecule = oechem.OEMol()
oechem.OESmilesToMol(lig_molecule, lig.get_smile())
lig.set_molecule(lig_molecule)
lig.set_mol_type(_LP.TYPE_OPENEYE)
return ligands
def generate3Dcoordinates(self):
"""Method to generate 3D coordinates, in case the molecules have been built from SMILES."""
for lig in self.ligands:
lig.set_molecule(None)
lig.set_mol_type(None)
ligand_list = self._smiles_to_molecules(deepcopy(self.ligands))
failed = 0
succeeded = 0
builder = oeomega.OEConformerBuilder()
for idx, ligand in enumerate(ligand_list):
inp_mol = ligand.get_molecule()
if inp_mol is None:
continue
return_code = builder.Build(inp_mol)
if return_code != oeomega.OEOmegaReturnCode_Success:
failed += 1
self._logger.log(f"The 3D coordinate generation of molecule {ligand.get_ligand_number()} (smile: {ligand.get_smile()}) failed (oeomega return code={return_code}).",
_LE.DEBUG)
continue
self.ligands[idx] = Ligand(smile=ligand.get_smile(),
original_smile=ligand.get_original_smile(),
ligand_number=ligand.get_ligand_number(),
enumeration=ligand.get_enumeration(),
molecule=oechem.OEMol(inp_mol),
mol_type=_LP.TYPE_OPENEYE,
name=ligand.get_name())
succeeded += 1
if failed > 0:
self._logger.log(f"Of {len(self.ligands)}, {failed} could not be embedded.", _LE.WARNING)
self._logger.log(f"In total, {succeeded} ligands were successfully embedded (oeomega).", _LE.DEBUG)
def align_ligands(self):
if self.align.mode != _LP.ALIGN_MODE_INTERNAL:
raise LigandPreparationFailed("Only internal alignment supported at the moment.")
if self._references is None:
raise LigandPreparationFailed("No reference molecule has been found.")
# use the general, internal alignment technique
# ---------
# 1) translate the ligands from openeye to rdkit and do not use "bySMILES" method, as
# coordinates would be lost
mol_trans = MoleculeTranslator(self.ligands)
ligands_rdkit = mol_trans.get_as_rdkit()
self._logger.log(f"Align: Of {len(self.ligands)}, {len(ligands_rdkit)} were translated to RDkit molecules.",
_LE.DEBUG)
# 2) do the alignment to a reference molecule; also disable RDkit logger
ligands_rdkit = self._align_ligands_with_RDkit_preparator(ligands_rdkit)
# 3) translate ligands back and update internal collection
mol_trans = MoleculeTranslator(ligands_rdkit)
translated_mols = mol_trans.get_as_openeye()
for lig, translated_mol in zip(self.ligands, translated_mols):
lig.set_molecule(translated_mol.get_molecule())
def write_ligands(self, path, format):
ofs = oechem.oemolostream()
format = format.upper()
ligands_copy = [deepcopy(lig) for lig in self.ligands]
# check and specify format of file
if format == _LP.OUTPUT_FORMAT_SDF:
ofs.SetFormat(oechem.OEFormat_SDF)
elif format == _LP.OUTPUT_FORMAT_MOL2:
ofs.SetFormat(oechem.OEFormat_MOL2)
else:
raise LigandPreparationFailed("Specified output format unknown.")
if ofs.open(path):
for lig in ligands_copy:
lig.add_tags_to_molecule()
if lig.get_molecule() is not None:
mol = deepcopy(lig.get_molecule())
mol.SetTitle(lig.get_identifier())
oechem.OEWriteMolecule(ofs, mol)
else:
oechem.OEThrow.Fatal("Unable to create specified output file.")
ofs.close()
self._logger.log(f"Wrote {len(self.ligands)} molecules to file {path} (format: {format}).", _LE.DEBUG)
def _make_ligands_from_molecules(self, ligands):
buffer = []
if isinstance(ligands[0], Chem.Mol):
ligands = [RDkitMolToOpenEyeMol(mol, bySMILES=False) for mol in ligands]
for index_mol, mol in enumerate(ligands):
buffer.append(Ligand(smile=oechem.OEMolToSmiles(mol),
ligand_number=index_mol,
enumeration=0,
molecule=mol,
mol_type=_LP.TYPE_OPENEYE))
self.ligands = buffer
|
py | b40d19fe4d56a519feec686bb88aba4276424152 | import tensorflow as tf
import tensorflow.keras.backend as K
def dice_loss(y_true, y_pred, loss_type='jaccard', smooth=1.):
y_true_f = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
y_pred_f = tf.cast(tf.reshape(y_pred, [-1]), tf.float32)
intersection = tf.reduce_sum(y_true_f * y_pred_f)
if loss_type == 'jaccard':
union = tf.reduce_sum(tf.square(y_pred_f)) + tf.reduce_sum(
tf.square(y_true_f))
elif loss_type == 'sorensen':
union = tf.reduce_sum(y_pred_f) + tf.reduce_sum(y_true_f)
else:
raise ValueError("Unknown `loss_type`: %s" % loss_type)
return (1 - (2. * intersection + smooth) / (union + smooth))
def soft_dice_loss(y_true, y_pred, smooth=1):
eps = K.epsilon()
y_pred = K.clip(y_pred, eps, 1 - eps)
return tf.reduce_mean(
1 - (2 * tf.reduce_sum(y_true * y_pred, axis=(1, 2, 3, 4)) + smooth) /
(tf.reduce_sum(y_true, axis=(1, 2, 3, 4)) +
tf.reduce_sum(y_pred, axis=(1, 2, 3, 4)) + smooth))
def dice_coe_loss(y_true, y_pred, loss_type='jaccard', smooth=1.):
return 1 - dice_coe(y_true, y_pred, loss_type=loss_type, smooth=smooth)
def dice_coe_hard(y_true, y_pred, loss_type='sorensen', smooth=1.):
return dice_coe(y_true, tf.cast(y_pred>0.5,tf.float32), loss_type=loss_type, smooth=smooth)
def dice_coe(y_true, y_pred, loss_type='jaccard', smooth=1.):
#tf.print(y_true.shape,'y_true.shape')
#tf.print(y_pred.shape,'y_pred.shape')
intersection = tf.reduce_sum(y_true * y_pred, axis=(1, 2, 3, 4))
#tf.print(intersection.shape,'intersection.shape')
if loss_type == 'jaccard':
union = tf.reduce_sum(tf.square(y_pred), axis=(1, 2, 3, 4)) + tf.reduce_sum(
tf.square(y_true), axis=(1, 2, 3, 4))
elif loss_type == 'sorensen':
union = tf.reduce_sum(y_pred, axis=(1, 2, 3, 4)) + tf.reduce_sum(y_true, axis=(1, 2, 3, 4))
else:
raise ValueError("Unknown `loss_type`: %s" % loss_type)
#tf.print(union.shape,'union.shape')
#tf.print(tf.reduce_mean((2. * intersection + smooth) / (union + smooth)).shape,'output shape')
#tf.print(tf.reduce_mean((2. * intersection + smooth) / (union + smooth)),'output')
return tf.reduce_mean((2. * intersection + smooth) / (union + smooth))
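# Quick sanity check (illustrative; tensor shapes are hypothetical): for a
# perfect prediction the Dice coefficient approaches 1 and the loss 0, e.g.
#   y = tf.ones((2, 8, 8, 8, 1))
#   dice_coe(y, y)       # ~1.0
#   dice_coe_loss(y, y)  # ~0.0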
def focal_loss(y_true, y_pred, gamma=2):
n_pos = tf.reduce_sum(y_true)
bs = y_true.shape[0]
return -tf.reduce_sum(y_true * tf.math.log(y_pred) *
(1 - y_pred)**gamma) / bs / n_pos
def focal_loss_fix(y_true, y_pred, gamma=2):
n_pos = tf.reduce_sum(y_true)
bs = y_true.shape[0]
return -tf.reduce_sum(0.25 * y_true * tf.math.log(y_pred) *
(1 - y_pred)**gamma) / bs / n_pos
def binary_focal_loss(y_true, y_pred, gamma=2):
alpha = 1 / K.sum(y_true, axis=(1, 2, 3, 4))
y_true = tf.cast(y_true, tf.float32)
# Define epsilon so that the back-propagation will not result in NaN for 0 divisor case
epsilon = K.epsilon()
# Add the epsilon to prediction value
# y_pred = y_pred + epsilon
# Clip the prediciton value
y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
# Calculate p_t
loss = (y_true * (-alpha) * (1 - y_pred)**gamma * K.log(y_pred))
return K.mean(K.sum(loss, axis=(1, 2, 3, 4)))
def binary_focal_loss_custom(y_true, y_pred, gamma=2):
alpha = 1 / K.sum(y_true, axis=(1, 2, 3, 4))
beta = 1 / K.sum(tf.cast(tf.equal(y_true, 0.0), dtype=tf.float32),
axis=(1, 2, 3, 4))
y_true = tf.cast(y_true, tf.float32)
# Define epsilon so that the back-propagation will not result in NaN for 0 divisor case
epsilon = K.epsilon()
# Add the epsilon to prediction value
# y_pred = y_pred + epsilon
# Clip the prediciton value
y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
# Calculate p_t
loss = (y_true * (-alpha) * (1 - y_pred)**gamma * K.log(y_pred)) - (
(1 - y_true) * (beta) * y_pred**gamma * K.log(1 - y_pred))
return K.mean(K.sum(loss, axis=(1, 2, 3, 4))) / 2
def binary_focal_loss_fixed(y_true, y_pred, alpha=1, gamma=2):
y_true = tf.cast(y_true, tf.float32)
# Define epsilon so that the back-propagation will not result in NaN for 0 divisor case
epsilon = K.epsilon()
# Add the epsilon to prediction value
# y_pred = y_pred + epsilon
# Clip the prediciton value
y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
# Calculate p_t
p_t = tf.where(K.equal(y_true, 1), y_pred, 1 - y_pred)
# Calculate alpha_t
alpha_factor = K.ones_like(y_true) * alpha
alpha_t = tf.where(K.equal(y_true, 1), alpha_factor, 1 - alpha_factor)
# Calculate cross entropy
cross_entropy = -K.log(p_t)
weight = alpha_t * K.pow((1 - p_t), gamma)
# Calculate focal loss
loss = weight * cross_entropy
# Sum the losses in mini_batch
loss = K.mean(loss)
return loss
def tf_focal_loss(prediction_tensor,
target_tensor,
weights=None,
alpha=0.25,
gamma=2):
sigmoid_p = tf.nn.sigmoid(prediction_tensor)
zeros = tf.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
# For poitive prediction, only need consider front part loss, back part is 0;
# target_tensor > zeros <=> z=1, so poitive coefficient = z - p.
pos_p_sub = tf.where(target_tensor > zeros, target_tensor - sigmoid_p,
zeros)
# For negative prediction, only need consider back part loss, front part is 0;
# target_tensor > zeros <=> z=1, so negative coefficient = 0.
neg_p_sub = tf.where(target_tensor > zeros, zeros, sigmoid_p)
per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.math.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * tf.math.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
return tf.reduce_sum(per_entry_cross_ent)
def labels_to_one_hot(ground_truth, num_classes=1):
"""
Converts ground truth labels to one-hot, sparse tensors.
Used extensively in segmentation losses.
:param ground_truth: ground truth categorical labels (rank `N`)
:param num_classes: A scalar defining the depth of the one hot dimension
(see `depth` of `tf.one_hot`)
:return: one-hot sparse tf tensor
(rank `N+1`; new axis appended at the end)
"""
# read input/output shapes
if isinstance(num_classes, tf.Tensor):
num_classes_tf = tf.cast(num_classes, tf.int32)
else:
num_classes_tf = tf.constant(num_classes, tf.int32)
input_shape = tf.shape(ground_truth)
output_shape = tf.concat(
[input_shape, tf.reshape(num_classes_tf, (1, ))], 0)
if num_classes == 1:
# need a sparse representation?
return tf.reshape(ground_truth, output_shape)
# squeeze the spatial shape
ground_truth = tf.reshape(ground_truth, (-1, ))
# shape of squeezed output
dense_shape = tf.stack([tf.shape(ground_truth)[0], num_classes_tf], 0)
# create a rank-2 sparse tensor
ground_truth = tf.cast(ground_truth, tf.int64)
ids = tf.range(tf.cast(dense_shape[0], tf.int64), dtype=tf.int64)
ids = tf.stack([ids, ground_truth], axis=1)
one_hot = tf.SparseTensor(indices=ids,
values=tf.ones_like(ground_truth,
dtype=tf.float32),
dense_shape=tf.cast(dense_shape, tf.int64))
# resume the spatial dims
one_hot = tf.sparse.reshape(one_hot, output_shape)
return one_hot
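# Illustrative example: labels_to_one_hot(tf.constant([0, 2, 1]), 3) yields a
# sparse tensor of shape (3, 3) with ones at positions (0, 0), (1, 2) and (2, 1).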
def undecided_loss(prediction, ground_truth, weight_map=None):
"""
:param prediction:
:param ground_truth:
:param weight_map:
:return:
"""
ratio_undecided = 1.0 / tf.cast(tf.shape(prediction)[-1], tf.float32)
res_undecided = tf.reciprocal(
tf.reduce_mean(tf.abs(prediction - ratio_undecided), -1) + 0.0001)
if weight_map is None:
return tf.reduce_mean(res_undecided)
else:
res_undecided = tf.Print(tf.cast(res_undecided, tf.float32), [
tf.shape(res_undecided),
tf.shape(weight_map),
tf.shape(res_undecided * weight_map)
],
message='test_printshape_und')
return tf.reduce_sum(res_undecided * weight_map /
tf.reduce_sum(weight_map))
def volume_enforcement(prediction,
ground_truth,
weight_map=None,
eps=0.001,
hard=False):
"""
Computing a volume enforcement loss to ensure that the obtained volumes are
close and avoid empty results when something is expected
:param prediction:
:param ground_truth: labels
:param weight_map: potential weight map to apply
:param eps: epsilon to use as regulariser
:return:
"""
prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
gt_red = tf.sparse_reduce_sum(one_hot, 0)
pred_red = tf.reduce_sum(prediction, 0)
if hard:
pred_red = tf.sparse_reduce_sum(
labels_to_one_hot(tf.argmax(prediction, -1),
tf.shape(prediction)[-1]), 0)
if weight_map is not None:
n_classes = prediction.shape[1].value
weight_map_nclasses = tf.tile(
tf.expand_dims(tf.reshape(weight_map, [-1]), 1), [1, n_classes])
gt_red = tf.sparse_reduce_sum(weight_map_nclasses * one_hot,
reduction_axes=[0])
pred_red = tf.reduce_sum(weight_map_nclasses * prediction, 0)
return tf.reduce_mean(
tf.sqrt(
tf.square((gt_red + eps) / (pred_red + eps) - (pred_red + eps) /
(gt_red + eps))))
def volume_enforcement_fin(prediction,
ground_truth,
weight_map=None,
eps=0.001):
"""
Computing a volume enforcement loss to ensure that the obtained volumes are
close and avoid empty results when something is expected
:param prediction:
:param ground_truth:
:param weight_map:
:param eps:
:return:
"""
prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
gt_red = tf.sparse_reduce_sum(one_hot, 0)
pred_red = tf.sparse_reduce_sum(
labels_to_one_hot(tf.argmax(prediction, -1),
tf.shape(prediction)[-1]), 0)
if weight_map is not None:
n_classes = prediction.shape[1].value
weight_map_nclasses = tf.tile(
tf.expand_dims(tf.reshape(weight_map, [-1]), 1), [1, n_classes])
gt_red = tf.sparse_reduce_sum(weight_map_nclasses * one_hot,
reduction_axes=[0])
pred_red = tf.sparse_reduce_sum(
labels_to_one_hot(tf.argmax(prediction, -1),
tf.shape(prediction)[-1]) * weight_map_nclasses,
0)
return tf.reduce_mean(
tf.sqrt(
tf.square((gt_red + eps) / (pred_red + eps) - (pred_red + eps) /
(gt_red + eps))))
def generalised_dice_loss(prediction,
ground_truth,
weight_map=None,
type_weight='Square'):
"""
Function to calculate the Generalised Dice Loss defined in
Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017
:param prediction: the logits
:param ground_truth: the segmentation ground truth
:param weight_map:
:param type_weight: type of weighting allowed between labels (choice
between Square (square of inverse of volume),
Simple (inverse of volume) and Uniform (no weighting))
:return: the loss
"""
prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
if weight_map is not None:
num_classes = prediction.shape[1].value
# weight_map_nclasses = tf.reshape(
# tf.tile(weight_map, [num_classes]), prediction.get_shape())
weight_map_nclasses = tf.tile(
tf.expand_dims(tf.reshape(weight_map, [-1]), 1), [1, num_classes])
ref_vol = tf.sparse_reduce_sum(weight_map_nclasses * one_hot,
reduction_axes=[0])
intersect = tf.sparse_reduce_sum(weight_map_nclasses * one_hot *
prediction,
reduction_axes=[0])
seg_vol = tf.reduce_sum(tf.multiply(weight_map_nclasses, prediction),
0)
else:
ref_vol = tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
intersect = tf.sparse_reduce_sum(one_hot * prediction,
reduction_axes=[0])
seg_vol = tf.reduce_sum(prediction, 0)
if type_weight == 'Square':
weights = tf.reciprocal(tf.square(ref_vol))
elif type_weight == 'Simple':
weights = tf.reciprocal(ref_vol)
elif type_weight == 'Uniform':
weights = tf.ones_like(ref_vol)
else:
raise ValueError("The variable type_weight \"{}\""
"is not defined.".format(type_weight))
new_weights = tf.where(tf.is_inf(weights), tf.zeros_like(weights), weights)
weights = tf.where(tf.is_inf(weights),
tf.ones_like(weights) * tf.reduce_max(new_weights),
weights)
generalised_dice_numerator = \
2 * tf.reduce_sum(tf.multiply(weights, intersect))
# generalised_dice_denominator = \
# tf.reduce_sum(tf.multiply(weights, seg_vol + ref_vol)) + 1e-6
generalised_dice_denominator = tf.reduce_sum(
tf.multiply(weights, tf.maximum(seg_vol + ref_vol, 1)))
generalised_dice_score = \
generalised_dice_numerator / generalised_dice_denominator
generalised_dice_score = tf.where(tf.is_nan(generalised_dice_score), 1.0,
generalised_dice_score)
return 1 - generalised_dice_score
def dice_plus_xent_loss(prediction, ground_truth, weight_map=None):
"""
Function to calculate the loss used in https://arxiv.org/pdf/1809.10486.pdf,
no-new net, Isenseee et al (used to win the Medical Imaging Decathlon).
It is the sum of the cross-entropy and the Dice-loss.
:param prediction: the logits
:param ground_truth: the segmentation ground truth
:param weight_map:
:return: the loss (cross_entropy + Dice)
"""
num_classes = tf.shape(prediction)[-1]
prediction = tf.cast(prediction, tf.float32)
loss_xent = cross_entropy(prediction, ground_truth, weight_map=weight_map)
# Dice as according to the paper:
one_hot = labels_to_one_hot(ground_truth, num_classes=num_classes)
softmax_of_logits = tf.nn.softmax(prediction)
if weight_map is not None:
weight_map_nclasses = tf.tile(tf.reshape(weight_map, [-1, 1]),
[1, num_classes])
dice_numerator = 2.0 * tf.sparse_reduce_sum(
weight_map_nclasses * one_hot * softmax_of_logits,
reduction_axes=[0])
dice_denominator = \
tf.reduce_sum(weight_map_nclasses * softmax_of_logits,
reduction_indices=[0]) + \
tf.sparse_reduce_sum(one_hot * weight_map_nclasses,
reduction_axes=[0])
else:
dice_numerator = 2.0 * tf.sparse_reduce_sum(
one_hot * softmax_of_logits, reduction_axes=[0])
dice_denominator = \
tf.reduce_sum(softmax_of_logits, reduction_indices=[0]) + \
tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
epsilon = 0.00001
loss_dice = -(dice_numerator + epsilon) / (dice_denominator + epsilon)
dice_numerator = tf.Print(dice_denominator,
[dice_numerator, dice_denominator, loss_dice])
return loss_dice + loss_xent
def sensitivity_specificity_loss(prediction,
ground_truth,
weight_map=None,
r=0.05):
"""
Function to calculate a multiple-ground_truth version of
the sensitivity-specificity loss defined in "Deep Convolutional
Encoder Networks for Multiple Sclerosis Lesion Segmentation",
Brosch et al, MICCAI 2015,
https://link.springer.com/chapter/10.1007/978-3-319-24574-4_1
error is the sum of r(specificity part) and (1-r)(sensitivity part)
:param prediction: the logits
:param ground_truth: segmentation ground_truth.
:param r: the 'sensitivity ratio'
(authors suggest values from 0.01-0.10 will have similar effects)
:return: the loss
"""
if weight_map is not None:
# raise NotImplementedError
tf.logging.warning('Weight map specified but not used.')
prediction = tf.cast(prediction, tf.float32)
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
one_hot = tf.sparse_tensor_to_dense(one_hot)
# value of unity everywhere except for the previous 'hot' locations
one_cold = 1 - one_hot
# chosen region may contain no voxels of a given label. Prevents nans.
epsilon = 1e-5
squared_error = tf.square(one_hot - prediction)
specificity_part = tf.reduce_sum(
squared_error * one_hot, 0) / \
(tf.reduce_sum(one_hot, 0) + epsilon)
sensitivity_part = \
(tf.reduce_sum(tf.multiply(squared_error, one_cold), 0) /
(tf.reduce_sum(one_cold, 0) + epsilon))
return tf.reduce_sum(r * specificity_part + (1 - r) * sensitivity_part)
def cross_entropy(prediction, ground_truth, weight_map=None):
"""
Function to calculate the cross-entropy loss function
:param prediction: the logits (before softmax)
:param ground_truth: the segmentation ground truth
:param weight_map:
:return: the cross-entropy loss
"""
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
# TODO trace this back:
ground_truth = tf.cast(ground_truth, tf.int32)
entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=prediction, labels=ground_truth)
if weight_map is None:
return tf.reduce_mean(entropy)
weight_sum = tf.maximum(tf.reduce_sum(weight_map), 1e-6)
return tf.reduce_sum(entropy * weight_map / weight_sum)
def cross_entropy_dense(prediction, ground_truth, weight_map=None):
if weight_map is not None:
raise NotImplementedError
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=prediction,
labels=ground_truth)
return tf.reduce_mean(entropy)
def wasserstein_disagreement_map(prediction,
ground_truth,
weight_map=None,
M=None):
"""
Function to calculate the pixel-wise Wasserstein distance between the
flattened prediction and the flattened labels (ground_truth) with respect
to the distance matrix on the label space M.
:param prediction: the logits after softmax
:param ground_truth: segmentation ground_truth
:param M: distance matrix on the label space
:return: the pixelwise distance map (wass_dis_map)
"""
if weight_map is not None:
# raise NotImplementedError
tf.logging.warning('Weight map specified but not used.')
assert M is not None, "Distance matrix is required."
# pixel-wise Wassertein distance (W) between flat_pred_proba and flat_labels
# wrt the distance matrix on the label space M
num_classes = prediction.shape[1].value
ground_truth.set_shape(prediction.shape)
unstack_labels = tf.unstack(ground_truth, axis=-1)
unstack_labels = tf.cast(unstack_labels, dtype=tf.float64)
unstack_pred = tf.unstack(prediction, axis=-1)
unstack_pred = tf.cast(unstack_pred, dtype=tf.float64)
# print("shape of M", M.shape, "unstacked labels", unstack_labels,
# "unstacked pred" ,unstack_pred)
# W is a weighting sum of all pairwise correlations (pred_ci x labels_cj)
pairwise_correlations = []
for i in range(num_classes):
for j in range(num_classes):
pairwise_correlations.append(
M[i, j] * tf.multiply(unstack_pred[i], unstack_labels[j]))
wass_dis_map = tf.add_n(pairwise_correlations)
return wass_dis_map
def generalised_wasserstein_dice_loss(prediction,
ground_truth,
weight_map=None):
"""
Function to calculate the Generalised Wasserstein Dice Loss defined in
Fidon, L. et. al. (2017) Generalised Wasserstein Dice Score
for Imbalanced Multi-class Segmentation using Holistic
Convolutional Networks.MICCAI 2017 (BrainLes)
:param prediction: the logits
:param ground_truth: the segmentation ground_truth
:param weight_map:
:return: the loss
"""
if weight_map is not None:
# raise NotImplementedError
tf.logging.warning('Weight map specified but not used.')
prediction = tf.cast(prediction, tf.float32)
num_classes = prediction.shape[1].value
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
one_hot = tf.sparse_tensor_to_dense(one_hot)
# M = tf.cast(M, dtype=tf.float64)
# compute disagreement map (delta)
M = M_tree
delta = wasserstein_disagreement_map(prediction, one_hot, M=M)
# compute generalisation of all error for multi-class seg
all_error = tf.reduce_sum(delta)
# compute generalisation of true positives for multi-class seg
one_hot = tf.cast(one_hot, dtype=tf.float64)
true_pos = tf.reduce_sum(tf.multiply(
tf.constant(M[0, :num_classes], dtype=tf.float64), one_hot),
axis=1)
true_pos = tf.reduce_sum(tf.multiply(true_pos, 1. - delta), axis=0)
WGDL = 1. - (2. * true_pos) / (2. * true_pos + all_error)
return tf.cast(WGDL, dtype=tf.float32)
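# Note on the distance matrix: M_tree above is assumed to be a label-space
# distance matrix defined earlier in this module; it is not redefined here.
# As a purely hypothetical 3-class illustration, a matrix such as
#   [[0.0, 1.0, 1.0],
#    [1.0, 0.0, 0.5],
#    [1.0, 0.5, 0.0]]
# makes wasserstein_disagreement_map charge a background/foreground confusion
# (cost 1.0) more than a confusion between the two foreground labels (cost 0.5).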
def dice(prediction, ground_truth, weight_map=None):
"""
Function to calculate the dice loss with the definition given in
Milletari, F., Navab, N., & Ahmadi, S. A. (2016)
V-net: Fully convolutional neural
networks for volumetric medical image segmentation. 3DV 2016
using a square in the denominator
:param prediction: the logits
:param ground_truth: the segmentation ground_truth
:param weight_map:
:return: the loss
"""
prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
if weight_map is not None:
num_classes = prediction.shape[1].value
weight_map_nclasses = tf.tile(
tf.expand_dims(tf.reshape(weight_map, [-1]), 1), [1, num_classes])
dice_numerator = 2.0 * tf.sparse_reduce_sum(
weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
dice_denominator = \
tf.reduce_sum(weight_map_nclasses * tf.square(prediction),
reduction_indices=[0]) + \
tf.sparse_reduce_sum(one_hot * weight_map_nclasses,
reduction_axes=[0])
else:
dice_numerator = 2.0 * tf.sparse_reduce_sum(one_hot * prediction,
reduction_axes=[0])
dice_denominator = \
tf.reduce_sum(tf.square(prediction), reduction_indices=[0]) + \
tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
epsilon = 0.00001
dice_score = (dice_numerator + epsilon) / (dice_denominator + epsilon)
# dice_score.set_shape([num_classes])
# minimising (1 - dice_coefficients)
return 1.0 - tf.reduce_mean(dice_score)
def dice_nosquare(prediction, ground_truth, weight_map=None):
"""
Function to calculate the classical dice loss
:param prediction: the logits
:param ground_truth: the segmentation ground_truth
:param weight_map:
:return: the loss
"""
prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
# dice
if weight_map is not None:
num_classes = prediction.shape[1].value
weight_map_nclasses = tf.tile(
tf.expand_dims(tf.reshape(weight_map, [-1]), 1), [1, num_classes])
dice_numerator = 2.0 * tf.sparse_reduce_sum(
weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
dice_denominator = \
tf.reduce_sum(prediction * weight_map_nclasses,
reduction_indices=[0]) + \
tf.sparse_reduce_sum(weight_map_nclasses * one_hot,
reduction_axes=[0])
else:
dice_numerator = 2.0 * tf.sparse_reduce_sum(one_hot * prediction,
reduction_axes=[0])
dice_denominator = tf.reduce_sum(prediction, reduction_indices=[0]) + \
tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
epsilon = 0.00001
dice_score = (dice_numerator + epsilon) / (dice_denominator + epsilon)
# dice_score.set_shape([num_classes])
# minimising (1 - dice_coefficients)
return 1.0 - tf.reduce_mean(dice_score)
def tversky(prediction, ground_truth, weight_map=None, alpha=0.5, beta=0.5):
"""
Function to calculate the Tversky loss for imbalanced data
Sadegh et al. (2017)
Tversky loss function for image segmentation
using 3D fully convolutional deep networks
:param prediction: the logits
:param ground_truth: the segmentation ground_truth
:param alpha: weight of false positives
:param beta: weight of false negatives
:param weight_map:
:return: the loss
"""
    prediction = tf.cast(prediction, tf.float32)
if len(ground_truth.shape) == len(prediction.shape):
ground_truth = ground_truth[..., -1]
one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1])
one_hot = tf.sparse_tensor_to_dense(one_hot)
p0 = prediction
p1 = 1 - prediction
g0 = one_hot
g1 = 1 - one_hot
if weight_map is not None:
num_classes = prediction.shape[1].value
weight_map_flattened = tf.reshape(weight_map, [-1])
weight_map_expanded = tf.expand_dims(weight_map_flattened, 1)
weight_map_nclasses = tf.tile(weight_map_expanded, [1, num_classes])
else:
weight_map_nclasses = 1
tp = tf.reduce_sum(weight_map_nclasses * p0 * g0)
fp = alpha * tf.reduce_sum(weight_map_nclasses * p0 * g1)
fn = beta * tf.reduce_sum(weight_map_nclasses * p1 * g0)
EPSILON = 0.00001
numerator = tp
denominator = tp + fp + fn + EPSILON
score = numerator / denominator
return 1.0 - tf.reduce_mean(score)
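# Illustrative sketch of the alpha/beta trade-off (inputs are whatever tensors
# the caller already feeds to tversky): alpha weights false positives, beta
# weights false negatives, and alpha = beta = 0.5 reduces to the no-square
# Dice formulation above.
def _example_tversky_tradeoff(prediction, ground_truth):
    # penalise false negatives more than false positives (recall-oriented)
    recall_oriented = tversky(prediction, ground_truth, alpha=0.3, beta=0.7)
    # penalise false positives more than false negatives (precision-oriented)
    precision_oriented = tversky(prediction, ground_truth, alpha=0.7, beta=0.3)
    return recall_oriented, precision_oriented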
def dice_dense(prediction, ground_truth, weight_map=None):
"""
Computing mean-class Dice similarity.
:param prediction: last dimension should have ``num_classes``
:param ground_truth: segmentation ground truth (encoded as a binary matrix)
last dimension should be ``num_classes``
:param weight_map:
:return: ``1.0 - mean(Dice similarity per class)``
"""
if weight_map is not None:
raise NotImplementedError
prediction = tf.cast(prediction, dtype=tf.float32)
ground_truth = tf.cast(ground_truth, dtype=tf.float32)
ground_truth = tf.reshape(ground_truth, prediction.shape)
# computing Dice over the spatial dimensions
reduce_axes = list(range(len(prediction.shape) - 1))
dice_numerator = 2.0 * tf.reduce_sum(prediction * ground_truth,
axis=reduce_axes)
dice_denominator = \
tf.reduce_sum(tf.square(prediction), axis=reduce_axes) + \
tf.reduce_sum(tf.square(ground_truth), axis=reduce_axes)
epsilon = 0.00001
dice_score = (dice_numerator + epsilon) / (dice_denominator + epsilon)
return 1.0 - tf.reduce_mean(dice_score)
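# Illustrative usage sketch (hypothetical tensors): unlike dice()/dice_nosquare(),
# dice_dense expects the ground truth already one-hot encoded along the last
# dimension, matching the prediction's shape.
def _example_dice_dense():
    example_probs = tf.constant([[0.9, 0.1],
                                 [0.2, 0.8],
                                 [0.7, 0.3]])
    example_one_hot = tf.constant([[1.0, 0.0],
                                   [0.0, 1.0],
                                   [1.0, 0.0]])
    return dice_dense(example_probs, example_one_hot)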
def dice_dense_nosquare(prediction, ground_truth, weight_map=None):
"""
Computing mean-class Dice similarity with no square terms in the denominator
:param prediction: last dimension should have ``num_classes``
:param ground_truth: segmentation ground truth (encoded as a binary matrix)
last dimension should be ``num_classes``
:param weight_map:
:return: ``1.0 - mean(Dice similarity per class)``
"""
if weight_map is not None:
raise NotImplementedError
prediction = tf.cast(prediction, dtype=tf.float32)
ground_truth = tf.cast(ground_truth, dtype=tf.float32)
ground_truth = tf.reshape(ground_truth, prediction.shape)
# computing Dice over the spatial dimensions
reduce_axes = list(range(len(prediction.shape) - 1))
dice_numerator = 2.0 * tf.reduce_sum(prediction * ground_truth,
axis=reduce_axes)
dice_denominator = \
tf.reduce_sum(prediction, axis=reduce_axes) + \
tf.reduce_sum(ground_truth, axis=reduce_axes)
epsilon = 0.00001
dice_score = (dice_numerator + epsilon) / (dice_denominator + epsilon)
return 1.0 - tf.reduce_mean(dice_score)
def weighted_dice_coefficient(y_true, y_pred, axis=(1, 2, 3), smooth=0.00001):
"""
Weighted dice coefficient. Default axis assumes a "channels first" data structure
:param smooth:
:param y_true:
:param y_pred:
:param axis:
:return:
"""
return tf.keras.backend.mean(
2. * (tf.keras.backend.sum(y_true * y_pred, axis=axis) + smooth / 2) /
(tf.keras.backend.sum(y_true, axis=axis) +
tf.keras.backend.sum(y_pred, axis=axis) + smooth))
def weighted_dice_coefficient_loss(y_true, y_pred):
return -weighted_dice_coefficient(y_true, y_pred)
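# Illustrative usage (hypothetical model): weighted_dice_coefficient_loss
# already follows the Keras (y_true, y_pred) loss signature, so it can be
# passed straight to compile(), e.g.
#   model.compile(optimizer='adam', loss=weighted_dice_coefficient_loss)
# where `model` is any tf.keras segmentation model producing per-class maps.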
|
py | b40d1b609537a3833b8fe7cdea9d23e9efd1e836 | ###############################################################################
#MIT License
#
#Copyright (c) 2019 Daniel Vitor Ruiz
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
###############################################################################
import os
import cv2
import numpy as np
from PIL import Image
import time
import random
#for fancy parameterization
from argparse import ArgumentParser
def parse_args():
    parser = ArgumentParser(description='Compute resulting image using ANDA technique for the MSRA10K dataset')
parser.add_argument(
'-obj_path', '--obj_path',
type=str, default="/home/bakrinski/datasets/MSRA10K/images/",
help='OBJ_FOLDER_IMG input images path'
)
parser.add_argument(
'-obj_mask_path', '--obj_mask_path',
type=str, default="/home/bakrinski/datasets/MSRA10K/masks/",
help='OBJ_FOLDER_MASK input masks path'
)
parser.add_argument(
'-bg_path', '--bg_path',
type=str, default="/home/dvruiz/PConv-Keras/output/",
help='BG_FOLDER_IMG background images path'
)
parser.add_argument(
'-index_obj_path', '--index_obj_path',
type=str, default="dataset.txt",
        help='LIST_OF_N_OBJECTS filepath for the file containing one index per line, e.g. "dataset.txt" resulting from genObjIndicees.py'
)
parser.add_argument(
'-index_bg_path', '--index_bg_path',
type=str, default="indices_cosine.txt",
        help='LIST_OF_INDICES filepath for the file containing one index per line, e.g. "indices_cosine.txt" resulting from computeKnn.py'
)
parser.add_argument(
'-out_path', '--out_path',
type=str, default="output/",
        help='output path containing folders named "images" and "masks", e.g. "output/"'
)
parser.add_argument(
'-seed', '--seed',
type=int, default=22,
help='seed number for the pseudo-random computation'
)
parser.add_argument(
'-size', '--size',
type=int, default=10000,
help='number of images in the dataset'
)
parser.add_argument(
'-n_bgs', '--n_bgs',
type=int, default=1,
help='N_OF_BACKGROUNDS'
)
parser.add_argument(
'-n_ops', '--n_ops',
type=int, default=1,
help='N_OF_OPS'
)
return parser.parse_args()
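# Illustrative command line (script name and paths are hypothetical; any flag
# left out falls back to the defaults declared above):
#   python anda_msra10k.py \
#       -obj_path /data/MSRA10K/images/ -obj_mask_path /data/MSRA10K/masks/ \
#       -bg_path /data/inpainted_backgrounds/ \
#       -index_obj_path dataset.txt -index_bg_path indices_cosine.txt \
#       -out_path output/ -seed 22 -size 10000 -n_bgs 1 -n_ops 1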
# SETTINGS
#CALL PARSER
args = parse_args()
#
OBJ_FOLDER_IMG = args.obj_path
OBJ_FOLDER_MASK = args.obj_mask_path
BG_FOLDER_IMG = args.bg_path
OUTPUT_FOLDER_IMG = "images/"
OUTPUT_FOLDER_MASK = "masks/"
LIST_OF_N_OBJECTS = args.index_obj_path
N_OBJECT = args.size
N_OF_BACKGROUNDS = args.n_bgs
N_OF_OPS = args.n_ops
LIST_OF_INDICES = args.index_bg_path
kernelErode = np.ones((3, 3), np.uint8)
maxH = 512
maxW = 512
random.seed(args.seed)
np.random.seed(args.seed)
noise_scale = np.random.uniform(low=0.975, high=1.025, size=N_OBJECT)
#
# # SETTINGS
# OBJ_FOLDER_IMG = "/home/bakrinski/datasets/MSRA10K/images/"
# OBJ_FOLDER_MASK = "/home/bakrinski/datasets/MSRA10K/masks/"
# BG_FOLDER_IMG = "/home/dvruiz/PConv-Keras/output/"
# OUTPUT_FOLDER_IMG = "images/"
# OUTPUT_FOLDER_MASK = "masks/"
# LIST_OF_N_OBJECTS = "dataset.txt"
# N_WORST = 10000
# N_OF_BACKGROUNDS = 1
# N_OF_OPS = 1
# LIST_OF_INDICES = "indices_cosine.txt"
#
# kernelErode = np.ones((3, 3), np.uint8)
#
# maxH = 512
# maxW = 512
#
# random.seed(22)
# np.random.seed(22)
# # noise_scale = np.random.uniform(low=0.975, high=1.025, size=13980)
# noise_scale = np.random.uniform(low=0.975, high=1.025, size=N_WORST)
# #
def randomTranslateInside(newYmax, newYmin, newXmax, newXmin, newOrigin, border, M):
noise_x = np.random.uniform(low=0.0, high=1.0)
noise_y = np.random.uniform(low=0.0, high=1.0)
# check if bbox can move in y
if((newYmax - newYmin) < border[0]):
# check the direction of free space
if((newYmax) < newOrigin[0] + border[0]):
if((newYmin) > newOrigin[0]):
freeSpacePos = (newOrigin[0] + border[0]) - newYmax
freeSpaceNeg = newYmin - newOrigin[0]
luck = np.random.randint(low=0, high=2)
if(luck == 0):
M[1][2] += np.floor(noise_y * freeSpacePos)
else:
M[1][2] -= np.floor(noise_y * freeSpaceNeg)
else:
freeSpace = (newOrigin[0] + border[0]) - newYmax
M[1][2] += np.floor(noise_y * freeSpace)
else:
if((newYmin) > newOrigin[0]):
freeSpace = newYmin - newOrigin[0]
M[1][2] -= np.floor(noise_y * freeSpace)
if((newXmax - newXmin) < border[1]):
# check the direction of free space
if((newXmax) < newOrigin[1] + border[1]):
if((newXmin) > newOrigin[1]):
freeSpacePos = (newOrigin[1] + border[1]) - newXmax
freeSpaceNeg = newXmin - newOrigin[1]
luck = np.random.randint(low=0, high=2)
if(luck == 0):
M[0][2] += np.floor(noise_x * freeSpacePos)
else:
M[0][2] -= np.floor(noise_x * freeSpaceNeg)
else:
freeSpace = (newOrigin[1] + border[1]) - newXmax
M[0][2] += np.floor(noise_x * freeSpace)
else:
if((newXmin) > newOrigin[1]):
freeSpace = newXmin - newOrigin[1]
M[0][2] -= np.floor(noise_x * freeSpace)
return M
def geometricOp2(resizedImg, resizedMask, bgOriginalshape, op, globalIndex):
#######################################################
diffH = int((resizedImg.shape[0] - bgOriginalshape[0]) / 2)
diffW = int((resizedImg.shape[1] - bgOriginalshape[1]) / 2)
####
ymin, ymax, xmin, xmax = bbox(resizedMask)
# xmin -= np.abs(noise_translate_x[globalIndex])
# xmax += np.abs(noise_translate_x[globalIndex])
# ymin -= np.abs(noise_translate_y[globalIndex])
# ymax += np.abs(noise_translate_y[globalIndex])
propX = (xmax - xmin)
propY = (ymax - ymin)
areaOBJ = propX * propY
areaIMG = bgOriginalshape[0] * bgOriginalshape[1]
prop = areaOBJ / areaIMG
###
    # note: the area-ratio class is derived from globalIndex here, overriding
    # the op argument passed in
    op = globalIndex % 5
if(op == 0):
beta = 0.05 * noise_scale[globalIndex]
if(op == 1):
beta = 0.15 * noise_scale[globalIndex]
if(op == 2):
beta = 0.65 * noise_scale[globalIndex]
if(op == 3):
beta = 0.75 * noise_scale[globalIndex]
if(op == 4):
beta = 0.85 * noise_scale[globalIndex]
scale = np.sqrt((beta * areaIMG) / areaOBJ)
diffx = ((xmax - xmin) / 2)
diffy = ((ymax - ymin) / 2)
centerx = xmin + diffx
centery = ymin + diffy
pts1 = np.float32([[xmin, ymin], [xmax, ymin], [xmin, ymax]])
newXmin = centerx - diffx * scale
newXmax = centerx + diffx * scale
newYmin = centery - diffy * scale
newYmax = centery + diffy * scale
# LOGIC HERE
newOrigin = [diffH, diffW]
border = [bgOriginalshape[0], bgOriginalshape[1]]
# check if the aspect of the object is the same as the bg
obj_orientation = -1
bg_orientation = -1
if(diffx >= diffy):
obj_orientation = 0
else:
obj_orientation = 1
if(bgOriginalshape[1] >= bgOriginalshape[0]):
bg_orientation = 0
else:
bg_orientation = 1
# check if can fit
if((newYmax - newYmin <= border[0])and(newXmax - newXmin <= border[1])):
# ok then it can fit
# but does it need translation?
pts2 = np.float32(
[[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])
M = cv2.getAffineTransform(pts1, pts2)
# origin of object must be >= newOrigin
if(newYmin <= newOrigin[0]):
local_diff_y = newOrigin[0] - newYmin
M[1][2] += (local_diff_y)
if(newXmin <= newOrigin[1]):
local_diff_x = newOrigin[1] - newXmin
M[0][2] += (local_diff_x)
# maxdim must be <= border with the correct origin
if(newYmax >= (border[0] + newOrigin[0])):
local_diff_y = newYmax - (border[0] + newOrigin[0])
M[1][2] -= (local_diff_y)
if(newXmax >= (border[1] + newOrigin[1])):
local_diff_x = newXmax - (border[1] + newOrigin[1])
M[0][2] -= (local_diff_x)
newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
newXminTmp = min(newXmin, newXmax)
newXmaxTmp = max(newXmin, newXmax)
newYminTmp = min(newYmin, newYmax)
newYmaxTmp = max(newYmin, newYmax)
newXmin = newXminTmp
newXmax = newXmaxTmp
newYmin = newYminTmp
newYmax = newYmaxTmp
M = randomTranslateInside(
newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
resizedImg = cv2.warpAffine(
resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
resizedMask = cv2.warpAffine(
resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
else:
# it cannot fit
# resize
if(obj_orientation == bg_orientation):
# print("same")
# limit resize to max that fits
# scale must consider translation
scale = min((border[0]) / (ymax - ymin),
(border[1]) / (xmax - xmin))
#
newXmin = centerx - diffx * scale
newXmax = centerx + diffx * scale
newYmin = centery - diffy * scale
newYmax = centery + diffy * scale
pts2 = np.float32(
[[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])
M = cv2.getAffineTransform(pts1, pts2)
# origin of object must be >= newOrigin
if(newYmin <= newOrigin[0]):
local_diff_y = newOrigin[0] - newYmin
M[1][2] += (local_diff_y)
if(newXmin <= newOrigin[1]):
local_diff_x = newOrigin[1] - newXmin
M[0][2] += (local_diff_x)
# maxdim must be <= border with the correct origin
if(newYmax >= (border[0] + newOrigin[0])):
local_diff_y = newYmax - (border[0] + newOrigin[0])
M[1][2] -= (local_diff_y)
if(newXmax >= (border[1] + newOrigin[1])):
local_diff_x = newXmax - (border[1] + newOrigin[1])
M[0][2] -= (local_diff_x)
newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
newXminTmp = min(newXmin, newXmax)
newXmaxTmp = max(newXmin, newXmax)
newYminTmp = min(newYmin, newYmax)
newYmaxTmp = max(newYmin, newYmax)
newXmin = newXminTmp
newXmax = newXmaxTmp
newYmin = newYminTmp
newYmax = newYmaxTmp
#
M = randomTranslateInside(
newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
resizedImg = cv2.warpAffine(
resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
resizedMask = cv2.warpAffine(
resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
else:
# print("different")
# check if a rotated obj fits
idxmod = np.random.randint(low=0, high=2)
if(idxmod == 0):
degrees = -90
if(idxmod == 1):
degrees = 90
M = cv2.getRotationMatrix2D(((maxW / 2), (maxH / 2)), degrees, 1)
newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
newXminTmp = min(newXmin, newXmax)
newXmaxTmp = max(newXmin, newXmax)
newYminTmp = min(newYmin, newYmax)
newYmaxTmp = max(newYmin, newYmax)
newXmin = newXminTmp
newXmax = newXmaxTmp
newYmin = newYminTmp
newYmax = newYmaxTmp
# scale must consider translation
scale = min((border[0]) / (newYmax - newYmin),
(border[1]) / (newXmax - newXmin))
#
M[0][0] *= scale
M[0][1] *= scale
M[1][0] *= scale
M[1][1] *= scale
newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
newXminTmp = min(newXmin, newXmax)
newXmaxTmp = max(newXmin, newXmax)
newYminTmp = min(newYmin, newYmax)
newYmaxTmp = max(newYmin, newYmax)
newXmin = newXminTmp
newXmax = newXmaxTmp
newYmin = newYminTmp
newYmax = newYmaxTmp
# origin of object must be >= newOrigin
if(newYmin <= newOrigin[0]):
local_diff_y = newOrigin[0] - newYmin
M[1][2] += (local_diff_y)
if(newXmin <= newOrigin[1]):
local_diff_x = newOrigin[1] - newXmin
M[0][2] += (local_diff_x)
# maxdim must be <= border with the correct origin
if(newYmax >= (border[0] + newOrigin[0])):
local_diff_y = newYmax - (border[0] + newOrigin[0])
M[1][2] -= (local_diff_y)
if(newXmax >= (border[1] + newOrigin[1])):
local_diff_x = newXmax - (border[1] + newOrigin[1])
M[0][2] -= (local_diff_x)
newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
newXminTmp = min(newXmin, newXmax)
newXmaxTmp = max(newXmin, newXmax)
newYminTmp = min(newYmin, newYmax)
newYmaxTmp = max(newYmin, newYmax)
newXmin = newXminTmp
newXmax = newXmaxTmp
newYmin = newYminTmp
newYmax = newYmaxTmp
#
M = randomTranslateInside(
newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
resizedImg = cv2.warpAffine(
resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
resizedMask = cv2.warpAffine(
resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
####
# cv2.rectangle(resizedMask, (int(newXmin), int(newYmin)),
# (int(newXmax), int(newYmax)), (255, 255, 255), 1)
#######################################################
return resizedImg, resizedMask
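# Worked example of the area-ratio scaling above (numbers are illustrative):
# with beta = 0.15, a 512x512 background (areaIMG = 262144) and an object
# bounding box of 200x100 (areaOBJ = 20000),
#   scale = sqrt(0.15 * 262144 / 20000) ~= 1.40,
# so the object bbox is enlarged until it covers roughly 15% of the background
# area before the translation/fitting logic runs.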
def bbox(img):
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return rmin, rmax, cmin, cmax
def resize_with_pad(image, height, width):
def get_padding_size(image, height, width):
# h, w, _ = image.shape
h = image.shape[0]
w = image.shape[1]
top, bottom, left, right = (0, 0, 0, 0)
if h < height:
dh = height - h
top = dh // 2
bottom = dh - top
if w < width:
dw = width - w
left = dw // 2
right = dw - left
else:
pass
return top, bottom, left, right
top, bottom, left, right = get_padding_size(image, height, width)
BLACK = [0, 0, 0]
constant = cv2.copyMakeBorder(
image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=BLACK)
return constant
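# Example of the padding arithmetic above: a 300x400 (HxW) image padded to
# 512x512 gets top/bottom = 106/106 and left/right = 56/56 black borders,
# keeping the original pixels centred without rescaling them.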
def resizeToOrg(bgOriginalshape, new, newMask):
if(bgOriginalshape[0] < new.shape[0]):
diffH = int((new.shape[0] - bgOriginalshape[0]) / 2)
new = new[diffH:bgOriginalshape[0] + diffH, :, :]
newMask = newMask[diffH:bgOriginalshape[0] + diffH, :, :]
if(bgOriginalshape[1] < new.shape[1]):
diffW = int((new.shape[1] - bgOriginalshape[1]) / 2)
new = new[:, diffW:bgOriginalshape[1] + diffW, :]
newMask = newMask[:, diffW:bgOriginalshape[1] + diffW, :]
return new, newMask
def loadResizedBG(index):
bgName = "MSRA10K_image_{:06d}.png".format(index)
bgFile = Image.open(BG_FOLDER_IMG + bgName)
bg = np.array(bgFile)
bgOriginalshape = bg.shape
resizedBg = resize_with_pad(bg, height=maxH, width=maxW)
bgFile.close()
return resizedBg, bgOriginalshape
def main(op, multipleBgs, outPath):
# read LIST_OF_N_OBJECTS
    arrOBJ = np.zeros(N_OBJECT, dtype=int)
f = open(LIST_OF_N_OBJECTS, "r")
for i in range(0, N_OBJECT):
line = f.readline()
args = line.split(" ")
arrOBJ[i] = int(args[0])
f.close()
###
# read LIST_OF_N_OBJECTS
    arrBG = np.zeros((N_OBJECT, N_OF_BACKGROUNDS), dtype=int)
f = open(LIST_OF_INDICES, "r")
for i in range(0, N_OBJECT):
line = f.readline()
if line == '\n':
arrOBJ[i] = -1
else:
args = line.split(" ")
for bgindex in range(0, N_OF_BACKGROUNDS):
arrBG[i][bgindex] = int(args[bgindex])
f.close()
###
realI = 0
for i in range(0, N_OBJECT, 1):
if(arrOBJ[i] != -1):
imgName = "MSRA10K_image_{:06d}.jpg".format(arrOBJ[i])
imFile = Image.open(OBJ_FOLDER_IMG + imgName)
img = np.array(imFile)
maskName = imgName.replace(".jpg", ".png")
maskName = maskName.replace("image", "mask")
maskFile = Image.open(OBJ_FOLDER_MASK + maskName)
mask = np.array(maskFile)
mask = np.tile(mask[:, :, None], [1, 1, 3])
resizedImg = resize_with_pad(img, height=maxH, width=maxW)
resizedMask = resize_with_pad(mask, height=maxH, width=maxW)
imFile.close()
maskFile.close()
# print(stamp)
            # use list comprehensions so each background gets its own inner list;
            # [[None] * n] * m would alias the same inner list m times
            resizedImgArr = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            resizedMaskArr = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            # print(resizedImgArr)
            resizedBg = [None] * N_OF_BACKGROUNDS
            bgOriginalshape = [None] * N_OF_BACKGROUNDS
            blur = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            inv_blur = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            new = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            result = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            resizedMaskFinal = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
for bgindex in range(0, N_OF_BACKGROUNDS):
resizedBg[bgindex], bgOriginalshape[bgindex] = loadResizedBG(
arrBG[i][bgindex])
# calcule ops per bgs
for opindex in range(0, N_OF_OPS):
globalIndex = (
((realI * N_OF_BACKGROUNDS) + bgindex) * N_OF_OPS) + opindex
# print(globalIndex)
resizedImgArr[bgindex][opindex], resizedMaskArr[bgindex][opindex] = geometricOp2(
resizedImg, resizedMask, bgOriginalshape[bgindex], opindex, globalIndex)
# internalLoop
# BEGIN Smooth border copy
resizedMaskTmp = cv2.erode(
resizedMaskArr[bgindex][opindex], kernelErode, iterations=1)
blur[bgindex][opindex] = cv2.blur(resizedMaskTmp, (3, 3))
blur[bgindex][opindex] = (
blur[bgindex][opindex] / 255) * 0.95
inv_blur[bgindex][opindex] = 1 - blur[bgindex][opindex]
new[bgindex][opindex] = blur[bgindex][opindex] * resizedImgArr[bgindex][opindex] + \
inv_blur[bgindex][opindex] * resizedBg[bgindex]
# END Smooth border copy
new[bgindex][opindex], resizedMaskArr[bgindex][opindex] = resizeToOrg(
bgOriginalshape[bgindex], new[bgindex][opindex], resizedMaskArr[bgindex][opindex])
#########################################################
result[bgindex][opindex] = Image.fromarray(
(new[bgindex][opindex]).astype(np.uint8))
resizedMaskFinal[bgindex][opindex] = Image.fromarray(
(resizedMaskArr[bgindex][opindex]).astype(np.uint8))
stamp = "{:06d}_{:06d}_{:03d}.png".format(
arrOBJ[i], arrBG[i][bgindex], opindex)
result[bgindex][opindex].save(outPath + OUTPUT_FOLDER_IMG +
"MSRA10K_image_" + stamp)
resizedMaskFinal[bgindex][opindex].save(outPath + OUTPUT_FOLDER_MASK
+ "MSRA10K_mask_" + stamp)
print(stamp)
#########################################################
realI += 1
if __name__ == '__main__':
if(args.n_bgs>1):
main(0,True,args.out_path)
else:
main(0,False,args.out_path)
|
py | b40d1bccec31c5bb19029e661b42aa36629faae8 | def selection_0():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(-8.0,8.0,101,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([-7.92,-7.76,-7.6,-7.44,-7.28,-7.12,-6.96,-6.8,-6.64,-6.48,-6.32,-6.16,-6.0,-5.84,-5.68,-5.52,-5.36,-5.2,-5.04,-4.88,-4.72,-4.56,-4.4,-4.24,-4.08,-3.92,-3.76,-3.6,-3.44,-3.28,-3.12,-2.96,-2.8,-2.64,-2.48,-2.32,-2.16,-2.0,-1.84,-1.68,-1.52,-1.36,-1.2,-1.04,-0.88,-0.72,-0.56,-0.4,-0.24,-0.08,0.08,0.24,0.4,0.56,0.72,0.88,1.04,1.2,1.36,1.52,1.68,1.84,2.0,2.16,2.32,2.48,2.64,2.8,2.96,3.12,3.28,3.44,3.6,3.76,3.92,4.08,4.24,4.4,4.56,4.72,4.88,5.04,5.2,5.36,5.52,5.68,5.84,6.0,6.16,6.32,6.48,6.64,6.8,6.96,7.12,7.28,7.44,7.6,7.76,7.92])
# Creating weights for histo: y1_ETA_0
y1_ETA_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,813.137467067,406.568733534,813.137467067,0.0,406.568733534,0.0,813.137467067,406.568733534,1626.27533413,813.137467067,4065.68733534,2032.84406767,4472.25526887,4878.8272024,6098.531003,6911.67087007,6911.67087007,12197.066006,15043.0455407,13823.3377401,12603.6339395,13010.2018731,19515.3008096,18702.1649425,16669.3212749,15856.1814078,21548.1484773,21141.5765437,22361.2843443,21954.7164108,14636.4776072,13010.2018731,15043.0455407,13823.3377401,11790.4940725,9757.6504048,9351.08247127,9351.08247127,5691.96306947,6098.531003,6098.531003,5285.39513594,4878.8272024,2845.98153473,2439.4128012,2439.4128012,2032.84406767,1626.27533413,1219.7066006,813.137467067,813.137467067,406.568733534,0.0,406.568733534,406.568733534,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(8,6),dpi=80)
frame = gridspec.GridSpec(1,1)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y1_ETA_0_weights,\
label="$signal$", histtype="stepfilled", rwidth=1.0,\
color="#5954d8", edgecolor="#5954d8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"$\eta$ $[ ax ]$ ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{N.}\ \mathrm{of}\ ax$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y1_ETA_0_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y1_ETA_0_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_0.png')
    plt.savefig('../../PDF/MadAnalysis5job_0/selection_0.pdf')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_0.eps')
# Running!
if __name__ == '__main__':
selection_0()
|
py | b40d1c33b6348e2af4e2833d8b35a8d74bf0a1ee | import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF
# Probabilistic model #
T = 200
nu = LogNormalVariable(0.3, 1., 'nu')
x0 = NormalVariable(0., 1., 'x0')
b = BetaVariable(0.5, 1.5, 'b')
x = [x0]
names = ["x0"]
for t in range(1, T):
names.append("x{}".format(t))
x.append(NormalVariable(b * x[t - 1], nu, names[t]))
AR_model = ProbabilisticModel(x)
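# The loop above defines a first-order autoregressive process,
#   x_t ~ Normal(b * x_{t-1}, nu),
# with the coefficient b drawn from a Beta prior and the noise scale nu from a
# log-normal prior; the inference below recovers b and nu from one simulated
# trajectory.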
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].cpu().detach().numpy()) for xt in x]
true_b = data[b].cpu().detach().numpy()
true_nu = data[nu].cpu().detach().numpy()
print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[xt.observe(data[xt][:, 0, :]) for xt in x]
# Variational distribution #
Qnu = LogNormalVariable(0.5, 1., "nu", learnable=True)
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
variational_posterior = ProbabilisticModel([Qb, Qnu])
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=200,
number_samples=300,
optimizer='Adam',
lr=0.05)
loss_list = AR_model.diagnostics["loss curve"]
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
nu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()
b_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
# Two subplots, unpack the axes array immediately
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.plot(time_series)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0,1)
ax4.hist(nu_posterior_samples, 25)
ax4.axvline(x=true_nu, lw=2, c="r")
ax4.set_title("Posterior samples (nu)")
plt.show() |
py | b40d1c7f1662f3c8d12e82de95912e18a997b6b6 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import json
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from .models import SermepaResponse
from .mixins import SermepaMixin
class SermepaPaymentForm(SermepaMixin, forms.Form):
Ds_SignatureVersion = forms.IntegerField(widget=forms.HiddenInput())
Ds_MerchantParameters = forms.IntegerField(widget=forms.HiddenInput())
Ds_Signature = forms.IntegerField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
merchant_parameters = kwargs.pop('merchant_parameters', None)
secret_key = kwargs.pop('secret_key', settings.SERMEPA_SECRET_KEY) # implementation for django_payments
super(SermepaPaymentForm, self).__init__(*args, **kwargs)
if merchant_parameters:
json_data = json.dumps(merchant_parameters)
print(json_data)
order = merchant_parameters['Ds_Merchant_Order']
b64_params = self.encode_base64(json_data.encode())
signature = self.get_firma_peticion(order, b64_params, secret_key)
self.initial['Ds_SignatureVersion'] = settings.SERMEPA_SIGNATURE_VERSION
self.initial['Ds_MerchantParameters'] = b64_params.decode('ascii')
self.initial['Ds_Signature'] = signature
def render(self):
return mark_safe(u"""<form id="tpv_form" action="%s" method="post">
%s
<input type="submit" name="submit" alt="Comprar ahora" value="Comprar ahora"/>
</form>""" % (settings.SERMEPA_URL_PRO, self.as_p()))
def sandbox(self):
return mark_safe(u"""<form id="tpv_form" action="%s" method="post">
%s
<input type="submit" name="submit" alt="Comprar ahora" value="Comprar ahora"/>
</form>""" % (settings.SERMEPA_URL_TEST, self.as_p()))
class SermepaResponseForm(forms.Form):
Ds_SignatureVersion = forms.CharField(max_length=256)
Ds_Signature = forms.CharField(max_length=256)
Ds_MerchantParameters = forms.CharField(max_length=2048)
Ds_Date = forms.DateField(required=False, input_formats=('%d/%m/%Y',))
Ds_Hour = forms.TimeField(required=False, input_formats=('%H:%M',))
|