max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
sorts/insertionSort/insertion_sort.py | JesseJMa/data-structure-and-algorithm | 0 | 12793851 | def insertion_sort(arr, l, r):
for i in range(l + 1, r + 1):
temp = arr[i]
index = i
while index > l and arr[index - 1] > temp:  # stop at l so elements before the sorted range stay untouched
arr[index] = arr[index - 1]
index -= 1
arr[index] = temp | 3.875 | 4 |
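A minimal usage sketch for the range-based insertion sort above; the input lists and printed results are invented for illustration and are not part of the original row.

```python
# Hypothetical data: sort the inclusive range [l, r] in place.
data = [5, 2, 9, 1, 7, 3]
insertion_sort(data, 0, len(data) - 1)
print(data)  # [1, 2, 3, 5, 7, 9]

# Sorting only a sub-range leaves the rest of the list untouched.
partial = [9, 8, 3, 1, 2, 0]
insertion_sort(partial, 2, 4)
print(partial)  # [9, 8, 1, 2, 3, 0]
```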
huaweicloud-sdk-scm/huaweicloudsdkscm/v3/model/show_certificate_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 64 | 12793852 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowCertificateResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'status': 'str',
'order_id': 'str',
'name': 'str',
'type': 'str',
'brand': 'str',
'push_support': 'str',
'revoke_reason': 'str',
'signature_algrithm': 'str',
'issue_time': 'str',
'not_before': 'str',
'not_after': 'str',
'validity_period': 'int',
'validation_method': 'str',
'domain_type': 'str',
'domain': 'str',
'sans': 'str',
'domain_count': 'int',
'wildcard_count': 'int',
'authentification': 'list[Authentification]'
}
attribute_map = {
'id': 'id',
'status': 'status',
'order_id': 'order_id',
'name': 'name',
'type': 'type',
'brand': 'brand',
'push_support': 'push_support',
'revoke_reason': 'revoke_reason',
'signature_algrithm': 'signature_algrithm',
'issue_time': 'issue_time',
'not_before': 'not_before',
'not_after': 'not_after',
'validity_period': 'validity_period',
'validation_method': 'validation_method',
'domain_type': 'domain_type',
'domain': 'domain',
'sans': 'sans',
'domain_count': 'domain_count',
'wildcard_count': 'wildcard_count',
'authentification': 'authentification'
}
def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None):
"""ShowCertificateResponse - a model defined in huaweicloud sdk"""
super(ShowCertificateResponse, self).__init__()
self._id = None
self._status = None
self._order_id = None
self._name = None
self._type = None
self._brand = None
self._push_support = None
self._revoke_reason = None
self._signature_algrithm = None
self._issue_time = None
self._not_before = None
self._not_after = None
self._validity_period = None
self._validation_method = None
self._domain_type = None
self._domain = None
self._sans = None
self._domain_count = None
self._wildcard_count = None
self._authentification = None
self.discriminator = None
if id is not None:
self.id = id
if status is not None:
self.status = status
if order_id is not None:
self.order_id = order_id
if name is not None:
self.name = name
if type is not None:
self.type = type
if brand is not None:
self.brand = brand
if push_support is not None:
self.push_support = push_support
if revoke_reason is not None:
self.revoke_reason = revoke_reason
if signature_algrithm is not None:
self.signature_algrithm = signature_algrithm
if issue_time is not None:
self.issue_time = issue_time
if not_before is not None:
self.not_before = not_before
if not_after is not None:
self.not_after = not_after
if validity_period is not None:
self.validity_period = validity_period
if validation_method is not None:
self.validation_method = validation_method
if domain_type is not None:
self.domain_type = domain_type
if domain is not None:
self.domain = domain
if sans is not None:
self.sans = sans
if domain_count is not None:
self.domain_count = domain_count
if wildcard_count is not None:
self.wildcard_count = wildcard_count
if authentification is not None:
self.authentification = authentification
@property
def id(self):
"""Gets the id of this ShowCertificateResponse.
Certificate ID.
:return: The id of this ShowCertificateResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShowCertificateResponse.
Certificate ID.
:param id: The id of this ShowCertificateResponse.
:type: str
"""
self._id = id
@property
def status(self):
"""Gets the status of this ShowCertificateResponse.
Certificate status. Possible values: - PAID: the certificate has been paid for and the certificate request is pending. - ISSUED: the certificate has been issued. - CHECKING: the certificate request is under review. - CANCELCHECKING: the request to cancel the certificate application is under review. - UNPASSED: the certificate application was not approved. - EXPIRED: the certificate has expired. - REVOKING: the certificate revocation request is under review. - REVOKED: the certificate has been revoked. - UPLOAD: the certificate is hosted. - SUPPLEMENTCHECKING: the addition of extra domain names to a multi-domain certificate is under review. - CANCELSUPPLEMENTING: the cancellation of added extra domain names is under review.
:return: The status of this ShowCertificateResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowCertificateResponse.
Certificate status. Possible values: - PAID: the certificate has been paid for and the certificate request is pending. - ISSUED: the certificate has been issued. - CHECKING: the certificate request is under review. - CANCELCHECKING: the request to cancel the certificate application is under review. - UNPASSED: the certificate application was not approved. - EXPIRED: the certificate has expired. - REVOKING: the certificate revocation request is under review. - REVOKED: the certificate has been revoked. - UPLOAD: the certificate is hosted. - SUPPLEMENTCHECKING: the addition of extra domain names to a multi-domain certificate is under review. - CANCELSUPPLEMENTING: the cancellation of added extra domain names is under review.
:param status: The status of this ShowCertificateResponse.
:type: str
"""
self._status = status
@property
def order_id(self):
"""Gets the order_id of this ShowCertificateResponse.
Order ID.
:return: The order_id of this ShowCertificateResponse.
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""Sets the order_id of this ShowCertificateResponse.
Order ID.
:param order_id: The order_id of this ShowCertificateResponse.
:type: str
"""
self._order_id = order_id
@property
def name(self):
"""Gets the name of this ShowCertificateResponse.
Certificate name.
:return: The name of this ShowCertificateResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowCertificateResponse.
Certificate name.
:param name: The name of this ShowCertificateResponse.
:type: str
"""
self._name = name
@property
def type(self):
"""Gets the type of this ShowCertificateResponse.
Certificate type. Possible values: DV_SSL_CERT, DV_SSL_CERT_BASIC, EV_SSL_CERT, EV_SSL_CERT_PRO, OV_SSL_CERT, OV_SSL_CERT_PRO.
:return: The type of this ShowCertificateResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowCertificateResponse.
Certificate type. Possible values: DV_SSL_CERT, DV_SSL_CERT_BASIC, EV_SSL_CERT, EV_SSL_CERT_PRO, OV_SSL_CERT, OV_SSL_CERT_PRO.
:param type: The type of this ShowCertificateResponse.
:type: str
"""
self._type = type
@property
def brand(self):
"""Gets the brand of this ShowCertificateResponse.
Certificate brand. Possible values: GLOBALSIGN, SYMANTEC, GEOTRUST, CFCA.
:return: The brand of this ShowCertificateResponse.
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this ShowCertificateResponse.
Certificate brand. Possible values: GLOBALSIGN, SYMANTEC, GEOTRUST, CFCA.
:param brand: The brand of this ShowCertificateResponse.
:type: str
"""
self._brand = brand
@property
def push_support(self):
"""Gets the push_support of this ShowCertificateResponse.
Whether the certificate supports push.
:return: The push_support of this ShowCertificateResponse.
:rtype: str
"""
return self._push_support
@push_support.setter
def push_support(self, push_support):
"""Sets the push_support of this ShowCertificateResponse.
Whether the certificate supports push.
:param push_support: The push_support of this ShowCertificateResponse.
:type: str
"""
self._push_support = push_support
@property
def revoke_reason(self):
"""Gets the revoke_reason of this ShowCertificateResponse.
Certificate revocation reason.
:return: The revoke_reason of this ShowCertificateResponse.
:rtype: str
"""
return self._revoke_reason
@revoke_reason.setter
def revoke_reason(self, revoke_reason):
"""Sets the revoke_reason of this ShowCertificateResponse.
Certificate revocation reason.
:param revoke_reason: The revoke_reason of this ShowCertificateResponse.
:type: str
"""
self._revoke_reason = revoke_reason
@property
def signature_algrithm(self):
"""Gets the signature_algrithm of this ShowCertificateResponse.
Signature algorithm.
:return: The signature_algrithm of this ShowCertificateResponse.
:rtype: str
"""
return self._signature_algrithm
@signature_algrithm.setter
def signature_algrithm(self, signature_algrithm):
"""Sets the signature_algrithm of this ShowCertificateResponse.
Signature algorithm.
:param signature_algrithm: The signature_algrithm of this ShowCertificateResponse.
:type: str
"""
self._signature_algrithm = signature_algrithm
@property
def issue_time(self):
"""Gets the issue_time of this ShowCertificateResponse.
Certificate issuance time; empty if no valid value is obtained.
:return: The issue_time of this ShowCertificateResponse.
:rtype: str
"""
return self._issue_time
@issue_time.setter
def issue_time(self, issue_time):
"""Sets the issue_time of this ShowCertificateResponse.
Certificate issuance time; empty if no valid value is obtained.
:param issue_time: The issue_time of this ShowCertificateResponse.
:type: str
"""
self._issue_time = issue_time
@property
def not_before(self):
"""Gets the not_before of this ShowCertificateResponse.
Time when the certificate takes effect; empty if no valid value is obtained.
:return: The not_before of this ShowCertificateResponse.
:rtype: str
"""
return self._not_before
@not_before.setter
def not_before(self, not_before):
"""Sets the not_before of this ShowCertificateResponse.
Time when the certificate takes effect; empty if no valid value is obtained.
:param not_before: The not_before of this ShowCertificateResponse.
:type: str
"""
self._not_before = not_before
@property
def not_after(self):
"""Gets the not_after of this ShowCertificateResponse.
Time when the certificate expires; empty if no valid value is obtained.
:return: The not_after of this ShowCertificateResponse.
:rtype: str
"""
return self._not_after
@not_after.setter
def not_after(self, not_after):
"""Sets the not_after of this ShowCertificateResponse.
Time when the certificate expires; empty if no valid value is obtained.
:param not_after: The not_after of this ShowCertificateResponse.
:type: str
"""
self._not_after = not_after
@property
def validity_period(self):
"""Gets the validity_period of this ShowCertificateResponse.
Certificate validity period, in months.
:return: The validity_period of this ShowCertificateResponse.
:rtype: int
"""
return self._validity_period
@validity_period.setter
def validity_period(self, validity_period):
"""Sets the validity_period of this ShowCertificateResponse.
Certificate validity period, in months.
:param validity_period: The validity_period of this ShowCertificateResponse.
:type: int
"""
self._validity_period = validity_period
@property
def validation_method(self):
"""Gets the validation_method of this ShowCertificateResponse.
Domain validation method. Possible values: DNS, FILE, EMAIL.
:return: The validation_method of this ShowCertificateResponse.
:rtype: str
"""
return self._validation_method
@validation_method.setter
def validation_method(self, validation_method):
"""Sets the validation_method of this ShowCertificateResponse.
Domain validation method. Possible values: DNS, FILE, EMAIL.
:param validation_method: The validation_method of this ShowCertificateResponse.
:type: str
"""
self._validation_method = validation_method
@property
def domain_type(self):
"""Gets the domain_type of this ShowCertificateResponse.
Domain type. Possible values: - SINGLE_DOMAIN: single domain - WILDCARD: wildcard - MULTI_DOMAIN: multiple domains
:return: The domain_type of this ShowCertificateResponse.
:rtype: str
"""
return self._domain_type
@domain_type.setter
def domain_type(self, domain_type):
"""Sets the domain_type of this ShowCertificateResponse.
Domain type. Possible values: - SINGLE_DOMAIN: single domain - WILDCARD: wildcard - MULTI_DOMAIN: multiple domains
:param domain_type: The domain_type of this ShowCertificateResponse.
:type: str
"""
self._domain_type = domain_type
@property
def domain(self):
"""Gets the domain of this ShowCertificateResponse.
Domain name bound to the certificate.
:return: The domain of this ShowCertificateResponse.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this ShowCertificateResponse.
Domain name bound to the certificate.
:param domain: The domain of this ShowCertificateResponse.
:type: str
"""
self._domain = domain
@property
def sans(self):
"""Gets the sans of this ShowCertificateResponse.
Additional domain names (SANs) bound to the certificate.
:return: The sans of this ShowCertificateResponse.
:rtype: str
"""
return self._sans
@sans.setter
def sans(self, sans):
"""Sets the sans of this ShowCertificateResponse.
Additional domain names (SANs) bound to the certificate.
:param sans: The sans of this ShowCertificateResponse.
:type: str
"""
self._sans = sans
@property
def domain_count(self):
"""Gets the domain_count of this ShowCertificateResponse.
Number of domain names that can be bound to the certificate.
:return: The domain_count of this ShowCertificateResponse.
:rtype: int
"""
return self._domain_count
@domain_count.setter
def domain_count(self, domain_count):
"""Sets the domain_count of this ShowCertificateResponse.
Number of domain names that can be bound to the certificate.
:param domain_count: The domain_count of this ShowCertificateResponse.
:type: int
"""
self._domain_count = domain_count
@property
def wildcard_count(self):
"""Gets the wildcard_count of this ShowCertificateResponse.
Number of additional domain names that can be bound to the certificate.
:return: The wildcard_count of this ShowCertificateResponse.
:rtype: int
"""
return self._wildcard_count
@wildcard_count.setter
def wildcard_count(self, wildcard_count):
"""Sets the wildcard_count of this ShowCertificateResponse.
Number of additional domain names that can be bound to the certificate.
:param wildcard_count: The wildcard_count of this ShowCertificateResponse.
:type: int
"""
self._wildcard_count = wildcard_count
@property
def authentification(self):
"""Gets the authentification of this ShowCertificateResponse.
Domain ownership verification information. For details, see the data structure description of the Authentification field.
:return: The authentification of this ShowCertificateResponse.
:rtype: list[Authentification]
"""
return self._authentification
@authentification.setter
def authentification(self, authentification):
"""Sets the authentification of this ShowCertificateResponse.
Domain ownership verification information. For details, see the data structure description of the Authentification field.
:param authentification: The authentification of this ShowCertificateResponse.
:type: list[Authentification]
"""
self._authentification = authentification
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowCertificateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 2.359375 | 2 |
code/functions/segment/__init__.py | a9w/Fat2_polarizes_WAVE | 0 | 12793853 | """Functions for segmenting images."""
from .interface import (
interface_endpoints_mask,
interface_endpoints_coords,
interface_shape_edge_method,
trim_interface,
refine_junction,
edge_between_neighbors,
)
from .timelapse import (
segment_epithelium_timelapse,
largest_object_mask_timelapse,
segment_hemijunctions_timelapse,
)
from .tissue import (
epithelium_watershed,
largest_object_mask,
select_border_adjacent,
select_in_field,
select_mask_adjacent,
segment_hemijunctions,
cell_edges_mask,
cell_interiors_mask,
cell_vertices_mask,
neighbor_array_nr,
)
__all__ = [
"interface_endpoints_mask",
"interface_endpoints_coords",
"interface_shape_edge_method",
"trim_interface",
"refine_junction",
"edge_between_neighbors",
"segment_epithelium_timelapse",
"largest_object_mask_timelapse",
"segment_hemijunctions_timelapse",
"epithelium_watershed",
"largest_object_mask",
"select_border_adjacent",
"select_in_field",
"select_mask_adjacent",
"segment_hemijunctions",
"cell_edges_mask",
"cell_interiors_mask",
"cell_vertices_mask",
"neighbor_array_nr"
]
| 1.84375 | 2 |
usecases/get_network.py | HathorNetwork/hathor-explorer-service | 0 | 12793854 | from typing import Optional
from gateways.node_gateway import NodeGateway
class GetNetwork:
def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None:
self.node_gateway = node_gateway or NodeGateway()
def get(self) -> Optional[dict]:
network = self.node_gateway.get_network()
if network is not None:
return network.to_dict()
return None
| 2.796875 | 3 |
src/pytiger/utils/plugins.py | tigercomputing/pytiger | 1 | 12793855 | # -*- coding: utf-8 -*-
"""
A simple plugin loading mechanism
"""
# Copyright © 2015 Tiger Computing Ltd
# This file is part of pytiger and distributed under the terms
# of a BSD-like license
# See the file COPYING for details
# Idea borrowed and adapted from:
# https://copyninja.info/blog/dynamic-module-loading.html
# http://stackoverflow.com/a/3381582
import imp
import os
def load(plugin_dir, package=__name__):
"""
Load Python modules and packages from a directory.
This function will list the contents of ``plugin_dir`` and load any Python
modules (files ending ``.py``) or packages (directories with a
``__init__.py`` file) found within it. Sub-directories are not searched.
Modules are compiled as they are loaded, if necessary.
Plugins are loaded within a package name as supplied to this function in
the optional ``package`` parameter. If this is not provided, this defaults
to ``pytiger.utils.plugins``. The module name supplied in ``package`` must
already be known to Python (i.e. in ``sys.modules``).
The function returns a list of python module objects, one per loaded module
or package.
:param str plugin_dir: The path to the directory to load plugins from.
:param str package: Python package to load the plugins into.
.. versionadded:: 1.1.0
"""
plugin_dir = os.path.realpath(plugin_dir)
# Discover the list of plugins
plugins = []
for dirent in os.listdir(plugin_dir):
# skip __init__.py
if dirent.startswith('__'):
continue
# Load .py files as plugins
if dirent.endswith('.py'):
plugins.append(os.path.splitext(dirent)[0])
continue
# Load directories containing __init__.py
full_path = os.path.join(plugin_dir, dirent)
if os.path.isdir(full_path):
if os.path.isfile(os.path.join(full_path, '__init__.py')):
plugins.append(dirent)
# Now load the plugin modules
modules = []
for plugin in plugins:
f, path, desc = imp.find_module(plugin, [plugin_dir])
module = imp.load_module(package + '.' + plugin, f, path, desc)
modules.append(module)
return modules
| 2.578125 | 3 |
current_playing.py | BishoyAbdelmalik/Clippy-Server | 0 | 12793856 | import asyncio
import os
import json
from winrt.windows.media.control import \
GlobalSystemMediaTransportControlsSessionManager as MediaManager
from winrt.windows.storage.streams import \
DataReader, Buffer, InputStreamOptions
async def get_media_info():
sessions = await MediaManager.request_async()
# This source_app_user_model_id check and if statement is optional
# Use it if you want to only get a certain player/program's media
# (e.g. only chrome.exe's media not any other program's).
# To get the ID, use a breakpoint() to run sessions.get_current_session()
# while the media you want to get is playing.
# Then set TARGET_ID to the string this call returns.
current_session = sessions.get_current_session()
if current_session: # there needs to be a media session running
info = await current_session.try_get_media_properties_async()
# song_attr[0] != '_' ignores system attributes
info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'}
# converts winrt vector to list
info_dict['genres'] = list(info_dict['genres'])
# create the current_media_info dict with the earlier code first
thumb_stream_ref = info_dict['thumbnail']
try:
filename="./static/media_thumb.jpg"
if os.path.exists(filename):
os.remove(filename)
# 5MB (5 million byte) buffer - thumbnail unlikely to be larger
thumb_read_buffer = Buffer(5000000)
await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer)
buffer_reader = DataReader.from_buffer(thumb_read_buffer)
byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length)
if not os.path.exists('static'):
os.makedirs('static')
filename="./static/media_thumb.jpg"
if len(bytearray(byte_buffer)) != 0:
with open(filename, 'wb+') as fobj:
fobj.write(bytearray(byte_buffer))
info_dict["thumbnail"]=filename[1:]
except Exception as e:
# print(e)
# print("something went wrong with getting thumbnail")
info_dict["thumbnail"]=" "
return info_dict
return None
async def read_stream_into_buffer(stream_ref, buffer):
readable_stream = await stream_ref.open_read_async()
await readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)
if __name__ == '__main__':
print(json.dumps(asyncio.run(get_media_info()))) | 2.609375 | 3 |
HLTrigger/Configuration/python/HLT_75e33/paths/L1T_DoubleTkMuon_15_7_cfi.py | PKUfudawei/cmssw | 1 | 12793857 | import FWCore.ParameterSet.Config as cms
#from ..modules.hltL1TkMuons_cfi import *
from ..modules.hltDoubleMuon7DZ1p0_cfi import *
from ..modules.hltL1TkDoubleMuFiltered7_cfi import *
from ..modules.hltL1TkSingleMuFiltered15_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
L1T_DoubleTkMuon_15_7 = cms.Path(
HLTBeginSequence +
# hltL1TkMuons +
hltL1TkDoubleMuFiltered7 +
hltL1TkSingleMuFiltered15 +
hltDoubleMuon7DZ1p0 +
HLTEndSequence
)
| 0.917969 | 1 |
src/autogen/eigs.py | ldXiao/polyfem | 228 | 12793858 | from sympy import *
from sympy.matrices import *
import os
import re
import argparse
# local
import pretty_print
def sqr(a):
return a * a
def trunc_acos(x):
tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True))
return tmp.subs(x, x)
def eigs_2d(mat):
a = mat[0, 0] + mat[1, 1]
delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2
tmp1 = Piecewise(
(a / 2, delta < 1e-10),
((a - sqrt(delta)) / 2.0, True)
)
tmp2 = Piecewise(
(a / 2, delta < 1e-10),
((a + sqrt(delta)) / 2.0, True)
)
return tmp1.subs(delta, delta), tmp2.subs(delta, delta)
def eigs_3d(mat):
b = mat[0] + mat[4] + mat[8]
t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5])
p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8]))
p += 3.0 * t
q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5])
q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8]))
q += 9.0 * b * t
q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8])
q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1]))
delta = trunc_acos(0.5 * q / sqrt(p * sqr(p)))
p = 2.0 * sqrt(p)
tmp1 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos(delta / 3.0)) / 3.0, True)
)
tmp2 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta + 2.0 * pi) / 3.0)) / 3.0, True)
)
tmp3 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta - 2.0 * pi) / 3.0)) / 3.0, True)
)
return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("output", type=str, help="path to the output folder")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
dims = [2, 3]
cpp = "#include <polyfem/auto_eigs.hpp>\n\n\n"
hpp = "#pragma once\n\n#include <Eigen/Dense>\n\n"
cpp = cpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "template<typename T>\nT int_pow(T val, int exp) { T res = exp <=0 ? T(0.): val; for(int i = 1; i < exp; ++i) res = res*val; return res; }\n\n"
lambdaa = Symbol('lambda', real=True)
for dim in dims:
print("processing " + str(dim))
M = zeros(dim, dim)
for i in range(0, dim):
for j in range(0, dim):
if i <= j:
M[i, j] = Symbol('m[' + str(i) + ',' + str(j) + ']', real=True)
else:
M[i, j] = Symbol('m[' + str(j) + ',' + str(i) + ']', real=True)
if dim == 2:
lambdas = eigs_2d(M)
else:
lambdas = eigs_3d(M)
# lambdas = simplify(lambdas)
c99 = pretty_print.C99_print(lambdas)
c99 = re.sub(r"m\[(\d{1}),(\d{1})\]", r'm(\1,\2)', c99)
c99 = re.sub(r"result_(\d{1})", r'res(\1)', c99)
c99 = c99.replace("0.0", "T(0)")
c99 = c99.replace(" M_PI", " T(M_PI)")
signature = "template<typename T>\nvoid eigs_" + str(dim) + "d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, "
signature += "Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)"
hpp = hpp + signature + " {\nres.resize(" + str(dim) + ");\n" + c99 + "\n}\n\n"
cpp = cpp + "\n"
hpp = hpp + "\n"
cpp = cpp + "\n}}\n"
hpp = hpp + "\n}}\n"
path = os.path.abspath(args.output)
print("saving...")
with open(os.path.join(path, "auto_eigs.cpp"), "w") as file:
file.write(cpp)
with open(os.path.join(path, "auto_eigs.hpp"), "w") as file:
file.write(hpp)
print("done!")
| 2.5625 | 3 |
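The generator above emits C++ from closed-form 2x2 and 3x3 symmetric eigenvalue expressions. A quick numerical check of the 2x2 formula, independent of the generated code and using an arbitrary example matrix, is sketched below.

```python
# Sanity check of the 2x2 closed form: trace/discriminant formula vs. numpy.
import numpy as np

m = np.array([[2.0, 1.0],
              [1.0, 3.0]])
a = m[0, 0] + m[1, 1]
delta = (m[0, 0] - m[1, 1]) ** 2 + 4 * m[0, 1] ** 2
closed_form = sorted([(a - delta ** 0.5) / 2.0, (a + delta ** 0.5) / 2.0])
print(np.allclose(closed_form, np.linalg.eigvalsh(m)))  # True
```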
meridian/channels/gallbladder.py | sinotradition/meridian | 5 | 12793859 | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
from meridian.acupoints import tongziliao232
from meridian.acupoints import tinghui14
from meridian.acupoints import shangguan41
from meridian.acupoints import heyan24
from meridian.acupoints import xuanlu22
from meridian.acupoints import xuanli22
from meridian.acupoints import qubin14
from meridian.acupoints import shuaigu43
from meridian.acupoints import tianchong11
from meridian.acupoints import fubai22
from meridian.acupoints import touqiaoyin241
from meridian.acupoints import wangu23
from meridian.acupoints import benshen32
from meridian.acupoints import yangbai22
from meridian.acupoints import toulinqi221
from meridian.acupoints import muchuang41
from meridian.acupoints import zhengying42
from meridian.acupoints import chengling22
from meridian.acupoints import naokong31
from meridian.acupoints import fengchi12
from meridian.acupoints import jianjing13
from meridian.acupoints import yuanye14
from meridian.acupoints import zhejin21
from meridian.acupoints import riyue44
from meridian.acupoints import jingmen12
from meridian.acupoints import daimai44
from meridian.acupoints import wushu31
from meridian.acupoints import weidao24
from meridian.acupoints import juliao12
from meridian.acupoints import huantiao24
from meridian.acupoints import fengshi14
from meridian.acupoints import zhongdu12
from meridian.acupoints import xiyangguan121
from meridian.acupoints import yanglingquan222
from meridian.acupoints import yangjiao21
from meridian.acupoints import waiqiu41
from meridian.acupoints import guangming12
from meridian.acupoints import yangfu23
from meridian.acupoints import xuanzhong21
from meridian.acupoints import qiuxu11
from meridian.acupoints import zulinqi224
from meridian.acupoints import diwuhui434
from meridian.acupoints import xiaxi21
from meridian.acupoints import zuqiaoyin241
SPELL=u'zúshàoyángdǎnjīng'
CN=u'足少阳胆经'
ABBR=u'GB'
NAME='gallbladder'
FULLNAME='GallbladderChannelofFoot-Shaoyang'
SEQ=8
if __name__ == '__main__':
pass
| 1.359375 | 1 |
PopStats/model.py | haoruilee/DeepSets | 213 | 12793860 | import torch
import torch.nn as nn
import torch.nn.functional as F
from loglinear import LogLinear  # required by DeepSet3
class DeepSet(nn.Module):
def __init__(self, in_features, set_features=50):
super(DeepSet, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ELU(inplace=True),
nn.Linear(50, 100),
nn.ELU(inplace=True),
nn.Linear(100, set_features)
)
self.regressor = nn.Sequential(
nn.Linear(set_features, 30),
nn.ELU(inplace=True),
nn.Linear(30, 30),
nn.ELU(inplace=True),
nn.Linear(30, 10),
nn.ELU(inplace=True),
nn.Linear(10, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x = self.feature_extractor(x)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'Feature Exctractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet1(nn.Module):
def __init__(self, in_features, set_features=512):
super(DeepSet1, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, set_features)
)
self.regressor = nn.Sequential(
nn.Linear(set_features, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x = self.feature_extractor(x)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'Feature Exctractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet2(nn.Module):
def __init__(self, in_features, set_features=256):
super(DeepSet2, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 256),
nn.ELU(inplace=True),
nn.Linear(256, 256),
nn.ELU(inplace=True),
nn.Linear(256, set_features)
)
self.log_feature_extractor = nn.Sequential(
nn.Linear(in_features, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 256),
nn.ReLU(inplace=True),
nn.Linear(256, set_features),
nn.ReLU(inplace=True)
)
self.regressor = nn.Sequential(
nn.Linear(set_features*2, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x1 = self.feature_extractor(x)
x2 = self.log_feature_extractor(x) + 0.001
x2 = x2.log()
x = torch.cat((x1, x2), 2)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'Feature Exctractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet3(nn.Module):
def __init__(self, in_features, set_features=50):
super(DeepSet3, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ELU(inplace=True),
nn.Linear(50, 50),
nn.ELU(inplace=True),
nn.Linear(50, set_features)
)
self.log_feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ReLU(inplace=True),
nn.Linear(50, 50),
nn.ReLU(inplace=True),
nn.Linear(50, set_features),
nn.ReLU(inplace=True)
)
self.l1 = nn.Linear(set_features*2, 30)
self.l2 = LogLinear(set_features*2, 30)
self.lp = nn.ReLU()
self.regressor = nn.Sequential(
#nn.Linear(set_features*2, 512),
nn.ELU(inplace=True),
nn.Linear(60, 30),
nn.ELU(inplace=True),
nn.Linear(30, 10),
nn.ELU(inplace=True),
nn.Linear(10, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x1 = self.feature_extractor(x)
x2 = self.log_feature_extractor(x) + 0.001
x2 = x2.log()
x = torch.cat((x1, x2), 2)
x = x.sum(dim=1)
x1 = self.l1(x)
x2 = self.lp(x) + 0.001
x2 = self.l2(x2)
x = torch.cat((x1, x2), 1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'Feature Exctractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
| 2.5 | 2 |
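A forward-pass sketch for the `DeepSet` variant above; the batch and set sizes are arbitrary and only illustrate the permutation-invariant sum over the set dimension.

```python
# Sketch: random "sets" through DeepSet; element features are extracted, summed, regressed.
import torch

model = DeepSet(in_features=2, set_features=50)
x = torch.rand(8, 10, 2)        # 8 sets, 10 elements each, 2 features per element
y = model(x)                    # sum over dim=1 makes the output order-invariant
print(y.shape)                  # torch.Size([8, 1])
```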
global_finprint/bruv/models.py | GlobalFinPrint/global_finprint | 0 | 12793861 | from decimal import Decimal
from collections import Counter
from django.contrib.gis.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from global_finprint.annotation.models.observation import Observation, MasterRecord
from global_finprint.annotation.models.video import Video, Assignment
from global_finprint.core.version import VersionInfo
from global_finprint.core.models import AuditableModel
from global_finprint.trip.models import Trip
from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity
from mptt.models import MPTTModel, TreeForeignKey
from django.contrib.postgres.fields import ArrayField, JSONField
# todo: move some of these out of the db?
EQUIPMENT_BAIT_CONTAINER = {
('B', 'Bag'),
('C', 'Cage'),
}
CURRENT_DIRECTION = {
('N', 'North'),
('NE', 'Northeast'),
('E', 'East'),
('SE', 'Southeast'),
('S', 'South'),
('SW', 'Southwest'),
('W', 'West'),
('NW', 'Northwest'),
}
TIDE_CHOICES = {
('F', 'Flood'),
('E', 'Ebb'),
('S', 'Slack'),
('S2F', 'Slack to Flood'),
('S2E', 'Slack to Ebb'),
}
SURFACE_CHOP_CHOICES = {
('L', 'Light'),
('M', 'Medium'),
('H', 'Heavy'),
}
BAIT_TYPE_CHOICES = {
('CHP', 'Chopped'),
('CRS', 'Crushed'),
('WHL', 'Whole'),
}
VISIBILITY_CHOICES = {
('V0-2', 'V0-2'),
('V2-4', 'V2-4'),
('V4-6', 'V4-6'),
('V6-8', 'V6-8'),
('V8-10', 'V8-10'),
('V10+', 'V10+')
}
FIELD_OF_VIEW_CHOICES = {
('FU', 'Facing Up'),
('FD', 'Facing Down'),
('L', 'Limited'),
('O', 'Open')
}
class BaitContainer(models.Model):
# starting seed: cage, bag
type = models.CharField(max_length=32)
def __str__(self):
return u"{0}".format(self.type)
class FrameType(models.Model):
# starting seed: rebar, stainless rebar, PVC, mixed
type = models.CharField(max_length=32)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return u"{0}".format(self.type)
class Equipment(AuditableModel):
camera = models.CharField(max_length=32)
stereo = models.BooleanField(default=False)
frame_type = models.ForeignKey(to=FrameType)
container = models.ForeignKey(to=BaitContainer)
arm_length = models.PositiveIntegerField(null=True, help_text='centimeters')
camera_height = models.PositiveIntegerField(null=True, help_text='centimeters')
def __str__(self):
return u"{0} / {1} / {2}{3}".format(self.frame_type.type,
self.container.type,
self.camera, ' (Stereo)' if self.stereo else '')
class Meta:
verbose_name_plural = "Equipment"
ordering = ['frame_type__type', 'container__type', 'camera']
class EnvironmentMeasure(AuditableModel):
water_temperature = models.DecimalField(null=True, blank=True,
max_digits=4, decimal_places=1,
help_text='C') # C
salinity = models.DecimalField(null=True, blank=True,
max_digits=4, decimal_places=2,
help_text='ppt') # ppt .0
conductivity = models.DecimalField(null=True, blank=True,
max_digits=8, decimal_places=2,
help_text='S/m') # S/m .00
dissolved_oxygen = models.DecimalField(null=True, blank=True,
max_digits=8, decimal_places=1)
current_flow = models.DecimalField(null=True, blank=True,
max_digits=5, decimal_places=2,
help_text='m/s') # m/s .00
current_direction = models.CharField(max_length=2,
null=True, blank=True,
choices=CURRENT_DIRECTION,
help_text='compass direction') # eight point compass
tide_state = models.CharField(max_length=3,
null=True, blank=True,
choices=TIDE_CHOICES)
estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort')
measured_wind_speed = models.IntegerField(null=True, blank=True, help_text='kts')
wind_direction = models.CharField(max_length=2,
null=True, blank=True,
choices=CURRENT_DIRECTION,
help_text='compass direction') # eight point compass
cloud_cover = models.IntegerField(null=True, blank=True, help_text='%') # percentage
surface_chop = models.CharField(max_length=1,
null=True, blank=True,
choices=SURFACE_CHOP_CHOICES)
def __str__(self):
return u'{0} {1}'.format('Env measure for',str(self.set))
class Bait(AuditableModel):
description = models.CharField(max_length=32, help_text='1kg')
type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES)
oiled = models.BooleanField(default=False, help_text='20ml menhaden oil')
def __str__(self):
return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '')
class Meta:
unique_together = ('description', 'type', 'oiled')
# needed for SetTag#get_choices because python doesn't have this somehow (!!!)
def flatten(x):
if type(x) is list:
return [a for i in x for a in flatten(i)]
else:
return [x]
class SetTag(MPTTModel):
name = models.CharField(max_length=50, unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(
default=True,
help_text='overridden if parent is inactive')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return u"{0}".format(self.name)
@classmethod
def get_choices(cls, node=None):
if node is None:
nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)]
return [(node.pk, node.name) for node in flatten(nodes)]
elif node.is_leaf_node():
return node
else:
return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)]
class BenthicCategory(MPTTModel):
name = models.CharField(max_length=50, unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(
default=True,
help_text='overridden if parent is inactive')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return u"{0}".format(self.name)
class Meta:
verbose_name_plural = 'benthic categories'
class Set(AuditableModel):
# suggested code pattern:
# [site.code][reef.code]_[set number within reef]
code = models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx', null=True, blank=True)
set_date = models.DateField()
coordinates = models.PointField(null=True)
latitude = models.DecimalField(max_digits=12, decimal_places=8)
longitude = models.DecimalField(max_digits=12, decimal_places=8)
drop_time = models.TimeField()
haul_date = models.DateField(null=True, blank=True)
haul_time = models.TimeField(null=True, blank=True)
depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12,
validators=[MinValueValidator(Decimal('0.01'))])
comments = models.TextField(null=True, blank=True)
message_to_annotators = models.TextField(null=True, blank=True)
tags = models.ManyToManyField(to=SetTag)
current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L')
current_flow_instrumented = models.DecimalField(null=True, blank=True,
max_digits=5, decimal_places=2,
help_text='m/s') # m/s .00
bruv_image_url = models.CharField(max_length=200, null=True, blank=True)
splendor_image_url = models.CharField(max_length=200, null=True, blank=True)
benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue')
# new fields
substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
visibility = models.CharField(db_column='visibility_str', max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES)
field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES)
custom_field_value = JSONField(db_column='custom_fields', null=True)
# todo: need some form changes here ...
bait = models.ForeignKey(Bait, null=True)
equipment = models.ForeignKey(Equipment)
reef_habitat = models.ForeignKey(ReefHabitat, blank=True)
trip = models.ForeignKey(Trip)
drop_measure = models.OneToOneField(
EnvironmentMeasure,
on_delete=models.CASCADE,
null=True,
related_name='drop_parent_set')
haul_measure = models.OneToOneField(
EnvironmentMeasure,
on_delete=models.CASCADE,
null=True,
related_name='haul_parent_set')
video = models.OneToOneField(
Video,
on_delete=models.CASCADE,
null=True,
related_name='set'
)
bulk_loaded = models.BooleanField(default=False)
class Meta:
unique_together = ('trip', 'code')
@property
def environmentmeasure_set(self):
return [x for x in [self.haul_measure, self.drop_measure] if x is not None]
@property
def next_by_code(self):
return self.trip.get_next_set_by_code(self.code)
def save(self, *args, **kwargs):
# todo: we're assuming the input is latitude & longitude! this should be checked!
self.coordinates = Point(float(self.longitude), float(self.latitude))
if not self.code: # set code if it hasn't been set
self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code)
super(Set, self).save(*args, **kwargs)
self.refresh_from_db()
if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code):
next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3)
self.code = self.code.replace('_xxx', u'_{}'.format(next_id))
super(Set, self).save(*args, **kwargs)
def reef(self):
return self.reef_habitat.reef
def get_absolute_url(self):
return reverse('set_update', args=[str(self.id)])
def observations(self):
if self.video:
return Observation.objects.filter(assignment__in=self.video.assignment_set.all())
def habitat_filename(self, image_type):
server_env = VersionInfo.get_server_env()
return '/{0}/{1}/{2}/{3}.png'.format(server_env,
self.trip.code,
self.code,
image_type)
# todo: "property-ize" this?
def master(self, project=1):
try:
return MasterRecord.objects.get(set=self, project_id=project)
except MasterRecord.DoesNotExist:
return None
def assignment_counts(self, project=1):
status_list = {'Total': 0}
if self.video:
status_list.update(Counter(Assignment.objects.filter(
video=self.video, project=project).values_list('status__id', flat=True)))
status_list['Total'] = sum(status_list.values())
return status_list
def required_fields(self):
# need to make this data-driven, not hard-coded field choices
# currently required:
# 1) visibility
# 2) current flow (either)
# 3) substrate
# 4) substrate complexity
return bool(self.visibility
and (self.current_flow_estimated or self.current_flow_instrumented))
def completed(self):
# we consider the following for "completion":
# 1) complete annotations have been promoted into a master
# 2) a master annotation record has been completed
# 3) other 'required' fields have been completed (see above)
master = self.master()
return master \
and (master.status.is_finished) \
and self.required_fields()
def __str__(self):
return u"{0}_{1}".format(self.trip.code, self.code)
class BenthicCategoryValue(models.Model):
set = models.ForeignKey(Set)
benthic_category = TreeForeignKey(BenthicCategory)
value = models.IntegerField()
| 1.648438 | 2 |
connector/fyle_integrations_platform_connector/apis/reimbursements.py | fylein/fyle-integrations-platform-connector | 0 | 12793862 | from .base import Base
from apps.fyle.models import Reimbursement
class Reimbursements(Base):
"""Class for Reimbursements APIs."""
def __construct_query_params(self) -> dict:
"""
Constructs the query params for the API call.
:return: dict
"""
last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id)
updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None
query_params = {'order': 'updated_at.desc'}
if updated_at:
query_params['updated_at'] = updated_at
return query_params
def __get_all_generator(self):
"""
Returns the generator for retrieving data from the API.
:return: Generator
"""
query_params = self.__construct_query_params()
return self.connection.list_all(query_params)
def search_reimbursements(self, query_params):
"""
Get reimbursements filtered by query parameters
:return: Generator
"""
query_params['order'] = 'updated_at.desc'
return self.connection.list_all(query_params)
def bulk_post_reimbursements(self, data):
"""
Post reimbursements in bulk
"""
payload = {
'data': data
}
return self.connection.bulk_post_reimbursements(payload)
def sync(self):
"""
Syncs the latest API data to DB.
"""
generator = self.__get_all_generator()
for items in generator:
Reimbursement.create_or_update_reimbursement_objects(items['data'], self.workspace_id)
| 2.140625 | 2 |
query-csv-pandemic.py | puregome/queries | 0 | 12793863 | #!/usr/bin/env python3
# query-csv-pandemic.py: extract pandemic-related tweets from csv file at stdin
# usage: gunzip -c file.csv.gz | python3 query-csv-pandemic.py
# 20200525 erikt(at)xs4all.nl
import csv
import re
import sys
TOPICQUERY = "corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol"
PANDEMICQUERY = "|".join([TOPICQUERY, r'virus|besmet|ziekenhui|\bic\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+
r'mondkapje|quarantaine|\bwho\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+
r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin'])
DISTANCEQUERY = "1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter"
LOCKDOWNQUERY = "lock.down|lockdown"
VACCINQUERY = "vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject"
TESTQUERY = r'\btest|getest|sneltest|pcr'
CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\bcodes\b|2g|3g|1g|apartheid)'
QUERY = "|".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY])
TEXT = "text"
csvreader = csv.DictReader(sys.stdin)
csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames)
csvwriter.writeheader()
for row in csvreader:
if re.search(QUERY,row[TEXT],flags=re.IGNORECASE):
csvwriter.writerow(row)
| 2.421875 | 2 |
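A self-contained sketch of the same regex filter applied to an in-memory CSV; the two example tweets are invented and only show which rows survive the `QUERY` pattern.

```python
# Sketch: filtering made-up rows with the QUERY regex defined above.
import csv, io, re

sample = io.StringIO("id,text\n1,nieuwe lockdown aangekondigd\n2,mooi weer vandaag\n")
reader = csv.DictReader(sample)
kept = [row["id"] for row in reader
        if re.search(QUERY, row["text"], flags=re.IGNORECASE)]
print(kept)  # ['1']  ("lockdown" matches the LOCKDOWNQUERY part of QUERY)
```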
test/hummingbot/core/data_type/test_trade_fee.py | pecuniafinance/hummingbot | 542 | 12793864 | from decimal import Decimal
from unittest import TestCase
from hummingbot.core.data_type.common import TradeType, PositionAction
from hummingbot.core.data_type.in_flight_order import TradeUpdate
from hummingbot.core.data_type.trade_fee import (
AddedToCostTradeFee,
DeductedFromReturnsTradeFee,
TokenAmount,
TradeFeeBase,
TradeFeeSchema,
)
class TradeFeeTests(TestCase):
def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=True,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_sell(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = None
schema.buy_percent_fee_deducted_from_returns = True
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_opening_positions(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = "HBOT"
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_added_to_cost_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": AddedToCostTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_added_to_cost_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_deducted_from_returns_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": DeductedFromReturnsTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_deducted_from_returns_json_deserialization(self):
token_amount = TokenAmount(token="CO<PASSWORD>", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
# That forces the logic to need the convertion rate if the fee amount is calculated
fee = AddedToCostTradeFee(percent=Decimal("0"), percent_token="CO<PASSWORD>")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
# That forces the logic to need the convertion rate if the fee amount is calculated
fee = DeductedFromReturnsTradeFee(percent=Decimal("0"), percent_token="CO<PASSWORD>")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
class TokenAmountTests(TestCase):
def test_json_serialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
expected_json = {
"token": "HBOT-COINALPHA",
"amount": "1000.50",
}
self.assertEqual(expected_json, amount.to_json())
def test_json_deserialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
self.assertEqual(amount, TokenAmount.from_json(amount.to_json()))
class TradeUpdateTests(TestCase):
def test_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
expected_json = trade_update._asdict()
expected_json.update({
"fill_price": "1000.11",
"fill_base_amount": "2",
"fill_quote_amount": "2000.22",
"fee": fee.to_json(),
})
self.assertEqual(expected_json, trade_update.to_json())
def test_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="CO<PASSWORD>",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
self.assertEqual(trade_update, TradeUpdate.from_json(trade_update.to_json()))
| 2.515625 | 3 |
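A short sketch that mirrors the JSON round-trip the tests above assert, runnable by hand inside a Hummingbot environment; the token names are placeholders taken from the tests.

```python
# Sketch: serialize a fee and restore it, as in test_deducted_from_returns_json_deserialization.
fee = DeductedFromReturnsTradeFee(
    percent=Decimal("0.5"),
    percent_token="COINALPHA",
    flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20.6"))],
)
restored = TradeFeeBase.from_json(fee.to_json())
print(restored == fee)  # True
```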
deepinsight_iqa/diqa/utils/np_imgutils.py | sandyz1000/deepinsight-iqa | 2 | 12793865 | import math
import tensorflow as tf
import cv2
import numpy as np
from scipy import signal
def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray:
"""
Normalize the input image to a given range set by min and max parameter
Args:
image ([type]): [description]
new_min ([type], optional): [description]. Defaults to 0.
new_max ([type], optional): [description]. Defaults to 255.
Returns:
[np.ndarray]: Normalized image
"""
original_dtype = image.dtype
image = image.astype(np.float32)
image_min, image_max = np.min(image), np.max(image)
normalized_image = (new_max - new_min) / (image_max - image_min) * (image - image_min) + new_min
return normalized_image.astype(original_dtype)
def normalize_kernel(kernel: np.array) -> np.ndarray:
return kernel / np.sum(kernel)
def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) -> np.ndarray:
krange = np.arange(kernel_size)
x, y = np.meshgrid(krange, krange)
constant = np.round(kernel_size / 2)
x = x - constant
y = y - constant
kernel = 1 / (2 * math.pi * sigma ** 2) * np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
return normalize_kernel(kernel).astype(dtype)
def gaussian_filter(
image: np.ndarray, kernel_size: int,
sigma: float, dtype=np.float32, strides: int = 1
) -> np.ndarray:
"""
Apply convolution filter to image with gaussian image kernel
    TODO: Verify this method with tensorflow
https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy
Args:
        image (np.ndarray): grayscale input image
        kernel_size (int): size of the square Gaussian kernel
        sigma (float): standard deviation of the Gaussian
        dtype (type, optional): output dtype. Defaults to np.float32.
        strides (int, optional): subsampling stride applied after filtering. Defaults to 1.
    Returns:
        np.ndarray: filtered (and optionally subsampled) image
"""
    kernel = gaussian_kernel2d(kernel_size, sigma)
    if len(image.shape) == 3:
        # drop the trailing channel axis so scipy's 2D convolution can be applied
        image = np.squeeze(image, axis=-1)
    image = image.astype(np.float32)
    image = signal.convolve2d(image, kernel, mode='same')[::strides, ::strides]
    return image.astype(dtype)
def image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray:
    shape = image.shape
    shape = shape[1:3] if len(shape) == 4 else shape[:2]
    return np.asarray(shape, dtype=dtype)
def scale_shape(image: np.ndarray, scale: float):
    shape = image_shape(image, np.float32)
    shape = np.ceil(shape * scale)
    return shape.astype(np.int32)
def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray:
    assert len(image.shape) in (2, 3), 'The image must be of dimension 2 or 3'
    image = image.astype(np.float32)
    rescale_size = scale_shape(image, scale)
    interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC)
    # cv2.resize expects the target size as (width, height)
    rescaled_image = cv2.resize(image, (int(rescale_size[1]), int(rescale_size[0])), interpolation=interpolation)
    return rescaled_image.astype(dtype)
def read_image(filename: str, **kwargs) -> np.ndarray:
mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED)
return cv2.imread(filename, flags=mode)
def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray:
"""
#### Image Normalization
The first step for DIQA is to pre-process the images. The image is converted into grayscale,
and then a low-pass filter is applied. The low-pass filter is defined as:
\begin{align*}
\hat{I} = I_{gray} - I^{low}
\end{align*}
where the low-frequency image is the result of the following algorithm:
1. Blur the grayscale image.
2. Downscale it by a factor of SCALING_FACTOR.
3. Upscale it back to the original size.
The main reasons for this normalization are (1) the Human Visual System (HVS) is not sensitive to changes
in the low-frequency band, and (2) image distortions barely affect the low-frequency component of images.
Arguments:
        image {np.ndarray} -- RGB or grayscale input image
    Returns:
        np.ndarray -- high-frequency (normalized) image
"""
    image = image.astype(np.float32)
    if len(image.shape) == 3 and image.shape[-1] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image_low = gaussian_filter(image, 16, 7 / 6)
    image_low = rescale(image_low, SCALING_FACTOR, interpolation=cv2.INTER_CUBIC)
    height, width = image_shape(image)
    image_low = cv2.resize(image_low, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
    return image - image_low.astype(image.dtype)
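# --- Usage sketch (illustration only, not part of the original module) ---
# A hedged example of how the helpers above could be combined; "sample.jpg" is a
# placeholder path, and cv2 loads images as BGR, hence the channel swap.
if __name__ == "__main__":
    bgr = read_image("sample.jpg", mode=cv2.IMREAD_COLOR)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32)
    high_freq = image_preprocess(rgb)
    print(high_freq.shape, high_freq.dtype)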
| 3.28125 | 3 |
Others/Source/02/2.3/hex_test.py | silence0201/Learn-Python | 1 | 12793866 | <reponame>silence0201/Learn-Python
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Integer literals starting with 0x or 0X are hexadecimal
hex_value1 = 0x13
hex_value2 = 0XaF
print("The value of hexValue1 is:", hex_value1)
print("The value of hexValue2 is:", hex_value2)
# Integer literals starting with 0b or 0B are binary
bin_val = 0b111
print('The value of bin_val is:', bin_val)
bin_val = 0B101
print('The value of bin_val is:', bin_val)
# Integer literals starting with 0o or 0O are octal
oct_val = 0o54
print('The value of oct_val is:', oct_val)
oct_val = 0O17
print('The value of oct_val is:', oct_val)
# Underscores can be used inside numeric literals
one_million = 1_000_000
print(one_million)
price = 234_234_234  # the actual value of price is 234234234
android = 1234_1234  # the actual value of android is 12341234
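# A small additional illustration (not in the original example): the built-in int()
# constructor parses the same prefixes from strings, and hex()/bin()/oct() produce them again.
print(int("0x13", 16), int("0b111", 2), int("0o54", 8))
print(hex(19), bin(7), oct(44))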
| 3.34375 | 3 |
repos/system_upgrade/el7toel8/actors/detectgrubconfigerror/actor.py | adka1408/leapp-repository | 0 | 12793867 | <filename>repos/system_upgrade/el7toel8/actors/detectgrubconfigerror/actor.py
from leapp.actors import Actor
from leapp.libraries.actor.scanner import detect_config_error
from leapp.models import GrubConfigError
from leapp.reporting import Report
from leapp.libraries.common.reporting import report_generic
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class DetectGrubConfigError(Actor):
"""
Check grub configuration for syntax error in GRUB_CMDLINE_LINUX value.
"""
name = 'detect_grub_config_error'
consumes = ()
produces = (Report, GrubConfigError)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
error_detected = detect_config_error('/etc/default/grub')
if error_detected:
report_generic(
title='Syntax error detected in grub configuration',
summary='Syntax error was detected in GRUB_CMDLINE_LINUX value of grub configuration. '
                        'This error can cause boot failures and other issues. '
                        'The error is automatically fixed by the add_upgrade_boot_entry actor.',
severity='low'
)
self.produce(GrubConfigError(error_detected=error_detected))
| 2.484375 | 2 |
mayan/apps/dynamic_search/links.py | Dave360-crypto/mayan-edms | 3 | 12793868 | from django.utils.translation import ugettext_lazy as _
search = {'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}
search_advanced = {'text': _(u'advanced search'), 'view': 'search_advanced', 'famfam': 'zoom_in'}
search_again = {'text': _(u'search again'), 'view': 'search_again', 'famfam': 'arrow_undo'}
| 1.882813 | 2 |
Python/Fluent_Python/chapter3/section7/s7_1.py | sunyunxian/test_lib | 1 | 12793869 | def decorator(func):
def inner():
print("Running inner()")
return inner
@decorator
def target():
print("Running target()")
def p_target():
print("Running target()")
if __name__ == "__main__":
target() # decorator(target)()
    decorator(p_target)  # no output
print(target) # <function decorator.<locals>.inner at 0x000001D9A9692DC0>
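    # Extra illustration (not in the original snippet): the @decorator syntax is just
    # sugar for rebinding the name, so calling the returned wrapper manually works too:
    decorator(p_target)()  # Running inner()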
| 3.234375 | 3 |
Tools/Asynchronous_and_Constant_Input_API_call_checker/async_parse_outfile_aws.py | mlapistudy/ICSE2021_421 | 9 | 12793870 | import sys
import re
from utils.utils import print_writeofd
# First argument is whether or not to proceed with manual checking:
if sys.argv[1] == '-m':
MANUAL_CHECKING = True
elif sys.argv[1] == '-a':
MANUAL_CHECKING = False
else:
print("The first argument must be either -m or -a, see README.md for details")
exit(1)
# Second argument is the output file from async_main_google.py
ifd = open(sys.argv[2], 'r')
# Third argument is the output file for a list of all repos
ofd = open(sys.argv[3], 'w')
# All files number
allfile = 0
# All occurences of found 0 files
no_async = 0
# Determined cases of no parallelism
noparrelism = 0
# Determined cases of no pattern:
nopattern = 0
# Number of exception cases - repo no longer exist
github_exception = 0
# Number of exception cases - processing error
proces_exception = 0
# No retrieve result files
no_retrieve = 0
# Use Lambda function
use_lambda = 0
# Possible parallelism
possible_para = 0
# Determined no parallelism
det_no_para = 0
# Determined parallelism
det_para = 0
# There exists code in between start clause and while clause
between_code = 0
# Determined to be no pattern
det_no_pattern = 0
def get_all_add_up():
return no_async + noparrelism + nopattern + github_exception + proces_exception + no_retrieve + use_lambda + possible_para + det_no_pattern
def scan_block(lines, i, j, keyword):
while i < j:
if keyword in lines[i]:
return True
i += 1
return False
def scan_block_numbers(lines, i, j, keyword):
ret = 0
while i < j:
if keyword in lines[i]:
ret += 1
i += 1
return ret
def print_code(i, j, lines):
while i < j:
if "Nodes in between start statement and while statement" in lines[i]:
i_copy = i
while "------" not in lines[i_copy]:
print(lines[i_copy])
i_copy += 1
break
i += 1
safe_list = ["if response:", "job_id = response['JobId']",
"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']", "'taskStatus': 'inProgress'}", "taskId = response['SynthesisTask']['TaskId']"]
def check_safe_list(string):
for safes in safe_list:
if safes in string:
return True
return False
def judge_code(i, j, lines):
while i < j:
if "Nodes in between start statement and while statement" in lines[i]:
i_copy = i + 1
while "------" not in lines[i_copy]:
if lines[i_copy].isspace():
i_copy += 1
continue
if check_safe_list(lines[i_copy]):
i_copy += 1
continue
if "operation.done" in lines[i_copy] or "operation.result" in lines[i_copy]:
return True
return False
i_copy += 1
return True
i += 1
return False
lines = ifd.readlines()
i = 0
while i < len(lines):
begin = get_all_add_up()
allfile += 1
j = i + 1
while j < len(lines) and lines[j] != "=================================================\n":
j += 1
if j > len(lines):
break
# Now i and j stores the start and end of one search snippet
k = i + 1
# Judge if there is any github exception triggered
if scan_block(lines, i, j, "Other Github Exceptions occurred"):
github_exception += 1
ofd.write("github_exception: {}".format(lines[k]))
i = j
continue
# Judge if there is any other exception triggered
if scan_block(lines, i, j, "EXCEPTION OCCURS"):
proces_exception += 1
ofd.write("process_exception: {}".format(lines[k]))
i = j
continue
# Judge if this is a use lambda function case
# If only relying on auto-tool: this should be a parallelism-used case
if "Use Lambda Function" in lines[j - 1]:
if MANUAL_CHECKING:
print("use_lambda: {}".format(lines[k]))
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use lambda case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("use_lambda (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
elif user == '2':
use_lambda += 1
print_writeofd("use_lambda (use_lambda): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
else:
use_lambda += 1
ofd.write("use_lambda: {}".format(lines[k]))
i = j
continue
# Judge if this is a no pattern identified case
# If only relying on auto-tool: this should be a parallelism-used case
if "NO PATTERN IDENTIFIED" in lines[j - 1]:
if MANUAL_CHECKING:
print("\n\n\n\n\n\n")
print_writeofd("no_pattern: {}".format(lines[k].strip("\n")), ofd)
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use-parallelism case, and enter 3 if this shouldn't count")
user = input()
while user != '1' and user != '2' and user != '3':
print("PRESS 1 OR 2 OR 3, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("no_pattern (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
elif user == '2':
det_para += 1
print_writeofd("no_pattern (parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
# These are for cases where the repo is actually mis-using the API
elif user == '3':
proces_exception += 1
print_writeofd("no_pattern (process_exception): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
else:
nopattern += 1
ofd.write("no_pattern: {}".format(lines[k]))
i = j
continue
# Judge if this is a no use of async case
# Such project should not be counted towards the total count of projects
if "No use of async" in lines[j - 1]:
no_async += 1
ofd.write("no_async: {}".format(lines[k]))
i = j
continue
# Judge if this is a no retrieve result case
# Such project should not be counted towards the total count of projects
if "No retrieve result" in lines[j - 1]:
no_retrieve += 1
ofd.write("no_retrieve: {}".format(lines[k]))
i = j
continue
# At this point there shouldn't be any "operating missing", sanity check:
if scan_block(lines, i, j, "operation") and scan_block(lines, i, j, "missing") and (not scan_block(lines, i, j, "Pattern identified")):
print("Operation missing while it's neither use lambda nor no pattern identified: {}".format(lines[k]))
exit(1)
# Check if needs to prompt users on codes between start and while statement:
if scan_block(lines, i, j, "Nodes in between start statement and while statement"):
# If these two numbers equal then need to prompt users:
if scan_block_numbers(lines, i, j, "Nodes in between start statement and while statement") == scan_block_numbers(lines, i, j, "Pattern identified"):
between_code += 1
if MANUAL_CHECKING:
print("\n\n\n\n\n\n")
print_code(i, j, lines)
print("Please inspect the above. Enter 1 if can proceed, and enter 2 if this is a use_parallelism case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
print_writeofd("code_between (proceeds): {}".format(lines[k].strip('\n')), ofd)
elif user == '2':
det_para += 1
print_writeofd("code_between (parallelism): {}".format(lines[k].strip('\n')), ofd)
i = j
continue
# If not manual checking, then just count this as a no parallelism use case
else:
if not judge_code(i, j, lines):
det_no_pattern += 1
i = j
continue
# Judge if this is a no use of parallelism case
if "No use of parallelism" in lines[j - 1]:
noparrelism += 1
ofd.write("no_parallelism: {}".format(lines[k]))
i = j
continue
while i < j:
if "***" in lines[i]:
i_copy = i
while i_copy < j:
if "BOTH IDENTIFIED IN THE SAME FILE" in lines[i_copy]:
# Only do the following if doing manual checking
if MANUAL_CHECKING:
possible_para += 1
print("\n\n\n\n\n\n")
print(lines[i])
i += 1
while i < j and "========================" not in lines[i]:
print(lines[i])
i += 1
if i != j:
print(lines[i])
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use-parallelism case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("possible_parallelism (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
elif user == '2':
det_para += 1
print_writeofd("possible_parallelism (parallelism): {}".format(lines[k].strip("\n")), ofd)
break
else:
i += 1
while i < j and "========================" not in lines[i]:
i += 1
ofd.write("possible_parallelism: {}".format(lines[k]))
possible_para += 1
break
i_copy += 1
if i_copy == j:
ofd.write("no_parallelism: {}".format(lines[k]))
noparrelism += 1
break
i += 1
i = j
ofd.write("\n\n==================================================================\n")
if not MANUAL_CHECKING:
print_writeofd("{}, Total files searched".format(allfile), ofd)
print_writeofd("BEFORE MANUAL INSPECTION:", ofd)
print_writeofd("{}, No use of Async".format(no_async), ofd)
print_writeofd("{}, Github search exceptions".format(github_exception), ofd)
print_writeofd("{}, Processing exceptions".format(proces_exception), ofd)
print_writeofd("{}, Use of Lambda Function".format(use_lambda), ofd)
print_writeofd("{}, No retrieve result".format(no_retrieve), ofd)
print_writeofd("{}, No pattern identified".format(nopattern + det_no_pattern), ofd)
print_writeofd("{}, No use of parallelism".format(noparrelism), ofd)
print_writeofd("{}, Possible use of parallel cases".format(possible_para), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} NO USE OF PARALELLISM".format(noparrelism), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} PARALELLISM USED".format(possible_para + nopattern + det_no_pattern + use_lambda), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} RELEVANT TOTAL PROJECTS".format(noparrelism + possible_para + nopattern + det_no_pattern + use_lambda), ofd)
elif MANUAL_CHECKING:
print_writeofd("", ofd)
print_writeofd("", ofd)
print_writeofd("After MANUAL INSPECTION:", ofd)
print_writeofd("{}, No use of Async".format(no_async), ofd)
print_writeofd("{}, Github search exceptions".format(github_exception), ofd)
print_writeofd("{}, Processing exceptions".format(proces_exception), ofd)
print_writeofd("{}, Use of Lambda Function".format(use_lambda), ofd)
print_writeofd("{}, No retrieve result".format(no_retrieve), ofd)
print_writeofd("{}, No pattern identified".format(nopattern + det_no_pattern), ofd)
print_writeofd("{}, No use of parallelism".format(noparrelism + det_no_para), ofd)
print_writeofd("{}, Use of parallel cases".format(det_para), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} NO USE OF PARALELLISM".format(noparrelism + det_no_para), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} PARALELLISM USED".format(det_para + use_lambda), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} RELEVANT TOTAL PROJECTS".format(noparrelism + det_no_para + det_para + use_lambda), ofd)
ofd.close() | 2.953125 | 3 |
snippets/python/automation/beep.py | c6401/Snippets | 0 | 12793871 | def beep():
print('\007')
| 1.632813 | 2 |
djmodels/contrib/gis/utils/__init__.py | iMerica/dj-models | 5 | 12793872 | """
This module contains useful utilities for GeoDjango.
"""
from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA
from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA
from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA
from djmodels.core.exceptions import ImproperlyConfigured
try:
# LayerMapping requires DJMODELS_SETTINGS_MODULE to be set,
# and ImproperlyConfigured is raised if that's not the case.
from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA
except ImproperlyConfigured:
pass
| 1.507813 | 2 |
module1-introduction-to-sql/rpg_db_example.py | rgiuffre90/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 12793873 | import sqlite3
def connect_to_db(db_name="rpg_db.sqlite3"):
return sqlite3.connect(db_name)
def execute_query(cursor, query):
cursor.execute(query)
return cursor.fetchall()
GET_CHARACTERS = """
SELECT *
FROM charactercreator_character
"""
CHARACTER_COUNT = """
SELECT COUNT(*)
FROM charactercreator_character
"""
CLASS_COUNT = """
SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric,
(SELECT COUNT(*) FROM charactercreator_fighter) AS fighter,
(SELECT COUNT(*) FROM charactercreator_mage) AS mage,
(SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer,
       (SELECT COUNT(*) FROM charactercreator_thief) AS thief
"""
ITEM_COUNT = """
SELECT COUNT(*)
FROM armory_item
"""
WEP_COUNT = """
SELECT COUNT(*) name
FROM armory_item
INNER JOIN armory_weapon
ON armory_item.item_id = armory_weapon.item_ptr_id
"""
ITEMS_NO_WEPS = """
SELECT(
SELECT COUNT(*)
FROM armory_item
) -
(SELECT COUNT(*)
FROM armory_weapon
)
"""
CHAR_ITEM_COUNT = """
SELECT character_id, COUNT(*)
FROM charactercreator_character_inventory
GROUP BY character_id LIMIT 20;
"""
CHAR_WEP_COUNT = """
SELECT charactercreator_character_inventory.character_id, COUNT(*)
FROM charactercreator_character_inventory
INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY charactercreator_character_inventory.character_id LIMIT 20
"""
AVG_WEAPONS = """
SELECT AVG(num_weapons)
FROM
(
SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons
FROM charactercreator_character_inventory
INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY charactercreator_character_inventory.character_id
)
"""
AVG_ITEMS = """
SELECT AVG(num_items)
FROM
(
SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items
FROM charactercreator_character_inventory
INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id
GROUP BY charactercreator_character_inventory.character_id
)
"""
if __name__ == "__main__":
conn = connect_to_db()
curs = conn.cursor()
char_count = execute_query(curs, CHARACTER_COUNT)
results = execute_query(curs, GET_CHARACTERS)
class_count = execute_query(curs, CLASS_COUNT)
item_count = execute_query(curs, ITEM_COUNT)
wep_count = execute_query(curs, WEP_COUNT)
items_no_weps = execute_query(curs, ITEMS_NO_WEPS)
char_item_count = execute_query(curs, CHAR_ITEM_COUNT)
char_wep_count = execute_query(curs, CHAR_WEP_COUNT)
avg_items = execute_query(curs, AVG_ITEMS)
avg_weapons = execute_query(curs, AVG_WEAPONS)
print(results[0])
print("Character Count:", char_count)
print("Class Count (cleric, fighter, mage, necromancer, theif):", class_count)
print("Item Count", item_count)
print("Weapon Count:", wep_count)
print("Items without Weapons:", items_no_weps)
print("Items per character ID:", char_item_count)
print("Weapons per character ID:", char_wep_count)
print("Average Number of Items Per Character:", avg_items)
print("Average Number of Weapons Per Character:", avg_weapons)
| 3.515625 | 4 |
bugprediction/linear_regression_model.py | HaaLeo/bug-prediction | 0 | 12793874 | <filename>bugprediction/linear_regression_model.py
# ------------------------------------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
import torch.nn as nn
class LinearRegressionModel(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearRegressionModel, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x): #pylint: disable=arguments-differ
prediction = self.linear(x)
return prediction
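# --- Usage sketch (illustration only, not part of the original file) ---
# Fits the model on toy 1-D data; the dimensions, learning rate and number of
# training steps below are arbitrary assumptions for demonstration.
if __name__ == "__main__":
    import torch
    model = LinearRegressionModel(input_dim=1, output_dim=1)
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    x = torch.randn(64, 1)
    y = 3 * x + 0.5
    for _ in range(200):
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
    print("learned weight:", model.linear.weight.item(), "bias:", model.linear.bias.item())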
| 2.484375 | 2 |
main.py | shashankboosi/FakeNewsML | 1 | 12793875 | <filename>main.py
"""
COMP9417
Assignment: Fake news Challenge
Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901)
main.py: Main file for program execution
"""
from src.data_import import FakeNewsData
from src.train_validation_split import DataSplit
from src.preprocess import Preprocess
from src.feature_extraction import Features
from src.models import Models
from src.score import LABELS
from src.utils import input_file, output_file
import scipy.sparse as sp
import os
import time
# Global Variables
trainStancePath = "data/train_stances.csv"
testStancePath = "data/competition_test_stances.csv"
trainBodyPath = "data/train_bodies.csv"
testBodyPath = "data/competition_test_bodies.csv"
# header attributes
primary_id = "Body ID"
stance = "Stance"
body = "articleBody"
headline = "Headline"
base_preprocess_path = "preprocessed_data"
base_feature_path = "final_features"
output = "output"
def target_labels(stances):
labels = []
for i in range(len(stances)):
labels.append(LABELS.index(stances[i][stance]))
return labels
def headlines_bodies(temp_headline, temp_body):
headlines = []
bodies = []
for i in range(len(temp_headline)):
bodies.append(temp_body[int(temp_headline[i][primary_id])])
headlines.append(temp_headline[i][headline])
return headlines, bodies
'''
This file combines all the data mining steps: data importing, splitting,
preprocessing, feature transformation, modelling and visualization.
Check the README for a clear understanding of what is happening.
'''
if __name__ == "__main__":
t0 = time.time()
# Importing the data
train = FakeNewsData(trainStancePath, trainBodyPath)
test = FakeNewsData(testStancePath, testBodyPath)
# Extracting IDs for data splitting
ids = list(train.articleBody.keys())
# The DataSplit generates the train and validation splits according to our split size
print("Data Splitting")
train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8)
train_stances, validation_stances = train_validation_split.split()
# Preprocess the train
print("Start of pre-processing for train")
if not (os.path.exists(base_preprocess_path + "/" + "training_headlines.p") and os.path.exists(
base_preprocess_path + "/" + "training_bodies.p")):
preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody,
preprocess_type="lemma")
train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies()
output_file(train_preprocessed_headlines, base_preprocess_path + "/" + "training_headlines.p")
output_file(train_preprocessed_bodies, base_preprocess_path + "/" + "training_bodies.p")
else:
train_preprocessed_headlines = input_file(base_preprocess_path + "/" + "training_headlines.p")
train_preprocessed_bodies = input_file(base_preprocess_path + "/" + "training_bodies.p")
# Preprocess the validation
print("Start of pre-processing for validation")
if not (os.path.exists(base_preprocess_path + "/" + "validation_headlines.p") and os.path.exists(
base_preprocess_path + "/" + "validation_bodies.p")):
preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody,
preprocess_type="lemma")
validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies()
output_file(validation_preprocessed_headlines, base_preprocess_path + "/" + "validation_headlines.p")
output_file(validation_preprocessed_bodies, base_preprocess_path + "/" + "validation_bodies.p")
else:
validation_preprocessed_headlines = input_file(base_preprocess_path + "/" + "validation_headlines.p")
validation_preprocessed_bodies = input_file(base_preprocess_path + "/" + "validation_bodies.p")
# Preprocess the test
print("Start of pre-processing for test")
if not (os.path.exists(base_preprocess_path + "/" + "test_headlines.p") and os.path.exists(
base_preprocess_path + "/" + "test_bodies.p")):
preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody,
preprocess_type="lemma")
test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies()
output_file(test_preprocessed_headlines, base_preprocess_path + "/" + "test_headlines.p")
output_file(test_preprocessed_bodies, base_preprocess_path + "/" + "test_bodies.p")
else:
test_preprocessed_headlines = input_file(base_preprocess_path + "/" + "test_headlines.p")
test_preprocessed_bodies = input_file(base_preprocess_path + "/" + "test_bodies.p")
# Split headlines and bodies for train, validation and test
train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody)
validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody)
test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody)
if not (os.path.exists(base_feature_path + "/" + "train_features.p") and os.path.exists(
base_feature_path + "/" + "validation_features.p") and os.path.exists(
base_feature_path + "/" + "test_features.p")):
# Feature extraction and combining them for the models
print("Feature extraction for train")
train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies,
train_headlines,
train_bodies)
# TF-IDF weight extraction
train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction(
validation_headlines, validation_bodies, test_headlines, test_bodies)
# Sentence weighting for train
train_sentence_weights = train_features.sentence_weighting()
print("Feature extraction for validation")
validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies,
validation_headlines, validation_bodies)
# Sentence weighting for validation
validation_sentence_weights = validation_features.sentence_weighting()
print("Feature extraction for test")
test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies,
test_headlines, test_bodies)
# Sentence weighting for test
test_sentence_weights = test_features.sentence_weighting()
        # Combine the features to prepare them as an input for the models
final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A
output_file(final_train_features, base_feature_path + "/" + "train_features.p")
final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A
output_file(final_validation_features, base_feature_path + "/" + "validation_features.p")
final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A
output_file(final_test_features, base_feature_path + "/" + "test_features.p")
else:
print("Feature Extraction")
final_train_features = input_file(base_feature_path + "/" + "train_features.p")
final_validation_features = input_file(base_feature_path + "/" + "validation_features.p")
final_test_features = input_file(base_feature_path + "/" + "test_features.p")
t1 = time.time()
print("Time for feature extraction is:", t1 - t0)
# Target variables
train_target_labels = target_labels(train_stances)
validation_target_labels = target_labels(validation_stances)
test_target_labels = target_labels(test.headlineInstances)
# Modelling the features
print("Start of Modelling")
models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels,
validation_target_labels, test_target_labels)
# Calling the 4 models
models.get_lr()
models.get_dt()
models.get_nb()
models.get_rf()
'''
    Use read_from_csv in utils to obtain the actual and predicted labels
    and produce the correctness visualization graphs for the report.
'''
t2 = time.time()
print("Time for the total is:", t2 - t0)
print("\nEnd of tests\n")
| 2.796875 | 3 |
past_questions/migrations/0004_remove_faculty_department_faculty_department.py | curlyzik/varsity-pq | 2 | 12793876 | <gh_stars>1-10
# Generated by Django 4.0 on 2021-12-18 01:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
(
"past_questions",
"0003_alter_department_level_alter_faculty_department_and_more",
),
]
operations = [
migrations.RemoveField(
model_name="faculty",
name="department",
),
migrations.AddField(
model_name="faculty",
name="department",
field=models.ManyToManyField(to="past_questions.Department"),
),
]
| 1.59375 | 2 |
guess.py | AnacondaDontWantNone/gotTheBuns | 0 | 12793877 | <reponame>AnacondaDontWantNone/gotTheBuns<gh_stars>0
#This is a Guess the Number game.
import random
guessesTaken = 0
print('Hello! What is your name?')
myName = input()
number = random.randint(1, 20)
print('Well, ' + myName + ', I am thinking of a number between 1 and 20. Can you guess it in six tries? :)')
for guessesTaken in range(6):
try:
print('Take a guess.') #Four spaces in front of "print"
guess = input()
guess = int(guess)
if guess < number: #"if" statement part of for statement body.
print('Your guess is too low.') #Eight spaces in front of "print"
if guess > number:
print('Your guess is too high')
if guess == number:
break
except:
print('That\'s not a number, silly! Sorry, but that counts as a guess.')
continue
if guess == number:
guessesTaken = str(guessesTaken + 1)
print('Good job, ' + myName + '! You guessed my number in ' + guessesTaken + ' guesses!')
if guess != number:
number = str(number)
print('Nope. The number I was thinking of was ' + number + '.')
| 4.15625 | 4 |
codebyte/python/letter_capitalize/letter_capitalize_test.py | lowks/levelup | 0 | 12793878 | import unittest
from letter_capitalize import LetterCapitalize
class TestWordCapitalize(unittest.TestCase):
def test_word_capitalize(self):
self.assertEqual(LetterCapitalize("hello world"), "Hello World")
if __name__ == '__main__':
unittest.main() | 3.703125 | 4 |
tests/test_datetime.py | SeitaBV/isodate | 4 | 12793879 | <gh_stars>1-10
##############################################################################
# Copyright 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##############################################################################
"""
Test cases for the isodatetime module.
"""
import datetime as dt
import pytest
from isodate import (
DATE_BAS_COMPLETE,
DATE_BAS_ORD_COMPLETE,
DATE_BAS_WEEK_COMPLETE,
DATE_EXT_COMPLETE,
DATE_EXT_ORD_COMPLETE,
DATE_EXT_WEEK_COMPLETE,
TIME_BAS_COMPLETE,
TIME_BAS_MINUTE,
TIME_EXT_COMPLETE,
TIME_EXT_MINUTE,
TZ_BAS,
TZ_EXT,
TZ_HOUR,
UTC,
FixedOffset,
ISO8601Error,
datetime_isoformat,
parse_datetime,
)
# the following list contains tuples of ISO datetime strings and the expected
# result from the parse_datetime method. A result of None means an ISO8601Error
# is expected.
TEST_CASES = [
(
"19850412T1015",
dt.datetime(1985, 4, 12, 10, 15),
DATE_BAS_COMPLETE + "T" + TIME_BAS_MINUTE,
"19850412T1015",
),
(
"1985-04-12T10:15",
dt.datetime(1985, 4, 12, 10, 15),
DATE_EXT_COMPLETE + "T" + TIME_EXT_MINUTE,
"1985-04-12T10:15",
),
(
"1985102T1015Z",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
DATE_BAS_ORD_COMPLETE + "T" + TIME_BAS_MINUTE + TZ_BAS,
"1985102T1015Z",
),
(
"1985-102T10:15Z",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
DATE_EXT_ORD_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_EXT,
"1985-102T10:15Z",
),
(
"1985W155T1015+0400",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, "+0400")),
DATE_BAS_WEEK_COMPLETE + "T" + TIME_BAS_MINUTE + TZ_BAS,
"1985W155T1015+0400",
),
(
"1985-W15-5T10:15+04",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, "+0400")),
DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_HOUR,
"1985-W15-5T10:15+04",
),
(
"1985-W15-5T10:15-0430",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, "-0430")),
DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_BAS,
"1985-W15-5T10:15-0430",
),
(
"1985-W15-5T10:15+04:45",
dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, "+04:45")),
DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_EXT,
"1985-W15-5T10:15+04:45",
),
(
"20110410T101225.123000Z",
dt.datetime(2011, 4, 10, 10, 12, 25, 123000, tzinfo=UTC),
DATE_BAS_COMPLETE + "T" + TIME_BAS_COMPLETE + ".%f" + TZ_BAS,
"20110410T101225.123000Z",
),
(
"2012-10-12T08:29:46.069178Z",
dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC),
DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
"2012-10-12T08:29:46.069178Z",
),
(
"2012-10-12T08:29:46.691780Z",
dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC),
DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
"2012-10-12T08:29:46.691780Z",
),
(
"2012-10-30T08:55:22.1234567Z",
dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC),
DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
"2012-10-30T08:55:22.123457Z",
),
(
"2012-10-30T08:55:22.1234561Z",
dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC),
DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
"2012-10-30T08:55:22.123456Z",
),
(
"2014-08-18 14:55:22.123456Z",
None,
DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
"2014-08-18T14:55:22.123456Z",
),
]
@pytest.mark.parametrize("datetime_string, expectation, format, output", TEST_CASES)
def test_parse(datetime_string, expectation, format, output):
"""
Parse an ISO date string and compare it to the expected value.
"""
if expectation is None:
with pytest.raises(ISO8601Error):
parse_datetime(datetime_string)
else:
result = parse_datetime(datetime_string)
assert result == expectation
@pytest.mark.parametrize("datetime_string, expectation, format, output", TEST_CASES)
def test_format(datetime_string, expectation, format, output):
"""
Take date object and create ISO string from it.
This is the reverse test to test_parse.
"""
if expectation is None:
with pytest.raises(AttributeError):
datetime_isoformat(expectation, format)
else:
result = datetime_isoformat(expectation, format)
assert result == output
| 1.359375 | 1 |
vae/vae.py | szrlee/vae-anomaly-detector | 0 | 12793880 | <filename>vae/vae.py
#!/usr/bin/python3
"""
PyTorch Variational Autoencoder Network Implementation
"""
from itertools import chain
import time
import json
import pickle
import numpy as np
import torch
from torch.autograd import Variable
from torch import nn
from torch import optim
from torch.nn import functional as F
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score
class Encoder(nn.Module):
"""
Probabilistic Encoder
Return the mean and the variance of z ~ q(z|x). The prior
of x is assume to be normal(0, I).
Arguments:
input_dim {int} -- number of features
Returns:
(tensor, tensor) -- mean and variance of the latent variable
output from the forward propagation
"""
def __init__(self, input_dim, config):
super(Encoder, self).__init__()
config_encoder = json.loads(config.get("encoder"))
config_read_mu = json.loads(config.get("read_mu"))
config_read_logvar = json.loads(config.get("read_sigma"))
config_encoder[0]['in_features'] = input_dim
encoder_network = []
for layer in config_encoder:
if layer['type'] == 'linear':
encoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))
elif layer['type'] == 'relu':
encoder_network.append(nn.ReLU())
elif layer['type'] == 'tanh':
encoder_network.append(nn.Tanh())
elif layer['type'] == 'dropout':
encoder_network.append(nn.Dropout(layer['rate']))
elif layer['type'] == 'batch_norm':
encoder_network.append(nn.BatchNorm1d(layer['num_features']))
self.encoder_network = nn.Sequential(*encoder_network)
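        # Two linear heads map the shared hidden state to the mean and log-variance of q(z|x).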
self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim'))
self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim'))
self.initialize_parameters()
def initialize_parameters(self):
"""
Xavier initialization
"""
for layer in self.modules():
if isinstance(layer, nn.Linear):
bound = 1 / np.sqrt(layer.in_features)
layer.weight.data.uniform_(-bound, bound)
layer.bias.data.zero_()
def forward(self, inputs):
"""
Forward propagation
"""
hidden_state = self.encoder_network(inputs)
mean = self.read_mu(hidden_state)
logvar = self.read_logvar(hidden_state)
return mean, logvar
class Decoder(nn.Module):
"""
Decoder
"""
def __init__(self, input_dim, config):
super(Decoder, self).__init__()
config_decoder = json.loads(config.get("decoder"))
self._distr = config['distribution']
decoder_network = []
for layer in config_decoder:
if layer['type'] == 'linear':
decoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))
elif layer['type'] == 'relu':
decoder_network.append(nn.ReLU())
elif layer['type'] == 'relu6':
decoder_network.append(nn.ReLU6())
elif layer['type'] == 'tanh':
decoder_network.append(nn.Tanh())
elif layer['type'] == 'sigmoid':
decoder_network.append(nn.Sigmoid())
elif layer['type'] == 'dropout':
decoder_network.append(nn.Dropout(layer['rate']))
elif layer['type'] == 'batch_norm':
decoder_network.append(nn.BatchNorm1d(layer['num_features']))
elif layer['type'] == 'read_x':
decoder_network.append(nn.Linear(layer['in_features'], input_dim))
self.decoder = nn.Sequential(*decoder_network)
if self._distr == 'poisson':
self.read_alpha = nn.Sequential(
nn.Linear(config.getint('latent_dim'), input_dim),
nn.ReLU6()
)
self.initialize_parameters()
def initialize_parameters(self):
for layer in self.modules():
if isinstance(layer, nn.Linear):
bound = 1 / np.sqrt(layer.in_features)
layer.weight.data.uniform_(-bound, bound)
layer.bias.data.zero_()
def forward(self, z):
if self._distr == 'poisson':
alpha = 0.5 * self.read_alpha(z)
return alpha * self.decoder(z)
else:
return self.decoder(z)
class VAE(nn.Module):
"""
VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x
"""
def __init__(self, input_dim, config, checkpoint_directory):
super(VAE, self).__init__()
self.config = config
self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id'])
self.checkpoint_directory = checkpoint_directory
self._distr = config['model']['distribution']
self._device = config['model']['device']
self._encoder = Encoder(input_dim, config['model'])
self._decoder = Decoder(input_dim, config['model'])
self.num_epochs = config.getint('training', 'n_epochs')
self._optim = optim.Adam(
self.parameters(),
lr=config.getfloat('training', 'lr'),
betas=json.loads(config['training']['betas'])
)
self.mu = None
self.logvar = None
self.precentile_threshold = config.getfloat('model', 'threshold')
self.threshold = None
self.cur_epoch = 0
self._save_every = config.getint('model', 'save_every')
def parameters(self):
return chain(self._encoder.parameters(), self._decoder.parameters())
def _sample_z(self, mu, logvar):
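        # Reparameterization trick: sample z = mu + sigma * eps with eps ~ N(0, I),
        # keeping the sample differentiable with respect to mu and logvar.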
epsilon = torch.randn(mu.size())
epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device)
sigma = torch.exp(logvar / 2)
return mu + sigma * epsilon
def forward(self, inputs):
"""
Forward propagation
"""
self.mu, self.logvar = self._encoder(inputs)
latent = self._sample_z(self.mu, self.logvar)
theta = self._decoder(latent)
#if torch.isnan(theta).any():
#index = torch.where(torch.isnan(theta))[0][0]
#print(index)
#print(inputs[index])
#print('mu: {}'.format(self.mu[index]))
#print('logvar: {}'.format(self.logvar[index]))
#print(latent[index])
#input()
return theta
def _to_numpy(self, tensor):
return tensor.data.cpu().numpy()
def poisson_cross_entropy(self, logtheta, inputs):
return - inputs * logtheta + torch.exp(logtheta)
def loglikelihood(self, reduction):
"""
Return the log-likelihood
"""
if self._distr == 'poisson':
#if reduction == 'none':
# return self.poisson_cross_entropy
return nn.PoissonNLLLoss(reduction=reduction)
elif self._distr == 'bernoulli':
return nn.BCELoss(reduction=reduction)
else:
raise ValueError('{} is not a valid distribution'.format(self._distr))
def fit(self, trainloader, print_every=1):
"""
Train the neural network
"""
start_time = time.time()
storage = {
'loss': [], 'kldiv': [], '-logp(x|z)': [],
'precision': [], 'recall': [], 'log_densities': None, 'params': None
}
for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs):
self.cur_epoch += 1
# temporary storage
losses, kldivs, neglogliks = [], [], []
for inputs, _ in trainloader:
self.train()
inputs = inputs.to(self._device)
logtheta = self.forward(inputs)
loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0]
assert not torch.isnan(loglikelihood).any()
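                # Closed-form KL term: KL(N(mu, sigma^2) || N(0, I)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
                # averaged here over the mini-batch.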
kl_div = -0.5 * torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0]
loss = -loglikelihood + kl_div
loss.backward()
self._optim.step()
self._optim.zero_grad()
losses.append(self._to_numpy(loss))
kldivs.append(self._to_numpy(kl_div))
neglogliks.append(self._to_numpy(-loglikelihood))
storage['loss'].append(np.mean(losses))
storage['kldiv'].append(np.mean(kldivs))
storage['-logp(x|z)'].append(np.mean(neglogliks))
if (epoch + 1) % print_every == 0:
epoch_time = self._get_time(start_time, time.time())
f1, acc, prec, recall, _, _ = self.evaluate(trainloader)
storage['precision'].append(prec)
storage['recall'].append(recall)
print('epoch: {} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format(
epoch + 1,
storage['loss'][-1],
storage['-logp(x|z)'][-1],
storage['kldiv'][-1],
epoch_time))
print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. {:.3f}'.format(f1, acc, prec, recall))
if (epoch + 1) % self._save_every == 0:
f1, acc, prec, recall, _, _ = self.evaluate(trainloader)
self.save_checkpoint(f1)
storage['log_densities'] = self._get_densities(trainloader)
storage['params'] = self._get_parameters(trainloader)
with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f:
pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL)
def _get_time(self, starting_time, current_time):
total_time = current_time - starting_time
minutes = round(total_time // 60)
seconds = round(total_time % 60)
return '{} min., {} sec.'.format(minutes, seconds)
# def _remove_spam(self, dataloader, data):
# idx_to_remove = self._find_threshold(dataloader)
# data.pop(idx_to_remove)
# self._encoder.initialize_parameters()
# self._decoder.initialize_parameters()
# self._optim = optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999))
# return data
def _get_parameters(self, dataloader):
self.eval()
parameters = []
for inputs, _ in dataloader:
inputs = inputs.to(self._device)
logtheta = self._to_numpy(self.forward(inputs))
parameters.extend(logtheta)
if self._distr == 'poisson':
parameters = np.exp(np.array(parameters))
else:
parameters = np.array(parameters)
return parameters
def _get_densities(self, dataloader):
all_log_densities = []
for inputs, _ in dataloader:
mini_batch_log_densities = self._evaluate_probability(inputs)
all_log_densities.extend(mini_batch_log_densities)
all_log_densities = np.array(all_log_densities)
return all_log_densities
def _evaluate_probability(self, inputs):
self.eval()
with torch.no_grad():
inputs = inputs.to(self._device)
logtheta = self.forward(inputs)
log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs)
#if np.isnan(log_likelihood).any():
# index = np.where(np.isnan(log_likelihood))
# print(index)
# index = index[0][0]
# print(inputs[index,:])
# print(logtheta[index,:])
log_likelihood = torch.sum(log_likelihood, 1)
assert inputs.shape[0] == log_likelihood.shape[0]
return self._to_numpy(log_likelihood)
def _find_threshold(self, dataloader):
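        # Inputs whose log-density falls below this percentile are later flagged as anomalies in predict().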
log_densities = self._get_densities(dataloader)
lowest_density = np.argmin(log_densities)
self.threshold = np.nanpercentile(log_densities, self.precentile_threshold)
return lowest_density
def evaluate(self, dataloader):
"""
Evaluate accuracy.
"""
self._find_threshold(dataloader)
predictions = []
ground_truth = []
log_densities = []
for inputs, targets in dataloader:
pred, mini_batch_log_densities = self.predict(inputs)
predictions.extend(pred)
ground_truth.extend(list(self._to_numpy(targets)))
log_densities.extend(mini_batch_log_densities)
log_densities = np.array(log_densities)
if np.isnan(log_densities).any():
print(np.where(np.isnan(log_densities)))
f1 = f1_score(ground_truth, predictions)
accuracy = accuracy_score(ground_truth, predictions)
precision = precision_score(ground_truth, predictions)
recall = recall_score(ground_truth, predictions)
return f1, accuracy, precision, recall, log_densities, ground_truth
def predict(self, inputs):
"""
Predict the class of the inputs
"""
log_density = self._evaluate_probability(inputs)
predictions = np.zeros_like(log_density).astype(int)
predictions[log_density < self.threshold] = 1
#if np.isnan(log_density).any():
# print(inputs[np.where(np.isnan(log_density))])
#print(self.threshold)
return list(predictions), log_density
def save_checkpoint(self, f1_score):
"""Save model paramers under config['model_path']"""
model_path = '{}/epoch_{}-f1_{}.pt'.format(
self.checkpoint_directory,
self.cur_epoch,
f1_score)
checkpoint = {
'model_state_dict': self.state_dict(),
'optimizer_state_dict': self._optim.state_dict()
}
torch.save(checkpoint, model_path)
def restore_model(self, filename, epoch):
"""
        Restore the model parameters
"""
model_path = '{}{}/{}.pt'.format(
self.config['paths']['checkpoints_directory'],
self.model_name,
filename)
checkpoint = torch.load(model_path)
self.load_state_dict(checkpoint['model_state_dict'])
self._optim.load_state_dict(checkpoint['optimizer_state_dict'])
self.cur_epoch = epoch
| 2.546875 | 3 |
start.py | lizzyTheLizard/medium-security-zap | 2 | 12793881 | #!/usr/bin/env python
# Spider and start listening for passive requests
import sys
from zapv2 import ZAPv2
from zap_common import *
#Configuration
zap_ip = 'localhost'
port = 12345
spiderTimeoutInMin = 2
startupTimeoutInMin=1
target='http://localhost:8080'
def main(argv):
#Initialize Zap API
http_proxy = 'http://' + zap_ip + ':' + str(port)
https_proxy = 'http://' + zap_ip + ':' + str(port)
zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy})
    # Check until ZAP is running
wait_for_zap_start(zap, startupTimeoutInMin*60)
#Check that target is reachable
zap_access_target(zap, target)
    # Use both spiders (traditional and AJAX)
zap_spider(zap, target)
zap_ajax_spider(zap, target, spiderTimeoutInMin)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.90625 | 3 |
tests/test_landing_page.py | Valaraucoo/raven-functional-tests | 0 | 12793882 | <reponame>Valaraucoo/raven-functional-tests
from helpers import *
class TestLandingPageLoading:
def test_get_base_url(self, driver):
driver.get(BASE_URL)
# waiting for animation
time.sleep(2)
button_login = driver.find_element_by_css_selector('button')
assert 'Zaloguj się' in button_login.get_attribute('innerHTML')
button_modal = driver.find_element_by_id('modal-trigger')
assert 'Czy jesteś w tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML')
button_modal.click()
button_modal_close = driver.find_element_by_id('modal-trigger2')
assert 'Cancel' in button_modal_close.get_attribute('innerHTML')
button_modal_close.click()
support_link = driver.find_element_by_xpath('//*[text()[contains(., "Support")]]')
driver.execute_script("arguments[0].click();", support_link)
assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML')
def test_login_to_dashboard(self, driver):
login(driver, '<EMAIL>', 'admin')
| 2.359375 | 2 |
corded/errors.py | an-dyy/Corded | 0 | 12793883 | <reponame>an-dyy/Corded
from aiohttp import ClientResponse
class CordedError(Exception):
pass
# HTTP Errors
class HTTPError(CordedError):
def __init__(self, response: ClientResponse):
self.response = response
class BadRequest(HTTPError):
pass
class Unauthorized(HTTPError):
pass
class Forbidden(HTTPError):
pass
class NotFound(HTTPError):
pass
class PayloadTooLarge(HTTPError):
pass
class TooManyRequests(HTTPError):
pass
class DiscordServerError(HTTPError):
pass
| 2.484375 | 2 |
nlu/components/embeddings/sentence_xlm/sentence_xlm.py | Murat-Karadag/nlu | 0 | 12793884 | from sparknlp.annotator import XlmRoBertaSentenceEmbeddings
class Sentence_XLM:
@staticmethod
def get_default_model():
return XlmRoBertaSentenceEmbeddings.pretrained() \
.setInputCols("sentence", "token") \
.setOutputCol("sentence_xlm_roberta")
@staticmethod
def get_pretrained_model(name, language):
return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \
.setInputCols("sentence", "token") \
.setOutputCol("sentence_xlm_roberta")
| 2.5 | 2 |
bx_py_utils/humanize/pformat.py | boxine/bx_py_utils | 6 | 12793885 | import json
import pprint
def pformat(value):
"""
    Format given object: Try JSON first and fall back to pformat()
(JSON dumps are nicer than pprint.pformat() ;)
"""
try:
value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)
except TypeError:
# Fallback if values are not serializable with JSON:
value = pprint.pformat(value, width=120)
return value
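if __name__ == '__main__':
    # Quick illustration (not part of the original module): JSON-serializable data
    # comes back as pretty-printed JSON, everything else falls back to pprint.
    print(pformat({'b': 1, 'a': [1, 2, 3]}))  # sorted, indented JSON
    print(pformat({'obj': object()}))         # TypeError -> pprint.pformat fallback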
| 3.484375 | 3 |
mmdnn/conversion/examples/darknet/extractor.py | kmader/MMdnn | 3,442 | 12793886 | #----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import os
from mmdnn.conversion.examples.darknet import darknet as cdarknet
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.examples.extractor import base_extractor
from mmdnn.conversion.common.utils import download_file
class darknet_extractor(base_extractor):
_base_model_url = "https://raw.githubusercontent.com/pjreddie/darknet/master/"
architecture_map = {
'yolov3' : {
'config' : _base_model_url + "cfg/yolov3.cfg",
'weights' : "https://pjreddie.com/media/files/yolov3.weights"
},
'yolov2' :{
'config' : _base_model_url + "cfg/yolov2.cfg",
'weights' : "https://pjreddie.com/media/files/yolov2.weights"
}
}
@classmethod
def download(cls, architecture, path = './'):
if cls.sanity_check(architecture):
cfg_name = architecture + ".cfg"
architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name)
if not architecture_file:
return None
weight_name = architecture + ".weights"
weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name)
if not weight_file:
return None
print("Darknet Model {} saved as [{}] and [{}].".format(architecture, architecture_file, weight_file))
return (architecture_file, weight_file)
else:
return None
@classmethod
def inference(cls, architecture, files, model_path, image_path):
import numpy as np
if cls.sanity_check(architecture):
download_file(cls._base_model_url + "cfg/coco.data", directory='./')
download_file(cls._base_model_url + "data/coco.names", directory='./data/')
print(files)
net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0)
meta = cdarknet.load_meta("coco.data".encode())
r = cdarknet.detect(net, meta, image_path.encode())
# print(r)
return r
else:
return None
# d = darknet_extractor()
# model_filename = d.download('yolov3')
# print(model_filename)
# image_path = "./mmdnn/conversion/examples/data/dog.jpg"
# model_path = "./"
# d = darknet_extractor()
# result = d.inference('yolov3', model_filename, model_path, image_path = image_path)
# print(result)
| 2.140625 | 2 |
data/transcoder_evaluation_gfg/python/SCHEDULE_JOBS_SERVER_GETS_EQUAL_LOAD.py | mxl1n/CodeGen | 241 | 12793887 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b , n ) :
s = 0
for i in range ( 0 , n ) :
s += a [ i ] + b [ i ]
if n == 1 :
return a [ 0 ] + b [ 0 ]
if s % n != 0 :
return - 1
x = s // n
for i in range ( 0 , n ) :
if a [ i ] > x :
return - 1
if i > 0 :
a [ i ] += b [ i - 1 ]
b [ i - 1 ] = 0
if a [ i ] == x :
continue
y = a [ i ] + b [ i ]
if i + 1 < n :
y += b [ i + 1 ]
if y == x :
a [ i ] = y
b [ i ] = 0
if i + 1 < n : b [ i + 1 ] = 0
continue
if a [ i ] + b [ i ] == x :
a [ i ] += b [ i ]
b [ i ] = 0
continue
if i + 1 < n and a [ i ] + b [ i + 1 ] == x :
a [ i ] += b [ i + 1 ]
b [ i + 1 ] = 0
continue
return - 1
for i in range ( 0 , n ) :
if b [ i ] != 0 :
return - 1
return x
#TOFILL
if __name__ == '__main__':
param = [
([4, 9, 16, 18, 20, 23, 24, 25, 25, 26, 29, 30, 35, 40, 41, 43, 44, 46, 53, 53, 56, 56, 58, 60, 62, 70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35, 37, 41, 42, 43, 48, 49, 49, 54, 55, 57, 65, 66, 67, 67, 68, 83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,),
([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68, 42, 0, 98, -70, -14, -32, 6, 74, 64, -78, 86, -42, -56, 2, -34, -46, 70, -62, 50, -58, -58, 42, 86, 96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4, 82, 16, 32, -6, 58, 82, -66, -40, 52, -78, 94, -70, -80, -68, -58, -26, 50, -78, -90, -48, -28, 48, 56, 50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,),
([98, 18, 50, 36, 88, 75, 2, 40, 74, 19, 63, 82, 77, 5, 59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28, 40, 90, 1, 86, 76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,),
([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44, -38, -30, -30, -26, -14, -12, -10, -6, -6, 6, 22, 22, 22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24, -12, -12, -8, -2, 4, 8, 16, 20, 24, 24, 24, 48, 50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,),
([0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,),
([59, 61, 64],[22, 59, 85],1,),
([98, 92, 28, 42, -74, -36, 40, -8, 32, -22, -70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0, 76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,),
([72, 97, 79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 3.078125 | 3 |
backend/reservas/admin.py | ES2-UFPI/404-portal | 1 | 12793888 | <filename>backend/reservas/admin.py
from django.contrib import admin
from .models import Reserva
admin.site.register(Reserva)
| 1.210938 | 1 |
data_loader.py | winterest/f-function | 0 | 12793889 | <filename>data_loader.py
from torch.utils import data
import os
import torch
from torchvision import transforms as T
from scipy import interpolate
from PIL import Image
from random import shuffle
import xml.etree.ElementTree as ET
## Config
img_size = 256
## End of config
class LabeledImageFolder(data.Dataset):
def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4):
"""Initializes image paths and preprocessing module."""
self.root = root
# GT : Ground Truth
self.GT_paths = GT_path
#self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root)))
self.image_paths = list_img_path
self.image_size = image_size
self.mode = mode
self.RotationDegree = [0,90,180,270]
self.augmentation_prob = augmentation_prob
print("image count in {} path :{}".format(self.mode,len(self.image_paths)))
def __getitem__(self, index):
#img_size = 224
"""Reads an image from a file and preprocesses it and returns."""
image_path = self.image_paths[index]
filename = image_path.split('_')[-1][:-len(".jpg")]
#GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png'
image = Image.open(image_path)
#GT = Image.open(GT_path)
annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml'
tree = ET.parse(annot_fn)
objs = tree.findall('object')
#img = Image.open(fn)
wid = image.width
hei = image.height
for ix, obj in enumerate(objs):
if obj.find('name').text.lower().strip()=='graph':
bbox = obj.find('bndbox')
x11 = int(float(bbox.find('xmin').text))
y11 = int(float(bbox.find('ymin').text))
x12 = int(float(bbox.find('xmax').text))
y12 = int(float(bbox.find('ymax').text))
if obj.find('name').text.lower().strip()=='xypercent':
xper = obj.find('xper')
#print(xper.text)
xper = xper.text.split(' ')
                xper = [int(float(i) * img_size) for i in xper]
yper = obj.find('yper')
#print(yper.text)
yper = yper.text.split(' ')
                yper = [int(float(i) * img_size) for i in yper]
image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size))
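        # Build the dense training targets from the sparse curve annotation:
        # `matrix` is a binary img_size x img_size mask marking the interpolated curve,
        # and `vector` holds, for every x position, the interpolated y value
        # (-1 where the curve is not annotated).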
matrix = torch.zeros(img_size,img_size)
vector = torch.ones(img_size) * (-1)
f = interpolate.interp1d(xper, yper)
xnew = list(range(xper[0],xper[-1]+1))
ynew = f(xnew)
ynew = [int(i) for i in ynew]
for n,xn in enumerate(xnew):
matrix[xn, ynew[n]] = 1
vector[xn] = ynew[n]
Transform = []
Transform.append(T.ToTensor())
Transform = T.Compose(Transform)
image_t = Transform(image)
Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
image_t = Norm_(image_t)
return image_t, vector, matrix, image_path
def __len__(self):
"""Returns the total number of font files."""
return len(self.image_paths)
def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4):
"""Builds and returns Dataloader."""
image_path = root_path+'/JPEGImages/'
GT_path = root_path+'/Annotations/'
list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path)))
shuffle(list_all)
num_train = int(split_ratio * len(list_all))
list_train = list_all[:num_train]
list_val = list_all[num_train:]
train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train,
image_size =image_size, mode=mode,augmentation_prob=augmentation_prob)
train_loader = data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers)
val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val,
image_size =image_size, mode=mode,augmentation_prob=augmentation_prob)
val_loader = data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
return train_loader, val_loader | 2.484375 | 2 |
tests/examples/minlplib/mathopt3.py | ouyang-w-19/decogo | 2 | 12793890 | # NLP written by GAMS Convert at 04/21/18 13:52:29
#
# Equation counts
# Total E G L N X C B
# 8 5 0 3 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 7 7 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 43 19 24 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(None,None),initialize=10)
m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=10)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=10)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=10)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10)
m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1
+ m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize)
m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6 == 0)
m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0)
m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0)
m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0)
m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1)
m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0)
m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 <= 2)
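# Illustrative only (not part of the generated model): with a local NLP solver such as
# Ipopt installed, the instance could be solved with
#   SolverFactory('ipopt').solve(m, tee=True)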
| 1.65625 | 2 |
aiohttp_asgi/__init__.py | PaulWasTaken/aiohttp-asgi | 10 | 12793891 | from .resource import ASGIResource
__all__ = ("ASGIResource",)
| 1.132813 | 1 |
2014/bio-2014-1-1.py | pratheeknagaraj/bio_solutions | 0 | 12793892 | #!/usr/bin/env python
"""
bio-2014-1-1.py: Sample solution for question 1 on
the 2014 British Informatics Olympiad Round One exam
Lucky Numbers
"""
__author__ = "<NAME>"
__date__ = "25 January 2016"
"""
This is simply an implemenation based problem. We first
generate the list of lucky numbers via the algorithm
provided and then use a binary search to find the
corresponding lower and greater elements.
"""
# Generate luck numbers list
vals = range(1,11000,2)
complete = False
cur_index = 1
cur = None
while not complete:
cur = vals[cur_index]
if len(vals) < cur:
break
end_index = cur*(len(vals)/cur)-1
for i in xrange(end_index, -1, -cur):
vals.pop(i)
cur_index += 1
# Find the lucky numbers
def find_less_greater(val):
pos = binary_search(0, len(vals)-1, val)
if vals[pos] == val:
return vals[pos-1], vals[pos+1]
if vals[pos] < val:
return vals[pos], vals[pos+1]
return vals[pos-1], vals[pos]
def binary_search(start, end, val):
if start >= end:
return start
mid_pos = (end-start)/2 + start
mid_val = vals[mid_pos]
if val < mid_val:
return binary_search(start, mid_pos-1, val)
elif val > mid_val:
return binary_search(mid_pos+1, end, val)
return mid_pos
def lucky_numbers(val):
lower, upper = find_less_greater(val)
print lower, upper
return (lower, upper)
in1 = raw_input()
lucky_numbers(int(in1)) | 3.9375 | 4 |
python/analysis/mostImportantMiRNAs.py | mjoppich/miRExplore | 0 | 12793893 | import matplotlib
from collections import defaultdict, OrderedDict
from plots.DotSetPlot import DotSetPlot
processToTitle = {
"targetMirsECA": "EC activation and\n inflammation",
"targetMirsMonocyte": "Monocyte diff. &\nMacrophage act.",
"targetMirsFCF": "Foam cell formation",
"targetMirsAngio": "Angiogenesis",
"targetMirsVasRemod": "Vascular remodeling",
"targetMirsTCell": "T cell differentiation &\n activation",
"targetMirsCholEfflux": "Cholesterol efflux",
"targetMirsSMCProlif": "SMC proliferation &\n SMC migration"
}
network2nicename = {
"CV-IPN-Plaque_destabilization_1": "(VI) Plaque destabilization",
"CV-IPN-Platelet_activation_1": "(V) Platelet activation",
"CV-IPN-Smooth_muscle_cell_activation_1": "(IV) SMC activation",
"CV-IPN-Foam_cell_formation_1": "(III) Foam cell formation",
"CV-IPN-Endothelial_cell-monocyte_interaction_1": "(II) EC/MC interaction",
"CV-IPN-Endothelial_cell_activation_1": "(I) EC activation",
}
celltype2nicename = {
'SMC': "Smooth muscle cell",
'EC': "Endothelial cell",
"MC": "Macrophage/Monocyte",
"FC": "Foam cell"
}
def source2index( sname ):
if sname != None and sname.startswith("CV-IPN"):
return 0
return 1
mirna2evidenceCellT = defaultdict(lambda: defaultdict(set))
mirna2evidenceCBN = defaultdict(lambda: defaultdict(set))
mirna2evidenceProcess = defaultdict(lambda: defaultdict(set))
pubmed2tuples = defaultdict(set)
mirna2evflows = defaultdict(set)
dataLabels = defaultdict(set)
#"miR-98", "miR-125a"
manuMirnas = ["miR-98", "miR-125a","miR-21", "miR-34a", "miR-93", "miR-125b", "miR-126", "miR-146a", "miR-155", "miR-370"]
#manuMirnas = ['miR-126', 'miR-21', 'miR-155', 'miR-146a', 'miR-125b', 'miR-34a', 'miR-499', 'miR-221', 'miR-370', 'miR-504']
#manuMirnas = ['miR-181c', 'miR-222', 'miR-126', 'miR-155', 'miR-125b', 'miR-34a', 'miR-370', 'miR-146a', 'miR-21', 'miR-93']
manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370', 'miR-222', 'miR-125b', 'miR-34a', 'miR-146a', 'miR-126', 'miR-21'})
manuMirnas = ["miR-98", "miR-125a","miR-21", "miR-34a", "miR-93", "miR-125b", "miR-126", "miR-146a", "miR-155", "miR-370"]
miRNA2InteractionPartner = defaultdict(set)
miRNA2Evidences = defaultdict(set)
with open("/mnt/d/yanc_network/disease_pw_important_cbn.txt", 'r') as fin:
for line in fin:
line = line.strip().split("\t")
#CV-IPN-Endothelial_cell-monocyte_interaction_1 VEGFA miR-140 EC 27035554
cbn = network2nicename.get(line[0], line[0])
gene = line[1]
miRNA = line[2]
cellT = celltype2nicename.get(line[3], line[3])
evidence = line[4]
if "US" in miRNA:
continue
miRNA2InteractionPartner[miRNA].add(gene)
miRNA2Evidences[miRNA].add(evidence)
dataLabels["Cell-Type"].add(cellT)
dataLabels["CBN"].add(cbn)
mirna2evidenceCellT[miRNA][evidence].add(cellT)
mirna2evidenceCBN[miRNA][evidence].add(cbn)
#important_process
with open("/mnt/d/yanc_network/pathway_important_process.txt", 'r') as fin:
for line in fin:
line = line.strip().split("\t")
#CV-IPN-Endothelial_cell-monocyte_interaction_1 VEGFA miR-140 EC 27035554
process = processToTitle.get(line[0], line[0])
gene = line[1]
miRNA = line[2]
cellT = celltype2nicename.get(line[3], line[3])
evidence = line[4]
if "US" in miRNA:
continue
miRNA2InteractionPartner[miRNA].add(gene)
miRNA2Evidences[miRNA].add(evidence)
dataLabels["Cell-Type"].add(cellT)
dataLabels["Process"].add(process)
mirna2evidenceCellT[miRNA][evidence].add(cellT)
mirna2evidenceProcess[miRNA][evidence].add(process)
for x in manuMirnas:
print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x])
allMiRNA = set()
for x in mirna2evidenceCellT:
allMiRNA.add(x)
for x in mirna2evidenceProcess:
allMiRNA.add(x)
for x in mirna2evidenceCBN:
allMiRNA.add(x)
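# For every miRNA, union the cell types, CBNs and processes observed across all of its
# evidence (PubMed IDs) into a single record; these records feed the dot-set plot below.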
dataUpPlot = {}
for miRNA in allMiRNA:
miRNAEvs = set()
for x in mirna2evidenceCBN.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceProcess.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceCellT.get(miRNA, []):
miRNAEvs.add(x)
miRNAData = {
"CBN": set(),
"Process": set(),
"Cell-Type": set()
}
for ev in miRNAEvs:
cellT = mirna2evidenceCellT[miRNA].get(ev, None)
cbns = mirna2evidenceCBN[miRNA].get(ev, None)
process = mirna2evidenceProcess[miRNA].get(ev, None)
if cellT != None:
miRNAData['Cell-Type'] = miRNAData['Cell-Type'].union(cellT)
if cbns != None:
miRNAData['CBN'] = miRNAData['CBN'].union(cbns)
if process != None:
miRNAData['Process'] = miRNAData['Process'].union(process)
dataUpPlot[miRNA] = miRNAData
orderDict = OrderedDict()
for type in ["CBN", "Process", "Cell-Type"]:
orderDict[type] = sorted(dataLabels[type])
def makeMIRNAName(miRNA):
return miRNA
return miRNA + " (" + str(len(miRNA2InteractionPartner[miRNA])) + ","+ str(len(miRNA2Evidences[miRNA]))+")"
filteredData = OrderedDict()
for miRNA in manuMirnas:
if miRNA in dataUpPlot:
filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]
else:
print("Missing manu", miRNA)
stages2 = 0
stages0 = 0
from natsort import natsorted
for miRNA in natsorted(dataUpPlot, key=lambda x: x.split("-")[1]):
stages = dataUpPlot[miRNA]['CBN']
if len(miRNA2Evidences[miRNA]) <= 0:
continue
if len(dataUpPlot[miRNA]['Process']) == 0:
pass#continue
if len(dataUpPlot[miRNA]['CBN']) == 0:
continue
filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]
print(len(dataUpPlot))
print(len(filteredData))
print(stages2)
print(stages0)
fout = open("/mnt/c/Users/mjopp/Desktop/d3-parsets-d3v5/titanic.csv", "w")
print("miRNA", "CBN", "PROCESS", "CELLTYPE", sep=",", file=fout)
mirna2printTuple = defaultdict(list)
for miRNA in allMiRNA:
miRNAEvs = set()
for x in mirna2evidenceCBN.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceProcess.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceCellT.get(miRNA, []):
miRNAEvs.add(x)
miRNAData = {
"CBN": set(),
"Process": set(),
"Cell-Type": set()
}
for ev in miRNAEvs:
cellT = mirna2evidenceCellT[miRNA].get(ev, ["None"])
cbns = mirna2evidenceCBN[miRNA].get(ev, ["None"])
processes = mirna2evidenceProcess[miRNA].get(ev, ["None"])
if miRNA == "miR-98":
            print(ev, cbns, cellT, processes)
if "None" in cbns:# or "None" in processes:
continue
for celltype in cellT:
for cbn in cbns:
for process in processes:
mirna2printTuple[miRNA].append( (cbn, process, celltype) )
selMirnas = sorted([x for x in mirna2printTuple], reverse=True, key=lambda x: len(mirna2printTuple[x]))
print(selMirnas[0:10])
for miRNA in manuMirnas:
for (cbn, process, celltype) in mirna2printTuple[miRNA]:
print(miRNA, cbn.replace("\n", " ").replace(" ", " "), process.replace("\n", " ").replace(" ", " "), celltype, sep=",", file=fout)
interactorCounts = [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData]
pubmedCounts = [len(miRNA2Evidences[miRNA]) for miRNA in filteredData]
DotSetPlot().plot(dataLabels, filteredData, numbers={"Interactor Count":interactorCounts , "PubMed Evidence Count": pubmedCounts },sortData=False,order=orderDict)#, max=30)
matplotlib.pyplot.savefig("/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf")
matplotlib.pyplot.show()
| 2.46875 | 2 |
src/hud.py | anita-hu/simulanes | 1 | 12793894 | <filename>src/hud.py<gh_stars>1-10
# Modified work Copyright (c) 2021 <NAME>, <NAME>.
# Original work Copyright (c) 2018 Intel Labs.
# authors: <NAME> (<EMAIL>)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py
import datetime
import math
import os
import pygame
import carla
import utils
class HUD(object):
"""Class for HUD text"""
def __init__(self, width, height, doc):
"""Constructor method"""
self.dim = (width, height)
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height)
self.server_fps = 0
self.frame = 0
self.simulation_time = 0
self.map_name = None
self._show_info = True
self._info_text = []
self._server_clock = pygame.time.Clock()
def on_world_tick(self, timestamp):
"""Gets informations from the world at every tick"""
self._server_clock.tick()
self.server_fps = self._server_clock.get_fps()
self.frame = timestamp.frame_count
self.simulation_time = timestamp.elapsed_seconds
def tick(self, world, clock):
"""HUD method for every tick"""
self._notifications.tick(world, clock)
self.map_name = world.map.name
if not self._show_info:
return
transform = world.player.get_transform()
vel = world.player.get_velocity()
control = world.player.get_control()
heading = 'N' if abs(transform.rotation.yaw) < 89.5 else ''
heading += 'S' if abs(transform.rotation.yaw) > 90.5 else ''
heading += 'E' if 179.5 > transform.rotation.yaw > 0.5 else ''
heading += 'W' if -0.5 > transform.rotation.yaw > -179.5 else ''
colhist = world.collision_sensor.get_collision_history()
collision = [colhist[x + self.frame - 200] for x in range(0, 200)]
max_col = max(1.0, max(collision))
collision = [x / max_col for x in collision]
vehicles = world.world.get_actors().filter('vehicle.*')
ego_location = world.player.get_location()
waypoint = world.map.get_waypoint(ego_location, project_to_road=True)
# always make traffic lights
if world.player.is_at_traffic_light():
traffic_light = world.player.get_traffic_light()
if traffic_light.get_state() == carla.TrafficLightState.Red:
world.hud.notification("Traffic light changed! Good to go!")
traffic_light.set_state(carla.TrafficLightState.Green)
self._info_text = [
'Server: % 16.0f FPS' % self.server_fps,
'Client: % 16.0f FPS' % clock.get_fps(),
'',
'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20),
'Map: % 20s' % world.map.name,
'Road id: % 20s' % waypoint.road_id,
'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
'',
'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)),
u'Heading:% 16.0f\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)),
'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),
'Height: % 18.0f m' % transform.location.z,
'']
if isinstance(control, carla.VehicleControl):
self._info_text += [
('Throttle:', control.throttle, 0.0, 1.0),
('Steer:', control.steer, -1.0, 1.0),
('Brake:', control.brake, 0.0, 1.0),
('Reverse:', control.reverse),
('Hand brake:', control.hand_brake),
('Manual:', control.manual_gear_shift),
'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)]
elif isinstance(control, carla.WalkerControl):
self._info_text += [
('Speed:', control.speed, 0.0, 5.556),
('Jump:', control.jump)]
self._info_text += [
'',
'Collision:',
collision,
'',
'Number of vehicles: % 8d' % len(vehicles)]
if len(vehicles) > 1:
self._info_text += ['Nearby vehicles:']
def dist(l):
return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y)
** 2 + (l.z - transform.location.z)**2)
vehicles = [(dist(x.get_location()), x) for x in vehicles if x.id != world.player.id]
for dist, vehicle in sorted(vehicles):
if dist > 200.0:
break
vehicle_type = utils.get_actor_display_name(vehicle, truncate=22)
self._info_text.append('% 4dm %s' % (dist, vehicle_type))
def toggle_info(self):
"""Toggle info on or off"""
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
"""Notification text"""
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
"""Error text"""
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
"""Render for HUD class"""
if self._show_info:
info_surface = pygame.Surface((250, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
fig = (item[1] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect(
(bar_h_offset + fig * (bar_width - 6), v_offset + 8), (6, 6))
else:
rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6))
pygame.draw.rect(display, (255, 255, 255), rect)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, (255, 255, 255))
display.blit(surface, (8, v_offset))
v_offset += 18
self._notifications.render(display)
self.help.render(display)
class FadingText(object):
""" Class for fading text """
def __init__(self, font, dim, pos):
"""Constructor method"""
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
"""Set fading text"""
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, _, clock):
"""Fading text method for every tick"""
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
"""Render fading text method"""
display.blit(self.surface, self.pos)
class HelpText(object):
""" Helper class for text render"""
def __init__(self, doc, font, width, height):
"""Constructor method"""
lines = doc.split('\n')
self.font = font
self.dim = (680, len(lines) * 22 + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for i, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, i * 22))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
"""Toggle on or off the render help"""
self._render = not self._render
def render(self, display):
"""Render help text method"""
if self._render:
display.blit(self.surface, self.pos)
| 2.75 | 3 |
src/cryptos/__init__.py | villoro/airflow_tasks | 0 | 12793895 | from .process import update_cryptos
| 0.988281 | 1 |
oo/pessoa.py | limaon/pythonbirds | 0 | 12793896 | <reponame>limaon/pythonbirds<filename>oo/pessoa.py
class Pessoa:
def __init__(self, nome=None, idade=35):
self.idade = idade
self.nome = nome
def cumprimentar(self):
return f'Ola {id(self)}'
class CirculoPerfeito:
def __init__(self):
self.cor = 'Azul'
self.circuferencia = 4
self.material = 'Papel'
def mostra_cor(self):
return id(self)
if __name__ == '__main__':
circulo_primeiro = CirculoPerfeito()
circulo_segundo = CirculoPerfeito()
print(type(circulo_primeiro))
print(circulo_primeiro is circulo_segundo)
print(id(circulo_primeiro), circulo_primeiro.mostra_cor())
circulo_segundo.cor = 'Amarelo'
print(circulo_primeiro.cor, circulo_segundo.cor)
| 3.28125 | 3 |
getwordindexfile.py | lqyluck/semi-supervised-lda | 2 | 12793897 | '''
Created on 2011-5-30
@author: cyzhang
'''
import re
import sys,os
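# dofeaindex builds a feature -> count map from a corpus in which every line has two
# tab-separated fields, the second being a space-separated feature list. With
# filetype == 0 the list appears to alternate "feature count" tokens; otherwise each
# token is counted once. main() then writes "<index> <feature>" lines for every feature
# whose total count exceeds the cutoff.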
def dofeaindex(file,filetype):
feamap = {}
linenum = 0
for line in open(file):
line = line.strip()
content = line.split('\t')
if len(content) != 2:
continue
felist=content[1].split(' ')
if len(felist) == 0:
continue
j = 0
while j < len(felist):
if 0 == filetype:
feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1])
j += 2
else:
feamap[felist[j]] = feamap.get(felist[j],0) + 1
j += 1
linenum += 1
return feamap,linenum
if __name__ == "__main__":
inputfile = sys.argv[1]
outputfile = sys.argv[2]
filetype = int(sys.argv[3])
cutoff = int(sys.argv[4])
myfile = open(outputfile , 'w')
feamap,linenum = dofeaindex(inputfile,filetype)
id = 0
for fea,num in feamap.items():
if num > cutoff:
myfile.write(str(id) + ' ' + fea + '\n')
id += 1
myfile.close() | 2.734375 | 3 |
connections/conn_napalm.py | no-such-anthony/net-run | 1 | 12793898 | <filename>connections/conn_napalm.py
from napalm import get_network_driver
class conn_napalm():
def __init__(self, device, connection_key):
self.device = device
self.connection_key = connection_key
#check for required kwargs, grab root level key if not in connection_key
#could have some default values?
if 'hostname' not in device[connection_key]:
device[connection_key]['hostname'] = device.get('host', device['name'])
if 'username' not in device[connection_key]:
device[connection_key]['username'] = device.get('username', '')
if 'password' not in device[connection_key]:
device[connection_key]['password'] = device.get('password', '')
self.driver = device[connection_key].pop('driver', device.get('platform',''))
def connect(self):
driver = get_network_driver(self.driver)
self.connection = driver(**self.device[self.connection_key])
self.connection.open()
return self.connection
def close(self):
self.connection.close() | 2.6875 | 3 |
house_rocket_analysis/APP/app.py | diogovalentte/data_engineer_portfolio | 8 | 12793899 | # Libraries
from pandas.io.formats.format import DataFrameFormatter
from streamlit_folium import folium_static
import pandas as pd
import numpy as np
import seaborn as sns
import streamlit as st
import sys
#! Add folder "src" as a package path
project_path = "Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis"
sys.path.append(f'{project_path}/src/')
import visualization.maps as maps
#! App configuration
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def load_data(path):
data = pd.read_csv(path)
return data
# Pages definition
def sidebar():
st.sidebar.title('Select Page')
page_select = st.sidebar.selectbox( label='', options=['Final Reports', 'Maps'])
return page_select
def page_final_reports(renamed_houses, recommended_houses):
# Filter Recommended Houses to Buy DataFrame
st.sidebar.title('Search for recommended home for purchase')
id_input = str(st.sidebar.text_input(label='Enter the ID')).strip() # Input ID to search house
st.title('House Rocket Analysis')
st.title('')
st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.')
st.dataframe(renamed_houses)
st.header("Main considerations of the analysis.")
st.markdown('* The variables with the highest positive correlation with Price are "Grade" and "Sqft living".')
    st.markdown('* Houses rated 8 or higher in the "Grade" (quality of the building materials of the house) attribute have the best average price per rank and number of homes.')
    st.markdown('* The average price of renovated homes is 22% higher than that of unrenovated homes.')
    st.markdown('* Among the features that a renovation can add, bathrooms and the amount of square feet of living space correlate most strongly with Price.')
st.markdown('* The best season for re-selling homes is Spring.')
    st.header(
        """After these analyses, the houses recommended for House Rocket to buy meet the following conditions:
        Houses with a "Grade" (quality of the building materials of the house) equal to or greater than 8
        Houses with a condition equal to or greater than 3
        Houses priced below the median price of their region (ZipCode)""")
st.header("""The re-sale price of the after-purchased homes is based on the various "Total Avarage Price", which means the average value of the region's house prices (ZipCode) and the average price of the Season that the house was announced.
If the purchase price of the house is higher than the "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.
If the purchase price of the house is less than the "Total Avarage Price", then the suggested selling price will be the purchase price + 30%.""")
st.header("""A column has also been added in the table representing the recommended re-sale price and the profit from re-selling the house if it is renewed.
If the house is renovated, the re-sale price and the after-sale profit will be 20% higher.
""")
st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.')
    st.subheader('New columns have also been added at the end of the table. They represent the recommended selling price of each house, whether it has been renovated or not, in addition to the possible profit if it is sold at the recommended price.')
st.text("")
try:
if not id_input:
st.dataframe(recommended_houses)
else:
if int(id_input) in recommended_houses['ID'].values:
st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)])
else:
st.error(
'Property with this ID is not recommended for purchase or there is no home with this ID.')
except:
st.error('ERROR: Input value is not a valid ID.')
#finally:
return None
def page_maps(renamed_houses, recommended_houses):
# SideBar - - -
st.sidebar.title('Filter Map')
filter_data = st.sidebar.radio(label='Filter Houses', options=[
'All houses', 'Recommended homes to buy'])
# Filters - -
if filter_data == 'Recommended homes to buy':
st.title('Map of all recommended homes for purchase')
st.header('')
data = recommended_houses.copy()
else:
st.title('Map of all available houses')
st.header('')
data = renamed_houses.copy()
# Map of density
houses_map = maps.houses_map(data)
folium_static(houses_map, width=1200, height=700)
    # Map with average price per region (ZipCode)
    st.title('Average Price per Region')
avg_region = maps.price_per_region(renamed_houses)
folium_static(avg_region, width=1200, height=700)
if __name__ == '__main__':
path = f"{project_path}/data/interim/renamed_data.csv"
renamed_houses = load_data(path)
path = f"{project_path}/reports/data/final_houses_sale.csv"
recommended_houses = load_data(path)
page_select = sidebar()
if page_select == 'Final Reports':
page_final_reports(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
else:
page_maps(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
| 3.296875 | 3 |
incident_io_client/models/severities_list_response_body.py | expobrain/python-incidentio-client | 0 | 12793900 | from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.severity_response_body import SeverityResponseBody
T = TypeVar("T", bound="SeveritiesListResponseBody")
@attr.s(auto_attribs=True)
class SeveritiesListResponseBody:
"""
Example:
{'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's not really that bad, everyone
chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at':
'2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's not really
that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at':
'2021-08-17T13:28:57.801578Z'}]}
Attributes:
severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description':
"It's not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's
not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's
not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}].
"""
severities: List[SeverityResponseBody]
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
severities = []
for severities_item_data in self.severities:
severities_item = severities_item_data.to_dict()
severities.append(severities_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"severities": severities,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
severities = []
_severities = d.pop("severities")
for severities_item_data in _severities:
severities_item = SeverityResponseBody.from_dict(severities_item_data)
severities.append(severities_item)
severities_list_response_body = cls(
severities=severities,
)
severities_list_response_body.additional_properties = d
return severities_list_response_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| 2.390625 | 2 |
server.py | vhnguyen0707/CMPUT404-assignment-webserver | 0 | 12793901 | <reponame>vhnguyen0707/CMPUT404-assignment-webserver
# coding: utf-8
import os
import socketserver
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
request = self.data.decode("utf-8").split(' ')
try:
method = request[0]
path = request[1]
http_version = request[2][:8].strip() # get http version
other_methods = ["OPTIONS", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"] # all methods defined in rfc266 other than GET
if method == "GET":
relative_path = "./www" + os.path.normpath(request[1]) #normalize case of pathname
if http_version == 'HTTP/1.1' and "Host" not in self.data.decode():
self.request.sendall(bytearray(f"{http_version} 400 Bad Request\r\nContent-Type: text/html\r\nConnection: close\r\n\r\n", 'utf-8'))
elif os.path.isdir(relative_path):
# when path is an existing directory
if path[-1] == '/':
try:
f = open(f'{relative_path}/index.html', 'r')
content = f.read()
content_length = len(content)
response = f'{http_version} 200 OK\r\nContent-Type: text/html\r\nContent-Length: {content_length}\r\n\r\n{content}'
self.request.sendall(bytearray(response, 'utf-8'))
except:
# error reading file occured
self.request.sendall(bytearray(f"{http_version} 500 Internal Server Error\r\n", 'utf-8'))
else:
self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\r\nContent-Type: text/html\r\nLocation: {path}/\r\n\r\n', 'utf-8'))
elif os.path.isfile(relative_path):
# when path is an existing file
try:
f = open(f'{relative_path}', 'r')
content = f.read()
content_length = len(content)
content_type = path[path.rfind('.') + 1:len(path)] # get file extension
response = f'{http_version} 200 OK\r\nContent-Type: text/{content_type}\r\nContent-Length: {content_length}\r\n\r\n{content}'
self.request.sendall(bytearray(response, 'utf-8'))
except:
# error reading file occured
self.request.sendall(bytearray(f"{http_version} 500 Internal Server Error\r\n", 'utf-8'))
else:
# not a valid path
self.request.sendall(bytearray(f'{http_version} 404 Not Found\r\nContent-Type: text/html\r\nConnection:Close\r\n\r\n', 'utf-8'))
elif method in other_methods:
self.request.sendall(bytearray(f"{http_version} 405 Method Not Allowed\r\nContent-Type: text/html\r\n\r\n", 'utf-8'))
else:
self.request.sendall(bytearray(f"{http_version} 400 Bad Request\r\nContent-Type: text/html\r\nConnection: close\r\n\r\n", 'utf-8'))
except IndexError:
pass
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| 2.828125 | 3 |
stan/tests/test_procparse.py | chappers/Stan | 1 | 12793902 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 18 12:54:30 2014
@author: Chapman
"""
import unittest
from stan.proc import proc_parse
cstr = """proc describe data = df1 out = df2;
by a;
run;"""
cstr1 = """proc describe data = df1 out = df2;
by a;
fin = "/usr/test.text";
quit;"""
class TestDesc(unittest.TestCase):
def test_dataset_opt(self):
self.assertTrue(proc_parse.proc_parse(cstr).strip() == "df2=describe.describe(data=df1,by='a')")
self.assertTrue(proc_parse.proc_parse(cstr1).strip() == "df2=describe.describe(data=df1,by='a',fin='/usr/test.text')")
if __name__ == '__main__':
unittest.main()
| 2.1875 | 2 |
tests/settings.py | jamesturk/django-mergeobject | 0 | 12793903 | <reponame>jamesturk/django-mergeobject<gh_stars>0
SECRET_KEY = 'so-secret'
INSTALLED_APPS = (
'tests',
)
MIDDLEWARE_CLASSES = ()
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
| 1.453125 | 1 |
halls.parser.py | dwrodri/cumapit | 2 | 12793904 | <gh_stars>1-10
#!/usr/bin/python2
import imageio
import png
import numpy as np
import sys
import math
from collections import namedtuple
if len(sys.argv) != 2:
print("%s <campusMap.png>" % sys.argv[0])
exit()
IMG = imageio.imread(sys.argv[1])
Point = namedtuple("Point",["x","y"])
def update_pixel(row,col,r,g,b,a):
global IMG
temp = [r,g,b,a]
for i in xrange(len(temp)):
IMG[row,col,i] = temp[i]
def write_png(fn):
global IMG
file_handle = open(fn, 'wb')
w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True)
w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2])))
file_handle.close()
def in_bounds(p):
global IMG
return p.x > 0 and p.x < IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0]
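# draw_line is a simple incremental (DDA-style) rasteriser: it steps one pixel at a time
# along the longer axis, accumulates the slope along the other axis, and colours each
# visited pixel until the line leaves the image bounds.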
def draw_line(x1,y1,x2,y2,r,g,b,a):
# straight line up and down
if x1 == x2:
for y in range(min(y1,y2), max(y1,y2)+1):
if not in_bounds(Point(x1,y)):
break
update_pixel(y,x1,r,g,b,a)
return
slope = float(y2 - y1) / float(x2 - x1)
if math.fabs(slope) > 1.0:
slope = 1.0 / slope
if y1 > y2:
y1, y2 = y2, y1
x1, x2 = x2, x1
x = float(x1)
for y in range(y1, y2+1):
if not in_bounds(Point(int(x),y)):
break
update_pixel(y,int(x),r,g,b,a)
x += slope
return
# make it definitely go left to right
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
y = float(y1)
for x in range(x1, x2+1):
if not in_bounds(Point(x,int(y))):
break
update_pixel(int(math.floor(y)),x,r,g,b,a)
y += slope
def main():
global IMG
# read in the file
splitLines = []
for line in sys.stdin:
splitLines += [line.strip().split(' ')]
# start transparent
for i in xrange(IMG.shape[0]):
for j in xrange(IMG.shape[1]):
update_pixel(i,j,0,0,0,0)
# draw all the halls
for splitLine in splitLines:
if splitLine[0] != "Hallway":
continue
point0 = splitLines[int(splitLine[1])]
point1 = splitLines[int(splitLine[2])]
draw_line( \
int(point0[3]), int(point0[2]), \
int(point1[3]), int(point1[2]), \
0, 255, 255, 255 \
)
write_png("halls.png")
main()
| 2.9375 | 3 |
DoraGamePlaying-m5skipping/plotResults.py | AlexDoumas/BrPong_1 | 3 | 12793905 | import numpy as np
from matplotlib import pyplot as plt
def loadFile(filename):
f = open(filename,'r')
text = f.read()
f.close()
rewards = []
steps = []
for line in text.split('\n'):
pieces = line.split(',')
if(len(pieces) == 2):
rewards.append(float(pieces[0]))
steps.append(int(pieces[1]))
return rewards,steps
def loadFiles(files):
rewards = []
steps = []
for f in files:
r,s = loadFile(f)
rewards.extend(r)
steps.extend(s)
return rewards,steps,
def plotResults(rewards,steps,outputFile):
plt.subplot(2,1,1)
plt.plot(rewards)
plt.xlabel('number of games played')
plt.ylabel('reward received per game')
plt.subplot(2,1,2)
plt.plot(steps)
plt.xlabel('number of games played')
plt.ylabel('number of actions taken per game')
plt.savefig(outputFile)
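# Sliding-window mean used to smooth the noisy per-game curves before plotting.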
def Average(rewards,n):
return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)]
if(__name__ == "__main__"):
LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284']
SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147']
rewards,steps = loadFiles(['./SmallAgent/29-01-2018'])
rewards = Average(rewards,10)
steps = Average(steps,10)
plotResults(rewards,steps,"./test.png") | 2.875 | 3 |
hundun/__version__.py | llbxg/hundun | 4 | 12793906 | MAJOR = 0
MINOR = 1
MICRO = 38
__version__ = f'{MAJOR}.{MINOR}.{MICRO}'
| 1.359375 | 1 |
traffic_lights/training/train.py | aidandunlop/traffic_light_recognition | 5 | 12793907 | <reponame>aidandunlop/traffic_light_recognition<filename>traffic_lights/training/train.py
import torch
import numpy as np
from traffic_lights.data.constants import CLASS_LABEL_MAP
from traffic_lights.lib.engine import train_one_epoch
from .model import get_model, evaluate
# TODO: add eval boolean flag, which reports eval stuff if wanted
# TODO: look at why the amount of parameters is so large
def train(
parameterization,
num_classes,
device,
data_loader_train,
data_loader_val,
validation_ground_truth,
):
model = get_model(num_classes)
model.to(device)
epochs = parameterization["num_epochs"]
learning_rate = parameterization["lr"]
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
print("Using lr={} and num_epochs={}".format(learning_rate, epochs))
for epoch in range(epochs):
train_one_epoch(
model, optimizer, data_loader_train, device, epoch, print_freq=1000
)
evaluation = evaluate(model, data_loader_val, device, validation_ground_truth)
precisions = [
0 if np.isnan(metric["AP"]) else metric["AP"] for metric in evaluation
]
mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP)
print("mAP:", mean_average_precision)
torch.save(model, "model_lr_{}_epochs_{}.pth".format(learning_rate, epochs))
return mean_average_precision
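# Illustrative call only (the loaders and ground truth below are placeholders, not
# defined in this module):
#   best_map = train({"num_epochs": 10, "lr": 1e-4}, num_classes, torch.device("cuda"),
#                    data_loader_train, data_loader_val, validation_ground_truth)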
| 2.5 | 2 |
exo/make_heatmap.py | CEGRcode/exo | 0 | 12793908 | <reponame>CEGRcode/exo
#!/usr/bin/python
from __future__ import division
import math
import pprint
import click
import matplotlib
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import numpy as np
matplotlib.use('Agg')
"""
Program to Create a heatmap from tagPileUp tabular file and contrast Threshold file.
"""
def rebin(a, new_shape):
M, N = a.shape
m, n = new_shape
if m >= M:
# repeat rows in data matrix
a = np.repeat(a, math.ceil(float(m) / M), axis=0)
M, N = a.shape
m, n = new_shape
row_delete_num = M % m
col_delete_num = N % n
np.random.seed(seed=0)
if row_delete_num > 0:
# select deleted rows with equal intervals
row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int)
# sort the random selected deleted row ids
row_delete = np.sort(row_delete)
row_delete_plus1 = row_delete[1:-1] + \
1 # get deleted rows plus position
# get deleted rows plus position (top +1; end -1)
row_delete_plus1 = np.append(
np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1)
# put the info of deleted rows into the next rows by mean
a[row_delete_plus1, :] = (
a[row_delete, :] + a[row_delete_plus1, :]) / 2
        a = np.delete(a, row_delete, axis=0) # remove the selected rows
if col_delete_num > 0:
# select deleted cols with equal intervals
col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int)
# sort the random selected deleted col ids
col_delete = np.sort(col_delete)
col_delete_plus1 = col_delete[1:-1] + \
1 # get deleted cols plus position
# get deleted cols plus position (top +1; end -1)
col_delete_plus1 = np.append(
np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1)
# put the info of deleted cols into the next cols by mean
a[:, col_delete_plus1] = (
a[:, col_delete] + a[:, col_delete_plus1]) / 2
        a = np.delete(a, col_delete, axis=1) # remove the selected columns
M, N = a.shape
# compare the heatmap matrix
a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1)
return np.array(a_compress)
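# Sketch of how rebin() is used further down (shapes are illustrative): a tag-pileup
# matrix larger than the requested plot, e.g. rebin(data0, (700, 300)), has surplus
# rows/columns merged into their neighbours first and is then block-averaged down to
# the plot's pixel dimensions.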
def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites):
# initialize color
levs = range(100)
assert len(levs) % 2 == 0, 'N levels must be even.'
# select colors from color list
my_cmap = mcolors.LinearSegmentedColormap.from_list(
name='white_sth', colors=c, N=len(levs) - 1,)
# initialize figure
plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96)
# remove margins , # this helps to maintain the ticks to be odd
ax = plt.axes([0, 0, 1, 1])
plt.imshow(data01, cmap=my_cmap, interpolation='nearest',
vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap
# little trick to create custom tick labels.
# [ only works if the difference between col and row is 100 (cols - rows = 100), fails for (300,150) & (300,100) etc]
# calculate the major tick locations
locaters = col_num // 4
ax.xaxis.set_major_locator(MultipleLocator(locaters))
# get the initial ticks
locs, labels = plt.xticks()
# remove the first location to get proper heatmap tick position.
locs = np.delete(locs, 0)
labels.pop()
# find the mid value and set it to zero, since ax is helping to make sure there are odd number of ticks.
mid = int(len(labels) // 2)
labels[0] = "-" + ticks
labels[mid] = "0"
labels[len(labels) - 1] = ticks
# display the new ticks
plt.xticks(locs, labels, fontsize=14)
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(which='major', length=10, width=2, color='black')
ax.tick_params(which='minor', length=6, width=2, color='black')
# Draw a horizontal line through the midpoint.
plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2)
print("\n DEBUG INFO \n locs : {} \n length_locs : {} \n labels : {} \n length_labels:{}\n".format(
locs, len(locs), labels, len(labels)))
plt.yticks([])
plt.xlabel(xlabel, fontsize=14)
ylabel = "{:,}".format(sites) + " sites"
plt.ylabel(ylabel, fontsize=14)
plt.title(heatmapTitle, fontsize=18)
# to increase the width of the plot borders
plt.setp(ax.spines.values(), linewidth=2)
plt.savefig(out_file_name, bbox_inches='tight',
pad_inches=0.05, facecolor=None, dpi=ddpi)
def plot_colorbar(data01, c, out_file_name, row_num, col_num, categories):
# initialize color
levs = range(100)
assert len(levs) % 2 == 0, 'N levels must be even.'
# select colors from color list
my_cmap = mcolors.LinearSegmentedColormap.from_list(
name='white_sth', colors=c, N=len(levs) - 1,)
# initialize figure
fig = plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300)
# remove margins , # this helps to maintain the ticks to be odd
ax = plt.axes([0, 0, 1, 1])
plt.imshow(data01, cmap=my_cmap, interpolation='nearest',
aspect='auto') # plot heatmap
plt.xticks([])
plt.yticks([])
# to increase the width of the plot borders
plt.setp(ax.spines.values(), linewidth=2)
# calculate how long the color box should be for each by setting up a ratio: (this site)/(total sites) = (height of unknown box)/(feature box height)
totalsites = sum(categories)
rpheight = categories[0] / totalsites * data01.shape[0]
stmheight = categories[1] / totalsites * data01.shape[0]
srgheight = categories[2] / totalsites * data01.shape[0]
cycheight = categories[3] / totalsites * data01.shape[0]
cofheight = categories[4] / totalsites * data01.shape[0]
unbheight = categories[5] / totalsites * data01.shape[0]
# print "cofheight: {}, unbheight : {}".format(unbheight, cofheight)
# now calculate the "top" location of each box, each top should be the ending position of the previous box
topstm = rpheight
topsrg = topstm + stmheight
topcyc = topsrg + srgheight
topcof = topcyc + cycheight
topunb = topcof + cofheight
# find the actual position of the numbers by centering the numbers in the colored boxes and applying an arbitrary offset
rppos = int(rpheight / 2)
stmpos = int(stmheight / 2 + topstm)
srgpos = int(srgheight / 2 + topsrg)
cycpos = int(cycheight / 2 + topcyc)
cofpos = int(cofheight / 2 + topcof)
unbpos = int(unbheight / 2 + topunb)
# positions for the values
print("rp: {}, stm: {}, ess : {}, cof : {}, unb : {}, trna : {}".format(
rppos, stmpos, srgpos, cycpos, cofpos, unbpos))
# The default transform specifies that text is in data co-ordinates, that is even though the
# image is compressed , the point are plotted based on datapoint in (x,y) like a graph
# Assigning the rotation based on minimum value
if min(categories) == categories[0]:
if categories[0] != 0:
plt.text(25, rppos, categories[0], horizontalalignment='center',
verticalalignment='center', fontsize=10, color='white', weight='bold')
else:
plt.text(25, rppos, categories[0], horizontalalignment='center',
verticalalignment='center', fontsize=10, color='white', weight='bold')
# Assigning the rotation based on minimum value
if min(categories) == categories[1]:
if categories[1] != 0:
plt.text(25, stmpos, categories[1], horizontalalignment='center',
verticalalignment='center', fontsize=13, color='black', weight='bold')
else:
plt.text(25, stmpos, categories[1], horizontalalignment='center',
verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold')
# Assigning the rotation based on minimum value
if min(categories) == categories[2]:
if categories[2] != 0:
plt.text(25, srgpos, categories[2], horizontalalignment='center',
verticalalignment='center', fontsize=13, color='white', weight='bold')
else:
plt.text(25, srgpos, categories[2], horizontalalignment='center',
verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold')
# Assigning the rotation based on minimum value
if min(categories) == categories[3]:
if categories[3] != 0:
plt.text(25, cycpos, categories[3], horizontalalignment='center',
verticalalignment='center', fontsize=13, color='white', weight='bold')
else:
plt.text(25, cycpos, categories[3], horizontalalignment='center',
verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold')
# Assigning the rotation based on minimum value
if min(categories) == categories[4]:
if categories[4] != 0:
plt.text(25, cofpos, categories[4], horizontalalignment='center',
verticalalignment='center', fontsize=13, color='white', weight='bold')
else:
plt.text(25, cofpos, categories[4], horizontalalignment='center',
verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold')
# Assigning the rotation based on minimum value
if min(categories) == categories[5]:
if categories[5] != 0:
plt.text(25, unbpos, categories[5], horizontalalignment='center',
verticalalignment='center', fontsize=10, color='white', weight='bold')
else:
plt.text(25, unbpos, categories[5], horizontalalignment='center',
verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold')
# removing all the borders and frame
for item in [fig, ax]:
item.patch.set_visible(False)
# saving the file
plt.savefig(out_file_name, bbox_inches='tight',
facecolor=None, dpi=300)
def load_Data(input_file, out_file, upper_lim, lower_lim, color, header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar):
data = open(input_file, 'r')
if header == 'T':
data.readline()
data0 = []
dataGenes = [] # to store colorbar data
# to store counts for RP, SAGA and TFIID
catergoryCount = [0, 0, 0, 0, 0, 0]
sites = 0 # to calculate the # of sites in the heatmap
for rec in data:
tmp = [(x.strip()) for x in rec.split('\t')]
sites = sites + 1
if generateColorbar == '1':
rankOrder = int(rec.split("\t")[0])
if rankOrder <= 19999:
dataGenes.append([1] * len(tmp[start_col:]))
catergoryCount[0] = catergoryCount[0] + 1
elif rankOrder <= 29999 and rankOrder >= 20000:
dataGenes.append([2] * len(tmp[start_col:]))
catergoryCount[1] = catergoryCount[1] + 1
elif rankOrder <= 39999 and rankOrder >= 30000:
dataGenes.append([3] * len(tmp[start_col:]))
catergoryCount[2] = catergoryCount[2] + 1
elif rankOrder <= 49999 and rankOrder >= 40000:
dataGenes.append([4] * len(tmp[start_col:]))
catergoryCount[3] = catergoryCount[3] + 1
elif rankOrder <= 59999 and rankOrder >= 50000:
dataGenes.append([5] * len(tmp[start_col:]))
catergoryCount[4] = catergoryCount[4] + 1
elif rankOrder <= 219999 and rankOrder >= 210000:
dataGenes.append([6] * len(tmp[start_col:]))
catergoryCount[5] = catergoryCount[5] + 1
data0.append(tmp[start_col:])
data0 = np.array(data0, dtype=float)
print("# sites in the heatmap", sites)
# creating the np-array to plot the colorbar
dataGenes = np.array(dataGenes, dtype=float)
print("catergoryCount : {}".format(catergoryCount))
if row_num == -999:
row_num = data0.shape[0]
if col_num == -999:
col_num = data0.shape[1]
# rebin data0 (compresses the data using treeView compression algorithm)
if row_num < data0.shape[0] and col_num < data0.shape[1]:
data0 = rebin(data0, (row_num, col_num))
if generateColorbar == '1':
# i have hard-coded the width for colorbar(50)
dataGenes = rebin(dataGenes, (row_num, 50))
elif row_num < data0.shape[0]:
data0 = rebin(data0, (row_num, data0.shape[1]))
if generateColorbar == '1':
dataGenes = rebin(dataGenes, (row_num, 50))
elif col_num < data0.shape[1]:
data0 = rebin(data0, (data0.shape[0], col_num))
if generateColorbar == '1':
dataGenes = rebin(dataGenes, (data0.shape[0], 50))
# set color here
# convert rgb to hex (since matplotlib doesn't support 0-255 format for colors)
s = color.split(",")
color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2]))
c = ["white", color]
# generate heatmap
plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num,
col_num, ticks, ddpi, xlabel, heatmapTitle, sites)
# checking if we need to plot the color bar
if generateColorbar == '1':
print("Creating the colobar")
mycolors = ['#ff2600', '#ffd54f', '#43a047',
'#0096ff', '#9437ff', '#9e9e9e']
colors = []
# deciding colors based on the catergory values.
for i in range(0, len(catergoryCount)):
if catergoryCount[i] != 0:
colors.append(mycolors[i])
plot_colorbar(dataGenes, colors, "colorbar.png",
900, 35, catergoryCount)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS)
@click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))
@click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))
@click.option('-ph', '--height', metavar="<int>", type=int, default=700, prompt=True, show_default='True', help='Plot Height')
@click.option('-pw', '--width', metavar="<int>", type=int, default=300, prompt=True, show_default='True', help='Plot Width')
@click.option('-c', '--color', metavar="<string>", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color')
@click.option('-t', '--title', metavar="<string>", default=' ', prompt=True, show_default=' ', help='Plot Title')
@click.option('-xl', '--xlabel', metavar="<string>", default=' ', prompt=True, show_default=' ', help='Label under X-axis')
@click.option('-k', '--ticks', metavar="<string>", default='2', prompt=True, show_default='2', help='X-axis tick mark value')
@click.option('-d', '--dpi', metavar="<int>", type=int, default=100, prompt=True, show_default='100', help='Plot pixel density')
@click.option('-cb', '--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0', help="Generate the gene colorbar (0: No, 1: Yes)")
@click.option('-o', '--out', metavar="<string>", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename')
def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks, dpi, colorbar, out):
"""
Creates YEP Style All Feature heatmap containing genecategories.
\b
Generates Colorbar for the gene categories.
"""
click.echo('\n' + '.' * 50)
params = {}
openfile = open(threshold_file, 'r').readlines()
for line in openfile:
line = line.strip()
temp = line.split(":")
if temp[0] not in params.keys():
params[temp[0]] = temp[1]
print(" \n Parameters for the heatmap")
pprint.pprint(params)
upper_lim = float(params['upper_threshold'])
lower_lim = int(params['lower_threshold'])
header = params['header']
start_col = int(params['start_col'])
load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header,
start_col, height, width, ticks, dpi, xlabel, title, colorbar)
click.echo('\n' + '.' * 50)
| 2.609375 | 3 |
voice_rebuilder/rtp_capture.py | Casual-Alchemist/sampleproj | 0 | 12793909 | import pyshark
class Audio_Scraper:
def __init__(self, pcap, filter, outfile):
self.pcap = pcap
self.filter = filter
self.outfile = outfile
def scraper(self):
rtp_list =[]
pcap_file = self.pcap
out_file = self.outfile
print("Scraping: " + pcap_file)
filter_type = self.filter
cap = pyshark.FileCapture(pcap_file,display_filter=filter_type)
raw_audio = open(out_file,'wb')
for i in cap:
try:
rtp = i[3]
#data = rtp.get_field_value('DATA')
data = rtp.payload
if ":" in data:
print(data)
rtp_list.append(data.split(":"))
except:
pass
for rtp_packet in rtp_list:
packet = " ".join(rtp_packet)
print(packet)
audio = bytearray.fromhex(packet)
raw_audio.write(audio)
print("\nFinished outputing raw audio: %s" % out_file)
# pcap_test = Audio_Scraper("my.pcap","rtp","my_audio.raw").scraper()
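# Note (assumption, not from the original author): the .raw file written above is
# just the concatenated RTP payload bytes; depending on the negotiated codec
# (e.g. G.711) it usually needs an external decoder before it can be played back.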
| 3.078125 | 3 |
hello_new_project.py | Dangl-IT/avacloud-demo-python | 1 | 12793910 | from __future__ import print_function
import time
import avacloud_client_python
from avacloud_client_python.rest import ApiException
import requests
import os
import json
client_id = 'use_your_own_value'
client_secret = '<PASSWORD>_your_own_value'
url = 'https://identity.dangl-it.com/connect/token'
payload = {'grant_type': 'client_credentials', 'scope': 'avacloud'}
response = requests.post(url, data=payload, auth=(client_id, client_secret))
access_token = response.json()['access_token']
# Configure OAuth2 access token for authorization: Dangl.Identity
configuration = avacloud_client_python.Configuration()
configuration.access_token = access_token
# Here, a very small project is created and saved as GAEB file
try:
ava_api_instance = avacloud_client_python.AvaConversionApi(avacloud_client_python.ApiClient(configuration))
ava_project = json.loads("""{
"projectInformation": {
"itemNumberSchema": {
"tiers": [
{
"length": 2,
"tierType": "Group"
},
{
"length": 2,
"tierType": "Group"
},
{
"length": 4,
"tierType": "Position"
}
]
}
},
"serviceSpecifications": [
{
"projectTaxRate": 0.19,
"elements": [
{
"elementTypeDiscriminator": "ServiceSpecificationGroupDto",
"shortText": "Parent Group",
"itemNumber": {
"stringRepresentation": "01."
},
"elements": [
{
"elementTypeDiscriminator": "ServiceSpecificationGroupDto",
"shortText": "Sub Group",
"itemNumber": {
"stringRepresentation": "01.02."
},
"elements": [
{
"elementTypeDiscriminator": "PositionDto",
"shortText": "Hello Position!",
"itemNumber": {
"stringRepresentation": "01.02.0500"
},
"quantityOverride": 10,
"unitPriceOverride": 5
}
]
}
]
}
]
}
]
}""")
# See https://github.com/swagger-api/swagger-codegen/issues/2305 for more info about why you should use _preload_content=False
# If the _preload_content parameter is not set to False, the binary response content (file) will be attempted to be decoded as UTF8 string,
# this would lead to an error. Instead, the raw response should be used
api_response = ava_api_instance.ava_conversion_convert_to_gaeb(ava_project,
destination_gaeb_type='GaebXml_V3_2',
target_exchange_phase_transform='Grant',
_preload_content=False)
with open("./NewProject.X86", "wb") as gaeb_file:
gaeb_file.write(api_response.data)
except ApiException as e:
print("Exception when calling AvaConversionApi->ava_conversion_convert_to_gaeb: %s\n" % e)
| 2.5 | 2 |
autograd/scipy/stats/beta.py | gautam1858/autograd | 6,119 | 12793911 | <gh_stars>1000+
from __future__ import absolute_import
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
return (1 + a * (x-1) + x * (b-2)) / (x * (x-1))
def grad_beta_logpdf_arg1(x, a, b):
return np.log(x) - psi(a) + psi(a + b)
def grad_beta_logpdf_arg2(x, a, b):
return np.log1p(-x) - psi(b) + psi(a + b)
defvjp(cdf, lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * np.power(x, a-1) * np.power(1-x, b-1) / beta(a, b)), argnums=[0])
defvjp(logpdf,
lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * grad_beta_logpdf_arg0(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * grad_beta_logpdf_arg1(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * grad_beta_logpdf_arg2(x, a, b)))
defvjp(pdf,
lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * ans * grad_beta_logpdf_arg0(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * ans * grad_beta_logpdf_arg1(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * ans * grad_beta_logpdf_arg2(x, a, b)))
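# Minimal usage sketch (not part of the original module): the primitives above make
# beta.logpdf differentiable with autograd, e.g. with respect to x.
#   from autograd import grad
#   from autograd.scipy.stats import beta
#   dlogpdf_dx = grad(lambda x: beta.logpdf(x, 2.0, 3.0))
#   dlogpdf_dx(0.3)  # equals (a-1)/x - (b-1)/(1-x) at a=2.0, b=3.0, x=0.3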
| 2.078125 | 2 |
examples/144. Binary Tree Preorder Traversal.py | yehzhang/RapidTest | 0 | 12793912 | from rapidtest import Test, Case, TreeNode
from solutions.binary_tree_preorder_traversal import Solution
with Test(Solution) as test:
Case(TreeNode.from_string('[1,null,2,3]'), result=[1, 2, 3])
Case(TreeNode.from_string('[]'), result=[])
Case(TreeNode.from_string('[1]'), result=[1])
Case(TreeNode.from_string('[1,2]'), result=[1, 2])
Case(TreeNode.from_string('[1,2]'), result=[1, 2])
Case(TreeNode.from_string(
'[1,2,null,4,5,null,6,2,null,6,8,4,null,1,2,4,null,6,8,0,9,null,7,5,4,null,3,null,2,3]'),
result=[1, 2, 4, 6, 6, 1, 0, 3, 9, 2, 7, 8, 4, 5, 4, 5, 2, 4, 6, 3, 8, 2])
| 2.6875 | 3 |
microcosm_pubsub/chain/statements/assign.py | Sinon/microcosm-pubsub | 5 | 12793913 | <gh_stars>1-10
"""
assign("foo.bar").to("baz")
assign_constant(1).to("qux")
"""
from inspect import getfullargspec
from microcosm_pubsub.chain.exceptions import AttributeNotFound
class Reference:
def __init__(self, name):
self.parts = name.split(".")
@property
def key(self):
return self.parts[0]
def __call__(self, context):
value = context[self.key]
for part in self.parts[1:]:
if hasattr(value, part):
value = getattr(value, part)
else:
try:
value = value[part]
except KeyError:
raise AttributeNotFound(self.parts[0], part)
return value
class Constant:
def __init__(self, value):
self.value = value
def __call__(self, context):
return self.value
class Function:
def __init__(self, func):
self.func = func
try:
self.args = getfullargspec(func).args
except TypeError:
# NB: getfullargspec fails for builtins like 'dict'
#
# And oddly `inspect.isbuiltin` doesn't work as expected either. We could instead
# check `func.__module__ == 'builtins'` but that feels more fragile than just assuming
            # an error indicates a builtin... and that a builtin won't take our 'context' argument
# in any useful way.
#
# See: https://bugs.python.org/issue1748064
self.args = ()
def __call__(self, context):
if self.args:
return self.func(context)
else:
return self.func()
class AssignStatement:
"""
Assign `this` value as `that`.
"""
def __init__(self, this, that=None):
self.this = this
self.that = that
def to(self, that):
self.that = that
return self
@property
def name(self):
return self.that
def __str__(self):
return f"assign_{self.name}"
def __call__(self, context):
value = self.this(context)
context[self.name] = value
return value
def assign(this):
return AssignStatement(Reference(this))
def assign_constant(this):
return AssignStatement(Constant(this))
def assign_function(this):
return AssignStatement(Function(this))
def extract(name, key, key_property=None):
"""
Extract an argument from a context to another context key
:param name: new context key
:param key: old context key
    :param key_property: property of the context key
"""
if key_property:
key = ".".join([key, key_property])
return AssignStatement(Reference(key), name)
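# Minimal usage sketch (assumed dict-like context, not from the original module):
# each statement is called with a context and writes its result back under the
# assigned name.
#   context = {"foo": {"bar": 7}}
#   assign("foo.bar").to("baz")(context)        # context["baz"] == 7
#   assign_constant(42).to("answer")(context)   # context["answer"] == 42
#   extract("qux", "foo", "bar")(context)       # context["qux"] == 7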
| 2.625 | 3 |
alembic/versions/140a25d5f185_create_tokens_table.py | alvierahman90/matrix-registration | 160 | 12793914 | """create tokens table
Revision ID: 1<PASSWORD>
Revises:
Create Date: 2020-12-12 01:44:28.195736
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.engine.reflection import Inspector
from flask_sqlalchemy import SQLAlchemy
# revision identifiers, used by Alembic.
revision = '1<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None
db = SQLAlchemy()
def upgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if 'ips' not in tables:
op.create_table(
'ips',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('address', sa.String(255), nullable=True)
)
if 'tokens' not in tables:
op.create_table(
'tokens',
sa.Column('name', String(255), primary_key=True),
sa.Column('expiration_date', DateTime, nullable=True),
sa.Column('max_usage', Integer, default=1),
sa.Column('used', Integer, default=0),
sa.Column('disabled', Boolean, default=False),
sa.Column('ips', Integer, ForeignKey('association.id'))
)
else:
try:
with op.batch_alter_table('tokens') as batch_op:
batch_op.alter_column('ex_date', new_column_name='expiration_date', nullable=True)
batch_op.alter_column('one_time', new_column_name='max_usage')
batch_op.add_column(
Column('disabled', Boolean, default=False)
)
except KeyError:
pass
if 'association' not in tables:
op.create_table(
'association', db.Model.metadata,
Column('ips', String, ForeignKey('ips.address'), primary_key=True),
Column('tokens', Integer, ForeignKey('tokens.name'), primary_key=True)
)
op.execute("update tokens set expiration_date=null where expiration_date='None'")
def downgrade():
op.alter_column('tokens', 'expiration_date', new_column_name='ex_date')
op.alter_column('tokens', 'max_usage', new_column_name='one_time')
| 1.664063 | 2 |
nozama-cloudsearch-data/nozama/cloudsearch/data/tests/conftest.py | iGamesInc/nozama-cloudsearch | 0 | 12793915 | <filename>nozama-cloudsearch-data/nozama/cloudsearch/data/tests/conftest.py
# -*- coding: utf-8 -*-
"""
"""
import logging
import pytest
@pytest.fixture(scope='session')
def logger(request):
"""Set up a root logger showing all entries in the console.
"""
log = logging.getLogger()
hdlr = logging.StreamHandler()
fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
formatter = logging.Formatter(fmt)
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(logging.DEBUG)
log.propagate = False
return log
@pytest.fixture(scope='function')
def mongodb(request):
"""Set up a mongo connection reset and ready to roll.
"""
from nozama.cloudsearch.data import db
db.init(dict(db_name='unittesting-db'))
db.db().hard_reset()
@pytest.fixture(scope='function')
def elastic(request):
"""Set up a elasticsearch connection reset and ready to roll.
This will attempt to connect to the default elasticsearch instance
on http://localhost:9200. Its not configurable yet.
"""
from nozama.cloudsearch.data.db import init_es, get_es
init_es(dict(es_namespace="ut_"))
get_es().hard_reset()
| 2.109375 | 2 |
earth_enterprise/src/scons/packageUtils_test.py | ezeeyahoo/earthenterprise | 2,661 | 12793916 | <reponame>ezeeyahoo/earthenterprise
#-*- Python -*-
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import re
from packageUtils import IsPackageVersionSufficient
from packageUtils import UsesRPM
from packageUtils import FileContains
from packageUtils import GetDEBPackageInfo
failure_list = []
# test FileContains
test_file = '/etc/profile'
if FileContains('/IDontExist', re.compile(r"a")):
failure_list.append("FileContains Failed: returned true for non-existing file")
if FileContains(test_file, re.compile(r"PROFILE")) == False:
failure_list.append("FileContains Failed: did not find PROFILE in /etc/hostname")
if FileContains(test_file, re.compile(r"not anything here")):
failure_list.append("FileContains Failed: found garbage search string in /etc/hostname")
# test UsesRPM
print("Basic checks for Ubuntu vs RPM\nMake sure these coincide with your current system.\n\n")
uses_rpm = "does not use RPM"
if UsesRPM():
uses_rpm = "uses RPM"
print("This machine %s" % uses_rpm)
# test GetDEBPackageInfo for non-RPM systems
if UsesRPM() == False:
package_name = "gdal-ge"
package_results = GetDEBPackageInfo (package_name)
  if len(package_results) != 2 or package_results[1] == False:
failure_list.append("%s not installed: GetDEBPackageInfo returns %s" %
(package_name, package_results))
# test Package check
valid_test_packages = [['apache-ge-devel', '2.2.2'],
['apache-ge-devel', '2.2.2.1'],
['jdk-ge', '1.6.0-1'],
['jdk-ge', '1.6.0-0']];
invalid_test_packages = [['apache-ge-devel9', '2.2.2'],
['apache-ge-devel', '10.2.2.1'],
['j9dk-ge', '1.6.0-1'],
['jdk-ge', '1.99.0-0']];
for package_list in valid_test_packages:
if IsPackageVersionSufficient(package_list[0], package_list[1]) == False:
failure_list.append("Failed test that should pass: %s" % (package_list))
print("Test is now looking for invalid packages (error messages expected until tests are complete).\n\n")
for package_list in invalid_test_packages:
if IsPackageVersionSufficient(package_list[0], package_list[1]):
failure_list.append("Passed test that should fail: %s" % (package_list))
print("\n\nTests complete.\n\n")
if len(failure_list) > 0:
print("\n\n%s TEST FAILURES" % len(failure_list))
for s in failure_list:
print(s)
else:
print("\n\nSUCCESS: All tests succeeded!")
| 2.40625 | 2 |
tests/test_mc_cnn.py | CNES/Pandora_MCCNN | 3 | 12793917 | #!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
# https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the cost volume created by mc_cnn
"""
import unittest
import numpy as np
import torch
import torch.nn as nn
from mc_cnn.run import computes_cost_volume_mc_cnn_fast
from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer
from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator
from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator
# pylint: disable=no-self-use
class TestMCCNN(unittest.TestCase):
"""
    TestMCCNN class allows testing the cost volume created by mc_cnn
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1
self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1
def test_computes_cost_volume_mc_cnn_fast(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
# disparity -2
cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
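    # Convention in the ground-truth construction above (and in the tests below):
    # reference column c is compared with secondary column c + d for disparity d,
    # and positions where c + d falls outside the image are left as NaN.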
def test_computes_cost_volume_mc_cnn_fast_negative_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_fast_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def sad_cost(self, ref_features, sec_features):
"""
Useful to test the computes_cost_volume_mc_cnn_accurate function
"""
return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0)
def test_computes_cost_volume_mc_cnn_accurate(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
# disparity -2
cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
    def test_computes_cost_volume_mc_cnn_accurate_negative_disp(self):
        """
Test the computes_cost_volume_mc_cnn_accurate function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_MiddleburyGenerator(self):
"""
test the function MiddleburyGenerator
"""
# Script use to create images_middlebury and samples_middlebury :
# pylint: disable=pointless-string-statement
"""
# shape 1, 2, 13, 13 : 1 exposures, 2 = left and right images
image_pairs_0 = np.zeros((1, 2, 13, 13))
# left
image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((1, 2, 13, 13))
image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_middlebury.hdf5', 'w')
img_0 = [image_pairs_0]
grp = img_file.create_group(str(0))
# 1 illumination
for light in range(len(img_0)):
dset = grp.create_dataset(str(light), data=img_0[light])
img_1 = [image_pairs_1]
grp = img_file.create_group(str(1))
for light in range(len(img_1)):
dset = grp.create_dataset(str(light), data=img_1[light])
sampl_file = h5py.File('sample_middlebury.hdf5', 'w')
# disparity of image_pairs_0
        x0 = np.array([[0., 5., 6., 1.],
[0., 7., 7., 1.]])
# disparity of image_pairs_1
        x1 = np.array([[ 1., 7., 5., -1.],
[ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"augmentation_param": {
"vertical_disp": 0,
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg)
# Patch of shape 3, 11, 11
# With the firt dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_DataFusionContestGenerator(self):
"""
test the function DataFusionContestGenerator
"""
# pylint: disable=pointless-string-statement
"""
# Script use to create images_middlebury and samples_middlebury :
# shape 2, 13, 13 : 2 = left and right images, row, col
image_pairs_0 = np.zeros((2, 13, 13))
# left
image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((2, 13, 13))
image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_dfc.hdf5', 'w')
img_file.create_dataset(str(0), data=image_pairs_0)
img_file.create_dataset(str(1), data=image_pairs_1)
sampl_file = h5py.File('sample_dfc.hdf5', 'w')
# disparity of image_pairs_0
x0 = np.array([[0., 5., 6., 1.],
[0., 7., 7., 1.]])
# disparity of image_pairs_1
x1 = np.array([[ 1., 7., 5., -1.],
[ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"vertical_disp": 0,
"augmentation_param": {
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg)
# Patch of shape 3, 11, 11
        # With the first dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_1[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 7
gt_sec_pos_patch = self.sec_img_2[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 7
gt_sec_neg_patch = self.sec_img_2[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
if __name__ == "__main__":
unittest.main()
| 2.0625 | 2 |
studies/upgrade_noise/plotting/make_plots.py | kaareendrup/gnn-reco | 0 | 12793918 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlite3
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
def add_truth(data, database):
data = data.sort_values('event_no').reset_index(drop = True)
with sqlite3.connect(database) as con:
query = 'select event_no, energy, interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no']))
truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True)
truth['track'] = 0
truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1
add_these = []
for key in truth.columns:
if key not in data.columns:
add_these.append(key)
for key in add_these:
data[key] = truth[key]
return data
def get_interaction_type(row):
if row["interaction_type"] == 1: # CC
particle_type = "nu_" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])]
return f"{particle_type} CC"
else:
return "NC"
def resolution_fn(r):
if len(r) > 1:
return (np.percentile(r, 84) - np.percentile(r, 16)) / 2.
else:
return np.nan
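# Illustration (hypothetical numbers, not from the original script): for a roughly
# normal sample, (84th - 16th percentile) / 2 is close to one standard deviation,
# so resolution_fn is an outlier-robust spread estimate.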
def add_energylog10(df):
df['energy_log10'] = np.log10(df['energy'])
return df
def get_error(residual):
rng = np.random.default_rng(42)
w = []
for i in range(150):
new_sample = rng.choice(residual, size = len(residual), replace = True)
w.append(resolution_fn(new_sample))
return np.std(w)
def get_roc_and_auc(data, target):
fpr, tpr, _ = roc_curve(data[target], data[target+'_pred'])
auc_score = auc(fpr,tpr)
return fpr,tpr,auc_score
def plot_roc(target, runids, save_dir, save_as_csv = False):
width = 3.176*2
height = 2.388*2
fig = plt.figure(figsize = (width,height))
for runid in runids:
data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target))
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
if save_as_csv:
data = add_truth(data, database)
data = add_energylog10(data)
data.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
fpr, tpr, auc = get_roc_and_auc(data, target)
plt.plot(fpr,tpr, label =' %s : %s'%(runid,round(auc,3)))
plt.legend()
plt.title('Track/Cascade Classification')
plt.ylabel('True Positive Rate', fontsize = 12)
plt.xlabel('False Positive Rate', fontsize = 12)
ymax = 0.3
x_text = 0.2
y_text = ymax - 0.05
y_sep = 0.1
plt.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
plt.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
plt.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches="tight")
return
def calculate_width(data_sliced, target):
track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True)
cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True)
if target == 'energy':
residual_track = ((track[target + '_pred'] - track[target])/track[target])*100
residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100
elif target == 'zenith':
residual_track = (track[target + '_pred'] - track[target])*(360/(2*np.pi))
residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi))
else:
residual_track = (track[target + '_pred'] - track[target])
residual_cascade = (cascade[target + '_pred'] - cascade[target])
return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade)
def get_width(df, target):
track_widths = []
cascade_widths = []
track_errors = []
cascade_errors = []
energy = []
bins = np.arange(0,3.1,0.1)
if target in ['zenith', 'energy', 'XYZ']:
for i in range(1,len(bins)):
print(bins[i])
idx = (df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i])
data_sliced = df.loc[idx, :].reset_index(drop = True)
energy.append(np.mean(data_sliced['energy_log10']))
track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target)
track_widths.append(track_width)
cascade_widths.append(cascade_width)
track_errors.append(track_error)
cascade_errors.append(cascade_error)
track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors})
cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors})
return track_plot_data, cascade_plot_data
else:
print('target not supported: %s'%target)
# Load data
def make_plot(target, runids, save_dir, save_as_csv = False):
colors = {140021: 'tab:blue', 140022: 'tab:orange'}
fig = plt.figure(constrained_layout = True)
ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6)
for runid in runids:
predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True)
df = add_truth(df, database)
df = add_energylog10(df)
if save_as_csv:
df.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
plot_data_track, plot_data_cascade = get_width(df, target)
ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha = 1)
ax1.fill_between(plot_data_track['mean'],plot_data_track['width'] - plot_data_track['width_error'], plot_data_track['width'] + plot_data_track['width_error'],color = colors[runid], alpha = 0.8 ,label = 'Track %s'%runid)
ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw = 0.5, alpha = 1)
ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label = 'Cascade %s'%runid )
ax2 = ax1.twinx()
ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited energy', color = colors[runid])
#plt.title('$\\nu_{v,u,e}$', size = 20)
ax1.tick_params(axis='x', labelsize=6)
ax1.tick_params(axis='y', labelsize=6)
ax1.set_xlim((0,3.1))
leg = ax1.legend(frameon=False, fontsize = 8)
for line in leg.get_lines():
line.set_linewidth(4.0)
if target == 'energy':
ax1.set_ylim((0,175))
ymax = 23.
y_sep = 8
unit_tag = '(%)'
else:
unit_tag = '(deg.)'
if target == 'angular_res':
target = 'direction'
if target == 'XYZ':
target = 'vertex'
unit_tag = '(m)'
if target == 'zenith':
ymax = 10.
y_sep = 2.3
ax1.set_ylim((0,45))
plt.tick_params(right=False,labelright=False)
ax1.set_ylabel('%s Resolution %s'%(target.capitalize(), unit_tag), size = 10)
ax1.set_xlabel('Energy (log10 GeV)', size = 10)
x_text = 0.5
y_text = ymax - 2.
ax1.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
ax1.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
ax1.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.suptitle("%s regression Upgrade MC using GNN"%target)
#fig.suptitle('%s Resolution'%target.capitalize(), size = 12)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches="tight")
return
runids = [140021, 140022]
targets = ['zenith', 'energy', 'track']
save_as_csv = True
save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv'
for target in targets:
if target != 'track':
make_plot(target, runids, save_dir, save_as_csv)
else:
plot_roc(target, runids, save_dir, save_as_csv) | 2.40625 | 2 |
tools/checkimg.py | mrzhuzhe/YOLOX | 0 | 12793919 | <filename>tools/checkimg.py
import numpy as np
import pandas as pd
import ast
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import cv2
def plot_image_and_bboxes(img, bboxes):
fig, ax = plt.subplots(1, figsize=(10, 8))
ax.axis('off')
ax.imshow(img)
for bbox in bboxes:
rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor="none")
ax.add_patch(rect)
plt.show()
def get_image(img_name):
return np.array(Image.open(img_name))
def get_bbox(annots):
bboxes = [list(annot.values()) for annot in annots]
return bboxes
df = pd.read_csv('./slices_df.csv')
df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x))
df['bboxes'] = df.annotations.apply(get_bbox)
imglist = []
for i, row in df.iterrows():
img = get_image(row["path"])
bboxes = row["bboxes"]
if bboxes != []:
for bbox in bboxes:
img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2)
imglist.append(img)
if (i+1) % 49 == 0:
print(i, len(imglist))
_, axs = plt.subplots(7, 7, figsize=(32, 32))
axs = axs.flatten()
for img, ax in zip(imglist, axs):
ax.axis('off')
ax.imshow(img)
print(row)
plt.show()
imglist = [] | 2.484375 | 2 |
examples/1_trace.py | tamsri/murt | 4 | 12793920 | <reponame>tamsri/murt
from murt import Tracer
# Scene File Path in obj
OBJ_FILE_PATH = "./assets/poznan.obj"
# Initialize Tracer
my_tracer = Tracer(OBJ_FILE_PATH)
# Set transmiting position
tx_pos = [0, 15, 0]
# Set receiving position
rx_pos = [-30, 1.5, 45]
# Return the traced paths
results = my_tracer.trace(tx_pos, rx_pos)
print(results)
| 2.421875 | 2 |
helpers_fritz.py | scarfboy/fritzbox-traffic | 0 | 12793921 | <gh_stars>0
import time, urllib, urllib2, hashlib, pprint, re, sys
import urllib, urllib2, socket, httplib
import json
"""
Being lazy with globals because you probably don't have more than one in your LAN
Note that login takes a little time. The _fritz_sid keeps the login token so if you can
keep the interpreter running you can get faster fetches
CONSIDER: fetching things beyond transfers.
"""
_fritz_sid = None
_fritz_lastfetched = 0
_fritz_lastdata = None
IP = '192.168.178.1'
username = ''
password = '<PASSWORD>'
def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False):
""" Returns:
- if return_reqresp==False (default), returns the data at an URL
- if return_reqresp==True, returns the request and response objects (can be useful for streams)
data: May be
- a dict
- a sequence of tuples (will be encoded),
- a string (not altered - you often want to have used urllib.urlencode)
When you use this parameter, the request becomes a POST instead of the default GET
(and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>)
headers: dict of additional headers (each is add_header()'d)
raise_as_none: In cases where you want to treat common connection failures as 'try again later',
using True here can save a bunch of your own typing in error catching
"""
try:
if type(data) in (tuple, dict):
data=urllib.urlencode(data)
req = urllib2.Request(url, data=data)
if headers!=None:
for k in headers:
vv = headers[k]
if type(vv) in (list,tuple):
for v in vv:
req.add_header(k,v)
else: # assume single string. TODO: consider unicode
req.add_header(k,vv)
response = urllib2.urlopen(req, timeout=60)
if return_reqresp:
return req,response
else:
data = response.read()
return data
except (socket.timeout), e:
if raise_as_none:
sys.stderr.write( 'Timeout fetching %r\n'%url )
return None
else:
raise
except (socket.error, urllib2.URLError, httplib.HTTPException), e:
if raise_as_none:
#print 'Networking problem, %s: %s'%(e.__class__, str(e))
return None
else:
raise
def fritz_login():
data = urlfetch('http://%s/login_sid.lua'%(IP,))
m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data)
challenge = m.groups()[0]
m5h = hashlib.md5()
hashstr = '%s-%s'%(challenge, password)
m5h.update(hashstr.encode('utf_16_le'))
response = m5h.hexdigest()
data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)})
m = re.search('<SID>([0-9a-f]+)</SID>', data)
return m.groups()[0]
def fritz_fetch():
" Fetches ul/dl graph data "
global _fritz_sid, _fritz_lastfetched, _fritz_lastdata
td = time.time() - _fritz_lastfetched
if td < 5.0 and _fritz_lastdata!=None: # if our last fetch was less than 5 seconds ago, we're not going to get a new answer
return _fritz_lastdata
try:
fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid)
data = urlfetch(fetchurl)
except urllib2.HTTPError, e:
if e.code==403:
#print "Forbidden, tryin to log in for new SID"
_fritz_sid = fritz_login()
fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid)
#print fetchurl
data = urlfetch(fetchurl)
jd = json.loads( data )[0]# [0]: assume it's one main interface
_fritz_lastfetched = time.time()
_fritz_lastdata = jd
#pprint.pprint( jd )
return jd
if __name__ == '__main__':
import pprint
pprint.pprint( fritz_fetch() )
| 2.671875 | 3 |
unidump/cli.py | Codepoints/unidump | 33 | 12793922 | """
handle the CLI logic for a unidump call
"""
import argparse
import codecs
import gettext
from os.path import dirname
from shutil import get_terminal_size
import sys
from textwrap import TextWrapper
# pylint: disable=unused-import
from typing import List, IO, Any
# pylint: enable=unused-import
from unicodedata import unidata_version
from unidump import VERSION, unidump
from unidump.env import Env
TL = gettext.translation('unidump', localedir=dirname(__file__)+'/locale',
fallback=True)
_ = TL.gettext
TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')),
replace_whitespace=True,
initial_indent=' ', subsequent_indent=' ').fill
DESCRIPTION = '\n\n'.join([
TW(_('A Unicode code point dump.')),
TW(_('Think of it as hexdump(1) for Unicode. The command analyses the '
'input and then prints three columns: the raw byte index of the '
'first code point in this row, code points in their hex notation, '
'and finally the raw input characters with control and whitespace '
'replaced by a dot.')),
TW(_('Invalid byte sequences are represented with an “X” and with the hex '
'value enclosed in question marks, e.g., “?F5?”.')),
TW(_('You can pipe in data from stdin, select several files at once, or '
'even mix all those input methods together.')),
])
EPILOG = '\n\n'.join([
_('Examples:'),
TW(_('* Basic usage with stdin:')),
''' echo -n 'ABCDEFGHIJKLMNOP' | unidump -n 4
0 0041 0042 0043 0044 ABCD
4 0045 0046 0047 0048 EFGH
8 0049 004A 004B 004C IJKL
12 004D 004E 004F 0050 MNOP''',
TW(_('* Dump the code points translated from another encoding:')),
' unidump -c latin-1 some-legacy-file',
TW(_('* Dump many files at the same time:')),
' unidump foo-*.txt',
TW(_('* Control characters and whitespace are safely rendered:')),
''' echo -n -e '\\x01' | unidump -n 1
0 0001 .''',
TW(_('* Finally learn what your favorite Emoji is composed of:')),
''' ( echo -n -e '\\xf0\\x9f\\xa7\\x9d\\xf0\\x9f\\x8f\\xbd\\xe2' ; \\
echo -n -e '\\x80\\x8d\\xe2\\x99\\x82\\xef\\xb8\\x8f' ; ) | \\
unidump -n 5
0 1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''',
TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. '
'The “elf” emoji (the first character) is replaced with a dot here, '
'because the current version of Python’s unicodedata doesn’t know of '
'this character yet.')),
TW(_('* Use it like strings(1):')),
' unidump -e \'{data}\' some-file.bin',
TW(_('This will replace every unknown byte from the input file with “X” '
'and every control and whitespace character with “.”.')),
TW(_('* Only print the code points of the input:')),
''' unidump -e '{repr}'$'\\n' -n 1 some-file.txt''',
TW(_('This results in a stream of code points in hex notation, each on a '
'new line, without byte counter or rendering of actual data. You can '
'use this to count the total amount of characters (as opposed to raw '
'bytes) in a file, if you pipe it through `wc -l`.')),
TW(_('This is version {} of unidump, using Unicode {} data.')
.format(VERSION, unidata_version)).lstrip() + '\n'
])
def force_stdout_to_utf8():
"""force stdout to be UTF-8 encoded, disregarding locale
Do not type-check this:
error: Incompatible types in assignment (expression has type
"StreamWriter", variable has type "TextIO")
error: "TextIO" has no attribute "detach"
\\o/
"""
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
def main(args: List[str] = None) -> int:
"""entry-point for an unidump CLI call"""
force_stdout_to_utf8()
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
prog='unidump',
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('files', nargs='*', metavar='FILE', default=('-',),
help=_(
'input files. Use “-” or keep empty for stdin.'))
parser.add_argument('-n', '--length', type=int, default=16,
dest='linelength', metavar='LENGTH',
help=_(
'format output using this much input characters. '
'Default is %(default)s characters.'))
parser.add_argument('-c', '--encoding', type=str, default='utf-8',
metavar='ENC',
help=_(
'interpret input in this encoding. Default is '
'%(default)s. You can choose any encoding that '
'Python supports, e.g. “latin-1”.'))
parser.add_argument('-e', '--format', type=str, default=None,
dest='lineformat', metavar='FORMAT',
help=_(
'specify a custom format in Python’s {} notation. '
'Default is “%s”. '
'See examples below on how to use this option.'
) % Env.lineformat.replace('\n', '\\n'))
parser.add_argument('-v', '--version', action='version',
version=_('%(prog)s {} using Unicode {} data').format(
VERSION, unidata_version))
options = parser.parse_args(args)
try:
for filename in options.files:
infile = None # type: IO[bytes]
if filename == '-':
infile = sys.stdin.buffer
else:
try:
infile = open(filename, 'rb')
except FileNotFoundError:
sys.stdout.flush()
sys.stderr.write(_('File {} not found.\n')
.format(filename))
continue
except IsADirectoryError:
sys.stdout.flush()
sys.stderr.write(_('{} is a directory.\n')
.format(filename))
continue
unidump(
infile,
env=Env(
linelength=options.linelength,
encoding=options.encoding,
lineformat=options.lineformat,
output=sys.stdout))
except KeyboardInterrupt:
sys.stdout.flush()
return 1
else:
return 0
| 2.796875 | 3 |
api/serializers.py | lutoma/open-grgraz | 2 | 12793923 | from rest_framework import serializers
from api.models import *
class ParliamentaryGroupSerializer(serializers.ModelSerializer):
class Meta:
model = ParliamentaryGroup
fields = ('id', 'name')
class ParliamentarySessionSerializer(serializers.ModelSerializer):
class Meta:
model = ParliamentarySession
fields = ('session_date',)
class CouncilPersonSerializer(serializers.ModelSerializer):
class Meta:
model = CouncilPerson
fields = ('name', 'academic_degree', 'email', 'parliamentary_group')
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = File
fields = ('long_filename', 'short_filename', 'path')
class AnswerSerializer(serializers.ModelSerializer):
session = serializers.StringRelatedField()
proposer = CouncilPersonSerializer()
files = FileSerializer(many=True)
class Meta:
model = Motion
fields = ('id', 'motion_id', 'session', 'title', 'parliamentary_group',
'proposer', 'files')
class MotionSerializer(serializers.ModelSerializer):
session = serializers.StringRelatedField()
proposer = CouncilPersonSerializer()
files = FileSerializer(many=True)
answers = AnswerSerializer(many=True)
class Meta:
model = Motion
fields = ('id', 'motion_id', 'session', 'title', 'motion_type', 'parliamentary_group',
'proposer', 'files', 'answers')
| 2.25 | 2 |
main.py | YanhengWang/Draughts | 0 | 12793924 | <gh_stars>0
from utils import *
from graphics import Graphics
from state import State
from torch.autograd import Variable
import network
import torch
current = State(None, None)
net = network.ResNet()
focusMoves = []
focus = 0
def MCTS(root):
global net
if root.Expand():
data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0)
delta = net(Variable(data)).data[0, 0]
root.child[-1].v = delta
delta *= -1
else:
best = root.BestChild()
if best == None:
delta = -1
else:
delta = -MCTS(best)
root.v += delta
root.n += 1
return delta
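# Note: delta is negated at each level (negamax convention), so root.v accumulates
# value from the perspective of the player to move and root.n counts visits; how
# BestChild trades off v and n is defined in state.py and only assumed here.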
def Callback(idx, place):
global GUI
global current
global focusMoves
global focus
if place == STONE:
focus = idx
if current.GetColour(idx)==current.player and not(current.mandatory):
for move in focusMoves:
GUI.SetBoard(False, *move)
_, focusMoves = current.GetMoves(focus)
for move in focusMoves:
GUI.SetBoard(True, *move)
else:
for i, move in enumerate(current.moves):
if focus==move[0] and idx==move[-1]: # a legal move
for j, move2 in enumerate(current.moves):
if j != i:
GUI.SetBoard(False, *move2) # clear other highlights
GUI.Move(move, current.pos[move[0]])
current = current.child[i]
for j in range(1000):
MCTS(current)
data = torch.FloatTensor(StateToImg(current)).unsqueeze(0)
print(net(Variable(data)).data[0,0])
best = current.child[0]
for c in current.child:
if c.n > best.n:
best = c
move2 = current.moves[current.child.index(best)]
GUI.Move(move2, current.pos[move2[0]])
current = best
data = torch.FloatTensor(StateToImg(current)).unsqueeze(0)
print(net(Variable(data)).data[0,0])
if current.mandatory:
for move2 in current.moves:
GUI.SetBoard(True, *move2)
break
f = torch.load(PATH_PARAM)
net.load_state_dict(f)
net.eval()
for i in range(50):
MCTS(current)
GUI = Graphics(Callback)
GUI.Run()
| 2.078125 | 2 |
setup.py | bashu/django-absoluteuri | 14 | 12793925 | <reponame>bashu/django-absoluteuri
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
__doc__ = "Absolute URI functions and template tags for Django"
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
readme = read('README.rst')
changelog = read('CHANGELOG.rst')
setup(
name='django-absoluteuri',
version='1.3.1.dev0',
description=__doc__,
long_description=readme + '\n\n' + changelog,
author='Fusionbox, Inc.',
author_email='<EMAIL>',
url='https://github.com/fusionbox/django-absoluteuri',
packages=[package for package in find_packages() if package.startswith('absoluteuri')],
install_requires=[
'Django>=1.11',
],
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=[
'django-setuptest',
],
license="Apache 2.0",
zip_safe=True,
keywords='django-absoluteuri',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 1.460938 | 1 |
scripts/to_sorec_list_form.py | ajbc/spf | 28 | 12793926 | <reponame>ajbc/spf
import sys
from collections import defaultdict
path = sys.argv[1]
undir = (len(sys.argv) == 3)
fin = open(path +'/train.tsv')
finn = open(path +'/network.tsv')
fout_items = open(path +'/items_sorec.dat', 'w+')
fout_users = open(path +'/users_sorec.dat', 'w+')
users = defaultdict(list)
items = set()
for line in fin:
user, item, rating = [int(x.strip()) for x in line.split('\t')]
users[user].append(item)
items.add(item)
umap = {}
imap = {}
fmap_items = open(path +'/item_map_sorec.dat', 'w+')
fmap_users = open(path +'/user_map_sorec.dat', 'w+') # this should be the same
for user in users:
umap[user] = len(umap)
fmap_users.write("%d,%d\n" % (user, umap[user]))
for item in items:
imap[item] = len(imap)
fmap_items.write("%d,%d\n" % (item, imap[item]))
fmap_users.close()
fmap_items.close()
user_data = defaultdict(list)
item_data = defaultdict(list)
for user in users:
for item in users[user]:
user_data[umap[user]].append(imap[item])
item_data[imap[item]].append(umap[user])
for line in finn:
user, friend = [int(x.strip()) for x in line.split('\t')]
if user not in umap or friend not in umap:
continue
user_data[umap[user]].append(len(imap) + umap[friend])
item_data[len(imap) + umap[friend]].append(umap[user])
# undirected
if undir:
user_data[umap[friend]].append(len(imap) + umap[user])
item_data[len(imap) + umap[user]].append(umap[friend])
for item in sorted(items, key=lambda x: imap[x]):
line = str(len(item_data[imap[item]]))
for user in item_data[imap[item]]:
line += ' ' + str(user)
fout_items.write(line + '\n')
for user in sorted(users, key=lambda x: umap[x]):
line = str(len(user_data[umap[user]]))
for item in user_data[umap[user]]:
line += ' ' + str(item)
fout_users.write(line + '\n')
line = str(len(item_data[len(imap) + umap[user]]))
for user in item_data[len(imap) + umap[user]]:
line += ' ' + str(user)
fout_items.write(line + '\n')
fout_items.close()
fout_users.close()
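# Output format sketch (illustrative): every line is "<count> <id> <id> ...", so a
# user linked to items 3 and 8 plus one friend column 57 is written as "3 3 8 57";
# friend columns are offset by len(imap) so they come after all item columns.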
| 2.125 | 2 |
CMSLogic/models.py | AbhijithGanesh/Student-Portal-CMS | 0 | 12793927 | from django.db import models
from pytz import country_names as c
from datetime import date
dict_choices = dict(c)
_choices = []
_keys = list(dict_choices.keys())
_value = list(dict_choices.values())
if len(_keys) == len(_value):
for i in range(len(_keys)):
a = [_keys[i], _value[i]]
_choices.append(tuple(a))
class StudentProfile(models.Model):
Name = models.CharField(max_length=300)
Application_Number = models.BigIntegerField()
Date_Of_Birth = models.DateField()
Gender = models.CharField(
max_length=30,
choices=[
("M", "Male"),
("F", "Female"),
("N", "Non-Binary"),
("W", "Would not like to reveal"),
],
)
HomeState = models.CharField(max_length=300)
Country = models.CharField(max_length=75, choices=_choices)
ContactNumber = models.BigIntegerField()
class ContactUs(models.Model):
Department_Name = models.CharField(max_length=300)
Department_Head = models.CharField(max_length=300)
Department_ContactDetails = models.IntegerField()
class Meta:
verbose_name_plural = "Contact Us"
class Events(models.Model):
Event_Name = models.CharField(max_length=50)
Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING)
Event_Duration = models.DurationField()
Event_Descripton = models.TextField(null=False, default="Empty Description")
class Meta:
verbose_name_plural = "Events and Notices"
| 2.359375 | 2 |
ch13/myproject_virtualenv/src/django-myproject/myproject/apps/likes/views.py | PacktPublishing/Django-3-Web-Development-Cookbook | 159 | 12793928 | <reponame>PacktPublishing/Django-3-Web-Development-Cookbook
import structlog
from django.contrib.contenttypes.models import ContentType
from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from .models import Like
from .templatetags.likes_tags import liked_count
logger = structlog.get_logger("django_structlog")
@never_cache
@csrf_exempt
def json_set_like(request, content_type_id, object_id):
"""
Sets the object as a favorite for the current user
"""
result = {
"success": False,
}
if request.user.is_authenticated and request.method == "POST":
content_type = ContentType.objects.get(id=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
like, is_created = Like.objects.get_or_create(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
user=request.user)
if is_created:
logger.info("like_created", content_type_id=content_type.pk, object_id=obj.pk)
else:
like.delete()
logger.info("like_deleted", content_type_id=content_type.pk, object_id=obj.pk)
result = {
"success": True,
"action": "add" if is_created else "remove",
"count": liked_count(obj),
}
return JsonResponse(result)
| 2.015625 | 2 |
apphv/mainUser/migrations/0029_auto_20190625_1620.py | FerneyMoreno20/Portfolio | 0 | 12793929 | <gh_stars>0
# Generated by Django 2.2.2 on 2019-06-25 16:20
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('parametros', '0002_empleos'),
('mainUser', '0028_auto_20190624_1815'),
]
operations = [
migrations.RemoveField(
model_name='educacion',
name='TipoEstu',
),
migrations.RemoveField(
model_name='experiencia',
name='CargExpe',
),
migrations.DeleteModel(
name='Habilidades',
),
migrations.RemoveField(
model_name='logros',
name='NombTiLo',
),
migrations.AddField(
model_name='usuarios',
name='CargExpe',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'),
),
migrations.AddField(
model_name='usuarios',
name='DescLogr',
field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'),
),
migrations.AddField(
model_name='usuarios',
name='EmprExpe',
field=models.CharField(max_length=150, null=True, verbose_name='Empresa'),
),
migrations.AddField(
model_name='usuarios',
name='FechLogr',
field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'),
),
migrations.AddField(
model_name='usuarios',
name='FuncionE',
field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'),
),
migrations.AddField(
model_name='usuarios',
name='Instituto',
field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'),
),
migrations.AddField(
model_name='usuarios',
name='LogrExpe',
field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'),
),
migrations.AddField(
model_name='usuarios',
name='NiveHabil',
field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '),
),
migrations.AddField(
model_name='usuarios',
name='NombHabil',
field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'),
),
migrations.AddField(
model_name='usuarios',
name='NombLogr',
field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'),
),
migrations.AddField(
model_name='usuarios',
name='NombTiLo',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'),
),
migrations.AddField(
model_name='usuarios',
name='TipoEstu',
field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'),
),
migrations.AddField(
model_name='usuarios',
name='TituloEst',
field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'),
),
migrations.DeleteModel(
name='Educacion',
),
migrations.DeleteModel(
name='Experiencia',
),
migrations.DeleteModel(
name='Logros',
),
]
| 1.914063 | 2 |
Code/preprocessing/doc2vec.py | mattaq31/recognition-forge | 0 | 12793930 | from preprocessing.vectorizers import Doc2VecVectorizer
from nnframework.data_builder import DataBuilder
import pandas as pd
import constants as const
import numpy as np
def generate_d2v_vectors(source_file):
df = pd.read_csv(source_file)
messages = df["Message"].values
vectorizer = Doc2VecVectorizer()
vectors = vectorizer.vectorize(messages)
return np.c_[df.iloc[:,0].values, vectors]
if __name__ == '__main__':
# Generate vectors (with index)
output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED)
# Save vectors as npy file
np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output) | 3.109375 | 3 |
Prob1.py | ntnshrm87/Python_Quest | 0 | 12793931 | class C:
dangerous = 2
c1 = C()
c2 = C()
print(c1.dangerous)
c1.dangerous = 3
print(c1.dangerous)
print(c2.dangerous)
del c1.dangerous
print(c1.dangerous)
print(c2.dangerous)
# Solution:
# 2
# 3
# 2
# 2
# 2
# Reference:
# object.__del__(self)
# Called when the instance is about to be destroyed. This is also called a destructor.
# If a base class has a __del__() method, the derived class’s __del__() method,
# if any, must explicitly call it to ensure proper deletion of the base class part of the instance.
# Note that it is possible (though not recommended!) for the __del__() method to postpone destruction
# of the instance by creating a new reference to it. It may then be called at a later time when
# this new reference is deleted. It is not guaranteed that __del__() methods are called for objects
# that still exist when the interpreter exits.
# Note del x doesn’t directly call x.__del__() — the former decrements the reference count for x by one,
# and the latter is only called when x’s reference count reaches zero. Some common situations
# that may prevent the reference count of an object from going to zero include: circular references
# between objects (e.g., a doubly-linked list or a tree data structure with parent and child pointers);
# a reference to the object on the stack frame of a function that caught an exception (the traceback
# stored in sys.exc_traceback keeps the stack frame alive); or a reference to the object on the
# stack frame that raised an unhandled exception in interactive mode (the traceback stored in
# sys.last_traceback keeps the stack frame alive). The first situation can only be remedied by
# explicitly breaking the cycles; the latter two situations can be resolved by storing None in
# sys.exc_traceback or sys.last_traceback. Circular references which are garbage are detected when
# the option cycle detector is enabled (it’s on by default), but can only be cleaned up if there
# are no Python-level __del__() methods involved. Refer to the documentation for the gc module
# for more information about how __del__() methods are handled by the cycle detector,
# particularly the description of the garbage value.
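# Illustrative sketch of the reference above: "del x" only decrements the reference
# count, and __del__ runs only once that count reaches zero (hypothetical class D).
if __name__ == '__main__':
    class D:
        def __del__(self):
            print('D.__del__ called')
    d1 = D()
    d2 = d1   # a second reference to the same object
    del d1    # refcount 2 -> 1, __del__ is NOT called yet
    del d2    # refcount 1 -> 0, __del__ is called here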
| 3.828125 | 4 |
covidKeralaDHS.py | cibinjoseph/covidKeralaDHS | 0 | 12793932 | <filename>covidKeralaDHS.py
"""
A module to parse the COVID bulletins provided by DHS Kerala
"""
import urllib3
from bs4 import BeautifulSoup
import json
import sys
linkPre = 'http://dhs.kerala.gov.in'
jsonDefaultFile = 'bulletinLinks.json'
bulletinDefaultFile = 'bulletin.pdf'
def __getPDFlink(bulletinPageLink):
"""
Return links to pdf bulletin uploads in page.
This link can be checked for updated bulletins.
"""
# Parse bulletin page to get pdf link
req = urllib3.PoolManager()
bulletinPage = req.request('GET', bulletinPageLink)
soup = BeautifulSoup(bulletinPage.data, 'html.parser')
try:
divTag = soup.find('div', attrs={'class': 'entry-content'})
pTags = divTag.findAll('p')
except AttributeError:
print('Error: Broken Connection. Rerun')
raise ConnectionError
# Get link to pdf bulletin
for tag in pTags:
if 'English' in tag.text:
return linkPre + tag.a.get('href')
else:
return None
def cleanDate(date):
"""
Returns the date in the format dd.mm.yyyy
This can be used to write to the JSON file in a standard format
"""
# Sanity checks
if not isinstance(date,str):
raise TypeError
if not len(date) == 10:
raise ValueError
return date[0:2] + '.' + date[3:5] + '.' + date[6:10]
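# Worked example: cleanDate('01-07-2020') and cleanDate('01/07/2020') both return '01.07.2020'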
def __getDateLinkDict(verbose=True):
"""
Returns a dict data type containing all dates
and their corresponding links to bulletin pages.
"""
# Ensure python version 3+
if sys.version_info.major < 3:
print('ERROR: Use python version 3+')
raise SyntaxError
# Parse DHS Kerala webpage to get html tags
if verbose:
print('Parsing Kerala DHS webpage ...')
print('Obtaining links of dates:')
DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/'
req = urllib3.PoolManager()
DHSPage = req.request('GET', DHSLink)
soup = BeautifulSoup(DHSPage.data, 'html.parser')
tags = soup.findAll('h3', attrs={'class': 'entry-title'})
# Clean html tags to extract date and corresponding link to pdfs bulletins
dateLinkDict = dict()
for tag in tags:
# The returned dates may not be consistently formatted on the website.
        # Eg. dd-mm-yyyy and dd/mm/yyyy are both found
date = cleanDate(tag.a.text)
bulletinPageLink = linkPre + tag.a.get('href')
dateLinkDict[date] = __getPDFlink(bulletinPageLink)
if verbose:
print(date)
return dateLinkDict
def downloadPDF(PDFlink):
"""
Downloads pdf bulletin from the provided link
"""
    try:
        req = urllib3.PoolManager()
        response = req.request('GET', PDFlink)
        with open(bulletinDefaultFile, 'wb') as bulletinFile:
            bulletinFile.write(response.data)
    except urllib3.exceptions.HTTPError:
        print('Error: PDF file not found')
        return False
    return True
def writeJSON(dateLinkDict, filename=jsonDefaultFile):
"""
Writes dateLinkDict as a json file.
This JSON file can be used to check for updates.
"""
jsonFile = open(filename, 'w')
json.dump(dateLinkDict, jsonFile)
jsonFile.close()
def readJSON(filename=jsonDefaultFile):
"""
Reads all dateLinkDict from a json file.
This JSON file can be used to check for updates.
"""
jsonFile = open(filename, 'r')
dateLinkDict = json.load(jsonFile)
jsonFile.close()
return dateLinkDict
def getBulletin(date, verbose=True):
"""
Downloads latest bulletin for the given date and returns True.
Returns False if bulletin is not available.
"""
stdDate = cleanDate(date)
dateLinkDict = __getDateLinkDict(verbose)
if stdDate in dateLinkDict:
downloadPDF(dateLinkDict[stdDate])
return True
else:
return False
def isNewBulletin(date, updateJSONfile=True, verbose=True):
"""
    Returns the bulletin link if an updated bulletin is available on the provided date.
    Returns False if no new bulletins are available.
    If running for the first time, the JSON file is created and the link is returned.
"""
stdDate = cleanDate(date)
dateLinkDictNew = __getDateLinkDict(verbose)
# If date does not exist on server
if not stdDate in dateLinkDictNew:
return False
try:
# If local JSON file exists in directory
dateLinkDictOld = readJSON(jsonDefaultFile)
# If date does not exist in local JSON file
if not stdDate in dateLinkDictOld:
if updateJSONfile:
writeJSON(dateLinkDictNew)
            return dateLinkDictNew[stdDate]
# If both bulletins are same
if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]):
return False
else:
# If both bulletins are different
if updateJSONfile:
writeJSON(dateLinkDictNew)
            return dateLinkDictNew[stdDate]
except FileNotFoundError:
# If local JSON file does not exist
if updateJSONfile:
writeJSON(dateLinkDictNew)
        return dateLinkDictNew[stdDate]
if __name__ == "__main__":
"""
If the module is invoked as a python program, it checks for new bulletins
and downloads the latest one.
"""
from datetime import date
today = date.today().strftime('%d.%m.%Y')
isNew = isNewBulletin(today)
if isNew:
print('NEW BULLETIN AVAILABLE')
downloadPDF(isNew)
print('Downloaded to ' + bulletinDefaultFile)
else:
print('NO NEW BULLETINS AVAILABLE')
| 3.296875 | 3 |
1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/generators.py | mamoudmatook/PythonSpecializaionInRussian | 0 | 12793933 | <filename>1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/generators.py
def myrange_generator(top):
    current = 0
while current < top:
yield current
current += 1 | 3.3125 | 3 |
coordination/environment/traffic.py | CN-UPB/FutureCoord | 1 | 12793934 | <reponame>CN-UPB/FutureCoord
from typing import List, Dict
from functools import cmp_to_key
import numpy as np
import scipy.stats as stats
from numpy.random import default_rng, BitGenerator
from tick.base import TimeFunction
from tick.hawkes import SimuInhomogeneousPoisson
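# This module builds synthetic service-request traces: per-service arrivals follow an
# inhomogeneous Poisson process (tick), durations are exponential, and datarates and
# end-to-end latency factors are drawn from truncated normal distributions.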
class Request:
def __init__(self, arrival: float, duration: float, datarate: float, max_latency: float, endpoints: tuple, service: int):
self.arrival = arrival
self.duration = duration
self.datarate = datarate
self.max_latency = max_latency
self.ingress, self.egress = endpoints
self.ingress = int(self.ingress)
self.egress = int(self.egress)
self.service: int = int(service)
self.vtypes: List[int] = None
self.resd_lat: float = None
def __str__(self):
attrs = [round(self.duration, 2), round(self.datarate, 2), round(self.resd_lat, 2), round(self.max_latency, 2)]
attrs = [self.ingress, self.egress, *attrs, self.service]
return 'Route: ({}-{}); Duration: {}; Rate: {}; Resd. Lat.: {}; Lat.: {}; Service: {}'.format(*attrs)
class ServiceTraffic:
def __init__(self, rng: BitGenerator, service: int, horizon: float, process: Dict, datarates: Dict, latencies: Dict, endpoints: np.ndarray, rates: np.ndarray, spaths: Dict):
self.rng = rng
self.MAX_SEED = 2**30 - 1
self.service = service
self.horizon = horizon
self.process = process
self.datarates = datarates
self.latencies = latencies
self.endpoints = endpoints
self.spaths = spaths
# create time function for inhomogenous poisson process
T = np.linspace(0.0, horizon - 1, horizon)
rates = np.ascontiguousarray(rates)
self.rate_function = TimeFunction((T, rates))
def sample_arrival(self, horizon):
poi_seed = self.rng.integers(0, self.MAX_SEED)
poi_seed = int(poi_seed)
in_poisson = SimuInhomogeneousPoisson(
[self.rate_function], end_time=horizon, verbose=False, seed=poi_seed)
in_poisson.track_intensity()
in_poisson.simulate()
arrivals = in_poisson.timestamps[0]
return arrivals
def sample_duration(self, size):
mduration = self.process['mduration']
duration = self.rng.exponential(scale=mduration, size=size)
return duration
def sample_datarates(self, size):
mean = self.datarates['loc']
scale = self.datarates['scale']
a, b = self.datarates['a'], self.datarates['b']
a, b = (a - mean) / scale, (b - mean) / scale
datarates = stats.truncnorm.rvs(a, b, mean, scale, size=size, random_state=self.rng)
return datarates
def sample_latencies(self, propagation: np.ndarray):
mean = self.latencies['loc']
scale = self.latencies['scale']
a, b = self.latencies['a'], self.latencies['b']
a, b = (a - mean) / scale, (b - mean) / scale
lat = stats.truncnorm.rvs(a, b, mean, scale, size=propagation.size, random_state=self.rng)
# scale maximum end-to-end latencies (given by shortest path propagation delay) with sampled factor
lat = lat * propagation
return lat
def sample_endpoints(self, arrivals):
ingresses, egresses = [], []
for arrival in arrivals:
# get endpoint probability matrix for respective timestep
timestep = int(np.floor(arrival))
prob = self.endpoints[timestep]
# sample ingress / egress from probability matrix
flatten = prob.ravel()
index = np.arange(flatten.size)
ingress, egress = np.unravel_index(
self.rng.choice(index, p=flatten), prob.shape)
ingresses.append(ingress)
egresses.append(egress)
return ingresses, egresses
def sample(self):
# sample parameters for each service from distribution functions
arrival = self.sample_arrival(self.horizon)
duration = self.sample_duration(len(arrival))
ingresses, egresses = self.sample_endpoints(arrival)
# use arrival time to index the endpoint probability matrix and traffic matrix
rates = self.sample_datarates(size=len(arrival))
propagation = np.asarray([self.spaths[ingr][egr] for ingr, egr in zip(ingresses, egresses)])
latencies = self.sample_latencies(propagation)
# build request objects and append them to the traffic trace
requests = []
for arr, dr, rate, lat, ingr, egr in zip(arrival, duration, rates, latencies, ingresses, egresses):
req = Request(arr, dr, rate, lat, (ingr, egr), self.service)
requests.append(req)
return requests
class Traffic:
def __init__(self, processes):
self.processes = processes
def sample(self):
# generate requests for each type of service from respective processes
requests = [process.sample() for process in self.processes]
requests = [req for srequests in requests for req in srequests]
# sort to-be-simulated service requests according to their arrival time
requests = sorted(requests, key=cmp_to_key(
lambda r1, r2: r1.arrival - r2.arrival))
return requests
def __iter__(self):
trace = self.sample()
return iter(trace)
class TrafficStub:
def __init__(self, trace):
self.trace = trace
def sample(self):
return self.trace
def __iter__(self):
return iter(self.trace)
| 2.125 | 2 |
Sound/micro.py | mencattini/ReMIx | 1 | 12793935 | """Microphone module."""
import alsaaudio
# pylint: disable=R0903, E1101
class Micro():
"""Class to use micro in a `with` bloc."""
def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE,
alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK):
"""Open the device in nonblocking capture mode.
        The mode argument could just as well have been zero for blocking mode,
        in which case callers would not need to sleep between reads.
"""
self.capture = alsaaudio_capture
self.nonblock = alsaaudio_nonblock
self.inp = None
def __enter__(self):
"""Set the acquisition and return it."""
self.inp = alsaaudio.PCM(self.capture, self.nonblock)
return self.inp
    def __exit__(self, exc_type, exc_value, traceback):
"""Close the acquisition."""
self.inp.close()
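# Minimal usage sketch (assumes a default ALSA capture device; PCM.read() returns a
# (length, data) tuple per period in non-blocking mode):
if __name__ == '__main__':
    import time
    with Micro() as inp:
        for _ in range(100):
            length, data = inp.read()
            if length > 0:
                print('captured', length, 'frames')
            time.sleep(0.01)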
| 2.734375 | 3 |
Programiz Projects/4 - Find the Area of a Triangle.py | PythonCodes1/Python-Progression | 0 | 12793936 | <reponame>PythonCodes1/Python-Progression
"""
To find the area of a triangle, you must use this method:
s = (a+b+c)/2
area = √(s*(s-a)*(s-b)*(s-c))
"""
a = float(input('Enter first side: '))
b = float(input('Enter second side: '))
c = float(input('Enter third side: '))
s = (a+b+c)/2
area = (s*(s-a)*(s-b)*(s-c)) ** 0.5
print("The area of the triangle is %0.2f" %area)
| 4.15625 | 4 |
app/payments/migrations/0003_auto_20181219_2332.py | zanielyene/krabacus3 | 2 | 12793937 | <reponame>zanielyene/krabacus3<filename>app/payments/migrations/0003_auto_20181219_2332.py
# Generated by Django 2.1.2 on 2018-12-19 23:32
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('payments', '0002_auto_20181219_2206'),
]
operations = [
migrations.AlterField(
model_name='subscriptionpayment',
name='payment_read_time',
field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52, 579324, tzinfo=utc)),
),
]
| 1.484375 | 1 |
lab1/part1/core/layerFactory.py | Currycurrycurry/FDSS_PRML | 0 | 12793938 | <reponame>Currycurrycurry/FDSS_PRML<filename>lab1/part1/core/layerFactory.py
from lab1.part1.core.layers.dense import DenseLayer
from lab1.part1.core.layers.relu import ReluLayer
from lab1.part1.core.layers.sigmoid import SigmoidLayer
from lab1.part1.core.layers.softmax import SoftmaxLayer
from lab1.part1.core.layers.preLU import PReLULayer
from lab1.part1.core.layers.leakyRelu import LeakyReLULayer
from lab1.part1.core.layers.elu import ELULayer
from lab1.part1.util.logger_util import logger
class LayerFactory(object):
@staticmethod
def produce_layer(type):
if type == 'dense':
logger.info(type)
return DenseLayer()
elif type == 'relu':
logger.info(type)
return ReluLayer()
elif type == 'sigmoid':
logger.info(type)
return SigmoidLayer()
elif type == 'softmax':
logger.info(type)
return SoftmaxLayer()
elif type == 'prelu':
logger.info(type)
return PReLULayer()
elif type == 'elu':
logger.info(type)
return ELULayer()
elif type == 'leakyrelu':
logger.info(type)
return LeakyReLULayer()
if __name__ == '__main__':
print(LayerFactory.produce_layer('dense'))
| 2.140625 | 2 |
EixampleEnergy/drawers/drawer_map.py | TugdualSarazin/eixample_energy | 0 | 12793939 | <gh_stars>0
import contextily as cx
import matplotlib.pyplot as plt
from EixampleEnergy.drawers.drawer_elem import DrawerElem
class DrawerMap(DrawerElem):
def __init__(self, full_df,
color_col,
cmap='YlGnBu',
xlim=None, ylim=None, bg_img=None):
# Init attributes
self.full_df = full_df
self.color_col = color_col
self.cmap = cmap
self.bg_img = bg_img
if xlim is None:
self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]])
else:
self.xlim = xlim
if ylim is None:
self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]])
else:
self.ylim = ylim
# Min / max
self.vmin = self.full_df[self.color_col].min()
self.vmax = self.full_df[self.color_col].max()
def draw(self, df):
self.ax.clear()
self.ax.set_xlim(self.xlim)
self.ax.set_ylim(self.ylim)
#df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap)
if self.bg_img:
cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img,
cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
#leg = self.ax.get_legend()
#self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30})
#leg.set_bbox_to_anchor((1.15, 0.5))
self.ax.set_axis_off()
self.ax.set_position([0., 0., 1., 1.])
def download_bg(self, save_path):
print(f"Downloading map's background image to {save_path}")
img, ext = cx.bounds2raster(
self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1],
save_path,
ll=True,
# source=cx.providers.CartoDB.Positron,
# source='https://a.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png'
source='http://www.google.cn/maps/vt?lyrs=s@189&gl=cn&x={x}&y={y}&z={z}'
)
| 2.296875 | 2 |
notebooks-text-format/cond_bmm_emnist.py | arpitvaghela/probml-notebooks | 166 | 12793940 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="M_qo7DmLJKLP"
# # Class-Conditional Bernoulli Mixture Model for EMNIST
# + [markdown] id="TU1pCzcIJHTm"
# ## Setup
#
# + id="400WanLyGA2C"
# !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
# %cd -q /pyprobml/scripts
# + id="k1rLl6dHH7Wh"
# !pip install -q superimport
# !pip install -q distrax
# + id="cLpBn5KQeB46"
from conditional_bernoulli_mix_lib import ClassConditionalBMM
from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class
from noisy_spelling_hmm import Word
from jax import vmap
import jax.numpy as jnp
import jax
from jax.random import PRNGKey, split
import numpy as np
from matplotlib import pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152"
select_n = 25
dataset, targets = get_emnist_images_per_class(select_n)
dataset, targets = jnp.array(dataset), jnp.array(targets)
# + [markdown] id="KwNq7HYYLPO9"
# ## Initialization of Class Conditional BMMs
# + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad"
n_mix = 30
n_char = 52
mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix))
p_min, p_max = 0.4, 0.6
n_pixels = 28 * 28
probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels)))
class_priors = jnp.array(np.full((n_char,), 1./n_char))
cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
# + [markdown] id="Qa95Fua5Kc3i"
# ## Full Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281"
num_epochs, batch_size = 100, len(dataset)
losses = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, batch_size, num_epochs = num_epochs)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="37mNMNrpInfh"
# ## EM Algorithm
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6"
losses = cbm_em.fit_em(dataset, targets, 8)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="NjCQpoH1Iuuf"
# ## Plot of the Probabilities of Components Distribution
# + id="KkyAHDW4JgyM"
def plot_components_dist(cbm, n_mix):
fig = plt.figure(figsize=(45, 20))
for k in range(n_mix):
for cls in range(cbm.num_of_classes):
plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1)
plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k,:].reshape((28,28)), cmap = "gray")
plt.axis('off')
plt.tight_layout()
plt.show()
# + [markdown] id="J8KLkCWpNAeF"
# ### GD
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab"
plot_components_dist(cbm_gd, n_mix)
# + [markdown] id="FO31plUVNDSO"
# ### EM
# + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662"
plot_components_dist(cbm_em, n_mix)
# + [markdown] id="IqRdcklzOeAY"
# ## Sampling
# + id="wgI6sFWKN4ax"
p1, p2, p3 = 0.4, 0.1, 2e-3
n_misspelled = 1 # number of misspelled words created for each class
vocab = ['book', 'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band']
rng_key = PRNGKey(0)
keys = [dev_array for dev_array in split(rng_key, len(vocab))]
# + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88"
hmms = {word: Word(word, p1, p2, p3, n_char, "all", mixing_coeffs=cbm_em.model.mixture_distribution.probs,
initial_probs=cbm_em.model.components_distribution.distribution.probs, n_mix=n_mix) for word in vocab}
samples = jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys)
# + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f"
decoded_words = vmap(decode, in_axes = (0, None, None))(jnp.array(samples)[:, :, :, -1].reshape((n_misspelled * len(vocab), -1)), n_char + 1, "all")
get_decoded_samples(decoded_words)
# + [markdown] id="xrRy8MG0afR8"
# ### Figure
# + id="O0-HaN5rQAvP"
def plot_samples(samples):
samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28))
fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))
fig.subplots_adjust(hspace = .2, wspace=.001)
for i, ax in enumerate(axes.flatten()):
ax.imshow(samples[i], cmap="gray")
ax.set_axis_off()
fig.tight_layout()
plt.show()
# + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342"
plot_samples(samples)
# + [markdown] id="eNDmwV7EPyrR"
# ## Calculation of Log Likelihoods for Test Data
# + id="525MUl5HPe1K"
# noisy words
test_words = ['bo--', '-On-', 'b-N-', 'B---', '-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-']
test_images = fake_test_data(test_words, dataset, targets, n_char + 1, "all")
# + id="1dFCdVNgPYtJ"
def plot_log_likelihood(hmms, test_words, test_images, vocab):
fig, axes = plt.subplots(4, 3, figsize=(20, 10))
for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)):
flattened_img = img.reshape((len(img), -1))
loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab)
loglikelihoods = jnp.array(loglikelihoods)
ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color="black")
ax.set_title(f'{word}')
plt.tight_layout()
plt.show()
# + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff"
plot_log_likelihood(hmms, test_words, test_images, vocab)
| 1.820313 | 2 |
python/tests/test_base.py | JLLeitschuh/DDF | 160 | 12793941 | from __future__ import unicode_literals
import unittest
from ddf import DDFManager, DDF_HOME
class BaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm_spark = DDFManager('spark')
cls.airlines = cls.loadAirlines(cls.dm_spark)
cls.mtcars = cls.loadMtCars(cls.dm_spark)
@classmethod
def tearDownClass(cls):
cls.dm_spark.shutdown()
@classmethod
def loadAirlines(cls, dm):
table_name = 'airlines_na_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set hive.metastore.warehouse.dir=/tmp', False)
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("""create table {} (Year int,Month int,DayofMonth int,
DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int,
CRSArrTime int,UniqueCarrier string, FlightNum int,
TailNum string, ActualElapsedTime int, CRSElapsedTime int,
AirTime int, ArrDelay int, DepDelay int, Origin string,
Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int,
CancellationCode string, Diverted string, CarrierDelay int,
WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
""".format(table_name), False)
dm.sql("load data local inpath '{}/resources/test/airlineWithNA.csv' "
"into table {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
@classmethod
def loadMtCars(cls, dm):
table_name = 'mtcars_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set shark.test.data.path=resources', False)
# session.sql('set hive.metastore.warehouse.dir=/tmp')
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("CREATE TABLE {} (mpg double, cyl int, disp double, "
"hp int, drat double, wt double, "
"qesc double, vs int, am int, gear int, carb int)"
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '".format(table_name), False)
dm.sql("LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' "
"INTO TABLE {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
| 2.40625 | 2 |
examples/example_cidnotfound.py | Mirio/psnstoreprice | 1 | 12793942 | <reponame>Mirio/psnstoreprice
from psnstoreprice import PsnStorePrice
url = "https://store.playstation.com/#!/it-it/giochi/arslan-the-warriors-of-legend-con-bonus/"
pricelib = PsnStorePrice()
pricelib.getpage(pricelib.normalizeurl(url)) | 1.9375 | 2 |
backend/liveHeartbeat/apps.py | rajc1729/django-nextjs-realtime | 0 | 12793943 | from django.apps import AppConfig
class LiveheartbeatConfig(AppConfig):
name = 'liveHeartbeat'
| 1.046875 | 1 |
justine/views/grupos.py | VTacius/justine | 0 | 12793944 | <filename>justine/views/grupos.py
# coding: utf-8
from pyramid.view import view_config
from pyramid import httpexceptions as exception
from ..juliette.modelGroup import Grupo
from ..juliette.excepciones import DatosException, ConflictoException
from ..schemas.grupos import EsquemaGrupo
import logging
log = logging.getLogger(__name__)
@view_config(route_name="grupos_creacion", renderer='json', permission='creacion')
def grupos_creacion(peticion):
    # Validating received data
try:
v = EsquemaGrupo('cn')
print(peticion.json_body)
contenido = v.validacion(peticion.json_body['corpus'])
except KeyError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except ValueError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except TypeError as e:
        # This means that properly formatted JSON data was not sent
log.warning(e)
return exception.HTTPBadRequest(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
    # Perform the Group Creation operation via the library
try:
grupo = Grupo()
cn_grupo = contenido['cn']
contenido = grupo.crear(cn_grupo, contenido)
except ConflictoException as e:
        # If the group already exists, return a 409 Conflict
log.warning(e)
return exception.HTTPConflict(e)
    # The following seems to be THE WAY to respond in this case
    # TODO: However, my response in this case is empty when called with an empty, newly created Request
peticion.response.status_code = 201
peticion.response.headerlist.extend(
(
('Location', "grupos/%s" % str(cn_grupo)),
)
)
return {'mensaje': cn_grupo}
@view_config(route_name='grupos_listado', renderer='json', permission='listar')
def grupos_listado(peticion):
try:
grupo = Grupo()
contenido = grupo.obtener()
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError()
print(contenido)
return contenido
@view_config(route_name='grupos_listado_options', renderer='json')
def grupos_listado_options(peticion):
pass
@view_config(route_name='grupos_detalle', renderer='json')
def grupos_detalle (peticion):
try:
uid = peticion.matchdict['grupo']
except KeyError as e:
return exception.HTTPBadRequest()
    # Perform the User Detail operation via the library
try:
grupo = Grupo()
contenido = grupo.obtener(uid)
except DatosException as e:
return exception.HTTPNotFound()
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError()
return {'mensaje': contenido}
@view_config(route_name='grupos_borrado', renderer='json', permission='borrado')
def grupos_borrado(peticion):
    # Validating received data
try:
v = EsquemaGrupo()
cn_grupo = peticion.matchdict['grupo']
except KeyError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except TypeError as e:
# Se refiere a que no se hayan enviado datos json correctamente formateados
log.warning(e)
return exception.HTTPBadRequest(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
    # Perform the Group Deletion operation via the library
try:
grupo = Grupo()
contenido = grupo.borrar(cn_grupo)
except ConflictoException as e:
        # In this case, the conflict means that it does not exist
log.warning(e)
return exception.HTTPNotFound(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError(e)
return {'mensaje': contenido}
| 2.328125 | 2 |
pdf_sanitizer/pdf_sanitizer.py | lucasmrdt/pdf-sanitizer | 0 | 12793945 | import difflib
import pathlib
import argparse
from .utils import fail_with_message, progress_with_message, success_with_message
try:
import PyPDF2
except ImportError:
fail_with_message(
'Please install required dependencies before using this package.\n\t> pip3 install -r requirements.txt --user')
def parse_file(path: str):
if not pathlib.Path(path).exists():
raise argparse.ArgumentTypeError('invalid file path')
return path
def parse_ratio(x):
try:
x = float(x)
except ValueError:
raise argparse.ArgumentTypeError(
"%r not a floating-point literal" % (x,))
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
return x
def diff(content1: str, content2: str):
return difflib.SequenceMatcher(None, content1, content2)
def has_deleted_item(diff):
for operation, *_ in diff.get_opcodes():
if operation == 'delete' or operation == 'replace':
return True
return False
def get_title(content):
return content.split('\n')[0]
def get_content(content):
return content.replace(get_title(content), '').strip()
def has_content(content):
return len(get_content(content)) != 0
def sanitize(pdf_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float):
    prev_page = pdf_input.getPage(0)
    nb_pages = pdf_input.getNumPages()
    for i in range(1, nb_pages):
        progress_with_message('Sanitizing pdf ...', i / nb_pages)
        current_page = pdf_input.getPage(i)
current_content = current_page.extractText()
prev_content = prev_page.extractText()
diff_title = diff(get_title(prev_content), get_title(current_content))
diff_content = diff(get_content(prev_content),
get_content(current_content))
title_has_changed = diff_title.ratio() < title_ratio
content_has_changed = (diff_content.ratio() < content_ratio
and (has_deleted_item(diff_content) or len(prev_content) > len(current_content)))
if has_content(prev_content) and (title_has_changed or content_has_changed):
pdf_output.addPage(prev_page)
prev_page = current_page
pdf_output.addPage(prev_page)
parser = argparse.ArgumentParser(
description="Quickly remove useless page from a huge pdf to get a readable pdf")
parser.add_argument('input_file', type=parse_file,
help='pdf file to be sanitized')
parser.add_argument('output_file', type=str,
help='output sanitized pdf file name')
parser.add_argument('--title-ratio', type=parse_ratio,
help='float between [0, 1] which is responsible of detecting similar pages from title. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio')
parser.add_argument('--content-ratio', type=parse_ratio,
help='float between [0, 1] which is responsible of detecting similar pages from content. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.8)',
default=.8, dest='content_ratio')
def main():
args = parser.parse_args()
pdf_input = PyPDF2.PdfFileReader(args.input_file)
pdf_output = PyPDF2.PdfFileWriter()
sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio)
with open(args.output_file, 'wb') as f:
pdf_output.write(f)
success_with_message(f'Your file has been sanitized at {args.output_file}')
if __name__ == '__main__':
main()
| 2.859375 | 3 |
utils/files_to_h5_hierarchical.py | lzamparo/SdA_reduce | 0 | 12793946 | #! /usr/bin/env python
"""
Vacuum up all the object.CSV files from the given input directory, and pack them into an hdf5 file that is organized by plate.well
Plates go 1 .. 14. Rows go 1 ... 16, Cols 1 ... 24.
"""
import os
from optparse import OptionParser
import pandas
from tables.file import File, openFile
from tables import Filters
from tables import Atom
import numpy as np
# Check that options are present, else print help msg
parser = OptionParser()
parser.add_option("-i", "--input", dest="indir", help="read input from here")
parser.add_option("-s", "--suffix", dest="suffix", help="specify the suffix for data files")
parser.add_option("-d", "--dataframe", dest="dataframe", help="read a csv file describing the data set here")
parser.add_option("-o", "--filename", dest="filename", help="specify the .h5 filename that will contain all the data")
(options, args) = parser.parse_args()
# Open and prepare an hdf5 file
filename = options.filename
h5file = openFile(filename, mode = "w", title = "Data File")
# Load the dataframe describing the layout of the experimental data
df = pandas.read_csv(options.dataframe)
all_plates = set(df['Plate'])
# Create a new group under "/" (root)
plates_group = h5file.createGroup("/", 'plates', 'the plates for this replicate')
# Create a group for each plate
for plate in all_plates:
desc = "plate number " + str(plate)
h5file.createGroup("/plates/",str(plate),desc)
# build a lookup of image number to plate, well
img_to_pw = {}
# populate the lookup table of image number to (plate, well)
for index, rec in df.iterrows():
for img_num in xrange(rec['Low'],rec['High'] + 1):
well = (int(rec['Row']) - 1) * 24 + int(rec['Col'])
img_to_pw[img_num] = (rec['Plate'],well)
# get the root
root = h5file.root
# Go and read the files,
input_dir = options.indir
suffix = options.suffix
cur_dir = os.getcwd()
try:
files = os.listdir(input_dir)
os.chdir(input_dir)
except:
print "Could not read files from " + input_dir
# Read all the files, process 'em.
zlib_filters = Filters(complib='zlib', complevel=5)
for i,f in enumerate(files):
if i % 10 == 0:
print "processing %s, %d files done of %d total" % (f,i,len(files))
if f.endswith(suffix):
my_data = np.genfromtxt(f, delimiter=',', autostrip = True)
atom = Atom.from_dtype(my_data.dtype)
# slice this data file by grouped image numbers
min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0]))
for img_num in xrange(min_img,max_img+1):
try:
plate, well = img_to_pw[img_num]
except KeyError as e:
print "image number not found in image to well map: " + str(img_num)
continue
objs = my_data[my_data[:,0] == img_num]
well_group = "/plates/" + str(plate)
well_node = "/plates/" + str(plate) + "/" + str(well)
if h5file.__contains__(well_node):
# some data for this well exists in an EArray already, append this data to it.
ds = h5file.get_node(where=well_node)
ds.append(objs)
else:
# no data from images belonging to this well have yet been dumped into an EArray.
ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters)
ds.append(objs)
h5file.flush()
os.chdir(cur_dir)
print "done!"
h5file.close()
| 3.203125 | 3 |
pyImagingMSpec/smoothing.py | andy-d-palmer/pyIMS | 2 | 12793947 | __author__ = 'palmer'
# every method in smoothing should accept (im,**args)
def median(im, **kwargs):
from scipy import ndimage
im = ndimage.filters.median_filter(im,**kwargs)
return im
def hot_spot_removal(xic, q=99.):
import numpy as np
xic_q = np.percentile(xic, q)
xic[xic > xic_q] = xic_q
return xic | 2.640625 | 3 |
aldryn_apphooks_config_utils/context_processors.py | TigerND/aldryn-apphook-utils | 0 | 12793948 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_apphooks_config.utils import get_app_instance
def apphooks_config(request):
namespace, config = get_app_instance(request)
return {
'namespace': namespace,
'config': config,
}
| 1.484375 | 1 |
tests/multijson/test_multi.py | adeadman/multijson | 1 | 12793949 | <gh_stars>1-10
import json
import uuid
import datetime
import pytest
from decimal import Decimal
from multijson import MultiJSONEncoder
class Custom:
def __init__(self, name, age):
self.name = name
self.age = age
class TestMultiJSONEncoder:
def test_dump_uuid(self):
test_input = {
"id": uuid.uuid4(),
}
output = json.dumps(test_input, cls=MultiJSONEncoder)
result = json.loads(output)
assert "id" in result
assert result["id"] == str(test_input["id"])
def test_dump_date(self):
test_input = {
"date": datetime.date(2017, 7, 1),
}
output = json.dumps(test_input, cls=MultiJSONEncoder)
result = json.loads(output)
assert "date" in result
assert result["date"] == "2017-07-01"
def test_dump_datetime(self):
test_input = {
"time": datetime.datetime(2017, 7, 1, 23, 11, 11),
}
output = json.dumps(test_input, cls=MultiJSONEncoder)
result = json.loads(output)
assert "time" in result
assert result["time"] == "2017-07-01T23:11:11Z"
def test_dump_decimal(self):
test_input = {
"a": Decimal('3.9'),
"b": Decimal('0.0003'),
"c": Decimal('1200000.0000021'),
}
output = json.dumps(test_input, cls=MultiJSONEncoder)
assert output == '{"a": 3.9, "b": 0.0003, "c": 1200000.0000021}'
def test_dump_custom_object(self):
test_input = {
"custom": Custom("Rincewind", 120),
}
with pytest.raises(TypeError):
json.dumps(test_input, cls=MultiJSONEncoder)
| 2.53125 | 3 |
QUANTAXIS_Trade/WindowsCTP/test.py | xiongyixiaoyang/QUANTAXIS | 2 | 12793950 | <filename>QUANTAXIS_Trade/WindowsCTP/test.py<gh_stars>1-10
import datetime
import logging
import math
import multiprocessing as mp
import os
import pickle
import shutil
import numpy as np
import pandas as pd
from ParadoxTrading.Chart import Wizard
from ParadoxTrading.Engine import (MarketEvent, SettlementEvent,
StrategyAbstract)
from ParadoxTrading.EngineExt.Futures import (BacktestEngine,
BacktestMarketSupply,
CTAEqualFundPortfolio,
CTAEqualRiskATRPortfolio,
CTAEqualRiskGARCHPortfolio,
InterDayBacktestExecution,
InterDayPortfolio)
from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy
from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex,
FetchInstrumentDayData,
FetchProductIndex,
RegisterIndex)
from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR,
BBands, StepDrawdownStop)
from ParadoxTrading.Utils import DataStruct
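# For each product in `symbollist`, the script below walks the trading days between
# `start` and `end`, looks up the dominant contract for each day, pulls its daily
# OHLC prices, and writes one CSV per product into .\data\.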
class GetTradingday(object):
def __init__(self, _start, _end, _symbol='a'):
self.start = _start
self.end = _end
self.symbol = _symbol
self.fetcherindex = FetchProductIndex()
self.fetcherindex.psql_host = '192.168.4.103'
self.fetcherindex.psql_user = 'ubuntu'
self.fetcherindex.mongo_host = '192.168.4.103'
def gettradingday(self):
market_data = self.fetcherindex.fetchDayData(
self.start, self.end, self.symbol)
tradingday_list = market_data['tradingday']
return tradingday_list
fetcher = FetchInstrumentDayData()
fetcher.psql_host = '192.168.4.103'
fetcher.psql_user = 'ubuntu'
fetcher.mongo_host = '192.168.4.103'
start = '20150101'
end = '20180428'
get_tradingday = GetTradingday(start, end)
tradingday_list = get_tradingday.gettradingday()
symbollist = ['oi', 'y']
for symbol in symbollist:
data_df = pd.DataFrame()
for index in range(1, len(tradingday_list)):
pricelist = []
day = tradingday_list[index]
yesterday = tradingday_list[index-1]
domian_instrument = fetcher.fetchDominant(symbol, day)
data = fetcher.fetchDayData(yesterday, day, domian_instrument)
pricelist.append(data['tradingday'][0])
pricelist.append(data['openprice'][0])
pricelist.append(data['highprice'][0])
pricelist.append(data['lowprice'][0])
pricelist.append(data['closeprice'][0])
df = pd.DataFrame(pricelist).T
df.columns = ['tradingday', 'openprice',
'highprice', 'lowprice', 'closeprice']
data_df = pd.concat([data_df, df])
pd.DataFrame.to_csv(data_df, '.\\data\\{0}.csv'.format(symbol))
| 2.03125 | 2 |