max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
django_mail_admin/transports/babyl.py | jayvdb/django_mail_admin | 179 | 11166196 | from mailbox import Babyl
from django_mail_admin.transports.generic import GenericFileMailbox
class BabylTransport(GenericFileMailbox):
_variant = Babyl
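# Note: `mailbox.Babyl` from the standard library implements the Babyl single-file
# mailbox format (used by Emacs Rmail); this transport only needs to point the
# generic file-mailbox handling at that variant via `_variant`.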
|
aat/core/handler/handler.py | mthomascarcamo/aat | 305 | 11166217 | from abc import ABCMeta, abstractmethod
from inspect import isabstract
from typing import TYPE_CHECKING, Callable, Optional, Tuple
from ..data import Event
from ...config import EventType
if TYPE_CHECKING:
# Circular import
from aat.engine import StrategyManager
class EventHandler(metaclass=ABCMeta):
_manager: "StrategyManager"
def _setManager(self, mgr: "StrategyManager") -> None:
self._manager = mgr
def _valid_callback(self, callback: str) -> Optional[Callable]:
if (
hasattr(self, callback)
and not isabstract(callback)
and not hasattr(getattr(self, callback), "_original")
):
return getattr(self, callback)
return None
def callback(self, event_type: EventType) -> Tuple[Optional[Callable], ...]:
return {
# Market data
EventType.TRADE: (self._valid_callback("onTrade"),),
EventType.OPEN: (
self._valid_callback("onOpen"),
self._valid_callback("onOrder"),
),
EventType.CANCEL: (
self._valid_callback("onCancel"),
self._valid_callback("onOrder"),
),
EventType.CHANGE: (
self._valid_callback("onChange"),
self._valid_callback("onOrder"),
),
EventType.FILL: (
self._valid_callback("onFill"),
self._valid_callback("onOrderEvent"),
),
EventType.DATA: (self._valid_callback("onData"),),
EventType.HALT: (self._valid_callback("onHalt"),),
EventType.CONTINUE: (self._valid_callback("onContinue"),),
EventType.ERROR: (self._valid_callback("onError"),),
EventType.START: (self._valid_callback("onStart"),),
EventType.EXIT: (self._valid_callback("onExit"),),
# Order Entry
EventType.BOUGHT: (
self._valid_callback("onBought"),
self._valid_callback("onTraded"),
),
EventType.SOLD: (
self._valid_callback("onSold"),
self._valid_callback("onTraded"),
),
EventType.RECEIVED: (self._valid_callback("onReceived"),),
EventType.REJECTED: (self._valid_callback("onRejected"),),
EventType.CANCELED: (self._valid_callback("onCanceled"),),
}.get(event_type, tuple())
################################################
# Event Handler Methods #
# #
# NOTE: these should all be of the form onNoun #
################################################
@abstractmethod
async def onTrade(self, event: Event) -> None:
"""Called whenever a `Trade` event is received"""
async def onOrder(self, event: Event) -> None:
"""Called whenever an Order `Open`, `Cancel`, `Change`, or `Fill` event is received"""
pass
async def onOpen(self, event: Event) -> None:
"""Called whenever an Order `Open` event is received"""
pass
async def onCancel(self, event: Event) -> None:
"""Called whenever an Order `Cancel` event is received"""
pass
async def onChange(self, event: Event) -> None:
"""Called whenever an Order `Change` event is received"""
pass
async def onFill(self, event: Event) -> None:
"""Called whenever an Order `Fill` event is received"""
pass
async def onData(self, event: Event) -> None:
"""Called whenever other data is received"""
async def onHalt(self, event: Event) -> None:
"""Called whenever an exchange `Halt` event is received, i.e. an event to stop trading"""
pass
async def onContinue(self, event: Event) -> None:
"""Called whenever an exchange `Continue` event is received, i.e. an event to continue trading"""
pass
async def onError(self, event: Event) -> None:
"""Called whenever an internal error occurs"""
pass
async def onStart(self, event: Event) -> None:
"""Called once at engine initialization time"""
pass
async def onExit(self, event: Event) -> None:
"""Called once at engine exit time"""
pass
################################################
# Order Entry Callbacks #
# #
# NOTE: these should all be of the form onVerb #
################################################
async def onBought(self, event: Event) -> None:
"""Called on my order bought"""
pass
async def onSold(self, event: Event) -> None:
"""Called on my order sold"""
pass
async def onTraded(self, event: Event) -> None:
"""Called on my order bought or sold"""
pass
async def onReceived(self, event: Event) -> None:
"""Called on my order received by exchange"""
pass
async def onRejected(self, event: Event) -> None:
"""Called on my order rejected"""
pass
async def onCanceled(self, event: Event) -> None:
"""Called on my order canceled"""
pass
#################
# Other Methods #
#################
setattr(EventHandler.onTrade, "_original", 1)
setattr(EventHandler.onOrder, "_original", 1)
setattr(EventHandler.onOpen, "_original", 1)
setattr(EventHandler.onCancel, "_original", 1)
setattr(EventHandler.onChange, "_original", 1)
setattr(EventHandler.onFill, "_original", 1)
setattr(EventHandler.onData, "_original", 1)
setattr(EventHandler.onHalt, "_original", 1)
setattr(EventHandler.onContinue, "_original", 1)
setattr(EventHandler.onError, "_original", 1)
setattr(EventHandler.onStart, "_original", 1)
setattr(EventHandler.onExit, "_original", 1)
setattr(EventHandler.onBought, "_original", 1)
setattr(EventHandler.onSold, "_original", 1)
setattr(EventHandler.onReceived, "_original", 1)
setattr(EventHandler.onRejected, "_original", 1)
setattr(EventHandler.onTraded, "_original", 1)
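# Illustrative usage sketch (not part of aat itself, names are made up): a concrete
# subclass only needs to implement the abstract onTrade; callback() then returns the
# override because it lacks the `_original` marker set on the base defaults above.
#
#     class PrintTrades(EventHandler):
#         async def onTrade(self, event: Event) -> None:
#             print(event)
#
#     PrintTrades().callback(EventType.TRADE)   # -> (bound onTrade,)
#     PrintTrades().callback(EventType.HALT)    # -> (None,) because onHalt is not overridden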
|
src/warp/yul/SwitchToIfVisitor.py | sambarnes/warp | 414 | 11166249 | from __future__ import annotations
from typing import Optional
import warp.yul.ast as ast
from warp.yul.AstMapper import AstMapper
class SwitchToIfVisitor(AstMapper):
def visit_switch(self, node: ast.Switch) -> ast.Block:
return self.visit(
ast.Block(
(
ast.VariableDeclaration(
variables=[ast.TypedName("match_var")], value=node.expression
),
switch_to_if(ast.Identifier("match_var"), node.cases),
)
)
)
def switch_to_if(switch_var: ast.Identifier, cases: list[ast.Case]) -> ast.Block:
res = switch_to_if_helper(switch_var, cases, case_no=0)
assert res is not None
return res
def switch_to_if_helper(
switch_var: ast.Identifier, cases: list[ast.Case], case_no: int = 0
) -> Optional[ast.Block]:
assert case_no <= len(cases)
if case_no == len(cases):
return None
if cases[case_no].value is None:
assert case_no == len(cases) - 1, "Default case should be the last one"
return cases[case_no].body
return ast.Block(
(
ast.If(
condition=ast.FunctionCall(
function_name=ast.Identifier("eq"),
arguments=[switch_var, cases[case_no].value],
),
body=cases[case_no].body,
else_body=switch_to_if_helper(switch_var, cases, case_no + 1),
),
)
)
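# Worked example of the lowering above (Yul pseudocode, for illustration only):
#
#     switch x                      let match_var := x
#     case 0 { A }        ==>       if eq(match_var, 0) { A }
#     case 1 { B }                  else { if eq(match_var, 1) { B } else { C } }
#     default { C }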
|
alipay/aop/api/domain/AlipayFundTaxbillSignUnsignModel.py | antopen/alipay-sdk-python-all | 213 | 11166275 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTaxbillSignUnsignModel(object):
def __init__(self):
self._biz_scene = None
self._contractor_code = None
self._employer_code = None
self._identification_in_belonging_employer = None
self._product_code = None
self._tax_optimization_mode = None
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def contractor_code(self):
return self._contractor_code
@contractor_code.setter
def contractor_code(self, value):
self._contractor_code = value
@property
def employer_code(self):
return self._employer_code
@employer_code.setter
def employer_code(self, value):
self._employer_code = value
@property
def identification_in_belonging_employer(self):
return self._identification_in_belonging_employer
@identification_in_belonging_employer.setter
def identification_in_belonging_employer(self, value):
self._identification_in_belonging_employer = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def tax_optimization_mode(self):
return self._tax_optimization_mode
@tax_optimization_mode.setter
def tax_optimization_mode(self, value):
self._tax_optimization_mode = value
def to_alipay_dict(self):
params = dict()
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.contractor_code:
if hasattr(self.contractor_code, 'to_alipay_dict'):
params['contractor_code'] = self.contractor_code.to_alipay_dict()
else:
params['contractor_code'] = self.contractor_code
if self.employer_code:
if hasattr(self.employer_code, 'to_alipay_dict'):
params['employer_code'] = self.employer_code.to_alipay_dict()
else:
params['employer_code'] = self.employer_code
if self.identification_in_belonging_employer:
if hasattr(self.identification_in_belonging_employer, 'to_alipay_dict'):
params['identification_in_belonging_employer'] = self.identification_in_belonging_employer.to_alipay_dict()
else:
params['identification_in_belonging_employer'] = self.identification_in_belonging_employer
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.tax_optimization_mode:
if hasattr(self.tax_optimization_mode, 'to_alipay_dict'):
params['tax_optimization_mode'] = self.tax_optimization_mode.to_alipay_dict()
else:
params['tax_optimization_mode'] = self.tax_optimization_mode
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundTaxbillSignUnsignModel()
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'contractor_code' in d:
o.contractor_code = d['contractor_code']
if 'employer_code' in d:
o.employer_code = d['employer_code']
if 'identification_in_belonging_employer' in d:
o.identification_in_belonging_employer = d['identification_in_belonging_employer']
if 'product_code' in d:
o.product_code = d['product_code']
if 'tax_optimization_mode' in d:
o.tax_optimization_mode = d['tax_optimization_mode']
return o
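# Round-trip sketch (illustrative field values only):
#
#     m = AlipayFundTaxbillSignUnsignModel.from_alipay_dict(
#         {'biz_scene': 'DIRECT', 'product_code': 'FUND_TAXBILL'})
#     assert m.to_alipay_dict() == {'biz_scene': 'DIRECT', 'product_code': 'FUND_TAXBILL'}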
|
data/masif_site/nn_models/hbond_only/custom_params.py | NBDsoftware/masif | 309 | 11166292 | custom_params = {}
custom_params['model_dir'] = 'nn_models/hbond_only/model_data/'
custom_params['out_dir'] = 'output/hbond_only/'
custom_params['feat_mask'] = [0.0, 0.0, 1.0, 0.0, 0.0]
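# Note (assumption based on the 'hbond_only' model name): the mask enables only the
# third input feature channel, i.e. the hydrogen-bond feature, and zeroes the rest.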
|
scripts/scoring/score.py | Diffblue-benchmarks/Microsoft-malmo | 3,570 | 11166310 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
from lxml import etree
import argparse
import shutil
class ScoreLog:
"""Extract missions (digest, agent name) and their totals from score log files to given csv file."""
def __init__(self, score_csv_file):
self.score_file = score_csv_file
self.separator = ","
self.mission = ""
self.agent = ""
self.total = ""
def new_mission(self, mission_init):
self.mission = mission_init.find('MissionDigest').text
self.agent = mission_init.find('AgentName').text
def record_total(self, mission_total):
self.total = mission_total.text
self.output()
def parse(self, message):
# print(message)
msg = etree.fromstring(message)
if msg.tag == 'MissionInit':
self.new_mission(msg)
elif msg.tag == 'MissionTotal':
self.record_total(msg)
def score(self, file):
try:
log = etree.parse(file)
root = log.getroot()
for child in root:
# print(child.tag)
if child.tag == 'record':
self.parse(child.find('message').text)
except etree.XMLSyntaxError as err:
# Incomplete log files don't have a closing </log>.
# Try to copy file, append and re-parse.
print('XMLSyntaxError ' + str(err))
if 'Premature end of data' in str(err):
print('Re-try after appending </log>')
file2 = file + '2'
shutil.copyfile(file, file2)
with open(file2, 'a') as log:
log.write('</log>')
self.score(file2)
else:
raise
def output(self):
self.score_file.write(self.mission + self.separator + self.agent + self.separator + self.total + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='score missions')
parser.add_argument('--log_files', nargs='+', default=[], help='the log files to score')
args = parser.parse_args()
with open('score.csv', 'w') as score_file:
[ScoreLog(score_file).score(log_file) for log_file in args.log_files]
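# Each scored mission becomes one row of score.csv: <mission digest>,<agent name>,<mission total>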
|
src/genie/libs/parser/iosxr/tests/ShowBgpInstanceNeighborsDetail/cli/equal/golden_output1_expected.py | balmasea/genieparser | 204 | 11166331 | expected_output = {
"instance": {
"all": {
"vrf": {
"default": {
"neighbor": {
"10.4.1.1": {
"remote_as": 65000,
"link_state": "internal link",
"local_as_as_no": 65000,
"local_as_no_prepend": True,
"local_as_replace_as": True,
"local_as_dual_as": True,
"router_id": "10.4.1.1",
"session_state": "established",
"up_time": "1w1d",
"nsr_state": "None",
"holdtime": 180,
"keepalive_interval": 60,
"min_acceptable_hold_time": 3,
"last_write": "00:00:03",
"attempted": 19,
"written": 19,
"second_last_write": "00:01:03",
"second_attempted": 19,
"second_written": 19,
"last_write_pulse_rcvd": "Nov 1 21:31:48.334 ",
"last_full_not_set_pulse_count": 85322,
"last_ka_error_before_reset": "00:00:00",
"last_ka_error_ka_not_sent": "00:00:00",
"precedence": "internet",
"non_stop_routing": True,
"multiprotocol_capability": "received",
"minimum_time_between_adv_runs": 0,
"inbound_message": "3",
"outbound_message": "3",
"address_family": {
"ipv4 unicast": {
"neighbor_version": 7,
"update_group": "0.2",
"filter_group": "0.1",
"refresh_request_status": "No Refresh request being processed",
"route_refresh_request_received": 0,
"route_refresh_request_sent": 0,
"accepted_prefixes": 1,
"best_paths": 1,
"exact_no_prefixes_denied": 0,
"cummulative_no_prefixes_denied": 0,
"prefix_advertised": 1,
"prefix_suppressed": 0,
"prefix_withdrawn": 0,
"maximum_prefix_max_prefix_no": 1048576,
"maximum_prefix_warning_only": True,
"maximum_prefix_threshold": "75%",
"maximum_prefix_restart": 0,
"eor_status": "was received during read-only mode",
"last_synced_ack_version": 0,
"last_ack_version": 7,
"additional_paths_operation": "None",
"send_multicast_attributes": True,
"additional_routes_local_label": "Unicast SAFI",
}
},
"bgp_session_transport": {
"connection": {
"state": "established",
"connections_established": 2,
"connections_dropped": 1,
},
"transport": {
"local_host": "10.16.2.2",
"local_port": "179",
"if_handle": "0x00000000",
"foreign_host": "10.4.1.1",
"foreign_port": "27104",
},
},
}
}
}
}
}
}
} |
PhysicsTools/PatAlgos/python/recoLayer0/muonPFIsolationValuesPAT_cff.py | ckamtsikis/cmssw | 852 | 11166345 | import FWCore.ParameterSet.Config as cms
import CommonTools.ParticleFlow.Isolation.muonPFIsolationValuesPFBRECO_cff as _m
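# Each isolation-value producer below is a clone of its PFBRECO counterpart with
# deposits[0].src re-pointed at the corresponding PAT-level isolation deposit.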
muPFIsoValueCharged03PAT = _m.muPFIsoValueCharged03PFBRECO.clone()
muPFIsoValueCharged03PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFMeanDRIsoValueCharged03PAT = _m.muPFMeanDRIsoValueCharged03PFBRECO.clone()
muPFMeanDRIsoValueCharged03PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFSumDRIsoValueCharged03PAT = _m.muPFSumDRIsoValueCharged03PFBRECO.clone()
muPFSumDRIsoValueCharged03PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFIsoValueChargedAll03PAT = _m.muPFIsoValueChargedAll03PFBRECO.clone()
muPFIsoValueChargedAll03PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFMeanDRIsoValueChargedAll03PAT = _m.muPFMeanDRIsoValueChargedAll03PFBRECO.clone()
muPFMeanDRIsoValueChargedAll03PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFSumDRIsoValueChargedAll03PAT = _m.muPFSumDRIsoValueChargedAll03PFBRECO.clone()
muPFSumDRIsoValueChargedAll03PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFIsoValueGamma03PAT = _m.muPFIsoValueGamma03PFBRECO.clone()
muPFIsoValueGamma03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFMeanDRIsoValueGamma03PAT = _m.muPFMeanDRIsoValueGamma03PFBRECO.clone()
muPFMeanDRIsoValueGamma03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFSumDRIsoValueGamma03PAT = _m.muPFSumDRIsoValueGamma03PFBRECO.clone()
muPFSumDRIsoValueGamma03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFIsoValueNeutral03PAT = _m.muPFIsoValueNeutral03PFBRECO.clone()
muPFIsoValueNeutral03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFMeanDRIsoValueNeutral03PAT = _m.muPFMeanDRIsoValueNeutral03PFBRECO.clone()
muPFMeanDRIsoValueNeutral03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFSumDRIsoValueNeutral03PAT = _m.muPFSumDRIsoValueNeutral03PFBRECO.clone()
muPFSumDRIsoValueNeutral03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFIsoValueGammaHighThreshold03PAT = _m.muPFIsoValueGammaHighThreshold03PFBRECO.clone()
muPFIsoValueGammaHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFMeanDRIsoValueGammaHighThreshold03PAT = _m.muPFMeanDRIsoValueGammaHighThreshold03PFBRECO.clone()
muPFMeanDRIsoValueGammaHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFSumDRIsoValueGammaHighThreshold03PAT = _m.muPFSumDRIsoValueGammaHighThreshold03PFBRECO.clone()
muPFSumDRIsoValueGammaHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFIsoValueNeutralHighThreshold03PAT = _m.muPFIsoValueNeutralHighThreshold03PFBRECO.clone()
muPFIsoValueNeutralHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFMeanDRIsoValueNeutralHighThreshold03PAT = _m.muPFMeanDRIsoValueNeutralHighThreshold03PFBRECO.clone()
muPFMeanDRIsoValueNeutralHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFSumDRIsoValueNeutralHighThreshold03PAT = _m.muPFSumDRIsoValueNeutralHighThreshold03PFBRECO.clone()
muPFSumDRIsoValueNeutralHighThreshold03PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFIsoValuePU03PAT = _m.muPFIsoValuePU03PFBRECO.clone()
muPFIsoValuePU03PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
muPFMeanDRIsoValuePU03PAT = _m.muPFMeanDRIsoValuePU03PFBRECO.clone()
muPFMeanDRIsoValuePU03PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
muPFSumDRIsoValuePU03PAT = _m.muPFSumDRIsoValuePU03PFBRECO.clone()
muPFSumDRIsoValuePU03PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
##############################
muPFIsoValueCharged04PAT = _m.muPFIsoValueCharged04PFBRECO.clone()
muPFIsoValueCharged04PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFMeanDRIsoValueCharged04PAT = _m.muPFMeanDRIsoValueCharged04PFBRECO.clone()
muPFMeanDRIsoValueCharged04PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFSumDRIsoValueCharged04PAT = _m.muPFSumDRIsoValueCharged04PFBRECO.clone()
muPFSumDRIsoValueCharged04PAT.deposits[0].src = 'muPFIsoDepositChargedPAT'
muPFIsoValueChargedAll04PAT = _m.muPFIsoValueChargedAll04PFBRECO.clone()
muPFIsoValueChargedAll04PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFMeanDRIsoValueChargedAll04PAT = _m.muPFMeanDRIsoValueChargedAll04PFBRECO.clone()
muPFMeanDRIsoValueChargedAll04PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFSumDRIsoValueChargedAll04PAT = _m.muPFSumDRIsoValueChargedAll04PFBRECO.clone()
muPFSumDRIsoValueChargedAll04PAT.deposits[0].src = 'muPFIsoDepositChargedAllPAT'
muPFIsoValueGamma04PAT = _m.muPFIsoValueGamma04PFBRECO.clone()
muPFIsoValueGamma04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFMeanDRIsoValueGamma04PAT = _m.muPFMeanDRIsoValueGamma04PFBRECO.clone()
muPFMeanDRIsoValueGamma04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFSumDRIsoValueGamma04PAT = _m.muPFSumDRIsoValueGamma04PFBRECO.clone()
muPFSumDRIsoValueGamma04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFIsoValueNeutral04PAT = _m.muPFIsoValueNeutral04PFBRECO.clone()
muPFIsoValueNeutral04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFMeanDRIsoValueNeutral04PAT = _m.muPFMeanDRIsoValueNeutral04PFBRECO.clone()
muPFMeanDRIsoValueNeutral04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFSumDRIsoValueNeutral04PAT = _m.muPFSumDRIsoValueNeutral04PFBRECO.clone()
muPFSumDRIsoValueNeutral04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFIsoValueGammaHighThreshold04PAT = _m.muPFIsoValueGammaHighThreshold04PFBRECO.clone()
muPFIsoValueGammaHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFMeanDRIsoValueGammaHighThreshold04PAT = _m.muPFMeanDRIsoValueGammaHighThreshold04PFBRECO.clone()
muPFMeanDRIsoValueGammaHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFSumDRIsoValueGammaHighThreshold04PAT = _m.muPFSumDRIsoValueGammaHighThreshold04PFBRECO.clone()
muPFSumDRIsoValueGammaHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositGammaPAT'
muPFIsoValueNeutralHighThreshold04PAT = _m.muPFIsoValueNeutralHighThreshold04PFBRECO.clone()
muPFIsoValueNeutralHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFMeanDRIsoValueNeutralHighThreshold04PAT = _m.muPFMeanDRIsoValueNeutralHighThreshold04PFBRECO.clone()
muPFMeanDRIsoValueNeutralHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFSumDRIsoValueNeutralHighThreshold04PAT = _m.muPFSumDRIsoValueNeutralHighThreshold04PFBRECO.clone()
muPFSumDRIsoValueNeutralHighThreshold04PAT.deposits[0].src = 'muPFIsoDepositNeutralPAT'
muPFIsoValuePU04PAT = _m.muPFIsoValuePU04PFBRECO.clone()
muPFIsoValuePU04PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
muPFMeanDRIsoValuePU04PAT = _m.muPFMeanDRIsoValuePU04PFBRECO.clone()
muPFMeanDRIsoValuePU04PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
muPFSumDRIsoValuePU04PAT = _m.muPFSumDRIsoValuePU04PFBRECO.clone()
muPFSumDRIsoValuePU04PAT.deposits[0].src = 'muPFIsoDepositPUPAT'
muonPFIsolationValuesPATTask = cms.Task(
muPFIsoValueCharged03PAT,
muPFMeanDRIsoValueCharged03PAT,
muPFSumDRIsoValueCharged03PAT,
muPFIsoValueChargedAll03PAT,
muPFMeanDRIsoValueChargedAll03PAT,
muPFSumDRIsoValueChargedAll03PAT,
muPFIsoValueGamma03PAT,
muPFMeanDRIsoValueGamma03PAT,
muPFSumDRIsoValueGamma03PAT,
muPFIsoValueNeutral03PAT,
muPFMeanDRIsoValueNeutral03PAT,
muPFSumDRIsoValueNeutral03PAT,
muPFIsoValueGammaHighThreshold03PAT,
muPFMeanDRIsoValueGammaHighThreshold03PAT,
muPFSumDRIsoValueGammaHighThreshold03PAT,
muPFIsoValueNeutralHighThreshold03PAT,
muPFMeanDRIsoValueNeutralHighThreshold03PAT,
muPFSumDRIsoValueNeutralHighThreshold03PAT,
muPFIsoValuePU03PAT,
muPFMeanDRIsoValuePU03PAT,
muPFSumDRIsoValuePU03PAT,
##############################
muPFIsoValueCharged04PAT,
muPFMeanDRIsoValueCharged04PAT,
muPFSumDRIsoValueCharged04PAT,
muPFIsoValueChargedAll04PAT,
muPFMeanDRIsoValueChargedAll04PAT,
muPFSumDRIsoValueChargedAll04PAT,
muPFIsoValueGamma04PAT,
muPFMeanDRIsoValueGamma04PAT,
muPFSumDRIsoValueGamma04PAT,
muPFIsoValueNeutral04PAT,
muPFMeanDRIsoValueNeutral04PAT,
muPFSumDRIsoValueNeutral04PAT,
muPFIsoValueGammaHighThreshold04PAT,
muPFMeanDRIsoValueGammaHighThreshold04PAT,
muPFSumDRIsoValueGammaHighThreshold04PAT,
muPFIsoValueNeutralHighThreshold04PAT,
muPFMeanDRIsoValueNeutralHighThreshold04PAT,
muPFSumDRIsoValueNeutralHighThreshold04PAT,
muPFIsoValuePU04PAT,
muPFMeanDRIsoValuePU04PAT,
muPFSumDRIsoValuePU04PAT
)
muonPFIsolationValuesPATSequence = cms.Sequence(muonPFIsolationValuesPATTask)
|
tests/test_reason/test_relativedifference.py | dumpmemory/doubtlab | 300 | 11166353 | import pytest
import numpy as np
from doubtlab.reason import RelativeDifferenceReason
@pytest.mark.parametrize("t, s", [(0.05, 4), (0.2, 3), (0.4, 2), (0.6, 1)])
def test_from_predict(t, s):
"""Test `from_predict` on an obvious examples"""
y = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
preds = np.array([1.0, 1.1, 1.3, 1.5, 1.7])
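    # Relative differences w.r.t. y are 0.0, 0.1, 0.3, 0.5 and 0.7, so raising the
    # threshold t shrinks the number of flagged rows (4, 3, 2, 1 in the cases above).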
predicate = RelativeDifferenceReason.from_predict(pred=preds, y=y, threshold=t)
assert np.sum(predicate) == s
def test_zero_error():
"""Ensure error is throw when `y=0`"""
y = np.array([0.0])
preds = np.array([1.0])
with pytest.raises(ValueError):
RelativeDifferenceReason.from_predict(pred=preds, y=y, threshold=0.1)
|
cli/scripts/addr.py | niclashedam/liblightnvm | 126 | 11166368 | #!/usr/bin/env python
from subprocess import Popen, PIPE
NSECTORS = 4
NPLANES = 4
def main():
ppas = []
for i in xrange(0, 16):
ch = 0
lun = 0
blk = 3
pg = 0
sec = i % NSECTORS
pl = (i / NSECTORS) % NPLANES
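        # The sector index cycles fastest: every NSECTORS iterations advance to the next
        # plane, so 16 iterations cover sectors 0-3 in each of planes 0-3 of one block/page.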
cmd = [str(x) for x in [
"nvm_addr",
"fmt_g",
"/dev/nvme0n1",
ch, lun, pl, blk, pg, sec
]]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
print(out.strip())
ppas.append(out[1:17])
print("ppas{ %s }" % " ".join(ppas))
if __name__ == "__main__":
main()
|
adb/systrace/catapult/common/py_utils/py_utils/tempfile_ext.py | mohanedmoh/TBS | 2,151 | 11166408 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def NamedTemporaryDirectory(suffix='', prefix='tmp', dir=None):
"""A context manager that manages a temporary directory.
This is a context manager version of tempfile.mkdtemp. The arguments to this
function are the same as the arguments for that one.
This can be used to automatically manage the lifetime of a temporary file
without maintaining an open file handle on it. Doing so can be useful in
scenarios where a parent process calls a child process to create a temporary
file and then does something with the resulting file.
"""
# This uses |dir| as a parameter name for consistency with mkdtemp.
# pylint: disable=redefined-builtin
d = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
shutil.rmtree(d)
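# Usage sketch (illustrative):
#
#     with NamedTemporaryDirectory(prefix='scratch-') as tmp_dir:
#         ...  # tmp_dir exists here and is removed when the block exits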
|
tests/primitives/test_strategybase.py | fakegit/DEXBot | 249 | 11166420 | import logging
import pytest
log = logging.getLogger("dexbot")
log.setLevel(logging.DEBUG)
@pytest.fixture()
def worker(strategybase):
return strategybase
@pytest.mark.mandatory
def test_init(worker):
pass
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_get_operational_balance(asset, worker, monkeypatch):
share = 0.1
def get_share(*args):
return share
symbol = worker.market[asset]['symbol']
balance = worker.balance(symbol)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount']
monkeypatch.setattr(worker, 'get_worker_share_for_asset', get_share)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount'] * share
|
tests/test_config.py | tyrylu/todoman | 318 | 11166421 | from unittest.mock import patch
import pytest
from click.testing import CliRunner
from todoman.cli import cli
from todoman.configuration import ConfigurationException
from todoman.configuration import load_config
def test_explicit_nonexistant(runner):
result = CliRunner().invoke(
cli,
env={"TODOMAN_CONFIG": "/nonexistant"},
catch_exceptions=True,
)
assert result.exception
assert "Configuration file /nonexistant does not exist" in result.output
def test_xdg_nonexistant(runner):
with patch("xdg.BaseDirectory.xdg_config_dirs", ["/does-not-exist"]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert result.exception
assert "No configuration file found" in result.output
def test_xdg_existant(runner, tmpdir, config):
with tmpdir.mkdir("todoman").join("config.py").open("w") as f:
with config.open() as c:
f.write(c.read())
with patch("xdg.BaseDirectory.xdg_config_dirs", [str(tmpdir)]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert not result.exception
assert not result.output.strip()
def test_sane_config(config, runner, tmpdir):
config.write(
'color = "auto"\n'
'date_format = "%Y-%m-%d"\n'
f'path = "{tmpdir}"\n'
f'cache_path = "{tmpdir.join("cache.sqlite")}"\n'
)
result = runner.invoke(cli)
# This is handy for debugging breakage:
if result.exception:
print(result.output)
raise result.exception
assert not result.exception
def test_invalid_color(config, runner):
config.write('color = 12\npath = "/"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert (
"Error: Bad color setting. Invalid type (expected str, got int)."
in result.output
)
def test_invalid_color_arg(config, runner):
config.write('path = "/"\n')
result = runner.invoke(cli, ["--color", "12", "list"])
assert result.exception
assert "Usage:" in result.output
def test_missing_path(config, runner):
config.write('color = "auto"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Missing 'path' setting." in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_entry(config, runner):
config.write("color = auto\ndate_format = %Y-%m-%d\npath = /\nblah = false\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Invalid configuration entry" in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_section(config, runner):
config.write("date_format = %Y-%m-%d\npath = /\n[extra]\ncolor = auto\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Invalid configuration section" in result.output
def test_missing_cache_dir(config, runner, tmpdir):
cache_dir = tmpdir.join("does").join("not").join("exist")
cache_file = cache_dir.join("cache.sqlite")
config.write(f'path = "{tmpdir}/*"\ncache_path = "{cache_file}"\n')
result = runner.invoke(cli)
assert not result.exception
assert cache_dir.isdir()
assert cache_file.isfile()
def test_date_field_in_time_format(config, runner, tmpdir):
config.write('path = "/"\ntime_format = "%Y-%m-%d"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found date component in `time_format`, please use `date_format` for that."
in result.output
)
def test_date_field_in_time(config, runner, tmpdir):
config.write('path = "/"\ndate_format = "%Y-%d-:%M"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found time component in `date_format`, please use `time_format` for that."
in result.output
)
def test_colour_validation_auto(config):
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "auto"
def test_colour_validation_always(config):
config.write("color = 'always'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "always"
def test_colour_validation_invalid(config):
config.write("color = 'on_weekends_only'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
), pytest.raises(ConfigurationException):
load_config()
|
test/pytest/test_search_sms.py | bitigchi/MuditaOS | 369 | 11166426 | # Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
from harness.interface.defs import key_codes
@pytest.mark.rt1051
@pytest.mark.usefixtures("phone_unlocked")
def test_search_sms(harness, sms_text, phone_number):
body = {"category": "message", "messageBody": sms_text, "phoneNumber": str(phone_number)}
messages = harness.endpoint_request("messages", "get", body)["body"]
assert len(messages) != 0
|
tests/storage/test_dict_storage.py | FrostByte266/neupy | 801 | 11166476 | import copy
import numpy as np
from neupy.utils import asfloat
from neupy import layers, storage
from neupy.storage import (
validate_data_structure, InvalidFormat,
ParameterLoaderError, load_layer_parameter,
)
from base import BaseTestCase
class DictStorageTestCase(BaseTestCase):
maxDiff = 10000
def test_storage_invalid_input_type(self):
network = [
layers.Input(10),
layers.Relu(5),
layers.Relu(2),
]
message = (
"Invalid input type. Input should be "
"network or optimizer with network"
)
with self.assertRaisesRegexp(TypeError, message):
storage.save_dict(network)
def test_storage_save_dict(self):
network = layers.join(
layers.parallel([
layers.Input(2, name='input-1'),
layers.PRelu(1, name='prelu')
], [
layers.Input(1, name='input-2'),
layers.Sigmoid(4, name='sigmoid'),
layers.BatchNorm(name='batch-norm'),
]),
layers.Concatenate(name='concatenate'),
layers.Softmax(3, name='softmax'),
)
dict_network = storage.save_dict(network)
expected_keys = ('metadata', 'layers', 'graph')
self.assertItemsEqual(expected_keys, dict_network.keys())
expected_metadata_keys = ('created', 'language', 'library', 'version')
actual_metadata_keys = dict_network['metadata'].keys()
self.assertItemsEqual(expected_metadata_keys, actual_metadata_keys)
self.assertEqual(len(dict_network['layers']), 7)
expected_layers = [{
'class_name': 'Input',
'configs': {'name': 'input-1', 'shape': (2,)},
'name': 'input-1',
}, {
'class_name': 'PRelu',
'configs': {'alpha_axes': (-1,), 'name': 'prelu', 'n_units': 1},
'name': 'prelu',
}, {
'class_name': 'Input',
'configs': {'name': 'input-2', 'shape': (1,)},
'name': 'input-2',
}, {
'class_name': 'Sigmoid',
'configs': {'name': 'sigmoid', 'n_units': 4},
'name': 'sigmoid',
}, {
'class_name': 'BatchNorm',
'configs': {
'alpha': 0.1,
'axes': (0,),
'epsilon': 1e-05,
'name': 'batch-norm'
},
'name': 'batch-norm',
}, {
'class_name': 'Concatenate',
'configs': {'axis': -1, 'name': 'concatenate'},
'name': 'concatenate',
}, {
'class_name': 'Softmax',
'configs': {'name': 'softmax', 'n_units': 3},
'name': 'softmax',
}]
actual_layers = []
for i, layer in enumerate(dict_network['layers']):
self.assertIn('parameters', layer, msg="Layer #" + str(i))
layer = copy.deepcopy(layer)
del layer['parameters']
actual_layers.append(layer)
self.assertEqual(actual_layers, expected_layers)
def test_storage_load_dict_using_names(self):
relu = layers.Relu(2, name='relu')
network = layers.join(layers.Input(10), relu)
weight = np.ones((10, 2))
bias = np.ones((2,))
storage.load_dict(network, {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'relu',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': weight},
'bias': {'trainable': True, 'value': bias},
}
}]
})
np.testing.assert_array_almost_equal(weight, self.eval(relu.weight))
np.testing.assert_array_almost_equal(bias, self.eval(relu.bias))
def test_storage_load_dict_using_wrong_names(self):
network = layers.join(
layers.Input(3),
layers.Relu(4, name='relu'),
layers.Linear(5, name='linear') >> layers.Relu(),
layers.Softmax(6, name='softmax'),
)
storage.load_dict(network, {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'name-1',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((3, 4))},
'bias': {'trainable': True, 'value': np.ones((4,))},
}
}, {
'name': 'name-2',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((4, 5))},
'bias': {'trainable': True, 'value': np.ones((5,))},
}
}, {
'name': 'name-3',
'class_name': 'Softmax',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((5, 6))},
'bias': {'trainable': True, 'value': np.ones((6,))},
}
}]
}, load_by='order', skip_validation=False)
relu = network.layer('relu')
self.assertEqual(12, np.sum(self.eval(relu.weight)))
self.assertEqual(4, np.sum(self.eval(relu.bias)))
linear = network.layer('linear')
self.assertEqual(20, np.sum(self.eval(linear.weight)))
self.assertEqual(5, np.sum(self.eval(linear.bias)))
softmax = network.layer('softmax')
self.assertEqual(30, np.sum(self.eval(softmax.weight)))
self.assertEqual(6, np.sum(self.eval(softmax.bias)))
def test_storage_load_dict_invalid_number_of_paramters(self):
network = layers.join(
layers.Input(3),
layers.Relu(4, name='relu'),
layers.Linear(5, name='linear') > layers.Relu(),
layers.Softmax(6, name='softmax'),
)
data = {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'name-1',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {
'trainable': True,
'value': np.ones((3, 4))
},
'bias': {'trainable': True, 'value': np.ones((4,))},
}
}]
}
with self.assertRaises(ParameterLoaderError):
storage.load_dict(network, data, ignore_missing=False)
def test_failed_loading_mode_for_storage(self):
network = layers.Input(2) >> layers.Sigmoid(1)
with self.assertRaisesRegexp(ValueError, "Invalid value"):
storage.load_dict(network, {}, load_by='unknown')
def test_failed_load_parameter_invalid_type(self):
sigmoid = layers.Sigmoid(1, bias=None)
network = layers.join(layers.Input(2), sigmoid)
network.create_variables()
with self.assertRaisesRegexp(ParameterLoaderError, "equal to None"):
load_layer_parameter(sigmoid, {
'parameters': {
'bias': {
'value': np.array([[0]]),
'trainable': True,
},
},
})
class StoredDataValidationTestCase(BaseTestCase):
def test_stored_data_dict_format_basics(self):
with self.assertRaises(InvalidFormat):
validate_data_structure([])
with self.assertRaises(InvalidFormat):
validate_data_structure({})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': {}})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': []})
def test_stored_data_layers_format(self):
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [[]]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({
'layers': [{
'parameters': [], # wrong type
'name': 'name',
}]
})
result = validate_data_structure({
'layers': [{
'parameters': {},
'name': 'name',
}]
})
self.assertIsNone(result)
def test_stored_data_parameters_format(self):
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': np.ones((2, 3)),
}
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': {
'data': np.ones((2, 3)),
},
}
}]})
result = validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': {
'value': np.ones((2, 3)),
'trainable': True,
},
}
}]})
self.assertIsNone(result)
def test_basic_skip_validation(self):
network = layers.Input(10) >> layers.Relu(1)
with self.assertRaises(InvalidFormat):
storage.load_dict(network, {}, skip_validation=False)
class TransferLearningTestCase(BaseTestCase):
def test_transfer_learning_using_position(self):
network_pretrained = layers.join(
layers.Input(10),
layers.Elu(5),
layers.Elu(2, name='elu'),
layers.Sigmoid(1),
)
network_new = layers.join(
layers.Input(10),
layers.Elu(5),
layers.Elu(2),
)
pretrained_layers_stored = storage.save_dict(network_pretrained)
with self.assertRaises(ParameterLoaderError):
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names_or_order',
ignore_missing=False)
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names_or_order',
ignore_missing=True)
random_input = asfloat(np.random.random((12, 10)))
new_network_output = self.eval(network_new.output(random_input))
pretrained_output = self.eval(
network_pretrained.end('elu').output(random_input))
np.testing.assert_array_almost_equal(
pretrained_output, new_network_output)
def test_transfer_learning_using_names(self):
network_pretrained = layers.join(
layers.Input(10),
layers.Elu(5, name='elu-a'),
layers.Elu(2, name='elu-b'),
layers.Sigmoid(1),
)
network_new = layers.join(
layers.Input(10),
layers.Elu(5, name='elu-a'),
layers.Elu(2, name='elu-b'),
layers.Elu(8, name='elu-c'), # new layer
)
pretrained_layers_stored = storage.save_dict(network_pretrained)
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names',
skip_validation=False,
ignore_missing=True)
random_input = asfloat(np.random.random((12, 10)))
pretrained_output = self.eval(
network_pretrained.end('elu-b').output(random_input))
new_network_output = self.eval(
network_new.end('elu-b').output(random_input))
np.testing.assert_array_almost_equal(
pretrained_output, new_network_output)
pred = self.eval(network_new.output(random_input))
self.assertEqual(pred.shape, (12, 8))
|
tests/roots/test-ext-autodoc/target/pep570.py | samdoran/sphinx | 4,973 | 11166478 | def foo(*, a, b):
pass
def bar(a, b, /, c, d):
pass
def baz(a, /, *, b):
pass
def qux(a, b, /):
pass
|
arbitrage/public_markets/huobicny.py | abaoj/bitcoin-arbitrage | 126 | 11166547 | # Copyright (C) 2017, JackYao <<EMAIL>>
from ._huobi import Huobi
class HuobiCNY(Huobi):
def __init__(self):
super().__init__("CNY", "btc")
|
clize/__init__.py | scholer/clize | 390 | 11166591 | # clize -- A command-line argument parser for Python
# Copyright (C) 2011-2016 by <NAME> and contributors. See AUTHORS and
# COPYING for details.
"""procedurally generate command-line interfaces from callables"""
from clize.parser import Parameter
from clize.runner import Clize, SubcommandDispatcher, run
from clize.legacy import clize, make_flag
from clize.errors import UserError, ArgumentError
__all__ = [
'run', 'Parameter', 'UserError',
'Clize', 'ArgumentError', 'SubcommandDispatcher',
'clize', 'make_flag'
]
|
backend/classifier/classifier/commands/build.py | pdehaan/facebook-political-ads | 228 | 11166596 | """
Builds our classifiers
"""
import click
import dill
from classifier.utilities import (get_classifier, confs, get_vectorizer,
classifier_path, train_classifier)
@click.option("--lang", help="Limit to language")
@click.command("build")
@click.pass_context
def build(ctx, lang):
"""
Build classifiers for each of our languages.
"""
for (directory, conf) in confs(ctx.obj["base"]):
if lang and conf["language"] != lang:
continue
model = train_classifier(get_classifier(), get_vectorizer(conf),
directory, conf["language"])
model_path = classifier_path(directory)
with open(model_path, 'wb') as classy:
dill.dump(model, classy)
print("Saved model {}".format(model_path))
|
examples/avro-cli.py | woodlee/confluent-kafka-python | 2,838 | 11166598 | #!/usr/bin/env python
#
# Copyright 2018 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from uuid import uuid4
from six.moves import input
from confluent_kafka import avro
# Parse Schema used for serializing User class
record_schema = avro.loads("""
{
"namespace": "confluent.io.examples.serialization.avro",
"name": "User",
"type": "record",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": "int"},
{"name": "favorite_color", "type": "string"}
]
}
""")
class User(object):
"""
User stores the deserialized user Avro record.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["name", "favorite_number", "favorite_color", "id"]
def __init__(self, name=None, favorite_number=None, favorite_color=None):
self.name = name
self.favorite_number = favorite_number
self.favorite_color = favorite_color
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return {
"name": self.name,
"favorite_number": self.favorite_number,
"favorite_color": self.favorite_color
}
def on_delivery(err, msg, obj):
"""
Handle delivery reports served from producer.poll.
This callback takes an extra argument, obj.
This allows the original contents to be included for debugging purposes.
"""
if err is not None:
print('Message {} delivery failed for user {} with error {}'.format(
obj.id, obj.name, err))
else:
print('Message {} successfully produced to {} [{}] at offset {}'.format(
obj.id, msg.topic(), msg.partition(), msg.offset()))
def produce(topic, conf):
"""
Produce User records
"""
from confluent_kafka.avro import AvroProducer
producer = AvroProducer(conf, default_value_schema=record_schema)
print("Producing user records to topic {}. ^c to exit.".format(topic))
while True:
# Instantiate new User, populate fields, produce record, execute callbacks.
record = User()
try:
record.name = input("Enter name: ")
record.favorite_number = int(input("Enter favorite number: "))
record.favorite_color = input("Enter favorite color: ")
# The message passed to the delivery callback will already be serialized.
# To aid in debugging we provide the original object to the delivery callback.
producer.produce(topic=topic, value=record.to_dict(),
callback=lambda err, msg, obj=record: on_delivery(err, msg, obj))
# Serve on_delivery callbacks from previous asynchronous produce()
producer.poll(0)
except KeyboardInterrupt:
break
except ValueError:
print("Invalid input, discarding record...")
continue
print("\nFlushing records...")
producer.flush()
def consume(topic, conf):
"""
Consume User records
"""
from confluent_kafka.avro import AvroConsumer
from confluent_kafka.avro.serializer import SerializerError
print("Consuming user records from topic {} with group {}. ^c to exit.".format(topic, conf["group.id"]))
c = AvroConsumer(conf, reader_value_schema=record_schema)
c.subscribe([topic])
while True:
try:
msg = c.poll(1)
# There were no messages on the queue, continue polling
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
record = User(msg.value())
print("name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n".format(
record.name, record.favorite_number, record.favorite_color))
except SerializerError as e:
# Report malformed record, discard results, continue polling
print("Message deserialization failed {}".format(e))
continue
except KeyboardInterrupt:
break
print("Shutting down consumer..")
c.close()
def main(args):
# handle common configs
conf = {'bootstrap.servers': args.bootstrap_servers,
'schema.registry.url': args.schema_registry}
if args.userinfo:
conf['schema.registry.basic.auth.credentials.source'] = 'USER_INFO'
conf['schema.registry.basic.auth.user.info'] = args.userinfo
if args.mode == "produce":
produce(args.topic, conf)
else:
# Fallback to earliest to ensure all messages are consumed
conf['group.id'] = args.group
conf['auto.offset.reset'] = "earliest"
consume(args.topic, conf)
if __name__ == '__main__':
# To use the provided cluster execute <source root>/tests/docker/bin/cluster_up.sh.
# Defaults assume the use of the provided test cluster.
parser = argparse.ArgumentParser(description="Example client for handling Avro data")
parser.add_argument('-b', dest="bootstrap_servers",
default="localhost:29092", help="Bootstrap broker(s) (host[:port])")
parser.add_argument('-s', dest="schema_registry",
default="http://localhost:8083", help="Schema Registry (http(s)://host[:port]")
parser.add_argument('-t', dest="topic", default="example_avro",
help="Topic name")
parser.add_argument('-u', dest="userinfo", default="ckp_tester:test_secret",
help="Userinfo (username:password); requires Schema Registry with HTTP basic auth enabled")
parser.add_argument('mode', choices=['produce', 'consume'],
help="Execution mode (produce | consume)")
parser.add_argument('-g', dest="group", default="example_avro",
help="Consumer group; required if running 'consumer' mode")
main(parser.parse_args())
|
utils/model_compression.py | cwwang15/neural_network_cracking | 196 | 11166601 | #!/usr/bin/env python
import h5py
import numpy as np
import struct
import sys
import argparse
import json
import itertools
class CompressActor(object):
RECORD_FMT = '>f'
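    # Every weight is stored in the side-car weight file as a 4-byte big-endian float ('>f').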
def __init__(self, fname, ofile, weight_file):
self.fname = fname
self.ofile = ofile
self.weight_file = weight_file
def act(self):
raise NotImplementedError()
class Compressor(CompressActor):
def act(self):
self.out_floats = []
out_struct = {
'groups' : []
}
with h5py.File(self.fname, 'r') as dataset:
for key in dataset.keys():
subgroup = dataset[key]
outgroup = {
'datasets' : []
}
assert type(subgroup) == h5py.Group
for gkey in subgroup.keys():
datablock = subgroup[gkey]
assert type(datablock) == h5py.Dataset
outgroup['datasets'].append(
[gkey, self.output_datablock(datablock)])
outgroup['attr'] = list(map(lambda t: (t[0], int(t[1])),
subgroup.attrs.items()))
out_struct['groups'].append([key, outgroup])
out_struct['attr'] = list(map(lambda t: (t[0], int(t[1])),
dataset.attrs.items()))
self.output_head(out_struct)
def output_datablock(self, datablock):
self.out_floats += datablock[:].flatten().tolist()
return list(datablock.shape)
def write_weight(self, weight, ofile):
ofile.write(struct.pack(self.RECORD_FMT, weight))
def output_head(self, out_struct):
with open(self.ofile, 'w') as ofile:
json.dump(out_struct, ofile)
with open(self.weight_file, 'wb') as ofile:
for item in self.out_floats:
self.write_weight(item, ofile)
class Decompressor(CompressActor):
def act(self):
with open(self.fname, 'r') as ifile:
item = json.load(ifile)
with open(self.weight_file, 'rb') as weightfile:
chunksize = struct.calcsize(self.RECORD_FMT)
self.weights = []
chunk = weightfile.read(chunksize)
while chunk != b'':
self.weights.append(self.read_weight(chunk))
chunk = weightfile.read(chunksize)
self.output(item)
def read_weight(self, chunk):
return struct.unpack(self.RECORD_FMT, chunk)[0]
def calc_num_elems(self, dimensions):
num_elems = 1
for dimension in dimensions:
num_elems *= dimension
return num_elems
def output(self, item):
with h5py.File(self.ofile, 'w') as ofile:
ctr = 0
for agroup in item['groups']:
key, groups = agroup
grp = ofile.create_group(key)
for attr in groups['attr']:
grp.attrs[attr[0]] = attr[1]
for adataset in groups['datasets']:
name, shape = adataset
num_elems = self.calc_num_elems(shape)
data = np.reshape(self.weights[ctr:num_elems + ctr], shape)
grp.create_dataset(name, data=data, dtype=np.float32)
ctr += num_elems
for attr in item['attr']:
ofile.attrs[attr[0]] = attr[1]
def main(args):
assert args.compress or args.decompress, (
'Must provide compress or decompress argument')
(Compressor if args.compress else Decompressor)(
args.ifile, args.ofile, args.weight_file).act()
if __name__=='__main__':
parser = argparse.ArgumentParser(
description='Compress and decompress model. ')
parser.add_argument('ifile', help='Input file. ')
parser.add_argument('ofile', help='Output file. ')
parser.add_argument('weight_file', help='File for weights. ')
parser.add_argument('-c', '--compress', action='store_true')
parser.add_argument('-d', '--decompress', action='store_true')
main(parser.parse_args())
|
experiment_impact_tracker/operating_system/common.py | ANarayan/experiment-impact-tracker | 202 | 11166605 | from sys import platform
def is_linux(*args, **kwargs):
return platform == "linux" or platform == "linux2"
|
python/general-python/create-drive-times/create-drive-times.py | NagarjunaManupati/ESRI | 272 | 11166626 | """
Importing concepts found at:
GitHub Developer Support
https://github.com/Esri/developer-support/tree/master/python/general-python/update-webmap-json
https://developers.arcgis.com/rest/analysis/api-reference/programmatically-accessing-analysis-services.htm
https://developers.arcgis.com/rest/analysis/api-reference/create-drivetime.htm
"""
import urllib
import urllib2
import json
import httplib
import time
import contextlib
import string
import smtplib
class ArcGISOnline(object):
def __init__(self, Username, Password):
self.username = Username
self.password = Password
self.__token = self.generateToken(self.username, self.password)['token']
self.__protocol = self.__useProtocol()
self.__orgInfo = self.__GetInfo()
self.__short = self.__orgInfo['urlKey']
self.__analysis_url = self.__orgInfo['helperServices']['analysis']['url']
def submit_request(self, request):
""" Returns the response from an HTTP request in json format."""
with contextlib.closing(urllib2.urlopen(request)) as response:
job_info = json.load(response)
return job_info
@staticmethod
def generateToken(username, password):
'''Generate a token using urllib modules for the input
username and password'''
url = "https://arcgis.com/sharing/generateToken"
data = {'username': username,
'password': password,
'referer' : 'https://arcgis.com',
'expires' : 1209600,
'f': 'json'}
data = urllib.urlencode(data)
request = urllib2.Request(url, data)
response = urllib2.urlopen(request)
return json.loads(response.read())
@property
def token(self):
'''Makes the non-public token read-only as a public token property'''
return self.__token
@property
def AnalysisURL(self):
        '''Makes the non-public analysis URL read-only as a public property'''
return self.__analysis_url
def __useProtocol(self):
tokenResponse = self.generateToken(self.username, self.password)
if tokenResponse['ssl']:
ssl = 'https'
else:
ssl = 'http'
return ssl
def __GetInfo(self):
        '''Get information about the specified organization.
        This includes the short name of the organization (['urlKey'])
        as well as the organization ID (['id']).'''
URL= '{}://arcgis.com/sharing/rest/portals/self?f=json&token={}'.format(self.__protocol,self.__token)
request = urllib2.Request(URL)
response = urllib2.urlopen(request)
return json.loads(response.read())
def analysis_job(self, analysis_url, task, params):
""" Submits an Analysis job and returns the job URL for monitoring the job
status in addition to the json response data for the submitted job."""
# Unpack the Analysis job parameters as a dictionary and add token and
# formatting parameters to the dictionary. The dictionary is used in the
# HTTP POST request. Headers are also added as a dictionary to be included
# with the POST.
#
print("Submitting analysis job...")
params["f"] = "json"
params["token"] = self.__token
headers = {"Referer":"http://www.arcgis.com"}
task_url = "{}/{}".format(analysis_url, task)
submit_url = "{}/submitJob?".format(task_url)
request = urllib2.Request(submit_url, urllib.urlencode(params), headers)
analysis_response = self.submit_request(request)
if analysis_response:
# Print the response from submitting the Analysis job.
#
print(analysis_response)
return task_url, analysis_response
else:
raise Exception("Unable to submit analysis job.")
def analysis_job_status(self, task_url, job_info):
""" Tracks the status of the submitted Analysis job."""
if "jobId" in job_info:
# Get the id of the Analysis job to track the status.
#
job_id = job_info.get("jobId")
job_url = "{}/jobs/{}?f=json&token={}".format(task_url, job_id, self.__token)
request = urllib2.Request(job_url)
job_response = self.submit_request(request)
# Query and report the Analysis job status.
#
if "jobStatus" in job_response:
while not job_response.get("jobStatus") == "esriJobSucceeded":
time.sleep(5)
request = urllib2.Request(job_url)
job_response = self.submit_request(request)
print(job_response)
if job_response.get("jobStatus") == "esriJobFailed":
raise Exception("Job failed.")
elif job_response.get("jobStatus") == "esriJobCancelled":
raise Exception("Job cancelled.")
elif job_response.get("jobStatus") == "esriJobTimedOut":
raise Exception("Job timed out.")
if "results" in job_response:
return job_response
else:
raise Exception("No job results.")
else:
raise Exception("No job url.")
def analysis_job_results(self, task_url, job_info):
""" Use the job result json to get information about the feature service
created from the Analysis job."""
# Get the paramUrl to get information about the Analysis job results.
#
if "jobId" in job_info:
job_id = job_info.get("jobId")
if "results" in job_info:
results = job_info.get("results")
result_values = {}
for key in results.keys():
param_value = results[key]
if "paramUrl" in param_value:
param_url = param_value.get("paramUrl")
result_url = "{}/jobs/{}/{}?token={}&f=json".format(task_url,
job_id,
param_url,
self.__token)
request = urllib2.Request(result_url)
param_result = self.submit_request(request)
job_value = param_result.get("value")
result_values[key] = job_value
return result_values
else:
raise Exception("Unable to get analysis job results.")
else:
raise Exception("Unable to get analysis job results.")
def GetTravelModes(self, FORMOFTRAVEL):
url = "http://logistics.arcgis.com/arcgis/rest/services/World/Utilities/GPServer/GetTravelModes/execute?token={0}&f=p<PASSWORD>".format(self.__token)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
responseJ = json.loads(response.read())
for mode in responseJ['results'][0]['value']['features']:
if mode['attributes']['Name'] == FORMOFTRAVEL:
return mode['attributes']['TravelMode']
def CreateDriveTimes(self, featureLayerURL, WHERE_CLAUSE, breakValues, breakUnits, overlapPolicy, OUTPUTNAME):
data = {}
data['inputLayer'] = {'url' : featureLayerURL,
'filter' : WHERE_CLAUSE
}
data['travelMode'] = self.GetTravelModes("Driving Time")
data['breakValues'] = breakValues
data['breakUnits'] = breakUnits
data['overlapPolicy'] = overlapPolicy
data['outputName'] = {"serviceProperties": {"name": OUTPUTNAME}}
task_url, job_info = self.analysis_job(self.__analysis_url, "CreateDriveTimeAreas", data)
job_info = self.analysis_job_status(task_url, job_info)
job_values = self.analysis_job_results(task_url, job_info)
return job_values
if __name__ == '__main__':
username = "thisIsAUserName"
password = "<PASSWORD>!"
onlineAccount = ArcGISOnline(username, password)
jobResults = onlineAccount.CreateDriveTimes("URLTOFEATURESERVICE", "OBJECTID = 4", [5.0, 10.0, 15.0], "Minutes", "Split", "ThisIsAnOutput")
print "DONE"
|
openfda/covid19serology/tests/pipeline_test.py | FDA/openfda | 388 | 11166661 | #!/usr/bin/env python
# coding=utf-8
import shutil
import tempfile
import unittest
from pickle import *
import leveldb
import openfda.ndc.pipeline
from openfda.ndc.pipeline import *
from openfda.covid19serology.pipeline import SerologyCSV2JSON
from openfda.tests.api_test_helpers import *
class SerologyPipelineTests(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
openfda.covid19serology.pipeline.SEROLOGY_TEST_SYNC_DIR = join(self.test_dir, 'covid19serology/s3_sync')
openfda.covid19serology.pipeline.SEROLOGY_TEST_JSON_DB_DIR = join(self.test_dir, 'covid19serology/json.db')
os.makedirs(openfda.covid19serology.pipeline.SEROLOGY_TEST_SYNC_DIR)
shutil.copyfile(
os.path.join(dirname(os.path.abspath(__file__)), "20200421_Euroimmun_SARS-COV-2_ELISA_(IgG)_results.csv"),
os.path.join(openfda.covid19serology.pipeline.SEROLOGY_TEST_SYNC_DIR, "input.csv"))
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_csv_to_json(self):
csv2json = SerologyCSV2JSON()
csv2json.run()
ldb = leveldb.LevelDB(os.path.join(csv2json.output().path, 'shard-00000-of-00001.db'))
data = list(ldb.RangeIter())
eq_(110, len(data))
# Verify base logic.
row = loads(data[0][1])
eq_('Euroimmun', row.get('manufacturer'))
eq_('SARS-COV-2 ELISA (IgG)', row.get('device'))
eq_('4/21/2020', row.get('date_performed'))
eq_('E200330DT', row.get('lot_number'))
eq_('Panel 1', row.get('panel'))
eq_('1', row.get('sample_no'))
eq_('C0001', row.get('sample_id'))
eq_('Serum', row.get('type'))
eq_('Negatives', row.get('group'))
eq_('NA', row.get('days_from_symptom'))
eq_('NA', row.get('igm_result'))
eq_('Negative', row.get('igg_result'))
eq_('NA', row.get('iga_result'))
eq_('NA', row.get('pan_result'))
eq_('NA', row.get('igm_igg_result'))
eq_('Pass', row.get('control'))
eq_('0', row.get('igm_titer'))
eq_('0', row.get('igg_titer'))
eq_('Negative', row.get('igm_truth'))
eq_('Negative', row.get('igg_truth'))
eq_('Negative', row.get('antibody_truth'))
eq_('NA', row.get('igm_agree'))
eq_('TN', row.get('igg_agree'))
eq_('NA', row.get('iga_agree'))
eq_('NA', row.get('pan_agree'))
eq_('NA', row.get('igm_igg_agree'))
eq_('TN', row.get('antibody_agree'))
# Verify some variations.
row = loads(data[55][1])
eq_('29', row.get('days_from_symptom'))
eq_('400', row.get('igm_titer'))
eq_('1600', row.get('igg_titer'))
def main(argv):
unittest.main(argv=argv)
if __name__ == '__main__':
unittest.main()
|
app/oauth_office365/helper.py | larrycameron80/PwnAuth | 304 | 11166663 | <filename>app/oauth_office365/helper.py
from .models import Victim, Application
from datetime import datetime
from requests_oauthlib import OAuth2Session
from django.utils.timezone import make_aware
from django.db.utils import IntegrityError
import copy
from collections import OrderedDict
import logging
logger = logging.getLogger('oauth.office365')
def get_authorization_url(app):
"""
    Helper function to build the authorization URL for the OAuth provider.
:return: A string representing the authorization URL
"""
microsoft = get_oauth_session()
authorization_url, state = microsoft.authorization_url(
app.authorization_url,
response_mode="form_post",
)
return authorization_url
def store_token(app, token):
"""
Takes a token dictionary returned by the Microsoft OAuth provider and stores it
in the database. Using the required 'user.read' scope it fetches some identifying information
to store with the token
:param app: The Application object
:param token: The token dictionary returned by the provider
:return:
"""
microsoft = get_oauth_session(token=token)
data = microsoft.get('https://graph.microsoft.com/v1.0/me').json()
victim = Victim(
name='Not given' if data['displayName'] is None else data['displayName'],
email=data['userPrincipalName'],
access_token=token['access_token'],
refresh_token=token['refresh_token'],
expires_at=make_aware(datetime.fromtimestamp(token['expires_at']))
)
try:
victim.save()
logger.info('Received token for user %s', victim.email, extra={'user': 'APP'})
except IntegrityError:
logger.info('Updated token for user %s', victim.email, extra={'user': 'APP'})
victim = Victim.objects.get(email=data['userPrincipalName'])
victim.refresh_token = token['refresh_token']
victim.access_token = token['access_token']
victim.expires_at = make_aware(datetime.fromtimestamp(token['expires_at']))
victim.save()
def make_token_updater(victim):
"""
Helper function to generate a victim aware token_updater function.
:param victim: the Victim object to save the updated token to
:return: token_updater(token) A Victim aware token updater that saves refreshed token to the given Victim
"""
def token_updater(token):
victim.access_token = token['access_token']
victim.refresh_token = token['refresh_token']
victim.expires_at = datetime.fromtimestamp(float(token['expires_at']))
victim.save()
return token_updater
def get_oauth_session(victim=None, token=None):
"""
Helper function to instantiate a OAuth2Session object configured to handle interaction
with the Azure v2 endpoint. Defines:
* Scope for the session from the Application scope
* Redirect URI
* Token URI
* Authorization URI
* Refresh URI
* Token if one exists
* Token updater
* Extra kwargs for token update
:return: An instance of OAuth2Session ready for use
"""
app = Application.objects.first()
if token:
token = token
token_updater = None
elif victim:
token = {
'access_token': victim.access_token,
'refresh_token': victim.refresh_token,
'expires_at': victim.expires_at.timestamp(),
'token_type': 'bearer'
}
token_updater = make_token_updater(victim)
else:
token = None
token_updater = None
extra = {
'redirect_uri': app.redirect_url,
'client_id': app.client_id,
'client_secret': app.client_secret
}
microsoft = OAuth2Session(
app.client_id,
token=token,
scope=app.scopes.split(','),
auto_refresh_kwargs=extra,
auto_refresh_url=app.token_url,
token_updater=token_updater,
redirect_uri=app.redirect_url,
)
return microsoft
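# Note on ``prune`` below: it recursively walks a nested dict/list structure
# and deletes entries whose values are None or that collapse to empty
# containers, returning True when the pruned entry itself ended up empty.
# Illustrative sketch (the data shown is hypothetical):
#   d = {'msg': {'body': None, 'ids': []}, 'keep': 1}
#   prune(d, 'msg')   # leaves d == {'keep': 1}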
def prune(obj, key):
# empty = True
if type(obj[key]) is dict or type(obj[key]) is OrderedDict:
empty = True
for k, v in copy.copy(obj[key]).items():
if type(v) is not list and type(v) is not dict and type(v) is not OrderedDict and v is not None:
empty = False
elif type(v) is list:
for i in range(0, len(v)):
empty = prune(v, i)
if len(v) == 0:
del obj[key][k]
else:
empty = prune(obj[key], k)
elif type(obj[key]) is list:
for i in range(0, len(obj[key])):
empty = prune(obj[key], i)
if len(obj[key]) == 0:
del obj[key]
else:
empty = False
else:
if obj[key] is not None:
empty = False
if empty:
del obj[key]
return empty
def refresh_token(app, victim):
"""
Takes a victim and refreshes their access token. Saves the resulting new token parameters
to the Victim object and returns the updated fields as well
:param app: The Application object representing the OAuth provider
:param victim: The Victim to be refreshed
:return: A Dictionary containing the updated token parameters
"""
microsoft = get_oauth_session(victim)
extra = {
'redirect_uri': app.redirect_url,
'client_id': app.client_id,
'client_secret': app.client_secret
}
refreshed_token = microsoft.refresh_token(
app.token_url,
**extra
)
victim.access_token = refreshed_token['access_token']
victim.refresh_token = refreshed_token['refresh_token']
victim.expires_at = datetime.fromtimestamp(float(refreshed_token['expires_at']))
victim.save()
logger.info('Refreshed token for user %s', victim.email, extra={'user': 'APP'})
return {
'access_token': refreshed_token['access_token'],
'refresh_token': refreshed_token['refresh_token'],
'expires_at': refreshed_token['expires_at']
}
|
tests/test_data/test_languages.py | ishine/wikipron | 111 | 11166673 | <gh_stars>100-1000
import json
import os
_REPO_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
_LANGUAGES = os.path.join(_REPO_DIR, "data/scrape/lib/languages.json")
def test_casefold_value():
"""Check if each language in data/scrape/lib/languages.json
has a value for 'casefold' key.
"""
missing_languages = set()
with open(_LANGUAGES, "r") as source:
languages = json.load(source)
for language in languages:
if languages[language]["casefold"] is None:
missing_languages.add(languages[language]["wiktionary_name"])
assert not missing_languages, (
"The following languages do not have a 'casefold' value "
"in data/scrape/lib/languages.json:"
f"{missing_languages}"
)
|
Scripts/dump-classes.py | kolinkrewinkel/Multiplex | 314 | 11166683 | #!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/Modules/IDEHeaders/IDEHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation', 'IDEKit']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
for framework in frameworks:
dump_framework(frameworkPath('Frameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
chainer_chemistry/dataset/preprocessors/gwm_preprocessor.py | pfnet/chainerchem | 184 | 11166694 | from chainer_chemistry.dataset.preprocessors.common import construct_supernode_feature # NOQA
from chainer_chemistry.dataset.preprocessors.ggnn_preprocessor import GGNNPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gin_preprocessor import GINPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.nfp_preprocessor import NFPPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.rsgcn_preprocessor import RSGCNPreprocessor # NOQA
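# Each GWM preprocessor below extends its base preprocessor so that, in
# addition to the atom and adjacency arrays, it also returns a supernode
# feature vector built by construct_supernode_feature (used by Graph Warp
# Module models).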
class NFPGWMPreprocessor(NFPPreprocessor):
def get_input_features(self, mol):
atom_array, adj_array = super(
NFPGWMPreprocessor, self).get_input_features(mol)
super_node_x = construct_supernode_feature(
mol, atom_array, adj_array)
return atom_array, adj_array, super_node_x
class GGNNGWMPreprocessor(GGNNPreprocessor):
def get_input_features(self, mol):
atom_array, adj_array = super(
GGNNGWMPreprocessor, self).get_input_features(mol)
super_node_x = construct_supernode_feature(
mol, atom_array, adj_array)
return atom_array, adj_array, super_node_x
class GINGWMPreprocessor(GINPreprocessor):
def get_input_features(self, mol):
atom_array, adj_array = super(
GINGWMPreprocessor, self).get_input_features(mol)
super_node_x = construct_supernode_feature(
mol, atom_array, adj_array)
return atom_array, adj_array, super_node_x
class RSGCNGWMPreprocessor(RSGCNPreprocessor):
def get_input_features(self, mol):
atom_array, adj_array = super(
RSGCNGWMPreprocessor, self).get_input_features(mol)
super_node_x = construct_supernode_feature(
mol, atom_array, adj_array)
return atom_array, adj_array, super_node_x
|
control_and_ai/function_approximation_rl/train_function_approximation_rl.py | julianxu/Rockets | 275 | 11166700 | """
Author: <NAME>
Date: 10/05/2017
Description: Scripts that train the Function Approximation RL networks.
"""
import _pickle
import logging
from control_and_ai.helpers import *
from control_and_ai.function_approximation_q_learning import *
from main_simulation import *
verbose = True
logger = logging.getLogger(__name__)
if verbose:
logging.basicConfig(format='%(asctime)s - %(message)s\t', datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO)
logging.basicConfig(format='%(asctime)s - %(message)s\t', datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.ERROR)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
simulation_settings = {'Side Engines': True,
'Clouds': True,
'Vectorized Nozzle': True,
'Graph': False,
'Render': False,
'Starting Y-Pos Constant': 1,
'Initial Force': 'random',
'Rows': 1,
'Columns': 2,
#'Initial Coordinates': (0.8,0.5,0),
'Test': False,
'Episodes': 5000}
evo_strategy_parameters = {
'population_size': 100,
'action_size': 3,
'noise_standard_deviation': 0.1,
'number_of_generations': 1000,
'learning_rate': 0.00025,
'state_size': 8,
'max_num_actions': 250
}
env = []
for i in range(evo_strategy_parameters['population_size']+1):
env.append(RocketLander(simulation_settings))
def rocket_rl_function_approximation(env, settings : dict, logger, load_path=None, save_path=None, low_discretization=True):
if settings['Test']:
print("Testing rocket_rl_function_approximation with load_path = {0}, save_path = {1}".format(load_path, save_path))
else:
print("Training rocket_rl_function_approximation with load_path = {0}, save_path = {1}".format(load_path,
save_path))
env.reset()
s = env.get_state_with_barge_and_landing_coordinates(untransformed_state=False) # remove this line if normal state
reinforcedControl = FunctionApproximation(s, load=load_path, low_discretization=low_discretization, epsilon=0.001, alpha=0.001)
max_steps = 1000
steps = 0
def train():
episode = 1
done = False
for episode in range(settings['Episodes']):
s = env.reset()
steps = 0
while steps < max_steps:
a = reinforcedControl.act()
s, r, done, info = env.step(a)
s = env.get_state_with_barge_and_landing_coordinates(untransformed_state=False)
reinforcedControl.learn(s, r)
if episode % 50 == 0 or settings['Render']:
env.refresh(render=True)
if done:
logger.info('Episode:\t{0}\tReward:\t{1}'.format(episode, reinforcedControl.total_reward))
reinforcedControl.reset()
break
steps += 1
if episode % 50 == 0:
if save_path is not None:
logger.info('Saving Model at Episode:\t{0}'.format(episode))
_pickle.dump(reinforcedControl.log, open(save_path, "wb"))
def test():
episode = 0
while episode < settings['Episodes']:
a = reinforcedControl.act()
s, r, done, info = env.step(a)
if settings['Render']:
env.refresh(render=True)
logger.info('Episode:\t{0}\tReward:\t{1}'.format(episode, reinforcedControl.total_reward))
if done:
env.reset()
reinforcedControl.reset()
episode += 1
if isinstance(settings.get('Test'), bool):
if settings['Test']:
test()
else:
train()
else:
train()
def train_low_discretization_rl():
print("Training LOW Discretization RL-Function Approximator")
load_path = None#'rl_linear_function_approximation_low_function_discretization_squared_states_2000_episodes.p'
save_path = 'rl_linear_function_approximation_low_function_discretization_trained_at_once.p'
rocket_rl_function_approximation(env[0], settings=simulation_settings, logger=logger, load_path=load_path,
save_path=save_path, low_discretization=True)
def train_high_discretization_rl():
print("Training HIGH Discretization RL-Function Approximator")
load_path = 'rl_linear_function_approximation_high_function_discretization_5000_episodes_trained_at_once.p'
save_path = 'rl_linear_function_approximation_high_function_discretization_5000_episodes_trained_at_once.p'
rocket_rl_function_approximation(env[0], settings=simulation_settings, logger=logger, load_path=load_path,
save_path=save_path, low_discretization=False)
#train_low_discretization_rl()
train_high_discretization_rl()
|
bagel-data/config/tfclassif.py | AnneBeyer/tgen | 222 | 11166714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
config = {'language': 'en',
'selector': '',
'use_tokens': True,
#'nn': '1-hot',
'nn': 'emb',
#'nn_shape': 'ff1',
'nn_shape': 'rnn',
'num_hidden_units': 128,
'passes': 100,
'min_passes': 20,
'randomize': True,
'batch_size': 20,
'alpha': 1e-3,
'emb_size': 50,
'max_tree_len': 50,
'validation_freq': 1,
}
|
train/datamodules/touch_detect.py | Pandinosaurus/PyTouch | 149 | 11166722 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy as np
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Subset
from pytouch.datasets import DigitFolder
from pytouch.tasks import TouchDetect
_log = logging.getLogger(__name__)
class TouchDetectDataModule(LightningDataModule):
def __init__(
self,
cfg,
*args,
**kwargs,
):
super().__init__()
self.cfg = cfg
self.transform = TouchDetect.transform
def setup(self, stage=None):
train_dataset = DigitFolder(
root=self.cfg.data.path,
exclude=self.cfg.data.exclude,
baseline=None,
transform=self.transform(self.cfg.data.transform, train=True),
)
val_dataset = DigitFolder(
root=self.cfg.data.path,
exclude=self.cfg.data.exclude,
baseline=None,
transform=self.transform(self.cfg.data.transform, train=False),
)
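        # The same folder is loaded twice so that training and validation can
        # use different transform pipelines; the Subsets built below draw from
        # disjoint shuffled index ranges, split by cfg.training.train_val_ratio.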
self.dataset_len = len(train_dataset)
dataset_idx = list(range(self.dataset_len))
np.random.shuffle(dataset_idx)
split_train_val = int(
np.floor(self.cfg.training.train_val_ratio * self.dataset_len)
)
self.train_idx, self.val_idx = (
dataset_idx[:split_train_val],
dataset_idx[split_train_val:],
)
_log.info(
f"Total dataset size: {self.dataset_len}, train {len(self.train_idx)}, val {len(self.val_idx)}"
+ f" using sensors {set(train_dataset.serials)}"
)
self.train_dataset = Subset(train_dataset, self.train_idx)
self.val_dataset = Subset(val_dataset, self.val_idx)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.cfg.training.batch_size,
num_workers=self.cfg.training.n_threads,
pin_memory=self.cfg.training.pin_memory,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.cfg.training.batch_size,
num_workers=self.cfg.training.n_threads,
pin_memory=self.cfg.training.pin_memory,
shuffle=False,
)
|
Chapter18/hw/libhw/servo.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 621 | 11166741 | import pyb
class ServoBrain:
FREQ = 50 # 20ms -- standard pulse interval for servos
MIN_PERCENT = 2.3
MAX_PERCENT = 12.7
MIN_POS = 0
MAX_POS = 1
def __init__(self):
"""
Construct servo brain
"""
self._timer_channels = None
self._inversions = None
self._channels = None
self._positions = None
self._timers = None
def init(self, timer_channels, inversions=None):
"""
:param timer_channels: list of tuples (pin_name, (timer, channel))
:param inversions: list of bools specifying servos to be inverted, if None, no inversions
"""
self._timer_channels = timer_channels
self._inversions = inversions if inversions is not None else [False for _ in timer_channels]
self._timers = {}
self._channels = []
self._positions = []
for pin_name, (t_idx, ch_idx) in timer_channels:
if t_idx not in self._timers:
self._timers[t_idx] = pyb.Timer(t_idx, freq=self.FREQ)
pin = pyb.Pin(pin_name, pyb.Pin.OUT)
self._channels.append(self._timers[t_idx].channel(ch_idx, pyb.Timer.PWM, pin=pin))
self._positions.append(0.0)
self._apply_positions(self._positions)
def deinit(self):
if self._timers is None:
return
for t in self._timers.values():
t.deinit()
self._timer_channels = None
self._channels = None
self._positions = None
self._timers = None
@property
def positions(self):
return self._positions
@positions.setter
def positions(self, value):
self._positions = value
self._apply_positions(self._positions)
@classmethod
def position_to_percent(cls, pos):
return (pos-cls.MIN_POS)*(cls.MAX_PERCENT - cls.MIN_PERCENT)/(cls.MAX_POS - cls.MIN_POS) + cls.MIN_PERCENT
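    # position_to_percent() linearly maps a position in [MIN_POS, MAX_POS] to a
    # PWM duty cycle in [MIN_PERCENT, MAX_PERCENT]. For example, pos=0.5 gives
    # 2.3 + 0.5 * (12.7 - 2.3) = 7.5 percent of the 20 ms frame, i.e. a ~1.5 ms
    # pulse, the conventional servo neutral position.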
def _apply_positions(self, values):
for p, ch, inv in zip(values, self._channels, self._inversions):
if inv:
p = self.MAX_POS - p
ch.pulse_width_percent(self.position_to_percent(p))
_PINS_TO_TIMER = {
"B6": (4, 1),
"B7": (4, 2),
"B8": (4, 3),
"B9": (4, 4),
"B10": (2, 3),
"B11": (2, 4),
}
def pins_to_timer_channels(pins):
"""
Convert list of pins to list of tuples (timer, channel). This function is hardware-specific
:param pins: list of pin names
:return: list of (pin_name, (timer, channel)) tuples
"""
res = []
for p in pins:
pair = _PINS_TO_TIMER.get(p)
assert pair is not None
res.append((p, pair))
return res
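# Illustrative usage (a sketch; the pin names and positions are hypothetical):
#   brain = ServoBrain()
#   brain.init(pins_to_timer_channels(["B6", "B7"]))
#   brain.positions = [0.25, 0.75]   # values in the MIN_POS..MAX_POS range
#   ...
#   brain.deinit()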
class ServoBrainOld:
"""
Shouldn't be used, kept only for demonstration purposes
"""
MIN_POS = -10
MAX_POS = 10
NEUT_POS = 0
_MIN_TIME_US = 500
_MAX_TIME_US = 2500
_NEUTRAL_TIME_US = 1500
_INTERVAL_US = 10000
_BASE_FREQ = 1000000
def __init__(self, servo_pins, base_timer_index=1):
if isinstance(servo_pins, str):
servo_pins = (servo_pins, )
self.servo_pins = servo_pins
self.base_timer_index = base_timer_index
self._base_timer = pyb.Timer(base_timer_index)
self._pins = []
self._pin_timers = []
self._positions = []
self._frequencies = []
self._pin_callbacks = []
@property
def positions(self):
return self._positions
@positions.setter
def positions(self, new_pos):
self._positions = new_pos
self._frequencies = [self.pos_to_freq(p) for p in new_pos]
@classmethod
def pos_to_freq(cls, pos):
if pos < cls.MIN_POS:
pos = cls.MIN_POS
elif pos > cls.MAX_POS:
pos = cls.MAX_POS
return cls._BASE_FREQ // (cls._MIN_TIME_US + (pos + 10) * 100)
def init(self):
self.positions = [self.NEUT_POS for _ in self.servo_pins]
for idx, pin in enumerate(self.servo_pins):
self._pin_timers.append(pyb.Timer(idx + self.base_timer_index + 1))
p = pyb.Pin(pin, pyb.Pin.OUT)
p.init(pyb.Pin.OUT, pyb.Pin.PULL_UP)
p.low()
self._pins.append(p)
self._pin_callbacks.append(self._get_pin_callback(idx))
self._base_timer.init(freq=self._BASE_FREQ//self._INTERVAL_US)
self._base_timer.callback(lambda t: self._base_callback())
def deinit(self):
self._base_timer.deinit()
self._pins.clear()
for t in self._pin_timers:
t.deinit()
self._pin_timers.clear()
self._positions.clear()
self._pin_callbacks.clear()
def _get_pin_callback(self, idx):
def func(t):
if not self._pins[idx].value():
self._pins[idx].high()
else:
self._pins[idx].low()
t.deinit()
return func
def _base_callback(self):
for idx in range(len(self.servo_pins)):
self._pin_timers[idx].callback(self._pin_callbacks[idx])
self._pin_timers[idx].init(freq=self._frequencies[idx])
|
deprecated/benchmark/ps/ctr/infer_args.py | hutuxian/FleetX | 170 | 11166748 | <reponame>hutuxian/FleetX
import argparse
import logging
def parse_args():
parser = argparse.ArgumentParser(description="PaddlePaddle DeepFM example")
parser.add_argument(
'--model_path',
type=str,
required=True,
help="The path of model parameters gz file")
parser.add_argument(
'--data_path',
type=str,
required=True,
help="The path of the dataset to infer")
parser.add_argument(
'--embedding_size',
type=int,
default=10,
help="The size for embedding layer (default:10)")
parser.add_argument(
'--sparse_feature_dim',
type=int,
default=1000001,
help="The size for embedding layer (default:1000001)")
parser.add_argument(
'--batch_size',
type=int,
default=1000,
help="The size of mini-batch (default:1000)")
return parser.parse_args()
|
kaldi/patch/steps/nnet3/train_cvector_dnn.py | ishine/asv-subtools | 370 | 11166750 | #!/usr/bin/env python
# Copyright 2016 <NAME>.
# 2016 <NAME>
# 2017 Johns Hopkins University (author: <NAME>)
# 2018 <NAME>
# Apache 2.0.
""" This script is based on steps/nnet3/tdnn/train.sh
"""
from __future__ import print_function
import argparse
import logging
import os
import pprint
import shutil
import sys
import traceback
sys.path.insert(0, 'steps')
import libs.nnet3.train.common as common_train_lib
import libs.common as common_lib
import libs.nnet3.train.frame_level_objf as train_lib
import libs.nnet3.report.log_parse as nnet3_log_parse
logger = logging.getLogger('libs')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
"%(funcName)s - %(levelname)s ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting DNN trainer (train_dnn.py)')
def get_args():
""" Get args from stdin.
We add compulsory arguments as named arguments for readability
The common options are defined in the object
libs.nnet3.train.common.CommonParser.parser.
See steps/libs/nnet3/train/common.py
"""
parser = argparse.ArgumentParser(
description="""Trains a feed forward DNN acoustic model using the
cross-entropy objective. DNNs include simple DNNs, TDNNs and CNNs.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve',
parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
# Parameters for the optimization
parser.add_argument("--trainer.optimization.minibatch-size",
type=str, dest='minibatch_size', default='512',
help="""Size of the minibatch used in SGD training
(argument to nnet3-merge-egs); may be a more general
rule as accepted by the --minibatch-size option of
nnet3-merge-egs; run that program without args to see
the format.""")
parser.add_argument("--trainer.num-jobs-compute-prior", type=int,
dest='num_jobs_compute_prior', default=10,
help="The prior computation jobs are single "
"threaded and run on the CPU")
# General options
parser.add_argument("--am-output-name", type=str, required=True,
help="The name of am output-node")
parser.add_argument("--xvec-output-name", type=str, required=True,
help="The name of xvec output-node")
parser.add_argument("--am-weight", type=float, default=1.0,
help="The am weight")
parser.add_argument("--xvec-weight", type=float, default=1.0,
help="The xvec weight")
parser.add_argument("--am-egs-dir", type=str, required=True,
help="Directory with am egs for training")
parser.add_argument("--xvec-egs-dir", type=str, required=True,
help="Directory with xvector egs for training")
parser.add_argument("--dir", type=str, required=True,
help="Directory to store the models and "
"all other files.")
print(' '.join(sys.argv), file=sys.stderr)
print(sys.argv, file=sys.stderr)
args = parser.parse_args()
[args, run_opts] = process_args(args)
return [args, run_opts]
def process_args(args):
""" Process the options got from get_args()
"""
if (not os.path.exists(args.dir)
or not os.path.exists(args.dir+"/configs")):
raise Exception("This scripts expects {0} to exist and have a configs "
"directory which is the output of "
"make_configs.py script")
# set the options corresponding to args.use_gpu
run_opts = common_train_lib.RunOpts()
if args.use_gpu:
if not common_lib.check_if_cuda_compiled():
logger.warning(
"""You are running with one thread but you have not compiled
for CUDA. You may be running a setup optimized for GPUs.
If you have GPUs and have nvcc installed, go to src/ and do
./configure; make""")
run_opts.train_queue_opt = "--gpu 1"
run_opts.parallel_train_opts = ""
run_opts.combine_queue_opt = "--gpu 1"
run_opts.prior_gpu_opt = "--use-gpu=yes"
run_opts.prior_queue_opt = "--gpu 1"
else:
logger.warning("Without using a GPU this will be very slow. "
"nnet3 does not yet support multiple threads.")
run_opts.train_queue_opt = ""
run_opts.parallel_train_opts = "--use-gpu=no"
run_opts.combine_queue_opt = ""
run_opts.prior_gpu_opt = "--use-gpu=no"
run_opts.prior_queue_opt = ""
run_opts.command = args.command
run_opts.egs_command = (args.egs_command
if args.egs_command is not None else
args.command)
run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior
return [args, run_opts]
def train(args, run_opts):
""" The main function for training.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
run_opts: RunOpts object obtained from the process_args()
"""
arg_string = pprint.pformat(vars(args))
logger.info("Arguments for the experiment\n{0}".format(arg_string))
# Set some variables.
config_dir = '{0}/configs'.format(args.dir)
am_var_file = '{0}/vars_am'.format(config_dir)
xvec_var_file = '{0}/vars_xvec'.format(config_dir)
am_variables = common_train_lib.parse_generic_config_vars_file(am_var_file)
xvec_variables = common_train_lib.parse_generic_config_vars_file(xvec_var_file)
# Set some variables.
try:
am_model_left_context = am_variables['model_left_context']
am_model_right_context = am_variables['model_right_context']
xvec_model_left_context = xvec_variables['model_left_context']
xvec_model_right_context = xvec_variables['model_right_context']
except KeyError as e:
raise Exception("KeyError {0}: Variables need to be defined in "
"{1}".format(str(e), '{0}/configs'.format(args.dir)))
am_left_context = am_model_left_context
am_right_context = am_model_right_context
xvec_left_context = xvec_model_left_context
xvec_right_context = xvec_model_right_context
# Initialize as "raw" nnet, prior to training the LDA-like preconditioning
# matrix. This first config just does any initial splicing that we do;
# we do this as it's a convenient way to get the stats for the 'lda-like'
# transform.
if (args.stage <= -5) and os.path.exists(args.dir+"/configs/init.config"):
logger.info("Initializing a basic network for estimating "
"preconditioning matrix")
common_lib.execute_command(
"""{command} {dir}/log/nnet_init.log \
nnet3-init --srand=-2 {dir}/configs/init.config \
{dir}/init.raw""".format(command=run_opts.command,
dir=args.dir))
am_egs_dir = args.am_egs_dir
xvec_egs_dir = args.xvec_egs_dir
am_output_name = args.am_output_name
xvec_output_name = args.xvec_output_name
am_weight = args.am_weight
xvec_weight = args.xvec_weight
feat_dim = int(common_lib.get_command_stdout("cat {0}/info/feat_dim".format(am_egs_dir)))
num_archives = int(common_lib.get_command_stdout("cat {0}/info/num_archives".format(am_egs_dir)))
tmp_feat_dim = int(common_lib.get_command_stdout("cat {0}/info/feat_dim".format(xvec_egs_dir)))
tmp_num_archives = int(common_lib.get_command_stdout("cat {0}/info/num_archives".format(xvec_egs_dir)))
    # frames_per_eg is no longer a parameter but is loaded from am_egs/info/frames_per_eg
am_frames_per_eg = int(common_lib.get_command_stdout("cat {0}/info/frames_per_eg".format(am_egs_dir)))
if feat_dim != tmp_feat_dim or num_archives*am_frames_per_eg != tmp_num_archives:
raise Exception('The am egs and xvec egs do not match')
if args.num_jobs_final > num_archives:
raise Exception('num_jobs_final cannot exceed the number of archives '
'in the egs directory')
# # No need to copy files for decoding
# common_train_lib.copy_egs_properties_to_exp_dir(am_egs_dir, args.dir)
if args.stage <= -3 and os.path.exists(args.dir+"/configs/init.config"):
logger.info('Computing the preconditioning matrix for input features')
train_lib.common.compute_preconditioning_matrix(
            args.dir, am_egs_dir, num_archives, run_opts,
max_lda_jobs=args.max_lda_jobs,
rand_prune=args.rand_prune)
if args.stage <= -1:
logger.info("Preparing the initial network.")
common_train_lib.prepare_initial_network(args.dir, run_opts)
# set num_iters so that as close as possible, we process the data
# $num_epochs times, i.e. $num_iters*$avg_num_jobs) ==
# $num_epochs*$num_archives, where
# avg_num_jobs=(num_jobs_initial+num_jobs_final)/2.
num_archives_expanded = num_archives * am_frames_per_eg
num_archives_to_process = int(args.num_epochs * num_archives_expanded)
num_archives_processed = 0
num_iters = ((num_archives_to_process * 2)
/ (args.num_jobs_initial + args.num_jobs_final))
# If do_final_combination is True, compute the set of models_to_combine.
# Otherwise, models_to_combine will be none.
if args.do_final_combination:
models_to_combine = common_train_lib.get_model_combine_iters(
num_iters, args.num_epochs,
num_archives_expanded, args.max_models_combine,
args.num_jobs_final)
else:
models_to_combine = None
logger.info("Training will run for {0} epochs = "
"{1} iterations".format(args.num_epochs, num_iters))
for iter in range(num_iters):
if (args.exit_stage is not None) and (iter == args.exit_stage):
logger.info("Exiting early due to --exit-stage {0}".format(iter))
return
current_num_jobs = int(0.5 + args.num_jobs_initial
+ (args.num_jobs_final - args.num_jobs_initial)
* float(iter) / num_iters)
if args.stage <= iter:
lrate = common_train_lib.get_learning_rate(iter, current_num_jobs,
num_iters,
num_archives_processed,
num_archives_to_process,
args.initial_effective_lrate,
args.final_effective_lrate)
shrinkage_value = 1.0 - (args.proportional_shrink * lrate)
if shrinkage_value <= 0.5:
raise Exception("proportional-shrink={0} is too large, it gives "
"shrink-value={1}".format(args.proportional_shrink,
shrinkage_value))
percent = num_archives_processed * 100.0 / num_archives_to_process
epoch = (num_archives_processed * args.num_epochs
/ num_archives_to_process)
shrink_info_str = ''
if shrinkage_value != 1.0:
shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
logger.info("Iter: {0}/{1} "
"Epoch: {2:0.2f}/{3:0.1f} ({4:0.1f}% complete) "
"lr: {5:0.6f} {6}".format(iter, num_iters - 1,
epoch, args.num_epochs,
percent,
lrate, shrink_info_str))
train_lib.common.train_cvector_one_iteration(
dir=args.dir,
iter=iter,
srand=args.srand,
am_output_name=am_output_name,
am_weight=am_weight,
am_egs_dir=am_egs_dir,
xvec_output_name=xvec_output_name,
xvec_weight=xvec_weight,
xvec_egs_dir=xvec_egs_dir,
num_jobs=current_num_jobs,
num_archives_processed=num_archives_processed,
num_archives=num_archives,
learning_rate=lrate,
minibatch_size_str=args.minibatch_size,
momentum=args.momentum,
max_param_change=args.max_param_change,
shuffle_buffer_size=args.shuffle_buffer_size,
run_opts=run_opts,
am_frames_per_eg=am_frames_per_eg,
dropout_edit_string=common_train_lib.get_dropout_edit_string(
args.dropout_schedule,
float(num_archives_processed) / num_archives_to_process,
iter),
shrinkage_value=shrinkage_value,
get_raw_nnet_from_am=False,
backstitch_training_scale=args.backstitch_training_scale,
backstitch_training_interval=args.backstitch_training_interval)
if args.cleanup:
                # do a cleanup of everything but the last 2 models, under certain
# conditions
common_train_lib.remove_model(
args.dir, iter-2, num_iters, models_to_combine,
args.preserve_model_interval,
get_raw_nnet_from_am=False)
if args.email is not None:
reporting_iter_interval = num_iters * args.reporting_interval
if iter % reporting_iter_interval == 0:
# lets do some reporting
[report, times, data] = (
nnet3_log_parse.generate_acc_logprob_report(args.dir))
message = report
subject = ("Update : Expt {dir} : "
"Iter {iter}".format(dir=args.dir, iter=iter))
common_lib.send_mail(message, subject, args.email)
num_archives_processed = num_archives_processed + current_num_jobs
# when we do final combination, just use the xvector egs
if args.stage <= num_iters:
if args.do_final_combination:
logger.info("Doing final combination to produce final.mdl")
train_lib.common.combine_models(
dir=args.dir, num_iters=num_iters,
models_to_combine=models_to_combine,
egs_dir=xvec_egs_dir,
minibatch_size_str="64", run_opts=run_opts,
get_raw_nnet_from_am=False,
max_objective_evaluations=args.max_objective_evaluations,
use_multitask_egs=True)
# sum_to_one_penalty=args.combine_sum_to_one_penalty,
else:
common_lib.force_symlink("{0}.raw".format(num_iters),
"{0}/final.raw".format(args.dir))
if args.cleanup:
logger.info("Cleaning up the experiment directory "
"{0}".format(args.dir))
remove_egs = False
common_train_lib.clean_nnet_dir(
nnet_dir=args.dir, num_iters=num_iters, egs_dir=am_egs_dir,
preserve_model_interval=args.preserve_model_interval,
remove_egs=remove_egs,
get_raw_nnet_from_am=False)
    # TODO: we may trace other output nodes except for "output"
# do some reporting
outputs_list = common_train_lib.get_outputs_list("{0}/final.raw".format(
args.dir), get_raw_nnet_from_am=False)
if 'output' in outputs_list:
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
if args.email is not None:
common_lib.send_mail(report, "Update : Expt {0} : "
"complete".format(args.dir),
args.email)
with open("{dir}/accuracy.{output_name}.report".format(dir=args.dir,
output_name="output"),
"w") as f:
f.write(report)
common_lib.execute_command("steps/info/nnet3_dir_info.pl "
"{0}".format(args.dir))
def main():
[args, run_opts] = get_args()
try:
train(args, run_opts)
common_lib.wait_for_background_commands()
except BaseException as e:
# look for BaseException so we catch KeyboardInterrupt, which is
# what we get when a background thread dies.
if args.email is not None:
message = ("Training session for experiment {dir} "
"died due to an error.".format(dir=args.dir))
common_lib.send_mail(message, message, args.email)
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
|
scripts/ltr_msmarco-passage/convert_common.py | keleog/pyserini | 451 | 11166755 | <reponame>keleog/pyserini<filename>scripts/ltr_msmarco-passage/convert_common.py
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import spacy
"""
This file provides helpers to convert passages and queries.
"""
def read_stopwords(fileName='stopwords.txt', lower_case=True):
"""Reads a list of stopwords from a file. By default the words
are read from a standard repo location and are lower_cased.
:param fileName a stopword file name
:param lower_case a boolean flag indicating if lowercasing is needed.
:return a list of stopwords
"""
stopwords = set()
with open(fileName) as f:
for w in f:
w = w.strip()
if w:
if lower_case:
w = w.lower()
stopwords.add(w)
return stopwords
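# Example usage (illustrative; assumes a stopword file exists at the given path):
#   stopwords = read_stopwords('stopwords.txt', lower_case=True)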
def is_alpha_num(s):
return s and (re.match("^[a-zA-Z-_.0-9]+$", s) is not None)
class SpacyTextParser:
def __init__(self, model_name, stopwords,
remove_punct=True,
sent_split=False,
keep_only_alpha_num=False,
lower_case=True,
enable_POS=True):
"""Constructor.
:param model_name a name of the spacy model to use, e.g., en_core_web_sm
:param stopwords a list of stop words to be excluded (case insensitive);
a token is also excluded when its lemma is in the stop word list.
:param remove_punct a bool flag indicating if the punctuation tokens need to be removed
:param sent_split a bool flag indicating if sentence splitting is necessary
:param keep_only_alpha_num a bool flag indicating if we need to keep only alpha-numeric characters
:param enable_POS a bool flag that enables POS tagging (which, e.g., can improve lemmatization)
"""
disable_list = ['ner', 'parser']
if not enable_POS:
disable_list.append('tagger')
print('Disabled Spacy components: ', disable_list)
self._nlp = spacy.load(model_name, disable=disable_list)
if sent_split:
sentencizer = self._nlp.create_pipe("sentencizer")
self._nlp.add_pipe(sentencizer)
self._remove_punct = remove_punct
self._stopwords = frozenset([w.lower() for w in stopwords])
self._keep_only_alpha_num = keep_only_alpha_num
self._lower_case = lower_case
@staticmethod
def _basic_clean(text):
return text.replace("’", "'")
def __call__(self, text):
"""A thin wrapper that merely calls spacy.
:param text input text string
:return a spacy Doc object
"""
return self._nlp(SpacyTextParser._basic_clean(text))
def proc_text(self, text):
"""Process text, remove stopwords and obtain lemmas, but does not split into sentences.
This function should not emit newlines!
:param text input text string
:return a tuple (lemmatized text, original-form text). Text is white-space separated.
"""
lemmas = []
tokens = []
doc = self(text)
for tokObj in doc:
if self._remove_punct and tokObj.is_punct:
continue
lemma = tokObj.lemma_
text = tokObj.text
if self._keep_only_alpha_num and not is_alpha_num(text):
continue
tok1 = text.lower()
tok2 = lemma.lower()
if tok1 in self._stopwords or tok2 in self._stopwords:
continue
if self._lower_case:
text = text.lower()
lemma = lemma.lower()
lemmas.append(lemma)
tokens.append(text)
return ' '.join(lemmas), ' '.join(tokens)
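# Illustrative usage of SpacyTextParser (a sketch; it requires the spaCy model
# named below to be installed):
#   parser = SpacyTextParser('en_core_web_sm', read_stopwords('stopwords.txt'))
#   lemmas, tokens = parser.proc_text('Running dogs ran quickly.')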
def get_retokenized(tokenizer, text):
"""Obtain a space separated re-tokenized text.
:param tokenizer: a tokenizer that has the function
tokenize that returns an array of tokens.
:param text: a text to re-tokenize.
"""
return ' '.join(tokenizer.tokenize(text))
def add_retokenized_field(data_entry,
src_field,
dst_field,
tokenizer):
"""
Create a re-tokenized field from an existing one.
:param data_entry: a dictionary of entries (keys are field names, values are text items)
:param src_field: a source field
:param dst_field: a target field
:param tokenizer: a tokenizer to use, if None, nothing is done
"""
if tokenizer is not None:
dst = ''
if src_field in data_entry:
dst = get_retokenized(tokenizer, data_entry[src_field])
data_entry[dst_field] = dst |
py_zipkin/exception.py | arthurlogilab/py_zipkin | 225 | 11166756 | <reponame>arthurlogilab/py_zipkin
# -*- coding: utf-8 -*-
class ZipkinError(Exception):
"""Custom error to be raised on Zipkin exceptions."""
|
Testing/dataloader.py | dani3l125/TDNet | 195 | 11166765 | import os
import torch
import numpy as np
import imageio
import cv2
import pdb
def recursive_glob(rootdir=".", suffix=""):
return [
os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames
if filename.endswith(suffix)]
class cityscapesLoader():
colors = [ # [ 0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
]
label_colours = dict(zip(range(19), colors))
def __init__(self,img_path,in_size):
self.img_path = img_path
self.n_classes = 19
self.files = recursive_glob(rootdir=self.img_path, suffix=".png")
self.files.sort()
self.files_num = len(self.files)
self.data = []
self.size = (in_size[1],in_size[0])
self.mean = np.array([.485, .456, .406])
self.std = np.array([.229, .224, .225])
def load_frames(self):
for idx in range(self.files_num):
img_path = self.files[idx].rstrip()
img_name = img_path.split('/')[-1]
folder = img_path.split('/')[-2]
#img = cv2.imread(img_path).astype(np.float32)
img = imageio.imread(img_path)
ori_size = img.shape[:-1]
img = cv2.resize(img,self.size)/255.0
img = (img-self.mean)/self.std
img = img.transpose(2, 0, 1)
img = img[np.newaxis,:]
img = torch.from_numpy(img).float()
self.data.append([img,img_name,folder,self.size])
def decode_segmap(self, temp):
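        # Map each per-pixel class index in `temp` to its Cityscapes RGB colour
        # so the segmentation can be saved or displayed as an image.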
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_classes):
r[temp == l] = self.label_colours[l][0]
g[temp == l] = self.label_colours[l][1]
b[temp == l] = self.label_colours[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r #/ 255.0
rgb[:, :, 1] = g #/ 255.0
rgb[:, :, 2] = b #/ 255.0
return rgb
|
src/pretalx/common/management/commands/makemessages.py | lili668668/pretalx | 418 | 11166775 | <reponame>lili668668/pretalx
"""This command supersedes the Django-inbuilt makemessages command.
We do this to allow the easy management of translations by way of plugins.
The way GNU gettext handles path precedence, it will always create new
translation files for given languages in the pretalx root locales directory
instead of updating the already existing plugin locale directory.
This management command copies all plugin-provided languages to the core
locales directory, then moves them back once the translations have been
generated and cleans up empty directories.
Yes, it's hacky, but have you tried managing symlinks instead?
"""
import shutil
from importlib import import_module
from pathlib import Path
from django.conf import settings
from django.core.management.commands.makemessages import Command as Parent
from pretalx.common.signals import register_locales
class Command(Parent):
def handle(self, *args, **options):
locales = {}
for receiver, response in register_locales.send(sender=None):
module = import_module(receiver.__module__.split(".")[0])
path = Path(module.__path__[0])
for locale in response:
if "-" in locale:
locale_parts = locale.split("-")
locale = (
locale_parts[0]
+ "_"
+ "_".join(part.capitalize() for part in locale_parts[1:])
)
locales[locale] = path
if not locales:
super().handle(*args, **options)
locale_path = Path(settings.LOCALE_PATHS[0])
moves = []
for locale, path in locales.items():
translation_path = path / "locale" / locale
translation_file = translation_path / "LC_MESSAGES/django.po"
new_dir = locale_path / locale
moves.append((translation_path, new_dir))
if not translation_file.exists():
print(f"{translation_file} does not exist, regenerating.")
continue
if new_dir.exists():
shutil.rmtree(new_dir)
translation_path.replace(new_dir)
super().handle(*args, **options)
for move in moves:
move[1].replace(move[0])
|
fastseq/optimizer/fairseq/generate.py | nttcs-ds/fastseq | 346 | 11166790 | <filename>fastseq/optimizer/fairseq/generate.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Optimize fairseq-generate (v0.10.2)"""
import ast
import logging
import math
import os
import sys
from itertools import chain
from multiprocessing import Queue, JoinableQueue
from torch.multiprocessing import Process
import numpy as np
import torch
from fairseq_cli.generate import main
from fairseq.utils import apply_to_sample
from fairseq import scoring, checkpoint_utils, tasks, utils
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fastseq.utils.api_decorator import replace
from fairseq.options import add_generation_args
GENERATE_FINISHED = "done"
POSTPROCESS_FINISHED = None
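# Overall flow of this optimized generate loop: the main process runs beam
# search on GPU and pushes (sample, hypos) pairs into `data_queue`; several
# PostProcess workers pull from it, detokenize on CPU and push printable
# results into `message_queue`; a single IOProcess drains that queue, writes
# the output and accumulates the BLEU score. The two sentinels above mark the
# end of generation and of post-processing respectively.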
original_add_generation_args = add_generation_args
@replace(add_generation_args)
def add_generation_args_v2(parser):
group = original_add_generation_args(parser)
# fmt: off
group.add_argument(
'--postprocess-workers',
default=1,
type=int,
choices=range(1, 128, 1),
metavar='N',
help='number of worker for post process')
group.add_argument(
'--decode-hypothesis',
action="store_true",
default=True)
group.add_argument(
'--use-el-attn',
action='store_true',
        help='Use Efficient Lossless Attention optimization?')
# fmt: on
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
return tensor.cpu().to(dtype=torch.float32)
else:
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def convert_base_e_to_base_2(t):
return t / math.log(2)
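# Model scores are natural-log probabilities; dividing by ln(2) converts them
# to log base 2, which is how scores are reported in the generated output
# (e.g. ln(0.5) ~= -0.693 becomes -1.0).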
class IOProcess(Process):
"""
Single process to handle IO and compute metrics
"""
def __init__(self, args, task, message_queue, output_file):
"""
Process to handle IO and compute metrics
Args:
            args (Namespace): parameters for the model and generation
            task (fairseq.tasks.fairseq_task.Fairseq):
                used to load the dict for detokenization
            message_queue (multiprocessing.Queue): queue storing output
"""
super(IOProcess, self).__init__()
self.tgt_dict = task.target_dictionary
# Generate and compute BLEU score
self.scorer = scoring.build_scorer(args, self.tgt_dict)
self.args = args
self.message_queue = message_queue
self.has_target = False
self.output_file = output_file
def run(self):
while True:
msg = self.message_queue.get()
if isinstance(msg, tuple):
t, h = msg
if hasattr(self.scorer, 'add_string'):
self.scorer.add_string(t, h)
else:
self.scorer.add(t, h)
self.has_target = True
elif msg == GENERATE_FINISHED:
if self.has_target:
if self.args.bpe and not self.args.sacrebleu:
if self.args.remove_bpe:
print("BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization")
else:
print("If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization")
print("Generate {} with beam={}: {}".format(
self.args.gen_subset, self.args.beam, self.scorer.result_string()),
file=self.output_file,)
break
else:
print(msg, file = self.output_file)
self.message_queue.task_done()
self.message_queue.close()
self.message_queue.join_thread()
class PostProcess(Process):
"""
Use multiple processes to do detokenization
"""
def __init__(self, args, task, data_queue, message_queue, generator):
"""
        Handle detokenization and BLEU score computation
        Args:
            args (Namespace): parameters for the model and generation
            task (fairseq.tasks.fairseq_task.Fairseq):
                used to load the dict for detokenization
            data_queue (multiprocessing.Queue):
                queue storing tensor data for detokenization
            message_queue (multiprocessing.Queue): queue storing output
"""
super(PostProcess, self).__init__()
# Set dictionaries
try:
self.src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
self.src_dict = None
self.tgt_dict = task.target_dictionary
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(args.replace_unk)
# Generate and compute BLEU score
self.scorer = scoring.build_scorer(args, self.tgt_dict)
self.args = args
self.task = task
self.data_queue = data_queue
self.message_queue = message_queue
self.generator = generator
if args.decode_hypothesis:
self.tokenizer = task.build_tokenizer(args)
self.bpe = task.build_bpe(args)
def _decode(self, x):
if self.bpe is not None:
x = self.bpe.decode(x)
if self.tokenizer is not None:
x = self.tokenizer.decode(x)
return x
def _get_symbols_to_strip_from_output(self, generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _detokenize(self, sample, hypos):
"""
        Detokenize and compute BLEU
"""
message_list = []
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], self.tgt_dict.pad()
)
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], self.tgt_dict.pad()).int().cpu()
)
# Either retrieve the original sentences or regenerate them from tokens
if self.align_dict is not None:
src_str = self.task.dataset(
self.args.gen_subset).src.get_original_text(sample_id)
target_str = self.task.dataset(
self.args.gen_subset).tgt.get_original_text(sample_id)
else:
if self.src_dict is not None:
src_str = self.src_dict.string(src_tokens,
self.args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = self.tgt_dict.string(
target_tokens,
self.args.remove_bpe,
escape_unk = True,
extra_symbols_to_ignore = self._get_symbols_to_strip_from_output(self.generator),
)
if not self.args.quiet:
if self.src_dict is not None:
if self.args.decode_hypothesis:
message_list.append('S-{}\t{}'.format(
sample_id, self._decode(src_str)))
else:
message_list.append('S-{}\t{}'.format(
sample_id, src_str))
if has_target:
if self.args.decode_hypothesis:
message_list.append('T-{}\t{}'.format(
sample_id, self._decode(target_str)))
else:
message_list.append('T-{}\t{}'.format(
sample_id, target_str))
# Process top predictions
for j, hypo in enumerate(hypos[i][:self.args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens = hypo['tokens'].int(),
src_str = src_str,
alignment = hypo['alignment'],
align_dict = self.align_dict,
tgt_dict = self.tgt_dict,
remove_bpe = self.args.remove_bpe,
extra_symbols_to_ignore = self._get_symbols_to_strip_from_output(self.generator),
)
if not self.args.quiet:
score = convert_base_e_to_base_2(hypo["score"])
message_list.append('H-{}\t{}\t{}'.format(
sample_id, score, hypo_str))
if self.args.decode_hypothesis:
detok_hypo_str = self._decode(hypo_str)
message_list.append('D-{}\t{}\t{}'.format(
sample_id, score, detok_hypo_str))
message_list.append('P-{}\t{}'.format(
sample_id, ' '.join(
map(
lambda x: '{:.4f}'.format(x),
convert_base_e_to_base_2(hypo['positional_scores']).tolist(),
))))
if self.args.print_alignment:
message_list.append('A-{}\t{}'.format(
sample_id, ' '.join([
'{}-{}'.format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
])))
if self.args.print_step:
message_list.append('I-{}\t{}'.format(
sample_id, hypo['steps']))
if getattr(self.args, 'retain_iter_history', False):
for step, h in enumerate(hypo['history']):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens = h['tokens'].int(),
                                src_str = src_str,
alignment = None,
align_dict = None,
tgt_dict = self.tgt_dict,
remove_bpe = None,
)
message_list.append('E-{}_{}\t{}'.format(sample_id, step, h_str))
# Score only the top hypothesis
if has_target and j == 0:
if (self.align_dict is not None or
self.args.remove_bpe is not None):
# Convert back to tokens for evaluation with unk
# replacement and/or without BPE
target_tokens = self.tgt_dict.encode_line(
target_str, add_if_not_exist = True)
hypo_tokens = self.tgt_dict.encode_line(
detok_hypo_str, add_if_not_exist = True)
if hasattr(self.scorer, "add_string"):
self.message_queue.put((target_str, detok_hypo_str))
else:
self.message_queue.put((target_tokens, hypo_tokens))
self.message_queue.put('\n'.join(message_list))
def run(self):
while True:
r = self.data_queue.get()
if r == GENERATE_FINISHED or r is POSTPROCESS_FINISHED:
self.data_queue.put(POSTPROCESS_FINISHED)
break
else:
sample, hypos = r
self._detokenize(sample, hypos)
self.data_queue.close()
self.data_queue.join_thread()
self.message_queue.close()
self.message_queue.join_thread()
self.message_queue.join()
def _main(args, output_file):
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger("fastseq.optimizer.fairseq.generate")
utils.import_user_module(args)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 12000
logger.info(args)
# Fix seed for stochastic decoding
if args.seed is not None and not args.no_seed_provided:
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
overrides = ast.literal_eval(args.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(args.path))
models, _ = checkpoint_utils.load_model_ensemble(
utils.split_paths(args.path),
arg_overrides = overrides,
task = task,
suffix = getattr(args, "checkpoint_suffix", ""),
strict = (args.checkpoint_shard_count == 1),
num_shards = args.checkpoint_shard_count,
)
if args.lm_path is not None:
overrides["data"] = args.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[args.lm_path],
arg_overrides=overrides,
task=None,
)
except:
logger.warning("Failed to load language model! Please make sure that the language model dict is the same as target dict and is located in the data dir ({})".format(args.data))
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if args.fp16:
model.half()
if use_cuda and not args.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(args)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset = task.dataset(args.gen_subset),
max_tokens = args.max_tokens,
max_sentences = args.batch_size,
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]),
ignore_invalid_inputs = args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple = args.required_batch_size_multiple,
num_shards = args.num_shards,
shard_id = args.shard_id,
num_workers = args.num_workers,
data_buffer_size = args.data_buffer_size,
).next_epoch_itr(shuffle = False)
progress = progress_bar.progress_bar(
itr,
log_format = args.log_format,
log_interval = args.log_interval,
default_log_format = ("tqdm" if not args.no_progress_bar else "none"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": args.lm_weight}
generator = task.build_generator(
models, args, extra_gen_cls_kwargs = extra_gen_cls_kwargs
)
num_sentences = 0
data_queue = Queue()
message_queue = JoinableQueue()
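    # Generation and post-processing form a producer/consumer pipeline: the main
    # loop below puts (sample, hypos) batches on data_queue, the PostProcess
    # workers detokenize them and push formatted lines to message_queue, and a
    # single IOProcess drains message_queue into output_file.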
p_list = []
for _ in range(args.postprocess_workers):
p = PostProcess(args, task, data_queue, message_queue, generator)
p_list.append(p)
p.start()
io_process = IOProcess(args, task, message_queue, output_file)
io_process.start()
if args.use_el_attn:
task.transpose_enc_dec_kv_proj(models)
wps_meter = TimeMeter()
for sample in progress:
cpu_sample = sample
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
try:
hypos = task.inference_step(
generator, models, sample, prefix_tokens, constraints)
except:
logging.exception(sys.exc_info()[0])
for p in p_list:
p.terminate()
io_process.terminate()
data_queue.close()
message_queue.close()
sys.exit(1)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
hypos = [h[:args.nbest] for h in hypos]
hypos = move_to_cpu(hypos) if use_cuda else hypos
data_queue.put((cpu_sample, hypos))
wps_meter.update(num_generated_tokens)
progress.log({'wps': round(wps_meter.avg)})
num_sentences += (
cpu_sample['nsentences'] if "nsentences" in cpu_sample else cpu_sample["id"].numel()
)
data_queue.put(GENERATE_FINISHED)
for p in p_list:
p.join()
message_queue.put(GENERATE_FINISHED)
io_process.join()
sent_through = num_sentences / gen_timer.sum if num_sentences > 0 else 0
tokens_through = 1. / gen_timer.avg if num_sentences > 0 else 0
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
sent_through,
tokens_through,
)
)
return
@replace(main)
def main_v2(args):
assert args.path is not None, '--path required for generation!'
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok = True)
output_path = os.path.join(
args.results_path, "generate-{}.txt".format(args.gen_subset)
)
with open(output_path, "w", buffering = 1, encoding = "utf-8") as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
|
examples/seismic/test_seismic_utils.py | kristiantorres/devito | 204 | 11166802 | import pytest
import numpy as np
from devito import norm
from examples.seismic import Model, setup_geometry, AcquisitionGeometry
def not_bcs(bc):
return ("mask", 1) if bc == "damp" else ("damp", 0)
@pytest.mark.parametrize('nbl, bcs', [
(20, ("mask", 1)), (0, ("mask", 1)),
(20, ("damp", 0)), (0, ("damp", 0))
])
def test_damp(nbl, bcs):
shape = (21, 21)
vp = np.ones(shape)
model = Model((0, 0), (10, 10), shape, 4, vp, nbl=nbl, bcs=bcs[0])
try:
center = model.damp.data[tuple(s // 2 for s in model.damp.shape)]
except AttributeError:
center = model.damp
assert all([s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape)])
assert center == bcs[1]
switch_bcs = not_bcs(bcs[0])
model._initialize_bcs(bcs=switch_bcs[0])
try:
center = model.damp.data[tuple(s // 2 for s in model.damp.shape)]
except AttributeError:
center = model.damp
assert center == switch_bcs[1]
@pytest.mark.parametrize('shape', [(41,), (21, 21), (11, 11, 11)])
def test_default_geom(shape):
vp = np.ones(shape)
o = tuple([0]*len(shape))
d = tuple([10]*len(shape))
model = Model(o, d, shape, 4, vp, nbl=20, dt=1)
assert model.critical_dt == 1
geometry = setup_geometry(model, 250)
nrec = shape[0] * (shape[1] if len(shape) > 2 else 1)
assert geometry.grid == model.grid
assert geometry.nrec == nrec
assert geometry.nsrc == 1
assert geometry.src_type == "Ricker"
assert geometry.rec.shape == (251, nrec)
assert norm(geometry.rec) == 0
assert geometry.src.shape == (251, 1)
assert norm(geometry.new_src(src_type=None)) == 0
rec2 = geometry.rec.resample(num=501)
assert rec2.shape == (501, nrec)
assert rec2.grid == model.grid
assert geometry.new_rec(name="bonjour").name == "bonjour"
assert geometry.new_src(name="bonjour").name == "bonjour"
@pytest.mark.parametrize('shape', [(41,), (21, 21), (11, 11, 11)])
def test_geom(shape):
vp = np.ones(shape)
o = tuple([0]*len(shape))
d = tuple([10]*len(shape))
model = Model(o, d, shape, 4, vp, nbl=20, dt=1)
assert model.critical_dt == 1
nrec = 31
nsrc = 4
rec_coordinates = np.ones((nrec, len(shape)))
src_coordinates = np.ones((nsrc, len(shape)))
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
t0=0.0, tn=250)
assert geometry.grid == model.grid
assert geometry.nrec == nrec
assert geometry.nsrc == nsrc
assert geometry.src_type is None
assert geometry.rec.shape == (251, nrec)
assert norm(geometry.rec) == 0
assert geometry.src.shape == (251, nsrc)
assert norm(geometry.new_src(src_type=None)) == 0
assert norm(geometry.src) == 0
rec2 = geometry.rec.resample(num=501)
assert rec2.shape == (501, nrec)
assert rec2.grid == model.grid
assert geometry.new_rec(name="bonjour").name == "bonjour"
assert geometry.new_src(name="bonjour").name == "bonjour"
|
test/stress_tests/hid_usb_test.py | Intellinium/DAPLink | 1,354 | 11166807 |
#
# DAPLink Interface Firmware
# Copyright (c) 2016-2017, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mbed_lstools
import threading
import time
import pyocd
should_exit = False
exit_cond = threading.Condition()
print_mut = threading.RLock()
global_start_time = time.time()
def _get_time():
return time.time() - global_start_time
def sync_print(msg):
with print_mut:
print(msg)
def hid_main(thread_index, board_id):
global should_exit
count = 0
try:
device = pyocd.probe.pydapaccess.DAPAccess.get_device(board_id)
while not should_exit:
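            # Stress the HID path: reopen the interface, read the board ID back
            # through the vendor command, and check it matches the expected ID.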
device.open()
info = device.vendor(0)
info = str(bytearray(info[1:1 + info[0]]))
assert info == board_id
device.close()
if count % 100 == 0:
sync_print("Thread %i on loop %10i at %.6f - %s - board %s" %
(thread_index, count, _get_time(),
time.strftime("%H:%M:%S"), board_id))
count += 1
except:
sync_print("Thread %i exception board %s" % (thread_index, board_id))
with exit_cond:
            should_exit = True
exit_cond.notify_all()
raise
def main():
global should_exit
lstools = mbed_lstools.create()
mbed_list = lstools.list_mbeds()
for thread_index, mbed in enumerate(mbed_list):
msd_thread = threading.Thread(target=hid_main,
args=(thread_index, mbed['target_id']))
msd_thread.start()
try:
with exit_cond:
while not should_exit:
exit_cond.wait(1)
except KeyboardInterrupt:
pass
should_exit = True
sync_print("Exiting")
if __name__ == "__main__":
main()
|
leetcode.com/python/1268_Search_Suggestions_System.py | its-sushant/coding-interview-gym | 713 | 11166816 |
import bisect
# Usig binary search
class Solution(object):
def suggestedProducts(self, products, searchWord):
"""
:type products: List[str]
:type searchWord: str
:rtype: List[List[str]]
"""
products.sort()
result, prefix, startIdx = [], "", 0
for char in searchWord:
prefix += char
startIdx = bisect.bisect_left(products, prefix, startIdx)
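            # products is sorted, so bisect_left returns the first index whose
            # entry is >= prefix; at most the three products starting there can
            # be suggested for this prefix.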
            currentSearchRes = []
            for product in products[startIdx: startIdx + 3]:
                if product.startswith(prefix):
                    currentSearchRes.append(product)
            result.append(currentSearchRes)
return result
|
rsbook_code/planning/plotting.py | patricknaughton01/RoboticSystemsBook | 116 | 11166845 | def mpl_plot_graph(ax,G,vertex_options={},edge_options={},dims=[0,1],directed=False):
"""Plots a graph G=(V,E) using matplotlib.
ax is a matplotlib Axes object.
If states have more than 2 dimensions, you can control the x-y axes
using the dims argument.
"""
import numpy as np
V,E = G
if len(V)==0:
return
X = [v[dims[0]] for v in V]
Y = [v[dims[1]] for v in V]
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = []
for e in E:
x1,y1 = X[e[0]],Y[e[0]]
x2,y2 = X[e[1]],Y[e[1]]
lines.append(np.array([[x1,y1],[x2,y2]],dtype=float))
#convert normal edge options to collection options
collection_options = {}
for k,opt in edge_options.items():
if not k.endswith('s') and k not in ['alpha']:
collection_options[k+'s'] = np.asarray([opt]*len(lines))
linecoll = LineCollection(lines,zorder=2,**collection_options)
ax.add_collection(linecoll)
ax.scatter(X,Y,zorder=3,**vertex_options)
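# Usage sketch (hypothetical data): V is a sequence of state tuples and E a
# sequence of (i, j) index pairs into V, e.g.
#   import matplotlib.pyplot as plt
#   V = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]
#   E = [(0, 1), (1, 2)]
#   fig, ax = plt.subplots()
#   mpl_plot_graph(ax, (V, E))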
|
experiments/annual_reviews/figure8/mpsc_experiment.py | catgloss/safe-control-gym | 120 | 11166847 | """This script runs the MPSC experiment in our Annual Reviews article.
See Figure 8 in https://arxiv.org/pdf/2108.06266.pdf.
"""
import os
import sys
import shutil
import matplotlib.pyplot as plt
from munch import munchify
from functools import partial
from safe_control_gym.utils.utils import read_file
from safe_control_gym.utils.registration import make
from safe_control_gym.utils.configuration import ConfigFactory
def main():
# Define arguments.
fac = ConfigFactory()
config = fac.merge()
env_func = partial(make,
config.task,
**config.task_config)
# Create controller from PPO YAML.
ppo_config_dir = os.path.dirname(os.path.abspath(__file__))+'/config_overrides'
ppo_dict = read_file(os.path.join(ppo_config_dir,'unsafe_ppo_config.yaml'))
ppo_config = munchify(ppo_dict)
# Setup PPO controller.
ppo_ctrl = make(ppo_config.algo,
env_func,
**ppo_config.algo_config)
# Load state_dict from trained PPO.
ppo_model_dir = os.path.dirname(os.path.abspath(__file__))+'/unsafe_ppo_model'
ppo_ctrl.load(os.path.join(ppo_model_dir,'unsafe_ppo_model_30000.pt')) # Show violation.
# Remove temporary files and directories
shutil.rmtree(os.path.dirname(os.path.abspath(__file__))+'/temp')
# Setup MPSC.
ctrl = make(config.algo,
env_func,
rl_controller=ppo_ctrl,
**config.algo_config)
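    # The MPSC safety filter wraps the pre-trained (unsafe) PPO policy: at run
    # time it certifies each proposed action and corrects those that would leave
    # the safe set, which is what the Certified/Modified/Uncertified plots below
    # compare.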
ctrl.reset()
train_env = env_func(init_state=None)
ctrl.learn(env=train_env)
test_env = env_func()
uncertified_env = env_func()
results = ctrl.run(env=test_env,
uncertified_env=uncertified_env)
ctrl.close()
fig_obs, ax_obs = plt.subplots()
ax_obs.plot(results.obs[:, 0], results.obs[:, 2], '.-', label='Certified')
ax_obs.plot(results.uncertified_obs[:10, 0], results.uncertified_obs[:10, 2], 'r--', label='Uncertified')
ax_obs.plot(results.obs[results.corrections>1e-6, 0], results.obs[results.corrections>1e-6, 2], 'r.', label='Modified')
ax_obs.legend()
ax_obs.set_title('State Space')
ax_obs.set_xlabel(r'$x$')
ax_obs.set_ylabel(r'$\theta$')
ax_obs.set_box_aspect(0.5)
fig_act, ax_act = plt.subplots()
ax_act.plot(results.actions[:], 'b-', label='Certified Inputs')
ax_act.plot(results.learning_actions[:], 'r--', label='Uncertified Input')
ax_act.legend()
ax_act.set_title('Input comparison')
ax_act.set_xlabel('Step')
ax_act.set_ylabel('Input')
ax_act.set_box_aspect(0.5)
fig, ax = plt.subplots()
ax.plot(results.obs[:,2], results.obs[:,3],'.-', label='Certified')
modified_inds = results.corrections>1e-6
ax.plot(results.obs[results.corrections>1e-6, 2], results.obs[results.corrections>1e-6, 3], 'r.', label='Modified')
uncert_end = results.uncertified_obs.shape[0]
ax.plot(results.uncertified_obs[:uncert_end, 2], results.uncertified_obs[:uncert_end, 3], 'r--', label='Uncertified')
ax.axvline(x=-0.2, color='r', label='Limit')
ax.axvline(x=0.2, color='r')
ax.set_xlabel(r"$\theta$")
ax.set_ylabel(r"$\dot{\theta}$")
ax.set_box_aspect(0.5)
ax.legend()
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
qt__pyqt__pyside__pyqode/pyqt5_simple_check_password.py | gil9red/SimplePyScripts | 117 | 11166905 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QInputDialog, QMessageBox, QLabel, QLineEdit
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Example')
self.setCentralWidget(QLabel('<font size="100">Hello, <b>USER</b>!</font>'))
if __name__ == '__main__':
app = QApplication([])
password, ok = QInputDialog.getText(None, 'Auth', 'Input password:', QLineEdit.Password)
if not ok:
QMessageBox.warning(None, 'Warning', 'Need input password!')
sys.exit()
if password != '<PASSWORD>':
QMessageBox.warning(None, 'Warning', 'Invalid password!')
sys.exit()
w = Example()
w.show()
app.exec_()
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/NotifyNBCommandStatusChangedDTO.py | yuanyi-thu/AIOT- | 128 | 11166908 |
from com.huawei.iotplatform.client.dto.NBCommandResult import NBCommandResult
class NotifyNBCommandStatusChangedDTO(object):
result = NBCommandResult()
def __init__(self):
self.deviceId = None
self.commandId = None
def getDeviceId(self):
return self.deviceId
def setDeviceId(self, deviceId):
self.deviceId = deviceId
def getCommandId(self):
return self.commandId
def setCommandId(self, commandId):
self.commandId = commandId
def getResult(self):
return self.result
def setResult(self, result):
self.result = result
|
ML/confusion-matrix/split_two.py | saneravi/ML_Stuff | 209 | 11166915 | #!/usr/bin/env python
"""Split the classes into two equal-sized groups to maximize accuracy."""
import json
import os
import random
import numpy as np
random.seed(0)
import logging
import sys
from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def calculate_split_accuracy(cm):
"""
Calculate the accuracy of the adjusted classifier.
The adjusted classifier is built by joining the first n/2 classes into one
group and the rest into another group.
"""
n = len(cm)
first = int(n / 2)
cm_small = np.zeros((2, 2))
for i in range(n):
class_i = int(i < first)
for j in range(n):
class_j = int(j < first)
cm_small[class_i][class_j] += cm[i][j]
return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())
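# Worked example (hypothetical 4x4 confusion matrix): with
#   cm = [[5, 1, 0, 0], [2, 6, 1, 0], [0, 0, 7, 1], [0, 1, 2, 8]]
# classes {0, 1} and {2, 3} form the two groups, the collapsed 2x2 matrix has
# 14 and 18 on its diagonal, and the split accuracy is (14 + 18) / 34 ~= 0.94.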
def calculate_split_error(cm):
"""Calculate the error of 2 group split."""
return 1.0 - calculate_split_accuracy(cm)
def simulated_annealing(current_cm,
current_perm=None,
score=calculate_split_error,
steps=2 * 10**5,
temp=100.0,
cooling_factor=0.99,
deterministic=False):
"""
Optimize current_cm by randomly swapping elements.
Parameters
----------
current_cm : numpy array
current_perm : None or iterable, optional (default: None)
    steps : int, optional (default: 2 * 10**5)
temp : float > 0.0, optional (default: 100.0)
Temperature
cooling_factor: float in (0, 1), optional (default: 0.99)
"""
assert temp > 0
assert cooling_factor > 0
assert cooling_factor < 1
n = len(current_cm)
if current_perm is None:
current_perm = list(range(n))
current_perm = np.array(current_perm)
# Debugging code
perm_exp = np.zeros((n, n), dtype=np.int)
for i in range(n):
for j in range(n):
perm_exp[i][j] = j
current_cm = apply_permutation(current_cm, current_perm)
perm_exp_current = apply_permutation(perm_exp, current_perm)
logging.debug(perm_exp_current[0])
print("apply permutation %s" % str(current_perm))
current_score = score(current_cm)
best_perm = current_perm
best_cm = current_cm
best_score = current_score
print("## Starting Score: {:0.2f}%".format(current_score * 100))
for step in range(steps):
tmp = np.array(current_cm, copy=True)
split_part = int(n / 2) - 1
i = random.randint(0, split_part)
j = random.randint(split_part + 1, n - 1)
perm = swap_1d(current_perm.copy(), i, j)
tmp = swap(tmp, i, j)
# tmp = apply_permutation(tmp, perm)
tmp_score = score(tmp)
if deterministic:
chance = 1.0
else:
chance = random.random()
            temp *= cooling_factor
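        # Metropolis-style acceptance: swaps that lower the score always pass;
        # worse swaps pass with probability exp(-delta / temp), which shrinks as
        # the temperature cools. In deterministic mode only non-worsening swaps
        # are kept.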
hot_prob = min(1, np.exp(-(tmp_score - current_score) / temp))
if chance <= hot_prob:
if best_score > tmp_score: # Minimize the score
best_perm = perm
best_cm = tmp
best_score = tmp_score
current_score = tmp_score
perm_exp_current = swap(perm_exp_current, i, j)
print(list(perm_exp_current[0]))
current_cm = tmp
logging.info(("Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, "
"step=%i)"),
(current_score * 100),
(best_score * 100),
(hot_prob * 100),
step)
return {'cm': best_cm, 'perm': list(perm_exp_current[0])}
def main(cm_file, perm_file, steps, labels_file):
"""Orchestrate."""
# Load confusion matrix
with open(cm_file) as f:
cm = json.load(f)
cm = np.array(cm)
# Load permutation
if os.path.isfile(perm_file):
print("loaded %s" % perm_file)
with open(perm_file) as data_file:
perm = json.load(data_file)
else:
        perm = list(range(len(cm)))
        random.shuffle(perm)
print("Score without perm: {:0.2f}%".format(calculate_split_error(cm) * 100))
result = simulated_annealing(cm, perm,
score=calculate_split_error,
deterministic=True,
steps=steps)
# First recursive step
# split_i = int(len(cm) / 2)
# cm = result['cm'][:split_i, :split_i]
# perm = list(range(split_i))
# result = simulated_annealing(cm, perm,
# score=calculate_split_error,
# deterministic=True,
# steps=steps)
print("Score: {}".format(calculate_split_error(result['cm'])))
print("Perm: {}".format(list(result['perm'])))
# Load labels
if os.path.isfile(labels_file):
with open(labels_file) as f:
symbols = json.load(f)
else:
symbols = read_symbols()
print("Symbols: {}".format([symbols[i] for i in result['perm']]))
plot_cm(result['cm'], zero_diagonal=True)
def get_parser():
"""Get parser object for script xy.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--cm",
dest="cm_file",
help=("path of a json file with a confusion matrix"),
metavar="cm.json",
default='confusion-matrix.json')
parser.add_argument("--perm",
dest="perm_file",
help=("path of a json file with a permutation to "
"start with"),
metavar="perm.json",
default="")
parser.add_argument("--labels",
dest="labels_file",
help=("path of a json file with a list of label "
"names"),
metavar="labels.json",
default="")
parser.add_argument("-n",
dest="n",
default=4 * 10**5,
type=int,
help="number of steps to iterate")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.cm_file, args.perm_file, args.n, args.labels_file)
|
alipay/aop/api/response/AlipayOverseasTravelPoiQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 11166968 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PoiQueryResult import PoiQueryResult
class AlipayOverseasTravelPoiQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOverseasTravelPoiQueryResponse, self).__init__()
self._poi_query_result = None
@property
def poi_query_result(self):
return self._poi_query_result
@poi_query_result.setter
def poi_query_result(self, value):
if isinstance(value, PoiQueryResult):
self._poi_query_result = value
else:
self._poi_query_result = PoiQueryResult.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayOverseasTravelPoiQueryResponse, self).parse_response_content(response_content)
if 'poi_query_result' in response:
self.poi_query_result = response['poi_query_result']
|
configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py | mmabrouk/detectron2 | 21,274 | 11167011 |
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
model.backbone.bottom_up.stages.depth = 101
|
aztk/core/models/validators.py | Geims83/aztk | 161 | 11167017 |
import collections
from aztk.error import InvalidModelFieldError
class Validator:
"""
Base class for a validator.
To write your validator extend this class and implement the validate method.
To raise an error raise InvalidModelFieldError
"""
def __call__(self, value):
self.validate(value)
def validate(self, value):
raise NotImplementedError()
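# Hypothetical example (not part of aztk) showing the pattern described above:
# subclass Validator and raise InvalidModelFieldError when validation fails.
#
#     class NonNegative(Validator):
#         def validate(self, value):
#             if value is None:
#                 return
#             if value < 0:
#                 raise InvalidModelFieldError("{0} should not be negative".format(value))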
class Required(Validator):
"""
Validate the field value is not `None`
"""
def validate(self, value):
if value is None:
raise InvalidModelFieldError("is required")
class String(Validator):
"""
Validate the value of the field is a `str`
"""
def validate(self, value):
if not value:
return
if not isinstance(value, str):
raise InvalidModelFieldError("{0} should be a string".format(value))
class Integer(Validator):
"""
Validate the value of the field is a `int`
"""
def validate(self, value):
if not value:
return
if not isinstance(value, int):
raise InvalidModelFieldError("{0} should be an integer".format(value))
class Float(Validator):
"""
Validate the value of the field is a `float`
"""
def validate(self, value):
if not value:
return
if not isinstance(value, float):
raise InvalidModelFieldError("{0} should be a float".format(value))
class Boolean(Validator):
"""This validator forces fields values to be an instance of `bool`."""
def validate(self, value):
if not value:
return
if not isinstance(value, bool):
raise InvalidModelFieldError("{0} should be a boolean".format(value))
class In(Validator):
"""
Validate the field value is in the list of allowed choices
"""
def __init__(self, choices):
self.choices = choices
def validate(self, value):
if not value:
return
if value not in self.choices:
raise InvalidModelFieldError("{0} should be in {1}".format(value, self.choices))
class InstanceOf(Validator):
"""
Check if the field is an instance of the given type
"""
def __init__(self, cls):
self.type = cls
def validate(self, value):
if not value:
return
if not isinstance(value, self.type):
raise InvalidModelFieldError("should be an instance of '{}'".format(self.type.__name__))
class Model(Validator):
"""
Validate the field is a model
"""
def __init__(self, model):
self.model = model
def validate(self, value):
if not value:
return
if not isinstance(value, self.model):
raise InvalidModelFieldError("should be an instance of '{}'".format(self.model.__name__))
value.validate()
class List(Validator):
"""
Validate the given item is a list
"""
def __init__(self, *validators):
self.validators = validators
def validate(self, value):
if not value:
return
if not isinstance(value, collections.MutableSequence):
raise InvalidModelFieldError("should be a list")
for i in value:
for validator in self.validators:
validator(i)
|
src_joint/utils_io.py | msc42/HPSG-Neural-Parser | 119 | 11167031 | __author__ = 'max'
import re
MAX_CHAR_LENGTH = 45
NUM_CHAR_PAD = 2
# Regular expressions used to normalize digits.
DIGIT_RE = re.compile(br"\d")
|
tests/test_client.py | Aliemeka/supabase-py | 181 | 11167105 | from __future__ import annotations
from typing import Any
import pytest
@pytest.mark.xfail(
reason="None of these values should be able to instanciate a client object"
)
@pytest.mark.parametrize("url", ["", None, "valeefgpoqwjgpj", 139, -1, {}, []])
@pytest.mark.parametrize("key", ["", None, "<KEY>", 139, -1, {}, []])
def test_incorrect_values_dont_instantiate_client(url: Any, key: Any) -> None:
    """Ensure we can't instantiate client with nonsense values."""
from supabase import Client, create_client
_: Client = create_client(url, key)
|
alipay/aop/api/domain/MyBkAccountVO.py | snowxmas/alipay-sdk-python-all | 213 | 11167136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MyBkAccountVO(object):
def __init__(self):
self._account_ext_no = None
self._account_fip_branch_code = None
self._account_fip_code = None
self._account_fip_name = None
self._account_no = None
self._account_type = None
self._available = None
self._bank_card_category = None
self._card_holder_name = None
self._grant_channel = None
self._refuse_code = None
@property
def account_ext_no(self):
return self._account_ext_no
@account_ext_no.setter
def account_ext_no(self, value):
self._account_ext_no = value
@property
def account_fip_branch_code(self):
return self._account_fip_branch_code
@account_fip_branch_code.setter
def account_fip_branch_code(self, value):
self._account_fip_branch_code = value
@property
def account_fip_code(self):
return self._account_fip_code
@account_fip_code.setter
def account_fip_code(self, value):
self._account_fip_code = value
@property
def account_fip_name(self):
return self._account_fip_name
@account_fip_name.setter
def account_fip_name(self, value):
self._account_fip_name = value
@property
def account_no(self):
return self._account_no
@account_no.setter
def account_no(self, value):
self._account_no = value
@property
def account_type(self):
return self._account_type
@account_type.setter
def account_type(self, value):
self._account_type = value
@property
def available(self):
return self._available
@available.setter
def available(self, value):
self._available = value
@property
def bank_card_category(self):
return self._bank_card_category
@bank_card_category.setter
def bank_card_category(self, value):
self._bank_card_category = value
@property
def card_holder_name(self):
return self._card_holder_name
@card_holder_name.setter
def card_holder_name(self, value):
self._card_holder_name = value
@property
def grant_channel(self):
return self._grant_channel
@grant_channel.setter
def grant_channel(self, value):
self._grant_channel = value
@property
def refuse_code(self):
return self._refuse_code
@refuse_code.setter
def refuse_code(self, value):
self._refuse_code = value
def to_alipay_dict(self):
params = dict()
if self.account_ext_no:
if hasattr(self.account_ext_no, 'to_alipay_dict'):
params['account_ext_no'] = self.account_ext_no.to_alipay_dict()
else:
params['account_ext_no'] = self.account_ext_no
if self.account_fip_branch_code:
if hasattr(self.account_fip_branch_code, 'to_alipay_dict'):
params['account_fip_branch_code'] = self.account_fip_branch_code.to_alipay_dict()
else:
params['account_fip_branch_code'] = self.account_fip_branch_code
if self.account_fip_code:
if hasattr(self.account_fip_code, 'to_alipay_dict'):
params['account_fip_code'] = self.account_fip_code.to_alipay_dict()
else:
params['account_fip_code'] = self.account_fip_code
if self.account_fip_name:
if hasattr(self.account_fip_name, 'to_alipay_dict'):
params['account_fip_name'] = self.account_fip_name.to_alipay_dict()
else:
params['account_fip_name'] = self.account_fip_name
if self.account_no:
if hasattr(self.account_no, 'to_alipay_dict'):
params['account_no'] = self.account_no.to_alipay_dict()
else:
params['account_no'] = self.account_no
if self.account_type:
if hasattr(self.account_type, 'to_alipay_dict'):
params['account_type'] = self.account_type.to_alipay_dict()
else:
params['account_type'] = self.account_type
if self.available:
if hasattr(self.available, 'to_alipay_dict'):
params['available'] = self.available.to_alipay_dict()
else:
params['available'] = self.available
if self.bank_card_category:
if hasattr(self.bank_card_category, 'to_alipay_dict'):
params['bank_card_category'] = self.bank_card_category.to_alipay_dict()
else:
params['bank_card_category'] = self.bank_card_category
if self.card_holder_name:
if hasattr(self.card_holder_name, 'to_alipay_dict'):
params['card_holder_name'] = self.card_holder_name.to_alipay_dict()
else:
params['card_holder_name'] = self.card_holder_name
if self.grant_channel:
if hasattr(self.grant_channel, 'to_alipay_dict'):
params['grant_channel'] = self.grant_channel.to_alipay_dict()
else:
params['grant_channel'] = self.grant_channel
if self.refuse_code:
if hasattr(self.refuse_code, 'to_alipay_dict'):
params['refuse_code'] = self.refuse_code.to_alipay_dict()
else:
params['refuse_code'] = self.refuse_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MyBkAccountVO()
if 'account_ext_no' in d:
o.account_ext_no = d['account_ext_no']
if 'account_fip_branch_code' in d:
o.account_fip_branch_code = d['account_fip_branch_code']
if 'account_fip_code' in d:
o.account_fip_code = d['account_fip_code']
if 'account_fip_name' in d:
o.account_fip_name = d['account_fip_name']
if 'account_no' in d:
o.account_no = d['account_no']
if 'account_type' in d:
o.account_type = d['account_type']
if 'available' in d:
o.available = d['available']
if 'bank_card_category' in d:
o.bank_card_category = d['bank_card_category']
if 'card_holder_name' in d:
o.card_holder_name = d['card_holder_name']
if 'grant_channel' in d:
o.grant_channel = d['grant_channel']
if 'refuse_code' in d:
o.refuse_code = d['refuse_code']
return o
|
docs/examples/python/scomparator_test_client.py | radetsky/themis | 1,561 | 11167138 |
#
# Copyright (c) 2015 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
echo client with handmade ssession wrappers (see ssession_wrappers.py)
for none event handled transport, like plain socket
"""
import socket
from pythemis import scomparator
comparator = scomparator.SComparator(b"shared secret")
socket = socket.socket()
socket.connect(("127.0.0.1", 26260))
try:
data = comparator.begin_compare()
while not comparator.is_compared():
socket.sendall(data)
data = comparator.proceed_compare(socket.recv(1024))
if comparator.is_equal():
print("match")
else:
print("not match")
finally:
socket.close()
|
code_examples/popart/block_sparse/examples/conftest.py | payoto/graphcore_examples | 260 | 11167144 |
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pathlib
from utils import build_custom_ops
from mnist.common import download_mnist
import pytest
import ctypes
import os
def pytest_sessionstart(session):
# Builds the custom ops
so_path = pathlib.Path(__file__).parent.parent.resolve() / "custom_ops.so"
build_custom_ops(so_path)
# Download MNIST dataset
download_mnist(pathlib.Path(__file__).parent.resolve() / "mnist")
# Sets the IPUs to wait before attaching.
os.environ["POPTORCH_WAIT_FOR_IPU"] = "1"
@pytest.fixture
def custom_ops():
so_path = pathlib.Path(__file__).parent.parent.resolve() / "custom_ops.so"
ctypes.cdll.LoadLibrary(so_path)
return so_path
|
examples/basic_geometries.py | ConnectionMaster/qgis-earthengine-plugin | 307 | 11167153 | import ee
from ee_plugin import Map
point = ee.Geometry.Point([1.5, 1.5])
Map.addLayer(point, {'color': '1eff05'}, 'point')
lineString = ee.Geometry.LineString(
[[-35, -10], [35, -10], [35, 10], [-35, 10]])
Map.addLayer(lineString, {'color': 'FF0000'}, 'lineString')
linearRing = ee.Geometry.LinearRing(
[[-35, -10], [35, -10], [35, 10], [-35, 10], [-35, -10]])
Map.addLayer(linearRing, {'color': 'ee38ff'}, 'linearRing')
rectangle = ee.Geometry.Rectangle([-40, -20, 40, 20])
Map.addLayer(rectangle, {'color': 'ffa05c'}, 'rectangle')
polygon = ee.Geometry.Polygon([
[[-5, 40], [65, 40], [65, 60], [-5, 60], [-5, 60]]
])
planarPolygon = ee.Geometry(polygon, None, False)
Map.addLayer(polygon, {'color': 'FF0000'}, 'geodesic polygon')
Map.addLayer(planarPolygon, {'color': '000000'}, 'planar polygon')
Map.centerObject(polygon) |
manila/api/views/share_instance.py | kpawar89/manila | 159 | 11167160 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
from manila.common import constants
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = 'share_instances'
_collection_links = 'share_instances_links'
_detail_version_modifiers = [
"remove_export_locations",
"add_access_rules_status_field",
"add_replication_fields",
"add_share_type_field",
"add_cast_rules_to_readonly_field",
"add_progress_field",
"translate_creating_from_snapshot_status",
]
def detail_list(self, request, instances):
"""Detailed view of a list of share instances."""
return self._list_view(self.detail, request, instances)
def detail(self, request, share_instance):
"""Detailed view of a single share instance."""
export_locations = [e['path'] for e in share_instance.export_locations]
instance_dict = {
'id': share_instance.get('id'),
'share_id': share_instance.get('share_id'),
'availability_zone': share_instance.get('availability_zone'),
'created_at': share_instance.get('created_at'),
'host': share_instance.get('host'),
'status': share_instance.get('status'),
'share_network_id': share_instance.get('share_network_id'),
'share_server_id': share_instance.get('share_server_id'),
'export_location': share_instance.get('export_location'),
'export_locations': export_locations,
}
self.update_versioned_resource_dict(
request, instance_dict, share_instance)
return {'share_instance': instance_dict}
def _list_view(self, func, request, instances):
"""Provide a view for a list of share instances."""
instances_list = [func(request, instance)['share_instance']
for instance in instances]
instances_links = self._get_collection_links(request,
instances,
self._collection_name)
instances_dict = {self._collection_name: instances_list}
if instances_links:
instances_dict[self._collection_links] = instances_links
return instances_dict
@common.ViewBuilder.versioned_method("2.9")
def remove_export_locations(self, context, share_instance_dict,
share_instance):
share_instance_dict.pop('export_location')
share_instance_dict.pop('export_locations')
@common.ViewBuilder.versioned_method("2.10")
def add_access_rules_status_field(self, context, instance_dict,
share_instance):
instance_dict['access_rules_status'] = (
share_instance.get('access_rules_status')
)
@common.ViewBuilder.versioned_method("2.11")
def add_replication_fields(self, context, instance_dict, share_instance):
instance_dict['replica_state'] = share_instance.get('replica_state')
@common.ViewBuilder.versioned_method("2.22")
def add_share_type_field(self, context, instance_dict, share_instance):
instance_dict['share_type_id'] = share_instance.get('share_type_id')
@common.ViewBuilder.versioned_method("2.30")
def add_cast_rules_to_readonly_field(self, context, instance_dict,
share_instance):
instance_dict['cast_rules_to_readonly'] = share_instance.get(
'cast_rules_to_readonly', False)
@common.ViewBuilder.versioned_method("1.0", "2.53")
def translate_creating_from_snapshot_status(self, context, instance_dict,
share_instance):
if (share_instance.get('status') ==
constants.STATUS_CREATING_FROM_SNAPSHOT):
instance_dict['status'] = constants.STATUS_CREATING
@common.ViewBuilder.versioned_method("2.54")
def add_progress_field(self, context, instance_dict, share_instance):
instance_dict['progress'] = share_instance.get('progress')
|
scripts/pendulum_data_collect.py | SaminYeasar/inverse_rl | 220 | 11167164 | from sandbox.rocky.tf.algos.trpo import TRPO
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from inverse_rl.utils.log_utils import rllab_logdir
def main():
env = TfEnv(GymEnv('Pendulum-v0', record_video=False, record_log=False))
policy = GaussianMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
algo = TRPO(
env=env,
policy=policy,
n_itr=200,
batch_size=1000,
max_path_length=100,
discount=0.99,
store_paths=True,
baseline=LinearFeatureBaseline(env_spec=env.spec)
)
with rllab_logdir(algo=algo, dirname='data/pendulum'):
algo.train()
if __name__ == "__main__":
main()
|
self_driving/ml_training/test/TestTrainer.py | cclauss/self_driving_pi_car | 724 | 11167182 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os
import sys
import inspect
import numpy as np
import tensorflow as tf
import itertools
import shutil
almost_current = os.path.abspath(inspect.getfile(inspect.currentframe()))
currentdir = os.path.dirname(almost_current)
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import run_test, reconstruct_from_record # noqa
from Config import Config # noqa
from DataHolder import DataHolder # noqa
from DFN import DFN # noqa
from Trainer import Trainer # noqa
class TestTrainer(unittest.TestCase):
"""
    Class that tests the Trainer class in optimization and prediction
"""
@classmethod
def setUpClass(cls):
data_name = "toy_160_90_3_data.npy"
label_name = "toy_160_90_3_labels.npy"
cls.config3d = Config(epochs=1,
architecture=[4],
num_steps=100,
save_step=10)
cls.config_green = Config(epochs=1,
architecture=[4],
num_steps=100,
save_step=10)
cls.config_gray = Config(epochs=1,
architecture=[4],
num_steps=100,
save_step=10)
cls.config_bin = Config(epochs=1,
architecture=[4],
num_steps=100,
save_step=10)
cls.data_aug = DataHolder(cls.config3d,
data_name,
label_name,
record_path="toy_aug",
flip=True,
augmentation=True)
cls.data_gray = DataHolder(cls.config_gray,
data_name,
label_name,
record_path="toy_gray",
flip=True,
augmentation=False,
gray=True)
cls.data_green = DataHolder(cls.config_green,
data_name,
label_name,
record_path="toy_green",
flip=True,
augmentation=False,
green=True)
cls.data_binary = DataHolder(cls.config_bin,
data_name,
label_name,
flip=True,
augmentation=False,
record_path="toy_bin",
binary=True)
cls.data_aug.create_records()
cls.data_gray.create_records()
cls.data_green.create_records()
cls.data_binary.create_records()
cls.all_paths = ["toy_aug",
"toy_gray",
"toy_green",
"toy_bin"]
cls.data_list = [cls.data_gray, cls.data_green, cls.data_binary]
cls.end = False
@classmethod
def tearDown(cls):
if cls.end:
sufixes = ['_train.tfrecords', '_valid.tfrecords', '_test.tfrecords'] # noqa
for car, cdr in itertools.product(cls.all_paths, sufixes):
file_name = car + cdr
if os.path.exists(file_name):
os.remove(file_name)
if os.path.exists("checkpoints"):
shutil.rmtree("checkpoints")
def check_overfitting_valid_data(self,
config,
dataholder):
if os.path.exists("checkpoints"):
shutil.rmtree("checkpoints")
graph = tf.Graph()
network = DFN(graph, config)
trainer = Trainer(graph,
config,
network,
dataholder)
non_trained_acc = trainer.get_valid_accuracy()
trainer.fit(verbose=False)
trained_acc = trainer.get_valid_accuracy()
condition = non_trained_acc < trained_acc
msg = "Performance on valid data not better after training\n"
msg += " non_trained_acc = {0:.6f}".format(non_trained_acc)
msg += " | trained_acc = {0:.6f}".format(trained_acc)
self.assertTrue(condition, msg=msg)
def check_prediction(self, config, dataholder, num_classes=4):
if os.path.exists("checkpoints"):
shutil.rmtree("checkpoints")
record_path = dataholder.get_test_tfrecord()
images, _, shape = reconstruct_from_record(record_path)
images = images.astype(np.float32) / 255
num_images = images.shape[0]
graph = tf.Graph()
network = DFN(graph, config)
trainer = Trainer(graph,
config,
network,
dataholder)
non_trained_predictions = trainer.predict(images)
trainer.fit(verbose=False)
trained_predictions = trainer.predict(images)
image = images[0].reshape((1, images[0].shape[0]))
single_prediction = trainer.predict(image)
self.assertEqual(non_trained_predictions.shape, (num_images,))
self.assertEqual(trained_predictions.shape, (num_images,))
self.assertEqual(np.int32, non_trained_predictions.dtype)
self.assertEqual(np.int32, trained_predictions.dtype)
self.assertEqual(np.int32, single_prediction.dtype)
def test_model_is_fitting_valid_dataset(self):
self.check_overfitting_valid_data(self.config3d,
self.data_aug)
for dh in self.data_list:
self.check_overfitting_valid_data(self.config_gray,
dh)
def test_prediction(self):
self.check_prediction(self.config3d,
self.data_aug)
for dh in self.data_list:
self.check_prediction(self.config_gray,
dh)
TestTrainer.end = True # hack to use TearDown only here
if __name__ == "__main__":
run_test(TestTrainer)
|
tools/azure-sdk-tools/packaging_tools/__main__.py | rsdoherty/azure-sdk-for-python | 2,728 | 11167206 | import argparse
import logging
import os
import sys
from . import build_packaging
_LOGGER = logging.getLogger(__name__)
_epilog = """This script will automatically build the TOML configuration file with default value if it doesn't exist.
"""
parser = argparse.ArgumentParser(
description="Packaging tools for Azure SDK for Python",
formatter_class=argparse.RawTextHelpFormatter,
epilog=_epilog,
)
parser.add_argument(
"--output", "-o", dest="output", default=".", help="Output dir, should be SDK repo folder. [default: %(default)s]"
)
parser.add_argument("--debug", dest="debug", action="store_true", help="Verbosity in DEBUG mode")
parser.add_argument(
"--build-conf",
dest="build_conf",
action="store_true",
help="Build a default TOML file, with package name, fake pretty name, as beta package and no doc page. Do nothing if the file exists, remove manually the file if needed.",
)
parser.add_argument(
"--jenkins",
dest="jenkins",
action="store_true",
help="In Jenkins mode, try to find what to generate from Jenkins env variables. Package names are then optional.",
)
parser.add_argument("package_names", nargs="*", help="The package name.")
args = parser.parse_args()
main_logger = logging.getLogger()
logging.basicConfig()
main_logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
if not args.package_names and not args.jenkins:
raise ValueError("At least one package name or Jenkins mode is required")
try:
build_packaging(
args.output, os.environ.get("GH_TOKEN", None), args.jenkins, args.package_names, build_conf=args.build_conf
)
except Exception as err:
if args.debug:
_LOGGER.exception(err)
else:
_LOGGER.critical(err)
sys.exit(1)
|
mayan/apps/lock_manager/apps.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11167209 | import logging
import sys
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
from .backends.base import LockingBackend
from .literals import PURGE_LOCKS_COMMAND, TEST_LOCK_NAME
from .settings import setting_backend
logger = logging.getLogger(name=__name__)
class LockManagerApp(MayanAppConfig):
has_tests = True
name = 'mayan.apps.lock_manager'
verbose_name = _('Lock manager')
def ready(self):
super().ready()
if PURGE_LOCKS_COMMAND not in sys.argv:
logger.debug('Starting lock backend connectivity test')
# Don't test for locks during the `purgelocks` command as there
# may be some stuck locks which will block the command.
lock_instance = LockingBackend.get_backend()
try:
lock = lock_instance.acquire_lock(
name=TEST_LOCK_NAME, timeout=1
)
lock.release()
except Exception as exception:
raise RuntimeError(
'Error initializing the locking backend: {}; {}'.format(
setting_backend.value, exception
)
) from exception
|
dart_fss/api/filings/document.py | dveamer/dart-fss | 243 | 11167221 |
# -*- coding: utf-8 -*-
from dart_fss.auth import get_api_key
from dart_fss.utils import request
def download_document(path: str, rcept_no: str) -> str:
""" 공시서류원본파일 다운로드
Parameters
----------
path: str
download path
rcept_no: str
        Receipt number of the filing
Returns
-------
str
download full path
"""
url = 'https://opendart.fss.or.kr/api/document.xml'
# Set API KEY
api_key = get_api_key()
payload = {
'crtfc_key': api_key,
'rcept_no': rcept_no,
}
resp = request.download(url=url, path=path, payload=payload)
return resp['full_path'] |
tests/r/test_rdtelec.py | hajime9652/observations | 199 | 11167234 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.rdtelec import rdtelec
def test_rdtelec():
"""Test module rdtelec.py by downloading
rdtelec.csv and testing shape of
extracted data has 29 rows and 6 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = rdtelec(test_path)
try:
assert x_train.shape == (29, 6)
except:
shutil.rmtree(test_path)
    raise
|
tests/utils_tests/autoload/__init__.py | fy0/mapi | 219 | 11167268 |
FLAG = 1
|
lldb/test/API/lang/cpp/enum_types/TestCPP11EnumTypes.py | LaudateCorpus1/llvm-project | 605 | 11167283 |
"""Look up enum type information and check for correct display."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class CPP11EnumTypesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def check_enum(self, suffix):
"""
        :param suffix: The suffix of the enum type name (enum_<suffix>) that
            should be checked.
"""
enum_name = "enum_" + suffix
unsigned = suffix.startswith("u")
self.expect("image lookup -t " + enum_name,
patterns=["enum( struct| class)? " + enum_name + " {"],
substrs=["Case1",
"Case2",
"Case3"])
# Test each case in the enum.
self.expect_expr("var1_" + suffix, result_type=enum_name, result_value="Case1")
self.expect_expr("var2_" + suffix, result_type=enum_name, result_value="Case2")
self.expect_expr("var3_" + suffix, result_type=enum_name, result_value="Case3")
if unsigned:
self.expect_expr("var_below_" + suffix, result_type=enum_name, result_value="199")
self.expect_expr("var_above_" + suffix, result_type=enum_name, result_value="203")
else:
self.expect_expr("var_below_" + suffix, result_type=enum_name, result_value="-3")
self.expect_expr("var_above_" + suffix, result_type=enum_name, result_value="1")
@skipIf(dwarf_version=['<', '4'])
def test(self):
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.check_enum("uc")
self.check_enum("c")
self.check_enum("us")
self.check_enum("s")
self.check_enum("ui")
self.check_enum("i")
self.check_enum("ul")
self.check_enum("l")
self.check_enum("ull")
self.check_enum("ll")
|
questionary/form.py | qualichat/questionary | 851 | 11167292 |
from typing import Any, Dict, NamedTuple, Sequence
from questionary.constants import DEFAULT_KBI_MESSAGE
from questionary.question import Question
class FormField(NamedTuple):
key: str
question: Question
def form(**kwargs: Question) -> "Form":
"""Create a form with multiple questions.
The parameter name of a question will be the key for the answer in
the returned dict.
Args:
kwargs: Questions to ask in the form.
"""
return Form(*(FormField(k, q) for k, q in kwargs.items()))
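# Usage sketch (hypothetical questions, assuming the standard questionary
# prompt helpers):
#
#     import questionary
#     answers = questionary.form(
#         name=questionary.text("What's your name?"),
#         confirmed=questionary.confirm("Proceed?"),
#     ).ask()
#     # -> {"name": "...", "confirmed": True}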
class Form:
"""Multi question prompts. Questions are asked one after another.
All the answers are returned as a dict with one entry per question.
This class should not be invoked directly, instead use :func:`form`.
"""
form_fields: Sequence[FormField]
def __init__(self, *form_fields: FormField) -> None:
self.form_fields = form_fields
def unsafe_ask(self, patch_stdout: bool = False) -> Dict[str, Any]:
"""Ask the questions synchronously and return user response.
Does not catch keyboard interrupts.
Args:
patch_stdout: Ensure that the prompt renders correctly if other threads
are printing to stdout.
Returns:
The answers from the form.
"""
return {f.key: f.question.unsafe_ask(patch_stdout) for f in self.form_fields}
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Dict[str, Any]:
"""Ask the questions using asyncio and return user response.
Does not catch keyboard interrupts.
Args:
patch_stdout: Ensure that the prompt renders correctly if other threads
are printing to stdout.
Returns:
The answers from the form.
"""
return {
f.key: await f.question.unsafe_ask_async(patch_stdout)
for f in self.form_fields
}
def ask(
self, patch_stdout: bool = False, kbi_msg: str = DEFAULT_KBI_MESSAGE
) -> Dict[str, Any]:
"""Ask the questions synchronously and return user response.
Args:
patch_stdout: Ensure that the prompt renders correctly if other threads
are printing to stdout.
kbi_msg: The message to be printed on a keyboard interrupt.
Returns:
The answers from the form.
"""
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("")
print(kbi_msg)
print("")
return {}
async def ask_async(
self, patch_stdout: bool = False, kbi_msg: str = DEFAULT_KBI_MESSAGE
) -> Dict[str, Any]:
"""Ask the questions using asyncio and return user response.
Args:
patch_stdout: Ensure that the prompt renders correctly if other threads
are printing to stdout.
kbi_msg: The message to be printed on a keyboard interrupt.
Returns:
The answers from the form.
"""
try:
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("")
print(kbi_msg)
print("")
return {}
|
recipes/Python/576477_Yet_another_signalslot/recipe-576477.py | tdiprima/code | 2,023 | 11167326 |
"""
File: signal.py
Author: <NAME>
Created: August 28, 2008
Purpose: A signal/slot implementation
"""
from weakref import WeakValueDictionary
class Signal(object):
def __init__(self):
self.__slots = WeakValueDictionary()
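        # Weak references keep the signal from holding connected receiver objects
        # alive; their entries disappear once the receivers are garbage collected.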
def __call__(self, *args, **kargs):
for key in self.__slots:
func, _ = key
func(self.__slots[key], *args, **kargs)
def connect(self, slot):
key = (slot.im_func, id(slot.im_self))
self.__slots[key] = slot.im_self
def disconnect(self, slot):
key = (slot.im_func, id(slot.im_self))
if key in self.__slots:
self.__slots.pop(key)
def clear(self):
self.__slots.clear()
# Sample usage:
class Model(object):
def __init__(self, value):
self.__value = value
self.changed = Signal()
def set_value(self, value):
self.__value = value
self.changed() # Emit signal
def get_value(self):
return self.__value
class View(object):
def __init__(self, model):
self.model = model
model.changed.connect(self.model_changed)
def model_changed(self):
print "New value:", self.model.get_value()
model = Model(10)
view1 = View(model)
view2 = View(model)
view3 = View(model)
model.set_value(20)
del view1
model.set_value(30)
model.changed.clear()
model.set_value(40)
|
tests/popmon/spark/test_spark.py | Sharath302/popmon | 265 | 11167334 |
from os.path import abspath, dirname, join
import pandas as pd
import pytest
from popmon.hist.filling import make_histograms
from popmon.pipeline.metrics import df_stability_metrics
try:
from pyspark import __version__ as pyspark_version
from pyspark.sql import SparkSession
spark_found = True
except (ModuleNotFoundError, AttributeError):
spark_found = False
@pytest.fixture
def spark_context():
if not spark_found:
return None
current_path = dirname(abspath(__file__))
scala = "2.12" if int(pyspark_version[0]) >= 3 else "2.11"
hist_spark_jar = join(
current_path, f"jars/histogrammar-sparksql_{scala}-1.0.11.jar"
)
hist_jar = join(current_path, f"jars/histogrammar_{scala}-1.0.11.jar")
spark = (
SparkSession.builder.master("local")
.appName("popmon-pytest")
.config("spark.jars", f"{hist_spark_jar},{hist_jar}")
.config("spark.sql.execution.arrow.enabled", "false")
.config("spark.sql.session.timeZone", "GMT")
.getOrCreate()
)
return spark
@pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_spark_stability_metrics(spark_context):
spark_df = spark_context.createDataFrame(pytest.test_df)
# generate metrics directly from spark dataframe
features = ["date:isActive", "date:eyeColor", "date:latitude"]
bin_specs = {
"date": {
"bin_width": pd.Timedelta("1y").value,
"bin_offset": pd.Timestamp("2000-1-1").value,
},
"latitude": {"bin_width": 5.0, "bin_offset": 0.0},
}
ds = df_stability_metrics(
spark_df,
time_axis="date",
features=features,
binning="unit",
bin_specs=bin_specs,
)
cols = ["profiles", "comparisons", "traffic_lights", "alerts"]
for c in cols:
assert c in list(ds.keys())
@pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_spark_make_histograms(spark_context):
pytest.age["data"]["name"] = "b'age'"
pytest.company["data"]["name"] = "b'company'"
pytest.eyesColor["data"]["name"] = "b'eyeColor'"
pytest.gender["data"]["name"] = "b'gender'"
pytest.isActive["data"]["name"] = "b'isActive'"
pytest.latitude["data"]["name"] = "b'latitude'"
pytest.longitude["data"]["name"] = "b'longitude'"
pytest.transaction["data"]["name"] = "b'transaction'"
pytest.latitude_longitude["data"]["name"] = "b'latitude:longitude'"
pytest.latitude_longitude["data"]["bins:name"] = "unit_func"
spark_df = spark_context.createDataFrame(pytest.test_df)
# test make_histograms() function call with spark df
current_hists = make_histograms(
spark_df,
features=[
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
"transaction",
],
bin_specs={
"transaction": {"num": 100, "low": -2000, "high": 2000},
"longitude": {"bin_width": 5.0, "bin_offset": 0.0},
"latitude": {"bin_width": 5.0, "bin_offset": 0.0},
},
binning="unit",
)
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
assert current_hists["transaction"].toJson() == pytest.transaction
|
code/deep/CSG/a-mnist/makedata.py | jiaruonan/transferlearning | 9,657 | 11167361 | #!/usr/bin/env python3.6
'''For generating MNIST-01 and its shifted interventional datasets.
'''
import torch as tc
import torchvision as tv
import torchvision.transforms.functional as tvtf
import argparse
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def select_xy(dataset, selected_y = (0,1), piltransf = None, ytransf = None):
dataset_selected = [(
tvtf.to_tensor( img if piltransf is None else piltransf(img, label) ),
label if ytransf is None else ytransf(label)
) for img, label in dataset if label in selected_y]
xs, ys = tuple(zip(*dataset_selected))
return tc.cat(xs, dim=0), tc.tensor(ys)
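# get_shift_transf (below) builds the PIL-level transform passed as `piltransf` above: each digit
# is translated horizontally by +loc or -loc (the -loc branch taken with probability pleft[label])
# plus noise drawn from `distr` ("randn" or "rand") scaled by `scale`; the vertical offset stays 0.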
def get_shift_transf(pleft: list, distr: str, loc: float, scale: float):
return lambda img, label: tvtf.affine(img, angle=0, translate=(
scale * getattr(tc, distr)(()) + loc * (1. - 2. * tc.bernoulli(tc.tensor(pleft[label]))), 0.
), scale=1., shear=0, fillcolor=0)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("mode", type = str, choices = {"train", "test"})
parser.add_argument("--pleft", type = float, nargs = '+', default = [0.5, 0.5])
parser.add_argument("--distr", type = str, choices = {"randn", "rand"})
parser.add_argument("--loc", type = float, default = 4.)
parser.add_argument("--scale", type = float, default = 1.)
parser.add_argument("--procroot", type = str, default = "./data/MNIST/processed/")
ag = parser.parse_args()
dataset = tv.datasets.MNIST(root="./data", train = ag.mode=="train", download=True) # as PIL
piltransf = get_shift_transf(ag.pleft, ag.distr, ag.loc, ag.scale)
selected_y = tuple(range(len(ag.pleft)))
shift_x, shift_y = select_xy(dataset, selected_y, piltransf)
filename = ag.procroot + ag.mode + "".join(str(y) for y in selected_y) + (
"_" + "_".join(f"{p:.1f}" for p in ag.pleft) +
"_" + ag.distr + f"_{ag.loc:.1f}_{ag.scale:.1f}.pt" )
tc.save((shift_x, shift_y), filename)
print("Processed data saved to '" + filename + "'")
|
custom_components/reolink_dev/device_action.py | gasecki/Home-Assistant_Config | 163 | 11167364 | """ custom helper actions """
import logging
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN, SERVICE_SNAPSHOT
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from .utils import async_get_device_entries
from .const import DOMAIN
VOD_THUMB_CAP = "capture_vod_thumbnail"
ACTION_TYPES = {VOD_THUMB_CAP}
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(ACTION_TYPES),
vol.Optional(CONF_ENTITY_ID): cv.entities_domain(
[CAMERA_DOMAIN, SENSOR_DOMAIN]
),
}
)
_LOGGER = logging.getLogger(__name__)
async def async_get_actions(hass: HomeAssistant, device_id: str):
"""List device actions for devices."""
actions = []
(device, device_entries) = await async_get_device_entries(hass, device_id)
if not device or not device_entries or len(device_entries) < 2:
return actions
sensor = None
camera = None
for entry in device_entries:
if (
entry.domain == SENSOR_DOMAIN
and entry.device_class == DEVICE_CLASS_TIMESTAMP
):
sensor = entry
if entry.domain == CAMERA_DOMAIN:
camera = entry
if sensor and camera:
actions.append(
{
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device_id,
                CONF_ENTITY_ID: [camera.entity_id, sensor.entity_id],
CONF_TYPE: VOD_THUMB_CAP,
}
)
sensor = None
camera = None
_LOGGER.debug("actions: %s", actions)
return actions
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
):
"""Execute a device action."""
if config[CONF_TYPE] == VOD_THUMB_CAP:
entity_ids: List[str] = config.get(CONF_ENTITY_ID)
        camera_entity_id: Optional[str] = None
        thumbnail_path: Optional[str] = None
if entity_ids and len(entity_ids) > 0:
for entity_id in entity_ids:
state = hass.states.get(entity_id)
if state and state.domain == CAMERA_DOMAIN:
camera_entity_id = entity_id
elif state and state.domain == SENSOR_DOMAIN:
thumbnail_path = state.attributes.get("thumbnail_path")
if not camera_entity_id or not thumbnail_path:
(_, device_entries) = await async_get_device_entries(
hass, config[CONF_DEVICE_ID]
)
for entry in device_entries:
if not camera_entity_id and entry.domain == CAMERA_DOMAIN:
camera_entity_id = entry.entity_id
if (
not thumbnail_path
and entry.domain == SENSOR_DOMAIN
and entry.device_class == DEVICE_CLASS_TIMESTAMP
):
state = hass.states.get(entry.entity_id)
thumbnail_path = (
state.attributes.get("thumbnail_path") if state else None
)
service_data = {
ATTR_ENTITY_ID: camera_entity_id,
"filename": thumbnail_path,
}
_LOGGER.debug("service_data: %s", service_data)
_LOGGER.debug("variables: %s", variables)
return await hass.services.async_call(
CAMERA_DOMAIN,
SERVICE_SNAPSHOT,
service_data,
blocking=True,
context=context,
)
|
depth_upsampling/sampler.py | Levintsky/ARKitScenes | 237 | 11167367 | import numpy as np
import torch.utils.data
class MultiEpochSampler(torch.utils.data.Sampler):
r"""Samples elements randomly over multiple epochs
Arguments:
data_source (Dataset): dataset to sample from
num_iter (int) : Number of times to loop over the dataset
start_itr (int) : which iteration to begin from
"""
def __init__(self, data_source, num_iter, start_itr=0, batch_size=128):
super().__init__(data_source)
self.data_source = data_source
self.dataset_size = len(self.data_source)
self.num_iter = num_iter
self.start_itr = start_itr
self.batch_size = batch_size
self.num_epochs = int(np.ceil((self.num_iter * self.batch_size) / float(self.dataset_size)))
if not isinstance(self.dataset_size, int) or self.dataset_size <= 0:
            raise ValueError("dataset size should be a positive integer "
"value, but got dataset_size={}".format(self.dataset_size))
def __iter__(self):
n = self.dataset_size
# Determine number of epochs
num_epochs = int(np.ceil(((self.num_iter - self.start_itr) * self.batch_size) / float(n)))
out = np.concatenate([np.random.permutation(n) for epoch in range(self.num_epochs)])[-num_epochs * n: self.num_iter * self.batch_size]
out = out[(self.start_itr * self.batch_size % n):]
return iter(out)
def __len__(self):
return (self.num_iter - self.start_itr) * self.batch_size
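# Minimal usage sketch (illustrative only; the TensorDataset and the num_iter/batch_size values
# below are stand-ins, not part of the original training pipeline):
if __name__ == "__main__":
    _ds = torch.utils.data.TensorDataset(torch.arange(10).float())
    _sampler = MultiEpochSampler(_ds, num_iter=5, start_itr=0, batch_size=4)
    _loader = torch.utils.data.DataLoader(_ds, batch_size=4, sampler=_sampler)
    for _i, _batch in enumerate(_loader):  # yields num_iter batches, indices drawn across epochs
        print(_i, _batch)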
|
homeassistant/components/humidifier/const.py | mtarjoianu/core | 30,023 | 11167401 | """Provides the constants needed for component."""
from enum import IntEnum
MODE_NORMAL = "normal"
MODE_ECO = "eco"
MODE_AWAY = "away"
MODE_BOOST = "boost"
MODE_COMFORT = "comfort"
MODE_HOME = "home"
MODE_SLEEP = "sleep"
MODE_AUTO = "auto"
MODE_BABY = "baby"
ATTR_AVAILABLE_MODES = "available_modes"
ATTR_HUMIDITY = "humidity"
ATTR_MAX_HUMIDITY = "max_humidity"
ATTR_MIN_HUMIDITY = "min_humidity"
DEFAULT_MIN_HUMIDITY = 0
DEFAULT_MAX_HUMIDITY = 100
DOMAIN = "humidifier"
# DEVICE_CLASS_* below are deprecated as of 2021.12
# use the HumidifierDeviceClass enum instead.
DEVICE_CLASS_HUMIDIFIER = "humidifier"
DEVICE_CLASS_DEHUMIDIFIER = "dehumidifier"
SERVICE_SET_MODE = "set_mode"
SERVICE_SET_HUMIDITY = "set_humidity"
class HumidifierEntityFeature(IntEnum):
    """Supported features of the humidifier entity."""
MODES = 1
# The SUPPORT_MODES constant is deprecated as of Home Assistant 2022.5.
# Please use the HumidifierEntityFeature enum instead.
SUPPORT_MODES = 1
|
run_gradient_descent_2d.py | Gautam-J/ML-Sklearn | 415 | 11167422 | import os
import time
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import animation
from algorithms.gradient_descent_2d import GradientDescent2D
from algorithms.momentum_2d import Momentum2D
plt.style.use('seaborn')
def getArguments():
parser = argparse.ArgumentParser(description='Parameters to tweak gradient descent.')
parser.add_argument('--lr', type=float, default=3e-2,
help='Learning rate. Set to 0.2 to see gradient descent NOT converging. Defaults to 0.03')
parser.add_argument('--max_iterations', type=int, default=150,
help='Maximum iterations for gradient descent to run. Defaults to 150')
parser.add_argument('--start_point', type=float, default=1.0,
help='Starting point for gradient descent. Defaults to 1.0')
parser.add_argument('-e', '--epsilon', type=float, default=1e-3,
help='Epsilon for checking convergence. Defaults to 0.001')
parser.add_argument('-r', '--random', action='store_true',
help='Flag to initialize a random starting point')
parser.add_argument('-s', '--save', action='store_true',
help="Flag to save visualizations and animations")
parser.add_argument('-l', '--length', type=int, default=5,
help="Length of the animation in seconds. Defaults to 5")
parser.add_argument('--use-momentum', action='store_true',
help='Flag to use momentum in gradient descent')
parser.add_argument('--momentum', type=float, default=0.3,
help='Momentum for gradient descent. Only used when use-momentum is True. Defaults to 0.3')
return parser.parse_args()
def animate(i, dataset, line):
line.set_data(dataset[:, :i])
return line
def plotAndSaveGraphs(gd, args):
fig = plt.figure(figsize=(16, 9))
# plot the original function
ax = fig.add_subplot(111)
x = np.linspace(-2.5, 1, 1000)
y = gd.f(x)
ax.plot(x, y, c='b', label='function', alpha=0.6)
# destructure history object
history = gd.getHistory()
gradientHistory = history['grads']
xHistory = history['x']
yHistory = gd.f(np.array(xHistory))
dataset = np.array([xHistory, yHistory])
totalIterations = len(xHistory) - 1
line = ax.plot(dataset[0], dataset[1], label='optimization', c='r', marker='.', alpha=0.4)[0]
ax.set_title(f'Iterations: {totalIterations} lr: {args.lr}')
ax.set_xlabel('X')
ax.set_ylabel('f(x)')
ax.legend()
lengthOfVideo = args.length
nFrames = totalIterations + 1
interval = lengthOfVideo * 1000 / nFrames
fps = (1 / (interval / 1000))
print('=' * 80)
print('[INFO]\t\tParameters for Animation')
print('=' * 80)
print(f'[INFO] Duration of video: {lengthOfVideo} seconds')
print(f'[DEBUG] Total number of frames: {nFrames}')
print(f'[DEBUG] Interval for each frame: {interval}')
print(f'[DEBUG] FPS of video: {fps}')
print('=' * 80)
ani = animation.FuncAnimation(fig, animate, fargs=(dataset, line),
frames=nFrames, blit=False,
interval=interval, repeat=True)
# make directories
if args.save:
pathToDirectory = os.path.join('visualizations', 'gradient_descent')
if not os.path.exists(pathToDirectory):
os.makedirs(pathToDirectory)
# save animation
if args.save:
fileName = os.path.join(pathToDirectory, 'GradientDescent2D.mp4')
print('[INFO] Saving animation...')
startTime = time.time()
ani.save(fileName, fps=fps)
timeDifference = time.time() - startTime
print(f'[INFO] Animation saved to {fileName}. Took {timeDifference:.2f} seconds.')
plt.close()
else:
plt.show()
sns.kdeplot(x=gradientHistory, fill=True)
plt.xlabel('Gradients')
plt.title('Distribution of Gradients')
# save distribution of gradients
if args.save:
fileName = os.path.join(pathToDirectory, 'DistributionOfGradients2D.png')
plt.savefig(fileName)
print(f'[INFO] Distribution of gradients saved to {fileName}')
plt.close()
else:
plt.show()
def main():
args = getArguments()
print('[DEBUG]', args)
if args.use_momentum:
gd = Momentum2D(alpha=args.lr,
max_iterations=args.max_iterations,
start_point=args.start_point,
random=args.random,
epsilon=args.epsilon,
momentum=args.momentum)
else:
gd = GradientDescent2D(alpha=args.lr,
max_iterations=args.max_iterations,
start_point=args.start_point,
random=args.random,
epsilon=args.epsilon)
gd.run()
print(f'[DEBUG] Value of x: {gd.x}')
print('[DEBUG] Expected value: -1.59791')
plotAndSaveGraphs(gd, args)
if __name__ == "__main__":
main()
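# Example invocations (all flags are defined in getArguments above):
#   python run_gradient_descent_2d.py --lr 0.05 -s
#   python run_gradient_descent_2d.py --use-momentum --momentum 0.5 --max_iterations 300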
|
src/admin/widgets/polygon.py | aimanow/sft | 280 | 11167440 | <reponame>aimanow/sft
import wtforms
from flask import render_template
from godmode.widgets.base import BaseWidget
class PolygonWidget(BaseWidget):
filterable = False
field = wtforms.TextAreaField()
def render_list(self, item):
value = getattr(item, self.name, None) if item else None
value = str(value if value is not None else "")
maps_path = value[2:-2].replace("),(", "|")
return "<img src='http://maps.googleapis.com/maps/api/staticmap?size=300x100" \
"&path=color:0xff0000|fillcolor:0xAA000033|weight:1|%s" \
"&sensor=false&key=<KEY>'>" % maps_path
def render_edit(self, form=None, item=None):
value = getattr(item, self.name, None) if item else None
value = str(value if value is not None else "")
        value = value.replace('"', '&quot;')
return render_template("widgets/polygon.html", name=self.name, value=value, form=form)
|
examples/postgres-alembic/app.py | psy-repos-rust/vagga | 1,974 | 11167451 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
db = SQLAlchemy(app)
@app.route('/')
def hello_world():
return '; '.join(db.engine.table_names())
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
tools/perf/benchmarks/jitter.py | google-ar/chromium | 777 | 11167468 | <reponame>google-ar/chromium
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry.timeline import chrome_trace_category_filter
from telemetry.web_perf import timeline_based_measurement
from telemetry.web_perf.metrics import jitter_timeline
import page_sets
JITTER_CATEGORY = 'cdp.perf'
TIMELINE_REQUIRED_CATEGORY = 'blink.console'
class Jitter(perf_benchmark.PerfBenchmark):
"""Timeline based measurement benchmark for jitter."""
page_set = page_sets.JitterPageSet
def CreateTimelineBasedMeasurementOptions(self):
cat_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter()
cat_filter.AddIncludedCategory(JITTER_CATEGORY)
cat_filter.AddIncludedCategory(TIMELINE_REQUIRED_CATEGORY)
options = timeline_based_measurement.Options(
overhead_level=cat_filter)
options.SetLegacyTimelineBasedMetrics(
[jitter_timeline.JitterTimelineMetric()])
return options
@classmethod
def Name(cls):
return 'jitter'
|
notebook/pandas_error_ambiguous.py | vhn0912/python-snippets | 174 | 11167477 | import pandas as pd
import numpy as np
df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=['a', 'b', 'c', 'd'], index=['x', 'y', 'z'])
print(df)
# a b c d
# x 0 1 2 3
# y 4 5 6 7
# z 8 9 10 11
print((df > 3) & (df % 2 == 0))
# a b c d
# x False False False False
# y True False True False
# z True False True False
# print((df > 3) and (df % 2 == 0))
# ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
# print(df > 3 & df % 2 == 0)
# ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
print(df & 7)
# a b c d
# x 0 1 2 3
# y 4 5 6 7
# z 0 1 2 3
print(df | 1)
# a b c d
# x 1 1 3 3
# y 5 5 7 7
# z 9 9 11 11
# print(df << 1)
# TypeError: unsupported operand type(s) for <<: 'DataFrame' and 'int'
# print(df << df)
# TypeError: unsupported operand type(s) for <<: 'DataFrame' and 'DataFrame'
print(df > 3)
# a b c d
# x False False False False
# y True True True True
# z True True True True
print((df > 3).all())
# a False
# b False
# c False
# d False
# dtype: bool
print((df > 3).all(axis=1))
# x False
# y True
# z True
# dtype: bool
print((df > 3).all(axis=None))
# False
print(df.empty)
# False
df_empty = pd.DataFrame()
print(df_empty.empty)
# True
print(df.size)
# 12
print(df_empty.size)
# 0
|
configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py | YuxinZou/mmclassification | 1,190 | 11167478 | <gh_stars>1000+
_base_ = '../repvgg-A0_4xb64-coslr-120e_in1k.py'
model = dict(backbone=dict(deploy=True))
|
alipay/aop/api/domain/LiveInfo.py | antopen/alipay-sdk-python-all | 213 | 11167486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.LiveContentInfo import LiveContentInfo
class LiveInfo(object):
def __init__(self):
self._content_info_list = None
self._live_end_time = None
self._live_id = None
self._live_start_time = None
self._summary = None
self._title = None
@property
def content_info_list(self):
return self._content_info_list
@content_info_list.setter
def content_info_list(self, value):
if isinstance(value, list):
self._content_info_list = list()
for i in value:
if isinstance(i, LiveContentInfo):
self._content_info_list.append(i)
else:
self._content_info_list.append(LiveContentInfo.from_alipay_dict(i))
@property
def live_end_time(self):
return self._live_end_time
@live_end_time.setter
def live_end_time(self, value):
self._live_end_time = value
@property
def live_id(self):
return self._live_id
@live_id.setter
def live_id(self, value):
self._live_id = value
@property
def live_start_time(self):
return self._live_start_time
@live_start_time.setter
def live_start_time(self, value):
self._live_start_time = value
@property
def summary(self):
return self._summary
@summary.setter
def summary(self, value):
self._summary = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
def to_alipay_dict(self):
params = dict()
if self.content_info_list:
if isinstance(self.content_info_list, list):
for i in range(0, len(self.content_info_list)):
element = self.content_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.content_info_list[i] = element.to_alipay_dict()
if hasattr(self.content_info_list, 'to_alipay_dict'):
params['content_info_list'] = self.content_info_list.to_alipay_dict()
else:
params['content_info_list'] = self.content_info_list
if self.live_end_time:
if hasattr(self.live_end_time, 'to_alipay_dict'):
params['live_end_time'] = self.live_end_time.to_alipay_dict()
else:
params['live_end_time'] = self.live_end_time
if self.live_id:
if hasattr(self.live_id, 'to_alipay_dict'):
params['live_id'] = self.live_id.to_alipay_dict()
else:
params['live_id'] = self.live_id
if self.live_start_time:
if hasattr(self.live_start_time, 'to_alipay_dict'):
params['live_start_time'] = self.live_start_time.to_alipay_dict()
else:
params['live_start_time'] = self.live_start_time
if self.summary:
if hasattr(self.summary, 'to_alipay_dict'):
params['summary'] = self.summary.to_alipay_dict()
else:
params['summary'] = self.summary
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = LiveInfo()
if 'content_info_list' in d:
o.content_info_list = d['content_info_list']
if 'live_end_time' in d:
o.live_end_time = d['live_end_time']
if 'live_id' in d:
o.live_id = d['live_id']
if 'live_start_time' in d:
o.live_start_time = d['live_start_time']
if 'summary' in d:
o.summary = d['summary']
if 'title' in d:
o.title = d['title']
return o
|
futu/common/pb/Qot_GetUserSecurityGroup_pb2.py | Hason-Cheung/py-futu-api | 858 | 11167487 | <gh_stars>100-1000
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Qot_GetUserSecurityGroup.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Qot_Common_pb2 as Qot__Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='Qot_GetUserSecurityGroup.proto',
package='Qot_GetUserSecurityGroup',
syntax='proto2',
serialized_pb=_b('\n\x1eQot_GetUserSecurityGroup.proto\x12\x18Qot_GetUserSecurityGroup\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"\x18\n\x03\x43\x32S\x12\x11\n\tgroupType\x18\x01 \x02(\x05\"1\n\tGroupData\x12\x11\n\tgroupName\x18\x01 \x02(\t\x12\x11\n\tgroupType\x18\x02 \x02(\x05\"=\n\x03S2C\x12\x36\n\tgroupList\x18\x01 \x03(\x0b\x32#.Qot_GetUserSecurityGroup.GroupData\"5\n\x07Request\x12*\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x1d.Qot_GetUserSecurityGroup.C2S\"n\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12*\n\x03s2c\x18\x04 \x01(\x0b\x32\x1d.Qot_GetUserSecurityGroup.S2C*a\n\tGroupType\x12\x15\n\x11GroupType_Unknown\x10\x00\x12\x14\n\x10GroupType_Custom\x10\x01\x12\x14\n\x10GroupType_System\x10\x02\x12\x11\n\rGroupType_All\x10\x03\x42N\n\x13\x63om.futu.openapi.pbZ7github.com/futuopen/ftapi4go/pb/qotgetusersecuritygroup')
,
dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])
_GROUPTYPE = _descriptor.EnumDescriptor(
name='GroupType',
full_name='Qot_GetUserSecurityGroup.GroupType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='GroupType_Unknown', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GroupType_Custom', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GroupType_System', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GroupType_All', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=399,
serialized_end=496,
)
_sym_db.RegisterEnumDescriptor(_GROUPTYPE)
GroupType = enum_type_wrapper.EnumTypeWrapper(_GROUPTYPE)
GroupType_Unknown = 0
GroupType_Custom = 1
GroupType_System = 2
GroupType_All = 3
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='Qot_GetUserSecurityGroup.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groupType', full_name='Qot_GetUserSecurityGroup.C2S.groupType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=116,
)
_GROUPDATA = _descriptor.Descriptor(
name='GroupData',
full_name='Qot_GetUserSecurityGroup.GroupData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groupName', full_name='Qot_GetUserSecurityGroup.GroupData.groupName', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupType', full_name='Qot_GetUserSecurityGroup.GroupData.groupType', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=167,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='Qot_GetUserSecurityGroup.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groupList', full_name='Qot_GetUserSecurityGroup.S2C.groupList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=230,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Qot_GetUserSecurityGroup.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='Qot_GetUserSecurityGroup.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=285,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Qot_GetUserSecurityGroup.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='Qot_GetUserSecurityGroup.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='Qot_GetUserSecurityGroup.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='Qot_GetUserSecurityGroup.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='Qot_GetUserSecurityGroup.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=287,
serialized_end=397,
)
_S2C.fields_by_name['groupList'].message_type = _GROUPDATA
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['GroupData'] = _GROUPDATA
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
DESCRIPTOR.enum_types_by_name['GroupType'] = _GROUPTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'Qot_GetUserSecurityGroup_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetUserSecurityGroup.C2S)
))
_sym_db.RegisterMessage(C2S)
GroupData = _reflection.GeneratedProtocolMessageType('GroupData', (_message.Message,), dict(
DESCRIPTOR = _GROUPDATA,
__module__ = 'Qot_GetUserSecurityGroup_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetUserSecurityGroup.GroupData)
))
_sym_db.RegisterMessage(GroupData)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'Qot_GetUserSecurityGroup_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetUserSecurityGroup.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'Qot_GetUserSecurityGroup_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetUserSecurityGroup.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'Qot_GetUserSecurityGroup_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetUserSecurityGroup.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pbZ7github.com/futuopen/ftapi4go/pb/qotgetusersecuritygroup'))
# @@protoc_insertion_point(module_scope)
|
etc/pending_ugens/Formant.py | butayama/supriya | 191 | 11167490 | import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import PureUGen
class Formant(PureUGen):
"""
::
>>> formant = supriya.ugens.Formant.ar(
... bwfrequency=880,
... formfrequency=1760,
... fundfrequency=440,
... )
>>> formant
Formant.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = None
    _ordered_input_names = (
        'fundfrequency',
        'formfrequency',
        'bwfrequency',
        )
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
bwfrequency=880,
formfrequency=1760,
fundfrequency=440,
):
PureUGen.__init__(
self,
calculation_rate=calculation_rate,
bwfrequency=bwfrequency,
formfrequency=formfrequency,
fundfrequency=fundfrequency,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
bwfrequency=880,
formfrequency=1760,
fundfrequency=440,
):
"""
Constructs an audio-rate Formant.
::
>>> formant = supriya.ugens.Formant.ar(
... bwfrequency=880,
... formfrequency=1760,
... fundfrequency=440,
... )
>>> formant
Formant.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
bwfrequency=bwfrequency,
formfrequency=formfrequency,
fundfrequency=fundfrequency,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def bwfrequency(self):
"""
Gets `bwfrequency` input of Formant.
::
>>> formant = supriya.ugens.Formant.ar(
... bwfrequency=880,
... formfrequency=1760,
... fundfrequency=440,
... )
>>> formant.bwfrequency
880.0
Returns ugen input.
"""
index = self._ordered_input_names.index('bwfrequency')
return self._inputs[index]
@property
def formfrequency(self):
"""
Gets `formfrequency` input of Formant.
::
>>> formant = supriya.ugens.Formant.ar(
... bwfrequency=880,
... formfrequency=1760,
... fundfrequency=440,
... )
>>> formant.formfrequency
1760.0
Returns ugen input.
"""
index = self._ordered_input_names.index('formfrequency')
return self._inputs[index]
@property
def fundfrequency(self):
"""
Gets `fundfrequency` input of Formant.
::
>>> formant = supriya.ugens.Formant.ar(
... bwfrequency=880,
... formfrequency=1760,
... fundfrequency=440,
... )
>>> formant.fundfrequency
440.0
Returns ugen input.
"""
index = self._ordered_input_names.index('fundfrequency')
return self._inputs[index]
|
test/py/TestRenderedDoubleConstants.py | Jimexist/thrift | 8,514 | 11167503 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from DoubleConstantsTest import constants
#
# In order to run the test under Windows. We need to create symbolic link
# name 'thrift' to '../src' folder by using:
#
# mklink /D thrift ..\src
#
class TestRenderedDoubleConstants(unittest.TestCase):
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = \
"failed to verify a double constant generated by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST =\
"failed to verify a list item by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_TYPE_CHECKS = "the rendered variable with name %s is not of double type"
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_constants(self):
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS,
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST")
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_list(self):
EXPECTED_DOUBLE_LIST = [1.0, -100.0, 100.0, 9223372036854775807.0, -9223372036854775807.0, 3.14159265359,
1000000.1, -1000000.1, 1.7e+308, -1.7e+308, 9223372036854775816.43,
-9223372036854775816.43]
self.assertEqual(len(constants.DOUBLE_LIST_TEST), len(EXPECTED_DOUBLE_LIST))
for i, expectedValue in enumerate(EXPECTED_DOUBLE_LIST):
self.assertAlmostEqual(constants.DOUBLE_LIST_TEST[i], expectedValue, places=7)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestRenderedDoubleConstants))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
|
api/v2/tests/tools.py | nathandarnell/sal | 215 | 11167529 | <reponame>nathandarnell/sal
"""Tools used in testing the Sal API app."""
import contextlib
import io
import sys
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from server.models import (ApiKey, BusinessUnit, MachineGroup, Machine)
# Get rid of pylint complaining about Django ORM stuff
# pylint: disable=no-member
@contextlib.contextmanager
def no_stdout():
"""Method decorator to prevent stdout from mucking up test output"""
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
yield
except Exception:
saved_output = sys.stdout
sys.stdout = saved_stdout
print(saved_output.getvalue())
raise
sys.stdout = saved_stdout
class SalAPITestCase(APITestCase):
"""Overriden DRF TestCase to simplify data setup and authenticated
request generation.
"""
setup_data = []
def setUp(self):
# Set up an APIKey for authenticated tests to use.
api_key = ApiKey.objects.create()
self.headers = {
'HTTP_PRIVATEKEY': api_key.private_key,
'HTTP_PUBLICKEY': api_key.public_key}
for data_method_name in self.setup_data:
data_method = getattr(self, data_method_name)
data_method()
# pylint: disable=no-self-use
def create_machine_data(self):
"""Created all data needed for machine testing."""
machine_group = self.create_machine_group_data()
Machine.objects.create(
serial='C0DEADBEEF', machine_group=machine_group)
Machine.objects.create(
serial='C1DEADBEEF', machine_group=machine_group)
# pylint: disable=no-self-use
def create_business_unit_data(self):
"""Set up business unit data."""
return BusinessUnit.objects.create(name='test')
# pylint: disable=no-self-use
def create_machine_group_data(self):
"""Set up machine group and dependent data."""
business_unit = self.create_business_unit_data()
return MachineGroup.objects.create(
name='test', business_unit=business_unit)
def authed_get(self, name, args=None, params=None):
"""Perform an authenticated get request to API."""
if not params:
params = {}
url = reverse(name, args=args) if args else reverse(name)
return self.client.get(url, params, **self.headers)
def authed_options(self, name, args=None, params=None):
"""Perform an authenticated get request to API."""
if not params:
params = {}
url = reverse(name, args=args) if args else reverse(name)
return self.client.options(url, params, **self.headers)
class TestGeneratorMeta(type):
"""Automatically generate basic tests for 'boring' endpoints.
By replacing a UnitTest subclass' metaclass with this, and by adding
a `tests` class property, you can automatically generate access,
listing, and detail tests (that basically just ensure they return
200 at this time).
`tests` should be an iterable of route names (lowercase,
underscores stripped).
"""
def __new__(mcs, name, bases, attrs):
"""Override new to create our test methods with correct names"""
for test_name in attrs['tests']:
access_method, list_method, detail_method = mcs.generate_tests(
test_name)
attrs['test_{}_access'.format(test_name)] = access_method
attrs['test_{}_list'.format(test_name)] = list_method
attrs['test_{}_detail'.format(test_name)] = detail_method
return super(TestGeneratorMeta, mcs).__new__(mcs, name, bases, attrs)
@classmethod
def generate_tests(mcs, route_name):
"""Generate the tests to attach to the new class."""
def access_method(self):
"""Test that unauthenticated access is denied."""
response = self.client.get(reverse('{}-list'.format(route_name)))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def list_method(self):
"""Test that listing operations work"""
response = self.authed_get('{}-list'.format(route_name))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def detail_method(self):
"""Test that detail operations work"""
response = self.authed_get(
'{}-detail'.format(route_name), args=(1,))
self.assertEqual(response.status_code, status.HTTP_200_OK)
return (access_method, list_method, detail_method)
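# Usage sketch: a test case lists the DRF route basenames to exercise and the metaclass emits
# access/list/detail tests for each (the route names below are illustrative):
#
#     class EndpointTest(SalAPITestCase, metaclass=TestGeneratorMeta):
#         setup_data = ['create_machine_data']
#         tests = ['machine', 'machinegroup']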
|
src/common/config.py | THUKElab/Video2Description | 152 | 11167552 | """
Configuration Parser for V2D
"""
import json
import threading
import os
lock = threading.Lock()
def get_config():
with lock:
if hasattr(get_config, "config"):
return get_config.config
fname = os.environ.get("V2D_CONFIG_FILE", "config.json")
with open(fname, "r") as fin:
get_config.config = json.load(fin)
return get_config.config
def clear():
with lock:
if hasattr(get_config, "config"):
delattr(get_config, "config")
def get_app_config():
return get_config()["app"]
def get_rpc_config():
return get_config()["rpc"]
def get_vpreprocess_config():
return get_config()["vpreprocess"]
def get_vocab_config():
return get_config()["vocab"]
def get_tests_config():
return get_config()["tests"]
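# Usage sketch (assumes a config.json shaped like {"app": {...}, "rpc": {...}, "vpreprocess": {...},
# "vocab": {...}, "tests": {...}}; the path can be overridden through the V2D_CONFIG_FILE env var):
#   os.environ["V2D_CONFIG_FILE"] = "/path/to/config.json"
#   rpc_config = get_rpc_config()  # returns the "rpc" section as a dict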
|
utils.py | VishnuAvNair/ICIAR2018 | 185 | 11167566 | #!/usr/bin/env python3
"""Utilities for cross-validation.
Notice data/folds-10.pkl we use in 10-fold cross-val. Keep it to replicate our results"""
import numpy as np
import glob
from os.path import basename, join
from sklearn.model_selection import StratifiedKFold
import pickle
def load_data(in_dir, folds=None, split=None):
"""Builds train/test data from preprocessed features for a given split
# Arguments
in_dir: Input directory containing *.npy CNN feature files.
folds: None or list of splits dict{
"train": {
"x": train files list,
"y": train labels},
"test": {
"x": test files list,
"y": test labels}}
}
split: None or split number.
# Returns
Tran/test data (features and labels) for a given split, if `folds` is not None
Test data (only features) and file names, if `folds` is None
"""
if folds:
y_train = []
x_train = []
for f, l in zip(folds[split]["train"]["x"], folds[split]["train"]["y"]):
x = np.load(join(in_dir, f))
x_train.append(x)
y_train.append([l] * len(x))
x_train = np.vstack(x_train)
y_train = np.concatenate(y_train)
y_test = []
x_test = []
for f, l in zip(folds[split]["test"]["x"], folds[split]["test"]["y"]):
x = np.load(join(in_dir, f))
x_test.append(x)
y_test.append([l] * len(x))
x_test = np.vstack(x_test)
y_test = np.concatenate(y_test)
return x_train, y_train, x_test, y_test
else:
files = glob.glob(in_dir + "/*.npy")
x = []
for f in files:
x.append(np.load(f))
return np.vstack(x), np.array([basename(f) for f in files])
def make_folds():
"""Creates stratified splits based on train directory listing
# Dumps
folds: list of splits dict{
"train": {
"x": train files list,
"y": train labels},
"test": {
"x": test files list,
"y": test labels}}
}
"""
files = np.array([basename(f) for f in glob.glob("data/preprocessed/train/ResNet-0.5-400/*.npy")])
labels = []
classes = np.array([0, 1, 2, 3])
for f in files:
lb = np.array([f.startswith("n"),
f.startswith("b"),
f.startswith("is"),
f.startswith("iv")])
labels.append(classes[np.argmax(lb)])
labels = np.array(labels)
folds = []
skf = StratifiedKFold(n_splits=10, shuffle=True)
for train_index, test_index in skf.split(files, labels):
f_train, f_test = files[train_index], files[test_index]
y_train, y_test = labels[train_index], labels[test_index]
folds.append({"train": {"x": f_train, "y": y_train}, "test": {"x": f_test, "y": y_test}})
with open("data/folds-10.pkl", "wb") as f:
pickle.dump(folds, f)
|
infra/scripts/legacy/scripts/common/chromium_utils.py | google-ar/chromium | 2,151 | 11167576 | <reponame>google-ar/chromium<filename>infra/scripts/legacy/scripts/common/chromium_utils.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Set of basic operations/utilities that are used by the build. """
from contextlib import contextmanager
import ast
import cStringIO
import copy
import errno
import fnmatch
import glob
import json
import os
import re
import shutil
import socket
import stat
import subprocess
import sys
import threading
import time
import traceback
BUILD_DIR = os.path.realpath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
# Local errors.
class MissingArgument(Exception):
pass
class PathNotFound(Exception):
pass
class ExternalError(Exception):
pass
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
def convert_json(option, _, value, parser):
"""Provide an OptionParser callback to unmarshal a JSON string."""
setattr(parser.values, option.dest, json.loads(value))
def AddPropertiesOptions(option_parser):
"""Registers command line options for parsing build and factory properties.
After parsing, the options object will have the 'build_properties' and
'factory_properties' attributes. The corresponding values will be python
dictionaries containing the properties. If the options are not given on
the command line, the dictionaries will be empty.
Args:
option_parser: An optparse.OptionParser to register command line options
for build and factory properties.
"""
option_parser.add_option('--build-properties', action='callback',
callback=convert_json, type='string',
nargs=1, default={},
help='build properties in JSON format')
option_parser.add_option('--factory-properties', action='callback',
callback=convert_json, type='string',
nargs=1, default={},
help='factory properties in JSON format')
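# Usage sketch (illustrative; assumes the caller imports optparse and passes properties as JSON):
#   parser = optparse.OptionParser()
#   AddPropertiesOptions(parser)
#   options, _ = parser.parse_args(['--build-properties={"buildername": "example-builder"}'])
#   options.build_properties  # -> {'buildername': 'example-builder'}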
|
neurst_pt/models/model_utils.py | ishine/neurst | 208 | 11167595 | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def input_length_to_nonpadding(lengths, max_len, dtype=None):
""" Creates a bias tensor according to the non-padding tensor for cross entropy.
Args:
        lengths: A Tensor with shape [batch_size, ], indicating the true lengths.
max_len: A scalar tensor indicating the maximum length.
Returns:
A float tensor with shape [batch_size, max_len],
indicating the padding positions, where 0.0 for padding and
1.0 for non-padding.
"""
row_vector = torch.arange(0, max_len)
matrix = torch.unsqueeze(lengths, dim=-1)
mask = (row_vector < matrix).to(dtype or torch.float)
return mask # 1.0 for non-padding
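# Worked example: lengths = [2, 3] and max_len = 4 give
#   [[1., 1., 0., 0.],
#    [1., 1., 1., 0.]]
# (1.0 marks real tokens, 0.0 marks padding); input_length_to_padding below returns the complement.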
def input_length_to_padding(lengths, max_len, dtype=None):
""" Creates a bias tensor according to the padding tensor for attention.
Args:
        lengths: A Tensor with shape [batch_size, ], indicating the true lengths.
max_len: A scalar tensor indicating the maximum length.
Returns:
A float tensor with shape [batch_size, max_len],
indicating the padding positions, where 1.0 for padding and
0.0 for non-padding.
"""
return 1. - input_length_to_nonpadding(lengths, max_len, dtype)
|
tests/test_db.py | witold-gren/django-health-check | 739 | 11167606 | from django.db import DatabaseError, IntegrityError
from django.db.models import Model
from django.test import TestCase
from mock import patch
from health_check.db.backends import DatabaseBackend
class MockDBModel(Model):
"""
A Mock database used for testing.
error_thrown - The Exception to be raised when save() is called, if any
"""
error_thrown = None
def __init__(self, error_thrown=None, *args, **kwargs):
super(MockDBModel, self).__init__(*args, **kwargs)
self.error_thrown = error_thrown
def save(self, *args, **kwargs):
if self.error_thrown is not None:
raise self.error_thrown
else:
return True
def delete(self, *args, **kwargs):
return True
def raise_(ex):
raise ex
class HealthCheckDatabaseTests(TestCase):
"""
Tests health check behavior with a mocked database backend.
Ensures check_status returns/raises the expected result when the database works or raises exceptions.
"""
@patch('health_check.db.backends.TestModel.objects.create',
lambda title=None: MockDBModel())
def test_check_status_works(self):
db_backend = DatabaseBackend()
db_backend.check_status()
self.assertFalse(db_backend.errors)
@patch('health_check.db.backends.TestModel.objects.create',
lambda title=None: raise_(IntegrityError))
def test_raise_integrity_error(self):
db_backend = DatabaseBackend()
db_backend.run_check()
self.assertTrue(db_backend.errors)
self.assertIn('unexpected result: Integrity Error', db_backend.pretty_status())
@patch('health_check.db.backends.TestModel.objects.create',
lambda title=None: MockDBModel(error_thrown=DatabaseError))
def test_raise_database_error(self):
db_backend = DatabaseBackend()
db_backend.run_check()
self.assertTrue(db_backend.errors)
self.assertIn('unavailable: Database error', db_backend.pretty_status())
@patch('health_check.db.backends.TestModel.objects.create',
lambda title=None: MockDBModel(error_thrown=Exception))
def test_raise_exception(self):
db_backend = DatabaseBackend()
with self.assertRaises(Exception):
db_backend.run_check()
|
intro/numpy/examples/plot_polyfit.py | zmoon/scipy-lecture-notes | 2,538 | 11167617 | <filename>intro/numpy/examples/plot_polyfit.py
"""
Fitting to polynomial
=====================
Plot noisy data and their polynomial fit
"""
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(12)
x = np.linspace(0, 1, 20)
y = np.cos(x) + 0.3*np.random.rand(20)
p = np.poly1d(np.polyfit(x, y, 3))
t = np.linspace(0, 1, 200)
plt.plot(x, y, 'o', t, p(t), '-')
plt.show()
|
EXAMPLES/Python/DHT11_SENSOR/dht11.py | CodeRancher/pigpio | 654 | 11167629 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import pigpio
class DHT11(object):
"""
The DHT11 class is a stripped version of the DHT22 sensor code by joan2937.
You can find the initial implementation here:
- https://github.com/srounet/pigpio/tree/master/EXAMPLES/Python/DHT22_AM2302_SENSOR
example code:
>>> pi = pigpio.pi()
>>> sensor = DHT11(pi, 4) # 4 is the data GPIO pin connected to your sensor
>>> for response in sensor:
.... print("Temperature: {}".format(response['temperature']))
.... print("Humidity: {}".format(response['humidity']))
"""
def __init__(self, pi, gpio):
"""
pi (pigpio): an instance of pigpio
gpio (int): gpio pin number
"""
self.pi = pi
self.gpio = gpio
self.high_tick = 0
self.bit = 40
self.temperature = 0
self.humidity = 0
self.either_edge_cb = None
self.setup()
def setup(self):
"""
Clears the internal gpio pull-up/down resistor.
Kills any watchdogs.
"""
self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)
self.pi.set_watchdog(self.gpio, 0)
self.register_callbacks()
def register_callbacks(self):
"""
Monitors RISING_EDGE changes using callback.
"""
self.either_edge_cb = self.pi.callback(
self.gpio,
pigpio.EITHER_EDGE,
self.either_edge_callback
)
def either_edge_callback(self, gpio, level, tick):
"""
Either Edge callbacks, called each time the gpio edge changes.
Accumulate the 40 data bits from the dht11 sensor.
"""
level_handlers = {
pigpio.FALLING_EDGE: self._edge_FALL,
pigpio.RISING_EDGE: self._edge_RISE,
pigpio.EITHER_EDGE: self._edge_EITHER
}
handler = level_handlers[level]
diff = pigpio.tickDiff(self.high_tick, tick)
handler(tick, diff)
def _edge_RISE(self, tick, diff):
"""
Handle Rise signal.
"""
val = 0
if diff >= 50:
val = 1
if diff >= 200: # Bad bit?
self.checksum = 256 # Force bad checksum
if self.bit >= 40: # Message complete
self.bit = 40
elif self.bit >= 32: # In checksum byte
self.checksum = (self.checksum << 1) + val
if self.bit == 39:
# 40th bit received
self.pi.set_watchdog(self.gpio, 0)
total = self.humidity + self.temperature
# is checksum ok ?
if not (total & 255) == self.checksum:
                    raise ValueError("DHT11 checksum mismatch")
elif 16 <= self.bit < 24: # in temperature byte
self.temperature = (self.temperature << 1) + val
elif 0 <= self.bit < 8: # in humidity byte
self.humidity = (self.humidity << 1) + val
else: # skip header bits
pass
self.bit += 1
def _edge_FALL(self, tick, diff):
"""
Handle Fall signal.
"""
self.high_tick = tick
if diff <= 250000:
return
self.bit = -2
self.checksum = 0
self.temperature = 0
self.humidity = 0
def _edge_EITHER(self, tick, diff):
"""
Handle Either signal.
"""
self.pi.set_watchdog(self.gpio, 0)
def read(self):
"""
Start reading over DHT11 sensor.
"""
self.pi.write(self.gpio, pigpio.LOW)
time.sleep(0.017) # 17 ms
self.pi.set_mode(self.gpio, pigpio.INPUT)
self.pi.set_watchdog(self.gpio, 200)
time.sleep(0.2)
def close(self):
"""
Stop reading sensor, remove callbacks.
"""
self.pi.set_watchdog(self.gpio, 0)
if self.either_edge_cb:
self.either_edge_cb.cancel()
self.either_edge_cb = None
def __iter__(self):
"""
Support the iterator protocol.
"""
return self
def next(self):
"""
Call the read method and return temperature and humidity informations.
"""
self.read()
response = {
'humidity': self.humidity,
'temperature': self.temperature
}
        return response
    __next__ = next  # Python 3 iterator protocol; next() above keeps Python 2 compatibility
if __name__ == '__main__':
pi = pigpio.pi()
sensor = DHT11(pi, 4)
for d in sensor:
print("temperature: {}".format(d['temperature']))
print("humidity: {}".format(d['humidity']))
time.sleep(1)
sensor.close()
|
help_utils/head_op.py | loceyi/CSL_RetinaNet_Tensorflow | 187 | 11167640 | <reponame>loceyi/CSL_RetinaNet_Tensorflow
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
def get_head_quadrant(head, gtbox):
"""
:param head: [head_x, head_y]
:param gtbox: [x_c, y_c, w, h, theta, label]
:return: head quadrant 0/1/2/3
"""
head_quadrant = []
for i, box in enumerate(gtbox):
        delta_x = head[i][0] - box[0]
        delta_y = head[i][1] - box[1]
        if (delta_x >= 0) and (delta_y >= 0):
            head_quadrant.append(0)
        elif (delta_x >= 0) and (delta_y <= 0):
            head_quadrant.append(1)
        elif (delta_x <= 0) and (delta_y <= 0):
            head_quadrant.append(2)
        else:
            head_quadrant.append(3)
return np.array(head_quadrant, np.int32)
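# Hypothetical example (values not from the original code): for a single box
# centred at (5, 5) with head point (6, 2), delta_x = 1 >= 0 and delta_y = -3 <= 0,
# so get_head_quadrant([[6, 2]], [[5, 5, 10, 4, -30, 1]]) returns np.array([1]).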
def get_head(gtboxes_and_label_batch):
"""
:param gtboxes_and_label_batch: [x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label]
:return: [x1, y1, x2, y2, x3, y3, x4, y4, label], [head_x, head_y]
"""
x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label = tf.unstack(gtboxes_and_label_batch, axis=1)
coords_label = tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, label]))
head = tf.transpose(tf.stack([head_x, head_y]))
return coords_label, head |
tests/micropython/const.py | LabAixBidouille/micropython | 303 | 11167644 | <reponame>LabAixBidouille/micropython
# test constant optimisation
X = const(123)
Y = const(X + 456)
print(X, Y + 1)
def f():
print(X, Y + 1)
f()
|
RecoEgamma/Examples/python/photonsWithConversionsAnalyzer_cfi.py | ckamtsikis/cmssw | 852 | 11167647 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
#
# Author: <NAME>, U. of Notre Dame, US
#
photonsWithConversionsAnalyzer = cms.EDAnalyzer("PhotonsWithConversionsAnalyzer",
phoProducer = cms.string('correctedPhotons'),
HistOutFile = cms.untracked.string('analyzer.root'),
moduleLabelMC = cms.untracked.string('source'),
moduleLabelTk = cms.untracked.string('g4SimHits'),
photonCollection = cms.string('correctedPhotonsWithConversions'),
moduleLabelHit = cms.untracked.string('g4SimHits'),
moduleLabelVtx = cms.untracked.string('g4SimHits')
)
|
samples/python/49.qnamaker-all-features/bots/__init__.py | Aliacf21/BotBuilder-Samples | 1,998 | 11167650 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .qna_bot import QnABot
__all__ = ["QnABot"]
|
bernoulli.py | susan120433/distribution-is-all-you-need | 1,390 | 11167697 | """
Code by <NAME>(@graykode)
https://en.wikipedia.org/wiki/Bernoulli_distribution
"""
import random
import numpy as np
from matplotlib import pyplot as plt
def bernoulli(p, k):
return p if k else 1 - p
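# For reference: this is the Bernoulli pmf, P(X = k) = p**k * (1 - p)**(1 - k)
# for k in {0, 1}, i.e. p when k is truthy and 1 - p otherwise.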
n_experiment = 100
p = 0.6
x = np.arange(n_experiment)
y = []
for _ in range(n_experiment):
pick = bernoulli(p, k=bool(random.getrandbits(1)))
y.append(pick)
u, s = np.mean(y), np.std(y)
plt.scatter(x, y, label=r'$\mu=%.2f,\ \sigma=%.2f$' % (u, s))
plt.legend()
plt.savefig('graph/bernoulli.png')
plt.show()
|
s5_r2/local/prepare_dir_structure.py | protofy/kaldi-tuda-de | 125 | 11167747 | # This script builds the folder structure needed to train the model. It also asks where bigger files (mfcc, lm, exp) should be stored.
# Copyright 2015 Language Technology, Technische Universitaet Darmstadt (author: <NAME>)
# Copyright 2018 Language Technology, Universitaet Hamburg (author: <NAME>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from common_utils import make_sure_path_exists
from six.moves import input
import errno
import os
import sys
def symlink_file(file1, file2):
    try:
        os.symlink(file1, file2)
    except OSError as e:
        if e.errno == errno.EEXIST:
            print('Omitted symlink', file1, '->', file2, ', because it already exists')
        else:
            raise
if not os.path.exists('run.sh'):
print('You have to run this python script from the base dir, where run.sh is located. WARNING: aborting.')
    sys.exit('wrong working directory')
data_lex_dir = 'data/lexicon/'
data_local_dir = 'data/local/'
data_local_dict_dir = 'data/local/dict/'
print('Creating local data directories...')
print('Creating directory if necessary:', data_lex_dir)
make_sure_path_exists(data_lex_dir)
print('Creating directory if necessary:', data_local_dir)
make_sure_path_exists(data_local_dir)
print('Creating directory if necessary:', data_local_dict_dir)
make_sure_path_exists(data_local_dict_dir)
#if exp and mfcc don't exist locally, create them as link to some other directory on a larger disk
if not os.path.exists('exp/') and not os.path.exists('mfcc/'):
default_dir = '/srv/data/speech/tuda_kaldi_de/'
myinput = ''
while(myinput != 'y' and myinput != 'n'):
        myinput = input('Do you want to symlink big data directories (features, models, wavs) to a path other than the current directory? (y/n) ')
data_dir = '.'
mfcc_dir_src = data_dir + '/mfcc/'
exp_dir_src = data_dir + '/exp/'
lm_dir_src = data_dir + '/lm/'
lm_dir_3gram_src = data_dir + '/lm/3gram-mincount/'
lang_dir_src = data_dir + '/lang/'
wav_dir_src = data_dir + '/wav/'
if myinput == 'y':
data_dir = input('Where do you want to store mfcc vectors and models (exp)? It should point to some largish disk. default: ' + default_dir + ' : ')
if data_dir == '':
data_dir = default_dir
if data_dir.endswith('/'):
data_dir = data_dir[:-1]
        mfcc_dir_src = data_dir + '/mfcc/'
        exp_dir_src = data_dir + '/exp/'
        lm_dir_src = data_dir + '/lm/'
        lm_dir_3gram_src = data_dir + '/lm/3gram-mincount/'
        lang_dir_src = data_dir + '/lang/'
        wav_dir_src = data_dir + '/wav/'
        print('Mfcc source dir:', mfcc_dir_src, 'model (exp) dir:', exp_dir_src)
print(mfcc_dir_src)
make_sure_path_exists(mfcc_dir_src)
print(exp_dir_src)
make_sure_path_exists(exp_dir_src)
print(lm_dir_src)
make_sure_path_exists(lm_dir_src)
print(lm_dir_3gram_src)
make_sure_path_exists(lm_dir_3gram_src)
print(lang_dir_src)
make_sure_path_exists(lang_dir_src)
print(wav_dir_src)
make_sure_path_exists(wav_dir_src)
symlink_file(mfcc_dir_src,'./mfcc')
symlink_file(exp_dir_src,'./exp')
symlink_file(lm_dir_src,'./data/local/lm')
symlink_file(lang_dir_src,'./data/local/lang')
symlink_file(wav_dir_src,'./data/wav')
else:
# just create the directories without symlinking them
        print('Creating directory if necessary:', mfcc_dir_src)
        make_sure_path_exists(mfcc_dir_src)
        print('Creating directory if necessary:', exp_dir_src)
        make_sure_path_exists(exp_dir_src)
        print('Creating directory if necessary:', lm_dir_src)
        make_sure_path_exists(lm_dir_src)
        print('Creating directory if necessary:', lang_dir_src)
        make_sure_path_exists(lang_dir_src)
        print('Creating directory if necessary:', wav_dir_src)
        make_sure_path_exists(wav_dir_src)
|
aicsimageio/tests/utils/test_io_utils.py | brisvag/aicsimageio | 110 | 11167786 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from aicsimageio.utils.io_utils import pathlike_to_fs
from ..conftest import LOCAL, REMOTE, get_resource_full_path
@pytest.mark.parametrize("host", [LOCAL, REMOTE])
@pytest.mark.parametrize(
"filename, enforce_exists",
[
("example.txt", False),
("example.txt", False),
("does-not-exist.good", False),
("does-not-exist.good", False),
pytest.param(
"does-not-exist.bad",
True,
marks=pytest.mark.raises(exception=FileNotFoundError),
),
pytest.param(
"does-not-exist.bad",
True,
marks=pytest.mark.raises(exception=FileNotFoundError),
),
],
)
def test_pathlike_to_fs(filename: str, host: str, enforce_exists: bool) -> None:
# Construct full filepath
uri = get_resource_full_path(filename, host)
pathlike_to_fs(uri, enforce_exists)
|
tests/import/import-from.test/kitchen/__init__.py | nanolikeyou/pysonar2 | 2,574 | 11167794 | # knife is not exported
__all__ = ["spoon", "fork"]
def spoon(x):
return x
def fork(x):
return [x,x]
# knife is not exported
def knife(x):
return x+1
|
tests/context.py | chatzich/python-bitcoin-utils | 104 | 11167804 | <gh_stars>100-1000
# Copyright (C) 2018-2020 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'bitcoinutils')))
import bitcoinutils
|
tests/api/endpoints/admin/test_sys_notifications.py | weimens/seahub | 420 | 11167811 | import json
from django.utils.crypto import get_random_string
from seahub.test_utils import BaseTestCase
from seahub.notifications.models import Notification
class AdminSysNotificationsTest(BaseTestCase):
def setUp(self):
self.url = '/api/v2.1/admin/sys-notifications/'
self.notification = Notification.objects.create()
def tearDown(self):
try:
self.notification.delete()
except Exception as e:
pass
def test_get_notifications(self):
self.login_as(self.admin)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert 'id' in json_resp['notifications'][0]
assert 'msg' in json_resp['notifications'][0]
assert 'is_current' in json_resp['notifications'][0]
def test_no_permission(self):
self.login_as(self.admin_no_other_permission)
resp = self.client.get(self.url)
self.assertEqual(403, resp.status_code)
def test_get_with_invalid_user_permission(self):
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(403, resp.status_code)
def test_create_notification(self):
self.login_as(self.admin)
data = {
'msg': get_random_string(),
}
resp = self.client.post(self.url, json.dumps(data),
'application/json')
json_resp = json.loads(resp.content)
self.assertEqual(200, resp.status_code)
assert json_resp['notification']['msg'] == data['msg']
def test_create_notification_with_invalid_user_permission(self):
self.login_as(self.user)
data = {
'msg': get_random_string(),
}
resp = self.client.post(self.url, json.dumps(data),
'application/json')
self.assertEqual(403, resp.status_code)
class AdminSysNotificationTest(BaseTestCase):
def setUp(self):
self.url = '/api/v2.1/admin/sys-notifications/'
self.notification = Notification.objects.create()
def tearDown(self):
try:
self.notification.delete()
except Exception as e:
pass
def test_set_notification_primary(self):
self.login_as(self.admin)
resp = self.client.put(self.url + str(self.notification.id) + '/')
self.assertEqual(200, resp.status_code)
    def test_delete_notification(self):
self.login_as(self.admin)
resp = self.client.delete(self.url + str(self.notification.id) + '/')
self.assertEqual(200, resp.status_code)
|