max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M)
---|---|---|---|---|
snmpsim/reporting/formats/alljson.py | RuiCunhaM/snmpsim | 298 | 12702029 | <filename>snmpsim/reporting/formats/alljson.py
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/snmpsim/license.html
#
# SNMP Agent Simulator
#
import json
import os
import re
import tempfile
import time
import uuid
from functools import wraps
from pyasn1.type import univ
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.carrier.asyncore.dgram import udp6
from pysnmp.entity import engine
from snmpsim import error
from snmpsim import log
from snmpsim.reporting.formats import base
def camel2snake(name):
s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def ensure_base_types(f):
"""Convert decorated function's kwargs to Python types.
Also turn camel-cased keys into snake case.
"""
def to_base_types(item):
if isinstance(item, engine.SnmpEngine):
item = item.snmpEngineID
if isinstance(item, (univ.Integer, univ.OctetString,
univ.ObjectIdentifier)):
item = item.prettyPrint()
if item.startswith('0x'):
item = item[2:]
return item
if isinstance(item, (udp.UdpTransportAddress, udp6.Udp6TransportAddress)):
return str(item[0])
return item
def to_dct(dct):
items = {}
for k, v in dct.items():
k = to_base_types(k)
k = camel2snake(k)
if isinstance(v, dict):
v = to_dct(v)
else:
v = to_base_types(v)
items[k] = v
return items
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **to_dct(kwargs))
return decorated_function
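# Illustrative sketch (not part of the upstream module): how `camel2snake` and
# `ensure_base_types` normalize keyword arguments. The decorated function below
# is hypothetical and exists only to demonstrate the conversion.
def _example_ensure_base_types():
    assert camel2snake('transportDomain') == 'transport_domain'

    @ensure_base_types
    def record(**kwargs):
        return kwargs

    # pyasn1 values are rendered via prettyPrint(), keys go camelCase -> snake_case
    assert record(transportCallCount=univ.Integer(1)) == {'transport_call_count': '1'}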
class NestingDict(dict):
"""Dict with sub-dict as a defaulted value"""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
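# Minimal sketch (not part of the upstream module): NestingDict creates missing
# intermediate dicts on first access, which is what the reporters' update_metrics()
# methods below rely on when they walk `metrics[...]` chains.
def _example_nesting_dict():
    metrics = NestingDict()
    metrics['transports']['udpv4']['packets'] = 1  # branches appear on demand
    assert metrics['transports']['udpv4']['packets'] == 1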
class BaseJsonReporter(base.BaseReporter):
"""Common base for JSON-backed family of reporters.
"""
REPORTING_PERIOD = 300
REPORTING_FORMAT = ''
REPORTING_VERSION = 1
PRODUCER_UUID = str(uuid.uuid1())
def __init__(self, *args):
if not args:
raise error.SnmpsimError(
'Missing %s parameter(s). Expected: '
'<method>:<reports-dir>[:dumping-period]' % self.__class__.__name__)
self._reports_dir = os.path.join(args[0], self.REPORTING_FORMAT)
if len(args) > 1:
try:
self.REPORTING_PERIOD = int(args[1])
except Exception as exc:
raise error.SnmpsimError(
'Malformed reports dumping period: %s' % args[1])
try:
if not os.path.exists(self._reports_dir):
os.makedirs(self._reports_dir)
except OSError as exc:
raise error.SnmpsimError(
'Failed to create reports directory %s: '
'%s' % (self._reports_dir, exc))
self._metrics = NestingDict()
self._next_dump = time.time() + self.REPORTING_PERIOD
log.debug(
'Initialized %s metrics reporter for instance %s, metrics '
'directory %s, dumping period is %s seconds' % (
self.__class__.__name__, self.PRODUCER_UUID, self._reports_dir,
self.REPORTING_PERIOD))
def flush(self):
"""Dump accumulated metrics into a JSON file.
Reset all counters upon success.
"""
if not self._metrics:
return
now = int(time.time())
if self._next_dump > now:
return
self._next_dump = now + self.REPORTING_PERIOD
self._metrics['format'] = self.REPORTING_FORMAT
self._metrics['version'] = self.REPORTING_VERSION
self._metrics['producer'] = self.PRODUCER_UUID
dump_path = os.path.join(self._reports_dir, '%s.json' % now)
log.debug('Dumping JSON metrics to %s' % dump_path)
try:
json_doc = json.dumps(self._metrics, indent=2)
with tempfile.NamedTemporaryFile(delete=False) as fl:
fl.write(json_doc.encode('utf-8'))
os.rename(fl.name, dump_path)
except Exception as exc:
log.error(
'Failure while dumping metrics into '
'%s: %s' % (dump_path, exc))
self._metrics.clear()
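# Consumer sketch (not part of this module): each dump is a standalone JSON file
# named <unix-timestamp>.json under the per-format reports directory, so a
# consumer can load and then delete it. The directory path below is hypothetical.
def _example_consume_reports(reports_dir='/var/snmpsim/reports/fulljson'):
    import glob
    for path in sorted(glob.glob(os.path.join(reports_dir, '*.json'))):
        with open(path) as fl:
            report = json.load(fl)
        # ... hand `report` over to a metrics backend here ...
        os.remove(path)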
class MinimalJsonReporter(BaseJsonReporter):
"""Collect activity metrics and dump brief report.
Accumulates and periodically dumps activity metrics reflecting
SNMP command responder performance.
These counters are accumulated in memory for some time, then get
written down as a JSON file indexed by time. Consumers are expected
to process each of these files and are free to remove them.
`MinimalJsonReporter` works with both SNMPv1/v2c and SNMPv3
command responder.
Activity metrics are arranged as a data structure like this:
.. code-block:: python
{
'format': 'minimaljson',
'version': 1,
'producer': <UUID>,
'first_update': '{timestamp}',
'last_update': '{timestamp}',
'transports': {
'total': 0,
'failures': 0
},
'agents': {
'total': 0,
'failures': 0
},
'data_files': {
'total': 0,
'failures': 0
}
}
"""
REPORTING_FORMAT = 'minimaljson'
def update_metrics(self, **kwargs):
"""Process activity update.
Update internal counters based on activity update information.
Parameters in `kwargs` serve two purposes: some are used to
build activity scopes e.g. {transport_domain}->{snmp_engine},
however those suffixed `*_count` are used to update corresponding
activity counters that eventually will make their way to
consumers.
"""
root_metrics = self._metrics
metrics = root_metrics
now = int(time.time())
if 'first_update' not in metrics:
metrics['first_update'] = now
metrics['last_update'] = now
metrics = root_metrics
try:
metrics = metrics['transports']
metrics['total'] = (
metrics.get('total', 0)
+ kwargs.get('transport_call_count', 0))
metrics['failures'] = (
metrics.get('failures', 0)
+ kwargs.get('transport_failure_count', 0))
except KeyError:
pass
metrics = root_metrics
try:
metrics = metrics['data_files']
metrics['total'] = (
metrics.get('total', 0)
+ kwargs.get('datafile_call_count', 0))
metrics['failures'] = (
metrics.get('failures', 0)
+ kwargs.get('datafile_failure_count', 0))
# TODO: some data is still not coming from snmpsim v2carch core
except KeyError:
pass
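# Illustrative call (not part of the upstream module): only the *_count keyword
# arguments feed this reporter's counters; the values below are made up.
def _example_minimal_json_update(reporter):
    reporter.update_metrics(
        transport_call_count=1,
        transport_failure_count=0,
        datafile_call_count=1,
        datafile_failure_count=0)
    reporter.flush()  # writes <timestamp>.json once REPORTING_PERIOD has elapsed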
class FullJsonReporter(BaseJsonReporter):
"""Collect activity metrics and dump detailed report.
Accumulates and periodically dumps activity counters reflecting
SNMP command responder performance.
These counters are accumulated in memory for some time, then get
written down as a JSON file indexed by time. Consumers are expected
to process each of these files and are free to remove them.
`FullJsonReporter` can only work within full SNMPv3 command responder.
Activity metrics are arranged as a data structure like this:
.. code-block:: python
{
'format': 'fulljson',
'version': 1,
'producer': <UUID>,
'first_update': '{timestamp}',
'last_update': '{timestamp}',
'{transport_protocol}': {
'{transport_endpoint}': { # local address
'transport_domain': '{transport_domain}', # endpoint ID
'{transport_address}': { # peer address
'packets': 0,
'parse_failures': 0, # n/a
'auth_failures': 0, # n/a
'context_failures': 0, # n/a
'{snmp_engine}': {
'{security_model}': {
'{security_level}': {
'{security_name}': {
'{context_engine_id}': {
'{context_name}': {
'{pdu_type}': {
'{data_file}': {
'pdus': 0,
'varbinds': 0,
'failures': 0,
'{variation_module}': {
'calls': 0,
'failures': 0
}
}
}
}
}
}
}
}
}
}
}
}
}
Where `{token}` is replaced with a concrete value taken from request.
"""
REPORTING_FORMAT = 'fulljson'
@ensure_base_types
def update_metrics(self, **kwargs):
"""Process activity update.
Update internal counters based on activity update information.
Parameters in `kwargs` serve two purposes: some are used to
build activity scopes e.g. {transport_domain}->{snmp_engine},
however those suffixed `*_count` are used to update corresponding
activity counters that eventually will make their way to
consumers.
"""
metrics = self._metrics
now = int(time.time())
if 'first_update' not in metrics:
metrics['first_update'] = now
metrics['last_update'] = now
try:
metrics = metrics[kwargs['transport_protocol']]
metrics = metrics['%s:%s' % kwargs['transport_endpoint']]
metrics['transport_domain'] = kwargs['transport_domain']
metrics = metrics[kwargs['transport_address']]
metrics['packets'] = (
metrics.get('packets', 0)
+ kwargs.get('transport_call_count', 0))
# TODO: collect these counters
metrics['parse_failures'] = 0
metrics['auth_failures'] = 0
metrics['context_failures'] = 0
metrics = metrics[kwargs['snmp_engine']]
metrics = metrics[kwargs['security_model']]
metrics = metrics[kwargs['security_level']]
metrics = metrics[kwargs['security_name']]
metrics = metrics[kwargs['context_engine_id']]
metrics = metrics[kwargs['pdu_type']]
metrics = metrics[kwargs['data_file']]
metrics['pdus'] = (
metrics.get('pdus', 0)
+ kwargs.get('datafile_call_count', 0))
metrics['failures'] = (
metrics.get('failures', 0)
+ kwargs.get('datafile_failure_count', 0))
metrics['varbinds'] = (
metrics.get('varbinds', 0)
+ kwargs.get('varbind_count', 0))
metrics = metrics['variations']
metrics = metrics[kwargs['variation']]
metrics['calls'] = (
metrics.get('calls', 0)
+ kwargs.get('variation_call_count', 0))
metrics['failures'] = (
metrics.get('failures', 0)
+ kwargs.get('variation_failure_count', 0))
except KeyError:
return
|
PWGJE/EMCALJetTasks/Tracks/analysis/base/ComparisonData.py | maroozm/AliPhysics | 114 | 12702038 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import GraphicsObject,SinglePanelPlot
from ROOT import TFile
class ComparisonObject(object):
"""
Base entry type for object inside comparison data
"""
def __init__(self, data, style):
self.__data = data
self.__style = style
def GetData(self):
return self.__data
def GetGraphicsObject(self):
return GraphicsObject(self.__data, self.__style)
def GetRootPrimitive(self):
self.__data.SetName(self.GetObjectName())
return self.__data
def Draw(self, pad, addToLegend = True):
pad.DrawGraphicsObject(self.GetGraphicsObject(), addToLegend, self.GetLegendTitle())
def GetLegendTitle(self):
"""
To be implemented in inheriting classes
"""
return ""
def GetObjectName(self):
"""
To be implemented in inheriting classes
"""
return ""
class ComparisonData(object):
"""
General comparison data collection
"""
def __init__(self):
"""
Constructor
"""
self.__entries = []
def GetEntries(self):
return self.__entries
def AddEntry(self, entry):
self.__entries.append(entry)
def DrawObjects(self, pad, addToLegend = True):
for entry in self.__entries:
entry.Draw(pad, addToLegend)
def GetListOfRootObjects(self):
"""
Get the list of entries converted to ROOT primitives
"""
rootprimitives = []
for entry in self.__entries:
rootprimitives.append(entry.GetRootPrimitive())
return rootprimitives
class ComparisonPlot(SinglePanelPlot):
"""
General comparison plot type
"""
def __init__(self):
"""
Constructor
"""
SinglePanelPlot.__init__(self)
self.__frame = None
self._comparisonContainer = None # to be specified in inheriting classes
self.__legendAttributes = None
self.__padattributes = {"logx":False, "logy":False, "gridx":False, "gridy":False}
def SetFrame(self, frame):
self.__frame = frame
def SetLegendAttributes(self, xmin, ymin, xmax, ymax):
self.__legendAttributes = {"xmin":xmin, "xmax":xmax, "ymin":ymin, "ymax":ymax}
def SetPadAttributes(self, logx, logy, gridx, gridy):
self.__padattributes["logx"] = logx
self.__padattributes["logy"] = logy
self.__padattributes["gridx"] = gridx
self.__padattributes["gridy"] = gridy
def _Create(self, canvasname, canvastitle):
"""
Make the plot
"""
self._OpenCanvas(canvasname, canvastitle)
pad = self._GetFramedPad()
if self.__padattributes["logx"]:
pad.GetPad().SetLogx()
if self.__padattributes["logy"]:
pad.GetPad().SetLogy()
pad.DrawFrame(self.__frame)
doLegend = False
if self.__legendAttributes:
doLegend = True
self._comparisonContainer.DrawObjects(pad, doLegend)
if doLegend:
pad.CreateLegend(self.__legendAttributes["xmin"], self.__legendAttributes["ymin"], self.__legendAttributes["xmax"], self.__legendAttributes["ymax"])
def WriteData(self, rootfilename):
"""
Write out the comparison objects to a ROOT file
"""
outputfile = TFile(rootfilename, "RECREATE")
for rootprim in self._comparisonContainer.GetListOfRootObjects():
rootprim.Write()
outputfile.Close() |
cea/analysis/costs/system_costs.py | architecture-building-systems/cea-toolbox | 121 | 12702047 | <reponame>architecture-building-systems/cea-toolbox
"""
costs according to supply systems
"""
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as gpdf
import itertools
import cea.config
import cea.inputlocator
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", " <NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def costs_main(locator, config):
# get local variables
capital = config.costs.capital
operational = config.costs.operational
# get demand
demand = pd.read_csv(locator.get_total_demand())
# get the databases for each main system
cooling_db, hot_water_db, electricity_db, heating_db = get_databases(demand, locator)
# COSTS DUE TO HEATING SERVICES (EXCEPT HOT WATER)
heating_final_services = ['OIL_hs', 'NG_hs', 'WOOD_hs', 'COAL_hs', 'GRID_hs', 'DH_hs']
costs_heating_services_dict = calc_costs_per_energy_service(heating_db, heating_final_services)
# COSTS DUE TO HOT WATER SERVICES
hot_water_final_services = ['OIL_ww', 'NG_ww', 'WOOD_ww', 'COAL_ww', 'GRID_ww', 'DH_ww']
costs_hot_water_services_dict = calc_costs_per_energy_service(hot_water_db, hot_water_final_services)
# COSTS DUE TO COOLING SERVICES
cooling_final_services = ['GRID_cs', 'GRID_cdata', 'GRID_cre', 'DC_cs']
costs_cooling_services_dict = calc_costs_per_energy_service(cooling_db, cooling_final_services)
# COSTS DUE TO ELECTRICITY SERVICES
electricity_final_services = ['GRID_pro', 'GRID_l', 'GRID_aux', 'GRID_v', 'GRID_a', 'GRID_data', 'GRID_ve']
costs_electricity_services_dict = calc_costs_per_energy_service(electricity_db, electricity_final_services)
# COMBINE INTO ONE DICT
result = dict(itertools.chain(costs_heating_services_dict.items(), costs_hot_water_services_dict.items(),
costs_cooling_services_dict.items(), costs_electricity_services_dict.items()))
# sum up for all fields
# create a dict to map from the convention of fields to the final variables
mapping_dict = {'_capex_total_USD': 'Capex_total_sys_USD',
'_opex_fixed_USD': 'Opex_fixed_sys_USD',
'_opex_var_USD': 'Opex_var_sys_USD',
'_opex_USD': 'Opex_sys_USD',
# all system annualized
'_capex_a_USD': 'Capex_a_sys_USD',
'_opex_a_var_USD': 'Opex_a_var_sys_USD',
'_opex_a_fixed_USD': 'Opex_a_fixed_sys_USD',
'_opex_a_USD': 'Opex_a_sys_USD',
'_TAC_USD': 'TAC_sys_USD',
# building_scale_systems
'_capex_total_building_scale_USD': 'Capex_total_sys_building_scale_USD',
'_opex_building_scale_USD': 'Opex_sys_building_scale_USD',
'_capex_a_building_scale_USD': 'Capex_a_sys_building_scale_USD',
'_opex_a_building_scale_USD': 'Opex_a_sys_building_scale_USD',
# district_scale_systems
'_capex_total_district_scale_USD': 'Capex_total_sys_district_scale_USD',
'_opex_district_scale_USD': 'Opex_sys_district_scale_USD',
'_capex_a_district_scale_USD': 'Capex_a_sys_district_scale_USD',
'_opex_a_district_scale_USD': 'Opex_a_sys_district_scale_USD',
# city_scale_systems
'_capex_total_city_scale_USD': 'Capex_total_sys_city_scale_USD',
'_opex_city_scale_USD': 'Opex_sys_city_scale_USD',
'_capex_a_city_scale_USD': 'Capex_a_sys_city_scale_USD',
'_opex_a_city_scale_USD': 'Opex_a_sys_city_scale_USD',
}
# initialize the names of the variables in the result to zero
n_buildings = demand.shape[0]
for _, value in mapping_dict.items():
result[value] = np.zeros(n_buildings)
# loop inside the results and sum the results
for field in result.keys():
for key, value in mapping_dict.items():
if key in field:
result[value] += result[field]
# add name and create dataframe
result.update({'Name': demand.Name.values})
result_out = pd.DataFrame(result)
# save dataframe
result_out.to_csv(locator.get_costs_operation_file(), index=False, float_format='%.2f', na_rep='nan')
def calc_costs_per_energy_service(database, heating_services):
result = {}
for service in heating_services:
# TOTALS
result[service + '_capex_total_USD'] = (database[service + '0_kW'].values *
database['efficiency'].values * # because it is based on the end use
database['CAPEX_USD2015kW'].values)
result[service + '_opex_fixed_USD'] = (result[service + '_capex_total_USD'] * database['O&M_%'].values / 100)
result[service + '_opex_var_USD'] = database[service + '_MWhyr'].values * database[
'Opex_var_buy_USD2015kWh'].values * 1000
result[service + '_opex_USD'] = result[service + '_opex_fixed_USD'] + result[service + '_opex_var_USD']
# ANNUALIZED
result[service + '_capex_a_USD'] = np.vectorize(calc_capex_annualized)(result[service + '_capex_total_USD'],
database['IR_%'],
database['LT_yr'])
result[service + '_opex_a_fixed_USD'] = np.vectorize(calc_opex_annualized)(result[service + '_opex_fixed_USD'],
database['IR_%'],
database['LT_yr'])
result[service + '_opex_a_var_USD'] = np.vectorize(calc_opex_annualized)(result[service + '_opex_var_USD'],
database['IR_%'],
database['LT_yr'])
result[service + '_opex_a_USD'] = np.vectorize(calc_opex_annualized)(result[service + '_opex_USD'],
database['IR_%'],
database['LT_yr'])
result[service + '_TAC_USD'] = result[service + '_opex_a_USD'] + result[service + '_capex_a_USD']
# GET CONNECTED AND DISCONNECTED
for field in ['_capex_total_USD', '_capex_a_USD', '_opex_USD', '_opex_a_USD']:
field_district = field.split("_USD")[0] + "_district_scale_USD"
field_building_scale = field.split("_USD")[0] + "_building_scale_USD"
field_city_scale = field.split("_USD")[0] + "_city_scale_USD"
result[service + field_district], \
result[service + field_building_scale], \
result[service + field_city_scale] = np.vectorize(calc_scale_costs)(result[service + field],
database['scale'])
return result
def calc_scale_costs(value, flag_scale):
if flag_scale == "BUILDING":
district = 0.0
building = value
city = 0.0
elif flag_scale == "DISTRICT":
district = value
building = 0.0
city = 0.0
elif flag_scale == "CITY":
district = 0.0
building = 0.0
city = value
elif flag_scale == "NONE":
if value == 0.0 or np.isnan(value):
district = 0.0
building = 0.0
city = 0.0
else:
raise ValueError("the scale is NONE but somehow there is a cost here?"
" the inputs of SUPPLY database may be wrong")
else:
raise ValueError("the scale in the system is {}, this is not a valid argument"
"valid arguments are CITY, DISTRICT, BUILDING, NONE".format(flag_scale))
return district, building, city
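# Worked example (not part of the original script): calc_scale_costs routes one
# cost value into the (district, building, city) buckets, and costs_main applies
# it element-wise through np.vectorize. The numbers below are made up.
def _example_calc_scale_costs():
    assert calc_scale_costs(100.0, "BUILDING") == (0.0, 100.0, 0.0)
    assert calc_scale_costs(100.0, "DISTRICT") == (100.0, 0.0, 0.0)
    district, building, city = np.vectorize(calc_scale_costs)(
        np.array([10.0, 20.0]), np.array(["BUILDING", "CITY"]))
    return district, building, city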
def get_databases(demand, locator):
supply_systems = gpdf.from_file(locator.get_building_supply()).drop('geometry', axis=1)
data_all_in_one_systems = pd.read_excel(locator.get_database_supply_assemblies(), sheet_name=None)
factors_heating = data_all_in_one_systems['HEATING']
factors_dhw = data_all_in_one_systems['HOT_WATER']
factors_cooling = data_all_in_one_systems['COOLING']
factors_electricity = data_all_in_one_systems['ELECTRICITY']
factors_resources = pd.read_excel(locator.get_database_feedstocks(), sheet_name=None)
# get the mean of all values for this
factors_resources_simple = [(name, values['Opex_var_buy_USD2015kWh'].mean()) for name, values in
factors_resources.items()]
factors_resources_simple = pd.DataFrame(factors_resources_simple,
columns=['code', 'Opex_var_buy_USD2015kWh']).append(
# append NONE choice with zero values
{'code': 'NONE'}, ignore_index=True).fillna(0)
# local variables
# calculate the total operational non-renewable primary energy demand and CO2 emissions
## create data frame for each type of end use energy containing the type of supply system use, the final energy
## demand and the primary energy and emissions factors for each corresponding type of supply system
heating_costs = factors_heating.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
['code_x', 'feedstock', 'scale', 'efficiency', 'Opex_var_buy_USD2015kWh', 'CAPEX_USD2015kW', 'LT_yr', 'O&M_%',
'IR_%']]
cooling_costs = factors_cooling.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
['code_x', 'feedstock', 'scale', 'efficiency', 'Opex_var_buy_USD2015kWh', 'CAPEX_USD2015kW', 'LT_yr', 'O&M_%',
'IR_%']]
dhw_costs = factors_dhw.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
['code_x', 'feedstock', 'scale', 'efficiency', 'Opex_var_buy_USD2015kWh', 'CAPEX_USD2015kW', 'LT_yr', 'O&M_%',
'IR_%']]
electricity_costs = factors_electricity.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
['code_x', 'feedstock', 'scale', 'efficiency', 'Opex_var_buy_USD2015kWh', 'CAPEX_USD2015kW', 'LT_yr', 'O&M_%',
'IR_%']]
heating = supply_systems.merge(demand, on='Name').merge(heating_costs, left_on='type_hs', right_on='code_x')
dhw = supply_systems.merge(demand, on='Name').merge(dhw_costs, left_on='type_dhw', right_on='code_x')
cooling = supply_systems.merge(demand, on='Name').merge(cooling_costs, left_on='type_cs', right_on='code_x')
electricity = supply_systems.merge(demand, on='Name').merge(electricity_costs, left_on='type_el', right_on='code_x')
return cooling, dhw, electricity, heating
def main(config):
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
print('Running system-costs with scenario = %s' % config.scenario)
costs_main(locator=locator, config=config)
if __name__ == '__main__':
main(cea.config.Configuration())
|
cloudmarker/events/azvmextensionevent.py | TinLe/cloudmarker | 208 | 12702048 | """Microsoft Azure VM extension event.
This module defines the :class:`AzVMExtensionEvent` class that
evaluates Azure VM extensions. This plugin works on the virtual
machine properties found in the ``ext`` bucket of ``vm_instance_view``
records.
"""
import logging
from cloudmarker import util
_log = logging.getLogger(__name__)
class AzVMExtensionEvent:
"""Az VM Data extension event plugin."""
def __init__(self, whitelisted=None, blacklisted=None, required=None):
"""Create an instance of :class:`AzVMExtensionEvent`.
Arguments:
whitelisted (list): List of whitelisted extensions.
blacklisted (list): List of blacklisted extensions.
required (list): List of required extensions.
"""
if whitelisted is None:
whitelisted = []
if blacklisted is None:
blacklisted = []
if required is None:
required = []
self._whitelisted = whitelisted
self._blacklisted = blacklisted
self._required = required
def eval(self, record):
"""Evaluate Azure virtual machine for extensions.
Arguments:
record (dict): A virtual machine record.
Yields:
dict: An event record representing an Azure VM with
misconfigured extensions
"""
com = record.get('com', {})
if com is None:
return
if com.get('cloud_type') != 'azure':
return
ext = record.get('ext', {})
if ext is None:
return
if ext.get('record_type') != 'vm_instance_view':
return
extensions = ext.get('extensions')
added_extensions = set(extensions)
if self._blacklisted:
added_blacklisted_ext = list(set(self._blacklisted) &
added_extensions)
yield from _get_azure_vm_blacklisted_extension_event(
com, ext, added_blacklisted_ext)
if self._whitelisted:
added_unapproved_ext = list(added_extensions -
(set(self._whitelisted) -
set(self._blacklisted)))
yield from _get_azure_vm_unapproved_extension_event(
com, ext, added_unapproved_ext)
if self._required:
missing_required_ext = list((set(self._required) -
set(self._blacklisted)) -
added_extensions)
yield from _get_azure_vm_required_extension_event(
com, ext, missing_required_ext)
def done(self):
"""Perform cleanup work.
Currently, this method does nothing. This may change in future.
"""
def _get_azure_vm_blacklisted_extension_event(com, ext, blacklisted):
"""Evaluate Azure VM for blacklisted extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
blacklisted (list): Added blacklisted extension list
Returns:
dict: An event record representing VM with blacklisted extensions
"""
if not blacklisted:
return
friendly_cloud_type = util.friendly_string(com.get('cloud_type'))
reference = com.get('reference')
description = (
'{} virtual machine {} has blacklisted extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(blacklisted))
)
recommendation = (
'Check {} virtual machine {} and remove blacklisted extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(blacklisted))
)
event_record = {
# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext': util.merge_dicts(ext, {
'record_type': 'vm_blacklisted_extension_event'
}),
'com': {
'cloud_type': com.get('cloud_type'),
'record_type': 'vm_blacklisted_extension_event',
'reference': reference,
'description': description,
'recommendation': recommendation,
}
}
_log.info('Generating vm_blacklisted_extension_event; %r', event_record)
yield event_record
def _get_azure_vm_unapproved_extension_event(com, ext, not_whitelisted):
"""Evaluate Azure VM for unapproved extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
not_whitelisted (list): Not whitelisted extension list
Returns:
dict: An event record representing VM with unapproved extensions
"""
if not not_whitelisted:
return
friendly_cloud_type = util.friendly_string(com.get('cloud_type'))
reference = com.get('reference')
description = (
'{} virtual machine {} has unapproved extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(not_whitelisted))
)
recommendation = (
'Check {} virtual machine {} and remove unapproved extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(not_whitelisted))
)
event_record = {
# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext': util.merge_dicts(ext, {
'record_type': 'vm_unapproved_extension_event'
}),
'com': {
'cloud_type': com.get('cloud_type'),
'record_type': 'vm_unapproved_extension_event',
'reference': reference,
'description': description,
'recommendation': recommendation,
}
}
_log.info('Generating vm_unapproved_extension_event; %r', event_record)
yield event_record
def _get_azure_vm_required_extension_event(com, ext, missing_required):
"""Evaluate Azure VM for unapproved extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
missing_required (list): Missing required extension list
Returns:
dict: An event record representing VM with missing required extensions
"""
if not missing_required:
return
friendly_cloud_type = util.friendly_string(com.get('cloud_type'))
reference = com.get('reference')
description = (
'{} virtual machine {} is missing required extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(missing_required))
)
recommendation = (
'Check {} virtual machine {} and add required extensions {}'
.format(friendly_cloud_type, reference,
util.friendly_list(missing_required))
)
event_record = {
# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext': util.merge_dicts(ext, {
'record_type': 'vm_required_extension_event'
}),
'com': {
'cloud_type': com.get('cloud_type'),
'record_type': 'vm_required_extension_event',
'reference': reference,
'description': description,
'recommendation': recommendation,
}
}
_log.info('Generating vm_required_extension_event; %r', event_record)
yield event_record
|
tests/test_s1_download.py | d-chambers/OpenSarToolkit | 131 | 12702057 | <filename>tests/test_s1_download.py
import pytest
import os
import pandas as pd
from tempfile import TemporaryDirectory
from ost.helpers.asf import check_connection as check_connection_asf
from ost.helpers.scihub import check_connection as check_connection_scihub, connect
from ost.helpers.settings import HERBERT_USER
from ost.s1.download import download_sentinel1
from ost.helpers.helpers import check_zipfile
@pytest.mark.skip(reason="not running in pip build")
def test_asf_connection():
herbert_uname = HERBERT_USER["uname"]
herbert_password = HERBERT_USER["<PASSWORD>"]
response_code = check_connection_asf(uname=herbert_uname, pword=<PASSWORD>)
control_code = 200
assert response_code == control_code
@pytest.mark.skip(reason="not running in pip build")
def test_esa_scihub_connection(s1_grd_notnr_ost_product):
herbert_uname = HERBERT_USER["uname"]
herbert_password = <PASSWORD>["<PASSWORD>"]
response_code = check_connection_scihub(uname=herbert_uname, pword=<PASSWORD>)
control_code = 200
assert response_code == control_code
opener = connect(
base_url="https://apihub.copernicus.eu/apihub/",
uname=herbert_uname,
pword=<PASSWORD>,
)
control_uuid = "1b43fb7d-bd2c-41cd-86a1-3442b1fbd5bb"
uuid = s1_grd_notnr_ost_product[1].scihub_uuid(opener)
assert uuid == control_uuid
@pytest.mark.skip(reason="not running in pip build")
def test_esa_scihub_download(s1_grd_notnr_ost_product, mirror=1):
herbert_uname = HERBERT_USER["uname"]
herbert_password = <PASSWORD>["pword"]
df = pd.DataFrame({"identifier": [s1_grd_notnr_ost_product[1].scene_id]})
with TemporaryDirectory(dir=os.getcwd()) as temp:
download_sentinel1(
inventory_df=df,
download_dir=temp,
mirror=mirror,
concurrent=1,
uname=herbert_uname,
pword=<PASSWORD>,
)
product_path = s1_grd_notnr_ost_product[1].get_path(download_dir=temp)
return_code = check_zipfile(product_path)
assert return_code is None
@pytest.mark.skip(reason="not running in pip build")
def test_asf_download(s1_grd_notnr_ost_product, mirror=2):
herbert_uname = HERBERT_USER["uname"]
herbert_password = <PASSWORD>["<PASSWORD>"]
df = pd.DataFrame({"identifier": [s1_grd_notnr_ost_product[1].scene_id]})
with TemporaryDirectory(dir=os.getcwd()) as temp:
download_sentinel1(
inventory_df=df,
download_dir=temp,
mirror=mirror,
concurrent=1,
uname=herbert_uname,
pword=<PASSWORD>,
)
from ost.helpers.helpers import check_zipfile
product_path = s1_grd_notnr_ost_product[1].get_path(download_dir=temp)
return_code = check_zipfile(product_path)
assert return_code is None
|
tests/pyvs/html_validator.py | tlalexander/stitchEm | 182 | 12702089 | <reponame>tlalexander/stitchEm
from HTMLParser import HTMLParser
class HTMLValidator(HTMLParser):
"""
super simple html validator : check that each opening tag is closed
with respect to tag hierarchy
"""
def __init__(self):
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.tag_stack.append(tag)
def handle_endtag(self, tag):
try:
open_tag = self.tag_stack.pop()
assert open_tag == tag
except IndexError:
raise Exception(
"found an end tag but there was no more opened ones")
except AssertionError:
raise Exception(
"mismatch between opened tag {} and closing tag {}".format(
open_tag, tag))
def feed(self, data):
self.tag_stack = []
HTMLParser.feed(self, data)
|
malaya_speech/utils/__init__.py | ishine/malaya-speech | 111 | 12702097 | <gh_stars>100-1000
from malaya_boilerplate.utils import (
available_device,
available_gpu,
close_session,
describe_availability,
)
from malaya_boilerplate.frozen_graph import (
nodes_session,
generate_session,
get_device,
)
from malaya_boilerplate import backblaze
from malaya_boilerplate import frozen_graph
from malaya_boilerplate import utils
from malaya_speech import package, url
def print_cache(location=None):
return utils.print_cache(package=package, location=location)
def delete_cache(location):
return utils.delete_cache(package=package, location=location)
def delete_all_cache():
return utils.delete_all_cache(package=package)
def check_file(file, s3_file=None, **kwargs):
return backblaze.check_file(file, package, url, s3_file=s3_file, **kwargs)
def load_graph(frozen_graph_filename, **kwargs):
return frozen_graph.load_graph(package, frozen_graph_filename, **kwargs)
from . import arange
from . import aligner
from . import astype
from . import char
from . import combine
from . import constant
from . import dist
from . import featurization
from . import generator
from . import griffin_lim
from . import group
from . import metrics
from . import outlier
from . import padding
from . import read
from . import speechsplit
from . import split
from . import text
from . import subword
from . import tf_featurization
from . import validator
|
deluca/agents/_gpc.py | google/deluca | 105 | 12702119 | <reponame>google/deluca<filename>deluca/agents/_gpc.py
# Copyright 2021 The Deluca Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""deluca.agents._gpc"""
from numbers import Real
from typing import Callable
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad
from jax import jit
from deluca.agents._lqr import LQR
from deluca.agents.core import Agent
def quad_loss(x: jnp.ndarray, u: jnp.ndarray) -> Real:
"""
Quadratic loss.
Args:
x (jnp.ndarray):
u (jnp.ndarray):
Returns:
Real
"""
return jnp.sum(x.T @ x + u.T @ u)
class GPC(Agent):
def __init__(
self,
A: jnp.ndarray,
B: jnp.ndarray,
Q: jnp.ndarray = None,
R: jnp.ndarray = None,
K: jnp.ndarray = None,
start_time: int = 0,
cost_fn: Callable[[jnp.ndarray, jnp.ndarray], Real] = None,
H: int = 3,
HH: int = 2,
lr_scale: Real = 0.005,
decay: bool = True,
) -> None:
"""
Description: Initialize the dynamics of the model.
Args:
A (jnp.ndarray): system dynamics
B (jnp.ndarray): system dynamics
Q (jnp.ndarray): cost matrices (i.e. cost = x^TQx + u^TRu)
R (jnp.ndarray): cost matrices (i.e. cost = x^TQx + u^TRu)
K (jnp.ndarray): Starting policy (optional). Defaults to LQR gain.
start_time (int):
cost_fn (Callable[[jnp.ndarray, jnp.ndarray], Real]):
H (postive int): history of the controller
HH (positive int): history of the system
lr_scale (Real):
lr_scale_decay (Real):
decay (Real):
"""
cost_fn = cost_fn or quad_loss
d_state, d_action = B.shape # State & Action Dimensions
self.A, self.B = A, B # System Dynamics
self.t = 0 # Time Counter (for decaying learning rate)
self.H, self.HH = H, HH
self.lr_scale, self.decay = lr_scale, decay
self.bias = 0
# Model Parameters
# initial linear policy / perturbation contributions / bias
# TODO: need to address problem of LQR with jax.lax.scan
self.K = K if K is not None else LQR(self.A, self.B, Q, R).K
self.M = jnp.zeros((H, d_action, d_state))
# Past H + HH noises ordered increasing in time
self.noise_history = jnp.zeros((H + HH, d_state, 1))
# past state and past action
self.state, self.action = jnp.zeros((d_state, 1)), jnp.zeros((d_action, 1))
def last_h_noises():
"""Get noise history"""
return jax.lax.dynamic_slice_in_dim(self.noise_history, -H, H)
self.last_h_noises = last_h_noises
def policy_loss(M, w):
"""Surrogate cost function"""
def action(state, h):
"""Action function"""
return -self.K @ state + jnp.tensordot(
M, jax.lax.dynamic_slice_in_dim(w, h, H), axes=([0, 2], [0, 1])
)
def evolve(state, h):
"""Evolve function"""
return self.A @ state + self.B @ action(state, h) + w[h + H], None
final_state, _ = jax.lax.scan(evolve, np.zeros((d_state, 1)), np.arange(H - 1))
return cost_fn(final_state, action(final_state, HH - 1))
self.policy_loss = policy_loss
self.grad = jit(grad(policy_loss, (0, 1)))
def __call__(self, state: jnp.ndarray) -> jnp.ndarray:
"""
Description: Return the action based on current state and internal parameters.
Args:
state (jnp.ndarray): current state
Returns:
jnp.ndarray: action to take
"""
action = self.get_action(state)
self.update(state, action)
return action
def update(self, state: jnp.ndarray, u: jnp.ndarray) -> None:
"""
Description: update agent internal state.
Args:
state (jnp.ndarray):
Returns:
None
"""
noise = state - self.A @ self.state - self.B @ u
self.noise_history = self.noise_history.at[0].set(noise)
self.noise_history = jnp.roll(self.noise_history, -1, axis=0)
delta_M, delta_bias = self.grad(self.M, self.noise_history)
lr = self.lr_scale
lr *= (1 / (self.t + 1)) if self.decay else 1
self.M -= lr * delta_M
self.bias -= lr * delta_bias
# update state
self.state = state
self.t += 1
def get_action(self, state: jnp.ndarray) -> jnp.ndarray:
"""
Description: get action from state.
Args:
state (jnp.ndarray):
Returns:
jnp.ndarray
"""
return -self.K @ state + jnp.tensordot(self.M, self.last_h_noises(), axes=([0, 2], [0, 1]))
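# Minimal rollout sketch (not part of the deluca package): drive GPC on a toy
# noise-free double-integrator; the matrices and horizon below are made up.
def _example_gpc_rollout():
    A = jnp.array([[1.0, 1.0], [0.0, 1.0]])
    B = jnp.array([[0.0], [1.0]])
    agent = GPC(A, B, Q=jnp.eye(2), R=jnp.eye(1))
    state = jnp.zeros((2, 1))
    for _ in range(10):
        action = agent(state)           # returns the control and updates the agent
        state = A @ state + B @ action  # plant step without disturbance
    return state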
|
python/afdko/buildcff2vf.py | liuxilu/afdko | 732 | 12702121 | <filename>python/afdko/buildcff2vf.py
# Copyright 2017 Adobe. All rights reserved.
"""
Builds a CFF2 variable font from a designspace file and its UFO masters.
"""
import argparse
from ast import literal_eval
from copy import deepcopy
import logging
import os
import re
import sys
from fontTools import varLib
from fontTools.cffLib.specializer import commandsToProgram
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.misc.fixedTools import otRound
from fontTools.misc.psCharStrings import T2OutlineExtractor, T2CharString
from fontTools.ttLib import TTFont
from fontTools.varLib.cff import (CFF2CharStringMergePen,
VarLibCFFPointTypeMergeError)
from afdko.fdkutils import validate_path
__version__ = '2.0.2'
STAT_FILENAME = 'override.STAT.ttx'
class CFF2VFError(Exception):
"""Base exception for buildcff2vf"""
# set up for printing progress notes
def progress(self, message, *args, **kws):
# Note: message must contain the format specifiers for any strings in args.
level = self.getEffectiveLevel()
self._log(level, message, args, **kws)
PROGRESS_LEVEL = logging.INFO + 5
PROGESS_NAME = "progress"
logging.addLevelName(PROGRESS_LEVEL, PROGESS_NAME)
logger = logging.getLogger(__name__)
logging.Logger.progress = progress
def getSubset(subset_Path):
with open(subset_Path, "rt") as fp:
text_lines = fp.readlines()
locationDict = {}
cur_key_list = None
for li, line in enumerate(text_lines):
idx = line.find('#')
if idx >= 0:
line = line[:idx]
line = line.strip()
if not line:
continue
if line[0] == "(":
cur_key_list = []
location_list = literal_eval(line)
for location_entry in location_list:
cur_key_list.append(location_entry)
if location_entry not in locationDict:
locationDict[location_entry] = []
else:
m = re.match(r"(\S+)", line)
if m:
if cur_key_list is None:
logger.error(
"Error parsing subset file. "
"Seeing a glyph name record before "
"seeing a location record.")
logger.error(f'Line number: {li}.')
logger.error(f'Line text: {line}.')
for key in cur_key_list:
locationDict[key].append(m.group(1))
return locationDict
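# Sketch of the include-glyphs file parsed by getSubset() (hypothetical content,
# derived from the parser above): a line starting with "(" is literal_eval'd into
# a sequence of location keys, each of which must equal
# tuple(source.location.items()) for a designspace source; the glyph-name lines
# that follow are kept for those locations, and '#' starts a comment. For a
# single-axis 'weight' designspace this could look like:
#
#   ((('weight', 0.0),), (('weight', 1000.0),))   # both masters
#   .notdef
#   A
#   ((('weight', 1000.0),),)                      # bold master only
#   A.alt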
def subset_masters(designspace, subsetDict):
from fontTools import subset
subset_options = subset.Options(notdef_outline=True, layout_features='*')
for ds_source in designspace.sources:
key = tuple(ds_source.location.items())
included = set(subsetDict[key])
ttf_font = ds_source.font
subsetter = subset.Subsetter(options=subset_options)
subsetter.populate(glyphs=included)
subsetter.subset(ttf_font)
subset_path = f'{os.path.splitext(ds_source.path)[0]}.subset.otf'
logger.progress(f'Saving subset font {subset_path}')
ttf_font.save(subset_path)
ds_source.font = TTFont(subset_path)
class CompatibilityPen(CFF2CharStringMergePen):
def __init__(self, default_commands,
glyphName, num_masters, master_idx, roundTolerance=0.5):
super(CompatibilityPen, self).__init__(
default_commands, glyphName, num_masters, master_idx,
roundTolerance=0.5)
self.fixed = False
def add_point(self, point_type, pt_coords):
if self.m_index == 0:
self._commands.append([point_type, [pt_coords]])
else:
cmd = self._commands[self.pt_index]
if cmd[0] != point_type:
# Fix some issues that show up in some
# CFF workflows, even when fonts are
# topologically merge compatible.
success, new_pt_coords = self.check_and_fix_flat_curve(
cmd, point_type, pt_coords)
if success:
logger.progress(f"Converted between line and curve in "
f"source font index '{self.m_index}' "
f"glyph '{self.glyphName}', point index "
f"'{self.pt_index}' at '{pt_coords}'. "
f"Please check correction.")
pt_coords = new_pt_coords
else:
success = self.check_and_fix_closepath(
cmd, point_type, pt_coords)
if success:
# We may have incremented self.pt_index
cmd = self._commands[self.pt_index]
if cmd[0] != point_type:
success = False
if not success:
raise VarLibCFFPointTypeMergeError(
point_type, self.pt_index, self.m_index, cmd[0],
self.glyphName)
self.fixed = True
cmd[1].append(pt_coords)
self.pt_index += 1
def make_flat_curve(self, cur_coords):
# Convert line coords to curve coords.
dx = self.round(cur_coords[0] / 3.0)
dy = self.round(cur_coords[1] / 3.0)
new_coords = [dx, dy, dx, dy,
cur_coords[0] - 2 * dx,
cur_coords[1] - 2 * dy]
return new_coords
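# Worked example (illustrative): a 30x12 rlineto delta becomes the flat curve
# [10, 4, 10, 4, 10, 4] -- three equal thirds whose deltas still sum to (30, 12).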
def make_curve_coords(self, coords, is_default):
# Convert line coords to curve coords.
if is_default:
new_coords = []
for cur_coords in coords:
master_coords = self.make_flat_curve(cur_coords)
new_coords.append(master_coords)
else:
cur_coords = coords
new_coords = self.make_flat_curve(cur_coords)
return new_coords
def check_and_fix_flat_curve(self, cmd, point_type, pt_coords):
success = False
if (point_type == 'rlineto') and (cmd[0] == 'rrcurveto'):
is_default = False # the line is in the master font we are adding
pt_coords = self.make_curve_coords(pt_coords, is_default)
success = True
elif (point_type == 'rrcurveto') and (cmd[0] == 'rlineto'):
is_default = True # the line is in the default font commands
expanded_coords = self.make_curve_coords(cmd[1], is_default)
cmd[1] = expanded_coords
cmd[0] = point_type
success = True
return success, pt_coords
def check_and_fix_closepath(self, cmd, point_type, pt_coords):
""" Some workflows drop a lineto which closes a path.
Also, if the last segment is a curve in one master,
and a flat curve in another, the flat curve can get
converted to a closing lineto, and then dropped.
Test if:
1) one master op is a moveto,
2) the previous op for this master does not close the path
3) in the other master the current op is not a moveto
4) the current op in the other master closes the current path
If the default font is missing the closing lineto, insert it,
then proceed with merging the current op and pt_coords.
If the current region is missing the closing lineto
and therefore the current op is a moveto,
then add closing coordinates to self._commands,
and increment self.pt_index.
Note that this may insert a point in the default font command list,
so after using it, 'cmd' needs to be reset.
return True if we can fix this issue.
"""
if point_type == 'rmoveto':
# If this is the case, we know that cmd[0] != 'rmoveto'
# The previous op must not close the path for this region font.
prev_moveto_coords = self._commands[self.prev_move_idx][1][-1]
prv_coords = self._commands[self.pt_index - 1][1][-1]
if prev_moveto_coords == prv_coords[-2:]:
return False
# The current op must close the path for the default font.
prev_moveto_coords2 = self._commands[self.prev_move_idx][1][0]
prv_coords = self._commands[self.pt_index][1][0]
if prev_moveto_coords2 != prv_coords[-2:]:
return False
# Add the closing line coords for this region
# so self._commands, then increment self.pt_index
# so that the current region op will get merged
# with the next default font moveto.
if cmd[0] == 'rrcurveto':
new_coords = self.make_curve_coords(prev_moveto_coords, False)
cmd[1].append(new_coords)
self.pt_index += 1
return True
if cmd[0] == 'rmoveto':
# The previous op must not close the path for the default font.
prev_moveto_coords = self._commands[self.prev_move_idx][1][0]
prv_coords = self._commands[self.pt_index - 1][1][0]
if prev_moveto_coords == prv_coords[-2:]:
return False
# The current op must close the path for this region font.
prev_moveto_coords2 = self._commands[self.prev_move_idx][1][-1]
if prev_moveto_coords2 != pt_coords[-2:]:
return False
# Insert the close path segment in the default font.
# We omit the last coords from the previous moveto
# as it will be supplied by the current region point
# after this function returns.
new_cmd = [point_type, None]
prev_move_coords = self._commands[self.prev_move_idx][1][:-1]
# Note that we omit the last region's coord from prev_move_coords,
# as that is from the current region, and we will add the
# current pts' coords from the current region in its place.
if point_type == 'rlineto':
new_cmd[1] = prev_move_coords
else:
# We omit the last set of coords from the
# previous moveto, as it will be supplied by the coords
# for the current region pt.
new_cmd[1] = self.make_curve_coords(prev_move_coords, True)
self._commands.insert(self.pt_index, new_cmd)
return True
return False
def getCharStrings(self, num_masters, private=None, globalSubrs=None,
default_idx=0):
""" A command looks like:
[op_name, [
[source 0 arglist for op],
[source 1 arglist for op],
...
[source n arglist for op],
I am not optimizing this here, as that will be done when
the CFF2 Charstring is created in fontTools.varLib.build().
If I did, I would have to rearrange the arguments to:
[
[arg 0 for source 0 ... arg 0 for source n]
[arg 1 for source 0 ... arg 1 for source n]
...
[arg M for source 0 ... arg M for source n]
]
before calling specialize.
"""
t2List = []
merged_commands = self._commands
for i in range(num_masters):
commands = []
for op in merged_commands:
source_op = [op[0], op[1][i]]
commands.append(source_op)
program = commandsToProgram(commands)
if self._width is not None:
assert not self._CFF2, (
"CFF2 does not allow encoding glyph width in CharString.")
program.insert(0, otRound(self._width))
if not self._CFF2:
program.append('endchar')
charString = T2CharString(
program=program, private=private, globalSubrs=globalSubrs)
t2List.append(charString)
# if default_idx is not 0, we need to move it to the right index.
if default_idx:
default_font_cs = t2List.pop(0)
t2List.insert(default_idx, default_font_cs)
return t2List
def _get_cs(charstrings, glyphName):
if glyphName not in charstrings:
return None
return charstrings[glyphName]
def do_compatibility(vf, master_fonts, default_idx):
default_font = vf
default_charStrings = default_font['CFF '].cff.topDictIndex[0].CharStrings
glyphOrder = default_font.getGlyphOrder()
charStrings = [
font['CFF '].cff.topDictIndex[0].CharStrings for font in master_fonts]
for gname in glyphOrder:
all_cs = [_get_cs(cs, gname) for cs in charStrings]
if len([gs for gs in all_cs if gs is not None]) < 2:
continue
# remove the None's from the list.
cs_list = [cs for cs in all_cs if cs]
num_masters = len(cs_list)
default_charstring = default_charStrings[gname]
compat_pen = CompatibilityPen([], gname, num_masters, 0)
default_charstring.outlineExtractor = T2OutlineExtractor
default_charstring.draw(compat_pen)
# Add the coordinates from all the other regions to the
# blend lists in the CFF2 charstring.
region_cs = cs_list[:]
del region_cs[default_idx]
for region_idx, region_charstring in enumerate(region_cs, start=1):
compat_pen.restart(region_idx)
region_charstring.draw(compat_pen)
if compat_pen.fixed:
fixed_cs_list = compat_pen.getCharStrings(
num_masters, private=default_charstring.private,
globalSubrs=default_charstring.globalSubrs,
default_idx=default_idx)
cs_list = list(cs_list)
for i, cs in enumerate(cs_list):
mi = all_cs.index(cs)
charStrings[mi][gname] = fixed_cs_list[i]
def otfFinder(s):
return s.replace('.ufo', '.otf')
def suppress_glyph_names(tt_font):
postTable = tt_font['post']
postTable.formatType = 3.0
postTable.compile(tt_font)
def remove_mac_names(tt_font):
name_tb = tt_font['name']
name_tb.names = [nr for nr in name_tb.names if nr.platformID != 1]
def update_stat_name_ids(tt_font):
"""
The STAT spec says that axes must point to the same name ID used
in the fvar so check here and update if they are different.
"""
fvar = tt_font['fvar']
stat = tt_font['STAT']
fvar_axis_names = {}
for axis in fvar.axes:
fvar_axis_names[axis.axisTag] = axis.axisNameID
for axis in stat.table.DesignAxisRecord.Axis:
fvar_id = fvar_axis_names.get(axis.AxisTag)
if fvar_id is None:
# Not required for all STAT axes to be in fvar
continue
if axis.AxisNameID != fvar_id:
axis.AxisNameID = fvar_id
def validate_stat_axes(tt_font):
"""
Ensure all axes defined in fvar also exist in the STAT table
"""
fvar = tt_font['fvar']
stat = tt_font['STAT']
fvar_axis_tags = [axis.axisTag for axis in fvar.axes]
stat_axis_tags = [axis.AxisTag for axis in
stat.table.DesignAxisRecord.Axis]
diff = set(fvar_axis_tags) - set(stat_axis_tags)
if diff:
raise CFF2VFError(
f'All fvar axes must also be defined in the STAT table. '
f'Axes for {str(list(diff))} are missing.'
)
def validate_stat_values(ttFont):
"""
Check axis values in the STAT table to ensure they are within the ranges
defined in the fvar
"""
fvar = ttFont['fvar']
stat = ttFont['STAT']
logger.progress('Validating STAT axis values...')
errors = []
stat_range_vals = {}
if getattr(stat.table, 'AxisValueArray', None):  # hasattr() cannot take a dotted path
for av in stat.table.AxisValueArray.AxisValue:
axis_tag = stat.table.DesignAxisRecord.Axis[av.AxisIndex].AxisTag
if axis_tag not in stat_range_vals:
stat_range_vals[axis_tag] = []
if hasattr(av, 'NominalValue'):
stat_range_vals[axis_tag].append(av.NominalValue)
if not av.RangeMinValue <= av.NominalValue <= av.RangeMaxValue:
errors.append(
f'Invalid default value {av.NominalValue} for range '
f'{av.RangeMinValue} - {av.RangeMaxValue}'
)
if hasattr(av, 'RangeMaxValue'):
stat_range_vals[axis_tag].append(av.RangeMaxValue)
if hasattr(av, 'RangeMinValue'):
stat_range_vals[axis_tag].append(av.RangeMinValue)
if hasattr(av, 'Value'):
stat_range_vals[axis_tag].append(av.Value)
for axis in fvar.axes:
stat_ref = stat_range_vals.get(axis.axisTag)
if stat_ref is None:
continue
out_of_range = []
for val in stat_ref:
if (val > axis.maxValue and int(val) != 32767) or \
(val < axis.minValue and int(val) != -32767):
out_of_range.append(val)
if out_of_range:
expected_range = f'{axis.minValue} - {axis.maxValue}'
errors.append(
f'{axis.axisTag} values {str(sorted(set(out_of_range)))} are '
f'outside of range {expected_range} specified in fvar'
)
if errors:
msg = '\n'.join(errors)
raise CFF2VFError(f'Invalid STAT table. {msg}')
def import_stat_override(tt_font, stat_file_path):
if 'STAT' in tt_font:
logger.warning(
f'Overwriting existing STAT table with {stat_file_path}.'
)
tt_font.importXML(stat_file_path)
def get_options(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__
)
parser.add_argument(
'--version',
action='version',
version=__version__
)
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='verbose mode\n'
'Use -vv for debug mode'
)
parser.add_argument(
'-d',
'--designspace',
metavar='PATH',
dest='design_space_path',
type=validate_path,
help='path to design space file',
required=True
)
parser.add_argument(
'-o',
'--output',
dest='var_font_path',
metavar='PATH',
help='path to output variable font file. Default is base name\n'
'of the design space file.',
)
parser.add_argument(
'-k',
'--keep-glyph-names',
action='store_true',
help='Preserve glyph names in output variable font\n'
"(using 'post' table format 2).",
)
parser.add_argument(
'--omit-mac-names',
action='store_true',
help="Omit Macintosh strings from 'name' table.",
)
parser.add_argument(
'-c',
'--check-compat',
dest='check_compatibility',
action='store_true',
help='Check outline compatibility in source fonts, and fix flat\n'
'curves.',
)
parser.add_argument(
'-i',
'--include-glyphs',
dest='include_glyphs_path',
metavar='PATH',
type=validate_path,
help='Path to file containing a python dict specifying which\n'
'glyph names should be included from which source fonts.'
)
options = parser.parse_args(args)
if not options.var_font_path:
var_font_path = f'{os.path.splitext(options.design_space_path)[0]}.otf'
options.var_font_path = var_font_path
if not options.verbose:
level = PROGRESS_LEVEL
logging.basicConfig(level=level, format="%(message)s")
else:
level = logging.INFO
logging.basicConfig(level=level)
logger.setLevel(level)
return options
def main(args=None):
options = get_options(args)
if os.path.exists(options.var_font_path):
os.remove(options.var_font_path)
designspace = DesignSpaceDocument.fromfile(options.design_space_path)
ds_data = varLib.load_designspace(designspace)
master_fonts = varLib.load_masters(designspace, otfFinder)
logger.progress("Reading source fonts...")
for i, master_font in enumerate(master_fonts):
designspace.sources[i].font = master_font
# Subset source fonts
if options.include_glyphs_path:
logger.progress("Subsetting source fonts...")
subsetDict = getSubset(options.include_glyphs_path)
subset_masters(designspace, subsetDict)
if options.check_compatibility:
logger.progress("Checking outline compatibility in source fonts...")
font_list = [src.font for src in designspace.sources]
default_font = designspace.sources[ds_data.base_idx].font
vf = deepcopy(default_font)
# We copy vf from default_font, because we use VF to hold
# merged arguments from each source font charstring - this alters
# the font, which we don't want to do to the default font.
do_compatibility(vf, font_list, ds_data.base_idx)
logger.progress("Building variable OTF (CFF2) font...")
# Note that we now pass in the design space object, rather than a path to
# the design space file, in order to pass in the modified source fonts
# fonts without having to recompile and save them.
try:
varFont, _, _ = varLib.build(designspace, otfFinder)
except VarLibCFFPointTypeMergeError:
logger.error("The input set requires compatibilization. Please try "
"again with the -c (--check-compat) option.")
return 0
if not options.keep_glyph_names:
suppress_glyph_names(varFont)
if options.omit_mac_names:
remove_mac_names(varFont)
stat_file_path = os.path.join(
os.path.dirname(options.var_font_path), STAT_FILENAME)
if os.path.exists(stat_file_path):
logger.progress("Importing STAT table override...")
import_stat_override(varFont, stat_file_path)
validate_stat_axes(varFont)
validate_stat_values(varFont)
update_stat_name_ids(varFont)
varFont.save(options.var_font_path)
logger.progress(f"Built variable font '{options.var_font_path}'")
if __name__ == '__main__':
sys.exit(main())
|
codigo_das_aulas/aula_03/aula_03.py | VeirichR/curso-python-selenium | 234 | 12702127 | <gh_stars>100-1000
from selenium.webdriver import Firefox
from time import sleep
url = 'https://curso-python-selenium.netlify.app/aula_03.html'
navegador = Firefox()
navegador.get(url)
sleep(1)
a = navegador.find_element_by_tag_name('a')
for click in range(10):
ps = navegador.find_elements_by_tag_name('p')
a.click()
print(f'Value of the last p: {ps[-1].text}, value of click: {click}')
print(f'The values are equal: {ps[-1].text == str(click)}')
navegador.quit()
|
build/python/examples/ClusteringModulesExamples/cluster_tree_example.py | torydebra/grt | 818 | 12702140 | import GRT
import sys
import numpy as np
import argparse
def main():
# Parse the data filename from the argument list
parser = argparse.ArgumentParser(description='Process some data.')
parser.add_argument('filename', help='A data file')
args = parser.parse_args()
filename = args.filename
# Load some training data to train the ClusterTree model
trainingData = np.loadtxt(filename, delimiter=',')
# Create a new ClusterTree instance
ctree = GRT.ClusterTree()
# Set the number of steps that will be used to choose the best splitting values
# More steps will give you a better model, but will take longer to train
ctree.setNumSplittingSteps( 100 )
# Set the maximum depth of the tree
ctree.setMaxDepth( 10 )
# Set the minimum number of samples allowed per node
ctree.setMinNumSamplesPerNode( 10 )
# Set the minimum RMS error allowed per node
ctree.setMinRMSErrorPerNode( 0.1 )
# Train a cluster tree model
if not ctree.train( trainingData ):
print("Failed to train model!")
sys.exit(1)
# if not ctree.save("CTreeModel.grt"): # this fails for some reason
# print("Failed to save model!")
# sys.exit(1)
# if not ctree.load("CTreeModel.grt"):
# print("Failed to train model!")
# sys.exit(1)
# Print the tree
ctree._print()
if __name__ == '__main__':
main()
sys.exit(0) |
modules/countdown.py | nikolas/jenni | 133 | 12702164 | #!/usr/bin/env python
"""
countdown.py - jenni Countdown Module
Copyright 2011-2013, yano (yanovich.net)
Licensed under the Eiffel Forum License 2.
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
"""
from datetime import datetime, timedelta
## TODO: just scrape, https://www.timeanddate.com/countdown/generic?iso=20170411T070001&p0=1440&msg=DO+SFO2+DOWNTIME&ud=1&font=cursive
bad_format = "Please use correct format: .countdown 2012 12 21 You can also try: '.nye -5'"
## 2036 02 07
def get_output(calculate_date, today, nye):
#ending = "%s %s-%s-%sT%s00Z"
verb = str()
if calculate_date <= today:
diff = today - calculate_date
verb = "since"
# if nye:
# return get_output(calculate_date + timedelta(days=365), today, False)
else:
diff = calculate_date - today
verb = "until"
output = str()
mills = 0
centuries = 0
decades = 0
years = 0
days = abs(diff.days)
unit = str()
if days > 365250:
mills = diff.days / 365250
days -= mills * 365250
if mills == 1: unit = "millennium"
else: unit = "millenniums"
if mills:
output += "%s %s, " % (str(mills), unit)
if days > 36525:
centuries = days / 36525
days -= centuries * 36525
if centuries == 1: unit = "century"
else: unit = "centuries"
if centuries:
output += "%s %s, " % (str(centuries), unit)
if days > 3652:
decades = days / 3652
days -= decades * 3652
if decades == 1: unit = "decade"
else: unit = "decades"
if decades:
output += "%s %s, " % (str(decades), unit)
if days > 365:
years = days / 365
days -= years * 365
if years == 1: unit = "year"
else: unit = "years"
if years:
output += "%s %s, " % (str(years), unit)
if days:
if days == 1: unit = "day"
else: unit = "days"
output += "%s %s, " % (str(days), unit)
hours = diff.seconds / 3600
if hours:
if hours == 1: unit = "hour"
else: unit = "hours"
output += "%s %s, " % (str(hours), unit)
minutes = (diff.seconds/60 - hours * 60)
if minutes:
if minutes > 1: unit = "minutes"
elif minutes == 1: unit = "minute"
output += "%s %s, " % (str(minutes), unit)
seconds = (diff.seconds/60.0 - hours * 60) - (diff.seconds/60 - hours * 60)
seconds *= 60.0
seconds = int(seconds)
if seconds:
if seconds > 1: unit = 'seconds'
elif seconds == 1: unit = 'second'
output += '%s %s, ' % (str(seconds), unit)
if output and output[0] == "-":
output = output[1:]
#output += ending % (verb, year.zfill(4), month.zfill(2), day.zfill(2), offset.zfill(2))
return '%s%s' % (output, verb)
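## Worked example (illustrative, not from the original module): for a target
## 100 days, 2 hours and 5 minutes in the future, get_output() accumulates the
## non-zero units left to right and appends the verb last, returning
## "100 days, 2 hours, 5 minutes, until".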
def two(inc):
return str(inc).zfill(2)
def three(inc):
return str(inc).zfill(3)
def generic_countdown(jenni, input):
""" .countdown <year> <month> <day> - displays a countdown to a given date. """
ending = "%s %s-%s-%sT%s"
text = input.group(2)
if text and len(text.split()) >= 3:
text = input.group(2).split()
year = text[0]
month = text[1]
day = text[2]
if not year.isdigit() and not month.isdigit() and not day.isdigit():
return jenni.reply('What are you even trying to do?')
try:
offset = text[3]
except:
offset = 0
else:
if text:
offset = text.split()[0]
else:
offset = 0
year = str(int(datetime.now().year))
month = '01'
day = '01'
try:
float(offset)
except:
#return jenni.reply(':-(')
offset = 0
if text and len(text) >= 3 and year.isdigit() and month.isdigit() and day.isdigit():
calculate_date = datetime(int(year), int(month), int(day), 0, 0, 0)
if abs(float(offset)) >= 14:
return jenni.reply('Do you not love me anymore?')
today = datetime.now() + timedelta(hours=float(offset))
nye = False
elif -14 <= int(offset) <= 14:
if len(input) <= 3:
offset = 0
else:
offset = offset
calculate_date = datetime(int(datetime.now().year), 1, 1, 0, 0, 0)
today = datetime.now() + timedelta(hours=int(offset))
nye = True
else:
return jenni.say(bad_format)
output = get_output(calculate_date, today, nye)
if offset == 0:
off = '00'
else:
if offset[0] == '+' or offset[0] == '-':
offset = offset[1:]
prefix = str()
if float(offset) >= 0:
prefix = '+'
else:
prefix = '-'
if float(offset) % 1 == 0:
off = '%s%s00' % (prefix, two(offset))
else:
parts = str(offset).split('.')
wholenum = parts[0]
first_part = two(wholenum)
second_part = int(float('.%s' % parts[1]) * 60.0)
second_part = two(second_part)
off = '%s%s%s' % (prefix, first_part, second_part)
output = ending % (output, two(year), two(month), two(day), off)
jenni.say(output)
generic_countdown.commands = ['countdown', 'cd', 'nye']
generic_countdown.priority = 'low'
if __name__ == '__main__':
print __doc__.strip()
|
awx/main/tests/unit/api/serializers/test_token_serializer.py | bhyunki/awx | 11,396 | 12702166 | <filename>awx/main/tests/unit/api/serializers/test_token_serializer.py
import pytest
from awx.api.serializers import OAuth2TokenSerializer
@pytest.mark.parametrize('scope, expect', [('', False), ('read', True), ('read read', False), ('write read', True), ('read rainbow', False)])
def test_invalid_scopes(scope, expect):
assert OAuth2TokenSerializer()._is_valid_scope(scope) is expect
|
java/java2py/java2python-0.5.1/java2python/mod/__init__.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 3,266 | 12702182 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# java2python.mod -> provides simple modification routines for the
# library and projects using it.
#
# The java2python.mod.basic module contains various functions for
# sprinkling generated source with docstrings, comments, decorators,
# etc.
#
# The java2python.mod.includes module contains functions that the
# library will include directly -- as source code -- in the generated
# output.
#
# The java2python.mod.transform module contains values and functions
# for transforming input AST nodes.
|
examples/django_qrcode/qrcode/views.py | dbajar/segno | 254 | 12702191 | <filename>examples/django_qrcode/qrcode/views.py
import io
from django.http import HttpResponse, HttpResponseRedirect
from django.core.files.base import ContentFile
from django.shortcuts import render
import segno
from .models import Ticket
from .forms import TicketForm
def index(request):
"""Renders the form to create a ticket"""
if request.method == 'POST':
form = TicketForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
qr = segno.make_qr(name)
buff = io.BytesIO()
qr.save(buff, kind='png', scale=3, dark='darkblue')
ticket = Ticket(name=name)
ticket.qrcode.save(name + '.png', ContentFile(buff.getvalue()),
save=True)
return HttpResponseRedirect('/thanks/')
else:
form = TicketForm()
return render(request, 'qrcode/example.html', {'form': form})
def thanks(request):
return HttpResponse('Thanks, a new ticket was created')
|
custom_components/waste_collection_schedule/waste_collection_schedule/source/rh_entsorgung_de.py | UBS-P/hacs_waste_collection_schedule | 142 | 12702229 | <gh_stars>100-1000
import re
from datetime import date
from html.parser import HTMLParser
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "RH Entsorgung"
DESCRIPTION = "Source for RHE (Rhein Hunsrück Entsorgung)."
URL = "https://www.rh-entsorgung.de"
TEST_CASES = {
"Horn": {
"city": "Rheinböllen",
"street": "Erbacher Straße",
"house_number": 13,
"address_suffix": "A",
},
"Bärenbach": {
"city": "Bärenbach",
"street": "Schwarzener Straße",
"house_number": 10,
},
}
# Parser for HTML input (hidden) text
class HiddenInputParser(HTMLParser):
def __init__(self):
super().__init__()
self._args = {}
@property
def args(self):
return self._args
def handle_starttag(self, tag, attrs):
if tag == "input":
d = dict(attrs)
if str(d["type"]).lower() == "hidden":
self._args[d["name"]] = d["value"] if "value" in d else ""
class CollectionParser(HTMLParser):
def __init__(self) -> None:
super().__init__()
self._entries: list[Collection] = []
self._current_type: str = None
self._capture_type: bool = False
self._capture_date: bool = False
self._date_pattern = re.compile(
r"(?P<day>\d{2})\.(?P<month>\d{2})\.(?P<year>\d{4})"
)
@property
def entries(self):
return self._entries
def handle_starttag(self, tag: str, attrs) -> None:
if tag == "p":
d = dict(attrs)
if str(d["class"]).lower() == "work":
self._capture_type = True
if self._current_type is not None and tag == "td":
d = dict(attrs)
if ("class" in d) and ("dia_c_abfuhrdatum" in str(d["class"])):
self._capture_date = True
def handle_data(self, data: str) -> None:
if self._capture_type:
self._current_type = data
if self._capture_date:
match = self._date_pattern.match(data)
self._entries.append(
Collection(
date(int(match.group(3)), int(match.group(2)), int(match.group(1))),
self._current_type,
)
)
def handle_endtag(self, tag: str) -> None:
if tag == "p" and self._capture_type:
self._capture_type = False
if tag == "td" and self._capture_date:
self._capture_date = False
class Source:
def __init__(
self,
city: str,
street: str,
house_number: int,
address_suffix: str = "",
garbage_types: list[int] = [1, 2, 3, 4, 5],
):
self._city = city
self._street = street
self._hnr = house_number
self._suffix = address_suffix
self._garbage_types = garbage_types
def fetch(self):
r = requests.get(
"https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet",
params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
)
r.encoding = "utf-8"
parser = HiddenInputParser()
parser.feed(r.text)
args = parser.args
args["Ort"] = self._city
args["Strasse"] = self._street
args["Hausnummer"] = str(self._hnr)
args["Hausnummerzusatz"] = self._suffix
args["Zeitraum"] = "Die Leerungen der nächsten 3 Monate"
args["SubmitAction"] = "forward"
for type in range(1, 6):
args[f"ContainerGewaehlt_{type}"] = (
"on" if type in self._garbage_types else "off"
)
        # The first request returns the wrong city, so it has to be called twice!
r = requests.post(
"https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet",
data=args,
)
r = requests.post(
"https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet",
data=args,
)
r.encoding = "utf-8"
date_parser = CollectionParser()
date_parser.feed(r.text)
return date_parser.entries
|
observations/r/mathpnl.py | hajime9652/observations | 199 | 12702238 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def mathpnl(path):
"""mathpnl
Data loads lazily. Type data(mathpnl) into the console.
A data.frame with 3850 rows and 52 variables:
- distid. district identifier
- intid. intermediate school district
- lunch. percent eligible for free lunch
- enrol. school enrollment
- ptr. pupil/teacher: 1995-98
- found. foundation grant, $: 1995-98
- expp. expenditure per pupil
- revpp. revenue per pupil
- avgsal. average teacher salary
- drop. high school dropout rate, percent
- grad. high school grad. rate, percent
- math4. percent satisfactory, 4th grade math
- math7. percent satisfactory, 7th grade math
- choice. number choice students
- psa. # public school academy studs.
- year. 1992-1998
- staff. staff per 1000 students
- avgben. avg teacher fringe benefits
- y92. =1 if year == 1992
- y93. =1 if year == 1993
- y94. =1 if year == 1994
- y95. =1 if year == 1995
- y96. =1 if year == 1996
- y97. =1 if year == 1997
- y98. =1 if year == 1998
- lexpp. log(expp)
- lfound. log(found)
- lexpp\_1. lexpp[\_n-1]
- lfnd\_1. lfnd[\_n-1]
- lenrol. log(enrol)
- lenrolsq. lenrol^2
- lunchsq. lunch^2
- lfndsq. lfnd^2
- math4\_1. math4[\_n-1]
- cmath4. math4 - math4\_1
- gexpp. lexpp - lexpp\_1
- gexpp\_1. gexpp[\_n-1
- gfound. lfound - lfnd\_1
- gfnd\_1. gfound[\_n-1]
- clunch. lunch - lunch[\_n-1]
- clnchsq. lunchsq - lunchsq[\_n-1]
- genrol. lenrol - lenrol[\_n-1]
- genrolsq. genrol^2
- expp92. expp in 1992
- lexpp92. log(expp92)
- math4\_92. math4 in 1992
- cpi. consumer price index
- rexpp. real spending per pupil, 1997$
- lrexpp. log(rexpp)
- lrexpp\_1. lrexpp[\_n-1]
- grexpp. lrexpp - lrexpp\_1
- grexpp\_1. grexpp[\_n-1]
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `mathpnl.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3850 rows and 52 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'mathpnl.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/mathpnl.csv'
maybe_download_and_extract(path, url,
save_file_name='mathpnl.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
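# Illustrative usage (assumed local path, not part of the original module):
#   x, meta = mathpnl('~/observations-data')
#   x.shape == (3850, 52) and meta['columns'] lists the headers described above.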
|
toyplot/reportlab/__init__.py | eaton-lab/toyplot | 438 | 12702248 | <filename>toyplot/reportlab/__init__.py
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
"""Support functions for rendering using ReportLab."""
import base64
import io
import re
import numpy
import reportlab.lib.colors
import reportlab.lib.utils
import toyplot.color
import toyplot.units
def render(svg, canvas):
"""Render the SVG representation of a toyplot canvas to a ReportLab canvas.
Parameters
----------
svg: xml.etree.ElementTree.Element
SVG representation of a :class:`toyplot.canvas.Canvas` returned by
:func:`toyplot.svg.render()`.
canvas: reportlab.pdfgen.canvas.Canvas
ReportLab canvas that will be used to render the plot.
"""
def get_fill(root, style):
if "fill" not in style:
return None, None # pragma: no cover
gradient_id = re.match("^url[(]#(.*)[)]$", style["fill"])
if gradient_id:
gradient_id = gradient_id.group(1)
gradient_xml = root.find(".//*[@id='%s']" % gradient_id)
if gradient_xml.tag != "linearGradient":
raise NotImplementedError("Only linear gradients are implemented.") # pragma: no cover
if gradient_xml.get("gradientUnits") != "userSpaceOnUse":
raise NotImplementedError("Only userSpaceOnUse gradients are implemented.") # pragma: no cover
return None, gradient_xml
color = toyplot.color.css(style["fill"])
if color is None:
return None, None
fill_opacity = float(style.get("fill-opacity", 1.0))
opacity = float(style.get("opacity", 1.0))
fill = toyplot.color.rgba(
color["r"],
color["g"],
color["b"],
color["a"] * fill_opacity * opacity,
)
return fill, None
def get_stroke(style):
if "stroke" not in style:
return None # pragma: no cover
color = toyplot.color.css(style["stroke"])
if color is None:
return None
stroke_opacity = float(style.get("stroke-opacity", 1.0))
opacity = float(style.get("opacity", 1.0))
return toyplot.color.rgba(
color["r"],
color["g"],
color["b"],
color["a"] * stroke_opacity * opacity,
)
def get_line_cap(style):
if "stroke-linecap" not in style:
return 0
elif style["stroke-linecap"] == "butt":
return 0
elif style["stroke-linecap"] == "round":
return 1
elif style["stroke-linecap"] == "square":
return 2
def get_font_family(style):
if "font-family" not in style:
return None # pragma: no cover
bold = True if style.get("font-weight", "") == "bold" else False
italic = True if style.get("font-style", "") == "italic" else False
for font_family in style["font-family"].split(","):
font_family = font_family.lower()
if font_family in get_font_family.substitutions:
font_family = get_font_family.substitutions[font_family]
return get_font_family.font_table[(font_family, bold, italic)]
raise ValueError("Unknown font family: %s" % style["font-family"]) # pragma: no cover
get_font_family.font_table = {
("courier", False, False): "Courier",
("courier", True, False): "Courier-Bold",
("courier", False, True): "Courier-Oblique",
("courier", True, True): "Courier-BoldOblique",
("helvetica", False, False): "Helvetica",
("helvetica", True, False): "Helvetica-Bold",
("helvetica", False, True): "Helvetica-Oblique",
("helvetica", True, True): "Helvetica-BoldOblique",
("times", False, False): "Times-Roman",
("times", True, False): "Times-Bold",
("times", False, True): "Times-Italic",
("times", True, True): "Times-BoldItalic",
}
get_font_family.substitutions = {
"courier": "courier",
"helvetica": "helvetica",
"monospace": "courier",
"sans-serif": "helvetica",
"serif": "times",
"times": "times",
}
def set_fill_color(canvas, color):
canvas.setFillColorRGB(color["r"], color["g"], color["b"])
canvas.setFillAlpha(numpy.asscalar(color["a"]))
def set_stroke_color(canvas, color):
canvas.setStrokeColorRGB(color["r"], color["g"], color["b"])
canvas.setStrokeAlpha(numpy.asscalar(color["a"]))
def render_element(root, element, canvas, styles):
canvas.saveState()
current_style = {}
if styles:
current_style.update(styles[-1])
for declaration in element.get("style", "").split(";"):
if declaration == "":
continue
key, value = declaration.split(":")
current_style[key] = value
styles.append(current_style)
if "stroke-width" in current_style:
canvas.setLineWidth(float(current_style["stroke-width"]))
if "stroke-dasharray" in current_style:
canvas.setDash([float(length) for length in current_style["stroke-dasharray"].split(",")])
if current_style.get("visibility") != "hidden":
if "transform" in element.attrib:
for transformation in element.get("transform").split(")")[::1]:
if transformation:
transform, arguments = transformation.split("(")
arguments = arguments.split(",")
if transform.strip() == "translate":
if len(arguments) == 2:
canvas.translate(float(arguments[0]), float(arguments[1]))
elif transform.strip() == "rotate":
if len(arguments) == 1:
canvas.rotate(float(arguments[0]))
if len(arguments) == 3:
canvas.translate(float(arguments[1]), float(arguments[2]))
canvas.rotate(float(arguments[0]))
canvas.translate(-float(arguments[1]), -float(arguments[2]))
if element.tag == "svg":
if "background-color" in current_style:
set_fill_color(canvas, toyplot.color.css(current_style["background-color"]))
canvas.rect(
0,
0,
float(element.get("width")[:-2]),
float(element.get("height")[:-2]),
stroke=0,
fill=1,
)
if current_style["border-style"] != "none":
set_stroke_color(canvas, toyplot.color.css(current_style["border-color"]))
canvas.setLineWidth(float(current_style["border-width"]))
canvas.rect(
0,
0,
float(element.get("width")[:-2]),
float(element.get("height")[:-2]),
stroke=1,
fill=0,
)
for child in element:
render_element(root, child, canvas, styles)
elif element.tag == "a":
# At the moment, it doesn't look like reportlab supports external hyperlinks.
for child in element:
render_element(root, child, canvas, styles)
elif element.tag == "g":
if element.get("clip-path", None) is not None:
clip_id = element.get("clip-path")[5:-1]
clip_path = root.find(".//*[@id='%s']" % clip_id)
for child in clip_path:
if child.tag == "rect":
x = float(child.get("x"))
y = float(child.get("y"))
width = float(child.get("width"))
height = float(child.get("height"))
path = canvas.beginPath()
path.moveTo(x, y)
path.lineTo(x + width, y)
path.lineTo(x + width, y + height)
path.lineTo(x, y + height)
path.close()
canvas.clipPath(path, stroke=0, fill=1)
else:
toyplot.log.error("Unhandled clip tag: %s", child.tag) # pragma: no cover
for child in element:
render_element(root, child, canvas, styles)
elif element.tag == "clipPath":
pass
elif element.tag == "line":
stroke = get_stroke(current_style)
if stroke is not None:
set_stroke_color(canvas, stroke)
canvas.setLineCap(get_line_cap(current_style))
canvas.line(
float(element.get("x1", 0)),
float(element.get("y1", 0)),
float(element.get("x2", 0)),
float(element.get("y2", 0)),
)
elif element.tag == "path":
stroke = get_stroke(current_style)
if stroke is not None:
set_stroke_color(canvas, stroke)
canvas.setLineCap(get_line_cap(current_style))
path = canvas.beginPath()
commands = element.get("d").split()
while commands:
command = commands.pop(0)
if command == "L":
path.lineTo(
float(commands.pop(0)), float(commands.pop(0)))
elif command == "M":
path.moveTo(
float(commands.pop(0)), float(commands.pop(0)))
canvas.drawPath(path)
elif element.tag == "polygon":
fill, fill_gradient = get_fill(root, current_style)
if fill_gradient is not None:
raise NotImplementedError("Gradient <polygon> not implemented.") # pragma: no cover
if fill is not None:
set_fill_color(canvas, fill)
stroke = get_stroke(current_style)
if stroke is not None:
set_stroke_color(canvas, stroke)
points = [point.split(",") for point in element.get("points").split()]
path = canvas.beginPath()
for point in points[:1]:
path.moveTo(float(point[0]), float(point[1]))
for point in points[1:]:
path.lineTo(float(point[0]), float(point[1]))
path.close()
canvas.drawPath(path, stroke=stroke is not None, fill=fill is not None)
elif element.tag == "rect":
fill, fill_gradient = get_fill(root, current_style)
if fill is not None:
set_fill_color(canvas, fill)
stroke = get_stroke(current_style)
if stroke is not None:
set_stroke_color(canvas, stroke)
x = float(element.get("x", 0))
y = float(element.get("y", 0))
width = float(element.get("width"))
height = float(element.get("height"))
path = canvas.beginPath()
path.moveTo(x, y)
path.lineTo(x + width, y)
path.lineTo(x + width, y + height)
path.lineTo(x, y + height)
path.close()
if fill_gradient is not None:
pdf_colors = []
pdf_offsets = []
for stop in fill_gradient:
offset = float(stop.get("offset"))
color = toyplot.color.css(stop.get("stop-color"))
opacity = float(stop.get("stop-opacity"))
pdf_colors.append(reportlab.lib.colors.Color(color["r"], color["g"], color["b"], color["a"] * opacity))
pdf_offsets.append(offset)
canvas.saveState()
canvas.clipPath(path, stroke=0, fill=1)
canvas.setFillAlpha(1)
canvas.linearGradient(
float(fill_gradient.get("x1")),
float(fill_gradient.get("y1")),
float(fill_gradient.get("x2")),
float(fill_gradient.get("y2")),
pdf_colors,
pdf_offsets,
)
canvas.restoreState()
canvas.drawPath(path, stroke=stroke is not None, fill=fill is not None)
elif element.tag == "circle":
fill, fill_gradient = get_fill(root, current_style)
if fill_gradient is not None:
raise NotImplementedError("Gradient <circle> not implemented.") # pragma: no cover
if fill is not None:
set_fill_color(canvas, fill)
stroke = get_stroke(current_style)
if stroke is not None:
set_stroke_color(canvas, stroke)
cx = float(element.get("cx", 0))
cy = float(element.get("cy", 0))
r = float(element.get("r"))
canvas.circle(cx, cy, r, stroke=stroke is not None, fill=fill is not None)
elif element.tag == "text":
x = float(element.get("x", 0))
y = float(element.get("y", 0))
fill, fill_gradient = get_fill(element, current_style)
stroke = get_stroke(current_style)
font_family = get_font_family(current_style)
font_size = toyplot.units.convert(current_style["font-size"], target="px")
text = element.text
canvas.saveState()
canvas.setFont(font_family, font_size)
if fill is not None:
set_fill_color(canvas, fill)
if stroke is not None:
set_stroke_color(canvas, stroke)
canvas.translate(x, y)
canvas.scale(1, -1)
canvas.drawString(0, 0, text)
canvas.restoreState()
elif element.tag == "image":
import PIL.Image
image = element.get("xlink:href")
if not image.startswith("data:image/png;base64,"):
raise ValueError("Unsupported image type.") # pragma: no cover
image = base64.standard_b64decode(image[22:])
image = io.BytesIO(image)
image = PIL.Image.open(image)
image = reportlab.lib.utils.ImageReader(image)
x = float(element.get("x", 0))
y = float(element.get("y", 0))
width = float(element.get("width"))
height = float(element.get("height"))
canvas.saveState()
path = canvas.beginPath()
set_fill_color(canvas, toyplot.color.rgb(1, 1, 1))
canvas.rect(x, y, width, height, stroke=0, fill=1)
canvas.translate(x, y + height)
canvas.scale(1, -1)
canvas.drawImage(image=image, x=0, y=0, width=width, height=height, mask=None)
canvas.restoreState()
elif element.tag in ["defs", "title"]:
pass
else:
raise Exception("unhandled tag: %s" % element.tag) # pragma: no cover
styles.pop()
canvas.restoreState()
render_element(svg, svg, canvas, [])
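# Typical usage sketch (illustrative; `tcanvas` is an assumed toyplot.canvas.Canvas):
#   import reportlab.pdfgen.canvas
#   import toyplot.svg
#   svg = toyplot.svg.render(tcanvas)
#   pdf = reportlab.pdfgen.canvas.Canvas("figure.pdf", pagesize=(tcanvas.width, tcanvas.height))
#   pdf.translate(0, tcanvas.height)  # flip the y axis: SVG is y-down, PDF is y-up
#   pdf.scale(1, -1)
#   render(svg, pdf)
#   pdf.showPage()
#   pdf.save()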
|
ATMega48 2013-05-14 SHRFP monitor/GUI/theory.py | swharden/AVR-projects | 121 | 12702252 | import matplotlib
matplotlib.use('TkAgg') # THIS MAKES IT FAST!
import numpy
import random
import pylab
import scipy.signal
docs="""
BIG PICTURE:
continuously record sound in buffers.
if buffer is detected:
### POPULATE DELAYS ###
downsample data
find Is where data>0
use ediff1d to get differences between Is
append >1 values to delays[]
--if the old audio[] ended high, figure out how many
--on next run, add that number to the first I
### PLUCK DELAYS, POPULATE VALUES ###
only analyze delays through the last 'break'
values[] is populated with decoded delays.
"""
LENGTH_0=20
LENGTH_1=40
LENGTH_2=60
LENGTH_FUDGE=3
SAMPLEBY=4
### MAKE PERFECT DATA ###
data=[]
orig=""
for i in range(50):
data+=[0]*LENGTH_0
if random.randint(0,1)==0:
data+=[1]*LENGTH_0
orig+="0"
else:
data+=[1]*LENGTH_1
orig+="1"
if i%10==0:
data+=[0]*LENGTH_0+[1]*LENGTH_2
orig+="b"
### ADD SOME NOISE ### (simulates excessively weak signal)
#for i in range(len(data)-3):
# if random.randint(1,40)==10:
# for j in range(random.randint(1,3)):
# data[i+j]=abs(data[i+j]-1)
### DROP SOME VALUES (simulating inaccurate timing) ###
i=0
while i<len(data):
if random.randint(0,20)==0:
data.pop(i)
i+=1
### RESAMPLE
### GOTTA BE NUMPY WHEN GIVEN FROM SOUND CARD ###
# SAMPLE DOWN
data=scipy.signal.resample(data,len(data)/SAMPLEBY)
LENGTH_0=20/SAMPLEBY
LENGTH_1=40/SAMPLEBY
LENGTH_2=60/SAMPLEBY
LENGTH_FUDGE=10/SAMPLEBY
if LENGTH_FUDGE==0: LENGTH_FUDGE+=1
print "FINAL SAMPLE LENGTHS:",LENGTH_0,LENGTH_1,LENGTH_2,LENGTH_FUDGE
### FIND DISTANCE BETWEEN ALL LOW POINTS ###
data=numpy.where(data<.5)[0]
data=numpy.ediff1d(data)
### START DETECTING PULSES ###
pulses=''
for i in range(len(data)):
pulseLength=data[i]
if pulseLength<(LENGTH_0-LENGTH_FUDGE):
#consecutive data point, not done with bit
continue
elif (LENGTH_0-LENGTH_FUDGE)<pulseLength<(LENGTH_0+LENGTH_FUDGE):
pulses+='0'
elif (LENGTH_1-LENGTH_FUDGE)<pulseLength<(LENGTH_1+LENGTH_FUDGE):
pulses+='1'
elif (LENGTH_2-LENGTH_FUDGE)<pulseLength<(LENGTH_2+LENGTH_FUDGE):
pulses+='b'
else:
pulses+='?'
print orig
print pulses |
rgbd_seg/models/__init__.py | tomchol/ShapeConv | 438 | 12702286 | <reponame>tomchol/ShapeConv
from .builder import build_model
from .registry import MODELS
|
jaxrl/wrappers/absorbing_states.py | dibyaghosh/jaxrl | 157 | 12702305 | import gym
import numpy as np
from gym import Wrapper
def make_non_absorbing(observation):
return np.concatenate([observation, [0.0]], -1)
class AbsorbingStatesWrapper(Wrapper):
def __init__(self, env):
super().__init__(env)
low = env.observation_space.low
high = env.observation_space.high
self._absorbing_state = np.concatenate([np.zeros_like(low), [1.0]], 0)
low = np.concatenate([low, [0]], 0)
high = np.concatenate([high, [1]], 0)
self.observation_space = gym.spaces.Box(
low=low, high=high, dtype=env.observation_space.dtype)
def reset(self, **kwargs):
self._done = False
self._absorbing = False
self._info = {}
return make_non_absorbing(self.env.reset(**kwargs))
def step(self, action):
if not self._done:
observation, reward, done, info = self.env.step(action)
observation = make_non_absorbing(observation)
self._done = done
self._info = info
truncated_done = 'TimeLimit.truncated' in info
return observation, reward, truncated_done, info
else:
if not self._absorbing:
self._absorbing = True
return self._absorbing_state, 0.0, False, self._info
else:
return self._absorbing_state, 0.0, True, self._info
if __name__ == '__main__':
env = gym.make('Hopper-v2')
env = AbsorbingStatesWrapper(env)
env.reset()
done = False
while not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print(obs, done)
|
Configuration/DataProcessing/python/Impl/HeavyIonsEra_Run2_2018.py | Purva-Chaudhari/cmssw | 852 | 12702310 | #!/usr/bin/env python3
"""
_HeavyIonsEra_Run2_2018_
Scenario supporting heavy ions collisions
"""
import os
import sys
from Configuration.DataProcessing.Reco import Reco
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
from Configuration.DataProcessing.Impl.HeavyIons import HeavyIons
class HeavyIonsEra_Run2_2018(HeavyIons):
def __init__(self):
HeavyIons.__init__(self)
self.eras = Run2_2018
self.promptCustoms += [ 'Configuration/DataProcessing/RecoTLR.customise_HI_PostEra_Run2_2018' ]
self.expressCustoms += [ 'Configuration/DataProcessing/RecoTLR.customise_HI_PostEra_Run2_2018' ]
self.visCustoms += [ 'Configuration/DataProcessing/RecoTLR.customise_HI_PostEra_Run2_2018' ]
"""
_HeavyIonsEra_Run2_2018_
Implement configuration building for data processing for Heavy Ions
collision data taking for Run2 in 2018
"""
|
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/callchannel.py | sridish123/htcondor | 217 | 12702332 | '''Data channels for calls.
'''
from utils import *
from enums import *
from errors import ISkypeError
import time
class ICallChannel(object):
'''Represents a call channel.
'''
def __init__(self, Manager, Call, Stream, Type):
'''__init__.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Call: Call
@type Call: L{ICall}
@param Stream: Stream
@type Stream: L{IApplicationStream}
@param Type: Type
@type Type: L{Call channel type<enums.cctUnknown>}
'''
self._Manager = Manager
self._Call = Call
self._Stream = Stream
self._Type = Type
def __repr__(self):
return '<%s with Manager=%s, Call=%s, Stream=%s>' % (object.__repr__(self)[1:-1], repr(self.Manager), repr(self.Call), repr(self.Stream))
def SendTextMessage(self, Text):
'''Sends text message over channel.
@param Text: Text
@type Text: unicode
'''
if self._Type == cctReliable:
self._Stream.Write(Text)
elif self._Type == cctDatagram:
self._Stream.SendDatagram(Text)
else:
            raise ISkypeError(0, 'Cannot send using %s channel type' % repr(self._Type))
def _GetCall(self):
return self._Call
Call = property(_GetCall,
doc='''Call.
@type: L{ICall}
''')
def _GetManager(self):
return self._Manager
Manager = property(_GetManager,
doc='''Manager.
@type: L{ICallChannelManager}
''')
def _GetStream(self):
return self._Stream
Stream = property(_GetStream,
doc='''Stream.
@type: L{IApplicationStream}
''')
def _GetType(self):
return self._Type
Type = property(_GetType,
doc='''Type.
@type: L{Call channel type<enums.cctUnknown>}
''')
class ICallChannelManager(EventHandlingBase):
    '''Instantiate this class to create a call channel manager. A call channel manager will
automatically create a data channel for voice calls based on the APP2APP protocol.
1. Usage.
You should access this class using the alias at the package level::
import Skype4Py
skype = Skype4Py.Skype()
ccm = Skype4Py.CallChannelManager()
ccm.Connect(skype)
For possible constructor arguments, read the L{ICallChannelManager.__init__} description.
2. Events.
This class provides events.
The events names and their arguments lists can be found in L{ICallChannelManagerEvents} class.
The usage of events is described in L{EventHandlingBase} class which is a superclass of
this class. Follow the link for more information.
@ivar OnChannels: Event handler for L{ICallChannelManagerEvents.Channels} event. See L{EventHandlingBase} for more information on events.
@type OnChannels: callable
@ivar OnMessage: Event handler for L{ICallChannelManagerEvents.Message} event. See L{EventHandlingBase} for more information on events.
@type OnMessage: callable
@ivar OnCreated: Event handler for L{ICallChannelManagerEvents.Created} event. See L{EventHandlingBase} for more information on events.
@type OnCreated: callable
'''
def __del__(self):
if self._Application:
self._Application.Delete()
self._Application = None
self._Skype.UnregisterEventHandler('ApplicationStreams', self._OnApplicationStreams)
self._Skype.UnregisterEventHandler('ApplicationReceiving', self._OnApplicationReceiving)
self._Skype.UnregisterEventHandler('ApplicationDatagram', self._OnApplicationDatagram)
def __init__(self, Events=None):
'''__init__.
@param Events: Events
@type Events: An optional object with event handlers. See L{EventHandlingBase} for more information on events.
'''
EventHandlingBase.__init__(self)
if Events:
self._SetEventHandlerObj(Events)
self._Skype = None
self._CallStatusEventHandler = None
self._ApplicationStreamsEventHandler = None
self._ApplicationReceivingEventHandler = None
self._ApplicationDatagramEventHandler = None
self._Application = None
self._Name = u'CallChannelManager'
self._ChannelType = cctReliable
self._Channels = []
def _OnApplicationDatagram(self, pApp, pStream, Text):
if pApp == self._Application:
            for ch in self._Channels:
if ch.Stream == pStream:
msg = ICallChannelMessage(Text)
self._CallEventHandler('Message', self, ch, msg)
break
def _OnApplicationReceiving(self, pApp, pStreams):
if pApp == self._Application:
for ch in self._Channels:
if ch.Stream in pStreams:
msg = ICallChannelMessage(ch.Stream.Read())
self._CallEventHandler('Message', self, ch, msg)
def _OnApplicationStreams(self, pApp, pStreams):
if pApp == self._Application:
for ch in self._Channels:
if ch.Stream not in pStreams:
self._Channels.remove(ch)
self._CallEventHandler('Channels', self, tuple(self._Channels))
def _OnCallStatus(self, pCall, Status):
if Status == clsRinging:
if self._Application is None:
self.CreateApplication()
self._Application.Connect(pCall.PartnerHandle, True)
for stream in self._Application.Streams:
if stream.PartnerHandle == pCall.PartnerHandle:
self._Channels.append(ICallChannel(self, pCall, stream, self._ChannelType))
self._CallEventHandler('Channels', self, tuple(self._Channels))
break
elif Status in (clsCancelled, clsFailed, clsFinished, clsRefused, clsMissed):
for ch in self._Channels:
if ch.Call == pCall:
self._Channels.remove(ch)
self._CallEventHandler('Channels', self, tuple(self._Channels))
try:
ch.Stream.Disconnect()
except ISkypeError:
pass
break
def Connect(self, Skype):
'''Connects this call channel manager instance to Skype. This is the first thing you should
do after creating this object.
@param Skype: Skype object
@type Skype: L{ISkype}
@see: L{Disconnect}
'''
self._Skype = Skype
self._Skype.RegisterEventHandler('CallStatus', self._OnCallStatus)
def CreateApplication(self, ApplicationName=None):
'''Creates an APP2APP application context. The application is automatically created using
L{IApplication.Create<application.IApplication.Create>}.
@param ApplicationName: Application name
@type ApplicationName: unicode
'''
if ApplicationName is not None:
self.Name = ApplicationName
self._Application = self._Skype.Application(self.Name)
self._Skype.RegisterEventHandler('ApplicationStreams', self._OnApplicationStreams)
self._Skype.RegisterEventHandler('ApplicationReceiving', self._OnApplicationReceiving)
self._Skype.RegisterEventHandler('ApplicationDatagram', self._OnApplicationDatagram)
self._Application.Create()
self._CallEventHandler('Created', self)
def Disconnect(self):
'''Disconnects from Skype.
@see: L{Connect}
'''
self._Skype.UnregisterEventHandler('CallStatus', self._OnCallStatus)
self._Skype = None
def _GetChannels(self):
return tuple(self._Channels)
Channels = property(_GetChannels,
doc='''All call data channels.
@type: tuple of L{ICallChannel}
''')
def _GetChannelType(self):
return self._ChannelType
def _SetChannelType(self, ChannelType):
self._ChannelType = ChannelType
ChannelType = property(_GetChannelType, _SetChannelType,
doc='''Queries/sets the default channel type.
@type: L{Call channel type<enums.cctUnknown>}
''')
def _GetCreated(self):
return bool(self._Application)
Created = property(_GetCreated,
doc='''Returns True if the application context has been created.
@type: bool
''')
def _GetName(self):
return self._Name
def _SetName(self, Name):
self._Name = unicode(Name)
Name = property(_GetName, _SetName,
doc='''Queries/sets the application context name.
@type: unicode
''')
class ICallChannelManagerEvents(object):
'''Events defined in L{ICallChannelManager}.
See L{EventHandlingBase} for more information on events.
'''
def Channels(self, Manager, Channels):
'''This event is triggered when list of call channels changes.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Channels: Channels
@type Channels: tuple of L{ICallChannel}
'''
def Created(self, Manager):
        '''This event is triggered when the application context has successfully been created.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
'''
def Message(self, Manager, Channel, Message):
'''This event is triggered when a call channel message has been received.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Channel: Channel
@type Channel: L{ICallChannel}
@param Message: Message
@type Message: L{ICallChannelMessage}
'''
ICallChannelManager._AddEvents(ICallChannelManagerEvents)
class ICallChannelMessage(object):
'''Represents a call channel message.
'''
def __init__(self, Text):
'''__init__.
@param Text: Text
@type Text: unicode
'''
self._Text = Text
def _GetText(self):
return self._Text
def _SetText(self, Text):
self._Text = Text
Text = property(_GetText, _SetText,
doc='''Queries/sets message text.
@type: unicode
''')
|
telegram/API/main-test.py | whitmans-max/python-examples | 140 | 12702349 | #!/usr/bin/env python3
import os
import requests
from pprint import pprint
#token = '<MY TOKEN>'
token = os.environ['TELEGRAM_TOKEN']
# get infromation about bot
url = f'https://api.telegram.org/bot{token}/getMe'
r = requests.get(url)
pprint(r.json())
print('---')
# get messages for bot
url = f'https://api.telegram.org/bot{token}/getUpdates'
r = requests.get(url)
pprint(r.json())
print('---')
# ID of first chat - I will use it to send answers
response = r.json()
chat_id = response['result'][0]['message']['chat']['id']
print('chat_id:', chat_id)
print('---')
# send back message
data = {'chat_id': chat_id, 'text': 'Hello World'}
url = f'https://api.telegram.org/bot{token}/sendMessage'
r = requests.get(url, params=data)
# send back image from internet - I can use POST or GET
data = {'chat_id': chat_id, 'photo': 'https://blog.furas.pl/theme/images/me/furas.png'}
url = f'https://api.telegram.org/bot{token}/sendPhoto'
r = requests.get(url, params=data)
#r = requests.post(url, data=data)
pprint(r.json())
print('---')
# get ID of file on Telegram
response = r.json()
photo_id = response['result']['photo'][0]['file_id']
print('photo_id:', photo_id)
print('---')
# send image which already is on Telegram server - I can use POST or GET
data = {'chat_id': chat_id, 'photo': photo_id}
url = f'https://api.telegram.org/bot{token}/sendPhoto'
r = requests.get(url, params=data)
#r = requests.post(url, data=data)
pprint(r.json())
print('---')
|
venv/Lib/site-packages/ipython_genutils/tests/test_tempdir.py | ajayiagbebaku/NFL-Model | 1,318 | 12702426 |
import os
from ..tempdir import NamedFileInTemporaryDirectory
from ..tempdir import TemporaryWorkingDirectory
def test_named_file_in_temporary_directory():
with NamedFileInTemporaryDirectory('filename') as file:
name = file.name
assert not file.closed
assert os.path.exists(name)
file.write(b'test')
assert file.closed
assert not os.path.exists(name)
def test_temporary_working_directory():
with TemporaryWorkingDirectory() as dir:
assert os.path.exists(dir)
assert os.path.realpath(os.curdir) == os.path.realpath(dir)
assert not os.path.exists(dir)
assert os.path.abspath(os.curdir) != dir
|
self_organising_systems/texture_ca/texture_synth.py | Bathsheba/self-organising-systems | 121 | 12702428 | <reponame>Bathsheba/self-organising-systems
# Lint as: python3
"""
Texture synth experiments.
"""
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
from self_organising_systems.texture_ca.config import cfg
tcfg = cfg.texture_ca
from self_organising_systems.texture_ca.losses import StyleModel, Inception
from self_organising_systems.shared.video import VideoWriter
from self_organising_systems.shared.util import tile2d, Bunch
from self_organising_systems.texture_ca.ca import CAModel, to_rgb
import tensorflow as tf
# TF voodoo during migration period...
tf.compat.v1.enable_v2_behavior()
import numpy as np
def main(_):
texture_synth_trainer = TextureSynthTrainer()
texture_synth_trainer.train()
class SamplePool:
def __init__(self, *, _parent=None, _parent_idx=None, **slots):
self._parent = _parent
self._parent_idx = _parent_idx
self._slot_names = slots.keys()
self._size = None
for k, v in slots.items():
if self._size is None:
self._size = len(v)
assert self._size == len(v)
setattr(self, k, np.asarray(v))
def sample(self, n):
idx = np.random.choice(self._size, n, False)
batch = {k: getattr(self, k)[idx] for k in self._slot_names}
batch = SamplePool(**batch, _parent=self, _parent_idx=idx)
return batch
def commit(self):
for k in self._slot_names:
getattr(self._parent, k)[self._parent_idx] = getattr(self, k)
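# Illustrative usage sketch (shapes are made up, not part of the original training code):
# SamplePool keeps one numpy array per named slot; sample(n) draws a random
# mini-batch as a child pool, and commit() writes the (possibly modified) batch
# back into the parent arrays at the sampled indices.
#   pool = SamplePool(x=np.zeros([1024, 64, 64, 12], np.float32))
#   batch = pool.sample(8)
#   batch.x[:] += 1.0   # mutate the sampled states
#   batch.commit()      # persist them back into pool.x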
def create_loss_model():
loss_type, loss_params = tcfg.objective.split(':', 1)
if loss_type == "style":
texture_fn = loss_params
input_texture_path = "%s/%s"%(tcfg.texture_dir, texture_fn)
loss_model = StyleModel(input_texture_path)
elif loss_type == "inception":
layer_name, ch = loss_params.split(':')
loss_model = Inception(layer_name, int(ch))
return loss_model
class TextureSynthTrainer:
def __init__(self, loss_model=None):
self.experiment_log_dir = "%s/%s"%(cfg.logdir, cfg.experiment_name)
self.writer = tf.summary.create_file_writer(self.experiment_log_dir)
if loss_model is None:
loss_model = create_loss_model()
self.loss_model = loss_model
self.ca = CAModel()
if tcfg.ancestor_npy:
self.ancestor_ca = CAModel()
ancestor_fn = "%s/%s" % (tcfg.ancestor_dir, tcfg.ancestor_npy)
self.ancestor_ca.load_params(ancestor_fn)
self.ca.load_params(ancestor_fn)
logging.info("loaded pre-trained model %s" % tcfg.ancestor_npy)
self.loss_log = []
self.pool = SamplePool(x=self.seed_fn(tcfg.pool_size))
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
[1000], [tcfg.lr, tcfg.lr*0.1])
self.trainer = tf.keras.optimizers.Adam(lr_sched)
def visualize_batch_tf(self, x0, x, step_num):
vis0 = np.hstack(to_rgb(x0))
vis1 = np.hstack(to_rgb(x))
vis = np.vstack([vis0, vis1])
tf.summary.image("batch_vis", vis[None, ...])
def train(self):
with self.writer.as_default():
for _ in range(tcfg.train_steps+1):
step_num = len(self.loss_log)
step = self.train_step()
if step_num%50 == 0 or step_num == tcfg.train_steps:
self.visualize_batch_tf(step.x0, step.batch.x, step_num)
self.ca.save_params("%s/%s.npy" % (cfg.logdir, cfg.experiment_name))
logging.info('step: %d, log10(loss): %s, loss: %s'%(len(self.loss_log), np.log10(step.loss), step.loss.numpy()))
self.save_video("%s/%s.mp4" % (cfg.logdir, cfg.experiment_name), self.ca.embody)
def train_step(self):
step_num = len(self.loss_log)
tf.summary.experimental.set_step(step_num)
batch = self.pool.sample(tcfg.batch_size)
x0 = batch.x.copy()
if step_num%2==0:
x0[:1] = self.seed_fn(1)
batch.x[:], loss = self._train_step(x0)
batch.commit()
tf.summary.scalar("loss", loss)
self.loss_log.append(loss.numpy())
return Bunch(batch=batch, x0=x0, loss=loss, step_num=step_num)
@tf.function
def _train_step(self, x):
iter_n = tf.random.uniform([], tcfg.rollout_len_min, tcfg.rollout_len_max, tf.int32)
with tf.GradientTape(persistent=False) as g:
f = self.ca.embody()
for i in tf.range(iter_n):
x = f(x)
loss = self.loss_model(to_rgb(x))
grads = g.gradient(loss, self.ca.params)
grads = [g/(tf.norm(g)+1e-8) for g in grads]
self.trainer.apply_gradients(zip(grads, self.ca.params))
return x, loss
def seed_fn(self, n):
states = np.zeros([n, tcfg.img_size, tcfg.img_size, tcfg.channel_n], np.float32)
return states
def save_video(self, path, f):
state = self.seed_fn(1)
f = self.ca.embody()
if tcfg.ancestor_npy:
state_ancestor = self.seed_fn(1)
f_ancestor = self.ancestor_ca.embody()
with VideoWriter(path, 60.0) as vid:
for i in range(tcfg.viz_rollout_len):
# visualize the RGB + hidden states.
if tcfg.hidden_viz_group:
padding_channel_len = (3 - state[0].shape[2] % 3) % 3
splitframe = np.split(np.pad(state[0], ((0,0), (0,0), (0,padding_channel_len)), mode='constant'), (state[0].shape[2] + padding_channel_len)/3, 2)
else:
hidden = np.transpose(np.repeat(state[0][..., 3:, None], 3, -1), (2, 0, 1, 3))
splitframe = np.concatenate([state[0][None, ..., :3], hidden], 0)
frame = to_rgb(tile2d(splitframe))
vid.add(frame)
if tcfg.ancestor_npy:
c_state = f(state, fire_rate=0.5)
a_state = f_ancestor(state, fire_rate=0.5)
progress = max(1.25*(i/tcfg.viz_rollout_len) - 0.25, 0.0)
state = (1-progress)*c_state + progress*a_state
else:
state = f(state, fire_rate=0.5)
if __name__ == '__main__':
app.run(main)
|
metrics/totto/evaluator.py | HKUNLP/UnifiedSKG | 191 | 12702564 | # encoding=utf8
import numpy as np
from datasets import load_metric
# the code below refers to the https://github.com/Yale-LILY/FeTaQA/blob/main/end2end/train.py
def postprocess_text(preds, references_s, metric_name):
preds = [pred.strip() for pred in preds]
references_s = [[reference.strip() for reference in references] for references in references_s]
# rougeLSum expects newline after each sentence
if metric_name in ["sacrebleu"]:
# since hf sacrebleu only support references with same length, we have to pad them into the same length
ref_max_len = max([len(ref) for ref in references_s])
for ref in references_s:
for _ in range(ref_max_len - len(ref)):
ref.append(None) # see https://github.com/mjpost/sacrebleu/pull/132
print(ref)
elif metric_name == "bleu":
preds = [pred.split(' ') for pred in preds]
references_s = [[reference.split(' ') for reference in references] for references in references_s]
else:
pass
return preds, references_s
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
references_s = [item["final_sentences"] for item in golds]
assert len(preds) == len(references_s)
metric_list = []
if section in ['train', 'dev']:
metric_list = ['sacrebleu']
elif section == 'test':
metric_list = ["sacrebleu", "bleurt"] # TODO: add PARENT
for metric_name in metric_list:
metric = load_metric(metric_name)
processed_preds, processed_golds = postprocess_text(preds, references_s, metric_name)
if metric_name == "sacrebleu":
res = metric.compute(predictions=processed_preds, references=processed_golds)
summary[metric_name] = res["score"] * 0.01
elif metric_name == "bleurt":
# We refer to the realization in https://github.com/google-research/language/blob/13fd14e1b285002412252097586f8fe405ba8a24/language/totto/totto_bleurt_eval.py#L94-L131
multi_references = [[], [], []]
for references in processed_golds: # here "references" mean references for one prediction string.
if len(references) == 2:
multi_references[2].append('')
elif len(references) == 3:
multi_references[2].append(references[2])
else:
raise ValueError("The references num for each candidate should be 2 or 3 in ToTTo dataset.")
multi_references[0].append(references[0])
multi_references[1].append(references[1])
multi_bleurt_scores = []
for references in multi_references:
multi_bleurt_scores.append(metric.compute(predictions=processed_preds, references=references))
assert len(multi_references) == 3
avg_bleurt_scores = []
for i in range(len(processed_preds)):
                    # All examples have at least two references but some do not have three.
assert multi_references[0][i] and multi_references[1][i]
r2 = multi_references[2][i]
if r2:
# Take average over 3 references.
score_i = (multi_bleurt_scores[0][i] + multi_bleurt_scores[1][i] +
multi_bleurt_scores[2][i]) / 3
else:
# print("only two refs")
# Take average over two references.
score_i = (multi_bleurt_scores[0][i] + multi_bleurt_scores[1][i]) / 2
avg_bleurt_scores.append(score_i)
summary["bleurt"] = np.mean(avg_bleurt_scores)
else:
res = metric.compute(predictions=processed_preds, references=processed_golds)
summary[metric_name] = res[metric_name]
return summary
if __name__ == '__main__':
import json
with open("predictions_eval_3.179650238473768.json") as f:
test_data = json.load(f)
with open("dev_result.txt") as f:
preds = [line.strip() for line in f.readlines()]
evaluator = EvaluateTool(args=None)
score = evaluator.evaluate(preds, test_data, section="test")
print(score)
|
chapter4_serving_patterns/batch_pattern/src/db/initialize.py | sudabon/ml-system-in-actions | 133 | 12702577 | from logging import getLogger
from src.configurations import PlatformConfigurations
from src.db import cruds, models, schemas
from src.db.database import get_context_db
logger = getLogger(__name__)
def initialize_database(engine, checkfirst: bool = True):
models.create_tables(engine=engine, checkfirst=checkfirst)
with get_context_db() as db:
sample_data = PlatformConfigurations.sample_data
items = [schemas.ItemBase(values=values) for values in sample_data]
cruds.register_items(db=db, items=items, commit=True)
|
python/tvm/tir/schedule/_type_checker.py | shengxinhu/tvm | 4,640 | 12702589 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type checking functionality"""
import functools
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import typing
def _is_none_type(type_: Any) -> bool:
return type_ is None or type_ is type(None)
if hasattr(typing, "_GenericAlias"):
class _Subtype:
@staticmethod
def _origin(type_: Any) -> Any:
if isinstance(type_, typing._GenericAlias): # type: ignore # pylint: disable=protected-access
return type_.__origin__
return None
@staticmethod
def list_(type_: Any) -> Any:
if _Subtype._origin(type_) is list:
(subtype,) = type_.__args__
return [subtype]
return None
@staticmethod
def tuple_(type_: Any) -> Optional[List[type]]:
if _Subtype._origin(type_) is tuple:
subtypes = type_.__args__
return subtypes
return None
@staticmethod
def optional(type_: Any) -> Optional[List[type]]:
if _Subtype._origin(type_) is Union:
subtypes = type_.__args__
if len(subtypes) == 2 and _is_none_type(subtypes[1]):
return [subtypes[0]]
return None
@staticmethod
def union(type_: Any) -> Optional[List[type]]:
if _Subtype._origin(type_) is Union:
subtypes = type_.__args__
if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
return list(subtypes)
return None
elif hasattr(typing, "_Union"):
class _Subtype: # type: ignore
@staticmethod
def list_(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
if type_.__name__ == "List":
(subtype,) = type_.__args__ # type: ignore # pylint: disable=no-member
return [subtype]
return None
@staticmethod
def tuple_(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
if type_.__name__ == "Tuple":
subtypes = type_.__args__ # type: ignore # pylint: disable=no-member
return subtypes
return None
@staticmethod
def optional(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
subtypes = type_.__args__
if len(subtypes) == 2 and _is_none_type(subtypes[1]):
return [subtypes[0]]
return None
@staticmethod
def union(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
subtypes = type_.__args__
if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
return list(subtypes)
return None
def _dispatcher(type_: Any) -> Tuple[str, List[type]]:
if _is_none_type(type_):
return "none", []
subtype = _Subtype.list_(type_)
if subtype is not None:
return "list", subtype
subtype = _Subtype.tuple_(type_)
if subtype is not None:
return "tuple", subtype
subtype = _Subtype.optional(type_)
if subtype is not None:
return "optional", subtype
subtype = _Subtype.union(type_)
if subtype is not None:
return "union", subtype
return "atomic", [type_]
_TYPE2STR: Dict[Any, Callable] = {
"none": lambda: "None",
"atomic": lambda t: str(t.__name__),
"list": lambda t: f"List[{_type2str(t)}]",
"tuple": lambda *t: f"Tuple[{', '.join([_type2str(x) for x in t])}]",
"optional": lambda t: f"Optional[{_type2str(t)}]",
"union": lambda *t: f"Union[{', '.join([_type2str(x) for x in t])}]",
}
def _type2str(type_: Any) -> str:
key, subtypes = _dispatcher(type_)
return _TYPE2STR[key](*subtypes)
def _val2type(value: Any):
if isinstance(value, list):
types = set(_val2type(x) for x in value)
if len(types) == 1:
return List[types.pop()] # type: ignore
return List[Union[tuple(types)]] # type: ignore
if isinstance(value, tuple):
types = tuple(_val2type(x) for x in value) # type: ignore
return Tuple[types]
return type(value)
def _type_check_err(x: Any, name: str, expected: Any) -> str:
return (
f'"{name}" has wrong type. '
f'Expected "{_type2str(expected)}", '
f'but gets: "{_type2str(_val2type(x))}"'
)
def _type_check_vtable() -> Dict[str, Callable]:
def _type_check_none(v: Any, name: str) -> Optional[str]:
return None if v is None else _type_check_err(v, name, None)
def _type_check_atomic(v: Any, name: str, type_: Any) -> Optional[str]:
return None if isinstance(v, type_) else _type_check_err(v, name, type_)
def _type_check_list(v: List[Any], name: str, type_: Any) -> Optional[str]:
if not isinstance(v, (list, tuple)):
return _type_check_err(v, name, list)
for i, x in enumerate(v):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_tuple(v: Any, name: str, *types: Any) -> Optional[str]:
if not isinstance(v, tuple):
return _type_check_err(v, name, Tuple[types])
if len(types) != len(v):
return _type_check_err(v, name, Tuple[types])
for i, (x, type_) in enumerate(zip(v, types)):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_optional(v: Any, name: str, type_: Any) -> Optional[str]:
return None if v is None else _type_check(v, name, type_)
def _type_check_union(v: Any, name: str, *types: Any) -> Optional[str]:
for type_ in types:
error_msg = _type_check(v, name, type_)
if error_msg is None:
return None
return _type_check_err(v, name, Union[types])
return {
"none": _type_check_none,
"atomic": _type_check_atomic,
"list": _type_check_list,
"tuple": _type_check_tuple,
"optional": _type_check_optional,
"union": _type_check_union,
}
_TYPE_CHECK: Dict[Any, Callable] = _type_check_vtable()
def _type_check(v: Any, name: str, type_: Any) -> Optional[str]:
key, subtypes = _dispatcher(type_)
return _TYPE_CHECK[key](v, name, *subtypes)
def type_checked(func: Callable) -> Callable:
"""Type check the input arguments of a function."""
sig = inspect.signature(func)
@functools.wraps(func)
def wrap(*args, **kwargs):
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
for param in sig.parameters.values():
if param.annotation != inspect.Signature.empty:
error_msg = _type_check(
bound_args.arguments[param.name],
param.name,
param.annotation,
)
if error_msg is not None:
error_msg = f'In "{func.__qualname__}", {error_msg}'
raise TypeError(error_msg)
return func(*args, **kwargs)
return wrap
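# Illustrative usage (hypothetical function, not part of this module):
#
#   @type_checked
#   def schedule_rule(name: str, factors: List[int]) -> None:
#       ...
#
#   schedule_rule("tile", [4, 8])        # passes the checks
#   schedule_rule("tile", "not-a-list")  # raises TypeError: In "schedule_rule",
#       # "factors" has wrong type. Expected "List[int]", but gets: "str"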
|
example_scripts/gzip_mzml.py | RJMW/pymzML | 117 | 12702614 | <reponame>RJMW/pymzML<filename>example_scripts/gzip_mzml.py
#!/usr/bin/env python3
import sys
import os
from pymzml.utils.utils import index_gzip
from pymzml.run import Reader
def main(mzml_path, out_path):
"""
Create and indexed gzip mzML file from a plain mzML.
Usage: python3 gzip_mzml.py <path/to/mzml> <path/to/output>
"""
with open(mzml_path) as fin:
fin.seek(0, 2)
max_offset_len = fin.tell()
max_spec_no = Reader(mzml_path).get_spectrum_count() + 10
index_gzip(
mzml_path, out_path, max_idx=max_spec_no, idx_len=len(str(max_offset_len))
)
if __name__ == "__main__":
if len(sys.argv) > 2:
main(sys.argv[1], sys.argv[2])
else:
print(main.__doc__)
|
envs/hns/mujoco-worldgen/mujoco_worldgen/util/sim_funcs.py | jiayu-ch15/curriculum | 424 | 12702653 | import logging
import numpy as np
import itertools
logger = logging.getLogger(__name__)
# #######################################
# ############ set_action ###############
# #######################################
def ctrl_set_action(sim, action):
"""
For torque actuators it copies the action into mujoco ctrl field.
For position actuators it sets the target relative to the current qpos.
"""
if sim.model.nmocap > 0:
_, action = np.split(action, (sim.model.nmocap * 7, ))
if sim.data.ctrl is not None:
for i in range(action.shape[0]):
if sim.model.actuator_biastype[i] == 0:
sim.data.ctrl[i] = action[i]
else:
idx = sim.model.jnt_qposadr[sim.model.actuator_trnid[i, 0]]
sim.data.ctrl[i] = sim.data.qpos[idx] + action[i]
# #######################################
# ############ get_reward ###############
# #######################################
def zero_get_reward(sim):
return 0.0
def gps_dist(sim, obj0, obj1):
obj0 = sim.data.get_site_xpos(obj0)
obj1 = sim.data.get_site_xpos(obj1)
diff = np.sum(np.square(obj0 - obj1))
return diff + 0.3 * np.log(diff + 1e-4)
def l2_dist(sim, obj0, obj1):
obj0 = sim.data.get_site_xpos(obj0)
obj1 = sim.data.get_site_xpos(obj1)
return np.sqrt(np.mean(np.square(obj0 - obj1)))
# #######################################
# ########### get_diverged ##############
# #######################################
def false_get_diverged(sim):
return False, 0.0
def simple_get_diverged(sim):
if sim.data.qpos is not None and \
(np.max(np.abs(sim.data.qpos)) > 1000.0 or
np.max(np.abs(sim.data.qvel)) > 100.0):
return True, -20.0
return False, 0.0
# #######################################
# ########### get_info ##############
# #######################################
def empty_get_info(sim):
return {}
# #######################################
# ############## get_obs ################
# #######################################
def flatten_get_obs(sim):
if sim.data.qpos is None:
return np.zeros(0)
return np.concatenate([sim.data.qpos, sim.data.qvel])
def image_get_obs(sim):
return sim.render(100, 100, camera_name="rgb")
# Helpers
def get_body_geom_ids(model, body_name):
""" Returns geom_ids in the body. """
body_id = model.body_name2id(body_name)
geom_ids = []
for geom_id in range(model.ngeom):
if model.geom_bodyid[geom_id] == body_id:
geom_ids.append(geom_id)
return geom_ids
def change_geom_alpha(model, body_name_prefix, new_alpha):
''' Changes the visual transparency (alpha) of an object'''
for body_name in model.body_names:
if body_name.startswith(body_name_prefix):
for geom_id in get_body_geom_ids(model, body_name):
model.geom_rgba[geom_id, 3] = new_alpha
def joint_qpos_idxs(sim, joint_name):
''' Gets indexes for the specified joint's qpos values'''
addr = sim.model.get_joint_qpos_addr(joint_name)
if isinstance(addr, tuple):
return list(range(addr[0], addr[1]))
else:
return [addr]
def qpos_idxs_from_joint_prefix(sim, prefix):
''' Gets indexes for the qpos values of all joints matching the prefix'''
qpos_idxs_list = [joint_qpos_idxs(sim, name)
for name in sim.model.joint_names
if name.startswith(prefix)]
return list(itertools.chain.from_iterable(qpos_idxs_list))
def joint_qvel_idxs(sim, joint_name):
''' Gets indexes for the specified joint's qvel values'''
addr = sim.model.get_joint_qvel_addr(joint_name)
if isinstance(addr, tuple):
return list(range(addr[0], addr[1]))
else:
return [addr]
def qvel_idxs_from_joint_prefix(sim, prefix):
''' Gets indexes for the qvel values of all joints matching the prefix'''
qvel_idxs_list = [joint_qvel_idxs(sim, name)
for name in sim.model.joint_names
if name.startswith(prefix)]
return list(itertools.chain.from_iterable(qvel_idxs_list))
def body_names_from_joint_prefix(sim, prefix):
''' Returns a list of body names that contain joints matching the given prefix'''
return [sim.model.body_id2name(sim.model.jnt_bodyid[sim.model.joint_name2id(name)])
for name in sim.model.joint_names
if name.startswith(prefix)]
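# Illustrative use of the prefix helpers above (the 'agent0:' prefix is hypothetical):
#
#     idxs = qpos_idxs_from_joint_prefix(sim, 'agent0:')
#     agent_qpos = sim.data.qpos[idxs]
#
# gathers the generalized coordinates of every joint whose name starts with that prefix.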
|
tests/functions/test_render.py | tteaka/sqlalchemy-utils | 879 | 12702668 | <reponame>tteaka/sqlalchemy-utils<filename>tests/functions/test_render.py
import pytest
import sqlalchemy as sa
from sqlalchemy_utils.functions import (
mock_engine,
render_expression,
render_statement
)
class TestRender(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
return User
@pytest.fixture
def init_models(self, User):
pass
def test_render_orm_query(self, session, User):
query = session.query(User).filter_by(id=3)
text = render_statement(query)
assert 'SELECT user.id, user.name' in text
assert 'FROM user' in text
assert 'WHERE user.id = 3' in text
def test_render_statement(self, session, User):
statement = User.__table__.select().where(User.id == 3)
text = render_statement(statement, bind=session.bind)
assert 'SELECT user.id, user.name' in text
assert 'FROM user' in text
assert 'WHERE user.id = 3' in text
def test_render_statement_without_mapper(self, session):
statement = sa.select([sa.text('1')])
text = render_statement(statement, bind=session.bind)
assert 'SELECT 1' in text
def test_render_ddl(self, engine, User):
expression = 'User.__table__.create(engine)'
stream = render_expression(expression, engine)
text = stream.getvalue()
assert 'CREATE TABLE user' in text
assert 'PRIMARY KEY' in text
def test_render_mock_ddl(self, engine, User):
# TODO: mock_engine doesn't seem to work with locally scoped variables.
self.engine = engine
with mock_engine('self.engine') as stream:
User.__table__.create(self.engine)
text = stream.getvalue()
assert 'CREATE TABLE user' in text
assert 'PRIMARY KEY' in text
|
analysis/combine_ckpt.py | taokong/ibot | 327 | 12702731 | import torch
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--checkpoint_backbone', required=True, type=str)
parser.add_argument('--checkpoint_linear', required=True, type=str)
parser.add_argument('--output_file', required=True, type=str)
if __name__ == "__main__":
args = parser.parse_args()
backbone = torch.load(args.checkpoint_backbone)['state_dict']
model = torch.load(args.checkpoint_linear)
linear = model['state_dict']
head_index = model['best_acc_hidx']
new_linear = {}
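    # The linear checkpoint stores one classifier per candidate head; keep only the
    # weights of the best head and drop the first two key components (the head index
    # and the wrapper component that follows it) so the names line up with the backbone keys.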
for key, val in linear.items():
splits = key.split('.')
if splits[0] == str(head_index):
new_linear['.'.join(splits[2:])] = val
backbone.update(new_linear)
model['state_dict'] = backbone
print(f"save {head_index}th head with acc {model['best_acc']}")
torch.save(model, args.output_file) |
rlpy/Policies/UniformRandom.py | okkhoy/rlpy | 265 | 12702732 | <gh_stars>100-1000
"""Uniform Random policy"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
from .Policy import Policy
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "BSD 3-Clause"
__author__ = "<NAME>"
class UniformRandom(Policy):
"""
Select an action uniformly at random from those available in a particular
state.
"""
def __init__(self, representation, seed=1):
super(UniformRandom, self).__init__(representation, seed)
def prob(self, s, terminal, p_actions):
p = old_div(np.ones(len(p_actions)), len(p_actions))
return p
def pi(self, s, terminal, p_actions):
return self.random_state.choice(p_actions)
|
recipes/Python/135700_win32_service_administration/recipe-135700.py | tdiprima/code | 2,023 | 12702735 | # svcAdmin.py
# in principle this could be expanded to support Linux operations
# such as the RedHat style: service <name> status|start|stop|restart
#
import os
import time
from types import *
if os.name == 'nt':
import win32service
import win32serviceutil
RUNNING = win32service.SERVICE_RUNNING
STARTING = win32service.SERVICE_START_PENDING
STOPPING = win32service.SERVICE_STOP_PENDING
STOPPED = win32service.SERVICE_STOPPED
def svcStatus( svc_name, machine=None):
return win32serviceutil.QueryServiceStatus( svc_name, machine)[1] # scvType, svcState, svcControls, err, svcErr, svcCP, svcWH
def svcStop( svc_name, machine=None):
status = win32serviceutil.StopService( svc_name, machine)[1]
while status == STOPPING:
time.sleep(1)
status = svcStatus( svc_name, machine)
return status
def svcStart( svc_name, svc_arg = None, machine=None):
if not svc_arg is None:
if type(svc_arg) in StringTypes:
# win32service expects a list of string arguments
svc_arg = [ svc_arg]
win32serviceutil.StartService( svc_name, svc_arg, machine)
status = svcStatus( svc_name, machine)
while status == STARTING:
time.sleep(1)
status = svcStatus( svc_name, machine)
return status
if __name__ == "__main__":
svc = 'mysql'
#machine = '192.168.0.4' # no \\ prefix
machine = None # localhost
test_arg = (None, r'--datadir=f:\mysql\data_playpark')
modulus = len(test_arg)
argndx = 0
for i in range(2 * modulus):
status = svcStatus( svc, machine=machine)
if status == STOPPED:
arg = test_arg[argndx % modulus]
new_status = svcStart( svc, arg, machine=machine)
argndx += 1
elif status == RUNNING:
arg = None
new_status = svcStop( svc, machine=machine)
else:
arg = None
new_status = "Not changed"
print "Status changed from %s to %s (with arg: %s)" % (status, new_status, arg)
|
src/gamemodes/__init__.py | codebam/lykos | 122 | 12702738 | <reponame>codebam/lykos
# Imports all gamemode definitions
import os.path
import glob
import importlib
import src.settings as var
from src.messages import messages
from src.events import Event, EventListener
from src.cats import All, Cursed, Wolf, Wolfchat, Innocent, Village, Neutral, Hidden, Team_Switcher, Win_Stealer, Nocturnal, Killer, Spy
__all__ = ["InvalidModeException", "game_mode", "import_builtin_modes" "GameMode"]
class InvalidModeException(Exception):
pass
def game_mode(name, minp, maxp, likelihood=0):
def decor(c):
c.name = name
var.GAME_MODES[name] = (c, minp, maxp, likelihood)
return c
return decor
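# Illustrative registration (the mode below is hypothetical; real modes live in the
# modules loaded by import_builtin_modes()):
#
#     @game_mode("example", minp=4, maxp=24, likelihood=10)
#     class ExampleMode(GameMode):
#         ...
#
# The decorator stores the class in var.GAME_MODES keyed by name, together with its
# player bounds and selection likelihood.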
def import_builtin_modes():
path = os.path.dirname(os.path.abspath(__file__))
search = os.path.join(path, "*.py")
for f in glob.iglob(search):
f = os.path.basename(f)
n, _ = os.path.splitext(f)
if f.startswith("_"):
continue
importlib.import_module("." + n, package="src.gamemodes")
class GameMode:
def __init__(self, arg=""):
# Default values for the role sets and secondary roles restrictions
self.ROLE_SETS = {
"gunner/sharpshooter": {"gunner": 4, "sharpshooter": 1},
}
self.SECONDARY_ROLES = {
"cursed villager" : All - Cursed - Wolf - Innocent - {"seer", "oracle"},
"gunner" : Village + Neutral + Hidden - Innocent - Team_Switcher,
"sharpshooter" : Village + Neutral + Hidden - Innocent - Team_Switcher,
"mayor" : All - Innocent - Win_Stealer,
"assassin" : All - Nocturnal + Killer - Spy + Wolfchat - Wolf - Innocent - Team_Switcher - {"traitor"},
}
self.DEFAULT_TOTEM_CHANCES = self.TOTEM_CHANCES = {}
self.NUM_TOTEMS = {}
self.EVENTS = {}
# Support all shamans and totems
# Listeners should add their custom totems with non-zero chances, and custom roles in evt.data["shaman_roles"]
# Totems (both the default and custom ones) get filled with every shaman role at a chance of 0
# Add totems with a priority of 1 and shamans with a priority of 3
# Listeners at priority 5 can make use of this information freely
evt = Event("default_totems", {"shaman_roles": set()})
evt.dispatch(self.TOTEM_CHANCES)
shamans = evt.data["shaman_roles"]
for chances in self.TOTEM_CHANCES.values():
if chances.keys() != shamans:
for role in shamans:
if role not in chances:
chances[role] = 0 # default to 0 for new totems/shamans
for role in shamans:
if role not in self.NUM_TOTEMS:
self.NUM_TOTEMS[role] = 1 # shamans get 1 totem per night by default
if not arg:
return
arg = arg.replace("=", ":").replace(";", ",")
pairs = [arg]
while pairs:
pair, *pairs = pairs[0].split(",", 1)
change = pair.lower().replace(":", " ").strip().rsplit(None, 1)
if len(change) != 2:
raise InvalidModeException(messages["invalid_mode_args"].format(arg))
key, val = change
if key == "role reveal":
if val not in ("on", "off", "team"):
raise InvalidModeException(messages["invalid_reveal"].format(val))
self.ROLE_REVEAL = val
if val == "off" and not hasattr(self, "STATS_TYPE"):
self.STATS_TYPE = "disabled"
elif val == "team" and not hasattr(self, "STATS_TYPE"):
self.STATS_TYPE = "team"
elif key == "stats":
if val not in ("default", "accurate", "team", "disabled"):
raise InvalidModeException(messages["invalid_stats"].format(val))
self.STATS_TYPE = val
elif key == "abstain":
if val not in ("enabled", "restricted", "disabled"):
raise InvalidModeException(messages["invalid_abstain"].format(val))
if val == "enabled":
self.ABSTAIN_ENABLED = True
self.LIMIT_ABSTAIN = False
elif val == "restricted":
self.ABSTAIN_ENABLED = True
self.LIMIT_ABSTAIN = True
elif val == "disabled":
self.ABSTAIN_ENABLED = False
def startup(self):
for event, listeners in self.EVENTS.items():
if isinstance(listeners, EventListener):
listeners.install(event)
else:
for listener in listeners:
listener.install(event)
def teardown(self):
for event, listeners in self.EVENTS.items():
if isinstance(listeners, EventListener):
listeners.remove(event)
else:
for listener in listeners:
listener.remove(event)
def can_vote_bot(self, var):
return False
def set_default_totem_chances(self):
if self.TOTEM_CHANCES is self.DEFAULT_TOTEM_CHANCES:
return # nothing more we can do
for totem, chances in self.TOTEM_CHANCES.items():
if totem not in self.DEFAULT_TOTEM_CHANCES or self.DEFAULT_TOTEM_CHANCES[totem].keys() == chances.keys():
continue
for role, value in self.DEFAULT_TOTEM_CHANCES[totem].items():
if role not in chances:
chances[role] = value
# Here so any game mode can use it
def lovers_chk_win(self, evt, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
winner = evt.data["winner"]
if winner in Win_Stealer:
return # fool won, lovers can't win even if they would
from src.roles.matchmaker import get_lovers
all_lovers = get_lovers()
if len(all_lovers) != 1:
return # we need exactly one cluster alive for this to trigger
lovers = all_lovers[0]
if len(lovers) == lpl:
evt.data["winner"] = "lovers"
evt.data["message"] = messages["lovers_win"]
def all_dead_chk_win(self, evt, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
if evt.data["winner"] == "no_team_wins":
evt.data["winner"] = "everyone"
evt.data["message"] = messages["everyone_died_won"]
|
src/encoded/tests/fixtures/schemas/correlation_quality_metric.py | procha2/encoded | 102 | 12702743 | import pytest
@pytest.fixture
def correlation_quality_metric(testapp, analysis_step_run_bam, file_tsv_1_2, award, lab):
item = {
'step_run': analysis_step_run_bam['@id'],
'quality_metric_of': [file_tsv_1_2['@id']],
'Pearson correlation': 0.1,
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/correlation_quality_metric', item).json['@graph'][0]
@pytest.fixture
def spearman_correlation_quality_metric(testapp, analysis_step_run_bam, file_tsv_1_2, award, lab):
item = {
'step_run': analysis_step_run_bam['@id'],
'quality_metric_of': [file_tsv_1_2['@id']],
'Spearman correlation': 0.7,
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/correlation_quality_metric', item).json['@graph'][0]
|
vendor/github.com/paulsmith/gogeos/geos/geoscapi.py | venicegeo/pzsvc-catalog | 197 | 12702753 | import sys
from string import Template
from collections import namedtuple
from pycparser import c_parser, c_ast, parse_file
Func = namedtuple('Func', ('name', 'type', 'args'))
Arg = namedtuple('Arg', ('name', 'type'))
Type = namedtuple('Type', ('ptr', 'name', 'array'))
class FuncDeclVisitor(c_ast.NodeVisitor):
def __init__(self):
self.funcs = []
self.reset()
def reset(self):
self.name = None
self.ptr = ''
self.type = None
self.inargs = False
self.args = []
self.argname = None
self.array = False
def visit_Typedef(self, node):
# Prevent func decls in typedefs from being visited
pass
def visit_FuncDecl(self, node):
self.visit(node.type)
if node.args:
self.inargs = True
self.visit(node.args)
self.funcs.append(Func(self.name, self.type, self.args))
self.reset()
def visit_PtrDecl(self, node):
self.ptr += '*'
self.visit(node.type)
def visit_TypeDecl(self, node):
if node.type.__class__.__name__ == 'Struct':
return
if self.inargs:
self.argname = node.declname
else:
self.name = node.declname
self.visit(node.type)
def visit_ArrayDecl(self, node):
self.array = True
self.visit(node.type)
def visit_IdentifierType(self, node):
type_ = Type(self.ptr, ' '.join(node.names), self.array)
if self.inargs:
self.args.append(Arg(self.argname, type_))
else:
self.type = type_
self.ptr = ''
self.array = False
def cgo_func_wrappers(filename):
ast = parse_file(filename, use_cpp=True)
v = FuncDeclVisitor()
v.visit(ast)
funcnames = {}
threadsafe = []
for func in v.funcs:
funcnames[func.name] = func
for func in v.funcs:
if not func.name.endswith('_r'):
if func.name + '_r' in funcnames:
threadsafe.append(funcnames[func.name + '_r'])
else:
threadsafe.append(func)
print("""
package geos
// Created mechanically from C API header - DO NOT EDIT
/*
#include <geos_c.h>
*/
import "C"
import (
"unsafe"
)\
""")
typemap = {
"unsigned char": "uchar",
"unsigned int": "uint",
}
identmap = {
"type": "_type",
}
for func in threadsafe:
def gotype(ctype):
type_ = "C." + typemap.get(ctype.name, ctype.name)
if ctype.ptr:
type_ = ctype.ptr + type_
if ctype.array:
type_ = '[]' + type_
return type_
def goident(arg, inbody=True):
def voidptr(ctype):
return ctype.ptr and ctype.name == 'void'
ident = identmap.get(arg.name, arg.name)
if arg.type.array and inbody:
ident = '&' + ident + '[0]'
if voidptr(arg.type) and inbody:
ident = 'unsafe.Pointer(' + ident + ')'
return ident
# Go function signature
gosig = "func $name($parameters)"
if func.type.name != "void":
gosig += " $result"
gosig += " {"
t = Template(gosig)
params = ", ".join([goident(p, inbody=False) + " " + gotype(p.type) for p in func.args if p.type.name != 'GEOSContextHandle_t'])
result = gotype(func.type)
func_name = "c" + func.name
if func_name.endswith('_r'):
func_name = func_name[:-2]
print(t.substitute(name=func_name, parameters=params, result=result))
# Go function body
gobody = """\
\t${return_stmt}C.$name($args)
}
"""
if func.name.endswith("_r") and func.name != "initGEOS_r":
gobody = """\
\t${handle_lock}.Lock()
\tdefer ${handle_lock}.Unlock()
""" + gobody
t = Template(gobody)
args = ", ".join([goident(p) for p in func.args])
return_stmt = 'return ' if func.type.name != 'void' else ''
print(t.substitute(return_stmt=return_stmt, name=func.name, args=args, handle_lock='handlemu'))
if __name__ == "__main__":
cgo_func_wrappers(sys.argv[1])
#from pycparser.c_generator import CGenerator
#ast = parse_file(sys.argv[1], use_cpp=True)
#print(CGenerator().visit(ast))
|
bcs-ui/backend/resources/node/client.py | laodiu/bk-bcs | 599 | 12702815 | <filename>bcs-ui/backend/resources/node/client.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import functools
import logging
from typing import Any, Dict, List, Type
from kubernetes.dynamic.resource import ResourceInstance
from backend.resources.constants import K8sResourceKind, NodeConditionStatus, NodeConditionType
from backend.resources.resource import ResourceClient, ResourceObj
from backend.utils.async_run import async_run
from backend.utils.basic import getitems
logger = logging.getLogger(__name__)
class NodeObj(ResourceObj):
def __init__(self, data: ResourceInstance):
super().__init__(data)
        # NOTE: returns None when an attribute does not exist
self.labels = dict(self.data.metadata.labels or {})
self.taints = [dict(t) for t in self.data.spec.taints or []]
@property
def inner_ip(self) -> str:
"""获取inner ip"""
addresses = self.data.status.addresses
for addr in addresses:
if addr.type == "InternalIP":
return addr.address
logger.warning("inner ip of addresses is null, address is %s", addresses)
return ""
@property
def node_status(self) -> str:
"""获取节点状态
ref: https://github.com/kubernetes/dashboard/blob/0de61860f8d24e5a268268b1fbadf327a9bb6013/src/app/backend/resource/node/list.go#L106 # noqa
"""
for condition in self.data.status.conditions:
if condition.type != NodeConditionType.Ready:
continue
            # Node is healthy and ready
if condition.status == "True":
return NodeConditionStatus.Ready
            # Node is unhealthy and cannot accept Pods
return NodeConditionStatus.NotReady
        # The node controller has not heard from the node within the last node-monitor-grace-period (default 40s)
return NodeConditionStatus.Unknown
class Node(ResourceClient):
"""节点 client
针对节点的查询、操作等
"""
kind = K8sResourceKind.Node.value
result_type: Type['ResourceObj'] = NodeObj
def set_labels_for_multi_nodes(self, node_labels: List[Dict]):
"""设置标签
:param node_labels: 要设置的标签信息,格式: [{"node_name": "", "labels": {"key": "val"}}]
NOTE: 如果要删除某个label时,不建议使用replace,可以把要删除的label的值设置为None
"""
filter_labels = self.filter_nodes_field_data(
"labels", [label["node_name"] for label in node_labels], node_id_field="name", default_data={}
)
        # Compare with the cluster: a label that exists on the node but not in the update must be set to None in the update so it gets removed
for node in node_labels:
labels = filter_labels.get(node["node_name"])
            # Set the value of keys to be deleted to None
for key in set(labels) - set(node["labels"]):
node["labels"][key] = None
# 下发的body格式: {"metadata": {"labels": {"demo": "demo"}}}
tasks = [
functools.partial(self.patch, {"metadata": {"labels": l["labels"]}}, l["node_name"]) for l in node_labels
]
        # Raise an exception if any operation fails
async_run(tasks)
def set_taints_for_multi_nodes(self, node_taints: List[Dict]):
"""设置污点
:param node_taints: 要设置的污点信息,格式: [{"node_name": "", "taints": [{"key": "", "value": "", "effect": ""}]}]
"""
# 下发的body格式: {"spec": {"taints": [{"key": xxx, "value": xxx, "effect": xxx}]}}
tasks = [functools.partial(self.patch, {"spec": {"taints": t["taints"]}}, t["node_name"]) for t in node_taints]
        # Raise an exception if any operation fails
async_run(tasks)
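    # Illustrative call (instance, node names and taint values are hypothetical);
    # any failed patch raises through async_run:
    #
    #     node_client.set_taints_for_multi_nodes([
    #         {"node_name": "node-1",
    #          "taints": [{"key": "dedicated", "value": "db", "effect": "NoSchedule"}]},
    #         {"node_name": "node-2", "taints": []},
    #     ])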
def filter_nodes_field_data(
self,
field: str,
filter_node_names: List[str],
node_id_field: str = "inner_ip",
default_data: Any = None,
) -> Dict:
"""查询节点属性
:param field: 查询的属性
:param filter_node_names: 节点name列表
:param node_id_field: 节点标识的属性名称,支持name和inner_ip,默认是inner_ip
:returns: 返回节点的属性数据
"""
nodes = self.list(is_format=False)
data = {}
for node in nodes.items:
if node.name not in filter_node_names:
continue
            # field is caller-controlled, so skip exception handling for now
node_id = getattr(node, node_id_field, "")
data[node_id] = getattr(node, field, default_data)
return data
|
boto3_type_annotations/boto3_type_annotations/appstream/client.py | cowboygneox/boto3_type_annotations | 119 | 12702831 | <reponame>cowboygneox/boto3_type_annotations
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def associate_fleet(self, FleetName: str, StackName: str) -> Dict:
pass
def batch_associate_user_stack(self, UserStackAssociations: List) -> Dict:
pass
def batch_disassociate_user_stack(self, UserStackAssociations: List) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def copy_image(self, SourceImageName: str, DestinationImageName: str, DestinationRegion: str, DestinationImageDescription: str = None) -> Dict:
pass
def create_directory_config(self, DirectoryName: str, OrganizationalUnitDistinguishedNames: List, ServiceAccountCredentials: Dict) -> Dict:
pass
def create_fleet(self, Name: str, InstanceType: str, ComputeCapacity: Dict, ImageName: str = None, ImageArn: str = None, FleetType: str = None, VpcConfig: Dict = None, MaxUserDurationInSeconds: int = None, DisconnectTimeoutInSeconds: int = None, Description: str = None, DisplayName: str = None, EnableDefaultInternetAccess: bool = None, DomainJoinInfo: Dict = None, Tags: Dict = None) -> Dict:
pass
def create_image_builder(self, Name: str, InstanceType: str, ImageName: str = None, ImageArn: str = None, Description: str = None, DisplayName: str = None, VpcConfig: Dict = None, EnableDefaultInternetAccess: bool = None, DomainJoinInfo: Dict = None, AppstreamAgentVersion: str = None, Tags: Dict = None) -> Dict:
pass
def create_image_builder_streaming_url(self, Name: str, Validity: int = None) -> Dict:
pass
def create_stack(self, Name: str, Description: str = None, DisplayName: str = None, StorageConnectors: List = None, RedirectURL: str = None, FeedbackURL: str = None, UserSettings: List = None, ApplicationSettings: Dict = None, Tags: Dict = None) -> Dict:
pass
def create_streaming_url(self, StackName: str, FleetName: str, UserId: str, ApplicationId: str = None, Validity: int = None, SessionContext: str = None) -> Dict:
pass
def create_user(self, UserName: str, AuthenticationType: str, MessageAction: str = None, FirstName: str = None, LastName: str = None) -> Dict:
pass
def delete_directory_config(self, DirectoryName: str) -> Dict:
pass
def delete_fleet(self, Name: str) -> Dict:
pass
def delete_image(self, Name: str) -> Dict:
pass
def delete_image_builder(self, Name: str) -> Dict:
pass
def delete_image_permissions(self, Name: str, SharedAccountId: str) -> Dict:
pass
def delete_stack(self, Name: str) -> Dict:
pass
def delete_user(self, UserName: str, AuthenticationType: str) -> Dict:
pass
def describe_directory_configs(self, DirectoryNames: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_fleets(self, Names: List = None, NextToken: str = None) -> Dict:
pass
def describe_image_builders(self, Names: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_image_permissions(self, Name: str, MaxResults: int = None, SharedAwsAccountIds: List = None, NextToken: str = None) -> Dict:
pass
def describe_images(self, Names: List = None, Arns: List = None, Type: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
pass
def describe_sessions(self, StackName: str, FleetName: str, UserId: str = None, NextToken: str = None, Limit: int = None, AuthenticationType: str = None) -> Dict:
pass
def describe_stacks(self, Names: List = None, NextToken: str = None) -> Dict:
pass
def describe_user_stack_associations(self, StackName: str = None, UserName: str = None, AuthenticationType: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_users(self, AuthenticationType: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def disable_user(self, UserName: str, AuthenticationType: str) -> Dict:
pass
def disassociate_fleet(self, FleetName: str, StackName: str) -> Dict:
pass
def enable_user(self, UserName: str, AuthenticationType: str) -> Dict:
pass
def expire_session(self, SessionId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_associated_fleets(self, StackName: str, NextToken: str = None) -> Dict:
pass
def list_associated_stacks(self, FleetName: str, NextToken: str = None) -> Dict:
pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
pass
def start_fleet(self, Name: str) -> Dict:
pass
def start_image_builder(self, Name: str, AppstreamAgentVersion: str = None) -> Dict:
pass
def stop_fleet(self, Name: str) -> Dict:
pass
def stop_image_builder(self, Name: str) -> Dict:
pass
def tag_resource(self, ResourceArn: str, Tags: Dict) -> Dict:
pass
def untag_resource(self, ResourceArn: str, TagKeys: List) -> Dict:
pass
def update_directory_config(self, DirectoryName: str, OrganizationalUnitDistinguishedNames: List = None, ServiceAccountCredentials: Dict = None) -> Dict:
pass
def update_fleet(self, ImageName: str = None, ImageArn: str = None, Name: str = None, InstanceType: str = None, ComputeCapacity: Dict = None, VpcConfig: Dict = None, MaxUserDurationInSeconds: int = None, DisconnectTimeoutInSeconds: int = None, DeleteVpcConfig: bool = None, Description: str = None, DisplayName: str = None, EnableDefaultInternetAccess: bool = None, DomainJoinInfo: Dict = None, AttributesToDelete: List = None) -> Dict:
pass
def update_image_permissions(self, Name: str, SharedAccountId: str, ImagePermissions: Dict) -> Dict:
pass
def update_stack(self, Name: str, DisplayName: str = None, Description: str = None, StorageConnectors: List = None, DeleteStorageConnectors: bool = None, RedirectURL: str = None, FeedbackURL: str = None, AttributesToDelete: List = None, UserSettings: List = None, ApplicationSettings: Dict = None) -> Dict:
pass
|
inventory/admin.py | TechNic11/Try-Django-3.2 | 136 | 12702844 | from django.contrib import admin
# Register your models here.
from .models import InventoryRequest
class InventoryRequestAdmin(admin.ModelAdmin):
list_display = ['user', 'name', 'total', 'unit', 'status']
admin.site.register(InventoryRequest, InventoryRequestAdmin) |
test.py | cuicaihao/STANet | 220 | 12702846 | from data import create_dataset
from models import create_model
from util.util import save_images
import numpy as np
from util.util import mkdir
import argparse
from PIL import Image
import torchvision.transforms as transforms
def transform():
transform_list = []
transform_list += [transforms.ToTensor()]
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def val(opt):
image_1_path = opt.image1_path
image_2_path = opt.image2_path
A_img = Image.open(image_1_path).convert('RGB')
B_img = Image.open(image_2_path).convert('RGB')
trans = transform()
A = trans(A_img).unsqueeze(0)
B = trans(B_img).unsqueeze(0)
# dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
save_path = opt.results_dir
mkdir(save_path)
model.eval()
data = {}
data['A']= A
data['B'] = B
data['A_paths'] = [image_1_path]
model.set_input(data) # unpack data from data loader
pred = model.test(val=False) # run inference return pred
img_path = [image_1_path] # get image paths
save_images(pred, save_path, img_path)
if __name__ == '__main__':
    # Command-line usage:
# python test.py --image1_path [path-to-img1] --image2_path [path-to-img2] --results_dir [path-to-result_dir]
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image1_path', type=str, default='./samples/A/test_2_0000_0000.png',
help='path to images A')
parser.add_argument('--image2_path', type=str, default='./samples/B/test_2_0000_0000.png',
help='path to images B')
parser.add_argument('--results_dir', type=str, default='./samples/output/', help='saves results here.')
parser.add_argument('--name', type=str, default='pam',
help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='CDFA', help='chooses which model to use. [CDF0 | CDFA]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB ')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB')
parser.add_argument('--arch', type=str, default='mynet3', help='feature extractor architecture | mynet3')
parser.add_argument('--f_c', type=int, default=64, help='feature extractor channel num')
parser.add_argument('--n_class', type=int, default=2, help='# of output pred channels: 2 for num of classes')
parser.add_argument('--SA_mode', type=str, default='PAM',
help='choose self attention mode for change detection, | ori |1 | 2 |pyramid, ...')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='changedetection',
help='chooses how datasets are loaded. [changedetection | json]')
parser.add_argument('--val_dataset_mode', type=str, default='changedetection',
help='chooses how datasets are loaded. [changedetection | json]')
parser.add_argument('--split', type=str, default='train',
                        help='chooses which list-file to open when using listDataset. [train | val | test]')
parser.add_argument('--ds', type=int, default='1', help='self attention module downsample rate')
parser.add_argument('--angle', type=int, default=0, help='rotate angle')
parser.add_argument('--istest', type=bool, default=False, help='True for the case without label')
parser.add_argument('--serial_batches', action='store_true',
help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=0, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=256, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"),
help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop',
help='scaling and cropping of images at load time [resize_and_crop | none]')
parser.add_argument('--no_flip', type=bool, default=True,
help='if specified, do not flip(left-right) the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
parser.add_argument('--epoch', type=str, default='pam',
help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0',
help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
parser.add_argument('--isTrain', type=bool, default=False, help='is or not')
parser.add_argument('--num_test', type=int, default=np.inf, help='how many test images to run')
opt = parser.parse_args()
val(opt)
|
system.d/library/tools_installer/tools_to_install/responder/tools/MultiRelay/creddump/framework/addrspace.py | CraftyBastard/PiBunny | 201 | 12702855 | <reponame>CraftyBastard/PiBunny<gh_stars>100-1000
# Volatility
# Copyright (C) 2007 Volatile Systems
#
# Original Source:
# Copyright (C) 2004,2005,2006 4tphi Research
# Author: {npetroni,<EMAIL> (<NAME> and <NAME>)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: <NAME>
@license: GNU General Public License 2.0 or later
@contact: <EMAIL>
@organization: Volatile Systems
"""
""" Alias for all address spaces """
import os
import struct
class FileAddressSpace:
def __init__(self, fname, mode='rb', fast=False):
self.fname = fname
self.name = fname
self.fhandle = open(fname, mode)
self.fsize = os.path.getsize(fname)
if fast == True:
self.fast_fhandle = open(fname, mode)
def fread(self,len):
return self.fast_fhandle.read(len)
def read(self, addr, len):
self.fhandle.seek(addr)
return self.fhandle.read(len)
def read_long(self, addr):
string = self.read(addr, 4)
(longval, ) = struct.unpack('L', string)
return longval
def get_address_range(self):
return [0,self.fsize-1]
def get_available_addresses(self):
return [self.get_address_range()]
def is_valid_address(self, addr):
return addr < self.fsize - 1
def close():
self.fhandle.close()
# Code below written by <NAME>
BLOCK_SIZE = 0x1000
class HiveFileAddressSpace:
def __init__(self, fname):
self.fname = fname
self.base = FileAddressSpace(fname)
def vtop(self, vaddr):
return vaddr + BLOCK_SIZE + 4
def read(self, vaddr, length, zero=False):
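        # A read may span several hive blocks: handle the partial first block,
        # then any whole blocks, then the trailing partial block, translating the
        # virtual address of each block separately via vtop().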
first_block = BLOCK_SIZE - vaddr % BLOCK_SIZE
full_blocks = ((length + (vaddr % BLOCK_SIZE)) / BLOCK_SIZE) - 1
left_over = (length + vaddr) % BLOCK_SIZE
paddr = self.vtop(vaddr)
if paddr == None and zero:
if length < first_block:
return "\0" * length
else:
stuff_read = "\0" * first_block
elif paddr == None:
return None
else:
if length < first_block:
stuff_read = self.base.read(paddr, length)
if not stuff_read and zero:
return "\0" * length
else:
return stuff_read
stuff_read = self.base.read(paddr, first_block)
if not stuff_read and zero:
stuff_read = "\0" * first_block
new_vaddr = vaddr + first_block
for i in range(0,full_blocks):
paddr = self.vtop(new_vaddr)
if paddr == None and zero:
stuff_read = stuff_read + "\0" * BLOCK_SIZE
elif paddr == None:
return None
else:
new_stuff = self.base.read(paddr, BLOCK_SIZE)
if not new_stuff and zero:
new_stuff = "\0" * BLOCK_SIZE
elif not new_stuff:
return None
else:
stuff_read = stuff_read + new_stuff
new_vaddr = new_vaddr + BLOCK_SIZE
if left_over > 0:
paddr = self.vtop(new_vaddr)
if paddr == None and zero:
stuff_read = stuff_read + "\0" * left_over
elif paddr == None:
return None
else:
stuff_read = stuff_read + self.base.read(paddr, left_over)
return stuff_read
def read_long_phys(self, addr):
string = self.base.read(addr, 4)
(longval, ) = struct.unpack('L', string)
return longval
def is_valid_address(self, vaddr):
paddr = self.vtop(vaddr)
if not paddr: return False
return self.base.is_valid_address(paddr)
|
johnny/backends/locmem.py | bennylope/johnny-cache | 124 | 12702869 | """
Infinite caching locmem class. Caches forever when passed timeout of 0.
This actually doesn't cache "forever", just for a very long time. On
32 bit systems, it will cache for 68 years, quite a bit longer than any
computer will last. On a 64 bit machine, your cache will expire about
285 billion years after the Sun goes red-giant and destroys Earth.
"""
import sys
from django.core.cache.backends import locmem
class LocMemCache(locmem.LocMemCache):
def add(self, key, value, timeout=None, **kwargs):
        if timeout == 0:
timeout = sys.maxsize
return super(LocMemCache, self).add(
key, value, timeout=timeout, **kwargs)
def set(self, key, value, timeout=None, **kwargs):
        if timeout == 0:
timeout = sys.maxsize
return super(LocMemCache, self).set(
key, value, timeout=timeout, **kwargs)
|
src/foremast/configs/outputs.py | dnava013/foremast | 157 | 12702883 | # Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write output files for configurations."""
import json
import logging
from pprint import pformat
import foremastutils
from ..consts import APP_FORMATS
from ..utils import DeepChainMap, get_template
LOG = logging.getLogger(__name__)
def convert_ini(config_dict):
"""Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
"""
config_lines = []
for env, configs in sorted(config_dict.items()):
for resource, app_properties in sorted(configs.items()):
try:
for app_property, value in sorted(app_properties.items()):
variable = '{env}_{resource}_{app_property}'.format(
env=env, resource=resource, app_property=app_property).upper()
if isinstance(value, (dict, DeepChainMap)):
safe_value = "'{0}'".format(json.dumps(dict(value)))
else:
safe_value = json.dumps(value)
line = "{variable}={value}".format(variable=variable, value=safe_value)
LOG.debug('INI line: %s', line)
config_lines.append(line)
except AttributeError:
resource = resource.upper()
app_properties = "'{}'".format(json.dumps(app_properties))
line = '{0}={1}'.format(resource, app_properties)
LOG.debug('INI line: %s', line)
config_lines.append(line)
return config_lines
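# A small illustrative flattening (values made up):
#
#     convert_ini({'dev': {'elb': {'subnet_purpose': 'internal'}}})
#     # -> ['DEV_ELB_SUBNET_PURPOSE="internal"']
#
# Dict values are additionally JSON-encoded and wrapped in single quotes so they can
# be sourced as shell variables.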
def write_variables(app_configs=None, out_file='', git_short=''):
"""Append _application.json_ configs to _out_file_, .exports, and .json.
Variables are written in INI style, e.g. UPPER_CASE=value. The .exports file
contains 'export' prepended to each line for easy sourcing. The .json file
is a minified representation of the combined configurations.
Args:
app_configs (dict): Environment configurations from _application.json_
files, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.
out_file (str): Name of INI file to append to.
git_short (str): Short name of Git repository, e.g. forrest/core.
Returns:
dict: Configuration equivalent to the JSON output.
"""
generated = foremastutils.Generator(*foremastutils.Parser(git_short).parse_url(), formats=APP_FORMATS)
json_configs = {}
for env, configs in app_configs.items():
if env != 'pipeline':
instance_profile = generated.iam()['profile']
rendered_configs = json.loads(
get_template(
'configs/configs.json.j2',
env=env,
app=generated.app_name(),
profile=instance_profile,
formats=generated))
json_configs[env] = dict(DeepChainMap(configs, rendered_configs))
region_list = configs.get('regions', rendered_configs['regions'])
json_configs[env]['regions'] = region_list # removes regions defined in templates but not configs.
for region in region_list:
region_config = json_configs[env][region]
json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))
else:
default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))
json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))
LOG.debug('Compiled configs:\n%s', pformat(json_configs))
config_lines = convert_ini(json_configs)
with open(out_file, 'at') as jenkins_vars:
LOG.info('Appending variables to %s.', out_file)
jenkins_vars.write('\n'.join(config_lines))
with open(out_file + '.exports', 'wt') as export_vars:
LOG.info('Writing sourceable variables to %s.', export_vars.name)
export_vars.write('\n'.join('export {0}'.format(line) for line in config_lines))
with open(out_file + '.json', 'wt') as json_handle:
LOG.info('Writing JSON to %s.', json_handle.name)
LOG.debug('Total JSON dict:\n%s', json_configs)
json.dump(json_configs, json_handle)
return json_configs
|
libtbx/configure.py | rimmartin/cctbx_project | 155 | 12702889 | from __future__ import absolute_import, division, print_function
import sys, os
def run():
if sys.hexversion < 0x02070000:
print()
print("*" * 78)
print("FATAL: Python 2.7 or higher is required.")
print("Version currently in use:", sys.version)
print("*" * 78)
print()
return False
# test for six and future
try:
import six
import future
except ImportError:
print()
print("*" * 78)
print("FATAL: For Python 2/3 compatibility, CCTBX currently requires the six and\n future modules.")
print("To install, please use pip or conda (if available)")
print(" pip install six future")
print(" conda install six future")
print("*" * 78)
print()
return False
sys.path[0] = os.path.dirname(sys.path[0])
import libtbx.env_config
libtbx.env_config.cold_start(sys.argv)
print("Done.")
return True
if __name__ == "__main__":
if not run():
sys.exit(1)
|
tests/onnx/test_onnx.py | techthiyanes/optimum | 414 | 12702911 | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile
from unittest import TestCase
import numpy as np
import torch
from transformers import AutoModel, AutoTokenizer
from transformers.models.albert import AlbertOnnxConfig
from transformers.onnx import export
from onnx import load as onnx_load
from onnxruntime import InferenceSession
from optimum.onnx.graph_transformations import remove_duplicate_weights
class WeightSharingTestCase(TestCase):
def test_weight_sharing_output_match(self):
with torch.no_grad():
for model in {"albert-base-v1", "albert-base-v2"}:
tokenizer = AutoTokenizer.from_pretrained(model)
model = AutoModel.from_pretrained(model)
onnx_config = AlbertOnnxConfig.from_model_config(model.config)
with NamedTemporaryFile("w+b") as original_onnx_f:
export(tokenizer, model, onnx_config, opset=12, output=Path(original_onnx_f.name))
original_albert_ir = onnx_load(original_onnx_f)
compressed_albert_ir = remove_duplicate_weights(original_albert_ir, inplace=False)
compressed_albert_session = InferenceSession(
compressed_albert_ir.SerializeToString(), providers=["CPUExecutionProvider"]
)
original_outputs = model(**tokenizer("Hello from Hugging Face", return_tensors="pt"))
compressed_outputs = compressed_albert_session.run(
None, dict(tokenizer("Hello from Hugging Face", return_tensors="np"))
)
self.assertTrue(
np.allclose(original_outputs.last_hidden_state.cpu().numpy(), compressed_outputs[0], atol=1e-4)
)
self.assertTrue(
np.allclose(original_outputs.pooler_output.cpu().numpy(), compressed_outputs[1], atol=1e-4)
)
if __name__ == "__main__":
unittest.main()
|
easyrobust/test_scripts/utils.py | thu-ml/realsafe | 107 | 12702955 | import torch
import torch.nn as nn
robust_model_urls = {
'resnet50': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_resnet50_ep4.pth',
'resnet101': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_resnet101_ep4.pth',
'vgg13': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_vgg13_ep4.pth',
'vgg16': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_vgg16_ep4.pth',
'resnext50_32x4d': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_resnext50_32x4d_ep4.pth',
'densenet121': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_densenet121_ep4.pth',
'seresnet50': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_seresnet50_ep4.pth',
'seresnet101': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_seresnet101_ep4.pth',
'resnest50d': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_resnest50d_ep4.pth',
'efficientnet_b0': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_efficientnet_b0_ep4.pth',
'efficientnet_b1': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_efficientnet_b1_ep4.pth',
'efficientnet_b2': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_efficientnet_b2_ep4.pth',
'efficientnet_b3': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_efficientnet_b3_ep4.pth',
'vit_small_patch16_224': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_vit_small_patch16_224_ep4.pth',
'vit_base_patch32_224': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_vit_base_patch32_224_ep4.pth',
'vit_base_patch16_224': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_vit_base_patch16_224_ep4.pth',
'swin_small_patch4_window7_224': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_swin_small_patch4_window7_224_ep4.pth',
'swin_base_patch4_window7_224': 'http://alisec-competition.oss-cn-shanghai.aliyuncs.com/xiaofeng/imagenet_pretrained_models/advtrain_models/advtrain_swin_base_patch4_window7_224_ep4.pth'
}
def normalize_fn(tensor, mean, std):
"""Differentiable version of torchvision.functional.normalize"""
# here we assume the color channel is in at dim=1
mean = mean[None, :, None, None]
std = std[None, :, None, None]
return tensor.sub(mean).div(std)
class NormalizeByChannelMeanStd(nn.Module):
def __init__(self, mean, std):
super(NormalizeByChannelMeanStd, self).__init__()
if not isinstance(mean, torch.Tensor):
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
std = torch.tensor(std)
self.register_buffer("mean", mean)
self.register_buffer("std", std)
def forward(self, tensor):
return normalize_fn(tensor, self.mean, self.std)
def extra_repr(self):
return 'mean={}, std={}'.format(self.mean, self.std)
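# Typical wiring (a sketch; the mean/std shown are the usual ImageNet constants,
# assumed rather than taken from this file): prepend the normalizer so attacks
# operate in [0, 1] pixel space while the network still sees normalized inputs.
#
#     normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
#                                           std=[0.229, 0.224, 0.225])
#     model = nn.Sequential(normalize, backbone)  # backbone is a hypothetical classifier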
def replace_best(loss, bloss, x, bx):
if bloss is None:
bx = x.clone().detach()
bloss = loss.clone().detach()
else:
replace = bloss < loss
bx[replace] = x[replace].clone().detach()
bloss[replace] = loss[replace]
return bloss, bx
class AttackerStep:
'''
Generic class for attacker steps, under perturbation constraints
specified by an "origin input" and a perturbation magnitude.
Must implement project, step, and random_perturb
'''
def __init__(self, orig_input, eps, step_size, use_grad=True):
'''
Initialize the attacker step with a given perturbation magnitude.
Args:
eps (float): the perturbation magnitude
orig_input (ch.tensor): the original input
'''
self.orig_input = orig_input
self.eps = eps
self.step_size = step_size
self.use_grad = use_grad
def project(self, x):
'''
Given an input x, project it back into the feasible set
Args:
ch.tensor x : the input to project back into the feasible set.
Returns:
A `ch.tensor` that is the input projected back into
the feasible set, that is,
.. math:: \min_{x' \in S} \|x' - x\|_2
'''
raise NotImplementedError
def step(self, x, g):
'''
Given a gradient, make the appropriate step according to the
perturbation constraint (e.g. dual norm maximization for :math:`\ell_p`
norms).
Parameters:
g (ch.tensor): the raw gradient
Returns:
The new input, a ch.tensor for the next step.
'''
raise NotImplementedError
def random_perturb(self, x):
'''
Given a starting input, take a random step within the feasible set
'''
raise NotImplementedError
def to_image(self, x):
'''
Given an input (which may be in an alternative parameterization),
convert it to a valid image (this is implemented as the identity
function by default as most of the time we use the pixel
        parameterization, but for alternative parameterizations this function
        must be overridden).
'''
return x
# L-infinity threat model
class LinfStep(AttackerStep):
"""
Attack step for :math:`\ell_\infty` threat model. Given :math:`x_0`
and :math:`\epsilon`, the constraint set is given by:
.. math:: S = \{x | \|x - x_0\|_\infty \leq \epsilon\}
"""
def project(self, x):
"""
"""
diff = x - self.orig_input
diff = torch.clamp(diff, -self.eps, self.eps)
return torch.clamp(diff + self.orig_input, 0, 1)
def step(self, x, g):
"""
"""
step = torch.sign(g) * self.step_size
return x + step
def random_perturb(self, x):
"""
"""
new_x = x + 2 * (torch.rand_like(x) - 0.5) * self.eps
return torch.clamp(new_x, 0, 1)
def pgd_attack(images, target, model, eps, attack_steps, attack_lr, num_restart, device, random_start=True, use_best=True):
# generate adversarial examples
prev_training = bool(model.training)
model.eval()
orig_input = images.clone().detach().to(device)
attack_criterion = torch.nn.CrossEntropyLoss(reduction='none')
best_loss = None
best_x = None
for _ in range(num_restart):
step = LinfStep(eps=eps, orig_input=orig_input, step_size=attack_lr)
new_images = orig_input.clone().detach()
images = new_images
if random_start:
images = step.random_perturb(images)
for _ in range(attack_steps):
images = images.clone().detach().requires_grad_(True)
adv_losses = attack_criterion(model(images), target)
torch.mean(adv_losses).backward()
grad = images.grad.detach()
with torch.no_grad():
varlist = [adv_losses, best_loss, images, best_x]
best_loss, best_x = replace_best(*varlist) if use_best else (adv_losses, images)
images = step.step(images, grad)
images = step.project(images)
adv_losses = attack_criterion(model(images), target)
varlist = [adv_losses, best_loss, images, best_x]
best_loss, best_x = replace_best(*varlist) if use_best else (adv_losses, images)
if prev_training:
model.train()
return best_x |
gmplot/drawables/marker_dropper.py | Monti03/gmplot | 606 | 12702969 | <gh_stars>100-1000
from gmplot.drawables.marker_icon import _MarkerIcon
from gmplot.drawables.raw_marker import _RawMarker
class _MarkerDropper(object):
'''
Handler that drops markers on map clicks.
The markers can be deleted when clicked on.
'''
_MARKER_NAME = 'dropped_marker'
_EVENT_OBJECT_NAME = 'event'
def __init__(self, color, **kwargs):
'''
Args:
color (str): Color of the markers to be dropped. Can be hex ('#00FFFF'), named ('cyan'), or matplotlib-like ('c').
Optional:
Args:
title (str): Hover-over title of the markers to be dropped.
label (str): Label displayed on the markers to be dropped.
draggable (bool): Whether or not the markers to be dropped are draggable.
'''
self._marker_icon = _MarkerIcon(color)
self._marker = _RawMarker(
'%s.latLng' % self._EVENT_OBJECT_NAME,
self._marker_icon.get_name(),
**kwargs
)
def write(self, w, context):
'''
Write the marker dropper.
Args:
w (_Writer): Writer used to write the marker dropper.
context (_Context): Context used to keep track of what was drawn to the map.
'''
# Write the marker icon (if it isn't written already):
self._marker_icon.write(w, context)
# Write the marker-dropping handler:
w.write('map.addListener("click", function(%s) {' % self._EVENT_OBJECT_NAME)
w.indent()
self._marker.write(w, self._MARKER_NAME)
w.write('''
{marker_name}.addListener('click', function() {{
{marker_name}.setMap(null);
}});
'''.format(marker_name=self._MARKER_NAME))
w.dedent()
w.write('});')
w.write()
|
usefulscripts/hcxgrep.py | roycewilliams/hcxtools | 1,382 | 12702973 | #!/usr/bin/env python2
'''
greps inside hccapx/pmkid structs by essid, mac_ap or mac_sta
This software is Copyright (c) 2019-2020, <NAME> <alex at stanev.org> and it is
hereby released to the general public under the following terms:
Redistribution and use in source and binary forms, with or without
modification, are permitted.
'''
from __future__ import print_function
import argparse
import os
import sys
import binascii
import struct
import re
import sre_constants
try:
from string import maketrans
except ImportError:
maketrans = bytearray.maketrans # pylint: disable=no-member
def parse_hccapx(hccapx):
'''hccapx decompose
https://hashcat.net/wiki/doku.php?id=hccapx
struct hccapx
{
u32 signature;
u32 version;
u8 message_pair;
u8 essid_len;
u8 essid[32];
u8 keyver;
u8 keymic[16];
u8 mac_ap[6];
u8 nonce_ap[32];
u8 mac_sta[6];
u8 nonce_sta[32];
u16 eapol_len;
u8 eapol[256];
} __attribute__((packed));
'''
hccapx_fmt = '< 4x 4x B B 32s B 16s 6s 32s 6s 32s H 256s'
try:
(message_pair,
essid_len, essid,
keyver, keymic,
mac_ap, nonce_ap, mac_sta, nonce_sta,
eapol_len, eapol) = struct.unpack(hccapx_fmt, hccapx)
except struct.error as ex:
        sys.stderr.write(str(ex) + '\n')
exit(1)
# fixup
res = ''
if args.t == 'essid':
res = essid[:essid_len]
elif args.t == 'mac_ap':
res = binascii.hexlify(mac_ap).zfill(12)
elif args.t == 'mac_sta':
res = binascii.hexlify(mac_sta).zfill(12)
return res
def parse_pmkid(pmkid):
'''pmkid decompose
format:
pmkid*mac_ap*mac_sta*essid
'''
arr = pmkid.split(b'*', 4)
res = ''
if len(arr) == 4:
try:
if args.t == 'essid':
res = binascii.unhexlify(arr[3].strip())
elif args.t == 'mac_ap':
res = arr[1]
elif args.t == 'mac_sta':
res = arr[2]
except TypeError as ex:
            sys.stderr.write(str(ex) + '\n')
exit(1)
return res
def parse_combined(hashline):
'''m22000 hashline decompose
format:
SIGNATURE*TYPE*PMKID/MIC*MACAP*MACSTA*ESSID*ANONCE*EAPOL*MESSAGEPAIR
'''
arr = hashline.split(b'*', 9)
res = ''
if len(arr) == 9:
try:
if args.t == 'essid':
res = binascii.unhexlify(arr[5].strip())
elif args.t == 'mac_ap':
res = arr[3]
elif args.t == 'mac_sta':
res = arr[4]
except TypeError as ex:
            sys.stderr.write(str(ex) + '\n')
exit(1)
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Extract records from wpa combined hashline/hccapx/pmkid file based on regexp')
#group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument(
'-f', '--file', type=argparse.FileType('r'),
help='Obtain patterns from FILE, one per line.')
parser.add_argument(
'PATTERNS', type=str, nargs='?',
help='RegExp pattern')
parser.add_argument(
'-v', '--invert-match', dest='v', action='store_true',
help='Invert the sense of matching, to select non-matching nets')
parser.add_argument(
'-t', '--type', dest='t', choices=['essid','mac_ap','mac_sta'], default='essid',
help='Field to apply matching, default essid')
parser.add_argument(
'infile', type=str, nargs='?',
help='hccapx/pmkid file to process')
try:
args = parser.parse_args()
except IOError as ex:
parser.error(str(ex))
# workaround encoding issues with python2
if sys.version_info[0] == 2:
reload(sys) # pylint: disable=undefined-variable
sys.setdefaultencoding('utf-8') # pylint: disable=no-member
# shift parameters
if args.file and args.PATTERNS:
args.infile = args.PATTERNS
args.PATTERNS = None
# no patterns set
if args.PATTERNS is None and args.file is None:
parser.print_help(sys.stderr)
sys.stderr.write('You must provide PATTERNS or -f FILE\n')
exit(1)
# read patterns from file
if args.PATTERNS is None:
args.PATTERNS = '|'.join('(?:{0})'.format(x.strip()) for x in args.file)
try:
regexp = re.compile(args.PATTERNS)
except sre_constants.error as e:
sys.stderr.write('Wrong regexp {0}: {1} \n'.format(args.PATTERNS, e))
exit(1)
if args.infile is not None and os.path.isfile(args.infile):
fd = open(args.infile, 'rb')
else:
fd = sys.stdin
structformat = ''
while True:
buf = fd.read(4)
if buf == 'WPA*':
buf = buf + fd.readline()
structformat = 'combined'
elif buf == 'HCPX':
buf = buf + fd.read(393 - 4)
structformat = 'hccapx'
else:
buf = buf + fd.readline()
structformat = 'pmkid'
if not buf:
break
if structformat == 'combined':
target = parse_combined(buf)
elif structformat == 'hccapx':
target = parse_hccapx(buf)
elif structformat == 'pmkid':
target = parse_pmkid(buf)
else:
sys.stderr.write('Unrecognized input format\n')
exit(1)
res = regexp.search(str(target))
if (res is not None and not args.v) or (res is None and args.v):
sys.stdout.write(buf)
|
billing/tests/test_bitcoin.py | timgates42/merchant | 332 | 12702979 | <gh_stars>100-1000
import mock
import decimal
from bitcoinrpc.data import TransactionInfo
from django.conf import settings
from django.test import TestCase
from django.utils.unittest import skipIf
from billing import get_gateway
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
TEST_AMOUNT = decimal.Decimal('0.01')
TEST_ADDRESS = 'n2RL9NRRGvKNqovb14qacSfbz6zQBkzDbU'
TEST_SUCCESSFUL_TXNS = [TransactionInfo(address=TEST_ADDRESS, amount=TEST_AMOUNT)]
@skipIf(not settings.MERCHANT_SETTINGS.get("bitcoin", None), "gateway not configured")
class BitcoinGatewayTestCase(TestCase):
def setUp(self):
with mock.patch('bitcoinrpc.connection.BitcoinConnection') as MockBitcoinConnection:
connection = MockBitcoinConnection()
connection.getnewaddress.return_value = TEST_ADDRESS
connection.listtransactions.return_value = TEST_SUCCESSFUL_TXNS
self.merchant = get_gateway("bitcoin")
self.address = self.merchant.get_new_address()
def testPurchase(self):
resp = self.merchant.purchase(TEST_AMOUNT, self.address)
self.assertEquals(resp['status'], 'SUCCESS')
def testPaymentSuccessfulSignal(self):
received_signals = []
def receive(sender, **kwargs):
received_signals.append(kwargs.get("signal"))
transaction_was_successful.connect(receive)
self.merchant.purchase(TEST_AMOUNT, self.address)
self.assertEquals(received_signals, [transaction_was_successful])
def testPaymentUnSuccessfulSignal(self):
received_signals = []
def receive(sender, **kwargs):
received_signals.append(kwargs.get("signal"))
transaction_was_unsuccessful.connect(receive)
self.merchant.purchase(TEST_AMOUNT/2, self.address)
self.assertEquals(received_signals, [transaction_was_unsuccessful])
|
Sign-Language-Recognition/code/generate_images_labels.py | lhvubtqn/Sign-Language-Recognition | 131 | 12702995 | """
Contains code to generate the <image-path> vs <image-label> list for a set of
images and write it to disk.
"""
import sys
import logging
import os
from common.config import get_config
logging_format = '[%(asctime)s||%(name)s||%(levelname)s]::%(message)s'
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"),
format=logging_format,
datefmt='%Y-%m-%d %H:%M:%S',)
logger = logging.getLogger(__file__)
def get_images_labels_list(images_dir_path):
"""
Recursively iterates through a directory and its subdirectories to list
    the info of all the images found in it.
    Returns a list of dictionaries where each dictionary contains `image_path`
and `image_label`.
"""
images_labels_list = []
logger.info('Images directory - "{}"'.format(images_dir_path))
for (dirpath, dirnames, filenames) in os.walk(images_dir_path):
for filename in filenames:
image_path = os.path.join(dirpath, filename)
image_label = os.path.splitext(os.path.basename(dirpath))[0]
image_info = {}
image_info['image_path'] = image_path
image_info['image_label'] = image_label
images_labels_list.append(image_info)
return images_labels_list
def write_images_labels_to_file(images_labels_list, output_file_path):
"""
Writes the list of images-labels to a file.
"""
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
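    # Each line written below is "<image-path>\t<image-label>", e.g.
    # (hypothetical path): "dataset/train/A/A_1.jpg\tA"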
with open(output_file_path, "w") as output_file:
for image_info in images_labels_list:
image_path = image_info['image_path']
image_label = image_info['image_label']
line = image_path + "\t" + image_label + '\n'
output_file.write(line)
def main():
images_source = sys.argv[1]
if images_source not in ['train', 'test']:
logger.error("Invalid image-source '{}'!".format(images_source))
return
images_dir_path = get_config('{}ing_images_dir_path'.format(images_source))
images_labels_path = get_config(
'{}ing_images_labels_path'.format(images_source))
logger.info("Gathering info about images at path '{}'..."
.format(images_dir_path))
images_labels_list = get_images_labels_list(images_dir_path)
logger.info("Done!")
logger.info("Writing images labels info to file at path '{}'...".format(
images_labels_path))
write_images_labels_to_file(images_labels_list, images_labels_path)
logger.info("Done!")
if __name__ == '__main__':
main()
|
dlt/train/wganct.py | dmarnerides/pydlt | 236 | 12703037 | <gh_stars>100-1000
import torch
from torch import autograd
from torch.autograd import Variable
from .ganbasetrainer import GANBaseTrainer
class WGANCTTrainer(GANBaseTrainer):
"""Wasserstein GAN Trainer with gradient penalty and correction term.
From Improving the Improved Training of Wasserstein GANs: A Consistency
Term and Its Dual Effect.
https://openreview.net/forum?id=SJx9GQb0-
Args:
generator (nn.Module): The generator network.
discriminator (nn.Module): The discriminator network.
g_optimizer (torch.optim.Optimizer): Generator Optimizer.
d_optimizer (torch.optim.Optimizer): Discriminator Optimizer.
lambda_gp (float): Weight of gradient penalty.
m_ct (float): Constant bound for consistency term.
lambda_ct (float): Weight of consistency term.
d_iter (int, optional): Number of discriminator steps per generator
step (default 1).
Each iteration returns the mini-batch and a tuple containing:
- The generator prediction.
- A dictionary containing a `d_loss` (not when validating) and a
`g_loss` dictionary (only if a generator step is performed):
            - `d_loss` contains: `d_loss`, `w_loss`, `gp` and `ct`.
- `g_loss` contains: `g_loss`.
Warning:
The discriminator forward function needs to be able to accept an optional
bool argument `correction_term`. When set to true, the forward function
must add dropout noise to the model and return a tuple containing the
second to last output of the discriminator along with the final output.
Example:
>>> trainer = dlt.train.WGANCTTrainer(gen, disc, g_optim, d_optim, lambda_gp, m_ct, lambda_ct)
>>> # Training mode
>>> trainer.train()
>>> for batch, (prediction, loss) in trainer(train_data_loader):
>>> print(loss['d_loss']['w_loss'])
"""
def __init__(self, generator, discriminator, g_optimizer, d_optimizer, lambda_gp, m_ct, lambda_ct, d_iter=1):
super(WGANCTTrainer, self).__init__(generator, discriminator, g_optimizer,
d_optimizer, d_iter)
# Register losses
self._losses['training'] = ['w_loss', 'd_loss', 'gp', 'ct', 'g_loss']
self._losses['validation'] = ['g_loss']
self.lambda_gp = lambda_gp
self.m_ct = m_ct
self.lambda_ct = lambda_ct
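    # A minimal sketch (assumed, not part of this module) of a discriminator
    # that satisfies the `correction_term` contract described in the Warning
    # above -- dropout supplies the noise, and the tuple returns the
    # second-to-last activation together with the final output:
    #
    #   class ToyDiscriminator(torch.nn.Module):
    #       def __init__(self):
    #           super(ToyDiscriminator, self).__init__()
    #           self.body = torch.nn.Sequential(
    #               torch.nn.Linear(2, 16), torch.nn.ReLU(), torch.nn.Dropout(0.1))
    #           self.head = torch.nn.Linear(16, 1)
    #
    #       def forward(self, x, correction_term=False):
    #           h = self.body(x)
    #           out = self.head(h)
    #           return (h, out) if correction_term else out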
def d_step(self, g_input, real_input):
disc, gen = self._models['discriminator'], self._models['generator']
self._set_gradients('discriminator', True)
self._set_gradients('generator', False)
prediction = gen(g_input)
error_fake = disc(prediction).mean()
error_real = disc(Variable(real_input)).mean()
gp = self.get_gp(prediction, real_input)
ct = self.get_ct(real_input)
w_loss = error_fake - error_real
total_loss = w_loss + gp + ct
disc.zero_grad()
total_loss.backward()
self._optimizers['discriminator'].step()
ret_losses = {'w_loss': w_loss.item(), 'gp': gp.item(),
'ct': ct.item(), 'd_loss': total_loss.item()}
self.d_iter_counter += 1
return prediction, ret_losses
def g_step(self, g_input, real_input):
disc, gen = self._models['discriminator'], self._models['generator']
self._set_gradients('discriminator', False)
self._set_gradients('generator', True)
prediction = gen(Variable(g_input))
loss = - disc(prediction).mean()
if self.training:
gen.zero_grad()
loss.backward()
self._optimizers['generator'].step()
return prediction, {'g_loss': loss.item()}
def get_gp(self, fake_input, real_input):
dimensions = [real_input.size(0)] + [1] * (real_input.ndimension() - 1)
alpha = torch.Tensor(*dimensions).to(real_input.device).uniform_()
interpolates = alpha * real_input + ((1 - alpha) * fake_input)
interpolates.requires_grad_(True)
disc_interpolates = self._models['discriminator'](interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones_like(disc_interpolates),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = torch.mean((1. - torch.sqrt(1e-8+torch.sum(gradients**2, dim=1)))**2)*self.lambda_gp
return gradient_penalty
def l2_norm(self, x):
return x.pow(2).view(x.size(0), -1).sum(-1).add(1e-8).sqrt()
def get_ct(self, real_input):
dx_dash_n2last, dx_dash = self._models['discriminator'](real_input, correction_term=True)
dx_dashdash_n2last, dx_dashdash = self._models['discriminator'](real_input, correction_term=True)
res = self.l2_norm(dx_dash - dx_dashdash) + 0.1 \
* self.l2_norm(dx_dash_n2last - dx_dashdash_n2last) \
- self.m_ct
return torch.nn.functional.relu(res, 0).mean()*self.lambda_ct
|
code/utils/augs.py | noammy/videowalk | 227 | 12703048 | import torchvision
import skimage
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
IMG_MEAN = (0.4914, 0.4822, 0.4465)
IMG_STD = (0.2023, 0.1994, 0.2010)
NORM = [transforms.ToTensor(),
transforms.Normalize(IMG_MEAN, IMG_STD)]
class MapTransform(object):
def __init__(self, transforms, pil_convert=True):
self.transforms = transforms
self.pil_convert = pil_convert
def __call__(self, vid):
if isinstance(vid, Image.Image):
return np.stack([self.transforms(vid)])
if isinstance(vid, torch.Tensor):
vid = vid.numpy()
if self.pil_convert:
x = np.stack([np.asarray(self.transforms(Image.fromarray(v))) for v in vid])
return x
else:
return np.stack([self.transforms(v) for v in vid])
def n_patches(x, n, transform, shape=(64, 64, 3), scale=[0.2, 0.8]):
''' unused '''
if shape[-1] == 0:
shape = np.random.uniform(64, 128)
shape = (shape, shape, 3)
crop = transforms.Compose([
lambda x: Image.fromarray(x) if not 'PIL' in str(type(x)) else x,
transforms.RandomResizedCrop(shape[0], scale=scale)
])
if torch.is_tensor(x):
x = x.numpy().transpose(1,2, 0)
P = []
for _ in range(n):
xx = transform(crop(x))
P.append(xx)
return torch.cat(P, dim=0)
def patch_grid(transform, shape=(64, 64, 3), stride=[0.5, 0.5]):
stride = np.random.random() * (stride[1] - stride[0]) + stride[0]
stride = [int(shape[0]*stride), int(shape[1]*stride), shape[2]]
spatial_jitter = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.RandomResizedCrop(shape[0], scale=(0.7, 0.9))
])
def aug(x):
if torch.is_tensor(x):
x = x.numpy().transpose(1, 2, 0)
elif 'PIL' in str(type(x)):
x = np.array(x)#.transpose(2, 0, 1)
winds = skimage.util.view_as_windows(x, shape, step=stride)
winds = winds.reshape(-1, *winds.shape[-3:])
P = [transform(spatial_jitter(w)) for w in winds]
return torch.cat(P, dim=0)
return aug
def get_frame_aug(frame_aug, patch_size):
train_transform = []
if 'cj' in frame_aug:
_cj = 0.1
train_transform += [
#transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(_cj, _cj, _cj, 0),
]
if 'flip' in frame_aug:
train_transform += [transforms.RandomHorizontalFlip()]
train_transform += NORM
train_transform = transforms.Compose(train_transform)
print('Frame augs:', train_transform, frame_aug)
if 'grid' in frame_aug:
aug = patch_grid(train_transform, shape=np.array(patch_size))
else:
aug = train_transform
return aug
def get_frame_transform(frame_transform_str, img_size):
tt = []
fts = frame_transform_str
norm_size = torchvision.transforms.Resize((img_size, img_size))
if 'crop' in fts:
tt.append(torchvision.transforms.RandomResizedCrop(
img_size, scale=(0.8, 0.95), ratio=(0.7, 1.3), interpolation=2),)
else:
tt.append(norm_size)
if 'cj' in fts:
_cj = 0.1
# tt += [#transforms.RandomGrayscale(p=0.2),]
tt += [transforms.ColorJitter(_cj, _cj, _cj, 0),]
if 'flip' in fts:
tt.append(torchvision.transforms.RandomHorizontalFlip())
print('Frame transforms:', tt, fts)
return tt
def get_train_transforms(args):
norm_size = torchvision.transforms.Resize((args.img_size, args.img_size))
frame_transform = get_frame_transform(args.frame_transforms, args.img_size)
frame_aug = get_frame_aug(args.frame_aug, args.patch_size)
frame_aug = [frame_aug] if args.frame_aug != '' else NORM
transform = frame_transform + frame_aug
train_transform = MapTransform(
torchvision.transforms.Compose(transform)
)
plain = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
norm_size,
*NORM,
])
def with_orig(x):
x = train_transform(x), \
plain(x[0]) if 'numpy' in str(type(x[0])) else plain(x[0].permute(2, 0, 1))
return x
return with_orig
|
tools/network/detectify-modules.py | wxyyxc1992/ChaosWarden | 123 | 12703055 | <filename>tools/network/detectify-modules.py
#
# Get CVE list:
# wget https://cve.mitre.org/data/downloads/allitems.csv
#
# Get Detectify modules list:
# 1/ login on Detectify
# 2/ Perform the following request:
# POST /ajax/application_load/50bdd32f114d2e889f29f31e3e79a1ac/
# with body: navigation[mode]=modules
# 3/ save the json returned in detectify-modules.json
#
import sys
import json
import csv
import re
import argparse
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument("-s","--search",help="search a specific keyword")
parser.add_argument("-l","--limit",help="display only n first results")
parser.add_argument("-d","--detectify",help="display only when a Detectify module is available", action="store_true")
parser.parse_args()
args = parser.parse_args()
if args.search:
search = args.search
else:
search = ''
if args.limit:
limit = int(args.limit)
else:
limit = 0
if args.detectify:
detectify = 1
else:
detectify = 0
def search_module( cve, search, detectify ):
    if search == '' or search.lower() in cve[2].lower():
for mod in t_modules:
if cve[0] in mod['moduleName']:
return [ mod['moduleName'], mod['userName'], mod['dateAdded'] ]
return 1
return 0
with open('detectify-modules.json') as json_file:
j_detectify = json.load(json_file)
t_modules = j_detectify['data']['widgets']['AllModulesList']['props']['changed']['modules']
with open('allitems.csv') as csv_file:
i = 0
csv_reader = csv.reader(csv_file, delimiter=',')
for cve in reversed(list(csv_reader)):
if "** RESERVED **" not in cve[2]:
r = search_module( cve, search, detectify )
if r != 0:
if detectify == 0 or type(r) is list:
i = i + 1
#sys.stdout.write("https://cve.mitre.org/cgi-bin/cvename.cgi?name=%s - %s..." % (cve[0],cve[2][:150]))
sys.stdout.write("https://cve.mitre.org/cgi-bin/cvename.cgi?name=%s - %s..." % (cve[0],cve[2][:150]))
if type(r) is list:
sys.stdout.write( colored(" -> %s - %s - %s" % (r[0],r[1],r[2]),"red") )
if detectify == 0 or type(r) is list:
sys.stdout.write("\n")
if limit and i >= limit:
break
|
Ryven/packages/auto_generated/glob/nodes.py | tfroehlich82/Ryven | 2,872 | 12703056 |
from NENV import *
import glob
class NodeBase(Node):
pass
class _Glob0_Node(NodeBase):
"""
"""
title = '_glob0'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='basename'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._glob0(self.input(0), self.input(1), self.input(2)))
class _Glob1_Node(NodeBase):
"""
"""
title = '_glob1'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='pattern'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._glob1(self.input(0), self.input(1), self.input(2)))
class _Glob2_Node(NodeBase):
"""
"""
title = '_glob2'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='pattern'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._glob2(self.input(0), self.input(1), self.input(2)))
class _Iglob_Node(NodeBase):
"""
"""
title = '_iglob'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='pathname'),
NodeInputBP(label='recursive'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._iglob(self.input(0), self.input(1), self.input(2)))
class _Ishidden_Node(NodeBase):
"""
"""
title = '_ishidden'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='path'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._ishidden(self.input(0)))
class _Isrecursive_Node(NodeBase):
"""
"""
title = '_isrecursive'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='pattern'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._isrecursive(self.input(0)))
class _Iterdir_Node(NodeBase):
"""
"""
title = '_iterdir'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._iterdir(self.input(0), self.input(1)))
class _Rlistdir_Node(NodeBase):
"""
"""
title = '_rlistdir'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='dironly'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob._rlistdir(self.input(0), self.input(1)))
class Escape_Node(NodeBase):
"""
Escape all special characters.
"""
title = 'escape'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='pathname'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.escape(self.input(0)))
class Glob_Node(NodeBase):
"""
Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
title = 'glob'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='pathname'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.glob(self.input(0)))
class Glob0_Node(NodeBase):
"""
"""
title = 'glob0'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='pattern'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.glob0(self.input(0), self.input(1)))
class Glob1_Node(NodeBase):
"""
"""
title = 'glob1'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='dirname'),
NodeInputBP(label='pattern'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.glob1(self.input(0), self.input(1)))
class Has_Magic_Node(NodeBase):
"""
"""
title = 'has_magic'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='s'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.has_magic(self.input(0)))
class Iglob_Node(NodeBase):
"""
Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
title = 'iglob'
type_ = 'glob'
init_inputs = [
NodeInputBP(label='pathname'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, glob.iglob(self.input(0)))
export_nodes(
_Glob0_Node,
_Glob1_Node,
_Glob2_Node,
_Iglob_Node,
_Ishidden_Node,
_Isrecursive_Node,
_Iterdir_Node,
_Rlistdir_Node,
Escape_Node,
Glob_Node,
Glob0_Node,
Glob1_Node,
Has_Magic_Node,
Iglob_Node,
)
|
my/pinboard.py | aluhrs13/HPI | 1,026 | 12703062 | <gh_stars>1000+
"""
[[https://pinboard.in][Pinboard]] bookmarks
"""
REQUIRES = [
'git+https://github.com/karlicoss/pinbexport',
]
from my.config import pinboard as config
import pinbexport.dal as pinbexport
Bookmark = pinbexport.Bookmark
# yep; it clearly looks like the purpose of the my. package is to wire files to the DAL implicitly; otherwise it's just a passthrough.
def dal() -> pinbexport.DAL:
from .core import get_files
inputs = get_files(config.export_dir) # todo rename to export_path
model = pinbexport.DAL(inputs)
return model
from typing import Iterable
def bookmarks() -> Iterable[pinbexport.Bookmark]:
return dal().bookmarks()
|
neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py | kevinintel/neural-compressor | 172 | 12703069 | <filename>neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from neural_compressor.utils.utility import dump_elapsed_time
from ..graph_base import GraphRewriterBase
from ..graph_util import GraphAnalyzer
from ..graph_util import GraphRewriterHelper as Helper
class FuseConvWithMathOptimizer(GraphRewriterBase):
""" Convert below subgraph to Conv2D + BiasAdd by eliminating math ops.
Conv2D Conv2D
| |
Sub |
| ----> |
RealDiv |
| |
Mul |
| |
BiasAdd BiasAdd
"""
@dump_elapsed_time("Pass FuseConvWithMathOptimizer")
def do_transformation(self):
g = GraphAnalyzer()
g.graph = self.model
graph_info = g.parse_graph()
pattern_definition = [['Conv2D'], ['Sub'], ['RealDiv'], ['Mul'], ['BiasAdd']]
target_nodes = g.query_fusion_pattern_nodes(pattern_definition)
for i in target_nodes:
weights_node_name = graph_info[i[0]].node.input[1]
weights_node = graph_info[weights_node_name].node
sub_input_names = list(graph_info[i[1]].node.input)
sub_content_node_name = list(set(sub_input_names).difference([i[0]]))[0]
sub_content_node = graph_info[sub_content_node_name].node
sub_tensor = tensor_util.MakeNdarray(sub_content_node.attr['value'].tensor)
real_div_input_names = list(graph_info[i[2]].node.input)
real_div_content_node_name = list(set(real_div_input_names).difference([i[1]]))[0]
real_div_node = graph_info[real_div_content_node_name].node
real_div_tensor = tensor_util.MakeNdarray(real_div_node.attr['value'].tensor)
mul_input_names = list(graph_info[i[3]].node.input)
mul_content_node_name = list(set(mul_input_names).difference([i[2]]))[0]
mul_content_node = graph_info[mul_content_node_name].node
mul_tensor = tensor_util.MakeNdarray(mul_content_node.attr['value'].tensor)
bias_input_names = list(graph_info[i[4]].node.input)
bias_content_node_name = list(set(bias_input_names).difference([i[3]]))[0]
bias_content_node = graph_info[bias_content_node_name].node
bias_tensor = tensor_util.MakeNdarray(bias_content_node.attr['value'].tensor)
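            # Fold rationale (elementwise): ((conv(x) - sub) / div) * mul + bias
            #   = conv(x) * (mul / div) + (bias - sub * mul / div),
            # so weights_offset = mul / div scales the kernel and
            # bias_offset_value below becomes the new bias.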
bias_offset_value = bias_tensor - sub_tensor*mul_tensor / real_div_tensor
weights_offset = mul_tensor / real_div_tensor
weights = Helper.values_from_const(weights_node)
original_shape = weights.shape
tmp_shape = (original_shape[-1], int(weights.size/original_shape[-1]))
tmp_order = [weights.ndim - 1] + [i for i in range(weights.ndim - 1)]
scaled_weights = np.copy(weights).transpose(tmp_order).ravel().reshape(tmp_shape)
reshape_scale = np.array(weights_offset).reshape(len(weights_offset), 1)
scaled_weights = np.multiply(
scaled_weights, reshape_scale).transpose().reshape(original_shape)
scaled_weight_name = weights_node_name + "_conv_math_offset"
scaled_weights_node = Helper.create_constant_node(scaled_weight_name,
scaled_weights, dtypes.float32, shape=weights.shape)
g.add_node(scaled_weights_node, None, [i[0]])
g.replace_const_node(scaled_weights_node, [i[0]], weights_node_name)
offset_node = Helper.create_constant_node(i[0] + "_biasadd_math_offset",
bias_offset_value, dtypes.float32)
g.add_node(offset_node, None, [i[4]])
graph_info[i[4]].node.input[0] = i[0]
graph_info[i[4]].node.input[1] = offset_node.name
g.remove_node(i[1])
g.remove_node(sub_content_node_name)
g.remove_node(i[2])
g.remove_node(real_div_content_node_name)
g.remove_node(i[3])
g.remove_node(mul_content_node_name)
g.remove_node(bias_content_node_name)
return g.dump_graph()
|
contrib/opencensus-ext-azure/examples/metrics/sum.py | Flared/opencensus-python | 650 | 12703071 | # Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder
REQUEST_MEASURE = measure_module.MeasureFloat("Requests",
"number of requests",
"requests")
NUM_REQUESTS_VIEW = view_module.View("Number of Requests",
"number of requests",
["url"],
REQUEST_MEASURE,
aggregation_module.SumAggregation())
def main():
# Enable metrics
# Set the interval in seconds in which you want to send metrics
# TODO: you need to specify the instrumentation key in a connection string
# and place it in the APPLICATIONINSIGHTS_CONNECTION_STRING
# environment variable.
exporter = metrics_exporter.new_metrics_exporter()
view_manager.register_exporter(exporter)
view_manager.register_view(NUM_REQUESTS_VIEW)
mmap = stats_recorder.new_measurement_map()
tmap = tag_map_module.TagMap()
tmap.insert("url", "http://example.com")
for i in range(100):
print(i)
mmap.measure_int_put(REQUEST_MEASURE, i)
mmap.record(tmap)
time.sleep(1)
print("Done recording metrics")
if __name__ == "__main__":
main()
|
utils/commons/hparams.py | leminhnguyen/NATSpeech | 561 | 12703086 | import argparse
import os
import yaml
from utils.os_utils import remove_file
global_print_hparams = True
hparams = {}
class Args:
def __init__(self, **kwargs):
for k, v in kwargs.items():
self.__setattr__(k, v)
def override_config(old_config: dict, new_config: dict):
for k, v in new_config.items():
if isinstance(v, dict) and k in old_config:
override_config(old_config[k], new_config[k])
else:
old_config[k] = v
def set_hparams(config='', exp_name='', hparams_str='', print_hparams=True, global_hparams=True):
if config == '' and exp_name == '':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--config', type=str, default='',
help='location of the data corpus')
parser.add_argument('--exp_name', type=str, default='', help='exp_name')
parser.add_argument('-hp', '--hparams', type=str, default='',
help='location of the data corpus')
parser.add_argument('--infer', action='store_true', help='infer')
parser.add_argument('--validate', action='store_true', help='validate')
parser.add_argument('--reset', action='store_true', help='reset hparams')
parser.add_argument('--remove', action='store_true', help='remove old ckpt')
parser.add_argument('--debug', action='store_true', help='debug')
args, unknown = parser.parse_known_args()
        print("| Unknown hparams: ", unknown)
else:
args = Args(config=config, exp_name=exp_name, hparams=hparams_str,
infer=False, validate=False, reset=False, debug=False, remove=False)
global hparams
assert args.config != '' or args.exp_name != ''
if args.config != '':
assert os.path.exists(args.config)
config_chains = []
loaded_config = set()
def load_config(config_fn):
        # depth-first inheritance; avoid visiting the same config node twice
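        # e.g. (hypothetical file names) if configs/b.yaml declares
        # `base_config: ./a.yaml`, a.yaml is loaded first and b.yaml's own keys
        # override it (nested dicts are merged recursively by override_config).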
if not os.path.exists(config_fn):
return {}
with open(config_fn) as f:
hparams_ = yaml.safe_load(f)
loaded_config.add(config_fn)
if 'base_config' in hparams_:
ret_hparams = {}
if not isinstance(hparams_['base_config'], list):
hparams_['base_config'] = [hparams_['base_config']]
for c in hparams_['base_config']:
if c.startswith('.'):
c = f'{os.path.dirname(config_fn)}/{c}'
c = os.path.normpath(c)
if c not in loaded_config:
override_config(ret_hparams, load_config(c))
override_config(ret_hparams, hparams_)
else:
ret_hparams = hparams_
config_chains.append(config_fn)
return ret_hparams
saved_hparams = {}
args_work_dir = ''
if args.exp_name != '':
args_work_dir = f'checkpoints/{args.exp_name}'
ckpt_config_path = f'{args_work_dir}/config.yaml'
if os.path.exists(ckpt_config_path):
with open(ckpt_config_path) as f:
saved_hparams_ = yaml.safe_load(f)
if saved_hparams_ is not None:
saved_hparams.update(saved_hparams_)
hparams_ = {}
if args.config != '':
hparams_.update(load_config(args.config))
if not args.reset:
hparams_.update(saved_hparams)
hparams_['work_dir'] = args_work_dir
# Support config overriding in command line. Support list type config overriding.
# Examples: --hparams="a=1,b.c=2,d=[1 1 1]"
if args.hparams != "":
for new_hparam in args.hparams.split(","):
k, v = new_hparam.split("=")
v = v.strip("\'\" ")
config_node = hparams_
for k_ in k.split(".")[:-1]:
config_node = config_node[k_]
k = k.split(".")[-1]
if v in ['True', 'False'] or type(config_node[k]) in [bool, list, dict]:
if type(config_node[k]) == list:
v = v.replace(" ", ",")
config_node[k] = eval(v)
else:
config_node[k] = type(config_node[k])(v)
if args_work_dir != '' and args.remove:
answer = input("REMOVE old checkpoint? Y/N [Default: N]: ")
if answer.lower() == "y":
remove_file(args_work_dir)
if args_work_dir != '' and (not os.path.exists(ckpt_config_path) or args.reset) and not args.infer:
os.makedirs(hparams_['work_dir'], exist_ok=True)
with open(ckpt_config_path, 'w') as f:
yaml.safe_dump(hparams_, f)
hparams_['infer'] = args.infer
hparams_['debug'] = args.debug
hparams_['validate'] = args.validate
hparams_['exp_name'] = args.exp_name
global global_print_hparams
if global_hparams:
hparams.clear()
hparams.update(hparams_)
if print_hparams and global_print_hparams and global_hparams:
print('| Hparams chains: ', config_chains)
print('| Hparams: ')
for i, (k, v) in enumerate(sorted(hparams_.items())):
print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "")
print("")
global_print_hparams = False
return hparams_
|
src/main/python/khaiii/train/evaluator.py | cjdans5545/khaiii | 1,235 | 12703129 | # -*- coding: utf-8 -*-
"""
evaluation related module
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from collections import Counter
import logging
from typing import List, TextIO, Tuple
from khaiii.train.sentence import PosMorph, PosSentence, PosWord
#########
# types #
#########
class Evaluator:
"""
    evaluator
"""
def __init__(self):
self.cnt = Counter()
def evaluate(self) -> Tuple[float, float, float]:
"""
        Measures char/word accuracy and f-score (recall/precision).
Returns:
character accuracy
word accuracy
f-score
"""
char_acc = self.cnt['match_chars'] / self.cnt['total_chars']
word_acc = self.cnt['match_words'] / self.cnt['total_words']
if self.cnt['match_morphs'] == 0:
recall = precision = f_score = 0.0
else:
recall = self.cnt['match_morphs'] / self.cnt['total_gold_morphs']
precision = self.cnt['match_morphs'] / self.cnt['total_pred_morphs']
f_score = 2.0 * recall * precision / (recall + precision)
self.cnt.clear()
return char_acc, word_acc, f_score
def count(self, correct_sent: PosSentence, predict_sent: PosSentence):
"""
        Count the number of matches against the gold standard sentence.
        Args:
            correct_sent: gold standard sentence
            predict_sent: predicted sentence
"""
assert len(correct_sent.words) == len(predict_sent.words)
for gold, pred in zip(correct_sent.pos_tagged_words, predict_sent.pos_tagged_words):
self.cnt['total_chars'] += len(gold.res_tags)
self.cnt['match_chars'] += len([1 for x, y in zip(gold.res_tags, pred.res_tags)
if x == y])
self._count_word(gold, pred)
def _count_word(self, gold: PosWord, pred: PosWord):
"""
count with gold standard and predicted (will update counter)
Args:
gold: gold standard word
pred: predicted word
"""
self.cnt['total_words'] += 1
gold_morphs = gold.pos_tagged_morphs
pred_morphs = pred.pos_tagged_morphs
if gold == pred:
self.cnt['match_words'] += 1
num_match = len(gold_morphs)
self.cnt['total_gold_morphs'] += num_match
self.cnt['total_pred_morphs'] += num_match
self.cnt['match_morphs'] += num_match
return
logging.debug('gold: %s', ' '.join([str(_) for _ in gold_morphs]))
logging.debug('pred: %s', ' '.join([str(_) for _ in pred_morphs]))
self.cnt['total_gold_morphs'] += len(gold_morphs)
self.cnt['total_pred_morphs'] += len(pred_morphs)
gold_set = self.morphs_to_set(gold_morphs)
pred_set = self.morphs_to_set(pred_morphs)
self.cnt['match_morphs'] += len(gold_set & pred_set)
@classmethod
def morphs_to_set(cls, morphs: List[PosMorph]) -> set:
"""
make set from morpheme list
Args:
morphs: morpheme list
Returns:
morphemes set
"""
morph_cnt = Counter([(morph.morph, morph.pos_tag) for morph in morphs])
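        # e.g. [하/VV, 하/VV, 다/EC] -> {('하', 'VV', 1), ('하', 'VV', 2), ('다', 'EC')}
        # duplicated morphemes get an index suffix so that intersecting with the
        # gold set respects multiplicities.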
morph_set = set()
for (lex, tag), freq in morph_cnt.items():
if freq == 1:
morph_set.add((lex, tag))
else:
morph_set.update([(lex, tag, _) for _ in range(1, freq+1)])
return morph_set
def report(self, fout: TextIO):
"""
report recall/precision to file
Args:
fout: output file
"""
print('word accuracy: %d / %d = %.4f' % (self.cnt['match_words'], self.cnt['total_words'],
self.cnt['match_words'] / self.cnt['total_words']),
file=fout)
if self.cnt['match_morphs'] == 0:
recall = precision = f_score = 0.0
else:
recall = self.cnt['match_morphs'] / self.cnt['total_gold_morphs']
precision = self.cnt['match_morphs'] / self.cnt['total_pred_morphs']
f_score = 2.0 * recall * precision / (recall + precision)
print('f-score / (recall, precision): %.4f / (%.4f, %.4f)' % (f_score, recall, precision),
file=fout)
|
demo/infinity/fast_api_server_onnx.py | dumpmemory/transformer-deploy | 698 | 12703151 | # Copyright 2022, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from fastapi import FastAPI
from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions
from transformers import AutoTokenizer, BatchEncoding, TensorType
app = FastAPI()
options = SessionOptions()
options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
model = InferenceSession("triton_models/model.onnx", options, providers=["CUDAExecutionProvider"])
tokenizer = AutoTokenizer.from_pretrained("philschmid/MiniLM-L6-H384-uncased-sst2")
@app.get("/predict")
def predict(query: str):
encode_dict: BatchEncoding = tokenizer(
text=query,
max_length=128,
truncation=True,
return_tensors=TensorType.NUMPY,
)
result: np.ndarray = model.run(None, dict(encode_dict))[0]
return result.tolist()
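# Example query, assuming the app is served locally with uvicorn on port 8000:
#   uvicorn fast_api_server_onnx:app --port 8000
#   curl "http://127.0.0.1:8000/predict?query=this+movie+was+great"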
|
platform/resources/challenges/templates/template.py | WHYSOEASY/emkc | 173 | 12703172 | <filename>platform/resources/challenges/templates/template.py
import sys
%%_INSERTVALUES_%%
# write your solution here
|
backpack/core/derivatives/conv_transpose2d.py | jabader97/backpack | 395 | 12703181 | """Partial derivatives for `torch.nn.ConvTranspose2d`."""
from backpack.core.derivatives.conv_transposend import ConvTransposeNDDerivatives
class ConvTranspose2DDerivatives(ConvTransposeNDDerivatives):
def __init__(self):
super().__init__(N=2)
|
zhuaxia/option.py | yangchuansheng/zhuaxia | 332 | 12703185 | <filename>zhuaxia/option.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
import log
import zhuaxia.config as config
LOG = log.get_logger("zxLogger")
class Option(object):
"""
    a class containing user-given options
"""
def __init__(self):
self.is_hq = False
self.need_proxy_pool = False
self.proxy_pool = None
self.dl_lyric = False
self.inFile = ''
self.inUrl = ''
self.incremental_dl = False
self.proxy = config.CHINA_PROXY_HTTP
def debug_me(self):
LOG.debug( "hq: "+str(self.is_hq))
LOG.debug( "inFile: "+self.inFile)
LOG.debug( "inUrl: "+self.inUrl)
LOG.debug( "proxy: "+ str(self.proxy))
LOG.debug( "needProxyPool: "+ str(self.need_proxy_pool))
LOG.debug( "dl_lyric: "+str(self.dl_lyric))
LOG.debug( "incremental_dl: "+str(self.incremental_dl))
|
utils/preprocess.py | jeasinema/MV3D_plus | 177 | 12703211 | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
# File Name : preprocess.py
# Purpose :
# Creation Date : 10-12-2017
# Last Modified : Thu 18 Jan 2018 05:34:42 PM CST
# Created By : <NAME> [jeasinema[at]gmail[dot]com]
import os
import multiprocessing
import numpy as np
from config import cfg
data_dir = 'velodyne'
def process_pointcloud(point_cloud, cls=cfg.DETECT_OBJ):
# Input:
# (N, 4)
# Output:
# voxel_dict
if cls == 'Car':
scene_size = np.array([4, 80, 70.4], dtype=np.float32)
voxel_size = np.array([0.4, 0.2, 0.2], dtype=np.float32)
grid_size = np.array([10, 400, 352], dtype=np.int64)
lidar_coord = np.array([0, 40, 3], dtype=np.float32)
max_point_number = 35
else:
scene_size = np.array([4, 40, 48], dtype=np.float32)
voxel_size = np.array([0.4, 0.2, 0.2], dtype=np.float32)
grid_size = np.array([10, 200, 240], dtype=np.int64)
lidar_coord = np.array([0, 20, 3], dtype=np.float32)
max_point_number = 45
np.random.shuffle(point_cloud)
shifted_coord = point_cloud[:, :3] + lidar_coord
# reverse the point cloud coordinate (X, Y, Z) -> (Z, Y, X)
voxel_index = np.floor(
shifted_coord[:, ::-1] / voxel_size).astype(np.int)
bound_x = np.logical_and(
voxel_index[:, 2] >= 0, voxel_index[:, 2] < grid_size[2])
bound_y = np.logical_and(
voxel_index[:, 1] >= 0, voxel_index[:, 1] < grid_size[1])
bound_z = np.logical_and(
voxel_index[:, 0] >= 0, voxel_index[:, 0] < grid_size[0])
bound_box = np.logical_and(np.logical_and(bound_x, bound_y), bound_z)
point_cloud = point_cloud[bound_box]
voxel_index = voxel_index[bound_box]
# [K, 3] coordinate buffer as described in the paper
coordinate_buffer = np.unique(voxel_index, axis=0)
K = len(coordinate_buffer)
T = max_point_number
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=(K), dtype=np.int64)
# [K, T, 7] feature buffer as described in the paper
feature_buffer = np.zeros(shape=(K, T, 7), dtype=np.float32)
# build a reverse index for coordinate buffer
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i
for voxel, point in zip(voxel_index, point_cloud):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < T:
feature_buffer[index, number, :4] = point
number_buffer[index] += 1
feature_buffer[:, :, -3:] = feature_buffer[:, :, :3] - \
feature_buffer[:, :, :3].sum(axis=1, keepdims=True)/number_buffer.reshape(K, 1, 1)
voxel_dict = {'feature_buffer': feature_buffer,
'coordinate_buffer': coordinate_buffer,
'number_buffer': number_buffer}
return voxel_dict
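# Illustrative call with synthetic points (shapes assume the default 'Car'
# configuration, i.e. at most 35 points per voxel):
#   pts = np.random.uniform([0, -10, -1, 0], [40, 10, 1, 1], (1000, 4)).astype(np.float32)
#   vd = process_pointcloud(pts)
#   vd['feature_buffer'].shape  # -> (K, 35, 7)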
def worker(filelist):
for file in filelist:
point_cloud = np.fromfile(
os.path.join(data_dir, file), dtype=np.float32).reshape(-1, 4)
name, extension = os.path.splitext(file)
voxel_dict = process_pointcloud(point_cloud)
output_dir = 'voxel' if cfg.DETECT_OBJ == 'Car' else 'voxel_ped'
np.savez_compressed(os.path.join(output_dir, name), **voxel_dict)
if __name__ == '__main__':
filelist = [f for f in os.listdir(data_dir) if f.endswith('bin')]
num_worker = 8
for sublist in np.array_split(filelist, num_worker):
p = multiprocessing.Process(target=worker, args=(sublist,))
p.start()
|
util/online.py | MohammedAljahdali/shrinkbench | 345 | 12703234 | <filename>util/online.py
import numpy as np
class OnlineStats:
"""
Welford's algorithm to compute sample mean and sample variance incrementally.
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
"""
def __init__(self, iterable=None):
"""Online Mean and Variance from single samples
Running stats,
This is compatible with np.ndarray objects and as long as the
Keyword Arguments:
iterable {[iterable} -- Values to initialize (default: {None})
"""
self.n = 0
self.mean = 0.0
self.S = 0.0
if iterable is not None:
self.addN(iterable)
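    # Minimal usage sketch (values are illustrative):
    #   stats = OnlineStats([1.0, 2.0, 3.0])
    #   stats.add(4.0)
    #   stats.mean      # -> 2.5
    #   stats.variance  # -> 1.25 (note: this property divides S by n)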
def add(self, datum):
"""Add a single datum
Internals are updated using Welford's method
Arguments:
datum -- Numerical object
"""
self.n += 1
delta = datum - self.mean
# Mk = Mk-1+ (xk – Mk-1)/k
self.mean += delta / self.n
# Sk = Sk-1 + (xk – Mk-1)*(xk – Mk).
self.S += delta * (datum - self.mean)
def addN(self, iterable, batch=False):
"""Add N data to the stats
Arguments:
            iterable {iterable} -- Values to add
Keyword Arguments:
batch {bool} -- If true, then the mean and std are computed over
the new array using numpy and then that updates the current stats
"""
if batch:
            add = self + OnlineStats.from_values(len(iterable), np.mean(iterable), np.std(iterable))
self.n, self.mean, self.S = add.n, add.mean, add.S
else:
for datum in iterable:
self.add(datum)
def pop(self, datum):
if self.n == 0:
raise ValueError("Stats must be non empty")
self.n -= 1
delta = datum - self.mean
# Mk-1 = Mk - (xk - Mk) / (k - 1)
self.mean -= delta / self.n
# Sk-1 = Sk - (xk – Mk-1) * (xk – Mk)
self.S -= (datum - self.mean) * delta
def popN(self, iterable, batch=False):
raise NotImplementedError
@property
def variance(self):
# For 2 ≤ k ≤ n, the kth estimate of the variance is s2 = Sk/(k – 1).
return self.S / self.n
@property
def std(self):
return np.sqrt(self.variance)
@property
def flatmean(self):
# for datapoints which are arrays
return np.mean(self.mean)
@property
def flatvariance(self):
# for datapoints which are arrays
return np.mean(self.variance+self.mean**2) - self.flatmean**2
@property
def flatstd(self):
return np.sqrt(self.flatvariance)
@staticmethod
def from_values(n, mean, std):
stats = OnlineStats()
stats.n = n
stats.mean = mean
stats.S = std**2 * n
return stats
@staticmethod
def from_raw_values(n, mean, S):
stats = OnlineStats()
stats.n = n
stats.mean = mean
stats.S = S
return stats
def __str__(self):
return f"n={self.n} mean={self.mean} std={self.std}"
def __repr__(self):
return f"OnlineStats.from_values(" + \
f"n={self.n}, mean={self.mean}, " + \
f"std={self.std})"
def __add__(self, other):
"""Adding can be done with int|float or other Online Stats
For other int|float, it is added to all previous values
Arguments:
other {[type]} -- [description]
Returns:
OnlineStats -- New instance with the sum.
Raises:
TypeError -- If the type is different from int|float|OnlineStas
"""
if isinstance(other, OnlineStats):
# Add the means, variances and n_samples of two objects
n1, n2 = self.n, other.n
mu1, mu2 = self.mean, other.mean
S1, S2 = self.S, other.S
# New stats
n = n1 + n2
mu = n1/n * mu1 + n2/n * mu2
S = (S1 + n1 * mu1*mu1) + (S2 + n2 * mu2*mu2) - n * mu*mu
return OnlineStats.from_raw_values(n, mu, S)
if isinstance(other, (int, float)):
# Add a fixed amount to all values. Only changes the mean
return OnlineStats.from_raw_values(self.n, self.mean+other, self.S)
else:
raise TypeError("Can only add other groups or numbers")
def __sub__(self, other):
raise NotImplementedError
def __mul__(self, k):
# Multiply all values seen by some constant
return OnlineStats.from_raw_values(self.n, self.mean*k, self.S*k**2)
class OnlineStatsMap:
def __init__(self, *keys):
self.stats = {}
if keys is not None:
self.register(*keys)
def register(self, *keys):
for k in keys:
if k not in self.stats:
self.stats[k] = OnlineStats()
def __str__(self):
s = "Stats"
for k in self.stats:
s += f' {k}: {str(self.stats[k])}'
|
napari/layers/utils/_tests/test_color_manager_utils.py | MaksHess/napari | 1,345 | 12703280 | import numpy as np
from napari.layers.utils.color_manager_utils import (
guess_continuous,
is_color_mapped,
)
def test_guess_continuous():
continuous_annotation = np.array([1, 2, 3], dtype=np.float32)
assert guess_continuous(continuous_annotation)
categorical_annotation_1 = np.array([True, False], dtype=bool)
assert not guess_continuous(categorical_annotation_1)
categorical_annotation_2 = np.array([1, 2, 3], dtype=int)
assert not guess_continuous(categorical_annotation_2)
def test_is_colormapped_string():
color = 'hello'
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert is_color_mapped(color, properties)
assert not is_color_mapped('red', properties)
def test_is_colormapped_dict():
"""Colors passed as dicts are treated as colormapped"""
color = {0: np.array([1, 1, 1, 1]), 1: np.array([1, 1, 0, 1])}
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert is_color_mapped(color, properties)
def test_is_colormapped_array():
"""Colors passed as list/array are treated as not colormapped"""
color_list = [[1, 1, 1, 1], [1, 1, 0, 1]]
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert not is_color_mapped(color_list, properties)
color_array = np.array(color_list)
assert not is_color_mapped(color_array, properties)
|
tests/deployment/sagemaker/sagemaker_moto/response.py | co42/BentoML | 3,451 | 12703286 | import json
from moto.core.responses import BaseResponse
from tests.deployment.sagemaker.sagemaker_moto.model import sagemaker_backends
class SageMakerResponse(BaseResponse):
"""SageMaker response for moto mock.
References API operations and result from
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Operations.html
"""
@property
def request_body(self):
return json.loads(self.body)
@property
def backend(self):
return sagemaker_backends[self.region]
def create_model(self):
model_name = self.request_body['ModelName']
tags = self.request_body.get('Tags', [])
primary_container = self.request_body['PrimaryContainer']
execution_role_arn = self.request_body['ExecutionRoleArn']
result = self.backend.create_model(
model_name, tags, primary_container, execution_role_arn, self.region
)
return json.dumps({'ModelArn': result['arn']})
def create_endpoint_config(self):
config_name = self.request_body['EndpointConfigName']
production_variants = self.request_body['ProductionVariants']
result = self.backend.create_endpoint_config(
config_name, production_variants, self.region
)
return json.dumps({'EndpointConfigArn': result['arn']})
def create_endpoint(self):
endpoint_name = self.request_body['EndpointName']
config_name = self.request_body['EndpointConfigName']
result = self.backend.create_endpoint(endpoint_name, config_name, self.region)
return json.dumps({'EndpointArn': result['arn']})
def describe_endpoint(self):
endpoint_name = self.request_body['EndpointName']
endpoint_description = self.backend.describe_endpoint(endpoint_name)
return json.dumps(endpoint_description)
def delete_model(self):
model_name = self.request_body['ModelName']
self.backend.delete_model(model_name)
return ''
def delete_endpoint_config(self):
config_name = self.request_body['EndpointConfigName']
self.backend.delete_endpoint_config(config_name)
return ''
def delete_endpoint(self):
endpoint_name = self.request_body['EndpointName']
self.backend.delete_endpoint(endpoint_name)
return ''
def update_endpoint(self):
endpoint_name = self.request_body['EndpointName']
config_name = self.request_body['EndpointConfigName']
result = self.backend.update_endpoint(endpoint_name, config_name)
return json.dumps({'EndpointArn': result['arn']})
|
settings.py | nnivruth/simple_api | 150 | 12703287 | <gh_stars>100-1000
DB_URI = 'sqlite:///./main.db'
|
tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/agent.py | VanessaDo/cloudml-samples | 1,552 | 12703294 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General interface of an RL agent.
Classes implementing this interface need to support the following methods:
1. random_action(observation): given an observation, return a random action.
2. action(observation): given an observation, return an action from the policy.
3. train(global_step): improve the agent's internal policy once.
"""
class Agent(object):
"""General interface of an RL agent."""
def initialize(self):
"""Initialization before playing.
This function serves as a unified interface to do some pre-work.
E.g., for DDPG and TD3, update target network should be done here.
"""
pass
def random_action(self, observation):
"""Return a random action.
Given an observation return a random action.
Specifications of the action space should be given/initialized
when the agent is initialized.
Args:
observation: object, observations from the env.
Returns:
numpy.array, represent an action.
"""
raise NotImplementedError('Not implemented')
def action(self, observation):
"""Return an action according to the agent's internal policy.
Given an observation return an action according to the agent's
internal policy. Specifications of the action space should be
given/initialized when the agent is initialized.
Args:
observation: object, observations from the env.
Returns:
numpy.array, represent an action.
"""
raise NotImplementedError('Not implemented')
def action_with_noise(self, observation):
"""Return a noisy action.
Given an observation return a noisy action according to the agent's
internal policy and exploration noise process.
Specifications of the action space should be given/initialized
when the agent is initialized.
Args:
observation: object, observations from the env.
Returns:
numpy.array, represent an action.
"""
raise NotImplementedError('Not implemented')
def train(self, global_step):
"""Improve the agent's policy once.
Train the agent and improve its internal policy once.
Args:
global_step: int, global step count.
Returns:
object, represent training metrics.
"""
raise NotImplementedError('Not implemented')
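# A minimal concrete implementation sketch (illustrative only, not part of this
# sample; the uniform action range is an assumption):
#
#   import numpy as np
#
#   class RandomAgent(Agent):
#       def __init__(self, action_dim):
#           self._action_dim = action_dim
#
#       def random_action(self, observation):
#           return np.random.uniform(-1.0, 1.0, self._action_dim)
#
#       def action(self, observation):
#           return self.random_action(observation)
#
#       def action_with_noise(self, observation):
#           return self.random_action(observation)
#
#       def train(self, global_step):
#           return {}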
|
tests/test_performance.py | Sunkist-Cherry/Spam-Filter | 433 | 12703298 | <reponame>Sunkist-Cherry/Spam-Filter
import os
import unittest
import cherry
from unittest import mock
from cherry.base import DATA_DIR, load_data
class PerformanceTest(unittest.TestCase):
# __init__()
@mock.patch('cherry.performancer.load_all')
def test_init(self, mock_load):
mock_load.return_value = ([1], [0], 'vectorizer', 'clf')
cherry.performancer.Performance('foo')
mock_load.assert_called_with(
'foo', categories=None, clf=None, clf_method=None,
encoding=None, language=None, preprocessing=None,
vectorizer=None, vectorizer_method=None, x_data=None, y_data=None)
|
tods/sk_interface/detection_algorithm/ABOD_skinterface.py | ZhuangweiKang/tods | 544 | 12703332 | <gh_stars>100-1000
import numpy as np
from ..base import BaseSKI
from tods.detection_algorithm.PyodABOD import ABODPrimitive
class ABODSKI(BaseSKI):
def __init__(self, **hyperparams):
super().__init__(primitive=ABODPrimitive, **hyperparams)
self.fit_available = True
self.predict_available = True
self.produce_available = False
|
packs/pagerduty/actions/lib/action.py | jonico/st2contrib | 164 | 12703333 | <reponame>jonico/st2contrib<filename>packs/pagerduty/actions/lib/action.py
import pygerduty
from st2actions.runners.pythonrunner import Action
class PagerDutyAction(Action):
def __init__(self, config):
super(PagerDutyAction, self).__init__(config)
self.pager = self._init_client()
self.trigger = []
def _init_client(self):
api_key = self.config['api_key']
# service_api = self.config['service_api']
subdomain = self.config['subdomain']
pager = pygerduty.PagerDuty(subdomain, api_token=api_key)
return pager
# get all the acknowledged incidents
def get_ack_incidents(self):
ack_alarms = []
for incident in self.pager.incidents.list(status="acknowledged"):
ack_alarms.append(incident.incident_key)
return ack_alarms
# get all the triggered incidents
def get_triggered_incidents(self):
trigger_alarms = []
for incident in self.pager.incidents.list(status="triggered"):
trigger_alarms.append(incident.incident_key)
return trigger_alarms
|
proxy/models.py | cklewar/wistar | 152 | 12703382 | from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm
|
examples/plot_cross_session_motor_imagery.py | plcrodrigues/moabb | 321 | 12703390 | """
===========================
Cross Session Motor Imagery
===========================
This example shows how to perform a cross-session motor imagery analysis on the
very popular dataset 2a from the BCI competition IV.
We will compare two pipelines:
- CSP+LDA
- Riemannian Geometry+Logistic Regression
We will use the LeftRightImagery paradigm. This will restrict the analysis
to two classes (left hand versus right hand) and use AUC as the metric.
The cross-session evaluation context will evaluate performance using a
leave-one-session-out cross-validation. For each session in the dataset, a
model is trained on every other session and performance is evaluated on the
current session.
"""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import seaborn as sns
from mne.decoding import CSP
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
import moabb
from moabb.datasets import BNCI2014001
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import LeftRightImagery
moabb.set_log_level("info")
##############################################################################
# Create pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformers.
#
# The CSP implementation from MNE is used. We selected 8 CSP components, as is
# usually done in the literature.
#
# The Riemannian geometry pipeline consists of covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.
pipelines = {}
pipelines["CSP+LDA"] = make_pipeline(CSP(n_components=8), LDA())
pipelines["RG+LR"] = make_pipeline(
Covariances(), TangentSpace(), LogisticRegression(solver="lbfgs")
)
##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001).
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into the database, so that if you add a new pipeline, the
# evaluation will not run again unless a parameter has changed. Results can
# be overwritten if necessary.
paradigm = LeftRightImagery()
# Because this is being auto-generated we only use 2 subjects
dataset = BNCI2014001()
dataset.subject_list = dataset.subject_list[:2]
datasets = [dataset]
overwrite = False # set to True if we want to overwrite cached results
evaluation = CrossSessionEvaluation(
paradigm=paradigm, datasets=datasets, suffix="examples", overwrite=overwrite
)
results = evaluation.process(pipelines)
print(results.head())
##############################################################################
# Plot Results
# ----------------
#
# Here we plot the results. The first plot is a point plot with the average
# performance of each pipeline across sessions and subjects.
# The second plot is a paired scatter plot, each point representing the score
# of a single session. An algorithm outperforms another if most of the
# points are in its quadrant.
fig, axes = plt.subplots(1, 2, figsize=[8, 4], sharey=True)
sns.stripplot(
data=results,
y="score",
x="pipeline",
ax=axes[0],
jitter=True,
alpha=0.5,
zorder=1,
palette="Set1",
)
sns.pointplot(data=results, y="score", x="pipeline", ax=axes[0], zorder=1, palette="Set1")
axes[0].set_ylabel("ROC AUC")
axes[0].set_ylim(0.5, 1)
# paired plot
paired = results.pivot_table(
values="score", columns="pipeline", index=["subject", "session"]
)
paired = paired.reset_index()
sns.regplot(data=paired, y="RG+LR", x="CSP+LDA", ax=axes[1], fit_reg=False)
axes[1].plot([0, 1], [0, 1], ls="--", c="k")
axes[1].set_xlim(0.5, 1)
plt.show()
|
numpyro/contrib/optim.py | vishalbelsare/numpyro | 1,394 | 12703395 | <gh_stars>1000+
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
This module provides a wrapper for Optax optimizers so that they can be used with
NumPyro inference algorithms.
"""
from typing import Tuple, TypeVar
import optax
from numpyro.optim import _NumPyroOptim
_Params = TypeVar("_Params")
_State = Tuple[_Params, optax.OptState]
def optax_to_numpyro(transformation: optax.GradientTransformation) -> _NumPyroOptim:
"""
This function produces a ``numpyro.optim._NumPyroOptim`` instance from an
``optax.GradientTransformation`` so that it can be used with
``numpyro.infer.svi.SVI``. It is a lightweight wrapper that recreates the
``(init_fn, update_fn, get_params_fn)`` interface defined by
:mod:`jax.example_libraries.optimizers`.
:param transformation: An ``optax.GradientTransformation`` instance to wrap.
:return: An instance of ``numpyro.optim._NumPyroOptim`` wrapping the supplied
Optax optimizer.
"""
def init_fn(params: _Params) -> _State:
opt_state = transformation.init(params)
return params, opt_state
def update_fn(step, grads: _Params, state: _State) -> _State:
params, opt_state = state
updates, opt_state = transformation.update(grads, opt_state, params)
updated_params = optax.apply_updates(params, updates)
return updated_params, opt_state
def get_params_fn(state: _State) -> _Params:
params, _ = state
return params
return _NumPyroOptim(lambda x, y, z: (x, y, z), init_fn, update_fn, get_params_fn)
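

# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes the usual NumPyro SVI workflow with a user-defined model
# and guide; the toy model below only exists to make the example runnable.
if __name__ == "__main__":
    import jax.numpy as jnp
    from jax import random

    import numpyro
    import numpyro.distributions as dist
    from numpyro.infer import SVI, Trace_ELBO

    def model(data):
        loc = numpyro.sample("loc", dist.Normal(0.0, 1.0))
        numpyro.sample("obs", dist.Normal(loc, 1.0), obs=data)

    def guide(data):
        loc_q = numpyro.param("loc_q", 0.0)
        numpyro.sample("loc", dist.Delta(loc_q))

    # Wrap a chained Optax transformation so it can be passed to SVI directly.
    optimizer = optax_to_numpyro(optax.chain(optax.clip(10.0), optax.adam(1e-3)))
    svi = SVI(model, guide, optimizer, loss=Trace_ELBO())
    svi_result = svi.run(random.PRNGKey(0), 500, jnp.ones(8))
    print(svi_result.params)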
|
examples/05_aws_batch/scripts/workflow2.py | cclauss/redun | 303 | 12703400 | import subprocess
from redun import task
from task_lib2.utils import lib_task_on_batch
redun_namespace = "redun.examples.aws_batch"
@task()
def task_on_default(x: int):
return [
'task_on_default',
subprocess.check_output(['uname', '-a']),
x
]
@task(executor='batch', version="12")
def task_on_batch(x: int):
return [
'task_on_batch',
subprocess.check_output(['uname', '-a']),
task_on_default(x + 5),
x
]
@task(executor='batch_debug', interactive=True)
def task_on_batch_debug(x: int):
import pdb; pdb.set_trace()
return [
'task_on_batch_debug',
subprocess.check_output(['uname', '-a']),
task_on_default(x + 5),
x
]
@task()
def main(y: int=10):
return [
'main',
subprocess.check_output(['uname', '-a']),
task_on_batch(y),
lib_task_on_batch(y),
#task_on_batch_debug(y),
]
|
tests/update.py | robstein/rules_go | 1,099 | 12703404 | #!/usr/bin/env python
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script should be run without any arguments to update the tests
documentation cross linking.
It updates sections bounded by
.. Child list start
.. Child list end
with links to all the child documentation, picking the text for each item
from the first non-blank line of the README.rst for that folder.
"""
import os
README = "README.rst"
START_MARKER = ".. Child list start\n"
END_MARKER = ".. Child list end\n"
def main():
for dirname, subdirs, files in os.walk("."):
if README not in files:
continue
readme = os.path.join(dirname, README)
out = []
lines = []
with open(readme) as f:
lines = f.readlines()
try:
start = lines.index(START_MARKER)
end = lines.index(END_MARKER)
except ValueError:
print('{}: No child markers'.format(readme))
continue
if end < start:
print('{}: Invalid child markers'.format(readme))
continue
print('{}: updating from {} to {}'.format(readme, start, end))
out = lines[:start+1]
out.append("\n")
for sub in subdirs:
            child = os.path.join(dirname, sub, README)
            childname = ""
            try:
                with open(child) as f:
                    for line in f.readlines():
                        childname = line.strip()
                        if childname:
                            break
                if childname:
                    out.append("* `{} <{}/{}>`_\n".format(childname, sub, README))
            except Exception:
                # Skip sub-folders whose README cannot be read.
                continue
out.append("\n")
out.extend(lines[end:])
if out:
with open(readme, "w") as f:
f.writelines(out)
if __name__ == "__main__":
main() |
vumi/application/sandbox_rlimiter.py | seidu626/vumi | 199 | 12703421 | <filename>vumi/application/sandbox_rlimiter.py
# -*- test-case-name: vumi.application.tests.test_sandbox_rlimiter -*-
"""NOTE:
This module is also used as a standalone Python program that is executed by
the sandbox machinery. It must never, ever import non-stdlib modules.
"""
import os
import sys
import json
import signal
import resource
class SandboxRlimiter(object):
"""This reads rlimits in from stdin, applies them and then execs a
new executable.
It's necessary because Twisted's spawnProcess has no equivalent of
the `preexec_fn` argument to :class:`subprocess.POpen`.
See http://twistedmatrix.com/trac/ticket/4159.
"""
def __init__(self, argv, env):
start = argv.index('--') + 1
self._executable = argv[start]
self._args = [self._executable] + argv[start + 1:]
self._env = env
def _apply_rlimits(self):
data = os.environ[self._SANDBOX_RLIMITS_]
rlimits = json.loads(data) if data.strip() else {}
for rlimit, (soft, hard) in rlimits.iteritems():
# Cap our rlimits to the maximum allowed.
rsoft, rhard = resource.getrlimit(int(rlimit))
soft = min(soft, rsoft)
hard = min(hard, rhard)
resource.setrlimit(int(rlimit), (soft, hard))
def _reset_signals(self):
# reset all signal handlers to their defaults
for i in range(1, signal.NSIG):
if signal.getsignal(i) == signal.SIG_IGN:
signal.signal(i, signal.SIG_DFL)
def _sanitize_fds(self):
# close everything except stdin, stdout and stderr
maxfds = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
os.closerange(3, maxfds)
def execute(self):
self._apply_rlimits()
self._restore_child_env(os.environ)
self._sanitize_fds()
self._reset_signals()
os.execvpe(self._executable, self._args, self._env)
_SANDBOX_RLIMITS_ = "_SANDBOX_RLIMITS_"
@classmethod
def _override_child_env(cls, env, rlimits):
"""Put RLIMIT config in the env."""
env[cls._SANDBOX_RLIMITS_] = json.dumps(rlimits)
@classmethod
def _restore_child_env(cls, env):
"""Remove RLIMIT config."""
del env[cls._SANDBOX_RLIMITS_]
@classmethod
def script_name(cls):
# we need to pass Python the actual filename of this script
# (rather than using -m __name__) so that is doesn't import
# Twisted's reactor (since that causes errors when we close
# all the file handles if using certain reactors).
script_name = __file__
if script_name.endswith('.pyc') or script_name.endswith('.pyo'):
script_name = script_name[:-len('.pyc')] + '.py'
return script_name
@classmethod
def spawn(cls, reactor, protocol, executable, rlimits, **kwargs):
        # spawns a SandboxRlimiter; the rlimits are passed through the child's
        # environment and the SandboxRlimiter applies them before exec'ing
args = kwargs.pop('args', [])
# the -u for unbuffered I/O is important (otherwise the process
# execed will be very confused about where its stdin data has
# gone)
args = [sys.executable, '-u', cls.script_name(), '--'] + args
env = kwargs.pop('env', {})
cls._override_child_env(env, rlimits)
reactor.spawnProcess(protocol, sys.executable, args=args, env=env,
**kwargs)
if __name__ == "__main__":
rlimiter = SandboxRlimiter(sys.argv, os.environ)
rlimiter.execute()
|
autobahntestsuite/autobahntestsuite/interfaces.py | rishabh-bector/autobahn-testsuite | 595 | 12703423 | ###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ('ITestDb', 'IReportGenerator', )
import zope
from zope.interface import Interface, Attribute
class ICaseSet(Interface):
"""
"""
pass
class ITestDb(Interface):
"""
A Test database provides storage and query capabilities
for test cases, results and related data.
"""
def newRun(specId):
"""
Create a new testsuite run.
:param mode: The testsuite mode.
:type mode: str
:param spec: The test specification.
:type spec: object (a JSON serializable test spec)
:returns Deferred -- The test run ID.
"""
def closeRun(runId):
"""
Closes a testsuite run. After a testsuite run is closed,
the test result data cannot be changed or new data added.
:param testRunId: ID of test run as previously returned by newRun().
:type testRunId: str
"""
def generateCasesByTestee(specId):
"""
"""
def saveResult(runId, testRun, test, result):
"""
Saves a test result in the database.
:param runId: The test run ID.
:type runId: str
:param result: The test result. An instance of TestResult.
:type result: object
:returns Deferred -- The test result ID.
"""
# def registerResultFile(resultId, type, sha1, path):
# """
# When a report file generator has produced it's output
# and created (or recreated/modified) a file, it should
# register the file location via this function.
# :param resultId: The ID of the test result this file was generated for.
# :type resultId: str
# :param type: The type of file produced (FIXME: ['html', 'json'] ??)
# :type type: FIXME
# :param sha1: The SHA-1 computed over the generated octet stream.
# :type sha1 str
# :param path: The filesystem path to the generated file.
# :type path: str
# """
ITestDb.TESTMODES = set(['fuzzingwampclient', 'fuzzingclient'])
"""
The list of implemented test modes.
"""
class ITestRunner(Interface):
"""
"""
def runAndObserve(specName, observers = [], saveResults = True):
"""
:param observers: An iterable of ITestRunObserver instances.
:type observers: iterable
"""
class IReportGenerator(Interface):
"""
A Report generator is able to produce report files (in a
format the generator supports) from test results stored
in a Test database.
"""
outputDirectory = Attribute("""Default output directory base path. (e.g. 'reports/wamp/servers')""")
fileExtension = Attribute("""Default file extension for report files (e.g. '.html').""")
mimeType = Attribute("""Default MIME type for generated reports (e.g. 'text/html').""")
def writeReportIndexFile(runId, file = None):
"""
Generate a test report index and write to file like object or
to an automatically chosen report file (under the default
output directory
:param runId: The test run ID for which to generate the index for.
:type runId: object
:param file: A file like object or None (automatic)
:type file: object
:returns -- None if file was provided, or the pathname
of the created report file (automatic).
"""
def writeReportFile(resultId, file = None):
"""
Generate a test report and write to file like object or
to an automatically chosen report file (under the default
output directory
:param resultId: The test result ID for which to generate the report for.
:type resultId: object
:param file: A file like object or None (automatic)
:type file: object
:returns -- None if file was provided, or the pathname
of the created report file (automatic).
"""
class ITestRun(Interface):
"""
"""
def next():
"""
Returns the next test case for this run or None when
the test run is finished.
:returns ICase -- The next test case or None.
"""
def remaining():
"""
Number of remaining test cases in this test run.
:returns int -- Number of remaining test cases.
"""
def __len__():
"""
The length of this test run (note that fetching
test cases does not change the length).
"""
class ITestRunObserver(Interface):
"""
"""
def progress(runId, testRun, testCase, result, remaining):
"""
"""
class ITestCase(Interface):
"""
Tests are instantiated as objects providing this interface.
They have their run() method called exactly once before
being disposed.
"""
index = Attribute("""Test case index - a tuple of ints.""")
description = Attribute("""Test case description.""")
expectation = Attribute("""Test case expectation.""")
params = Attribute("""Test case parameters.""")
def run():
"""
Run the test case. Returns a deferred that provides an instance
of TestResult when successful.
"""
|
utils/palette_davis.py | Nitin-Mane/dense-ulearn-vos | 157 | 12703427 | palette_str = '''0 0 0
128 0 0
0 128 0
128 128 0
0 0 128
128 0 128
0 128 128
128 128 128
64 0 0
191 0 0
64 128 0
191 128 0
64 0 128
191 0 128
64 128 128
191 128 128
0 64 0
128 64 0
0 191 0
128 191 0
0 64 128
128 64 128
22 22 22
23 23 23
24 24 24
25 25 25
26 26 26
27 27 27
28 28 28
29 29 29
30 30 30
31 31 31
32 32 32
33 33 33
34 34 34
35 35 35
36 36 36
37 37 37
38 38 38
39 39 39
40 40 40
41 41 41
42 42 42
43 43 43
44 44 44
45 45 45
46 46 46
47 47 47
48 48 48
49 49 49
50 50 50
51 51 51
52 52 52
53 53 53
54 54 54
55 55 55
56 56 56
57 57 57
58 58 58
59 59 59
60 60 60
61 61 61
62 62 62
63 63 63
64 64 64
65 65 65
66 66 66
67 67 67
68 68 68
69 69 69
70 70 70
71 71 71
72 72 72
73 73 73
74 74 74
75 75 75
76 76 76
77 77 77
78 78 78
79 79 79
80 80 80
81 81 81
82 82 82
83 83 83
84 84 84
85 85 85
86 86 86
87 87 87
88 88 88
89 89 89
90 90 90
91 91 91
92 92 92
93 93 93
94 94 94
95 95 95
96 96 96
97 97 97
98 98 98
99 99 99
100 100 100
101 101 101
102 102 102
103 103 103
104 104 104
105 105 105
106 106 106
107 107 107
108 108 108
109 109 109
110 110 110
111 111 111
112 112 112
113 113 113
114 114 114
115 115 115
116 116 116
117 117 117
118 118 118
119 119 119
120 120 120
121 121 121
122 122 122
123 123 123
124 124 124
125 125 125
126 126 126
127 127 127
128 128 128
129 129 129
130 130 130
131 131 131
132 132 132
133 133 133
134 134 134
135 135 135
136 136 136
137 137 137
138 138 138
139 139 139
140 140 140
141 141 141
142 142 142
143 143 143
144 144 144
145 145 145
146 146 146
147 147 147
148 148 148
149 149 149
150 150 150
151 151 151
152 152 152
153 153 153
154 154 154
155 155 155
156 156 156
157 157 157
158 158 158
159 159 159
160 160 160
161 161 161
162 162 162
163 163 163
164 164 164
165 165 165
166 166 166
167 167 167
168 168 168
169 169 169
170 170 170
171 171 171
172 172 172
173 173 173
174 174 174
175 175 175
176 176 176
177 177 177
178 178 178
179 179 179
180 180 180
181 181 181
182 182 182
183 183 183
184 184 184
185 185 185
186 186 186
187 187 187
188 188 188
189 189 189
190 190 190
191 191 191
192 192 192
193 193 193
194 194 194
195 195 195
196 196 196
197 197 197
198 198 198
199 199 199
200 200 200
201 201 201
202 202 202
203 203 203
204 204 204
205 205 205
206 206 206
207 207 207
208 208 208
209 209 209
210 210 210
211 211 211
212 212 212
213 213 213
214 214 214
215 215 215
216 216 216
217 217 217
218 218 218
219 219 219
220 220 220
221 221 221
222 222 222
223 223 223
224 224 224
225 225 225
226 226 226
227 227 227
228 228 228
229 229 229
230 230 230
231 231 231
232 232 232
233 233 233
234 234 234
235 235 235
236 236 236
237 237 237
238 238 238
239 239 239
240 240 240
241 241 241
242 242 242
243 243 243
244 244 244
245 245 245
246 246 246
247 247 247
248 248 248
249 249 249
250 250 250
251 251 251
252 252 252
253 253 253
254 254 254
255 255 255'''
import numpy as np
tensor = np.array([[float(x)/255 for x in line.split()] for line in palette_str.split('\n')])
from matplotlib.colors import ListedColormap
palette = ListedColormap(tensor, 'davis')
|
doc/support/generate_doxygen.py | dark-richie/crashpad | 14,668 | 12703428 | <reponame>dark-richie/crashpad<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2017 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generating Doxygen documentation requires Doxygen, http://www.doxygen.org/.
import os
import shutil
import subprocess
import sys
def main(args):
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.join(script_dir, os.pardir, os.pardir)
# Run from the Crashpad project root directory.
os.chdir(crashpad_dir)
output_dir = os.path.join('out', 'doc', 'doxygen')
if os.path.isdir(output_dir) and not os.path.islink(output_dir):
shutil.rmtree(output_dir)
elif os.path.exists(output_dir):
os.unlink(output_dir)
os.makedirs(output_dir, 0o755)
doxy_file = os.path.join('doc', 'support', 'crashpad.doxy')
subprocess.check_call(['doxygen', doxy_file])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
pyvex/errors.py | Pascal-0x90/pyvex | 261 | 12703441 |
class PyVEXError(Exception):
pass
class SkipStatementsError(PyVEXError):
pass
#
# Exceptions and notifications that post-processors can raise
#
class LiftingException(Exception):
pass
class NeedStatementsNotification(LiftingException):
"""
A post-processor may raise a NeedStatementsNotification if it needs to work with statements, but the current IRSB
is generated without any statement available (skip_stmts=True). The lifter will re-lift the current block with
skip_stmts=False upon catching a NeedStatementsNotification, and re-run the post-processors.
It's worth noting that if a post-processor always raises this notification for every basic block without statements,
    it will essentially disable the statement-skipping optimization, which is bad for performance (especially for
CFGFast, which heavily relies on this optimization). Post-processor authors are encouraged to at least filter the
IRSBs based on available properties (jumpkind, next, etc.). If a post-processor must work with statements for the
    majority of IRSBs, the author should implement it in PyVEX in C for the sake of better performance.
"""
pass
|
lasagne/random.py | goncaloperes/Library_Lasagne | 3,986 | 12703449 | """
A module with a package-wide random number generator,
used for weight initialization and seeding noise layers.
This can be replaced by a :class:`numpy.random.RandomState` instance with a
particular seed to facilitate reproducibility.
Note: When using cuDNN, the backward passes of convolutional and max-pooling
layers will introduce additional nondeterminism (for performance reasons).
For 2D convolutions, you can enforce a deterministic backward pass
implementation via the Theano flags ``dnn.conv.algo_bwd_filter=deterministic``
and ``dnn.conv.algo_bwd_data=deterministic``. Alternatively, you can disable
cuDNN completely with ``dnn.enabled=False``.
"""
import numpy as np
_rng = np.random
def get_rng():
"""Get the package-level random number generator.
Returns
-------
:class:`numpy.random.RandomState` instance
The :class:`numpy.random.RandomState` instance passed to the most
recent call of :func:`set_rng`, or ``numpy.random`` if :func:`set_rng`
has never been called.
"""
return _rng
def set_rng(new_rng):
"""Set the package-level random number generator.
Parameters
----------
new_rng : ``numpy.random`` or a :class:`numpy.random.RandomState` instance
The random number generator to use.
"""
global _rng
_rng = new_rng
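

# A minimal usage sketch (added for illustration; not part of the original
# module): seed the package-level RNG so that weight initialization and noise
# layers are reproducible across runs.
if __name__ == "__main__":
    set_rng(np.random.RandomState(42))
    print(get_rng().uniform(size=3))  # prints the same three numbers every run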
|
sanitycheck/test_sanity_check.py | philipperemy/tensorflow-phased-lstm | 133 | 12703472 | import collections
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn import dynamic_rnn
from tensorflow.contrib.rnn import BasicLSTMCell
from helpers import FileLogger
from ml_utils import create_adam_optimizer
from ml_utils import create_weight_variable
from phased_lstm import PhasedLSTMCell
from sanitycheck.constants import *
from sanitycheck.data_reader import next_batch
def get_placeholders():
return tf.placeholder('float32', [BATCH_SIZE, SEQUENCE_LENGTH, 2 if ADD_TIME_INPUTS else 1]), tf.placeholder(
'float32', [BATCH_SIZE, 1])
def run_experiment(init_session=None, placeholder_def_func=get_placeholders):
batch_size = BATCH_SIZE
hidden_size = HIDDEN_STATES
learning_rate = 3e-4
momentum = 0.9
file_logger = FileLogger('log.tsv', ['step', 'training_loss', 'benchmark_loss'])
x, y = placeholder_def_func()
if ADD_TIME_INPUTS:
lstm = PhasedLSTMCell(hidden_size)
print('Using PhasedLSTMCell impl.')
else:
lstm = BasicLSTMCell(hidden_size)
print('Using BasicLSTMCell impl.')
initial_state = (tf.random_normal([batch_size, hidden_size], stddev=0.1),
tf.random_normal([batch_size, hidden_size], stddev=0.1))
outputs, state = dynamic_rnn(lstm, x, initial_state=initial_state, dtype=tf.float32)
rnn_out = tf.squeeze(tf.slice(outputs, begin=[0, tf.shape(outputs)[1] - 1, 0], size=[-1, -1, -1]))
# _, final_hidden = state
fc0_w = create_weight_variable('fc0_w', [hidden_size, 1])
fc0_b = tf.get_variable('fc0_b', [1])
out = tf.matmul(rnn_out, fc0_w) + fc0_b
loss = tf.reduce_mean(tf.square(out - y))
optimizer = create_adam_optimizer(learning_rate, momentum)
trainable = tf.trainable_variables()
grad_update = optimizer.minimize(loss, var_list=trainable)
if init_session is not None:
sess = init_session
else:
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
init = tf.global_variables_initializer()
sess.run(init)
# lstm.__call__(x[:, 0, :], initial_state, scope=None)
d = collections.deque(maxlen=10)
benchmark_d = collections.deque(maxlen=10)
max_steps = int(1e6)
for step in range(1, max_steps):
if step % 10 == 0:
print('step {}/{}'.format(step, max_steps))
x_s, y_s = next_batch(batch_size)
loss_value, _, pred_value = sess.run([loss, grad_update, out], feed_dict={x: x_s, y: y_s})
# The mean converges to 0.5 for IID U(0,1) random variables. Good benchmark.
benchmark_d.append(np.mean(np.square(0.5 - y_s)))
d.append(loss_value)
mean_loss = np.mean(d)
benchmark_mean_loss = np.mean(benchmark_d)
file_logger.write([step, mean_loss, benchmark_mean_loss])
file_logger.close()
if __name__ == '__main__':
run_experiment()
|
clearml/utilities/pigar/log.py | arielleoren/clearml | 2,097 | 12703519 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import logging.handlers
logger = logging.getLogger('pigar')
logger.setLevel(logging.WARNING)
|
lldb/scripts/swig_bot_lib/server.py | dan-zheng/llvm-project | 765 | 12703558 | #!/usr/bin/env python
"""
SWIG generation server. Listens for connections from swig generation clients
and runs swig in the requested fashion, sending back the results.
"""
# Future imports
from __future__ import absolute_import
from __future__ import print_function
# Python modules
import argparse
import io
import logging
import os
import select
import shutil
import socket
import struct
import sys
import tempfile
import traceback
# LLDB modules
import use_lldb_suite
from lldbsuite.support import fs
from lldbsuite.support import sockutil
# package imports
from . import local
from . import remote
default_port = 8537
def add_subparser_args(parser):
parser.add_argument(
"--port",
action="store",
default=default_port,
help=("The local port to bind to"))
parser.add_argument(
"--swig-executable",
action="store",
default=fs.find_executable("swig"),
dest="swig_executable")
def finalize_subparser_options(options):
pass
def initialize_listening_socket(options):
logging.debug("Creating socket...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logging.info("Binding to ip address '', port {}".format(options.port))
s.bind(('', options.port))
logging.debug("Putting socket in listen mode...")
s.listen()
return s
def accept_once(sock, options):
logging.debug("Waiting for connection...")
while True:
rlist, wlist, xlist = select.select([sock], [], [], 0.5)
if not rlist:
continue
client, addr = sock.accept()
logging.info("Received connection from {}".format(addr))
data_size = struct.unpack("!I", sockutil.recvall(client, 4))[0]
logging.debug("Expecting {} bytes of data from client"
.format(data_size))
data = sockutil.recvall(client, data_size)
logging.info("Received {} bytes of data from client"
.format(len(data)))
pack_location = None
try:
tempfolder = os.path.join(tempfile.gettempdir(), "swig-bot")
os.makedirs(tempfolder, exist_ok=True)
pack_location = tempfile.mkdtemp(dir=tempfolder)
logging.debug("Extracting archive to {}".format(pack_location))
local.unpack_archive(pack_location, data)
logging.debug("Successfully unpacked archive...")
config_file = os.path.normpath(os.path.join(pack_location,
"config.json"))
parsed_config = remote.parse_config(io.open(config_file))
config = local.LocalConfig()
config.languages = parsed_config["languages"]
config.swig_executable = options.swig_executable
config.src_root = pack_location
config.target_dir = os.path.normpath(
os.path.join(config.src_root, "output"))
logging.info(
"Running swig. languages={}, swig={}, src_root={}, target={}"
.format(config.languages, config.swig_executable,
config.src_root, config.target_dir))
status = local.generate(config)
logging.debug("Finished running swig. Packaging up files {}"
.format(os.listdir(config.target_dir)))
zip_data = io.BytesIO()
zip_file = local.pack_archive(zip_data, config.target_dir, None)
response_status = remote.serialize_response_status(status)
logging.debug("Sending response status {}".format(response_status))
logging.info("(swig output) -> swig_output.json")
zip_file.writestr("swig_output.json", response_status)
zip_file.close()
response_data = zip_data.getvalue()
logging.info("Sending {} byte response".format(len(response_data)))
client.sendall(struct.pack("!I", len(response_data)))
client.sendall(response_data)
finally:
if pack_location is not None:
logging.debug("Removing temporary folder {}"
.format(pack_location))
shutil.rmtree(pack_location)
def accept_loop(sock, options):
while True:
try:
accept_once(sock, options)
except Exception as e:
error = traceback.format_exc()
logging.error("An error occurred while processing the connection.")
logging.error(error)
def run(options):
print(options)
sock = initialize_listening_socket(options)
accept_loop(sock, options)
return options
|
flask_rest_jsonapi/schema.py | fossasia/flask-rest-jsonapi | 1,757 | 12703571 | <filename>flask_rest_jsonapi/schema.py
# -*- coding: utf-8 -*-
from marshmallow import class_registry
from marshmallow.base import SchemaABC
from marshmallow_jsonapi.fields import Relationship
from flask_rest_jsonapi.exceptions import InvalidField, InvalidInclude
def compute_schema(schema_cls, default_kwargs, qs, include):
"""Compute a schema around compound documents and sparse fieldsets
:param Schema schema_cls: the schema class
:param dict default_kwargs: the schema default kwargs
:param QueryStringManager qs: qs
:param list include: the relation field to include data from
:return Schema schema: the schema computed
"""
# manage include_data parameter of the schema
schema_kwargs = default_kwargs
schema_kwargs['include_data'] = schema_kwargs.get('include_data', tuple())
if include:
for include_path in include:
field = include_path.split('.')[0]
if field not in schema_cls._declared_fields:
raise InvalidInclude("{} has no attribute {}".format(schema_cls.__name__, field))
elif not isinstance(schema_cls._declared_fields[field], Relationship):
raise InvalidInclude("{} is not a relationship attribute of {}".format(field, schema_cls.__name__))
schema_kwargs['include_data'] += (field, )
    # make sure the id field is in the 'only' parameter, otherwise marshmallow will raise an Exception
if schema_kwargs.get('only') is not None and 'id' not in schema_kwargs['only']:
schema_kwargs['only'] += ('id',)
# create base schema instance
schema = schema_cls(**schema_kwargs)
# manage sparse fieldsets
if schema.opts.type_ in qs.fields:
# check that sparse fieldsets exists in the schema
for field in qs.fields[schema.opts.type_]:
if field not in schema.declared_fields:
raise InvalidField("{} has no attribute {}".format(schema.__class__.__name__, field))
tmp_only = set(schema.declared_fields.keys()) & set(qs.fields[schema.opts.type_])
if schema.only:
tmp_only &= set(schema.only)
schema.only = tuple(tmp_only)
    # make sure again that the id field is in the 'only' parameter, otherwise marshmallow will raise an Exception
if schema.only is not None and 'id' not in schema.only:
schema.only += ('id',)
# manage compound documents
if include:
for include_path in include:
field = include_path.split('.')[0]
relation_field = schema.declared_fields[field]
related_schema_cls = schema.declared_fields[field].__dict__['_Relationship__schema']
related_schema_kwargs = {}
if isinstance(related_schema_cls, SchemaABC):
related_schema_kwargs['many'] = related_schema_cls.many
related_schema_kwargs['include_data'] = related_schema_cls.__dict__.get('include_data')
related_schema_cls = related_schema_cls.__class__
if isinstance(related_schema_cls, str):
related_schema_cls = class_registry.get_class(related_schema_cls)
if '.' in include_path:
related_include = ['.'.join(include_path.split('.')[1:])]
else:
related_include = None
related_schema = compute_schema(related_schema_cls, related_schema_kwargs, qs, related_include)
relation_field.__dict__['_Relationship__schema'] = related_schema
return schema
def get_model_field(schema, field):
"""Get the model field of a schema field
:param Schema schema: a marshmallow schema
:param str field: the name of the schema field
:return str: the name of the field in the model
"""
if schema._declared_fields[field].attribute is not None:
return schema._declared_fields[field].attribute
return field
def get_relationships(schema):
"""Return relationship mapping from schema to model
:param Schema schema: a marshmallow schema
    :return dict: mapping of model field name to schema field name
"""
return {get_model_field(schema, key): key for (key, value) in schema._declared_fields.items()
if isinstance(value, Relationship)}
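

# A hedged illustration of these helpers (added here; not part of the original
# module). The schema below is hypothetical and only shows how `attribute`
# mappings are resolved.
if __name__ == '__main__':
    from marshmallow_jsonapi import Schema, fields

    class ArticleSchema(Schema):
        id = fields.Str()
        headline = fields.Str(attribute='title')
        author = Relationship(type_='people', attribute='writer')

        class Meta:
            type_ = 'articles'

    print(get_model_field(ArticleSchema, 'headline'))  # -> 'title'
    print(get_relationships(ArticleSchema))            # -> {'writer': 'author'}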
|
arch/arm/tests/adr_t2.py | Samsung/ADBI | 312 | 12703581 | <gh_stars>100-1000
from test import *
import random
print ' .thumb'
def test(rd, imm12):
name = 'test_adr_t2_%s' % tn()
cleanup = asm_wrap(name, rd)
print '%s_tinsn:' % name
print ' sub.w %s, pc, #%i' % (rd, imm12)
cleanup()
def iter_cases():
while True:
yield random.choice(T32REGS), random.randint(0, 4095)
print ' .thumb'
tests(test, iter_cases(), 30) |
manga_py/providers/manga_online_com_ua.py | sonvt1710/manga-py | 337 | 12703583 | from manga_py.provider import Provider
from .helpers.std import Std
class MangaOnlineCom(Provider, Std):
__local_storage = None
def __init_storage(self):
if not self.__local_storage:
self.__local_storage = {}
def get_chapter_index(self) -> str:
self.__init_storage()
idx_reg = r'/\d+.+-(\d+).+?-(\d+).*?html'
idx = self.re.search(idx_reg, self.chapter).groups()
if not idx:
idx_reg = r'/\d+.+-(\d+).+?html'
idx = (self.re.search(idx_reg, self.chapter).group(1), 0)
return '{:0>3}-{:0>3}'.format(*idx)
def get_content(self):
return ['0']
def get_manga_name(self) -> str:
self.__init_storage()
if not self.__local_storage.get('chapters', False):
self.__local_storage['chapters'] = self.get_chapters()
if len(self.__local_storage['chapters']):
return self.re.search(r'/manga/(.+)/.+\.html', self.__local_storage['chapters'][0]).group(1)
raise AttributeError()
def _get_chapters_cmanga(self):
s = '#dle-content > div > a[href*="/manga/"]'
return self.html_fromstring(self.get_url(), s)[::-1]
def _get_chapters_manga(self):
s = '.fullstory_main select.selectmanga option'
items = self.html_fromstring(self.get_url(), s)
return [i.get('value') for i in items[::-1]]
def get_chapters(self):
self.__init_storage()
if self.re.search('/cmanga/', self.get_url()):
return self._get_chapters_cmanga()
if self.re.search(r'/manga/[^/]+/\d+-', self.get_url()):
return self._get_chapters_manga()
return []
@staticmethod
def _get_pages_count(parser):
_len = len(parser.cssselect('#pages_all a'))
return _len + 1 if _len else 0
def get_files(self):
chapter = self.chapter
parser = self.html_fromstring(chapter, '.main_body', 0)
pages = self._get_pages_count(parser)
images = []
idx = self.re.search(r'/manga/[^/]+/(\d+)', chapter).group(1)
for n in range(pages):
url = '{}/engine/ajax/sof_fullstory.php?id={}&page={}'.format(self.domain, idx, n + 1)
parser = self.html_fromstring(url)[0]
images += self._images_helper(parser, 'img')
return images
def book_meta(self) -> dict:
# todo meta
pass
main = MangaOnlineCom
|
ggplot/geoms/geom_tile.py | themiwi/ggplot | 1,133 | 12703610 | <reponame>themiwi/ggplot<gh_stars>1000+
import pandas as pd
import matplotlib.patches as patches
from .geom import geom
from ..utils import calc_n_bins
class geom_tile(geom):
"""
Frequency table / heatmap
Parameters
----------
x:
x values for bins/categories
y:
y values for bins/categories
color:
color of the outer line
alpha:
transparency of fill
size:
thickness of outer line
linetype:
type of the outer line ('solid', 'dashed', 'dashdot', 'dotted')
fill:
color the interior of the bar will be
Examples
--------
"""
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'xbins': 20, 'ybins': 20, 'interpolate': False}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'facecolor', 'color': 'edgecolor'}
def plot(self, ax, data, _aes):
(data, _aes) = self._update_data(data, _aes)
params = self._get_plot_args(data, _aes)
variables = _aes.data
x = data[variables['x']]
y = data[variables['y']]
weight = variables['fill']
if 'fill' in variables:
del variables['fill']
n_xbins = self.params.get('xbins', calc_n_bins(x))
n_ybins = self.params.get('ybins', calc_n_bins(y))
x_cut, x_bins = pd.cut(x, n_xbins, retbins=True)
y_cut, y_bins = pd.cut(y, n_ybins, retbins=True)
data[variables['x'] + "_cut"] = x_cut
data[variables['y'] + "_cut"] = y_cut
counts = data[[weight, variables['x'] + "_cut", variables['y'] + "_cut"]].groupby([variables['x'] + "_cut", variables['y'] + "_cut"]).count().fillna(0)
weighted = data[[weight, variables['x'] + "_cut", variables['y'] + "_cut"]].groupby([variables['x'] + "_cut", variables['y'] + "_cut"]).sum().fillna(0)
if self.params['interpolate']==False:
def get_xy():
for x in x_bins:
for y in y_bins:
yield (x, y)
xy = get_xy()
xstep = x_bins[1] - x_bins[0]
ystep = y_bins[1] - y_bins[0]
maxval = counts.max().max() * weighted.max().max()
for ((idx, cnt), (_, wt)) in zip(counts.iterrows(), weighted.iterrows()):
xi, yi = next(xy)
params['alpha'] = (wt.values * cnt.values) / float(maxval)
ax.add_patch(
patches.Rectangle(
(xi, yi), # (x,y)
xstep, # width
ystep, # height
**params
)
)
else:
import matplotlib.pyplot as plt
z = []
for xi in x:
z.append([xi * yi for yi in y])
ax.contourf(x, y, z, 10, cmap=plt.cm.Blues)
# matplotlib patches don't automatically impact the scale of the ax, so
# we manually autoscale the x and y axes
ax.autoscale_view()
|
examples/keras_recipes/quasi_svm.py | rickiepark/keras-io | 1,542 | 12703612 | <reponame>rickiepark/keras-io
"""
Title: A Quasi-SVM in Keras
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/17
Last modified: 2020/04/17
Description: Demonstration of how to train a Keras model that approximates a SVM.
"""
"""
## Introduction
This example demonstrates how to train a Keras model that approximates a Support Vector
Machine (SVM).
The key idea is to stack a `RandomFourierFeatures` layer with a linear layer.
The `RandomFourierFeatures` layer can be used to "kernelize" linear models by applying
a non-linear transformation to the input
features and then training a linear model on top of the transformed features. Depending
on the loss function of the linear model, the composition of this layer and the linear
model results to models that are equivalent (up to approximation) to kernel SVMs (for
hinge loss), kernel logistic regression (for logistic loss), kernel linear regression
(for MSE loss), etc.
In our case, we approximate SVM using a hinge loss.
"""
"""
## Setup
"""
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import RandomFourierFeatures
"""
## Build the model
"""
model = keras.Sequential(
[
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096, scale=10.0, kernel_initializer="gaussian"
),
layers.Dense(units=10),
]
)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.hinge,
metrics=[keras.metrics.CategoricalAccuracy(name="acc")],
)
"""
## Prepare the data
"""
# Load MNIST
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data by flattening & scaling it
x_train = x_train.reshape(-1, 784).astype("float32") / 255
x_test = x_test.reshape(-1, 784).astype("float32") / 255
# Categorical (one hot) encoding of the labels
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
"""
## Train the model
"""
model.fit(x_train, y_train, epochs=20, batch_size=128, validation_split=0.2)
"""
I can't say that it works well or that it is indeed a good idea, but you can probably
get decent results by tuning your hyperparameters.
You can use this setup to add a "SVM layer" on top of a deep learning model, and train
the whole model end-to-end.
"""
|
ding/framework/tests/test_task.py | opendilab/DI-engine | 464 | 12703628 | from concurrent.futures import thread
from os import spawnl
from attr.validators import instance_of
import pytest
import time
import copy
import random
from mpire import WorkerPool
from ding.framework import Task
from ding.framework.context import Context
from ding.framework.parallel import Parallel
from ding.utils.design_helper import SingletonMetaclass
@pytest.mark.unittest
def test_serial_pipeline():
def step0(ctx):
ctx.setdefault("pipeline", [])
ctx.pipeline.append(0)
def step1(ctx):
ctx.pipeline.append(1)
# Execute step1, step2 twice
with Task() as task:
for _ in range(2):
task.forward(step0)
task.forward(step1)
assert task.ctx.pipeline == [0, 1, 0, 1]
# Renew and execute step1, step2
task.renew()
assert task.ctx.total_step == 1
task.forward(step0)
task.forward(step1)
assert task.ctx.pipeline == [0, 1]
# Test context inheritance
task.renew()
@pytest.mark.unittest
def test_serial_yield_pipeline():
def step0(ctx):
ctx.setdefault("pipeline", [])
ctx.pipeline.append(0)
yield
ctx.pipeline.append(0)
def step1(ctx):
ctx.pipeline.append(1)
with Task() as task:
task.forward(step0)
task.forward(step1)
task.backward()
assert task.ctx.pipeline == [0, 1, 0]
assert len(task._backward_stack) == 0
@pytest.mark.unittest
def test_async_pipeline():
def step0(ctx):
ctx.setdefault("pipeline", [])
ctx.pipeline.append(0)
def step1(ctx):
ctx.pipeline.append(1)
# Execute step1, step2 twice
with Task(async_mode=True) as task:
for _ in range(2):
task.forward(step0)
time.sleep(0.1)
task.forward(step1)
time.sleep(0.1)
task.backward()
assert task.ctx.pipeline == [0, 1, 0, 1]
task.renew()
assert task.ctx.total_step == 1
@pytest.mark.unittest
def test_async_yield_pipeline():
def step0(ctx):
ctx.setdefault("pipeline", [])
time.sleep(0.1)
ctx.pipeline.append(0)
yield
ctx.pipeline.append(0)
def step1(ctx):
time.sleep(0.2)
ctx.pipeline.append(1)
with Task(async_mode=True) as task:
task.forward(step0)
task.forward(step1)
time.sleep(0.3)
task.backward().sync()
assert task.ctx.pipeline == [0, 1, 0]
assert len(task._backward_stack) == 0
def parallel_main():
sync_count = 0
def on_sync_parallel_ctx(ctx):
nonlocal sync_count
assert isinstance(ctx, Context)
sync_count += 1
with Task() as task:
task.on("sync_parallel_ctx", on_sync_parallel_ctx)
task.use(lambda _: time.sleep(0.2 + random.random() / 10))
task.run(max_step=10)
assert sync_count > 0
def parallel_main_eager():
sync_count = 0
def on_sync_parallel_ctx(ctx):
nonlocal sync_count
assert isinstance(ctx, Context)
sync_count += 1
with Task() as task:
task.on("sync_parallel_ctx", on_sync_parallel_ctx)
for _ in range(10):
task.forward(lambda _: time.sleep(0.2 + random.random() / 10))
task.renew()
assert sync_count > 0
@pytest.mark.unittest
def test_parallel_pipeline():
Parallel.runner(n_parallel_workers=2)(parallel_main_eager)
Parallel.runner(n_parallel_workers=2)(parallel_main)
def attach_mode_main_task():
with Task() as task:
task.use(lambda _: time.sleep(0.1))
task.run(max_step=10)
def attach_mode_attach_task():
ctx = None
def attach_callback(new_ctx):
nonlocal ctx
ctx = new_ctx
with Task(attach_callback=attach_callback) as task:
task.use(lambda _: time.sleep(0.1))
task.run(max_step=10)
assert ctx is not None
def attach_mode_main(job):
if job == "run_task":
Parallel.runner(
n_parallel_workers=2, protocol="tcp", address="127.0.0.1", ports=[50501, 50502]
)(attach_mode_main_task)
elif "run_attach_task":
time.sleep(0.3)
try:
Parallel.runner(
n_parallel_workers=1,
protocol="tcp",
address="127.0.0.1",
ports=[50503],
attach_to=["tcp://127.0.0.1:50501", "tcp://127.0.0.1:50502"]
)(attach_mode_attach_task)
finally:
del SingletonMetaclass.instances[Parallel]
else:
raise Exception("Unknown task")
@pytest.mark.unittest
def test_attach_mode():
with WorkerPool(n_jobs=2, daemon=False, start_method="spawn") as pool:
pool.map(attach_mode_main, ["run_task", "run_attach_task"])
@pytest.mark.unittest
def test_label():
with Task() as task:
result = {}
task.use(lambda _: result.setdefault("not_me", True), filter_labels=["async"])
task.use(lambda _: result.setdefault("has_me", True))
task.run(max_step=1)
assert "not_me" not in result
assert "has_me" in result
def sync_parallel_ctx_main():
with Task() as task:
task.use(lambda _: time.sleep(1))
if task.router.node_id == 0: # Fast
task.run(max_step=2)
else: # Slow
task.run(max_step=10)
assert task.parallel_ctx
assert task.ctx.finish
assert task.ctx.total_step < 9
@pytest.mark.unittest
def test_sync_parallel_ctx():
Parallel.runner(n_parallel_workers=2)(sync_parallel_ctx_main)
@pytest.mark.unittest
def test_emit():
with Task() as task:
greets = []
task.on("Greeting", lambda msg: greets.append(msg))
def step1(ctx):
task.emit("Greeting", "Hi")
task.use(step1)
task.run(max_step=10)
assert len(greets) == 10
def emit_remote_main():
with Task() as task:
time.sleep(0.3) # Wait for bound
greets = []
if task.router.node_id == 0:
task.on("Greeting", lambda msg: greets.append(msg))
else:
for _ in range(10):
task.emit_remote("Greeting", "Hi")
time.sleep(0.1)
time.sleep(1.2)
if task.router.node_id == 0:
assert len(greets) > 5
else:
assert len(greets) == 0
@pytest.mark.unittest
def test_emit_remote():
Parallel.runner(n_parallel_workers=2)(emit_remote_main)
@pytest.mark.unittest
def test_wait_for():
# Wait for will only work in async or parallel mode
with Task(async_mode=True, n_async_workers=2) as task:
greets = []
def step1(_):
hi = task.wait_for("Greeting")[0][0]
if hi:
greets.append(hi)
def step2(_):
task.emit("Greeting", "Hi")
task.use(step1)
task.use(step2)
task.run(max_step=10)
assert len(greets) == 10
assert all(map(lambda hi: hi == "Hi", greets))
# Test timeout exception
with Task(async_mode=True, n_async_workers=2) as task:
def step1(_):
task.wait_for("Greeting", timeout=0.3, ignore_timeout_exception=False)
task.use(step1)
with pytest.raises(TimeoutError):
task.run(max_step=1)
@pytest.mark.unittest
def test_async_exception():
with Task(async_mode=True, n_async_workers=2) as task:
def step1(_):
task.wait_for("any_event") # Never end
def step2(_):
time.sleep(0.3)
raise Exception("Oh")
task.use(step1)
task.use(step2)
with pytest.raises(Exception):
task.run(max_step=2)
assert task.ctx.total_step == 0
|
tests/examples/test_custom_object_repr.py | dtczest/syrupy | 147 | 12703633 | <reponame>dtczest/syrupy<filename>tests/examples/test_custom_object_repr.py
class MyCustomClass:
prop1 = 1
prop2 = "a"
prop3 = {1, 2, 3}
def test_snapshot_custom_class(snapshot):
assert MyCustomClass() == snapshot
class MyCustomReprClass(MyCustomClass):
def __repr__(self):
state = "\n".join(
f" {a}={repr(getattr(self, a))},"
for a in sorted(dir(self))
if not a.startswith("_")
)
return f"{self.__class__.__name__}(\n{state}\n)"
def test_snapshot_custom_repr_class(snapshot):
assert MyCustomReprClass() == snapshot
|
jixianjiancha/check/main.py | zx273983653/vulscan | 582 | 12703635 | <filename>jixianjiancha/check/main.py
#!/usr/bin/env python
#encoding=utf-8
# __author__ test
# __time__ 2018-4-25
import sys
import paramiko
import time
import json
import commands
import ast
import threading
reload(sys)
sys.setdefaultencoding('utf-8')
lock=threading.Lock()
# Log in to the remote server and execute commands
def remote_ssh(ip,user,password):
try:
        # Transfer the script to the remote server
transport = paramiko.Transport((ip,22))
transport.connect(username=user, password=password)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, 22, username=user, password=password, timeout=200)
print u"[*]连接到远程服务器"
#登录远程服务器的路径
stdin, stdout, stderr=ssh.exec_command("pwd")
path=stdout.read().strip("\n")
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.put('jixianjiancha.tar.gz', '%s/jixianjiancha.tar.gz'%path)
print u"[*]将脚本传送到远程服务器"
time.sleep(20)
stdin, stdout, stderr=ssh.exec_command('tar -xvf %s/jixianjiancha.tar.gz'%path)
print u"[*]在远程服务器上解压脚本"
time.sleep(10)
stdin, stdout, stderr=ssh.exec_command("python %s/jixianjiancha/main.py"%path)
print u"[*]在远程服务器上执行脚本......"
#判断服务器上的脚本是否执行完成
'''
tag=True
while(tag):
stdin, stdout, stderr=ssh.exec_command("cat /root/jixianjiancha/tip.txt")
if 'finsh' in stdout.read():
tag=False
'''
time.sleep(30)
        # Fetch the result.json produced on the remote server to the local machine
sftp.get('%s/jixianjiancha/result.json'%path,'result.json')
print u"[*]将扫描结果拉取到本地,结果保存在result.json"
time.sleep(10)
sftp.close()
transport.close()
        # Delete the files on the remote server:
stdin, stdout, stderr=ssh.exec_command("rm -rf %s/jixianjiancha*"%path)
ssh.close()
print u"[*]删除远程服务器上的文件"
print u"[+]系统漏洞扫描结束"
except Exception as e:
print u"[-]连接失败,请重新连接"
print e
finally:
ssh.close()
transport.close()
# Port scan: get port information from the remote server and write it to JSON
def portScan(ip,user,password):
try:
result={}
with open("result.json","r") as fn:
data=fn.readlines()
fn.close()
result=json.loads(data[0])
result[u"ip"]=ip
result[u"username"]=user
result[u"password"]=password
        # Run the port scan
        print u"[*]Scanning ports on host %s......"%ip
commandResult=commands.getoutput("python port_scan.py %s"%ip)
result[u"端口详情"]=ast.literal_eval(commandResult)
        # Save host info and port scan info into the JSON file
with open("result.json",'w') as fn:
json.dump(result,fn,ensure_ascii=False)
fn.close()
print u"[+]本次端口扫描结束"
except Exception as e:
print u"[-]端口扫描失败,请重新扫描"
print e
'''
if __name__ == '__main__':
ip="192.168.159.132"
user="root"
password="<PASSWORD>"
lock.acquire()
remote_ssh(ip,user,password)
lock.release()
portScan(ip,user,password)
print u"[-]本次扫描结束,结果保存在result.json文件中"
'''
ip=sys.argv[1]
user=sys.argv[2]
password=sys.argv[3]
lock.acquire()
remote_ssh(ip,user,password)
lock.release()
portScan(ip,user,password)
print u"[-]本次扫描结束,结果保存在result.json文件中"
commands.getoutput("echo 'finsh'>end.txt")
|
packages/@aws-cdk-containers/ecs-service-extensions/lib/extensions/assign-public-ip/lambda/lib/events.py | RichiCoder1/aws-cdk | 6,159 | 12703681 | from lib.records import TaskInfo, EniInfo
def extract_event_task_info(task_description) -> TaskInfo:
arn = task_description['taskArn']
    # Parse the ENI info out of the attachments
enis = [
EniInfo(eni_id=detail['value']) for network_interface in task_description['attachments']
if network_interface['type'] == 'eni' for detail in network_interface['details']
if detail['name'] == 'networkInterfaceId'
]
# Create an object out of the extracted information
return TaskInfo(task_arn=arn, enis=enis) |
book_manager/bookman.py | anshul2807/Automation-scripts | 496 | 12703695 | <filename>book_manager/bookman.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
from book import show_books, add_book, edit_book, delete_book
from Dialog import Ui_Dialog
from data_model import Book
conn = sqlite3.connect("books.db")
books = []
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
global books
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1037, 622)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.list_1 = QtWidgets.QListWidget(self.centralwidget)
self.list_1.setGeometry(QtCore.QRect(170, 20, 831, 601))
self.list_1.setObjectName("list_1")
books = show_books(conn)
for item in books:
newitem = QtWidgets.QListWidgetItem(
f"Name: {item[0]}\nPath: {item[1]}\nTags: {', '.join(item[2])}\nNotes: {item[3]}\n")
font = QtGui.QFont('Times', 12)
font.setBold(True)
font.setWeight(50)
newitem.setFont(font)
self.list_1.addItem(newitem)
self.list_1.itemDoubleClicked.connect(self.open_properties)
# self.list_1.item
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 131, 61))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(85, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active,
QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive,
QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled,
QtGui.QPalette.BrightText, brush)
self.label.setPalette(palette)
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("Icons/logo.png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.button_add = QtWidgets.QPushButton(self.centralwidget)
self.button_add.setGeometry(QtCore.QRect(20, 80, 111, 41))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Icons/add.svg"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_add.setIcon(icon)
self.button_add.setObjectName("button_add")
self.button_add.clicked.connect(self.add_books)
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(10, 130, 151, 261))
self.groupBox.setObjectName("groupBox")
self.button_search = QtWidgets.QPushButton(self.groupBox)
self.button_search.setGeometry(QtCore.QRect(40, 180, 41, 31))
self.button_search.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("Icons/search.webp"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.button_search.setIcon(icon1)
self.button_search.setObjectName("button_search")
self.button_search.clicked.connect(self.search_books)
self.radioButton_1 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_1.setGeometry(QtCore.QRect(10, 40, 95, 20))
self.radioButton_1.setChecked(True)
self.radioButton_1.setObjectName("radioButton_1")
self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_2.setGeometry(QtCore.QRect(10, 60, 95, 20))
self.radioButton_2.setObjectName("radioButton_2")
self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_3.setGeometry(QtCore.QRect(10, 80, 95, 20))
self.radioButton_3.setObjectName("radioButton_3")
self.radioButton_4 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_4.setGeometry(QtCore.QRect(10, 100, 95, 20))
self.radioButton_4.setObjectName("radioButton_4")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 20, 61, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.groupBox)
self.label_3.setGeometry(QtCore.QRect(10, 130, 55, 16))
self.label_3.setObjectName("label_3")
self.lineEdit = QtWidgets.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(10, 150, 113, 22))
self.lineEdit.setText("")
self.lineEdit.setObjectName("lineEdit")
self.pushButton = QtWidgets.QPushButton(self.groupBox)
self.pushButton.setGeometry(QtCore.QRect(20, 220, 93, 28))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.show_all)
self.groupBox.raise_()
self.list_1.raise_()
self.label.raise_()
self.button_add.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "BookMan"))
self.button_add.setText(_translate("MainWindow", " Add Book"))
self.groupBox.setTitle(_translate("MainWindow", "Search"))
self.radioButton_1.setText(_translate("MainWindow", "Name"))
self.radioButton_2.setText(_translate("MainWindow", "Link"))
self.radioButton_3.setText(_translate("MainWindow", "Tags"))
self.radioButton_4.setText(_translate("MainWindow", "Notes"))
self.label_2.setText(_translate("MainWindow", "Search by"))
self.label_3.setText(_translate("MainWindow", "KeyWord"))
self.pushButton.setText(_translate("MainWindow", "Clear Search"))
def show_all(self):
self.list_1.clear()
global books
# print(books)
for item in books:
newitem = QtWidgets.QListWidgetItem(
f"Name: {item[0]}\nPath: {item[1]}\nTags: {', '.join(item[2])}\nNotes: {item[3]}\n")
font = QtGui.QFont('Times', 12)
font.setBold(True)
font.setWeight(50)
newitem.setFont(font)
self.list_1.addItem(newitem)
# print("Clicked")
self.lineEdit.clear()
def search_books(self):
global books
results = []
searchtext = self.lineEdit.text()
field = [self.radioButton_1.isChecked(), self.radioButton_2.isChecked(
), self.radioButton_3.isChecked(), self.radioButton_4.isChecked()]
ind = field.index(True)
for i in books:
if searchtext in i[ind]:
results.append(i)
self.list_1.clear()
for item in results:
newitem = QtWidgets.QListWidgetItem(
f"Name: {item[0]}\nPath: {item[1]}\nTags: {', '.join(item[2])}\nNotes: {item[3]}\n")
font = QtGui.QFont('Times', 12)
            font.setBold(True)
newitem.setFont(font)
self.list_1.addItem(newitem)
def add_books(self):
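        """Pick a file, collect its metadata in the Add dialog and store the book."""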
global books
        res = self.open_dialog_box()
        if not res[0]:  # file dialog was cancelled, nothing to add
            return
        name = res[0].split('/')[-1]
Dialog2 = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog2, path=res[0], name=name, add=1)
Dialog2.show()
s = Dialog2.exec_()
if s == 1:
if ui.data['path'] not in [i[1] for i in books]:
books.append([ui.data['name'], ui.data['path'],
ui.data['tags'], ui.data['notes']])
item = books[-1]
newitem = QtWidgets.QListWidgetItem(
f"Name: {item[0]}\nPath: {item[1]}\nTags: {', '.join(item[2])}\nNotes: {item[3]}\n")
font = QtGui.QFont('Times', 12)
                font.setBold(True)
newitem.setFont(font)
self.list_1.addItem(newitem)
add_book(conn, Book(ui.data['name'], ui.data['path'], ', '.join(
item[2]), ui.data['notes']))
def open_dialog_box(self):
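        """Open a file-selection dialog and return QFileDialog's (path, filter) tuple."""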
filename = QtWidgets.QFileDialog.getOpenFileName()
return filename
# print(filename)
def open_properties(self, item):
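        """Rebuild the clicked book's fields and open the edit dialog.

        Depending on the dialog result, the book is either deleted or its
        edited values are written back to memory and to the database.
        """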
global books
lines = item.text().split('\n')
final = []
for index, i in enumerate(lines):
lines[index] = i.strip()
val = ' '.join(lines[index].split(' ')[1:])
final.append(val)
Dialog2 = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(
Dialog2, path=final[1], name=final[0], tags=final[2], notes=final[3])
Dialog2.show()
s = Dialog2.exec_()
if s == 1:
if ui.data['delete'] is True:
for index, i in enumerate(books):
if i[1] == final[1]:
ind = index
break
delete_book(conn, books[ind][1])
del books[ind]
self.show_all()
else:
# Ok is clicked
for index, i in enumerate(books):
if i[1] == final[1]:
ind = index
break
                books[ind][0] = ui.data['name']
                books[ind][1] = ui.data['path']
                books[ind][2] = ui.data['tags']
                books[ind][3] = ui.data['notes']
                edit_book(conn, 'name', books[ind][0], books[ind][1])
                edit_book(conn, 'tags', ', '.join(
                    books[ind][2]), books[ind][1])
                edit_book(conn, 'notes', books[ind][3], books[ind][1])
self.show_all()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
leetcode/322.coin-change.py | geemaple/algorithm | 177 | 12703732 | <reponame>geemaple/algorithm
#
# @lc app=leetcode id=322 lang=python3
#
# [322] Coin Change
#
# https://leetcode.com/problems/coin-change/description/
#
# algorithms
# Medium (27.95%)
# Total Accepted: 261.6K
# Total Submissions: 810K
# Testcase Example: '[1,2,5]\n11'
#
# You are given coins of different denominations and a total amount of money
# amount. Write a function to compute the fewest number of coins that you need
# to make up that amount. If that amount of money cannot be made up by any
# combination of the coins, return -1.
#
# Example 1:
#
#
# Input: coins = [1, 2, 5], amount = 11
# Output: 3
# Explanation: 11 = 5 + 5 + 1
#
# Example 2:
#
#
# Input: coins = [2], amount = 3
# Output: -1
#
#
# Note:
# You may assume that you have an infinite number of each kind of coin.
#
#
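# Approach: bottom-up dynamic programming over the amounts 0..amount,
# O(amount * len(coins)) time and O(amount) extra space.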
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
table = [float('inf') for _ in range(amount + 1)]
table[0] = 0
for i in range(1, amount + 1):
for coin in coins:
if i - coin >= 0 and table[i - coin] + 1 < table[i]:
table[i] = table[i - coin] + 1
return table[amount] if table[amount] < float('inf') else -1
class Solution2(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
table = [float('inf') for _ in range(amount + 1)]
table[0] = 0
t = [0 for _ in range(amount + 1)]
for i in range(1, amount + 1):
for coin in coins:
if i - coin >= 0 and table[i - coin] + 1 < table[i]:
table[i] = table[i - coin] + 1
t[i] = coin
        # Walk the choice table backwards to print one optimal combination.
        k = amount
        print('combination:', end=' ')
        while t[k] > 0:
            print(t[k], end=' ')
            k = k - t[k]
        print()
return table[amount] if table[amount] < float('inf') else -1
if __name__ == "__main__":
s = Solution2()
print(s.coinChange([2, 5, 7], 27))
|
tests/functional/services/catalog/utils/api/__init__.py | rbrady/anchore-engine | 1,484 | 12703755 | from tests.functional.services.catalog.utils.api import objects
|