"""
Util functions for ts.py
"""
import functools
import numpy
from qcelemental import constants as qcc
import automol
def reorder_zmatrix_for_redef(zma, a_idx, h_idx,
frm_bnd_keys, brk_bnd_keys):
""" performs z-matrix reordering operations required to
build proper z-matrices for hydrogen migrations
"""
# initialize zmat components needed later
symbols = automol.zmatrix.symbols(zma)
# Get the longest chain for all the atoms
_, gras = shifted_standard_zmas_graphs([zma], remove_stereo=True)
gra = functools.reduce(automol.graph.union, gras)
xgr1, = automol.graph.connected_components(gra)
chains_dct = automol.graph.atom_longest_chains(xgr1)
# find the longest heavy-atom chain for the forming atom
form_chain = chains_dct[a_idx]
# get the indices used for reordering
# get the longest chain from the bond forming atom (not including H)
order_idxs = [idx for idx in form_chain
if symbols[idx] != 'H' and idx != h_idx]
# add all the heavy-atoms not in the chain
for i, atom in enumerate(symbols):
if i not in order_idxs and atom != 'H':
order_idxs.append(i)
# add all the hydrogens
for i, atom in enumerate(symbols):
if i != h_idx and atom == 'H':
order_idxs.append(i)
    # add the migrating atom
    order_idxs.append(h_idx)
    # get the geometry and reorder it according to the order_idxs list
geo = [list(x) for x in automol.zmatrix.geometry(zma)]
geo2 = tuple(tuple(geo[idx]) for idx in order_idxs)
# Convert the reordered geometry into a zma
print('init_geo2\n', automol.geom.string(geo2))
zma2 = automol.geom.zmatrix(geo2)
# Convert the frm and brk keys
order_dct = automol.geom.zmatrix_atom_ordering(geo2)
frm_bnd_keys = frozenset(order_dct[x] for x in frm_bnd_keys)
brk_bnd_keys = frozenset(frozenset(order_dct[x] for x in keys)
for keys in brk_bnd_keys)
# brk_bnd_keys = frozenset(order_dct[x] for x in brk_bnd_keys)
return zma2, frm_bnd_keys, brk_bnd_keys
def include_babs3(frm_bnd, rct2_gra):
"""Should we include babs3?
"""
include = False
atm_ngbs = automol.graph.atom_neighbor_keys(rct2_gra)
is_terminal = False
for atm in list(frm_bnd):
if atm in atm_ngbs:
if len(atm_ngbs[atm]) == 1:
is_terminal = True
if len(atm_ngbs.keys()) > 2 and is_terminal:
include = True
return include
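# A rough illustration of the check above, using hypothetical atom keys: for a
# forming bond (0, 5) where atom 5 belongs to rct2 and is terminal (exactly one
# neighbor), and rct2 has more than two atoms, include_babs3 returns True; for a
# diatomic (or smaller) rct2 it returns False.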
def shifted_standard_zmas_graphs(zmas, remove_stereo=False):
""" Generate zmas and graphs from input zmas
        shifted and in their standard form
"""
zmas = list(zmas)
conv = functools.partial(
automol.convert.zmatrix.graph, remove_stereo=remove_stereo)
gras = list(map(conv, zmas))
shift = 0
for idx, (zma, gra) in enumerate(zip(zmas, gras)):
zmas[idx] = automol.zmatrix.standard_form(zma, shift=shift)
gras[idx] = automol.graph.transform_keys(gra, lambda x: x+shift)
shift += len(automol.graph.atoms(gra))
zmas = tuple(zmas)
gras = tuple(map(automol.graph.without_dummy_atoms, gras))
return zmas, gras
def join_atom_keys(zma, atm1_key):
""" returns available join atom keys (if available) and a boolean
indicating whether the atoms are in a chain or not
"""
gra = automol.convert.zmatrix.graph(zma)
atm1_chain = (
automol.graph.atom_longest_chains(gra)[atm1_key])
atm1_ngb_keys = (
automol.graph.atom_neighbor_keys(gra)[atm1_key])
if len(atm1_chain) == 1:
atm2_key = None
atm3_key = None
chain = False
elif len(atm1_chain) == 2 and len(atm1_ngb_keys) == 1:
atm2_key = atm1_chain[1]
atm3_key = None
chain = False
elif len(atm1_chain) == 2:
atm2_key = atm1_chain[1]
atm3_key = sorted(atm1_ngb_keys - {atm2_key})[0]
chain = False
else:
atm2_key, atm3_key = atm1_chain[1:3]
chain = True
return atm2_key, atm3_key, chain
def reorder_zma_for_radicals(zma, rad_idx):
""" Creates a zmatrix where the radical atom is the first entry
in the zmatrix
"""
geo = automol.zmatrix.geometry(zma)
geo_swp = automol.geom.swap_coordinates(geo, 0, rad_idx)
zma_swp = automol.geom.zmatrix(geo_swp)
return zma_swp
def shift_vals_from_dummy(vals, zma):
""" Shift a set of values using remdummy
Shift requires indices be 1-indexed
"""
type_ = type(vals)
dummy_idxs = automol.zmatrix.atom_indices(zma, sym='X')
shift_vals = []
for val in vals:
shift = 0
for dummy in dummy_idxs:
if val >= dummy:
shift += 1
shift_vals.append(val+shift)
shift_vals = type_(shift_vals)
return shift_vals
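# Worked example for the loop above (hypothetical indices): with dummy ('X')
# atoms at positions [3, 6], the values (2, 4, 7) become (2, 5, 9), since each
# value is incremented once for every dummy index less than or equal to it;
# the container type of `vals` is preserved via `type_`.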
def sort_zma_idxs(rct_zmas, prd_zmas, rxn_idxs):
""" zma idxs
"""
rct_idxs, prd_idxs = rxn_idxs
rct_zmas = list(map(rct_zmas.__getitem__, rct_idxs))
prd_zmas = list(map(prd_zmas.__getitem__, prd_idxs))
return rct_zmas, prd_zmas
def rct1_x_join(rct1_zma, atm1_key, atm2_key, atm3_key):
""" Build the R1+X matrix for bimol reactions
"""
x_zma = ((('X', (None, None, None), (None, None, None)),), {})
x_join_val_dct = {
'rx': 1. * qcc.conversion_factor('angstrom', 'bohr'),
'ax': 90. * qcc.conversion_factor('degree', 'radian'),
'dx': 180. * qcc.conversion_factor('degree', 'radian'),
}
x_join_keys = numpy.array(
[[atm1_key, atm2_key, atm3_key]])
x_join_names = numpy.array([['rx', 'ax', 'dx']],
dtype=numpy.object_)
x_join_names[numpy.equal(x_join_keys, None)] = None
x_join_name_set = set(numpy.ravel(x_join_names)) - {None}
x_join_val_dct = {name: x_join_val_dct[name]
for name in x_join_name_set}
rct1_x_zma = automol.zmatrix.join(
rct1_zma, x_zma, x_join_keys, x_join_names, x_join_val_dct)
return rct1_x_zma
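# Note on the constants above: the dummy atom X appears to be placed 1.0 Angstrom
# from atm1 at a 90 degree angle and 180 degree dihedral (converted to bohr and
# radians), and any join coordinate whose reference key is None is masked out of
# the name array and value dictionary before the join.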
# def _rct2_x_join():
# """
# """
def rct1x_rct2_join(rct1_x_zma, rct2_zma,
dist_name, dist_val,
jkey1, jkey2, jkey3,
join_vals=(85., 85., 170., 85., 170.)):
""" Second join function
"""
rct2_natms = automol.zmatrix.count(rct2_zma)
join_val_dct = {
dist_name: dist_val,
'aabs1': 85. * qcc.conversion_factor('degree', 'radian'),
'aabs2': 85. * qcc.conversion_factor('degree', 'radian'),
'babs1': 170. * qcc.conversion_factor('degree', 'radian'),
'babs2': 85. * qcc.conversion_factor('degree', 'radian'),
'babs3': 170. * qcc.conversion_factor('degree', 'radian'),
}
join_keys = numpy.array(
[[jkey1, jkey2, jkey3],
[None, jkey1, jkey2],
[None, None, jkey1]])[:rct2_natms]
join_names = numpy.array(
[[dist_name, 'aabs1', 'babs1'],
[None, 'aabs2', 'babs2'],
[None, None, 'babs3']])[:rct2_natms]
join_names[numpy.equal(join_keys, None)] = None
join_name_set = set(numpy.ravel(join_names)) - {None}
join_val_dct = {name: join_val_dct[name] for name in join_name_set}
rct1_x_rct2_zma = automol.zmatrix.join(
rct1_x_zma, rct2_zma, join_keys, join_names, join_val_dct)
return rct1_x_rct2_zma
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
_config = None
class CalvinConfig(object):
"""
    Handle configuration of Calvin, works similarly to Python's ConfigParser
Looks for calvin.conf or .calvin.conf files in:
1. Built-ins
2. Calvin's install directory
3. $HOME
4. all directories between $CWD and $HOME
5. current working directory ($CWD)
If $CWD is outside of $HOME, only (1) through (3) are searched.
Simple values are overridden by later configs, whereas lists are prepended by later configs.
If the environment variable CALVIN_CONFIG_PATH is set, it will be taken as a path to the ONLY
configuration file, overriding even built-ins.
Finally, wildcard environment variables on the form CALVIN_<SECTION>_<OPTION> may override
options read from defaults or config files. <SECTION> must be one of GLOBAL, TESTING, or DEVELOPER,
e.g. CALVIN_TESTING_UNITTEST_LOOPS=42
Printing the config object provides a great deal of information about the configuration.
"""
def __init__(self):
super(CalvinConfig, self).__init__()
self.config = {}
self.wildcards = []
self.override_path = os.environ.get('CALVIN_CONFIG_PATH', None)
        # Setting CALVIN_CONFIG_PATH takes precedence over all other configs
if self.override_path is not None:
config = self.config_at_path(self.override_path)
if config is not None:
self.set_config(self.config_at_path(self.override_path))
else:
self.override_path = None
_log.info("CALVIN_CONFIG_PATH does not point to a valid config file.")
# This is the normal config procedure
if self.override_path is None:
# The next line is guaranteed to work, so we have at least a default config
self.set_config(self.default_config())
conf_paths = self.config_paths()
for p in conf_paths:
delta_config = self.config_at_path(p)
self.update_config(delta_config)
# Check if any options were set on the command line
self.set_wildcards()
_log.debug("\n{0}\n{1}\n{0}".format("-" * 80, self))
def default_config(self):
default = {
'global': {
'comment': 'User definable section',
'actor_paths': ['systemactors'],
'framework': 'twistedimpl',
'storage_type': 'dht', # supports dht, securedht, local, and proxy
'storage_proxy': None,
'capabilities_blacklist': [],
'remote_coder_negotiator': 'static',
'static_coder': 'json',
'metering_timeout': 10.0,
'metering_aggregated_timeout': 3600.0, # Larger or equal to metering_timeout
'media_framework': 'defaultimpl',
'display_plugin': 'stdout_impl',
'transports': ['calvinip'],
'control_proxy': None
},
'testing': {
'comment': 'Test settings',
'unittest_loops': 2
},
'developer': {
'comment': 'Experimental settings',
},
'security': {}
}
return default
def add_section(self, section):
"""Add a named section"""
self.config.setdefault(section.lower(), {})
def get_in_order(self, option, default=None):
v = self.get('ARGUMENTS', option)
if v is None:
v = self.get('GLOBAL', option)
if v is None:
v = default
return v
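    # In other words, an option set in the ARGUMENTS section takes priority over
    # the same option in GLOBAL, and the supplied default is used only if neither
    # section defines it.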
def get(self, section, option):
"""Get value of option in named section, if section is None 'global' section is implied."""
try:
_section = 'global' if section is None else section.lower()
_option = option.lower()
return self.config[_section][_option]
except KeyError:
_log.info("Option {}.{} not set".format(_section, _option ))
except Exception as e:
_log.error("Error reading option {}.{}: {}".format(_section, _option, e))
return None
def set(self, section, option, value):
"""Set value of option in named section"""
_section = self.config[section.lower()]
_section[option.lower()] = value
def append(self, section, option, value):
"""Append value (list) of option in named section"""
_section = self.config[section.lower()]
_option = option.lower()
old_value = _section.setdefault(_option, [])
if type(old_value) is not list:
raise Exception("Can't append, {}:{} is not a list".format(section, option))
if type(value) is not list:
raise Exception("Can't append, value is not a list")
_section[_option][:0] = value
def set_config(self, config):
"""Set complete config"""
for section in config:
_section = section.lower()
self.add_section(_section)
for option, value in config[section].iteritems():
_option = option.lower()
self.set(_section, _option, value)
def _case_sensitive_keys(self, section, option, conf):
"""Return the case sensitive keys for 'secton' and 'option' (or None if not present) in 'conf'."""
for _section in conf:
if _section.lower() != section.lower():
continue
for _option in conf[section]:
if _option.lower() == option.lower():
return _section, _option
return _section, None
return None, None
def _expand_actor_paths(self, conf, conf_dir):
"""Expand $HOME, $USER etc. and resolve './actors' etc. relative to the config file."""
# Get the correct keys to use with the config dict since we allow mixed case, but convert to lower internally
_section, _option = self._case_sensitive_keys('global', 'actor_paths', conf)
if not _option:
return
paths = conf[_section][_option]
# First handle expansion of env vars
expanded = [os.path.expandvars(p) for p in paths]
# Normalize and handle './', i.e. relative to config file
conf[_section][_option] = [os.path.normpath(os.path.join(conf_dir, p) if p.startswith('./') else p) for p in expanded]
def config_at_path(self, path):
"""Returns config or None if no config at path."""
if os.path.exists(path + '/calvin.conf'):
confpath = path + '/calvin.conf'
elif os.path.exists(path + '/.calvin.conf'):
confpath = path + '/.calvin.conf'
elif os.path.exists(path) and os.path.isfile(path):
confpath = path
else:
return None
try:
with open(confpath) as f:
conf = json.loads(f.read())
self._expand_actor_paths(conf, path)
except Exception as e:
_log.info("Could not read config at '{}': {}".format(confpath, e))
conf = None
return conf
def update_config(self, delta_config):
"""
Update config using delta_config.
If value in delta_config is list, prepend to value in config,
otherwise replace value in config.
"""
if not delta_config:
return
for section in delta_config:
for option, value in delta_config[section].iteritems():
if option.lower() == 'comment':
continue
operation = self.append if type(value) is list else self.set
operation(section, option, value)
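    # Worked example of the merge rules above (hypothetical delta config): given a
    # current config {'global': {'actor_paths': ['systemactors'], 'framework': 'twistedimpl'}},
    # updating with {'global': {'actor_paths': ['./actors'], 'framework': 'other'}}
    # prepends to the list and replaces the scalar, yielding
    # {'global': {'actor_paths': ['./actors', 'systemactors'], 'framework': 'other'}}.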
def install_location(self):
"""Return the 'installation dir'."""
this_dir = os.path.dirname(os.path.realpath(__file__))
install_dir = os.path.abspath(os.path.join(this_dir, '..'))
return install_dir
def config_paths(self):
"""
Return the install dir and list of paths from $HOME to the current working directory (CWD),
unless CWD is not rooted in $HOME in which case only install dir and $HOME is returned.
If install dir is in the path from $HOME to CWD it is not included a second time.
"""
if self.override_path is not None:
return [self.override_path]
inst_loc = self.install_location()
curr_loc = os.getcwd()
home = os.environ.get('HOME', curr_loc)
paths = [home, inst_loc]
if not curr_loc.startswith(home):
return paths
dpaths = []
while len(curr_loc) > len(home):
if curr_loc != inst_loc:
dpaths.append(curr_loc)
curr_loc, part = curr_loc.rsplit('/', 1)
return dpaths + paths
def set_wildcards(self):
"""
Allow environment variables on the form CALVIN_<SECTION>_<OPTION> to override options
read from defaults or config files. <SECTION> must be one of GLOBAL, TESTING, or DEVELOPER.
"""
wildcards = [e for e in os.environ if e.startswith('CALVIN_') and e != 'CALVIN_CONFIG_PATH']
for wildcard in wildcards:
parts = wildcard.split('_', 2)
if len(parts) < 3 or parts[1] not in ['GLOBAL', 'TESTING', 'DEVELOPER', 'ARGUMENTS']:
_log.info("Malformed environment variable {}, skipping.".format(wildcard))
continue
section, option = parts[1:3]
value = os.environ[wildcard]
try:
self.set(section, option, json.loads(value))
self.wildcards.append(wildcard)
except Exception as e:
_log.warning("Value {} of environment variable {} is malformed, skipping.".format(repr(value), wildcard))
def save(self, path, skip_arguments=True):
json.dump({k: v for k, v in self.config.iteritems() if k != "arguments" or not skip_arguments}, open(path, 'w'))
def __str__(self):
d = {}
d['config searchpaths'] = self.config_paths(),
d['config paths'] = [p for p in self.config_paths() if self.config_at_path(p) is not None],
d['config'] = self.config
d['CALVIN_CONFIG_PATH'] = self.override_path
d['wildcards'] = self.wildcards
return self.__class__.__name__ + " : " + json.dumps(d, indent=4, sort_keys=True)
def get():
global _config
if _config is None:
_config = CalvinConfig()
return _config
if __name__ == "__main__":
os.environ['CALVIN_CONFIG_PATH'] = '/Users/eperspe/Source/spikes/ConfigParser'
os.environ['CALVIN_TESTING_UNITTEST_LOOPS'] = '44'
a = get()
print(a)
p = a.get('global', 'actor_paths')
print(p, type(p))
p = a.get(None, 'framework')
print(p, type(p))
p = a.get(None, 'unittest_loops')
print(p, type(p))
p = a.get('Testing', 'unittest_loops')
print(p, type(p))
import random
from configuration import config
from src.genotype.cdn.genomes.blueprint_genome import BlueprintGenome
from src.genotype.cdn.nodes.blueprint_node import BlueprintNode
from src.genotype.cdn.nodes.module_node import ModuleNode
from src.genotype.neat import mutation_record
from src.genotype.neat.genome import Genome
from src.genotype.neat.node import Node
from src.genotype.neat.operators.mutators.genome_mutator import GenomeMutator
from src.genotype.neat.operators.mutators.mutation_report import MutationReport
class BlueprintGenomeMutator(GenomeMutator):
def mutate(self, genome: BlueprintGenome, mutation_record: mutation_record):
"""
        Performs the base NEAT genome mutations, as well as node and genome
        property mutations and all mutations specific to blueprint genomes
"""
mutation_report = self.mutate_base_genome(genome, mutation_record,
add_node_chance=config.blueprint_add_node_chance,
add_connection_chance=config.blueprint_add_connection_chance)
mutation_report += self.mutate_node_types(genome)
mutation_report += self.mutate_species_numbers(genome)
mutation_report += self.forget_module_mappings_mutation(genome)
mutation_report += self.forget_da_scheme(genome)
return genome
def mutate_node_types(self, genome:Genome) -> MutationReport:
"""
        Chance to change nodes from blueprint nodes to module nodes and vice versa
"""
mutation_report = MutationReport()
if random.random() < config.blueprint_node_type_switch_chance:
"""chose 1 node to change type"""
node: Node = random.choice(list(genome.nodes.values()))
if type(node) == BlueprintNode:
"""change node to a module node"""
module_node = ModuleNode(node.id,node.node_type)
genome.nodes[module_node.id] = module_node
mutation_report += "swapped blueprint node for a module node"
if type(node) == ModuleNode:
"""change node back to a blueprint node"""
blueprint_node = BlueprintNode(node.id,node.node_type)
genome.nodes[blueprint_node.id] = blueprint_node
mutation_report += "swapped module node for a blueprint node"
return mutation_report
def mutate_species_numbers(self, genome) -> MutationReport:
mutation_report = MutationReport()
import src.main.singleton as Singleton
for node in genome.nodes.values():
if type(node) != BlueprintNode:
continue
if random.random() < config.blueprint_node_species_switch_chance:
possible_species_ids = [spc.id for spc in Singleton.instance.module_population.species]
new_species_id = random.choice(possible_species_ids)
mutation_report+="changed species number of node " + str(node.id) + " from " + str(node.species_id) \
+ " to " + str(new_species_id)
node.species_id = new_species_id
return mutation_report
def forget_module_mappings_mutation(self, genome: BlueprintGenome) -> MutationReport:
mutation_report = MutationReport()
if config.use_module_retention and random.random()<config.module_map_forget_mutation_chance:
choices = list(set([node.species_id for node in genome.get_fully_connected_blueprint_nodes_iter() if node.linked_module_id != -1]))
if len(choices) == 0:
return mutation_report
species_id = random.choice(choices)
for node in genome.get_blueprint_nodes_iter():
if node.species_id == species_id:
node.linked_module_id = -1 # forget the link. will be sampled fresh next cycle
mutation_report += "forgot module mapping for species " + str(species_id)
return mutation_report
def forget_da_scheme(self, genome: BlueprintGenome) -> MutationReport:
mutation_report = MutationReport()
if not config.evolve_da_pop:
# blueprint tethered da schemes should not be forgotten by their da
return mutation_report
if random.random() < config.da_link_forget_chance:
genome.da = None
genome._da_id = -1
mutation_report += "forgot da scheme link"
return mutation_report
import setuptools
import re
with open("README.md", "r") as fh:
long_description = fh.read()
version = re.search(
r"""__version__\s*=\s*[\'"]([^\'"]*)[\'"]""",
open("msteams/__init__.py", "r").read(),
).group(1)
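# The regex above captures the quoted version string from a line such as
# __version__ = "0.1.0" in msteams/__init__.py (the value shown here is only an
# illustration, not the actual package version).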
setuptools.setup(
name="msteams",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="A builder/formatter for MS Teams cards",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/johanjeppsson/msteams",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=2.7",
)
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
from apps.core.views import Autocomplete, Home, Manual, Manual2
urlpatterns = [
path('', Home.as_view(), name='home'),
path('manual/', Manual.as_view(), name='manual'),
path('manual/2/', Manual2.as_view(), name='manual_2'),
path('about/', TemplateView.as_view(template_name="core/about.html"), name='about'),
path('tos/', TemplateView.as_view(template_name="core/tos.html"), name='tos'),
path('privacy/', TemplateView.as_view(template_name="core/privacy.html"), name='privacy'),
path('copyright/', TemplateView.as_view(template_name="core/copyright.html"), name='copyright'),
path('autocomplete/<path:scope>/', Autocomplete.as_view(), name='autocomplete'),
path('django-admin/', admin.site.urls),
path('', include('apps.auth.urls')),
path('', include('apps.univ.urls')),
path('', include('apps.wiki.urls')),
]
from itertools import chain
from shardingpy.optimizer.insert_optimizer import InsertShardingCondition
from shardingpy.routing.types.base import RoutingResult, TableUnit, RoutingTable
from shardingpy.rule.base import DataNode
from shardingpy.util.types import OrderedSet
class StandardRoutingEngine:
def __init__(self, sharding_rule, logic_table_name, sharding_conditions):
self.sharding_rule = sharding_rule
self.logic_table_name = logic_table_name
self.sharding_conditions = sharding_conditions
def route(self):
table_rule = self.sharding_rule.get_table_rule(self.logic_table_name)
database_sharding_columns = self.sharding_rule.get_database_sharding_strategy(table_rule).get_sharding_columns()
table_sharding_columns = self.sharding_rule.get_table_sharding_strategy(table_rule).get_sharding_columns()
routed_data_nodes = list()
if not self.sharding_conditions.sharding_conditions:
routed_data_nodes.extend(self._route(table_rule, list(), list()))
else:
for each in self.sharding_conditions.sharding_conditions:
database_sharding_values = self._get_sharding_values(database_sharding_columns, each)
table_sharding_values = self._get_sharding_values(table_sharding_columns, each)
data_nodes = self._route(table_rule, database_sharding_values, table_sharding_values)
routed_data_nodes.extend(data_nodes)
if isinstance(each, InsertShardingCondition):
each.data_nodes.extend(data_nodes)
return self._generate_routing_result(list(OrderedSet(routed_data_nodes)))
def _route(self, table_rule, database_sharding_values, table_sharding_values):
routed_data_sources = self._route_data_sources(table_rule, database_sharding_values)
return list(chain(*[self._route_tables(table_rule, e, table_sharding_values) for e in routed_data_sources]))
def _route_data_sources(self, table_rule, database_sharding_values):
available_target_databases = table_rule.get_actual_data_source_names()
if not available_target_databases:
return available_target_databases
result = list(OrderedSet(
self.sharding_rule.get_database_sharding_strategy(table_rule).do_sharding(available_target_databases,
database_sharding_values)))
assert result, 'no database route info'
return result
def _route_tables(self, table_rule, routed_data_source, table_sharding_values):
available_target_tables = table_rule.get_actual_table_names(routed_data_source)
if not table_sharding_values:
routed_tables = available_target_tables
else:
            routed_tables = list(OrderedSet(
                self.sharding_rule.get_table_sharding_strategy(table_rule).do_sharding(
                    available_target_tables, table_sharding_values)))
return [DataNode(routed_data_source, e) for e in routed_tables]
def _get_sharding_values(self, sharding_columns, sharding_condition):
return [e for e in sharding_condition.sharding_values if
self.logic_table_name == e.logic_table_name and e.column_name in sharding_columns]
def _generate_routing_result(self, routed_data_nodes):
result = RoutingResult()
for each in routed_data_nodes:
table_unit = TableUnit(each.data_source_name)
table_unit.routing_tables.append(RoutingTable(self.logic_table_name, each.table_name))
result.table_units.table_units.append(table_unit)
return result
import clr, sys, os, ctypes
from os.path import dirname, abspath, join, exists, expanduser
# when we implement hosting in Trinity.FFI.Native, we can remove the dependency on pythonnet
__dep_packages = [
'FSharp.Core/4.2.3/lib/netstandard1.6/FSharp.Core.dll',
'GraphEngine.Core/2.0.9328/lib/netstandard2.0/Trinity.Core.dll',
'GraphEngine.Storage.Composite/2.0.9328/lib/netstandard2.0/Trinity.Storage.Composite.dll',
'GraphEngine.Jit/2.0.9328/lib/netstandard2.0/GraphEngine.Jit.dll',
'GraphEngine.FFI/2.0.9328/lib/netstandard2.0/Trinity.FFI.dll',
'Newtonsoft.Json/9.0.1/lib/netstandard1.0/Newtonsoft.Json.dll',
'Microsoft.Extensions.ObjectPool/2.0.0/lib/netstandard2.0/Microsoft.Extensions.ObjectPool.dll',
]
__module_dir = dirname(abspath(__file__))
__dep_proj = join(__module_dir, "Dependencies.csproj")
__nuget_root = expanduser('~/.nuget/packages')
__package_dirs = [join(__nuget_root, f) for f in __dep_packages]
if not all([exists(f) for f in __package_dirs]):
os.system('dotnet restore "{}"'.format(__dep_proj))
#todo detect os and determine .net rid
ge_native_lib = join(__nuget_root, "GraphEngine.Core/2.0.9328/runtimes/win-x64/native/Trinity.dll")
ffi_native_lib = join(__nuget_root, "GraphEngine.FFI/2.0.9328/runtimes/win-x64/native/trinity_ffi.dll")
sys.path.append(__module_dir)
ctypes.cdll.LoadLibrary(ge_native_lib)
ctypes.cdll.LoadLibrary(ffi_native_lib)
for f in __package_dirs:
clr.AddReference(f)
__Trinity = __import__('Trinity')
# set default storage root to cwd/storage
__Trinity.TrinityConfig.StorageRoot = join(os.getcwd(), "storage")
# set default logging level
__Trinity.TrinityConfig.LoggingLevel = __Trinity.Diagnostics.LogLevel.Info
# load default configuration file
__Trinity.TrinityConfig.LoadConfig(join(os.getcwd(), "trinity.xml"))
# then initialize Trinity
__Trinity.Global.Initialize()
__ffi = __import__('ffi')
__ffi.Init()
import discord
import psutil
import os
from utils import default
from discord.ext import commands
class Información(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = default.config()
self.name = self.config['name']
self.version = self.config['version']
self.guild = self.config['guild']
self.process = psutil.Process(os.getpid())
@commands.command(aliases=['stats', 'botinfo'])
async def about(self, ctx):
ramUsage = self.process.memory_full_info().rss / 1024**2
before_ws = int(round(self.bot.latency * 1000, 1))
embed = discord.Embed(color=self.config['blurple'])
embed.set_thumbnail(url=ctx.bot.user.avatar_url)
embed.add_field(
name='Desarrollador',
value=', '.join([str(self.bot.get_user(x)) for x in self.config['owners']]),
inline=True
)
embed.add_field(name='Usuarios', value=f'{len(ctx.bot.users)}', inline=True)
embed.add_field(name='Emojis', value=f'{len(ctx.bot.emojis)}', inline=True)
embed.add_field(name='Librería', value=f'discord.py', inline=True)
embed.add_field(name='Comandos', value=len([x.name for x in self.bot.commands]), inline=True)
embed.add_field(name='Memoria', value=f'{ramUsage:.2f} MB / 16GB', inline=True)
embed.add_field(name='Latencia', value=f'Websocket: {before_ws}ms')
await ctx.send(content='ℹ Sobre **{0}** | **{1}**'.format(self.name, self.version), embed=embed, mention_author=False)
def setup(bot):
    bot.add_cog(Información(bot))
from jinja2 import FileSystemLoader
from oacensus.report import Report
import jinja2
import os
import shutil
cur_dir = os.path.abspath(os.path.dirname(__file__))
class JinjaReport(Report):
"""
Base class for reports which use jinja templating.
"""
_settings = {
'template-file' : ("Path to template file.", None),
'output-dir' : ("Directory to output report. Should start with 'report-' prefix", None),
'output-file' : ("Name of report.", "output.html"),
'template-dirs' : (
"Locations to look for template files.",
['.', os.path.join(cur_dir, 'templates')]
)
}
def file_in_output_dir(self, filename):
return os.path.join(self.setting('output-dir'), filename)
def template_data(self):
"""
Return a dictionary whose keys will be available in the jinja template.
"""
return {
'foo' : 123
}
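    # Minimal illustration of how the dictionary above is used (assuming the default
    # settings): a template found via 'template-dirs' could contain a line like
    # <p>Value of foo: {{ foo }}</p>, and process_jinja_template() below renders it
    # to 'output-dir'/'output-file' with these keys in scope.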
def process_jinja_template(self):
dirs = self.setting('template-dirs')
loader = FileSystemLoader(dirs)
env = jinja2.Environment(loader=loader)
template = env.get_template(self.setting('template-file'))
template_data = self.template_data()
output_filepath = os.path.join(self.setting('output-dir'), self.setting('output-file'))
template.stream(template_data).dump(output_filepath, encoding="utf-8")
def setup_output_dir(self):
assert self.setting('output-dir'), "output-dir setting must be provided"
assert self.setting('output-dir').startswith('report-'), "output-dir should start with report- prefix"
shutil.rmtree(self.setting('output-dir'), ignore_errors=True)
os.makedirs(self.setting('output-dir'))
def run(self):
self.setup_output_dir()
self.process_jinja_template()
from police_lineups.singletons import Configuration, DB
from .lineup_people import DbLineupPerson
from .lineups import DbLineup
from .people import DbPerson
from .users import DbUser
def init_current_db():
with DB().current as database:
database.create_tables([DbUser, DbPerson, DbLineup, DbLineupPerson])
init_users()
def init_users():
if len(DbUser) == 0:
root_user = DbUser(
is_admin=True,
username=Configuration().root_user.username,
email=Configuration().root_user.default_email,
full_name=Configuration().root_user.default_full_name,
password=Configuration().root_user.default_password
)
root_user.save()
from keboola import docker
cfg = docker.Config('/data/')
params = cfg.get_parameters()
print(params['object'])
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
A function to load our two datasets, returning a merged dataframe
:param messages_filepath: a string with the filepath to the file with the messages. This file has to be a CSV,
and to be in the same folder as our script
:param categories_filepath: a string with the filepath to the file with the categories of each message.
This file has to be a CSV, and to be in the same folder as our script
    :return: a single dataframe with the messages merged with their categories
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on ='id', how='inner')
return df
def clean_data(df):
'''
A function to clean our merged dataframe.
:param df: a merged dataframe of messages and the associated categories
:return: the same dataframe, cleaned
'''
# We isolate the categories from the categories column
categories = df.categories.str.split(';', expand=True)
# We use the first row for column names
row = categories.iloc[0]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
    # We keep only the trailing digit of each category entry and cast it to int
for column in categories:
categories[column] = categories[column].str[-1]
categories[column] = categories[column].astype(int)
# We replace the categories column from our original dataframe with these new columns
df.drop('categories', axis=1, inplace=True)
df = pd.concat([df, categories], axis=1)
# We finish our cleaning by removing duplicates
    # and rows where the "related" category is 2 (values should only be 0 and 1)
df.drop_duplicates(inplace=True)
df = df.loc[df.related != 2,:]
    return df
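# Worked example of the category cleaning above (hypothetical row): a categories
# string such as 'related-1;request-0;offer-0' is split on ';' into columns named
# 'related', 'request' and 'offer', and only the trailing digit of each entry is
# kept and cast to int, so the row becomes related=1, request=0, offer=0.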
def save_data(df, database_filename):
'''
Stores the dataframe to a SQLLite database
:param df: the dataframe which we want to save to a SQLLite database
:param database_filename: the name of the database where we want to store our dataframe
:return: None
'''
engine = create_engine('sqlite:///'+ database_filename)
df.to_sql('messages_categorized', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
    main()
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from textblob.blob import TextBlob
from sklearn.feature_extraction.text import TfidfVectorizer
def relevance_score(tweet: str, max_score: float) -> float:
"""Computes the 'relevance' of a tweet.
    Also uses the TextBlob subjectivity measure to make subjective tweets
more relevant. We don't want to highlight news articles, for example
Args:
tweets (str): Tweet (cleaned)
Returns:
float: Relevance (social importance of the tweets list)
"""
return ((tweet['likes']+tweet['rts'])*tweet['foll']/(max_score))*(
(TextBlob(tweet['text']).subjectivity))
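# Worked example with made-up numbers: for a tweet with 10 likes, 5 retweets,
# 1000 followers and a TextBlob subjectivity of 0.5, with a corpus-wide maximum
# score of 300000 (see max_relevance below), the relevance is
# ((10 + 5) * 1000 / 300000) * 0.5 = 0.025.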
def representatives(tweets: list, n: int, tfidf: TfidfVectorizer) -> list:
"""Returns the representative tweets from a list of them,
They are selected according to their cosine_similarity
to every other tweet.
    The ones with the highest total cosine_similarity to every other tweet are selected
    (thus, they are the ones that hold the most similarity to the whole group,
    i.e. the most representative ones)
Args:
tweets (list): list of tweets (cleaned)
n (int): number of representatives to keep in the returned list
tfidf (TfidfVectorizer): TF-IDF vectorizer
Returns:
list: representatives
"""
tfidf_tweets = tfidf.transform([t['text'] for t in tweets])
tcosine = [(tweets[i], sum([cosine_similarity(tfidf_tweets[i], t2)
for t2 in tfidf_tweets]))
for i in range(0, len(tweets))]
return get_top_tweets(tcosine, n=n)
def get_top_tweets(tweets_w_score: list, n: int) -> list:
"""Get top n players by score
Args:
tweets_w_score (list): List of tuples (tweet(str), score(float))
n (int): Number of tweets to keep as "top" tweets according to
their score
(descending order)
Returns:
list: list of n top scored tweets
"""
indexes = [(i, e[1]) for i, e in enumerate(tweets_w_score)]
top = sorted(indexes, key=lambda x: x[1], reverse=True)[:n]
return list(np.array(
[t[0] for t in tweets_w_score]
)[np.array([ind[0] for ind in top])])
def max_relevance(tweets: list) -> float:
"""Computes the maximum 'relevance' of tweets
Args:
tweets (list): Tweets (cleaned)
Returns:
float: Max possible relevance (social importance of the tweets list)
"""
df = pd.DataFrame(list(tweets), columns=['text', 'rts', 'likes', 'foll'])
return (max(df['likes'])+max(df['rts']))*max(df['foll'])
import glob
import json
from pprint import pprint
# For merging data retrieved from API with data from dataset dumped as json
if __name__ == '__main__':
    raw_data_file = "../asinReviewDict.json"
    with open(raw_data_file) as infile:
        asin_product_dict = json.load(infile)
    directory = "../"
    file_prefix = "partial"
    files = glob.glob(directory + file_prefix + "-*.json")
    for name in files:
        with open(name) as infile:
            asin_new_data = json.load(infile)
        for asin in asin_new_data:
            # pprint(asin_new_data[asin])
            description = asin_new_data[asin]['description']
            salesrank_2018 = asin_new_data[asin]['salesrank_2018']
            asin_product_dict[asin]['description'] = description
            asin_product_dict[asin]['salesrank_2018'] = salesrank_2018
    incomplete_asins = []
    for asin in asin_product_dict:
        if 'salesrank_2018' not in asin_product_dict[asin]:
            # pprint(asin_product_dict[asin])
            incomplete_asins.append(asin)
    for asin in incomplete_asins:
        pprint("deleting asin " + asin)
        del asin_product_dict[asin]
    pprint("Final list length {}".format(len(asin_product_dict)))
    with open('../asinReviewDict_v2_modified.json', 'w') as outfile:
        json.dump(asin_product_dict, outfile)
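# Assumed (hypothetical) shape of the JSON files this script merges: both
# asinReviewDict.json and each partial-*.json map an ASIN string to a metadata
# dict, with the partial files carrying the two fields copied above, e.g.
#
#     {"B000123456": {"description": "...", "salesrank_2018": 1234}, ...}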
"json.dump",
"pprint.pprint",
"glob.glob"
] | [((327, 366), 'glob.glob', 'glob.glob', (["(directory + 'partial-*.json')"], {}), "(directory + 'partial-*.json')\n", (336, 366), False, 'import glob\n'), ((1061, 1092), 'pprint.pprint', 'pprint', (["('deleting asin ' + asin)"], {}), "('deleting asin ' + asin)\n", (1067, 1092), False, 'from pprint import pprint\n'), ((1279, 1316), 'json.dump', 'json.dump', (['asin_product_dict', 'outfile'], {}), '(asin_product_dict, outfile)\n', (1288, 1316), False, 'import json\n')] |
"""
Attitude discipline for CADRE.
"""
from six.moves import range
import numpy as np
from openmdao.api import ExplicitComponent
from CADRE.kinematics import computepositionrotd, computepositionrotdjacobian
class Attitude_Angular(ExplicitComponent):
"""
Calculates angular velocity vector from the satellite's orientation
matrix and its derivative.
"""
def __init__(self, n=2):
super(Attitude_Angular, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('O_BI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to Earth-centered '
'inertial frame over time')
self.add_input('Odot_BI', np.zeros((n, 3, 3)), units=None,
desc='First derivative of O_BI over time')
# Outputs
self.add_output('w_B', np.zeros((n, 3)), units='1/s',
desc='Angular velocity vector in body-fixed frame over time')
self.dw_dOdot = np.zeros((n, 3, 3, 3))
self.dw_dO = np.zeros((n, 3, 3, 3))
row = np.array([1, 1, 1, 2, 2, 2, 0, 0, 0])
col = np.array([6, 7, 8, 0, 1, 2, 3, 4, 5])
rows = np.tile(row, n) + np.repeat(3*np.arange(n), 9)
cols = np.tile(col, n) + np.repeat(9*np.arange(n), 9)
self.declare_partials('w_B', 'O_BI', rows=rows, cols=cols)
self.dw_dOdot = np.zeros((n, 3, 3, 3))
self.dw_dO = np.zeros((n, 3, 3, 3))
row = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])
col = np.array([3, 4, 5, 6, 7, 8, 0, 1, 2])
rows = np.tile(row, n) + np.repeat(3*np.arange(n), 9)
cols = np.tile(col, n) + np.repeat(9*np.arange(n), 9)
self.declare_partials('w_B', 'Odot_BI', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
O_BI = inputs['O_BI']
Odot_BI = inputs['Odot_BI']
w_B = outputs['w_B']
w_B[:, 0] = np.einsum("ij,ij->i", Odot_BI[:, 2, :], O_BI[:, 1, :])
w_B[:, 1] = np.einsum("ij,ij->i", Odot_BI[:, 0, :], O_BI[:, 2, :])
w_B[:, 2] = np.einsum("ij,ij->i", Odot_BI[:, 1, :], O_BI[:, 0, :])
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
O_BI = inputs['O_BI']
Odot_BI = inputs['Odot_BI']
partials['w_B', 'O_BI'] = Odot_BI.flatten()
partials['w_B', 'Odot_BI'] = O_BI.flatten()
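# Minimal stand-alone usage sketch for the component above (illustrative only;
# assumes OpenMDAO >= 2.x, which provides the ExplicitComponent API imported here):
#
#     from openmdao.api import Problem
#     prob = Problem()
#     prob.model.add_subsystem('att', Attitude_Angular(n=3), promotes=['*'])
#     prob.setup()
#     prob['O_BI'] = np.tile(np.eye(3), (3, 1, 1))   # constant orientation
#     prob['Odot_BI'] = np.zeros((3, 3, 3))         # zero rate of change
#     prob.run_model()
#     print(prob['w_B'])                             # expected: all zeros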
class Attitude_AngularRates(ExplicitComponent):
"""
Calculates time derivative of angular velocity vector.
This time derivative is a central difference at interior time points, and forward/backward
at the start and end time.
"""
def __init__(self, n=2, h=28.8):
super(Attitude_AngularRates, self).__init__()
self.n = n
self.h = h
def setup(self):
n = self.n
h = self.h
# Inputs
self.add_input('w_B', np.zeros((n, 3)), units='1/s',
desc='Angular velocity vector in body-fixed frame over time')
# Outputs
self.add_output('wdot_B', np.zeros((n, 3)), units='1/s**2',
desc='Time derivative of w_B over time')
# Upper and Lower Diag
row1 = np.arange(3*(n-1))
col1 = row1 + 3
val1a = 0.5 * np.ones(3*(n-1))
val1a[:3] = 1.0
val1b = 0.5 * np.ones(3*(n-1))
val1b[-3:] = 1.0
val1a *= (1.0 / h)
val1b *= (1.0 / h)
# Central Diag
row_col2 = np.array((0, 1, 2, 3*n-3, 3*n-2, 3*n-1))
val2 = np.array((-1.0, -1.0, -1.0, 1.0, 1.0, 1.0)) * (1.0 / h)
rows = np.concatenate((row1, col1, row_col2))
cols = np.concatenate((col1, row1, row_col2))
val = np.concatenate((val1a, -val1b, val2))
self.declare_partials('wdot_B', 'w_B', rows=rows, cols=cols, val=val)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
h = self.h
w_B = inputs['w_B']
wdot_B = outputs['wdot_B']
wdot_B[0, :] = w_B[1, :] - w_B[0, :]
wdot_B[1:-1, :] = (w_B[2:, :] - w_B[:-2, :]) * 0.5
wdot_B[-1, :] = w_B[-1, :] - w_B[-2, :]
wdot_B *= 1.0 / h
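        # Written out, the stencil implemented above is (with time step h):
        #   wdot[0]   = (w[1]   - w[0]  ) / h         forward difference at the start
        #   wdot[i]   = (w[i+1] - w[i-1]) / (2*h)     central difference for 0 < i < n-1
        #   wdot[n-1] = (w[n-1] - w[n-2]) / h         backward difference at the end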
class Attitude_Attitude(ExplicitComponent):
"""
Coordinate transformation from the inertial plane to the rolled
(forward facing) plane.
"""
dvx_dv = np.zeros((3, 3, 3))
dvx_dv[0, :, 0] = (0., 0., 0.)
dvx_dv[1, :, 0] = (0., 0., -1.)
dvx_dv[2, :, 0] = (0., 1., 0.)
dvx_dv[0, :, 1] = (0., 0., 1.)
dvx_dv[1, :, 1] = (0., 0., 0.)
dvx_dv[2, :, 1] = (-1., 0., 0.)
dvx_dv[0, :, 2] = (0., -1., 0.)
dvx_dv[1, :, 2] = (1., 0., 0.)
dvx_dv[2, :, 2] = (0., 0., 0.)
def __init__(self, n=2):
super(Attitude_Attitude, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('r_e2b_I', np.zeros((n, 6)), units=None,
desc='Position and velocity vector from earth to satellite in '
'Earth-centered inertial frame over time')
# Outputs
self.add_output('O_RI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from rolled body-fixed frame to '
'Earth-centered inertial frame over time')
row1 = np.repeat(np.arange(3), 3)
col1 = np.tile(np.arange(3), 3)
col2 = col1 + 3
row3 = row1 + 3
row5 = row1 + 6
row = np.concatenate([row1, row1, row3, row3, row5])
col = np.concatenate([col1, col2, col1, col2, col2])
rows = np.tile(row, n) + np.repeat(9*np.arange(n), 45)
cols = np.tile(col, n) + np.repeat(6*np.arange(n), 45)
self.declare_partials('O_RI', 'r_e2b_I', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
r_e2b_I = inputs['r_e2b_I']
O_RI = outputs['O_RI']
O_RI[:] = np.zeros(O_RI.shape)
for i in range(0, self.n):
r = r_e2b_I[i, 0:3]
v = r_e2b_I[i, 3:]
normr = np.sqrt(np.dot(r, r))
normv = np.sqrt(np.dot(v, v))
# Prevent overflow
if normr < 1e-10:
normr = 1e-10
if normv < 1e-10:
normv = 1e-10
r = r / normr
v = v / normv
vx = np.zeros((3, 3))
vx[0, :] = (0., -v[2], v[1])
vx[1, :] = (v[2], 0., -v[0])
vx[2, :] = (-v[1], v[0], 0.)
iB = np.dot(vx, r)
jB = -np.dot(vx, iB)
O_RI[i, 0, :] = iB
O_RI[i, 1, :] = jB
O_RI[i, 2, :] = -v
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
r_e2b_I = inputs['r_e2b_I']
diB_dv = np.zeros((3, 3))
djB_dv = np.zeros((3, 3))
for i in range(0, self.n):
r = r_e2b_I[i, 0:3]
v = r_e2b_I[i, 3:]
normr = np.sqrt(np.dot(r, r))
normv = np.sqrt(np.dot(v, v))
# Prevent overflow
if normr < 1e-10:
normr = 1e-10
if normv < 1e-10:
normv = 1e-10
r = r / normr
v = v / normv
dr_dr = np.zeros((3, 3))
dv_dv = np.zeros((3, 3))
for k in range(0, 3):
dr_dr[k, k] += 1.0 / normr
dv_dv[k, k] += 1.0 / normv
dr_dr[:, k] -= r * r_e2b_I[i, k] / normr ** 2
dv_dv[:, k] -= v * r_e2b_I[i, 3 + k] / normv ** 2
vx = np.zeros((3, 3))
vx[0, :] = (0., -v[2], v[1])
vx[1, :] = (v[2], 0., -v[0])
vx[2, :] = (-v[1], v[0], 0.)
iB = np.dot(vx, r)
diB_dr = vx
diB_dv[:, 0] = np.dot(self.dvx_dv[:, :, 0], r)
diB_dv[:, 1] = np.dot(self.dvx_dv[:, :, 1], r)
diB_dv[:, 2] = np.dot(self.dvx_dv[:, :, 2], r)
djB_diB = -vx
djB_dv[:, 0] = -np.dot(self.dvx_dv[:, :, 0], iB)
djB_dv[:, 1] = -np.dot(self.dvx_dv[:, :, 1], iB)
djB_dv[:, 2] = -np.dot(self.dvx_dv[:, :, 2], iB)
n0 = i*45
partials['O_RI', 'r_e2b_I'][n0:n0+9] = np.dot(diB_dr, dr_dr).flatten()
partials['O_RI', 'r_e2b_I'][n0+9:n0+18] = np.dot(diB_dv, dv_dv).flatten()
partials['O_RI', 'r_e2b_I'][n0+18:n0+27] = np.dot(np.dot(djB_diB, diB_dr), dr_dr).flatten()
partials['O_RI', 'r_e2b_I'][n0+27:n0+36] = np.dot(np.dot(djB_diB, diB_dv) + djB_dv, dv_dv).flatten()
partials['O_RI', 'r_e2b_I'][n0+36:n0+45] = -dv_dv.flatten()
class Attitude_Roll(ExplicitComponent):
"""
Calculates the body-fixed orientation matrix.
"""
def __init__(self, n=2):
super(Attitude_Roll, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('Gamma', np.zeros(n), units='rad',
desc='Satellite roll angle over time')
# Outputs
        self.add_output('O_BR', np.zeros((n, 3, 3)), units=None,
                        desc='Rotation matrix from body-fixed frame to rolled '
                             'body-fixed frame over time')
rows = np.tile(9*np.arange(n), 4) + np.repeat(np.array([0, 1, 3, 4]), n)
cols = np.tile(np.arange(n), 4)
self.declare_partials('O_BR', 'Gamma', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
Gamma = inputs['Gamma']
O_BR = outputs['O_BR']
O_BR[:] = np.zeros((self.n, 3, 3))
O_BR[:, 0, 0] = np.cos(Gamma)
O_BR[:, 0, 1] = np.sin(Gamma)
O_BR[:, 1, 0] = -O_BR[:, 0, 1]
O_BR[:, 1, 1] = O_BR[:, 0, 0]
O_BR[:, 2, 2] = np.ones(self.n)
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
Gamma = inputs['Gamma']
sin_gam = np.sin(Gamma)
cos_gam = np.cos(Gamma)
partials['O_BR', 'Gamma'][:n] = -sin_gam
partials['O_BR', 'Gamma'][n:2*n] = cos_gam
partials['O_BR', 'Gamma'][2*n:3*n] = -cos_gam
partials['O_BR', 'Gamma'][3*n:4*n] = -sin_gam
class Attitude_RotationMtx(ExplicitComponent):
"""
Multiplies transformations to produce the orientation matrix of the
body frame with respect to inertial.
"""
def __init__(self, n=2):
super(Attitude_RotationMtx, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('O_BR', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to rolled '
'body-fixed frame over time')
self.add_input('O_RI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from rolled body-fixed '
'frame to Earth-centered inertial frame over time')
# Outputs
self.add_output('O_BI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to '
'Earth-centered inertial frame over time')
row = np.repeat(np.arange(3), 3)
row1 = np.tile(row, n) + np.repeat(9*np.arange(n), 9)
col = np.tile(np.arange(3), 3)
col1 = np.tile(col, n) + np.repeat(9*np.arange(n), 9)
# Transpose here instead of in compute_partials
rows = np.concatenate([col1, col1+3, col1+6])
cols = np.concatenate([row1, row1+3, row1+6])
self.declare_partials('O_BI', 'O_BR', rows=rows, cols=cols)
row = np.repeat(3*np.arange(3), 3)
row1 = np.tile(row, n) + np.repeat(9*np.arange(n), 9)
col = np.tile(np.array([0, 3, 6]), 3)
col1 = np.tile(col, n) + np.repeat(9*np.arange(n), 9)
rows = np.concatenate([row1, row1+1, row1+2])
cols = np.concatenate([col1, col1+1, col1+2])
self.declare_partials('O_BI', 'O_RI', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
O_BR = inputs['O_BR']
O_RI = inputs['O_RI']
O_BI = outputs['O_BI']
for i in range(0, self.n):
O_BI[i, :, :] = np.dot(O_BR[i, :, :], O_RI[i, :, :])
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
O_BR = inputs['O_BR'].flatten()
O_RI = inputs['O_RI'].flatten()
nn = 9*n
dO_BR = O_RI
partials['O_BI', 'O_BR'][:nn] = dO_BR
partials['O_BI', 'O_BR'][nn:2*nn] = dO_BR
partials['O_BI', 'O_BR'][2*nn:3*nn] = dO_BR
dO_RI = O_BR
partials['O_BI', 'O_RI'][:nn] = dO_RI
partials['O_BI', 'O_RI'][nn:2*nn] = dO_RI
partials['O_BI', 'O_RI'][2*nn:3*nn] = dO_RI
class Attitude_RotationMtxRates(ExplicitComponent):
"""
Calculates time derivative of body frame orientation matrix.
"""
def __init__(self, n=2, h=28.2):
super(Attitude_RotationMtxRates, self).__init__()
self.n = n
self.h = h
def setup(self):
n = self.n
h = self.h
# Inputs
self.add_input('O_BI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to Earth-centered '
'inertial frame over time')
# Outputs
self.add_output('Odot_BI', np.zeros((n, 3, 3)), units=None,
desc='First derivative of O_BI over time')
base1 = np.arange(9)
base2 = np.arange(9*(n - 2))
val1 = np.ones((9, )) / h
val2 = np.ones((9*(n - 2), )) / (2.0 * h)
nn = 9*(n - 1)
rows = np.concatenate([base1, base1, base2+9, base2+9, base1+nn, base1+nn])
cols = np.concatenate([base1+9, base1, base2+18, base2, base1+nn, base1+nn-9])
vals = np.concatenate([val1, -val1, val2, -val2, val1, -val1])
self.declare_partials('Odot_BI', 'O_BI', rows=rows, cols=cols, val=vals)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
O_BI = inputs['O_BI']
h = self.h
Odot_BI = outputs['Odot_BI']
Odot_BI[0, :, :] = O_BI[1, :, :]
Odot_BI[0, :, :] -= O_BI[0, :, :]
Odot_BI[1:-1, :, :] = O_BI[2:, :, :] / 2.0
Odot_BI[1:-1, :, :] -= O_BI[:-2, :, :] / 2.0
Odot_BI[-1, :, :] = O_BI[-1, :, :]
Odot_BI[-1, :, :] -= O_BI[-2, :, :]
Odot_BI *= 1.0/h
class Attitude_Sideslip(ExplicitComponent):
"""
Determine velocity in the body frame.
"""
def __init__(self, n=2):
super(Attitude_Sideslip, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('r_e2b_I', np.zeros((n, 6)), units=None,
desc='Position and velocity vector from earth to satellite '
'in Earth-centered inertial frame over time')
self.add_input('O_BI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to '
'Earth-centered inertial frame over time')
# Outputs
self.add_output('v_e2b_B', np.zeros((n, 3)), units='m/s',
desc='Velocity vector from earth to satellite'
'in body-fixed frame over time')
row = np.tile(np.array([0]), 9) + np.repeat(np.arange(3), 3)
rows = np.tile(row, n) + np.repeat(3*np.arange(n), 9)
col = np.tile(np.array([3, 4, 5]), 3)
cols = np.tile(col, n) + np.repeat(6*np.arange(n), 9)
self.declare_partials('v_e2b_B', 'r_e2b_I', rows=rows, cols=cols)
row = np.tile(np.array([0, 0, 0]), n) + np.repeat(3*np.arange(n), 3)
col = np.tile(np.arange(3), n) + np.repeat(9*np.arange(n), 3)
rows = np.concatenate([row, row+1, row+2])
cols = np.concatenate([col, col+3, col+6])
self.declare_partials('v_e2b_B', 'O_BI', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
r_e2b_I = inputs['r_e2b_I']
O_BI = inputs['O_BI']
v_e2b_B = outputs['v_e2b_B']
v_e2b_B[:] = np.einsum('kij,kj->ki', O_BI, r_e2b_I[:, 3:])
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
r_e2b_I = inputs['r_e2b_I'][:, 3:]
O_BI = inputs['O_BI']
partials['v_e2b_B', 'r_e2b_I'] = O_BI.flatten()
nn = 3*n
dO_BI = r_e2b_I.flatten()
partials['v_e2b_B', 'O_BI'][:nn] = dO_BI
partials['v_e2b_B', 'O_BI'][nn:2*nn] = dO_BI
partials['v_e2b_B', 'O_BI'][2*nn:3*nn] = dO_BI
class Attitude_Torque(ExplicitComponent):
"""
    Compute the required reaction wheel torque.
"""
J = np.zeros((3, 3))
J[0, :] = (0.018, 0., 0.)
J[1, :] = (0., 0.018, 0.)
J[2, :] = (0., 0., 0.006)
def __init__(self, n=2):
super(Attitude_Torque, self).__init__()
self.n = n
self.dwx_dw = np.zeros((3, 3, 3))
self.dwx_dw[0, :, 0] = (0., 0., 0.)
self.dwx_dw[1, :, 0] = (0., 0., -1.)
self.dwx_dw[2, :, 0] = (0., 1., 0.)
self.dwx_dw[0, :, 1] = (0., 0., 1.)
self.dwx_dw[1, :, 1] = (0., 0., 0.)
self.dwx_dw[2, :, 1] = (-1., 0, 0.)
self.dwx_dw[0, :, 2] = (0., -1., 0)
self.dwx_dw[1, :, 2] = (1., 0., 0.)
self.dwx_dw[2, :, 2] = (0., 0., 0.)
def setup(self):
n = self.n
# Inputs
self.add_input('w_B', np.zeros((n, 3)), units='1/s',
desc='Angular velocity in body-fixed frame over time')
self.add_input('wdot_B', np.zeros((n, 3)), units='1/s**2',
desc='Time derivative of w_B over time')
# Outputs
self.add_output('T_tot', np.zeros((n, 3)), units='N*m',
desc='Total reaction wheel torque over time')
rows = np.tile(np.array([0, 0, 0]), 3*n) + np.repeat(np.arange(3*n), 3)
col = np.tile(np.array([0, 1, 2]), 3)
cols = np.tile(col, n) + np.repeat(3*np.arange(n), 9)
self.declare_partials('T_tot', 'w_B', rows=rows, cols=cols)
self.declare_partials('T_tot', 'wdot_B', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
w_B = inputs['w_B']
wdot_B = inputs['wdot_B']
T_tot = outputs['T_tot']
wx = np.zeros((3, 3))
for i in range(0, self.n):
wx[0, :] = (0., -w_B[i, 2], w_B[i, 1])
wx[1, :] = (w_B[i, 2], 0., -w_B[i, 0])
wx[2, :] = (-w_B[i, 1], w_B[i, 0], 0.)
T_tot[i, :] = np.dot(self.J, wdot_B[i, :]) + \
np.dot(wx, np.dot(self.J, w_B[i, :]))
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
w_B = inputs['w_B']
dT_dw = np.zeros((self.n, 3, 3))
dT_dwdot = np.zeros((self.n, 3, 3))
wx = np.zeros((3, 3))
for i in range(0, self.n):
wx[0, :] = (0., -w_B[i, 2], w_B[i, 1])
wx[1, :] = (w_B[i, 2], 0., -w_B[i, 0])
wx[2, :] = (-w_B[i, 1], w_B[i, 0], 0.)
dT_dwdot[i, :, :] = self.J
dT_dw[i, :, :] = np.dot(wx, self.J)
for k in range(0, 3):
dT_dw[i, :, k] += np.dot(self.dwx_dw[:, :, k], np.dot(self.J, w_B[i, :]))
partials['T_tot', 'w_B'] = dT_dw.flatten()
partials['T_tot', 'wdot_B'] = dT_dwdot.flatten()
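# Sketch of a stand-alone check of the analytic partials above (illustrative only;
# assumes OpenMDAO >= 2.x and reuses the numpy import at the top of the module):
#
#     from openmdao.api import Problem
#     prob = Problem()
#     prob.model.add_subsystem('torque', Attitude_Torque(n=4), promotes=['*'])
#     prob.setup()
#     prob['w_B'] = np.random.rand(4, 3)
#     prob['wdot_B'] = np.random.rand(4, 3)
#     prob.run_model()
#     prob.check_partials(compact_print=True)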
"""
ResNeXt is introduced in https://arxiv.org/abs/1611.05431
Implementation is inspired by https://github.com/prlz77/ResNeXt.pytorch
"""
from __future__ import division
from keras import backend as K
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Activation
from keras.layers.merge import add
from keras.layers.merge import concatenate
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def Conv2DGroup(filters, kernel_size, groups, strides=(1, 1), padding='valid',
use_bias=True,
kernel_initializer='glorot_uniform',
kernel_regularizer=None, **kwargs):
def get_slice(x, i, size):
return x[:, :, :, size * i: size * (i + 1)]
def f(inputs):
split_filter = filters // groups
split_input = K.int_shape(inputs)[-1] // groups
output_convs = []
if groups > 1:
for i in range(groups):
_slice = Lambda(get_slice,
arguments={'i': i,
'size': split_input})(inputs)
output_convs.append(Conv2D(filters=split_filter, kernel_size=kernel_size,
strides=strides, padding=padding, use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
**kwargs)(_slice))
outputs = concatenate(output_convs)
else:
outputs = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding, use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
**kwargs)(inputs)
return outputs
return f
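# Usage sketch (illustrative): a 3x3 grouped convolution with 4 groups applied to a
# 16-channel feature map; each group convolves 4 input channels with 8 filters and the
# results are concatenated. With groups=1 the helper falls back to a single Conv2D.
#
#     x = Input(shape=(32, 32, 16))
#     y = Conv2DGroup(filters=32, kernel_size=3, groups=4, padding='same')(x)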
def bottleneck_c(filters, strides, cardinality, base_width, widen_factor):
width_ratio = filters / (widen_factor * 64.)
D = cardinality * int(base_width * width_ratio)
def f(inputs):
conv_reduce = Conv2D(filters=D, kernel_size=1, strides=1, padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))(inputs)
bn_reduce = BatchNormalization(axis=CHANNEL_AXIS)(conv_reduce)
bn_reduce = Activation('relu')(bn_reduce)
conv_conv = Conv2DGroup(filters=D, groups=cardinality, kernel_size=3, strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))(bn_reduce)
norm = BatchNormalization(axis=CHANNEL_AXIS)(conv_conv)
norm = Activation('relu')(norm)
conv_expand = Conv2D(filters=filters, kernel_size=1, strides=1, padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))(norm)
bn_expand = BatchNormalization(axis=CHANNEL_AXIS)(conv_expand)
if K.int_shape(inputs)[CHANNEL_AXIS] != filters:
shortcut = Conv2D(filters=filters, kernel_size=1, strides=strides, padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))(inputs)
residual = BatchNormalization(axis=CHANNEL_AXIS)(shortcut)
return Activation('relu')(add([residual, bn_expand]))
else:
return Activation('relu')(bn_expand)
return f
class ResNeXt(object):
def __init__(self, input_shape, num_class, cardinality, depth, base_width, widen_factor=4):
self.input_shape = input_shape
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.base_width = base_width
self.widen_factor = widen_factor
self.num_class = num_class
self.filters = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
def build(self):
_handle_dim_ordering()
inputs = Input(shape=self.input_shape)
conv_1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same',
use_bias=True,
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))(inputs)
bn_1 = BatchNormalization(axis=CHANNEL_AXIS)(conv_1)
init_block = Activation('relu')(bn_1)
stage1 = self.block(self.stages[1], 1)(init_block)
stage2 = self.block(self.stages[2], 2)(stage1)
stage3 = self.block(self.stages[3], 2)(stage2)
avg_pool = AveragePooling2D(pool_size=8, strides=1)(stage3)
flatten = Flatten()(avg_pool)
dense = Dense(units=self.num_class, kernel_initializer='he_normal',
activation='linear')(flatten)
model = Model(inputs=inputs, outputs=dense)
return model
def block(self, filters, strides=2):
def f(inputs):
for bottleneck in range(self.block_depth):
if bottleneck == 0:
block = bottleneck_c(filters, strides, self.cardinality, self.base_width,
self.widen_factor)(inputs)
else:
block = bottleneck_c(filters, 1, self.cardinality, self.base_width,
self.widen_factor)(block)
return block
        return f
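# Example configuration (illustrative): a CIFAR-style ResNeXt-29 (8x64d) for 32x32 RGB
# inputs and 10 classes; depth is typically chosen so that (depth - 2) is divisible by 9.
#
#     net = ResNeXt(input_shape=(32, 32, 3), num_class=10,
#                   cardinality=8, depth=29, base_width=64)
#     model = net.build()
#     model.summary()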
"keras.layers.Flatten",
"keras.layers.merge.concatenate",
"keras.layers.normalization.BatchNormalization",
"keras.backend.image_dim_ordering",
"keras.layers.core.Lambda",
"keras.layers.merge.add",
"keras.layers.Input",
"keras.layers.convolutional.Conv2D",
"keras.models.Model",
"keras.layers.Activation",
"keras.regularizers.l2",
"keras.layers.Dense",
"keras.backend.int_shape",
"keras.layers.pooling.AveragePooling2D"
] | [((817, 839), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (837, 839), True, 'from keras import backend as K\n'), ((4982, 5011), 'keras.layers.Input', 'Input', ([], {'shape': 'self.input_shape'}), '(shape=self.input_shape)\n', (4987, 5011), False, 'from keras.layers import Input\n'), ((5783, 5818), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'dense'}), '(inputs=inputs, outputs=dense)\n', (5788, 5818), False, 'from keras.models import Model\n'), ((2070, 2095), 'keras.layers.merge.concatenate', 'concatenate', (['output_convs'], {}), '(output_convs)\n', (2081, 2095), False, 'from keras.layers.merge import concatenate\n'), ((2970, 3007), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (2988, 3007), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3041, 3059), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3051, 3059), False, 'from keras.layers import Activation\n'), ((3415, 3452), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (3433, 3452), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3479, 3497), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3489, 3497), False, 'from keras.layers import Activation\n'), ((3784, 3821), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (3802, 3821), False, 'from keras.layers.normalization import BatchNormalization\n'), ((5261, 5298), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (5279, 5298), False, 'from keras.layers.normalization import BatchNormalization\n'), ((5328, 5346), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5338, 5346), False, 'from keras.layers import Activation\n'), ((5551, 5591), 'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(8)', 'strides': '(1)'}), '(pool_size=8, strides=1)\n', (5567, 5591), False, 'from keras.layers.pooling import AveragePooling2D\n'), ((5618, 5627), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5625, 5627), False, 'from keras.layers import Flatten\n'), ((5654, 5739), 'keras.layers.Dense', 'Dense', ([], {'units': 'self.num_class', 'kernel_initializer': '"""he_normal"""', 'activation': '"""linear"""'}), "(units=self.num_class, kernel_initializer='he_normal', activation='linear'\n )\n", (5659, 5739), False, 'from keras.layers import Dense\n'), ((1378, 1397), 'keras.backend.int_shape', 'K.int_shape', (['inputs'], {}), '(inputs)\n', (1389, 1397), True, 'from keras import backend as K\n'), ((2132, 2330), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': 'padding', 'use_bias': 'use_bias', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), '(filters=filters, kernel_size=kernel_size, strides=strides, padding=\n padding, use_bias=use_bias, kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer, **kwargs)\n', (2138, 2330), False, 'from keras.layers.convolutional import Conv2D\n'), ((3848, 3867), 'keras.backend.int_shape', 'K.int_shape', (['inputs'], {}), '(inputs)\n', (3859, 3867), True, 'from keras import backend as K\n'), 
((4185, 4222), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (4203, 4222), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4253, 4271), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4263, 4271), False, 'from keras.layers import Activation\n'), ((4272, 4298), 'keras.layers.merge.add', 'add', (['[residual, bn_expand]'], {}), '([residual, bn_expand])\n', (4275, 4298), False, 'from keras.layers.merge import add\n'), ((4334, 4352), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4344, 4352), False, 'from keras.layers import Activation\n'), ((1523, 1581), 'keras.layers.core.Lambda', 'Lambda', (['get_slice'], {'arguments': "{'i': i, 'size': split_input}"}), "(get_slice, arguments={'i': i, 'size': split_input})\n", (1529, 1581), False, 'from keras.layers.core import Lambda\n'), ((2932, 2942), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2934, 2942), False, 'from keras.regularizers import l2\n'), ((3379, 3389), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (3381, 3389), False, 'from keras.regularizers import l2\n'), ((3748, 3758), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (3750, 3758), False, 'from keras.regularizers import l2\n'), ((5228, 5238), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (5230, 5238), False, 'from keras.regularizers import l2\n'), ((1698, 1901), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': 'split_filter', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': 'padding', 'use_bias': 'use_bias', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), '(filters=split_filter, kernel_size=kernel_size, strides=strides,\n padding=padding, use_bias=use_bias, kernel_initializer=\n kernel_initializer, kernel_regularizer=kernel_regularizer, **kwargs)\n', (1704, 1901), False, 'from keras.layers.convolutional import Conv2D\n'), ((4144, 4154), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (4146, 4154), False, 'from keras.regularizers import l2\n')] |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class cpu_util_process_history(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-RAS-operational - based on the path /cpu-state/history/cpu-util-process-history. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: CPU utilization histogram per process
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cpu_process_id','__cpu_process_name','__cpu_util_current','__cpu_util_1m','__cpu_util_5m','__cpu_util_15m','__cpu_util_1h','__cpu_util_5h','__cpu_util_24h','__cpu_util_72h',)
_yang_name = 'cpu-util-process-history'
_rest_name = 'cpu-util-process-history'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__cpu_util_1m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1m", rest_name="cpu-util-1m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_current = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-current", rest_name="cpu-util-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_1h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1h", rest_name="cpu-util-1h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_process_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="cpu-process-name", rest_name="cpu-process-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='string', is_config=False)
self.__cpu_process_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cpu-process-id", rest_name="cpu-process-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='uint32', is_config=False)
self.__cpu_util_72h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-72h", rest_name="cpu-util-72h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_15m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-15m", rest_name="cpu-util-15m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_5h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5h", rest_name="cpu-util-5h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_5m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5m", rest_name="cpu-util-5m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
self.__cpu_util_24h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-24h", rest_name="cpu-util-24h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'cpu-state', u'history', u'cpu-util-process-history']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'cpu-state', u'history', u'cpu-util-process-history']
def _get_cpu_process_id(self):
"""
Getter method for cpu_process_id, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_process_id (uint32)
YANG Description: Process ID
"""
return self.__cpu_process_id
def _set_cpu_process_id(self, v, load=False):
"""
Setter method for cpu_process_id, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_process_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_process_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_process_id() directly.
YANG Description: Process ID
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cpu-process-id", rest_name="cpu-process-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_process_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cpu-process-id", rest_name="cpu-process-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='uint32', is_config=False)""",
})
self.__cpu_process_id = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_process_id(self):
self.__cpu_process_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cpu-process-id", rest_name="cpu-process-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='uint32', is_config=False)
def _get_cpu_process_name(self):
"""
Getter method for cpu_process_name, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_process_name (string)
YANG Description: Process name
"""
return self.__cpu_process_name
def _set_cpu_process_name(self, v, load=False):
"""
Setter method for cpu_process_name, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_process_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_process_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_process_name() directly.
YANG Description: Process name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="cpu-process-name", rest_name="cpu-process-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_process_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="cpu-process-name", rest_name="cpu-process-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='string', is_config=False)""",
})
self.__cpu_process_name = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_process_name(self):
self.__cpu_process_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="cpu-process-name", rest_name="cpu-process-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='string', is_config=False)
def _get_cpu_util_current(self):
"""
Getter method for cpu_util_current, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_current (decimal64)
YANG Description: Current CPU utilization of the process
"""
return self.__cpu_util_current
def _set_cpu_util_current(self, v, load=False):
"""
Setter method for cpu_util_current, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_current (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_current is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_current() directly.
YANG Description: Current CPU utilization of the process
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-current", rest_name="cpu-util-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_current must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-current", rest_name="cpu-util-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_current = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_current(self):
self.__cpu_util_current = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-current", rest_name="cpu-util-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_1m(self):
"""
Getter method for cpu_util_1m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_1m (decimal64)
YANG Description: CPU utilization of the process in the last 1 minute
"""
return self.__cpu_util_1m
def _set_cpu_util_1m(self, v, load=False):
"""
Setter method for cpu_util_1m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_1m (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_1m is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_1m() directly.
YANG Description: CPU utilization of the process in the last 1 minute
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1m", rest_name="cpu-util-1m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_1m must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1m", rest_name="cpu-util-1m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_1m = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_1m(self):
self.__cpu_util_1m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1m", rest_name="cpu-util-1m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_5m(self):
"""
Getter method for cpu_util_5m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_5m (decimal64)
    YANG Description: CPU utilization of the process in the last 5 minutes
"""
return self.__cpu_util_5m
def _set_cpu_util_5m(self, v, load=False):
"""
Setter method for cpu_util_5m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_5m (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_5m is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_5m() directly.
    YANG Description: CPU utilization of the process in the last 5 minutes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5m", rest_name="cpu-util-5m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_5m must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5m", rest_name="cpu-util-5m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_5m = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_5m(self):
self.__cpu_util_5m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5m", rest_name="cpu-util-5m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_15m(self):
"""
Getter method for cpu_util_15m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_15m (decimal64)
    YANG Description: CPU utilization of the process in the last 15 minutes
"""
return self.__cpu_util_15m
def _set_cpu_util_15m(self, v, load=False):
"""
Setter method for cpu_util_15m, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_15m (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_15m is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_15m() directly.
    YANG Description: CPU utilization of the process in the last 15 minutes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-15m", rest_name="cpu-util-15m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_15m must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-15m", rest_name="cpu-util-15m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_15m = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_15m(self):
self.__cpu_util_15m = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-15m", rest_name="cpu-util-15m", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_1h(self):
"""
Getter method for cpu_util_1h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_1h (decimal64)
YANG Description: CPU utilization of the process in the last 1 hour
"""
return self.__cpu_util_1h
def _set_cpu_util_1h(self, v, load=False):
"""
Setter method for cpu_util_1h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_1h (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_1h is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_1h() directly.
YANG Description: CPU utilization of the process in the last 1 hour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1h", rest_name="cpu-util-1h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_1h must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1h", rest_name="cpu-util-1h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_1h = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_1h(self):
self.__cpu_util_1h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-1h", rest_name="cpu-util-1h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_5h(self):
"""
Getter method for cpu_util_5h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_5h (decimal64)
    YANG Description: CPU utilization of the process in the last 5 hours
"""
return self.__cpu_util_5h
def _set_cpu_util_5h(self, v, load=False):
"""
Setter method for cpu_util_5h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_5h (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_5h is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_5h() directly.
    YANG Description: CPU utilization of the process in the last 5 hours
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5h", rest_name="cpu-util-5h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_5h must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5h", rest_name="cpu-util-5h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_5h = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_5h(self):
self.__cpu_util_5h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-5h", rest_name="cpu-util-5h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_24h(self):
"""
Getter method for cpu_util_24h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_24h (decimal64)
    YANG Description: CPU utilization of the process in the last 24 hours
"""
return self.__cpu_util_24h
def _set_cpu_util_24h(self, v, load=False):
"""
Setter method for cpu_util_24h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_24h (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_24h is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_24h() directly.
    YANG Description: CPU utilization of the process in the last 24 hours
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-24h", rest_name="cpu-util-24h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_24h must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-24h", rest_name="cpu-util-24h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_24h = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_24h(self):
self.__cpu_util_24h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-24h", rest_name="cpu-util-24h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
def _get_cpu_util_72h(self):
"""
Getter method for cpu_util_72h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_72h (decimal64)
    YANG Description: CPU utilization of the process in the last 72 hours
"""
return self.__cpu_util_72h
def _set_cpu_util_72h(self, v, load=False):
"""
Setter method for cpu_util_72h, mapped from YANG variable /cpu_state/history/cpu_util_process_history/cpu_util_72h (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_util_72h is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_util_72h() directly.
    YANG Description: CPU utilization of the process in the last 72 hours
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-72h", rest_name="cpu-util-72h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_util_72h must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-72h", rest_name="cpu-util-72h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)""",
})
self.__cpu_util_72h = t
if hasattr(self, '_set'):
self._set()
def _unset_cpu_util_72h(self):
self.__cpu_util_72h = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="cpu-util-72h", rest_name="cpu-util-72h", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='decimal64', is_config=False)
cpu_process_id = __builtin__.property(_get_cpu_process_id)
cpu_process_name = __builtin__.property(_get_cpu_process_name)
cpu_util_current = __builtin__.property(_get_cpu_util_current)
cpu_util_1m = __builtin__.property(_get_cpu_util_1m)
cpu_util_5m = __builtin__.property(_get_cpu_util_5m)
cpu_util_15m = __builtin__.property(_get_cpu_util_15m)
cpu_util_1h = __builtin__.property(_get_cpu_util_1h)
cpu_util_5h = __builtin__.property(_get_cpu_util_5h)
cpu_util_24h = __builtin__.property(_get_cpu_util_24h)
cpu_util_72h = __builtin__.property(_get_cpu_util_72h)
_pyangbind_elements = {'cpu_process_id': cpu_process_id, 'cpu_process_name': cpu_process_name, 'cpu_util_current': cpu_util_current, 'cpu_util_1m': cpu_util_1m, 'cpu_util_5m': cpu_util_5m, 'cpu_util_15m': cpu_util_15m, 'cpu_util_1h': cpu_util_1h, 'cpu_util_5h': cpu_util_5h, 'cpu_util_24h': cpu_util_24h, 'cpu_util_72h': cpu_util_72h, }
| [
"pyangbind.lib.yangtypes.YANGDynClass",
"__builtin__.property",
"pyangbind.lib.yangtypes.RestrictedClassType",
"pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType"
] | [((30841, 30882), '__builtin__.property', '__builtin__.property', (['_get_cpu_process_id'], {}), '(_get_cpu_process_id)\n', (30861, 30882), False, 'import __builtin__\n'), ((30904, 30947), '__builtin__.property', '__builtin__.property', (['_get_cpu_process_name'], {}), '(_get_cpu_process_name)\n', (30924, 30947), False, 'import __builtin__\n'), ((30969, 31012), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_current'], {}), '(_get_cpu_util_current)\n', (30989, 31012), False, 'import __builtin__\n'), ((31029, 31067), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_1m'], {}), '(_get_cpu_util_1m)\n', (31049, 31067), False, 'import __builtin__\n'), ((31084, 31122), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_5m'], {}), '(_get_cpu_util_5m)\n', (31104, 31122), False, 'import __builtin__\n'), ((31140, 31179), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_15m'], {}), '(_get_cpu_util_15m)\n', (31160, 31179), False, 'import __builtin__\n'), ((31196, 31234), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_1h'], {}), '(_get_cpu_util_1h)\n', (31216, 31234), False, 'import __builtin__\n'), ((31251, 31289), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_5h'], {}), '(_get_cpu_util_5h)\n', (31271, 31289), False, 'import __builtin__\n'), ((31307, 31346), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_24h'], {}), '(_get_cpu_util_24h)\n', (31327, 31346), False, 'import __builtin__\n'), ((31364, 31403), '__builtin__.property', '__builtin__.property', (['_get_cpu_util_72h'], {}), '(_get_cpu_util_72h)\n', (31384, 31403), False, 'import __builtin__\n'), ((3244, 3597), 'pyangbind.lib.yangtypes.YANGDynClass', 'YANGDynClass', ([], {'base': 'unicode', 'is_leaf': '(True)', 'yang_name': '"""cpu-process-name"""', 'rest_name': '"""cpu-process-name"""', 'parent': 'self', 'path_helper': 'self._path_helper', 'extmethods': 'self._extmethods', 'register_paths': '(True)', 'namespace': '"""urn:brocade.com:mgmt:brocade-RAS-operational"""', 'defining_module': '"""brocade-RAS-operational"""', 'yang_type': '"""string"""', 'is_config': '(False)'}), "(base=unicode, is_leaf=True, yang_name='cpu-process-name',\n rest_name='cpu-process-name', parent=self, path_helper=self.\n _path_helper, extmethods=self._extmethods, register_paths=True,\n namespace='urn:brocade.com:mgmt:brocade-RAS-operational',\n defining_module='brocade-RAS-operational', yang_type='string',\n is_config=False)\n", (3256, 3597), False, 'from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType\n'), ((11732, 12085), 'pyangbind.lib.yangtypes.YANGDynClass', 'YANGDynClass', ([], {'base': 'unicode', 'is_leaf': '(True)', 'yang_name': '"""cpu-process-name"""', 'rest_name': '"""cpu-process-name"""', 'parent': 'self', 'path_helper': 'self._path_helper', 'extmethods': 'self._extmethods', 'register_paths': '(True)', 'namespace': '"""urn:brocade.com:mgmt:brocade-RAS-operational"""', 'defining_module': '"""brocade-RAS-operational"""', 'yang_type': '"""string"""', 'is_config': '(False)'}), "(base=unicode, is_leaf=True, yang_name='cpu-process-name',\n rest_name='cpu-process-name', parent=self, path_helper=self.\n _path_helper, extmethods=self._extmethods, register_paths=True,\n namespace='urn:brocade.com:mgmt:brocade-RAS-operational',\n defining_module='brocade-RAS-operational', yang_type='string',\n is_config=False)\n", (11744, 12085), False, 'from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, 
ReferenceType\n'), ((10681, 11037), 'pyangbind.lib.yangtypes.YANGDynClass', 'YANGDynClass', (['v'], {'base': 'unicode', 'is_leaf': '(True)', 'yang_name': '"""cpu-process-name"""', 'rest_name': '"""cpu-process-name"""', 'parent': 'self', 'path_helper': 'self._path_helper', 'extmethods': 'self._extmethods', 'register_paths': '(True)', 'namespace': '"""urn:brocade.com:mgmt:brocade-RAS-operational"""', 'defining_module': '"""brocade-RAS-operational"""', 'yang_type': '"""string"""', 'is_config': '(False)'}), "(v, base=unicode, is_leaf=True, yang_name='cpu-process-name',\n rest_name='cpu-process-name', parent=self, path_helper=self.\n _path_helper, extmethods=self._extmethods, register_paths=True,\n namespace='urn:brocade.com:mgmt:brocade-RAS-operational',\n defining_module='brocade-RAS-operational', yang_type='string',\n is_config=False)\n", (10693, 11037), False, 'from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType\n'), ((2081, 2124), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (2111, 2124), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((2473, 2516), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (2503, 2516), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((2870, 2913), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (2900, 2913), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((3623, 3723), 'pyangbind.lib.yangtypes.RestrictedClassType', 'RestrictedClassType', ([], {'base_type': 'long', 'restriction_dict': "{'range': ['0..4294967295']}", 'int_size': '(32)'}), "(base_type=long, restriction_dict={'range': [\n '0..4294967295']}, int_size=32)\n", (3642, 3723), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((4082, 4125), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (4112, 4125), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((4472, 4515), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (4502, 4515), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((4861, 4904), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (4891, 4904), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((5248, 5291), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (5278, 5291), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((5636, 5679), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (5666, 5679), False, 'from 
pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((9432, 9532), 'pyangbind.lib.yangtypes.RestrictedClassType', 'RestrictedClassType', ([], {'base_type': 'long', 'restriction_dict': "{'range': ['0..4294967295']}", 'int_size': '(32)'}), "(base_type=long, restriction_dict={'range': [\n '0..4294967295']}, int_size=32)\n", (9451, 9532), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((14110, 14153), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (14140, 14153), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((16450, 16493), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (16480, 16493), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((18780, 18823), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (18810, 18823), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((21129, 21172), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (21159, 21172), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((23457, 23500), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (23487, 23500), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((25783, 25826), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (25813, 25826), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((28128, 28171), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (28158, 28171), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((30475, 30518), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (30505, 30518), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((8191, 8291), 'pyangbind.lib.yangtypes.RestrictedClassType', 'RestrictedClassType', ([], {'base_type': 'long', 'restriction_dict': "{'range': ['0..4294967295']}", 'int_size': '(32)'}), "(base_type=long, restriction_dict={'range': [\n '0..4294967295']}, int_size=32)\n", (8210, 8291), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((12977, 13020), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (13007, 13020), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((15357, 15400), 
'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (15387, 15400), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((17687, 17730), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (17717, 17730), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((20028, 20071), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (20058, 20071), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((22364, 22407), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (22394, 22407), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((24690, 24733), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (24720, 24733), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((27027, 27070), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (27057, 27070), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n'), ((29374, 29417), 'pyangbind.lib.yangtypes.RestrictedPrecisionDecimalType', 'RestrictedPrecisionDecimalType', ([], {'precision': '(2)'}), '(precision=2)\n', (29404, 29417), False, 'from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\n')] |
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
def unique_boxes(boxes, scale=1.0):
"""Return indices of unique boxes."""
v = np.array([1, 1e3, 1e6, 1e9])
    hashes = np.round(boxes * scale).dot(v).astype(np.int64)
_, index = np.unique(hashes, return_index=True)
return np.sort(index)
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
def xyxy_to_xywh(boxes):
"""Convert [x1 y1 x2 y2] box format to [x y w h] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
def validate_boxes(boxes, width=0, height=0):
"""Check that a set of boxes are valid."""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
assert (x1 >= 0).all()
assert (y1 >= 0).all()
assert (x2 >= x1).all()
assert (y2 >= y1).all()
assert (x2 < width).all()
assert (y2 < height).all()
def filter_small_boxes(boxes, min_size):
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
    keep = np.where((w >= min_size) & (h >= min_size))[0]
return keep
def filter_validate_boxes(boxes, width, height):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
a = x1 >= 0
b = y1 >= 0
c = x2 >= x1
d = y2 >= y1
e = x2 < width
f = y2 < height
return np.logical_and(a, np.logical_and(b, np.logical_and(c, np.logical_and(d, np.logical_and(e, f)))))
def overlaps(boxes1, boxes2):
ovrs = np.zeros((boxes1.shape[0], boxes2.shape[0]), dtype=np.float32)
areas1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
areas2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
for i in range(boxes1.shape[0]):
xx1 = np.maximum(boxes1[i, 0], boxes2[:, 0])
yy1 = np.maximum(boxes1[i, 1], boxes2[:, 1])
xx2 = np.minimum(boxes1[i, 2], boxes2[:, 2])
yy2 = np.minimum(boxes1[i, 3], boxes2[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas1[i] + areas2 - inter)
ovrs[i, :] = ovr[:]
return ovrs
def overlap(box, boxes):
area = (box[2] - box[0]) * (box[3] - box[1])
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
xx1 = np.maximum(box[0], boxes[:, 0])
yy1 = np.maximum(box[1], boxes[:, 1])
xx2 = np.minimum(box[2], boxes[:, 2])
yy2 = np.minimum(box[3], boxes[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
    # overlap of `box` against each box in `boxes` (uses the scalar `area` of `box`)
    ovr = inter / (area + areas - inter)
return ovr
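# Minimal self-check sketch (illustrative box values, not from the original
# project); it exercises the conversion helpers and the overlap routines above.
if __name__ == '__main__':
    sample = np.array([[10., 20., 14., 27.]])
    # [x1 y1 x2 y2] -> [x y w h] -> [x1 y1 x2 y2] should round-trip exactly
    assert np.array_equal(xywh_to_xyxy(xyxy_to_xywh(sample)), sample)
    # Note: the area terms omit the +1 convention used for the intersection,
    # so the returned values are not strict IoU.
    print(overlaps(sample, sample))
    print(overlap(sample[0], sample))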
| [
"numpy.unique",
"numpy.minimum",
"numpy.hstack",
"numpy.where",
"numpy.logical_and",
"numpy.sort",
"numpy.array",
"numpy.zeros",
"numpy.maximum",
"numpy.round"
] | [((321, 367), 'numpy.array', 'np.array', (['[1, 1000.0, 1000000.0, 1000000000.0]'], {}), '([1, 1000.0, 1000000.0, 1000000000.0])\n', (329, 367), True, 'import numpy as np\n'), ((424, 460), 'numpy.unique', 'np.unique', (['hashes'], {'return_index': '(True)'}), '(hashes, return_index=True)\n', (433, 460), True, 'import numpy as np\n'), ((472, 486), 'numpy.sort', 'np.sort', (['index'], {}), '(index)\n', (479, 486), True, 'import numpy as np\n'), ((589, 650), 'numpy.hstack', 'np.hstack', (['(boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)'], {}), '((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))\n', (598, 650), True, 'import numpy as np\n'), ((753, 814), 'numpy.hstack', 'np.hstack', (['(boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1)'], {}), '((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))\n', (762, 814), True, 'import numpy as np\n'), ((1742, 1804), 'numpy.zeros', 'np.zeros', (['(boxes1.shape[0], boxes2.shape[0])'], {'dtype': 'np.float32'}), '((boxes1.shape[0], boxes2.shape[0]), dtype=np.float32)\n', (1750, 1804), True, 'import numpy as np\n'), ((2565, 2596), 'numpy.maximum', 'np.maximum', (['box[0]', 'boxes[:, 0]'], {}), '(box[0], boxes[:, 0])\n', (2575, 2596), True, 'import numpy as np\n'), ((2607, 2638), 'numpy.maximum', 'np.maximum', (['box[1]', 'boxes[:, 1]'], {}), '(box[1], boxes[:, 1])\n', (2617, 2638), True, 'import numpy as np\n'), ((2649, 2680), 'numpy.minimum', 'np.minimum', (['box[2]', 'boxes[:, 2]'], {}), '(box[2], boxes[:, 2])\n', (2659, 2680), True, 'import numpy as np\n'), ((2691, 2722), 'numpy.minimum', 'np.minimum', (['box[3]', 'boxes[:, 3]'], {}), '(box[3], boxes[:, 3])\n', (2701, 2722), True, 'import numpy as np\n'), ((2732, 2762), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (2742, 2762), True, 'import numpy as np\n'), ((2771, 2801), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (2781, 2801), True, 'import numpy as np\n'), ((1287, 1329), 'numpy.where', 'np.where', (['((w >= min_size) & (h > min_size))'], {}), '((w >= min_size) & (h > min_size))\n', (1295, 1329), True, 'import numpy as np\n'), ((2006, 2044), 'numpy.maximum', 'np.maximum', (['boxes1[i, 0]', 'boxes2[:, 0]'], {}), '(boxes1[i, 0], boxes2[:, 0])\n', (2016, 2044), True, 'import numpy as np\n'), ((2059, 2097), 'numpy.maximum', 'np.maximum', (['boxes1[i, 1]', 'boxes2[:, 1]'], {}), '(boxes1[i, 1], boxes2[:, 1])\n', (2069, 2097), True, 'import numpy as np\n'), ((2112, 2150), 'numpy.minimum', 'np.minimum', (['boxes1[i, 2]', 'boxes2[:, 2]'], {}), '(boxes1[i, 2], boxes2[:, 2])\n', (2122, 2150), True, 'import numpy as np\n'), ((2165, 2203), 'numpy.minimum', 'np.minimum', (['boxes1[i, 3]', 'boxes2[:, 3]'], {}), '(boxes1[i, 3], boxes2[:, 3])\n', (2175, 2203), True, 'import numpy as np\n'), ((2217, 2247), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (2227, 2247), True, 'import numpy as np\n'), ((2260, 2290), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (2270, 2290), True, 'import numpy as np\n'), ((363, 386), 'numpy.round', 'np.round', (['(boxes * scale)'], {}), '(boxes * scale)\n', (371, 386), True, 'import numpy as np\n'), ((1674, 1694), 'numpy.logical_and', 'np.logical_and', (['e', 'f'], {}), '(e, f)\n', (1688, 1694), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
#'''
# Created on 18-12-27 10:34 AM
#
# @Author: <NAME>(laygin)
#'''
import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
from torch.utils.data import Dataset
import torch
from config import IMAGE_MEAN
from ctpn_utils import cal_rpn
def readxml(path):
gtboxes = []
imgfile = ''
xml = ET.parse(path)
for elem in xml.iter():
if 'filename' in elem.tag:
imgfile = elem.text
if 'object' in elem.tag:
for attr in list(elem):
if 'bndbox' in attr.tag:
xmin = int(round(float(attr.find('xmin').text)))
ymin = int(round(float(attr.find('ymin').text)))
xmax = int(round(float(attr.find('xmax').text)))
ymax = int(round(float(attr.find('ymax').text)))
gtboxes.append((xmin, ymin, xmax, ymax))
return np.array(gtboxes), imgfile
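# Illustrative note (hypothetical file): readxml() expects PASCAL VOC style
# annotations such as
#   <annotation>
#     <filename>img_001.jpg</filename>
#     <object><bndbox><xmin>10</xmin><ymin>20</ymin>
#                     <xmax>110</xmax><ymax>60</ymax></bndbox></object>
#   </annotation>
# for which it returns (np.array([[10, 20, 110, 60]]), 'img_001.jpg').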
# for ctpn text detection
class VOCDataset(Dataset):
def __init__(self,
datadir,
labelsdir):
'''
:param datadir: image's directory
:param labelsdir: annotations' directory
'''
if not os.path.isdir(datadir):
raise Exception('[ERROR] {} is not a directory'.format(datadir))
if not os.path.isdir(labelsdir):
raise Exception('[ERROR] {} is not a directory'.format(labelsdir))
self.datadir = datadir
self.img_names = os.listdir(self.datadir)
self.labelsdir = labelsdir
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx):
img_name = self.img_names[idx]
img_path = os.path.join(self.datadir, img_name)
print(img_path)
xml_path = os.path.join(self.labelsdir, img_name.replace('.jpg', '.xml'))
gtbox, _ = readxml(xml_path)
img = cv2.imread(img_path)
h, w, c = img.shape
        # randomly flip the image horizontally (and mirror the ground-truth boxes)
if np.random.randint(2) == 1:
img = img[:, ::-1, :]
newx1 = w - gtbox[:, 2] - 1
newx2 = w - gtbox[:, 0] - 1
gtbox[:, 0] = newx1
gtbox[:, 2] = newx2
[cls, regr], _ = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)
m_img = img - IMAGE_MEAN
regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])
cls = np.expand_dims(cls, axis=0)
# transform to torch tensor
m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()
cls = torch.from_numpy(cls).float()
regr = torch.from_numpy(regr).float()
return m_img, cls, regr
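# Minimal usage sketch (illustrative; the directory paths are placeholders and
# the project's config/ctpn_utils modules must be importable for this to run).
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    dataset = VOCDataset('VOCdevkit/VOC2007/JPEGImages', 'VOCdevkit/VOC2007/Annotations')
    # batch_size=1 because images (and therefore anchor grids) differ in size
    loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0)
    m_img, cls, regr = next(iter(loader))
    print(m_img.shape, cls.shape, regr.shape)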
| [
"os.listdir",
"xml.etree.ElementTree.parse",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"numpy.random.randint",
"os.path.isdir",
"numpy.expand_dims",
"cv2.imread"
] | [((342, 356), 'xml.etree.ElementTree.parse', 'ET.parse', (['path'], {}), '(path)\n', (350, 356), True, 'import xml.etree.ElementTree as ET\n'), ((912, 929), 'numpy.array', 'np.array', (['gtboxes'], {}), '(gtboxes)\n', (920, 929), True, 'import numpy as np\n'), ((1531, 1555), 'os.listdir', 'os.listdir', (['self.datadir'], {}), '(self.datadir)\n', (1541, 1555), False, 'import os\n'), ((1741, 1777), 'os.path.join', 'os.path.join', (['self.datadir', 'img_name'], {}), '(self.datadir, img_name)\n', (1753, 1777), False, 'import os\n'), ((1935, 1955), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1945, 1955), False, 'import cv2\n'), ((2415, 2442), 'numpy.expand_dims', 'np.expand_dims', (['cls'], {'axis': '(0)'}), '(cls, axis=0)\n', (2429, 2442), True, 'import numpy as np\n'), ((1253, 1275), 'os.path.isdir', 'os.path.isdir', (['datadir'], {}), '(datadir)\n', (1266, 1275), False, 'import os\n'), ((1369, 1393), 'os.path.isdir', 'os.path.isdir', (['labelsdir'], {}), '(labelsdir)\n', (1382, 1393), False, 'import os\n'), ((2016, 2036), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2033, 2036), True, 'import numpy as np\n'), ((2563, 2584), 'torch.from_numpy', 'torch.from_numpy', (['cls'], {}), '(cls)\n', (2579, 2584), False, 'import torch\n'), ((2608, 2630), 'torch.from_numpy', 'torch.from_numpy', (['regr'], {}), '(regr)\n', (2624, 2630), False, 'import torch\n')] |
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors
from pathlib import Path
import platform
"""
Create a composite object from a local asset bundle.
Test that the object loaded correctly.
Apply sub-object commands to the sub-objects.
"""
class CompositeObject(Controller):
def run(self):
commands = [TDWUtils.create_empty_room(12, 12)]
# Find the local asset bundle for this platform.
url = "file:///" + str(Path("composite_objects/" + platform.system() + "/test_composite_object").resolve())
# Add the local object.
o_id = self.get_unique_id()
commands.extend([{"$type": "add_object",
"name": "test_composite_object",
"url": url,
"scale_factor": 1,
"id": o_id},
{"$type": "set_mass",
"id": o_id,
"mass": 100},
{"$type": "send_segmentation_colors"},
{"$type": "send_id_pass_segmentation_colors"},
{"$type": "send_composite_objects"},
{"$type": "send_rigidbodies",
"frequency": "always"}])
commands.extend(TDWUtils.create_avatar(position={"x": 0, "y": 1.49, "z": -2.77}))
resp = self.communicate(commands)
visible_objects = dict()
segmentation_colors = dict()
# Get all objects.
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "segm":
segm = SegmentationColors(resp[i])
for j in range(segm.get_num()):
object_id = segm.get_object_id(j)
visible_objects[object_id] = False
segmentation_colors[segm.get_object_color(j)] = object_id
# Get all visible objects. Also, get the ID of the light and motor sub-objects.
light_id = -1
motor_id = -1
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "ipsc":
ipsc = IdPassSegmentationColors(resp[i])
for j in range(ipsc.get_num_segmentation_colors()):
object_color = ipsc.get_segmentation_color(j)
object_id = segmentation_colors[object_color]
visible_objects[object_id] = True
elif r_id == "comp":
comp = CompositeObjects(resp[i])
for j in range(comp.get_num()):
object_id = comp.get_object_id(j)
if object_id == o_id:
for k in range(comp.get_num_sub_objects(j)):
sub_object_id = comp.get_sub_object_id(j, k)
sub_object_machine_type = comp.get_sub_object_machine_type(j, k)
if sub_object_machine_type == "light":
light_id = sub_object_id
elif sub_object_machine_type == "motor":
motor_id = sub_object_id
print(visible_objects)
# Start the motor and turn the light on.
is_on = True
self.communicate([{"$type": "set_motor",
"target_velocity": 500,
"force": 500,
"id": motor_id},
{"$type": "set_sub_object_light",
"is_on": is_on,
"id": light_id}])
for i in range(1000):
# Every 50 frames, blink the lights on and off.
if i % 50 == 0:
is_on = not is_on
self.communicate({"$type": "set_sub_object_light",
"is_on": is_on,
"id": light_id})
else:
self.communicate([])
self.communicate({"$type": "terminate"})
if __name__ == "__main__":
CompositeObject().run()
| [
"tdw.tdw_utils.TDWUtils.create_avatar",
"tdw.output_data.CompositeObjects",
"tdw.tdw_utils.TDWUtils.create_empty_room",
"tdw.output_data.OutputData.get_data_type_id",
"tdw.output_data.SegmentationColors",
"platform.system",
"tdw.output_data.IdPassSegmentationColors"
] | [((441, 475), 'tdw.tdw_utils.TDWUtils.create_empty_room', 'TDWUtils.create_empty_room', (['(12)', '(12)'], {}), '(12, 12)\n', (467, 475), False, 'from tdw.tdw_utils import TDWUtils\n'), ((1403, 1467), 'tdw.tdw_utils.TDWUtils.create_avatar', 'TDWUtils.create_avatar', ([], {'position': "{'x': 0, 'y': 1.49, 'z': -2.77}"}), "(position={'x': 0, 'y': 1.49, 'z': -2.77})\n", (1425, 1467), False, 'from tdw.tdw_utils import TDWUtils\n'), ((1667, 1703), 'tdw.output_data.OutputData.get_data_type_id', 'OutputData.get_data_type_id', (['resp[i]'], {}), '(resp[i])\n', (1694, 1703), False, 'from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors\n'), ((2211, 2247), 'tdw.output_data.OutputData.get_data_type_id', 'OutputData.get_data_type_id', (['resp[i]'], {}), '(resp[i])\n', (2238, 2247), False, 'from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors\n'), ((1758, 1785), 'tdw.output_data.SegmentationColors', 'SegmentationColors', (['resp[i]'], {}), '(resp[i])\n', (1776, 1785), False, 'from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors\n'), ((2302, 2335), 'tdw.output_data.IdPassSegmentationColors', 'IdPassSegmentationColors', (['resp[i]'], {}), '(resp[i])\n', (2326, 2335), False, 'from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors\n'), ((2646, 2671), 'tdw.output_data.CompositeObjects', 'CompositeObjects', (['resp[i]'], {}), '(resp[i])\n', (2662, 2671), False, 'from tdw.output_data import OutputData, CompositeObjects, IdPassSegmentationColors, SegmentationColors\n'), ((594, 611), 'platform.system', 'platform.system', ([], {}), '()\n', (609, 611), False, 'import platform\n')] |
from knackpy.models import FIELD_SETTINGS
from . import utils, formatters
def get_id_field_args():
"""TODO
- id field is a global field def. weird, right?
- field_defs should be immutable for this reason
"""
return {"key": "id", "name": "id", "type": "id", "obj": None}
def set_field_def_views(key: str, scenes: list):
"""
Update FieldDef's `views` property to include a list of all view keys that use this
field.
TODO: make side effect of FieldDef...and document
"""
views = []
for scene in scenes:
for view in scene["views"]:
if view["type"] == "table":
# must ignore "link" columns, etc
field_keys = [
column["field"]["key"]
for column in view["columns"]
if column.get("field")
]
if key in field_keys:
views.append(view["key"])
# associate the id field every view
elif key == "id":
views.append(view["key"])
else:
# todo: should we handle non-table views?
continue
return views
class FieldDef:
""" Knack field defintion wrapper """
def __repr__(self):
name = getattr(self, "name", "(no name)")
return f"<FieldDef '{name}'>"
def __init__(self, **kwargs):
for attr in [
# required definition attrs
"key",
"name",
"type",
"obj",
]:
try:
setattr(self, attr, kwargs[attr])
except KeyError:
raise KeyError(
f"FieldDef missing required FieldDef attribute: '{attr}'"
)
self.identifier = kwargs["identifier"] if kwargs.get("identifier") else False
self.views = []
self.settings = FIELD_SETTINGS.get(self.type)
self.subfields = self.settings.get("subfields") if self.settings else None
self.use_knack_format = (
self.settings.get("use_knack_format") if self.settings else False
)
try:
self.formatter = getattr(formatters, self.type)
except AttributeError:
self.formatter = getattr(formatters, "default")
def field_defs_from_metadata(metadata: dict):
"""Generate a list of FieldDef's from Knack metadata. Note the
"set_field_def_views()" side effect, which assigns to prop "views" a list of view
keys which use the field.
Args:
metadata (dict): Knack application metadata dict.
Returns:
list: A list of FieldDef instances.
"""
field_defs = []
for obj in metadata["objects"]:
id_field_args = get_id_field_args()
id_field_args["obj"] = obj["key"]
field_defs.append(FieldDef(**id_field_args))
for field in obj["fields"]:
field["name"] = utils.valid_name(field["name"])
# the object is also available at field["object_key"], but
# this is not always available
field["obj"] = obj["key"]
try:
if field["key"] == obj["identifier"]:
field["identifier"] = True
else:
field["identifier"] = False
except KeyError:
# built-in "Accounts" does not have an identifier
# also identifier may only be present if set manually in the builder?
field["identifier"] = False
field_defs.append(FieldDef(**field))
for field_def in field_defs:
field_def.views = set_field_def_views(field_def.key, metadata["scenes"])
return field_defs
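# Illustrative note (hypothetical metadata): given
#   metadata = {"objects": [{"key": "object_1", "identifier": "field_1",
#                            "fields": [{"key": "field_1", "name": "Name",
#                                        "type": "short_text"}]}],
#               "scenes": []}
# field_defs_from_metadata(metadata) returns two FieldDefs: the synthetic "id"
# field plus one for "field_1" (flagged as the identifier), each with .views
# populated from the table views that reference it (empty here, since there
# are no scenes).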
class Field(object):
"""A container for a single column of Knack data. This is the lowest-level
    container in the API. The hierarchy being: App > Records > Record > Field.
Typically you would not construct this class directly, but instead an App, which
will generate Fields via App.records().
More specifically, the API is designed so that you would typically interface with a
Field instance through the records.Record class. That class operates on Fields by
returning their values through Record[<field name>] or Record[<field key>].
But it's fine to work directly with fields:
    - field.raw: the unformatted input value
- field.formatted: the formatted value
- field.key: the knack field key
- field.name: the knack field name
Args:
field_def (knackpy.fields.FieldDef): A knackpy FieldDef class object
value (object): Anything, really.
timezone ([pytz.timezone]): A pytz timezone object.
        knack_formatted_value (str, optional): There are a few fields where it's easier to
use knack's formatted value as a starting point, rather than the raw value.
E.g. timer and name. In those cases, we assign that value here and pass it
on to the self.formatter() function for further formatting.
"""
def __init__(
self, field_def: FieldDef, value: object, timezone, knack_formatted_value=None
):
self.key = field_def.key
self.name = field_def.name
self.raw = value
self.field_def = field_def
self.timezone = timezone
self.knack_formatted_value = knack_formatted_value
self.formatted = self._format()
def __repr__(self):
return f"<Field {{'{self.key}': '{self.formatted}'}}>"
def __contains__(self, item):
if item in self.raw:
return True
def _format(self):
"""
        Knack applies its own standard formatting to values, which are always
available at the non-raw key. Knack includes the raw key in the dict when
formatting is applied, allowing access to the unformatted data.
        Generally, the Knack formatting, where it exists, is fine. However, there are
        cases where we want to apply our own formatters, such as datestamps (where the
formatted value does not include a timezone offset).
And there are other cases where we want to apply additional formatting to the
knack-formatted value, e.g. Timers.
See also: models.py, formatters.py.
"""
kwargs = self._set_formatter_kwargs()
try:
input_value = (
self.knack_formatted_value if self.knack_formatted_value else self.raw
)
return self.field_def.formatter(input_value, **kwargs)
except AttributeError:
# thrown when value is None
return self.raw
def _set_formatter_kwargs(self):
kwargs = {}
if self.field_def.type == "date_time":
kwargs["timezone"] = self.timezone
return kwargs
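# Illustrative sketch (hypothetical values; in practice Fields are produced by
# App.records() rather than constructed by hand):
#
#   import pytz
#   field_def = next(f for f in field_defs_from_metadata(metadata) if f.key == "field_1")
#   field = Field(field_def, "some raw value", pytz.timezone("US/Central"))
#   field.raw        # the unformatted input value
#   field.formatted  # the value after the type's formatter has run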
| [
"knackpy.models.FIELD_SETTINGS.get"
] | [((1921, 1950), 'knackpy.models.FIELD_SETTINGS.get', 'FIELD_SETTINGS.get', (['self.type'], {}), '(self.type)\n', (1939, 1950), False, 'from knackpy.models import FIELD_SETTINGS\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The MIT License
# Copyright (c) 2017 - 2022 <NAME>, <EMAIL>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
try:
from PIL import Image
except ImportError:
raise Exception('Need to have PIL / pillow installed for this example.')
try:
import numpy as np
except ImportError:
raise Exception('Need to have numpy installed for this example.')
from plotille import Canvas, Figure, hist, hsl
from plotille.data import circle
current_dir = os.path.dirname(os.path.abspath(__file__))
X = np.random.normal(size=10000)
width = 12
height = 10
spacer = ' '
def extend_plot_lines(lines):
lines[0] += spacer * 20
lines[1] += spacer * 20
for idx in range(2, len(lines) - 2):
lines[idx] += spacer * 7
return lines
def int_formatter(val, chars, delta, left):
return '{:{}{}}'.format(int(val), '<' if left else '>', chars)
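# Illustrative examples: int_formatter(3.7, 5, None, left=True) returns '3    '
# and int_formatter(3.7, 5, None, left=False) returns '    3'. It is registered
# below via fig.register_label_formatter(float, int_formatter) so axis labels
# print as padded integers.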
def logo():
# Canvas on its own can draw an image using dots
img = Image.open(current_dir + '/../imgs/logo.png')
img = img.convert('L')
img = img.resize((270, 120))
cvs = Canvas(135, 30, background=hsl(0, 0, 0.8), mode='rgb')
cvs.braille_image(img.getdata(), inverse=True, color=hsl(0, 0.5, 0.4))
indent = ' ' * 6
print(indent + cvs.plot().replace(os.linesep, os.linesep + indent))
def histogram():
fig = Figure()
fig.width = width
fig.height = height
fig.color_mode = 'rgb'
fig.register_label_formatter(float, int_formatter)
fig.histogram(X, lc=hsl(17, 1, 0.8))
lines = extend_plot_lines(fig.show().split(os.linesep))
return lines
def crappyhist():
lines = hist(X, bins=12, width=12, lc=hsl(285, 1, 0.74), color_mode='rgb').split(os.linesep)
lines[1] += spacer
return lines
def plot():
fig = Figure()
fig.width = width
fig.height = height
fig.set_y_limits(-2, 2)
fig.color_mode = 'rgb'
fig.register_label_formatter(float, int_formatter)
x1 = np.random.normal(size=10)
fig.scatter(list(range(len(x1))), x1, lc=hsl(122, 0.55, 0.43), marker='o')
fig.plot([0, 9], [2, 0], lc=hsl(237, 1, 0.75), marker='x')
x2 = np.linspace(0, 9, 20)
fig.plot(x2, 0.25 * np.sin(x2) - 1, lc=hsl(70, 1, 0.5))
fig.text([5], [1], ['Hi'], lc=hsl(0, 0, 0.7))
fig.axvline(1, lc=hsl(0, 1, 0.5))
lines = extend_plot_lines(fig.show().split(os.linesep))
return lines
def heat():
fig = Figure()
fig.width = width
fig.height = height
fig.set_y_limits(-2, 2)
fig.set_x_limits(-2, 2)
fig.color_mode = 'rgb'
fig.origin = False
fig.register_label_formatter(float, int_formatter)
xy = circle(0, 0, 1.5)
fig.plot(xy[0], xy[1])
img = []
for _ in range(height):
img += [[None] * width]
img[int(height / 2)][int(width / 2)] = 1
img[int(height / 2) - 2][int(width / 2) - 1] = 0.8
img[int(height / 2) - 2][int(width / 2)] = 0.7
img[int(height / 2) - 1][int(width / 2) - 1] = 0.2
img[int(height / 2) ][int(width / 2) - 1] = 0.2 # noqa: E202
img[int(height / 2) + 1][int(width / 2) - 1] = 0.3
img[int(height / 2) - 1][int(width / 2) + 1] = 0.4
img[int(height / 2) ][int(width / 2) + 1] = 0.8 # noqa: E202
img[int(height / 2) + 1][int(width / 2) + 1] = 0.7
img[int(height / 2) - 1][int(width / 2)] = 0.7
img[int(height / 2) + 1][int(width / 2)] = 0.8
# img[int(height / 2)-1][int(width / 2)] = 1
# img[int(height / 2)][int(width / 2)] = 1
fig.imgshow(img, cmap='magma')
lines = extend_plot_lines(fig.show().split(os.linesep))
return lines
def main():
print('\n\n')
logo()
print()
for lines in zip(histogram(), plot(), heat(), crappyhist()):
print(' '.join(lines))
print('\n\n')
if __name__ == '__main__':
main()
| [
"numpy.random.normal",
"PIL.Image.open",
"numpy.sin",
"plotille.data.circle",
"numpy.linspace",
"os.path.abspath",
"plotille.Figure",
"plotille.hsl"
] | [((1627, 1655), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10000)'}), '(size=10000)\n', (1643, 1655), True, 'import numpy as np\n'), ((1596, 1621), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1611, 1621), False, 'import os\n'), ((2062, 2107), 'PIL.Image.open', 'Image.open', (["(current_dir + '/../imgs/logo.png')"], {}), "(current_dir + '/../imgs/logo.png')\n", (2072, 2107), False, 'from PIL import Image\n'), ((2431, 2439), 'plotille.Figure', 'Figure', ([], {}), '()\n', (2437, 2439), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((2871, 2879), 'plotille.Figure', 'Figure', ([], {}), '()\n', (2877, 2879), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3046, 3071), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3062, 3071), True, 'import numpy as np\n'), ((3224, 3245), 'numpy.linspace', 'np.linspace', (['(0)', '(9)', '(20)'], {}), '(0, 9, 20)\n', (3235, 3245), True, 'import numpy as np\n'), ((3499, 3507), 'plotille.Figure', 'Figure', ([], {}), '()\n', (3505, 3507), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3725, 3742), 'plotille.data.circle', 'circle', (['(0)', '(0)', '(1.5)'], {}), '(0, 0, 1.5)\n', (3731, 3742), False, 'from plotille.data import circle\n'), ((2205, 2219), 'plotille.hsl', 'hsl', (['(0)', '(0)', '(0.8)'], {}), '(0, 0, 0.8)\n', (2208, 2219), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((2290, 2306), 'plotille.hsl', 'hsl', (['(0)', '(0.5)', '(0.4)'], {}), '(0, 0.5, 0.4)\n', (2293, 2306), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((2593, 2608), 'plotille.hsl', 'hsl', (['(17)', '(1)', '(0.8)'], {}), '(17, 1, 0.8)\n', (2596, 2608), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3117, 3137), 'plotille.hsl', 'hsl', (['(122)', '(0.55)', '(0.43)'], {}), '(122, 0.55, 0.43)\n', (3120, 3137), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3183, 3200), 'plotille.hsl', 'hsl', (['(237)', '(1)', '(0.75)'], {}), '(237, 1, 0.75)\n', (3186, 3200), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3289, 3304), 'plotille.hsl', 'hsl', (['(70)', '(1)', '(0.5)'], {}), '(70, 1, 0.5)\n', (3292, 3304), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3341, 3355), 'plotille.hsl', 'hsl', (['(0)', '(0)', '(0.7)'], {}), '(0, 0, 0.7)\n', (3344, 3355), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3380, 3394), 'plotille.hsl', 'hsl', (['(0)', '(1)', '(0.5)'], {}), '(0, 1, 0.5)\n', (3383, 3394), False, 'from plotille import Canvas, Figure, hist, hsl\n'), ((3270, 3280), 'numpy.sin', 'np.sin', (['x2'], {}), '(x2)\n', (3276, 3280), True, 'import numpy as np\n'), ((2751, 2768), 'plotille.hsl', 'hsl', (['(285)', '(1)', '(0.74)'], {}), '(285, 1, 0.74)\n', (2754, 2768), False, 'from plotille import Canvas, Figure, hist, hsl\n')] |
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
Basically identical to the django default caching loader, but takes
into account the blog id.
"""
# helpers mirrored from Django's stock cached template loader, which this class copies
from django.template import TemplateDoesNotExist
from django.template.loader import get_template_from_string
from django.template.loaders.cached import Loader as BaseLoader
from django.utils.hashcompat import sha_constructor
from wp_frontman.models import Blog
class Loader(BaseLoader):
def load_template(self, template_name, template_dirs=None):
blog = Blog.get_active()
        key = '%s-%s-%s' % (blog.site_id, blog.blog_id, template_name)
if template_dirs:
# If template directories were specified, use a hash to differentiate
            key = '-'.join([key, sha_constructor('|'.join(template_dirs)).hexdigest()])
if key not in self.template_cache:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
return template, origin
self.template_cache[key] = template
return self.template_cache[key], None
| [
"wp_frontman.models.Blog.get_active"
] | [((460, 477), 'wp_frontman.models.Blog.get_active', 'Blog.get_active', ([], {}), '()\n', (475, 477), False, 'from wp_frontman.models import Blog\n')] |
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
from watson.dev.middleware import StaticFileMiddleware
from watson.dev.reloader import main
def make_dev_server(app, host='0.0.0.0', port=8000,
noreload=False, script_dir=None, public_dir=None):
"""
A simple local development server utilizing the existing simple_server
module, but allows for serving of static files.
Never use this in production. EVER.
Example:
.. code-block:: python
def my_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return [b'<h1>Hello World!</h1>']
if __name__ == '__main__':
make_dev_server(my_app)
Args:
app: A WSGI callable
host: The host to bind to
port: The port
        noreload: If True, do not automatically reload the application when
            source code changes.
        script_dir: Directory passed through to the reloader.
        public_dir: Directory from which StaticFileMiddleware serves static files.
"""
wrapped_app = StaticFileMiddleware(app, initial_dir=public_dir)
if not noreload:
main(__run_server, (wrapped_app, host, port), script_dir=script_dir)
else:
try:
__run_server(wrapped_app, host, port)
except KeyboardInterrupt:
print('\nTerminated.')
def __run_server(app, host, port):
print(
'Serving application at http://{0}:{1} in your favorite browser...'.format(host, port))
httpd = make_server(host, port, app)
httpd.serve_forever()
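# Hedged usage sketch (not part of the original module): a self-contained demo
# that serves a trivial WSGI app plus a hypothetical ./public directory of
# static assets through the middleware wired up by make_dev_server.
def _demo_static_serving():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from the dev server!']
    make_dev_server(hello_app, port=8080, public_dir='./public')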
| [
"watson.dev.reloader.main",
"watson.dev.middleware.StaticFileMiddleware",
"wsgiref.simple_server.make_server"
] | [((979, 1028), 'watson.dev.middleware.StaticFileMiddleware', 'StaticFileMiddleware', (['app'], {'initial_dir': 'public_dir'}), '(app, initial_dir=public_dir)\n', (999, 1028), False, 'from watson.dev.middleware import StaticFileMiddleware\n'), ((1425, 1453), 'wsgiref.simple_server.make_server', 'make_server', (['host', 'port', 'app'], {}), '(host, port, app)\n', (1436, 1453), False, 'from wsgiref.simple_server import make_server\n'), ((1058, 1126), 'watson.dev.reloader.main', 'main', (['__run_server', '(wrapped_app, host, port)'], {'script_dir': 'script_dir'}), '(__run_server, (wrapped_app, host, port), script_dir=script_dir)\n', (1062, 1126), False, 'from watson.dev.reloader import main\n')] |
#imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
import MDAnalysis as mda
import numpy as np
import sys
from six.moves import range
class LipidCOM(object):
""" A lipid center of mass (COM) object.
This object stores the COM coordinates of a lipid (or other molecule or group
of atoms) computed from both the wrapped and unwrapped atomic coordinates. This
object also stores information about the type of lipid as well as the total mass
of the lipid.
"""
def __init__(self):
"""LipidCOM initialization
Attributes:
            type (str): The lipid type (e.g. the lipid could be typed by resname).
com (np.array): The length three vector holding the wrapped xyz coordinates.
com_unwrap (np.array): The length three vector holding the unwrapped xyz coordinates.
mass (float): The total mass of the atoms used to define LipidCOM.
"""
# lipid type/resname or other name
self.type="UNK"
# wrapped coordinates
self.com=np.zeros(3)
# unwrapped coordinates
self.com_unwrap=np.zeros(3)
# total mass
self.mass=1.0
self.leaflet = "UNK"
self.resid = 0
return
    # The name of this function could be changed to be more descriptive, e.g.
# extract_com_mda_residue
def extract(self, mda_residue, unwrap=False, box=None, name_dict=None, make_whole=True):
""" Get the center of mass coordinates from an MDAnalysis residue
This function calls the MDAnalysis member function center_of_mass() of the residue
to compute the center of mass of the atoms constituting the residue.
Args:
mda_residue (MDAnalysis.residue): An MDAnalysis residue object from which to extract
                the center of mass coordinates.
            unwrap (bool, optional): Define which com container to store coordinates in.
False (default) - The COM coordinates are stored in the
container designated for the wrapped coordinate representation.
True - The COM coordinates are stored in the container designated
for the unwrapped coordinate representation.
"""
self.type = mda_residue.resname
self.resid = mda_residue.resid
self.atom_names = mda_residue.atoms.names
atom_group = mda_residue.atoms
if isinstance(name_dict, dict):
names = name_dict[self.type]
self.atom_names = names
n_names = len(names)
#print(mda_residue.atoms.select_atoms('name {}'.format(names[0])))
#atom_group = mda.core.groups.AtomGroup([eval("mda_residue.atoms."+names[0])])
atom_group = mda_residue.atoms.select_atoms('name {}'.format(names[0]))
#if (not unwrap) and (n_names > 1):
# mda.lib.mdamath.make_whole(mda_residue.atoms, reference_atom=atom_group)
for i in range(1, n_names):
#atom_group+=eval("mda_residue.atoms."+names[i])
atom_group += mda_residue.atoms.select_atoms('name {}'.format(names[i]))
#else:
if (not unwrap) and make_whole:
mda.lib.mdamath.make_whole(mda_residue.atoms)
if unwrap:
self.com_unwrap = atom_group.center_of_mass()
else:
if box is not None:
self.com = atom_group.center_of_mass()
self.com_unwrap = self.com[:]
else:
self.com = atom_group.center_of_mass()
self.com_unwrap = self.com[:]
self.mass = atom_group.total_mass()
return
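    # Hedged usage sketch (hypothetical universe and residue names, not from
    # the original source): reducing each POPC residue to its phosphate bead
    # when computing the wrapped center of mass.
    #
    #   com = LipidCOM()
    #   com.extract(universe.residues[0], name_dict={'POPC': ['P']})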
# a Center of Mass frame object
class COMFrame(object):
""" A molecular dynamics style Frame object for LipidCOM objects.
    Attributes:
lipidcom (list of obj:LipidCOM): A list of the LipidCOM objects assigned to the COMFrame.
box (np.array): A 3 element vector containing the (rectangular) xyz box edge lengths.
time (float): The simulation time that this Frame represents.
number (int): The frame number of this Frame.
mdnumber (int): The corresponding frame number in the original MD trajectory
"""
# does not check that nlipids is an int
def __init__(self, mda_frame, mda_bilayer_selection, unwrap_coords,
name_dict=None, multi_bead=False, rewrap=False,
make_whole=False):
""" Frame initialization.
Args:
mda_frame (MDAnalysis.Timestep): The MDAnalysis frame for the coordinates to use in computing the
lipid centers of mass.
mda_bilayer_selection (MDAnalysis.AtomSelection): The MDAnalsysis atom selection object for the
containing the atoms in the bilayer.
            unwrap_coords (numpy.Array): A numpy array containing the unwrapped coordinates of the bilayer selection
atoms.
"""
# list to store the nlipids LipidCOM objects
self.lipidcom = []
# box dimensions -- assumes the box originates a 0,0,0
        # It might be worth adding functionality to specify the box origin (or center)
# This also assumes a rectangular box
self.box = mda_frame.dimensions[0:3]
# simulation time
self.time = mda_frame.time
# frame number in the MD trajectory
self.mdnumber = mda_frame.frame
self.number = self.mdnumber
self._multi_bead = multi_bead
self._name_dict = name_dict
self._rewrapped = rewrap
self._make_whole = make_whole
if (name_dict is not None) and multi_bead:
self._build_multi_bead(mda_frame, mda_bilayer_selection,
unwrap_coords, name_dict)
else:
self._build_single_bead(mda_frame, mda_bilayer_selection,
unwrap_coords, name_dict=name_dict)
if rewrap:
self._rewrap()
return
def _build_single_bead(self, mda_frame, mda_bilayer_selection,
unwrap_coords, name_dict=None):
nlipids = len(mda_bilayer_selection.residues)
# initialize all the LipidCOM objects
for dummy_i in range(nlipids):
self.lipidcom.append(LipidCOM())
#atom indices in mda selection/frame
index = mda_bilayer_selection.indices
# loop over the residues (lipids) and get the centers of mass
## do the wrapped coordinates
r=0
for res in mda_bilayer_selection.residues:
#print(res," ",res.center_of_mass())
self.lipidcom[r].extract(res, name_dict=name_dict, make_whole=self._make_whole)
#self.lipidcom[r].mass = res.total_mass()
r+=1
#now unwrapped coordinates
mda_frame._pos[index] = unwrap_coords[:]
#now we need to adjust for the center of mass motion of the membrane -- for simplicity set all frames to (0,0,0)
# to remove center of mass motion of the membrane
total_mass = mda_bilayer_selection.masses.sum()
mem_com_x = (mda_frame.positions[index][:,0]*mda_bilayer_selection.masses).sum()/total_mass
mem_com_y = (mda_frame.positions[index][:,1]*mda_bilayer_selection.masses).sum()/total_mass
mem_com_z = (mda_frame.positions[index][:,2]*mda_bilayer_selection.masses).sum()/total_mass
mem_com = np.array([mem_com_x, mem_com_y, mem_com_z])
mda_frame._pos[index] -= mem_com
self.mem_com = mem_com
r=0
for res in mda_bilayer_selection.residues:
self.lipidcom[r].extract(res, unwrap=True, name_dict=name_dict, make_whole=self._make_whole)
r+=1
return
def _build_multi_bead(self, mda_frame, mda_bilayer_selection, unwrap_coords, name_dict):
nlipids = len(mda_bilayer_selection.residues)
#atom indices in mda selection/frame
index = mda_bilayer_selection.indices
# loop over the residues (lipids) and get the centers of mass
## do the wrapped coordinates
r=0
for res in mda_bilayer_selection.residues:
#print(res," ",res.center_of_mass())
resname = res.resname
# initialize all the LipidCOM objects
for atom in name_dict[resname]:
name_dict_single = {resname:[atom]}
self.lipidcom.append(LipidCOM())
self.lipidcom[r].extract(res, name_dict=name_dict_single, make_whole=self._make_whole)
#self.lipidcom[r].mass = res.total_mass()
r+=1
#now unwrapped coordinates
mda_frame._pos[index] = unwrap_coords[:]
#now we need to adjust for the center of mass motion of the membrane -- for simplicity set all frames to (0,0,0)
# to remove center of mass motion of the membrane
total_mass = mda_bilayer_selection.masses.sum()
mem_com_x = (mda_frame.positions[index][:,0]*mda_bilayer_selection.masses).sum()/total_mass
mem_com_y = (mda_frame.positions[index][:,1]*mda_bilayer_selection.masses).sum()/total_mass
mem_com_z = (mda_frame.positions[index][:,2]*mda_bilayer_selection.masses).sum()/total_mass
mem_com = np.array([mem_com_x, mem_com_y, mem_com_z])
mda_frame._pos[index] -= mem_com
self.mem_com = mem_com
r=0
for res in mda_bilayer_selection.residues:
#print(res," ",res.center_of_mass())
resname = res.resname
# initialize all the LipidCOM objects
for atom in name_dict[resname]:
name_dict_single = {resname:[atom]}
self.lipidcom[r].extract(res, unwrap=True, name_dict=name_dict_single, make_whole=self._make_whole)
#self.lipidcom[r].mass = res.total_mass()
r+=1
return
def _rewrap(self):
ncom = len(self.lipidcom)
box_low = np.array([0.0, 0.0, 0.0])
box_high = self.box
# print(box_low)
# print(box_high)
# print(self.box)
# print(self.mem_com)
n_re = 0
for r in range(ncom):
pos_c = self.lipidcom[r].com_unwrap + self.mem_com
pos_n = np.zeros(3)
diff = pos_c - self.lipidcom[r].com
dist = np.sqrt(np.dot(diff, diff))
# is_wrap = False
if dist > 1.0:
n_re += 1
for i in range(3):
p_i = pos_c[i]
b_l = box_low[i]
b_h = box_high[i]
b = self.box[i]
while p_i < b_l:
p_i += b
# is_wrap = True
while p_i > b_h:
p_i -= b
# is_wrap = True
pos_n[i] = p_i
# print(r, pos_c, pos_n)
# print('com', self.lipidcom[r].com)
# print('com_u', self.lipidcom[r].com_unwrap)
# print('bl bh',box_low, box_high)
# print('mc b', self.mem_com, self.box)
self.lipidcom[r].com = pos_n
print(('n_re', n_re))
return
def __repr__(self):
return 'COMFrame for frame %s with %s lipids' % (self.number, len(self.lipidcom))
def set_box(self, box_lengths):
""" Set the rectangular xyz box edge lengths.
Args:
box_lengths (numpy.array): A 1d, 3 element numpy.array containing the x,y,z box sizes (or edge lengths)
"""
self.box = box_lengths
return
def set_time(self, time):
""" Set the simulation time.
Args:
time (float): The simulation time to assign to this Frame.
"""
self.time = time
return
def __len__(self):
""" Returns the number of LipidCOM objects assigned to this Frame
Returns:
int: Number of LipidCOM objects currently assigned to this Frame
"""
return len(self.lipidcom)
# def COG(self,unwrapped=False):
# cog_out = np.zeros(3)
# for lipid in self.lipidcom:
# if not unwrapped:
# cog_out+=lipid.com
# else:
# cog_out+=lipid.com_unwrap
# cog_out/=len(self)
# return com_out
def com(self, wrapped=True):
""" Computes the center of mass (COM) for the Frame
This member function is used to compute the overall center of mass (COM) of the
COMFrame using the LipidCOM object coordinates and masses.
Args:
wrapped (bool, optional): Define which set of coordinates to use in the computation.
True (default) - The wrapped LipidCOM coordinates are used to compute
the COM of the frame.
False - The unwrapped LipidCOM coordinates are used to compute
the COM of the frame.
Returns:
np.array: A 3 element vector containing the xyz coordinates of the Frame's COM
"""
com_out = np.zeros(3)
total_mass = 0.0
for lipid in self.lipidcom:
if wrapped:
com_out+=lipid.com*lipid.mass
total_mass+=lipid.mass
else:
com_out+=lipid.com_unwrap*lipid.mass
total_mass+=lipid.mass
com_out/=total_mass
return com_out
def cog(self, wrapped=True):
""" Computes the center of geometry (COG) for the Frame
This member function is used to compute the overall center of geometry (COG) of the
COMFrame using the LipidCOM object coordinates.
Args:
wrapped (bool, optional): Define which set of coordinates to use in the computation.
True (default) - The wrapped LipidCOM coordinates are used to compute
the COG of the frame.
False - The unwrapped LipidCOM coordinates are used to compute
the COG of the frame.
Returns:
np.array: A 3 element vector containing the xyz coordinates of the Frame's COG
"""
com_out = np.zeros(3)
total_mass = 0.0
for lipid in self.lipidcom:
if wrapped:
com_out+=lipid.com
total_mass+=1.0
else:
com_out+=lipid.com_unwrap
total_mass+=1.0
com_out/=total_mass
return com_out
def coordinates(self, wrapped=True, leaflet=None):
coords = []
if leaflet is None:
for item in self.lipidcom:
if wrapped:
coords.append(item.com)
else:
coords.append(item.com_unwrap)
else:
for item in self.lipidcom:
if item.leaflet == leaflet:
if wrapped:
coords.append(item.com)
else:
coords.append(item.com_unwrap)
return np.array(coords)
def masses(self):
output = []
for lipid in self.lipidcom:
output.append(lipid.mass)
return np.array(output)
def resids(self):
output = []
for lipid in self.lipidcom:
output.append(lipid.resid)
return np.array(output)
def resnames(self):
output = []
for lipid in self.lipidcom:
output.append(lipid.type)
return output
def leaflets(self):
output = []
for lipid in self.lipidcom:
output.append(lipid.leaflet)
return output
def unique_resnames(self):
output = []
for lipid in self.lipidcom:
if lipid.type not in output:
output.append(lipid.type)
return output
def write_xyz(self, xyz_name, wrapped=True, name_by_leaflet=False):
# Open up the file to write to
xyz_out = open(xyz_name, "w")
comment = "COMFrame "+str(self.number)+" MD Frame "+str(self.mdnumber)
xyz_out.write(str(len(self.lipidcom)))
xyz_out.write("\n")
xyz_out.write(comment)
xyz_out.write("\n")
i=0
for dummy_lip in self.lipidcom:
#get the coordinates
x = self.lipidcom[i].com[0]
y = self.lipidcom[i].com[1]
z = self.lipidcom[i].com_unwrap[2]
if not wrapped:
x = self.lipidcom[i].com_unwrap[0]
y = self.lipidcom[i].com_unwrap[1]
#z = self.lipidcom[i].com_unwrap[2]
#get the lipid resname
oname = self.lipidcom[i].type
if name_by_leaflet:
oname = self.lipidcom[i].leaflet
#write to file
line = str(oname)+" "+str(x)+" "+str(y)+" "+str(z)
xyz_out.write(line)
xyz_out.write("\n")
i+=1
xyz_out.close()
return
| [
"six.moves.range",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"MDAnalysis.lib.mdamath.make_whole"
] | [((1133, 1144), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1141, 1144), True, 'import numpy as np\n'), ((1201, 1212), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1209, 1212), True, 'import numpy as np\n'), ((6333, 6347), 'six.moves.range', 'range', (['nlipids'], {}), '(nlipids)\n', (6338, 6347), False, 'from six.moves import range\n'), ((7508, 7551), 'numpy.array', 'np.array', (['[mem_com_x, mem_com_y, mem_com_z]'], {}), '([mem_com_x, mem_com_y, mem_com_z])\n', (7516, 7551), True, 'import numpy as np\n'), ((9331, 9374), 'numpy.array', 'np.array', (['[mem_com_x, mem_com_y, mem_com_z]'], {}), '([mem_com_x, mem_com_y, mem_com_z])\n', (9339, 9374), True, 'import numpy as np\n'), ((10022, 10047), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10030, 10047), True, 'import numpy as np\n'), ((10217, 10228), 'six.moves.range', 'range', (['ncom'], {}), '(ncom)\n', (10222, 10228), False, 'from six.moves import range\n'), ((13165, 13176), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13173, 13176), True, 'import numpy as np\n'), ((14247, 14258), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14255, 14258), True, 'import numpy as np\n'), ((15116, 15132), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (15124, 15132), True, 'import numpy as np\n'), ((15265, 15281), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (15273, 15281), True, 'import numpy as np\n'), ((15415, 15431), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (15423, 15431), True, 'import numpy as np\n'), ((3055, 3072), 'six.moves.range', 'range', (['(1)', 'n_names'], {}), '(1, n_names)\n', (3060, 3072), False, 'from six.moves import range\n'), ((3296, 3341), 'MDAnalysis.lib.mdamath.make_whole', 'mda.lib.mdamath.make_whole', (['mda_residue.atoms'], {}), '(mda_residue.atoms)\n', (3322, 3341), True, 'import MDAnalysis as mda\n'), ((10313, 10324), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10321, 10324), True, 'import numpy as np\n'), ((10400, 10418), 'numpy.dot', 'np.dot', (['diff', 'diff'], {}), '(diff, diff)\n', (10406, 10418), True, 'import numpy as np\n'), ((10528, 10536), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (10533, 10536), False, 'from six.moves import range\n')] |
import h5py
import numpy as np
class SignalGenerator(object):
def __init__(self):
self.pred_data = None
self.signal = None
def load_prediction(self):
prediction = "evaluater/predictions.hdf5"
with h5py.File(prediction, "r") as pred:
# List all groups
a_group_key = list(pred.keys())[0]
# Get the data
self.pred_data = np.vstack(list(pred[a_group_key]))
def generate_signal(self):
self.signal = np.zeros([len(self.pred_data), 2])
self.signal[self.pred_data[:, 0] < 0, 0] = -1
self.signal[self.pred_data[:, 1] < 0, 1] = -1
self.signal[self.pred_data[:, 0] > 0, 0] = 1
self.signal[self.pred_data[:, 1] > 0, 1] = 1
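        # Illustrative sketch (hypothetical values, not from the original
        # source): a prediction row of [-0.2, 0.7] maps to a signal row of
        # [-1, 1]; entries that are exactly zero are left at 0.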
if __name__ == '__main__':
pass
| [
"h5py.File"
] | [((241, 267), 'h5py.File', 'h5py.File', (['prediction', '"""r"""'], {}), "(prediction, 'r')\n", (250, 267), False, 'import h5py\n')] |
"""
File URL API
"""
from contextlib import contextmanager
from pathlib import Path
from pkg_resources import iter_entry_points
from typing import Dict, Generator, Type
from urllib.parse import ParseResult, urlparse
from unfurl.provider import Provider
def load_providers() -> Dict[str, Type[Provider]]:
return {
entry_point.name: entry_point.load()
for entry_point in iter_entry_points("unfurl.providers")
}
def provider_for(parse_result: ParseResult) -> Provider:
providers = load_providers()
provider_class = providers.get(parse_result.scheme)
if not provider_class:
raise Exception(f"No unfurl provider found for scheme: {parse_result.scheme}")
return provider_class()
@contextmanager
def unfurl(url: str) -> Generator[Path, None, None]:
"""
"""
parse_result = urlparse(url)
provider = provider_for(parse_result)
with provider.unfurl(parse_result=parse_result) as path:
yield path
@contextmanager
def furl(url: str) -> Generator[Path, None, None]:
"""
"""
parse_result = urlparse(url)
provider = provider_for(parse_result)
with provider.furl(parse_result=parse_result) as path:
yield path
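# Hedged usage sketch (assumes a provider for the "file" scheme has been
# registered through the "unfurl.providers" entry point; not part of the
# original module):
#
#   with unfurl("file:///tmp/example.txt") as path:
#       print(path.read_text())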
| [
"pkg_resources.iter_entry_points",
"urllib.parse.urlparse"
] | [((833, 846), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (841, 846), False, 'from urllib.parse import ParseResult, urlparse\n'), ((1073, 1086), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1081, 1086), False, 'from urllib.parse import ParseResult, urlparse\n'), ((393, 430), 'pkg_resources.iter_entry_points', 'iter_entry_points', (['"""unfurl.providers"""'], {}), "('unfurl.providers')\n", (410, 430), False, 'from pkg_resources import iter_entry_points\n')] |
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
np.random.seed(42)
random_x=np.random.randint(1,101,100)
random_y=np.random.randint(1,101,100)
data = [go.Scatter(x=random_x,
y=random_y,
mode='markers',
marker=dict(
size=12,
color='rgb(51,210,152)',
symbol='pentagon',
line=dict(width=2)
))]
layout = go.Layout(title='Hello First Plot',
xaxis={'title':'MY X AXIS'},
yaxis=dict(title='MY Y AXIS'),
hovermode='closest')
fig=go.Figure(data=data,layout=layout)
pyo.plot(fig,filename='scatter.html')
| [
"plotly.graph_objs.Figure",
"numpy.random.randint",
"numpy.random.seed",
"plotly.offline.plot"
] | [((81, 99), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (95, 99), True, 'import numpy as np\n'), ((109, 139), 'numpy.random.randint', 'np.random.randint', (['(1)', '(101)', '(100)'], {}), '(1, 101, 100)\n', (126, 139), True, 'import numpy as np\n'), ((147, 177), 'numpy.random.randint', 'np.random.randint', (['(1)', '(101)', '(100)'], {}), '(1, 101, 100)\n', (164, 177), True, 'import numpy as np\n'), ((714, 749), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (723, 749), True, 'import plotly.graph_objs as go\n'), ((749, 787), 'plotly.offline.plot', 'pyo.plot', (['fig'], {'filename': '"""scatter.html"""'}), "(fig, filename='scatter.html')\n", (757, 787), True, 'import plotly.offline as pyo\n')] |
import os
import sys
import torch
from torch.autograd import Variable
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + '../../../')
from in_out.deformable_object_reader import DeformableObjectReader
from core.observations.deformable_objects.deformable_multi_object import DeformableMultiObject
from core.model_tools.attachments.multi_object_attachment import MultiObjectAttachment
import support.kernels as kernel_factory
import logging
# module-level logger used by the __main__ block below
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)
def compute_distance_squared(path_to_mesh_1, path_to_mesh_2, deformable_object_type, attachment_type,
kernel_width=None):
reader = DeformableObjectReader()
object_1 = reader.create_object(path_to_mesh_1, deformable_object_type.lower())
object_2 = reader.create_object(path_to_mesh_2, deformable_object_type.lower())
multi_object_1 = DeformableMultiObject([object_1])
multi_object_2 = DeformableMultiObject([object_2])
multi_object_attachment = MultiObjectAttachment([attachment_type], [kernel_factory.factory('torch', kernel_width)])
return multi_object_attachment.compute_distances(
{key: torch.from_numpy(value) for key, value in multi_object_1.get_points().items()},
multi_object_1, multi_object_2).data.cpu().numpy()
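# Hedged usage sketch (hypothetical file names, object type, attachment type
# and kernel width, not from the original source):
#
#   d2 = compute_distance_squared('source.vtk', 'target.vtk',
#                                 'SurfaceMesh', 'varifold', kernel_width=10.0)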
if __name__ == '__main__':
"""
Basic info printing.
"""
logger.info('')
logger.info('##############################')
logger.info('##### PyDeformetrica 1.0 #####')
logger.info('##############################')
logger.info('')
"""
Read command line.
"""
assert len(sys.argv) in [5, 6], \
'Usage: ' + sys.argv[0] \
+ " <path_to_mesh_1.vtk> <path_to_mesh_2.vtk> <deformable_object_type> <attachment_type> " \
"[optional kernel_width]"
path_to_mesh_1 = sys.argv[1]
path_to_mesh_2 = sys.argv[2]
deformable_object_type = sys.argv[3]
attachment_type = sys.argv[4]
kernel_width = None
if len(sys.argv) == 6:
kernel_width = float(sys.argv[5])
if not os.path.isfile(path_to_mesh_1):
raise RuntimeError('The specified source file ' + path_to_mesh_1 + ' does not exist.')
if not os.path.isfile(path_to_mesh_2):
raise RuntimeError('The specified source file ' + path_to_mesh_2 + ' does not exist.')
"""
Core part.
"""
logger.info(compute_distance_squared(
path_to_mesh_1, path_to_mesh_2, deformable_object_type, attachment_type, kernel_width))
| [
"in_out.deformable_object_reader.DeformableObjectReader",
"support.kernels.factory",
"torch.from_numpy",
"os.path.isfile",
"core.observations.deformable_objects.deformable_multi_object.DeformableMultiObject",
"os.path.abspath"
] | [((617, 641), 'in_out.deformable_object_reader.DeformableObjectReader', 'DeformableObjectReader', ([], {}), '()\n', (639, 641), False, 'from in_out.deformable_object_reader import DeformableObjectReader\n'), ((832, 865), 'core.observations.deformable_objects.deformable_multi_object.DeformableMultiObject', 'DeformableMultiObject', (['[object_1]'], {}), '([object_1])\n', (853, 865), False, 'from core.observations.deformable_objects.deformable_multi_object import DeformableMultiObject\n'), ((887, 920), 'core.observations.deformable_objects.deformable_multi_object.DeformableMultiObject', 'DeformableMultiObject', (['[object_2]'], {}), '([object_2])\n', (908, 920), False, 'from core.observations.deformable_objects.deformable_multi_object import DeformableMultiObject\n'), ((2009, 2039), 'os.path.isfile', 'os.path.isfile', (['path_to_mesh_1'], {}), '(path_to_mesh_1)\n', (2023, 2039), False, 'import os\n'), ((2147, 2177), 'os.path.isfile', 'os.path.isfile', (['path_to_mesh_2'], {}), '(path_to_mesh_2)\n', (2161, 2177), False, 'import os\n'), ((993, 1038), 'support.kernels.factory', 'kernel_factory.factory', (['"""torch"""', 'kernel_width'], {}), "('torch', kernel_width)\n", (1015, 1038), True, 'import support.kernels as kernel_factory\n'), ((104, 129), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'import os\n'), ((1110, 1133), 'torch.from_numpy', 'torch.from_numpy', (['value'], {}), '(value)\n', (1126, 1133), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import unittest
from dashboard.time_utils import (epoch_to_datetime, milliseconds_to_seconds,
seconds_to_time)
class TestTimeUtils(unittest.TestCase):
def test_empty_epoch_to_datetime_should_raise_exception(self):
with self.assertRaises(TypeError):
epoch_to_datetime(seconds=None)
def test_zero_epoch_to_datetime_should_return_data(self):
result = epoch_to_datetime(seconds=0)
expected = '1970-01-01 00:00:00'
self.assertEqual(result, expected)
    def test_none_seconds_to_time_should_raise_exception(self):
with self.assertRaises(TypeError):
seconds_to_time(seconds=None)
def test_one_hour_one_minute_one_second_to_time_should_return_data(self):
result = seconds_to_time(seconds=3661)
expected = '1:01:01'
self.assertEqual(result, expected)
    def test_none_ms_milliseconds_to_seconds_should_raise_exception(self):
with self.assertRaises(TypeError):
milliseconds_to_seconds(milliseconds=None)
def test_float_ms_milliseconds_to_seconds_should_return_data(self):
result = milliseconds_to_seconds(milliseconds=8000.9)
expected = 8.0
self.assertEqual(result, expected)
def test_int_ms_milliseconds_to_seconds_should_return_data(self):
result = milliseconds_to_seconds(milliseconds=8000)
expected = 8.0
self.assertEqual(result, expected)
| [
"dashboard.time_utils.milliseconds_to_seconds",
"dashboard.time_utils.epoch_to_datetime",
"dashboard.time_utils.seconds_to_time"
] | [((446, 474), 'dashboard.time_utils.epoch_to_datetime', 'epoch_to_datetime', ([], {'seconds': '(0)'}), '(seconds=0)\n', (463, 474), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((805, 834), 'dashboard.time_utils.seconds_to_time', 'seconds_to_time', ([], {'seconds': '(3661)'}), '(seconds=3661)\n', (820, 834), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((1171, 1215), 'dashboard.time_utils.milliseconds_to_seconds', 'milliseconds_to_seconds', ([], {'milliseconds': '(8000.9)'}), '(milliseconds=8000.9)\n', (1194, 1215), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((1370, 1412), 'dashboard.time_utils.milliseconds_to_seconds', 'milliseconds_to_seconds', ([], {'milliseconds': '(8000)'}), '(milliseconds=8000)\n', (1393, 1412), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((334, 365), 'dashboard.time_utils.epoch_to_datetime', 'epoch_to_datetime', ([], {'seconds': 'None'}), '(seconds=None)\n', (351, 365), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((679, 708), 'dashboard.time_utils.seconds_to_time', 'seconds_to_time', ([], {'seconds': 'None'}), '(seconds=None)\n', (694, 708), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n'), ((1038, 1080), 'dashboard.time_utils.milliseconds_to_seconds', 'milliseconds_to_seconds', ([], {'milliseconds': 'None'}), '(milliseconds=None)\n', (1061, 1080), False, 'from dashboard.time_utils import epoch_to_datetime, milliseconds_to_seconds, seconds_to_time\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
fullname = models.CharField(blank=True, max_length=1024)
age = models.IntegerField(default=None, blank=True, null=True)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save() | [
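# Hedged usage sketch (not part of the original module): because of the two
# signal receivers above, creating a User also creates its Profile, and
# saving the User re-saves the related Profile.
#
#   user = User.objects.create(username="alice")
#   user.profile.fullname = "Alice Example"
#   user.save()  # save_user_profile cascades to user.profile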
"django.dispatch.receiver",
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((464, 496), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (472, 496), False, 'from django.dispatch import receiver\n'), ((614, 646), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (622, 646), False, 'from django.dispatch import receiver\n'), ((287, 339), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (307, 339), False, 'from django.db import models\n'), ((352, 397), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1024)'}), '(blank=True, max_length=1024)\n', (368, 397), False, 'from django.db import models\n'), ((405, 461), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'blank': '(True)', 'null': '(True)'}), '(default=None, blank=True, null=True)\n', (424, 461), False, 'from django.db import models\n')] |
version = '0.0.5'
from setuptools import setup
setup(
name = 'nyaraka',
version = version,
url = 'http://github.com/edsu/nyaraka',
author = '<NAME>',
author_email = '<EMAIL>',
py_modules = ['nyaraka',],
scripts = ['nyaraka.py',],
install_requires = ['requests', 'tqdm'],
description = 'Download Omeka data',
)
| [
"setuptools.setup"
] | [((49, 300), 'setuptools.setup', 'setup', ([], {'name': '"""nyaraka"""', 'version': 'version', 'url': '"""http://github.com/edsu/nyaraka"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'py_modules': "['nyaraka']", 'scripts': "['nyaraka.py']", 'install_requires': "['requests', 'tqdm']", 'description': '"""Download Omeka data"""'}), "(name='nyaraka', version=version, url='http://github.com/edsu/nyaraka',\n author='<NAME>', author_email='<EMAIL>', py_modules=['nyaraka'],\n scripts=['nyaraka.py'], install_requires=['requests', 'tqdm'],\n description='Download Omeka data')\n", (54, 300), False, 'from setuptools import setup\n')] |
"""Binning-based MI estimator."""
import numpy as np
from frites.core.mi_bin_ephy import (mi_bin_time, mi_bin_ccd_time)
from frites.estimator.est_mi_base import BaseMIEstimator
from frites.utils import jit
class BinMIEstimator(BaseMIEstimator):
"""Binning-based Mutual-Information estimator.
.. note::
The functions for estimating the mutual-information using binning are
relatively slow. If Numba is installed, those functions can be
considerably accelerated.
Parameters
----------
mi_type : {'cc', 'cd', 'ccd'}
Mutual information type (default : 'cc') :
* 'cc' : MI between two continuous variables
            * 'cd' : MI between a continuous and a discrete variable
            * 'ccd' : MI between two continuous variables conditioned by a
            third discrete one
n_bins : int | 4
Number of bins to estimate the probability distribution.
"""
def __init__(self, mi_type='cc', n_bins=4, verbose=None):
self.name = 'Binning-based Mutual Information Estimator'
add_str = f", n_bins={n_bins}"
super(BinMIEstimator, self).__init__(
mi_type=mi_type, add_str=add_str, verbose=verbose)
# =========================== Core function ===========================
fcn = {'cc': mi_bin_cc, 'cd': mi_bin_cd, 'ccd': mi_bin_ccd}[mi_type]
self._core_fun = fcn
# ========================== Function kwargs ==========================
# additional arguments that are going to be passed to the core function
self._kwargs = dict(n_bins=n_bins)
# update internal settings
settings = dict(mi_type=mi_type, core_fun=self._core_fun.__name__)
self.settings.merge([self._kwargs, settings])
def estimate(self, x, y, z=None, categories=None):
"""Estimate the (possibly conditional) mutual-information.
This method is made for computing the mutual-information on 3D
variables (i.e (n_var, 1, n_samples)) where n_var is an additional
dimension (e.g times, times x freqs etc.), 1 is a multivariate
axis and n_samples the number of samples. When computing MI, both the
multivariate and samples axes are reduced.
Parameters
----------
x : array_like
Array of shape (n_var, 1, n_samples). If x has more than three
dimensions, it's going to be internally reshaped.
y : array_like
Array with a shape that depends on the type of MI (mi_type) :
* If mi_type is 'cc' or 'ccd', y should be an array with the
same shape as x
* If mi_type is 'cd', y should be a row vector of shape
(n_samples,)
z : array_like | None
Array for conditional mutual-information. The shape is going to
depend on the type of MI (mi_type) :
* If mi_type is 'ccd', z should be a row vector of shape
(n_samples,)
* If mi_type is 'ccc', z should have the same shape as x and y
categories : array_like | None
Row vector of categories. This vector should have a shape of
            (n_samples,) and should contain integers describing the category
            of each sample. If categories are provided, the copnorm is going to
            be performed per category.
Returns
-------
mi : array_like
Array of (possibly conditional) mutual-information of shape
(n_categories, n_var). If categories is None when computing MI,
n_categories is going to be one.
"""
fcn = self.get_function()
return fcn(x, y, z=z, categories=categories)
def get_function(self):
"""Get the function to execute according to the input parameters.
        This can be particularly useful when computing MI in parallel as it
        avoids pickling the whole estimator and therefore leads to faster
computations.
The returned function has the following signature :
* fcn(x, y, z=None, categories=None)
and return an array of shape (n_categories, n_var).
"""
n_bins = np.int64(self._kwargs['n_bins'])
core_fun = self._core_fun
mi_type = self.settings['mi_type']
def estimator(x, y, z=None, categories=None):
# be sure that x is at least 3d
if x.ndim == 1:
x = x[np.newaxis, np.newaxis, :]
if x.ndim == 2:
x = x[np.newaxis, :]
# internal reshaping if x has more than 3 dimensions
assert x.ndim >= 3
reshape = None
if x.ndim > 3:
head_shape = list(x.shape)[0:-2]
reshape = (head_shape, np.prod(head_shape))
tail_shape = list(x.shape)[-2::]
x = x.reshape([reshape[1]] + tail_shape)
# types checking
if x.dtype != np.float32:
x = x.astype(np.float32, copy=False)
if y.dtype != np.float32:
y = y.astype(np.float32, copy=False)
if isinstance(z, np.ndarray) and (z.dtype != np.float32):
z = z.astype(np.float32, copy=False)
if not isinstance(categories, np.ndarray):
categories = np.zeros((1), dtype=np.float32)
if categories.dtype != np.float32:
categories = categories.astype(np.float32, copy=False)
# additional arguments for cmi
args = ()
if mi_type in ['ccd', 'ccc']:
args = [z]
# compute mi
mi = core_fun(x, y, *args, n_bins, categories)
# retrieve original shape (if needed)
if reshape is not None:
mi = mi.reshape([mi.shape[0]] + reshape[0])
return mi
return estimator
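    # Hedged usage sketch (synthetic data, not from the original source):
    #
    #   est = BinMIEstimator(mi_type='cd', n_bins=8)
    #   x = np.random.rand(10, 1, 400).astype(np.float32)    # (n_var, 1, n_samples)
    #   y = np.random.randint(0, 3, 400).astype(np.float32)  # discrete labels
    #   mi = est.estimate(x, y)                               # -> shape (1, 10)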
@jit("f4[:, :](f4[:,:,:], f4[:], i8, f4[:])")
def mi_bin_cc(x, y, n_bins, categories):
# proper shape of the regressor
n_times, _, n_trials = x.shape
# compute mi across (ffx) or per subject (rfx)
if len(categories) != n_trials:
mi = np.zeros((1, n_times), dtype=np.float32)
mi[0, :] = mi_bin_time(x[:, 0, :], y, n_bins, n_bins)
else:
# get categories informations
u_cat = np.unique(categories)
n_cats = len(u_cat)
# compute mi per subject
mi = np.zeros((n_cats, n_times), dtype=np.float32)
for n_c, c in enumerate(u_cat):
is_cat = categories == c
x_cat, y_cat = x[:, :, is_cat], y[is_cat]
mi[n_c, :] = mi_bin_time(x_cat[:, 0, :], y_cat, n_bins, n_bins)
return mi
@jit("f4[:, :](f4[:,:,:], f4[:], i8, f4[:])")
def mi_bin_cd(x, y, bins_x, categories):
# proper shape of the regressor
n_times, _, n_trials = x.shape
# get the number of bins for the y variable
bins_y = len(np.unique(y))
# compute mi across (ffx) or per subject (rfx)
if len(categories) != n_trials:
mi = np.zeros((1, n_times), dtype=np.float32)
mi[0, :] = mi_bin_time(x[:, 0, :], y, bins_x, bins_y)
else:
# get categories informations
u_cat = np.unique(categories)
n_cats = len(u_cat)
# compute mi per subject
mi = np.zeros((n_cats, n_times), dtype=np.float32)
for n_c, c in enumerate(u_cat):
is_cat = categories == c
x_cat, y_cat = x[:, :, is_cat], y[is_cat]
mi[n_c, :] = mi_bin_time(x_cat[:, 0, :], y_cat, bins_x, bins_y)
return mi
@jit("f4[:, :](f4[:,:,:], f4[:], f4[:], i8, f4[:])")
def mi_bin_ccd(x, y, z, n_bins, categories):
# proper shape of the regressor
n_times, _, n_trials = x.shape
# compute mi across (ffx) or per subject (rfx)
if len(categories) != n_trials:
mi = np.zeros((1, n_times), dtype=np.float32)
mi[0, :] = mi_bin_ccd_time(x[:, 0, :], y, z, n_bins)
else:
# get categories informations
u_cat = np.unique(categories)
n_cats = len(u_cat)
# compute mi per subject
mi = np.zeros((n_cats, n_times), dtype=np.float32)
for n_c, c in enumerate(u_cat):
is_cat = categories == c
x_cat, y_cat, z_cat = x[:, :, is_cat], y[is_cat], z[is_cat]
mi[n_c, :] = mi_bin_ccd_time(x_cat[:, 0, :], y_cat, z_cat, n_bins)
return mi
| [
"numpy.prod",
"numpy.int64",
"numpy.unique",
"frites.core.mi_bin_ephy.mi_bin_ccd_time",
"numpy.zeros",
"frites.core.mi_bin_ephy.mi_bin_time",
"frites.utils.jit"
] | [((5943, 5987), 'frites.utils.jit', 'jit', (['"""f4[:, :](f4[:,:,:], f4[:], i8, f4[:])"""'], {}), "('f4[:, :](f4[:,:,:], f4[:], i8, f4[:])')\n", (5946, 5987), False, 'from frites.utils import jit\n'), ((6733, 6777), 'frites.utils.jit', 'jit', (['"""f4[:, :](f4[:,:,:], f4[:], i8, f4[:])"""'], {}), "('f4[:, :](f4[:,:,:], f4[:], i8, f4[:])')\n", (6736, 6777), False, 'from frites.utils import jit\n'), ((7602, 7653), 'frites.utils.jit', 'jit', (['"""f4[:, :](f4[:,:,:], f4[:], f4[:], i8, f4[:])"""'], {}), "('f4[:, :](f4[:,:,:], f4[:], f4[:], i8, f4[:])')\n", (7605, 7653), False, 'from frites.utils import jit\n'), ((4239, 4271), 'numpy.int64', 'np.int64', (["self._kwargs['n_bins']"], {}), "(self._kwargs['n_bins'])\n", (4247, 4271), True, 'import numpy as np\n'), ((6200, 6240), 'numpy.zeros', 'np.zeros', (['(1, n_times)'], {'dtype': 'np.float32'}), '((1, n_times), dtype=np.float32)\n', (6208, 6240), True, 'import numpy as np\n'), ((6260, 6302), 'frites.core.mi_bin_ephy.mi_bin_time', 'mi_bin_time', (['x[:, 0, :]', 'y', 'n_bins', 'n_bins'], {}), '(x[:, 0, :], y, n_bins, n_bins)\n', (6271, 6302), False, 'from frites.core.mi_bin_ephy import mi_bin_time, mi_bin_ccd_time\n'), ((6367, 6388), 'numpy.unique', 'np.unique', (['categories'], {}), '(categories)\n', (6376, 6388), True, 'import numpy as np\n'), ((6463, 6508), 'numpy.zeros', 'np.zeros', (['(n_cats, n_times)'], {'dtype': 'np.float32'}), '((n_cats, n_times), dtype=np.float32)\n', (6471, 6508), True, 'import numpy as np\n'), ((6955, 6967), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6964, 6967), True, 'import numpy as np\n'), ((7069, 7109), 'numpy.zeros', 'np.zeros', (['(1, n_times)'], {'dtype': 'np.float32'}), '((1, n_times), dtype=np.float32)\n', (7077, 7109), True, 'import numpy as np\n'), ((7129, 7171), 'frites.core.mi_bin_ephy.mi_bin_time', 'mi_bin_time', (['x[:, 0, :]', 'y', 'bins_x', 'bins_y'], {}), '(x[:, 0, :], y, bins_x, bins_y)\n', (7140, 7171), False, 'from frites.core.mi_bin_ephy import mi_bin_time, mi_bin_ccd_time\n'), ((7236, 7257), 'numpy.unique', 'np.unique', (['categories'], {}), '(categories)\n', (7245, 7257), True, 'import numpy as np\n'), ((7332, 7377), 'numpy.zeros', 'np.zeros', (['(n_cats, n_times)'], {'dtype': 'np.float32'}), '((n_cats, n_times), dtype=np.float32)\n', (7340, 7377), True, 'import numpy as np\n'), ((7870, 7910), 'numpy.zeros', 'np.zeros', (['(1, n_times)'], {'dtype': 'np.float32'}), '((1, n_times), dtype=np.float32)\n', (7878, 7910), True, 'import numpy as np\n'), ((7930, 7971), 'frites.core.mi_bin_ephy.mi_bin_ccd_time', 'mi_bin_ccd_time', (['x[:, 0, :]', 'y', 'z', 'n_bins'], {}), '(x[:, 0, :], y, z, n_bins)\n', (7945, 7971), False, 'from frites.core.mi_bin_ephy import mi_bin_time, mi_bin_ccd_time\n'), ((8036, 8057), 'numpy.unique', 'np.unique', (['categories'], {}), '(categories)\n', (8045, 8057), True, 'import numpy as np\n'), ((8132, 8177), 'numpy.zeros', 'np.zeros', (['(n_cats, n_times)'], {'dtype': 'np.float32'}), '((n_cats, n_times), dtype=np.float32)\n', (8140, 8177), True, 'import numpy as np\n'), ((6665, 6715), 'frites.core.mi_bin_ephy.mi_bin_time', 'mi_bin_time', (['x_cat[:, 0, :]', 'y_cat', 'n_bins', 'n_bins'], {}), '(x_cat[:, 0, :], y_cat, n_bins, n_bins)\n', (6676, 6715), False, 'from frites.core.mi_bin_ephy import mi_bin_time, mi_bin_ccd_time\n'), ((7534, 7584), 'frites.core.mi_bin_ephy.mi_bin_time', 'mi_bin_time', (['x_cat[:, 0, :]', 'y_cat', 'bins_x', 'bins_y'], {}), '(x_cat[:, 0, :], y_cat, bins_x, bins_y)\n', (7545, 7584), False, 'from frites.core.mi_bin_ephy import mi_bin_time, 
mi_bin_ccd_time\n'), ((8352, 8405), 'frites.core.mi_bin_ephy.mi_bin_ccd_time', 'mi_bin_ccd_time', (['x_cat[:, 0, :]', 'y_cat', 'z_cat', 'n_bins'], {}), '(x_cat[:, 0, :], y_cat, z_cat, n_bins)\n', (8367, 8405), False, 'from frites.core.mi_bin_ephy import mi_bin_time, mi_bin_ccd_time\n'), ((5375, 5404), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (5383, 5404), True, 'import numpy as np\n'), ((4829, 4848), 'numpy.prod', 'np.prod', (['head_shape'], {}), '(head_shape)\n', (4836, 4848), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from pyhsmm.basic.distributions import PoissonDuration
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange
from pyslds.models import DefaultSLDS
np.random.seed(0)
###################
# generate data #
###################
import autoregressive
As = [np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for alpha, theta in ((0.95,0.1), (0.95,-0.1), (1., 0.))]
truemodel = autoregressive.models.ARHSMM(
alpha=4.,init_state_concentration=4.,
obs_distns=[AutoRegression(A=A,sigma=0.05*np.eye(2)) for A in As],
dur_distns=[PoissonDuration(alpha_0=3*50,beta_0=3) for _ in As])
truemodel.prefix = np.array([[0.,3.]])
data, labels = truemodel.generate(1000)
data = data[truemodel.nlags:]
plt.figure()
plt.plot(data[:,0],data[:,1],'bx-')
#################
# build model #
#################
Kmax = 10 # number of latent discrete states
D_latent = 2 # latent linear dynamics' dimension
D_obs = 2 # data dimension
Cs = [np.eye(D_obs) for _ in range(Kmax)] # Shared emission matrices
sigma_obss = [0.05 * np.eye(D_obs) for _ in range(Kmax)] # Emission noise covariances
model = DefaultSLDS(
K=Kmax, D_obs=D_obs, D_latent=D_latent,
Cs=Cs, sigma_obss=sigma_obss)
model.add_data(data)
model.resample_states()
for _ in progprint_xrange(10):
model.resample_model()
model.states_list[0]._init_mf_from_gibbs()
####################
# run mean field #
####################
vlbs = []
for _ in progprint_xrange(50):
vlbs.append(model.meanfield_coordinate_descent_step())
plt.figure()
plt.plot(vlbs)
plt.xlabel("Iteration")
plt.ylabel("VLB")
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9,3))
gs = gridspec.GridSpec(7,1)
ax1 = fig.add_subplot(gs[:-2])
ax2 = fig.add_subplot(gs[-2], sharex=ax1)
ax3 = fig.add_subplot(gs[-1], sharex=ax1)
im = ax1.matshow(model.states_list[0].expected_states.T, aspect='auto')
ax1.set_xticks([])
ax1.set_yticks(np.arange(Kmax))
ax1.set_ylabel("Discrete State")
ax2.matshow(model.states_list[0].expected_states.argmax(1)[None,:], aspect='auto')
ax2.set_xticks([])
ax2.set_yticks([])
ax3.matshow(labels[None,:], aspect='auto')
ax3.set_xticks([])
ax3.set_yticks([])
ax3.set_xlabel("Time")
plt.show()
| [
"pyslds.models.DefaultSLDS",
"numpy.eye",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"pyhsmm.basic.distributions.PoissonDuration",
"numpy.random.seed",
"numpy.cos",
"numpy.sin",
"pyhsmm.util.text.progprint_xrange",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((281, 298), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (295, 298), True, 'import numpy as np\n'), ((791, 813), 'numpy.array', 'np.array', (['[[0.0, 3.0]]'], {}), '([[0.0, 3.0]])\n', (799, 813), True, 'import numpy as np\n'), ((882, 894), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (892, 894), True, 'import matplotlib.pyplot as plt\n'), ((895, 934), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', 'data[:, 1]', '"""bx-"""'], {}), "(data[:, 0], data[:, 1], 'bx-')\n", (903, 934), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1456), 'pyslds.models.DefaultSLDS', 'DefaultSLDS', ([], {'K': 'Kmax', 'D_obs': 'D_obs', 'D_latent': 'D_latent', 'Cs': 'Cs', 'sigma_obss': 'sigma_obss'}), '(K=Kmax, D_obs=D_obs, D_latent=D_latent, Cs=Cs, sigma_obss=\n sigma_obss)\n', (1381, 1456), False, 'from pyslds.models import DefaultSLDS\n'), ((1517, 1537), 'pyhsmm.util.text.progprint_xrange', 'progprint_xrange', (['(10)'], {}), '(10)\n', (1533, 1537), False, 'from pyhsmm.util.text import progprint_xrange\n'), ((1694, 1714), 'pyhsmm.util.text.progprint_xrange', 'progprint_xrange', (['(50)'], {}), '(50)\n', (1710, 1714), False, 'from pyhsmm.util.text import progprint_xrange\n'), ((1776, 1788), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1786, 1788), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1803), 'matplotlib.pyplot.plot', 'plt.plot', (['vlbs'], {}), '(vlbs)\n', (1797, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (1814, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""VLB"""'], {}), "('VLB')\n", (1838, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3)'}), '(figsize=(9, 3))\n', (1902, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1946), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(7)', '(1)'], {}), '(7, 1)\n', (1940, 1946), True, 'import matplotlib.gridspec as gridspec\n'), ((2447, 2457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2455, 2457), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1204), 'numpy.eye', 'np.eye', (['D_obs'], {}), '(D_obs)\n', (1197, 1204), True, 'import numpy as np\n'), ((2168, 2183), 'numpy.arange', 'np.arange', (['Kmax'], {}), '(Kmax)\n', (2177, 2183), True, 'import numpy as np\n'), ((1293, 1306), 'numpy.eye', 'np.eye', (['D_obs'], {}), '(D_obs)\n', (1299, 1306), True, 'import numpy as np\n'), ((718, 759), 'pyhsmm.basic.distributions.PoissonDuration', 'PoissonDuration', ([], {'alpha_0': '(3 * 50)', 'beta_0': '(3)'}), '(alpha_0=3 * 50, beta_0=3)\n', (733, 759), False, 'from pyhsmm.basic.distributions import PoissonDuration\n'), ((402, 415), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (408, 415), True, 'import numpy as np\n'), ((451, 464), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (457, 464), True, 'import numpy as np\n'), ((466, 479), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (472, 479), True, 'import numpy as np\n'), ((418, 431), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (424, 431), True, 'import numpy as np\n'), ((677, 686), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (683, 686), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
import pygame
from polygon_app import PolygonApp, EqualPolygonApp
from circled_polygon import CircledPolygon
from polygoned_circle import PolygonedCircle, EqualPolygonedCircle
from circle_app import CircleApp
from math import pi, sin, cos, sqrt, pow
from itertools import starmap
from constants import OPAQUE
from geometry import to_degrees
class TextRing (CircledPolygon): # circled polygon with polygon'd circle # number of sides of polygon based on text
def __init__ (self, child, text, font=None, *args, **kwargs):
#assert child is not None
if not isinstance (child, PolygonedCircle) and not isinstance (child, EqualPolygonedCircle):
assert child is None or isinstance (child, CircleApp)
child = EqualPolygonedCircle (None, child, background=None, *args, **kwargs)
#child = EqualPolygonedCircle (None, child, *args, **kwargs)
#assert child is not None
CircledPolygon.__init__ (self, child, *args, **kwargs)
self.text = text
self.font = font
#assert self.child is not None
self.th = None
def set_subsurface (self, ss):
CircledPolygon.set_subsurface (self, ss)
if self.font is None:
df = pygame.font.get_default_font ()
font = pygame.font.Font (df, 8)
self.font = font
texts, tw, th, minn, maxn, x, y, w, h = self.compute_sizes ()
# TODO handle change in sizes
self.texts = texts
self.tw = tw
self.th = th
if self.child is not None:
rect = self.inner_rect ()
ss2 = ss.subsurface (rect)
self.child.set_subsurface (ss2)
self.minn = minn
self.maxn = maxn
self.x = x
self.y = y
self.w = w
self.h = h
#self.next_cycle ()
def compute_sizes (self):
text = self.text
print ("text: %s" % (text,))
N = len (text)
print ("N: %s" % (N,))
font = self.font
crfg = (0, 255, 0, 255)
f = lambda c: (font.render (c, True, crfg), *font.size (c))
g = lambda c: str (c)
texts = map (g, text)
texts = map (f, texts)
texts = tuple (texts) # image, w, h
f = lambda iwh: iwh[1]
tw = max (texts, key=f)[1]
f = lambda iwh: iwh[2]
th = max (texts, key=f)[2]
print ("tw: %s, th: %s" % (tw, th))
# each char of text is rotated => text is a polygon, circle is inscribed
X, Y, W, H = self.inner_rect () # outer radii
print ("(X, Y): (%s, %s) (W: %s, H: %s)" % (X, Y, W, H))
#w, h = W - 2 * tw, H - 2 * th # make room for text aligned at axes
x, y, w, h = X + tw / 2, Y + th / 2, W - tw, H - th # text center
print ("w: %s, h: %s" % (w, h))
# text is rendered between outer and inner radii
minn = 3 # min number of chars that will look "arcane"
n = minn
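        # grow n until the chord between adjacent character positions on the (W, H) ellipse is shorter than one character width; the last n that fits becomes maxn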
while True: # TODO if the formula doesn't work, at least use an interpolated binary search
n = n + 1
i = 0
theta1 = (i + 0) / n * 2 * pi
theta2 = (i + 1) / n * 2 * pi
dx = cos (theta2) - cos (theta1)
dy = sin (theta2) - sin (theta1)
sl = sqrt (pow (W * dx, 2) + pow (H * dy, 2)) # side length of polygon
if sl < tw: break
maxn = n - 1
print ("maxn: %s" % (maxn,))
assert maxn >= minn * (minn + 1) # lower bound is minn^2, and the numbers must be different
return texts, tw, th, minn, maxn, x, y, w, h
def transform_helper (self, text, w, h, angle):
intermediate_alpha_surface = pygame.Surface ((w, h), flags=pygame.SRCALPHA)
intermediate_alpha_surface.fill (pygame.Color (*OPAQUE))
text_rect = text.get_rect ()
text_rect.center = (w / 2, h / 2)
intermediate_alpha_surface.blit (text, text_rect, special_flags=pygame.BLEND_RGBA_MIN)
# when angle is 0 , rad is - pi / 2
# when angle is +pi / 2, rad is 0
# when angle is pi , rad is + pi / 2
# when angle is -pi / 2, rad is 0
#if 0 <= angle and angle <= pi: rad = angle
#else: rad = angle - pi
rad = angle - pi / 2
#orientation = NORTH
degrees = to_degrees (rad)
#degrees = 0
xform = pygame.transform.rotate (intermediate_alpha_surface, degrees)
#xform = pygame.transform.rotate (text, angle)
return xform
def get_transforms (self):
texts = self.texts # image, w, h
angles = self.angles
# TODO might have to blit onto a temp surface
f = lambda text, angle: self.transform_helper (*text, angle)
ntext = len (texts)
nangle = len (angles)
#assert ntext == nangle, "ntext: %s, nangle: %s" % (ntext, nangle)
k = zip (self.get_text_for_transforms (), angles)
xforms = starmap (f, k)
xforms = tuple (xforms)
return xforms
def get_text_for_transforms (self): return self.texts
# def minsz: minsz of inner circle... + tw, th => minsz of outer
# 3 * 4 = 12 points on polygon...
#def draw_foreground (self, temp):
def draw_cropped_scene (self, temp):
print ("circular_matrix_text.draw_foreground ()")
#CircleApp.draw_foreground (self, temp)
CircledPolygon.draw_cropped_scene (self, temp)
xforms = self.xforms # image, w, h
n = self.n
ndx = self.sectioni
pts = self.pts
angles = self.angles
print ("nsection: %s, ndx: %s" % (len (self.sections), ndx))
#k, section = self.sections[ndx]
section = self.sections[ndx]
#for i in range (0, n, k):
for i in section:
theta = angles[i]
xform = xforms[i]
pt = pts[i]
#rect = text.get_rect ()
rect = xform.get_rect ()
rect.center = (round (pt[0]), round (pt[1]))
temp.blit (xform, rect)
#self.increment_section_index () # TODO move this to the troller
def inner_rect (self):
rect = self.outer_rect ()
X, Y, W, H = rect
th = self.th
if th is None: return rect
w, h = W - 2 * th, H - 2 * th
x, y = X + (W - w) / 2, Y + (H - h) / 2
rect = x, y, w, h
return rect
if __name__ == "__main__":
def main ():
# TODO
a = None
with HAL9000 (app=a) as g: g.run ()
main ()
quit ()
| [
"pygame.Surface",
"circled_polygon.CircledPolygon.__init__",
"math.pow",
"circled_polygon.CircledPolygon.set_subsurface",
"pygame.transform.rotate",
"math.cos",
"math.sin",
"circled_polygon.CircledPolygon.draw_cropped_scene",
"itertools.starmap",
"pygame.font.Font",
"pygame.Color",
"geometry.to_degrees",
"pygame.font.get_default_font",
"polygoned_circle.EqualPolygonedCircle"
] | [((901, 954), 'circled_polygon.CircledPolygon.__init__', 'CircledPolygon.__init__', (['self', 'child', '*args'], {}), '(self, child, *args, **kwargs)\n', (924, 954), False, 'from circled_polygon import CircledPolygon\n'), ((1078, 1117), 'circled_polygon.CircledPolygon.set_subsurface', 'CircledPolygon.set_subsurface', (['self', 'ss'], {}), '(self, ss)\n', (1107, 1117), False, 'from circled_polygon import CircledPolygon\n'), ((3461, 3506), 'pygame.Surface', 'pygame.Surface', (['(w, h)'], {'flags': 'pygame.SRCALPHA'}), '((w, h), flags=pygame.SRCALPHA)\n', (3475, 3506), False, 'import pygame\n'), ((4042, 4057), 'geometry.to_degrees', 'to_degrees', (['rad'], {}), '(rad)\n', (4052, 4057), False, 'from geometry import to_degrees\n'), ((4084, 4144), 'pygame.transform.rotate', 'pygame.transform.rotate', (['intermediate_alpha_surface', 'degrees'], {}), '(intermediate_alpha_surface, degrees)\n', (4107, 4144), False, 'import pygame\n'), ((4587, 4600), 'itertools.starmap', 'starmap', (['f', 'k'], {}), '(f, k)\n', (4594, 4600), False, 'from itertools import starmap\n'), ((4975, 5020), 'circled_polygon.CircledPolygon.draw_cropped_scene', 'CircledPolygon.draw_cropped_scene', (['self', 'temp'], {}), '(self, temp)\n', (5008, 5020), False, 'from circled_polygon import CircledPolygon\n'), ((738, 805), 'polygoned_circle.EqualPolygonedCircle', 'EqualPolygonedCircle', (['None', 'child', '*args'], {'background': 'None'}), '(None, child, *args, background=None, **kwargs)\n', (758, 805), False, 'from polygoned_circle import PolygonedCircle, EqualPolygonedCircle\n'), ((1158, 1188), 'pygame.font.get_default_font', 'pygame.font.get_default_font', ([], {}), '()\n', (1186, 1188), False, 'import pygame\n'), ((1205, 1228), 'pygame.font.Font', 'pygame.font.Font', (['df', '(8)'], {}), '(df, 8)\n', (1221, 1228), False, 'import pygame\n'), ((3543, 3564), 'pygame.Color', 'pygame.Color', (['*OPAQUE'], {}), '(*OPAQUE)\n', (3555, 3564), False, 'import pygame\n'), ((2981, 2992), 'math.cos', 'cos', (['theta2'], {}), '(theta2)\n', (2984, 2992), False, 'from math import pi, sin, cos, sqrt, pow\n'), ((2996, 3007), 'math.cos', 'cos', (['theta1'], {}), '(theta1)\n', (2999, 3007), False, 'from math import pi, sin, cos, sqrt, pow\n'), ((3021, 3032), 'math.sin', 'sin', (['theta2'], {}), '(theta2)\n', (3024, 3032), False, 'from math import pi, sin, cos, sqrt, pow\n'), ((3036, 3047), 'math.sin', 'sin', (['theta1'], {}), '(theta1)\n', (3039, 3047), False, 'from math import pi, sin, cos, sqrt, pow\n'), ((3067, 3081), 'math.pow', 'pow', (['(W * dx)', '(2)'], {}), '(W * dx, 2)\n', (3070, 3081), False, 'from math import pi, sin, cos, sqrt, pow\n'), ((3085, 3099), 'math.pow', 'pow', (['(H * dy)', '(2)'], {}), '(H * dy, 2)\n', (3088, 3099), False, 'from math import pi, sin, cos, sqrt, pow\n')] |
# (c) 2015, <NAME> <<EMAIL>>
# Based on `runner/lookup_plugins/items.py` for Ansible
# (c) 2012, <NAME> <<EMAIL>>
#
# This file is part of Debops.
# This file is NOT part of Ansible yet.
#
# Debops is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Debops. If not, see <https://www.gnu.org/licenses/>.
'''
This file implements the `with_lists` lookup filter for Ansible. Unlike
`with_items`, this one does *not* flatten the lists passed to it.
Example:
- debug: msg="{{item.0}} -- {{item.1}} -- {{item.2}}"
with_lists:
- ["General", "Verbosity", "0"]
- ["Mapping", "Nobody-User", "nobody"]
- ["Mapping", "Nobody-Group", "nogroup"]
Output (shortened):
"msg": "General -- Verbosity -- 0"
"msg": "Mapping -- Nobody-User -- nobody"
"msg": "Mapping -- Nobody-Group -- nogroup"
'''
import ansible.utils as utils
import ansible.errors as errors
try:
from ansible.plugins.lookup import LookupBase
except ImportError:
LookupBase = object
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, (list, set)):
raise errors.AnsibleError("with_list expects a list or a set")
for i, elem in enumerate(terms):
if not isinstance(elem, (list, tuple)):
raise errors.AnsibleError(
"with_list expects a list (or a set) of lists"
" or tuples, but elem %i is not")
return terms
| [
"ansible.utils.listify_lookup_plugin_terms",
"ansible.errors.AnsibleError"
] | [((1668, 1730), 'ansible.utils.listify_lookup_plugin_terms', 'utils.listify_lookup_plugin_terms', (['terms', 'self.basedir', 'inject'], {}), '(terms, self.basedir, inject)\n', (1701, 1730), True, 'import ansible.utils as utils\n'), ((1797, 1853), 'ansible.errors.AnsibleError', 'errors.AnsibleError', (['"""with_list expects a list or a set"""'], {}), "('with_list expects a list or a set')\n", (1816, 1853), True, 'import ansible.errors as errors\n'), ((1970, 2077), 'ansible.errors.AnsibleError', 'errors.AnsibleError', (['"""with_list expects a list (or a set) of lists or tuples, but elem %i is not"""'], {}), "(\n 'with_list expects a list (or a set) of lists or tuples, but elem %i is not'\n )\n", (1989, 2077), True, 'import ansible.errors as errors\n')] |
from __future__ import annotations
from math import ceil
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ship_class import ShipClass
from starship import Starship
from components.starship_system import StarshipSystem
class Cannon(StarshipSystem):
starship:Starship
def __init__(self, shipclass:ShipClass) -> None:
super().__init__(f"{shipclass.energy_weapon.short_cannon_name_cap}s:")
@property
def ship_can_fire_cannons(self):
return self.starship.ship_class.ship_type_can_fire_cannons and self.is_opperational
@property
def get_max_effective_cannon_firepower(self):
return ceil(self.starship.ship_class.max_cannon_energy * self.get_effective_value)
@property
def get_max_cannon_firepower(self):
return self.starship.ship_class.max_cannon_energy | [
"math.ceil"
] | [((666, 741), 'math.ceil', 'ceil', (['(self.starship.ship_class.max_cannon_energy * self.get_effective_value)'], {}), '(self.starship.ship_class.max_cannon_energy * self.get_effective_value)\n', (670, 741), False, 'from math import ceil\n')] |
## The code is based on the implementation from:
## https://github.com/hongwang600/RelationDectection
##
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from data import gen_data
from model import SimilarityModel
from utils import process_testing_samples, process_samples, ranking_sequence,\
copy_grad_data, get_grad_params
from evaluate import evaluate_model
from config import CONFIG as conf
embedding_dim = conf['embedding_dim']
hidden_dim = conf['hidden_dim']
batch_size = conf['batch_size']
model_path = conf['model_path']
num_cands = conf['num_cands']
device = conf['device']
lr = conf['learning_rate']
loss_margin = conf['loss_margin']
def sample_memory_data(sample_pool, sample_size):
if len(sample_pool) > 0:
sample_indexs = random.sample(range(len(sample_pool)),
min(sample_size, len(sample_pool)))
return [sample_pool[index] for index in sample_indexs]
else:
return []
def feed_samples(model, samples, loss_function, all_relations, device):
questions, relations, relation_set_lengths = process_samples(
samples, all_relations, device)
#print('got data')
ranked_questions, reverse_question_indexs = \
ranking_sequence(questions)
ranked_relations, reverse_relation_indexs =\
ranking_sequence(relations)
question_lengths = [len(question) for question in ranked_questions]
relation_lengths = [len(relation) for relation in ranked_relations]
#print(ranked_questions)
pad_questions = torch.nn.utils.rnn.pad_sequence(ranked_questions)
pad_relations = torch.nn.utils.rnn.pad_sequence(ranked_relations)
#print(pad_questions)
pad_questions = pad_questions.to(device)
pad_relations = pad_relations.to(device)
#print(pad_questions)
model.zero_grad()
model.init_hidden(device, sum(relation_set_lengths))
all_scores = model(pad_questions, pad_relations, device,
reverse_question_indexs, reverse_relation_indexs,
question_lengths, relation_lengths)
all_scores = all_scores.to('cpu')
pos_scores = []
neg_scores = []
start_index = 0
for length in relation_set_lengths:
pos_scores.append(all_scores[start_index].expand(length-1))
neg_scores.append(all_scores[start_index+1:start_index+length])
start_index += length
pos_scores = torch.cat(pos_scores)
neg_scores = torch.cat(neg_scores)
loss = loss_function(pos_scores, neg_scores,
torch.ones(sum(relation_set_lengths)-
len(relation_set_lengths)))
loss.backward()
# copied from facebook open source. (https://github.com/facebookresearch/
# GradientEpisodicMemory/blob/master/model/gem.py)
def project2cone2(gradient, memories, margin=0.5):
"""
Solves the GEM dual QP described in the paper given a proposed
gradient "gradient", and a memory of task gradients "memories".
Overwrites "gradient" with the final projected update.
input: gradient, p-vector
input: memories, (t * p)-vector
output: x, p-vector
"""
memories_np = memories.cpu().view(-1).double().numpy()
gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
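    # single-constraint case of the dual QP above: subtract the component of the gradient along the memory gradient, x = g - (g.m / m.m) * m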
x = gradient_np - (np.dot(gradient_np, memories_np)/
np.dot(memories_np, memories_np)) * memories_np
gradient.copy_(torch.Tensor(x).view(-1))
# copied from facebook open source. (https://github.com/facebookresearch/
# GradientEpisodicMemory/blob/master/model/gem.py)
def overwrite_grad(pp, newgrad, grad_dims):
"""
This is used to overwrite the gradients with a new gradient
vector, whenever violations occur.
pp: parameters
newgrad: corrected gradient
grad_dims: list storing number of parameters at each layer
"""
cnt = 0
for param in pp:
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg: en].contiguous().view(
param.grad.data.size())
param.grad.data.copy_(this_grad)
cnt += 1
def get_grads_memory_data(model, memory_data, loss_function,
all_relations, device):
if len(memory_data) == 0:
return []
memory_data_grads = []
memory_data_set = [memory_data]
for data in memory_data_set:
#print(data)
feed_samples(model, data, loss_function, all_relations, device)
memory_data_grads.append(copy_grad_data(model))
#print(memory_data_grads[-1][:10])
if len(memory_data_grads) > 1:
return torch.stack(memory_data_grads)
elif len(memory_data_grads) == 1:
return memory_data_grads[0].view(1,-1)
else:
return []
def train(training_data, valid_data, vocabulary, embedding_dim, hidden_dim,
device, batch_size, lr, model_path, embedding, all_relations,
model=None, epoch=100, all_seen_samples=[],
task_memory_size=100, loss_margin=0.5, all_seen_rels=[]):
if model is None:
torch.manual_seed(100)
model = SimilarityModel(embedding_dim, hidden_dim, len(vocabulary),
np.array(embedding), 1, device)
loss_function = nn.MarginRankingLoss(loss_margin)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
best_acc = 0
for epoch_i in range(epoch):
#print('epoch', epoch_i)
#training_data = training_data[0:100]
for i in range((len(training_data)-1)//batch_size+1):
memory_data = sample_memory_data(all_seen_samples, task_memory_size)
for this_sample in memory_data:
rel_cands = [rel for rel in all_seen_rels if rel!=this_sample[0]]
this_sample[1] = random.sample(rel_cands,
min(len(rel_cands),num_cands))
memory_data_grads = get_grads_memory_data(model, memory_data,
loss_function,
all_relations,
device)
#print(memory_data_grads)
samples = training_data[i*batch_size:(i+1)*batch_size]
feed_samples(model, samples, loss_function, all_relations, device)
sample_grad = copy_grad_data(model)
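            # GEM-style check below: if the new gradient points against the stored memory gradient (negative inner product), project it first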
if len(memory_data_grads) > 0:
if torch.matmul(memory_data_grads,
torch.t(sample_grad.view(1,-1))) < 0:
project2cone2(sample_grad, memory_data_grads)
grad_params = get_grad_params(model)
grad_dims = [param.data.numel() for param in grad_params]
overwrite_grad(grad_params, sample_grad, grad_dims)
optimizer.step()
'''
acc=evaluate_model(model, valid_data, batch_size, all_relations, device)
if acc > best_acc:
torch.save(model, model_path)
best_model = torch.load(model_path)
return best_model
'''
return model
if __name__ == '__main__':
training_data, testing_data, valid_data, all_relations, vocabulary, \
embedding=gen_data()
train(training_data, valid_data, vocabulary, embedding_dim, hidden_dim,
device, batch_size, lr, model_path, embedding, all_relations,
model=None, epoch=100)
#print(training_data[0:10])
#print(testing_data[0:10])
#print(valid_data[0:10])
#print(all_relations[0:10])
| [
"torch.manual_seed",
"utils.ranking_sequence",
"data.gen_data",
"torch.stack",
"torch.Tensor",
"torch.nn.utils.rnn.pad_sequence",
"utils.get_grad_params",
"utils.process_samples",
"utils.copy_grad_data",
"numpy.array",
"numpy.dot",
"torch.nn.MarginRankingLoss",
"torch.cat"
] | [((1160, 1207), 'utils.process_samples', 'process_samples', (['samples', 'all_relations', 'device'], {}), '(samples, all_relations, device)\n', (1175, 1207), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n'), ((1298, 1325), 'utils.ranking_sequence', 'ranking_sequence', (['questions'], {}), '(questions)\n', (1314, 1325), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n'), ((1383, 1410), 'utils.ranking_sequence', 'ranking_sequence', (['relations'], {}), '(relations)\n', (1399, 1410), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n'), ((1604, 1653), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['ranked_questions'], {}), '(ranked_questions)\n', (1635, 1653), False, 'import torch\n'), ((1674, 1723), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['ranked_relations'], {}), '(ranked_relations)\n', (1705, 1723), False, 'import torch\n'), ((2464, 2485), 'torch.cat', 'torch.cat', (['pos_scores'], {}), '(pos_scores)\n', (2473, 2485), False, 'import torch\n'), ((2503, 2524), 'torch.cat', 'torch.cat', (['neg_scores'], {}), '(neg_scores)\n', (2512, 2524), False, 'import torch\n'), ((5410, 5443), 'torch.nn.MarginRankingLoss', 'nn.MarginRankingLoss', (['loss_margin'], {}), '(loss_margin)\n', (5430, 5443), True, 'import torch.nn as nn\n'), ((7402, 7412), 'data.gen_data', 'gen_data', ([], {}), '()\n', (7410, 7412), False, 'from data import gen_data\n'), ((4782, 4812), 'torch.stack', 'torch.stack', (['memory_data_grads'], {}), '(memory_data_grads)\n', (4793, 4812), False, 'import torch\n'), ((5227, 5249), 'torch.manual_seed', 'torch.manual_seed', (['(100)'], {}), '(100)\n', (5244, 5249), False, 'import torch\n'), ((4666, 4687), 'utils.copy_grad_data', 'copy_grad_data', (['model'], {}), '(model)\n', (4680, 4687), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n'), ((5358, 5377), 'numpy.array', 'np.array', (['embedding'], {}), '(embedding)\n', (5366, 5377), True, 'import numpy as np\n'), ((6545, 6566), 'utils.copy_grad_data', 'copy_grad_data', (['model'], {}), '(model)\n', (6559, 6566), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n'), ((3380, 3412), 'numpy.dot', 'np.dot', (['gradient_np', 'memories_np'], {}), '(gradient_np, memories_np)\n', (3386, 3412), True, 'import numpy as np\n'), ((3437, 3469), 'numpy.dot', 'np.dot', (['memories_np', 'memories_np'], {}), '(memories_np, memories_np)\n', (3443, 3469), True, 'import numpy as np\n'), ((3504, 3519), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (3516, 3519), False, 'import torch\n'), ((6831, 6853), 'utils.get_grad_params', 'get_grad_params', (['model'], {}), '(model)\n', (6846, 6853), False, 'from utils import process_testing_samples, process_samples, ranking_sequence, copy_grad_data, get_grad_params\n')] |
from collections import defaultdict
import math
import numpy as np
import ray
from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG
from ray.rllib.utils.annotations import override
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch, DEFAULT_POLICY_ID
from ray.rllib.agents import with_common_config
from ray.rllib.evaluation.metrics import collect_episodes, summarize_episodes
from ray.tune.result import TIMESTEPS_THIS_ITER
from algorithms.agents.deep_nash_v1.deep_nash_policy_v1 import DeepNashPolicy
DEFAULT_CONFIG = with_common_config({
# Replay buffer size (keep this fixed for now, we don't have a better way of setting it)
"buffer_size": 100000,
# Update batch size (number of transition to pass for each batch)
"batch_size": 128,
    # Ratio between the number of steps sampled and the number of updates performed
"sampling_ratio": 5,
# Learning rate for Adam updates
"learning_rate": 0.0001,
# Discount factor
"gamma": 0.99,
# Learning rate for multiplicative weights
"beta": 0.05,
# Implicit exploration constant
"implicit_exploration": 0.05,
# Whether the policy and Q-function should share feature layers
"share_layers": False,
# Q-network hidden layers
"hidden_sizes": [256, 256],
# TensorFlow activation function
"activation": "tanh",
})
class ReplayBuffer:
def __init__(self, initial_size):
self._max_size = initial_size
self._index = 0
self._data = []
def expand(self, new_size):
if new_size < self._max_size:
raise ValueError("New buffer size is smaller than current size - cannot shrink buffer")
self._max_size = new_size
def add(self, sample):
if len(self._data) <= self._index:
self._data.append(sample)
else:
self._data[self._index] = sample
self._index = (self._index + 1) % self._max_size
def sample_batch(self, batch_size):
indices = np.random.randint(0, len(self._data), batch_size)
batch = defaultdict(list)
for idx in indices:
for key, value in self._data[idx].items():
batch[key].append(value)
for key in batch.keys():
batch[key] = np.stack(batch[key])
return SampleBatch(batch)
class DeepNash(Trainer):
_policy = DeepNashPolicy
_name = "DEEP_NASH_V1"
_default_config = DEFAULT_CONFIG
def __init__(self, config=None, env=None, logger_creator=None):
Trainer.__init__(self, config, env, logger_creator)
@override(Trainer)
def _make_workers(self, env_creator, policy, config, num_workers):
return Trainer._make_workers(self, env_creator, policy, config, num_workers)
@override(Trainer)
def _init(self, config, env_creator):
# Define rollout-workers
self.workers = self._make_workers(env_creator, self._policy, config, self.config["num_workers"])
# Define replay buffers dictionary
self._replay_buffers = {}
# Compute effective planning horizon
self._horizon = np.log(0.1) / np.log(config["gamma"])
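        # i.e. the number of steps t at which gamma**t falls to 0.1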
# Initialize learning rate and sample count
self._num_steps_observed = 0
def _train(self):
# Synchronize weights across remote workers if necessary
if self.workers.remote_workers():
weights = ray.put(self.workers.local_worker().get_weights())
for worker in self.workers.remote_workers():
worker.set_weights.remote(weights)
# Generate samples and add them to the replay buffer
if self.workers.remote_workers():
samples = SampleBatch.concat_samples(
ray.get([
worker.sample.remote()
for worker in self.workers.remote_workers()
]))
else:
samples = self.workers.local_worker().sample()
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({
DEFAULT_POLICY_ID: samples
}, samples.count)
self._num_steps_observed += samples.count
# Accumulate episode results
episodes, _ = collect_episodes(self.workers.local_worker(), self.workers.remote_workers())
results = summarize_episodes(episodes)
results[TIMESTEPS_THIS_ITER] = samples.count
# Add data to replay buffers and do updates of policies
policy_fetches = {}
for policy_id, policy_batch in samples.policy_batches.items():
if policy_id in self.workers.local_worker().policies_to_train:
if policy_id not in self._replay_buffers:
self._replay_buffers[policy_id] = ReplayBuffer(self.config["buffer_size"])
for sample in policy_batch.rows():
self._replay_buffers[policy_id].add(sample)
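                # one gradient update per `sampling_ratio` newly collected transitions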
required_updates = math.ceil(policy_batch.count / self.config["sampling_ratio"])
average_fetches = defaultdict(list)
for _ in range(required_updates):
batch = self._replay_buffers[policy_id].sample_batch(self.config["batch_size"])
fetches = self.workers.local_worker().policy_map[policy_id].learn_on_batch(batch)
for key, value in fetches["learner_stats"].items():
if value is not None and not isinstance(value, dict):
average_fetches[key].append(value)
for key, value in average_fetches.items():
average_fetches[key] = np.mean(value)
policy_fetches[policy_id] = average_fetches
results["learner/info"] = policy_fetches
# Update learning rate across workers
learning_rate = self.config["beta"] * np.sqrt(self._num_steps_observed / self._horizon)
results["learner/info/learning_rate"] = learning_rate
self.workers.foreach_trainable_policy(lambda policy, pid: policy.set_learning_rate(learning_rate))
return results
| [
"numpy.mean",
"numpy.sqrt",
"math.ceil",
"ray.rllib.agents.with_common_config",
"ray.rllib.evaluation.metrics.summarize_episodes",
"numpy.log",
"ray.rllib.utils.annotations.override",
"ray.rllib.agents.trainer.Trainer._make_workers",
"numpy.stack",
"collections.defaultdict",
"ray.rllib.agents.trainer.Trainer.__init__",
"ray.rllib.policy.sample_batch.SampleBatch",
"ray.rllib.policy.sample_batch.MultiAgentBatch"
] | [((550, 803), 'ray.rllib.agents.with_common_config', 'with_common_config', (["{'buffer_size': 100000, 'batch_size': 128, 'sampling_ratio': 5,\n 'learning_rate': 0.0001, 'gamma': 0.99, 'beta': 0.05,\n 'implicit_exploration': 0.05, 'share_layers': False, 'hidden_sizes': [\n 256, 256], 'activation': 'tanh'}"], {}), "({'buffer_size': 100000, 'batch_size': 128,\n 'sampling_ratio': 5, 'learning_rate': 0.0001, 'gamma': 0.99, 'beta': \n 0.05, 'implicit_exploration': 0.05, 'share_layers': False,\n 'hidden_sizes': [256, 256], 'activation': 'tanh'})\n", (568, 803), False, 'from ray.rllib.agents import with_common_config\n'), ((2593, 2610), 'ray.rllib.utils.annotations.override', 'override', (['Trainer'], {}), '(Trainer)\n', (2601, 2610), False, 'from ray.rllib.utils.annotations import override\n'), ((2777, 2794), 'ray.rllib.utils.annotations.override', 'override', (['Trainer'], {}), '(Trainer)\n', (2785, 2794), False, 'from ray.rllib.utils.annotations import override\n'), ((2071, 2088), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2082, 2088), False, 'from collections import defaultdict\n'), ((2318, 2336), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['batch'], {}), '(batch)\n', (2329, 2336), False, 'from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch, DEFAULT_POLICY_ID\n'), ((2535, 2586), 'ray.rllib.agents.trainer.Trainer.__init__', 'Trainer.__init__', (['self', 'config', 'env', 'logger_creator'], {}), '(self, config, env, logger_creator)\n', (2551, 2586), False, 'from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG\n'), ((2697, 2766), 'ray.rllib.agents.trainer.Trainer._make_workers', 'Trainer._make_workers', (['self', 'env_creator', 'policy', 'config', 'num_workers'], {}), '(self, env_creator, policy, config, num_workers)\n', (2718, 2766), False, 'from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG\n'), ((4323, 4351), 'ray.rllib.evaluation.metrics.summarize_episodes', 'summarize_episodes', (['episodes'], {}), '(episodes)\n', (4341, 4351), False, 'from ray.rllib.evaluation.metrics import collect_episodes, summarize_episodes\n'), ((2281, 2301), 'numpy.stack', 'np.stack', (['batch[key]'], {}), '(batch[key])\n', (2289, 2301), True, 'import numpy as np\n'), ((3124, 3135), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (3130, 3135), True, 'import numpy as np\n'), ((3138, 3161), 'numpy.log', 'np.log', (["config['gamma']"], {}), "(config['gamma'])\n", (3144, 3161), True, 'import numpy as np\n'), ((4022, 4082), 'ray.rllib.policy.sample_batch.MultiAgentBatch', 'MultiAgentBatch', (['{DEFAULT_POLICY_ID: samples}', 'samples.count'], {}), '({DEFAULT_POLICY_ID: samples}, samples.count)\n', (4037, 4082), False, 'from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch, DEFAULT_POLICY_ID\n'), ((5889, 5938), 'numpy.sqrt', 'np.sqrt', (['(self._num_steps_observed / self._horizon)'], {}), '(self._num_steps_observed / self._horizon)\n', (5896, 5938), True, 'import numpy as np\n'), ((4950, 5011), 'math.ceil', 'math.ceil', (["(policy_batch.count / self.config['sampling_ratio'])"], {}), "(policy_batch.count / self.config['sampling_ratio'])\n", (4959, 5011), False, 'import math\n'), ((5046, 5063), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5057, 5063), False, 'from collections import defaultdict\n'), ((5654, 5668), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (5661, 5668), True, 'import numpy as np\n')] |
import os
from conans.errors import ConanException
import logging
from conans.util.env_reader import get_env
from conans.util.files import save, load
from ConfigParser import NoSectionError, ConfigParser
from conans.model.values import Values
MIN_SERVER_COMPATIBLE_VERSION = '0.4.0'
default_settings_yml = """
os: [Windows, Linux, Macos, Android]
arch: [x86, x86_64, arm]
compiler:
gcc:
version: ["4.6", "4.7", "4.8", "4.9", "5.0"]
Visual Studio:
runtime: [None, MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14"]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7"]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0"]
build_type: [None, Debug, Release]
"""
default_client_conf = '''
[storage]
# This is the default path, but you can write your own
path: ~/.conan/data
[remotes]
conan.io: https://server.conan.io
local: http://localhost:9300
[settings_defaults]
'''
class ConanClientConfigParser(ConfigParser):
def __init__(self, filename):
ConfigParser.__init__(self)
self.read(filename)
def get_conf(self, varname):
"""Gets the section from config file or raises an exception"""
try:
return self.items(varname)
except NoSectionError:
raise ConanException("Invalid configuration, missing %s" % varname)
@property
def storage(self):
return dict(self.get_conf("storage"))
@property
def storage_path(self):
try:
result = os.path.expanduser(self.storage["path"])
except KeyError:
result = None
result = get_env('CONAN_STORAGE_PATH', result)
return result
@property
def remotes(self):
return self.get_conf("remotes")
@property
def settings_defaults(self):
default_settings = self.get_conf("settings_defaults")
values = Values.from_list(default_settings)
return values
| [
"conans.model.values.Values.from_list",
"ConfigParser.ConfigParser.__init__",
"conans.util.env_reader.get_env",
"conans.errors.ConanException",
"os.path.expanduser"
] | [((1036, 1063), 'ConfigParser.ConfigParser.__init__', 'ConfigParser.__init__', (['self'], {}), '(self)\n', (1057, 1063), False, 'from ConfigParser import NoSectionError, ConfigParser\n'), ((1630, 1667), 'conans.util.env_reader.get_env', 'get_env', (['"""CONAN_STORAGE_PATH"""', 'result'], {}), "('CONAN_STORAGE_PATH', result)\n", (1637, 1667), False, 'from conans.util.env_reader import get_env\n'), ((1895, 1929), 'conans.model.values.Values.from_list', 'Values.from_list', (['default_settings'], {}), '(default_settings)\n', (1911, 1929), False, 'from conans.model.values import Values\n'), ((1521, 1561), 'os.path.expanduser', 'os.path.expanduser', (["self.storage['path']"], {}), "(self.storage['path'])\n", (1539, 1561), False, 'import os\n'), ((1298, 1359), 'conans.errors.ConanException', 'ConanException', (["('Invalid configuration, missing %s' % varname)"], {}), "('Invalid configuration, missing %s' % varname)\n", (1312, 1359), False, 'from conans.errors import ConanException\n')] |
"""empty message
Revision ID: fa952f537929
Revises: <PASSWORD>
Create Date: 2021-06-02 16:00:18.342313
"""
# revision identifiers, used by Alembic.
revision = 'fa952f537929'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import app
import app.extensions
def upgrade():
"""
Upgrade Semantic Description:
ENTER DESCRIPTION HERE
"""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('annotation_keywords',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('viewed', sa.DateTime(), nullable=False),
sa.Column('annotation_guid', app.extensions.GUID(), nullable=False),
sa.Column('keyword_guid', app.extensions.GUID(), nullable=False),
sa.ForeignKeyConstraint(['annotation_guid'], ['annotation.guid'], name=op.f('fk_annotation_keywords_annotation_guid_annotation')),
sa.ForeignKeyConstraint(['keyword_guid'], ['keyword.guid'], name=op.f('fk_annotation_keywords_keyword_guid_keyword')),
sa.PrimaryKeyConstraint('annotation_guid', 'keyword_guid', name=op.f('pk_annotation_keywords'))
)
# ### end Alembic commands ###
def downgrade():
"""
Downgrade Semantic Description:
ENTER DESCRIPTION HERE
"""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('annotation_keywords')
# ### end Alembic commands ###
| [
"alembic.op.drop_table",
"alembic.op.f",
"app.extensions.GUID",
"sqlalchemy.DateTime"
] | [((1404, 1440), 'alembic.op.drop_table', 'op.drop_table', (['"""annotation_keywords"""'], {}), "('annotation_keywords')\n", (1417, 1440), False, 'from alembic import op\n'), ((545, 558), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (556, 558), True, 'import sqlalchemy as sa\n'), ((602, 615), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (613, 615), True, 'import sqlalchemy as sa\n'), ((658, 671), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (669, 671), True, 'import sqlalchemy as sa\n'), ((723, 744), 'app.extensions.GUID', 'app.extensions.GUID', ([], {}), '()\n', (742, 744), False, 'import app\n'), ((793, 814), 'app.extensions.GUID', 'app.extensions.GUID', ([], {}), '()\n', (812, 814), False, 'import app\n'), ((908, 965), 'alembic.op.f', 'op.f', (['"""fk_annotation_keywords_annotation_guid_annotation"""'], {}), "('fk_annotation_keywords_annotation_guid_annotation')\n", (912, 965), False, 'from alembic import op\n'), ((1037, 1088), 'alembic.op.f', 'op.f', (['"""fk_annotation_keywords_keyword_guid_keyword"""'], {}), "('fk_annotation_keywords_keyword_guid_keyword')\n", (1041, 1088), False, 'from alembic import op\n'), ((1159, 1189), 'alembic.op.f', 'op.f', (['"""pk_annotation_keywords"""'], {}), "('pk_annotation_keywords')\n", (1163, 1189), False, 'from alembic import op\n')] |
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
class Optim(object):
def __init__(self, method, lr, alpha, max_grad_norm, model_size,
lr_decay=1, start_decay_at=None,
beta1=0.9, beta2=0.98, warm_up_step=400, warm_up_factor=1.0,
opt=None):
self.last_metric = None
self.lr = lr
self.model_size=model_size
self.factor=warm_up_factor
self.warmup=warm_up_step
self.alpha = alpha
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_at = start_decay_at
self.start_decay = False
self._step = 0
self.betas = (beta1, beta2)
self.opt = opt
def set_parameters(self, params):
self.params = [p for p in params if p.requires_grad]
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.lr)
elif self.method == 'rmsprop':
self.optimizer = optim.RMSprop(
self.params, lr=self.lr, alpha=self.alpha)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.lr,
betas=self.betas, eps=1e-9)
else:
raise RuntimeError("Invalid optim method: " + self.method)
def _setRate(self, lr):
self.lr = lr
for p in self.optimizer.param_groups:
p['lr'] = self.lr
#self.optimizer.param_groups[0]['lr'] = self.lr
def step(self):
"Compute gradients norm."
self._step += 1
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
if self.opt.warm_up:
lr = self.rate()
self.lr=lr
for p in self.optimizer.param_groups:
p['lr'] = lr
self.optimizer.step()
def updateLearningRate(self, metric, epoch):
"""
Decay learning rate if val perf does not improve
or we hit the start_decay_at limit.
"""
if self.opt.warm_up:
print("Learning rate: %g" % self.lr)
else:
if (self.start_decay_at is not None) and (epoch >= self.start_decay_at):
self.start_decay = True
if (self.last_metric is not None) and (metric is not None) and (metric > self.last_metric):
self.start_decay = True
if self.start_decay:
self.lr = self.lr * self.lr_decay
print("Decaying learning rate to %g" % self.lr)
self.last_metric = metric
# self.optimizer.param_groups[0]['lr'] = self.lr
for p in self.optimizer.param_groups:
p['lr'] = self.lr
def rate(self, step=None):
"Implement `lrate` above"
if step is None:
step = self._step
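        # linear warm-up for the first `warmup` steps, then decay proportional to step**-0.5, scaled by factor / model_size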
return self.factor * \
(self.model_size ** (-1) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step=None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def get_std_opt(model):
return NoamOpt(model.src_embed[0].d_model, 2, 4000,
optim.Adam(model.parameters(),
lr=0, betas=(0.9, 0.98), eps=1e-9)) | [
"torch.optim.Adam",
"torch.optim.RMSprop",
"torch.optim.SGD",
"torch.nn.utils.clip_grad_norm_"
] | [((930, 964), 'torch.optim.SGD', 'optim.SGD', (['self.params'], {'lr': 'self.lr'}), '(self.params, lr=self.lr)\n', (939, 964), True, 'import torch.optim as optim\n'), ((1666, 1714), 'torch.nn.utils.clip_grad_norm_', 'clip_grad_norm_', (['self.params', 'self.max_grad_norm'], {}), '(self.params, self.max_grad_norm)\n', (1681, 1714), False, 'from torch.nn.utils import clip_grad_norm_\n'), ((1033, 1089), 'torch.optim.RMSprop', 'optim.RMSprop', (['self.params'], {'lr': 'self.lr', 'alpha': 'self.alpha'}), '(self.params, lr=self.lr, alpha=self.alpha)\n', (1046, 1089), True, 'import torch.optim as optim\n'), ((1172, 1236), 'torch.optim.Adam', 'optim.Adam', (['self.params'], {'lr': 'self.lr', 'betas': 'self.betas', 'eps': '(1e-09)'}), '(self.params, lr=self.lr, betas=self.betas, eps=1e-09)\n', (1182, 1236), True, 'import torch.optim as optim\n')] |
import sys
from config import DEBUG_MODE, SAIKI_BOT_VERSION
if "-v" in sys.argv or "--version" in sys.argv:
print(f"Saiki Discord Bot {SAIKI_BOT_VERSION}")
quit()
if "-h" in sys.argv or "--help" in sys.argv:
print()
print(" SAIKI DISCORD BOT SERVER HELP CENTER")
print()
print(f"Saiki Discord {SAIKI_BOT_VERSION}")
print(f"DEBUG_MODE: {DEBUG_MODE}")
print("""
The main discord bot server for Saiki.
Args:
--clear-log Clears the 'saiki_discord.log' file
-h, --help Shows the Saiki Discord Bot Server Help Center and exits
-d, --debug Launches Saiki Discord Server in DEBUG_MODE (note: --debug enables a higher debug level)
-v, --version Shows the Server version and exits
""")
quit()
from __protected import DISCORD_BOT_TOKEN
from saiki import client
from utils.log import log
log("Running the Discord Bot")
client.run(DISCORD_BOT_TOKEN)
| [
"utils.log.log",
"saiki.client.run"
] | [((929, 959), 'utils.log.log', 'log', (['"""Running the Discord Bot"""'], {}), "('Running the Discord Bot')\n", (932, 959), False, 'from utils.log import log\n'), ((960, 989), 'saiki.client.run', 'client.run', (['DISCORD_BOT_TOKEN'], {}), '(DISCORD_BOT_TOKEN)\n', (970, 989), False, 'from saiki import client\n')] |
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Model architecture and training parameters')
parser.add_argument('--mode', default='train', type=str, help='train/test/bc')
# parser.add_argument('--env', default='Pendulum-v0', type=str, help='open-ai gym environment')
# parser.add_argument('--env', default='BipedalWalker-v3', type=str, help='open-ai gym environment')
# parser.add_argument('--env', default='BipedalWalkerHardcore-v3', type=str, help='open-ai gym environment')
# parser.add_argument('--env', default='Hopper-v2', type=str, help='gym mujoco environment')
# parser.add_argument('--env', default='Ant-v2', type=str, help='gym mujoco environment')
# parser.add_argument('--env', default='Walker2d-v2', type=str, help='gym mujoco environment')
# parser.add_argument('--env', default='HalfCheetah-v2', type=str, help='gym mujoco environment')
parser.add_argument('--env', default='gym_kraby:HexapodBulletEnv-v0', type=str, help='open-ai gym Kraby environment')
    parser.add_argument('--method', default='NORMAL', type=str, choices=['NORMAL', 'COUPLR', 'ROTATION', 'AUTO'], help='training method: NORMAL, COUPLR, ROTATION or AUTO')
parser.add_argument('--demonstration_path', default='data/walker2d-poor.pkl', type=str, help='expert demonstration path')
parser.add_argument('--demonstration_ratio', default=1, type=float)
parser.add_argument('--demonstration_length', default=10, type=int)
parser.add_argument('--debug', default=True, dest='debug', action='store')
parser.add_argument('--seed', default=2, type=int, help='experiment seed')
parser.add_argument('--resume', default='default', type=str, help='model saved path')
    parser.add_argument('--hidden1', default=256, type=int, help='units of the first layer')
    parser.add_argument('--hidden2', default=512, type=int, help='units of the second layer')
parser.add_argument('--rate', default=0.0001, type=float, help='learning rate')
parser.add_argument('--L2', default=0.0001, type=float)
parser.add_argument('--prate', default=0.001, type=float, help='policy net learning rate')
parser.add_argument('--warmup', default=1000, type=int, help='warm up steps (fill the memory)')
parser.add_argument('--discount', default=0.99, type=float, help='')
parser.add_argument('--bsize', default=256, type=int, help='mini-batch size')
# parser.add_argument('--rmsize', default=6000000, type=int, help='memory size')
parser.add_argument('--rmsize', default=50000, type=int, help='memory size')
parser.add_argument('--window_length', default=1, type=int, help='')
parser.add_argument('--tau', default=0.001, type=float, help='moving average for target network')
parser.add_argument('--ou_theta', default=0.15, type=float, help='noise theta')
parser.add_argument('--ou_sigma', default=0.2, type=float, help='noise sigma')
parser.add_argument('--ou_mu', default=0.0, type=float, help='noise mu')
parser.add_argument('--max_episode_length', default=2000, type=int, help='')
parser.add_argument('--validate_episodes', default=5, type=int, help='num of episode during validate experiment')
parser.add_argument('--validate_steps', default=10000, type=int, help='num of steps per validate experiment')
parser.add_argument('--output', default='output', type=str, help='')
parser.add_argument('--init_w', default=0.003, type=float, help='')
parser.add_argument('--train_iter', default=1000000, type=int, help='total training steps')
parser.add_argument('--epsilon', default=800000, type=int, help='linear decay of exploration policy')
parser.add_argument('--num_interm', default=20, type=int, help='how many intermidate saves')
parser.add_argument('--policy_delay', default=2, type=int)
parser.add_argument('--noise_decay', default=800000, type=int)
args = parser.parse_args()
return args
| [
"argparse.ArgumentParser"
] | [((49, 136), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model architecture and training parameters"""'}), "(description=\n 'Model architecture and training parameters')\n", (72, 136), False, 'import argparse\n')] |
#!/usr/bin/python3
from scripts.utils import (
get_account,
get_verify_status
)
from brownie import (
Storage,
network,
Contract
)
def main():
account = get_account()
print(f"You are using the account {account}")
verify = get_verify_status()
deployed_contract = Storage.deploy({"from": account}, publish_source=verify)
if type(deployed_contract) is network.transaction.TransactionReceipt:
if network.active() == "halongbay":
deployed_contract.wait(3)
deployed_contract = Contract.from_abi("Storage", deployed_contract.contract_address, Storage.abi)
print(deployed_contract) | [
"scripts.utils.get_account",
"brownie.Contract.from_abi",
"scripts.utils.get_verify_status",
"brownie.network.active",
"brownie.Storage.deploy"
] | [((181, 194), 'scripts.utils.get_account', 'get_account', ([], {}), '()\n', (192, 194), False, 'from scripts.utils import get_account, get_verify_status\n'), ((258, 277), 'scripts.utils.get_verify_status', 'get_verify_status', ([], {}), '()\n', (275, 277), False, 'from scripts.utils import get_account, get_verify_status\n'), ((302, 358), 'brownie.Storage.deploy', 'Storage.deploy', (["{'from': account}"], {'publish_source': 'verify'}), "({'from': account}, publish_source=verify)\n", (316, 358), False, 'from brownie import Storage, network, Contract\n'), ((543, 620), 'brownie.Contract.from_abi', 'Contract.from_abi', (['"""Storage"""', 'deployed_contract.contract_address', 'Storage.abi'], {}), "('Storage', deployed_contract.contract_address, Storage.abi)\n", (560, 620), False, 'from brownie import Storage, network, Contract\n'), ((444, 460), 'brownie.network.active', 'network.active', ([], {}), '()\n', (458, 460), False, 'from brownie import Storage, network, Contract\n')] |
from flask import Flask, render_template, redirect, url_for, request
# create the application object
app = Flask(__name__)
@app.route('/')
def home():
return render_template('./home.html')
##############################################################################
@app.route('/userLogin')
def userLogin():
return render_template('./userLogin.html')
@app.route('/userLogin', methods=['POST'])
def userLoginPost():
username = request.form['username']
password = request.form['password']
msg = userLoginDriver(username, password)
return msg
# main driver function
def userLoginDriver(username, password):
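    # scheme: salt the password with the user's id, hash with SHA-256, then compare against the digest stored in file2.txt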
userId = getUserId(username)
if not userId:
return "username not found"
cipher = str(password) + str(userId)
import hashlib
hashedData = hashlib.sha256(cipher.encode())
if checkPassword(userId, hashedData):
return "login successful"
else:
return "invalid password"
# getting userId of username (returns None if not found)
def getUserId(username):
fp = open("file1.txt", "r")
dataLine = fp.readline()
userId = None
while(dataLine):
uname = dataLine.split(",")[1]
uname = uname.replace("\n", "")
if uname == username:
userId = dataLine.split(",")[0]
dataLine = fp.readline()
fp.close()
return userId
# checking if password exist for userId
def checkPassword(userId, hashedData):
fp = open("file2.txt", "r")
string = str(userId) + "," + str(hashedData.hexdigest())
dataLine = fp.readline()
passwordFound = False
while(dataLine):
dataLine = dataLine.replace("\n", "")
if dataLine == string:
passwordFound = True
dataLine = fp.readline()
fp.close()
return passwordFound
##############################################################################
@app.route('/updatePassword')
def updatePassword():
return render_template('./updatePassword.html')
@app.route('/updatePassword', methods=['POST'])
def updatePasswordPost():
username = request.form['username']
oldPassword = request.form['oldPassword']
newPassword = request.form['newPassword']
msg = updatePasswordDriver(username, oldPassword, newPassword)
return msg
# the caller should ensure the user is logged in before calling this
# main driver function
def updatePasswordDriver(username, password, newPassword):
userId = getUserId(username)
if not userId:
return "username not found"
cipher = str(password) + str(userId)
newCipher = str(newPassword) + str(userId)
import hashlib
hashedData = hashlib.sha256(cipher.encode())
newHashedData = hashlib.sha256(newCipher.encode())
if checkPassword(userId, hashedData):
changePassword(userId, hashedData, newHashedData)
return "password changed"
else:
return "invalid password"
# getting userId of username (returns None if not found)
def getUserId(username):
fp = open("file1.txt", "r")
dataLine = fp.readline()
userId = None
while(dataLine):
uname = dataLine.split(",")[1]
uname = uname.replace("\n", "")
if uname == username:
userId = dataLine.split(",")[0]
dataLine = fp.readline()
fp.close()
return userId
# checking if password exist for userId
def checkPassword(userId, hashedData):
fp = open("file2.txt", "r")
string = str(userId) + "," + str(hashedData.hexdigest())
dataLine = fp.readline()
passwordFound = False
while(dataLine):
dataLine = dataLine.replace("\n", "")
if dataLine == string:
passwordFound = True
dataLine = fp.readline()
fp.close()
return passwordFound
# code for changing the password
def changePassword(userId, hashedData, newHashedData):
old = str(userId) + "," + str(hashedData.hexdigest())
new = str(userId) + "," + str(newHashedData.hexdigest())
with open("file2.txt", "r") as fp:
lines = fp.readlines()
with open("file2.txt", "w") as fp:
for line in lines:
if line.strip("\n") != old:
fp.write(line)
        fp.write(new + "\n")  # keep one record per line so later appends do not run together
##############################################################################
@app.route('/newUser')
def newUser():
return render_template('./newUser.html')
@app.route('/newUser', methods=['POST'])
def newUserPost():
username = request.form['username']
password = request.form['password']
msg = addNewUserDriver(username, password)
return msg
# Main driver function
def addNewUserDriver(username, password):
if checkUsernameExists(username):
return 'username already exists, try a new one'
idExists = True
while(idExists):
userId = generateRandomId()
# check if id already exists
idExists = checkIdExists(userId)
writeFile("file1.txt", userId, username)
cipher = str(password) + str(userId)
import hashlib
hashedData = hashlib.sha256(cipher.encode())
writeFile("file2.txt", userId, hashedData.hexdigest())
return "New user added!"
# checking if usename exists in file1
def checkUsernameExists(username):
import os.path
if not os.path.isfile("file1.txt"):
fp = open("file1.txt", "w+")
fp.close()
fp = open("file1.txt", "r")
dataLine = fp.readline()
existsFlag = False
while(dataLine):
uname = dataLine.split(",")[1]
uname = uname.replace("\n", "")
if uname == username:
existsFlag = True
dataLine = fp.readline()
fp.close()
return existsFlag
# generating random userId
def generateRandomId():
import random, string
x = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(8))
return x
# checking if userId already exists
def checkIdExists(userId):
fp = open("file1.txt", "r")
dataLine = fp.readline()
existsFlag = False
while(dataLine):
uId = dataLine.split(",")[1]
if uId == userId:
existsFlag = True
dataLine = fp.readline()
fp.close()
return existsFlag
# writing data in file
def writeFile(file, arg1, arg2):
fp = open(file, "a")
string = str(arg1) + "," + str(arg2) + "\n"
fp.write(string)
fp.close()
############################################################################## | [
"flask.render_template",
"random.choice",
"flask.Flask"
] | [((111, 126), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (116, 126), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((171, 201), 'flask.render_template', 'render_template', (['"""./home.html"""'], {}), "('./home.html')\n", (186, 201), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((342, 377), 'flask.render_template', 'render_template', (['"""./userLogin.html"""'], {}), "('./userLogin.html')\n", (357, 377), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((2014, 2054), 'flask.render_template', 'render_template', (['"""./updatePassword.html"""'], {}), "('./updatePassword.html')\n", (2029, 2054), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((4422, 4455), 'flask.render_template', 'render_template', (['"""./newUser.html"""'], {}), "('./newUser.html')\n", (4437, 4455), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((5877, 5928), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (5890, 5928), False, 'import random, string\n')] |
import unittest
from capnp import _capnp # pylint: disable=unused-import
try:
from capnp import _capnp_test
except ImportError:
_capnp_test = None
# pylint: disable=c-extension-no-member
@unittest.skipUnless(_capnp_test, '_capnp_test unavailable')
class MessageTest(unittest.TestCase):
def test_reader(self):
data = b''
r = _capnp.FlatArrayMessageReader(data)
self.assertFalse(r.isCanonical())
def test_builder(self):
b = _capnp.MallocMessageBuilder()
self.assertTrue(b.isCanonical())
self.assertEqual(_capnp.computeSerializedSizeInWords(b), 2)
self.assertEqual(len(_capnp.messageToFlatArray(b).asBytes()), 16)
array = _capnp.messageToPackedArray(b)
self.assertEqual(_capnp.computeUnpackedSizeInWords(array.asBytes()), 2)
self.assertEqual(array.asBytes(), b'\x10\x01\x00\x00')
if __name__ == '__main__':
unittest.main()
| [
"capnp._capnp.FlatArrayMessageReader",
"unittest.skipUnless",
"capnp._capnp.messageToFlatArray",
"capnp._capnp.computeSerializedSizeInWords",
"capnp._capnp.messageToPackedArray",
"capnp._capnp.MallocMessageBuilder",
"unittest.main"
] | [((202, 261), 'unittest.skipUnless', 'unittest.skipUnless', (['_capnp_test', '"""_capnp_test unavailable"""'], {}), "(_capnp_test, '_capnp_test unavailable')\n", (221, 261), False, 'import unittest\n'), ((914, 929), 'unittest.main', 'unittest.main', ([], {}), '()\n', (927, 929), False, 'import unittest\n'), ((359, 394), 'capnp._capnp.FlatArrayMessageReader', '_capnp.FlatArrayMessageReader', (['data'], {}), '(data)\n', (388, 394), False, 'from capnp import _capnp\n'), ((478, 507), 'capnp._capnp.MallocMessageBuilder', '_capnp.MallocMessageBuilder', ([], {}), '()\n', (505, 507), False, 'from capnp import _capnp\n'), ((707, 737), 'capnp._capnp.messageToPackedArray', '_capnp.messageToPackedArray', (['b'], {}), '(b)\n', (734, 737), False, 'from capnp import _capnp\n'), ((574, 612), 'capnp._capnp.computeSerializedSizeInWords', '_capnp.computeSerializedSizeInWords', (['b'], {}), '(b)\n', (609, 612), False, 'from capnp import _capnp\n'), ((646, 674), 'capnp._capnp.messageToFlatArray', '_capnp.messageToFlatArray', (['b'], {}), '(b)\n', (671, 674), False, 'from capnp import _capnp\n')] |
import sys
def read_file(self, filename):
try:
with open(filename, 'r') as file:
content = file.readlines()
return content
except Exception as e:
print(e)
sys.exit(1)
| [
"sys.exit"
] | [((212, 223), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (220, 223), False, 'import sys\n')] |
"""
Source: https://www.acmicpc.net/problem/5430
"""
import collections
import sys
T = int(sys.stdin.readline().rstrip())
for _ in range(T):
p = sys.stdin.readline().rstrip()
n = int(sys.stdin.readline().rstrip())
x = list(sys.stdin.readline().rstrip()[1:-1].split(','))
if n == 0:
x = []
deque = collections.deque(x)
left = True
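    # keep a direction flag instead of reversing: "R" flips the flag, "D" pops from whichever end is currently the front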
try:
for command in p:
if command == "R":
left = not left
elif command == "D":
if left:
deque.popleft()
else:
deque.pop()
print("["+",".join(deque)+"]") if left else print("["+",".join(reversed(deque))+"]")
except IndexError:
print("error")
| [
"sys.stdin.readline",
"collections.deque"
] | [((323, 343), 'collections.deque', 'collections.deque', (['x'], {}), '(x)\n', (340, 343), False, 'import collections\n'), ((88, 108), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (106, 108), False, 'import sys\n'), ((146, 166), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (164, 166), False, 'import sys\n'), ((188, 208), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (206, 208), False, 'import sys\n'), ((232, 252), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (250, 252), False, 'import sys\n')] |
from pathlib import Path
from django.core.management import call_command
from pytest import fixture
@fixture(scope='class')
def django_db_setup(django_db_setup, django_db_blocker):
fixtures_dir = Path(__file__).resolve().parent / 'fixtures'
series_fixture = fixtures_dir / 'series.json'
chapters_fixture = fixtures_dir / 'chapters.json'
authors_artists_fixture = fixtures_dir / 'authors_artists.json'
groups_fixture = fixtures_dir / 'groups.json'
with django_db_blocker.unblock():
call_command('flush', '--no-input')
call_command('loaddata', 'categories.xml')
call_command('loaddata', str(authors_artists_fixture))
call_command('loaddata', str(groups_fixture))
call_command('loaddata', str(series_fixture))
call_command('loaddata', str(chapters_fixture))
| [
"pytest.fixture",
"pathlib.Path",
"django.core.management.call_command"
] | [((105, 127), 'pytest.fixture', 'fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (112, 127), False, 'from pytest import fixture\n'), ((517, 552), 'django.core.management.call_command', 'call_command', (['"""flush"""', '"""--no-input"""'], {}), "('flush', '--no-input')\n", (529, 552), False, 'from django.core.management import call_command\n'), ((561, 603), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""categories.xml"""'], {}), "('loaddata', 'categories.xml')\n", (573, 603), False, 'from django.core.management import call_command\n'), ((204, 218), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (208, 218), False, 'from pathlib import Path\n')] |
# Generated by Django 2.1 on 2018-11-01 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gw_app', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='nasmodel',
options={'ordering': ('ip_address',), 'verbose_name': 'Network access server. Gateway', 'verbose_name_plural': 'Network access servers. Gateways'},
),
migrations.AlterField(
model_name='nasmodel',
name='auth_login',
field=models.CharField(max_length=64, verbose_name='Auth login'),
),
migrations.AlterField(
model_name='nasmodel',
name='auth_passw',
field=models.CharField(max_length=127, verbose_name='Auth password'),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.CharField"
] | [((221, 423), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""nasmodel"""', 'options': "{'ordering': ('ip_address',), 'verbose_name':\n 'Network access server. Gateway', 'verbose_name_plural':\n 'Network access servers. Gateways'}"}), "(name='nasmodel', options={'ordering': (\n 'ip_address',), 'verbose_name': 'Network access server. Gateway',\n 'verbose_name_plural': 'Network access servers. Gateways'})\n", (249, 423), False, 'from django.db import migrations, models\n'), ((566, 624), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'verbose_name': '"""Auth login"""'}), "(max_length=64, verbose_name='Auth login')\n", (582, 624), False, 'from django.db import migrations, models\n'), ((752, 814), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(127)', 'verbose_name': '"""Auth password"""'}), "(max_length=127, verbose_name='Auth password')\n", (768, 814), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# coding: utf-8
from injectable import autowired
from pandas.io.json import json_normalize
from filmweb_integrator.fwimdbmerge.filmweb import Filmweb
from movies_analyzer.Imdb import Imdb
import json
import pandas as pd
def get_json_df(json_text):
return json_normalize(json.loads(json_text))
def get_json_list_df(json_text):
return pd.DataFrame(json.loads(json_text))
class Merger(object):
@autowired
def __init__(self, *, filmweb: Filmweb, imdb: Imdb):
self.filmweb = filmweb
self.imdb = imdb
def get_data(self, df):
filmweb_df = self.filmweb.get_dataframe(df, False)
return filmweb_df, self.imdb.merge(filmweb_df)
| [
"json.loads"
] | [((299, 320), 'json.loads', 'json.loads', (['json_text'], {}), '(json_text)\n', (309, 320), False, 'import json\n'), ((381, 402), 'json.loads', 'json.loads', (['json_text'], {}), '(json_text)\n', (391, 402), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Spactor, The SMS spam detector.
# License: MIT, see the file "LICENSE" for details.
import pickle
import random
from os import getcwd, path
import numpy as np
from pandas import read_csv
from sklearn.calibration import *
from sklearn.dummy import *
from sklearn.ensemble import *
from sklearn.feature_extraction.text import (CountVectorizer,
HashingVectorizer,
TfidfVectorizer)
from sklearn.linear_model import *
from sklearn.multiclass import *
from sklearn.naive_bayes import *
from sklearn.neighbors import *
from sklearn.svm import *
from sklearn.tree import *
def perform(classifiers, vectorizers, train_data, test_data):
scores = {}
for vectorizer in vectorizers:
features = vectorizer.fit_transform(train_data.text)
for classifier in classifiers:
if classifier.__class__.__name__ == "RidgeClassifierCV" and vectorizer.__class__.__name__ == "HashingVectorizer":
continue
# train
classifier.fit(features, train_data.tag)
# score
vectorize_text = vectorizer.transform(test_data.text)
score = classifier.score(vectorize_text, test_data.tag)
string = "{}:{}:{}".format(classifier.__class__.__name__,
vectorizer.__class__.__name__, score)
scores[str(score)] = (score, classifier, vectorizer)
print(string)
scores_sorted = list(scores.keys())
scores_sorted.sort(reverse=True)
heighest_score = scores_sorted[0]
print(
"Heighest: clf: {clf:s}, vec: {vec:s}, score: {score:s}.{score_prec:s}%"
.format(clf=scores[heighest_score][1].__class__.__name__,
vec=scores[heighest_score][2].__class__.__name__,
score=heighest_score[2:4],
score_prec=heighest_score[4:6]), )
print("-" * 10)
return scores[heighest_score]
def train(dataset_path, output_path="/etc/spactor", stop_score=0.95, test_size=0.2):
# select the fastest combination
clf = None
vec = None
score = 0
while score < stop_score:
lines = []
with open(dataset_path, "r") as fd:
lines = fd.readlines()
lines.pop(0)
random.shuffle(lines)
with open(dataset_path, "w") as fd:
fd.write("tag;text\n")
for line in lines:
fd.write(line)
data = read_csv(dataset_path, encoding='ISO-8859-1', sep=";")
test_size = 0.1
learn_items = int(len(data) - (len(data) * test_size))
learn = data[:learn_items].fillna(" ")
test = data[learn_items:].fillna(" ")
score, clf, vec = perform([
BernoulliNB(),
AdaBoostClassifier(),
BaggingClassifier(),
ExtraTreesClassifier(),
GradientBoostingClassifier(),
DecisionTreeClassifier(),
CalibratedClassifierCV(),
DummyClassifier(),
PassiveAggressiveClassifier(),
RidgeClassifier(),
RidgeClassifierCV(),
SGDClassifier(),
OneVsRestClassifier(SVC(kernel='linear')),
OneVsRestClassifier(LogisticRegression()),
KNeighborsClassifier()
], [
CountVectorizer(),
TfidfVectorizer(),
HashingVectorizer()
], learn, test)
# save the clf
clf_filename = "clf.pkl"
clf_filepath = path.join(output_path, clf_filename)
with open(clf_filepath, "wb") as file:
pickle.dump(clf, file)
print("clf saved")
# save the vectorizer
vec_filename = "vec.pkl"
vec_filepath = path.join(getcwd(), "models", vec_filename)
with open(vec_filepath, "wb") as file:
pickle.dump(vec, file)
print("Vectorizer saved")
return 0
| [
"pickle.dump",
"random.shuffle",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer",
"os.path.join",
"os.getcwd",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.feature_extraction.text.HashingVectorizer"
] | [((3583, 3619), 'os.path.join', 'path.join', (['output_path', 'clf_filename'], {}), '(output_path, clf_filename)\n', (3592, 3619), False, 'from os import getcwd, path\n'), ((2373, 2394), 'random.shuffle', 'random.shuffle', (['lines'], {}), '(lines)\n', (2387, 2394), False, 'import random\n'), ((2552, 2606), 'pandas.read_csv', 'read_csv', (['dataset_path'], {'encoding': '"""ISO-8859-1"""', 'sep': '""";"""'}), "(dataset_path, encoding='ISO-8859-1', sep=';')\n", (2560, 2606), False, 'from pandas import read_csv\n'), ((3671, 3693), 'pickle.dump', 'pickle.dump', (['clf', 'file'], {}), '(clf, file)\n', (3682, 3693), False, 'import pickle\n'), ((3802, 3810), 'os.getcwd', 'getcwd', ([], {}), '()\n', (3808, 3810), False, 'from os import getcwd, path\n'), ((3887, 3909), 'pickle.dump', 'pickle.dump', (['vec', 'file'], {}), '(vec, file)\n', (3898, 3909), False, 'import pickle\n'), ((3409, 3426), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (3424, 3426), False, 'from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer, TfidfVectorizer\n'), ((3440, 3457), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (3455, 3457), False, 'from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer, TfidfVectorizer\n'), ((3471, 3490), 'sklearn.feature_extraction.text.HashingVectorizer', 'HashingVectorizer', ([], {}), '()\n', (3488, 3490), False, 'from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer, TfidfVectorizer\n')] |
import csv, argparse, os, re
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
)
parser.add_argument("--output_dir", default=None, type=str, required=True)
args = parser.parse_args()
with open(args.data_file,'r') as in_file:
with open(os.path.join(args.output_dir,args.data_file.split('/')[-1]+'.proc'),'w') as out_file:
reader = csv.reader(in_file, delimiter='\t')
next(reader)
sentences = []
for row in reader:
if row[1] != 'misc':
speaker = row[1].split('_')[1]
if speaker == 'int':
continue
if row[3][:6] == '(pause':
continue
#print(row[3])
s = re.sub(r'<(.*?)>', '', row[3])
s = re.sub(r'\((.*?)\)', '', s)
s = re.sub(r'/\?(.*?)/', '', s)
s = s.replace('[','').replace(']','').replace('/unintelligible/','').replace('/','').replace(' ',' ').strip()
if not s:
continue
if sentences and s[0].islower():
sentences[-1] += ' ' + s
elif sentences and sentences[-1][-1] in ',-':
sentences[-1] += ' ' + s
else:
sentences.append(s)
for s in sentences:
if len(s.split()) > 3:
out_file.write(s + '\n')
| [
"re.sub",
"csv.reader",
"argparse.ArgumentParser"
] | [((39, 64), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (62, 64), False, 'import csv, argparse, os, re\n'), ((458, 493), 'csv.reader', 'csv.reader', (['in_file'], {'delimiter': '"""\t"""'}), "(in_file, delimiter='\\t')\n", (468, 493), False, 'import csv, argparse, os, re\n'), ((818, 847), 're.sub', 're.sub', (['"""<(.*?)>"""', '""""""', 'row[3]'], {}), "('<(.*?)>', '', row[3])\n", (824, 847), False, 'import csv, argparse, os, re\n'), ((865, 893), 're.sub', 're.sub', (['"""\\\\((.*?)\\\\)"""', '""""""', 's'], {}), "('\\\\((.*?)\\\\)', '', s)\n", (871, 893), False, 'import csv, argparse, os, re\n'), ((909, 936), 're.sub', 're.sub', (['"""/\\\\?(.*?)/"""', '""""""', 's'], {}), "('/\\\\?(.*?)/', '', s)\n", (915, 936), False, 'import csv, argparse, os, re\n')] |
from torch import nn
from torch.nn import functional as F
import torch
from .base import Conv2dBnRelu, DecoderBlock
from .encoders import get_encoder_channel_nr
"""
This script has been taken (and modified) from :
https://github.com/ternaus/TernausNet
@ARTICLE{arXiv:1801.05746,
author = {<NAME> and <NAME>},
title = {TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation},
journal = {ArXiv e-prints},
eprint = {1801.05746},
year = 2018
}
"""
class UNet(nn.Module):
def __init__(self, encoder, num_classes, dropout_2d=0.0, use_hypercolumn=False, pool0=False):
super().__init__()
self.num_classes = num_classes
self.dropout_2d = dropout_2d
self.use_hypercolumn = use_hypercolumn
self.pool0 = pool0
self.encoder = encoder
encoder_channel_nr = get_encoder_channel_nr(self.encoder)
self.center = nn.Sequential(Conv2dBnRelu(encoder_channel_nr[3], encoder_channel_nr[3]),
Conv2dBnRelu(encoder_channel_nr[3], encoder_channel_nr[2]),
nn.AvgPool2d(kernel_size=2, stride=2)
)
self.dec5 = DecoderBlock(encoder_channel_nr[3] + encoder_channel_nr[2],
encoder_channel_nr[3],
encoder_channel_nr[3] // 8)
self.dec4 = DecoderBlock(encoder_channel_nr[2] + encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 2,
encoder_channel_nr[3] // 8)
self.dec3 = DecoderBlock(encoder_channel_nr[1] + encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 4,
encoder_channel_nr[3] // 8)
self.dec2 = DecoderBlock(encoder_channel_nr[0] + encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 8)
self.dec1 = DecoderBlock(encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 16,
encoder_channel_nr[3] // 8)
self.dec0 = DecoderBlock(encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 16,
encoder_channel_nr[3] // 8)
if self.use_hypercolumn:
self.dec0 = DecoderBlock(5 * encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 8,
5 * encoder_channel_nr[3] // 8)
self.final = nn.Sequential(Conv2dBnRelu(5 * encoder_channel_nr[3] // 8, encoder_channel_nr[3] // 8),
nn.Conv2d(encoder_channel_nr[3] // 8, num_classes, kernel_size=1, padding=0))
else:
self.dec0 = DecoderBlock(encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 8,
encoder_channel_nr[3] // 8)
self.final = nn.Sequential(Conv2dBnRelu(encoder_channel_nr[3] // 8, encoder_channel_nr[3] // 8),
nn.Conv2d(encoder_channel_nr[3] // 8, num_classes, kernel_size=1, padding=0))
def forward(self, x):
encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
encoder5 = F.dropout2d(encoder5, p=self.dropout_2d)
center = self.center(encoder5)
dec5 = self.dec5(center, encoder5)
dec4 = self.dec4(dec5, encoder4)
dec3 = self.dec3(dec4, encoder3)
dec2 = self.dec2(dec3, encoder2)
dec1 = self.dec1(dec2)
if self.use_hypercolumn:
dec1 = torch.cat([dec1,
F.upsample(dec2, scale_factor=2, mode='bilinear'),
F.upsample(dec3, scale_factor=4, mode='bilinear'),
F.upsample(dec4, scale_factor=8, mode='bilinear'),
F.upsample(dec5, scale_factor=16, mode='bilinear'),
], 1)
if self.pool0:
dec1 = self.dec0(dec1)
return self.final(dec1)
| [
"torch.nn.AvgPool2d",
"torch.nn.functional.upsample",
"torch.nn.functional.dropout2d",
"torch.nn.Conv2d"
] | [((3437, 3477), 'torch.nn.functional.dropout2d', 'F.dropout2d', (['encoder5'], {'p': 'self.dropout_2d'}), '(encoder5, p=self.dropout_2d)\n', (3448, 3477), True, 'from torch.nn import functional as F\n'), ((1159, 1196), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1171, 1196), False, 'from torch import nn\n'), ((2813, 2889), 'torch.nn.Conv2d', 'nn.Conv2d', (['(encoder_channel_nr[3] // 8)', 'num_classes'], {'kernel_size': '(1)', 'padding': '(0)'}), '(encoder_channel_nr[3] // 8, num_classes, kernel_size=1, padding=0)\n', (2822, 2889), False, 'from torch import nn\n'), ((3248, 3324), 'torch.nn.Conv2d', 'nn.Conv2d', (['(encoder_channel_nr[3] // 8)', 'num_classes'], {'kernel_size': '(1)', 'padding': '(0)'}), '(encoder_channel_nr[3] // 8, num_classes, kernel_size=1, padding=0)\n', (3257, 3324), False, 'from torch import nn\n'), ((3816, 3865), 'torch.nn.functional.upsample', 'F.upsample', (['dec2'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(dec2, scale_factor=2, mode='bilinear')\n", (3826, 3865), True, 'from torch.nn import functional as F\n'), ((3897, 3946), 'torch.nn.functional.upsample', 'F.upsample', (['dec3'], {'scale_factor': '(4)', 'mode': '"""bilinear"""'}), "(dec3, scale_factor=4, mode='bilinear')\n", (3907, 3946), True, 'from torch.nn import functional as F\n'), ((3978, 4027), 'torch.nn.functional.upsample', 'F.upsample', (['dec4'], {'scale_factor': '(8)', 'mode': '"""bilinear"""'}), "(dec4, scale_factor=8, mode='bilinear')\n", (3988, 4027), True, 'from torch.nn import functional as F\n'), ((4059, 4109), 'torch.nn.functional.upsample', 'F.upsample', (['dec5'], {'scale_factor': '(16)', 'mode': '"""bilinear"""'}), "(dec5, scale_factor=16, mode='bilinear')\n", (4069, 4109), True, 'from torch.nn import functional as F\n')] |
from lxml import etree, objectify
import re
class norm_attribute:
def __remove_attributes_node(self, mt_node):
if not mt_node.attrib: return True
for at in mt_node.attrib.keys():
del mt_node.attrib[at]
def __remove_attributes_tree(self, mt_tree):
self.__remove_attributes_node(mt_tree)
for child in mt_tree:
self.__remove_attributes_tree(child)
def __remove_xmlns(self, mt_string):
mt_string = re.sub(' xmlns="[^"]+"', '', mt_string, count = 1)
return mt_string
def normalize(self, mt_string):
mt_string = self.__remove_xmlns(mt_string)
mt_tree = etree.fromstring(mt_string)
self.__remove_attributes_tree(mt_tree)
objectify.deannotate(mt_tree, cleanup_namespaces=True)
return etree.tostring(mt_tree)
| [
"lxml.objectify.deannotate",
"re.sub",
"lxml.etree.fromstring",
"lxml.etree.tostring"
] | [((473, 521), 're.sub', 're.sub', (['""" xmlns="[^"]+\\""""', '""""""', 'mt_string'], {'count': '(1)'}), '(\' xmlns="[^"]+"\', \'\', mt_string, count=1)\n', (479, 521), False, 'import re\n'), ((655, 682), 'lxml.etree.fromstring', 'etree.fromstring', (['mt_string'], {}), '(mt_string)\n', (671, 682), False, 'from lxml import etree, objectify\n'), ((738, 792), 'lxml.objectify.deannotate', 'objectify.deannotate', (['mt_tree'], {'cleanup_namespaces': '(True)'}), '(mt_tree, cleanup_namespaces=True)\n', (758, 792), False, 'from lxml import etree, objectify\n'), ((808, 831), 'lxml.etree.tostring', 'etree.tostring', (['mt_tree'], {}), '(mt_tree)\n', (822, 831), False, 'from lxml import etree, objectify\n')] |
"""
## Zemberek: Noisy Text Normalization Example
# Documentation: https://bit.ly/2WkUVVF
# Java Code Example: https://bit.ly/31Qi9Ew
"""
from jpype import JClass, JString
from examples import DATA_PATH
TurkishMorphology: JClass = JClass('zemberek.morphology.TurkishMorphology')
TurkishSentenceNormalizer: JClass = JClass(
'zemberek.normalization.TurkishSentenceNormalizer'
)
Paths: JClass = JClass('java.nio.file.Paths')
def run(text: str) -> None:
"""
Noisy text normalization example.
Args:
text (str): Noisy text to normalize.
"""
normalizer = TurkishSentenceNormalizer(
TurkishMorphology.createWithDefaults(),
Paths.get(str(DATA_PATH.joinpath('normalization'))),
Paths.get(str(DATA_PATH.joinpath('lm', 'lm.2gram.slm'))),
)
print(f'\nNormalized: {normalizer.normalize(JString(text))}')
| [
"examples.DATA_PATH.joinpath",
"jpype.JClass",
"jpype.JString"
] | [((233, 280), 'jpype.JClass', 'JClass', (['"""zemberek.morphology.TurkishMorphology"""'], {}), "('zemberek.morphology.TurkishMorphology')\n", (239, 280), False, 'from jpype import JClass, JString\n'), ((317, 375), 'jpype.JClass', 'JClass', (['"""zemberek.normalization.TurkishSentenceNormalizer"""'], {}), "('zemberek.normalization.TurkishSentenceNormalizer')\n", (323, 375), False, 'from jpype import JClass, JString\n'), ((398, 427), 'jpype.JClass', 'JClass', (['"""java.nio.file.Paths"""'], {}), "('java.nio.file.Paths')\n", (404, 427), False, 'from jpype import JClass, JString\n'), ((683, 718), 'examples.DATA_PATH.joinpath', 'DATA_PATH.joinpath', (['"""normalization"""'], {}), "('normalization')\n", (701, 718), False, 'from examples import DATA_PATH\n'), ((744, 784), 'examples.DATA_PATH.joinpath', 'DATA_PATH.joinpath', (['"""lm"""', '"""lm.2gram.slm"""'], {}), "('lm', 'lm.2gram.slm')\n", (762, 784), False, 'from examples import DATA_PATH\n'), ((843, 856), 'jpype.JString', 'JString', (['text'], {}), '(text)\n', (850, 856), False, 'from jpype import JClass, JString\n')] |
from cProfile import run
from time import sleep
from typing import Optional, List
from telegram import TelegramError
from telegram import Update
from telegram.error import BadRequest
from telegram.ext import Filters, CommandHandler
from telegram.ext.dispatcher import run_async, CallbackContext
import random
import YorForger.modules.sql.users_sql as sql
from YorForger.modules.helper_funcs.filters import CustomFilters
from YorForger import dispatcher, DEV_USERS, LOGGER
from YorForger.modules.disable import DisableAbleCommandHandler
def snipe(update: Update, context: CallbackContext):
args = context.args
bot = context.bot
try:
chat_id = str(args[0])
del args[0]
except TypeError:
update.effective_message.reply_text(
"Please give me a chat to echo to!")
to_send = " ".join(args)
if len(to_send) >= 2:
try:
bot.sendMessage(int(chat_id), str(to_send))
except TelegramError:
LOGGER.warning("Couldn't send to group %s", str(chat_id))
update.effective_message.reply_text(
"Couldn't send the message. Perhaps I'm not part of that group?")
__help__ = """
*Dev only:*
🔹 `/snipe` <chatid> <string>
Make me send a message to a specific chat.
"""
__mod_name__ = "Snipe"
SNIPE_HANDLER = CommandHandler(
"snipe",
snipe,
pass_args=True,
filters=CustomFilters.dev_filter,
run_async = True)
dispatcher.add_handler(SNIPE_HANDLER)
| [
"YorForger.dispatcher.add_handler",
"telegram.ext.CommandHandler"
] | [((1319, 1420), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""snipe"""', 'snipe'], {'pass_args': '(True)', 'filters': 'CustomFilters.dev_filter', 'run_async': '(True)'}), "('snipe', snipe, pass_args=True, filters=CustomFilters.\n dev_filter, run_async=True)\n", (1333, 1420), False, 'from telegram.ext import Filters, CommandHandler\n'), ((1440, 1477), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['SNIPE_HANDLER'], {}), '(SNIPE_HANDLER)\n', (1462, 1477), False, 'from YorForger import dispatcher, DEV_USERS, LOGGER\n')] |
import http.server
import socketserver
import threading
import os
import pkg_resources as p
def generate_handler(html, scripts=None):
"""
Generates an http.server.BaseHTTPRequestHandler that is triggered on webrequest
:param html: path to root html file
:param scripts: list of paths to scripts to add to page
:return:
"""
if scripts is None:
scripts = []
if isinstance(scripts, str):
scripts = [scripts]
class MyHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode())
for script in scripts:
self.wfile.write("\n\n<script>".encode())
self.wfile.write(script.encode())
self.wfile.write("\n\n</script>".encode())
else:
self.send_error(404)
return MyHandler
class webserver:
"""
"""
def __init__(self, host="0.0.0.0", port=8889, commport=8890, serveDirectory=""):
"""!
@param host:
@param port:
"""
html, script = self.__getSources__(serveDirectory=serveDirectory)
self.__loadWebFiles__(html=html, script=script, commport=commport)
self.handler = generate_handler(self.html, scripts=[self.script])
self.host = host
self.port = port
self.server = None
self.serving = False
self.serverThread = None
self.start()
def __loadWebFiles__(self, html="", script="", commport=0):
assert(html != "")
assert(script != "")
assert(commport != 0)
with open(html, "r") as f:
self.html = f.read()
with open(script, "r") as f:
self.script = f.read()
# set requested communication port
self.script = self.script.replace("port: \"7777\"", "port: \"" + str(commport) + "\"")
# add local plotly & msgpack scripts:
scriptPlotly = p.resource_filename('webplot', 'web/plotly-latest.min.js')
scriptMsgpack = p.resource_filename('webplot', 'web/msgpack.min.js')
assert (os.path.isfile(scriptPlotly))
assert (os.path.isfile(scriptMsgpack))
with open(scriptPlotly, "r") as f:
scriptPlotlyRaw = f.read()
with open(scriptMsgpack, "r") as f:
scriptMsgpackRaw = f.read()
self.html = self.html + "\n\n" + \
"<script>" + scriptMsgpackRaw + "</script>\n" + \
"<script>" + scriptPlotlyRaw + "</script>\n"
@staticmethod
def __getSources__(serveDirectory=""):
if serveDirectory != "":
htmlExternal = serveDirectory + ('/' if serveDirectory[-1] != '/' else '') + 'index.html'
scriptExternal = serveDirectory + ('/' if serveDirectory[-1] != '/' else '') + 'visualizer.js'
# check if it is a directory
if os.path.isdir(serveDirectory) and os.path.isfile(htmlExternal) and os.path.isfile(scriptExternal):
return htmlExternal, scriptExternal
else:
html = p.resource_filename('webplot', 'web/index.html')
script = p.resource_filename('webplot', 'web/visualizer.js')
assert(os.path.isfile(html))
assert(os.path.isfile(script))
return html, script
def stop(self):
"""
:return:
"""
self.server.shutdown()
self.serverThread.join()
print("Stopping web serving")
def start(self):
"""
:return:
"""
self.serverThread = threading.Thread(target=self.serve)
self.serverThread.start()
self.serving = True
def serve(self):
"""
:return:
"""
print("Starting web serving at: http://{:s}:{:d}\n\n".format(self.host,self.port))
self.server = socketserver.TCPServer(("", self.port), self.handler, bind_and_activate=False)
self.server.allow_reuse_address = True
try:
self.server.server_bind()
self.server.server_activate()
except:
self.server.server_close()
raise
# Star the server
self.server.serve_forever()
| [
"socketserver.TCPServer",
"pkg_resources.resource_filename",
"os.path.isfile",
"os.path.isdir",
"threading.Thread"
] | [((2168, 2226), 'pkg_resources.resource_filename', 'p.resource_filename', (['"""webplot"""', '"""web/plotly-latest.min.js"""'], {}), "('webplot', 'web/plotly-latest.min.js')\n", (2187, 2226), True, 'import pkg_resources as p\n'), ((2251, 2303), 'pkg_resources.resource_filename', 'p.resource_filename', (['"""webplot"""', '"""web/msgpack.min.js"""'], {}), "('webplot', 'web/msgpack.min.js')\n", (2270, 2303), True, 'import pkg_resources as p\n'), ((2320, 2348), 'os.path.isfile', 'os.path.isfile', (['scriptPlotly'], {}), '(scriptPlotly)\n', (2334, 2348), False, 'import os\n'), ((2366, 2395), 'os.path.isfile', 'os.path.isfile', (['scriptMsgpack'], {}), '(scriptMsgpack)\n', (2380, 2395), False, 'import os\n'), ((3809, 3844), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.serve'}), '(target=self.serve)\n', (3825, 3844), False, 'import threading\n'), ((4084, 4162), 'socketserver.TCPServer', 'socketserver.TCPServer', (["('', self.port)", 'self.handler'], {'bind_and_activate': '(False)'}), "(('', self.port), self.handler, bind_and_activate=False)\n", (4106, 4162), False, 'import socketserver\n'), ((3314, 3362), 'pkg_resources.resource_filename', 'p.resource_filename', (['"""webplot"""', '"""web/index.html"""'], {}), "('webplot', 'web/index.html')\n", (3333, 3362), True, 'import pkg_resources as p\n'), ((3384, 3435), 'pkg_resources.resource_filename', 'p.resource_filename', (['"""webplot"""', '"""web/visualizer.js"""'], {}), "('webplot', 'web/visualizer.js')\n", (3403, 3435), True, 'import pkg_resources as p\n'), ((3455, 3475), 'os.path.isfile', 'os.path.isfile', (['html'], {}), '(html)\n', (3469, 3475), False, 'import os\n'), ((3496, 3518), 'os.path.isfile', 'os.path.isfile', (['script'], {}), '(script)\n', (3510, 3518), False, 'import os\n'), ((3128, 3157), 'os.path.isdir', 'os.path.isdir', (['serveDirectory'], {}), '(serveDirectory)\n', (3141, 3157), False, 'import os\n'), ((3163, 3191), 'os.path.isfile', 'os.path.isfile', (['htmlExternal'], {}), '(htmlExternal)\n', (3177, 3191), False, 'import os\n'), ((3197, 3227), 'os.path.isfile', 'os.path.isfile', (['scriptExternal'], {}), '(scriptExternal)\n', (3211, 3227), False, 'import os\n')] |
from haystack import indexes
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cydns_index import CydnsIndex
class AddressRecordIndex(CydnsIndex, indexes.Indexable):
ip_str = indexes.CharField(model_attr='ip_str')
def get_model(self):
return AddressRecord
| [
"haystack.indexes.CharField"
] | [((208, 246), 'haystack.indexes.CharField', 'indexes.CharField', ([], {'model_attr': '"""ip_str"""'}), "(model_attr='ip_str')\n", (225, 246), False, 'from haystack import indexes\n')] |
"""Unit tests for relabelling estimator."""
import numpy as np
import pytest
from themis_ml.preprocessing.relabelling import Relabeller
from conftest import create_linear_X, create_y, create_s
def test_relabeller_fit():
"""Test that relabeller fitting """
relabeller = Relabeller()
X_input = create_linear_X()
targets = create_y()
protected_class = create_s()
# The formula to determine how many observations to promote/demote is
# the number needed to make the proportion of positive labels equal
# between the two groups. This proportion is rounded up.
expected_n = 3
# Given data specified in X function, the default LogisticRegression
# estimator should be able to draw a perfect decision boundary to seperate
# the y target.
relabeller.fit(X_input, targets, protected_class)
assert relabeller.n_relabels_ == expected_n
assert (relabeller.X_ == X_input).all()
assert (relabeller.y_ == targets).all()
assert (relabeller.s_ == protected_class).all()
def test_relabeller_transform():
"""Test that relabeller correctly relabels targets."""
expected = np.array([[0, 0, 1, 1, 1, 0, 0, 0, 1, 1]])
assert (Relabeller().fit_transform(
create_linear_X(), create_y(), s=create_s()) == expected).all()
def test_fit_error():
"""Test fit method errors out."""
# case: s array not the same length as y array
with pytest.raises(ValueError):
Relabeller().fit(create_linear_X(), create_y(), np.array([1, 0, 0, 1]))
# case: y targets are not a binary variable
with pytest.raises(TypeError):
targets = create_y()
targets[0] = 100
Relabeller().fit_transform(create_linear_X(), targets, create_s())
# case: s protected classes are not a binary variable
with pytest.raises(TypeError):
s_classes = create_y()
s_classes[0] = 100
Relabeller().fit_transform(create_linear_X(), targets, s_classes)
def test_fit_transform_error():
"""Test fit_transform method errors out.
ValueError should occur when X input to transform method is not the same
as the X input to fit method.
"""
X_input = create_linear_X()
with pytest.raises(ValueError):
Relabeller().fit(X_input, create_y(), create_s()).transform(X_input.T)
| [
"themis_ml.preprocessing.relabelling.Relabeller",
"conftest.create_y",
"conftest.create_s",
"numpy.array",
"pytest.raises",
"conftest.create_linear_X"
] | [((283, 295), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (293, 295), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n'), ((310, 327), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (325, 327), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((342, 352), 'conftest.create_y', 'create_y', ([], {}), '()\n', (350, 352), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((375, 385), 'conftest.create_s', 'create_s', ([], {}), '()\n', (383, 385), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1135, 1177), 'numpy.array', 'np.array', (['[[0, 0, 1, 1, 1, 0, 0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1, 1, 0, 0, 0, 1, 1]])\n', (1143, 1177), True, 'import numpy as np\n'), ((2169, 2186), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (2184, 2186), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1412, 1437), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1425, 1437), False, 'import pytest\n'), ((1576, 1600), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1589, 1600), False, 'import pytest\n'), ((1620, 1630), 'conftest.create_y', 'create_y', ([], {}), '()\n', (1628, 1630), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1798, 1822), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1811, 1822), False, 'import pytest\n'), ((1844, 1854), 'conftest.create_y', 'create_y', ([], {}), '()\n', (1852, 1854), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((2196, 2221), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2209, 2221), False, 'import pytest\n'), ((1464, 1481), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (1479, 1481), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1483, 1493), 'conftest.create_y', 'create_y', ([], {}), '()\n', (1491, 1493), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1495, 1517), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1503, 1517), True, 'import numpy as np\n'), ((1691, 1708), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (1706, 1708), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1719, 1729), 'conftest.create_s', 'create_s', ([], {}), '()\n', (1727, 1729), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1917, 1934), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (1932, 1934), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1447, 1459), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (1457, 1459), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n'), ((1664, 1676), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (1674, 1676), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n'), ((1890, 1902), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (1900, 1902), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n'), ((1226, 1243), 'conftest.create_linear_X', 'create_linear_X', ([], {}), '()\n', (1241, 1243), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1245, 1255), 'conftest.create_y', 'create_y', ([], {}), '()\n', (1253, 1255), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((2257, 2267), 'conftest.create_y', 'create_y', ([], {}), '()\n', (2265, 2267), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((2269, 2279), 'conftest.create_s', 'create_s', ([], {}), '()\n', (2277, 2279), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((1190, 1202), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (1200, 1202), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n'), ((1259, 1269), 'conftest.create_s', 'create_s', ([], {}), '()\n', (1267, 1269), False, 'from conftest import create_linear_X, create_y, create_s\n'), ((2231, 2243), 'themis_ml.preprocessing.relabelling.Relabeller', 'Relabeller', ([], {}), '()\n', (2241, 2243), False, 'from themis_ml.preprocessing.relabelling import Relabeller\n')]
"""
Secured-Pi, 2016.
This is a script for the Secured-Pi project dealing with the machine learning
aspects of the facial recognition.
This script relies on the OpenCV 3.1 library installed with the opencv-contrib
face module. If you do not have OpenCV installed, I highly recommend that you
attempt to do so using a virtual box, making sure to also install the face
module, which is part of the OpenCV-contrib project.
For training, create a directory called 'training' inside of this directory.
Each person's photo should be named like "member-1-a", where 1 identifies the
person to whom the image belongs to, and the letter increments for each photo.
For example, a training set of 2 people might contain the images:
member-1-a.gif
member-1-b.gif
member-2-a.gif
member-2-b.gif .. and so on ..
For accuracy, we recommend a -minimum- of 10 pictures for each member. It is
also important that no other files be contained in the training directory
besides the pictures. Just want to thank the creators of the tutorials at:
https://pythonprogramming.net/loading-video-python-opencv-tutorial/
"""
import os
import cv2
import numpy as np
import re
from PIL import Image
from securedpi.settings import BASE_DIR, MEDIA_ROOT
HERE = os.path.dirname(os.path.abspath(__file__))
CASCADE_MODEL = os.path.join(HERE, 'haarcascade_frontalface_default.xml')
FACE_CASCADE = cv2.CascadeClassifier(CASCADE_MODEL)
TRAINING_SET_PATH = os.path.join(MEDIA_ROOT, 'training')
RECOG_MODEL = os.path.join(HERE, 'recog_brain.yml')
def train_recognizer(recognizer=cv2.face.createLBPHFaceRecognizer,
image_path=TRAINING_SET_PATH,
save_file=os.path.join(HERE, 'recog_brain.yml'),
recog_model=None,
demo=False):
"""Train the facial recognition software with some training photos.
This will train the recognizer, and save the training. This saved file
will be used by test_individual to verify membership.
"""
recognizer = recognizer()
if recog_model is not None:
recognizer.load(recog_model)
tr_files = os.listdir(image_path)
tr_files = [image_path + '/' + f for f in tr_files]
images = [] # image arrays of face cut-outs
members = [] # this will be the ids of the members
for tr_f in tr_files:
print('training with file:' + tr_f)
tr_img = np.array(Image.open(tr_f).convert('L'), 'uint8')
curr_member = int(re.search('-(\d+)-',
os.path.split(tr_f)[1]).group(1))
curr_face = FACE_CASCADE.detectMultiScale(tr_img)
for (x, y, w, h) in curr_face:
images.append(tr_img[y: y + h, x: x + w])
members.append(curr_member)
if demo:
cv2.imshow("Training...", tr_img[y: y + h, x: x + w])
cv2.waitKey(20)
if demo:
cv2.destroyAllWindows()
recognizer.train(images, np.array(members))
recognizer.save(save_file)
def test_individual(image_to_test, recog_model=RECOG_MODEL, verbose=False):
"""Test if an individual has access to the lock.
Make sure there is a recog_model file that has been generated by the
train_recognizer function. Returns a boolean.
"""
recognizer = cv2.face.createLBPHFaceRecognizer
recognizer = recognizer()
try:
recognizer.load(recog_model)
except:
print('No training yml file found!')
return (None, None)
# image_array = np.array(Image.open(image_to_test).convert('L'), 'uint8')
image_array = np.array(Image.open(os.path.join(
BASE_DIR, image_to_test)).convert('L'), 'uint8')
curr_face = FACE_CASCADE.detectMultiScale(image_array)[0]
x, y, w, h = curr_face
test_image = image_array[y: y + h, x: x + w]
member_prediction, confidence = recognizer.predict(test_image)
if verbose is True:
print('member number: {}, confidence: {}'.format(member_prediction,
confidence))
return (member_prediction, confidence)
| [
"os.listdir",
"PIL.Image.open",
"os.path.join",
"cv2.imshow",
"os.path.split",
"numpy.array",
"cv2.destroyAllWindows",
"os.path.abspath",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((1291, 1348), 'os.path.join', 'os.path.join', (['HERE', '"""haarcascade_frontalface_default.xml"""'], {}), "(HERE, 'haarcascade_frontalface_default.xml')\n", (1303, 1348), False, 'import os\n'), ((1364, 1400), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['CASCADE_MODEL'], {}), '(CASCADE_MODEL)\n', (1385, 1400), False, 'import cv2\n'), ((1421, 1457), 'os.path.join', 'os.path.join', (['MEDIA_ROOT', '"""training"""'], {}), "(MEDIA_ROOT, 'training')\n", (1433, 1457), False, 'import os\n'), ((1472, 1509), 'os.path.join', 'os.path.join', (['HERE', '"""recog_brain.yml"""'], {}), "(HERE, 'recog_brain.yml')\n", (1484, 1509), False, 'import os\n'), ((1248, 1273), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1263, 1273), False, 'import os\n'), ((1661, 1698), 'os.path.join', 'os.path.join', (['HERE', '"""recog_brain.yml"""'], {}), "(HERE, 'recog_brain.yml')\n", (1673, 1698), False, 'import os\n'), ((2104, 2126), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (2114, 2126), False, 'import os\n'), ((2881, 2904), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2902, 2904), False, 'import cv2\n'), ((2935, 2952), 'numpy.array', 'np.array', (['members'], {}), '(members)\n', (2943, 2952), True, 'import numpy as np\n'), ((2774, 2825), 'cv2.imshow', 'cv2.imshow', (['"""Training..."""', 'tr_img[y:y + h, x:x + w]'], {}), "('Training...', tr_img[y:y + h, x:x + w])\n", (2784, 2825), False, 'import cv2\n'), ((2844, 2859), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (2855, 2859), False, 'import cv2\n'), ((2398, 2414), 'PIL.Image.open', 'Image.open', (['tr_f'], {}), '(tr_f)\n', (2408, 2414), False, 'from PIL import Image\n'), ((3577, 3614), 'os.path.join', 'os.path.join', (['BASE_DIR', 'image_to_test'], {}), '(BASE_DIR, image_to_test)\n', (3589, 3614), False, 'import os\n'), ((2511, 2530), 'os.path.split', 'os.path.split', (['tr_f'], {}), '(tr_f)\n', (2524, 2530), False, 'import os\n')] |
import sys
from typing import Optional
import click
from stackwar.surveyutils import iter_squashed
def mostly_sanitize(lang: str, year: int) -> Optional[str]:
from stackwar.languages import unalias, get_ban, is_known
real = unalias(lang.lower(), year)
if real is None:
return None
ban = get_ban(real)
if ban:
click.secho(f"{lang!r} is not supported: {ban}", fg='red')
sys.exit(1)
if not is_known(real):
click.secho(f"Unknown language {lang!r}", fg='red')
sys.exit(1)
return real
def sanitize(lang: str, year: int) -> str:
real = mostly_sanitize(lang, year)
if real is None:
click.secho(f"No data for {lang} in {year}", fg='red')
sys.exit(1)
return real
@click.command()
@click.option('--year', '-y', type=int, required=True)
def print_languages(year: int) -> None:
languages = set()
for used, want in iter_squashed(year):
languages.update(used)
languages.update(want)
print(f"{len(languages)} languages:")
for lang in sorted(languages):
print(lang)
| [
"stackwar.surveyutils.iter_squashed",
"click.secho",
"click.option",
"stackwar.languages.is_known",
"sys.exit",
"stackwar.languages.get_ban",
"click.command"
] | [((765, 780), 'click.command', 'click.command', ([], {}), '()\n', (778, 780), False, 'import click\n'), ((782, 835), 'click.option', 'click.option', (['"""--year"""', '"""-y"""'], {'type': 'int', 'required': '(True)'}), "('--year', '-y', type=int, required=True)\n", (794, 835), False, 'import click\n'), ((318, 331), 'stackwar.languages.get_ban', 'get_ban', (['real'], {}), '(real)\n', (325, 331), False, 'from stackwar.languages import unalias, get_ban, is_known\n'), ((921, 940), 'stackwar.surveyutils.iter_squashed', 'iter_squashed', (['year'], {}), '(year)\n', (934, 940), False, 'from stackwar.surveyutils import iter_squashed\n'), ((352, 410), 'click.secho', 'click.secho', (['f"""{lang!r} is not supported: {ban}"""'], {'fg': '"""red"""'}), "(f'{lang!r} is not supported: {ban}', fg='red')\n", (363, 410), False, 'import click\n'), ((419, 430), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (427, 430), False, 'import sys\n'), ((443, 457), 'stackwar.languages.is_known', 'is_known', (['real'], {}), '(real)\n', (451, 457), False, 'from stackwar.languages import unalias, get_ban, is_known\n'), ((467, 518), 'click.secho', 'click.secho', (['f"""Unknown language {lang!r}"""'], {'fg': '"""red"""'}), "(f'Unknown language {lang!r}', fg='red')\n", (478, 518), False, 'import click\n'), ((527, 538), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (535, 538), False, 'import sys\n'), ((670, 724), 'click.secho', 'click.secho', (['f"""No data for {lang} in {year}"""'], {'fg': '"""red"""'}), "(f'No data for {lang} in {year}', fg='red')\n", (681, 724), False, 'import click\n'), ((733, 744), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (741, 744), False, 'import sys\n')] |
import os
import time
from typing import List, Union, Dict, Callable, Any
from functools import partial
from queue import Queue
from threading import Thread
from ding.utils import read_file, save_file, get_data_decompressor, COMM_LEARNER_REGISTRY
from ding.utils.file_helper import read_from_di_store
from ding.interaction import Slave, TaskFail
from .base_comm_learner import BaseCommLearner
from ..learner_hook import LearnerHook
class LearnerSlave(Slave):
"""
Overview:
A slave, whose master is coordinator.
Used to pass message between comm learner and coordinator.
"""
def __init__(self, *args, callback_fn: Dict[str, Callable], **kwargs) -> None:
"""
Overview:
Init callback functions additionally. Callback functions are methods in comm learner.
"""
super().__init__(*args, **kwargs)
self._callback_fn = callback_fn
def _process_task(self, task: dict) -> Union[dict, TaskFail]:
"""
Overview:
Process a task according to input task info dict, which is passed in by master coordinator.
For each type of task, you can refer to corresponding callback function in comm learner for details.
Arguments:
- cfg (:obj:`EasyDict`): Task dict. Must contain key "name".
Returns:
- result (:obj:`Union[dict, TaskFail]`): Task result dict, or task fail exception.
"""
task_name = task['name']
if task_name == 'resource':
return self._callback_fn['deal_with_resource']()
elif task_name == 'learner_start_task':
self._current_task_info = task['task_info']
self._callback_fn['deal_with_learner_start'](self._current_task_info)
return {'message': 'learner task has started'}
elif task_name == 'learner_get_data_task':
data_demand = self._callback_fn['deal_with_get_data']()
ret = {
'task_id': self._current_task_info['task_id'],
'buffer_id': self._current_task_info['buffer_id'],
}
ret.update(data_demand)
return ret
elif task_name == 'learner_learn_task':
info = self._callback_fn['deal_with_learner_learn'](task['data'])
data = {'info': info}
data['buffer_id'] = self._current_task_info['buffer_id']
data['task_id'] = self._current_task_info['task_id']
return data
elif task_name == 'learner_close_task':
self._callback_fn['deal_with_learner_close']()
return {
'task_id': self._current_task_info['task_id'],
'buffer_id': self._current_task_info['buffer_id'],
}
else:
raise TaskFail(result={'message': 'task name error'}, message='illegal learner task <{}>'.format(task_name))
@COMM_LEARNER_REGISTRY.register('flask_fs')
class FlaskFileSystemLearner(BaseCommLearner):
"""
Overview:
An implementation of CommLearner, using flask and the file system.
Interfaces:
__init__, send_policy, get_data, send_learn_info, start, close
Property:
hooks4call
"""
def __init__(self, cfg: 'EasyDict') -> None: # noqa
"""
Overview:
Init method.
Arguments:
- cfg (:obj:`EasyDict`): Config dict.
"""
BaseCommLearner.__init__(self, cfg)
# Callback functions for message passing between comm learner and coordinator.
self._callback_fn = {
'deal_with_resource': self.deal_with_resource,
'deal_with_learner_start': self.deal_with_learner_start,
'deal_with_get_data': self.deal_with_get_data,
'deal_with_learner_learn': self.deal_with_learner_learn,
'deal_with_learner_close': self.deal_with_learner_close,
}
# Learner slave to implement those callback functions. Host and port is used to build connection with master.
host, port = cfg.host, cfg.port
if isinstance(port, list):
port = port[self._rank]
elif isinstance(port, int) and self._world_size > 1:
port = port + self._rank
self._slave = LearnerSlave(host, port, callback_fn=self._callback_fn)
self._path_data = cfg.path_data # path to read data from
self._path_policy = cfg.path_policy # path to save policy
# Queues to store info dicts. Only one info is needed to pass between learner and coordinator at a time.
self._data_demand_queue = Queue(maxsize=1)
self._data_result_queue = Queue(maxsize=1)
self._learn_info_queue = Queue(maxsize=1)
# Task-level learner and policy will only be set once received the task.
self._learner = None
self._policy_id = None
def start(self) -> None:
"""
Overview:
Start comm learner itself and the learner slave.
"""
BaseCommLearner.start(self)
self._slave.start()
def close(self) -> None:
"""
Overview:
Join learner thread and close learner if still running.
Then close learner slave and comm learner itself.
"""
if self._end_flag:
return
if self._learner is not None:
self.deal_with_learner_close()
self._slave.close()
BaseCommLearner.close(self)
def __del__(self) -> None:
"""
Overview:
Call ``close`` for deletion.
"""
self.close()
def deal_with_resource(self) -> dict:
"""
Overview:
Callback function. Return how many resources are needed to start current learner.
Returns:
- resource (:obj:`dict`): Resource info dict, including ["gpu"].
"""
return {'gpu': self._world_size}
def deal_with_learner_start(self, task_info: dict) -> None:
"""
Overview:
Callback function. Create a learner and help register its hooks. Start a learner thread of the created one.
Arguments:
- task_info (:obj:`dict`): Task info dict.
.. note::
In ``_create_learner`` method in base class ``BaseCommLearner``, 3 methods
('get_data', 'send_policy', 'send_learn_info'), dataloader and policy are set.
You can refer to it for details.
"""
self._policy_id = task_info['policy_id']
self._league_save_checkpoint_path = task_info.get('league_save_checkpoint_path', None)
self._learner = self._create_learner(task_info)
for h in self.hooks4call:
self._learner.register_hook(h)
self._learner_thread = Thread(target=self._learner.start, args=(), daemon=True, name='learner_start')
self._learner_thread.start()
def deal_with_get_data(self) -> Any:
"""
Overview:
Callback function. Get data demand info dict from ``_data_demand_queue``,
which will be sent to coordinator afterwards.
Returns:
- data_demand (:obj:`Any`): Data demand info dict.
"""
data_demand = self._data_demand_queue.get()
return data_demand
def deal_with_learner_learn(self, data: dict) -> dict:
"""
Overview:
Callback function. Put training data info dict (i.e. meta data), which is received from coordinator, into
``_data_result_queue``, and wait for ``get_data`` to retrieve. Wait for learner training and
get learn info dict from ``_learn_info_queue``. If task is finished, join the learner thread and
close the learner.
Returns:
- learn_info (:obj:`Any`): Learn info dict.
"""
self._data_result_queue.put(data)
learn_info = self._learn_info_queue.get()
return learn_info
def deal_with_learner_close(self) -> None:
self._learner.close()
self._learner_thread.join()
del self._learner_thread
self._learner = None
self._policy_id = None
# override
def send_policy(self, state_dict: dict) -> None:
"""
Overview:
Save learner's policy in corresponding path, called by ``SendPolicyHook``.
Arguments:
- state_dict (:obj:`dict`): State dict of the policy.
"""
if not os.path.exists(self._path_policy):
os.mkdir(self._path_policy)
path = self._policy_id
if self._path_policy not in path:
path = os.path.join(self._path_policy, path)
setattr(self, "_latest_policy_path", path)
save_file(path, state_dict, use_lock=True)
if self._league_save_checkpoint_path is not None:
save_file(self._league_save_checkpoint_path, state_dict, use_lock=True)
@staticmethod
def load_data_fn(path, meta: Dict[str, Any], decompressor: Callable) -> Any:
"""
Overview:
The function that is used to load data file.
Arguments:
- meta (:obj:`Dict[str, Any]`): Meta data info dict.
- decompressor (:obj:`Callable`): Decompress function.
Returns:
- s (:obj:`Any`): Data which is read from file.
"""
# Due to read-write conflict, read_file raise an error, therefore we set a while loop.
while True:
try:
s = read_from_di_store(path) if read_from_di_store else read_file(path, use_lock=False)
s = decompressor(s)
break
except Exception:
time.sleep(0.01)
unroll_len = meta.get('unroll_len', 1)
if 'unroll_split_begin' in meta:
begin = meta['unroll_split_begin']
if unroll_len == 1:
s = s[begin]
s.update(meta)
else:
end = begin + unroll_len
s = s[begin:end]
# add metadata key-value to stepdata
for i in range(len(s)):
s[i].update(meta)
else:
s.update(meta)
return s
# override
def get_data(self, batch_size: int) -> List[Callable]:
"""
Overview:
Get a list of data loading function, which can be implemented by dataloader to read data from files.
Arguments:
- batch_size (:obj:`int`): Batch size.
Returns:
- data (:obj:`List[Callable]`): A list of callable data loading function.
"""
while self._learner is None:
time.sleep(1)
# Tell coordinator that we need training data, by putting info dict in data_demand_queue.
assert self._data_demand_queue.qsize() == 0
self._data_demand_queue.put({'batch_size': batch_size, 'cur_learner_iter': self._learner.last_iter.val})
# Get a list of meta data (data info dict) from coordinator, by getting info dict from data_result_queue.
data = self._data_result_queue.get()
assert isinstance(data, list)
assert len(data) == batch_size, '{}/{}'.format(len(data), batch_size)
# Transform meta data to callable data loading function (partial ``load_data_fn``).
decompressor = get_data_decompressor(data[0].get('compressor', 'none'))
data = [
partial(
FlaskFileSystemLearner.load_data_fn,
path=m['object_ref'] if read_from_di_store else os.path.join(self._path_data, m['data_id']),
meta=m,
decompressor=decompressor,
) for m in data
]
return data
# override
def send_learn_info(self, learn_info: dict) -> None:
"""
Overview:
Store learn info dict in queue, which will be retrieved by callback function "deal_with_learner_learn"
in learner slave, then will be sent to coordinator.
Arguments:
- learn_info (:obj:`dict`): Learn info in `dict` type. Keys are like 'learner_step', 'priority_info' \
'finished_task', etc. You can refer to ``learn_info``(``worker/learner/base_learner.py``) for details.
"""
assert self._learn_info_queue.qsize() == 0
self._learn_info_queue.put(learn_info)
@property
def hooks4call(self) -> List[LearnerHook]:
"""
Overview:
Return the hooks that are related to message passing with coordinator.
Returns:
- hooks (:obj:`list`): The hooks which comm learner has. Will be registered in learner as well.
"""
return [
SendPolicyHook('send_policy', 100, position='before_run', ext_args={}),
SendPolicyHook('send_policy', 100, position='after_iter', ext_args={'send_policy_freq': 1}),
SendLearnInfoHook(
'send_learn_info',
100,
position='after_iter',
ext_args={'freq': 10},
),
SendLearnInfoHook(
'send_learn_info',
100,
position='after_run',
ext_args={'freq': 1},
),
]
class SendPolicyHook(LearnerHook):
"""
Overview:
Hook to send policy
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: dict = {}, **kwargs) -> None:
"""
Overview:
init SendpolicyHook
Arguments:
- ext_args (:obj:`dict`): Extended arguments. Use ``ext_args.freq`` to set send_policy_freq
"""
super().__init__(*args, **kwargs)
if 'send_policy_freq' in ext_args:
self._freq = ext_args['send_policy_freq']
else:
self._freq = 1
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Save learner's policy in corresponding path at interval iterations by calling ``engine``'s ``send_policy``.
Saved file includes model_state_dict, learner_last_iter.
Arguments:
- engine (:obj:`BaseLearner`): The BaseLearner.
.. note::
Only rank == 0 learner will save policy.
"""
last_iter = engine.last_iter.val
if engine.rank == 0 and last_iter % self._freq == 0:
state_dict = {'model': engine.policy.state_dict()['model'], 'iter': last_iter}
engine.send_policy(state_dict)
engine.debug('{} save iter{} policy'.format(engine.instance_name, last_iter))
class SendLearnInfoHook(LearnerHook):
"""
Overview:
Hook to send learn info
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: dict, **kwargs) -> None:
"""
Overview:
init SendLearnInfoHook
Arguments:
- ext_args (:obj:`dict`): extended_args, use ext_args.freq
"""
super().__init__(*args, **kwargs)
self._freq = ext_args['freq']
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Send learn info including last_iter at interval iterations and priority info
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner
"""
last_iter = engine.last_iter.val
engine.send_learn_info(engine.learn_info)
if last_iter % self._freq == 0:
engine.debug('{} save iter{} learn_info'.format(engine.instance_name, last_iter))
| [
"os.path.exists",
"ding.utils.read_file",
"os.path.join",
"ding.utils.COMM_LEARNER_REGISTRY.register",
"time.sleep",
"ding.utils.save_file",
"os.mkdir",
"threading.Thread",
"queue.Queue",
"ding.utils.file_helper.read_from_di_store"
] | [((2886, 2928), 'ding.utils.COMM_LEARNER_REGISTRY.register', 'COMM_LEARNER_REGISTRY.register', (['"""flask_fs"""'], {}), "('flask_fs')\n", (2916, 2928), False, 'from ding.utils import read_file, save_file, get_data_decompressor, COMM_LEARNER_REGISTRY\n'), ((4579, 4595), 'queue.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (4584, 4595), False, 'from queue import Queue\n'), ((4630, 4646), 'queue.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (4635, 4646), False, 'from queue import Queue\n'), ((4680, 4696), 'queue.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (4685, 4696), False, 'from queue import Queue\n'), ((6730, 6808), 'threading.Thread', 'Thread', ([], {'target': 'self._learner.start', 'args': '()', 'daemon': '(True)', 'name': '"""learner_start"""'}), "(target=self._learner.start, args=(), daemon=True, name='learner_start')\n", (6736, 6808), False, 'from threading import Thread\n'), ((8658, 8700), 'ding.utils.save_file', 'save_file', (['path', 'state_dict'], {'use_lock': '(True)'}), '(path, state_dict, use_lock=True)\n', (8667, 8700), False, 'from ding.utils import read_file, save_file, get_data_decompressor, COMM_LEARNER_REGISTRY\n'), ((8394, 8427), 'os.path.exists', 'os.path.exists', (['self._path_policy'], {}), '(self._path_policy)\n', (8408, 8427), False, 'import os\n'), ((8441, 8468), 'os.mkdir', 'os.mkdir', (['self._path_policy'], {}), '(self._path_policy)\n', (8449, 8468), False, 'import os\n'), ((8561, 8598), 'os.path.join', 'os.path.join', (['self._path_policy', 'path'], {}), '(self._path_policy, path)\n', (8573, 8598), False, 'import os\n'), ((8772, 8843), 'ding.utils.save_file', 'save_file', (['self._league_save_checkpoint_path', 'state_dict'], {'use_lock': '(True)'}), '(self._league_save_checkpoint_path, state_dict, use_lock=True)\n', (8781, 8843), False, 'from ding.utils import read_file, save_file, get_data_decompressor, COMM_LEARNER_REGISTRY\n'), ((10588, 10601), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10598, 10601), False, 'import time\n'), ((9423, 9447), 'ding.utils.file_helper.read_from_di_store', 'read_from_di_store', (['path'], {}), '(path)\n', (9441, 9447), False, 'from ding.utils.file_helper import read_from_di_store\n'), ((9475, 9506), 'ding.utils.read_file', 'read_file', (['path'], {'use_lock': '(False)'}), '(path, use_lock=False)\n', (9484, 9506), False, 'from ding.utils import read_file, save_file, get_data_decompressor, COMM_LEARNER_REGISTRY\n'), ((9611, 9627), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (9621, 9627), False, 'import time\n'), ((11467, 11510), 'os.path.join', 'os.path.join', (['self._path_data', "m['data_id']"], {}), "(self._path_data, m['data_id'])\n", (11479, 11510), False, 'import os\n')] |
import os
import base64
import json
import logging
from requests.adapters import Response
import speech_recognition as sr
from flask import Flask
from flask_sockets import Sockets
from pydub import AudioSegment
from twilio.rest import Client
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
from indictrans import Transliterator
import time
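# Clear any leftover recordings from the Audio folder before this session starts;
# note the guard checks for "recording.wav" rather than the file being removed.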
for i in os.listdir("Audio"):
os.remove(f"Audio/{i}") if os.path.exists("recording.wav") else None
trn = Transliterator(source='eng', target='hin', build_lookup=True)
mode = "rohanrajpal/bert-base-codemixed-uncased-sentiment"
tokenizer = AutoTokenizer.from_pretrained(mode)
model = AutoModelForSequenceClassification.from_pretrained(mode)
app = Flask(__name__)
sockets = Sockets(app)
HTTP_SERVER_PORT = 8080
RAW_AUDIO_FILE_EXTENSION = "ulaw"
CONVERTED_AUDIO_FILE_EXTENSION = "wav"
ACC_SID = "XXXXXXXXX"
AUTH_TOKEN = "<PASSWORD>"
FROM_NUMBER = "XXXXXXXXX"
TO_NUMBER = "XXXXXXXXX"
account_sid = ACC_SID
auth_token = AUTH_TOKEN
client = Client(account_sid, auth_token)
ngrok_url = "XXXXXXXXX.ngrok.io"
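# Sequence drives the call flow: get_response() returns the next pre-built TwiML
# snippet and advances the counter, falling back to the final response on overflow.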
class Sequence:
def __init__(self):
self.CALL_FLOW = 0
self.responses = [f"""<Response>
<Play>XXXXXXXXX.wav</Play>
<Start> <Stream url="wss://{ngrok_url}/" /> </Start>
<Pause length = "10"/>
</Response>""",
f"""<Response>
<Play>XXXXXXXXX.wav</Play>
<Pause length = "10"/>
</Response>""",
f"""<Response>
<Play>XXXXXXXXX.wav</Play>
<Pause length = "30"/>
</Response>""",
f"""<Response>
<Play>XXXXXXXXX.wav</Play>
<Pause length = "30"/>
</Response>""",
f"""<Response>
<Play>XXXXXXXXX.wav</Play>
<Pause length = "30"/>
</Response>""",
f"""<Response>
<Play>XXXXXXXXX.wav</Play>
</Response>"""]
def get_call_flow(self):
return self.CALL_FLOW
def get_response(self):
try:
tmp = self.responses[self.CALL_FLOW]
self.CALL_FLOW += 1
except:
print("exception in get response")
tmp = self.responses[-1]
return tmp
seq = Sequence()
call = client.calls.create(
twiml=seq.get_response(), from_=FROM_NUMBER, to=TO_NUMBER)
call_sid = call.sid
r = sr.Recognizer()
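# getSentiment scores code-mixed text with the BERT model loaded above; if encoding
# fails, the text is retried truncated to 512 characters. Returns 0 when the
# negative score outweighs the positive one, otherwise 1.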
def getSentiment(text):
labels = ["Neutral", "Negative", "Positive"]
try:
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
except:
text = text[:512]
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
sc = {}
for i in range(3):
sc[labels[i]] = np.round(float(scores[i]), 4)
return 0 if sc["Negative"] > sc["Positive"] else 1
def recognize_speech(recording_audio_path):
with sr.AudioFile(recording_audio_path) as source:
audio = r.record(source)
try:
return r.recognize_google(audio)
except Exception as e:
return None
def make_update(speech):
hin = trn.transform(speech)
sentiment = getSentiment(hin)
CALL_FLOW = seq.get_call_flow()
print(f"You said: {hin} | Sentiment: {sentiment} | Callflow: {CALL_FLOW}")
if(CALL_FLOW == 2 or CALL_FLOW == 3 or CALL_FLOW == 4):
call = client.calls(call_sid).update(
twiml=seq.get_response())
else:
if sentiment:
call = client.calls(call_sid).update(
twiml=seq.get_response())
else:
call = client.calls(call_sid).update(
twiml=seq.responses[-1])
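# StreamAudioRecording buffers the raw mu-law payloads streamed by Twilio, writes
# them to disk, and converts the capture to an 8 kHz WAV file for speech recognition.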
class StreamAudioRecording:
def __init__(self, audio_recording_path):
self.audio_recording_path = audio_recording_path
self.f = None
self.audio_file_path = None
self.data_buffer = b''
self.data = []
def start_recording(self, call_id):
self.audio_file_path = os.path.join(
self.audio_recording_path, f"{call_id}.{RAW_AUDIO_FILE_EXTENSION}")
self.f = open(self.audio_file_path, 'wb')
def write_buffer(self, buffer):
self.data_buffer += buffer
self.f.write(buffer)
def append_buffer(self):
self.data.append(self.data_buffer)
self.data_buffer = b''
def stop_recording(self):
self.f.close()
converted_audio_path = self.audio_file_path.replace(RAW_AUDIO_FILE_EXTENSION,
CONVERTED_AUDIO_FILE_EXTENSION)
self.convert_call_recording(self.audio_file_path, converted_audio_path)
return converted_audio_path
@ staticmethod
def convert_call_recording(mulaw_path, wav_path):
new = AudioSegment.from_file(
mulaw_path, "mulaw", frame_rate=8000, channels=1, sample_width=1)
new.frame_rate = 8000
new.export(wav_path, format="wav", bitrate="8k")
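# WebSocket endpoint for the Twilio Media Stream: media payloads are buffered and,
# every 58 messages, the latest chunk is checked for a long run of 0xff bytes
# (mu-law near-silence, presumably marking a pause) before the recording so far is
# transcribed and passed to make_update() to steer the call.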
@ sockets.route('/')
def echo(ws):
app.logger.info("Connection accepted")
# A lot of messages will be sent rapidly. We'll stop showing after the first one.
has_seen_media = False
message_count = 1
recording = StreamAudioRecording("/Users/ace/Desktop/Twilio/Audio")
recording.start_recording("0")
while not ws.closed:
message = ws.receive()
# print(f"Received message: {message}")
if message is None:
app.logger.info("No message received...")
continue
# Messages are a JSON encoded string
data = json.loads(message)
# Using the event type you can determine what type of message you are receiving
if data['event'] == "connected":
app.logger.info("Connected Message received: {}".format(message))
if data['event'] == "start":
app.logger.info("Start Message received: {}".format(message))
if data['event'] == "media":
payload = data['media']['payload']
chunk = base64.b64decode(payload)
recording.write_buffer(chunk)
if(message_count % 58 == 0):
recording.append_buffer()
try:
rb1 = recording.data[-1].count(b'\xff')
if(rb1 > 7000):
st = time.time()
recording_audio_path = recording.stop_recording()
recording.start_recording(str(message_count))
speech = recognize_speech(recording_audio_path)
if speech:
print(time.time()-st)
make_update(speech)
except:
pass
message_count += 1
if not has_seen_media:
payload = data['media']['payload']
chunk = base64.b64decode(payload)
has_seen_media = True
if data['event'] == "stop":
app.logger.info("Stop Message received: {}".format(message))
recording.append_buffer()
break
app.logger.info(
"Connection closed. Received a total of {} messages".format(message_count))
recording_audio_path = recording.stop_recording()
recording_audio_path = recording.stop_recording()
speech = recognize_speech(recording_audio_path)
if speech:
print("\n*****\n", speech, "\n******")
if __name__ == '__main__':
app.logger.setLevel(logging.DEBUG)
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(
('', HTTP_SERVER_PORT), app, handler_class=WebSocketHandler)
print("Server listening on: http://localhost:" + str(HTTP_SERVER_PORT))
server.serve_forever()
| [
"os.path.exists",
"json.loads",
"os.listdir",
"indictrans.Transliterator",
"flask.Flask",
"flask_sockets.Sockets",
"speech_recognition.AudioFile",
"os.path.join",
"base64.b64decode",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"speech_recognition.Recognizer",
"os.remove",
"pydub.AudioSegment.from_file",
"transformers.AutoTokenizer.from_pretrained",
"gevent.pywsgi.WSGIServer",
"twilio.rest.Client",
"time.time",
"scipy.special.softmax"
] | [((430, 449), 'os.listdir', 'os.listdir', (['"""Audio"""'], {}), "('Audio')\n", (440, 449), False, 'import os\n'), ((531, 592), 'indictrans.Transliterator', 'Transliterator', ([], {'source': '"""eng"""', 'target': '"""hin"""', 'build_lookup': '(True)'}), "(source='eng', target='hin', build_lookup=True)\n", (545, 592), False, 'from indictrans import Transliterator\n'), ((664, 699), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['mode'], {}), '(mode)\n', (693, 699), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((708, 764), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['mode'], {}), '(mode)\n', (758, 764), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((772, 787), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (777, 787), False, 'from flask import Flask\n'), ((798, 810), 'flask_sockets.Sockets', 'Sockets', (['app'], {}), '(app)\n', (805, 810), False, 'from flask_sockets import Sockets\n'), ((1063, 1094), 'twilio.rest.Client', 'Client', (['account_sid', 'auth_token'], {}), '(account_sid, auth_token)\n', (1069, 1094), False, 'from twilio.rest import Client\n'), ((2454, 2469), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (2467, 2469), True, 'import speech_recognition as sr\n'), ((2850, 2865), 'scipy.special.softmax', 'softmax', (['scores'], {}), '(scores)\n', (2857, 2865), False, 'from scipy.special import softmax\n'), ((7724, 7802), 'gevent.pywsgi.WSGIServer', 'pywsgi.WSGIServer', (["('', HTTP_SERVER_PORT)", 'app'], {'handler_class': 'WebSocketHandler'}), "(('', HTTP_SERVER_PORT), app, handler_class=WebSocketHandler)\n", (7741, 7802), False, 'from gevent import pywsgi\n'), ((482, 513), 'os.path.exists', 'os.path.exists', (['"""recording.wav"""'], {}), "('recording.wav')\n", (496, 513), False, 'import os\n'), ((455, 478), 'os.remove', 'os.remove', (['f"""Audio/{i}"""'], {}), "(f'Audio/{i}')\n", (464, 478), False, 'import os\n'), ((3065, 3099), 'speech_recognition.AudioFile', 'sr.AudioFile', (['recording_audio_path'], {}), '(recording_audio_path)\n', (3077, 3099), True, 'import speech_recognition as sr\n'), ((4139, 4224), 'os.path.join', 'os.path.join', (['self.audio_recording_path', 'f"""{call_id}.{RAW_AUDIO_FILE_EXTENSION}"""'], {}), "(self.audio_recording_path, f'{call_id}.{RAW_AUDIO_FILE_EXTENSION}'\n )\n", (4151, 4224), False, 'import os\n'), ((4924, 5016), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['mulaw_path', '"""mulaw"""'], {'frame_rate': '(8000)', 'channels': '(1)', 'sample_width': '(1)'}), "(mulaw_path, 'mulaw', frame_rate=8000, channels=1,\n sample_width=1)\n", (4946, 5016), False, 'from pydub import AudioSegment\n'), ((5707, 5726), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (5717, 5726), False, 'import json\n'), ((6150, 6175), 'base64.b64decode', 'base64.b64decode', (['payload'], {}), '(payload)\n', (6166, 6175), False, 'import base64\n'), ((6998, 7023), 'base64.b64decode', 'base64.b64decode', (['payload'], {}), '(payload)\n', (7014, 7023), False, 'import base64\n'), ((6447, 6458), 'time.time', 'time.time', ([], {}), '()\n', (6456, 6458), False, 'import time\n'), ((6744, 6755), 'time.time', 'time.time', ([], {}), '()\n', (6753, 6755), False, 'import time\n')] |
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""
Internal module dedicated to translating observables and indicators
as well as translating OpenIOC to CybOX and STIX.
"""
import logging
import cybox.utils
from cybox.core import Observables, Observable, ObservableComposition
from cybox.common import ToolInformationList, ToolInformation
import stix.utils
from stix.core import STIXPackage, STIXHeader
from stix.common import InformationSource
from stix.common.vocabs import PackageIntent
from stix.indicator import Indicator
from . import openioc
from . import objectify
from . import xml
from . import utils
from . import version
# ID format for translated OpenIOC items
OPENIOC_ID_FMT = "openioc:item-%s"
# Map of IndicatorItem conditions to CybOX operators
CONDITIONS = {
'is': 'Equals',
'isnot': 'DoesNotEqual',
'contains': 'Contains',
'containsnot': 'DoesNotContain'
}
LOG = logging.getLogger(__name__)
def _translate_id(id_):
"""Process an id which is normalized and has 'openioc:item-' prepended
Args:
id: String to uniquely represent an observable or indicator
Returns:
If there is no id, None is returned.
Otherwise a normalized id with 'openioc:item-' prepended is returned.
"""
id_ = utils.normalize_id(id_)
if not id_:
return None
return OPENIOC_ID_FMT % id_
def _make_observable(item):
    """Process an IndicatorItem and create a single Observable from it.
Args:
item: Individual indicator item
Returns:
A cybox.core.Observable object
"""
content = openioc.get_content(item)
search = openioc.get_search(item)
condition = openioc.get_condition(item)
if not (content and search and condition):
fmt = "Unable to produce Observable from IndicatorItem on line: %d"
LOG.warn(fmt, item.sourceline)
return None
# Map the OpenIOC condition to a CybOX condition
condition = CONDITIONS[condition]
# Convert the IndicatorItem into a CybOX Object
object_ = objectify.make_object(search, content, condition)
if object_:
return Observable(object_)
skipped_term = utils.forcestring(search)
fmt = ("Error|Ignore. IndicatorItem not translated. Encountered IOC "
"term '%s' , which does not currently map to CybOX.")
desc = fmt % skipped_term
desc = cybox.utils.wrap_cdata(desc)
obs = Observable(description=desc)
return obs
def _translate_item(item):
    """Process an IndicatorItem and create a single Observable from it.
Args:
item: Individual indicator item
Returns:
A cybox.core.Observable object
"""
return _make_observable(item)
def _translate_items(items):
    """Process one or more IndicatorItems and create a list of Observables.
    Args:
        items: Iterable of IndicatorItem elements
Returns:
cybox.core.Observable object list.
"""
observables = (_make_observable(x) for x in items)
return [o for o in observables if o is not None]
def _indicator_to_observable(indicator):
    """Process an Indicator, which may contain nested Indicators, and create a composite Observable from its items.
Args:
indicator: Indicator(s) that will be translated
Returns:
A cybox.core.Observable object if `indicator` can be translated.
None is returned if `indicator` contains invalid or untranslatable items.
"""
items = openioc.get_items(indicator)
nested = openioc.get_indicators(indicator)
if not (nested or items):
return None
# If the openioc indicator has only one IndicatorItem, return an Observable
# object which has a single CybOX Object child.
if not nested and len(items) == 1:
return _translate_item(items[0])
# The openioc Indicator had more than one item or nested indicators, so
# we need to create an Observable Composition.
# Initialize the parent Observable
id_ = _translate_id(indicator.attrib.get("id"))
root = Observable(id_=id_)
operator = indicator.attrib.get("operator", "AND")
composite = ObservableComposition(operator=operator)
root.observable_composition = composite
# Translate all the IndicatorItem and nested Indicator children
observables = _translate_items(items) + _translate_indicators(nested)
# Add the translated Observable objects to the composite
composite.observables.extend(observables)
return root
def _observable_to_indicator_stix(observable):
"""Translate a CybOX Observable into a STIX Indicator.
Args:
observable: Observable object that will be translated
Returns:
Indicator object with STIX utility and CybOX tags
"""
# Build STIX tool content
tool = ToolInformation(tool_name='OpenIOC to STIX Utility')
tool.version = version.__version__
# Build Indicator.producer contents
producer = InformationSource()
producer.tools = ToolInformationList(tool)
# Build Indicator
indicator = Indicator(title="CybOX-represented Indicator Created from OpenIOC File")
indicator.producer = producer
indicator.add_observable(observable)
return indicator
def _translate_indicators(indicators):
    """Process a collection of Indicators and create a list of Observables from them.
    Args:
        indicators: Iterable of Indicator elements
Returns:
A cybox.core.Observable object list if `indicators` can be translated.
"""
is_empty = utils.is_empty_observable
translated = (_indicator_to_observable(x) for x in indicators)
return [x for x in translated if not is_empty(x)]
def to_cybox(infile):
"""Translate the `infile` OpenIOC xml document into a CybOX Observable.
Args:
infile: OpenIOC xml filename to translate
Returns:
cybox.core.Observables object
"""
iocdoc = xml.parse(infile)
indicators = openioc.get_top_indicators(iocdoc)
if len(indicators) == 0:
raise Exception("Input document contained no indicator items.")
observables = _translate_indicators(indicators)
if not observables:
raise Exception("Input document contained no indicator items compatible with CybOX.")
obsdoc = Observables(observables)
return obsdoc
@stix.utils.silence_warnings
def to_stix(infile):
"""Converts the `infile` OpenIOC xml document into a STIX Package.
Args:
infile: OpenIOC xml filename to translate
Returns:
stix.core.STIXPackage object
"""
observables = to_cybox(infile)
# Build Indicators from the Observable objects
indicators = [_observable_to_indicator_stix(o) for o in observables]
# Wrap the created Observables in a STIX Package/Indicator
stix_package = STIXPackage()
# Set the Indicators collection
stix_package.indicators = indicators
# Create and write the STIX Header. Warning: these fields have been
# deprecated in STIX v1.2!
stix_header = STIXHeader()
stix_header.package_intent = PackageIntent.TERM_INDICATORS_MALWARE_ARTIFACTS
stix_header.description = "CybOX-represented Indicators Translated from OpenIOC File"
stix_package.stix_header = stix_header
return stix_package
| [
"logging.getLogger",
"stix.core.STIXHeader",
"cybox.common.ToolInformation",
"cybox.common.ToolInformationList",
"cybox.core.Observable",
"stix.indicator.Indicator",
"cybox.core.ObservableComposition",
"stix.core.STIXPackage",
"stix.common.InformationSource",
"cybox.core.Observables"
] | [((963, 990), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (980, 990), False, 'import logging\n'), ((2475, 2503), 'cybox.core.Observable', 'Observable', ([], {'description': 'desc'}), '(description=desc)\n', (2485, 2503), False, 'from cybox.core import Observables, Observable, ObservableComposition\n'), ((4063, 4082), 'cybox.core.Observable', 'Observable', ([], {'id_': 'id_'}), '(id_=id_)\n', (4073, 4082), False, 'from cybox.core import Observables, Observable, ObservableComposition\n'), ((4155, 4195), 'cybox.core.ObservableComposition', 'ObservableComposition', ([], {'operator': 'operator'}), '(operator=operator)\n', (4176, 4195), False, 'from cybox.core import Observables, Observable, ObservableComposition\n'), ((4809, 4861), 'cybox.common.ToolInformation', 'ToolInformation', ([], {'tool_name': '"""OpenIOC to STIX Utility"""'}), "(tool_name='OpenIOC to STIX Utility')\n", (4824, 4861), False, 'from cybox.common import ToolInformationList, ToolInformation\n'), ((4957, 4976), 'stix.common.InformationSource', 'InformationSource', ([], {}), '()\n', (4974, 4976), False, 'from stix.common import InformationSource\n'), ((4998, 5023), 'cybox.common.ToolInformationList', 'ToolInformationList', (['tool'], {}), '(tool)\n', (5017, 5023), False, 'from cybox.common import ToolInformationList, ToolInformation\n'), ((5063, 5135), 'stix.indicator.Indicator', 'Indicator', ([], {'title': '"""CybOX-represented Indicator Created from OpenIOC File"""'}), "(title='CybOX-represented Indicator Created from OpenIOC File')\n", (5072, 5135), False, 'from stix.indicator import Indicator\n'), ((6247, 6271), 'cybox.core.Observables', 'Observables', (['observables'], {}), '(observables)\n', (6258, 6271), False, 'from cybox.core import Observables, Observable, ObservableComposition\n'), ((6775, 6788), 'stix.core.STIXPackage', 'STIXPackage', ([], {}), '()\n', (6786, 6788), False, 'from stix.core import STIXPackage, STIXHeader\n'), ((6989, 7001), 'stix.core.STIXHeader', 'STIXHeader', ([], {}), '()\n', (6999, 7001), False, 'from stix.core import STIXPackage, STIXHeader\n'), ((2187, 2206), 'cybox.core.Observable', 'Observable', (['object_'], {}), '(object_)\n', (2197, 2206), False, 'from cybox.core import Observables, Observable, ObservableComposition\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = '<NAME>'
__date__ = '2018/7/21 22:07'
import socket
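# Minimal TCP server wrapper: binds the given host/port on construction and exposes
# the socket plus a listen() helper with a fixed backlog of 512 connections.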
class NetworkService(object):
def __init__(self, _ip, _p):
self.__host_ip = _ip
self.__port = _p
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.bind((self.__host_ip, self.__port))
self.__max_connect_num = 512
@property
def socket(self):
return self.__socket
def listen(self):
self.__socket.listen(self.__max_connect_num)
# def __address_request(self, _c_socket_info):
# client_socket = _c_socket_info[0]
# try:
# # Receive Client Message
# while True:
# data = client_socket.recv(2048)
# if not data:
# break
# client_msg = data.decode('utf-8')
# server_reply = self.__ref_ser_manager.address_request(client_msg, _c_socket_info)
# client_socket.sendall(server_reply.get_message().encode('utf-8'))
# client_socket.close()
# except Exception:
# print('Class:ServerNetwork:address_request')
# client_socket.close()
| [
"socket.socket"
] | [((255, 304), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (268, 304), False, 'import socket\n')] |
import sys
sys.path.insert(1, "../../")
import h2o, tests
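# Appears to be a regression check for a tracked JIRA issue: building an H2OFrame
# from a nested Python list, renaming its columns and showing it should not raise.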
def show_jira():
local_data = [[1, 'a'],[0, 'b']]
h2o_data = h2o.H2OFrame(python_obj=local_data)
h2o_data.set_names(['response', 'predictor'])
h2o_data.show()
if __name__ == "__main__":
tests.run_test(sys.argv, show_jira)
| [
"sys.path.insert",
"h2o.H2OFrame",
"tests.run_test"
] | [((11, 39), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../"""'], {}), "(1, '../../')\n", (26, 39), False, 'import sys\n'), ((133, 168), 'h2o.H2OFrame', 'h2o.H2OFrame', ([], {'python_obj': 'local_data'}), '(python_obj=local_data)\n', (145, 168), False, 'import h2o, tests\n'), ((271, 306), 'tests.run_test', 'tests.run_test', (['sys.argv', 'show_jira'], {}), '(sys.argv, show_jira)\n', (285, 306), False, 'import h2o, tests\n')] |
# Copyright (c) 2022, <NAME>. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
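# After a Kurs (course) document is created, give every linked Kurstermin a default
# end time two hours after its start.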
class Kurs(Document):
def after_insert(self):
for kurstermin in self.kurstermine:
kurstermin.ende = frappe.utils.add_to_date(kurstermin.termin, hours=2)
kurstermin.save()
pass | [
"frappe.utils.add_to_date"
] | [((262, 314), 'frappe.utils.add_to_date', 'frappe.utils.add_to_date', (['kurstermin.termin'], {'hours': '(2)'}), '(kurstermin.termin, hours=2)\n', (286, 314), False, 'import frappe\n')] |
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, list_output
@click.command('get_statuses')
@pass_context
@custom_exception
@list_output
def cli(ctx):
"""Get all statuses available in this Apollo instance
Output:
list of status info dictionaries
"""
return ctx.gi.status.get_statuses()
| [
"click.command"
] | [((122, 151), 'click.command', 'click.command', (['"""get_statuses"""'], {}), "('get_statuses')\n", (135, 151), False, 'import click\n')] |
"""empty message
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-03-26 23:46:21.991775
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
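# upgrade() widens carrier_name (30 -> 255) and national_format (20 -> 30) on
# phone_number_verifications; downgrade() restores the original column sizes.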
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('phone_number_verifications', 'carrier_name',
existing_type=sa.VARCHAR(length=30),
type_=sa.String(length=255),
existing_nullable=True)
op.alter_column('phone_number_verifications', 'national_format',
existing_type=sa.VARCHAR(length=20),
type_=sa.String(length=30),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('phone_number_verifications', 'national_format',
existing_type=sa.String(length=30),
type_=sa.VARCHAR(length=20),
existing_nullable=True)
op.alter_column('phone_number_verifications', 'carrier_name',
existing_type=sa.String(length=255),
type_=sa.VARCHAR(length=30),
existing_nullable=True)
# ### end Alembic commands ###
| [
"sqlalchemy.VARCHAR",
"sqlalchemy.String"
] | [((447, 468), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(30)'}), '(length=30)\n', (457, 468), True, 'import sqlalchemy as sa\n'), ((491, 512), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (500, 512), True, 'import sqlalchemy as sa\n'), ((651, 672), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(20)'}), '(length=20)\n', (661, 672), True, 'import sqlalchemy as sa\n'), ((695, 715), 'sqlalchemy.String', 'sa.String', ([], {'length': '(30)'}), '(length=30)\n', (704, 715), True, 'import sqlalchemy as sa\n'), ((974, 994), 'sqlalchemy.String', 'sa.String', ([], {'length': '(30)'}), '(length=30)\n', (983, 994), True, 'import sqlalchemy as sa\n'), ((1017, 1038), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(20)'}), '(length=20)\n', (1027, 1038), True, 'import sqlalchemy as sa\n'), ((1174, 1195), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (1183, 1195), True, 'import sqlalchemy as sa\n'), ((1218, 1239), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(30)'}), '(length=30)\n', (1228, 1239), True, 'import sqlalchemy as sa\n')] |
#!/usr/bin/env python
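# Read a multiple-sequence alignment of PDB chains (Clustal-like layout, see the
# sample at the bottom) and write, for every gap-free alignment column, the
# corresponding residue numbers of each sequence.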
class Aln :
def __init__(self):
self.names = []
self.seq = []
# self.seq_idnt = []
self.length = 0
self.idnt2loc = []
self.loc2idnt = []
self.loc2res = []
self.res2loc = []
self.gap = []
from my_error import Error
class AlnFile :
def __init__(self, filename, status):
if status in ('r','w') :
self._file = open(filename, status)
else :
raise Error('PdbFile', 'open_for_read', 'file is not closed')
def close(self):
self._file.close()
def read_all(self):
al = Aln()
line = self._file.readline()
# flg_read_aln = False
while line :
if line[6:27] == '|PDBID|CHAIN|SEQUENCE':
self._add_aln(al, line)
# flg_read_aln = True
# elif flg_read_aln :
# self._add_idnt(al, line)
# flg_read_aln = False
line = self._file.readline()
length = len(al.seq[0])
for idx in range(len(al.names)) :
if len(al.seq[idx]) != length :
raise Error('AlnFile', 'read_all', 'al.seq[idx] != length')
al.loc2res.append([])
al.res2loc.append([-1])
res_num = 0
# res -- 1 start
# loc -- 0 start
for i,s in enumerate(al.seq[idx]) :
if s == '-' :
al.loc2res[idx].append(-1)
else :
res_num += 1
al.loc2res[idx].append(res_num)
al.res2loc[idx].append(i)
al.length = length
#
# for i,s in enumerate(al.seq_idnt) :
# if i == length :
# break
# if s == ' ' :
# al.loc2idnt.append(False)
# elif s == '*' :
# al.loc2idnt.append(True)
# al.idnt2loc.append(i)
# else :
# raise Error('AlnFile', 'read_all', 'unknown letter in seq_idnt')
for i in range(length) :
ref = al.seq[0][i]
flg = True
for idx in range(len(al.names)) :
if al.seq[idx][i] != ref :
flg = False
break
al.loc2idnt.append(flg)
for i in range(length) :
flg = True
for idx in range(len(al.names)) :
if al.seq[idx][i] == '-' :
flg = False
break
if not flg :
al.gap.append(True)
else :
al.gap.append(False)
return al
def _add_aln(self, al, line):
if line[6:27] != '|PDBID|CHAIN|SEQUENCE':
raise Error('AlnFile', '_add_aln', 'line[6:27] != |PDBID|CHAIN|SEQUENCE')
name = line[0:6]
seqall = line[33:83]
if seqall.find(' ') != -1:
seq = seqall[0: seqall.find(' ')]
else :
seq = seqall[:]
# print name
# print seq
if name in al.names :
idx = al.names.index(name)
else :
al.names.append(name)
idx = len(al.names) - 1
if idx < len(al.seq) :
al.seq[idx].extend(list(seq))
else :
al.seq.append(list(seq))
# def _add_idnt(self, al, line):
# al.seq_idnt.extend(list(line[33:83]))
import sys
if len(sys.argv) != 3:
print ('\n Usage: [input aln file] [output list file]\n')
sys.exit(2)
f_aln = AlnFile(sys.argv[1], 'r')
f_out = open(sys.argv[2], 'w')
al = f_aln.read_all()
for i in range(al.length) :
print(al.seq[0][i], al.seq[1][i], al.loc2res[0][i], al.loc2res[1][i], al.loc2idnt[i], al.gap[i])
print('al.loc2res')
print(al.loc2res[0])
print('al.res2loc')
print(al.res2loc[0])
print('loc2idnt')
print(al.loc2idnt)
print('idnt2loc')
print(al.idnt2loc)
# write header
f_out.write('#')
for name in al.names :
f_out.write('%6s' % name)
f_out.write('\n')
# write data
for i in range(al.length) :
if al.gap[i] :
continue
f_out.write(' ')
for idx in range(len(al.names)) :
f_out.write(' %5i' % al.loc2res[idx][i])
f_out.write('\n')
#### Aln file sample
'''
0 1 2 3 4 5 6 7 8 9
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
3R8O_A|PDBID|CHAIN|SEQUENCE AAUUGAAGAGUUUGAUCAUGGCUCAGAUUGAACGCUGGCGGCAGGCCUAA
3I8H_A|PDBID|CHAIN|SEQUENCE ---UGGAGAGUUUGAUCCUGGCUCAGGGUGAACGCUGGCGGCGUGCCUAA
** *********** ******** ************** ******
3R8O_A|PDBID|CHAIN|SEQUENCE CACAUGCAAGUCGAACGGUAACAGGAAGAAGCUUGCUUCUUUGCUGACGA
3I8H_A|PDBID|CHAIN|SEQUENCE GACAUGCAAGUCGUGCGGG---CCGCGGGGUUUUACUCCGU----GGUCA
************ *** * * ** ** * * * *
'''
| [
"my_error.Error",
"sys.exit"
] | [((3694, 3705), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3702, 3705), False, 'import sys\n'), ((503, 558), 'my_error.Error', 'Error', (['"""PdbFile"""', '"""open_for_read"""', '"""file is not closed"""'], {}), "('PdbFile', 'open_for_read', 'file is not closed')\n", (508, 558), False, 'from my_error import Error\n'), ((2909, 2976), 'my_error.Error', 'Error', (['"""AlnFile"""', '"""_add_aln"""', '"""line[6:27] != |PDBID|CHAIN|SEQUENCE"""'], {}), "('AlnFile', '_add_aln', 'line[6:27] != |PDBID|CHAIN|SEQUENCE')\n", (2914, 2976), False, 'from my_error import Error\n'), ((1207, 1260), 'my_error.Error', 'Error', (['"""AlnFile"""', '"""read_all"""', '"""al.seq[idx] != length"""'], {}), "('AlnFile', 'read_all', 'al.seq[idx] != length')\n", (1212, 1260), False, 'from my_error import Error\n')] |
import pygame,sys,random
from pygame.math import Vector2
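# Grid-based Snake clone: SNAKE handles body movement and sprite selection, FRUIT
# places the apple on a random cell, and MAIN ties together collisions, scoring
# and the game-over reset.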
class SNAKE:
def __init__(self):
self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)]
self.direction = Vector2(0,0)
self.new_block = False
self.head_up = pygame.image.load('Graphics/head_up.png').convert_alpha()
self.head_down = pygame.image.load('Graphics/head_down.png').convert_alpha()
self.head_right = pygame.image.load('Graphics/head_right.png').convert_alpha()
self.head_left = pygame.image.load('Graphics/head_left.png').convert_alpha()
self.tail_up = pygame.image.load('Graphics/tail_up.png').convert_alpha()
self.tail_down = pygame.image.load('Graphics/tail_down.png').convert_alpha()
self.tail_right = pygame.image.load('Graphics/tail_right.png').convert_alpha()
self.tail_left = pygame.image.load('Graphics/tail_left.png').convert_alpha()
self.body_vertical = pygame.image.load('Graphics/body_vertical.png').convert_alpha()
self.body_horizontal = pygame.image.load('Graphics/body_horizontal.png').convert_alpha()
self.body_tr = pygame.image.load('Graphics/body_tr.png').convert_alpha()
self.body_tl = pygame.image.load('Graphics/body_tl.png').convert_alpha()
self.body_br = pygame.image.load('Graphics/body_br.png').convert_alpha()
self.body_bl = pygame.image.load('Graphics/body_bl.png').convert_alpha()
self.crunch_sound = pygame.mixer.Sound('Sound/crunch.wav')
def draw_snake(self):
self.update_head_graphics()
self.update_tail_graphics()
for index,block in enumerate(self.body):
x_pos = int(block.x * cell_size)
y_pos = int(block.y * cell_size)
block_rect = pygame.Rect(x_pos,y_pos,cell_size,cell_size)
if index == 0:
screen.blit(self.head,block_rect)
elif index == len(self.body) - 1:
screen.blit(self.tail,block_rect)
else:
previous_block = self.body[index + 1] - block
next_block = self.body[index - 1] - block
if previous_block.x == next_block.x:
screen.blit(self.body_vertical,block_rect)
elif previous_block.y == next_block.y:
screen.blit(self.body_horizontal,block_rect)
else:
if previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == -1:
screen.blit(self.body_tl,block_rect)
elif previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == -1:
screen.blit(self.body_bl,block_rect)
elif previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == 1:
screen.blit(self.body_tr,block_rect)
elif previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == 1:
screen.blit(self.body_br,block_rect)
def update_head_graphics(self):
head_relation = self.body[1] - self.body[0]
if head_relation == Vector2(1,0): self.head = self.head_left
elif head_relation == Vector2(-1,0): self.head = self.head_right
elif head_relation == Vector2(0,1): self.head = self.head_up
elif head_relation == Vector2(0,-1): self.head = self.head_down
def update_tail_graphics(self):
tail_relation = self.body[-2] - self.body[-1]
if tail_relation == Vector2(1,0): self.tail = self.tail_left
elif tail_relation == Vector2(-1,0): self.tail = self.tail_right
elif tail_relation == Vector2(0,1): self.tail = self.tail_up
elif tail_relation == Vector2(0,-1): self.tail = self.tail_down
def move_snake(self):
if self.new_block == True:
body_copy = self.body[:]
body_copy.insert(0,body_copy[0] + self.direction)
self.body = body_copy[:]
self.new_block = False
else:
body_copy = self.body[:-1]
body_copy.insert(0,body_copy[0] + self.direction)
self.body = body_copy[:]
def add_block(self):
self.new_block = True
def play_crunch_sound(self):
self.crunch_sound.play()
def reset(self):
self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)]
self.direction = Vector2(0,0)
class FRUIT:
def __init__(self):
self.randomize()
def draw_fruit(self):
fruit_rect = pygame.Rect(int(self.pos.x * cell_size),int(self.pos.y * cell_size),cell_size,cell_size)
screen.blit(apple,fruit_rect)
#pygame.draw.rect(screen,(126,166,114),fruit_rect)
def randomize(self):
self.x = random.randint(0,cell_number - 1)
self.y = random.randint(0,cell_number - 1)
self.pos = Vector2(self.x,self.y)
class MAIN:
def __init__(self):
self.snake = SNAKE()
self.fruit = FRUIT()
def update(self):
self.snake.move_snake()
self.check_collision()
self.check_fail()
def draw_elements(self):
self.draw_grass()
self.fruit.draw_fruit()
self.snake.draw_snake()
self.draw_score()
def check_collision(self):
if self.fruit.pos == self.snake.body[0]:
self.fruit.randomize()
self.snake.add_block()
self.snake.play_crunch_sound()
for block in self.snake.body[1:]:
if block == self.fruit.pos:
self.fruit.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over()
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over()
def game_over(self):
self.snake.reset()
def draw_grass(self):
grass_color = (167,209,61)
for row in range(cell_number):
if row % 2 == 0:
for col in range(cell_number):
if col % 2 == 0:
grass_rect = pygame.Rect(col * cell_size,row * cell_size,cell_size,cell_size)
pygame.draw.rect(screen,grass_color,grass_rect)
else:
for col in range(cell_number):
if col % 2 != 0:
grass_rect = pygame.Rect(col * cell_size,row * cell_size,cell_size,cell_size)
pygame.draw.rect(screen,grass_color,grass_rect)
def draw_score(self):
score_text = str(len(self.snake.body) - 3)
score_surface = game_font.render(score_text,True,(56,74,12))
score_x = int(cell_size * cell_number - 60)
score_y = int(cell_size * cell_number - 40)
score_rect = score_surface.get_rect(center = (score_x,score_y))
apple_rect = apple.get_rect(midright = (score_rect.left,score_rect.centery))
bg_rect = pygame.Rect(apple_rect.left,apple_rect.top,apple_rect.width + score_rect.width + 6,apple_rect.height)
pygame.draw.rect(screen,(167,209,61),bg_rect)
screen.blit(score_surface,score_rect)
screen.blit(apple,apple_rect)
pygame.draw.rect(screen,(56,74,12),bg_rect,2)
pygame.mixer.pre_init(44100,-16,2,512)
pygame.init()
cell_size = 40
cell_number = 20
screen = pygame.display.set_mode((cell_number * cell_size,cell_number * cell_size))
clock = pygame.time.Clock()
apple = pygame.image.load('Graphics/apple.png').convert_alpha()
game_font = pygame.font.Font('Font/PoetsenOne-Regular.ttf', 25)
SCREEN_UPDATE = pygame.USEREVENT
pygame.time.set_timer(SCREEN_UPDATE,150)
main_game = MAIN()
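# Event loop: the SCREEN_UPDATE timer fires every 150 ms to advance the game;
# arrow keys change direction, with direct reversals onto the body blocked.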
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == SCREEN_UPDATE:
main_game.update()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
if main_game.snake.direction.y != 1:
main_game.snake.direction = Vector2(0,-1)
if event.key == pygame.K_RIGHT:
if main_game.snake.direction.x != -1:
main_game.snake.direction = Vector2(1,0)
if event.key == pygame.K_DOWN:
if main_game.snake.direction.y != -1:
main_game.snake.direction = Vector2(0,1)
if event.key == pygame.K_LEFT:
if main_game.snake.direction.x != 1:
main_game.snake.direction = Vector2(-1,0)
screen.fill((175,215,70))
main_game.draw_elements()
pygame.display.update()
clock.tick(60) | [
"sys.exit",
"pygame.init",
"pygame.quit",
"pygame.time.set_timer",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.mixer.pre_init",
"pygame.mixer.Sound",
"pygame.Rect",
"pygame.math.Vector2",
"pygame.draw.rect",
"pygame.time.Clock",
"pygame.image.load",
"pygame.font.Font",
"pygame.display.update",
"random.randint"
] | [((6432, 6473), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', (['(44100)', '(-16)', '(2)', '(512)'], {}), '(44100, -16, 2, 512)\n', (6453, 6473), False, 'import pygame, sys, random\n'), ((6472, 6485), 'pygame.init', 'pygame.init', ([], {}), '()\n', (6483, 6485), False, 'import pygame, sys, random\n'), ((6530, 6605), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(cell_number * cell_size, cell_number * cell_size)'], {}), '((cell_number * cell_size, cell_number * cell_size))\n', (6553, 6605), False, 'import pygame, sys, random\n'), ((6614, 6633), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (6631, 6633), False, 'import pygame, sys, random\n'), ((6712, 6763), 'pygame.font.Font', 'pygame.font.Font', (['"""Font/PoetsenOne-Regular.ttf"""', '(25)'], {}), "('Font/PoetsenOne-Regular.ttf', 25)\n", (6728, 6763), False, 'import pygame, sys, random\n'), ((6801, 6842), 'pygame.time.set_timer', 'pygame.time.set_timer', (['SCREEN_UPDATE', '(150)'], {}), '(SCREEN_UPDATE, 150)\n', (6822, 6842), False, 'import pygame, sys, random\n'), ((6894, 6912), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6910, 6912), False, 'import pygame, sys, random\n'), ((7633, 7656), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7654, 7656), False, 'import pygame, sys, random\n'), ((175, 188), 'pygame.math.Vector2', 'Vector2', (['(0)', '(0)'], {}), '(0, 0)\n', (182, 188), False, 'from pygame.math import Vector2\n'), ((1367, 1405), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""Sound/crunch.wav"""'], {}), "('Sound/crunch.wav')\n", (1385, 1405), False, 'import pygame, sys, random\n'), ((3935, 3948), 'pygame.math.Vector2', 'Vector2', (['(0)', '(0)'], {}), '(0, 0)\n', (3942, 3948), False, 'from pygame.math import Vector2\n'), ((4263, 4297), 'random.randint', 'random.randint', (['(0)', '(cell_number - 1)'], {}), '(0, cell_number - 1)\n', (4277, 4297), False, 'import pygame, sys, random\n'), ((4309, 4343), 'random.randint', 'random.randint', (['(0)', '(cell_number - 1)'], {}), '(0, cell_number - 1)\n', (4323, 4343), False, 'import pygame, sys, random\n'), ((4357, 4380), 'pygame.math.Vector2', 'Vector2', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (4364, 4380), False, 'from pygame.math import Vector2\n'), ((6153, 6262), 'pygame.Rect', 'pygame.Rect', (['apple_rect.left', 'apple_rect.top', '(apple_rect.width + score_rect.width + 6)', 'apple_rect.height'], {}), '(apple_rect.left, apple_rect.top, apple_rect.width + score_rect.\n width + 6, apple_rect.height)\n', (6164, 6262), False, 'import pygame, sys, random\n'), ((6260, 6309), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(167, 209, 61)', 'bg_rect'], {}), '(screen, (167, 209, 61), bg_rect)\n', (6276, 6309), False, 'import pygame, sys, random\n'), ((6383, 6433), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(56, 74, 12)', 'bg_rect', '(2)'], {}), '(screen, (56, 74, 12), bg_rect, 2)\n', (6399, 6433), False, 'import pygame, sys, random\n'), ((6643, 6682), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/apple.png"""'], {}), "('Graphics/apple.png')\n", (6660, 6682), False, 'import pygame, sys, random\n'), ((112, 126), 'pygame.math.Vector2', 'Vector2', (['(5)', '(10)'], {}), '(5, 10)\n', (119, 126), False, 'from pygame.math import Vector2\n'), ((126, 140), 'pygame.math.Vector2', 'Vector2', (['(4)', '(10)'], {}), '(4, 10)\n', (133, 140), False, 'from pygame.math import Vector2\n'), ((140, 154), 'pygame.math.Vector2', 'Vector2', (['(3)', '(10)'], {}), '(3, 10)\n', (147, 154), False, 'from 
pygame.math import Vector2\n'), ((1631, 1678), 'pygame.Rect', 'pygame.Rect', (['x_pos', 'y_pos', 'cell_size', 'cell_size'], {}), '(x_pos, y_pos, cell_size, cell_size)\n', (1642, 1678), False, 'import pygame, sys, random\n'), ((2818, 2831), 'pygame.math.Vector2', 'Vector2', (['(1)', '(0)'], {}), '(1, 0)\n', (2825, 2831), False, 'from pygame.math import Vector2\n'), ((3166, 3179), 'pygame.math.Vector2', 'Vector2', (['(1)', '(0)'], {}), '(1, 0)\n', (3173, 3179), False, 'from pygame.math import Vector2\n'), ((3872, 3886), 'pygame.math.Vector2', 'Vector2', (['(5)', '(10)'], {}), '(5, 10)\n', (3879, 3886), False, 'from pygame.math import Vector2\n'), ((3886, 3900), 'pygame.math.Vector2', 'Vector2', (['(4)', '(10)'], {}), '(4, 10)\n', (3893, 3900), False, 'from pygame.math import Vector2\n'), ((3900, 3914), 'pygame.math.Vector2', 'Vector2', (['(3)', '(10)'], {}), '(3, 10)\n', (3907, 3914), False, 'from pygame.math import Vector2\n'), ((6951, 6964), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6962, 6964), False, 'import pygame, sys, random\n'), ((6969, 6979), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6977, 6979), False, 'import pygame, sys, random\n'), ((234, 275), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/head_up.png"""'], {}), "('Graphics/head_up.png')\n", (251, 275), False, 'import pygame, sys, random\n'), ((312, 355), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/head_down.png"""'], {}), "('Graphics/head_down.png')\n", (329, 355), False, 'import pygame, sys, random\n'), ((393, 437), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/head_right.png"""'], {}), "('Graphics/head_right.png')\n", (410, 437), False, 'import pygame, sys, random\n'), ((474, 517), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/head_left.png"""'], {}), "('Graphics/head_left.png')\n", (491, 517), False, 'import pygame, sys, random\n'), ((556, 597), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/tail_up.png"""'], {}), "('Graphics/tail_up.png')\n", (573, 597), False, 'import pygame, sys, random\n'), ((634, 677), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/tail_down.png"""'], {}), "('Graphics/tail_down.png')\n", (651, 677), False, 'import pygame, sys, random\n'), ((715, 759), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/tail_right.png"""'], {}), "('Graphics/tail_right.png')\n", (732, 759), False, 'import pygame, sys, random\n'), ((796, 839), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/tail_left.png"""'], {}), "('Graphics/tail_left.png')\n", (813, 839), False, 'import pygame, sys, random\n'), ((882, 929), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/body_vertical.png"""'], {}), "('Graphics/body_vertical.png')\n", (899, 929), False, 'import pygame, sys, random\n'), ((972, 1021), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/body_horizontal.png"""'], {}), "('Graphics/body_horizontal.png')\n", (989, 1021), False, 'import pygame, sys, random\n'), ((1058, 1099), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/body_tr.png"""'], {}), "('Graphics/body_tr.png')\n", (1075, 1099), False, 'import pygame, sys, random\n'), ((1134, 1175), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/body_tl.png"""'], {}), "('Graphics/body_tl.png')\n", (1151, 1175), False, 'import pygame, sys, random\n'), ((1210, 1251), 'pygame.image.load', 'pygame.image.load', (['"""Graphics/body_br.png"""'], {}), "('Graphics/body_br.png')\n", (1227, 1251), False, 'import pygame, sys, random\n'), ((1286, 1327), 'pygame.image.load', 
'pygame.image.load', (['"""Graphics/body_bl.png"""'], {}), "('Graphics/body_bl.png')\n", (1303, 1327), False, 'import pygame, sys, random\n'), ((2884, 2898), 'pygame.math.Vector2', 'Vector2', (['(-1)', '(0)'], {}), '(-1, 0)\n', (2891, 2898), False, 'from pygame.math import Vector2\n'), ((3232, 3246), 'pygame.math.Vector2', 'Vector2', (['(-1)', '(0)'], {}), '(-1, 0)\n', (3239, 3246), False, 'from pygame.math import Vector2\n'), ((2952, 2965), 'pygame.math.Vector2', 'Vector2', (['(0)', '(1)'], {}), '(0, 1)\n', (2959, 2965), False, 'from pygame.math import Vector2\n'), ((3300, 3313), 'pygame.math.Vector2', 'Vector2', (['(0)', '(1)'], {}), '(0, 1)\n', (3307, 3313), False, 'from pygame.math import Vector2\n'), ((7183, 7197), 'pygame.math.Vector2', 'Vector2', (['(0)', '(-1)'], {}), '(0, -1)\n', (7190, 7197), False, 'from pygame.math import Vector2\n'), ((7310, 7323), 'pygame.math.Vector2', 'Vector2', (['(1)', '(0)'], {}), '(1, 0)\n', (7317, 7323), False, 'from pygame.math import Vector2\n'), ((7435, 7448), 'pygame.math.Vector2', 'Vector2', (['(0)', '(1)'], {}), '(0, 1)\n', (7442, 7448), False, 'from pygame.math import Vector2\n'), ((7559, 7573), 'pygame.math.Vector2', 'Vector2', (['(-1)', '(0)'], {}), '(-1, 0)\n', (7566, 7573), False, 'from pygame.math import Vector2\n'), ((3016, 3030), 'pygame.math.Vector2', 'Vector2', (['(0)', '(-1)'], {}), '(0, -1)\n', (3023, 3030), False, 'from pygame.math import Vector2\n'), ((3364, 3378), 'pygame.math.Vector2', 'Vector2', (['(0)', '(-1)'], {}), '(0, -1)\n', (3371, 3378), False, 'from pygame.math import Vector2\n'), ((5431, 5498), 'pygame.Rect', 'pygame.Rect', (['(col * cell_size)', '(row * cell_size)', 'cell_size', 'cell_size'], {}), '(col * cell_size, row * cell_size, cell_size, cell_size)\n', (5442, 5498), False, 'import pygame, sys, random\n'), ((5503, 5552), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'grass_color', 'grass_rect'], {}), '(screen, grass_color, grass_rect)\n', (5519, 5552), False, 'import pygame, sys, random\n'), ((5640, 5707), 'pygame.Rect', 'pygame.Rect', (['(col * cell_size)', '(row * cell_size)', 'cell_size', 'cell_size'], {}), '(col * cell_size, row * cell_size, cell_size, cell_size)\n', (5651, 5707), False, 'import pygame, sys, random\n'), ((5712, 5761), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'grass_color', 'grass_rect'], {}), '(screen, grass_color, grass_rect)\n', (5728, 5761), False, 'import pygame, sys, random\n')] |
import uuid
from functools import lru_cache
from typing import List
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from Learning.utils import produce_dataframe, read_file_data, ALLOWED_EXTENSIONS
from .utils import arrayfy_strings, clean_array, validate_dataset
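# TrainingModel ties an uploaded dataset to the chosen feature-selection and
# training algorithms; clean() validates the file extension and checks that the
# target and feature columns actually exist in the dataframe.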
class TrainingModel(models.Model):
class FeatureSelectionAlgorithm(models.TextChoices):
rfe = "rfe", "Recursive Feature Elimination"
pearson = "pearson", "Pearson Correlation"
class TrainingAlgorithm(models.TextChoices):
dt = 'decision_tree', "Decision Tree Classifier"
rf = 'random_forest', "Random Forest"
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
title = models.CharField(max_length=64)
dataset = models.FileField(
upload_to='models', validators=[validate_dataset])
feature_selection_algorithm = models.CharField(
choices=FeatureSelectionAlgorithm.choices, max_length=7, blank=True)
training_algorithm = models.CharField(
choices=TrainingAlgorithm.choices, max_length=13, blank=True)
target_column = models.CharField(max_length=50, blank=True)
feature_columns = models.TextField(blank=True)
trained_model = models.FileField(upload_to='models/trained', blank=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
last_updated = models.DateTimeField(auto_now=True, editable=False)
def __str__(self):
return self.title
class Meta:
ordering = ['-last_updated']
@lru_cache
def get_dataframe_from_dataset(self, dataset_path, columns: List[str] = None):
file_data = read_file_data(dataset_path.path)
dataframe = produce_dataframe(file_data, columns)
return dataframe
def clean(self):
extension = self.dataset.name.split('.')[-1]
if extension not in ALLOWED_EXTENSIONS:
raise ValidationError(
_(f'Expected file with extension: {ALLOWED_EXTENSIONS}, found file type of {extension}'))
try:
value_array = arrayfy_strings(self.feature_columns)
except:
raise ValidationError(
_('Columns are not valid as an array. Ensure input is a string of comma-separated values'))
dataframe = self.get_dataframe_from_dataset(self.dataset)
if self.target_column and self.target_column not in dataframe.columns:
raise ValidationError(
_(f"Target column '{self.target_column}' is not in dataset"))
columns = clean_array(value_array)
for column in columns:
if column and column not in dataframe.columns:
raise ValidationError(
_(f"{column} is not a column in dataset"))
def save(self, *args, **kwargs):
value_array = arrayfy_strings(self.feature_columns)
if self.target_column in value_array:
value_array.remove(self.target_column)
self.feature_columns = ', '.join(value_array)
return super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("prediction:training-detail", kwargs={"uuid": self.uuid})
| [
"django.db.models.TextField",
"Learning.utils.read_file_data",
"django.urls.reverse",
"django.utils.translation.gettext_lazy",
"django.db.models.FileField",
"Learning.utils.produce_dataframe",
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.UUIDField"
] | [((747, 799), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)'}), '(default=uuid.uuid4, editable=False)\n', (763, 799), False, 'from django.db import models\n'), ((812, 843), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (828, 843), False, 'from django.db import models\n'), ((858, 925), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""models"""', 'validators': '[validate_dataset]'}), "(upload_to='models', validators=[validate_dataset])\n", (874, 925), False, 'from django.db import models\n'), ((969, 1058), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'FeatureSelectionAlgorithm.choices', 'max_length': '(7)', 'blank': '(True)'}), '(choices=FeatureSelectionAlgorithm.choices, max_length=7,\n blank=True)\n', (985, 1058), False, 'from django.db import models\n'), ((1089, 1167), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'TrainingAlgorithm.choices', 'max_length': '(13)', 'blank': '(True)'}), '(choices=TrainingAlgorithm.choices, max_length=13, blank=True)\n', (1105, 1167), False, 'from django.db import models\n'), ((1197, 1240), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (1213, 1240), False, 'from django.db import models\n'), ((1263, 1291), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1279, 1291), False, 'from django.db import models\n'), ((1312, 1368), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""models/trained"""', 'blank': '(True)'}), "(upload_to='models/trained', blank=True)\n", (1328, 1368), False, 'from django.db import models\n'), ((1383, 1438), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'editable': '(False)'}), '(auto_now_add=True, editable=False)\n', (1403, 1438), False, 'from django.db import models\n'), ((1458, 1509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'editable': '(False)'}), '(auto_now=True, editable=False)\n', (1478, 1509), False, 'from django.db import models\n'), ((1733, 1766), 'Learning.utils.read_file_data', 'read_file_data', (['dataset_path.path'], {}), '(dataset_path.path)\n', (1747, 1766), False, 'from Learning.utils import produce_dataframe, read_file_data, ALLOWED_EXTENSIONS\n'), ((1787, 1824), 'Learning.utils.produce_dataframe', 'produce_dataframe', (['file_data', 'columns'], {}), '(file_data, columns)\n', (1804, 1824), False, 'from Learning.utils import produce_dataframe, read_file_data, ALLOWED_EXTENSIONS\n'), ((3192, 3257), 'django.urls.reverse', 'reverse', (['"""prediction:training-detail"""'], {'kwargs': "{'uuid': self.uuid}"}), "('prediction:training-detail', kwargs={'uuid': self.uuid})\n", (3199, 3257), False, 'from django.urls import reverse\n'), ((2025, 2118), 'django.utils.translation.gettext_lazy', '_', (['f"""Expected file with extension: {ALLOWED_EXTENSIONS}, found file type of {extension}"""'], {}), "(f'Expected file with extension: {ALLOWED_EXTENSIONS}, found file type of {extension}'\n )\n", (2026, 2118), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2549, 2609), 'django.utils.translation.gettext_lazy', '_', (['f"""Target column \'{self.target_column}\' is not in dataset"""'], {}), '(f"Target column \'{self.target_column}\' is not in dataset")\n', (2550, 2609), True, 'from 
django.utils.translation import gettext_lazy as _\n'), ((2260, 2355), 'django.utils.translation.gettext_lazy', '_', (['"""Columns are not valid as an array. Ensure input is a string of comma-separated values"""'], {}), "('Columns are not valid as an array. Ensure input is a string of comma-separated values'\n )\n", (2261, 2355), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2805, 2846), 'django.utils.translation.gettext_lazy', '_', (['f"""{column} is not a column in dataset"""'], {}), "(f'{column} is not a column in dataset')\n", (2806, 2846), True, 'from django.utils.translation import gettext_lazy as _\n')] |
import re
from collections import defaultdict
from typing import List
from uuid import uuid4
import pytest
from protean import BaseAggregate, BaseRepository, BaseValueObject, UnitOfWork
from protean.exceptions import ExpectedVersionError
from protean.fields import Integer, String, ValueObject
from protean.globals import current_domain
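# Fixtures under test: a Person aggregate with a custom repository query, and a
# User aggregate whose Email value object validates addresses against a regex.
# The tests below cover persistence via repositories and optimistic concurrency.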
class Person(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
class PersonRepository(BaseRepository):
def find_adults(self, minimum_age: int = 21) -> List[Person]:
return current_domain.repository_for(Person)._dao.filter(age__gte=minimum_age)
class Meta:
aggregate_cls = Person
class Email(BaseValueObject):
REGEXP = r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?"
# This is the external facing data attribute
address = String(max_length=254, required=True)
def clean(self):
"""Business rules of Email address"""
errors = defaultdict(list)
if not bool(re.match(Email.REGEXP, self.address)):
errors["address"].append("is invalid")
return errors
class User(BaseAggregate):
email = ValueObject(Email, required=True)
password = String(required=True, max_length=255)
@pytest.fixture(autouse=True)
def register_elements(test_domain):
test_domain.register(Person)
test_domain.register(PersonRepository)
test_domain.register(User)
@pytest.mark.database
class TestPersistenceViaRepository:
def test_that_aggregate_can_be_persisted_with_repository(self, test_domain):
test_domain.repository_for(Person).add(
Person(first_name="John", last_name="Doe")
)
assert len(test_domain.repository_for(Person)._dao.query.all().items) == 1
def test_that_an_aggregate_can_be_retrieved_with_repository(self, test_domain):
person = Person(first_name="John", last_name="Doe")
test_domain.repository_for(Person).add(person)
assert test_domain.repository_for(Person).get(person.id) == person
def test_that_all_aggregates_can_be_retrieved_with_repository(self, test_domain):
person = Person(first_name="John", last_name="Doe")
test_domain.repository_for(Person).add(person)
assert test_domain.repository_for(Person).all() == [person]
@pytest.mark.database
class TestConcurrency:
def test_expected_version_error_on_version_mismatch(self, test_domain):
identifier = str(uuid4())
with UnitOfWork():
repo = test_domain.repository_for(Person)
person = Person(id=identifier, first_name="John", last_name="Doe")
repo.add(person)
person_dup1 = repo.get(identifier)
person_dup2 = repo.get(identifier)
with UnitOfWork():
person_dup1.first_name = "Jane"
repo.add(person_dup1)
with pytest.raises(ExpectedVersionError) as exc:
with UnitOfWork():
person_dup2.first_name = "Baby"
repo.add(person_dup2)
assert exc.value.args[0] == (
f"Wrong expected version: {person_dup2._version} "
f"(Aggregate: Person({identifier}), Version: {person_dup2._version+1})"
)
| [
"protean.fields.ValueObject",
"protean.fields.String",
"protean.fields.Integer",
"protean.globals.current_domain.repository_for",
"re.match",
"uuid.uuid4",
"collections.defaultdict",
"pytest.raises",
"protean.UnitOfWork",
"pytest.fixture"
] | [((1305, 1333), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1319, 1333), False, 'import pytest\n'), ((388, 424), 'protean.fields.String', 'String', ([], {'max_length': '(50)', 'required': '(True)'}), '(max_length=50, required=True)\n', (394, 424), False, 'from protean.fields import Integer, String, ValueObject\n'), ((441, 477), 'protean.fields.String', 'String', ([], {'max_length': '(50)', 'required': '(True)'}), '(max_length=50, required=True)\n', (447, 477), False, 'from protean.fields import Integer, String, ValueObject\n'), ((488, 507), 'protean.fields.Integer', 'Integer', ([], {'default': '(21)'}), '(default=21)\n', (495, 507), False, 'from protean.fields import Integer, String, ValueObject\n'), ((899, 936), 'protean.fields.String', 'String', ([], {'max_length': '(254)', 'required': '(True)'}), '(max_length=254, required=True)\n', (905, 936), False, 'from protean.fields import Integer, String, ValueObject\n'), ((1215, 1248), 'protean.fields.ValueObject', 'ValueObject', (['Email'], {'required': '(True)'}), '(Email, required=True)\n', (1226, 1248), False, 'from protean.fields import Integer, String, ValueObject\n'), ((1264, 1301), 'protean.fields.String', 'String', ([], {'required': '(True)', 'max_length': '(255)'}), '(required=True, max_length=255)\n', (1270, 1301), False, 'from protean.fields import Integer, String, ValueObject\n'), ((1022, 1039), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1033, 1039), False, 'from collections import defaultdict\n'), ((2510, 2517), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2515, 2517), False, 'from uuid import uuid4\n'), ((2533, 2545), 'protean.UnitOfWork', 'UnitOfWork', ([], {}), '()\n', (2543, 2545), False, 'from protean import BaseAggregate, BaseRepository, BaseValueObject, UnitOfWork\n'), ((2810, 2822), 'protean.UnitOfWork', 'UnitOfWork', ([], {}), '()\n', (2820, 2822), False, 'from protean import BaseAggregate, BaseRepository, BaseValueObject, UnitOfWork\n'), ((2916, 2951), 'pytest.raises', 'pytest.raises', (['ExpectedVersionError'], {}), '(ExpectedVersionError)\n', (2929, 2951), False, 'import pytest\n'), ((1061, 1097), 're.match', 're.match', (['Email.REGEXP', 'self.address'], {}), '(Email.REGEXP, self.address)\n', (1069, 1097), False, 'import re\n'), ((2977, 2989), 'protean.UnitOfWork', 'UnitOfWork', ([], {}), '()\n', (2987, 2989), False, 'from protean import BaseAggregate, BaseRepository, BaseValueObject, UnitOfWork\n'), ((631, 668), 'protean.globals.current_domain.repository_for', 'current_domain.repository_for', (['Person'], {}), '(Person)\n', (660, 668), False, 'from protean.globals import current_domain\n')] |
from django import forms
from .models import Post, Comment
from users.models import Profile
from django.contrib.auth.models import User
class CommentModelForm(forms.ModelForm):
comment = forms.CharField(label="", widget=forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'Comment here!', 'rows': '3', 'cols': '50'}))
class Meta:
model = Comment
fields = ['comment'] | [
"django.forms.Textarea"
] | [((224, 334), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Comment here!', 'rows': '3',\n 'cols': '50'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Comment here!', 'rows': '3', 'cols': '50'})\n", (238, 334), False, 'from django import forms\n')] |
#!/usr/bin/env python
import os
import sys
import django
if __name__ == "__main__":
# decide which settings to use
base_dir = os.path.dirname(__file__)
local_settings_module = ["slcpy","settings","local_settings"]
filepath = os.path.join(base_dir,"apps","/".join(local_settings_module)+".py")
if os.path.isfile(filepath):
django_settings_module = ".".join(local_settings_module)
else:
django_settings_module = "slcpy.settings.local"
    # set the local settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE",django_settings_module)
# execute from command line
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"os.path.isfile",
"os.path.dirname",
"os.environ.setdefault",
"django.core.management.execute_from_command_line"
] | [((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n'), ((323, 347), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (337, 347), False, 'import os\n'), ((517, 588), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', 'django_settings_module'], {}), "('DJANGO_SETTINGS_MODULE', django_settings_module)\n", (538, 588), False, 'import os\n'), ((694, 729), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['sys.argv'], {}), '(sys.argv)\n', (719, 729), False, 'from django.core.management import execute_from_command_line\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 13:54:30 2018
@author: uqytu1
"""
from osgeo import osr, ogr
import json
from . import prompt_widget
def CreateGeojsonFC(AOI):
# Open geometry
shp = ogr.Open(AOI)
lyr = shp.GetLayer()
LyrDefn = lyr.GetLayerDefn()
# Transform coordinate to WGS84 geographic crs
sourceSR = lyr.GetSpatialRef()
targetSR = osr.SpatialReference()
# Set the axis order as the traditional x y z way (for gdal above 3)
try:
sourceSR.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
targetSR.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
except AttributeError:
pass
targetSR.ImportFromEPSG(4326)
coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)
# Create feature list for storing GeoJson feature collection
my_features = list()
# Loop through features
for feat in lyr:
geom = feat.GetGeometryRef()
try:
geom.Transform(coordTrans)
except TypeError:
prompt_widget.InfoBox('No projection found',
'If the CRS is not EPSG::4326, search result may not be desired.')
pass
        # Flatten the geometry to 2D and assign it to the transformed feature
geom.FlattenTo2D()
transformed_feat = ogr.Feature(LyrDefn)
transformed_feat.SetGeometry(geom)
# Create a feature JSON object and strip 'properties' field as we don't need it
feature = transformed_feat.ExportToJson()
feature_json = json.loads(feature)
feature_json.pop('properties')
# Add the geometry into feature collections
my_features.append(feature_json)
# Close shapefile
lyr = None
shp = None
# Create a Feature Collection geojson
FeatureCollections = {'type': 'FeatureCollection', 'features': my_features}
return FeatureCollections
def LoadGeoJSON(AOI):
# This function can check CRS information in a GeoJSON file
with open(AOI) as json_data:
#AOI_geojson = geojson.load(json_data)
AOI_geojson = json.load(json_data)
# Show warning if no CRS is detected
if 'crs' not in AOI_geojson.keys():
prompt_widget.InfoBox('No projection found',
'If the CRS is not EPSG::4326, search result may not be desired.')
# Transform coordinate to WGS84 geographic crs and convert it back to geojson
FeatureCollections = CreateGeojsonFC(json.dumps(AOI_geojson))
return FeatureCollections
def CalculateCoverPercentage(AOI_geom, ImageFrame_geom):
# Create a list of CoverPercentage in case of multipolygon
CoverPercentage = list()
AOI_count = AOI_geom.GetGeometryCount()
# Loop through geometry for every shapes in case it's multipolygon
for i in range(AOI_count):
AOI_shp = AOI_geom.GetGeometryRef(i)
# If the AOI is not a valid polygon. Skip the calculation
if not AOI_shp.IsValid() or not ImageFrame_geom.IsValid():
CoverPercentage.append(None)
continue
# Get the areas
AOI_Area = AOI_shp.GetArea()
ImageFrame_Area = ImageFrame_geom.GetArea()
# Use AOI as reference to calculate the coverage if the image frame is larger than AOI
# Otherwise, the coverage is calculated the other way around
if ImageFrame_Area >= AOI_Area:
Reference_Area = AOI_Area
else:
Reference_Area = ImageFrame_Area
# Calculate the intersection percentage
intersection = AOI_shp.Intersection(ImageFrame_geom)
Cover_Area = intersection.GetArea()
CoverPercentage.append(int((Cover_Area/Reference_Area)*100))
return CoverPercentage
def MeetCoverageRange(AOI_geojson, ImageFrame_geojson, _min, _max):
# Load data as ogr vectors. The GeoJSON inputs must be string
AOI = ogr.Open(AOI_geojson)
AOI_lyr = AOI.GetLayer()
ImageFrame = ogr.Open(ImageFrame_geojson)
ImageFrame_lyr = ImageFrame.GetLayer()
# Get the geometry from Image Frame
ImageFrame_feat = ImageFrame_lyr.GetNextFeature()
ImageFrame_geom = ImageFrame_feat.GetGeometryRef()
# Get the feature from AOI. Only one feature per input.
AOI_feat = AOI_lyr.GetNextFeature()
AOI_geom = AOI_feat.GetGeometryRef()
# Get the list of coverage percentage. Null means to skip that AOI
Coverage = CalculateCoverPercentage(AOI_geom, ImageFrame_geom)
CoverageResults = list()
for Result in Coverage:
        # If the area-coverage condition is met, or the AOI is an invalid polygon, treat the coverage as valid
if Result is None:
CoverageResults.append(True)
elif (Result >= _min and Result <= _max):
CoverageResults.append(True)
# Otherwise, invalid coverage percentage
else:
CoverageResults.append(False)
return CoverageResults
| [
"json.loads",
"osgeo.osr.SpatialReference",
"json.dumps",
"osgeo.ogr.Open",
"json.load",
"osgeo.osr.CoordinateTransformation",
"osgeo.ogr.Feature"
] | [((209, 222), 'osgeo.ogr.Open', 'ogr.Open', (['AOI'], {}), '(AOI)\n', (217, 222), False, 'from osgeo import osr, ogr\n'), ((383, 405), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (403, 405), False, 'from osgeo import osr, ogr\n'), ((729, 777), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['sourceSR', 'targetSR'], {}), '(sourceSR, targetSR)\n', (757, 777), False, 'from osgeo import osr, ogr\n'), ((3918, 3939), 'osgeo.ogr.Open', 'ogr.Open', (['AOI_geojson'], {}), '(AOI_geojson)\n', (3926, 3939), False, 'from osgeo import osr, ogr\n'), ((3987, 4015), 'osgeo.ogr.Open', 'ogr.Open', (['ImageFrame_geojson'], {}), '(ImageFrame_geojson)\n', (3995, 4015), False, 'from osgeo import osr, ogr\n'), ((1343, 1363), 'osgeo.ogr.Feature', 'ogr.Feature', (['LyrDefn'], {}), '(LyrDefn)\n', (1354, 1363), False, 'from osgeo import osr, ogr\n'), ((1569, 1588), 'json.loads', 'json.loads', (['feature'], {}), '(feature)\n', (1579, 1588), False, 'import json\n'), ((2130, 2150), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (2139, 2150), False, 'import json\n'), ((2508, 2531), 'json.dumps', 'json.dumps', (['AOI_geojson'], {}), '(AOI_geojson)\n', (2518, 2531), False, 'import json\n')] |
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 22.01.2015
@author: marscher
'''
from __future__ import absolute_import
import numpy as np
from pyemma.coordinates.transform.transformer import Transformer
class WriterCSV(Transformer):
'''
shall write to csv files
'''
def __init__(self, filename):
'''
Constructor
'''
super(WriterCSV, self).__init__()
# filename should be obtained from source trajectory filename,
# eg suffix it to given filename
self.filename = filename
self._last_frame = False
self._reset()
def describe(self):
return "[Writer filename='%s']" % self.filename
def dimension(self):
return self.data_producer.dimension()
def _map_array(self, X):
pass
def _reset(self, stride=1):
try:
self._fh.close()
self._logger.debug('closed file')
except EnvironmentError:
self._logger.exception('during close')
except AttributeError:
# no file handle exists yet
pass
try:
self._fh = open(self.filename, 'wb')
except EnvironmentError:
self._logger.exception('could not open file "%s" for writing.')
raise
def _param_add_data(self, X, itraj, t, first_chunk, last_chunk_in_traj, last_chunk, ipass, Y=None, stride=1):
np.savetxt(self._fh, X)
if last_chunk:
self._logger.debug("closing file")
self._fh.close()
return True # finished
| [
"numpy.savetxt"
] | [((2791, 2814), 'numpy.savetxt', 'np.savetxt', (['self._fh', 'X'], {}), '(self._fh, X)\n', (2801, 2814), True, 'import numpy as np\n')] |
# This script normalizes the Grounds data so that venues such as Colombo (NBC) and Colombo (SBS) are merged into a single venue, e.g. Colombo.
import pandas as pd
import re
df = pd.read_csv('dataset.csv')
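# The named 'Ground_id' group captures the venue name (letters, digits, spaces, "'" and ".")
# up to the last non-space character, so qualifiers such as " (NBC)" or " (SBS)" fall outside
# the group and are dropped by the replacement below.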
ourRegex = r"(?P<Ground_id>[A-Za-z '.0-9]+(?<! ))+(.*)"
matched = r"\g<Ground_id>"
df['Ground'].replace(to_replace=ourRegex,value=matched,regex=True,inplace=True)
df.to_csv('dataset_final.csv')
| [
"pandas.read_csv"
] | [((145, 171), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (156, 171), True, 'import pandas as pd\n')] |
# Entrance of program
from PCANet import *
from data_loader import *
import sys
from sklearn.metrics import accuracy_score
from sklearn import svm
train_images, train_labels, test_images, test_labels = load_mnist('data/MNIST')
test_train = (train_images[:10, :, :], train_labels[:10])
net = PCANet(k1=7, k2=7, L1=8, L2=8, block_size=7, overlapping_radio=0)
net.fit(*test_train)  # train the PCANet filters first (mirrors the commented CIFAR example below)
test_predict = test_images[:10, :, :]
prediction = net.predict(test_predict)
print(accuracy_score(test_labels[:10], prediction))
#
# cifar_train, cifar_train_labels, cifar_test, cifat_test_label = load_CIFAR10('data/cifar-10-batches-py')
# test_train = (cifar_train[:10, :, :, :], cifar_train_labels[:10])
# print(test_train[0].shape, test_train[1].shape)
#
# net = PCANet(k1=5, k2=5, L1=40, L2=8, block_size=8, overlapping_radio=0.5, spp_parm=(4, 2, 1), dim_reduction=1280)
#
# net.classifier = svm.LinearSVC(C=10)
#
# net.fit(*test_train)
# test_predict = cifar_test[:10, :, :]
# prediction = net.predict(test_predict)
# print('acc:', accuracy_score(cifat_test_label[:10], prediction))
| [
"sklearn.metrics.accuracy_score"
] | [((444, 488), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels[:10]', 'prediction'], {}), '(test_labels[:10], prediction)\n', (458, 488), False, 'from sklearn.metrics import accuracy_score\n')] |
"""
| Created: 2017-08-13
| Updated: 2017-08-13
"""
from db import db
class ItemModel(db.Model):
"""Item model."""
__tablename__ = 'items'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
price = db.Column(db.Float(precision=2))
store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
store = db.relationship('StoreModel')
def __init__(self, name, price, store_id):
self.name = name
self.price = price
self.store_id = store_id
def json(self):
"""
Converts this item to JSON.
        :return: a JSON-serializable representation of this item.
        :rtype: dict.
"""
return {'name': self.name, 'price': self.price}
@classmethod
def find_by_name(cls, name):
"""
Selects an item from the DB and returns it.
:param name: the name of the item.
:type name: str
:return: an item.
:rtype: ItemModel.
"""
return cls.query.filter_by(name=name).first()
def save_to_db(self):
"""
Inserts this item in the DB.
"""
db.session.add(self)
db.session.commit()
def delete_from_db(self):
"""
Deletes this item from the DB.
"""
db.session.delete(self)
db.session.commit()
| [
"db.db.session.delete",
"db.db.session.commit",
"db.db.relationship",
"db.db.ForeignKey",
"db.db.Column",
"db.db.String",
"db.db.Float",
"db.db.session.add"
] | [((160, 199), 'db.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (169, 199), False, 'from db import db\n'), ((359, 388), 'db.db.relationship', 'db.relationship', (['"""StoreModel"""'], {}), "('StoreModel')\n", (374, 388), False, 'from db import db\n'), ((221, 234), 'db.db.String', 'db.String', (['(80)'], {}), '(80)\n', (230, 234), False, 'from db import db\n'), ((258, 279), 'db.db.Float', 'db.Float', ([], {'precision': '(2)'}), '(precision=2)\n', (266, 279), False, 'from db import db\n'), ((319, 345), 'db.db.ForeignKey', 'db.ForeignKey', (['"""stores.id"""'], {}), "('stores.id')\n", (332, 345), False, 'from db import db\n'), ((1108, 1128), 'db.db.session.add', 'db.session.add', (['self'], {}), '(self)\n', (1122, 1128), False, 'from db import db\n'), ((1137, 1156), 'db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1154, 1156), False, 'from db import db\n'), ((1259, 1282), 'db.db.session.delete', 'db.session.delete', (['self'], {}), '(self)\n', (1276, 1282), False, 'from db import db\n'), ((1291, 1310), 'db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1308, 1310), False, 'from db import db\n')] |
# Generated by Django 3.0.7 on 2020-07-05 15:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('socialaccount', '0003_extra_data_default_dict'),
]
operations = [
migrations.AlterUniqueTogether(
name='socialtoken',
unique_together=set(),
),
migrations.RemoveField(
model_name='socialtoken',
name='app',
),
]
| [
"django.db.migrations.RemoveField"
] | [((356, 416), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""socialtoken"""', 'name': '"""app"""'}), "(model_name='socialtoken', name='app')\n", (378, 416), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
"""
This code provides a method to perform sensitivity analysis of the Metadynamics parameters 'w' and 'sigma'.
"""
import run_ebmetad.pair_data as pd
import argparse
import sys
import json
sys.path.append('/home/jennifer/Git/sample_restraint/build/src/pythonmodule')
def force_table(weights=[0.1], sigmas=[0.2]):
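    """Build a nested force table: one 'w{w}_s{s}' entry per (weight, sigma) combination,
    each mapping pair names to their per-pair force tables (pair data is read from the
    module-level args.f)."""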
force_table = {}
multi_pair = pd.MultiPair()
multi_pair.read_from_json(args.f)
# We'll make a whole bunch of these tables for different values of w and sigma
for w in weights:
for s in sigmas:
key = 'w{}_s{}'.format(w, s)
force_table[key] = {}
for name in multi_pair.get_names():
idx = multi_pair.name_to_id(name)
ft = multi_pair[idx].build_force_table(w=w, sigma=s)
force_table[key][name] = ft
return force_table
if __name__ == '__main__':
parser = argparse.ArgumentParser(
"Builds a force table for specified w and sigma")
parser.add_argument(
'-f',
help=
'path to json of pair data; should include the smoothed DEER distribution and a '
'list of associated distance bins. See pair_data.json in the tests/ directory'
)
parser.add_argument(
'-w',
nargs='+',
help="weight, or height, of Gaussians (as in standard metadynmaics).",
type=float)
parser.add_argument(
'-s', nargs='+', help="sigma. Width of Gaussians", type=float)
parser.add_argument(
'-o',
help=
"path to where the force table will be stored. For now, stored as json."
)
args = parser.parse_args()
ft = force_table(weights=args.w, sigmas=args.s)
json.dump(ft, open(args.o, 'w'), indent=2)
| [
"sys.path.append",
"run_ebmetad.pair_data.MultiPair",
"argparse.ArgumentParser"
] | [((215, 292), 'sys.path.append', 'sys.path.append', (['"""/home/jennifer/Git/sample_restraint/build/src/pythonmodule"""'], {}), "('/home/jennifer/Git/sample_restraint/build/src/pythonmodule')\n", (230, 292), False, 'import sys\n'), ((381, 395), 'run_ebmetad.pair_data.MultiPair', 'pd.MultiPair', ([], {}), '()\n', (393, 395), True, 'import run_ebmetad.pair_data as pd\n'), ((917, 990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Builds a force table for specified w and sigma"""'], {}), "('Builds a force table for specified w and sigma')\n", (940, 990), False, 'import argparse\n')] |
from db import loadNews
if __name__ == '__main__':
print(loadNews(2, 0)) | [
"db.loadNews"
] | [((63, 77), 'db.loadNews', 'loadNews', (['(2)', '(0)'], {}), '(2, 0)\n', (71, 77), False, 'from db import loadNews\n')] |
import sys
from ply import *
TOP = 'True'
BOTTOM = 'False'
# Define classes for required operators - atomic, modalities and operators
class Atomic(object):
def __init__(self, atom):
self.atom = atom
def __str__(self):
return str(self.atom)
def __eq__(self, other):
if str(self.atom) == str(other):
return True
else:
return False
def __hash__(self):
return hash(str(self.atom))
__repr__ = __str__
class Modality(object):
def __init__(self, mode, m_id):
self.mode = mode
self.id = m_id
def __repr__(self):
return str(self.mode)
def __str__(self):
if self.mode == "box":
return "[" + self.id + "]"
elif self.mode == "dia":
return "<" + self.id + ">"
def __eq__(self, other):
if isinstance(self, Modality) and isinstance(other, Modality):
if str(self.mode) == str(other.mode) and self.id == other.id:
return True
else:
return False
def __hash__(self):
return hash(self.mode) + hash(self.id)
class Operator(object):
def __init__(self, mode):
self.mode = mode
def __str__(self):
return str(self.mode)
__repr__ = __str__
# Get the token mapping from lexer.
from lexer import tokens
# Define a BNF grammar for modal logic.
'''
formula0 : 'false'
| 'true'
| term
| '~' formula1
| '(' formula1 ')'
| '[id]' formula1
| '<id>' formula1
| formula1 '=>' formula1 # right associative
| formula1 '|' formula1 # left associative
| formula1 '&' formula1 # left associative
term0 : ATOM
'''
# Define operator precedence, from lowest to highest.
precedence = (
('right', 'IMP', 'IFF'),
('left', 'OR'),
('left', 'AND'),
('right', 'BOX', 'DIA', 'NOT'),
)
def p_formula_braced(p):
"""
formula : LPAREN formula RPAREN
"""
p[0] = p[2]
def p_formula_modal(p):
"""
formula : BOX formula
| DIA formula
"""
if p[1][:1] == '[':
syn = 'box'
elif p[1][:1] == '<':
syn = 'dia'
else:
pass
m_id = p[1][1:-1]
p[0] = [Modality(syn, m_id), p[2]]
def p_formula_not(p):
"""
formula : NOT formula
"""
p[0] = [Operator(p[1]), p[2]]
def p_formula_binary(p):
"""
formula : formula IMP formula
| formula IFF formula
| formula AND formula
| formula OR formula
"""
p[0] = [Operator(p[2]), p[1], p[3]]
def p_formula_atomic(p):
"""
formula : false
| true
| term
"""
global TOP, BOTTOM
if str(p[1]).lower() == 'true':
p[0] = Atomic(TOP)
elif str(p[1]).lower() == 'false':
p[0] = Atomic(BOTTOM)
else:
p[0] = p[1]
def p_term_atom(p):
"""
term : ATOM
"""
p[0] = Atomic(p[1])
# Error rule for syntax errors
def p_error(p):
sys.stderr.write('Syntax error in input. \n')
raise SystemExit(1)
bparser = yacc.yacc()
def parse(data, debug=0):
bparser.error = 0
p = bparser.parse(data, debug=debug)
if bparser.error:
return None
return p
| [
"sys.stderr.write"
] | [((3003, 3048), 'sys.stderr.write', 'sys.stderr.write', (['"""Syntax error in input. \n"""'], {}), "('Syntax error in input. \\n')\n", (3019, 3048), False, 'import sys\n')] |
import z3
from pdb import set_trace
import string
import toolz
import enum
import logging
import itertools
from typing import Iterable, Tuple, Set
from . import parsing
from .parsing import (PToken, EMPTY, CHAR, DOT, STAR, BAR, CONCAT, GROUP, BACKREF, CARET, DOLLAR)
from .preprocessing import (convert_stars, convert_bars, flatten_regex, remove_path, convert_to_encoded_symbols)
logger = logging.getLogger(__name__)
# set of nonzero lengths with which to approximate star
# the zero length is included automatically
DEFAULT_STAR_LENGTHS = [4]
# space is not valid URL char.
# pound (#) is invalid in domain. It gets replaced with /# in Chrome in the URL bar.
# question (?) is also invalid in domain, and gets replaced with /? in Chrome URL bar.
DEFAULT_DOT_CHARSET = 'abcdefghijklmnop012345' + "/"
class RegexStringExpr:
scratch_var_cnt = 0
ignore_wildcards = z3.Bool('ignore_wildcards')
def _gen_string_var(self):
x = z3.String('_x_{}'.format(self.string_var_count))
self.string_var_count += 1
return x
def _gen_bool_var(self):
b = z3.Bool('_b_{}'.format(self.bool_var_count))
self.bool_var_count += 1
return b
def __init__(self, regex: str, unknown: z3.StringSort(),
word_choice_cutoff=10,
dot_charset=DEFAULT_DOT_CHARSET,
star_lengths: Iterable[int] = DEFAULT_STAR_LENGTHS,
symbolic=False):
"""
Compiles Regex to Z3 String expressions
:param dot_charset: Characters that the DOT metachar can match. This should be limited to
valid URL characters, or can be set to a taint marker.
"""
self.unknown = unknown
self.star_lengths = star_lengths
self.string_var_count = 0
self.bool_var_count = 0
self.symbolic = symbolic
_parser = parsing.RegexParser()
parse_result = _parser.parse(regex)
self.parsing_errors = parse_result['errors']
regex_0 = flatten_regex(parse_result['root'])
regex_1 = remove_path(regex_0)
regex_2 = convert_stars(regex_1, star_lengths)
regex_3 = convert_bars(regex_2, cutoff=word_choice_cutoff)
if symbolic:
regex_4, self.symbols = convert_to_encoded_symbols(regex_3, {})
else:
regex_4, self.symbols = regex_3, {}
self.regex = regex_4
assert self.regex
self.groups = parse_result['groups']
self.backrefs = parse_result['backrefs']
self.dot_charset = dot_charset
def _sat_expr(self, regex: Tuple) -> Tuple[z3.SeqRef, z3.BoolRef, z3.BoolRef, z3.BoolRef]:
"""
:returns: string that matches regex, constraint on string,
whether string contains caret, whether string contains dollar
Whether there is a caret or dollar needs to be tracked because they imply constraints on
neighboring strings to the one returned.
"""
ty = regex[0]
if ty == EMPTY:
return (z3.StringVal(''), z3.BoolVal(True), z3.BoolVal(False), z3.BoolVal(False))
elif ty == CHAR:
return (z3.StringVal(regex[1]), z3.BoolVal(True), z3.BoolVal(False), z3.BoolVal(False))
elif ty == DOT:
x = self._gen_string_var()
constraint = z3.And(z3.Implies(self.ignore_wildcards, x == z3.StringVal('')),
z3.Implies(z3.Not(self.ignore_wildcards),
z3.Or(*(x == z3.StringVal(y) for y in self.dot_charset))))
return (x, constraint, z3.BoolVal(False), z3.BoolVal(False))
elif ty == STAR:
# STAR should have been approximated with something else during preprocessing.
raise NotImplementedError
elif ty == BAR:
ys, constraints_list, carets_list, dollars_list = zip(*map(self._sat_expr, regex[1:]))
x = self._gen_string_var()
x_constraint = z3.Or(*(z3.And(x == y, y_constraint)
for y, y_constraint in zip(ys, constraints_list)))
return (x, x_constraint, z3.Or(*carets_list), z3.Or(*dollars_list))
elif ty == CONCAT:
ys, y_constraints, carets_list, dollars_list = zip(*map(self._sat_expr, regex[1:]))
x = z3.Concat(*ys)
start_constraints = (
z3.Implies(b, z3.Length(y) == 0)
for ii, b in enumerate(carets_list)
for y in ys[:ii])
end_constraints = (
z3.Implies(b, z3.Length(y) == 0)
for ii, b in enumerate(dollars_list)
for y in ys[ii+1:]
)
x_constraint = z3.And(*toolz.concatv(y_constraints, start_constraints, end_constraints))
return (x, x_constraint, z3.Or(*carets_list), z3.Or(*dollars_list))
elif ty == GROUP:
# backrefs not supported
idx = regex[1] - 1 # not used currently; would be used to implement backrefs
inner = regex[2]
return self._sat_expr(inner)
elif ty == BACKREF:
raise NotImplementedError
elif ty == CARET:
assert len(regex) == 1
b = self._gen_bool_var()
return (z3.StringVal(''), b, b, z3.BoolVal(False))
elif ty == DOLLAR:
assert len(regex) == 1
b = self._gen_bool_var()
return (z3.StringVal(''), b, z3.BoolVal(False), b)
else:
raise ValueError("Unknown regex_parser type '%s'" % repr(ty))
def re_expr(self):
ss, expr, carets, dollars = self._sat_expr(self.regex)
return z3.simplify(z3.And(self.unknown == ss, expr))
| [
"logging.getLogger",
"z3.Concat",
"z3.And",
"z3.Bool",
"z3.BoolVal",
"toolz.concatv",
"z3.Or",
"z3.Not",
"z3.StringVal",
"z3.StringSort",
"z3.Length"
] | [((394, 421), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (411, 421), False, 'import logging\n'), ((879, 906), 'z3.Bool', 'z3.Bool', (['"""ignore_wildcards"""'], {}), "('ignore_wildcards')\n", (886, 906), False, 'import z3\n'), ((1236, 1251), 'z3.StringSort', 'z3.StringSort', ([], {}), '()\n', (1249, 1251), False, 'import z3\n'), ((5708, 5740), 'z3.And', 'z3.And', (['(self.unknown == ss)', 'expr'], {}), '(self.unknown == ss, expr)\n', (5714, 5740), False, 'import z3\n'), ((3038, 3054), 'z3.StringVal', 'z3.StringVal', (['""""""'], {}), "('')\n", (3050, 3054), False, 'import z3\n'), ((3056, 3072), 'z3.BoolVal', 'z3.BoolVal', (['(True)'], {}), '(True)\n', (3066, 3072), False, 'import z3\n'), ((3074, 3091), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3084, 3091), False, 'import z3\n'), ((3093, 3110), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3103, 3110), False, 'import z3\n'), ((3158, 3180), 'z3.StringVal', 'z3.StringVal', (['regex[1]'], {}), '(regex[1])\n', (3170, 3180), False, 'import z3\n'), ((3182, 3198), 'z3.BoolVal', 'z3.BoolVal', (['(True)'], {}), '(True)\n', (3192, 3198), False, 'import z3\n'), ((3200, 3217), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3210, 3217), False, 'import z3\n'), ((3219, 3236), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3229, 3236), False, 'import z3\n'), ((3603, 3620), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3613, 3620), False, 'import z3\n'), ((3622, 3639), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (3632, 3639), False, 'import z3\n'), ((3435, 3464), 'z3.Not', 'z3.Not', (['self.ignore_wildcards'], {}), '(self.ignore_wildcards)\n', (3441, 3464), False, 'import z3\n'), ((3373, 3389), 'z3.StringVal', 'z3.StringVal', (['""""""'], {}), "('')\n", (3385, 3389), False, 'import z3\n'), ((4148, 4167), 'z3.Or', 'z3.Or', (['*carets_list'], {}), '(*carets_list)\n', (4153, 4167), False, 'import z3\n'), ((4169, 4189), 'z3.Or', 'z3.Or', (['*dollars_list'], {}), '(*dollars_list)\n', (4174, 4189), False, 'import z3\n'), ((4332, 4346), 'z3.Concat', 'z3.Concat', (['*ys'], {}), '(*ys)\n', (4341, 4346), False, 'import z3\n'), ((4845, 4864), 'z3.Or', 'z3.Or', (['*carets_list'], {}), '(*carets_list)\n', (4850, 4864), False, 'import z3\n'), ((4866, 4886), 'z3.Or', 'z3.Or', (['*dollars_list'], {}), '(*dollars_list)\n', (4871, 4886), False, 'import z3\n'), ((3995, 4023), 'z3.And', 'z3.And', (['(x == y)', 'y_constraint'], {}), '(x == y, y_constraint)\n', (4001, 4023), False, 'import z3\n'), ((4741, 4805), 'toolz.concatv', 'toolz.concatv', (['y_constraints', 'start_constraints', 'end_constraints'], {}), '(y_constraints, start_constraints, end_constraints)\n', (4754, 4805), False, 'import toolz\n'), ((3522, 3537), 'z3.StringVal', 'z3.StringVal', (['y'], {}), '(y)\n', (3534, 3537), False, 'import z3\n'), ((4412, 4424), 'z3.Length', 'z3.Length', (['y'], {}), '(y)\n', (4421, 4424), False, 'import z3\n'), ((4580, 4592), 'z3.Length', 'z3.Length', (['y'], {}), '(y)\n', (4589, 4592), False, 'import z3\n'), ((5299, 5315), 'z3.StringVal', 'z3.StringVal', (['""""""'], {}), "('')\n", (5311, 5315), False, 'import z3\n'), ((5323, 5340), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (5333, 5340), False, 'import z3\n'), ((5462, 5478), 'z3.StringVal', 'z3.StringVal', (['""""""'], {}), "('')\n", (5474, 5478), False, 'import z3\n'), ((5483, 5500), 'z3.BoolVal', 'z3.BoolVal', (['(False)'], {}), '(False)\n', (5493, 5500), False, 'import z3\n')] |
#coding:utf-8
from rest_framework import generics
from rest_framework import permissions
from rest_framework import response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.reverse import reverse
from catalogue import models
from catalogue import serializers
class FunctionList(generics.ListCreateAPIView):
queryset = models.Function.objects.all()
serializer_class = serializers.FunctionSerializer
class FunctionDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Function.objects.all()
serializer_class = serializers.FunctionSerializer
class PersonList(generics.ListCreateAPIView):
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
class PersonDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
class ThemeList(generics.ListCreateAPIView):
queryset = models.Theme.objects.all()
serializer_class = serializers.ThemeSerializer
class ThemeDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Theme.objects.all()
serializer_class = serializers.ThemeSerializer
class TypeRessourceList(generics.ListCreateAPIView):
queryset = models.TypeRessource.objects.all()
serializer_class = serializers.TypeRessourceSerializer
class TypeRessourceDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.TypeRessource.objects.all()
serializer_class = serializers.TypeRessourceSerializer
class RessourceList(generics.ListCreateAPIView):
queryset = models.Ressource.objects.all()
serializer_class = serializers.RessourceSerializer
class RessourceDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Ressource.objects.all()
serializer_class = serializers.RessourceSerializer
@api_view(['GET'])
@permission_classes((permissions.AllowAny,))
def api_root(request, format=None):
return response.Response({
'function_list': reverse('function_list', request=request, format=format),
'person_list': reverse('person_list', request=request, format=format),
'theme_list': reverse('theme_list', request=request, format=format),
'type_ressource_list': reverse('type_ressource_list', request=request, format=format),
'ressource_list': reverse('ressource_list', request=request, format=format),
}) | [
"catalogue.models.TypeRessource.objects.all",
"rest_framework.decorators.permission_classes",
"catalogue.models.Ressource.objects.all",
"catalogue.models.Theme.objects.all",
"catalogue.models.Person.objects.all",
"catalogue.models.Function.objects.all",
"rest_framework.decorators.api_view",
"rest_framework.reverse.reverse"
] | [((1864, 1881), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1872, 1881), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((1883, 1926), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(permissions.AllowAny,)'], {}), '((permissions.AllowAny,))\n', (1901, 1926), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((365, 394), 'catalogue.models.Function.objects.all', 'models.Function.objects.all', ([], {}), '()\n', (392, 394), False, 'from catalogue import models\n'), ((527, 556), 'catalogue.models.Function.objects.all', 'models.Function.objects.all', ([], {}), '()\n', (554, 556), False, 'from catalogue import models\n'), ((674, 701), 'catalogue.models.Person.objects.all', 'models.Person.objects.all', ([], {}), '()\n', (699, 701), False, 'from catalogue import models\n'), ((830, 857), 'catalogue.models.Person.objects.all', 'models.Person.objects.all', ([], {}), '()\n', (855, 857), False, 'from catalogue import models\n'), ((972, 998), 'catalogue.models.Theme.objects.all', 'models.Theme.objects.all', ([], {}), '()\n', (996, 998), False, 'from catalogue import models\n'), ((1125, 1151), 'catalogue.models.Theme.objects.all', 'models.Theme.objects.all', ([], {}), '()\n', (1149, 1151), False, 'from catalogue import models\n'), ((1273, 1307), 'catalogue.models.TypeRessource.objects.all', 'models.TypeRessource.objects.all', ([], {}), '()\n', (1305, 1307), False, 'from catalogue import models\n'), ((1450, 1484), 'catalogue.models.TypeRessource.objects.all', 'models.TypeRessource.objects.all', ([], {}), '()\n', (1482, 1484), False, 'from catalogue import models\n'), ((1610, 1640), 'catalogue.models.Ressource.objects.all', 'models.Ressource.objects.all', ([], {}), '()\n', (1638, 1640), False, 'from catalogue import models\n'), ((1775, 1805), 'catalogue.models.Ressource.objects.all', 'models.Ressource.objects.all', ([], {}), '()\n', (1803, 1805), False, 'from catalogue import models\n'), ((2019, 2075), 'rest_framework.reverse.reverse', 'reverse', (['"""function_list"""'], {'request': 'request', 'format': 'format'}), "('function_list', request=request, format=format)\n", (2026, 2075), False, 'from rest_framework.reverse import reverse\n'), ((2100, 2154), 'rest_framework.reverse.reverse', 'reverse', (['"""person_list"""'], {'request': 'request', 'format': 'format'}), "('person_list', request=request, format=format)\n", (2107, 2154), False, 'from rest_framework.reverse import reverse\n'), ((2178, 2231), 'rest_framework.reverse.reverse', 'reverse', (['"""theme_list"""'], {'request': 'request', 'format': 'format'}), "('theme_list', request=request, format=format)\n", (2185, 2231), False, 'from rest_framework.reverse import reverse\n'), ((2264, 2326), 'rest_framework.reverse.reverse', 'reverse', (['"""type_ressource_list"""'], {'request': 'request', 'format': 'format'}), "('type_ressource_list', request=request, format=format)\n", (2271, 2326), False, 'from rest_framework.reverse import reverse\n'), ((2354, 2411), 'rest_framework.reverse.reverse', 'reverse', (['"""ressource_list"""'], {'request': 'request', 'format': 'format'}), "('ressource_list', request=request, format=format)\n", (2361, 2411), False, 'from rest_framework.reverse import reverse\n')] |
import os
def main():
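    # Remove files in /data/tmp/ whose names begin with a YYYYMMDD date earlier than
    # 2020-04-14; hidden files (names starting with ".") are skipped.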
x = os.listdir("/data/tmp/")
y = []
for i in x:
if i[0] != ".":
n = int(i[0:8])
if n < 20200414:
y.append(i)
for i in y:
os.remove("/data/tmp/" + i)
main()
| [
"os.listdir",
"os.remove"
] | [((28, 52), 'os.listdir', 'os.listdir', (['"""/data/tmp/"""'], {}), "('/data/tmp/')\n", (38, 52), False, 'import os\n'), ((163, 190), 'os.remove', 'os.remove', (["('/data/tmp/' + i)"], {}), "('/data/tmp/' + i)\n", (172, 190), False, 'import os\n')] |
# 01-01-2019
# <NAME>
# This script renames the exported image files using the Python os and re modules.
# Files are generated from Zen export.
# %%
import os, sys
import re
datapath = "/Volumes/LaCie_DataStorage/Mast_Lab/Mast_Lab_002/resource/raw_output"
# %%
nameset1 = os.listdir(datapath)
print(nameset1)
for i, item in enumerate(nameset1):
print(i, item)
slide_number = re.search('slide_(.*)-Scene', item)
if slide_number != None:
print(slide_number.group(1))
scene_number = re.search('Scene-(.*)-Scan', item)
print(scene_number.group(1))
newfilename = 'batch_02_slide_' + slide_number.group(1) + '_scene_' + scene_number.group(1) + '.ome.tiff'
print(newfilename)
filedir_org = os.path.join(datapath, item)
filedir_new = os.path.join(datapath, newfilename)
os.rename(filedir_org, filedir_new)
# %%
nameset2 = os.listdir(datapath)
print(nameset2)
for i, item in enumerate(nameset2):
print(i, item)
slide_number = re.search('_ome.tiff', item)
if slide_number != None:
p = re.compile('_ome.tiff')
newfilename = p.sub('.ome.tiff', item)
print(newfilename)
filedir_org = os.path.join(datapath, item)
filedir_new = os.path.join(datapath, newfilename)
os.rename(filedir_org, filedir_new)
# %%
for filename in os.listdir():
print(filename)
| [
"os.listdir",
"re.compile",
"os.rename",
"os.path.join",
"re.search"
] | [((273, 293), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (283, 293), False, 'import os, sys\n'), ((902, 922), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (912, 922), False, 'import os, sys\n'), ((1362, 1374), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1372, 1374), False, 'import os, sys\n'), ((384, 419), 're.search', 're.search', (['"""slide_(.*)-Scene"""', 'item'], {}), "('slide_(.*)-Scene', item)\n", (393, 419), False, 'import re\n'), ((1013, 1041), 're.search', 're.search', (['"""_ome.tiff"""', 'item'], {}), "('_ome.tiff', item)\n", (1022, 1041), False, 'import re\n'), ((514, 548), 're.search', 're.search', (['"""Scene-(.*)-Scan"""', 'item'], {}), "('Scene-(.*)-Scan', item)\n", (523, 548), False, 'import re\n'), ((750, 778), 'os.path.join', 'os.path.join', (['datapath', 'item'], {}), '(datapath, item)\n', (762, 778), False, 'import os, sys\n'), ((801, 836), 'os.path.join', 'os.path.join', (['datapath', 'newfilename'], {}), '(datapath, newfilename)\n', (813, 836), False, 'import os, sys\n'), ((845, 880), 'os.rename', 'os.rename', (['filedir_org', 'filedir_new'], {}), '(filedir_org, filedir_new)\n', (854, 880), False, 'import os, sys\n'), ((1088, 1111), 're.compile', 're.compile', (['"""_ome.tiff"""'], {}), "('_ome.tiff')\n", (1098, 1111), False, 'import re\n'), ((1209, 1237), 'os.path.join', 'os.path.join', (['datapath', 'item'], {}), '(datapath, item)\n', (1221, 1237), False, 'import os, sys\n'), ((1260, 1295), 'os.path.join', 'os.path.join', (['datapath', 'newfilename'], {}), '(datapath, newfilename)\n', (1272, 1295), False, 'import os, sys\n'), ((1304, 1339), 'os.rename', 'os.rename', (['filedir_org', 'filedir_new'], {}), '(filedir_org, filedir_new)\n', (1313, 1339), False, 'import os, sys\n')] |
import datetime
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# from financial.models import Orders
class Post(models.Model):
type_of_shipping = [
('post', 'post'),
('pishtaz', 'pishtaz'),
('aadi', 'aadi'),
('ersal_forooshgahi', "ersal_forooshgahi"),
('ersal_pekmotori', 'ersal_peykmotori '),
]
date_chices = [
(datetime.date)
]
day_division = [
('صبح', '۸ تا ۱۲ بعد از ظهر'),
('ظهر', '۱۲ تا ۶ عصر'),
("شب", "۶ عصر تا ۱۲ شب"),
]
post = models.CharField(max_length=20, choices=type_of_shipping)
user = models.ForeignKey(User, on_delete=models.CASCADE)
time_recived = models.CharField(max_length=3, choices=day_division)
date_recived = models.DateField()
class Meta:
verbose_name = 'نحوه ارسال'
verbose_name_plural = 'نحوه ارسال'
| [
"django.db.models.DateField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((599, 656), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'choices': 'type_of_shipping'}), '(max_length=20, choices=type_of_shipping)\n', (615, 656), False, 'from django.db import models\n'), ((668, 717), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (685, 717), False, 'from django.db import models\n'), ((737, 789), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'choices': 'day_division'}), '(max_length=3, choices=day_division)\n', (753, 789), False, 'from django.db import models\n'), ((809, 827), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (825, 827), False, 'from django.db import models\n')] |
from setuptools import setup
setup(
name='ml02450',
version='1.0.0',
packages=[''],
url='https://github.com/thomasnilsson/ml02450',
license='MIT',
author='tnni',
author_email='<EMAIL>',
description='A library for the machine learning and data mining course at DTU'
)
| [
"setuptools.setup"
] | [((30, 280), 'setuptools.setup', 'setup', ([], {'name': '"""ml02450"""', 'version': '"""1.0.0"""', 'packages': "['']", 'url': '"""https://github.com/thomasnilsson/ml02450"""', 'license': '"""MIT"""', 'author': '"""tnni"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A library for the machine learning and data mining course at DTU"""'}), "(name='ml02450', version='1.0.0', packages=[''], url=\n 'https://github.com/thomasnilsson/ml02450', license='MIT', author=\n 'tnni', author_email='<EMAIL>', description=\n 'A library for the machine learning and data mining course at DTU')\n", (35, 280), False, 'from setuptools import setup\n')] |
import pkg_resources
version_file = pkg_resources.resource_stream(__name__, "VERSION")
VERSION = version_file.readline().strip().decode()
| [
"pkg_resources.resource_stream"
] | [((37, 87), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', '"""VERSION"""'], {}), "(__name__, 'VERSION')\n", (66, 87), False, 'import pkg_resources\n')] |
# /*******************************************************************************
# Copyright Intel Corporation.
# This software and the related documents are Intel copyrighted materials, and your use of them
# is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
# or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.
#
# *******************************************************************************/
from modules.check import CheckSummary, CheckMetadataPy
import os
import re
import json
import subprocess
from typing import List, Dict
def get_hostname(json_node: Dict) -> None:
value = {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "cat /etc/hostname"
}
try:
with open("/etc/hostname", "r") as etc_hostname:
hostname = etc_hostname.readline().strip()
value["Value"] = hostname
except Exception as error:
value["RetVal"] = "ERROR"
value["Message"] = str(error)
json_node.update({"Hostname": value})
def _get_bios_vendor(json_node: Dict) -> None:
value = {"BIOS vendor": {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "cat /sys/class/dmi/id/bios_vendor"
}}
try:
with open("/sys/class/dmi/id/bios_vendor", "r") as bios_vendor_file:
bios_vendor = bios_vendor_file.readline().strip()
value["BIOS vendor"]["Value"] = bios_vendor
except Exception as error:
value["BIOS vendor"]["RetVal"] = "ERROR"
value["BIOS vendor"]["Message"] = str(error)
json_node.update(value)
def _get_bios_version(json_node: Dict) -> None:
value = {"BIOS version": {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "cat /sys/class/dmi/id/bios_version"
}}
try:
with open("/sys/class/dmi/id/bios_version", "r") as bios_verion_file:
bios_version = bios_verion_file.readline().strip()
value["BIOS version"]["Value"] = bios_version
except Exception as error:
value["BIOS version"]["RetVal"] = "ERROR"
value["BIOS version"]["Message"] = str(error)
json_node.update(value)
def _get_bios_release(json_node: Dict) -> None:
value = {"BIOS release": {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "cat /sys/class/dmi/id/bios_release"
}}
can_provide_info = os.path.exists("/sys/class/dmi/id/bios_release")
if can_provide_info:
try:
with open("/sys/class/dmi/id/bios_release", "r") as bios_release_file:
bios_release = bios_release_file.readline().strip()
value["BIOS release"]["Value"] = bios_release
value["BIOS release"]["Verbosity"] = 1
except Exception as error:
value["BIOS release"]["RetVal"] = "ERROR"
value["BIOS release"]["Message"] = str(error)
json_node.update(value)
def _get_bios_date(json_node: Dict) -> None:
value = {"BIOS date": {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "cat /sys/class/dmi/id/bios_date"
}}
can_provide_info = os.path.exists("/sys/class/dmi/id/bios_date")
if can_provide_info:
try:
with open("/sys/class/dmi/id/bios_date", "r") as bios_date_file:
bios_date = bios_date_file.readline().strip()
value["BIOS date"]["Value"] = bios_date
value["BIOS date"]["Verbosity"] = 2
except Exception as error:
value["BIOS date"]["RetVal"] = "ERROR"
value["BIOS date"]["Message"] = str(error)
json_node.update(value)
def get_bios_information(json_node: Dict) -> None:
value = {"Value": "Undefined", "RetVal": "INFO"}
bios_info = {}
_get_bios_vendor(bios_info)
_get_bios_version(bios_info)
_get_bios_release(bios_info)
_get_bios_date(bios_info)
value["Value"] = bios_info
json_node.update({"BIOS information": value})
def get_uname(json_node: Dict) -> None:
value = {"Value": "Undefined", "RetVal": "INFO", "Command": "uname -a"}
try:
command = ["uname", "-a"]
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
stdout, _ = process.communicate()
if process.returncode != 0:
raise Exception("Cannot get information about operating system name")
uname = stdout.splitlines()[0].strip()
value["Value"] = uname
except Exception as error:
value["RetVal"] = "ERROR"
value["Message"] = str(error)
json_node.update({"Operating system name": value})
def get_cpu_frequency(json_node: Dict) -> None:
verbosity_level = 1
MHz_pattern = re.compile(r"cpu MHz\s*\:\s((\d*[.])?\d+)")
value = {
"Value": "Undefined",
"RetVal": "INFO",
"Verbosity": verbosity_level,
"Command": "cat /proc/cpuinfo"}
try:
with open("/proc/cpuinfo", "r") as cpu_frequency_file:
cpu_frequency = {}
core_number = 0
for line in cpu_frequency_file.readlines():
result = MHz_pattern.search(line)
if result:
cpu_frequency.update(
{f"Core {core_number}": {
"Value": f"{result.group(1)} MHz",
"RetVal": "INFO",
"Verbosity": verbosity_level
}})
core_number += 1
value["Value"] = cpu_frequency
except Exception as error:
value["RetVal"] = "ERROR"
value["Message"] = str(error)
json_node.update({"CPU frequency": value})
def get_cpu_info(json_node: Dict) -> None:
value = {
"Value": "Undefined",
"RetVal": "INFO",
"Command": "lscpu"
}
try:
command = ["lscpu"]
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
stdout, _ = process.communicate()
if process.returncode != 0:
raise Exception("Cannot get information about CPU")
output = {}
for line in stdout.splitlines():
key, val = [elem.strip() for elem in line.split(":", 1)]
output.update({key: val})
cpu_info = {}
cpu_info.update({"Model name": {"Value": output["Model name"], "RetVal": "INFO"}})
cpu_info.update({"Architecture": {"Value": output["Architecture"], "RetVal": "INFO"}})
cpu_info.update({"Vendor": {"Value": output["Vendor ID"], "RetVal": "INFO", "Verbosity": 1}})
cpu_info.update({"CPU count": {"Value": output["CPU(s)"], "RetVal": "INFO"}})
cpu_info.update(
{"Thread(s) per core": {"Value": output["Thread(s) per core"], "RetVal": "INFO", "Verbosity": 2}})
cpu_info.update(
{"Core(s) per socket": {"Value": output["Core(s) per socket"], "RetVal": "INFO", "Verbosity": 2}})
cpu_info.update({"Socket(s)": {"Value": output["Socket(s)"], "RetVal": "INFO", "Verbosity": 2}})
get_cpu_frequency(cpu_info)
value["Value"] = cpu_info
except Exception as error:
value["RetVal"] = "ERROR"
value["Message"] = str(error)
json_node.update({"CPU information": value})
def run_base_check(data: dict) -> CheckSummary:
result_json = {"Value": {}}
get_hostname(result_json["Value"])
get_cpu_info(result_json["Value"])
get_bios_information(result_json["Value"])
get_uname(result_json["Value"])
check_summary = CheckSummary(
result=json.dumps(result_json, indent=4)
)
return check_summary
def get_api_version() -> str:
return "0.1"
def get_check_list() -> List[CheckMetadataPy]:
someCheck = CheckMetadataPy(
name="base_system_check",
type="Data",
tags="sysinfo,compile,runtime,host,target",
descr="This check shows information about hostname, CPU, BIOS and operating system.",
dataReq="{}",
rights="user",
timeout=5,
version="0.1",
run="run_base_check"
)
return [someCheck]
| [
"os.path.exists",
"re.compile",
"modules.check.CheckMetadataPy",
"subprocess.Popen",
"json.dumps"
] | [((2698, 2746), 'os.path.exists', 'os.path.exists', (['"""/sys/class/dmi/id/bios_release"""'], {}), "('/sys/class/dmi/id/bios_release')\n", (2712, 2746), False, 'import os\n'), ((3446, 3491), 'os.path.exists', 'os.path.exists', (['"""/sys/class/dmi/id/bios_date"""'], {}), "('/sys/class/dmi/id/bios_date')\n", (3460, 3491), False, 'import os\n'), ((5058, 5105), 're.compile', 're.compile', (['"""cpu MHz\\\\s*\\\\:\\\\s((\\\\d*[.])?\\\\d+)"""'], {}), "('cpu MHz\\\\s*\\\\:\\\\s((\\\\d*[.])?\\\\d+)')\n", (5068, 5105), False, 'import re\n'), ((8118, 8399), 'modules.check.CheckMetadataPy', 'CheckMetadataPy', ([], {'name': '"""base_system_check"""', 'type': '"""Data"""', 'tags': '"""sysinfo,compile,runtime,host,target"""', 'descr': '"""This check shows information about hostname, CPU, BIOS and operating system."""', 'dataReq': '"""{}"""', 'rights': '"""user"""', 'timeout': '(5)', 'version': '"""0.1"""', 'run': '"""run_base_check"""'}), "(name='base_system_check', type='Data', tags=\n 'sysinfo,compile,runtime,host,target', descr=\n 'This check shows information about hostname, CPU, BIOS and operating system.'\n , dataReq='{}', rights='user', timeout=5, version='0.1', run=\n 'run_base_check')\n", (8133, 8399), False, 'from modules.check import CheckSummary, CheckMetadataPy\n'), ((4465, 4560), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n encoding='utf-8')\n", (4481, 4560), False, 'import subprocess\n'), ((6233, 6328), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n encoding='utf-8')\n", (6249, 6328), False, 'import subprocess\n'), ((7938, 7971), 'json.dumps', 'json.dumps', (['result_json'], {'indent': '(4)'}), '(result_json, indent=4)\n', (7948, 7971), False, 'import json\n')] |
# Copyright 2017 <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
import logging
import six
from nirikshak.common import plugins
LOG = logging.getLogger(__name__)
@six.add_metaclass(ABCMeta)
class FormatOutput(object):
@abstractmethod
def format_output(self, **kwargs):
pass
def format_for_output(**kwargs):
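    # Resolve which post-task plugin to use; when no post_task is given, fall back to 'console'.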
if kwargs.get('post_task', 'console') == 'console':
post_task = kwargs.get('post_task', 'console')
else:
post_task = kwargs.get('post_task', 'dummy')
plugin = plugins.get_plugin(post_task)
soochis = None
try:
soochis = getattr(plugin, 'format_output')(**kwargs)
except AttributeError:
LOG.error("%s plugin for task could not be found", post_task)
return kwargs
except Exception:
LOG.error("Error in formatting %s jaanch for %s post_task",
kwargs['name'], post_task, exc_info=True)
else:
LOG.info("%s jaanch has been formatter by %s plugin",
kwargs['name'], post_task)
return soochis
| [
"logging.getLogger",
"six.add_metaclass",
"nirikshak.common.plugins.get_plugin"
] | [((686, 713), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (703, 713), False, 'import logging\n'), ((717, 743), 'six.add_metaclass', 'six.add_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (734, 743), False, 'import six\n'), ((1068, 1097), 'nirikshak.common.plugins.get_plugin', 'plugins.get_plugin', (['post_task'], {}), '(post_task)\n', (1086, 1097), False, 'from nirikshak.common import plugins\n')] |
from distutils.core import setup, Extension
StringDict_module = Extension('StringDict',
sources = ['src/StringDict.cpp', 'src/StringDictEntry.c', 'src/KeyInfo.c'],
depends = ['LEB128.h', 'MakeKeyInfo.h', 'PythonUtils.h', 'StringDict_Docs.h', 'StringDictEntry.h', 'setup.py'],
include_dirs = ['include'],
extra_compile_args = ["-std=c++17", "-O3", '-fno-delete-null-pointer-checks'])
setup (name = 'StringDict',
version = '0.1',
description = 'Provides a dict-like type that only allows bytes() (and bytes-like types) or str() keys.',
ext_modules = [StringDict_module]
)
| [
"distutils.core.Extension",
"distutils.core.setup"
] | [((65, 396), 'distutils.core.Extension', 'Extension', (['"""StringDict"""'], {'sources': "['src/StringDict.cpp', 'src/StringDictEntry.c', 'src/KeyInfo.c']", 'depends': "['LEB128.h', 'MakeKeyInfo.h', 'PythonUtils.h', 'StringDict_Docs.h',\n 'StringDictEntry.h', 'setup.py']", 'include_dirs': "['include']", 'extra_compile_args': "['-std=c++17', '-O3', '-fno-delete-null-pointer-checks']"}), "('StringDict', sources=['src/StringDict.cpp',\n 'src/StringDictEntry.c', 'src/KeyInfo.c'], depends=['LEB128.h',\n 'MakeKeyInfo.h', 'PythonUtils.h', 'StringDict_Docs.h',\n 'StringDictEntry.h', 'setup.py'], include_dirs=['include'],\n extra_compile_args=['-std=c++17', '-O3', '-fno-delete-null-pointer-checks']\n )\n", (74, 396), False, 'from distutils.core import setup, Extension\n'), ((451, 637), 'distutils.core.setup', 'setup', ([], {'name': '"""StringDict"""', 'version': '"""0.1"""', 'description': '"""Provides a dict-like type that only allows bytes() (and bytes-like types) or str() keys."""', 'ext_modules': '[StringDict_module]'}), "(name='StringDict', version='0.1', description=\n 'Provides a dict-like type that only allows bytes() (and bytes-like types) or str() keys.'\n , ext_modules=[StringDict_module])\n", (456, 637), False, 'from distutils.core import setup, Extension\n')] |
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
def email(user, message):
"""
:param user:
:param message:
:return: 邮件发送
"""
msg = MIMEText(message, 'plain', 'utf-8')
msg['From'] = formataddr(["监控", '<EMAIL>'])
msg['To'] = formataddr(["同信监控", '<EMAIL>'])
msg['Subject'] = "短信下发接口监控"
try:
server = smtplib.SMTP("smtp.126.com", 25, timeout=5)
server.set_debuglevel(1)
        server.login("<EMAIL>", "<PASSWORD>")  # the 126 mailbox must have SMTP enabled; use an authorization code here
server.sendmail('<EMAIL>', ['<EMAIL>',], msg.as_string())
server.quit()
except Exception as err:
print(err)
| [
"smtplib.SMTP",
"email.utils.formataddr",
"email.mime.text.MIMEText"
] | [((196, 231), 'email.mime.text.MIMEText', 'MIMEText', (['message', '"""plain"""', '"""utf-8"""'], {}), "(message, 'plain', 'utf-8')\n", (204, 231), False, 'from email.mime.text import MIMEText\n'), ((250, 279), 'email.utils.formataddr', 'formataddr', (["['监控', '<EMAIL>']"], {}), "(['监控', '<EMAIL>'])\n", (260, 279), False, 'from email.utils import formataddr\n'), ((296, 327), 'email.utils.formataddr', 'formataddr', (["['同信监控', '<EMAIL>']"], {}), "(['同信监控', '<EMAIL>'])\n", (306, 327), False, 'from email.utils import formataddr\n'), ((387, 430), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.126.com"""', '(25)'], {'timeout': '(5)'}), "('smtp.126.com', 25, timeout=5)\n", (399, 430), False, 'import smtplib\n')] |
from manim import *
import colorsys
RAINBOW = [PURPLE, PURE_BLUE, PURE_GREEN, YELLOW, ORANGE, PURE_RED]
def shift_discrete_gradient(colors, delta: int):
return list(map(lambda n: colors[(n + delta) % (len(colors) - 1)], range(len(colors))))
class ColorUpdater:
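    """Mobject updater that cycles a colour gradient by slowly rotating the hue of the RAINBOW palette over time."""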
def __init__(self) -> None:
self.time_state = 0
def rotate_color(self, c, t):
[r, g, b] = hex_to_rgb(c)
[h, s, v] = colorsys.rgb_to_hsv(r, g, b)
new_c = colorsys.hsv_to_rgb((h + t) % 1, s, v)
return rgb_to_hex(new_c)
def update_color(self, sq, dt):
self.time_state = self.time_state + (dt / 60)
sq.set_color_by_gradient([self.rotate_color(n, self.time_state) for n in RAINBOW])
| [
"colorsys.rgb_to_hsv",
"colorsys.hsv_to_rgb"
] | [((419, 447), 'colorsys.rgb_to_hsv', 'colorsys.rgb_to_hsv', (['r', 'g', 'b'], {}), '(r, g, b)\n', (438, 447), False, 'import colorsys\n'), ((465, 503), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['((h + t) % 1)', 's', 'v'], {}), '((h + t) % 1, s, v)\n', (484, 503), False, 'import colorsys\n')] |
import torch
import torch.nn as nn
import os
import numpy as np
import cv2
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from utils.misc import get_curtime
from utils.loss import LossAll
import utils.func_utils as func_utils
def collater(data):
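    # Merge a list of per-sample dicts of numpy arrays into one dict of batched tensors (stacked on dim 0).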
out_data_dict = {}
for name in data[0]:
out_data_dict[name] = []
for sample in data:
for name in sample:
out_data_dict[name].append(torch.from_numpy(sample[name]))
for name in out_data_dict:
out_data_dict[name] = torch.stack(out_data_dict[name], dim=0)
return out_data_dict
class TrainModule(object):
def __init__(self, dataset, num_classes, model, decoder, down_ratio):
torch.manual_seed(317)
self.dataset = dataset
self.dataset_phase = {
'dota': ['train'],
'railway': ['train']
}
self.num_classes = num_classes
self.model = model
self.decoder = decoder
self.down_ratio = down_ratio
def save_model(self, path, epoch, model, optimizer):
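        # Unwrap DataParallel (if used) so the checkpoint stores plain parameter names.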
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
torch.save({
'epoch': epoch,
'model_state_dict': state_dict,
'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss
}, path)
def load_model(self, model, optimizer, resume):
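        # Restore model/optimizer state from a checkpoint and return the epoch to resume from.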
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch']))
state_dict_ = checkpoint['model_state_dict']
        # Extract the parameters from state_dict_
state_dict = {}
for k in state_dict_:
            # Keys saved from multi-GPU (DataParallel) training carry a 'module.' prefix
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
# override model param
model.load_state_dict(state_dict, strict=True)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# for state in optimizer.state.values():
# for k, v in state.items():
# if isinstance(v, torch.Tensor):
# state[k] = v.cuda()
epoch = checkpoint['epoch']
return model, optimizer, epoch
def train_network(self, args):
optimizer = torch.optim.Adam(self.model.parameters(), args.init_lr)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.96, last_epoch=-1)
save_path = os.path.join('runs', args.dataset, f'{args.exp}_{get_curtime()}')
writer = SummaryWriter(save_path)
start_epoch = 1
# add resume part for continuing training when break previously, 10-16-2020
if args.resume_train:
model, optimizer, start_epoch = self.load_model(self.model,
optimizer,
args.resume_train)
if not os.path.exists(save_path):
os.makedirs(save_path)
        self.model = torch.nn.DataParallel(self.model, device_ids=[0, 1])  # TODO: make the GPU ids configurable instead of hard-coding [0, 1]
self.model.cuda()
criterion = LossAll()
print('Setting up data...')
dataset_module = self.dataset[args.dataset] # get dataset cls
# datasets
dsets = {
x: dataset_module(data_dir=args.data_dir,
phase=x, # train
input_h=args.input_h,
input_w=args.input_w,
down_ratio=self.down_ratio)
for x in self.dataset_phase[args.dataset]
}
dsets_loader = {
'train': torch.utils.data.DataLoader(dsets['train'],
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=collater)
}
print('Starting training...')
best_loss = 100.
for epoch in range(start_epoch, args.num_epoch + 1):
print('-' * 10)
print('Epoch: {}/{} '.format(epoch, args.num_epoch))
epoch_loss = self.run_epoch(phase='train',
data_loader=dsets_loader['train'],
criterion=criterion,
optimizer=optimizer)
            scheduler.step()  # decay the learning rate once per epoch
writer.add_scalar('train_loss', epoch_loss, global_step=epoch)
if epoch_loss < best_loss:
best_loss = epoch_loss
self.save_model(os.path.join(save_path, 'model_best.pth'),
epoch,
self.model,
optimizer)
            # evaluate the model on the test split every 5 epochs
if 'test' in self.dataset_phase[args.dataset] and epoch % 5 == 0:
mAP = self.dec_eval(args, dsets['test'])
writer.add_scalar('mAP', mAP, global_step=epoch)
def run_epoch(self, phase, data_loader, criterion, optimizer):
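        # Run one pass over the data loader; gradients are only enabled when phase == 'train'.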
if phase == 'train':
self.model.train()
else:
self.model.eval()
running_loss = 0.
        # note: losses are accumulated as Python floats via loss.item() below
tbar = tqdm(data_loader)
for i, data_dict in enumerate(tbar):
for name in data_dict:
data_dict[name] = data_dict[name].cuda()
if phase == 'train':
optimizer.zero_grad()
with torch.enable_grad():
pr_decs = self.model(data_dict['input']) # dict
loss = criterion(pr_decs, data_dict)
loss.backward()
optimizer.step()
else:
with torch.no_grad():
pr_decs = self.model(data_dict['input'])
loss = criterion(pr_decs, data_dict)
running_loss += loss.item()
tbar.set_description('{} loss: {:.3f}'.format(phase, running_loss / (i + 1)))
epoch_loss = running_loss / len(data_loader)
print('{} loss: {}'.format(phase, epoch_loss))
return epoch_loss
def dec_eval(self, args, dsets):
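        # Write detection results to disk and return the dataset's mAP evaluation.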
result_path = 'result_' + args.dataset
if not os.path.exists(result_path):
os.mkdir(result_path)
self.model.eval()
func_utils.write_results(args,
self.model, dsets,
self.down_ratio,
self.decoder,
result_path)
ap = dsets.dec_evaluation(result_path)
return ap
| [
"torch.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"utils.func_utils.write_results",
"os.path.exists",
"torch.enable_grad",
"torch.optim.lr_scheduler.ExponentialLR",
"os.makedirs",
"utils.misc.get_curtime",
"torch.load",
"torch.stack",
"utils.loss.LossAll",
"torch.nn.DataParallel",
"tqdm.tqdm",
"torch.from_numpy",
"os.path.join",
"os.mkdir",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | [((558, 597), 'torch.stack', 'torch.stack', (['out_data_dict[name]'], {'dim': '(0)'}), '(out_data_dict[name], dim=0)\n', (569, 597), False, 'import torch\n'), ((740, 762), 'torch.manual_seed', 'torch.manual_seed', (['(317)'], {}), '(317)\n', (757, 762), False, 'import torch\n'), ((1548, 1609), 'torch.load', 'torch.load', (['resume'], {'map_location': '(lambda storage, loc: storage)'}), '(resume, map_location=lambda storage, loc: storage)\n', (1558, 1609), False, 'import torch\n'), ((2633, 2709), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(0.96)', 'last_epoch': '(-1)'}), '(optimizer, gamma=0.96, last_epoch=-1)\n', (2671, 2709), False, 'import torch\n'), ((2815, 2839), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['save_path'], {}), '(save_path)\n', (2828, 2839), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3313, 3365), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.model'], {'device_ids': '[0, 1]'}), '(self.model, device_ids=[0, 1])\n', (3334, 3365), False, 'import torch\n'), ((3430, 3439), 'utils.loss.LossAll', 'LossAll', ([], {}), '()\n', (3437, 3439), False, 'from utils.loss import LossAll\n'), ((5830, 5847), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (5834, 5847), False, 'from tqdm import tqdm\n'), ((6964, 7062), 'utils.func_utils.write_results', 'func_utils.write_results', (['args', 'self.model', 'dsets', 'self.down_ratio', 'self.decoder', 'result_path'], {}), '(args, self.model, dsets, self.down_ratio, self.\n decoder, result_path)\n', (6988, 7062), True, 'import utils.func_utils as func_utils\n'), ((3226, 3251), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (3240, 3251), False, 'import os\n'), ((3266, 3288), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (3277, 3288), False, 'import os\n'), ((3977, 4155), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["dsets['train']"], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'drop_last': '(True)', 'collate_fn': 'collater'}), "(dsets['train'], batch_size=args.batch_size,\n shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=\n True, collate_fn=collater)\n", (4004, 4155), False, 'import torch\n'), ((6862, 6889), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (6876, 6889), False, 'import os\n'), ((6904, 6925), 'os.mkdir', 'os.mkdir', (['result_path'], {}), '(result_path)\n', (6912, 6925), False, 'import os\n'), ((463, 493), 'torch.from_numpy', 'torch.from_numpy', (['sample[name]'], {}), '(sample[name])\n', (479, 493), False, 'import torch\n'), ((2780, 2793), 'utils.misc.get_curtime', 'get_curtime', ([], {}), '()\n', (2791, 2793), False, 'from utils.misc import get_curtime\n'), ((5183, 5224), 'os.path.join', 'os.path.join', (['save_path', '"""model_best.pth"""'], {}), "(save_path, 'model_best.pth')\n", (5195, 5224), False, 'import os\n'), ((6083, 6102), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (6100, 6102), False, 'import torch\n'), ((6348, 6363), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6361, 6363), False, 'import torch\n')] |
import os
from logging import Logger
from typing import List, Optional
import numpy as np
import pandas as pd
from expiringdict import ExpiringDict
from injector import inject, singleton
from altymeter.api.exchange import (ExchangeOpenOrder,
ExchangeOrder,
ExchangeTransfer,
PairRecentStats,
TradedPair,
TradingExchange)
from altymeter.module.constants import Configuration
from altymeter.pricing import PriceData
@singleton
class QuadrigaCxApi(TradingExchange):
"""
"""
@inject
def __init__(self, config: Configuration,
logger: Logger,
price_data: PriceData,
):
config = config['exchanges'].get('QuadrigaCX') or dict()
self._logger = logger
self._price_data = price_data
self._traded_pairs_cache = ExpiringDict(max_len=1, max_age_seconds=24 * 60 * 60)
@property
def name(self):
return "QuadrigaCX"
def collect_data(self, pair: str, since=None, sleep_time=90, stop_event=None):
raise NotImplementedError
def convert_actions(self, dir_path: str) -> pd.DataFrame:
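        # Normalize exported QuadrigaCX CSVs (fundings, trades, withdrawals) into a single actions DataFrame.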
result = []
for path in os.listdir(dir_path):
path = os.path.join(dir_path, path)
data = pd.read_csv(path,
parse_dates=['datetime'],
)
if 'fundings' in path:
net_amount_index = data.columns.get_loc('net amount') + 1
for row in data.itertuples():
quantity = row.gross
if np.isnan(quantity):
quantity = row[net_amount_index]
if isinstance(row.address, str):
wallet = row.address
from_exchange = 'Local Wallet'
else:
wallet = None
from_exchange = None
currency = row.currency.upper()
result.append({
'Date': row.datetime,
'Type': 'Transfer',
'Quantity': quantity,
'Currency': currency,
'Exchange': from_exchange,
'Wallet': wallet,
'Price': quantity,
'Currency.1': currency,
'Exchange.1': self.name,
'Wallet.1': None,
'Disabled': None,
})
elif 'trades' in path:
for row in data.itertuples():
if row.type == 'buy':
from_currency = row.minor.upper()
to_currency = row.major.upper()
price = row.value
elif row.type == 'sell':
from_currency = row.major.upper()
to_currency = row.minor.upper()
price = row.amount
else:
raise ValueError(f"Invalid row type: {row.type}\nfor row: {row}")
result.append({
'Date': row.datetime,
'Type': 'Trade',
'Quantity': row.total,
'Currency': to_currency,
'Exchange': self.name,
'Wallet': f'{to_currency} Wallet',
'Price': price,
'Currency.1': from_currency,
'Exchange.1': self.name,
'Wallet.1': f'{from_currency} Wallet',
'Disabled': None,
})
elif 'withdrawals' in path:
for row in data.itertuples():
if isinstance(row.address, str):
wallet = row.address
to_exchange = 'Local Wallet'
else:
wallet = None
to_exchange = None
currency = row.currency.upper()
result.append({
'Date': row.datetime,
'Type': 'Transfer',
'Quantity': row.amount,
'Currency': currency,
'Exchange': self.name,
'Wallet': f'{currency} Wallet',
'Price': row.amount,
'Currency.1': currency,
'Exchange.1': to_exchange,
'Wallet.1': wallet,
'Disabled': None,
})
result = pd.DataFrame(result)
return result
def create_order(self, pair: str,
action_type: str,
order_type: str,
volume: float,
price: Optional[float] = None,
**kwargs) -> ExchangeOrder:
raise NotImplementedError
def get_deposit_history(self) -> List[ExchangeTransfer]:
raise NotImplementedError
def get_order_book(self, pair: Optional[str] = None,
base: Optional[str] = None, to: Optional[str] = None,
order_type: Optional[str] = 'all') \
-> List[ExchangeOpenOrder]:
raise NotImplementedError
def get_pair(self, pair: Optional[str] = None,
base: Optional[str] = None, to: Optional[str] = None) -> str:
raise NotImplementedError
def get_recent_stats(self, pair: str) -> PairRecentStats:
raise NotImplementedError
def get_traded_pairs(self) -> List[TradedPair]:
raise NotImplementedError
def get_withdrawal_history(self) -> List[ExchangeTransfer]:
raise NotImplementedError
if __name__ == '__main__':
from altymeter.module.module import AltymeterModule
injector = AltymeterModule.get_injector()
q: QuadrigaCxApi = injector.get(QuadrigaCxApi)
| [
"os.listdir",
"altymeter.module.module.AltymeterModule.get_injector",
"expiringdict.ExpiringDict",
"pandas.read_csv",
"os.path.join",
"numpy.isnan",
"pandas.DataFrame"
] | [((6114, 6144), 'altymeter.module.module.AltymeterModule.get_injector', 'AltymeterModule.get_injector', ([], {}), '()\n', (6142, 6144), False, 'from altymeter.module.module import AltymeterModule\n'), ((984, 1037), 'expiringdict.ExpiringDict', 'ExpiringDict', ([], {'max_len': '(1)', 'max_age_seconds': '(24 * 60 * 60)'}), '(max_len=1, max_age_seconds=24 * 60 * 60)\n', (996, 1037), False, 'from expiringdict import ExpiringDict\n'), ((1322, 1342), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (1332, 1342), False, 'import os\n'), ((4870, 4890), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (4882, 4890), True, 'import pandas as pd\n'), ((1363, 1391), 'os.path.join', 'os.path.join', (['dir_path', 'path'], {}), '(dir_path, path)\n', (1375, 1391), False, 'import os\n'), ((1411, 1454), 'pandas.read_csv', 'pd.read_csv', (['path'], {'parse_dates': "['datetime']"}), "(path, parse_dates=['datetime'])\n", (1422, 1454), True, 'import pandas as pd\n'), ((1739, 1757), 'numpy.isnan', 'np.isnan', (['quantity'], {}), '(quantity)\n', (1747, 1757), True, 'import numpy as np\n')] |