| content (string, length 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, length 7-104) | path (string, length 4-230) | size (int64, 7-928k) | lang (string, 1 class) |
---|---|---|---|---|---|---|---|---|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
trigger_instance_id=None):
"""
:param rule_file_path: Path to the file containing rule definition.
:type rule_file_path: ``str``
        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
:type trigger_instance_file_path: ``str``
"""
self._rule_file_path = rule_file_path
self._rule_ref = rule_ref
self._trigger_instance_file_path = trigger_instance_file_path
self._trigger_instance_id = trigger_instance_id
self._meta_loader = MetaLoader()
def evaluate(self):
"""
Evaluate trigger instance against the rule.
:return: ``True`` if the rule matches, ``False`` otherwise.
:rtype: ``boolean``
"""
rule_db = self._get_rule_db()
trigger_instance_db, trigger_db = self._get_trigger_instance_db()
# The trigger check needs to be performed here as that is not performed
# by RulesMatcher.
if rule_db.trigger != trigger_db.ref:
LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
rule_db.trigger, trigger_db.ref)
return False
# Check if rule matches criteria.
matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
rules=[rule_db], extra_info=True)
matching_rules = matcher.get_matching_rules()
# Rule does not match so early exit.
if len(matching_rules) < 1:
return False
# Check if rule can be enforced
enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
runner_type_db = mock.Mock()
runner_type_db.runner_parameters = {}
action_db = mock.Mock()
action_db.parameters = {}
params = rule_db.action.parameters # pylint: disable=no-member
context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
trace_context=None)
# Note: We only return partially resolved parameters.
# To be able to return all parameters we would need access to corresponding ActionDB,
# RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
# tool is meant to be used standalone.
try:
params = enforcer.get_resolved_parameters(action_db=action_db,
runnertype_db=runner_type_db,
params=params,
context=context,
additional_contexts=additional_contexts)
LOG.info('Action parameters resolved to:')
for param in six.iteritems(params):
LOG.info('\t%s: %s', param[0], param[1])
return True
except (UndefinedError, ValueError) as e:
LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
return False
        except Exception:
LOG.exception('Failed to resolve parameters.')
return False
def _get_rule_db(self):
if self._rule_file_path:
return self._get_rule_db_from_file(
file_path=os.path.realpath(self._rule_file_path))
elif self._rule_ref:
return Rule.get_by_ref(self._rule_ref)
raise ValueError('One of _rule_file_path or _rule_ref should be specified.')
def _get_trigger_instance_db(self):
if self._trigger_instance_file_path:
return self._get_trigger_instance_db_from_file(
file_path=os.path.realpath(self._trigger_instance_file_path))
elif self._trigger_instance_id:
trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')
def _get_rule_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
pack = data.get('pack', 'unknown')
name = data.get('name', 'unknown')
trigger = data['trigger']['type']
criteria = data.get('criteria', None)
action = data.get('action', {})
rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
enabled=True)
rule_db.id = 'rule_tester_rule'
return rule_db
def _get_trigger_instance_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
instance = TriggerInstanceDB(**data)
instance.id = 'rule_tester_instance'
trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
return instance, trigger_db
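# A minimal usage sketch (not part of the original module); the rule and
# trigger instance file paths below are hypothetical placeholders.
if __name__ == '__main__':
    tester = RuleTester(
        rule_file_path='/path/to/rule.yaml',
        trigger_instance_file_path='/path/to/trigger_instance.json'
    )
    if tester.evaluate():
        LOG.info('Rule matched the trigger instance.')
    else:
        LOG.info('Rule did not match the trigger instance.')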
| 41.125 | 99 | 0.658663 | ["Apache-2.0"] | Horizon-95/st2 | st2reactor/st2reactor/rules/tester.py | 6,580 | Python |
"""
Support for the Dyson 360 eye vacuum cleaner robot.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/vacuum.dyson/
"""
import logging
from homeassistant.components.vacuum import (
SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = 'clean_id'
ATTR_FULL_CLEAN_TYPE = 'full_clean_type'
ATTR_POSITION = 'position'
DEPENDENCIES = ['dyson']
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | \
SUPPORT_BATTERY | SUPPORT_STOP
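# Illustrative sketch (not part of the original platform): the SUPPORT_* values
# are single-bit flags, so callers test the combined mask with bitwise AND.
def _feature_supported(feature_mask, feature):
    """Return True when the given feature flag is set in the bitmask."""
    return bool(feature_mask & feature)
# e.g. _feature_supported(SUPPORT_DYSON, SUPPORT_BATTERY) evaluates to True.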
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson 360 Eye robot vacuum platform."""
from libpurecoollink.dyson_360_eye import Dyson360Eye
_LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
if DYSON_360_EYE_DEVICES not in hass.data:
hass.data[DYSON_360_EYE_DEVICES] = []
# Get Dyson Devices from parent component
for device in [d for d in hass.data[DYSON_DEVICES] if
isinstance(d, Dyson360Eye)]:
dyson_entity = Dyson360EyeDevice(device)
hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity)
add_entities(hass.data[DYSON_360_EYE_DEVICES])
return True
class Dyson360EyeDevice(VacuumDevice):
"""Dyson 360 Eye robot vacuum device."""
def __init__(self, device):
"""Dyson 360 Eye robot vacuum device."""
_LOGGER.debug("Creating device %s", device.name)
self._device = device
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.async_add_job(
self._device.add_message_listener, self.on_message)
    def on_message(self, message):
        """Handle a new message received from the vacuum."""
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def status(self):
"""Return the status of the vacuum cleaner."""
from libpurecoollink.const import Dyson360EyeMode
dyson_labels = {
Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
Dyson360EyeMode.FAULT_REPLACE_ON_DOCK:
"Error - Replace device on dock",
Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging"
}
return dyson_labels.get(
self._device.state.state, self._device.state.state)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._device.state.battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
from libpurecoollink.const import PowerMode
speed_labels = {
PowerMode.MAX: "Max",
PowerMode.QUIET: "Quiet"
}
return speed_labels[self._device.state.power_mode]
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return ["Quiet", "Max"]
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
return {
ATTR_POSITION: str(self._device.state.position)
}
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
from libpurecoollink.const import Dyson360EyeMode
return self._device.state.state in [
Dyson360EyeMode.FULL_CLEAN_INITIATED,
Dyson360EyeMode.FULL_CLEAN_ABORTED,
Dyson360EyeMode.FULL_CLEAN_RUNNING
]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DYSON
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
from libpurecoollink.const import Dyson360EyeMode
charging = self._device.state.state in [
Dyson360EyeMode.INACTIVE_CHARGING]
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging)
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
from libpurecoollink.const import Dyson360EyeMode
_LOGGER.debug("Turn on device %s", self.name)
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
self._device.resume()
else:
self._device.start()
def turn_off(self, **kwargs):
"""Turn the vacuum off and return to home."""
_LOGGER.debug("Turn off device %s", self.name)
self._device.pause()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
_LOGGER.debug("Stop device %s", self.name)
self._device.pause()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
from libpurecoollink.const import PowerMode
_LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
power_modes = {
"Quiet": PowerMode.QUIET,
"Max": PowerMode.MAX
}
self._device.set_power_mode(power_modes[fan_speed])
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
from libpurecoollink.const import Dyson360EyeMode
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
_LOGGER.debug("Resume device %s", self.name)
self._device.resume()
elif self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGED,
Dyson360EyeMode.INACTIVE_CHARGING]:
_LOGGER.debug("Start device %s", self.name)
self._device.start()
else:
_LOGGER.debug("Pause device %s", self.name)
self._device.pause()
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
_LOGGER.debug("Return to base device %s", self.name)
self._device.abort()
| 34.655502 | 79 | 0.654701 | ["Apache-2.0"] | FlorianLudwig/home-assistant | homeassistant/components/dyson/vacuum.py | 7,243 | Python |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA misc.plot"""
from mvpa2.testing import *
skip_if_no_external("pylab")
import pylab as pl
from matplotlib.figure import Figure
from mvpa2.misc.plot.base import plot_dataset_chunks
import numpy as np
from glob import glob
from mock import patch
from os.path import join as pjoin
data2d = np.random.randn(2, 4, 4)
data3d = np.random.randn(3, 4, 4)
data2d_3d = np.random.randn(2, 4, 4, 4)
data2d_4d = np.random.randn(2, 4, 4, 4, 2)
data2d_5d = np.random.randn(2, 4, 4, 4, 2, 3)
from mvpa2.testing.datasets import datasets
@sweepargs(dsp=list(datasets.items()))
def test_plot_dataset_chunks(dsp):
dsname, ds = dsp
if ds.targets.dtype.kind == "f":
return
# smoke test for now
if "chunks" not in ds.sa:
return # nothing to plot in this one
print(dsname)
plot_dataset_chunks(ds[:, :2]) # could only plot two
pl.close(pl.gcf())
if ds.nfeatures > 2:
assert_raises(ValueError, plot_dataset_chunks, ds)
| 29 | 78 | 0.612619 | ["MIT"] | mortonne/PyMVPA | mvpa2/tests/test_misc_plot.py | 1,363 | Python |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
import warnings
import scipy.constants
import re
__author__ = "Sudarsan Surendralal"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "[email protected]"
__status__ = "production"
__date__ = "Sep 1, 2017"
KBAR_TO_EVA = (
scipy.constants.physical_constants["joule-electron volt relationship"][0] / 1e22
)
class Outcar(object):
"""
This module is used to parse VASP OUTCAR files.
Attributes:
parse_dict (dict): A dictionary with all the useful quantities parsed from an OUTCAR file after from_file() is
executed
"""
def __init__(self):
self.parse_dict = dict()
def from_file(self, filename="OUTCAR"):
"""
Parse and store relevant quantities from the OUTCAR file into parse_dict.
Args:
filename (str): Filename of the OUTCAR file to parse
"""
with open(filename, "r") as f:
lines = f.readlines()
energies = self.get_total_energies(filename=filename, lines=lines)
energies_int = self.get_energy_without_entropy(filename=filename, lines=lines)
energies_zero = self.get_energy_sigma_0(filename=filename, lines=lines)
scf_energies = self.get_all_total_energies(filename=filename, lines=lines)
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
forces = self.get_forces(filename=filename, lines=lines, n_atoms=n_atoms)
positions = self.get_positions(filename=filename, lines=lines, n_atoms=n_atoms)
cells = self.get_cells(filename=filename, lines=lines)
steps = self.get_steps(filename=filename, lines=lines)
temperatures = self.get_temperatures(filename=filename, lines=lines)
time = self.get_time(filename=filename, lines=lines)
fermi_level = self.get_fermi_level(filename=filename, lines=lines)
scf_moments = self.get_dipole_moments(filename=filename, lines=lines)
kin_energy_error = self.get_kinetic_energy_error(filename=filename, lines=lines)
stresses = self.get_stresses(filename=filename, si_unit=False, lines=lines)
n_elect = self.get_nelect(filename=filename, lines=lines)
e_fermi_list, vbm_list, cbm_list = self.get_band_properties(filename=filename, lines=lines)
elastic_constants = self.get_elastic_constants(filename=filename, lines=lines)
try:
irreducible_kpoints = self.get_irreducible_kpoints(
filename=filename, lines=lines
)
except ValueError:
print("irreducible kpoints not parsed !")
irreducible_kpoints = None
magnetization, final_magmom_lst = self.get_magnetization(
filename=filename, lines=lines
)
broyden_mixing = self.get_broyden_mixing_mesh(filename=filename, lines=lines)
self.parse_dict["energies"] = energies
self.parse_dict["energies_int"] = energies_int
self.parse_dict["energies_zero"] = energies_zero
self.parse_dict["scf_energies"] = scf_energies
self.parse_dict["forces"] = forces
self.parse_dict["positions"] = positions
self.parse_dict["cells"] = cells
self.parse_dict["steps"] = steps
self.parse_dict["temperatures"] = temperatures
self.parse_dict["time"] = time
self.parse_dict["fermi_level"] = fermi_level
self.parse_dict["scf_dipole_moments"] = scf_moments
self.parse_dict["kin_energy_error"] = kin_energy_error
self.parse_dict["stresses"] = stresses
self.parse_dict["irreducible_kpoints"] = irreducible_kpoints
self.parse_dict["magnetization"] = magnetization
self.parse_dict["final_magmoms"] = final_magmom_lst
self.parse_dict["broyden_mixing"] = broyden_mixing
self.parse_dict["n_elect"] = n_elect
self.parse_dict["e_fermi_list"] = e_fermi_list
self.parse_dict["vbm_list"] = vbm_list
self.parse_dict["cbm_list"] = cbm_list
self.parse_dict["elastic_constants"] = elastic_constants
try:
self.parse_dict["pressures"] = (
np.average(stresses[:, 0:3], axis=1) * KBAR_TO_EVA
)
except IndexError:
self.parse_dict["pressures"] = np.zeros(len(steps))
def to_hdf(self, hdf, group_name="outcar"):
"""
Store output in an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
hdf5_output[key] = self.parse_dict[key]
def to_hdf_minimal(self, hdf, group_name="outcar"):
"""
Store minimal output in an HDF5 file (output unique to OUTCAR)
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
unique_quantities = [
"kin_energy_error",
"broyden_mixing",
"stresses",
"irreducible_kpoints",
]
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
if key in unique_quantities:
hdf5_output[key] = self.parse_dict[key]
def from_hdf(self, hdf, group_name="outcar"):
"""
Load output from an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in hdf5_output.list_nodes():
self.parse_dict[key] = hdf5_output[key]
def get_positions_and_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces and positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=True,
)
def get_positions(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of positions in $\AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=False,
)
def get_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=False,
force_flag=True,
)
def get_cells(self, filename="OUTCAR", lines=None):
"""
Gets the cell size and shape for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="VOLUME and BASIS-vectors are now :"
)
        return self._get_cells_parser(lines=lines, trigger_indices=trigger_indices)
@staticmethod
def get_stresses(filename="OUTCAR", lines=None, si_unit=True):
"""
Args:
filename (str): Input filename
lines (list/None): lines read from the file
si_unit (bool): True SI units are used
Returns:
numpy.ndarray: An array of stress values
"""
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FORCE on cell =-STRESS in cart. coord. units (eV):",
)
pullay_stress_lst = []
for j in trigger_indices:
try:
if si_unit:
pullay_stress_lst.append(
[float(l) for l in lines[j + 13].split()[1:7]]
)
else:
pullay_stress_lst.append(
[float(l) for l in lines[j + 14].split()[2:8]]
)
            except ValueError:
                pullay_stress_lst.append([float("NaN")] * 6)
return np.array(pullay_stress_lst)
@staticmethod
def get_irreducible_kpoints(
filename="OUTCAR", reciprocal=True, weight=True, planewaves=True, lines=None
):
"""
Function to extract the irreducible kpoints from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
reciprocal (bool): Get either the reciprocal or the cartesian coordinates
weight (bool): Get the weight assigned to the irreducible kpoints
planewaves (bool): Get the planewaves assigned to the irreducible kpoints
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of k-points
"""
kpoint_lst = []
weight_lst = []
planewaves_lst = []
trigger_number_str = "Subroutine IBZKPT returns following result:"
trigger_plane_waves_str = "k-point 1 :"
trigger_number = 0
trigger_plane_waves = 0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger_number_str in line:
trigger_number = int(i)
elif planewaves:
if trigger_plane_waves_str in line:
trigger_plane_waves = int(i)
number_irr_kpoints = int(lines[trigger_number + 3].split()[1])
if reciprocal:
trigger_start = trigger_number + 7
else:
trigger_start = trigger_number + 10 + number_irr_kpoints
for line in lines[trigger_start : trigger_start + number_irr_kpoints]:
line = line.strip()
line = _clean_line(line)
kpoint_lst.append([float(l) for l in line.split()[0:3]])
if weight:
weight_lst.append(float(line.split()[3]))
if planewaves and trigger_plane_waves != 0:
for line in lines[
trigger_plane_waves : trigger_plane_waves + number_irr_kpoints
]:
line = line.strip()
line = _clean_line(line)
planewaves_lst.append(float(line.split()[-1]))
if weight and planewaves:
return np.array(kpoint_lst), np.array(weight_lst), np.array(planewaves_lst)
elif weight:
return np.array(kpoint_lst), np.array(weight_lst)
elif planewaves:
return np.array(kpoint_lst), np.array(planewaves_lst)
else:
return np.array(kpoint_lst)
@staticmethod
def get_total_energies(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_total_energies_from_line(line):
return float(_clean_line(line.strip()).split()[-2])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_total_energies_from_line(lines[j + 2]) for j in trigger_indices]
)
@staticmethod
def get_energy_without_entropy(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_without_entropy_from_line(line):
return float(_clean_line(line.strip()).split()[3])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[
get_energy_without_entropy_from_line(lines[j + 4])
for j in trigger_indices
]
)
@staticmethod
def get_energy_sigma_0(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_sigma_0_from_line(line):
return float(_clean_line(line.strip()).split()[-1])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_energy_sigma_0_from_line(lines[j + 4]) for j in trigger_indices]
)
@staticmethod
def get_all_total_energies(filename="OUTCAR", lines=None):
"""
Gets the energy at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of energie for every electronic step at every ionic step
"""
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "free energy TOTEN ="
scf_energies = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
scf_energies.append(np.array(istep_energies))
istep_energies = list()
if electronic_trigger in line:
line = _clean_line(line)
ene = float(line.split()[-2])
istep_energies.append(ene)
return scf_energies
@staticmethod
def get_magnetization(filename="OUTCAR", lines=None):
"""
Gets the magnetization
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
            list: A list with the magnetization values
"""
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "eigenvalue-minimisations"
nion_trigger = "NIONS ="
mag_lst = list()
local_spin_trigger = False
n_atoms = None
mag_dict = dict()
mag_dict["x"] = list()
mag_dict["y"] = list()
mag_dict["z"] = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
final_magmom_lst = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
mag_lst.append(np.array(istep_energies))
istep_energies = list()
if "Atomic Wigner-Seitz radii" in line:
local_spin_trigger = True
if electronic_trigger in line:
try:
line = lines[i + 2].split("magnetization")[-1]
if line != " \n":
spin_str_lst = line.split()
spin_str_len = len(spin_str_lst)
if spin_str_len == 1:
ene = float(line)
elif spin_str_len == 3:
ene = [
float(spin_str_lst[0]),
float(spin_str_lst[1]),
float(spin_str_lst[2]),
]
else:
warnings.warn("Unrecognized spin configuration.")
return mag_lst, final_magmom_lst
istep_energies.append(ene)
except ValueError:
warnings.warn("Something went wrong in parsing the magnetization")
if n_atoms is None:
if nion_trigger in line:
n_atoms = int(line.split(nion_trigger)[-1])
if local_spin_trigger:
try:
for ind_dir, direc in enumerate(["x", "y", "z"]):
if "magnetization ({})".format(direc) in line:
mag_dict[direc].append(
[
float(lines[i + 4 + atom_index].split()[-1])
for atom_index in range(n_atoms)
]
)
except ValueError:
warnings.warn(
"Something went wrong in parsing the magnetic moments"
)
if len(mag_dict["x"]) > 0:
if len(mag_dict["y"]) == 0:
final_mag = np.array(mag_dict["x"])
else:
n_ionic_steps = np.array(mag_dict["x"]).shape[0]
final_mag = np.abs(np.zeros((n_ionic_steps, n_atoms, 3)))
final_mag[:, :, 0] = np.array(mag_dict["x"])
final_mag[:, :, 1] = np.array(mag_dict["y"])
final_mag[:, :, 2] = np.array(mag_dict["z"])
final_magmom_lst = final_mag.tolist()
return mag_lst, final_magmom_lst
@staticmethod
def get_broyden_mixing_mesh(filename="OUTCAR", lines=None):
"""
Gets the Broyden mixing mesh size
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
int: Mesh size
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="gives a total of "
)
if len(trigger_indices) > 0:
line_ngx = lines[trigger_indices[0] - 2]
else:
warnings.warn(
"Unable to parse the Broyden mixing mesh. Returning 0 instead"
)
return 0
        # Exclude all alphabetic characters and spaces, then split on '='
str_list = re.sub(
r"[a-zA-Z]", r"", line_ngx.replace(" ", "").replace("\n", "")
).split("=")
return np.prod([int(val) for val in str_list[1:]])
@staticmethod
def get_temperatures(filename="OUTCAR", lines=None):
"""
Gets the temperature at each ionic step (applicable for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of temperatures in Kelvin
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="kin. lattice EKIN_LAT= "
)
temperatures = []
if len(trigger_indices) > 0:
for j in trigger_indices:
line = lines[j].strip()
line = _clean_line(line)
temperatures.append(float(line.split()[-2]))
else:
temperatures = np.zeros(
len(
_get_trigger(
lines=lines,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
return_lines=False,
)
)
)
return np.array(temperatures)
@staticmethod
def get_steps(filename="OUTCAR", lines=None):
"""
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: Steps during the simulation
"""
nblock_trigger = "NBLOCK ="
trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
trigger_indices = list()
read_nblock = True
n_block = 1
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
trigger_indices.append(i)
if read_nblock is None:
if nblock_trigger in line:
line = _clean_line(line)
n_block = int(line.split(nblock_trigger)[-1])
return n_block * np.linspace(0, len(trigger_indices))
def get_time(self, filename="OUTCAR", lines=None):
"""
Time after each simulation step (for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of time values in fs
"""
potim_trigger = "POTIM ="
read_potim = True
potim = 1.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if read_potim is None:
if potim_trigger in line:
line = _clean_line(line)
potim = float(line.split(potim_trigger)[0])
return potim * self.get_steps(filename)
@staticmethod
def get_kinetic_energy_error(filename="OUTCAR", lines=None):
"""
Get the kinetic energy error
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The kinetic energy error in eV
"""
trigger = "kinetic energy error for atom="
e_kin_err = list()
n_species_list = list()
nion_trigger = "ions per type ="
tot_kin_error = 0.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
e_kin_err.append(float(line.split()[5]))
if nion_trigger in line:
n_species_list = [
float(val) for val in line.split(nion_trigger)[-1].strip().split()
]
if len(n_species_list) > 0 and len(n_species_list) == len(e_kin_err):
tot_kin_error = np.sum(np.array(n_species_list) * np.array(e_kin_err))
return tot_kin_error
@staticmethod
def get_fermi_level(filename="OUTCAR", lines=None):
"""
Getting the Fermi-level (Kohn_Sham) from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The Kohn-Sham Fermi level in eV
"""
trigger = "E-fermi :"
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=trigger
)
if len(trigger_indices) != 0:
try:
return float(lines[trigger_indices[-1]].split(trigger)[-1].split()[0])
except ValueError:
return
else:
return
@staticmethod
def get_dipole_moments(filename="OUTCAR", lines=None):
"""
Get the electric dipole moment at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of dipole moments in (eA) for each electronic step
"""
moment_trigger = "dipolmoment"
istep_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
dip_moms = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_mom = list()
for i, line in enumerate(lines):
line = line.strip()
if istep_trigger in line:
dip_moms.append(np.array(istep_mom))
istep_mom = list()
if moment_trigger in line:
line = _clean_line(line)
mom = np.array([float(val) for val in line.split()[1:4]])
istep_mom.append(mom)
return dip_moms
@staticmethod
def get_nelect(filename="OUTCAR", lines=None):
"""
Returns the number of electrons in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
float: The number of electrons in the simulation
"""
nelect_trigger = "NELECT"
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if nelect_trigger in line:
return float(line.split()[2])
@staticmethod
def get_number_of_atoms(filename="OUTCAR", lines=None):
"""
Returns the number of ions in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
int: The number of ions in the simulation
"""
ions_trigger = "NIONS ="
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=ions_trigger
)
if len(trigger_indices) != 0:
return int(lines[trigger_indices[0]].split(ions_trigger)[-1])
else:
raise ValueError()
@staticmethod
def get_band_properties(filename="OUTCAR", lines=None):
fermi_trigger = "E-fermi"
fermi_trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=fermi_trigger
)
fermi_level_list = list()
vbm_level_dict = OrderedDict()
cbm_level_dict = OrderedDict()
for ind in fermi_trigger_indices:
fermi_level_list.append(float(lines[ind].strip().split()[2]))
band_trigger = "band No. band energies occupation"
is_spin_polarized = False
for n, ind in enumerate(fermi_trigger_indices):
if n == len(fermi_trigger_indices) - 1:
trigger_indices, lines_new = _get_trigger(
lines=lines[ind:-1], filename=filename, trigger=band_trigger
)
else:
trigger_indices, lines_new = _get_trigger(
lines=lines[ind:fermi_trigger_indices[n+1]], filename=filename, trigger=band_trigger
)
band_data = list()
for ind in trigger_indices:
if "spin component" in lines_new[ind-3]:
is_spin_polarized = True
for line in lines_new[ind+1:]:
data = line.strip().split()
if len(data) != 3:
break
band_data.append([float(d) for d in data[1:]])
if is_spin_polarized:
band_data_per_spin = [np.array(band_data[0:int(len(band_data)/2)]).tolist(),
np.array(band_data[int(len(band_data)/2):]).tolist()]
else:
band_data_per_spin = [band_data]
for spin, band_data in enumerate(band_data_per_spin):
if spin in cbm_level_dict.keys():
pass
else:
cbm_level_dict[spin] = list()
if spin in vbm_level_dict.keys():
pass
else:
vbm_level_dict[spin] = list()
if len(band_data) > 0:
band_energy, band_occ = [np.array(band_data)[:, i] for i in range(2)]
args = np.argsort(band_energy)
band_occ = band_occ[args]
band_energy = band_energy[args]
cbm_bool = np.abs(band_occ) < 1e-6
if any(cbm_bool):
cbm_level_dict[spin].append(band_energy[np.abs(band_occ) < 1e-6][0])
else:
cbm_level_dict[spin].append(band_energy[-1])
# If spin channel is completely empty, setting vbm=cbm
if all(cbm_bool):
vbm_level_dict[spin].append(cbm_level_dict[spin][-1])
else:
vbm_level_dict[spin].append(band_energy[~cbm_bool][-1])
        return (
            np.array(fermi_level_list),
            np.array(list(vbm_level_dict.values())),
            np.array(list(cbm_level_dict.values())),
        )
@staticmethod
def get_elastic_constants(filename="OUTCAR", lines=None):
lines = _get_lines_from_file(filename=filename, lines=lines)
trigger_indices = _get_trigger(lines=lines, filename=filename, trigger="TOTAL ELASTIC MODULI (kBar)", return_lines=False)
if len(trigger_indices) != 1:
return None
else:
start_index = trigger_indices[0] + 3
end_index = start_index + 6
elastic_constants = []
for line in lines[start_index:end_index]:
elastic_constants.append(line.split()[1:])
elastic_GPa = np.array(elastic_constants, dtype=float) / 10
return elastic_GPa
@staticmethod
def _get_positions_and_forces_parser(
lines, trigger_indices, n_atoms, pos_flag=True, force_flag=True
):
"""
Parser to get the forces and or positions for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
pos_flag (bool): parse position
force_flag (bool): parse forces
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
positions = []
forces = []
for j in trigger_indices:
pos = []
force = []
for line in lines[j + 2 : j + n_atoms + 2]:
line = line.strip()
line = _clean_line(line)
if pos_flag:
pos.append([float(l) for l in line.split()[0:3]])
if force_flag:
force.append([float(l) for l in line.split()[3:]])
forces.append(force)
positions.append(pos)
if pos_flag and force_flag:
return np.array(positions), np.array(forces)
elif pos_flag:
return np.array(positions)
elif force_flag:
return np.array(forces)
@staticmethod
    def _get_cells_parser(lines, trigger_indices):
"""
Parser to get the cell size and shape for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
cells = []
try:
for j in trigger_indices:
cell = []
for line in lines[j + 5: j + 8]:
line = line.strip()
line = _clean_line(line)
cell.append([float(l) for l in line.split()[0:3]])
cells.append(cell)
return np.array(cells)
except ValueError:
warnings.warn("Unable to parse the cells from the OUTCAR file")
return
def _clean_line(line):
return line.replace("-", " -")
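# e.g. _clean_line("0.123-0.456") -> "0.123 -0.456": values that run together in
# fixed-width OUTCAR columns are separated before float() conversion.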
def _get_trigger(trigger, filename=None, lines=None, return_lines=True):
"""
Find the lines where a specific trigger appears.
Args:
trigger (str): string pattern to search for
lines (list/None): list of lines
filename (str/None): file to read lines from
Returns:
        list: indices of the lines where the trigger string was found and list of lines
"""
lines = _get_lines_from_file(filename=filename, lines=lines)
    trigger_indices = [i for i, line in enumerate(lines) if trigger in line.strip()]
    if return_lines:
        return trigger_indices, lines
    else:
        return trigger_indices
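# e.g. _get_trigger(trigger="E-fermi", lines=["foo\n", " E-fermi :  1.0\n"])
# returns ([1], lines); with return_lines=False only the index list is returned.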
def _get_lines_from_file(filename, lines=None):
"""
If lines is None read the lines from the file with the filename filename.
Args:
filename (str): file to read lines from
lines (list/ None): list of lines
Returns:
list: list of lines
"""
if lines is None:
with open(filename, "r") as f:
lines = f.readlines()
return lines
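# A minimal usage sketch (not part of the module); the OUTCAR path below is a
# hypothetical placeholder.
if __name__ == "__main__":
    outcar_parser = Outcar()
    outcar_parser.from_file(filename="OUTCAR")
    print(outcar_parser.parse_dict["energies"])      # total energy per ionic step (eV)
    print(outcar_parser.parse_dict["forces"].shape)  # (n_steps, n_atoms, 3)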
| 37.365779 | 129 | 0.56681 | ["BSD-3-Clause"] | pyiron/pyiron_atomistic | pyiron_atomistics/vasp/outcar.py | 36,471 | Python |
"""
All things CloudFormation Init.
"""
from paco.models.base import Named
from paco.models import schemas
from zope.interface import implementer
from zope.schema.fieldproperty import FieldProperty
import troposphere.cloudformation
def export_attrs_as_dicts(obj, attrs):
out = {}
for name in attrs:
value = getattr(obj, name, None)
if value:
out[name] = dict(value)
return out
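# e.g. for an object with obj.apt == {'nginx': []} and obj.yum == {}, calling
# export_attrs_as_dicts(obj, ('apt', 'yum')) returns {'apt': {'nginx': []}};
# attributes that are missing or empty are skipped.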
@implementer(schemas.ICloudFormationParameters)
class CloudFormationParameters(Named, dict):
pass
@implementer(schemas.ICloudFormationConfigSets)
class CloudFormationConfigSets(Named, dict):
def export_as_troposphere(self):
# plain dict of list values
return dict(self)
@implementer(schemas.ICloudFormationConfigurations)
class CloudFormationConfigurations(Named, dict):
def export_as_troposphere(self):
out = {}
for key, value in self.items():
out[key] = troposphere.cloudformation.InitConfig(
**self[key].export_as_troposphere()
)
return out
@implementer(schemas.ICloudFormationInitVersionedPackageSet)
class CloudFormationInitVersionedPackageSet(dict):
pass
@implementer(schemas.ICloudFormationInitPathOrUrlPackageSet)
class CloudFormationInitPathOrUrlPackageSet(dict):
pass
@implementer(schemas.ICloudFormationInitPackages)
class CloudFormationInitPackages(Named):
apt = FieldProperty(schemas.ICloudFormationInitPackages['apt'])
msi = FieldProperty(schemas.ICloudFormationInitPackages['msi'])
python = FieldProperty(schemas.ICloudFormationInitPackages['python'])
rpm = FieldProperty(schemas.ICloudFormationInitPackages['rpm'])
rubygems = FieldProperty(schemas.ICloudFormationInitPackages['rubygems'])
yum = FieldProperty(schemas.ICloudFormationInitPackages['yum'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.apt = CloudFormationInitVersionedPackageSet()
self.msi = CloudFormationInitPathOrUrlPackageSet()
self.python = CloudFormationInitVersionedPackageSet()
self.rpm = CloudFormationInitPathOrUrlPackageSet()
self.rubygems = CloudFormationInitVersionedPackageSet()
self.yum = CloudFormationInitVersionedPackageSet()
def export_as_troposphere(self):
return export_attrs_as_dicts(
self,
('apt', 'msi', 'python', 'rpm', 'rubygems', 'yum')
)
@implementer(schemas.ICloudFormationInitGroup)
class CloudFormationInitGroup(Named):
gid = FieldProperty(schemas.ICloudFormationInitGroup['gid'])
def export_as_troposphere(self):
out = {}
        for name in ('gid',):
value = getattr(self, name, None)
if value != None:
out[name] = value
return out
@implementer(schemas.ICloudFormationInitGroups)
class CloudFormationInitGroups(Named, dict):
def export_as_troposphere(self):
out = {}
for key, value in self.items():
out[key] = self[key].export_as_troposphere()
return out
@implementer(schemas.ICloudFormationInitUser)
class CloudFormationInitUser(Named):
groups = FieldProperty(schemas.ICloudFormationInitUser['groups'])
uid = FieldProperty(schemas.ICloudFormationInitUser['uid'])
home_dir = FieldProperty(schemas.ICloudFormationInitUser['home_dir'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.groups = []
def export_as_troposphere(self):
out = {}
for name in ('groups', 'uid', 'home_dir'):
value = getattr(self, name, None)
if name == 'home_dir':
name = 'homeDir'
if name == 'uid':
value = str(value)
if value != None:
out[name] = value
return out
@implementer(schemas.ICloudFormationInitUsers)
class CloudFormationInitUsers(Named, dict):
def export_as_troposphere(self):
out = {}
for key, value in self.items():
out[key] = self[key].export_as_troposphere()
return out
@implementer(schemas.ICloudFormationInitSources)
class CloudFormationInitSources(Named, dict):
def export_as_troposphere(self):
out = {}
for key, value in self.items():
out[key] = value
return out
@implementer(schemas.ICloudFormationInitFiles)
class CloudFormationInitFiles(Named, dict):
def export_as_troposphere(self):
out = {}
for key, value in self.items():
out[key] = self[key].export_as_troposphere()
return out
@implementer(schemas.ICloudFormationInitFile)
class CloudFormationInitFile(Named):
content_cfn_file = FieldProperty(schemas.ICloudFormationInitFile['content_cfn_file'])
content_file = FieldProperty(schemas.ICloudFormationInitFile['content_file'])
source = FieldProperty(schemas.ICloudFormationInitFile['source'])
encoding = FieldProperty(schemas.ICloudFormationInitFile['encoding'])
group = FieldProperty(schemas.ICloudFormationInitFile['group'])
owner = FieldProperty(schemas.ICloudFormationInitFile['owner'])
mode = FieldProperty(schemas.ICloudFormationInitFile['mode'])
authentication = FieldProperty(schemas.ICloudFormationInitFile['authentication'])
context = FieldProperty(schemas.ICloudFormationInitFile['context'])
_content = None
@property
def content(self):
"Return a string or a Troposphere CFN Function object"
if self.content_file:
return self.content_file
elif self.content_cfn_file:
return self.content_cfn_file
return self._content
@content.setter
def content(self, value):
self._content = value
def export_as_troposphere(self):
out = {}
for name in ('content', 'source', 'encoding', 'group', 'owner', 'mode', 'authentication', 'context'):
value = getattr(self, name, None)
if value != None:
out[name] = value
return out
@implementer(schemas.ICloudFormationInitCommands)
class CloudFormationInitCommands(Named, dict):
def export_as_troposphere(self):
out = {}
for key, command_obj in self.items():
command_dict = {}
for name in ('command', 'env', 'cwd', 'test', 'ignore_errors'):
value = getattr(command_obj, name)
if name == 'ignore_errors':
name = 'ignoreErrors'
if value != None:
command_dict[name] = value
out[key] = command_dict
return out
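# e.g. a command item named 'install' with command='echo ok', env={}, cwd=None,
# test=None and ignore_errors=False exports as
# {'install': {'command': 'echo ok', 'env': {}, 'ignoreErrors': False}}:
# None values are dropped and snake_case ignore_errors becomes camelCase.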
@implementer(schemas.ICloudFormationInitCommand)
class CloudFormationInitCommand(Named):
command = FieldProperty(schemas.ICloudFormationInitCommand['command'])
env = FieldProperty(schemas.ICloudFormationInitCommand['env'])
cwd = FieldProperty(schemas.ICloudFormationInitCommand['cwd'])
test = FieldProperty(schemas.ICloudFormationInitCommand['test'])
ignore_errors = FieldProperty(schemas.ICloudFormationInitCommand['ignore_errors'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.env = {}
@implementer(schemas.ICloudFormationInitService)
class CloudFormationInitService(Named, dict):
ensure_running = FieldProperty(schemas.ICloudFormationInitService['ensure_running'])
enabled = FieldProperty(schemas.ICloudFormationInitService['enabled'])
files = FieldProperty(schemas.ICloudFormationInitService['files'])
sources = FieldProperty(schemas.ICloudFormationInitService['sources'])
packages = FieldProperty(schemas.ICloudFormationInitService['packages'])
commands = FieldProperty(schemas.ICloudFormationInitService['commands'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.files = []
self.packages = {}
self.commands = []
self.sources = []
@implementer(schemas.ICloudFormationInitServiceCollection)
class CloudFormationInitServiceCollection(Named, dict):
def export_as_troposphere(self):
out = {}
for key, service_obj in self.items():
service_dict = {}
for name in ('ensure_running', 'enabled', 'files', 'sources', 'packages', 'commands'):
value = getattr(service_obj, name)
if name == 'ensure_running':
name = 'ensureRunning'
if value != None:
service_dict[name] = value
out[key] = service_dict
return out
@implementer(schemas.ICloudFormationInitServices)
class CloudFormationInitServices(Named):
sysvinit = FieldProperty(schemas.ICloudFormationInitServices['sysvinit'])
windows = FieldProperty(schemas.ICloudFormationInitServices['windows'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.sysvinit = CloudFormationInitServiceCollection('sysvinit', self)
self.windows = CloudFormationInitServiceCollection('windows', self)
def export_as_troposphere(self):
out = {}
if self.sysvinit:
out['sysvinit'] = self.sysvinit.export_as_troposphere()
if self.windows:
out['windows'] = self.windows.export_as_troposphere()
return out
@implementer(schemas.ICloudFormationConfiguration)
class CloudFormationConfiguration(Named):
packages = FieldProperty(schemas.ICloudFormationConfiguration['packages'])
groups = FieldProperty(schemas.ICloudFormationConfiguration['groups'])
users = FieldProperty(schemas.ICloudFormationConfiguration['users'])
sources = FieldProperty(schemas.ICloudFormationConfiguration['sources'])
files = FieldProperty(schemas.ICloudFormationConfiguration['files'])
commands = FieldProperty(schemas.ICloudFormationConfiguration['commands'])
services = FieldProperty(schemas.ICloudFormationConfiguration['services'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.packages = CloudFormationInitPackages('packages', self)
self.files = CloudFormationInitFiles('files', self)
self.commands = CloudFormationInitCommands('commands', self)
self.services = CloudFormationInitServices('services', self)
self.sources = CloudFormationInitSources('sources', self)
self.groups = CloudFormationInitGroups('groups', self)
self.users = CloudFormationInitUsers('users', self)
def export_as_troposphere(self):
out = {}
for name in ('packages', 'files', 'commands', 'services', 'sources', 'groups', 'users'):
obj = getattr(self, name, None)
if obj:
out[name] = obj.export_as_troposphere()
return out
@implementer(schemas.ICloudFormationInit)
class CloudFormationInit(Named):
config_sets = FieldProperty(schemas.ICloudFormationInit['config_sets'])
configurations = FieldProperty(schemas.ICloudFormationInit['configurations'])
parameters = FieldProperty(schemas.ICloudFormationInit['parameters'])
def __init__(self, name, parent):
super().__init__(name, parent)
self.parameters = {}
def export_as_troposphere(self):
init_resource = troposphere.cloudformation.Init(
troposphere.cloudformation.InitConfigSets(
**self.config_sets.export_as_troposphere()
),
**self.configurations.export_as_troposphere()
)
        return init_resource
| 37.749175 | 109 | 0.687358 | ["MPL-2.0"] | waterbear-cloud/aim.models | src/paco/models/cfn_init.py | 11,438 | Python |
import numpy as np
import os, time
import random
import tensorflow as tf
from lookalike_model.trainer.model_new import Model
import argparse
random.seed(1234)
# adding arguments for tfrecord directory and the checkpoint directory
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, help="input data tfrecords dir location")
parser.add_argument("--check_point_dir", type=str, help="Check Point dir location")
args, unknown = parser.parse_known_args()
if len(unknown) != 0:
print("unknown args:%s", unknown)
# tfrecord location and the check point directory location
tfrecord_location = args.data_dir + "/tf_records_lookalike_data_08july"
output = args.check_point_dir
def __data_parser(serialized_example):
features = tf.parse_single_example(serialized_example,
features={'keywords_list': tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
'ucdoc': tf.FixedLenFeature([], tf.int64),
'keyword': tf.FixedLenFeature([], tf.int64),
'is_click': tf.FixedLenFeature([], tf.float32),
'sl': tf.FixedLenFeature([], tf.int64),
'lr': tf.FixedLenFeature([], tf.float32)
})
keywords_list = tf.cast(features['keywords_list'], tf.int32)
ucdoc = tf.cast(features['ucdoc'], tf.int32)
keyword = tf.cast(features['keyword'], tf.int32)
is_click = tf.cast(features['is_click'], tf.float32)
sl = tf.cast(features['sl'], tf.int32)
lr = tf.cast(features['lr'], tf.float32)
    return ucdoc, keyword, keywords_list, is_click, sl, lr
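# The tuple order returned above must match the unpacking of iterator.get_next()
# below: tf_ucdoc, tf_keyword, tf_keywords_list, tf_is_click, tf_sl, tf_lr.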
names = []
for file in os.listdir(tfrecord_location):
if file.startswith("part"):
names.append(file)
file_paths = [os.path.join(tfrecord_location, name) for name in names]
dataset = tf.data.TFRecordDataset(file_paths)
shuffle_value = 2000
repeat_value = 10
batch_size = 1000
prefetch_buffer = 2000
dataset = dataset.map(__data_parser)
dataset = dataset.repeat(repeat_value).shuffle(shuffle_value).prefetch(buffer_size=prefetch_buffer).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
tf_ucdoc, tf_keyword, tf_keywords_list, tf_is_click, tf_sl, tf_lr = iterator.get_next()
unique_keywords = 811
cate_list = np.array([x for x in range(unique_keywords)])
user_count = 1349500103
item_count, cate_count = unique_keywords, unique_keywords
predict_batch_size = 5000
predict_ads_num = 30
epoch = repeat_value  # number of passes over the data; matches dataset.repeat() above
total_iterations = int((user_count * epoch) // batch_size)
print('total iterations = {}'.format(total_iterations))
max_epochs = 500
model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num,tf_ucdoc,tf_keyword,tf_is_click,tf_keywords_list,tf_sl)
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_time = time.time()
count_epoch = 0
last_100_loss = []
print('shuffle = {}, epochs = {}, batch_size = {}, predict_batch_size = {}'.format(shuffle_value, epoch, batch_size, predict_batch_size))
for i in range(max_epochs*500):
loss, _,sl = sess.run([model.loss, model.train_op, tf_sl])
loss = round(loss, 2)
last_100_loss.append(loss)
if len(last_100_loss) == 101:
del last_100_loss[0]
if i%500==0:
print('Epoch {} DONE Iteration: {} Cost time: {} Model Loss: {} Average Loss: {}'.format(count_epoch, i, time.time()-start_time, loss,
round(sum(last_100_loss)/100, 2)))
model.save(sess, output)
count_epoch += 1
# print("i: ",i," loss: ",loss)
model.save(sess, output)
| 42.510638 | 152 | 0.654905 | ["Apache-2.0"] | Faezehvaseghi/incubator-bluemarlin | Model/lookalike-model/lookalike_model/trainer/lookalike_trainer_tfrecords.py | 3,996 | Python |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import numpy
import copy
from dcase_util.ui import FancyStringifier, FancyLogger
from dcase_util.containers import ContainerMixin
from dcase_util.data import DataBuffer
def get_keras_data_sequence_class():
# Use getter method to avoid importing Keras when importing dcase_util. This allows user to decide when import
# Keras, so that user can set random seeds before Keras import.
from keras.utils import Sequence
class KerasDataSequence(Sequence, ContainerMixin):
def __init__(self, item_list=None, batch_size=64,
buffer_size=None,
data_processing_chain=None, meta_processing_chain=None,
data_processing_chain_callback_on_epoch_end=None, meta_processing_chain_callback_on_epoch_end=None,
transformer_callbacks=None,
refresh_buffer_on_epoch=False,
data_format='channels_last',
target_format='single_target_per_sequence',
**kwargs):
"""Constructor
Parameters
----------
item_list : list or dict
Items in the data sequence. List containing multi-level dictionary with first level key
'data' and 'meta'. Second level should contain parameters for process method in the processing chain.
Default value None
batch_size : int
Batch size (item count).
Default value 64
buffer_size : int
Internal buffer size (item count). By setting this sufficiently high, data sequence generator can
possibly fit all sequence items into internal buffer and can fetch without loading from disk.
Set to None, if no internal buffer used.
Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
data_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
meta_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
transformer_callbacks : list of func
Transformer callbacks to jointly process data and meta. This can be used for local data modification and
data augmentation.
Default value None
refresh_buffer_on_epoch : bool
In case internal data buffer is used, force data and meta refresh at the end of each epoch. Use this if
data is modified/augmented differently for each epoch.
In case data_processing_chain_callback_on_epoch_end or meta_processing_chain_callback_on_epoch_end is
used, this parameter is automatically set to True.
Default value False
data_format : str
Keras like data format, controls where channel should be added.
Possible values ['channels_first', 'channels_last']
Default value 'channels_last'
target_format : str
                Meta data interpretation in relation to the data items.
                Default value 'single_target_per_sequence'
"""
# Run ContainerMixin init
ContainerMixin.__init__(self, **kwargs)
self._data_shape = None
self._data_axis = None
self.item_list = copy.copy(item_list)
self.batch_size = batch_size
self.buffer_size = buffer_size
self.data_refresh_on_epoch = refresh_buffer_on_epoch
if data_format is None:
data_format = 'channels_last'
self.data_format = data_format
if self.data_format not in ['channels_first', 'channels_last']:
message = '{name}: Unknown data_format [{data_format}].'.format(
name=self.__class__.__name__,
data_format=self.data_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if target_format is None:
target_format = 'single_target_per_sequence'
self.target_format = target_format
if self.target_format not in ['same', 'single_target_per_sequence']:
message = '{name}: Unknown target_format [{target_format}].'.format(
name=self.__class__.__name__,
target_format=self.target_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if data_processing_chain_callback_on_epoch_end is None:
data_processing_chain_callback_on_epoch_end = []
self.data_processing_chain_callback_on_epoch_end = data_processing_chain_callback_on_epoch_end
if self.data_processing_chain_callback_on_epoch_end:
self.data_refresh_on_epoch = True
if meta_processing_chain_callback_on_epoch_end is None:
meta_processing_chain_callback_on_epoch_end = []
self.meta_processing_chain_callback_on_epoch_end = meta_processing_chain_callback_on_epoch_end
if transformer_callbacks is None:
transformer_callbacks = []
self.transformer_callbacks = transformer_callbacks
# Processing chains
self.data_processing_chain = data_processing_chain
self.meta_processing_chain = meta_processing_chain
if self.buffer_size is not None:
# Initialize data buffer
self.data_buffer = DataBuffer(
size=self.buffer_size
)
else:
self.data_buffer = None
def __str__(self):
ui = FancyStringifier()
output = ''
output += ui.class_name(self.__class__.__name__) + '\n'
output += ui.data(
indent=2,
field='Batch size',
value=self.batch_size
) + '\n'
output += ui.data(
indent=2,
field='Epoch size',
value=len(self), unit='batches'
) + '\n'
shape = self.data_shape
axis = self.data_axis
output += ui.data(field='Data item shape', value=shape) + '\n'
output += ui.data(
indent=4,
field='Time',
value=shape[axis['time_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Data',
value=shape[axis['data_axis']]
) + '\n'
if 'sequence_axis' in axis:
output += ui.data(
indent=4,
field='Sequence',
value=shape[axis['sequence_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Axis',
value=axis
) + '\n'
if self.buffer_size is not None:
output += ui.line(field='Buffer') + '\n'
output += ui.data(
indent=4,
field='buffer_size',
value=self.buffer_size,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=self.data_buffer.count,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=(self.data_buffer.count / float(self.buffer_size)) * 100,
unit='%'
) + '\n'
return output
def __getitem__(self, index):
start_index = index * self.batch_size
stop_index = (index + 1) * self.batch_size
batch_buffer_data = []
batch_buffer_meta = []
for item_index in range(start_index, stop_index):
if item_index < len(self.item_list):
item = self.item_list[item_index]
# Load item data
data, meta = self.process_item(item=item)
if self.transformer_callbacks:
# Apply transformer callbacks
for callback in self.transformer_callbacks:
data, meta = callback(
data=data,
meta=meta
)
# Collect data
batch_buffer_data.append(data.data)
# Collect meta
if self.target_format == 'single_target_per_sequence':
# Collect single target per sequence
for i in range(0, data.shape[data.sequence_axis]):
batch_buffer_meta.append(meta.data[:, 0])
elif self.target_format == 'same':
                        # Repeat the target for every frame in the sequence
batch_buffer_meta.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
)
)
if len(data.shape) == 2:
# Prepare 2D data, stack along time_axis
if data.time_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.time_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif len(data.shape) == 3:
# Prepare 3D data, stack along sequence_axis
if data.sequence_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.sequence_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif data.sequence_axis == 2:
batch_buffer_data = numpy.dstack(batch_buffer_data)
# Add channel dimension to the data
if self.data_format == 'channels_first':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=0
)
elif self.data_format == 'channels_last':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=3
)
# Prepare meta
if self.target_format == 'single_target_per_sequence':
batch_buffer_meta = numpy.vstack(batch_buffer_meta)
elif self.target_format == 'same':
batch_buffer_meta = numpy.hstack(batch_buffer_meta).T
return batch_buffer_data, batch_buffer_meta
def __len__(self):
num_batches = int(numpy.ceil(len(self.item_list) / float(self.batch_size)))
if num_batches > 0:
return num_batches
else:
return 1
@property
def data_shape(self):
if self._data_shape is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
                if hasattr(data, 'sequence_axis'):
                    self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_shape
@property
def data_axis(self):
if self._data_axis is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_axis
@property
def data_size(self):
shape = self.data_shape
axis = self.data_axis
size = {
'time': shape[axis['time_axis']],
'data': shape[axis['data_axis']],
}
if 'sequence_axis' in axis:
size['sequence'] = shape[axis['sequence_axis']]
return size
def process_item(self, item):
if self.data_buffer is not None:
# Fetch data and meta through internal buffer
if not self.data_buffer.key_exists(key=item):
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
self.data_buffer.set(
key=item,
data=data,
meta=meta
)
else:
data, meta = self.data_buffer.get(key=item)
else:
# Fetch data and meta directly.
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
return data, meta
def on_epoch_end(self):
if self.data_processing_chain_callback_on_epoch_end:
for callback_parameters in self.data_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.meta_processing_chain_callback_on_epoch_end:
for callback_parameters in self.meta_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
                        self.meta_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.data_buffer is not None and self.data_refresh_on_epoch:
# Force reload of data
self.data_buffer.clear()
return KerasDataSequence
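# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): shows the
# expected item_list structure and how the generated class is obtained. The
# 'filename' keyword and the file paths are assumptions; the real keyword
# arguments depend on the processors in the supplied processing chains.
# ---------------------------------------------------------------------------
def _example_keras_data_sequence(data_processing_chain, meta_processing_chain):
    """Minimal sketch: build a KerasDataSequence from two prepared chains."""
    KerasDataSequence = get_keras_data_sequence_class()
    item_list = [
        {
            'data': {'filename': 'audio/file_1.wav'},  # kwargs for data_processing_chain.process()
            'meta': {'filename': 'meta/file_1.csv'},   # kwargs for meta_processing_chain.process()
        },
    ]
    return KerasDataSequence(
        item_list=item_list,
        batch_size=32,
        data_processing_chain=data_processing_chain,
        meta_processing_chain=meta_processing_chain,
        target_format='single_target_per_sequence'
    )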
def data_collector(item_list=None,
data_processing_chain=None, meta_processing_chain=None,
target_format='single_target_per_sequence',
channel_dimension='channels_last',
verbose=True,
print_indent=2
):
"""Data collector
Collects data and meta into matrices while processing them through processing chains.
Parameters
----------
    item_list : list or dict
        Items in the data sequence. List of multi-level dictionaries with first-level keys
        'data' and 'meta'. The second level should contain the parameters for the process method
        of the corresponding processing chain.
        Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
    channel_dimension : str
        Controls where the channel dimension is added. Similar to the Keras data format parameter.
        If None is given, no channel dimension is added.
        Possible values [None, 'channels_first', 'channels_last']
        Default value 'channels_last'
    target_format : str
        Meta data interpretation in relation to the data items.
        Possible values ['same', 'single_target_per_sequence']
        Default value 'single_target_per_sequence'
verbose : bool
Print information about the data
Default value True
print_indent : int
Default value 2
Returns
-------
numpy.ndarray
data
numpy.ndarray
meta
dict
data size information
"""
if item_list:
# Collect all data and meta
X = []
Y = []
for item in item_list:
data = data_processing_chain.process(**item['data'])
meta = meta_processing_chain.process(**item['meta'])
X.append(data.data)
# Collect meta
if target_format == 'single_target_per_sequence':
# Collect single target per sequence
for i in range(0, data.shape[data.sequence_axis]):
Y.append(meta.data[:, 0])
elif target_format == 'same':
                # Repeat the target for every frame in the item
Y.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
).T
)
data_size = {}
if len(data.shape) == 2:
# Stack collected data and meta correct way
if data.time_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
else:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
# Get data item size
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
}
elif len(data.shape) == 3:
# Stack collected data and meta correct way
if data.sequence_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
elif data.sequence_axis == 1:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
elif data.sequence_axis == 2:
X = numpy.dstack(X)
Y = numpy.dstack(Y)
if channel_dimension:
# Add channel dimension to the data
if channel_dimension == 'channels_first':
X = numpy.expand_dims(X, axis=1)
elif channel_dimension == 'channels_last':
X = numpy.expand_dims(X, axis=3)
# Get data item size
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
'sequence': X.shape[data.sequence_axis],
}
if verbose:
data_shape = data.shape
data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
data_axis['sequence_axis'] = data.sequence_axis
meta_shape = meta.shape
meta_axis = {
'time_axis': meta.time_axis,
'data_axis': meta.data_axis
}
if hasattr(meta, 'sequence_axis'):
meta_axis['sequence_axis'] = meta.sequence_axis
logger = FancyLogger()
# Data information
logger.line('Data', indent=print_indent)
# Matrix
logger.data(
field='Matrix shape',
value=X.shape,
indent=print_indent + 2
)
# Item
logger.data(
field='Item shape',
value=data_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=data_shape[data_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=data_shape[data_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in data_axis:
logger.data(
field='Sequence',
value=data_shape[data_axis['sequence_axis']],
indent=print_indent + 4
)
# Meta information
logger.line('Meta', indent=print_indent)
# Matrix
logger.data(
field='Matrix shape',
value=Y.shape,
indent=print_indent + 2
)
# Item
logger.data(
field='Item shape',
value=meta_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=meta_shape[meta_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=meta_shape[meta_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in meta_axis:
logger.data(
field='Sequence',
value=meta_shape[meta_axis['sequence_axis']],
indent=print_indent + 4
)
return X, Y, data_size
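# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): shows how
# data_collector is typically called once the item_list and the two processing
# chains have been prepared (see the sketch above); parameter values are
# illustrative.
# ---------------------------------------------------------------------------
def _example_data_collector(item_list, data_processing_chain, meta_processing_chain):
    """Minimal sketch: collect all items into X/Y matrices and report sizes."""
    X, Y, data_size = data_collector(
        item_list=item_list,
        data_processing_chain=data_processing_chain,
        meta_processing_chain=meta_processing_chain,
        target_format='single_target_per_sequence',
        channel_dimension='channels_last',
        verbose=False
    )
    return X, Y, data_size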
| 34.787167 | 120 | 0.519951 | [
"MIT"
] | AlexBruBuxo/TFG--ASC-Deep-Learning | venv/Lib/site-packages/dcase_util/keras/data.py | 22,229 | Python |
class Action():
    pass
| 7.666667 | 15 | 0.478261 | [
"MIT"
] | nickswalker/counterpoint-reinforcement-learning | rl/action.py | 23 | Python |
import dataclasses
import os
from typing import List
import hydra
@dataclasses.dataclass
class ModelConfig:
"""Configuration for the model.
Note that `block_sizes` must be specified using the `dataclasses.field`
function, as you are not allowed to supply default values for mutable fields.
Instead, the default value is supplied through a default factory function which
creates a new list every time.
"""
architecture: str = 'lenet'
hidden_size: int = 20
block_sizes: List[int] = dataclasses.field(default_factory=lambda: [10, 10, 10])
@dataclasses.dataclass
class TrainingConfig:
model: ModelConfig = ModelConfig()
num_epochs: int = 10
data_path: str = 'data.npy'
@hydra.main(config_path=None, config_name='config')
def main(config: TrainingConfig):
print(f'Got configuration: {config}')
    # Note: convert relative data paths to absolute ones here, since Hydra changes the working directory for each run.
data_path = hydra.utils.to_absolute_path(config.data_path)
print(f'Loading data from {data_path}')
    # Note: relative output paths resolve inside Hydra's per-run output folder.
result_path = os.path.abspath('result.txt')
print(f'Saving results to {result_path}')
if __name__ == '__main__':
from hydra.core.config_store import ConfigStore
cs = ConfigStore()
cs.store('config', node=TrainingConfig)
main()
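# Usage note (illustrative only): because TrainingConfig is registered as the
# 'config' node above, its fields can be overridden from the command line, e.g.
#   python script.py num_epochs=20 model.hidden_size=64 data_path=/tmp/data.npy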
| 28.595745 | 84 | 0.72247 | [
"MIT"
] | wendazhou/cds-bootcamp | lecture3/bootcamp3/script.py | 1,344 | Python |
import numpy as np
import h5py as py
import matplotlib.pyplot as plt
import sys
hdf5_file = py.File("..\\Build\\TestsWithGL\\t2d_mpm_chm_t_bar_conference_restart.hdf5", "r")
frame_id = 0
th_grp = hdf5_file['TimeHistory']['penetration']
pcl_dset = th_grp['frame_%d' % frame_id]['ParticleData']
pcl_num = pcl_dset.attrs['pcl_num']
print(pcl_num)
pcl_stress = np.zeros([pcl_num, 4])
p_min_id = 0
p_min = sys.float_info.max  # start high so the first pressure value becomes the running minimum
p_max_id = 0
p_max = -sys.float_info.max
for pcl_id in range(pcl_num):
pcl_data = pcl_dset[pcl_id]
pcl_stress[pcl_id][0] = pcl_data['s11']
pcl_stress[pcl_id][1] = pcl_data['s22']
pcl_stress[pcl_id][2] = pcl_data['s12']
pcl_stress[pcl_id][3] = pcl_data['p']
#p = pcl_stress[pcl_id][3]
p = (pcl_stress[pcl_id][0] + pcl_stress[pcl_id][1] + pcl_stress[pcl_id][2]) / 3.0
if (p < p_min):
p_min = p
p_min_id = pcl_id
if (p > p_max):
p_max = p
p_max_id = pcl_id
print("p min: %f pcl %d\np max: %f pcl %d" % (p_min, p_min_id, p_max, p_max_id))
hdf5_file.close()
| 29.055556 | 93 | 0.664436 | [
"MIT"
] | MingAtUWA/SimpleMPM2 | PyUtilities/hdf5_stress_range.py | 1,046 | Python |
from flask import render_template
from app import app
from app.forms import LoginForm
# ...
@app.route('/login')
def login():
form = LoginForm()
return render_template('login.html', title='Sign In', form=form) | 21.9 | 68 | 0.712329 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | 1586-ItSolSchool/Pythonodavy | app/routes.py | 219 | Python |
from datetime import datetime, timedelta
import requests
from decouple import config
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from .models import Socio
class ModelTest(TestCase):
def setUp(self):
Socio(
user=User.objects.create_user(
username='00000000',
password='000000'
),
nome='João de Souza',
apelido='João',
whatsapp='(86) 9 9123-4567',
cpf='068.008.773-79',
rg='123456789',
data_nascimento='2000-01-01',
data_inicio=timezone.now(),
data_fim=timezone.now() + timedelta(days=40),
is_socio=True,
stripe_customer_id='cus_00000000',).save()
def test_notificar_email(self):
socio = Socio.objects.create(
user=User.objects.create_user(
username='12345678',
password='123456',
),
nome='Fulano',
stripe_customer_id='cus_123456789',
)
notificar = socio.notificar(metodo='email', mensagem='teste')
self.assertEqual(notificar, 'Enviando email...')
def test_datetime(self):
current_period_end = datetime(
2022, 6, 30, 23, 59, 59
)
if current_period_end - datetime.now() > timedelta(days=30):
if datetime.now().month < 7:
if current_period_end.month > 6:
current_period_end = datetime(
datetime.now().year, 6, 30, 23, 59, 59
)
def test_adicionar_socio_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.data_fim - timezone.now().date() > timedelta(days=30) and socio.is_socio:
url = 'https://cheersshop.com.br/socio/adicionar'
obj = {
"nome": socio.nome,
"email": socio.email,
"telefone": socio.whatsapp,
"matricula": socio.matricula,
"observacao": "",
"cpf": socio.cpf,
"data_fim_plano": socio.data_fim,
"vendedor": "1874"
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.status_code, 200)
def test_adicionar_coupom_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.is_socio:
url = 'https://cheersshop.com.br/codigo'
obj = {
"nome": socio.cpf,
"uso": 1,
"ativo": True,
"desconto_reais": 70 if socio.is_atleta else 65,
"maximo_usuario": "1",
"quantidade": "1",
"usuario": 192061,
"vendedor": "1874",
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.json()['status'], 'Success')
| 33.041667 | 90 | 0.533733 | [
"MIT"
] | leonunesbs/aaafuria-rebon-backend | core/tests.py | 3,174 | Python |
# Script to convert json into proprietary .mat files
# Licensed under Apache v2 (see LICENSE)
import sys
import os
import glob
import json
from scipy.io import savemat
def main(json_dir, out_dir):
""" Script to convert all .json files in json_dir into corresponding .mat
files in out_dir
.mat files have the same basename as the .json files
This script is meant for data files that contain data from
OpenSauce / VoiceSauce variables.
"""
# Find all .json files in json_dir
json_files = glob.glob(os.path.join(json_dir, '*.json'))
    # Iterate through each .json file
for json_file in json_files:
with open(json_file) as f:
json_dict = json.load(f)
# Write json dict to mat
# Check that output directory exists, if not create it
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fn = os.path.join(out_dir, os.path.splitext(os.path.basename(json_file))[0]) + '.mat'
savemat(fn, json_dict)
print('Wrote data in {} to {}'.format(json_file, fn))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| 29.815789 | 93 | 0.661077 | [
"Apache-2.0"
] | CobiELF/opensauce-python | tools/convert_json_to_mat.py | 1,133 | Python |
"""Model module for images."""
from django.db import models
from django.contrib.auth.models import User
from imager_profile.models import ImagerProfile
# Create your models here.
class ImageBaseClass(models.Model):
"""Base class for Photo and Album classes."""
PRIVATE = 'PRVT'
SHARED = 'SHRD'
PUBLIC = 'PBLC'
PUBLISHED = ((PRIVATE, 'private'),
(SHARED, 'shared'),
(PUBLIC, 'public'))
title = models.CharField(max_length=180)
description = models.TextField(max_length=500, blank=True, null=True)
date_modified = models.DateField(auto_now=True)
date_published = models.DateField(blank=True, null=True)
published = models.CharField(choices=PUBLISHED, max_length=8)
class Meta:
"""Meta."""
abstract = True
class Photo(ImageBaseClass):
"""Photo model."""
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='photo')
image = models.ImageField(upload_to='images')
date_uploaded = models.DateField(editable=False, auto_now_add=True)
def __str__(self):
"""Print function displays username."""
return self.title
class Album(ImageBaseClass):
"""Album model."""
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='album')
cover = models.ImageField(upload_to='images')
date_created = models.DateField(editable=False, auto_now_add=True)
photos = models.ManyToManyField(Photo, related_name='albums', blank=True)
def __str__(self):
"""Print function displays username."""
return self.title
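# Usage sketch (illustrative only, not part of the original module): creating a
# photo with a publish state, assuming an existing `some_user` User instance and
# an uploaded image file:
#   photo = Photo.objects.create(
#       user=some_user,
#       title='Sunset',
#       image='images/sunset.jpg',
#       published=Photo.PUBLIC,
#   )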
| 29.625 | 77 | 0.65642 | [
"MIT"
] | Loaye/django-imager-group | imagersite/imager_images/models.py | 1,659 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."}
'300': AuthenticationError, # Authentication is required for self endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from self IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for self.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for self asset are not available at self time.
'402': PermissionDenied, # You need to verify your identitiy before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
        # Amounts are truncated (not rounded) to the market's amount precision,
        # expressed in decimal places.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # Price precision is based on significant figures (see precisionMode above);
        # the rounded price is additionally truncated to at most 8 decimal places
        # (e.g. 1.2345, 123.45, 1234.5, 0.00012345).
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
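    # Worked example (illustrative, assuming a market pricePrecision of 5
    # significant digits): price_to_precision() maps 8097.53 -> '8097.5' and
    # 0.0230191 -> '0.023019', while amount_to_precision() truncates to the
    # market's amount precision in decimal places.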
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
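    # Usage sketch (illustrative only, assumes an instance created via
    # ccxt.async_support.bitvavo()): the limits parsed above become available
    # after loading markets, e.g.
    #   markets = await exchange.load_markets()
    #   min_amount = markets['BTC/EUR']['limits']['amount']['min']
    #   min_cost = markets['BTC/EUR']['limits']['cost']['min']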
async def fetch_currencies_from_cache(self, params={}):
# self method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
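    # Usage sketch (illustrative only): reading the parsed book, which also
    # carries the exchange nonce set above:
    #   book = await exchange.fetch_order_book('BTC/EUR', limit=25)
    #   best_bid, best_ask = book['bids'][0][0], book['asks'][0][0]
    #   nonce = book['nonce']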
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
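    # Usage sketch (illustrative only): fetching roughly one day of hourly
    # candles ending now:
    #   since = exchange.milliseconds() - 24 * 60 * 60 * 1000
    #   candles = await exchange.fetch_ohlcv('BTC/EUR', '1h', since=since, limit=24)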
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
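    # Usage sketch (illustrative only): reading the parsed balance:
    #   balance = await exchange.fetch_balance()
    #   free_eur, used_eur = balance['EUR']['free'], balance['EUR']['used']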
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
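    # Usage sketch (illustrative only; symbol and prices are made up): a plain
    # stopPrice in params is mapped to triggerAmount/triggerType in the method
    # above:
    #   await exchange.create_order(
    #       'BTC/EUR', 'stopLossLimit', 'sell', 0.01, 25000.0,
    #       {'stopPrice': 25500.0})
    # For market orders, a 'cost' (or 'amountQuote') param may be supplied
    # instead of a base amount, e.g. params={'cost': 50} to spend 50 EUR.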
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
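    # Usage sketch (illustrative only; identifiers and values are made up):
    # updating only the remaining amount of an open limit order:
    #   await exchange.edit_order(order_id, 'BTC/EUR', 'limit', 'buy',
    #                             params={'amountRemaining': 0.005})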
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        fetches information on multiple orders made by the user
        :param str symbol: unified market symbol of the market the orders were made in
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
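    # Illustrative usage sketch (not part of the class); it assumes the standard ccxt
    # async API and placeholder credentials:
    #
    #     import asyncio
    #     import ccxt.async_support as ccxt
    #
    #     async def main():
    #         exchange = ccxt.bitvavo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #         # since is a millisecond timestamp, limit defaults to 500 (max 1000)
    #         orders = await exchange.fetch_orders('ETH/EUR', since=None, limit=10)
    #         await exchange.close()
    #
    #     asyncio.run(main())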
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
        #         "disableMarketProtection":false,
        #         "timeInForce": "GTC",
        #         "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag/memo for the withdrawal (sent to the exchange as paymentId)
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
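    # Illustrative usage sketch (placeholder values): for crypto withdrawals the
    # address is an on-chain address, for EUR it is an IBAN; the tag becomes paymentId.
    #
    #     tx = await exchange.withdraw('BTC', 0.01, 'BTC_ADDRESS_PLACEHOLDER', tag=None)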
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
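    # Signature sketch: the private-API auth above is an HMAC-SHA256 over
    # timestamp + method + url + body, hex-encoded with the API secret as the key.
    # A standalone equivalent (illustrative; the path and secret are placeholders):
    #
    #     import hmac, hashlib, time
    #
    #     timestamp = str(int(time.time() * 1000))
    #     prehash = timestamp + 'GET' + '/v2/account' + ''
    #     signature = hmac.new(b'SECRET', prehash.encode(), hashlib.sha256).hexdigest()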
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
| 45.082826 | 340 | 0.514184 | ["MIT"] | DoctorSlimm/ccxt | python/ccxt/async_support/bitvavo.py | 74,026 | Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
  stacktrace = traceback.format_exc()
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
| 37.039735 | 79 | 0.734311 | ["Apache-2.0"] | bopopescu/searchparty | google-cloud-sdk/lib/googlecloudsdk/command_lib/crash_handling.py | 5,593 | Python |
import inspect
import textwrap
import pytest
from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (testdir.tmpdir, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = testdir.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
p = testdir.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, testdir):
"""Test support for multiple --doctest-glob arguments (#1255).
"""
testdir.maketxtfile(
xdoc="""
>>> 1
1
"""
)
testdir.makefile(
".foo",
test="""
>>> 1
1
""",
)
testdir.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.basename for x in testdir.tmpdir.listdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, testdir, test_string, encoding):
"""Test support for doctest_encoding ini option.
"""
testdir.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
encoding
)
)
doctest = """
>>> "{}"
{}
""".format(
test_string, repr(test_string)
)
testdir._makefile(".txt", [doctest], {}, encoding=encoding)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*unexpected_exception*",
"*>>> i = 0*",
"*>>> 0 / i*",
"*UNEXPECTED*ZeroDivision*",
]
)
def test_doctest_skip(self, testdir):
testdir.maketxtfile(
"""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_docstring_partial_context_around_error(self, testdir):
"""Test that we show some context before the actual line of a failing
doctest.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*hello*",
"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
"*1/0*",
"*UNEXPECTED*ZeroDivision*",
"*1 failed*",
]
)
def test_doctest_unex_importerror_only_txt(self, testdir):
testdir.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = testdir.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
"{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
]
)
def test_doctest_unex_importerror_with_module(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
testdir.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = testdir.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
)
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile(
"""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = testdir.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, testdir):
"""Fix internal error with docstrings containing non-ascii characters.
"""
testdir.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = testdir.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = testdir.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print(
... "Hi\\n\\nByé")
Hi
...
Byé
>>> 1/0 # Byé
1
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = testdir.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, testdir):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = testdir.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, testdir):
"""
Test case to make sure that DoctestItem.reportinfo() returns lineno.
"""
p = testdir.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, testdir):
"""
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_invalid_setup_py(self, testdir):
"""
Test to make sure that pytest reads setup.py files that are not used
for python packages when ran with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_unicode(self, testdir, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_UNICODE"
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'12'.decode('ascii') {comment}
'12'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_bytes(self, testdir, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_BYTES"
testdir.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'foo' {comment}
'foo'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
def test_unicode_string(self, testdir):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_bytes_literal(self, testdir):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
testdir.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
_number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
"-1.",
".1",
"+.1",
"-.1",
"0.1",
"+0.1",
"-0.1",
"1e5",
"+1e5",
"1e+5",
"+1e+5",
"1e-5",
"+1e-5",
"-1e-5",
"1.2e3",
"-1.2e-3",
]:
print(s)
m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, testdir, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
"""
)
comment = ""
else:
comment = "#doctest: +NUMBER"
testdir.maketxtfile(
test_doc="""
Scalars:
>>> import math
>>> math.pi {comment}
3.141592653589793
>>> math.pi {comment}
3.1416
>>> math.pi {comment}
3.14
>>> -math.pi {comment}
-3.14
>>> math.pi {comment}
3.
>>> 3. {comment}
3.0
>>> 3. {comment}
3.
>>> 3. {comment}
3.01
>>> 3. {comment}
2.99
>>> .299 {comment}
.3
>>> .301 {comment}
.3
>>> 951. {comment}
1e3
>>> 1049. {comment}
1e3
>>> -1049. {comment}
-1e3
>>> 1e3 {comment}
1e3
>>> 1e3 {comment}
1000.
Lists:
>>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
[3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
>>> [[0.333, 0.667], [0.999, 1.333]] {comment}
[[0.33, 0.667], [0.999, 1.333]]
>>> [[[0.101]]] {comment}
[[[0.1]]]
Doesn't barf on non-numbers:
>>> 'abc' {comment}
'abc'
>>> None {comment}
""".format(
comment=comment
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"expression,output",
[
# ints shouldn't match floats:
("3.0", "3"),
("3e0", "3"),
("1e3", "1000"),
("3", "3.0"),
# Rounding:
("3.1", "3.0"),
("3.1", "3.2"),
("3.1", "4.0"),
("8.22e5", "810000.0"),
# Only the actual output is rounded up, not the expected output:
("3.0", "2.98"),
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
def test_number_non_matches(self, testdir, expression, output):
testdir.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
""".format(
expression=expression, output=output
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=0, failed=1)
def test_number_and_allow_unicode(self, testdir):
testdir.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
>>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
T(a=0.233, b=u'str', c='bytes')
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
class TestDoctestSkips:
"""
If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
"""
@pytest.fixture(params=["text", "module"])
def makedoctest(self, testdir, request):
def makeit(doctest):
mode = request.param
if mode == "text":
testdir.maketxtfile(doctest)
else:
assert mode == "module"
testdir.makepyfile('"""\n%s"""' % doctest)
return makeit
def test_one_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
4
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
def test_one_skipped_failed(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
def test_all_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2 # doctest: +SKIP
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
def test_vacuous_all_skipped(self, testdir, makedoctest):
makedoctest("")
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
def test_continue_on_failure(self, testdir):
testdir.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
... raise ValueError('error1')
>>> foo()
>>> i
>>> i + 2
7
>>> i + 1
"""
)
result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
result.assert_outcomes(passed=0, failed=1)
# The lines that contains the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
result.stdout.fnmatch_lines(
["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
)
class TestDoctestAutoUseFixtures:
SCOPES = ["module", "session", "class", "function"]
def test_doctest_module_session_fixture(self, testdir):
"""Test that session fixtures are initialized for doctest modules (#768)
"""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
testdir.makeconftest(
"""
import pytest
import sys
@pytest.yield_fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
"""
)
testdir.makepyfile(
foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
def test_fixture_scopes(self, testdir, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope
)
)
testdir.makepyfile(
test_1='''
def test_foo():
"""
>>> getfixture('auto') + 1
100
"""
def test_bar():
assert 1
'''
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
result = testdir.runpytest(*params)
result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
self, testdir, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse={autouse}, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope, autouse=autouse
)
)
if use_fixture_in_doctest:
testdir.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
def test_auto_use_request_attributes(self, testdir, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
if "{scope}" == 'module':
assert request.module is None
if "{scope}" == 'class':
assert request.cls is None
if "{scope}" == 'function':
assert request.function is None
return 99
""".format(
scope=scope
)
)
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
str(result.stdout.no_fnmatch_line("*FAILURES*"))
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
SCOPES = ["module", "session", "class", "function"]
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_doctestfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple text file doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.maketxtfile(
"""
>>> print(cl.__name__)
contextlib
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_pyfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple Python file docstring doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.makepyfile(
"""
def foo():
'''
>>> print(cl.__name__)
contextlib
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
def _run_doctest_report(self, testdir, format):
testdir.makepyfile(
"""
def foo():
'''
>>> foo()
a b
0 1 4
1 2 4
2 3 6
'''
print(' a b\\n'
'0 1 4\\n'
'1 2 5\\n'
'2 3 6')
"""
)
return testdir.runpytest("--doctest-modules", "--doctest-report", format)
@pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
def test_doctest_report_udiff(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"]
)
def test_doctest_report_cdiff(self, testdir):
result = self._run_doctest_report(testdir, "cdiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" ! 1 2 4",
" 2 3 6",
" --- 1,4 ----",
" a b",
" 0 1 4",
" ! 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_ndiff(self, testdir):
result = self._run_doctest_report(testdir, "ndiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" - 1 2 4",
" ? ^",
" + 1 2 5",
" ? ^",
" 2 3 6",
]
)
@pytest.mark.parametrize("format", ["none", "only_first_failure"])
def test_doctest_report_none_or_only_first_failure(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[
"Expected:",
" a b",
" 0 1 4",
" 1 2 4",
" 2 3 6",
"Got:",
" a b",
" 0 1 4",
" 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_invalid(self, testdir):
result = self._run_doctest_report(testdir, "obviously_invalid_format")
result.stderr.fnmatch_lines(
[
"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
]
)
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):
pytest.importorskip(mock_module)
testdir.makepyfile(
"""
from {mock_module} import call
class Example(object):
'''
>>> 1 + 1
2
'''
""".format(
mock_module=mock_module
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
def __getattr__(self, _):
raise KeyError("This should be an AttributeError")
@pytest.mark.parametrize( # pragma: no branch (lambdas are not called)
"stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(stop):
bad_instance = Broken()
assert inspect.unwrap.__module__ == "inspect"
with _patch_unwrap_mock_aware():
assert inspect.unwrap.__module__ != "inspect"
with pytest.warns(
pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
):
with pytest.raises(KeyError):
inspect.unwrap(bad_instance, stop=stop)
assert inspect.unwrap.__module__ == "inspect"
| 29.477516 | 109 | 0.46041 | ["MIT"] | NNRepos/pytest | testing/test_doctest.py | 41,317 | Python |
from statistics import mean
with open("input.txt") as f:
values = [int(i) for i in f.readline().split(",")]
m_values = int(mean(values))
# each step costs 1, 2, 3, ... so moving n steps costs the triangular number T(n);
# the optimum position lies within 0.5 of the mean, hence checking the integers
# around int(mean) is enough
print(min(
    sum(sum(range(1, abs(pos - i) + 1)) for pos in values)
    for i in range(m_values - 1, m_values + 2)
))
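# Reference variant (defined but not called): the same answer via the closed
# form n * (n + 1) // 2, scanning every candidate position brute-force.
def _closed_form_answer(positions):
    return min(
        sum(abs(pos - i) * (abs(pos - i) + 1) // 2 for pos in positions)
        for i in range(min(positions), max(positions) + 1)
    )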
| 22 | 58 | 0.625 | ["MIT"] | Ashwin-op/Advent_of_Code | 2021/Day 7 - The Treachery of Whales/2.py | 264 | Python |
symbols = [ '1288.HK', '3988.HK', '0883.HK', '0939.HK', '2628.HK', '3968.HK', '0941.HK', '0688.HK', '0386.HK', '1088.HK', '0728.HK', '0762.HK', '1398.HK', '0857.HK', '2318.HK', '0700.HK', 'GAZPq.L', 'LKOHyq.L', 'NKELyq.L', 'NVTKq.L', 'RELIq.L', 'ROSNq.L', 'SNGSyq.L', 'TATNxq.L', 'BSBR.N', 'BBD.N', 'ABV.N', 'CIG.N', 'SID.N', 'GGB.N', 'HDB.N', 'IBN.N', 'ITUB.N', 'MBT.N', 'PBR.N', 'TNE.N', 'VALE.N', 'VIP.N', 'BIDU.OQ', 'INFY.OQ']
#lineProcessor = CSVReutersAdaptative('BRIC_1min.csv')
textFormat = MessageFormat("{0}")
dateFormat = SimpleDateFormat('dd-MMM-yyyy')
timeFormat = SimpleDateFormat('HH:mm:ss.SSS')
doubleFormat = DecimalFormat('#.##')
lineProcessor = CSVSourceLineProcessor([textFormat,dateFormat,timeFormat,None,None,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat],[None,None,None,None,None,OPEN(PRICE),HIGH(PRICE),LOW(PRICE),CLOSE(PRICE),VOLUME(PRICE),Field.EXTENDED(PRICE,"Ave. Price"),Field.EXTENDED(PRICE,"VWAP"),Field.EXTENDED(PRICE,"No. Trades")],0,[1,2])
source = SecondOrderSource('BRIC40_1min.csv', symbols, lineProcessor)
print "Ready"
class MyObserver(PricesListener):
def update(self, ss, when):
strLine = Long.toString(when.getTimeInMillis()).encode('utf-8')
strLine = strLine + when.toString().encode('utf-8')
for s in symbols:
if s in ss:
strLine = strLine + ',' \
+ str(market.getLastPrice(0,s+'-OPEN')) + ','\
+ str(market.getLastPrice(0,s+'-HIGH')) + ','\
+ str(market.getLastPrice(0,s+'-LOW')) + ','\
+ str(market.getLastPrice(0,s+'-CLOSE')) + ','\
                        + str(market.getLastPrice(0,s+'-VOLUME'))
else:
strLine = strLine + ',-,-,-,-,-'
print strLine
market = RandomAccessMarket(0.0, 5000)
lineProcessor.addMarketListener(market)
lineProcessor.addPricesListener(MyObserver())
print "Go!"
strLine = 'milliseconds'
for s in symbols:
strLine = strLine + ',' + s + '-OPEN'
strLine = strLine + ',' + s + '-HIGH'
strLine = strLine + ',' + s + '-LOW'
strLine = strLine + ',' + s + '-CLOSE'
strLine = strLine + ',' + s + '-Volume'
print strLine
source.run()
| 44.882353 | 430 | 0.596767 | ["MIT"] | K0414/metaos | src/attic/attic-python/test/test-secondorder.py | 2,289 | Python |
"""Example reStructuredText from Sphinx-Needs project.
From http://sphinxcontrib-needs.readthedocs.io/en/latest/
but will not work in isolation - cut down just to trigger
RST304.
**Some text**
Wohooo, we have created :need:`req_001`,
which is linked by :need_incoming:`req_001`.
"""
print("sphinx-needs defines its own reStructuredText roles.")
| 23.333333 | 61 | 0.76 | ["MIT"] | Smirenost/flake8-rst-docstrings | tests/RST304/sphinx-roles.py | 350 | Python |
import requests
from bs4 import BeautifulSoup
"""
Bu modül burçlar ile ilgilenen arkadaşlarımın işine yarayacaktır.
Çok basit bir kullanımı mevcuttur.
Bir sorunuz olursa seve seve yardım etmek isterim profilimdeki linklerden bana ulaşabilirsiniz.
"""
def makeAPIRequest(path: str, type: str) -> str:
type = type if (type == "gunluk") or (type == "haftalik") else "gunluk"
r = requests.get(
f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-{type}-yorumu.html")
soup = BeautifulSoup(r.content, "html.parser")
data = soup.find_all("div", {"class": "detail-content-inner"})
burc = (data[0].contents)[len(data[0].contents) - 5]
burcYorum = burc.text
return burcYorum
burcList = ["yengec", "koc", "boga", "ikizler", "aslan",
"basak", "terazi", "akrep", "yay", "oglak", "kova", "balik"]
burcStr = ",".join(burcList)
class burclar:
def burc(name: str, type="gunluk") -> str:
if name not in burcList:
raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
return makeAPIRequest(name, type)
def yengec(type="gunluk") -> str:
return makeAPIRequest("yengec", type)
def koc(type="gunluk") -> str:
return makeAPIRequest("koc", type)
def boga(type="gunluk") -> str:
return makeAPIRequest("boga", type)
def ikizler(type="gunluk") -> str:
return makeAPIRequest("ikizler", type)
def aslan(type="gunluk") -> str:
return makeAPIRequest("aslan", type)
def basak(type="gunluk") -> str:
return makeAPIRequest("basak", type)
def terazi(type="gunluk") -> str:
return makeAPIRequest("terazi", type)
def akrep(type="gunluk") -> str:
return makeAPIRequest("akrep", type)
def yay(type="gunluk") -> str:
return makeAPIRequest("yay", type)
def oglak(type="gunluk") -> str:
return makeAPIRequest("oglak", type)
def kova(type="gunluk") -> str:
return makeAPIRequest("kova", type)
def balik(type="gunluk") -> str:
return makeAPIRequest("balik", type)
"""Haftalik ve Günlük yorumların bitiş kısımı"""
def makeAPIRequestOz(path: str, type: str) -> str:
type = type if (type == "ozellikleri") else "ozellikleri"
y = requests.get(
f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-ozellikleri.html")
soupOz = BeautifulSoup(y.content, "html.parser")
dataOz = soupOz.find_all("div", {"class": "medyanet-content"})
burcOz = (dataOz[0].contents)[len(dataOz[0].contents) - 12]
burcYorumOz = burcOz.text
return burcYorumOz
class burclarOz:
def burcOz(name: str, type="ozellikleri") -> str:
if name not in burcList:
raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
return makeAPIRequestOz(name, type)
def yengec(type="ozellikleri") -> str:
return makeAPIRequestOz("yengec", type)
def koc(type="ozellikleri") -> str:
return makeAPIRequestOz("koc", type)
def boga(type="ozellikleri") -> str:
return makeAPIRequestOz("boga", type)
def ikizler(type="ozellikleri") -> str:
return makeAPIRequestOz("ikizler", type)
def aslan(type="ozellikleri") -> str:
return makeAPIRequestOz("aslan", type)
def basak(type="ozellikleri") -> str:
return makeAPIRequestOz("basak", type)
def terazi(type="ozellikleri") -> str:
return makeAPIRequestOz("terazi", type)
def akrep(type="ozellikleri") -> str:
return makeAPIRequestOz("akrep", type)
def yay(type="ozellikleri") -> str:
return makeAPIRequestOz("yay", type)
def oglak(type="ozellikleri") -> str:
return makeAPIRequestOz("oglak", type)
def kova(type="ozellikleri") -> str:
return makeAPIRequestOz("kova", type)
def balik(type="ozellikleri") -> str:
return makeAPIRequestOz("balik", type)
"""Burçların özelliklerinin çekildiği kısım"""
| 31.71875 | 96 | 0.619704 | ["MIT"] | The-Special/Burc-api | burclar/__init__.py | 4,090 | Python |
# -*- coding: utf-8 -*-
from guillotina import configure
from guillotina.catalog.utils import get_index_fields
from guillotina.component import get_utilities_for
from guillotina.content import IResourceFactory
from guillotina.utils import get_dotted_name
from packaging import version
import aioelasticsearch
ES_CLIENT_VERSION = version.parse(aioelasticsearch.__version__)
ELASTIC6 = ES_CLIENT_VERSION.minor == 5
def default_refresh():
return False
app_settings = {
"elasticsearch": {
"bulk_size": 50,
"refresh": "guillotina_elasticsearch.default_refresh",
"index_name_prefix": "guillotina-",
"connection_settings": {"hosts": [], "timeout": 2},
"index": {},
"security_query_builder": "guillotina_elasticsearch.queries.build_security_query", # noqa
},
"load_utilities": {
"catalog": {
"provides": "guillotina_elasticsearch.interfaces.IElasticSearchUtility", # noqa
"factory": "guillotina_elasticsearch.utility.ElasticSearchUtility",
"settings": {},
}
},
"commands": {
"es-migrate": "guillotina_elasticsearch.commands.migrate.MigrateCommand", # noqa
"es-reindex": "guillotina_elasticsearch.commands.reindex.ReindexCommand", # noqa
"es-vacuum": "guillotina_elasticsearch.commands.vacuum.VacuumCommand",
"es-fields": "guillotina_elasticsearch.commands.fields.FieldsCommand",
},
}
def includeme(root):
configure.scan("guillotina_elasticsearch.utility")
configure.scan("guillotina_elasticsearch.manager")
configure.scan("guillotina_elasticsearch.parser")
# add store true to guillotina indexes
for name, utility in get_utilities_for(IResourceFactory):
if not get_dotted_name(utility._callable).startswith("guillotina."):
continue
for field_name, catalog_info in get_index_fields(name).items():
if field_name in (
"id",
"path",
"uuid",
"type_name",
"tid",
"creators",
"contributors",
"access_roles",
"access_users",
"parent_uuid",
"title",
"creation_date",
"modification_date",
"tags",
):
catalog_info["store"] = True
| 33.291667 | 98 | 0.629537 | ["BSD-2-Clause"] | vjove/guillotina_elasticsearch | guillotina_elasticsearch/__init__.py | 2,397 | Python |
import nltk
import numpy as np
#nltk.download('punkt') #downloading a package with a pretrained tokenizer
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def tokenize(sentence): #splitting a string into meaningful units
return nltk.word_tokenize(sentence)
def stem(word): #Generating the root form of the words
return stemmer.stem(word.lower())
def bag_of_words(tokenized_sentence, all_words):
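    # mark 1.0 for every word in all_words whose stem appears in the stemmed sentence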
tokenized_sentence = [stem(w) for w in tokenized_sentence]
bag = np.zeros(len(all_words), dtype = np.float32)
for idx, w in enumerate(all_words):
if w in tokenized_sentence:
bag[idx] = 1.0
return bag
| 26.615385 | 80 | 0.703757 | [
"MIT"
] | Serkanbezek/Chatbot-NLP-PyTorch | nltk_utils.py | 692 | Python |
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
import squad_data_utils as data_utils
import modelconfig
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def gen(args):
tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model] )
train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "train.json"), is_training=True)
train_features = data_utils.convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
input_ids_np = np.array([f.input_ids for f in train_features], dtype=np.int16)
segment_ids_np = np.array([f.segment_ids for f in train_features], dtype=np.int16)
input_mask_np = np.array([f.input_mask for f in train_features], dtype=np.int16)
start_positions_np = np.array([f.start_position for f in train_features], dtype=np.int16)
end_positions_np = np.array([f.end_position for f in train_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "data.npz"),
input_ids=input_ids_np,
segment_ids = segment_ids_np,
input_mask = input_mask_np,
start_positions = start_positions_np,
end_positions = end_positions_np)
#>>>>> validation
valid_examples=data_utils.read_squad_examples(os.path.join(args.input_dir,"dev.json"), is_training=True)
valid_features = data_utils.convert_examples_to_features(
valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info(" Num orig examples = %d", len(valid_examples))
logger.info(" Num split examples = %d", len(valid_features))
valid_input_ids_np = np.array([f.input_ids for f in valid_features], dtype=np.int16)
valid_segment_ids_np = np.array([f.segment_ids for f in valid_features], dtype=np.int16)
valid_input_mask_np = np.array([f.input_mask for f in valid_features], dtype=np.int16)
valid_start_positions_np = np.array([f.start_position for f in valid_features], dtype=np.int16)
valid_end_positions_np = np.array([f.end_position for f in valid_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "dev.npz"),
input_ids=valid_input_ids_np,
segment_ids = valid_segment_ids_np,
input_mask = valid_input_mask_np,
start_positions = valid_start_positions_np,
end_positions = valid_end_positions_np)
#<<<<< end of validation declaration
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert-model", default='bert-base', type=str)
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=320,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--seed',
type=int,
default=0,
help="random seed for initialization")
parser.add_argument('--doc_stride',
type=int,
default=128)
parser.add_argument('--max_query_length',
type=int,
default=30)
parser.add_argument('--max_answer_length',
type=int,
default=30)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
gen(args)
if __name__=="__main__":
    main()
| 42.100719 | 134 | 0.641319 | [
"Apache-2.0"
] | AndrewSchoeller/BERT-for-RRC-ABSA | pytorch-pretrained-bert/src/gen_pt_squad.py | 5,852 | Python |
from logging import getLogger
import gokart
import luigi
import swifter # noqa
from dajare_detector.utils.base_task import DajareTask
from dajare_detector.preprocessing.make_kana_pattern import MakeKanaPattern
from dajare_detector.preprocessing.make_splited_pattern import MakeSplitedPattern
from dajare_detector.preprocessing.decide_kana_pattern import DecideKanaPattern
from dajare_detector.preprocessing.normalize_kana_pattern import NormalizeKanaPattern
logger = getLogger(__name__)
class MakeDecideKanaFeature(DajareTask):
"""カタカナの繰り返しが発生したか"""
target = gokart.TaskInstanceParameter()
split_window_size = luigi.IntParameter()
def requires(self):
kana_task = NormalizeKanaPattern(target=MakeKanaPattern(
target=self.target))
split_task = MakeSplitedPattern(
target=kana_task, split_window_size=self.split_window_size)
return DecideKanaPattern(split_pattern_target=split_task,
kana_pattern_target=kana_task,
split_window_size=self.split_window_size)
def run(self):
df = self.load_data_frame().reset_index(drop=True)
df[f'decide_kana_{self.split_window_size}'] = df[
'decide_kana_flag_list'].swifter.apply(lambda x: 1
if any(x) else 0)
self.dump(df[['_id', f'decide_kana_{self.split_window_size}']])
| 39.75 | 85 | 0.715584 | [
"MIT"
] | vaaaaanquish/dajare-detector | dajare_detector/featurize/make_decide_kana_feature.py | 1,461 | Python |
#!/usr/bin/python3
# System imports
import argparse
import sys
import serial
# Data processing imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
def checkparams(pwm_freq, pwm_duty, num_samples):
check_ok = True
    if pwm_freq < 20 or pwm_freq > 100:
        print("PWM frequency must be in the [20, 100] kHz interval.")
        check_ok = False
    if pwm_duty < 5 or pwm_duty > 80:
        print("PWM duty cycle must be in the [5, 80] percent interval.")
        check_ok = False
    if num_samples < 1 or num_samples > 20000:
        print("Number of samples must be in the [1, 20000] interval.")
        check_ok = False
    if not check_ok:
        sys.exit(1)
def main(baudrate, pwm_freq, pwm_duty, num_samples, delays_file):
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
rtscts=0
)
if not ser.is_open:
print("Error opening serial port device.")
sys.exit(1)
checkparams(pwm_freq, pwm_duty, num_samples)
print("Params OK!")
delays = np.empty(num_samples)
ser.write(str.encode('{},{},{}\r\n'.format(
pwm_freq,
pwm_duty,
num_samples)))
timer_frequency = int(ser.readline().strip()) # MHz
ser.write(str.encode('\n')); # start measurement
for i in range(num_samples):
delays[i] = int(ser.readline().strip())
ser.close()
delays *= (1e-6 / timer_frequency);
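    # Discard the first two samples (assumed here to be start-up transients).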
delays = np.delete(delays, 0);
delays = np.delete(delays, 0);
print("min: {}, avg: {}, max = {}".format(
np.min(delays),
np.mean(delays),
np.max(delays)));
print("std: ", np.std(delays))
    # np.save creates/overwrites the output file itself, so no separate file handle is opened here.
    np.save(delays_file, delays)
# mean = np.mean(delays);
# maxi = np.max(delays);
# mini = np.min(delays);
# # sns.distplot(delays, norm_hist=True);
# # plt.show();
# #
# delays *= 1e6;
# plt.plot(delays)
# plt.ylabel('Vrijeme kašnjenja (${\mu}s$)')
# plt.xlabel('Uzorci (padajući brid odziva)')
# plt.show()
# plt.figure(0)
# n, bins, patches = plt.hist(delays, 50, normed=True,
# histtype='step');
# y = mlab.normpdf(bins,
# np.mean(delays),
# np.std(delays))
# plt.show()
# plt.figure(1)
# plt.plot(bins, y)
# plt.xlabel('Vrijeme kašnjenja (${\mu}s$)')
# plt.ylabel('Funkcija gustoće vjerojatnosti')
# plt.show();
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--baudrate', type=int, default=115200)
parser.add_argument('--pwm_freq', type=int, default=20)
parser.add_argument('--pwm_duty', type=int, default=50)
parser.add_argument('--num_samples', type=int, default=20000)
parser.add_argument('--delays_file', type=str, default='novo.npy')
ARGS, other = parser.parse_known_args()
main(ARGS.baudrate, ARGS.pwm_freq, ARGS.pwm_duty, ARGS.num_samples,
ARGS.delays_file);
| 25.504065 | 73 | 0.613006 | [
"MIT"
] | dumpram/stm32_real_time_test | scripts/test.py | 3,141 | Python |
# -*- coding: utf-8 -*-
# Guestbook
# 1. A newly created package directory must contain an __init__.py file, otherwise it cannot be imported by other files (from ...) or read/write files along its path.
# 2. In urls.py, set the first-level route name "ask". In .../mysite/mysite/urls.py: url(r'^ask/', include('account.ask.urls')),
# 3. In admin.py, configure how the model is shown in the admin. In .../mysite/account/admin.py: @admin.register(Technologyask) ...
# 4. In templates, add the template directory /ask
import datetime
import os
import json
from django.shortcuts import render
from django.http.response import HttpResponseRedirect,HttpResponse
from . models import Guestbook,Reply
from django.contrib.auth.decorators import login_required  # note: requires LOGIN_URL = '/login/' in settings.py
from django.contrib.auth.models import User
from myAPI.pageAPI import djangoPage
from django.contrib import messages
PAGE_NUM = 20  # number of entries displayed per page
# http://localhost:9000/guestbook/reply/
#@login_required
def reply(request):
if request.method != 'POST':
return render(request, 'guestbook/reply.html', context=locals())
title = request.POST['title']
content = request.POST['content']
    Guestbook.objects.filter(title=title).update(state=1)  # mark the entry as answered
    if request.user.username == 'admin':  # reply from admin
Reply.objects.filter(title=title).update(content=content )
Reply.objects.filter(title=title).update(username = 'admin' )
Reply.objects.filter(title=title).update(date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") )
return HttpResponseRedirect('/guestbook/showreply/')
@login_required
def gettitle(request):
title = request.GET.get('title','')
if title == '':
return HttpResponse('no')
return render(request, 'guestbook/reply.html', context=locals())
# http://localhost:9000/guestbook/create/
@login_required
def create(request):
if request.method != 'POST':
return render(request, 'guestbook/create.html', context=locals())
title = request.POST['title']
content = request.POST['content']
istitle = Guestbook.objects.filter(title = title)
if istitle:
messages.info(request, '告警:标题 '+ title + '已经被使用!')
return HttpResponseRedirect('/guestbook/show/')
if content:
guestbooks = Guestbook(username=request.user,title=title,content=content)
guestbooks.save()
guestbookname = Guestbook.objects.get(title=title).username
replys = Reply(guestbookname=guestbookname,title=title)
replys.save()
else:
messages.info(request,'告警:留言内容为空!')
return HttpResponseRedirect('/guestbook/show/')
# http://localhost:9000/guestbook/show/
@login_required
def show(request, page):
if request.user.is_superuser:
guestbooks = Guestbook.objects.filter().order_by('-date','-id')
        guestbooks, pageList, paginator, page = djangoPage(guestbooks, page, PAGE_NUM)  # call the pagination helper
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showall.html', context=locals())
guestbooks = Guestbook.objects.filter(username=request.user.username).order_by('-date', '-id')
    guestbooks, pageList, paginator, page = djangoPage(guestbooks, page, PAGE_NUM)  # call the pagination helper
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/show.html', context=locals())
# http://localhost:9000/guestbook/showreply/
@login_required
def showreply(request, page):
title = request.GET.get('title','')
if title != '':
replys = Reply.objects.filter(title=title)
else:
replys = Reply.objects.filter(username=request.user).order_by('-date', '-id')
    replys, pageList, paginator, page = djangoPage(replys, page, PAGE_NUM)  # call the pagination helper
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showreply.html', context=locals())
| 43.088889 | 114 | 0.690304 | [
"Apache-2.0"
] | wcl6005/testgit | mysite/guestbook/guestbook.py | 4,146 | Python |
"""This module contains the HelpCommandHandler class."""
from telegram import Update
from telegram.ext import CommandHandler, CallbackContext
import utils.helper as helper
class HelpCommandHandler(CommandHandler):
"""Handler for /help command"""
def __init__(self):
CommandHandler.__init__(self, "help", callback)
def callback(update: Update, _: CallbackContext):
"""Print the help text for a /start or /help command"""
update.message.reply_text(helper.create_help_text())
| 27.944444 | 59 | 0.747515 | [
"MIT"
] | walkerjens/telegram.ongabot | ongabot/handler/helpcommand.py | 503 | Python |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print the list of available maps according to the game."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from pysc2 import run_configs
def main(unused_argv):
with run_configs.get().start(want_rgb=False) as controller:
available_maps = controller.available_maps()
print("\n")
print("Local map paths:")
for m in sorted(available_maps.local_map_paths):
print(" ", m)
print()
print("Battle.net maps:")
for m in sorted(available_maps.battlenet_map_names):
print(" ", m)
if __name__ == "__main__":
app.run(main)
| 31 | 74 | 0.730921 | [
"Apache-2.0"
] | rainwangphy/pysc2 | pysc2/bin/battle_net_maps.py | 1,271 | Python |
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
excluded_files = []
def check_config_style(filepath):
bad_count_file = 0
    # Note: the two helpers below are never called and reference names (closingStack,
    # closing, Literal, closingFor) that are not defined in this file.
    def pushClosing(t):
        closingStack.append(closing.expr)
        closing << Literal(closingFor[t[0]])
    def popClosing():
        closing << closingStack.pop()
with open(filepath, 'r', encoding='utf-8', errors='ignore') as file:
content = file.read()
# Store all brackets we find in this file, so we can validate everything on the end
brackets_list = []
# To check if we are in a comment block
isInCommentBlock = False
checkIfInComment = False
# Used in case we are in a line comment (//)
ignoreTillEndOfLine = False
# Used in case we are in a comment block (/* */). This is true if we detect a * inside a comment block.
# If the next character is a /, it means we end our comment block.
checkIfNextIsClosingBlock = False
# We ignore everything inside a string
isInString = False
# Used to store the starting type of a string, so we can match that to the end of a string
inStringType = '';
lastIsCurlyBrace = False
checkForSemiColumn = False
# Extra information so we know what line we find errors at
lineNumber = 1
indexOfCharacter = 0
# Parse all characters in the content of this file to search for potential errors
for c in content:
if (lastIsCurlyBrace):
lastIsCurlyBrace = False
if c == '\n': # Keeping track of our line numbers
lineNumber += 1 # so we can print accurate line number information when we detect a possible error
if (isInString): # while we are in a string, we can ignore everything else, except the end of the string
if (c == inStringType):
isInString = False
# if we are not in a comment block, we will check if we are at the start of one or count the () {} and []
elif (isInCommentBlock == False):
# This means we have encountered a /, so we are now checking if this is an inline comment or a comment block
if (checkIfInComment):
checkIfInComment = False
if c == '*': # if the next character after / is a *, we are at the start of a comment block
isInCommentBlock = True
elif (c == '/'): # Otherwise, will check if we are in an line comment
ignoreTillEndOfLine = True # and an line comment is a / followed by another / (//) We won't care about anything that comes after it
if (isInCommentBlock == False):
if (ignoreTillEndOfLine): # we are in a line comment, just continue going through the characters until we find an end of line
if (c == '\n'):
ignoreTillEndOfLine = False
else: # validate brackets
if (c == '"' or c == "'"):
isInString = True
inStringType = c
elif (c == '/'):
checkIfInComment = True
elif (c == '('):
brackets_list.append('(')
elif (c == ')'):
if (len(brackets_list) > 0 and brackets_list[-1] in ['{', '[']):
print("ERROR: Possible missing round bracket ')' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append(')')
elif (c == '['):
brackets_list.append('[')
elif (c == ']'):
if (len(brackets_list) > 0 and brackets_list[-1] in ['{', '(']):
print("ERROR: Possible missing square bracket ']' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append(']')
elif (c == '{'):
brackets_list.append('{')
elif (c == '}'):
lastIsCurlyBrace = True
if (len(brackets_list) > 0 and brackets_list[-1] in ['(', '[']):
print("ERROR: Possible missing curly brace '}}' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append('}')
else: # Look for the end of our comment block
if (c == '*'):
checkIfNextIsClosingBlock = True;
elif (checkIfNextIsClosingBlock):
if (c == '/'):
isInCommentBlock = False
elif (c != '*'):
checkIfNextIsClosingBlock = False
indexOfCharacter += 1
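        # After scanning the whole file, mismatched totals indicate an unbalanced bracket somewhere in it.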
if brackets_list.count('[') != brackets_list.count(']'):
print("ERROR: A possible missing square bracket [ or ] in file {0} [ = {1} ] = {2}".format(filepath,brackets_list.count('['),brackets_list.count(']')))
bad_count_file += 1
if brackets_list.count('(') != brackets_list.count(')'):
print("ERROR: A possible missing round bracket ( or ) in file {0} ( = {1} ) = {2}".format(filepath,brackets_list.count('('),brackets_list.count(')')))
bad_count_file += 1
if brackets_list.count('{') != brackets_list.count('}'):
print("ERROR: A possible missing curly brace {{ or }} in file {0} {{ = {1} }} = {2}".format(filepath,brackets_list.count('{'),brackets_list.count('}')))
bad_count_file += 1
return bad_count_file
def main():
print("Validating Config Style")
for test in excluded_files:
print("Excluded File: ",test)
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default="")
args = parser.parse_args()
# Allow running from root directory as well as from inside the tools directory
rootDir = "Swamp Aux/"
for root, dirnames, filenames in os.walk(rootDir + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.cpp'):
sqf_list.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.hpp'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
if (filename not in excluded_files):
bad_count = bad_count + check_config_style(filename)
print("------\nChecked {0} files\nErrors detected: {1}".format(len(sqf_list), bad_count))
if (bad_count == 0):
print("Config validation PASSED")
else:
print("Config validation FAILED")
return bad_count
if __name__ == "__main__":
    sys.exit(main())
| 45.651899 | 164 | 0.539859 | [
"MIT"
] | jdoxley/Swamp-Aux | tools/config_style_checker.py | 7,213 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import django.dispatch
# Signal to inform application about ready .mo files, so server will know
# when to restart itself.
post_compilemessages = django.dispatch.Signal()
| 24.333333 | 73 | 0.739726 | [
"MIT"
] | VorskiImagineering/django-C3PO | django_c3po/signals.py | 219 | Python |
# Copyright 2019 BlueCat Networks. All rights reserved.
import ipaddress
from flask import request, g, abort, jsonify
from bluecat.api_exception import PortalException, APIException
from bluecat import route, util
from main_app import app
# application config
# Define global variable to hold handle to API object
api = None
#
# GET, PUT or POST
#
@route(app, '/lumeta/getnetworklist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def get_networks_get_networks_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
configurations = None
configurations_json = []
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
print (c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
configurations_json.append(configuration_json)
return jsonify(configurations_json)
@route(app, '/lumeta/getiplist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def getiplist_getiplist_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
networks = []
# Return object that contains all the networks (and eventually all ip addresses)
# list of all properties objects
ip_addresses = []
# If name is given, use get_configuration(name)
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
print(c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
# FIXME - need code to get network list from configuration id. Is there a call to get children_of_types
# (['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network'
# use get_by_object_types(*, ['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network']) - returns flat list
# We might want to request IP4Network, IP6Network
# FIXME - extract below code in a function and call it for IP4Block and IP6Block
try:
for nw in c.get_children_of_type('IP4Block'):
print(nw)
# get all blocks and networks for block
for n in g.user.get_api().get_by_object_types(nw.get_property('CIDR'),
['IP4Network', 'IP4Block', 'IP6Network', 'IP6Block']):
if '6' in n.get_type():
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['prefix']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
else:
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['CIDR']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
except Exception as e:
                app.logger.error('get_subnets: ' + str(e))
return jsonify(ip_addresses)
def calculate_network_stats(bam_network, config_id, config_name):
if bam_network.get_type() == 'IP4Network':
network_address = bam_network.get_property('CIDR')
network = ipaddress.ip_network(network_address)
else:
network_address = bam_network.get_property('prefix')
network = ipaddress.ip_network(network_address)
ip_addresses = []
ip_data = {}
if bam_network.get_type() == 'IP4Network':
# run below for IP4Address, IP6Address - properties will be populated as well
for n in bam_network.get_children_of_type('IP4Address'):
# Sometimes below list contains all ip addresses and sometimes only one for gateway address
# Look through n.get_properties() and add them to ip_data
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
next_address = bam_network.get_next_available_ip4_address()
else:
for n in bam_network.get_children_of_type('IP6Address'):
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
#return network_data
return ip_addresses
def calculate_block_stats(bam_block, config_id, config_name):
if bam_block.get_type() == 'IP6Block':
block_address = bam_block.get_property('prefix')
block = ipaddress.ip_network(block_address)
else:
block_address = bam_block.get_property('CIDR')
# block = ipaddress.ip_network(block_address, config_id, config_name)
block = ipaddress.ip_network(block_address)
block_data = {}
block_data_list = []
if bam_block.get_type() == 'IP4Block':
for network in bam_block.get_ip4_networks():
return_data = calculate_network_stats(network, config_id, config_name)
# This constructs adding network as key with all values that were returned from calculate network stats
block_data_list.extend(return_data)
for found_block in bam_block.get_ip4_blocks():
return_data = calculate_block_stats(found_block, config_id, config_name)
block_data_list.extend(return_data)
next_address = bam_block.get_next_available_ip4_address()
if next_address != '':
block_data.update({'next_available_address': next_address})
try:
next_available = bam_block.get_next_available_ip4_network(256, auto_create=False)
block_data.update({'next_available_network': next_available})
except APIException as e:
# Nothing to do here since we aren't adding anything to the object
next_available = ''
elif bam_block.get_type() == 'IP6Block':
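        # Note: the IPv6 results computed below are never appended to block_data_list.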
for network in bam_block.get_ip6_networks():
return_data = calculate_network_stats(network, config_id, config_name)
for found_block in bam_block.get_ip6_blocks():
return_data = calculate_block_stats(found_block, config_id, config_name)
else:
next_available = ''
return block_data_list
# to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag)
#
# GET, PUT or POST
@route(app, '/lumeta/addiplist', methods=['GET', 'PUT', 'POST'])
# @util.rest_workflow_permission_required('addiplist_page')
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def addiplist_addiplist_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
rdata_arr = request.get_json()
stats = {}
global api
for rdata in rdata_arr:
config_name = rdata["config_name"]
add_network = rdata["add_network_block"]
device_list = rdata["deviceList"]
added_ips = 0
dup_ips = 0
# Get API object up front and use it going forward. That way, auth key doesn't expire on us
# when we are midway in processing
api = g.user.get_api()
print(add_network)
print(device_list)
config = api.get_configuration(config_name)
for device in device_list:
print(device["ip"])
(added_ip, dup_ip, ip) = add_device(device, config, add_network)
added_ips += added_ip
dup_ips += dup_ip
# Add tag if ip was added
if added_ip == 1:
add_tag(ip)
stats.update({config_name: {"added_ips": added_ips, "dup_ips": dup_ips}})
return jsonify(stats)
def add_device(device, config, add_network):
# Algorithm to add ip to BAM
# check if block exists for this ip address.
try:
ip = device["ip"]
mac = ''
mac = device["mac"]
family = device["family"]
blk_data = None
dup_ip = 0
added_ip = 0
ip_obj = None
if family == '4':
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
else:
blk_data = config.get_ip_range_by_ip('IP6Block', ip)
# if block exists, check for network
network_data = None
if family == '4':
network_data = config.get_ip_range_by_ip('IP4Network', ip)
else:
network_data = config.get_ip_range_by_ip('IP6Network', ip)
# If Block and Network exists, add ip address
# currently, assigning ip address is throwing API exception:Server raised fault: "Duplicate of another item"
# Need to see how we can catch it
if blk_data is not None and network_data is not None:
# Add ip address
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
# If no block exists and add_network is set to true, create Block with /32, create Network with /32 and then
# create ip with /32
except PortalException as e:
# No block address containing input ip address exists. Check the flag and create one
if add_network:
try:
# Add Block, then network and finally add ip
# Below line is returning BAMException - IPv4 Blocks cannot be in size of /31 and /32
# So, at this point, if there is no container, do not add ip address
# config.add_ip4_block_by_cidr(ip)
if blk_data is None:
# add /30 for addressblock
block_network = ipaddress.ip_network(ip + '/30', strict=False)
config.add_ip4_block_by_cidr(block_network.exploded)
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
if blk_data is not None:
# create network in block
blk_data.add_ip4_network(ip + '/32')
# create ip under above created network
network_data = config.get_ip_range_by_ip('IP4Network', ip)
if network_data is not None:
# Add ip address
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
except APIException as ex:
if "Duplicate" in ex.get_message():
dup_ip += 1
# else:
# Seeing intermittent error while adding address block, so had to stop logging error
# app.loggererror('add_ip: ' + ex.message)
except APIException as ex:
# when ip address already exists, it returns BAMException with message 'Server raised fault: "Duplicate of another item"'
# "Duplicate" in ex.get_message()
if "Duplicate" in ex.get_message():
dup_ip += 1
else:
# TODO - how to log info message and not error?
            app.logger.error('add_ip: ' + ex.get_message())
return (added_ip, dup_ip, ip_obj)
def assign_ip(network_data, ip, mac, family):
    if mac != '':
if family == '4':
ip = network_data.assign_ip4_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
ip = network_data.assign_ip6_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
if family == '4':
ip = network_data.assign_ip4_address(ip, '', '', 'MAKE_STATIC')
else:
ip = network_data.assign_ip6_address(ip, '', '', 'MAKE_STATIC')
return ip
def add_tag(ip):
tag_group = None
tag = None
try:
tag_group = api.get_tag_group_by_name("Lumeta")
# If tag group exists, chances are that tag exists as well, but just in case if it doesn't
tag = tag_group.get_tag_by_name("Discovered Device")
except PortalException as e:
if tag_group is None:
# Tag group does not exist, create one
tag_group = api.add_tag_group("Lumeta")
if tag is None:
# Get tag group object. above API to add tag group is only returning object id instead of entire object
# Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag'
tag_group = api.get_tag_group_by_name("Lumeta")
# Create Tag under Lumeta
tag = tag_group.add_tag("Discovered Device")
try:
# assign tag to ip
ip.link_entity(tag)
except APIException as ex:
print(ex.get_message())
| 41.13141 | 129 | 0.623003 | [
"Apache-2.0"
] | npatellumeta/gateway-workflows | Community/AssetManagement/lumeta_workflow_page.py | 12,833 | Python |
import dmc2gym
from domains.wrappers import ConcatObs
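# Observation variants (an interpretation, based on the keys_to_exclude arguments): "mdp" exposes the
# full state, "p" excludes velocity (position only), and "va" excludes position but tracks the previous action.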
def mdp():
return dmc2gym.make(domain_name="cartpole", task_name="swingup", keys_to_exclude=[], frame_skip=5, track_prev_action=False)
def p():
return dmc2gym.make(domain_name="cartpole", task_name="swingup", keys_to_exclude=['velocity'], frame_skip=5, track_prev_action=False)
def va():
return dmc2gym.make(domain_name="cartpole", task_name="swingup", keys_to_exclude=['position'], frame_skip=5, track_prev_action=True)
def p_concat5():
return ConcatObs(p(), 5)
def va_concat10():
return ConcatObs(va(), 10)
| 26.909091 | 137 | 0.741554 | [
"MIT"
] | zhihanyang2022/CleanRL | offpcc/domains/dmc_cartpole_su.py | 592 | Python |
import logging
from .Container import Container
class MqttBrokerContainer(Container):
def __init__(self, name, vols, network, image_store, command=None):
super().__init__(name, 'mqtt-broker', vols, network, image_store, command)
def get_startup_finished_log_entry(self):
return "mosquitto version [0-9\\.]+ running"
def deploy(self):
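        # Deploy at most once: run the broker image on the test network with MQTT port 1883 published.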
if not self.set_deployed():
return
logging.info('Creating and running MQTT broker docker container...')
self.client.containers.run(
self.image_store.get_image(self.get_engine()),
detach=True,
name=self.name,
network=self.network.name,
ports={'1883/tcp': 1883},
entrypoint=self.command)
logging.info('Added container \'%s\'', self.name)
| 32.76 | 82 | 0.634921 | [
"Apache-2.0"
] | rustammendel/nifi-minifi-cpp | docker/test/integration/minifi/core/MqttBrokerContainer.py | 819 | Python |
# Generated by Django 2.2.1 on 2019-07-10 04:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('investment_bot', '0005_amount_restrictions'),
]
operations = [
migrations.CreateModel(
name='Section_Deduction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.CharField(max_length=10)),
('employee_code', models.CharField(max_length=100)),
('section_id', models.CharField(max_length=100)),
('subsection_id', models.CharField(max_length=100)),
('amount', models.IntegerField()),
],
),
]
| 31.48 | 114 | 0.584498 | [
"MIT"
] | dreamvrutik/Investment-Chatbot | Chatbot_investment/chatbot/investment_bot/migrations/0006_section_deduction.py | 787 | Python |
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.views.generic.base import RedirectView
from rest_framework.urlpatterns import format_suffix_patterns
from Q.questionnaire.views import *
from Q.questionnaire.views.api import *
from Q.questionnaire.views.services import *
from Q.questionnaire.views.views_feed import QFeed, q_publication
api_urls = patterns('',
# just some testing (obviously)...
url(r'^projects_test/(?P<pk>[0-9]+)/$', QProjectTestDetail.as_view(), name="project-test-detail"),
# just some lite serializations for populating the project page...
url(r'^customizations_lite/$', QCustomizationLiteList.as_view(), name="customization_lite-list"),
url(r'^realizations_lite/$', QRealizationLiteList.as_view(), name="realization_lite-list"),
url(r'^projects_lite/$', QProjectLiteList.as_view(), name="project_lite-list"),
url(r'^projects_lite/(?P<pk>[0-9]+)/$', QProjectLiteDetail.as_view(), name="project_lite-detail"),
# getting project info...
url(r'^projects/$', QProjectList.as_view(), name="project-list"),
url(r'^projects/(?P<pk>[0-9]+)/$', QProjectDetail.as_view(), name="project-detail"),
# getting ontology info...
url(r'^ontologies/$', QOntologyList.as_view(), name="ontology-list"),
# getting customization info...
url(r'^customizations/$', QModelCustomizationList.as_view(), name="customization-list"),
url(r'^customizations/(?P<pk>[0-9]+)/$', QModelCustomizationDetail.as_view(), name="customization-detail"),
url(r'^customizations/cache/$', get_cached_customizations, name="customization-cache"),
# getting realization info...
url(r'^realizations/$', QModelRealizationList.as_view(), name="realization-list"),
url(r'^realizations/(?P<pk>[0-9]+)/$', QModelRealizationDetail.as_view(), name="realization-detail"),
url(r'^realizations/cache/$', get_cached_realizations, name="realization-cache"),
)
if settings.DEBUG:
# only expose pre-defined api urls in debug mode...
api_urls += patterns('', url(r'^$', api_root))
# automatically add support for different serialization formats (JSON is default)...
api_urls = format_suffix_patterns(api_urls)
services_urls = patterns('',
# testing (obviously)...
url(r'^test/$', q_services_test),
# getting pending messages...
url(r'^messages/$', get_django_messages),
# routing http calls through a proxy...
url(r'^proxy/$', q_proxy, name="proxy"),
# logging data from the client...
url(r'^log/$', q_log, name="log"),
# the WORLD-FAMOUS load-on-demand paradigm...
url(r'^load_section/(?P<section_type>[^/]+)/$', q_load_section, name="load_section"),
# joining a project...
url(r'^(?P<project_name>[^/]+)/project_join_request/$', q_project_join_request, name="project_join_request"),
# managing a project...
url(r'^(?P<project_name>[^/]+)/project_add_member/$', q_project_add_member, name="project_add_member"),
# deleting a customization...
url(r'^customization_delete/$', q_customization_delete, name="customization_delete"),
# adding a relationship...
url(r'^realization_add_relationship_value/$', q_realization_add_relationship_value, name="realization_add_relationsip_value"),
# removing a relationship...
url(r'^realization_remove_relationship_value/$', q_realization_remove_relationship_value, name="realization_remove_relationsip_value"),
# publishing a realization...
url(r'^realization_publish/$', q_realization_publish, name="realization_publish"),
)
urlpatterns = patterns('',
# RESTful API...
url(r'^api/', include(api_urls)),
# webservices (AJAX POST only) outside of RESTful API...
url(r'^services/', include(services_urls)),
# testing (obviously)...
url(r'^test/$', q_test, name="test"),
# help...
url(r'^help/$', RedirectView.as_view(url=settings.Q_HELP_URL, permanent=True), name="help"),
# customizations...
url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_customize_new, name="customize_new"),
url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<customization_name>[^/]+)/$', q_customize_existing, name="customize_existing"),
# realizations...
url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_edit_new, name="edit_new"),
url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_edit_existing, name="edit_existing"),
url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_view_new, name="view_new"),
url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_view_existing, name="view_existing"),
url(r'^(?P<project_name>[^/]+)/get/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_get_existing, name="get_existing"),
# publications (ATOM feed)...
url(r'^feed/$', QFeed(), name="feed"),
url(r'^feed/(?P<project_name>[^/]+)/$', QFeed(), name="feed_project"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/$', QFeed(), name="feed_project_ontology"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', QFeed(), name="feed_project_ontology_proxy"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/$', q_publication, name="publication_latest"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/(?P<publication_version>[^/]+)/$', q_publication, name="publication_version"),
# projects...
url(r'^(?P<project_name>[^/]+)/$', q_project, name="project"),
url(r'^(?P<project_name>[^/]+)/customize/$', q_project_customize, name="project_customize"),
url(r'^(?P<project_name>[^/]+)/manage/$', q_project_manage, name="project_manage"),
# index...
url(r'^$', 'questionnaire.views.q_index', name="index"),
)
| 46.941606 | 195 | 0.664438 | [
"MIT"
] | ES-DOC/esdoc-questionnaire | Q/questionnaire/q_urls.py | 6,431 | Python |
def pattern_sixteen(steps):
''' Pattern sixteen
9
9 8
9 8 7
9 8 7 6
9 8 7 6 5
9 8 7 6 5 4
9 8 7 6 5 4 3
9 8 7 6 5 4 3 2
9 8 7 6 5 4 3 2 1
'''
get_range = [str(i) for i in range(1, steps + 1)][::-1] # Getting range of number in string and reverse it
for gr in range(1, len(get_range) + 1):
join = ' '.join(get_range[:gr]) # Slicing values
print(join)
if __name__ == '__main__':
try:
pattern_sixteen(9)
except NameError:
print('Integer was expected')
| 21.857143 | 112 | 0.48366 | [
"MIT"
] | chandthash/nppy | Project Pattern/pattern_16.py | 612 | Python |
from __future__ import absolute_import, division, print_function
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.models import Model
# First architecture: deep feedforward network (DFN),
# also called a feedforward neural network or multilayer perceptron (MLP).
def get_dfn(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = Dense(256, activation='selu', name='Dense_selu_1')(model_input)
x = BatchNormalization(name='BN_1')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_1')(x)
x = BatchNormalization(name='BN_2')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_2')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN')
if show:
print('DFN summary:')
dfn.summary()
print()
return dfn
def get_dfn_relu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization(name='BN_1')(model_input)
x = Dense(256, activation='relu', name='Dense_relu_1')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='relu', name='Dense_relu_2')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='relu', name='Dense_relu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_relu')
if show:
print('DFN_relu summary:')
dfn.summary()
print()
return dfn
def get_dfn_selu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization()(model_input)
x = Dense(256, activation='selu', name='Dense_selu_1')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='selu', name='Dense_selu_2')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='selu', name='Dense_selu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_selu')
if show:
print('DFN_selu summary:')
dfn.summary()
print()
return dfn
| 36.085714 | 75 | 0.672605 | [
"MIT"
] | swcjack6931677/ERINN | erinn/python/models/DFN.py | 2,578 | Python |
from rest_framework import serializers, generics
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from users.models import UserProfile
# class SnippetSerializer(serializers.Serializer):
# id = serializers.IntegerField(read_only=True)
# title = serializers.CharField(required=False, allow_blank=True, max_length=100)
# code = serializers.CharField(style={'base_template': 'textarea.html'})
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
#
# def create(self, validated_data):
# """
# 给定验证过的数据创建并返回一个新的 Snippet 实例。
# """
# return Snippet.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# """
#         Update and return an existing Snippet instance, given the validated data.
# """
# instance.title = validated_data.get('title', instance.title)
# instance.code = validated_data.get('code', instance.code)
# instance.linenos = validated_data.get('linenos', instance.linenos)
# instance.language = validated_data.get('language', instance.language)
# instance.style = validated_data.get('style', instance.style)
# instance.save()
# return instance
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
class UserSerializer(serializers.ModelSerializer):
snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
class Meta:
model = UserProfile
fields = ('id', 'username', 'first_name', 'last_name', 'snippets', 'password')
| 41.577778 | 92 | 0.691074 | [
"Apache-2.0"
] | minicloudsky/MxShop | apps/snippets/serializers.py | 1,951 | Python |
"""Support for sending data to StatsD."""
import logging
import statsd
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PREFIX, EVENT_STATE_CHANGED
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ATTR = "log_attributes"
CONF_RATE = "rate"
CONF_VALUE_MAP = "value_mapping"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8125
DEFAULT_PREFIX = "hass"
DEFAULT_RATE = 1
DOMAIN = "statsd"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_ATTR, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PREFIX, default=DEFAULT_PREFIX): cv.string,
vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_VALUE_MAP): dict,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the StatsD component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
sample_rate = conf.get(CONF_RATE)
prefix = conf.get(CONF_PREFIX)
value_mapping = conf.get(CONF_VALUE_MAP)
show_attribute_flag = conf.get(CONF_ATTR)
statsd_client = statsd.StatsClient(host=host, port=port, prefix=prefix)
def statsd_event_listener(event):
"""Listen for new messages on the bus and sends them to StatsD."""
state = event.data.get("new_state")
if state is None:
return
try:
if value_mapping and state.state in value_mapping:
_state = float(value_mapping[state.state])
else:
_state = state_helper.state_as_number(state)
except ValueError:
# Set the state to none and continue for any numeric attributes.
_state = None
states = dict(state.attributes)
_LOGGER.debug("Sending %s", state.entity_id)
if show_attribute_flag is True:
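            # Report the state and every numeric attribute as separate gauges.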
if isinstance(_state, (float, int)):
statsd_client.gauge("%s.state" % state.entity_id, _state, sample_rate)
# Send attribute values
for key, value in states.items():
if isinstance(value, (float, int)):
stat = "%s.%s" % (state.entity_id, key.replace(" ", "_"))
statsd_client.gauge(stat, value, sample_rate)
else:
if isinstance(_state, (float, int)):
statsd_client.gauge(state.entity_id, _state, sample_rate)
# Increment the count
statsd_client.incr(state.entity_id, rate=sample_rate)
hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
return True
| 31.126316 | 86 | 0.630369 | [
"Apache-2.0"
] | 0x00-0xFF/home-assistant | homeassistant/components/statsd/__init__.py | 2,957 | Python |
import os
from pathlib import Path
DB_NAME = "chatapp.db"
PROJECT_PATH = Path(__file__).parents[1]
DB_PATH = os.path.join(PROJECT_PATH, "resource", DB_NAME)
PORT_MIN = 1024
PORT_MAX = 65535
DEBUG = os.getenv("CHAT_APP_DEBUG", False)
if DEBUG:
TIMEOUT = 30
else:
TIMEOUT = 0.5
| 16.941176 | 57 | 0.71875 | [
"MIT"
] | xckomorebi/ChatApp | ChatApp/settings.py | 288 | Python |
import base64
from datetime import timedelta
import logging
import time
import uuid
import warnings
import httpx
from ably.types.capability import Capability
from ably.types.tokendetails import TokenDetails
from ably.types.tokenrequest import TokenRequest
from ably.util.exceptions import AblyException, IncompatibleClientIdException
__all__ = ["Auth"]
log = logging.getLogger(__name__)
class Auth:
class Method:
BASIC = "BASIC"
TOKEN = "TOKEN"
def __init__(self, ably, options):
self.__ably = ably
self.__auth_options = options
if options.token_details:
self.__client_id = options.token_details.client_id
else:
self.__client_id = options.client_id
self.__client_id_validated = False
self.__basic_credentials = None
self.__auth_params = None
self.__token_details = None
self.__time_offset = None
must_use_token_auth = options.use_token_auth is True
must_not_use_token_auth = options.use_token_auth is False
can_use_basic_auth = options.key_secret is not None
if not must_use_token_auth and can_use_basic_auth:
# We have the key, no need to authenticate the client
# default to using basic auth
log.debug("anonymous, using basic auth")
self.__auth_mechanism = Auth.Method.BASIC
basic_key = "%s:%s" % (options.key_name, options.key_secret)
basic_key = base64.b64encode(basic_key.encode('utf-8'))
self.__basic_credentials = basic_key.decode('ascii')
return
elif must_not_use_token_auth and not can_use_basic_auth:
raise ValueError('If use_token_auth is False you must provide a key')
# Using token auth
self.__auth_mechanism = Auth.Method.TOKEN
if options.token_details:
self.__token_details = options.token_details
elif options.auth_token:
self.__token_details = TokenDetails(token=options.auth_token)
else:
self.__token_details = None
if options.auth_callback:
log.debug("using token auth with auth_callback")
elif options.auth_url:
log.debug("using token auth with auth_url")
elif options.key_secret:
log.debug("using token auth with client-side signing")
elif options.auth_token:
log.debug("using token auth with supplied token only")
elif options.token_details:
log.debug("using token auth with supplied token_details")
else:
raise ValueError("Can't authenticate via token, must provide "
"auth_callback, auth_url, key, token or a TokenDetail")
async def __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):
self.__auth_mechanism = Auth.Method.TOKEN
if token_params is None:
token_params = dict(self.auth_options.default_token_params)
else:
self.auth_options.default_token_params = dict(token_params)
self.auth_options.default_token_params.pop('timestamp', None)
if auth_options is not None:
self.auth_options.replace(auth_options)
auth_options = dict(self.auth_options.auth_options)
if self.client_id is not None:
token_params['client_id'] = self.client_id
token_details = self.__token_details
if not force and not self.token_details_has_expired():
log.debug("using cached token; expires = %d",
token_details.expires)
return token_details
self.__token_details = await self.request_token(token_params, **auth_options)
self._configure_client_id(self.__token_details.client_id)
return self.__token_details
def token_details_has_expired(self):
token_details = self.__token_details
if token_details is None:
return True
expires = token_details.expires
if expires is None:
return False
timestamp = self._timestamp()
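        # Correct for any measured skew between the local clock and the Ably server time.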
if self.__time_offset:
timestamp += self.__time_offset
return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER
async def authorize(self, token_params=None, auth_options=None):
return await self.__authorize_when_necessary(token_params, auth_options, force=True)
async def authorise(self, *args, **kwargs):
warnings.warn(
"authorise is deprecated and will be removed in v2.0, please use authorize",
DeprecationWarning)
return await self.authorize(*args, **kwargs)
async def request_token(self, token_params=None,
# auth_options
key_name=None, key_secret=None, auth_callback=None,
auth_url=None, auth_method=None, auth_headers=None,
auth_params=None, query_time=None):
token_params = token_params or {}
token_params = dict(self.auth_options.default_token_params,
**token_params)
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
log.debug("Auth callback: %s" % auth_callback)
log.debug("Auth options: %s" % self.auth_options)
if query_time is None:
query_time = self.auth_options.query_time
query_time = bool(query_time)
auth_callback = auth_callback or self.auth_options.auth_callback
auth_url = auth_url or self.auth_options.auth_url
auth_params = auth_params or self.auth_options.auth_params or {}
auth_method = (auth_method or self.auth_options.auth_method).upper()
auth_headers = auth_headers or self.auth_options.auth_headers or {}
log.debug("Token Params: %s" % token_params)
if auth_callback:
log.debug("using token auth with authCallback")
token_request = await auth_callback(token_params)
elif auth_url:
log.debug("using token auth with authUrl")
token_request = await self.token_request_from_auth_url(
auth_method, auth_url, token_params, auth_headers, auth_params)
else:
token_request = await self.create_token_request(
token_params, key_name=key_name, key_secret=key_secret,
query_time=query_time)
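        # The auth callback / auth URL may return a TokenDetails, a token string, or a
        # (possibly serialized) TokenRequest; normalize each case before requesting the token.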
if isinstance(token_request, TokenDetails):
return token_request
elif isinstance(token_request, dict) and 'issued' in token_request:
return TokenDetails.from_dict(token_request)
elif isinstance(token_request, dict):
token_request = TokenRequest.from_json(token_request)
elif isinstance(token_request, str):
return TokenDetails(token=token_request)
token_path = "/keys/%s/requestToken" % token_request.key_name
response = await self.ably.http.post(
token_path,
headers=auth_headers,
body=token_request.to_dict(),
skip_auth=True
)
AblyException.raise_for_response(response)
response_dict = response.to_native()
log.debug("Token: %s" % str(response_dict.get("token")))
return TokenDetails.from_dict(response_dict)
async def create_token_request(self, token_params=None,
key_name=None, key_secret=None, query_time=None):
token_params = token_params or {}
token_request = {}
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
if not key_name or not key_secret:
log.debug('key_name or key_secret blank')
raise AblyException("No key specified: no means to generate a token", 401, 40101)
token_request['key_name'] = key_name
if token_params.get('timestamp'):
token_request['timestamp'] = token_params['timestamp']
else:
if query_time is None:
query_time = self.auth_options.query_time
if query_time:
if self.__time_offset is None:
server_time = await self.ably.time()
local_time = self._timestamp()
self.__time_offset = server_time - local_time
token_request['timestamp'] = server_time
else:
local_time = self._timestamp()
token_request['timestamp'] = local_time + self.__time_offset
else:
token_request['timestamp'] = self._timestamp()
token_request['timestamp'] = int(token_request['timestamp'])
ttl = token_params.get('ttl')
if ttl is not None:
if isinstance(ttl, timedelta):
ttl = ttl.total_seconds() * 1000
token_request['ttl'] = int(ttl)
capability = token_params.get('capability')
if capability is not None:
token_request['capability'] = str(Capability(capability))
token_request["client_id"] = (
token_params.get('client_id') or self.client_id)
# Note: There is no expectation that the client
# specifies the nonce; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes
token_request["nonce"] = token_params.get('nonce') or self._random_nonce()
token_request = TokenRequest(**token_request)
if token_params.get('mac') is None:
# Note: There is no expectation that the client
# specifies the mac; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes.
token_request.sign_request(key_secret.encode('utf8'))
else:
token_request.mac = token_params['mac']
return token_request
@property
def ably(self):
return self.__ably
@property
def auth_mechanism(self):
return self.__auth_mechanism
@property
def auth_options(self):
return self.__auth_options
@property
def auth_params(self):
return self.__auth_params
@property
def basic_credentials(self):
return self.__basic_credentials
@property
def token_credentials(self):
if self.__token_details:
token = self.__token_details.token
token_key = base64.b64encode(token.encode('utf-8'))
return token_key.decode('ascii')
@property
def token_details(self):
return self.__token_details
@property
def client_id(self):
return self.__client_id
@property
def time_offset(self):
return self.__time_offset
def _configure_client_id(self, new_client_id):
# If new client ID from Ably is a wildcard, but preconfigured clientId is set,
# then keep the existing clientId
if self.client_id != '*' and new_client_id == '*':
self.__client_id_validated = True
return
# If client_id is defined and not a wildcard, prevent it changing, this is not supported
if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:
raise IncompatibleClientIdException(
"Client ID is immutable once configured for a client. "
"Client ID cannot be changed to '{}'".format(new_client_id), 400, 40012)
self.__client_id_validated = True
self.__client_id = new_client_id
def can_assume_client_id(self, assumed_client_id):
if self.__client_id_validated:
return self.client_id == '*' or self.client_id == assumed_client_id
elif self.client_id is None or self.client_id == '*':
return True # client ID is unknown
else:
return self.client_id == assumed_client_id
async def _get_auth_headers(self):
if self.__auth_mechanism == Auth.Method.BASIC:
# RSA7e2
if self.client_id:
return {
'Authorization': 'Basic %s' % self.basic_credentials,
'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))
}
return {
'Authorization': 'Basic %s' % self.basic_credentials,
}
else:
await self.__authorize_when_necessary()
return {
'Authorization': 'Bearer %s' % self.token_credentials,
}
def _timestamp(self):
"""Returns the local time in milliseconds since the unix epoch"""
return int(time.time() * 1000)
def _random_nonce(self):
return uuid.uuid4().hex[:16]
async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):
body = None
params = None
if method == 'GET':
body = {}
params = dict(auth_params, **token_params)
elif method == 'POST':
params = {}
body = dict(auth_params, **token_params)
from ably.http.http import Response
async with httpx.AsyncClient(http2=True) as client:
resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)
response = Response(resp)
AblyException.raise_for_response(response)
try:
token_request = response.to_native()
except ValueError:
token_request = response.text
return token_request
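if __name__ == "__main__":  # pragma: no cover - illustrative only
    # Usage sketch, not part of the library: exercises create_token_request()
    # defined above through the public AblyRest entry point. The key string,
    # ttl and client_id values are placeholders (assumptions), not real
    # credentials; no network call is made because a local timestamp is used.
    import asyncio

    async def _demo():
        from ably import AblyRest
        client = AblyRest(key="appId.keyId:keySecret")
        request = await client.auth.create_token_request(
            token_params={"ttl": 60 * 60 * 1000, "client_id": "example-client"})
        print(request.to_dict())

    asyncio.run(_demo())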
| 38.025 | 106 | 0.633867 | [
"Apache-2.0"
] | ably/ably-python | ably/rest/auth.py | 13,689 | Python |
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.shortcuts import resolve_url
from gate.views import redirect_to_gate
from gate import REDIRECT_FIELD_NAME
class GateLockMixin:
gate_url = None
permission_denied_message = ''
raise_exception = False
redirect_field_name = REDIRECT_FIELD_NAME
def get_gate_url(self):
"""
Override this method to override the gate_url attribute.
"""
gate_url = self.gate_url or settings.GATE_URL
if not gate_url:
raise ImproperlyConfigured(
'{0} is missing the gate_url attribute. Define {0}.gate_url, settings.GATE_URL, or override '
'{0}.get_gate_url().'.format(self.__class__.__name__)
)
return str(gate_url)
def get_permission_denied_message(self):
"""
Override this method to override the permission_denied_message attribute.
"""
return self.permission_denied_message
def get_redirect_field_name(self):
"""
Override this method to override the redirect_field_name attribute.
"""
return self.redirect_field_name
def handle_no_permission(self):
if self.raise_exception:
raise PermissionDenied(self.get_permission_denied_message())
path = self.request.build_absolute_uri()
resolved_gate_url = resolve_url(self.get_gate_url())
# If the gate url is the same scheme and net location then use the
# path as the "next" url.
gate_scheme, gate_netloc = urlparse(resolved_gate_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if (
(not gate_scheme or gate_scheme == current_scheme) and
(not gate_netloc or gate_netloc == current_netloc)
):
path = self.request.get_full_path()
return redirect_to_gate(
path,
resolved_gate_url,
self.get_redirect_field_name(),
)
    def lock_test_func(self, key):
        raise NotImplementedError(
            '{} is missing the implementation of the lock_test_func() method.'.format(self.__class__.__name__)
        )
    def get_lock_test_func(self):
        """
        Override this method to use a different lock_test_func method.
        """
        return self.lock_test_func
def dispatch(self, request, *args, **kwargs):
key = request.session.get('gate_key', None)
key_test_result = self.get_lock_test_func()(key)
if not key_test_result:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
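# Usage sketch: a page protected by the mixin. The view below is illustrative
# only - GATE_SECRET and the template name are assumptions, not part of the
# gate app; the only contract is that lock_test_func() returns a truthy value
# for a valid session 'gate_key'.
from django.views.generic import TemplateView


class GuardedPageView(GateLockMixin, TemplateView):
    template_name = "guarded.html"

    def lock_test_func(self, key):
        return bool(key) and key == getattr(settings, "GATE_SECRET", None)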
| 34.468354 | 109 | 0.658465 | [
"MIT"
] | n-serrette/wedding-website | gate/mixin.py | 2,723 | Python |
import random
import string
from discord import TextChannel
from discord.ext import commands
from discord.ext.tasks import loop
from discord_components import Button, ButtonStyle
from config import settings
from util.Match import Match
class Matchmaking(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.match_create_channel: TextChannel = None
self.ongoing_matches_channel: TextChannel = None
self.match_results_channel: TextChannel = None
self.match_create_message_id = None
self.queue = []
self.active_matches = {} # Match ID -> Match instance
@commands.Cog.listener()
async def on_ready(self):
self.match_create_channel = self.bot.get_channel(settings.MATCH_CREATE_CHANNEL)
self.ongoing_matches_channel = self.bot.get_channel(settings.ONGOING_MATCHES_CHANNEL)
self.match_results_channel = self.bot.get_channel(settings.MATCH_RESULTS_CHANNEL)
# Clear the match create channel
await self.match_create_channel.purge()
button = [Button(style=ButtonStyle.green, label='Enter Queue', emoji='✅', custom_id=settings.MATCHMAKING_JOIN_QUEUE_CUSTOM_ID)]
# create the queue message
        queue_msg = await self.match_create_channel.send("enter queue msg", components=button)
        self.match_create_message_id = queue_msg.id
# Start the attempt create match loop
self.attempt_create_match.start()
def handle_enter_queue(self, user_id):
if user_id in self.queue:
print(f"tried adding {user_id} to queue but they are already in it")
return
self.queue.append(user_id)
print(f"{user_id} has joined the queue")
async def handle_match_win(self, match, custom_id):
winner_id = None
if custom_id:
winner_id = custom_id.replace(settings.MATCHMAKING_ONGOING_CUSTOM_ID, '')
if winner_id:
msg = await self.match_results_channel.send(content=f"User {winner_id} won match {match.id}!")
del self.active_matches[match.id]
            match_msg = await self.ongoing_matches_channel.fetch_message(match.message_id)
            await match_msg.delete()
@loop(seconds=settings.MATCHMAKING_CREATE_MATCH_FREQUENCY)
async def attempt_create_match(self):
print(f"[Matchmaking] attempting to create a match with {len(self.queue)} members")
if len(self.queue) <= 1:
print("tried creating match with less than 2 members")
return
#split queues later on based on rank/elo
matched_players = random.sample(self.queue, 2)
u1 = matched_players[0]
u2 = matched_players[1]
await self.create_match(u1, u2)
def generate_match_id(self):
avail_chars = string.ascii_uppercase + string.digits
id_list = []
for _ in range(6):
id_list.append(random.choice(avail_chars))
generated_id = ''.join(id_list)
if generated_id not in self.active_matches:
return generated_id
return self.generate_match_id()
def get_match(self, msg_id):
for match in self.active_matches.values():
if msg_id == match.message_id:
return match
return None
async def create_match(self, u1, u2):
match_id = self.generate_match_id()
buttons = [
Button(style=ButtonStyle.grey, label=f"{u1} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u1}"),
Button(style=ButtonStyle.grey, label=f"{u2} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u2}")
]
msg = await self.ongoing_matches_channel.send(content=f"Match between {u1}, {u2}", components=buttons)
self.active_matches[match_id] = Match(match_id, msg.id, [u1, u2])
# remove them from the queue
self.queue.remove(u1)
self.queue.remove(u2)
def setup(bot):
bot.add_cog(Matchmaking(bot))
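# Minimal sketch of the util/Match.py model this cog relies on, reconstructed
# from its usage above. The attributes `id` and `message_id` are the only hard
# requirements; the `players` field name is an assumption. Kept commented out
# so it does not shadow the real import at the top of the file.
#
# class Match:
#     def __init__(self, match_id, message_id, players):
#         self.id = match_id
#         self.message_id = message_id
#         self.players = players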
| 33.475 | 135 | 0.668658 | [
"MIT"
] | DevvyDont/CraneDuels | cogs/Matchmaking.py | 4,023 | Python |
import PySimpleGUI as sg
layout = [
[sg.Text('text')],
[sg.Input('input', key= 'input1')],
[sg.Input('input', key='input2')],
[sg.Button('button', key='button1')]
]
window = sg.Window('list values - list or dict', layout)
while True:
event, values = window.Read()
if event == 'button1':
print(values['input1'])
print(values['input2'])
        # prints the button's key because that's the current event's key
print(event)
elif event is None:
break
window.Close()
| 18 | 58 | 0.627572 | [
"MIT"
] | CrazyJ36/python | pysimplegui/values_from_some_elements.py | 486 | Python |
from MonetDBtesting.sqltest import SQLTestCase
try:
from MonetDBtesting import process
except ImportError:
import process
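# Phase 1: a normal (writable) server. Views can be created and dropped, but
# INSERT/UPDATE/DELETE against a view are rejected regardless.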
with process.server(args=[],
mapiport='0',
stdin=process.PIPE,
stdout=process.PIPE,
stderr=process.PIPE) as s:
with SQLTestCase() as tc:
tc.connect(username="monetdb", password="monetdb", port=str(s.dbport))
tc.execute("select * from t1;").assertSucceeded().assertDataResultMatch([(1,)])
tc.execute("create view v1 as select * from t1;").assertSucceeded()
tc.execute("create view v2 as select * from t1;").assertSucceeded()
tc.execute("drop view v2;").assertSucceeded()
tc.execute("insert into v1 (a) values ( 2 );").assertFailed(err_message='INSERT INTO: cannot insert into view \'v1\'')
tc.execute("update v1 set a = 3 where a = 2;").assertFailed(err_message='UPDATE: cannot update view \'v1\'')
tc.execute("delete from v1 where a = 3;").assertFailed(err_message='DELETE FROM: cannot delete from view \'v1\'')
s.communicate()
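# Phase 2: restart the same database with --readonly. Schema statements (CREATE
# VIEW, DROP VIEW) are now rejected as well, while plain SELECTs still succeed.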
with process.server(args=["--readonly"],
mapiport='0',
stdin=process.PIPE,
stdout=process.PIPE,
stderr=process.PIPE) as s:
with SQLTestCase() as tc:
tc.connect(username="monetdb", password="monetdb", port=str(s.dbport))
tc.execute("select * from t1;").assertSucceeded().assertDataResultMatch([(1,)])
tc.execute("create view v2 as select * from t1;").assertFailed(err_message='Schema statements cannot be executed on a readonly database.')
tc.execute("drop view v1;").assertFailed(err_message='Schema statements cannot be executed on a readonly database.')
tc.execute("select * from v1;").assertSucceeded()
tc.execute("insert into v1 (a) values ( 1 );").assertFailed(err_message='INSERT INTO: cannot insert into view \'v1\'')
tc.execute("update v1 set a = 2 where a = 1;").assertFailed(err_message='UPDATE: cannot update view \'v1\'')
tc.execute("delete from v1 where a = 1;").assertFailed(err_message='DELETE FROM: cannot delete from view \'v1\'')
s.communicate()
| 55.75 | 146 | 0.637668 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | MonetDB/MonetDB | sql/test/mserver5-sql-readonly/Tests/mserver5-sql-readonly-view.py | 2,230 | Python |
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # keep the A-B pairing fixed by reusing the same index
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
        # B images are hyperspectral volumes stored in .h5 files, so use the HSI
        # loader here; for ordinary RGB images, load B_path with PIL as done for A.
try:
B_img = self.hsi_loader(B_path)
except KeyError:
print(B_path)
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
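        # Build a 31-band pseudo-hyperspectral cube from an RGB frame by
        # replicating channels (11 copies of B, 10 of G, 10 of R), then
        # resize and transpose to channels-first (C, H, W).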
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
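if __name__ == "__main__":
    # Standalone sanity check of the channel-stacking scheme used by stack():
    # an H x W x 3 RGB frame becomes an H x W x 31 cube (11 B + 10 G + 10 R
    # bands) before the resize/transpose. Shapes only; no dataset or options
    # object is needed, and the random input is a placeholder.
    rgb = np.random.rand(8, 8, 3)
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    cube = np.concatenate((np.stack((b,) * 11, axis=2),
                           np.stack((g,) * 10, axis=2),
                           np.stack((r,) * 10, axis=2)), axis=2)
    assert cube.shape == (8, 8, 31)
    print("pseudo-HSI cube:", np.einsum('abc->cab', cube).shape)  # (31, 8, 8)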
| 41.290909 | 122 | 0.625055 | [
"BSD-3-Clause"
] | sinhaharsh/pytorch-CycleGAN-and-pix2pix | data/unaligned_dataset.py | 4,542 | Python |
##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import os
import subprocess
class AbstractBrowser:
_binary = None
def __init__(self, url, user_data_dir):
self.user_data_dir = os.path.join(user_data_dir, self._binary)
self.url = url
if not os.path.exists(self.user_data_dir):
os.makedirs(self.user_data_dir)
@staticmethod
def _available(binary):
extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
for directory in os.environ.get("PATH", "").split(os.pathsep):
base = os.path.join(directory, binary)
options = [base] + [(base + ext) for ext in extensions]
for filename in options:
if os.path.exists(filename):
return True
return False
def _start(self, args):
print("running: " + self._binary)
try:
subprocess.check_output([self._binary] + args)
except subprocess.CalledProcessError as e:
print(e.output)
return e.returncode
except Exception as e:
print(e)
return -1
return 0
def start(self):
return -1
@staticmethod
def available():
return False
class Chrome(AbstractBrowser):
_binary = "google-chrome"
@staticmethod
def available():
return AbstractBrowser._available(Chrome._binary)
def start(self):
args = ["--app=%s" % self.url]
args += ["--user-data-dir=%s" % self.user_data_dir]
return self._start(args)
class Chromium(Chrome):
_binary = "xchromium"
@staticmethod
def available():
return AbstractBrowser._available(Chromium._binary)
class Firefox(AbstractBrowser):
_binary = "firefox"
@staticmethod
def available():
return AbstractBrowser._available(Firefox._binary)
def start(self):
args = ["--profile", self.user_data_dir]
args += ["--no-remote"]
args += [self.url]
return self._start(args)
class Browser:
def __init__(self, url, user_data_dir=None):
self.client = None
for cls in [Chrome, Chromium, Firefox]:
if cls.available():
self.client = cls(url, user_data_dir)
break
if self.client is None:
raise Exception("No suitable client found!")
def start(self):
return self.client.start()
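if __name__ == "__main__":
    # Usage sketch: open an app-mode window with whichever client is installed.
    # The URL and the throwaway profile directory are placeholders (assumptions).
    import tempfile
    exit_code = Browser("http://127.0.0.1:8080", user_data_dir=tempfile.mkdtemp()).start()
    print("browser exited with", exit_code)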
| 26.074074 | 79 | 0.646591 | [
"MIT"
] | wendlers/edubot-snap | src/edubot/client.py | 3,520 | Python |
from project.hardware.hardware import Hardware
class HeavyHardware(Hardware):
TYPE = "Heavy"
def __init__(self, name, capacity, memory):
super().__init__(name, self.TYPE, capacity * 2, int(memory * 0.75))
| 24.888889 | 75 | 0.691964 | [
"MIT"
] | geodimitrov/Python-OOP-SoftUni | Exam-Prep/Exam_16-Aug-20/project/hardware/heavy_hardware.py | 224 | Python |
import logging, ast, os
from bisect import bisect_left, bisect
import louie as dispatcher
from twisted.internet import reactor
from rdflib import Literal
from light9 import showconfig
from light9.namespaces import L9, RDF, RDFS
from rdfdb.patch import Patch
log = logging.getLogger()
# todo: move to config, consolidate with ascoltami, musicPad, etc
introPad = 4
postPad = 4
class Curve(object):
"""curve does not know its name. see Curveset"""
def __init__(self, uri, pointsStorage='graph'):
self.uri = uri
self.pointsStorage = pointsStorage
self.points = [] # x-sorted list of (x,y)
self._muted = False
def __repr__(self):
return "<%s %s (%s points)>" % (self.__class__.__name__, self.uri,
len(self.points))
def muted():
doc = "Whether to currently send levels (boolean, obviously)"
def fget(self):
return self._muted
def fset(self, val):
self._muted = val
dispatcher.send('mute changed', sender=self)
return locals()
muted = property(**muted())
def toggleMute(self):
self.muted = not self.muted
def load(self, filename):
self.points[:] = []
for line in open(filename):
x, y = line.split()
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def set_from_string(self, pts):
self.points[:] = []
vals = pts.split()
pairs = list(zip(vals[0::2], vals[1::2]))
for x, y in pairs:
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def points_as_string(self):
def outVal(x):
if isinstance(x, str): # markers
return x
return "%.4g" % x
return ' '.join(
"%s %s" % (outVal(p[0]), outVal(p[1])) for p in self.points)
def save(self, filename):
# this is just around for markers, now
if filename.endswith('-music') or filename.endswith('_music'):
print("not saving music track")
return
f = open(filename, 'w')
for p in self.points:
f.write("%s %r\n" % p)
f.close()
def eval(self, t, allow_muting=True):
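        # Piecewise-linear interpolation over the sorted point list: values are
        # clamped to the first/last point outside the range, otherwise linearly
        # interpolated between the two neighbouring points.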
if self.muted and allow_muting:
return 0
if not self.points:
raise ValueError("curve has no points")
i = bisect_left(self.points, (t, None)) - 1
if i == -1:
return self.points[0][1]
if self.points[i][0] > t:
return self.points[i][1]
if i >= len(self.points) - 1:
return self.points[i][1]
p1, p2 = self.points[i], self.points[i + 1]
frac = (t - p1[0]) / (p2[0] - p1[0])
y = p1[1] + (p2[1] - p1[1]) * frac
return y
__call__ = eval
def insert_pt(self, new_pt):
"""returns index of new point"""
i = bisect(self.points, (new_pt[0], None))
self.points.insert(i, new_pt)
# missing a check that this isn't the same X as the neighbor point
dispatcher.send("points changed", sender=self)
return i
def live_input_point(self, new_pt, clear_ahead_secs=.01):
x, y = new_pt
exist = self.points_between(x, x + clear_ahead_secs)
for pt in exist:
self.remove_point(pt)
self.insert_pt(new_pt)
dispatcher.send("points changed", sender=self)
# now simplify to the left
def set_points(self, updates):
for i, pt in updates:
self.points[i] = pt
# this should be on, but live_input_point made it fail a
# lot. need a new solution.
#self.checkOverlap()
dispatcher.send("points changed", sender=self)
def checkOverlap(self):
x = None
for p in self.points:
if p[0] <= x:
raise ValueError("overlapping points")
x = p[0]
def pop_point(self, i):
p = self.points.pop(i)
dispatcher.send("points changed", sender=self)
return p
def remove_point(self, pt):
self.points.remove(pt)
dispatcher.send("points changed", sender=self)
def indices_between(self, x1, x2, beyond=0):
leftidx = max(0, bisect(self.points, (x1, None)) - beyond)
rightidx = min(len(self.points),
bisect(self.points, (x2, None)) + beyond)
return list(range(leftidx, rightidx))
def points_between(self, x1, x2):
"""returns (x,y) points"""
return [self.points[i] for i in self.indices_between(x1, x2)]
def point_before(self, x):
"""(x,y) of the point left of x, or None"""
leftidx = self.index_before(x)
if leftidx is None:
return None
return self.points[leftidx]
def index_before(self, x):
leftidx = bisect(self.points, (x, None)) - 1
if leftidx < 0:
return None
return leftidx
class CurveResource(object):
"""
holds a Curve, deals with graphs
"""
def __init__(self, graph, uri):
# probably newCurve and loadCurve should be the constructors instead.
self.graph, self.uri = graph, uri
def curvePointsContext(self):
return self.uri
def newCurve(self, ctx, label):
"""
Save type/label for a new :Curve resource.
Pass the ctx where the main curve data (not the points) will go.
"""
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
self.graph.patch(
Patch(addQuads=[
(self.uri, RDF.type, L9['Curve'], ctx),
(self.uri, RDFS.label, label, ctx),
]))
self.curve = Curve(self.uri)
self.curve.points.extend([(0, 0)])
self.saveCurve()
self.watchCurvePointChanges()
def loadCurve(self):
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
pointsFile = self.graph.value(self.uri, L9['pointsFile'])
self.curve = Curve(self.uri,
pointsStorage='file' if pointsFile else 'graph')
if hasattr(self.graph, 'addHandler'):
self.graph.addHandler(self.pointsFromGraph)
else:
# given a currentState graph
self.pointsFromGraph()
def pointsFromGraph(self):
pts = self.graph.value(self.uri, L9['points'])
if pts is not None:
self.curve.set_from_string(pts)
else:
diskPts = self.graph.value(self.uri, L9['pointsFile'])
if diskPts is not None:
self.curve.load(os.path.join(showconfig.curvesDir(), diskPts))
else:
log.warn("curve %s has no points", self.uri)
self.watchCurvePointChanges()
def saveCurve(self):
self.pendingSave = None
for p in self.getSavePatches():
self.graph.patch(p)
def getSavePatches(self):
if self.curve.pointsStorage == 'file':
log.warn("not saving file curves anymore- skipping %s" % self.uri)
#cur.save("%s-%s" % (basename,name))
return []
elif self.curve.pointsStorage == 'graph':
return [
self.graph.getObjectPatch(self.curvePointsContext(),
subject=self.uri,
predicate=L9['points'],
newObject=Literal(
self.curve.points_as_string()))
]
else:
raise NotImplementedError(self.curve.pointsStorage)
def watchCurvePointChanges(self):
"""start watching and saving changes to the graph"""
dispatcher.connect(self.onChange, 'points changed', sender=self.curve)
def onChange(self):
# Don't write a patch for the edited curve points until they've been
# stable for this long. This can be very short, since it's just to
# stop a 100-point edit from sending many updates. If it's too long,
# you won't see output lights change while you drag a point. Todo:
# this is just the wrong timing algorithm- it should be a max rate,
# not a max-hold-still-time.
HOLD_POINTS_GRAPH_COMMIT_SECS = .1
if getattr(self, 'pendingSave', None):
self.pendingSave.cancel()
self.pendingSave = reactor.callLater(HOLD_POINTS_GRAPH_COMMIT_SECS,
self.saveCurve)
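class _MaxRateCurveSaver(object):
    """Sketch of the max-rate alternative described in onChange() above (not
    wired in anywhere): emit at most one save per interval instead of waiting
    for the points to hold still. Relies only on reactor.callLater, which this
    module already imports; the .1s interval is an assumption."""

    def __init__(self, save, min_interval=.1):
        self.save, self.min_interval = save, min_interval
        self._pending = None

    def poke(self):
        # called on every edit; schedules a save unless one is already queued
        if self._pending is None or not self._pending.active():
            self._pending = reactor.callLater(self.min_interval, self._fire)

    def _fire(self):
        self._pending = None
        self.save()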
class Markers(Curve):
"""Marker is like a point but the y value is a string"""
def eval(self):
raise NotImplementedError()
def slope(p1, p2):
if p2[0] == p1[0]:
return 0
return (p2[1] - p1[1]) / (p2[0] - p1[0])
class Curveset(object):
def __init__(self, graph, session):
self.graph, self.session = graph, session
self.currentSong = None
self.curveResources = {} # uri : CurveResource
self.markers = Markers(uri=None, pointsStorage='file')
graph.addHandler(self.loadCurvesForSong)
def curveFromUri(self, uri):
return self.curveResources[uri].curve
def loadCurvesForSong(self):
"""
current curves will track song's curves.
This fires 'add_curve' dispatcher events to announce the new curves.
"""
log.info('loadCurvesForSong')
dispatcher.send("clear_curves")
self.curveResources.clear()
self.markers = Markers(uri=None, pointsStorage='file')
self.currentSong = self.graph.value(self.session, L9['currentSong'])
if self.currentSong is None:
return
for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
try:
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.loadCurve()
curvename = self.graph.label(uri)
if not curvename:
raise ValueError("curve %r has no label" % uri)
dispatcher.send("add_curve",
sender=self,
uri=uri,
label=curvename,
curve=cr.curve)
except Exception as e:
log.error("loading %s failed: %s", uri, e)
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
try:
self.markers.load("%s.markers" % basename)
except IOError:
print("no marker file found")
def save(self):
"""writes a file for each curve with a name
like basename-curvename, or saves them to the rdf graph"""
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
patches = []
for cr in list(self.curveResources.values()):
patches.extend(cr.getSavePatches())
self.markers.save("%s.markers" % basename)
# this will cause reloads that will rebuild our curve list
for p in patches:
self.graph.patch(p)
def sorter(self, name):
return self.curves[name].uri
def curveUrisInOrder(self):
return sorted(self.curveResources.keys())
def currentCurves(self):
# deprecated
for uri, cr in sorted(self.curveResources.items()):
with self.graph.currentState(tripleFilter=(uri, RDFS['label'],
None)) as g:
yield uri, g.label(uri), cr.curve
def globalsdict(self):
raise NotImplementedError('subterm used to get a dict of name:curve')
def get_time_range(self):
return 0, dispatcher.send("get max time")[0][1]
def new_curve(self, name):
if isinstance(name, Literal):
name = str(name)
uri = self.graph.sequentialUri(self.currentSong + '/curve-')
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.newCurve(ctx=self.currentSong, label=Literal(name))
s, e = self.get_time_range()
cr.curve.points.extend([(s, 0), (e, 0)])
ctx = self.currentSong
self.graph.patch(
Patch(addQuads=[
(self.currentSong, L9['curve'], uri, ctx),
]))
cr.saveCurve()
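if __name__ == "__main__":
    # Small interpolation sanity check for Curve.eval(); the URI is just a
    # placeholder string, since eval() only consults the in-memory point list
    # and no graph or show configuration is touched.
    _c = Curve(uri="urn:example:curve")
    _c.insert_pt((0.0, 0.0))
    _c.insert_pt((10.0, 1.0))
    print(_c(5.0))  # halfway between the two points -> 0.5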
| 33.025974 | 78 | 0.562407 | [
"MIT"
] | drewp/light9 | light9/curvecalc/curve.py | 12,715 | Python |
import paddle
import paddle.nn as nn
class ContrastiveLoss(nn.Layer):
"""
Compute contrastive loss
"""
def __init__(self, margin=0, max_violation=False):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.max_violation = max_violation
def forward(self, scores):
# compute image-sentence score matrix
diag_idx = [[i, i] for i in range(len(scores))]
diagonal = paddle.gather_nd(scores, paddle.to_tensor(diag_idx)).unsqueeze(1)
d1 = diagonal.expand_as(scores)
d2 = paddle.transpose(d1, (1,0)).expand_as(scores)
# compare every diagonal score to scores in its column
# caption retrieval
cost_s = (self.margin + scores - d1).clip(min=0)
# compare every diagonal score to scores in its row
# image retrieval
cost_im = (self.margin + scores - d2).clip(min=0)
# clear diagonals
mask = paddle.eye(scores.shape[0]) < .5
cost_s = cost_s * mask
cost_im = cost_im * mask
# keep the maximum violating negative for each query
if self.max_violation:
cost_s = cost_s.max(1)
cost_im = cost_im.max(0)
return cost_s.sum() + cost_im.sum() | 32.921053 | 84 | 0.620304 | [
"Apache-2.0"
] | njustkmg/PaddleMM | paddlemm/models/retrieval/layers/contrastive.py | 1,251 | Python |
# coding=utf-8
import json
import falcon
from ..db import db
class LogTheError(object):
def on_post(self, req, resp):
os = str(req.body['os']).lower()
account_addr = str(req.body['account_addr']).lower()
error_str = str(req.body['error_str']).lower()
log_type = 'error'
_ = db.logs.insert_one({
'os': os,
'account_addr': account_addr,
'error_str': error_str,
'log_type': log_type
})
message = {
'success': True,
'message': 'Error reported successfully.'
}
resp.status = falcon.HTTP_200
resp.body = json.dumps(message)
| 22.7 | 60 | 0.543319 | [
"MIT"
] | baymax19/Sentinel | master-node-docker/sentinel/logs/errors.py | 681 | Python |
# Enter your code for "Degree Distribution" here.
import csv
degrees = []
students = []
for l in csv.DictReader(open("degrees.csv")):
degrees.append(l)
for l in csv.DictReader(open("students.csv")):
students.append(l)
students = sorted(students, key=lambda x: float(x["score"]))
students.reverse()
print(students)
| 18.222222 | 60 | 0.698171 | [
"BSD-2-Clause"
] | monkee52/NCSSChallenge | Degree Distribution.py | 328 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
| 35.509728 | 134 | 0.60103 | [
"ECL-2.0",
"Apache-2.0"
] | pulumi/pulumi-aws | sdk/python/pulumi_aws/ecs/tag.py | 9,126 | Python |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variation team's fieldtrail_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function will honor everything in the original ParseFlags(), but
adds on additional browser_args.
Args:
extra_args: The extra browser agruments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script run all chrome proxy integration tests that haven't been
blacklisted against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
| 33.545455 | 80 | 0.716433 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | jnpatel2811/chromium | tools/chrome_proxy/webdriver/variations_combinations.py | 4,059 | Python |
# Link for the problem : https://leetcode.com/problems/next-permutation/
class Solution(object):
def nextPermutation(self, nums):
found = False
i = len(nums)-2
while i >=0:
if nums[i] < nums[i+1]:
found =True
break
i-=1
if not found:
nums.sort()
else:
m = self.findMaxIndex(i+1,nums,nums[i])
nums[i],nums[m] = nums[m],nums[i]
nums[i+1:] = nums[i+1:][::-1]
return nums
def findMaxIndex(self,index,a,curr):
ans = -1
index = 0
for i in range(index,len(a)):
if a[i]>curr:
if ans == -1:
ans = curr
index = i
else:
ans = min(ans,a[i])
index = i
return index
ob1 = Solution()
| 23.371429 | 72 | 0.462103 | [
"MIT"
] | 5l1v3r1/CompetitiveProgrammingQuestionBank | DSA 450 GFG/next_permutation.py | 818 | Python |
#
# blood_graph.py
#
# vanilla_core execution visualizer.
#
# input: vanilla_operation_trace.csv
# vanilla_stats.csv (for timing)
# output: blood graph file (blood_abstrat/detailed.png)
# blood graph key (key_abstract/detailed.png)
#
# @author Tommy, Borna
#
# How to use:
# python blood_graph.py --trace {vanilla_operation_trace.csv}
# --stats {vanilla_stats.csv}
# --abstract {optional}
# --generate-key {optional}
# --cycle {start_cycle@end_cycle}
#
# ex) python blood_graph.py --trace vanilla_operation_trace.csv
# --stats vanilla_stats.csv
# --abstract --generate-key
# --cycle 10000@20000
#
#
# {stats} used for extracting the timing window for blood graph
# {abstract} used for abstract simplifed bloodgraph
# {generate-key} also generates a color key for the blood graph
# {cycle} used for user-specified custom timing window
#
#
# Note: You can use the "Digital Color Meter" in MacOS in order to compare
# the values from the color key to the values in the bloodgraph, if you are
# having trouble distinguishing a color.
import sys
import csv
import argparse
import warnings
import os.path
from PIL import Image, ImageDraw, ImageFont
from itertools import chain
from . import common
class BloodGraph:
# for generating the key
_KEY_WIDTH = 512
_KEY_HEIGHT = 512
# List of types of stalls incurred by the core
_STALLS_LIST = ["stall_depend_dram_load",
"stall_depend_group_load",
"stall_depend_global_load",
"stall_depend_idiv",
"stall_depend_fdiv",
"stall_depend_local_load",
"stall_depend_imul",
"stall_amo_aq",
"stall_amo_rl",
"stall_bypass",
"stall_lr_aq",
"stall_fence",
"stall_remote_req",
"stall_remote_credit",
"stall_fdiv_busy",
"stall_idiv_busy",
"stall_fcsr",
"stall_remote_ld",
"stall_ifetch_wait",
"stall_remote_flw_wb",
"stall_remote_ld_wb",
"bubble_branch_miss",
"bubble_jalr_miss",
"bubble_icache_miss"]
# List of types of integer instructions executed by the core
_INSTRS_LIST = [
"local_ld",
"local_st",
"remote_ld_dram",
"remote_ld_global",
"remote_ld_group",
"remote_st_dram",
"remote_st_global",
"remote_st_group",
"local_flw",
"local_fsw",
"remote_flw_dram",
"remote_flw_global",
"remote_flw_group",
"remote_fsw_dram",
"remote_fsw_global",
"remote_fsw_group",
# icache_miss is no longer treated as an instruction
# but treated the same as stall_ifetch_wait
# "icache_miss",
"lr",
"lr_aq",
"amoswap",
"amoor",
"amoadd",
"beq",
"bne",
"blt",
"bge",
"bltu",
"bgeu",
"jal",
"jalr",
"beq_miss",
"bne_miss",
"blt_miss",
"bge_miss",
"bltu_miss",
"bgeu_miss",
"jalr_miss",
"sll",
"slli",
"srl",
"srli",
"sra",
"srai",
"add",
"addi",
"sub",
"lui",
"auipc",
"xor",
"xori",
"or",
"ori",
"and",
"andi",
"slt",
"slti",
"sltu",
"sltiu",
"div",
"divu",
"rem",
"remu",
"mul",
"fence",
"csrrw",
"csrrs",
"csrrc",
"csrrwi",
"csrrsi",
"csrrci",
"barsend",
"barrecv",
"unknown"]
# List of types of floating point instructions executed by the core
_FP_INSTRS_LIST = ["fadd",
"fsub",
"fmul",
"fsgnj",
"fsgnjn",
"fsgnjx",
"fmin",
"fmax",
"fcvt_s_w",
"fcvt_s_wu",
"fmv_w_x",
"fmadd",
"fmsub",
"fnmsub",
"fnmadd",
"feq",
"flt",
"fle",
"fcvt_w_s",
"fcvt_wu_s",
"fclass",
"fmv_x_w",
"fdiv",
"fsqrt"]
# Coloring scheme for different types of operations
# For detailed mode
# i_cache miss is treated the same is stall_ifetch_wait
_DETAILED_STALL_BUBBLE_COLOR = {
"stall_depend_dram_load" : (0xff, 0x00, 0x00), ## red
"stall_depend_group_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_global_load" : (0x00, 0x55, 0x00), ## dark green
"stall_depend_local_load" : (0x00, 0xff, 0xff), ## cyan
"stall_depend_idiv" : (0xff, 0xf0, 0xa0), ## light orange
"stall_depend_fdiv" : (0xff, 0xf0, 0xa0), ## light orange
"stall_depend_imul" : (0xff, 0xf0, 0xa0), ## light orange
"stall_fdiv_busy" : (0x00, 0xaa, 0xff), ## dark cyan
"stall_idiv_busy" : (0x00, 0xaa, 0xff), ## dark cyan
"stall_amo_aq" : (0x8b, 0x45, 0x13), ## brown
"stall_amo_rl" : (0x8b, 0x45, 0x13), ## brown
"stall_bypass" : (0xff, 0x00, 0xff), ## pink
"stall_lr_aq" : (0x40, 0x40, 0x40), ## dark gray
"stall_fence" : (0x00, 0x00, 0x80), ## navy blue
"stall_remote_req" : (0xff, 0xff, 0x00), ## yellow
"stall_barrier" : (0x00, 0x11, 0xff), ## blue
"stall_remote_credit" : (0x80, 0x00, 0x00), ## maroon
"stall_fcsr" : (0x00, 0x55, 0xff), ## dark blue
"stall_remote_ld" : (0xaa, 0x00, 0x00), ## dark red
"stall_remote_flw_wb" : (0xff, 0xff, 0x80), ## light yellow
"stall_remote_ld_wb" : (0xff, 0xff, 0x40), ## light-ish yellow
"bubble_branch_miss" : (0x80, 0x00, 0x80), ## purple
"bubble_jalr_miss" : (0xff, 0xa5, 0x00), ## orange
"icache_miss" : (0x00, 0x00, 0xff), ## blue
"bubble_icache_miss" : (0x00, 0x00, 0xff), ## blue
"stall_ifetch_wait" : (0x00, 0x00, 0xff), ## blue
}
_DETAILED_UNIFIED_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
_DETAILED_UNIFIED_FP_INSTR_COLOR = (0xff, 0xaa, 0xff) ## light pink
# Coloring scheme for different types of operations
# For abstract mode
# i_cache miss is treated the same is stall_ifetch_wait
_ABSTRACT_STALL_BUBBLE_COLOR = {
"stall_depend_dram_load" : (0xff, 0x00, 0x00), ## red
"stall_depend_group_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_global_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_local_load" : (0x00, 0xff, 0xff), ## cyan
"stall_depend_idiv" : (0xff, 0xff, 0xff), ## white
"stall_depend_fdiv" : (0xff, 0xff, 0xff), ## white
"stall_depend_imul" : (0xff, 0xff, 0xff), ## white
"stall_fdiv_busy" : (0xff, 0xff, 0xff), ## white
"stall_idiv_busy" : (0xff, 0xff, 0xff), ## white
"stall_amo_aq" : (0x00, 0x00, 0x00), ## black
"stall_amo_rl" : (0x00, 0x00, 0x00), ## black
"stall_bypass" : (0x00, 0x00, 0x00), ## black
"stall_lr_aq" : (0x40, 0x40, 0x40), ## dark gray
"stall_fence" : (0x00, 0x00, 0x00), ## black
"stall_remote_req" : (0x00, 0x00, 0x00), ## black
"stall_barrier" : (0x00, 0x11, 0xff), ## blue
"stall_remote_credit" : (0x00, 0x00, 0x00), ## black
"stall_fcsr" : (0x00, 0x00, 0x00), ## black
"stall_remote_ld" : (0x00, 0x00, 0x00), ## black
"stall_remote_flw_wb" : (0x00, 0x00, 0x00), ## black
"stall_remote_ld_wb" : (0x00, 0x00, 0x00), ## black
"bubble_branch_miss" : (0x00, 0x00, 0x00), ## black
"bubble_jalr_miss" : (0x00, 0x00, 0x00), ## black
"icache_miss" : (0x00, 0x00, 0xff), ## blue
"bubble_icache_miss" : (0x00, 0x00, 0xff), ## blue
"stall_ifetch_wait" : (0x00, 0x00, 0xff), ## blue
}
_ABSTRACT_UNIFIED_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
_ABSTRACT_UNIFIED_FP_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
# default constructor
def __init__(self, trace_file, stats_file, cycle, abstract):
self.abstract = abstract
# Determine coloring rules based on mode {abstract / detailed}
if (self.abstract):
self.stall_bubble_color = self._ABSTRACT_STALL_BUBBLE_COLOR
self.unified_instr_color = self._ABSTRACT_UNIFIED_INSTR_COLOR
self.unified_fp_instr_color = self._ABSTRACT_UNIFIED_INSTR_COLOR
else:
self.stall_bubble_color = self._DETAILED_STALL_BUBBLE_COLOR
self.unified_instr_color = self._DETAILED_UNIFIED_INSTR_COLOR
self.unified_fp_instr_color = self._DETAILED_UNIFIED_INSTR_COLOR
# Parse vanilla operation trace file to generate traces
self.traces = self.__parse_traces(trace_file)
# Parse vanilla stats file to generate timing stats
self.stats = self.__parse_stats(stats_file)
# get tile group diemsnions
self.__get_tile_group_dim(self.traces)
# get the timing window (start and end cycle) for blood graph
self.start_cycle, self.end_cycle = self.__get_timing_window(self.traces, self.stats, cycle)
# parses vanilla_operation_trace.csv to generate operation traces
def __parse_traces(self, trace_file):
traces = []
with open(trace_file) as f:
csv_reader = csv.DictReader(f, delimiter=",")
for row in csv_reader:
trace = {}
trace["x"] = int(row["x"])
trace["y"] = int(row["y"])
trace["operation"] = row["operation"]
trace["cycle"] = int(row["cycle"])
traces.append(trace)
return traces
# Parses vanilla_stats.csv to generate timing stats
# to gather start and end cycle of entire graph
def __parse_stats(self, stats_file):
stats = []
if(stats_file):
if (os.path.isfile(stats_file)):
with open(stats_file) as f:
csv_reader = csv.DictReader(f, delimiter=",")
for row in csv_reader:
stat = {}
stat["global_ctr"] = int(row["global_ctr"])
stat["time"] = int(row["time"])
stats.append(stat)
else:
warnings.warn("Stats file not found, overriding blood graph's start/end cycle with traces.")
return stats
# look through the input file to get the tile group dimension (x,y)
def __get_tile_group_dim(self, traces):
xs = [t["x"] for t in traces]
ys = [t["y"] for t in traces]
self.xmin = min(xs)
self.xmax = max(xs)
self.ymin = min(ys)
self.ymax = max(ys)
self.xdim = self.xmax-self.xmin+1
self.ydim = self.ymax-self.ymin+1
return
# Determine the timing window (start and end) cycle of graph
# The timing window will be calculated using:
# Custom input: if custom start cycle is given by using the --cycle argument
# Vanilla stats file: otherwise if vanilla stats file is given as input
# Traces: otherwise the entire course of simulation
def __get_timing_window(self, traces, stats, cycle):
custom_start, custom_end = cycle.split('@')
if (custom_start):
start = int(custom_start)
elif (stats):
start = stats[0]["global_ctr"]
else:
start = traces[0]["cycle"]
if (custom_end):
end = int(custom_end)
elif (stats):
end = stats[-1]["global_ctr"]
else:
end = traces[-1]["cycle"]
return start, end
# main public method
def generate(self):
# init image
self.__init_image()
# create image
for trace in self.traces:
self.__mark_trace(trace)
#self.img.show()
mode = "abstract" if self.abstract else "detailed"
self.img.save(("blood_" + mode + ".png"))
return
# public method to generate key for bloodgraph
# called if --generate-key argument is true
def generate_key(self, key_image_fname = "key"):
img = Image.new("RGB", (self._KEY_WIDTH, self._KEY_HEIGHT), "black")
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()
# the current row position of our key
yt = 0
# for each color in stalls...
for (operation,color) in chain([(stall_bubble, self.stall_bubble_color[stall_bubble]) for stall_bubble in self._STALLS_LIST],
[("unified_instr" ,self.unified_instr_color),
("unified_fp_instr" ,self.unified_fp_instr_color)]):
# get the font size
(font_height,font_width) = font.getsize(operation)
# draw a rectangle with color fill
yb = yt + font_width
# [0, yt, 64, yb] is [top left x, top left y, bottom right x, bottom left y]
draw.rectangle([0, yt, 64, yb], color)
# write the label for this color in white
# (68, yt) = (top left x, top left y)
# (255, 255, 255) = white
draw.text((68, yt), operation, (255,255,255))
# create the new row's y-coord
yt += font_width
# save the key
mode = "abstract" if self.abstract else "detailed"
img.save("{}.png".format(key_image_fname + "_" + mode))
return
# initialize image
def __init_image(self):
self.img_width = 2048 # default
self.img_height = (((self.end_cycle-self.start_cycle)+self.img_width)//self.img_width)*(2+(self.xdim*self.ydim))
self.img = Image.new("RGB", (self.img_width, self.img_height), "black")
self.pixel = self.img.load()
return
# mark the trace on output image
def __mark_trace(self, trace):
# ignore trace outside the cycle range
if trace["cycle"] < self.start_cycle or trace["cycle"] >= self.end_cycle:
return
# determine pixel location
cycle = (trace["cycle"] - self.start_cycle)
col = cycle % self.img_width
floor = cycle // self.img_width
tg_x = trace["x"] - self.xmin
tg_y = trace["y"] - self.ymin
row = floor*(2+(self.xdim*self.ydim)) + (tg_x+(tg_y*self.xdim))
# determine color
if trace["operation"] in self.stall_bubble_color.keys():
self.pixel[col,row] = self.stall_bubble_color[trace["operation"]]
elif trace["operation"] in self._INSTRS_LIST:
self.pixel[col,row] = self.unified_instr_color
elif trace["operation"] in self._FP_INSTRS_LIST:
self.pixel[col,row] = self.unified_fp_instr_color
else:
raise Exception('Invalid operation in vanilla operation trace log {}'.format(trace["operation"]))
return
# Parse input arguments and options
def add_args(parser):
parser.add_argument("--no-blood-graph", default=False, action='store_true',
help="Skip blood graph generation")
def main(args):
bg = BloodGraph(args.trace, args.stats, args.cycle, args.abstract)
if not args.no_blood_graph:
bg.generate()
if args.generate_key:
bg.generate_key()
# main()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Argument parser for blood_graph.py")
common.add_args(parser)
add_args(parser)
args = parser.parse_args()
main(args)
| 41.784232 | 133 | 0.445482 | [
"SHL-0.51"
] | XQc0214/bsg_manycore | software/py/vanilla_parser/blood_graph.py | 20,140 | Python |
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| 23.058824 | 54 | 0.632653 | [
"MIT"
] | 0x8000-0000/conan-center-index | recipes/freetype/all/test_package/conanfile.py | 392 | Python |
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
n1 = float(input("Enter how many money do you have in your wallet(in Real): R$"))
print("You have {}R${:.2f}{} reals and you can buy {}US${:.2f}{} dollars"
"\nBuy {}EUR${:.2f}{} and buy {}GBP${:.2f}{}"
.format(colors["green"], n1, colors["clean"],
colors["blue"], n1/5.59, colors["clean"],
colors["red"], n1/6.69, colors["clean"],
colors["yellow"], n1/7.79, colors["clean"]))
| 42.866667 | 81 | 0.479005 | [
"MIT"
] | MiguelChichorro/PythonExercises | World 1/First attempts/ex010 - Real to Dollar.py | 643 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Submits files or a URL to Cuckoo"""
from builtins import input
from argparse import ArgumentParser
from distutils.util import strtobool
from io import BytesIO
from time import sleep
from glob import glob
from zipfile import ZipFile
from os.path import basename
from cuckooutils import Cuckoo, get_file_hash
__version__ = "1.0.0"
__license = """Copyright 2016 Sean Whalen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
cuckoo = Cuckoo("https://cuckoo.example.net", "username", "password")
parser = ArgumentParser(description=__doc__, version=__version__)
parser.add_argument("sample", nargs="+", help="One or more filenames or globs, or a single URL")
parser.add_argument("--tags",
help="Comma separated tags for selecting an analysis VM",
default=None)
parser.add_argument("--options",
help="Comma separated option=value pairs",
default=None)
parser.add_argument("--tor", action="store_true",
help="Enable Tor during analysis")
parser.add_argument("--procmemdump", action="store_true",
help="Dump and analyze process memory")
args = parser.parse_args()
options = {}
if args.tor:
options['tor'] = 'yes'
if args.procmemdump:
options['procmemdump'] = 'yes'
options = ",".join(list(map(lambda option: "{0}={1}".format(option, options[option]), options.keys())))
if args.options:
if len(options) > 0:
options += ","
options += args.options
url = len(args.sample) == 1 and args.sample[0].lower().startswith("http")
if url:
url = args.sample[0]
results = cuckoo.submit_url(url, tags=args.tags, options=options)
else:
filenames = []
for filename in args.sample:
filenames += glob(filename)
if len(filenames) == 0:
raise ValueError("No matching files found")
elif len(filenames) > 1:
multi_file = True
else:
multi_file = False
if multi_file:
temp_file = BytesIO()
temp_filename = "bulk.zip"
with ZipFile(temp_file, 'a') as temp_zip:
        temp_zip.setpassword(b"infected")
for filename in filenames:
temp_zip.write(filename)
else:
temp_filename = basename(filenames[0])
with open(temp_filename, 'rb') as sample_file:
temp_file = BytesIO(sample_file.read())
file_hash = get_file_hash(temp_file)
existing_tasks = cuckoo.find_tasks(file_hash)
if len(existing_tasks) > 0:
print("The following analysis reports already exist for this sample:")
for task_id in existing_tasks:
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
try:
        resubmit = strtobool(input("Would you like to resubmit it? (y/N)").lower())
except ValueError:
exit()
if not resubmit:
exit()
results = cuckoo.submit_file(temp_filename, temp_file.getvalue(), tags=args.tags, options=options)
tasks = {}
task_ids = results['task_ids']
for task_id in task_ids:
tasks[task_id] = dict(previous_state=None, current_state=None)
while (len(tasks)) > 0:
    for task_id in list(tasks.keys()):
tasks[task_id]['previous_state'] = tasks[task_id]['current_state']
tasks[task_id]['current_state'] = cuckoo.get_task_status(task_id)
if tasks[task_id]['current_state'] != tasks[task_id]['previous_state']:
print("Task {0} is {1}".format(task_id, tasks[task_id]['current_state']))
if tasks[task_id]['current_state'] == "reported":
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
if tasks[task_id]['current_state'] == "reported" or tasks[task_id]['current_state'].startswith("failed"):
del tasks[task_id]
sleep(1) | 35.243902 | 113 | 0.655363 | [
"Apache-2.0"
] | seanthegeek/cuckoo-modified-utils | submit-to-cuckoo.py | 4,335 | Python |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for visualizing device coupling maps"""
import math
import numpy as np
from qiskit.exceptions import QiskitError
from .matplotlib import HAS_MATPLOTLIB
from .exceptions import VisualizationError
class _GraphDist():
"""Transform the circles properly for non-square axes.
"""
def __init__(self, size, ax, x=True):
self.size = size
self.ax = ax # pylint: disable=invalid-name
self.x = x
@property
def dist_real(self):
"""Compute distance.
"""
x0, y0 = self.ax.transAxes.transform( # pylint: disable=invalid-name
(0, 0))
x1, y1 = self.ax.transAxes.transform( # pylint: disable=invalid-name
(1, 1))
value = x1 - x0 if self.x else y1 - y0
return value
@property
def dist_abs(self):
"""Distance abs
"""
bounds = self.ax.get_xlim() if self.x else self.ax.get_ylim()
return bounds[0] - bounds[1]
@property
def value(self):
"""Return value.
"""
return (self.size / self.dist_real) * self.dist_abs
def __mul__(self, obj):
return self.value * obj
def plot_gate_map(backend, figsize=None,
plot_directed=False,
label_qubits=True,
qubit_size=24,
line_width=4,
font_size=12,
qubit_color=None,
qubit_labels=None,
line_color=None,
font_color='w',
ax=None):
"""Plots the gate map of a device.
Args:
backend (BaseBackend): A backend instance,
figsize (tuple): Output figure size (wxh) in inches.
plot_directed (bool): Plot directed coupling map.
label_qubits (bool): Label the qubits.
qubit_size (float): Size of qubit marker.
line_width (float): Width of lines.
font_size (int): Font size of qubit labels.
qubit_color (list): A list of colors for the qubits
qubit_labels (list): A list of qubit labels
line_color (list): A list of colors for each line from coupling_map.
font_color (str): The font color for the qubit labels.
ax (Axes): A Matplotlib axes instance.
Returns:
Figure: A Matplotlib figure instance.
Raises:
QiskitError: if tried to pass a simulator.
ImportError: if matplotlib not installed.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_gate_map
%matplotlib inline
provider = IBMQ.load_account()
accountProvider = IBMQ.get_provider(hub='ibm-q')
backend = accountProvider.get_backend('ibmq_vigo')
plot_gate_map(backend)
"""
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
from matplotlib import get_backend
import matplotlib.pyplot as plt # pylint: disable=import-error
import matplotlib.patches as mpatches
if backend.configuration().simulator:
raise QiskitError('Requires a device backend, not simulator.')
input_axes = False
if ax:
input_axes = True
mpl_data = {}
mpl_data[1] = [[0, 0]]
mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
mpl_data[7] = [[0, 0], [0, 1], [0, 2],
[1, 1],
[2, 0], [2, 1], [2, 2]]
mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [1, 4],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]
mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [1, 7], [1, 6], [1, 5],
[1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]
mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3],
[0, 4], [0, 5], [0, 6], [0, 7], [1, 7],
[1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]
mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],
[3, 2], [0, 3], [1, 3], [3, 3], [4, 3],
[1, 4], [3, 4], [1, 5], [2, 5], [3, 5],
[1, 6], [3, 6], [0, 7], [1, 7], [3, 7],
[4, 7], [1, 8], [3, 8], [1, 9], [2, 9],
[3, 9], [3, 10]]
mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8]]
mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8],
[5, 2], [5, 6],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8],
[7, 0], [7, 4], [7, 8],
[8, 0], [8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8],
[9, 2], [9, 6]]
mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [0, 7], [0, 8], [0, 9],
[1, 0], [1, 4], [1, 8],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10],
[3, 2], [3, 6], [3, 10],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10],
[5, 0], [5, 4], [5, 8],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10],
[7, 2], [7, 6], [7, 10],
[8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]
config = backend.configuration()
num_qubits = config.n_qubits
cmap = config.coupling_map
if qubit_labels is None:
qubit_labels = list(range(num_qubits))
else:
if len(qubit_labels) != num_qubits:
raise QiskitError('Length of qubit labels '
'does not equal number '
'of qubits.')
if num_qubits in mpl_data.keys():
grid_data = mpl_data[num_qubits]
else:
if not input_axes:
fig, ax = plt.subplots(figsize=(5, 5)) # pylint: disable=invalid-name
ax.axis('off')
return fig
x_max = max([d[1] for d in grid_data])
y_max = max([d[0] for d in grid_data])
max_dim = max(x_max, y_max)
if figsize is None:
if num_qubits == 1 or (x_max / max_dim > 0.33 and y_max / max_dim > 0.33):
figsize = (5, 5)
else:
figsize = (9, 3)
if ax is None:
fig, ax = plt.subplots(figsize=figsize) # pylint: disable=invalid-name
ax.axis('off')
# set coloring
if qubit_color is None:
qubit_color = ['#648fff'] * config.n_qubits
if line_color is None:
line_color = ['#648fff'] * len(cmap) if cmap else []
# Add lines for couplings
if num_qubits != 1:
for ind, edge in enumerate(cmap):
is_symmetric = False
if edge[::-1] in cmap:
is_symmetric = True
y_start = grid_data[edge[0]][0]
x_start = grid_data[edge[0]][1]
y_end = grid_data[edge[1]][0]
x_end = grid_data[edge[1]][1]
if is_symmetric:
if y_start == y_end:
x_end = (x_end - x_start) / 2 + x_start
elif x_start == x_end:
y_end = (y_end - y_start) / 2 + y_start
else:
x_end = (x_end - x_start) / 2 + x_start
y_end = (y_end - y_start) / 2 + y_start
ax.add_artist(plt.Line2D([x_start, x_end], [-y_start, -y_end],
color=line_color[ind], linewidth=line_width,
zorder=0))
if plot_directed:
dx = x_end - x_start # pylint: disable=invalid-name
dy = y_end - y_start # pylint: disable=invalid-name
if is_symmetric:
x_arrow = x_start + dx * 0.95
y_arrow = -y_start - dy * 0.95
dx_arrow = dx * 0.01
dy_arrow = -dy * 0.01
head_width = 0.15
else:
x_arrow = x_start + dx * 0.5
y_arrow = -y_start - dy * 0.5
dx_arrow = dx * 0.2
dy_arrow = -dy * 0.2
head_width = 0.2
ax.add_patch(mpatches.FancyArrow(x_arrow,
y_arrow,
dx_arrow,
dy_arrow,
head_width=head_width,
length_includes_head=True,
edgecolor=None,
linewidth=0,
facecolor=line_color[ind],
zorder=1))
# Add circles for qubits
for var, idx in enumerate(grid_data):
_idx = [idx[1], -idx[0]]
width = _GraphDist(qubit_size, ax, True)
height = _GraphDist(qubit_size, ax, False)
ax.add_artist(mpatches.Ellipse(
_idx, width, height, color=qubit_color[var], zorder=1))
if label_qubits:
ax.text(*_idx, s=qubit_labels[var],
horizontalalignment='center',
verticalalignment='center',
color=font_color, size=font_size, weight='bold')
ax.set_xlim([-1, x_max + 1])
ax.set_ylim([-(y_max + 1), 1])
if not input_axes:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
return None
def plot_circuit_layout(circuit, backend, view='virtual'):
"""Plot the layout of a circuit transpiled for a given
target backend.
Args:
circuit (QuantumCircuit): Input quantum circuit.
backend (BaseBackend): Target backend.
view (str): Layout view: either 'virtual' or 'physical'.
Returns:
Figure: A matplotlib figure showing layout.
Raises:
QiskitError: Invalid view type given.
VisualizationError: Circuit has no layout attribute.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit, IBMQ, transpile
from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
%matplotlib inline
IBMQ.load_account()
ghz = QuantumCircuit(3, 3)
ghz.h(0)
for idx in range(1,3):
ghz.cx(0,idx)
ghz.measure(range(3), range(3))
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)
plot_circuit_layout(new_circ_lv3, backend)
"""
if circuit._layout is None:
raise QiskitError('Circuit has no layout. '
'Perhaps it has not been transpiled.')
num_qubits = backend.configuration().n_qubits
qubits = []
qubit_labels = [None] * num_qubits
if view == 'virtual':
for key, val in circuit._layout.get_virtual_bits().items():
if key.register.name != 'ancilla':
qubits.append(val)
qubit_labels[val] = key.index
elif view == 'physical':
for key, val in circuit._layout.get_physical_bits().items():
if val.register.name != 'ancilla':
qubits.append(key)
qubit_labels[key] = key
else:
raise VisualizationError("Layout view must be 'virtual' or 'physical'.")
qcolors = ['#648fff'] * num_qubits
for k in qubits:
qcolors[k] = 'k'
cmap = backend.configuration().coupling_map
lcolors = ['#648fff'] * len(cmap)
for idx, edge in enumerate(cmap):
if edge[0] in qubits and edge[1] in qubits:
lcolors[idx] = 'k'
fig = plot_gate_map(backend,
qubit_color=qcolors,
qubit_labels=qubit_labels,
line_color=lcolors)
return fig
def plot_error_map(backend, figsize=(12, 9), show_title=True):
"""Plots the error map of a given backend.
Args:
backend (IBMQBackend): Given backend.
figsize (tuple): Figure size in inches.
show_title (bool): Show the title or not.
Returns:
Figure: A matplotlib figure showing error map.
Raises:
VisualizationError: Input is not IBMQ backend.
ImportError: If seaborn is not installed
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_error_map
%matplotlib inline
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
plot_error_map(backend)
"""
try:
import seaborn as sns
except ImportError:
raise ImportError('Must have seaborn installed to use plot_error_map. '
'To install, run "pip install seaborn".')
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
import matplotlib
from matplotlib import get_backend
import matplotlib.pyplot as plt # pylint: disable=import-error
import matplotlib.gridspec as gridspec
from matplotlib import ticker
color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
num_qubits = config['n_qubits']
# U2 error rates
single_gate_errors = [0]*num_qubits
for gate in props['gates']:
if gate['gate'] == 'u2':
_qubit = gate['qubits'][0]
single_gate_errors[_qubit] = gate['parameters'][0]['value']
# Convert to percent
single_gate_errors = 100 * np.asarray(single_gate_errors)
avg_1q_err = np.mean(single_gate_errors)
single_norm = matplotlib.colors.Normalize(
vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [color_map(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
directed = False
line_colors = []
if cmap:
directed = False
if num_qubits < 20:
for edge in cmap:
if not [edge[1], edge[0]] in cmap:
directed = True
break
cx_errors = []
for line in cmap:
for item in props['gates']:
if item['qubits'] == line:
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
# Convert to percent
cx_errors = 100 * np.asarray(cx_errors)
avg_cx_err = np.mean(cx_errors)
cx_norm = matplotlib.colors.Normalize(
vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [color_map(cx_norm(err)) for err in cx_errors]
# Measurement errors
read_err = []
for qubit in range(num_qubits):
for item in props['qubits'][qubit]:
if item['name'] == 'readout_error':
read_err.append(item['value'])
read_err = 100 * np.asarray(read_err)
avg_read_err = np.mean(read_err)
max_read_err = np.max(read_err)
fig = plt.figure(figsize=figsize)
gridspec.GridSpec(nrows=2, ncols=3)
grid_spec = gridspec.GridSpec(12, 12, height_ratios=[1] * 11 + [0.5],
width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])
left_ax = plt.subplot(grid_spec[2:10, :1])
main_ax = plt.subplot(grid_spec[:11, 1:11])
right_ax = plt.subplot(grid_spec[2:10, 11:])
bleft_ax = plt.subplot(grid_spec[-1, :5])
if cmap:
bright_ax = plt.subplot(grid_spec[-1, 7:])
plot_gate_map(backend, qubit_color=q_colors,
line_color=line_colors,
qubit_size=28,
line_width=5,
plot_directed=directed,
ax=main_ax)
main_ax.axis('off')
main_ax.set_aspect(1)
if cmap:
single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map,
norm=single_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
single_cb.locator = tick_locator
single_cb.update_ticks()
single_cb.update_ticks()
bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))
if cmap is None:
bleft_ax.axis('off')
bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))
if cmap:
cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map,
norm=cx_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
cx_cb.locator = tick_locator
cx_cb.update_ticks()
bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))
if num_qubits < 10:
num_left = num_qubits
num_right = 0
else:
num_left = math.ceil(num_qubits / 2)
num_right = num_qubits - num_left
left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
left_ax.set_yticks(range(num_left))
left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
left_ax.invert_yaxis()
left_ax.set_title('Readout Error (%)', fontsize=12)
for spine in left_ax.spines.values():
spine.set_visible(False)
if num_right:
right_ax.barh(range(num_left, num_qubits), read_err[num_left:],
align='center', color='#DDBBBA')
right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
right_ax.set_yticks(range(num_left, num_qubits))
right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)],
fontsize=12)
right_ax.invert_yaxis()
right_ax.invert_xaxis()
right_ax.yaxis.set_label_position("right")
right_ax.yaxis.tick_right()
right_ax.set_title('Readout Error (%)', fontsize=12)
else:
right_ax.axis('off')
for spine in right_ax.spines.values():
spine.set_visible(False)
if show_title:
fig.suptitle('{name} Error Map'.format(name=backend.name()),
fontsize=24, y=0.9)
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
| 35.754622 | 95 | 0.512363 | [
"Apache-2.0"
] | AzizNgoueya/qiskit-terra | qiskit/visualization/gate_map.py | 21,274 | Python |
#1KiB
with open("Makeflow1KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1))
#10KiB
with open("Makeflow10KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*10))
#100KiB
with open("Makeflow100KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*100))
#1MiB
with open("Makeflow1MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1))
#10MiB
with open("Makeflow10MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=20\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*10))
#100MiB
with open("Makeflow100MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=200\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*100))
#1GiB
with open("Makeflow1GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2000\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*1))
#10GiB
with open("Makeflow10GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=10738\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*10))
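# --- Illustrative sketch (a hypothetical helper, not part of the original
# script): the same files could be generated from a table of
# (suffix, payload bytes, DISK value); the values simply mirror the blocks above.
def _write_makeflow(suffix, nbytes, disk, njobs=100):
    with open("Makeflow" + suffix, "w+") as f:
        f.write("CORES=1\nMEMORY=1000\nDISK=%i\n\n" % disk)
        for x in range(njobs):
            f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n" % (x, x, nbytes))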
| 34.877551 | 100 | 0.600936 | [
"MIT"
] | Nekel-Seyew/mpi-paper-tests | generation/generateMakeflows.py | 1,709 | Python |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import (
Panel,
)
from bl_ui.utils import PresetPanel
from bl_ui.properties_physics_common import (
point_cache_ui,
effector_weights_ui,
)
def cloth_panel_enabled(md):
return md.point_cache.is_baked is False
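# Note (not part of the original file): the panels below use this helper to
# grey out their settings once the point cache has been baked.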
class CLOTH_PT_presets(PresetPanel, Panel):
bl_label = "Cloth Presets"
preset_subdir = "cloth"
preset_operator = "script.execute_preset"
preset_add_operator = "cloth.preset_add"
class PhysicButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "physics"
@classmethod
def poll(cls, context):
ob = context.object
return (ob and ob.type == 'MESH') and (context.engine in cls.COMPAT_ENGINES) and (context.cloth)
class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel):
bl_label = "Cloth"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header_preset(self, _context):
CLOTH_PT_presets.draw_panel_header(self.layout)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "quality", text="Quality Steps")
col = flow.column()
col.prop(cloth, "time_scale", text="Speed Multiplier")
class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel):
bl_label = "Physical Properties"
bl_parent_id = 'PHYSICS_PT_cloth'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "mass", text="Vertex Mass")
col = flow.column()
col.prop(cloth, "air_damping", text="Air Viscosity")
col = flow.column()
col.prop(cloth, "bending_model")
class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel):
bl_label = "Stiffness"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "compression_stiffness", text="Compression")
else:
col.prop(cloth, "tension_stiffness", text="Structural")
col = flow.column()
col.prop(cloth, "shear_stiffness", text="Shear")
col = flow.column()
col.prop(cloth, "bending_stiffness", text="Bending")
class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel):
bl_label = "Damping"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_damping", text="Tension")
col = flow.column()
col.prop(cloth, "compression_damping", text="Compression")
else:
col.prop(cloth, "tension_damping", text="Structural")
col = flow.column()
col.prop(cloth, "shear_damping", text="Shear")
col = flow.column()
col.prop(cloth, "bending_damping", text="Bending")
class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel):
bl_label = "Internal Springs"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_internal_springs", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_internal_springs and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "internal_spring_max_length", text="Max Spring Creation Length")
col = flow.column()
col.prop(cloth, "internal_spring_max_diversion", text="Max Creation Diversion")
col = flow.column()
col.prop(cloth, "internal_spring_normal_check", text="Check Surface Normals")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness", text="Compression")
col = flow.column()
col.prop_search(cloth, "vertex_group_intern", ob, "vertex_groups", text="Vertex Group")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness_max", text="Max Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness_max", text="Max Compression")
class PHYSICS_PT_cloth_pressure(PhysicButtonsPanel, Panel):
bl_label = "Pressure"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_pressure", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_pressure and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "uniform_pressure_force")
col = flow.column()
col.prop(cloth, "use_pressure_volume", text="Custom Volume")
col = flow.column()
col.active = cloth.use_pressure_volume
col.prop(cloth, "target_volume")
col = flow.column()
col.prop(cloth, "pressure_factor")
col = flow.column()
col.prop(cloth, "fluid_density")
col = flow.column()
col.prop_search(cloth, "vertex_group_pressure", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_cache(PhysicButtonsPanel, Panel):
bl_label = "Cache"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
md = context.cloth
point_cache_ui(self, md.point_cache, cloth_panel_enabled(md), 'CLOTH')
class PHYSICS_PT_cloth_shape(PhysicButtonsPanel, Panel):
bl_label = "Shape"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column(align=True)
col.prop_search(cloth, "vertex_group_mass", ob, "vertex_groups", text="Pin Group")
sub = col.column(align=True)
sub.active = cloth.vertex_group_mass != ""
sub.prop(cloth, "pin_stiffness", text="Stiffness")
col.separator()
col = flow.column(align=True)
col.prop(cloth, "use_sewing_springs", text="Sewing")
sub = col.column(align=True)
sub.active = cloth.use_sewing_springs
sub.prop(cloth, "sewing_force_max", text="Max Sewing Force")
col.separator()
col = flow.column()
col.prop(cloth, "shrink_min", text="Shrinking Factor")
col = flow.column()
col.prop(cloth, "use_dynamic_mesh", text="Dynamic Mesh")
key = ob.data.shape_keys
if key:
col = flow.column()
col.active = not cloth.use_dynamic_mesh
col.prop_search(cloth, "rest_shape_key", key, "key_blocks", text="Rest Shape Key")
class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel):
bl_label = "Collisions"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = (cloth.use_collision or cloth.use_self_collision) and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "collision_quality", text="Quality")
class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel):
bl_label = "Object Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = cloth.use_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "impulse_clamp")
col = flow.column()
col.prop(cloth, "collection")
class PHYSICS_PT_cloth_self_collision(PhysicButtonsPanel, Panel):
bl_label = "Self Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_self_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
ob = context.object
layout.active = cloth.use_self_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "self_friction", text="Friction")
col = flow.column()
col.prop(cloth, "self_distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "self_impulse_clamp")
col = flow.column()
col.prop_search(cloth, "vertex_group_self_collisions", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_property_weights(PhysicButtonsPanel, Panel):
bl_label = "Property Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = context.cloth.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop_search(
cloth, "vertex_group_structural_stiffness", ob, "vertex_groups",
text="Structural Group",
)
col.prop(cloth, "tension_stiffness_max", text="Max Tension")
col.prop(cloth, "compression_stiffness_max", text="Max Compression")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shear_stiffness", ob, "vertex_groups",
text="Shear Group",
)
col.prop(cloth, "shear_stiffness_max", text="Max Shearing")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_bending", ob, "vertex_groups",
text="Bending Group"
)
col.prop(cloth, "bending_stiffness_max", text="Max Bending")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shrink", ob, "vertex_groups",
text="Shrinking Group"
)
col.prop(cloth, "shrink_max", text="Max Shrinking")
class PHYSICS_PT_cloth_field_weights(PhysicButtonsPanel, Panel):
bl_label = "Field Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
cloth = context.cloth.settings
effector_weights_ui(self, cloth.effector_weights, 'CLOTH')
classes = (
CLOTH_PT_presets,
PHYSICS_PT_cloth,
PHYSICS_PT_cloth_physical_properties,
PHYSICS_PT_cloth_stiffness,
PHYSICS_PT_cloth_damping,
PHYSICS_PT_cloth_internal_springs,
PHYSICS_PT_cloth_pressure,
PHYSICS_PT_cloth_cache,
PHYSICS_PT_cloth_shape,
PHYSICS_PT_cloth_collision,
PHYSICS_PT_cloth_object_collision,
PHYSICS_PT_cloth_self_collision,
PHYSICS_PT_cloth_property_weights,
PHYSICS_PT_cloth_field_weights,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
| 32.709278 | 107 | 0.67026 | [
"MIT"
] | calculusrobotics/RNNs-for-Bayesian-State-Estimation | Blender 2.91/2.91/scripts/startup/bl_ui/properties_physics_cloth.py | 15,864 | Python |
from django.template import Library
from django.utils import timezone
import datetime
register = Library()
@register.filter
def utcoffset(value):
    # Yes, it's a bit odd, but the tag API is clumsy here, so instead of using
    # `value` we get the current timezone from django.utils.
tz = timezone.get_current_timezone()
utc_offset = datetime.datetime.now(tz).utcoffset()
minutes = (utc_offset.days * 24 * 60) + (utc_offset.seconds / 60)
if minutes == 0:
return ''
return '(UTC%+03i:%02i)' % divmod(minutes, 60)
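# --- Illustrative sketch (a hypothetical helper, not part of the original
# file): the same divmod formatting as above, extracted so it can be checked
# without a Django request, e.g. _format_offset(330) -> '(UTC+05:30)'.
def _format_offset(minutes):
    return '(UTC%+03i:%02i)' % divmod(minutes, 60)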
| 28.833333 | 112 | 0.695568 | [
"MIT"
] | 0MazaHacka0/drapo | src/web/drapo/templatetags/timezones.py | 519 | Python |
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:tornado
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from thrift.transport import TTransport
| 25.214286 | 93 | 0.798867 | [
"Apache-2.0"
] | BSlience/fastweb | examples/service/fastweb_thrift_async/HelloService/ttypes.py | 353 | Python |
"""
Tests for pagination template tags and filters.
"""
from mock import Mock
from django import template
from tests import case
class PaginateTest(case.DBTestCase):
"""Tests for paginate template tag."""
def test_paginate(self):
"""Places Pager object in context with size/num from request."""
from moztrap.model.tags.models import Tag
tpl = template.Template(
"{% load pagination %}{% paginate queryset as pager %}"
"{% for obj in pager.objects %}{{ obj }} {% endfor %}")
request = Mock()
request.GET = {"pagesize": 3, "pagenumber": 2}
for i in range(1, 7):
self.F.TagFactory.create(name=str(i))
qs = Tag.objects.all()
output = tpl.render(
template.Context({"request": request, "queryset": qs}))
self.assertEqual(output, "4 5 6 ")
class FilterTest(case.TestCase):
"""Tests for template filters."""
def test_pagenumber_url(self):
"""``pagenumber_url`` filter updates pagenumber in URL."""
from moztrap.view.lists.templatetags.pagination import pagenumber_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagenumber_url(request, 1),
"http://localhost/?pagenumber=1&pagesize=10")
def test_pagesize_url(self):
"""``pagesize_url`` updates pagesize in URL (and jumps to page 1)."""
from moztrap.view.lists.templatetags.pagination import pagesize_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagesize_url(request, 20),
"http://localhost/?pagenumber=1&pagesize=20")
def test_pagenumber(self):
"""``pagenumber`` gets the pagenumber from the request."""
from moztrap.view.lists.templatetags.pagination import pagenumber
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagenumber(request), 2)
    def test_pagesize(self):
        """``pagesize`` gets the pagesize from the request."""
from moztrap.view.lists.templatetags.pagination import pagesize
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagesize(request), 10)
| 32.756757 | 77 | 0.625413 | [
"BSD-2-Clause"
] | UCL/moztrap | tests/view/lists/templatetags/test_pagination.py | 2,424 | Python |
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name='name', parent_name='streamtube', **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 31.214286 | 79 | 0.647597 | [
"MIT"
] | 180Studios/LoginApp | venv/lib/python3.7/site-packages/plotly/validators/streamtube/_name.py | 437 | Python |
import copy
import numpy as np
from math import cos, sin, pi, atan2
import warnings
import matplotlib.patches as mpatches
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
from matplotlib.text import Annotation
def rotated_polygon(xy, ox, oy, angle):
# angle in degree
theta = angle / 180. * pi
st = sin(theta)
ct = cos(theta)
xy = np.asarray(xy, dtype="d")
x, y = xy[:, 0], xy[:, 1]
x1 = x - ox
y1 = y - oy
x2 = ct * x1 + -st * y1
y2 = st * x1 + ct * y1
xp = x2 + ox
yp = y2 + oy
return np.hstack((xp.reshape((-1, 1)), yp.reshape((-1, 1))))
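# --- Illustrative sketch (a hypothetical helper, not part of the original
# module): rotating the point (1, 0) by 90 degrees about the origin should
# give approximately (0, 1).
def _example_rotated_polygon():
    return rotated_polygon([(1.0, 0.0)], 0.0, 0.0, 90.0)  # approx [[0., 1.]]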
# sss3 = [s1[0] for s1 in sss2 if isinstance(s1[0], parser_ds9.Shape)]
_point_type_dict = dict(circle="o",
box="s",
diamond="D",
x="x",
cross="+",
arrow="^",
boxcircle="*")
_ds9_to_mpl_colormap = dict(green="lime",
)
def properties_func_default(shape, saved_attrs):
attr_list = copy.copy(shape.attr[0])
attr_dict = copy.copy(shape.attr[1])
attr_list.extend(saved_attrs[0])
attr_dict.update(saved_attrs[1])
color = attr_dict.get("color", None)
color = _ds9_to_mpl_colormap.get(color, color)
if shape.name == "text":
kwargs = dict(color=color,
rotation=attr_dict.get("textangle", 0),
)
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs["fontsize"] = fontsize
elif shape.name == "point":
point_attrs = attr_dict.get("point", "boxcircle").split()
if len(point_attrs) == 1:
point_type = point_attrs[0]
point_size = 11
elif len(point_attrs) > 1:
point_type = point_attrs[0]
point_size = int(point_attrs[1])
marker = _point_type_dict.get(point_type, "o")
kwargs = dict(markeredgecolor=color,
markerfacecolor="none",
marker=marker,
markeredgewidth=int(attr_dict.get("width", 1)),
markersize=point_size
)
elif shape.name in ["line", "vector"]:
fontsize = 10 # default font size
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs = dict(color=color,
linewidth=int(attr_dict.get("width", 1)),
mutation_scale=fontsize,
)
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
else:
kwargs = dict(edgecolor=color,
linewidth=int(attr_dict.get("width", 1)),
facecolor="none"
)
if "background" in attr_list:
kwargs["linestyle"] = "dashed"
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
if shape.exclude:
kwargs["hatch"] = "/"
return kwargs
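# --- Illustrative sketch (a hypothetical helper, not part of the original
# module): a caller-supplied properties function for as_mpl_artists() has the
# same (shape, saved_attrs) signature; this one reuses the default and then
# forces every outline to red.
def _example_red_properties(shape, saved_attrs):
    kwargs = properties_func_default(shape, saved_attrs)
    for key in ("color", "edgecolor", "markeredgecolor"):
        if key in kwargs:
            kwargs[key] = "red"
    return kwargs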
def _get_text(txt, x, y, dx, dy, ha="center", va="center", **kwargs):
if "color" in kwargs:
textcolor = kwargs["color"]
del kwargs["color"]
elif "markeredgecolor" in kwargs:
textcolor = kwargs["markeredgecolor"]
else:
import matplotlib as mpl
textcolor = mpl.rcParams['text.color']
ann = Annotation(txt, (x, y), xytext=(dx, dy),
xycoords='data',
textcoords="offset points",
color=textcolor,
ha=ha, va=va,
**kwargs)
ann.set_transform(IdentityTransform())
return ann
def as_mpl_artists(shape_list,
properties_func=None,
text_offset=5.0, origin=1):
"""
Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
"""
patch_list = []
artist_list = []
if properties_func is None:
properties_func = properties_func_default
# properties for continued(? multiline?) regions
saved_attrs = None
for shape in shape_list:
patches = []
if saved_attrs is None:
_attrs = [], {}
else:
_attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
kwargs = properties_func(shape, _attrs)
if shape.name == "composite":
saved_attrs = shape.attr
continue
if saved_attrs is None and shape.continued:
saved_attrs = shape.attr
# elif (shape.name in shape.attr[1]):
# if (shape.attr[1][shape.name] != "ignore"):
# saved_attrs = shape.attr
if not shape.continued:
saved_attrs = None
# text associated with the shape
txt = shape.attr[1].get("text")
if shape.name == "polygon":
xy = np.array(shape.coord_list)
xy.shape = -1, 2
# -1 for change origin to 0,0
patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
elif shape.name == "rotbox" or shape.name == "box":
xc, yc, w, h, rot = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
_box = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]])
box = _box + [xc, yc]
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif shape.name == "ellipse":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
angle = shape.coord_list[-1]
maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
angle=angle, **kwargs)
for maj, min in zip(maj_list, min_list)]
elif shape.name == "annulus":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
elif shape.name == "circle":
xc, yc, major = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
elif shape.name == "panda":
xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in np.linspace(r1, r2, rn + 1)]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "pie":
xc, yc, r1, r2, a1, a2 = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in [r1, r2]]
for aa in [a1, a2]:
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "epanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
# mpl takes angle a1, a2 as angle as in circle before
# transformation to ellipse.
x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
angle=angle, theta1=a1, theta2=a2,
**kwargs)
for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
np.linspace(r12, r22, rn + 1))]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif shape.name == "text":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif shape.name == "point":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
artist_list.append(Line2D([xc], [yc],
**kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
elif shape.name in ["line", "vector"]:
if shape.name == "line":
x1, y1, x2, y2 = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
arrowstyle = "-"
if int(a1):
arrowstyle = "<" + arrowstyle
if int(a2):
arrowstyle = arrowstyle + ">"
else: # shape.name == "vector"
x1, y1, l, a = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1 = x1 - origin, y1 - origin
x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
v1 = int(shape.attr[1].get("vector", "0").strip())
if v1:
arrowstyle = "->"
else:
arrowstyle = "-"
patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
posB=(x2, y2),
arrowstyle=arrowstyle,
arrow_transmuter=None,
connectionstyle="arc3",
patchA=None, patchB=None,
shrinkA=0, shrinkB=0,
connector=None,
**kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} "
"to mpl artist".format(shape.name))
patch_list.extend(patches)
if txt and patches:
# the text associated with a shape uses different
# matplotlib keywords than the shape itself for, e.g.,
# color
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
# calculate the text position
_bb = [p.get_window_extent() for p in patches]
# this is to work around backward-incompatible change made
# in matplotlib 1.2. This change is later reverted so only
# some versions are affected. With affected version of
# matplotlib, get_window_extent method calls get_transform
# method which sets the _transformSet to True, which is
# not desired.
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
x0, y0, x1, y1 = _bbox.extents
xc = .5 * (x0 + x1)
_t = _get_text(txt, xc, y1, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
return patch_list, artist_list
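# --- Illustrative usage sketch (a hypothetical helper, not part of the
# original module): the returned patches/artists are simply added to a
# Matplotlib Axes one by one.
def _example_draw(shape_list, ax):
    patch_list, artist_list = as_mpl_artists(shape_list)
    for p in patch_list:
        ax.add_patch(p)
    for a in artist_list:
        ax.add_artist(a)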
| 36.069409 | 93 | 0.480721 | [
"MIT"
] | keflavich/pyregion | pyregion/mpl_helper.py | 14,031 | Python |
# SPDX-FileCopyrightText: 2021 Arthur Breitman
# SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
infinity = 10 ** 100
class Tick(AutoRepr):
"""
An initialized tick, marking the beginning or end of a position
"""
def __init__(self, i_prev, i_next, feeGrowthOutside):
"""
:type i_prev: int
:type i_next: int
"""
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0
class Position(AutoRepr):
"""
A LP's position
"""
def __init__(self, L=0):
self.L = L
self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
"""
A pair of balances in asset X and Y
"""
def __init__(self, x=0, y=0):
self.x, self.y = x, y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return XY(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return XY(x, y)
def __neg__(self):
return XY(-self.x, -self.y)
def __mul__(self, other):
return XY(other * self.x, other * self.y)
def __eq__(self, other):
return isinstance(other, XY) and self.x == other.x and self.y == other.y
class Contract(AutoRepr):
"""
A contract in the fashion of Uniswap v3
"""
@staticmethod
def tick(srp):
"""
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the closest tick below a certain price
"""
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
"""
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
"""
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
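    # Note (not part of the original): tick() and srp() are approximate
    # inverses -- srp(tick(p)) rounds the square-root price p down to the
    # nearest tick boundary, up to floating-point error.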
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
def initialize_tick(self, i, i_l):
"""
Initialize a new tick at index i, provide the index of an initialized tick lower
than i to find it easily in the linked list. Assumes that i is *not* already initialized.
:param i:
:param i_l:
"""
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
# find an instance where i_a = i and we set XY(0, 0) and that's wrong
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
assert (i_l_l <= i_l)
if i_l not in self.ticks:
self.initialize_tick(i_l, i_l_l)
assert (i_u_l <= i_u)
if i_u not in self.ticks:
self.initialize_tick(i_u, i_u_l)
position_key = (user, i_l, i_u)
fees = self.collect_fees(user, i_l, i_u)
self.positions[position_key].L += Delta_L
assert (self.positions[position_key].L >= 0)
# todo, garbage collect if we are unwinding the position completely?
Delta = XY()
# Add or remove liquidity above the current tick
if self.i_a < i_l:
Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
Delta.y = 0
# Add or remove liquidity around the current tick
elif i_l <= self.i_a < i_u:
# update interval we are in if need be
if i_l > self.i_l:
self.i_l = i_l
Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
Delta.y = Delta_L * (self.srP - self.srp(i_l))
self.L += Delta_L
else: # i_a >= i_u
Delta.x = 0
Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
Delta -= fees
# make a note of how much liquidity is gained or lost when
# entering this interval
self.ticks[i_l].Delta_L += Delta_L
self.ticks[i_u].Delta_L -= Delta_L
self.balance += Delta
return -Delta
def X_to_Y(self, dX, fee=None):
# dX must be positive
assert (dX >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(dX * fee, 0)
srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
i_l = self.i_l
tick_new = self.tick(srp_new)
        if tick_new >= i_l:  # we didn't push past the interval
dY = - (dX - fees.x) * self.srP * srp_new
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
# compute what we got up til i_u and how much it cost
# well, what delta_X would have taken me there?
self.i_l = self.ticks[self.i_l].i_prev
srP_l = self.srp(i_l)
dY = self.L * (srP_l - self.srP)
dX_ = - dY / (self.srP * srP_l)
tmp = dX_ / (1.0 - fee)
dX_, fees = tmp, XY(tmp - dX_, 0)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
# remove the liquidity we used to have
self.L -= self.ticks[i_l].Delta_L
# flip feeGrowth
self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
self.srP = self.srp(i_l) - 1e-16 # todo can we do better than this crutch?
user = XY(-dX_, -dY)
self.balance -= user
return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
# dY must be positive
assert (dY >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(0, dY * fee)
srp_new = self.srP + (dY - fees.y) / self.L
i_u = self.ticks[self.i_l].i_next
tick_new = self.tick(srp_new)
if tick_new < i_u: # we did not push past the interval
dX = - (dY - fees.y) / (self.srP * srp_new)
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = i_u
srP_u = self.srp(i_u)
dY_ = self.L * (srP_u - self.srP)
dX = - dY_ / (self.srP * srP_u)
tmp = dY_ / (1.0 - fee)
dY_, fees = tmp, XY(0, tmp - dY_)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
self.L += self.ticks[i_u].Delta_L
self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
self.srP = srP_u
user = XY(-dX, -dY_)
self.balance -= user
return user + self.Y_to_X(dY - dY_, fee)
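# --- Illustrative usage sketch (a hypothetical helper, not part of the
# original module); the numbers are arbitrary and only show the call pattern.
def _example_session():
    c = Contract(X=1000.0, Y=1000.0, fee=0.003)
    i_l, i_u = c.i_a - 100, c.i_a + 100
    # "alice" provides 10 units of liquidity in a band around the current tick,
    c.set_position("alice", i_l, -infinity, i_u, i_l, 10)
    # then a trader sells 5 units of X; the trader's balance change is returned.
    return c.X_to_Y(5.0)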
| 34.007547 | 120 | 0.552818 | [
"MIT",
"Unlicense"
] | serokell/segmented-cfmm | python/scfmm/__init__.py | 9,012 | Python |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from functools import reduce
import numpy, scipy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import fci
b = 1.4
mol = gto.Mole()
mol.build(
verbose = 7,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
symmetry = 1
)
mfr = scf.RHF(mol)
mfr.scf()
mcr = mcscf.CASSCF(mfr, 4, 4)
mcr.conv_tol_grad = 1e-6
mcr.mc1step()[0]
mfu = scf.UHF(mol)
mfu.scf()
mcu = mcscf.UCASSCF(mfu, 4, 4)
mcu.conv_tol_grad = 1e-6
mcu.mc1step()[0]
mol_prg = gto.M(
verbose = 0,
atom = [
['N',( 0.000000, 0.000000, -(b+0.1)/2)],
['N',( 0.000000, 0.000000, (b+0.1)/2)], ],
basis = 'ccpvdz',
symmetry=1)
mfr_prg = scf.RHF(mol_prg).set (max_cycle=1).run()
mcr_prg = mcscf.CASSCF(mfr_prg, 4, 4).set (max_cycle_macro=1).run()
mfu_prg = scf.UHF(mol_prg).set (max_cycle=1).run()
mcu_prg = mcscf.UCASSCF(mfu_prg, 4, 4).set (max_cycle_macro=1).run()
mol_prb = mol.copy ()
mol_prb.basis = {'N': 'aug-cc-pvdz' }
mol_prb.build ()
mfr_prb = scf.RHF(mol_prb).set (max_cycle=1).run()
mcr_prb = mcscf.CASSCF(mfr_prb, 4, 4).set (max_cycle_macro=1).run()
def tearDownModule():
global mol, mfr, mcr, mfu, mcu
mol.stdout.close()
del mol, mfr, mcr, mfu, mcu
class KnownValues(unittest.TestCase):
def test_spin_square(self):
ss = mcscf.addons.spin_square(mcr)[0]
self.assertAlmostEqual(ss, 0, 7)
def test_ucasscf_spin_square(self):
ss = mcscf.addons.spin_square(mcu)[0]
self.assertAlmostEqual(ss, 0, 7)
def test_rcas_natorb(self):
mo1, ci1, mocc1 = mcscf.addons.cas_natorb(mcr)
self.assertAlmostEqual(numpy.linalg.norm(mo1) , 9.9260608594977491, 6)
self.assertAlmostEqual(numpy.linalg.norm(mocc1), 5.1687145190800079, 6)
#TODO: def test_ucas_natorb(self):
#TODO: mo2, ci2, mocc2 = mcscf.addons.cas_natorb(mcu)
#TODO: self.assertAlmostEqual(numpy.linalg.norm(mo2) , 11.4470460817871*numpy.sqrt(2), 7)
#TODO: self.assertAlmostEqual(numpy.linalg.norm(mocc2), 2.59144951056707/numpy.sqrt(2), 7)
def test_get_fock(self):
f1 = mcscf.addons.get_fock(mcr)
self.assertTrue(numpy.allclose(f1, f1.T))
self.assertAlmostEqual(numpy.linalg.norm(f1), 25.482177562349467, 6)
#TODO: f1 = mcscf.addons.get_fock(mcu)
#TODO: self.assertTrue(numpy.allclose(f1[0], f1[0].T))
#TODO: self.assertTrue(numpy.allclose(f1[1], f1[1].T))
#TODO: self.assertAlmostEqual(numpy.linalg.norm(f1), 23.597476504476919*numpy.sqrt(2), 6)
def test_canonicalize1(self):
numpy.random.seed(1)
f1 = numpy.random.random(mcr.mo_coeff.shape)
u1 = numpy.linalg.svd(f1)[0]
mo1 = numpy.dot(mcr.mo_coeff, u1)
mo1 = lib.tag_array(mo1, orbsym=mcr.mo_coeff.orbsym)
mo, ci, mo_e = mcr.canonicalize(mo1)
e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
self.assertAlmostEqual(e1, 44.2658681077, 7)
self.assertAlmostEqual(lib.fp(mo_e), 5.1364166175063097, 7)
mo, ci, mo_e = mcr.canonicalize(mo1, eris=mcr.ao2mo(mcr.mo_coeff))
e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
self.assertAlmostEqual(e1, 44.2658681077, 7)
self.assertAlmostEqual(lib.fp(mo_e), 4.1206025804989173, 7)
mcr1 = copy.copy(mcr)
mcr1.frozen = 2
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)
mcr1.frozen = [0,1]
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)
mcr1.frozen = [1,12]
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 5.2182584355788162, 7)
def test_canonicalize(self):
mo, ci, mo_e = mcr.canonicalize()
self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)
mo, ci, mo_e = mcr.canonicalize(eris=mcr.ao2mo(mcr.mo_coeff))
self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)
def test_make_rdm12(self):
dmr = mcscf.addons.make_rdm1(mcr)
dm1, dm2 = mcscf.addons.make_rdm12(mcr)
self.assertTrue(numpy.allclose(dmr, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 3.8205551262007567, 6)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 14.987267883423314, 5)
def test_make_rdm1s(self):
dm1 = mcscf.addons.make_rdm1s(mcr)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)
dm1 = mcscf.addons.make_rdm1s(mcu)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)
def test_sort_mo(self):
mo1 = numpy.arange(mfr.mo_energy.size).reshape(1,-1)
ref = [[0, 1, 2, 3, 7, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
mo2 = mcscf.addons.sort_mo(mcr, mo1, [5,6,7,9])
self.assertTrue(numpy.allclose(mo2, ref))
mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [5,6,7,9])
self.assertTrue(numpy.allclose(mo2, (ref,ref)))
mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [[5,6,7,9],[5,6,8,9]])
ref1 = [[0, 1, 2, 3, 6, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
self.assertTrue(numpy.allclose(mo2, (ref,ref1)))
def test_sort_mo_by_irrep(self):
mc1 = mcscf.CASSCF(mfr, 8, 4)
mo0 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 'E1gx':2, 'E1gy':2})
mo1 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {2:2, 3:2, 6:2, 7:2}, {2:0, 3:0, 6:0, 7:0})
mo2 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, (0,0,2,2,0,0,2,2))
mo3 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 2:2, 3:2})
self.assertTrue(numpy.allclose(mo0, mo1))
self.assertTrue(numpy.allclose(mo0, mo2))
self.assertTrue(numpy.allclose(mo0, mo3))
def test_sort_mo_by_irrep1(self):
mol = gto.M(atom='N 0 0 -.45; N 0 0 .45', basis='ccpvdz',
symmetry=True, verbose=0)
mf = scf.RHF(mol).run()
mc1 = mcscf.CASSCF(mf, 6, 6)
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1, 'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1g': 2, 'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1g': 2, 'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1}, {'E1uy': 1, 'E1ux': 1})
self.assertEqual(list(caslst), [3,6,8,9,12,13])
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1}, {'E1uy': 3, 'E1ux': 3})
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'A1g': 3, 'A1u': 4}, {'E1uy': 1, 'E1ux': 1})
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'E2ux': 2, 'E2uy': 2}, {'E1uy': 1, 'E1ux': 1})
def test_state_average(self):
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = fci.solver(mol, singlet=False)
mc.state_average_((.64,.36))
e = mc.kernel()
e = mc.e_states
self.assertAlmostEqual(mc.e_tot, -108.83342083775061, 7)
self.assertAlmostEqual(mc.e_average, -108.83342083775061, 7)
self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434, 4)
self.assertRaises(TypeError, mc.state_average_, (.64,.36))
def test_state_average_fci_dmrg(self):
fcisolver1 = fci.direct_spin1_symm.FCISolver(mol)
class FCI_as_DMRG(fci.direct_spin1_symm.FCISolver):
def __getattribute__(self, attr):
"""Prevent 'private' attribute access"""
if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
'absorb_h1e'):
raise AttributeError
else:
return object.__getattribute__(self, attr)
def kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
def approx_kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
@property
def orbsym(self):
return fcisolver1.orbsym
@orbsym.setter
def orbsym(self, x):
fcisolver1.orbsym = x
spin_square = None
large_ci = None
transform_ci_for_orbital_rotation = None
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = FCI_as_DMRG(mol)
mc.fcisolver.nroots = fcisolver1.nroots = 2
mc.state_average_((.64,.36))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434*2, 4)
def test_state_average_mix(self):
solver1 = fci.FCI(mol)
solver1.spin = 0
solver1.nroots = 2
solver2 = fci.FCI(mol, singlet=False)
solver2.spin = 2
mc = mcscf.CASSCF(mfr, 4, 4)
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
(0.25,0.25,0.5))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(mc.e_tot, -108.80340952016508, 7)
self.assertAlmostEqual(mc.e_average, -108.80340952016508, 7)
self.assertAlmostEqual(numpy.dot(e,[.25,.25,.5]), -108.80340952016508, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52172669549357464, 4)
self.assertAlmostEqual(lib.fp(dm1[1]), 0.53366776017869022, 4)
self.assertAlmostEqual(lib.fp(dm1[0]+dm1[1]), 1.0553944556722636, 4)
mc.cas_natorb()
def test_state_average_mix_fci_dmrg(self):
fcisolver1 = fci.direct_spin0_symm.FCISolver(mol)
class FCI_as_DMRG(fci.direct_spin0_symm.FCISolver):
def __getattribute__(self, attr):
"""Prevent 'private' attribute access"""
if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
'absorb_h1e'):
raise AttributeError
else:
return object.__getattribute__(self, attr)
def kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
def approx_kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
@property
def orbsym(self):
return fcisolver1.orbsym
@orbsym.setter
def orbsym(self, x):
fcisolver1.orbsym = x
spin_square = None
large_ci = None
transform_ci_for_orbital_rotation = None
solver1 = FCI_as_DMRG(mol)
solver1.spin = fcisolver1.spin = 0
solver1.nroots = fcisolver1.nroots = 2
solver2 = fci.FCI(mol, singlet=False)
solver2.spin = 2
mc = mcscf.CASSCF(mfr, 4, 4)
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
(0.25,0.25,0.5))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(numpy.dot(e, [.25,.25,.5]), -108.80340952016508, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 1.0553944556722636, 4)
self.assertEqual(dm1[1], None)
mc.cas_natorb()
def test_state_specific(self):
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = fci.solver(mol, singlet=False)
mc.state_specific_(state=1)
e = mc.kernel()[0]
self.assertAlmostEqual(e, -108.70065770892457, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.54605283139098515, 4)
mc = mcscf.CASSCF(mfr, 4, 4)
mc.state_specific_(state=0)
e = mc.kernel()[0]
self.assertAlmostEqual(mc.e_tot, mcr.e_tot, 7)
dm1 = mc.analyze()
dmref = mcr.analyze()
self.assertAlmostEqual(float(abs(dm1[0]-dmref[0]).max()), 0, 4)
def test_project_init_guess_geom (self):
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
def test_project_init_guess_basis (self):
mo1 = mcscf.addons.project_init_guess (mcr_prb, mfr.mo_coeff, prev_mol=mfr.mol)
s1 = reduce(numpy.dot, (mo1.T, mfr_prb.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 6.782329983125268, 9)
def test_project_init_guess_uhf (self):
mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff)
for mo1 in mo1_u:
s1 = reduce(numpy.dot, (mo1.T, mfu_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
def test_project_init_guess_activefirst (self):
with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='active')
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
s2 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
for i in (1,3)] # core, active (same irrep)
self.assertAlmostEqual (s2[1], 1.0, 9)
self.assertFalse (s2[0] > s2[1])
def test_project_init_guess_corefirst (self):
with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='core')
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
s1 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
for i in (1,3)] # core, active (same irrep)
self.assertAlmostEqual (s1[0], 1.0, 9)
self.assertTrue (s1[0] > s1[1])
def test_project_init_guess_gramschmidt (self):
        gram_schmidt_idx = numpy.arange (27, dtype=int)[:,None].tolist ()
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority=gram_schmidt_idx)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mf2moi = reduce (numpy.dot, (mfr_prg.mo_coeff.conj ().T, mfr_prg.get_ovlp (), mfr.mo_coeff))
Q, R = scipy.linalg.qr (mf2moi) # Arbitrary sign, so abs below
mo2 = numpy.dot (mfr_prg.mo_coeff, Q)
s2 = numpy.abs (reduce (numpy.dot, (mo1.conj ().T, mfr_prg.get_ovlp (), mo2)))
self.assertAlmostEqual(numpy.linalg.norm(s2), 5.2915026221291841, 9)
def test_project_init_guess_prioritylists (self):
pr = [[[27],[5,3],[6,12]],[[5],[17],[13,10,8,6]]]
mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff, priority=pr)
s0 = mfu_prg.get_ovlp ()
for ix, mo1 in enumerate (mo1_u):
s1 = reduce(numpy.dot, (mo1.T, s0, mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfu_mo = mfu.mo_coeff[ix]
mfu_mo_norm = numpy.einsum ('ip,ip->p', mfu_mo.conj (), s0.dot (mfu_mo))
mfu_mo_norm = mfu.mo_coeff[ix] / numpy.sqrt (mfu_mo_norm)[None,:]
p = pr[ix][0][0]
s2 = reduce (numpy.dot, (mfu_prg.get_ovlp (), mo1[:,p], mfu_mo_norm[:,p]))
self.assertAlmostEqual (s2, 1.0, 9)
def test_project_init_guess_usehfcore (self):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, use_hf_core=True)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
s2 = reduce (numpy.dot, (mo1[:,:5].T, mfr_prg.get_ovlp (), mfr_prg.mo_coeff[:,:5]))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s2)[0]>1e-10),
s2.shape[0])
self.assertAlmostEqual (numpy.linalg.norm (s2), 2.23606797749979, 9)
def test_state_average_bad_init_guess(self):
mc = mcscf.CASCI(mfr, 4, 4)
mc.run()
mc.state_average_([.8, .2])
mscan = mc.as_scanner()
e = mscan(mol)
self.assertAlmostEqual(e, -108.84390277715984, 9)
if __name__ == "__main__":
print("Full Tests for mcscf.addons")
unittest.main()
| 44.20595 | 100 | 0.604255 | [
"Apache-2.0"
] | JFurness1/pyscf | pyscf/mcscf/test/test_addons.py | 19,318 | Python |
# Copyright (c) 2016-2017, Jani Nikula <[email protected]>
# Licensed under the terms of BSD 2-Clause, see LICENSE for details.
"""
Hawkmoth
========
Sphinx C Domain autodoc directive extension.
"""
import glob
import os
import re
import stat
import subprocess
import sys
from docutils import nodes, statemachine
from docutils.parsers.rst import directives, Directive
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import AutodocReporter
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.docutils import switch_source_input
from hawkmoth.parser import parse
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'VERSION')) as version_file:
__version__ = version_file.read().strip()
class CAutoDocDirective(Directive):
"""Extract all documentation comments from the specified file"""
    required_arguments = 1
optional_arguments = 1
# Allow passing a variable number of file patterns as arguments
final_argument_whitespace = True
option_spec = {
'compat': directives.unchanged_required,
'clang': directives.unchanged_required,
}
has_content = False
def __parse(self, viewlist, filename):
env = self.state.document.settings.env
compat = self.options.get('compat', env.config.cautodoc_compat)
clang = self.options.get('clang', env.config.cautodoc_clang)
comments = parse(filename, compat=compat, clang=clang)
for (comment, meta) in comments:
lineoffset = meta['line'] - 1
lines = statemachine.string2lines(comment, 8,
convert_whitespace=True)
for line in lines:
viewlist.append(line, filename, lineoffset)
lineoffset += 1
def run(self):
env = self.state.document.settings.env
result = ViewList()
for pattern in self.arguments[0].split():
filenames = glob.glob(env.config.cautodoc_root + '/' + pattern)
if len(filenames) == 0:
fmt = 'Pattern "{pat}" does not match any files.'
env.app.warn(fmt.format(pat=pattern),
location=(env.docname, self.lineno))
continue
for filename in filenames:
mode = os.stat(filename).st_mode
if stat.S_ISDIR(mode):
fmt = 'Path "{name}" matching pattern "{pat}" is a directory.'
env.app.warn(fmt.format(name=filename, pat=pattern),
location=(env.docname, self.lineno))
continue
# Tell Sphinx about the dependency and parse the file
env.note_dependency(os.path.abspath(filename))
self.__parse(result, filename)
# Parse the extracted reST
with switch_source_input(self.state, result):
node = nodes.section()
nested_parse_with_titles(self.state, result, node)
return node.children
def setup(app):
app.require_sphinx('1.8')
app.add_config_value('cautodoc_root', app.confdir, 'env')
app.add_config_value('cautodoc_compat', None, 'env')
app.add_config_value('cautodoc_clang', None, 'env')
app.add_directive_to_domain('c', 'autodoc', CAutoDocDirective)
return dict(version = __version__,
parallel_read_safe = True, parallel_write_safe = True)
| 34.316832 | 82 | 0.634737 | [
"BSD-2-Clause"
] | mv0/hawkmoth | hawkmoth/__init__.py | 3,466 | Python |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourceOrb
__all__ = ["SourceOrb"]
| 13.111111 | 56 | 0.694915 | [
"MIT"
] | 52-entertainment/airbyte | airbyte-integrations/connectors/source-orb/source_orb/__init__.py | 118 | Python |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
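    # Flatten each client/server entry's nested "coreStats" block into flat
    # "core_*" keys: plain counters, and for every histogram its bucket counts,
    # bucket boundaries and 50th/95th/99th percentiles.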
for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
if "coreStats" not in stats: return
core_stats = stats["coreStats"]
del stats["coreStats"]
stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_calls_created")
stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_calls_created")
stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
core_stats, "cqs_created")
stats[
"core_client_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_channels_created")
stats[
"core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_subchannels_created")
stats[
"core_server_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_channels_created")
stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_poll")
stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_wait")
stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick")
stats[
"core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_without_poller")
stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_again")
stats[
"core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_fd")
stats[
"core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_cv")
stats[
"core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_own_thread")
stats[
"core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
core_stats, "histogram_slow_lookups")
stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_write")
stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_read")
stats[
"core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_pollers_created")
stats[
"core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_poller_polls")
stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_batches")
stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_cancel")
stats[
"core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_initial_metadata")
stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_message")
stats[
"core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_trailing_metadata")
stats[
"core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_initial_metadata")
stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_message")
stats[
"core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_trailing_metadata")
stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_settings_writes")
stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_pings_sent")
stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_begun")
stats[
"core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_offloaded")
stats[
"core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_continued")
stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_partial_writes")
stats[
"core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_initial_write")
stats[
"core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_start_new_stream")
stats[
"core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_message")
stats[
"core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_initial_metadata")
stats[
"core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_trailing_metadata")
stats[
"core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_retry_send_ping")
stats[
"core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_continue_pings")
stats[
"core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_goaway_sent")
stats[
"core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_rst_stream")
stats[
"core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_close_from_api")
stats[
"core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_stream_flow_control")
stats[
"core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control")
stats[
"core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_settings")
stats[
"core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_setting")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_update")
stats[
"core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_application_ping")
stats[
"core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_keepalive_ping")
stats[
"core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control_unstalled")
stats[
"core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_ping_response")
stats[
"core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_force_rst_stream")
stats[
"core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_spurious_writes_begun")
stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_indexed")
stats[
"core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx")
stats[
"core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx_v")
stats[
"core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx")
stats[
"core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx_v")
stats[
"core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx")
stats[
"core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx_v")
stats[
"core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_uncompressed")
stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_huffman")
stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary")
stats[
"core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary_base64")
stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_indexed")
stats[
"core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx")
stats[
"core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx_v")
stats[
"core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx")
stats[
"core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx_v")
stats[
"core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx")
stats[
"core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx_v")
stats[
"core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_uncompressed")
stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_huffman")
stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary")
stats[
"core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary_base64")
stats[
"core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_initiated")
stats[
"core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_items")
stats[
"core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_final_items")
stats[
"core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_offloaded")
stats[
"core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_initiated")
stats[
"core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_scheduled_items")
stats[
"core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_set_notify_on_cancel")
stats[
"core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_cancelled")
stats[
"core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_short_items")
stats[
"core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_long_items")
stats[
"core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_to_self")
stats[
"core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "executor_wakeup_initiated")
stats[
"core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
core_stats, "executor_queue_drained")
stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(
core_stats, "executor_push_retries")
stats[
"core_server_requested_calls"] = massage_qps_stats_helpers.counter(
core_stats, "server_requested_calls")
stats[
"core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
core_stats, "server_slowpath_requests_queued")
stats[
"core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_failures")
stats[
"core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_successes")
stats[
"core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_transient_pop_failures")
h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_call_initial_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
stats["core_poll_events_returned"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_write_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_iov_size")
stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_write_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_offer_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer_iov_size")
stats["core_tcp_read_offer_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_size")
stats["core_http2_send_message_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_initial_metadata_per_write")
stats["core_http2_send_initial_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_per_write")
stats["core_http2_send_message_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_trailing_metadata_per_write")
stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_flowctl_per_write")
stats["core_http2_send_flowctl_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
stats["core_server_cqs_checked_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
| 54.76383 | 118 | 0.661875 | [
"Apache-2.0"
] | 4con/grpc-win-xp | tools/run_tests/performance/massage_qps_stats.py | 25,739 | Python |
from tocenv.components.position import Position
class DirectionType:
# Clock-wise numbering
Up = 0
Down = 2
Left = 1
Right = 3
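# Dummy forward declaration so the methods of the real Direction class below
# can use `-> Direction` return annotations (evaluated at definition time).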
class Direction(object):
def __init__(self):
pass
class Direction(object):
def __init__(self, direction_type):
self.direction = direction_type
def turn_right(self) -> Direction:
self.direction = (self.direction + 1) % 4
return self
def turn_left(self) -> Direction:
self.direction = ((self.direction + 4) - 1) % 4
return self
def half_rotate(self) -> Direction:
self.direction = (self.direction + 2) % 4
return self
@property
def value(self):
return self.direction
def _to_position(self) -> Position:
if self.direction == DirectionType.Up:
return Position(x=0, y=1)
elif self.direction == DirectionType.Down:
return Position(x=0, y=-1)
elif self.direction == DirectionType.Left:
return Position(x=-1, y=0)
elif self.direction == DirectionType.Right:
return Position(x=1, y=0)
def _to_string(self) -> str:
if self.direction == DirectionType.Up:
return 'Up'
elif self.direction == DirectionType.Down:
return 'Down'
elif self.direction == DirectionType.Left:
return 'Left'
elif self.direction == DirectionType.Right:
return 'Right'
def get_type(self):
return self.direction
def __str__(self):
return 'Direction({0})'.format(self._to_string())
| 23.514706 | 57 | 0.595372 | [
"Apache-2.0"
] | KevinJeon/The-Tragedy-of-the-commons | tocenv/components/direction.py | 1,599 | Python |
from http import cookies
from io import StringIO
import pytest
def log_entry(entry):
return StringIO(entry)
@pytest.fixture
def cookie_zip_code():
cookie = cookies.SimpleCookie()
cookie.load(rawdata='zip=98101')
return cookie
@pytest.fixture
def cookie_empty():
cookie = cookies.SimpleCookie()
cookie.load(rawdata='')
return cookie
@pytest.fixture
def cloudfront_entry():
return log_entry('''2014-05-23 01:13:11 FRA2 182 192.0.2.10 GET d111111abcdef8.cloudfront.net /view/my/file.html 200 www.displaymyfiles.com Mozilla/4.0%20(compatible;%20MSIE%205.0b1;%20Mac_PowerPC) - zip=98101 RefreshHit MRVMF7KydIvxMWfJIglgwHQwZsbG2IhRJ07sn9AkKUFSHS9EXAMPLE== d111111abcdef8.cloudfront.net http - 0.001 - - - RefreshHit HTTP/1.1''') # noqa: E501
@pytest.fixture
def cloudfront_entry_broken_cookie():
return log_entry('''2014-05-23 01:13:11 FRA2 182 192.0.2.10 GET d111111abcdef8.cloudfront.net /view/my/file.html 200 www.displaymyfiles.com Mozilla/4.0%20(compatible;%20MSIE%205.0b1;%20Mac_PowerPC) - zip 98101 RefreshHit MRVMF7KydIvxMWfJIglgwHQwZsbG2IhRJ07sn9AkKUFSHS9EXAMPLE== d111111abcdef8.cloudfront.net http - 0.001 - - - RefreshHit HTTP/1.1''') # noqa: E501
@pytest.fixture
def cloudfront_entry2():
return log_entry('''2014-05-23 01:13:12 LAX1 2390282 192.0.2.202 GET d111111abcdef8.cloudfront.net /soundtrack/happy.mp3 304 www.unknownsingers.com Mozilla/4.0%20(compatible;%20MSIE%207.0;%20Windows%20NT%205.1) a=b&c=d zip=98101 Hit xGN7KWpVEmB9Dp7ctcVFQC4E-nrcOcEKS3QyAez--06dV7TEXAMPLE== d111111abcdef8.cloudfront.net http - 0.002 - - - Hit HTTP/1.1''') # noqa: E501
@pytest.fixture
def loadbalancer_http_entry():
return log_entry('''http 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 10.0.0.1:80 0.000 0.001 0.000 200 200 34 366 "GET http://www.example.com:80/?a=b&c=d&zip=98101 HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337262-36d228ad5d99923122bbe354" "-" "-" 0 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_https_entry():
return log_entry('''https 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 10.0.0.1:80 0.086 0.048 0.037 200 200 0 57 "GET https://www.example.com:443/ HTTP/1.1" "curl/7.46.0" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337281-1d84f3d73c47ec4e58577259" "www.example.com" "arn:aws:acm:us-east-2:123456789012:certificate/12345678-1234-1234-1234-123456789012" 1 2018-07-02T22:22:48.364000Z "authenticate,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_http2_entry():
return log_entry('''h2 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.1.252:48160 10.0.0.66:9000 0.000 0.002 0.000 200 200 5 257 "GET https://10.0.2.105:773/ HTTP/2.0" "curl/7.46.0" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337327-72bd00b0343d75b906739c42" "-" "-" 1 2018-07-02T22:22:48.364000Z "redirect" "https://example.com:80/" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_websockets_entry():
return log_entry('''ws 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.0.140:40914 10.0.1.192:8010 0.001 0.003 0.000 101 101 218 587 "GET http://10.0.0.30:80/ HTTP/1.1" "-" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 1 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_secured_websockets_entry():
return log_entry('''wss 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.0.140:44244 10.0.0.171:8010 0.000 0.001 0.000 101 101 218 786 "GET https://10.0.0.30:443/ HTTP/1.1" "-" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 1 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_lambda_entry():
return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 200 200 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_lambda_failed_entry():
return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "forward" "-" "LambdaInvalidResponse"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward():
return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward_refused():
return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "api.example.com" "session-reused" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward_h2():
return log_entry('''h2 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "api.example.com" "-" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
| 75.943182 | 569 | 0.748466 | [
"Apache-2.0"
] | gabriel4649/aws-log-parser | test/conftest.py | 6,683 | Python |
#!/home/observer/miniconda2/bin/python
import numpy as N
import sys, os
import logging as L
import subprocess as S
from collections import namedtuple
from sigpyproc.Readers import FilReader as F
sys.path.append("/home/vgupta/Codes/Fake_FRBs/")
from Furby_reader import Furby_reader
class FileNotFound(Exception):
pass
class Observation():
def __init__(self, utc, cfg_file = "/home/vgupta/resources/observations.cfg"):
self.utc = utc
self.cfg_file = cfg_file
self.read_conf()
self.get_results_dir()
self.get_archives_dir()
self.is_failed = self.if_failed()
self.read_info()
self.processed_offline()
self.annotation = self.read_annotation()
def __str__(self):
return self.utc
def __repr__(self):
return self.utc
def read_annotation(self):
afile = os.path.join(self.results_dir, "obs.txt")
if not os.path.exists(afile):
return None
with open(afile, 'r') as f:
return f.read()
def read_conf(self):
if not os.path.exists(self.cfg_file):
raise Exception("Cannot find observation configuration file - {0}".format(self.cfg_file))
#raise FileNotFound("Cannot find observation configuration file - {0}".format(self.cfg_file))
conf_tmp = {}
with open(self.cfg_file) as c:
lines = c.readlines()
for line in lines:
if (line.startswith("#") or line == "" or line == "\n"):
continue
key = line.strip().split()[0].strip()
val = line.strip().split()[1].strip()
val = self.check_type(val)
conf_tmp[key] = val
tmp = namedtuple("CONF", conf_tmp.keys())
self.conf = tmp(*conf_tmp.values())
def get_results_dir(self):
path1 = os.path.join(self.conf.results_dir, self.utc)
path2 = os.path.join(self.conf.old_results_dir, self.utc)
if os.path.isdir(path1):
self.results_dir = self.conf.results_dir
elif os.path.isdir(path2):
self.results_dir = self.conf.old_results_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old results. Neither {1} nor {2} exists".format(self.utc, path1, path2))
def get_archives_dir(self):
path1 = os.path.join(self.conf.archives_dir, self.utc)
path2 = os.path.join(self.conf.old_archives_dir, self.utc)
if os.path.isdir(path1):
self.archives_dir = self.conf.archives_dir
elif os.path.isdir(path2):
self.archives_dir = self.conf.old_archives_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old archives".format(self.utc))
def processed_offline(self):
self.offline_cand_file = os.path.join(self.archives_dir, self.utc, self.conf.offline_output_dir, self.conf.offline_output_file)
self.processed_offline = os.path.exists(self.offline_cand_file) and not self.is_failed
def read_header(self):
if self.is_failed:
self.header = None
return
self.header_file = os.path.join(self.results_dir, self.utc, "FB", self.conf.header_file)
if not os.path.exists(self.header_file):
raise Exception("Header file({0}) does not exist".format(self.header_file))
with open(self.header_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
key = line.split()[0].strip()
val = line.split()[1].strip()
cval = self.check_type(val)
if key.startswith("FURBY"):
cval = str(val)
hdr_tmp[key] = cval
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("HEADER", keys)
self.header = tmp(*values)
self.tres = self.header.TSAMP * 1e-6
return self.header
def read_info(self):
self.obs_info_file = os.path.join(self.results_dir, self.utc, "obs.info")
if not os.path.exists(self.obs_info_file):
raise Exception("obs.info file({0}) does not exist".format(self.obs_info_file))
with open(self.obs_info_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
if line.startswith("#") or line == "" or line == "\n":
continue
key = line.split()[0].strip()
val = line.split()[1].strip()
val = self.check_type(val)
            if key == "INT" and self.is_failed:
                val = 0
            hdr_tmp[key] = val
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("INFO", keys)
self.info = tmp(*values)
#Getting Tobs-----------------
filterbank_name = self.utc + ".fil"
filterbank_file = os.path.join(self.archives_dir, self.utc, "FB/BEAM_001/", filterbank_name)
if os.path.exists(filterbank_file):
filt_header = F(filterbank_file).header
self.tobs = filt_header.tobs
if self.info.INT > self.tobs:
self.tobs = self.info.INT
else:
self.tobs = self.info.INT
#-----------------------------
return self.info
def check_type(self, val):
try:
ans=int(val)
return ans
except ValueError:
try:
ans=float(val)
return ans
except ValueError:
if val.lower()=="false":
return False
elif val.lower()=="true":
return True
else:
return val
def if_processing(self):
processing_file = os.path.join(self.results_dir, self.utc, "obs.processing")
return os.path.exists(processing_file)
def if_failed(self):
obs_failed_file = os.path.join(self.results_dir, self.utc, "obs.failed")
return os.path.exists(obs_failed_file)
def read_furby_params(self):
if self.is_failed:
self.inj_furbys = -1
return
if (self.info.MB_ENABLED or self.info.CORR_ENABLED):
self.inj_furbys = -1
else:
self.read_header()
try:
self.inj_furbys = self.header.INJECTED_FURBYS
except AttributeError as e:
#log.warn("Could not find INJECTED_FURBYS in the header file for UTC: {0}".format(self.utc))
#log.warn("Assuming no furby injection happened in this observation ({0})".format(self.utc))
self.inj_furbys = 0
else:
if self.inj_furbys > 0:
self.furby_beams = self.header.FURBY_BEAMS.strip(",")
self.furby_ids = self.header.FURBY_IDS.strip(",")
self.furby_tstamps = self.header.FURBY_TSTAMPS.strip(",")
#log.debug("Found: injected_furbys: {0}, furby_ids: {1}, furby_beams: {2}, furby_tstamps: {3}".format(self.inj_furbys, self.furby_ids, self.furby_beams, self.furby_tstamps))
def split_and_filter_furby_params(self):
if self.inj_furbys < 1:
raise ValueError("No furbies to split")
f_ids = N.array(self.furby_ids.split(","))
f_beams = N.array(self.furby_beams.split(","))
f_tstamps = N.array(self.furby_tstamps.split(","))
f_ids = f_ids[N.where(f_ids!='')]
f_beams = f_beams[N.where(f_beams!='')]
f_tstamps = f_tstamps[N.where(f_tstamps!='')]
test = N.array([len(f_ids), len(f_beams), len(f_tstamps)])
if N.any(test-self.inj_furbys):
raise ValueError("Incorrect number of furby params, observation should have failed")
self.furbies = []
self.dropped_furbies = []
for i in range(self.inj_furbys):
furby = Furby(f_ids[i], db = os.path.join(self.archives_dir, self.utc, "Furbys"))
furby.i_beam = int(f_beams[i])
furby.i_tstamp = float(f_tstamps[i])
furby.calc_times()
if (self.check_if_dropped(furby)):
self.dropped_furbies.append(furby)
else:
self.furbies.append(furby)
def check_if_dropped(self, furby):
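        # A furby counts as dropped if its injection window is clipped at the
        # start (tstamp earlier than half its own length), starts beyond the
        # end of the observation (tobs), or follows a previously kept furby
        # too closely (within its length plus 512 samples).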
if not hasattr(furby, 'header'):
furby.read_fheader()
if not hasattr(furby, 'length'):
furby.calc_times()
if furby.i_tstamp < furby.length/2:
return True
if (furby.i_tstamp - furby.length/2) > self.tobs:
return True
all_furby_tstamps = N.array([float(i.i_tstamp) for i in self.furbies])
diff = furby.i_tstamp - all_furby_tstamps
if N.any((diff < (furby.length + 512*self.tres)) & (diff > 0)):
return True
return False
#----------------------------------------------------------------------------------------#
class Furby(Furby_reader):
def __init__(self, ID, db = "/home/dada/furby_database"):
self.ID = ID
self.name = "furby_"+ID
self.DB = db
self.file = os.path.join(self.DB, self.name)
self.i_beam = None
self.i_tstamp = None
self.i_snr = None
def __repr__(self):
return str(self.ID)
def read_fheader(self):
#self.header = self.read_header(self.file)
self.read_header(self.file)
def calc_times(self):
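        # i_tstamp is referenced to the centre of the band; applying the
        # dispersion delay of the top and bottom channels relative to mid-band
        # gives the start and end times of the injected, dispersed sweep.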
log = L.getLogger("furby_manager")
if not hasattr(self, 'header'):
self.read_fheader()
chw = (self.header.FTOP - self.header.FBOTTOM) / self.header.NCHAN
f_chtop = self.header.FTOP - chw/2
f_chmid = f_chtop - (self.header.NCHAN/2 * chw)
f_chbottom = self.header.FBOTTOM + chw/2
delay_to_top = 4.14881 * 1e6 * self.header.DM * ( f_chtop**(-2) - f_chmid**(-2) ) *1e-3 #in s
delay_to_bottom = 4.14881 * 1e6 * self.header.DM * ( f_chbottom**(-2) - f_chmid**(-2) ) *1e-3 #in s
self.s_time = self.i_tstamp + delay_to_top
self.e_time = self.i_tstamp + delay_to_bottom
self.c_time = self.i_tstamp
self.length = self.header.NSAMPS * self.header.TSAMP * 1e-6
#---------------------------------------------------------------------------------------#
def list_UTCs_from(start_utc):
#Note to someone editing this in future: Keep in mind that other scripts depend upon that fact that this function returns the list of UTCs in correctly sorted order. Do not change that, even if that costs speed. Or make sure that the scripts using this can be edited accordingly.
start = Observation(start_utc)
cmd = "ls -1d "+start.results_dir+"/202* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
utcs = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n")
#VG: 02/05/2020 -- disabling the section below -- It doesn't work, and I don't have a quick fix either.
'''
if start.results_dir == start.conf.old_results_dir:
#Also append utcs from the new results directory
cmd = "ls -1d "+conf.results_dir+"/20* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
utcs.extend(S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n"))
'''
if len(utcs) == 0:
raise Exception("Given start UTC ({}) not found in {}".format(start_utc, start.results_dir))
return utcs
def list_UTCs_until(utc):
check = Observation(utc)
start_utc = get_first_UTC()
UTCs_from_start = list_UTCs_from(start_utc)
#Assume that list_UTCs_from() returns UTCs sorted in correct order, which it should.
end_utc = utc
    index = UTCs_from_start.index(end_utc)
    UTCs_until = UTCs_from_start[:index+1]
return UTCs_until
def list_UTCs_after(utc):
    inclusive_utcs = list_UTCs_from(utc)
return inclusive_utcs[1:]
def get_latest_UTC():
cmd = "ls -1d -rt "+conf.results_dir+"/20* | tail -1 | awk -F/ '{print $5}'"
    utc = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip()
return utc
def get_first_UTC():
'''
Returns the first UTC recorded by Molonglo after the disk crash in October 2017
'''
return "2017-10-31-08:49:32"
| 34.079268 | 281 | 0.643049 | [
"MIT"
] | vg2691994/mock_frb_injection_results | helpers.py | 11,178 | Python |
# -*- coding: utf-8 -*-
import scrapy
class scrapyshkmbab39Spider(scrapy.Spider):
name = "scrapyshkmbab39"
allowed_domains = ["ganjoor.net"]
if 1 == 1:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh"]
else:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + "1"]
order = 1
def parse(self, response):
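        # Walk the children of the poem's <article>: set the title from the
        # page heading on the first pass, then classify each block as a
        # two-hemistich verse (div.b with m1/m2), a stanza couplet (div.b2),
        # or a plain paragraph, skipping bookmarks, spacers and '٭٭٭'
        # separators, and collect them into sh["text"] keyed by verse index.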
index = 0
sh = dict()
sh["type"] = "fasl"
sh["text"] = dict()
for i, poem in enumerate(response.css("div.poem>article>*")):
if index == 0:
if 0 == 1:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 2:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m2>p::text").extract()).strip()
elif 0 == 3:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 4:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m2>p::text").extract()).strip()
else:
sh["title"] = ''.join(response.css("div.poem>article>h2>a::text").extract_first()).strip()
if poem.css("p::text").extract_first() is None or 'rel="bookmark"' in poem.css('*').extract_first() or 'class="spacer"' in poem.css('*').extract_first() or '<div style=' in poem.css('*').extract_first():
continue
if len(poem.css("div.m1>p")) == 1:
if poem.css("div.b"):
if '٭٭٭' not in poem.css("div.m1>p::text").extract_first() and ''.join(poem.css("div.m1>p::text").extract()).strip() != '':
sh["text"][index] = dict([
("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if poem.css("div.b2"):
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css('p::text').extract_first() and ''.join(poem.css('p::text').extract()).strip() != '':
sh['text'][index] = dict([
('p', ''.join(poem.css('p::text').extract()).strip())
])
index = index + 1
sh["order"] = self.order
self.order = self.order + 1
yield sh
# next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
if self.order < 2:
next_page = response.urljoin("https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + str(self.order))
yield scrapy.Request(next_page, callback=self.parse)
| 59.5 | 215 | 0.466514 | ["MIT"] | amirmasoud/ganjoor-crawler | ganjoor/spiders/hojviri/kashfol-mahjoob/scrapyshkmbab39.py | 3,971 | Python
import psycopg2
import psycopg2.extras
class DBHandler:
"""
Handles I/O concerning the database to hide its implementation from client services.
"""
def __init__(self,
postgres_username=None,
postgres_password=None,
db_username='dbpedia_app',
db_password='dummy_password'):
# ordinarily you would get these from some secret store
# e.g. heroku has a specific url that you parse to get both
# or os.environ storage (like those used for API keys and the like)
user_name = db_username
password = db_password
# check to see if the db exists locally, create it if necessary
if postgres_password is not None and postgres_username is not None:
try:
connection = psycopg2.connect("dbname='postgres' user='%s' "
"host='localhost' password='%s'"
% (postgres_username, postgres_password))
connection.autocommit = True
cursor = connection.cursor()
# queries the postgres catalog to see if 'dbpedia' exists
# if not, creates it
cursor.execute("SELECT COUNT(*) = 0 FROM pg_catalog.pg_database WHERE datname = 'dbpedia'")
not_exists_row = cursor.fetchone()
not_exists = not_exists_row[0]
if not_exists:
cursor.execute("CREATE USER %s PASSWORD '%s'" % (user_name, password))
cursor.execute('CREATE DATABASE dbpedia OWNER %s' % (user_name,))
connection.close()
except:
# Presume if credentials are passed the user wants to perform this check/DB construction
# fail via error propagation
raise
try:
self.connection = psycopg2.connect("dbname='dbpedia' user='%s' host='localhost' password='%s'"
% (user_name, password))
except:
raise AssertionError('Failed to connect to dbpedia database. Has the local dbpedia been created?')
def __del__(self):
self.connection.close()
def commit(self):
self.connection.commit()
def schema_exists(self):
"""
Checks the estimated number of tuples in the subjects table to determine if data exists
:return:
"""
with self.connection.cursor() as cursor:
cursor.execute('select reltuples FROM pg_class where relname = %s', ('subjects',))
result = cursor.fetchone()[0]
return result > 0
def build_table_schema(self, schema_name, schema_file_path):
"""
Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is
dropped (deleted) and recreated.
:param schema_name:
:param schema_file_path:
:return:
"""
# do not call with user input given the manual query construction here
with self.connection.cursor() as cursor:
cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % schema_name)
schema_file = open(schema_file_path, 'r').read()
cursor.execute(schema_file)
def build_indices(self):
"""
Builds the following indices:
Index on name for subjects
Index on predicate for predicate_object
Index on subject_id for predicate object
:return:
"""
with self.connection.cursor() as cursor:
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_subject_id_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.subject_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_predicate_idx')
cursor.execute('create index subject_idx on dbpedia.subjects (name)')
cursor.execute('create index pv_subject_id_idx on dbpedia.predicate_object (subject_id)')
cursor.execute('create index pv_predicate_idx on dbpedia.predicate_object (predicate);')
def insert_spo_tuple(self, spo_tuple):
"""
Handles the insertion of spo tuples into the db. Workflow:
Attempt to find the subject table entry corresponding to your subject. If found, use that ID for
inserting your po values. Otherwise, insert your subject into the subject table and use that ID
instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.
:param spo_tuple:
:return:
"""
(subject, predicate, db_object) = spo_tuple
with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute('select subject_id from dbpedia.subjects '
'where name = %s', (subject,))
results = cursor.fetchone()
if results is None or len(results) == 0:
cursor.execute('INSERT INTO dbpedia.subjects (name) VALUES (%s) '
'returning subject_id', (subject,))
results = cursor.fetchone()
id = results['subject_id']
# now we have the correct id in either case, insert the values into the db
cursor.execute('INSERT INTO dbpedia.predicate_object (subject_id, predicate, object) '
'VALUES (%s, %s, %s)', (id, predicate, db_object))
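# Illustrative usage sketch (not part of the original module; the example subject,
# predicate and object values are made up):
#
#     handler = DBHandler(db_username='dbpedia_app', db_password='dummy_password')
#     handler.insert_spo_tuple(('Alan_Turing', 'birthPlace', 'London'))
#     handler.commit()
#
# The first tuple seen for 'Alan_Turing' creates its row in dbpedia.subjects; later
# tuples with the same subject reuse that subject_id.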
def get_person_metadata(self, person_name, use_exact_match=False):
"""
Returns all metadata associated with the provided person_name. However, does not actually check
to see if the identifier corresponds to a person or not; the class of the identifier will
be included in the returned metadata though. DBPedia People only contains people predicate
types as well.
Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided
to query against the subject table (WHERE = identifier). If False, uses the LIKE operator
to attempt to find similar IDs that are not exactly the same. Results will still be a superset
of the use_exact_match = True case.
:param person_name:
:param use_exact_match:
:return:
"""
# wikipedia replaces all spaces with underscores
# upper case both sides so the comparison is case-insensitive
person_name = person_name.replace(' ', '_').upper()
with self.connection.cursor() as cursor:
# get id associated with this person
# get all similar IDs
if not use_exact_match:
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) '
'LIKE %s',
('%%' + person_name + '%%',))
else:
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) = %s',
(person_name,))
results = cursor.fetchall()
# no person matches the input name
# return empty list
if results is None:
return []
subject_id_list = [x[0] for x in results]
# get all metadata associated with the subject_ids
cursor.execute('select dbpedia.subjects.name, predicate, object '
'FROM dbpedia.predicate_object '
'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
'WHERE dbpedia.predicate_object.subject_id = ANY(%s)', (subject_id_list,))
# this should never be none
# Sort results by name and return
return sorted(cursor.fetchall(), key=lambda x: x[0])
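# Illustrative sketch of the two matching modes described above (the names are made up):
#
#     # LIKE-based match: metadata for every subject whose name contains the string
#     handler.get_person_metadata('Curie')
#     # exact match: only the subject literally named 'Marie_Curie' (case-insensitive)
#     handler.get_person_metadata('Marie Curie', use_exact_match=True)
#
# Both calls return (name, predicate, object) rows sorted by subject name.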
def get_tuples_by_predicate(self, predicate_of_interest):
"""
Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since
you are querying such a large fraction of the po table at once (unless your predicate does not exist).
Predicates:
Name
Type
Gender
Description
Birthdate
GivenName
Surname
BirthPlace
DeathDate
DeathPlace
:param predicate_of_interest:
:return:
"""
with self.connection.cursor() as cursor:
cursor.execute('select dbpedia.subjects.name, '
'predicate, '
'object '
'FROM dbpedia.predicate_object '
'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
'WHERE upper(dbpedia.predicate_object.predicate) = upper(%s)', (predicate_of_interest,))
results = cursor.fetchall()
if results is None:
return []
else:
return results
| 36.549407 | 128 | 0.583324 | ["MIT"] | jdwinkler/dbpedia_service | database_query_handler.py | 9,247 | Python
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import ANY
from databuilder.models.graph_serializable import (
RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY, RELATION_START_LABEL,
RELATION_TYPE,
)
from databuilder.models.user import User
from databuilder.serializers import neo4_serializer, neptune_serializer
from databuilder.serializers.neptune_serializer import (
NEPTUNE_CREATION_TYPE_JOB, NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID,
NEPTUNE_HEADER_LABEL, NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_RELATIONSHIP_HEADER_FROM, NEPTUNE_RELATIONSHIP_HEADER_TO,
)
class TestUser(unittest.TestCase):
def setUp(self) -> None:
super(TestUser, self).setUp()
self.user = User(first_name='test_first',
last_name='test_last',
name='test_first test_last',
email='[email protected]',
github_username='github_test',
team_name='test_team',
employee_type='FTE',
manager_email='[email protected]',
slack_id='slack',
is_active=True,
updated_at=1,
role_name='swe')
def test_get_user_model_key(self) -> None:
user_email = User.get_user_model_key(email=self.user.email)
self.assertEqual(user_email, '[email protected]')
def test_create_nodes(self) -> None:
nodes = self.user.create_nodes()
self.assertEqual(len(nodes), 1)
def test_create_node_additional_attr(self) -> None:
test_user = User(first_name='test_first',
last_name='test_last',
name='test_first test_last',
email='[email protected]',
github_username='github_test',
team_name='test_team',
employee_type='FTE',
manager_email='[email protected]',
slack_id='slack',
is_active=True,
updated_at=1,
role_name='swe',
enable_notify=True)
nodes = test_user.create_nodes()
serialized_node = neo4_serializer.serialize_node(nodes[0])
self.assertEqual(serialized_node['email'], '[email protected]')
self.assertEqual(serialized_node['role_name'], 'swe')
self.assertTrue(serialized_node['enable_notify:UNQUOTED'])
def test_create_node_additional_attr_neptune(self) -> None:
test_user = User(first_name='test_first',
last_name='test_last',
name='test_first test_last',
email='[email protected]',
github_username='github_test',
team_name='test_team',
employee_type='FTE',
manager_email='[email protected]',
slack_id='slack',
is_active=True,
updated_at=1,
role_name='swe',
enable_notify=True)
nodes = test_user.create_nodes()
serialized_node = neptune_serializer.convert_node(nodes[0])
self.assertEqual(serialized_node['email:String(single)'], '[email protected]')
self.assertEqual(serialized_node['role_name:String(single)'], 'swe')
self.assertTrue(serialized_node['enable_notify:Bool(single)'])
def test_create_relation(self) -> None:
relations = self.user.create_relation()
self.assertEqual(len(relations), 1)
start_key = '[email protected]'
end_key = '[email protected]'
expected_relation = {
RELATION_START_KEY: start_key,
RELATION_START_LABEL: User.USER_NODE_LABEL,
RELATION_END_KEY: end_key,
RELATION_END_LABEL: User.USER_NODE_LABEL,
RELATION_TYPE: User.USER_MANAGER_RELATION_TYPE,
RELATION_REVERSE_TYPE: User.MANAGER_USER_RELATION_TYPE
}
self.assertEqual(expected_relation, neo4_serializer.serialize_relationship(relations[0]))
def test_create_relation_neptune(self) -> None:
relations = self.user.create_relation()
serialized = neptune_serializer.convert_relationship(relations[0])
start_key = '{email}'.format(email='[email protected]')
end_key = '{email}'.format(email='[email protected]')
expected = [
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=start_key,
to_vertex_id=end_key,
label=User.USER_MANAGER_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: start_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: end_key,
NEPTUNE_HEADER_LABEL: User.USER_MANAGER_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
},
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=end_key,
to_vertex_id=start_key,
label=User.MANAGER_USER_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: end_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: start_key,
NEPTUNE_HEADER_LABEL: User.MANAGER_USER_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
]
self.assertListEqual(serialized, expected)
def test_not_including_empty_attribute(self) -> None:
test_user = User(email='[email protected]',
foo='bar')
self.assertDictEqual(neo4_serializer.serialize_node(test_user.create_next_node()),
{'KEY': '[email protected]', 'LABEL': 'User', 'email': '[email protected]',
'is_active:UNQUOTED': True, 'first_name': '', 'last_name': '', 'full_name': '',
'github_username': '', 'team_name': '', 'employee_type': '', 'slack_id': '',
'role_name': '', 'updated_at:UNQUOTED': 0, 'foo': 'bar'})
test_user2 = User(email='[email protected]',
foo='bar',
is_active=False,
do_not_update_empty_attribute=True)
self.assertDictEqual(neo4_serializer.serialize_node(test_user2.create_next_node()),
{'KEY': '[email protected]', 'LABEL': 'User', 'email': '[email protected]', 'foo': 'bar'})
| 46.038217 | 118 | 0.596292 | ["Apache-2.0"] | JacobSMoller/amundsendatabuilder | tests/unit/models/test_user.py | 7,228 | Python
from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
class ImageAdmin(admin.ModelAdmin):
fields = ( 'image','name','caption','profile','post_date', 'user', )
readonly_fields = ('profile', 'post_date', 'user',)
#registering the models
# admin.site.register(Image, ImageAdmin)
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Like)
admin.site.register(Comment)
| 29.2 | 70 | 0.755708 | ["MIT"] | MachariaMark/fakeinsta | insta/admin.py | 438 | Python
"""Registry for the TF Encrypted Converter."""
import array
import logging
import os
from typing import Any, List
from collections import OrderedDict
import yaml
import numpy as np
import tensorflow as tf
from ..layers import Conv2D, Relu, Sigmoid, Dense, AveragePooling2D, MaxPooling2D
from ..protocol.pond import PondPrivateTensor, PondMaskedTensor
def registry():
"""Map reserved names and scopes to their conversion functions."""
reg = {
'Placeholder': _placeholder,
'Const': _constant,
'Conv2D': _conv2d,
'Relu': _relu,
'Sigmoid': _sigmoid,
'MatMul': _matmul,
'Shape': _shape,
'StridedSlice': _strided_slice,
'Add': _add,
'Sub': _sub,
'Transpose': _transpose,
'Reshape': _reshape,
'Pack': _pack,
'Rsqrt': _rsqrt,
'Mul': _mul,
'ExpandDims': _expand_dims,
'AvgPool': _avgpool,
'Squeeze': _squeeze,
'ConcatV2': _concat,
'BiasAdd': _bias_add,
'MaxPool': _maxpool,
'Pad': _pad,
'BatchToSpaceND': _batch_to_space_nd,
'SpaceToBatchND': _space_to_batch_nd,
'ArgMax': _argmax,
'required_space_to_batch_paddings': _required_space_to_batch_paddings,
'flatten': _flatten,
'conv2d': _keras_conv2d,
'Slice': _slice,
'Neg': _negative,
'Split': _split,
'Identity': _identity,
"GatherV2": _gather,
"dense": _keras_dense,
}
return reg
convert_dir = os.path.dirname(os.path.abspath(__file__))
specops_path = os.path.join(convert_dir, "specops.yaml")
with open(specops_path, "r") as stream:
loaded_yaml = yaml.load(stream, Loader=yaml.SafeLoader)
sorted_yaml = sorted(loaded_yaml.items(), key=lambda kv: kv[0])
REGISTERED_SPECOPS = OrderedDict(sorted_yaml)
# pylint: disable=unused-argument
# pylint: disable=missing-docstring
def _placeholder(converter, node: Any, inputs: List[str]) -> Any:
return tf.placeholder(node.attr["dtype"].type,
shape=node.attr["shape"].shape)
def _constant(converter, node: Any, inputs: List[str]) -> Any:
# we need to be able to access the underlying weights later, so return the node itself
return node
def _identity(converter, node: Any, inputs: List[str]) -> Any:
# identity is a no-op: forward the already-converted input
return converter.outputs[inputs[0]]
def _matmul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
tensor = b.attr["value"].tensor
b_shape = [i.size for i in tensor.tensor_shape.dim]
transpose_a = node.attr["transpose_a"].b
transpose_b = node.attr["transpose_b"].b
layer = Dense(a.shape.as_list(),
b_shape[1],
transpose_input=transpose_a,
transpose_weight=transpose_b)
dtype = tensor.dtype
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for weights")
def inputter_fn():
return tf.constant(np.array(nums).reshape(b_shape))
w = converter.protocol.define_private_input(converter.model_provider,
inputter_fn)
layer.initialize(initial_weights=w)
return layer.forward(a)
def _conv2d(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
kernel = converter.outputs[inputs[1]]
if isinstance(kernel, tf.NodeDef):
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
w = _nodef_to_private_pond(converter, kernel)
else:
shape = kernel.shape.as_list()
w = kernel
fmt = node.attr["data_format"].s.decode('ascii')
layer = Conv2D(x_in.shape.as_list(),
shape,
strides=int(max(node.attr["strides"].list.i)),
padding=node.attr["padding"].s.decode('ascii'),
channels_first=fmt == "NCHW")
layer.initialize(initial_weights=w)
out = layer.forward(x_in)
return out
def _keras_conv2d(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
conv_op = interiors["Conv2D"]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
for ax in [0, -1, -1]:
b = b.expand_dims(axis=ax)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
fmt = conv_op.attr["data_format"].s.decode('ascii')
strides = int(max(conv_op.attr["strides"].list.i))
padding = conv_op.attr["padding"].s.decode('ascii')
layer = Conv2D(
input_shape, shape,
strides=strides,
padding=padding,
channels_first=fmt == "NCHW"
)
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _keras_dense(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
layer = Dense(input_shape,
out_features=shape[1])
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _relu(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Relu(x_in.shape.as_list()).forward(x_in)
def _sigmoid(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Sigmoid(x_in.shape.as_list()).forward(x_in)
def _strided_slice(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
begin = converter.outputs[inputs[1]]
end = converter.outputs[inputs[2]]
strides = converter.outputs[inputs[3]]
begin_mask = node.attr["begin_mask"].i
end_mask = node.attr["end_mask"].i
ellipsis_mask = node.attr["ellipsis_mask"].i
new_axis_mask = node.attr["new_axis_mask"].i
shrink_axis_mask = node.attr["shrink_axis_mask"].i
begin = tf.constant(begin.attr["value"].tensor)
end = tf.constant(end.attr["value"].tensor)
strides = tf.constant(strides.attr["value"].tensor)
return converter.protocol.strided_slice(input_out, begin, end,
strides=strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def _pack(converter, node: Any, inputs: List[str]) -> Any:
final_inputs = []
for x_in in inputs:
input_c = converter.outputs[x_in]
if isinstance(input_c, tf.NodeDef):
final_inputs.append(_nodef_to_private_pond(converter, input_c))
else:
final_inputs.append(input_c)
return converter.protocol.stack(final_inputs, axis=node.attr["axis"].i)
def _bias_add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_private_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_private_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _maxpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
pooler = MaxPooling2D(shape, pool_size, strides, padding, channels_first)
out = pooler.forward(x_in)
return out
def _shape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return x_in.shape
def _reshape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
shape = converter.outputs[inputs[1]]
tensor = shape.attr["value"].tensor
dtype = shape.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for reshape shape")
return converter.protocol.reshape(x_in, list(nums))
def _transpose(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
perm = converter.outputs[inputs[1]]
tensor = perm.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = perm.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for transpose perm")
return converter.protocol.transpose(x_in, np.array(nums).reshape(shape))
def _expand_dims(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
input_axis = converter.outputs[inputs[1]]
axis_attr = input_axis.attr["value"].tensor.int_val
axis_val = array.array('i', axis_attr)[0]
return converter.protocol.expand_dims(input_out, axis_val)
def _negative(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
return converter.protocol.negative(input_out)
def _gather(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
indices = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
indices_out = list(_nodef_to_numpy_array(indices))
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.gather(input_out, indices_out, axis_val)
def _squeeze(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
axis = node.attr["squeeze_dims"].list.i
return converter.protocol.squeeze(x_in, list(axis))
def _split(converter, node: Any, inputs: List[str]) -> Any:
axis = converter.outputs[inputs[0]]
x_in = converter.outputs[inputs[1]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
num_split = node.attr["num_split"].i
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.split(input_out, num_split, axis_val)[0]
def _pad(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
p = (converter.outputs[inputs[1]])
paddings_t = p.attr["value"].tensor
paddings_arr = list(array.array('I', paddings_t.tensor_content))
paddings_lst = [paddings_arr[i:i + 2]
for i in range(0, len(paddings_arr), 2)]
return converter.protocol.pad(x_in, paddings_lst)
def _rsqrt(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
tensor = x_in.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = x_in.attr["dtype"].type
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for rsqrt")
def inputter_fn():
return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))
else:
# XXX this is a little weird but the input into rsqrt is public and
# being used only for batchnorm at the moment
decoded = converter.protocol._decode(x_in.value_on_0, True) # pylint: disable=protected-access
def inputter_fn():
return tf.rsqrt(decoded)
x = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x
def _add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _sub(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.sub(a_out, b_out)
def _mul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.mul(a_out, b_out)
def _avgpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
avg = AveragePooling2D(shape, pool_size, strides, padding, channels_first)
out = avg.forward(x_in)
return out
def _concat(converter, node: Any, inputs: List[str]) -> Any:
input0 = converter.outputs[inputs[0]]
input1 = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
axis_int = axis.attr["value"].tensor.int_val[0]
return converter.protocol.concat([input0, input1], axis_int)
def _batch_to_space_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
crops = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.batch_to_space_nd(x_in, block_shape, crops)
def _space_to_batch_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
paddings = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.space_to_batch_nd(x_in, block_shape, paddings)
def _flatten(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
shape = x_in.shape.as_list()
non_batch = 1
for dim in shape[1:]:
non_batch *= dim
return converter.protocol.reshape(x_in, [-1, non_batch])
def _required_space_to_batch_paddings(converter, node, inputs: List[str]):
inputs_node = [converter.outputs[inputs[i]] for i in range(len(inputs))]
inputs_int32 = []
for x_in in inputs_node:
pvt_check = isinstance(x_in, PondPrivateTensor)
msk_check = isinstance(x_in, PondMaskedTensor)
if pvt_check or msk_check:
logging.warning(("Revealing private input: "
"required_space_to_batch_paddings assumes public "
"input."))
inputs_int32.append(tf.cast(x_in.reveal().decode(), tf.int32))
elif isinstance(x_in, tf.NodeDef):
inputs_int32.append(_nodef_to_numpy_array(x_in))
else:
raise TypeError("Unexpected input of type {}.".format(type(x_in)))
if len(inputs_int32) == 2:
input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(crops, tf.float64)
else:
base_paddings, input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(crops, tf.float64)
pad_private = converter.protocol.define_public_input(
converter.model_provider, inputter_pad)
crop_private = converter.protocol.define_public_input(
converter.model_provider, inputter_crop)
return (pad_private, crop_private)
def _argmax(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
axis = converter.outputs[inputs[1]].attr["value"].tensor.int_val[0]
return converter.protocol.argmax(x_in, axis=axis)
def _slice(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
size = _nodef_to_numpy_array(converter.outputs[inputs[2]])
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
# Slice is a special case of strided_slice. Slice takes size (the number of
# elements we want to slice) as an input, whereas strided_slice takes end
# (the index at which slicing stops) as an input.
# We can infer the end parameter with: end[i] = begin[i] + size[i].
# If size[i] is negative, the slice extends to the end of that axis, and
# we infer the end parameter with: end[i] = input_shape[i] - size[i] + 1.
end = np.zeros(len(begin))
input_shape = x_in.shape.as_list()
# if size is negative take the input dimension
for i in range(len(end)): # pylint: disable=consider-using-enumerate
if size[i] < 0:
end[i] = input_shape[i] - size[i] + 1
else:
end[i] = begin[i] + size[i]
return converter.protocol.strided_slice(input_out, begin, end)
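# Worked example of the inference above (illustrative, not from the original source):
# for an input of shape [5, 10] with begin = [1, 2] and size = [3, -1], the loop
# computes end = [1 + 3, 10 - (-1) + 1] = [4, 12], i.e. rows 1:4 and columns 2 through
# the end of the axis.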
# pylint: enable=unused-argument
# pylint: enable=missing-docstring
def _nodef_to_public_pond(converter, x):
"""Map a NodeDef x to a PublicPondTensor."""
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
nums = x.attr["value"].tensor.int_val
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_public = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x_public
def _nodef_to_private_pond(converter, x):
"""Map a NodeDef x to a PrivatePondTensor."""
dtype = x.attr["dtype"].type
warn_msg = "Unexpected dtype {} found at node {}"
err_msg = "Unsupported dtype {} found at node {}"
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
logging.warning(warn_msg.format(dtype, x.name))
nums = x.attr["value"].tensor.int_val
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
logging.warning(warn_msg.format(dtype, x.name))
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_private = converter.protocol.define_private_input(
converter.model_provider, inputter_fn)
return x_private
def _nodef_to_numpy_array(x):
"""Map a NodeDef x to a np.array."""
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
return np.array(nums).reshape(x_shape)
| 29.144399 | 99 | 0.677348 | ["Apache-2.0"] | capeprivacy/tf-encrypted | tf_encrypted/convert/register.py | 21,596 | Python
from lldbsuite.test.lldbtest import *
import os
import vscode
class VSCodeTestCaseBase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
def create_debug_adaptor(self):
'''Create the Visual Studio Code debug adaptor'''
self.assertTrue(os.path.exists(self.lldbVSCodeExec),
'lldb-vscode must exist')
log_file_path = self.getBuildArtifact('vscode.txt')
self.vscode = vscode.DebugAdaptor(
executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(),
log_file=log_file_path)
def build_and_create_debug_adaptor(self):
self.build()
self.create_debug_adaptor()
def set_source_breakpoints(self, source_path, lines, condition=None,
hitCondition=None):
'''Sets source breakpoints and returns an array of strings containing
the breakpoint IDs ("1", "2") for each breakpoint that was set.
'''
response = self.vscode.request_setBreakpoints(
source_path, lines, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def set_function_breakpoints(self, functions, condition=None,
hitCondition=None):
'''Sets breakpoints by function name given an array of function names
and returns an array of strings containing the breakpoint IDs
("1", "2") for each breakpoint that was set.
'''
response = self.vscode.request_setFunctionBreakpoints(
functions, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def verify_breakpoint_hit(self, breakpoint_ids):
'''Wait for the process we are debugging to stop, and verify we hit
any breakpoint location in the "breakpoint_ids" array.
"breakpoint_ids" should be a list of breakpoint ID strings
(["1", "2"]). The return value from self.set_source_breakpoints()
or self.set_function_breakpoints() can be passed to this function'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'breakpoint':
continue
if 'description' not in body:
continue
# Descriptions for breakpoints will be in the form
# "breakpoint 1.1", so look for any description that matches
# ("breakpoint 1.") in the description field as verification
# that one of the breakpoint locations was hit. VSCode doesn't
# allow breakpoints to have multiple locations, but LLDB does.
# So when looking at the description we just want to make sure
# the right breakpoint matches and not worry about the actual
# location.
description = body['description']
print("description: %s" % (description))
for breakpoint_id in breakpoint_ids:
match_desc = 'breakpoint %s.' % (breakpoint_id)
if match_desc in description:
return
self.assertTrue(False, "breakpoint not hit")
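# Typical flow in a derived test case (illustrative sketch, not part of the original
# class; 'main.c' and line_number are placeholders):
#
#     source = 'main.c'
#     breakpoint_ids = self.set_source_breakpoints(source, [line_number])
#     self.vscode.request_continue()
#     self.verify_breakpoint_hit(breakpoint_ids)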
def verify_exception_breakpoint_hit(self, filter_label):
'''Wait for the process we are debugging to stop, and verify the stop
reason is 'exception' and that the description matches
'filter_label'
'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'exception':
continue
if 'description' not in body:
continue
description = body['description']
if filter_label == description:
return True
return False
def verify_commands(self, flavor, output, commands):
self.assertTrue(output and len(output) > 0, "expect console output")
lines = output.splitlines()
prefix = '(lldb) '
for cmd in commands:
found = False
for line in lines:
if line.startswith(prefix) and cmd in line:
found = True
break
self.assertTrue(found,
"verify '%s' found in console output for '%s'" % (
cmd, flavor))
def get_dict_value(self, d, key_path):
'''Verify each key in the key_path array is in contained in each
dictionary within "d". Assert if any key isn't in the
corresponding dictionary. This is handy for grabbing values from VS
Code response dictionary like getting
response['body']['stackFrames']
'''
value = d
for key in key_path:
if key in value:
value = value[key]
else:
self.assertTrue(key in value,
'key "%s" from key_path "%s" not in "%s"' % (
key, key_path, d))
return value
def get_stackFrames_and_totalFramesCount(self, threadId=None, startFrame=None,
levels=None, dump=False):
response = self.vscode.request_stackTrace(threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
if response:
stackFrames = self.get_dict_value(response, ['body', 'stackFrames'])
totalFrames = self.get_dict_value(response, ['body', 'totalFrames'])
self.assertTrue(totalFrames > 0,
'verify totalFrames count is provided by extension that supports '
'async frames loading')
return (stackFrames, totalFrames)
return (None, 0)
def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
dump=False):
(stackFrames, totalFrames) = self.get_stackFrames_and_totalFramesCount(
threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
return stackFrames
def get_source_and_line(self, threadId=None, frameIndex=0):
stackFrames = self.get_stackFrames(threadId=threadId,
startFrame=frameIndex,
levels=1)
if stackFrames is not None:
stackFrame = stackFrames[0]
if 'source' in stackFrame:
source = stackFrame['source']
if 'path' in source:
if 'line' in stackFrame:
return (source['path'], stackFrame['line'])
return ('', 0)
def get_stdout(self, timeout=0.0):
return self.vscode.get_output('stdout', timeout=timeout)
def get_console(self, timeout=0.0):
return self.vscode.get_output('console', timeout=timeout)
def get_local_as_int(self, name, threadId=None):
value = self.vscode.get_local_variable_value(name, threadId=threadId)
if value.startswith('0x'):
return int(value, 16)
elif value.startswith('0'):
return int(value, 8)
else:
return int(value)
def set_local(self, name, value, id=None):
'''Set a top level local variable only.'''
return self.vscode.request_setVariable(1, name, str(value), id=id)
def set_global(self, name, value, id=None):
'''Set a top level global variable only.'''
return self.vscode.request_setVariable(2, name, str(value), id=id)
def stepIn(self, threadId=None, waitForStop=True):
self.vscode.request_stepIn(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOver(self, threadId=None, waitForStop=True):
self.vscode.request_next(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOut(self, threadId=None, waitForStop=True):
self.vscode.request_stepOut(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def continue_to_next_stop(self):
self.vscode.request_continue()
return self.vscode.wait_for_stopped()
def continue_to_breakpoints(self, breakpoint_ids):
self.vscode.request_continue()
self.verify_breakpoint_hit(breakpoint_ids)
def continue_to_exception_breakpoint(self, filter_label):
self.vscode.request_continue()
self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
'verify we got "%s"' % (filter_label))
def continue_to_exit(self, exitCode=0):
self.vscode.request_continue()
stopped_events = self.vscode.wait_for_stopped()
self.assertEquals(len(stopped_events), 1,
"stopped_events = {}".format(stopped_events))
self.assertEquals(stopped_events[0]['event'], 'exited',
'make sure program ran to completion')
self.assertEquals(stopped_events[0]['body']['exitCode'], exitCode,
'exitCode == %i' % (exitCode))
def attach(self, program=None, pid=None, waitFor=None, trace=None,
initCommands=None, preRunCommands=None, stopCommands=None,
exitCommands=None, attachCommands=None, coreFile=None):
'''Build the default Makefile target, create the VSCode debug adaptor,
and attach to the process.
'''
# Make sure we disconnect and terminate the VSCode debug adaptor even
# if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_attach(
program=program, pid=pid, waitFor=waitFor, trace=trace,
initCommands=initCommands, preRunCommands=preRunCommands,
stopCommands=stopCommands, exitCommands=exitCommands,
attachCommands=attachCommands, coreFile=coreFile)
if not (response and response['success']):
self.assertTrue(response['success'],
'attach failed (%s)' % (response['message']))
def launch(self, program=None, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,sourcePath=None,
debuggerRoot=None, launchCommands=None, sourceMap=None):
'''Sending launch request to vscode
'''
# Make sure we disconnect and terminate the VSCode debug adapter,
# if we throw an exception during the test case
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_launch(
program,
args=args,
cwd=cwd,
env=env,
stopOnEntry=stopOnEntry,
disableASLR=disableASLR,
disableSTDIO=disableSTDIO,
shellExpandArguments=shellExpandArguments,
trace=trace,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
sourcePath=sourcePath,
debuggerRoot=debuggerRoot,
launchCommands=launchCommands,
sourceMap=sourceMap)
if not (response and response['success']):
self.assertTrue(response['success'],
'launch failed (%s)' % (response['message']))
def build_and_launch(self, program, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
sourcePath=None, debuggerRoot=None):
'''Build the default Makefile target, create the VSCode debug adaptor,
and launch the process.
'''
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR,
disableSTDIO, shellExpandArguments, trace,
initCommands, preRunCommands, stopCommands, exitCommands,
sourcePath, debuggerRoot)
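# Minimal usage sketch in a subclass (illustrative only; "a.out" stands for whatever
# the default Makefile target builds):
#
#     class TestExample(VSCodeTestCaseBase):
#         def test_run_to_exit(self):
#             program = self.getBuildArtifact("a.out")
#             self.build_and_launch(program)
#             self.continue_to_exit()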
| 44.139319 | 86 | 0.58694 | ["Apache-2.0"] | Diatrus/llvm-project | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | 14,257 | Python
from django.apps import AppConfig
class TestConfig(AppConfig):
name = "test_app"
| 14.5 | 33 | 0.747126 | ["MIT"] | eamigo86/django3_asgi | test_app/apps.py | 87 | Python
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
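# A minimal usage sketch (illustrative, not part of the original API docs): for a
# channels-last image array `img` of shape (height, width, channels), a random
# rotation of up to 20 degrees would be
#
#     rotated = random_rotation(img, 20, row_axis=0, col_axis=1, channel_axis=2)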
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
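# Note (added for clarity, not in the original source): this composes the transform as
# T(o) . matrix . T(-o) with o = (x/2 + 0.5, y/2 + 0.5), so that `matrix` is applied
# about the image centre rather than the top-left corner.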
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
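# Example (minimal sketch; assumes an image file 'cat.jpg' exists): load an
# image resized to 224x224 and turn it into a batch of one sample.
#
#     img = load_img('cat.jpg', target_size=(224, 224))
#     x = img_to_array(img)          # (224, 224, 3) with "channels_last"
#     x = np.expand_dims(x, axis=0)  # (1, 224, 224, 3)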
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
                - "binary" will be 1D binary labels,
                - "sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
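        # Each selected augmentation below is expressed as a 3x3 homogeneous
        # matrix; components left at their identity values (zero angle/shift,
        # zoom of 1) are skipped, and the combined matrix is re-centered on
        # the image before being applied in a single warp.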
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
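        # ZCA whitening: with sigma = X^T X / n and the SVD sigma = U S U^T,
        # the whitening matrix is U * diag(1 / sqrt(S + epsilon)) * U^T; it is
        # stored here and applied to each flattened sample in `standardize`.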
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
            containing the files to be listed
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
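# For example, a directory holding 100 valid images with split=(0.0, 0.25)
# yields a count of 25, while split=None counts all 100 files.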
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
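        # Counting and listing image files is I/O bound, so the work is spread
        # over the class subdirectories with a thread pool; the same pool is
        # reused for the filename-listing pass below and then closed.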
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
| 41.678078 | 124 | 0.573119 | ["MIT"] | HangJie720/keras | keras/preprocessing/image.py | 69,394 | Python
import sys
import os
from win32com.shell import shell
import logging
import argparse
from Tkinter import *
import tkMessageBox
from config import default_config_url
from ui import InstallerUI
from installer_api import InstallerAPI
def get_logfile_path():
profile = os.getenv('USERPROFILE')
company_name = "Peachy"
app_name = 'PeachyInstaller'
path = os.path.join(profile, 'AppData', 'Local', company_name, app_name)
if not os.path.exists(path):
os.makedirs(path)
return path
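# The resulting directory is typically of the form
# C:\Users\<user>\AppData\Local\Peachy\PeachyInstaller and is created on demand.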
def setup_logging(args):
logging_path = get_logfile_path()
peachy_logger = logging.getLogger('peachy')
logfile = os.path.join(logging_path, 'peachyinstaller.log')
logging_format = '%(levelname)s: %(asctime)s %(module)s - %(message)s'
logging_level = getattr(logging, args.loglevel.upper(), "INFO")
if not isinstance(logging_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
if True:
peachy_logger = logging.getLogger('peachy')
peachy_logger.propagate = False
logFormatter = logging.Formatter(logging_format)
fileHandler = logging.FileHandler(logfile)
consoleHandler = logging.StreamHandler()
fileHandler.setFormatter(logFormatter)
consoleHandler.setFormatter(logFormatter)
peachy_logger.addHandler(fileHandler)
peachy_logger.addHandler(consoleHandler)
peachy_logger.setLevel(logging_level)
else:
logging.basicConfig(filename=logfile, format=logging_format, level=logging_level)
peachy_logger.info("\n----------------------Logging Started------------------------")
if __name__ == '__main__':
parser = argparse.ArgumentParser("Configure and print with Peachy Printer")
    parser.add_argument('-l', '--log', dest='loglevel', action='store', required=False, default="INFO", help="Enter the loglevel [DEBUG|INFO|WARNING|ERROR] default: INFO")
parser.add_argument('-t', '--console', dest='console', action='store_true', required=False, help="Logs to console not file")
parser.add_argument('-a', '--alternate-config', dest='alt_config', action='store', required=False, default=default_config_url, help="Alternate url for config file")
args, unknown = parser.parse_known_args()
ASADMIN = 'asadmin'
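    # When the script was not started elevated, relaunch it through the
    # Windows shell with the 'runas' verb; the ASADMIN sentinel appended to
    # the arguments stops the elevated copy from relaunching itself again.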
if sys.argv[-1] != ASADMIN:
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + [ASADMIN])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
sys.exit(0)
setup_logging(args)
    logger = logging.getLogger('peachy')
try:
api = InstallerAPI(args.alt_config)
result, code, message = api.initialize()
logger.info('{} -- {} -- {}'.format(result, code, message))
root = Tk()
root.wm_title("Peachy Installer")
root.resizable(width=FALSE, height=FALSE)
root.geometry('{}x{}'.format(640, 400))
if not result:
tkMessageBox.showinfo("Something annoying has occured", message)
if code == 10304:
import webbrowser
webbrowser.open('https://github.com/PeachyPrinter/peachyinstaller/releases', new=0, autoraise=True)
sys.exit()
i = InstallerUI(api, master=root)
i.mainloop()
except Exception as ex:
logger.error(ex.message)
raise
| 39.206897 | 183 | 0.660803 | ["Apache-2.0"] | PeachyPrinter/peachyinstaller | windows/src/install.py | 3,411 | Python
import re
from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import http, validate
from streamlink.plugin.api.utils import parse_query
from streamlink.stream import HTTPStream, HLSStream
from streamlink.stream.ffmpegmux import MuxedStream
API_KEY = "AIzaSyBDBi-4roGzWJN4du9TuDMLd_jVTcVkKz4"
API_BASE = "https://www.googleapis.com/youtube/v3"
API_SEARCH_URL = API_BASE + "/search"
API_VIDEO_INFO = "http://youtube.com/get_video_info"
HLS_HEADERS = {
"User-Agent": "Mozilla/5.0"
}
def parse_stream_map(stream_map):
if not stream_map:
return []
return [parse_query(s) for s in stream_map.split(",")]
def parse_fmt_list(formatsmap):
formats = {}
if not formatsmap:
return formats
for format in formatsmap.split(","):
s = format.split("/")
(w, h) = s[1].split("x")
formats[int(s[0])] = "{0}p".format(h)
return formats
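# Example (layout assumed from typical YouTube responses): a fmt_list value
# such as "43/640x360/9/0/115,18/640x360/9/0/115" maps to
# {43: "360p", 18: "360p"}.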
_config_schema = validate.Schema(
{
validate.optional("fmt_list"): validate.all(
validate.text,
validate.transform(parse_fmt_list)
),
validate.optional("url_encoded_fmt_stream_map"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
"itag": validate.all(
validate.text,
validate.transform(int)
),
"quality": validate.text,
"url": validate.url(scheme="http"),
validate.optional("s"): validate.text,
validate.optional("stereo3d"): validate.all(
validate.text,
validate.transform(int),
validate.transform(bool)
),
}]
),
validate.optional("adaptive_fmts"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
validate.optional("s"): validate.text,
"type": validate.all(
validate.text,
validate.transform(lambda t: t.split(";")[0].split("/")),
[validate.text, validate.text]
),
"url": validate.all(
validate.url(scheme="http")
)
}]
),
validate.optional("hlsvp"): validate.text,
validate.optional("live_playback"): validate.transform(bool),
"status": validate.text
}
)
_search_schema = validate.Schema(
{
"items": [{
"id": {
"videoId": validate.text
}
}]
},
validate.get("items")
)
_channelid_re = re.compile(r'meta itemprop="channelId" content="([^"]+)"')
_livechannelid_re = re.compile(r'meta property="og:video:url" content="([^"]+)')
_url_re = re.compile(r"""
http(s)?://(\w+\.)?youtube.com
(?:
(?:
/(watch.+v=|embed/|v/)
(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(user|channel)/(?P<user>[^/?]+)
)
|
(?:
/c/(?P<liveChannel>[^/?]+)/live
)
)
""", re.VERBOSE)
class YouTube(Plugin):
adp_video = {
137: "1080p",
303: "1080p60", # HFR
299: "1080p60", # HFR
264: "1440p",
308: "1440p60", # HFR
266: "2160p",
315: "2160p60", # HFR
138: "2160p",
302: "720p60", # HFR
}
adp_audio = {
140: 128,
141: 256,
171: 128,
249: 48,
250: 64,
251: 160,
}
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
match_3d = re.match(r"(\w+)_3d", stream)
match_hfr = re.match(r"(\d+p)(\d+)", stream)
if match_3d:
weight, group = Plugin.stream_weight(match_3d.group(1))
weight -= 1
group = "youtube_3d"
elif match_hfr:
weight, group = Plugin.stream_weight(match_hfr.group(1))
weight += 1
group = "high_frame_rate"
else:
weight, group = Plugin.stream_weight(stream)
return weight, group
def _find_channel_video(self):
res = http.get(self.url)
match = _channelid_re.search(res.text)
if not match:
return
return self._get_channel_video(match.group(1))
def _get_channel_video(self, channel_id):
query = {
"channelId": channel_id,
"type": "video",
"eventType": "live",
"part": "id",
"key": API_KEY
}
res = http.get(API_SEARCH_URL, params=query)
videos = http.json(res, schema=_search_schema)
for video in videos:
video_id = video["id"]["videoId"]
return video_id
def _find_canonical_stream_info(self):
res = http.get(self.url)
match = _livechannelid_re.search(res.text)
if not match:
return
return self._get_stream_info(match.group(1))
def _get_stream_info(self, url):
match = _url_re.match(url)
user = match.group("user")
live_channel = match.group("liveChannel")
if user:
video_id = self._find_channel_video()
elif live_channel:
return self._find_canonical_stream_info()
else:
video_id = match.group("video_id")
if video_id == "live_stream":
query_info = dict(parse_qsl(urlparse(url).query))
if "channel" in query_info:
video_id = self._get_channel_video(query_info["channel"])
if not video_id:
return
params = {
"video_id": video_id,
"el": "player_embedded"
}
res = http.get(API_VIDEO_INFO, params=params, headers=HLS_HEADERS)
return parse_query(res.text, name="config", schema=_config_schema)
def _get_streams(self):
info = self._get_stream_info(self.url)
if not info:
return
formats = info.get("fmt_list")
streams = {}
protected = False
for stream_info in info.get("url_encoded_fmt_stream_map", []):
if stream_info.get("s"):
protected = True
continue
stream = HTTPStream(self.session, stream_info["url"])
name = formats.get(stream_info["itag"]) or stream_info["quality"]
if stream_info.get("stereo3d"):
name += "_3d"
streams[name] = stream
adaptive_streams = {}
best_audio_itag = None
# Extract audio streams from the DASH format list
for stream_info in info.get("adaptive_fmts", []):
if stream_info.get("s"):
protected = True
continue
stream_params = dict(parse_qsl(stream_info["url"]))
if "itag" not in stream_params:
continue
itag = int(stream_params["itag"])
# extract any high quality streams only available in adaptive formats
adaptive_streams[itag] = stream_info["url"]
stream_type, stream_format = stream_info["type"]
if stream_type == "audio":
stream = HTTPStream(self.session, stream_info["url"])
name = "audio_{0}".format(stream_format)
streams[name] = stream
# find the best quality audio stream m4a, opus or vorbis
if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
best_audio_itag = itag
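        # The DASH video itags above are video-only, so when muxing support
        # (ffmpeg) is available each of them is paired with the best
        # audio-only stream found above and exposed as a single muxed stream.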
if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
aurl = adaptive_streams[best_audio_itag]
for itag, name in self.adp_video.items():
if itag in adaptive_streams:
vurl = adaptive_streams[itag]
streams[name] = MuxedStream(self.session,
HTTPStream(self.session, vurl),
HTTPStream(self.session, aurl))
hls_playlist = info.get("hlsvp")
if hls_playlist:
try:
hls_streams = HLSStream.parse_variant_playlist(
self.session, hls_playlist, headers=HLS_HEADERS, namekey="pixels"
)
streams.update(hls_streams)
except IOError as err:
self.logger.warning("Failed to extract HLS streams: {0}", err)
if not streams and protected:
raise PluginError("This plugin does not support protected videos, "
"try youtube-dl instead")
return streams
__plugin__ = YouTube
| 30.951557 | 101 | 0.541643 | ["BSD-2-Clause"] | nxkbd/streamlink | src/streamlink/plugins/youtube.py | 8,945 | Python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, which gives you a reference to
original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
def reroot(root: expression.Expression,
           source_path: path.Path) -> expression.Expression:
  """Reroot to a new path, maintaining an input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
"""
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
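# Example (illustrative; the proto and field names are assumptions): reroot an
# expression to a repeated `event` submessage and keep a pointer back to the
# index of the original proto.
#
#   rerooted = reroot(expr, path.Path(["event"]))
#   rerooted = create_proto_index_field(rerooted, "input_proto_index")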
class _RerootRootNodeTensor(prensor.RootNodeTensor):
"""The reroot root node.
This contains a map from a current index to the original index of a proto.
"""
def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
  """Reroot to a new path, maintaining an input proto index."""
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
# TODO(martinz): Check that the "original root source expression" has a type
# in (_RerootExpression, prensor._ProtoRootExpression)
# To do this, we need a general technique similar to
# expression_add._is_true_source_expression: however, this should also cover
# intermediate operations like "project".
# Since this check is not present, if it should have fired, there will be
# an error when calculate(...) is called.
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
# Notice that the "gather" operation is similar to promote.
return _RerootRootNodeTensor(
tf.size(new_root_value.parent_index, out_type=tf.int64),
tf.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although the path can vary, it is not used in the calculation, so
    # equality depends only on the expression type.
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
"""A proto index expression."""
def __init__(self, root: expression.Expression):
"""Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
"""
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
# The following check ensures not just that we can calculate the value,
# but that no "improper" reroots were done.
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although the path can vary, it is not used in the calculation, so
    # equality depends only on the expression type.
return isinstance(expr, _InputProtoIndexExpression)
| 36.056497 | 80 | 0.733469 | ["Apache-2.0"] | anukaal/struct2tensor | struct2tensor/expression_impl/reroot.py | 6,382 | Python
class GEDAColor:
""" Enumeration of gEDA colors """
BACKGROUND_COLOR = 0
PIN_COLOR = 1
NET_ENDPOINT_COLOR = 2
GRAPHIC_COLOR = 3
NET_COLOR = 4
ATTRIBUTE_COLOR = 5
LOGIC_BUBBLE_COLOR = 6
DOTS_GRID_COLOR = 7
DETACHED_ATTRIBUTE_COLOR = 8
TEXT_COLOR = 9
BUS_COLOR = 10
SELECT_COLOR = 11
BOUNDINGBOX_COLOR = 12
ZOOM_BOX_COLOR = 13
STROKE_COLOR = 14
LOCK_COLOR = 15
class GEDAParameter(object):
TYPE = ''
def __init__(self, name, datatype=int, default=None):
self._name = name
self.datatype = datatype
self.default = default
@property
def name(self):
if self.TYPE:
return "%s_%s" % (self.TYPE, self._name)
return self._name
class GEDAStyleParameter(GEDAParameter):
""" Style parameter """
TYPE = 'style'
class GEDAExtraParameter(GEDAParameter):
""" Extra parameter """
TYPE = 'extra'
class GEDACommand(object):
""" Command """
TYPE = None
PARAMETERS = ()
EXTRA_PARAMETERS = ()
def parameters(self):
return self.PARAMETERS + self.EXTRA_PARAMETERS
def get_style_keywords(self):
style_type = GEDAStyleParameter.TYPE
return [p.name for p in self.PARAMETERS
if p.name.startswith(style_type)]
def update_default_kwargs(self, **kwargs):
default_kwargs = {}
for parameter in self.parameters():
default_kwargs[parameter.name] = parameter.default
default_kwargs.update(kwargs)
return default_kwargs
def generate_command(self, **kwargs):
kwargs = self.update_default_kwargs(**kwargs)
command = [self.TYPE]
for parameter in self.PARAMETERS:
command.append("%%(%s)s" % parameter.name)
return [" ".join(command) % kwargs]
class GEDALineCommand(GEDACommand):
""" Line command """
TYPE = 'L'
PARAMETERS = (
GEDAParameter('x1'),
GEDAParameter('y1'),
GEDAParameter('x2'),
GEDAParameter('y2'),
GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
GEDAStyleParameter('width', default=10),
GEDAStyleParameter('capstyle', default=0),
GEDAStyleParameter('dashstyle', default=0),
GEDAStyleParameter('dashlength', default=-1),
GEDAStyleParameter('dashspace', default=-1),
)
class GEDABoxCommand(GEDACommand):
""" Box command """
TYPE = "B"
PARAMETERS = (
GEDAParameter('x'),
GEDAParameter('y'),
GEDAParameter('width'),
GEDAParameter('height'),
GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
GEDAStyleParameter('width', default=10),
GEDAStyleParameter('capstyle', default=0),
GEDAStyleParameter('dashstyle', default=0),
GEDAStyleParameter('dashlength', default=-1),
GEDAStyleParameter('dashspace', default=-1),
GEDAStyleParameter('filltype', default=0),
GEDAStyleParameter('fillwidth', default=-1),
GEDAStyleParameter('angle1', default=-1),
GEDAStyleParameter('pitch1', default=-1),
GEDAStyleParameter('angle2', default=-1),
GEDAStyleParameter('pitch2', default=-1),
)
class GEDACircleCommand(GEDACommand):
""" Circle command """
TYPE = 'V'
PARAMETERS = (
GEDAParameter('x'),
GEDAParameter('y'),
GEDAParameter('radius'),
GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
GEDAStyleParameter('width', default=10),
GEDAStyleParameter('capstyle', default=0),
GEDAStyleParameter('dashstyle', default=0),
GEDAStyleParameter('dashlength', default=-1),
GEDAStyleParameter('dashspace', default=-1),
GEDAStyleParameter('filltype', default=0),
GEDAStyleParameter('fillwidth', default=-1),
GEDAStyleParameter('angle1', default=-1),
GEDAStyleParameter('pitch1', default=-1),
GEDAStyleParameter('angle2', default=-1),
GEDAStyleParameter('pitch2', default=-1),
)
class GEDAArcCommand(GEDACommand):
""" Arc command """
TYPE = 'A'
PARAMETERS = (
GEDAParameter('x'),
GEDAParameter('y'),
GEDAParameter('radius'),
GEDAParameter('startangle'),
GEDAParameter('sweepangle'),
GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
GEDAStyleParameter('width', default=10),
GEDAStyleParameter('capstyle', default=0),
GEDAStyleParameter('dashstyle', default=0),
GEDAStyleParameter('dashlength', default=-1),
GEDAStyleParameter('dashspace', default=-1),
)
class GEDATextCommand(GEDACommand):
""" Text command """
TYPE = 'T'
PARAMETERS = (
GEDAParameter('x'),
GEDAParameter('y'),
GEDAStyleParameter('color', default=GEDAColor.TEXT_COLOR),
# GEDAStyleParameter('size', default=10),
GEDAParameter('size'),
GEDAParameter('visibility', default=1),
GEDAParameter('show_name_value', default=1),
GEDAParameter('angle', default=0),
GEDAParameter('alignment', default=0),
GEDAParameter('num_lines', default=1),
)
class GEDASegmentCommand(GEDACommand):
""" Segment command """
TYPE = 'N'
PARAMETERS = (
GEDAParameter('x1'),
GEDAParameter('y1'),
GEDAParameter('x2'),
GEDAParameter('y2'),
GEDAStyleParameter('color', default=GEDAColor.NET_COLOR),
)
class GEDAPinCommand(GEDACommand):
""" Pin command """
TYPE = 'P'
PARAMETERS = (
GEDAParameter('x1'),
GEDAParameter('y1'),
GEDAParameter('x2'),
GEDAParameter('y2'),
GEDAStyleParameter('color', default=GEDAColor.PIN_COLOR),
# pin type is always 0
GEDAStyleParameter('pintype', default=0),
# first point is active/connected pin
GEDAParameter('whichend', default=0),
)
class GEDAComponentCommand(GEDACommand):
""" Component command """
TYPE = 'C'
PARAMETERS = (
GEDAParameter('x'),
GEDAParameter('y'),
# GEDAParameter('selectable', default=0),
GEDAParameter('selectable', default=1),
GEDAParameter('angle'),
GEDAParameter('mirror'),
GEDAParameter('basename', datatype=str),
)
class GEDAPathCommand(GEDACommand):
""" Path command """
TYPE = "H"
PARAMETERS = (
GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
GEDAStyleParameter('width', default=10),
GEDAStyleParameter('capstyle', default=0),
GEDAStyleParameter('dashstyle', default=0),
GEDAStyleParameter('dashlength', default=-1),
GEDAStyleParameter('dashspace', default=-1),
GEDAStyleParameter('filltype', default=0),
GEDAStyleParameter('fillwidth', default=-1),
GEDAStyleParameter('angle1', default=-1),
GEDAStyleParameter('pitch1', default=-1),
GEDAStyleParameter('angle2', default=-1),
GEDAStyleParameter('pitch2', default=-1),
GEDAParameter('num_lines'),
)
    EXTRA_PARAMETERS = (
GEDAExtraParameter('id'),
)
class GEDAVersionCommand(GEDACommand):
""" Version command """
TYPE = 'v'
PARAMETERS = (
GEDAParameter('version'),
GEDAParameter('fileformat_version'),
)
class GEDABusCommand(GEDACommand):
""" Bus command """
TYPE = 'U'
PARAMETERS = (
GEDAParameter('x1'),
GEDAParameter('y1'),
GEDAParameter('x2'),
GEDAParameter('y2'),
GEDAStyleParameter('color', default=GEDAColor.BUS_COLOR),
GEDAParameter('ripperdir', default=0),
)
class GEDAPictureCommand(GEDACommand):
""" Picture command """
TYPE = 'G'
PARAMETERS = ()
class GEDAEmbeddedEnvironmentCommand(GEDACommand):
""" Embeded command """
TYPE = '['
PARAMETERS = ()
class GEDAAttributeEnvironmentCommand(GEDACommand):
""" Attribute environment command """
TYPE = '{'
PARAMETERS = ()
class GEDACommand(GEDACommand):
""" Command """
TYPE = 'U'
PARAMETERS = ()
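if __name__ == '__main__':
    # Illustrative usage sketch: serialize a couple of commands with their
    # default style parameters. The expected strings below are inferred from
    # generate_command() and the defaults declared above.
    print(GEDAStyleParameter('color').name)  # 'style_color'
    print(GEDALineCommand().generate_command(x1=0, y1=0, x2=400, y2=400))
    # expected: ['L 0 0 400 400 3 10 0 0 -1 -1']
    print(GEDACircleCommand().generate_command(x=200, y=200, radius=100))
    # expected: ['V 200 200 100 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']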
| 28.921708 | 69 | 0.621632 | [
"Apache-2.0"
] | lehaianh1986/schematic-file-converter | upconvert/parser/geda_commands.py | 8,127 | Python |
"""
Django settings for doiainn project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fbrywz7o3a1=vf-+4luwn5h)!kt-xzghqtm#^3(epwcwcp^jws'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
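# Illustrative sketch only (not generated by `startproject`): a deployment would
# typically pull these values from the environment instead of the source tree,
# for example:
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
# DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'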
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'doiainn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'doiainn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| 25.640777 | 71 | 0.702385 | [
"MIT"
] | bbenko/doiainn | code/doiainn/doiainn/settings.py | 2,641 | Python |
import logging
import pyrebase
from requests.exceptions import HTTPError
class Node:
def __init__(self, nodeName):
self._nodeName = nodeName
self._next = None
def child(self, nodeName):
if self._next == None:
self._next = Node(nodeName)
else:
            self._next.child(nodeName)
return self
def set(self, data):
if self._next == None:
self._next = Set(data)
else:
self._next.set(data)
return self
def get(self):
if self._next == None:
self._next = Get()
else:
self._next.get()
return self
def eval(self, prev):
return self._next.eval(prev.child(self._nodeName))
def __str__(self):
if self._next == None:
return 'child(' + str(self._nodeName) + ')'
else:
return 'child(' + str(self._nodeName) + ').' + str(self._next)
class Set:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.set(self._data)
def __str__(self):
return 'set(' + str(self._data) + ')'
class Get:
def eval(self, prev):
return prev.get()
def __str__(self):
return 'get()'
class Remove:
def eval(self, prev):
return prev.remove()
def __str__(self):
return 'remove()'
class Push:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.push(self._data)
def __str__(self):
return 'push(' + str(self._data) + ')'
class Update:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.update(self._data)
def __str__(self):
return 'update(' + str(self._data) + ')'
class FirebaseLiveEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
def eval(self, node):
# logging.debug(node)
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
class FirebaseLoggingEvaluator:
def eval(self, node):
logging.info(node)
class FirebaseExceptionEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
self._throw = True
def eval(self, node):
if self._throw:
self._throw = False
raise HTTPError("I Broke")
logging.debug(node)
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
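if __name__ == '__main__':
    # Illustrative usage sketch: build a fluent operation chain and run it
    # through FirebaseLoggingEvaluator, which only logs the chain and never
    # touches Firebase, so no credentials or config are needed here.
    logging.basicConfig(level=logging.INFO)
    op = Node('users').child('alice').set({'score': 1})
    FirebaseLoggingEvaluator().eval(op)  # logs: child(users).child(alice).set({'score': 1})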
| 24.336066 | 74 | 0.584035 | [
"MIT"
] | txsmith/p1-sensor | src/firebaseops.py | 2,969 | Python |
# -*- coding: utf-8 -*-
"""
User database models
--------------------
"""
import enum
import logging
import uuid
from flask import current_app
from sqlalchemy_utils import types as column_types
from flask_login import current_user # NOQA
from app.extensions import db, FeatherModel
from app.extensions.auth import security
from app.extensions.edm import EDMObjectMixin
from app.extensions.api.parameters import _get_is_static_role_property
import app.extensions.logging as AuditLog
log = logging.getLogger(__name__)
class UserEDMMixin(EDMObjectMixin):
# fmt: off
    # Name of the module, used for knowing what to sync, e.g. user.list, user.data
EDM_NAME = 'user'
# The EDM attribute for the version, if reported
EDM_VERSION_ATTRIBUTE = 'version'
#
EDM_LOG_ATTRIBUTES = [
'emailAddress',
]
EDM_ATTRIBUTE_MAPPING = {
# Ignored
'id' : None,
'lastLogin' : None,
'username' : None,
# Attributes
'acceptedUserAgreement' : 'accepted_user_agreement',
'affiliation' : 'affiliation',
'emailAddress' : 'email',
'fullName' : 'full_name',
'receiveEmails' : 'receive_notification_emails',
'sharing' : 'shares_data',
'userURL' : 'website',
'version' : 'version',
# Functions
'organizations' : '_process_edm_user_organization',
'profileImageUrl' : '_process_edm_user_profile_url',
}
# fmt: on
@classmethod
def ensure_edm_obj(cls, guid):
user = User.query.filter(User.guid == guid).first()
is_new = user is None
if is_new:
email = '%s@localhost' % (guid,)
password = User.initial_random_password()
user = User(
guid=guid,
email=email,
password=password,
version=None,
is_active=True,
in_alpha=True,
)
with db.session.begin():
db.session.add(user)
db.session.refresh(user)
return user, is_new
def _process_edm_user_profile_url(self, url):
# TODO is this actually needed
log.warning('User._process_edm_profile_url() not implemented yet')
def _process_edm_user_organization(self, org):
# TODO is this actually needed
log.warning('User._process_edm_user_organization() not implemented yet')
class User(db.Model, FeatherModel, UserEDMMixin):
"""
User database model.
TODO:
* Upgrade to HoustonModel after full transition for Users out of EDM is
complete
"""
def __init__(self, *args, **kwargs):
if 'password' not in kwargs:
raise ValueError('User must have a password')
super().__init__(*args, **kwargs)
guid = db.Column(
db.GUID, default=uuid.uuid4, primary_key=True
) # pylint: disable=invalid-name
version = db.Column(db.BigInteger, default=None, nullable=True)
email = db.Column(
db.String(length=120), index=True, unique=True, default='', nullable=False
)
password = db.Column(
column_types.PasswordType(max_length=128, schemes=('bcrypt',)), nullable=False
) # can me migrated from EDM field "password"
full_name = db.Column(
db.String(length=120), default='', nullable=False
) # can be migrated from EDM field "fullName"
website = db.Column(
db.String(length=120), nullable=True
) # can be migrated from EDM field "userURL"
location = db.Column(db.String(length=120), default='', nullable=True)
affiliation = db.Column(
db.String(length=120), default='', nullable=True
) # can be migrated from BE field "affiliation"
forum_id = db.Column(db.String(length=120), default='', nullable=True)
locale = db.Column(db.String(length=20), default='EN', nullable=True)
accepted_user_agreement = db.Column(
db.Boolean, default=False, nullable=False
) # can be migrated from EDM field "acceptedUserAgreement"
use_usa_date_format = db.Column(db.Boolean, default=True, nullable=False)
show_email_in_profile = db.Column(db.Boolean, default=False, nullable=False)
receive_notification_emails = db.Column(
db.Boolean, default=True, nullable=False
) # can be migrated from BE field "receiveEmails"
receive_newsletter_emails = db.Column(db.Boolean, default=False, nullable=False)
shares_data = db.Column(
db.Boolean, default=True, nullable=False
) # can be migrated from BE field "sharing"
default_identification_catalogue = db.Column(
db.GUID, nullable=True
) # this may just be a string, however EDM wants to do ID catalogues
profile_fileupload_guid = db.Column(
db.GUID, db.ForeignKey('file_upload.guid'), nullable=True
)
# 'FileUpload' failed to locate a name (class not yet loaded)
# so explicitly import FileUpload here
from app.modules.fileuploads.models import FileUpload
profile_fileupload = db.relationship(FileUpload)
organization_membership_enrollments = db.relationship(
'OrganizationUserMembershipEnrollment', back_populates='user'
)
organization_moderator_enrollments = db.relationship(
'OrganizationUserModeratorEnrollment', back_populates='user'
)
project_membership_enrollments = db.relationship(
'ProjectUserMembershipEnrollment', back_populates='user'
)
user_collaboration_associations = db.relationship(
'CollaborationUserAssociations', back_populates='user'
)
asset_groups = db.relationship(
'AssetGroup',
back_populates='owner',
primaryjoin='User.guid == AssetGroup.owner_guid',
order_by='AssetGroup.guid',
)
submitted_asset_groups = db.relationship(
'AssetGroup',
back_populates='submitter',
primaryjoin='User.guid == AssetGroup.submitter_guid',
order_by='AssetGroup.guid',
)
owned_encounters = db.relationship(
'Encounter',
back_populates='owner',
primaryjoin='User.guid == Encounter.owner_guid',
order_by='Encounter.guid',
)
submitted_encounters = db.relationship(
'Encounter',
back_populates='submitter',
primaryjoin='User.guid == Encounter.submitter_guid',
order_by='Encounter.guid',
)
owned_organizations = db.relationship(
'Organization',
back_populates='owner',
primaryjoin='User.guid == Organization.owner_guid',
order_by='Organization.guid',
)
owned_projects = db.relationship(
'Project',
back_populates='owner',
primaryjoin='User.guid == Project.owner_guid',
order_by='Project.guid',
)
# User may have many notifications
notifications = db.relationship(
'Notification',
back_populates='recipient',
primaryjoin='User.guid == Notification.recipient_guid',
order_by='Notification.guid',
)
# All User specific Notification Preferences will be held in one instance
notification_preferences = db.relationship(
'UserNotificationPreferences',
back_populates='user',
primaryjoin='User.guid == UserNotificationPreferences.user_guid',
order_by='UserNotificationPreferences.guid',
)
PUBLIC_USER_EMAIL = 'public@localhost'
class StaticRoles(enum.Enum):
# pylint: disable=missing-docstring,unsubscriptable-object
DATA_MANAGER = (0x100000, 'DataManager', 'DataManager', 'is_data_manager')
USER_MANAGER = (0x80000, 'UserManager', 'UserManager', 'is_user_manager')
CONTRIBUTOR = (0x40000, 'Contributor', 'Contributor', 'is_contributor')
RESEARCHER = (0x20000, 'Researcher', 'Researcher', 'is_researcher')
EXPORTER = (0x10000, 'Exporter', 'Exporter', 'is_exporter')
INTERNAL = (0x08000, 'Internal', 'Internal', 'is_internal')
ADMIN = (0x04000, 'Site Administrator', 'Admin', 'is_admin')
STAFF = (0x02000, 'Staff Member', 'Staff', 'is_staff')
ACTIVE = (0x01000, 'Active Account', 'Active', 'is_active')
SETUP = (0x00800, 'Account in Setup', 'Setup', 'in_setup')
RESET = (0x00400, 'Account in Password Reset', 'Reset', 'in_reset')
ALPHA = (0x00200, 'Enrolled in Alpha', 'Alpha', 'in_alpha')
BETA = (0x00100, 'Enrolled in Beta', 'Beta', 'in_beta')
@property
def mask(self):
return self.value[0]
@property
def title(self):
return self.value[1]
@property
def shorthand(self):
return self.value[2]
static_roles = db.Column(db.Integer, default=0, nullable=False)
is_contributor = _get_is_static_role_property(
'is_contributor', StaticRoles.CONTRIBUTOR
)
is_user_manager = _get_is_static_role_property(
'is_user_manager', StaticRoles.USER_MANAGER
)
is_data_manager = _get_is_static_role_property(
'is_data_manager', StaticRoles.DATA_MANAGER
)
is_researcher = _get_is_static_role_property('is_researcher', StaticRoles.RESEARCHER)
is_exporter = _get_is_static_role_property('is_exporter', StaticRoles.EXPORTER)
is_internal = _get_is_static_role_property('is_internal', StaticRoles.INTERNAL)
is_admin = _get_is_static_role_property('is_admin', StaticRoles.ADMIN)
is_staff = _get_is_static_role_property('is_staff', StaticRoles.STAFF)
is_active = _get_is_static_role_property('is_active', StaticRoles.ACTIVE)
in_beta = _get_is_static_role_property('in_beta', StaticRoles.BETA)
in_alpha = _get_is_static_role_property('in_alpha', StaticRoles.ALPHA)
in_reset = _get_is_static_role_property('in_reset', StaticRoles.RESET)
in_setup = _get_is_static_role_property('in_setup', StaticRoles.SETUP)
@property
def is_privileged(self):
return self.is_staff or self.is_internal
def get_state(self):
state = []
state += [self.StaticRoles.ACTIVE.shorthand] if self.is_active else []
state += [self.StaticRoles.SETUP.shorthand] if self.in_setup else []
state += [self.StaticRoles.RESET.shorthand] if self.in_reset else []
state += [self.StaticRoles.ALPHA.shorthand] if self.in_alpha else []
state += [self.StaticRoles.BETA.shorthand] if self.in_beta else []
return state
def get_roles(self):
roles = []
roles += [self.StaticRoles.DATA_MANAGER.shorthand] if self.is_data_manager else []
roles += [self.StaticRoles.USER_MANAGER.shorthand] if self.is_user_manager else []
roles += [self.StaticRoles.INTERNAL.shorthand] if self.is_internal else []
roles += [self.StaticRoles.ADMIN.shorthand] if self.is_admin else []
roles += [self.StaticRoles.STAFF.shorthand] if self.is_staff else []
roles += [self.StaticRoles.CONTRIBUTOR.shorthand] if self.is_contributor else []
roles += [self.StaticRoles.RESEARCHER.shorthand] if self.is_researcher else []
roles += [self.StaticRoles.EXPORTER.shorthand] if self.is_exporter else []
return roles
def __repr__(self):
state = ', '.join(self.get_state())
roles = ', '.join(self.get_roles())
return (
'<{class_name}('
'guid={self.guid}, '
'email="{self.email}", '
'name="{self.full_name}", '
'state={state}, '
'roles={roles}'
')>'.format(
class_name=self.__class__.__name__, self=self, state=state, roles=roles
)
)
@classmethod
def get_admins(cls):
# used for first run admin creation
users = cls.query.all() # NOQA
admin_users = []
for user in users:
# TODO: Remove the check below at a later point after default admin create is removed
if user.email.endswith('@localhost'):
continue
if user.is_admin:
admin_users.append(user)
return admin_users
@classmethod
def admin_user_initialized(cls):
# used for first run admin creation
return len(cls.get_admins()) > 0
@classmethod
def ensure_user(
cls,
email,
password,
is_internal=False,
is_admin=False,
is_staff=False,
is_researcher=False,
is_contributor=True,
is_user_manager=False,
is_exporter=False,
is_active=True,
in_beta=False,
in_alpha=False,
update=False,
**kwargs,
):
"""
Create a new user.
"""
from app.extensions import db
user = User.find(email=email)
if user is None:
user = User(
password=password,
email=email,
is_internal=is_internal,
is_admin=is_admin,
is_staff=is_staff,
is_active=is_active,
is_researcher=is_researcher,
is_contributor=is_contributor,
is_user_manager=is_user_manager,
is_exporter=is_exporter,
in_beta=in_beta,
in_alpha=in_alpha,
**kwargs,
)
with db.session.begin():
db.session.add(user)
log.info('New user created: %r' % (user,))
elif update:
user.password = password
user.is_internal = is_internal
user.is_admin = is_admin
user.is_staff = is_staff
user.is_researcher = is_researcher
user.is_contributor = is_contributor
user.is_user_manager = is_user_manager
user.is_exporter = is_exporter
user.is_active = is_active
user.in_beta = in_beta
user.in_alpha = in_alpha
with db.session.begin():
db.session.merge(user)
log.info('Updated user: %r' % (user,))
db.session.refresh(user)
return user
@classmethod
def find(cls, email=None, password=None, edm_login_fallback=True):
# Look-up via email
if email is None:
return None
email_candidates = [
email,
'%s@localhost' % (email,),
]
for email_candidate in email_candidates:
user = cls.query.filter(User.email == email_candidate).first()
if password is None:
# If no password was provided to check, return any user account we find
if user is not None:
return user
else:
# Check local Houston password first
if user is not None:
# We found the user, check their provided password
if user.password == password:
return user
# As a fallback, check all EDMs if the user can login
if edm_login_fallback:
# We want to check the EDM even if we don't have a local user record
if current_app.edm.check_user_login(email_candidate, password):
log.info('User authenticated via EDM: %r' % (email_candidate,))
if user is not None:
# We authenticated a local user against an EDM (but the local password failed)
if user.password != password:
# The user passed the login with an EDM, update local password
log.warning(
"Updating user's local password: %r" % (user,)
)
user = user.set_password(password)
return user
else:
log.critical(
'The user authenticated via EDM but has no local user record'
)
# Try syncing all users from EDM
cls.edm_sync_all()
# If the user was just synced, go grab it (recursively) and return
user = cls.find(email=email, edm_login_fallback=False)
return user
# If we have gotten here, one of these things happened:
# 1) the user wasn't found
# 2) the user's password was provided and was incorrect
# 3) the user authenticated against the EDM but has no local user record
return None
@classmethod
def query_search(cls, search=None):
from sqlalchemy import or_, and_
from app.modules.auth.models import Code, CodeTypes
if search is not None:
search = search.strip().split(' ')
search = [term.strip() for term in search]
search = [term for term in search if len(term) > 0]
or_terms = []
for term in search:
codes = (
Code.query.filter_by(code_type=CodeTypes.checkin)
.filter(
Code.accept_code.contains(term),
)
.all()
)
code_users = set([])
for code in codes:
if not code.is_expired:
code_users.add(code.user.guid)
or_term = or_(
cls.guid.in_(code_users),
cls.email.contains(term),
cls.affiliation.contains(term),
cls.forum_id.contains(term),
cls.full_name.contains(term),
)
or_terms.append(or_term)
users = cls.query.filter(and_(*or_terms))
else:
users = cls.query
return users
@property
def is_authenticated(self):
return True
@property
def is_anonymous(self):
return False
@property
def is_email_confirmed(self):
from app.modules.auth.models import Code, CodeTypes
# Get any codes that fit this request
code = (
Code.query.filter_by(user=self, code_type=CodeTypes.email)
.order_by(Code.created.desc())
.first()
)
if code is None:
return False
return code.is_resolved
def get_org_memberships(self):
return [
enrollment.organization
for enrollment in self.organization_membership_enrollments
]
def get_org_moderatorships(self):
return [
enrollment.organization
for enrollment in self.organization_moderator_enrollments
]
def get_projects(self):
return [enrollment.project for enrollment in self.project_membership_enrollments]
def get_collaborations_as_json(self):
from app.modules.collaborations.schemas import DetailedCollaborationSchema
json_resp = []
for collab_assoc in self.user_collaboration_associations:
json_resp.append(
DetailedCollaborationSchema().dump(collab_assoc.collaboration).data
)
return json_resp
def get_notification_preferences(self):
from app.modules.notifications.models import UserNotificationPreferences
# User preferences are the system ones plus the ones stored in this class
# Return the combination to the REST API
preferences = UserNotificationPreferences.get_user_preferences(self)
return preferences
def unprocessed_asset_groups(self):
return [
asset_group.guid
for asset_group in self.asset_groups
if not asset_group.is_processed()
]
def unprocessed_sightings(self):
from app.modules.sightings.models import SightingStage
return [
sighting.guid
for sighting in self.get_sightings()
if not sighting.stage == SightingStage.processed
]
def get_id(self):
return self.guid
def has_static_role(self, role):
return (self.static_roles & role.mask) != 0
def set_static_role(self, role):
if self.has_static_role(role):
return
self.static_roles |= role.mask
def unset_static_role(self, role):
if not self.has_static_role(role):
return
self.static_roles ^= role.mask
def check_owner(self, user):
return self == user
def check_supervisor(self, user):
return self.check_owner(user)
def get_codes(self, code_type, **kwargs):
# This import for Code needs to be local
from app.modules.auth.models import Code
code = Code.get(self, code_type, **kwargs)
return code
def get_invite_code(self):
# This import for Code needs to be local
from app.modules.auth.models import CodeTypes
return self.get_codes(CodeTypes.invite, replace=True)
def get_email_confirmation_code(self):
# This import for Code needs to be local
from app.modules.auth.models import CodeTypes
return self.get_codes(CodeTypes.email, replace=True)
def get_account_recovery_code(self):
# This import for Code needs to be local
from app.modules.auth.models import CodeTypes
return self.get_codes(CodeTypes.recover, replace=True, replace_ttl=None)
def set_password(self, password):
if password is None:
# This function "sets" the password, it's the responsibility of the caller to ensure it's valid
raise ValueError('Empty password not allowed')
self.password = password
with db.session.begin():
db.session.merge(self)
db.session.refresh(self)
return self
def lockout(self):
from app.modules.auth.models import OAuth2Client, OAuth2Grant, OAuth2Token, Code
# Disable permissions
self.is_staff = False
self.is_admin = False
self.is_active = False
self.in_reset = False
self.in_setup = False
with db.session.begin():
db.session.merge(self)
db.session.refresh(self)
# Logout of sessions and API keys
auth_list = []
auth_list += OAuth2Token.query.filter_by(user_guid=self.guid).all()
auth_list += OAuth2Grant.query.filter_by(user_guid=self.guid).all()
auth_list += OAuth2Client.query.filter_by(user_guid=self.guid).all()
auth_list += Code.query.filter_by(user_guid=self.guid).all()
for auth_ in auth_list:
auth_.delete()
return self
def owns_object(self, obj):
from app.modules.assets.models import Asset
from app.modules.asset_groups.models import AssetGroup
from app.modules.encounters.models import Encounter
from app.modules.sightings.models import Sighting
from app.modules.projects.models import Project
from app.modules.individuals.models import Individual
from app.modules.notifications.models import Notification
ret_val = False
if isinstance(obj, User):
ret_val = obj == self
# AssetGroup, Encounters and Projects all have an owner field, check that
elif isinstance(obj, (AssetGroup, Encounter, Project, Notification)):
ret_val = obj.owner == self
elif isinstance(obj, Asset):
# assets are not owned directly by the user but the asset_group they're in is.
# TODO: need to understand once assets become part of an encounter, do they still have a asset_group
if obj.asset_group is not None:
ret_val = obj.asset_group.owner is self
elif isinstance(obj, Sighting):
# decided (2021-03-12) that "owner" of a Sighting is not applicable therefore always False
# permissions must be handled in ways not dependent on ownership
ret_val = False
elif isinstance(obj, Individual):
for encounter in obj.get_encounters():
if encounter.get_owner() is self:
ret_val = True
break
return ret_val
def get_my_annotations(self):
annotations = []
for encounter in self.owned_encounters:
annotations.extend(encounter.annotations)
return annotations
def get_all_encounters(self):
annotations = self.get_my_annotations()
# TODO add collaboration annotations
return annotations
def delete(self):
with db.session.begin():
# TODO: Ensure proper cleanup
for asset_group in self.asset_groups:
asset_group.delete()
AuditLog.delete_object(log, self)
db.session.delete(self)
@classmethod
def initial_random_password(cls):
return security.generate_random(128)
@classmethod
def get_public_user(cls):
return User.ensure_user(
email=User.PUBLIC_USER_EMAIL,
password=User.initial_random_password(),
full_name='Public User',
is_internal=True,
)
def get_sightings(self):
sightings = []
for encounter in self.owned_encounters:
sighting = encounter.get_sighting()
if sighting:
sightings.append(encounter.get_sighting())
sighting_set = set(sightings)
return list(sighting_set)
USER_ROLES = [
role.value[-1]
for role in User.StaticRoles.__members__.values()
if role.value[-1] not in ('in_setup', 'in_reset')
]
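if __name__ == '__main__':
    # Illustrative sketch, assuming the application imports above resolve: shows
    # how the StaticRoles bitmask behind has_static_role/set_static_role
    # composes, without touching the database.
    roles = 0
    roles |= User.StaticRoles.ADMIN.mask | User.StaticRoles.ACTIVE.mask
    assert roles & User.StaticRoles.ADMIN.mask
    assert not roles & User.StaticRoles.RESEARCHER.mask
    print('combined mask:', hex(roles))  # expected: 0x5000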
| 34.561497 | 112 | 0.609005 | [
"Apache-2.0"
] | karenc/houston | app/modules/users/models.py | 25,852 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""
Test PcapDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_io.pcap as pcap_io # pylint: disable=wrong-import-position
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
def test_pcap_input():
"""test_pcap_input
"""
print("Testing PcapDataset")
pcap_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_pcap", "http.pcap")
file_url = "file://" + pcap_filename
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v in dataset:
(packet_timestamp, packet_data) = v
if packets_total == 0:
assert packet_timestamp.numpy()[0] == 1084443427.311224 # we know this is the correct value in the test pcap file
assert len(packet_data.numpy()[0]) == 62 # we know this is the correct packet data buffer length in the test pcap file
packets_total += 1
assert packets_total == 43 # we know this is the correct number of packets in the test pcap file
if __name__ == "__main__":
  tf.test.main()
| 36.921569 | 124 | 0.709506 | [
"Apache-2.0"
] | HubBucket-Team/io | tests/test_pcap_eager.py | 1,883 | Python |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON Response from POST request submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) in both parameters are allowed but Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 from the
        Cromwell server if there are many subworkflows and `exclude_subworkflow` is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
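if __name__ == '__main__':
    # Illustrative sketch: is_valid_uuid() and has_wildcard(), used by find() and
    # find_valid_workflow_ids(), are pure functions and can be exercised without
    # a running Cromwell server. The workflow ID below is made up.
    print(is_valid_uuid('cd0fe94a-1a6c-4d3e-bf0e-4d3a1b9f9c2a'))  # True
    print(is_valid_uuid('NOT-A-UUID'))                            # False
    print(has_wildcard(['my-label-*', 'exact-label']))            # True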
| 33.334532 | 95 | 0.563721 | [
"MIT"
] | ENCODE-DCC/caper | caper/cromwell_rest_api.py | 18,534 | Python |
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from data import lab_gamut
import numpy as np
class GUIGamut(QWidget):
def __init__(self, gamut_size=110):
QWidget.__init__(self)
self.gamut_size = gamut_size
self.win_size = gamut_size * 2 # divided by 4
self.setFixedSize(self.win_size, self.win_size)
self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)
self.reset()
def set_gamut(self, l_in=50):
self.l_in = l_in
self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)
self.update()
def set_ab(self, color):
self.color = color
self.lab = lab_gamut.rgb2lab_1d(self.color)
x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])
self.pos = QPointF(x, y)
self.update()
def is_valid_point(self, pos):
if pos is None:
return False
else:
x = pos.x()
y = pos.y()
if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:
return self.mask[y, x]
else:
return False
def update_ui(self, pos):
self.pos = pos
a, b = self.ab_grid.xy2ab(pos.x(), pos.y())
# get color we need L
L = self.l_in
lab = np.array([L, a, b])
color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')
self.emit(SIGNAL('update_color'), color)
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), Qt.white)
if self.ab_map is not None:
ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))
qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)
painter.drawImage(0, 0, qImg)
painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
painter.drawLine(self.win_size/2, 0, self.win_size/2, self.win_size)
painter.drawLine(0, self.win_size/2, self.win_size, self.win_size/2)
if self.pos is not None:
painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
w = 5
x = self.pos.x()
y = self.pos.y()
painter.drawLine(x - w, y, x + w, y)
painter.drawLine(x, y - w, x, y + w)
painter.end()
def mousePressEvent(self, event):
pos = event.pos()
if event.button() == Qt.LeftButton and self.is_valid_point(pos): # click the point
self.update_ui(pos)
self.mouseClicked = True
def mouseMoveEvent(self, event):
pos = event.pos()
if self.is_valid_point(pos):
if self.mouseClicked:
self.update_ui(pos)
def mouseReleaseEvent(self, event):
self.mouseClicked = False
def sizeHint(self):
return QSize(self.win_size, self.win_size)
def reset(self):
self.ab_map = None
self.mask = None
self.color = None
self.lab = None
self.pos = None
self.mouseClicked = False
self.update()
| 32.626263 | 96 | 0.581734 | [
"BSD-2-Clause"
] | 3Demonica/colorization | interactive-deep-colorization/ui/gui_gamut.py | 3,230 | Python |
# coding: utf-8
__author__ = "Jerry He"
import dash_bootstrap_components as dbc
from dash import dcc, no_update
from dash_extensions.enrich import Dash, Output, Input, State, html
import flask
from flask import jsonify
from flask_cors import CORS
from dash import dash_table
import dash_ace
server = flask.Flask(__name__)
CORS(server)
from dash_extensions.enrich import DashProxy,ServersideOutput, TriggerTransform, MultiplexerTransform, ServersideOutputTransform, NoOutputTransform
app = DashProxy(__name__,
server=server,
transforms=[
ServersideOutputTransform(), # enable use of ServersideOutput objects
],
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
server = app.server
import pandas as pd
def row_tf(row):
keep = ['title', 'userid']
newrow = {k:row[k] for k in keep}
newrow['name'] = newrow['title'].split("-")[0].strip()
return newrow
def df_transform(df):
return pd.DataFrame([row_tf(row) for _,row in df.iterrows()])
app.layout = html.Div(
[
dcc.Store(id="querystr"),
dcc.Store(id="store"),
dcc.Store(id="all-df"),
dcc.Interval(interval=1800, id="query_sto"),
dbc.Card([
dbc.CardImg(src="assets/brick_header.jpg"),
dbc.CardBody([
dbc.Tabs(
[
dbc.Tab([
html.Hr(),
dash_ace.DashAceEditor(
id='query-input',
value=r"SELECT * FROM my_music_collection WHERE artist like '%Jr%' LIMIT 8",
theme='github',
mode='sql',
tabSize=2,
height="35px",
enableBasicAutocompletion=True,
enableLiveAutocompletion=True,
autocompleter='/autocompleter?prefix=',
placeholder='SQL code ...'
),
dbc.Button("Query", color="secondary", className="me-1",
id='query-button'),
html.Hr(),
html.Div(id="query-output")
],label="SQL", tab_id="tab-1"),
dbc.Tab(label="History", tab_id="tab-2"),
],
id="tabs",
active_tab="tab-1",
),
html.Div(id="tab-content"),
])
])
]
)
import json
app.clientside_callback("""
function(n_intervals, data) {
var existing_data;
if(data) {
existing_data = JSON.parse(data)
}
var editor = ace.edit("query-input")
if(!existing_data || existing_data['querystr'] != editor.getValue().trim()) {
return JSON.stringify({
'querystr':editor.getValue().trim(),
'time':(new Date()).toISOString()
})
}
}
""".strip(),
Output("querystr", "data"), Input("query_sto",'n_intervals'), State("querystr", "data"))
from sqlalchemy import create_engine
engine = create_engine('postgresql://localhost:5432/jerry') # change this to your SQL endpoint/auth
import logging
import dateutil.parser
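# Note on the callback below: ServersideOutput (dash-extensions) keeps the queried
# DataFrame in server-side storage and only sends a reference to the browser, and
# memoize=True caches the result for repeated identical inputs.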
@app.callback(ServersideOutput("store", "data"), Input('query-button', 'n_clicks'),State("querystr", "data"), memoize=True)
def query(n_clicks, query_data):
if query_data is None:
return no_update
qdata = json.loads(query_data)
try:
dat = pd.read_sql(qdata["querystr"].replace("%", "%%"), con=engine)
return dat
except:
logging.exception("SQL query failed\n")
from datetime import datetime
@app.callback(Output("query-output", "children"), ServersideOutput("all-df", "data"), Input("store", "data"), State("all-df", "data"))
def render_query_res_table(data, all_df):
df = df_transform(data)
df = df[sorted(df.columns.tolist())]
if all_df is None:
all_df = [{'df':df, 'time':datetime.now()}]
else:
all_df.append({'df':df, 'time':datetime.now()})
return [dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)],all_df
@app.callback(Output("tab-content", "children"), [Input("tabs", "active_tab"), State("all-df", "data")])
def switch_tab(at, all_df):
if at == "tab-1":
return []
elif at == "tab-2":
return dbc.Accordion(
[
dbc.AccordionItem([
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in query_hist['df'].columns],
data=query_hist['df'].to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)
], title = query_hist['time'].strftime("%H:%M:%S")) for query_hist in all_df
])
return html.P("This shouldn't ever be displayed...")
@server.route('/autocompleter', methods=['GET'])
def autocompleter():
return jsonify([{"name": "Completed", "value": "Completed", "score": 100, "meta": "test"}])
app.run_server(host="127.0.0.1", debug=True, port=8080) | 31.575 | 147 | 0.582344 | [
"MIT"
] | xiangjerryhe/sql-ui-dash | dash_sql_client_ui.py | 5,052 | Python |
import numpy as np
import matplotlib.pyplot as plt
import time
from copy import copy
import os
from single_pitch import single_pitch
from channel import channel
from pseudo_speech import Pseudospeech_Synthetizer_class
from encryption import Encryption_class
from speech_analyzer import Speech_Analyzer_class
from speech_synthesizer import Speech_Synthesizer_class
################################################################
my_analyzer = Speech_Analyzer_class("speech_model.npz","spherical_code.npz") # model parameters generated by speech_model.py and spherical_code.py
my_encryptor = Encryption_class("spherical_code.npz") # model parameters generated by spherical_code.py
my_ps_sp_synthetizer = Pseudospeech_Synthetizer_class("pseudospeech_model.npz","spherical_code.npz") # model parameters generated by pseudo_speech_model.py and spherical_code.py
my_sp_synthesizer = Speech_Synthesizer_class("speech_model.npz") # model parameters generated by speech_model.py
# pseudo random data used for enciphering/deciphering
keybits = np.random.randint(2, size = (160, 10000))
print("step 1")
speech_samples = np.fromfile("temp/hts1a.raw", dtype='int16')
# print(speech_samples.shape)
##### SPEECH ENCODING ######
print("step 2")
pitch_indices, energy_indices, timbre_indices = my_analyzer.analyze_speech(speech_samples)
###### ENCRYPTION ######
print("step 3")
pitch_indices_enc, energy_indices_enc, timbre_indices_enc = my_encryptor.speech_encryption(pitch_indices, energy_indices, timbre_indices, keybits)
###### PSEUDOSPEECH SYNTHESIS ######
print("step 4")
signal = my_ps_sp_synthetizer.synthesize_pseudospeech(pitch_indices_enc, energy_indices_enc, timbre_indices_enc)
###### CHANNEL DISTORTION ######
print("step 5")
signal_rec = channel(signal, "SILK", 16000, 48000) # data samples, codec type, sampling frequency (Hz), compression rate (b/s)
###### PSEUDOSPEECH ANALYSIS ######
print("step 6")
pitch_indices_rec, energy_indices_rec, timbre_indices_rec = my_ps_sp_synthetizer.analyze_pseudospeech(signal_rec)
# ###### DECRYPTION ######
print("step 7")
pitch_indices_dec, energy_indices_dec, timbre_indices_dec = my_encryptor.speech_decryption(pitch_indices_rec, energy_indices_rec, timbre_indices_rec, keybits)
# ###### SPEECH SYNTHESIS ######
print("step 8")
my_sp_synthesizer.synthesize_speech(pitch_indices_dec, energy_indices_dec, timbre_indices_dec) # save to file / input of the narrowband (8kHz) LPCNet
print("Finished")
################
# plt.figure()
# plt.plot(energy_indices)
# plt.figure()
# plt.plot(pitch_indices)
# plt.figure()
# plt.plot(np.transpose(timbre_indices))
################
# plt.figure()
# plt.plot(energy_indices_enc)
# plt.figure()
# plt.plot(pitch_indices_enc)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_enc))
################
# plt.figure()
# plt.plot(energy_indices_rec)
# plt.figure()
# plt.plot(pitch_indices_rec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_rec))
################
# plt.figure()
# plt.plot(energy_indices_dec)
# plt.figure()
# plt.plot(pitch_indices_dec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_dec))
################
plt.show()
| 29.579439 | 177 | 0.740916 | [
"MIT"
] | PiotrKrasnowski/Speech_Encryption | a_full_model.py | 3,165 | Python |
#!/usr/bin/env python -u
"""
All commands that can be run in this project are available through this unified interface.
This should be run with the ./plaster.sh helper to get into the correct context.
"""
import tempfile
import numpy as np
import time
import os
import sys
import pandas as pd
import json
from pathlib import Path
from munch import Munch
from plumbum import colors
from plumbum import FG, TF, cli, local
from plaster.tools.zlog.zlog import important
from plaster.run.sigproc_v2 import synth
from plaster.tools.zlog.profile import prof, profile_from_file, profile_dump
from plaster.tools.utils.tmp import tmp_file
from plaster.tools.assets import assets
from plaster.tools.test_tools.test_tools import run_p
from plaster.run.run import RunResult
from plaster.tools.zlog import zlog
from plaster.tools.zlog.zlog import tell, h_line, spy
from plaster.tools.utils import tmp
from plaster.tools.utils import utils
import logging
log = logging.getLogger(__name__)
class CommandError(Exception):
def __init__(self, retcode=None):
self.retcode = retcode
def assert_env():
must_exist = ("ERISYON_ROOT", "JOBS_FOLDER")
found = 0
for e in must_exist:
if e in local.env:
found += 1
else:
print(f'Environment variable "{e}" not found.')
if found != len(must_exist):
raise CommandError(f"Environment variable(s) not found.")
class DoFuncs:
def is_dev(self):
return local.env.get("ERISYON_DEV") == "1"
def folder_user(self):
return local.env["FOLDER_USER"]
def run_user(self):
return local.env["RUN_USER"]
def clear(self):
local["clear"] & FG
def _print_job_folders(self, file_list, show_plaster_json=True):
"""
file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]
"""
if len(file_list) == 0:
print("No files found")
return
folders = {
file.folder: Munch(folder=file.folder, size_gb=0, file_count=0,)
for file in file_list
}
gb = 1024 ** 3
total_gb = 0
for file in file_list:
folder = file.folder
total_gb += file.size / gb
folders[folder].size_gb += file.size / gb
folders[folder].file_count += 1
df = pd.DataFrame.from_dict(folders, orient="index")
formatters = dict(
size_gb="{:10.2f}".format,
folder="{:<40.40s}".format,
file_count="{:.0f}".format,
)
columns = ["folder", "size_gb", "file_count"]
df = df.append(dict(folder="TOTAL", size_gb=total_gb), ignore_index=True)
print(df.to_string(columns=columns, formatters=formatters))
def print_local_job_folders(self):
important("Local job folders:")
root = local.path("./jobs_folder")
self._print_job_folders(
[
Munch(
folder=(p - root)[0],
name=p.name,
size=int(p.stat().st_size),
mtime=int(p.stat().st_mtime),
)
for p in root.walk()
]
)
def validate_job_folder(self, job_folder, allow_run_folders=False):
return assets.validate_job_folder(
job_folder, allow_run_folders=allow_run_folders
)
def run_zests_v2(self, cli_args, debug_mode):
tell(f"Running zests v2...")
# as os.environ is evaluated when it is first imported
# we can't use any of the more graceful ways to set the environment
with local.env(RUN_ENV="test", ZAP_DEBUG_MODE=debug_mode):
zest_version = None
try:
from zest.version import __version__ as zest_version
except ImportError:
pass
assert zlog.config_dict is not None
            assert zest_version is not None and zest_version.startswith("1.1.")
with tmp.tmp_file() as tmp_path:
with open(tmp_path, "w") as f:
f.write(json.dumps(zlog.config_dict))
# cli_args += ["--logger_config_json", tmp_path]
local["python"]["-u", "-m", "zest.zest_cli"].bound_command(
*cli_args
) & FG(retcode=None)
def run_nbstripout(self):
"""Strip all notebooks of output to save space in commits"""
important("Stripping Notebooks...")
result = (
local["find"][
".",
"-type",
"f",
"-not",
"-path",
"*/\.*",
"-name",
"*.ipynb",
"-print",
]
| local["xargs"]["nbstripout"]
) & TF(FG=True)
if not result:
raise CommandError
def run_docker_build(self, docker_tag, quiet=False):
important(f"Building docker tag {docker_tag}")
with local.env(LANG="en_US.UTF-8"):
args = [
"build",
"-t",
f"erisyon:{docker_tag}",
"-f",
"./scripts/main_env.docker",
]
if quiet:
args += ["--quiet"]
args += "."
local["docker"][args] & FG
class DoCommand(cli.Application, DoFuncs):
def main(self):
return
@DoCommand.subcommand("run_notebook")
class RunNotebookCommand(cli.Application, DoFuncs):
"""
Run a notebook rendered to HTML
"""
def main(self, notebook_path, output_path: Path = None):
args = [
"nbconvert",
"--to",
"html",
"--execute",
notebook_path,
"--ExecutePreprocessor.timeout=1800",
]
if output_path is not None:
args += ["--output", output_path]
local["jupyter"].bound_command(*args) & FG
@DoCommand.subcommand("profile")
class ProfileCommand(cli.Application, DoFuncs):
gb = 1024 ** 3
skip_hardware = cli.Flag("--skip_hardware", help="Do not include hardware profile")
skip_sigproc = cli.Flag("--skip_sigproc", help="Do not include sigproc profile")
def fileio_test(self, jobs_folder):
job_name = f"_profile/_{int(time.time()):08x}"
large_random = np.random.uniform(
size=1024 ** 3 // 8
) # 8 because floats are 8 bytes
def write_to(write_path):
# import shutil
# total, used, free = shutil.disk_usage(write_path.dirname)
# print(f"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)")
write_path.dirname.mkdir()
with open(write_path, "wb") as f:
f.write(large_random)
# PROFILE write to jobs_folder
job_folder_write_path = jobs_folder / job_name
try:
with prof(
"fileio_to_jobs_folder", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(job_folder_write_path)
finally:
job_folder_write_path.delete()
# PROFILE write to plaster_tmp
with tmp_file() as plaster_tmp_folder_write_path:
with prof(
"fileio_to_plaster_tmp", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(plaster_tmp_folder_write_path)
# PROFILE write to /tmp
tmp_folder_write_path = local.path(tempfile.mkstemp())
try:
with prof("fileio_to_tmp", gbs=large_random.nbytes / self.gb, _tell=True):
write_to(tmp_folder_write_path)
finally:
tmp_folder_write_path.delete()
def cpu_test(self):
mat = np.random.uniform(size=(5000, 5000))
with prof(
"cpu_tests_matrix_invert",
mega_elems=(mat.shape[0] * mat.shape[1]) / 1e6,
_tell=True,
):
np.linalg.inv(mat)
def mem_test(self):
gb = 1024 ** 3
rnd = np.random.uniform(size=(1_000, 500_000))
with prof("mem_tests_copy", gbs=rnd.nbytes / gb, _tell=True):
rnd.copy()
def sigproc_test(self, jobs_folder):
"""
This is adapted from zest_sigproc_v2_integration
"""
profile_folder = jobs_folder / "_profile"
profile_folder.delete()
job_folder = profile_folder / "sigproc_test"
source_folder = profile_folder / "_synth_field"
job_folder.mkdir()
source_folder.mkdir()
# GENERATE some fake data
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = (
synth.PeaksModelGaussianCircular(n_peaks=n_peaks)
.locs_randomize()
.widths_uniform(psf_width)
.amps_constant(gain)
)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(
str(
source_folder
/ f"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy"
),
chcy_ims[ch_i, cy_i],
)
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p(
[
f"gen",
f"sigproc_v2",
f"--job={job_folder}",
f"--sigproc_source={source_folder}",
f"--force",
f"--self_calib",
]
)
log_file = local.path(local.env["PLASTER_ROOT"]) / "plaster.log"
log_file.delete()
run_p(["run", job_folder, "--no_progress", "--skip_reports"])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line("--"))
print("PROFILE RESULTS")
print(h_line("--"))
profile_dump(profile_lines)
def main(self, jobs_folder):
assert_env()
jobs_folder = local.path(jobs_folder)
if not self.skip_hardware:
tell(colors.cyan | "Profiling file_io")
self.fileio_test(jobs_folder)
tell(colors.cyan | "Profiling cpu")
self.cpu_test()
tell(colors.cyan | "Profiling mem")
self.mem_test()
if not self.skip_sigproc:
tell(colors.cyan | "Profiling sigproc")
self.sigproc_test(jobs_folder)
@DoCommand.subcommand("profile_dump")
class ProfileDumpCommand(cli.Application, DoFuncs):
def main(self, log_path):
assert_env()
log_file = local.path(log_path)
profile_lines = profile_from_file(log_file)
profile_dump(profile_lines)
@DoCommand.subcommand("test")
class TestCommand(cli.Application, DoFuncs):
"""
Run tests
"""
no_clear = cli.Flag("--no_clear", help="Do not clear screen")
integration = cli.Flag("--integration", help="Run integration tests")
debug_mode = cli.Flag("--debug_mode", help="Put zap into debug_mode")
cli_mode = cli.Flag("--cli_mode", help="Run without ui")
def main(self, *args):
if not self.no_clear:
self.clear()
cli_args = list(args)
root = local.env["PLASTER_ROOT"]
cli_args += [f"--root={root}"]
folders = (
"./plaster",
"./plaster/scripts",
)
include_dirs = ":".join(folders)
cli_args += [f"--include_dirs={include_dirs}"]
with local.cwd(root):
cli_args += [f"--hook_start=./scripts/testing_start.py:test_setup_logs"]
if not self.debug_mode:
if not self.cli_mode:
cli_args += [f"--ui"]
cli_args += [f"--n_workers", "8"]
if self.integration:
cli_args += [f"--groups=integration"]
else:
cli_args += [f"--exclude_groups=integration"]
return self.run_zests_v2(cli_args, self.debug_mode)
@DoCommand.subcommand("jupyter")
class JupyterCommand(cli.Application, DoFuncs):
ip = cli.SwitchAttr("--ip", str, default="0.0.0.0", help="ip to bind to")
port = cli.SwitchAttr("--port", int, default="8080", help="port to bind to")
def main(self, *args):
assert_env()
os.execlp(
"jupyter",
"jupyter",
"notebook",
f"--ip={self.ip}",
f"--port={self.port}",
"--allow-root",
*args,
)
@DoCommand.subcommand("pluck")
class PluckCommand(cli.Application, DoFuncs):
"""
Pluck a field from a result pickle
"""
save_npy = cli.SwitchAttr("--save_npy", str, default=None, help="save as npy file")
save_csv = cli.SwitchAttr(
"--save_csv", str, default=None, help="save as csv file (dataframe only)"
)
save_pkl = cli.SwitchAttr(
"--save_pkl", str, default=None, help="save as pkl file (dataframe only)"
)
def main(self, run_path, symbol):
"""
run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig"
"""
run = RunResult(run_path)
parts = symbol.split(".")
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if self.save_npy is not None:
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if self.save_csv is not None:
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if self.save_pkl is not None:
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl)
@DoCommand.subcommand("export_sigproc_v2")
class ExportSigprocV2Command(cli.Application, DoFuncs):
"""
Export sigproc_v2 and raw data in easy to use formats.
"""
def main(self, run_path):
"""
run_path: path to the run folder (don't forget this is a subfolder of job)
"""
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f"{name}__"
tell(f"Prefixing saved files with {prefix}")
tell("Saving sig.npy")
np.save(f"{prefix}sig.npy", run.sigproc_v2.sig())
tell("Saving noi.npy")
np.save(f"{prefix}noi.npy", run.sigproc_v2.noi())
tell("Saving df.csv")
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f"{prefix}df.csv")
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f"Loading align field {fl_i} of {run.sigproc_v2.n_fields}")
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell("Saving aln_ims.npy")
np.save(f"{prefix}aln_ims.npy", np.stack(ims))
tell("Saving example.py")
utils.save(
f"{prefix}example.py",
f"import numpy as np\n"
+ f"import pandas as pd\n\n"
+ f'prefix = "{prefix}"'
+ utils.smart_wrap(
"""
sig = np.load(f"{prefix}sig.npy")
noi = np.load(f"{prefix}noi.npy")
df = pd.read_csv(f"{prefix}df.csv")
ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")
n_peaks = sig.shape[0]
n_fields, n_channels, n_cycles, im_mea, _ = ims.shape
# Examine some peak
peak_i = 123 # 0 <= peak_i < n_peaks
ch_i = 0 # 0 <= ch_i < n_channels
cy_i = 0 # 0 <= cy_i < n_cycles
y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)
peak_radius = 10
peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]
# Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)
""",
width=200,
assert_if_exceeds_width=True,
),
)
tell("\n\nThe following commands may be useful:")
# tell(f" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv")
# tell(f" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy")
# tell("")
# tell(f" aws s3 cp {prefix}data.tar.gz s3://erisyon-public")
# tell(f" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public")
tell(f" aws s3 cp {prefix}sig.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}noi.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}df.csv s3://erisyon-public")
tell(f" aws s3 cp {prefix}aln_ims.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}example.py s3://erisyon-public")
if __name__ == "__main__":
try:
DoCommand.subcommand("gen", "plaster.gen.gen_main.GenApp")
DoCommand.subcommand("run", "plaster.run.run_main.RunApp")
DoCommand.run()
except (KeyboardInterrupt):
print() # Add an extra line because various thing terminate with \r
sys.exit(1)
except Exception as e:
log.exception(e)
sys.exit(1)
| 31.690519 | 130 | 0.556816 | [
"MIT"
] | erisyon/plaster | plaster/main.py | 17,715 | Python |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
"""
    Moves data from one connection to another, assuming that they both
    provide the required methods in their respective hooks. The source hook
    needs to expose a `get_records` method, and the destination an
    `insert_rows` method.
    This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database
:type sql: str
:param destination_table: target table
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
    :param destination_conn_id: destination connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data
:type preoperator: str or list of str
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.logger.info("Extracting data from %s", self.source_conn_id)
self.logger.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.logger.info("Running preoperator")
self.logger.info(self.preoperator)
destination_hook.run(self.preoperator)
self.logger.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
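# Illustrative usage sketch (hypothetical task id, connection ids and table
# names; assumes a surrounding DAG object named `dag`):
#
#   transfer = GenericTransfer(
#       task_id="mysql_to_postgres",
#       sql="SELECT * FROM source_table",
#       destination_table="destination_table",
#       source_conn_id="mysql_default",
#       destination_conn_id="postgres_default",
#       preoperator="DELETE FROM destination_table",
#       dag=dag,
#   )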
| 37.552632 | 80 | 0.700771 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | findpace/incubator-airflow | airflow/operators/generic_transfer.py | 2,854 | Python |
#For the whatsapp statuses url given below
#COOL
import requests
from bs4 import BeautifulSoup
url_to_scrape = 'https://www.appstatustxt.com/cool-whatsapp-status/'
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text,"html5lib")
status_object=[]
statuses=[]
title=soup.title.string
print(title)
status_object=soup.find_all('span',style="color: #008000;")
fo = open("cool.txt", "a")
#Adding basic stuff for json syntax
#fo.write("{\n")
i=1;
for status in status_object:
if len(status.string)<=135:
statuses.append(status.string+'\n')
print(status.string)
# actual_status=status.string.encode('utf-8')
fo.write(status.string.encode('utf-8')+'\n')
# fo.write('"'+str(i)+'":"'+actual_status+'",\n')
i=i+1 | 29.8 | 69 | 0.695302 | [
"MIT"
] | anay97/python-scraper | cool.py | 745 | Python |
"""
Author: Nathan Clack
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
"""
from .tests import plot_whiskers
from ui.whiskerdata.trace import Whisker_Seg
from numpy import *
import pdb
from functools import reduce
def load():
from ui.whiskerdata import load_whiskers, load_trajectories
from ui.genetiff import Reader
movie = Reader('data/seq/whisker_data_0140.seq',adjuststipple=1)
w,wid = load_whiskers('seq.whiskers')
#movie = Reader('../../data/W0.tif',adjuststipple=1)
#w,wid = load_whiskers('w0-grid.whiskers')
#w,wid = load_whiskers('whisk-vc/whisk-vc/seq.whiskers')
#movie = Reader('data/JF8410_041808_001.tif',adjuststipple=1)
#w,wid = load_whiskers('test.whiskers')
#movie = Reader('data/lorenz/090519-19a_0035.seq',adjuststipple=1)
#w,wid = load_whiskers('lorenz.whiskers')
#w,wid = load_whiskers('results/seq-hand.whiskers')
#t,tid = load_trajectories('results/seq-hand.trajectories')
return w,movie
def check_bounds(wvd,shape):
for fid, wv in wvd.items():
for i,w in wv.items():
for x,y,t,s in w:
if x<0 or x>=shape[1] or y<0 or y>=shape[0]:
print("out of bounds")
pdb.set_trace()
if not ( w.x.flags.contiguous and w.y.flags.contiguous ):
print("not contiguous")
pdb.set_trace()
def fix(wvd,movie,scale=2, signal_per_pixel = 0, max_dist = 60, max_angle = 20.*pi/180.):
shape = movie[0].shape
for fid,wv in list(wvd.items()):
print(fid)
table = CollisionTable( wv, shape, scale )
r = set( resolution( table, wv ) )
for j,l in choose_gaps(movie[fid],r,signal_per_pixel,max_dist,max_angle):
e = reduce( Whisker_Seg.join, j )
r.discard( j[0] )
r.discard( j[-1] )
r.add(e)
wvd[fid] = dict( [ p for p in enumerate(r) ] )
return wvd
def compute_join_length( px, py, tlow = 0.0, thigh = 1.0 ):
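  # Arc length of the parametric curve (polyval(px, t), polyval(py, t)) over
  # [tlow, thigh], i.e. the integral of sqrt(x'(t)**2 + y'(t)**2) dt.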
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
xp2 = polymul( xp, xp )
yp2 = polymul( yp, yp )
p = polyadd( xp2, yp2 )
integrand = lambda t: sqrt( polyval( p, t ) )
return quad(integrand, tlow, thigh) [0]
def compute_join_curvature( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
xpp = polyder( px, 2 )
yp = polyder( py, 1 )
ypp = polyder( py, 2 )
  pn = polysub( polymul( xp, ypp ), polymul( yp, xpp )) #numerator: x'y'' - y'x''
pd = polyadd( polymul( xp, xp ) , polymul( yp, yp ) ) #denominator
integrand = lambda t: fabs(polyval( pn, t )/( polyval( pd, t )**(1.5)) )
return quad(integrand, 0, 1) [0]
def compute_join_angle( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
integrand = lambda t: arctan2(polyval(yp, t), polyval(xp, t))
return quad(integrand, 0, 1) [0]
def _compute_intensity( im, x, y ):
if ( x<0 ).any() or \
( x>=im.shape[1] ).any() or \
( y<0 ).any() or \
( y>=im.shape[0] ).any():
return inf
p = set( p for p in zip(x,y) )
score = 0
for j,i in p:
score += im[i,j]
return score/len(p)
def compute_join_intensity( im, px, py ):
tt = linspace(0,1,50)
x = array( [round(polyval(px,t)) for t in tt] )
  y = array( [round(polyval(py,t)) for t in tt] )
return _compute_intensity(im,x,y)
def compute_join_score( im, px, py, thick = 2 ):
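  # Contrast score for a candidate join: samples the image intensity along the
  # joining curve (a) and along two parallel curves offset by `thick` pixels on
  # either side (b and c), returning (2*a - b - c)/4.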
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
a = _compute_intensity(im, ux, uy )
b = _compute_intensity(im, ux + thick*dy/dL , uy - thick*dx/dL )
c = _compute_intensity(im, ux - thick*dy/dL , uy + thick*dx/dL )
return (2*a - b - c)/4.0
def solve_polynomial_join( left, right, reverse = 0):
"""
Solves for a parametric cubic polynomial curve joining the right side of left
  to the left side of right. The curve matches slope and position at its
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
  Here a = 0 and b = 1 so m and its inverse is always the same.
"""
minv = matrix( [[ 2., -2., 1., 1.],
[-3., 3., -2., -1.],
[ 0., 0., 1., 0.],
[ 1., 0., 0., 0.]])
  #take care of cases joining very short segments
lr = len(right)
ll = len(left)
#L = length( right.x, right.y ) + length( left.x, left.y )
#dd = hypot( left.x[0] - right.x[-1], left.y[0] - right.y[-1] )
  nl = ll//4
  nr = lr//4
slope = lambda v: v[ 0] - v[-1] # want the total change over the length
#slope = lambda v: diff(v).mean()
length = lambda x,y: hypot(diff(x),diff(y)).sum() # euclidian distance in pixels
#
# Compute slope at boundary.
# Uses a number of points near the boundary to compute slope.
# Need to account for edge cases where one or both sides
# consist of very few points.
#
if nr < 2 and nl < 2:
lnorm = length( left.x , left.y )
rnorm = length( right.x , right.y )
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nl = 0
nr = lr - 1
elif nr < 2: # use the derivative on the other side
lnorm = length( left.x[:nl], left.y[:nl] )
rnorm = length( right.x , right.y )
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nr = lr - 1
#print dly,dlx,dry,drx
elif nl < 2: # use the derivative on the other side
rnorm = length( right.x[:nr], right.y[:nr] )
lnorm = length( left.x , left.y )
dry = -slope(right.y[:nr] ) / rnorm
drx = -slope(right.x[:nr] ) / rnorm
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
nl = 0
else: # the "normal" case
rnorm = length( right.x[:nr], right.y[:nr] ) # Compute path length of right border region
lnorm = length( left.x[(-nl):], left.y[(-nl):] ) # Compute path length of left border region
dry = -slope(right.y[:nr] ) / rnorm # Compute dy/dl for right side
drx = -slope(right.x[:nr] ) / rnorm # etc...
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
rnorm = hypot( left.x[0] - right.x[0], left.y[0] - right.y[0] )
lnorm = hypot( left.x[-1]- right.x[0], left.y[-1]- right.y[0] )
if not isfinite(dlx): dlx =(left.x[0] - right.x[0])/lnorm
if not isfinite(dly): dly =(left.y[0] - right.y[0])/lnorm
if not isfinite(drx): drx =(left.x[-1] - right.x[0])/rnorm
if not isfinite(dry): dry =(left.y[-1] - right.y[0])/rnorm
if reverse:
dlx = -dlx
dly = -dly
drx = -drx
dry = -dry
ry = right.y[ 0] ## right.y[nr]
ly = left.y[-1 ] ## left.y[-nl]
rx = right.x[ 0] ## right.x[nr]
lx = left.x[-1 ] ## left.x[-nl]
L = hypot( rx-lx, ry-ly ) # Approximate dl/dt
print("L:%g"%L)
yv = matrix( [[ ly ],
[ ry ],
[ dly * L ], # dy/dt = dy/dl * dl/dt
[ dry * L ]])
xv = matrix( [[ lx ],
[ rx ],
[ dlx * L ],
[ drx * L ]])
cx = minv*xv
cy = minv*yv
if not (isfinite(cx).any() and isfinite(cy).any()):
pdb.set_trace()
return [array(t).squeeze() for t in (cx,cy)]
def plot_join(px,py,*args,**kwargs):
from pylab import plot, polyval
tt = linspace(0,1,50)
plot( polyval(px,tt), polyval(py,tt), *args, **kwargs )
def plot_test(px,py,thick=2):
from pylab import plot
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
plot( ux, uy , '.-')
plot( ux + thick*dy/dL , uy - thick*dx/dL ,'-')
plot( ux - thick*dy/dL , uy + thick*dx/dL ,'-' )
def filter_ends( wv, min_score, shape, border = 10 ):
"""
Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side).
"""
maxy, maxx = [x - border for x in shape]
minx, miny = border, border
test_point = lambda x,y: x>minx and x<maxx and y > miny and y < maxy
bordertest = lambda e,side: test_point( e.x[side], e.y[side] )
scoretest = lambda e,side: e.scores[side] > min_score
sides = [0,-1]
for e in wv:
for s in sides:
if bordertest(e,s) and scoretest(e,s):
yield e,s
def plot_candidate_ends(im, wv, min_score, border = 10):
from pylab import plot, imshow, cm, ion,ioff, show, text
left,right = group_ends( list(filter_ends(wv,min_score,im.shape, border)) )
ioff()
#imshow(im,cmap=cm.gray,hold=0)
m = {0:'ro',-1:'gs'}
for i,e in enumerate(left):
s = 0
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
for i,e in enumerate(right):
s = -1
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
show()
ion()
def group_ends( ends ):
return [e for e,s in ends if s == 0], [e for e,s in ends if s == -1]
def end_direction(w, side, n=16):
a = 0
b = min( n, len(w) )
if side != 0:
a = -b
b = -1
dx = diff( w.x[a:b] ).mean()
dy = diff( w.y[a:b] ).mean()
return dx,dy
def make_joining_whisker(px,py,dist,lthick,lscore,rthick,rscore):
w = Whisker_Seg()
tt = linspace(0,1,round(dist))
w.x = polyval(px,tt).astype(float32)
w.y = polyval(py,tt).astype(float32)
w.thick = polyval( [rthick-lthick,lthick], tt ).astype(float32)
w.scores = polyval( [rscore-lscore,lscore], tt ).astype(float32)
return w
def choose_gaps(im,wv, signal_per_pixel = 0.0, max_dist=60, max_angle = pi/4.):
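  # A pair of candidate ends is joined when the gap is shorter than max_dist,
  # the gap vector deviates from the end direction by less than max_angle (with
  # a positive projection onto that direction), and the joining curve's
  # contrast score is below -signal_per_pixel.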
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
theta = lambda w,side: reduce(arctan2, reversed( end_direction(w,side) ) )
dtheta = lambda left,right: fabs(theta(left,0) - theta(right,-1))
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[ 0]-b.x[-1]
dy = a.y[ 0]-b.y[-1]
d = hypot(dx,dy)
dth = dtheta(a,b)
v = end_direction(a,0)
norm = hypot(*v)
proj = dot( v/norm, (dx,dy) )
# jth: angle change from a to direct line joining a,b
jth = fabs(arctan2( hypot(*( dx-proj*v[0]/norm, dy-proj*v[1]/norm )) , proj ))
#print i,j,
#print "\tD: %g Proj: %g Theta: %g"%(d,proj,jth*180/pi)
l=0;
if d < max_dist and jth < max_angle and proj > 0:
px,py = solve_polynomial_join( b, a )
l = compute_join_score(im,px,py)
if l < -signal_per_pixel:
#plot_test(px,py)
print("\tScore: %g Theta: %g"%(l,jth*180/pi))
e = make_joining_whisker(px,py,d,b.thick[-1],b.scores[-1],a.thick[ 0],a.scores[ 0])
yield (b,e,a),l
def gap_measures(im,wv):
pmetric = lambda p: sqrt(dot(p[:-1],p[:-1]))
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
shape = (len(left),len(right) )
d = zeros( shape )
l = zeros( shape )
c = zeros( shape )
cx = zeros( shape )
cy = zeros( shape )
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[0 ]-b.x[-1]
dy = a.y[0 ]-b.y[-1]
d[i,j] = hypot(dx,dy)
px,py = solve_polynomial_join( b, a )
lpx,lpy = solve_polynomial_join( a, a, reverse = 1 )
rpx,rpy = solve_polynomial_join( b, b, reverse = 1 )
cx[i,j] = max( pmetric( px - lpx ) , pmetric( px - rpx ) )
      cy[i,j] = max( pmetric( py - lpy ) , pmetric( py - rpy ) )
#l[i,j] = compute_join_length(px,py)
l[i,j] = compute_join_score(im,px,py)
plot_test(px,py)
#c[i,j] = compute_join_curvature(px,py)
#if sqrt( px[0]**2 + py[0]**2 ) < 50.0:
# plot_join(px,py)
return d,l,cx,cy
def trace_overlap(pair_a, pair_b, thresh = 2.0 ):
  # DONE: does not assume that indexes run along same direction
  (wa,i) = pair_a
  (wb,j) = pair_b
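  # Walks outward in both directions from the matched sample points (i, j) and
  # returns index ranges [bnda, bndb] over which the two traces stay within
  # `thresh` pixels of each other.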
def dist(ia,ib):
a,b = wa[ia], wb[ib]
return hypot( a[0] - b[0], a[1] - b[1] )
# determine relative direction of indexing
ia,ib = i,j
if ia == len(wa)-1 or ib == len(wb)-1:
if ia != 0 and ib != 0:
dax = wa.x[ia-1] - wa.x[ia]
day = wa.y[ia-1] - wa.y[ia]
dbx = wb.x[ib-1] - wb.x[ib]
dby = wb.y[ib-1] - wb.y[ib]
elif ia == 0:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = - wb.x[ib-1] + wb.x[ib]
dby = - wb.y[ib-1] + wb.y[ib]
elif ib == 0:
dax = - wa.x[ia-1] + wa.x[ia]
day = - wa.y[ia-1] + wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
else:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
stepa = -1; #only need to keep track of one direction
enda = 0;
notend = lambda i,n: i>n
if( abs(dax) > abs(day) ): #determine by x change
if( dax*dbx < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
else: #determine by y change
if( day*dby < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
bnda = [i,i]
bndb = [j,j]
ms = 0
while ms < thresh and notend(ia,enda) and ib > 0:
moves = ( ( ia + stepa, ib - 1 ),
( ia + stepa, ib ),
( ia , ib - 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate( scores ): #choose best move
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == 0:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib - 1 )
while s < last and ib > 1:
ib -= 1
last = s
s = dist( ia, ib - 1 )
elif ib == 0:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[0] = ia
bndb[0] = ib
#flip direction
if stepa == -1:
stepa = 1
enda = len(wa)
notend = lambda i,n:i<n-1
else:
stepa = -1
enda = 0
notend = lambda i,n: i>n
ia,ib = i,j
ms = 0
while ms < thresh and notend(ia,enda) and ib < len(wb)-1:
moves = ( ( ia + stepa, ib + 1 ),
( ia + stepa, ib ),
( ia , ib + 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate(scores):
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == len(wb)-1:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib + 1 )
while s < last and ib < len(wb)-2:
ib += 1
last = s
s = dist( ia, ib + 1 )
elif ib == len(wb)-1:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[1] = ia
bndb[1] = ib
bnda.sort()
return bnda, bndb
def resolution(table, wvd):
rest = set(wvd.values())
match = next(table)
while match:
keep,discard = merge(match)
if discard:
for a in discard:
table.remove( a )
for a in keep:
yield a
for a,i in match:
rest.discard(a)
match = next(table)
for a in rest:
yield a
def pairwise_merge( match ):
overhang = 8
wa = match[0][0]
wb = match[1][0]
bnda, bndb = trace_overlap(*match)
iscomplete = lambda bnd,w: bnd[0] < overhang and bnd[1] >= len(w)-overhang
if iscomplete(bnda,wa) or iscomplete(bndb,wb):
sa = wa.scores.sum()
sb = wb.scores.sum()
if sa > sb:
return wa,None
else:
return None,wb
return None,None
def merge( match ):
dep = dict( [ (e[0],0) for e in match ] )
#iterate through all pairs and mark those who are contained in another whisker
# The pairwise merge should impose a strict ordering
match = list(match)
for i,ma in enumerate(match):
for j,mb in enumerate(match[ (i+1): ]):
ra,rb = pairwise_merge( (ma,mb) )
if ra or rb:
if not ra:
dep[ma[0]] = 1
if not rb:
dep[mb[0]] = 1
# partition into two sets. Those to keep and those to discard.
# Those to keep depend on none of the others.
return [ k for k,v in dep.items() if v==0 ], \
[ k for k,v in dep.items() if v!=0 ]
class CollisionTable(object):
def __init__(self, wvd, shape, scale):
""" `wvd` may be either a dict or list of whiskers """
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
    self._stride = stride = shape[1]//scale
self.topx = lambda p: int(p[0]/scale) + stride * int(p[1]/scale)
self._build_inverse_table( wvd )
def _build_inverse_table(self, wvd ):
g = enumerate(wvd)
if isinstance(wvd, dict):
g = iter(wvd.items())
for i,w in g:
self.add(w)
def update( self, changes ):
""" Changes is a dict mapping old whisker segments to new segments """
last = None
for w,p in changes.items():
self.remove(w)
if p:
self.add(p[0]) # add back ends
self.add(p[-1])
last = p[1]
if last:
self.add(last) # add back last middle
def add(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
self._map.setdefault(px,set()).add( (w,i) )
for i,px in hash(w): # scan back through and remove repeat hits on a pixel
for x in [e for e in self._map[px] if e[0] == w][1:]:
self._map[px].remove(x)
def remove(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
s = self._map.get(px)
if s:
s.discard( (w,i) )
def __iter__(self):
m = next(self)
while m:
yield m
m = next(self)
def __next__(self):
""" This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
"""
todelete = []
retval = None
for px,s in self._map.items():
todelete.append(px) # get rid of references to visited pixels
if len(s) > 1:
retval = s
break
for k in todelete:
del self._map[k]
return retval
def counts( self ):
    tosc = lambda e: e//self._scale
im = zeros(list(map(tosc, self._shape)))
imr = im.ravel()
for px,s in self._map.items():
imr[px] = len(s) #len(set( [e for e,i in s] ))
return im
| 32.529221 | 99 | 0.550155 | [
"BSD-3-Clause"
] | aiporre/whisk | whisk/test_merge3.py | 20,038 | Python |