id (string, 3–8 chars) | content (string, 100–981k chars)
---|---|
45441
|
from montepython.likelihood_class import Likelihood_clik
class Planck15_highl_TTTEEE(Likelihood_clik):
pass
|
45459
|
from flask_restful import marshal, abort, Resource
from app.models import Schedule2
from app.apiv2.decorators import permission_sudo
from app.apiv2.marshal import tasking_schedule_fields
class MobiusTaskApi(Resource):
method_decorators = [permission_sudo]
def get(self, schedule_id):
""" Peek at a schedule """
s = Schedule2.query.get_or_404(schedule_id)
return marshal(s, tasking_schedule_fields)
def delete(self, schedule_id):
""" Mark a task as done """
s = Schedule2.query.get_or_404(schedule_id)
if s.state != "mobius-processing":
abort(400)
s.transition_to_published()
return "{}", 204
|
45471
|
from werkzeug.exceptions import TooManyRequests as WerkzeugTooManyRequests
class TooManyRequests(WerkzeugTooManyRequests):
description = "Too many requests"
def __init__(self, description=None):
if description is not None:
self.description = description
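A brief usage sketch, assuming werkzeug is installed and the class above is in scope: the subclass keeps the inherited 429 status code and overrides the description only when one is supplied.
err = TooManyRequests("Rate limit exceeded")
assert err.code == 429                           # inherited from werkzeug's TooManyRequests
assert err.description == "Rate limit exceeded"  # per-instance override
assert TooManyRequests().description == "Too many requests"  # class-level default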
|
45495
|
import numpy
from AnyQt.QtGui import QColor, QRadialGradient, QPainterPathStroker
def saturated(color, factor=150):
"""Return a saturated color.
"""
h = color.hsvHueF()
s = color.hsvSaturationF()
v = color.valueF()
a = color.alphaF()
s = factor * s / 100.0
s = max(min(1.0, s), 0.0)
return QColor.fromHsvF(h, s, v, a).convertTo(color.spec())
def sample_path(path, num=10):
"""Sample `num` equidistant points from the `path` (`QPainterPath`).
"""
space = numpy.linspace(0.0, 1.0, num, endpoint=True)
return [path.pointAtPercent(float(p)) for p in space]
def radial_gradient(color, color_light=50):
"""
radial_gradient(QColor, QColor)
radial_gradient(QColor, int)
Return a radial gradient. `color_light` can be a QColor or an int.
In the latter case the light color is derived from `color` using
`saturated(color, color_light)`.
"""
if not isinstance(color_light, QColor):
color_light = saturated(color, color_light)
gradient = QRadialGradient(0.5, 0.5, 0.5)
gradient.setColorAt(0.0, color_light)
gradient.setColorAt(0.5, color_light)
gradient.setColorAt(1.0, color)
gradient.setCoordinateMode(QRadialGradient.ObjectBoundingMode)
return gradient
def toGraphicsObjectIfPossible(item):
"""Return the item as a QGraphicsObject if possible.
This function is intended as a workaround for a problem with older
versions of PyQt (< 4.9), where methods returning 'QGraphicsItem *'
lose the type of the QGraphicsObject subclasses and instead return
generic QGraphicsItem wrappers.
"""
if item is None:
return None
obj = item.toGraphicsObject()
return item if obj is None else obj
def linspace(count):
"""Return `count` evenly spaced points from 0..1 interval excluding
both end points, e.g. `linspace(3) == [0.25, 0.5, 0.75]`.
"""
return list(map(float, numpy.linspace(0.0, 1.0, count + 2, endpoint=True)[1:-1]))
def uniform_linear_layout(points):
"""Layout the points (a list of floats in 0..1 range) in a uniform
linear space while preserving the existing sorting order.
"""
indices = numpy.argsort(points)
space = numpy.asarray(linspace(len(points)))
# invert the indices
indices = invert_permutation_indices(indices)
# assert((numpy.argsort(points) == numpy.argsort(space[indices])).all())
points = space[indices]
return points.tolist()
def invert_permutation_indices(indices):
"""Invert the permutation giver by indices.
"""
inverted = [0] * len(indices)
for i, index in enumerate(indices):
inverted[index] = i
return inverted
def stroke_path(path, pen):
"""Create a QPainterPath stroke from the `path` drawn with `pen`.
"""
stroker = QPainterPathStroker()
stroker.setCapStyle(pen.capStyle())
stroker.setJoinStyle(pen.joinStyle())
stroker.setMiterLimit(pen.miterLimit())
stroker.setWidth(max(pen.widthF(), 1e-9))
return stroker.createStroke(path)
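A small numeric sketch relying only on the helpers defined above: uniform_linear_layout re-spaces the points evenly on (0, 1) while each point keeps its original rank.
points = [0.9, 0.1, 0.5]
# numpy.argsort(points) == [1, 2, 0]; inverting that permutation gives [2, 0, 1]
# linspace(3) == [0.25, 0.5, 0.75], so each point keeps its rank in the output:
print(uniform_linear_layout(points))  # [0.75, 0.25, 0.5]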
|
45528
|
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.tests import PerfTest
from perfrunner.workloads.kvgen import kvgen
class IndexTest(PerfTest):
COLLECTORS = {
'secondary_stats': True,
'secondary_debugstats': True,
'secondary_debugstats_bucket': True,
'secondary_debugstats_index': True,
}
@with_stats
@timeit
def init_index(self):
self.create_indexes()
self.wait_for_indexing()
@with_stats
@timeit
def incr_index(self):
self.access()
self.wait_for_indexing()
def _report_kpi(self, indexing_time: float):
self.reporter.post(
*self.metrics.indexing_time(indexing_time)
)
def run(self):
self.load()
self.wait_for_persistence()
self.init_index()
self.incr_index()
class InitialIndexTest(IndexTest):
def run(self):
self.load()
self.wait_for_persistence()
time_elapsed = self.init_index()
self.report_kpi(time_elapsed)
class FastIndexTest(PerfTest):
def load(self, *args):
kvgen(self.master_node, self.test_config.load_settings.items, wait=True)
def access(self, *args):
kvgen(self.master_node, self.test_config.load_settings.items, wait=False)
class FastInitialIndexTest(FastIndexTest, InitialIndexTest):
pass
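The `time_elapsed = self.init_index()` call in InitialIndexTest.run implies that perfrunner's timeit decorator returns the elapsed wall-clock time of the wrapped method. A minimal stand-in illustrating that contract (not perfrunner's actual implementation):
import time
from functools import wraps

def timeit(method):
    # illustrative only: discard the wrapped method's own return value and
    # return the elapsed wall-clock time instead, which is what lets run()
    # write `time_elapsed = self.init_index()` above
    @wraps(method)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        method(*args, **kwargs)
        return time.time() - t0
    return wrapper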
|
45561
|
import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.brain.dynamic import BrainDynamicsConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
class BrainDynamicsConfigurationTests(unittest.TestCase):
def test_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
number: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, dynamic_config.dynamic_vars)
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric', 'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral'}, dynamic_config.dynamic_sets)
self.assertEquals({'ROMANTODEC': 'programy.dynamic.maps.roman.MapRomanToDecimal', 'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman'}, dynamic_config.dynamic_maps)
def test_with_missing_vars_sets_maps(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_with_missing_vars_sets_maps2(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
something: else
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_with_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
self.assertEquals({}, dynamic_config.dynamic_vars)
self.assertEquals({}, dynamic_config.dynamic_sets)
self.assertEquals({}, dynamic_config.dynamic_maps)
def test_to_yaml_defaults(self):
yaml = {}
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.to_yaml(yaml, defaults=True)
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, yaml['variables'])
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric',
'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral',
'STOPWORD': 'programy.dynamic.sets.stopword.IsStopWord',
'SYNSETS': 'programy.dynamic.sets.synsets.IsSynset'}, yaml['sets'])
self.assertEquals({'ROMANTODDEC': 'programy.dynamic.maps.roman.MapRomanToDecimal',
'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman',
'LEMMATIZE': 'programy.dynamic.maps.lemmatize.LemmatizeMap',
'STEMMER': 'programy.dynamic.maps.stemmer.StemmerMap'}, yaml['maps'])
def test_to_yaml_no_defaults(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
number: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.load_config_section(yaml, brain_config, ".")
data = {}
dynamic_config.to_yaml(data, defaults=False)
self.assertEquals({'GETTIME': 'programy.dynamic.variables.datetime.GetTime'}, data['variables'])
self.assertEquals({'NUMBER': 'programy.dynamic.sets.numeric.IsNumeric', 'ROMAN': 'programy.dynamic.sets.roman.IsRomanNumeral'}, data['sets'])
self.assertEquals({'ROMANTODEC': 'programy.dynamic.maps.roman.MapRomanToDecimal', 'DECTOROMAN': 'programy.dynamic.maps.roman.MapDecimalToRoman'}, data['maps'])
def test_to_yaml_no_defaults_no_data(self):
yaml = {}
dynamic_config = BrainDynamicsConfiguration()
dynamic_config.to_yaml(yaml, defaults=False)
self.assertEquals({}, yaml['variables'])
self.assertEquals({}, yaml['sets'])
self.assertEquals({}, yaml['maps'])
def test_defaults(self):
dynamic_config = BrainDynamicsConfiguration()
data = {}
dynamic_config.to_yaml(data, True)
BrainDynamicsConfigurationTests.assert_defaults(self, data)
@staticmethod
def assert_defaults(test, data):
test.assertTrue('sets' in data)
test.assertEqual(data['sets']['NUMBER'], 'programy.dynamic.sets.numeric.IsNumeric')
test.assertEqual(data['sets']['ROMAN'], 'programy.dynamic.sets.roman.IsRomanNumeral')
test.assertEqual(data['sets']['STOPWORD'], 'programy.dynamic.sets.stopword.IsStopWord')
test.assertEqual(data['sets']['SYNSETS'], 'programy.dynamic.sets.synsets.IsSynset')
test.assertTrue('maps' in data)
test.assertEqual(data['maps']['ROMANTODDEC'], 'programy.dynamic.maps.roman.MapRomanToDecimal')
test.assertEqual(data['maps']['DECTOROMAN'], 'programy.dynamic.maps.roman.MapDecimalToRoman')
test.assertEqual(data['maps']['LEMMATIZE'], 'programy.dynamic.maps.lemmatize.LemmatizeMap')
test.assertEqual(data['maps']['STEMMER'], 'programy.dynamic.maps.stemmer.StemmerMap')
test.assertTrue('variables' in data)
test.assertEqual(data['variables']['GETTIME'], 'programy.dynamic.variables.datetime.GetTime')
|
45595
|
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional
import numpy as np
import numpy.typing as npt
from nuplan.common.actor_state.agent import Agent
from nuplan.common.actor_state.ego_state import EgoState
from nuplan.common.actor_state.vehicle_parameters import get_pacifica_parameters
from nuplan.common.geometry.compute import signed_lateral_distance, signed_longitudinal_distance
from nuplan.planning.metrics.evaluation_metrics.base.metric_base import MetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, Statistic, TimeSeries
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
from nuplan.planning.simulation.observation.observation_type import DetectionsTracks
@dataclass
class EgoAgentPair:
"""Class to pair ego and agent."""
ego_state: EgoState # Ego state
agent: Agent # Agent
@dataclass
class EgoToAgentDistances:
"""
Class to keep track of the history of projected distances from ego to an agent.
It also contains the length of the agent.
"""
agent_lengths: List[float] # A list of agent lengths [m]
longitudinal_distances: List[float] # Longitudinal distance from ego to the agent [m]
lateral_distances: List[float] # Lateral distance from ego to the agent [m]
class ClearanceFromStaticAgentsStatistics(MetricBase):
"""Metric on clearance while passing static vehicles."""
def __init__(self, name: str, category: str, lateral_distance_threshold: float) -> None:
"""
Initializes the ClearanceFromStaticAgentsStatistics class
:param name: Metric name
:param category: Metric category
:param lateral_distance_threshold: Agents laterally further away than this threshold are not considered.
"""
super().__init__(name=name, category=category)
self._lateral_distance_threshold = lateral_distance_threshold
self._ego_half_length = get_pacifica_parameters().half_length
def compute_score(
self,
scenario: AbstractScenario,
metric_statistics: Dict[str, Statistic],
time_series: Optional[TimeSeries] = None,
) -> float:
"""Inherited, see superclass."""
# TODO: Define the metric score
return 0.0
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the estimated metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return the estimated metric.
"""
# Compute projected distances
agents_distances = self._extract_agent_projected_distances(history)
clearances_during_passing = self._extract_passing_clearances(agents_distances)
if not clearances_during_passing:
return []
statistics = {
MetricStatisticsType.MAX: Statistic(
name='max_clearance_overtaking_static_agent', unit='meters', value=np.amax(clearances_during_passing)
),
MetricStatisticsType.MIN: Statistic(
name='min_clearance_overtaking_static_agent', unit='meters', value=np.amin(clearances_during_passing)
),
MetricStatisticsType.P90: Statistic(
name='p90_clearance_overtaking_static_agent',
unit='meters',
value=np.percentile(np.abs(clearances_during_passing), 90),
),
}
results = self._construct_metric_results(metric_statistics=statistics, time_series=None, scenario=scenario)
return results # type: ignore
def get_overtake_start_idx(
self, longitudinal_dist: List[float], idx_overtake: int, critical_dist_abs: float
) -> int:
"""
Finds the index of the element which represents the start of the overtake
:param longitudinal_dist: longitudinal distances
:param idx_overtake: index of the distance closest to zero
:param critical_dist_abs: critical distance which represents the start of the overtake
:return index of the start of overtake.
"""
offset = self._get_overtake_edge(longitudinal_dist[idx_overtake::-1], critical_dist_abs)
return idx_overtake - offset if offset is not None else 0
def get_overtake_end_idx(self, longitudinal_dist: List[float], idx_overtake: int, critical_dist_abs: float) -> int:
"""
Finds the index of the element which represents the end of the overtake
:param longitudinal_dist: longitudinal distances
:param idx_overtake: index of the distance closest to zero
:param critical_dist_abs: critical distance which represents the end of the overtake
:return index of the end of overtake.
"""
offset = self._get_overtake_edge(longitudinal_dist[idx_overtake:], critical_dist_abs)
return idx_overtake + offset if offset is not None else -1
@staticmethod
def _get_overtake_edge(distances: List[float], critical_distance: float) -> Optional[int]:
"""
Finds the index of the first element which exceeds the given amount in a list
:param distances: list of distances
:param critical_distance: threshold distance
:return index of the first element exceeding the given amount, None if it doesn't happen.
"""
for idx_start, d in enumerate(distances):
if abs(d) > critical_distance:
return idx_start
return None
def _extract_agent_projected_distances(self, history: SimulationHistory) -> Dict[str, EgoToAgentDistances]:
"""
Computes the projected distances, for inactive agents only
:param history: The history of the scenario
:return A dict containing the projected distances to each inactive track in the entire scenario.
"""
agents_distances: Dict[str, EgoToAgentDistances] = {}
inactive_agents_scenario = self._get_inactive_agents_scenario(history)
for track_token, ego_agent_pairs in inactive_agents_scenario.items():
lateral_dist = [
signed_lateral_distance(ego_agent_pair.ego_state.rear_axle, ego_agent_pair.agent.box.geometry)
for ego_agent_pair in ego_agent_pairs
]
longitudinal_dist = [
signed_longitudinal_distance(ego_agent_pair.ego_state.rear_axle, ego_agent_pair.agent.box.geometry)
for ego_agent_pair in ego_agent_pairs
]
lengths = [ego_agent_pair.agent.box.length for ego_agent_pair in ego_agent_pairs]
agents_distances[track_token] = EgoToAgentDistances(
agent_lengths=lengths, longitudinal_distances=longitudinal_dist, lateral_distances=lateral_dist
)
return agents_distances
def _extract_passing_clearances(self, agents_distances: Dict[str, EgoToAgentDistances]) -> List[float]:
"""
Extracts the portion of projected distances relative to the passing of every agent and saves them to a list
:param agents_distances: The projected distances to each inactive agent
:return A list containing the lateral clearance of all inactive agents while ego is passing them.
"""
clearances_during_overtake = []
for distances in agents_distances.values():
max_longitudinal_dist = max(distances.longitudinal_distances)
idx_max = distances.longitudinal_distances.index(max_longitudinal_dist)
min_longitudinal_dist = min(distances.longitudinal_distances)
idx_min = distances.longitudinal_distances.index(min_longitudinal_dist)
if max_longitudinal_dist > 0 > min_longitudinal_dist and idx_max < idx_min:
overtake_idx = int(np.argmin(np.abs(distances.longitudinal_distances)))
if abs(distances.lateral_distances[overtake_idx]) < self._lateral_distance_threshold:
threshold = self._ego_half_length + distances.agent_lengths[overtake_idx] / 2.0
start_idx = self.get_overtake_start_idx(
distances.longitudinal_distances, int(overtake_idx), threshold
)
end_idx = self.get_overtake_end_idx(distances.longitudinal_distances, int(overtake_idx), threshold)
clearances_during_overtake.extend(np.abs(distances.lateral_distances[start_idx : end_idx + 1]))
return clearances_during_overtake
@staticmethod
def _get_inactive_agents_scenario(history: SimulationHistory) -> Dict[str, List[EgoAgentPair]]:
"""
Get a set of agents which are inactive for the full length of the scenario
An inactive agent in this context is an agent that never moves for the entire scenario
:param history: The history from the scenario
:return A dict of inactive tracks and their ego poses with agents.
"""
# Collect a series of agents to their tracks
agent_tracks = defaultdict(list)
for sample in history.data:
ego_state = sample.ego_state
if not isinstance(sample.observation, DetectionsTracks):
continue
for tracked_object in sample.observation.tracked_objects.get_agents():
agent_tracks[tracked_object.track_token].append(EgoAgentPair(ego_state=ego_state, agent=tracked_object))
inactive_track_agents = defaultdict(list)
for track_token, ego_agent_pairs in agent_tracks.items():
velocities: npt.NDArray[np.float64] = np.asarray(
[ego_agent_pair.agent.velocity.magnitude() for ego_agent_pair in ego_agent_pairs]
)
inactive_status = np.isclose(velocities, 0.0)
# All samples must be inactive
if np.sum(inactive_status) != len(velocities):
continue
inactive_track_agents[track_token] = ego_agent_pairs
return inactive_track_agents
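A small self-contained sketch (plain Python, no nuplan imports; names are illustrative) of the index bookkeeping above: the overtake window is found by scanning outward from the sample where the longitudinal distance is closest to zero until the distance exceeds the critical value.
# toy longitudinal distances as ego approaches, passes, and leaves a parked agent
longitudinal = [9.0, 6.0, 3.0, 1.0, -1.0, -4.0, -8.0]
critical = 3.5  # e.g. ego half-length + agent half-length

def overtake_edge(distances, critical_distance):
    # same logic as _get_overtake_edge above
    for idx, d in enumerate(distances):
        if abs(d) > critical_distance:
            return idx
    return None

idx_overtake = min(range(len(longitudinal)), key=lambda i: abs(longitudinal[i]))  # 3
start = idx_overtake - overtake_edge(longitudinal[idx_overtake::-1], critical)    # 3 - 2 = 1
end = idx_overtake + overtake_edge(longitudinal[idx_overtake:], critical)         # 3 + 2 = 5
print(start, end)  # lateral clearance samples between indices 1 and 5 are kept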
|
45600
|
import unittest
import tempfile
import numpy as np
import coremltools
import os
import shutil
import tensorflow as tf
from tensorflow.keras import backend as _keras
from tensorflow.keras import layers
from coremltools._deps import HAS_TF_2
from test_utils import generate_data, tf_transpose
class TensorFlowKerasTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
tf.keras.backend.set_learning_phase(False)
def setUp(self):
self.saved_model_dir = tempfile.mkdtemp()
_, self.model_file = tempfile.mkstemp(suffix='.h5', prefix=self.saved_model_dir)
def tearDown(self):
if os.path.exists(self.saved_model_dir):
shutil.rmtree(self.saved_model_dir)
def _get_tf_tensor_name(self, graph, name):
return graph.get_operation_by_name(name).outputs[0].name
def _test_model(self, model, data_mode='random_zero_mean', decimal=4, use_cpu_only=False, has_variables=True, verbose=False):
if not HAS_TF_2:
self._test_keras_model_tf1(model, data_mode, decimal, use_cpu_only, has_variables, verbose)
else:
self._test_keras_model_tf2(model, data_mode, decimal, use_cpu_only, has_variables, verbose)
def _test_keras_model_tf1(self, model, data_mode, decimal, use_cpu_only, has_variables, verbose):
graph_def_file = os.path.join(self.saved_model_dir, 'graph.pb')
frozen_model_file = os.path.join(self.saved_model_dir, 'frozen.pb')
core_ml_model_file = os.path.join(self.saved_model_dir, 'model.mlmodel')
input_shapes = {inp.op.name: inp.shape.as_list() for inp in model.inputs}
for name, shape in input_shapes.items():
input_shapes[name] = [dim if dim is not None else 1 for dim in shape]
output_node_names = [output.op.name for output in model.outputs]
tf_graph = _keras.get_session().graph
tf.reset_default_graph()
if has_variables:
with tf_graph.as_default():
saver = tf.train.Saver()
# note: if Keras backend has_variable is False, we're not making variables constant
with tf.Session(graph=tf_graph) as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
for name, shape in input_shapes.items():
tensor_name = tf_graph.get_operation_by_name(name).outputs[0].name
feed_dict[tensor_name] = generate_data(shape, data_mode)
# run the result
fetches = [
tf_graph.get_operation_by_name(name).outputs[0] for name in output_node_names
]
result = sess.run(fetches, feed_dict=feed_dict)
# save graph definition somewhere
tf.train.write_graph(sess.graph, self.saved_model_dir, graph_def_file, as_text=False)
# freeze_graph() has been raising error with tf.keras models since no
# later than TensorFlow 1.6, so we're not using freeze_graph() here.
# See: https://github.com/tensorflow/models/issues/5387
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf_graph.as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names # The output node names are used to select the useful nodes
)
with tf.gfile.GFile(frozen_model_file, 'wb') as f:
f.write(output_graph_def.SerializeToString())
_keras.clear_session()
# convert to Core ML model format
core_ml_model = coremltools.converters.tensorflow.convert(
frozen_model_file,
inputs=input_shapes,
outputs=output_node_names,
use_cpu_only=use_cpu_only)
if verbose:
print('\nFrozen model saved at {}'.format(frozen_model_file))
print('\nCore ML model description:')
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(core_ml_model.get_spec(), style='coding')
core_ml_model.save(core_ml_model_file)
print('\nCore ML model saved at {}'.format(core_ml_model_file))
# transpose input data as Core ML requires
core_ml_inputs = {
name: tf_transpose(feed_dict[self._get_tf_tensor_name(tf_graph, name)])
for name in input_shapes
}
# run prediction in Core ML
core_ml_output = core_ml_model.predict(core_ml_inputs, useCPUOnly=use_cpu_only)
for idx, out_name in enumerate(output_node_names):
tf_out = result[idx]
if len(tf_out.shape) == 0:
tf_out = np.array([tf_out])
tp = tf_out.flatten()
coreml_out = core_ml_output[out_name]
cp = coreml_out.flatten()
self.assertTrue(tf_out.shape == coreml_out.shape)
for i in range(len(tp)):
max_den = max(1.0, tp[i], cp[i])
self.assertAlmostEqual(tp[i] / max_den, cp[i] / max_den, delta=10 ** -decimal)
def _test_keras_model_tf2(self, model, data_mode, decimal, use_cpu_only, has_variables, verbose):
core_ml_model_file = self.model_file.rsplit('.')[0] + '.mlmodel'
input_dict = {inp.op.name: inp.shape.as_list() for inp in model.inputs}
for name, shape in input_dict.items():
input_dict[name] = [dim if dim is not None else 1 for dim in shape]
output_list = ['Identity']
model.save(self.model_file)
# convert Keras model into Core ML model format
core_ml_model = coremltools.converters.tensorflow.convert(
filename=self.model_file,
inputs=input_dict,
outputs=output_list,
use_cpu_only=use_cpu_only)
if verbose:
print('\nKeras model saved at {}'.format(self.model_file))
print('\nCore ML model description:')
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(core_ml_model.get_spec(), style='coding')
core_ml_model.save(core_ml_model_file)
print('\nCore ML model saved at {}'.format(core_ml_model_file))
core_ml_inputs = {
name: generate_data(shape, data_mode) for name, shape in input_dict.items()
}
# run prediction and compare results
keras_output = model.predict(list(core_ml_inputs.values())[0])
core_ml_output = core_ml_model.predict(
core_ml_inputs, useCPUOnly=use_cpu_only)[output_list[0]]
if verbose:
print('\nPredictions', keras_output.shape, ' vs.', core_ml_output.shape)
print(keras_output.flatten()[:6])
print(core_ml_output.flatten()[:6])
np.testing.assert_array_equal(
keras_output.shape, core_ml_output.shape)
np.testing.assert_almost_equal(
keras_output.flatten(), core_ml_output.flatten(), decimal=decimal)
class SimpleLayerTests(TensorFlowKerasTests):
def test_dense_softmax(self):
model = tf.keras.Sequential()
model.add(layers.Dense(16, input_shape=(16,), activation=tf.nn.softmax))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_dense_elu(self):
model = tf.keras.Sequential()
model.add(layers.Dense(16, input_shape=(16,), activation=tf.nn.elu))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, decimal=2)
def test_dense_tanh(self):
model = tf.keras.Sequential()
model.add(layers.Dense(16, input_shape=(16,), activation=tf.nn.tanh))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_housenet_random(self):
num_hidden = 2
num_features = 3
model = tf.keras.Sequential()
model.add(layers.Dense(num_hidden, input_dim=num_features))
model.add(layers.Activation(tf.nn.relu))
model.add(layers.Dense(1, input_dim=num_features))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_conv2d_random(self):
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
model = tf.keras.Sequential()
model.add(layers.Conv2D(
input_shape=input_shape,
filters=num_kernels, kernel_size=(kernel_height, kernel_width)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_conv2d_dilated_random(self):
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
model = tf.keras.Sequential()
model.add(layers.Conv2D(
input_shape=input_shape, dilation_rate=(2, 2),
filters=num_kernels, kernel_size=(kernel_height, kernel_width)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_conv1d_same_random(self):
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = tf.keras.Sequential()
model.add(layers.Conv1D(
nb_filters, kernel_size=filter_length, padding='same',
input_shape=(input_length, input_dim)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_conv1d_valid_random(self):
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = tf.keras.Sequential()
model.add(layers.Conv1D(
nb_filters, kernel_size=filter_length, padding='valid',
input_shape=(input_length, input_dim)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
@unittest.skip('non-equal block shape is not yet supported')
def test_tiny_conv1d_dilated_random(self):
input_shape = (20, 1)
num_kernels = 2
filter_length = 3
model = tf.keras.Sequential()
model.add(layers.Conv1D(
num_kernels, kernel_size=filter_length, padding='valid',
input_shape=input_shape, dilation_rate=3))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_flatten(self):
model = tf.keras.Sequential()
model.add(layers.Flatten(input_shape=(2, 2, 2)))
self._test_model(model, data_mode='linear', has_variables=False)
def test_conv_dense(self):
input_shape = (48, 48, 3)
model = tf.keras.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation=tf.nn.relu, input_shape=input_shape))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation=tf.nn.softmax))
self._test_model(model)
def test_conv_batchnorm_random(self):
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
model = tf.keras.Sequential()
model.add(layers.Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width)))
model.add(layers.BatchNormalization(epsilon=1e-5))
model.add(layers.Dense(10, activation=tf.nn.softmax))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, decimal=2, has_variables=True)
@unittest.skip('list index out of range')
def test_tiny_deconv_random(self):
input_dim = 13
input_shape = (input_dim, input_dim, 5)
num_kernels = 16
kernel_height = 3
kernel_width = 3
model = tf.keras.Sequential()
model.add(layers.Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape, padding='valid', strides=(2, 2)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
@unittest.skip('Deconvolution layer has weight matrix of size 432 to encode a 3 x 4 x 3 x 3 convolution.')
def test_tiny_deconv_random_same_padding(self):
input_dim = 14
input_shape = (input_dim, input_dim, 3)
num_kernels = 16
kernel_height = 3
kernel_width = 3
model = tf.keras.Sequential()
model.add(layers.Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape, padding='same', strides=(2, 2)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_depthwise_conv_same_pad_depth_multiplier(self):
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 4
kernel_height = 3
kernel_width = 3
model = tf.keras.Sequential()
model.add(layers.DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape, padding='same', strides=(1, 1)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
model = tf.keras.Sequential()
model.add(layers.DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape, padding='valid', strides=(1, 1)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_separable_conv_valid_depth_multiplier(self):
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 5
kernel_height = 3
kernel_width = 3
num_kernels = 40
model = tf.keras.Sequential()
model.add(layers.SeparableConv2D(
filters=num_kernels, kernel_size=(kernel_height, kernel_width),
padding='valid', strides=(1, 1), depth_multiplier=depth_multiplier,
input_shape=input_shape))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, decimal=2)
def test_tiny_separable_conv_same_fancy_depth_multiplier(self):
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
num_kernels = 40
model = tf.keras.Sequential()
model.add(layers.SeparableConv2D(
filters=num_kernels, kernel_size=(kernel_height, kernel_width),
padding='same', strides=(2, 2), activation='relu', depth_multiplier=depth_multiplier,
input_shape=input_shape))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, decimal=2)
def test_max_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = tf.keras.Sequential()
model.add(layers.MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2),
strides=None, padding='valid'))
self._test_model(model, has_variables=False)
def test_max_pooling_overlap_multiple(self):
# input shape is multiple of pool_size, strides != pool_size
model = tf.keras.Sequential()
model.add(layers.MaxPooling2D(
input_shape=(18, 18, 3), pool_size=(3, 3),
strides=(2, 2), padding='valid'))
self._test_model(model, has_variables=False)
def test_max_pooling_overlap_odd(self):
model = tf.keras.Sequential()
model.add(layers.MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(3, 3),
strides=(2, 2), padding='valid'))
self._test_model(model, has_variables=False)
def test_max_pooling_overlap_same(self):
model = tf.keras.Sequential()
model.add(layers.MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(3, 3),
strides=(2, 2), padding='same'))
self._test_model(model, has_variables=False)
def test_global_max_pooling_2d(self):
model = tf.keras.Sequential()
model.add(layers.GlobalMaxPooling2D(input_shape=(16, 16, 3)))
self._test_model(model, has_variables=False)
def test_global_avg_pooling_2d(self):
model = tf.keras.Sequential()
model.add(layers.GlobalAveragePooling2D(input_shape=(16, 16, 3)))
self._test_model(model, has_variables=False)
def test_max_pooling_1d(self):
model = tf.keras.Sequential()
model.add(layers.MaxPooling1D(input_shape=(16, 3), pool_size=2))
self._test_model(model, has_variables=False)
if __name__ == '__main__':
np.random.seed(1984)
unittest.main()
|
45632
|
class Enum(object):
@classmethod
def parse(cls, value):
options = cls.options()
result = []
for k, v in options.items():
if type(v) is not int or v == 0:
continue
if value == 0 or (value & v) == v:
result.append(v)
return result
@classmethod
def options(cls):
result = {}
for key in dir(cls):
if key.startswith('_'):
continue
result[key] = getattr(cls, key)
return result
class Media(Enum):
All = 0
Movies = 1
Shows = 2
Seasons = 4
Episodes = 8
Lists = 16
__map__ = None
@classmethod
def get(cls, key):
if cls.__map__ is None:
cls.__map__ = {
Media.Movies: 'movies',
Media.Shows: 'shows',
Media.Seasons: 'seasons',
Media.Episodes: 'episodes',
Media.Lists: 'lists'
}
return cls.__map__.get(key)
class Data(Enum):
All = 0
Collection = 1
Playback = 2
Ratings = 4
Watched = 8
Watchlist = 16
# Lists
Liked = 32
Personal = 64
__attributes__ = None
__map__ = None
@classmethod
def initialize(cls):
if cls.__attributes__:
return
cls.__attributes__ = {
Data.Collection: {
'interface': 'sync/collection',
'timestamp': 'collected_at'
},
Data.Playback: {
'interface': 'sync/playback',
'timestamp': 'paused_at'
},
Data.Ratings: {
'interface': 'sync/ratings',
'timestamp': 'rated_at'
},
Data.Watched: {
'interface': 'sync/watched',
'timestamp': 'watched_at'
},
Data.Watchlist: {
'interface': 'sync/watchlist',
'timestamp': 'watchlisted_at'
},
# Lists
Data.Liked: {
'interface': 'users/likes',
'timestamp': 'updated_at'
},
Data.Personal: {
'interface': 'users/*/lists',
'timestamp': 'updated_at'
}
}
@classmethod
def get(cls, key):
if cls.__map__ is None:
cls.__map__ = {
Data.Collection: 'collection',
Data.Playback: 'playback',
Data.Ratings: 'ratings',
Data.Watched: 'watched',
Data.Watchlist: 'watchlist',
# Lists
Data.Liked: 'liked',
Data.Personal: 'personal'
}
return cls.__map__.get(key)
@classmethod
def get_interface(cls, key):
return cls.get_attribute(key, 'interface')
@classmethod
def get_timestamp_key(cls, key):
return cls.get_attribute(key, 'timestamp')
@classmethod
def get_attribute(cls, key, attribute):
cls.initialize()
attributes = cls.__attributes__.get(key)
if not attributes:
return None
return attributes.get(attribute)
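A brief usage sketch of the bit-flag scheme above: parse expands a combined value into its individual flags, a value of 0 means "all", and the get helpers map flags back to names and interfaces.
# assumes the Media / Data classes above are in scope
assert Media.parse(Media.Movies | Media.Shows) == [Media.Movies, Media.Shows]  # [1, 2]
assert set(Media.parse(Media.All)) == {1, 2, 4, 8, 16}                         # 0 expands to every flag
assert Media.get(Media.Shows) == 'shows'
assert Data.get_interface(Data.Watched) == 'sync/watched'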
|
45650
|
import sys
sys.path.append( "E:\\WORK_IN_PROGRESS\\C\\platfoorm\\engine\\misc\\exporters")
from maya import cmds
from maya import OpenMaya
from maya import OpenMayaAnim
import skeletonExporter
reload(skeletonExporter)
import json
MAX_INFLUENCE = 6
def map_shadow_to_skeleton(root):
data,joints = skeletonExporter.get_skeleton_data(root)
shadow_to_skele = {}
skele_to_shadow={}
#for each joints we need to follow the constraint to find the driver and build
#a map with that data
for j in joints:
const = cmds.listConnections(j + '.tx', d=0,s=1)[0]
driver = cmds.listConnections(const + '.target[0].targetTranslate',s=1,d=0)
shadow_to_skele[j] = driver[0]
skele_to_shadow[driver[0]] = j
return shadow_to_skele, skele_to_shadow
def getWeightsData (mesh,skinNode, skele_to_shadow, joints):
'''
This procedure lets you create a dictionary holding all the information needed to rebuild
a skinCluster map
'''
sknN = skinNode
cmds.undoInfo(openChunk = 1)
infls = cmds.skinCluster(skinNode, q=True, inf=True)
weightMap = []
# get the dag path of the shape node
sel = OpenMaya.MSelectionList()
cmds.select(skinNode)
OpenMaya.MGlobal.getActiveSelectionList(sel)
skinClusterObject = OpenMaya.MObject()
sel.getDependNode(0,skinClusterObject )
skinClusterFn = OpenMayaAnim.MFnSkinCluster(skinClusterObject)
cmds.select(mesh)
sel = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(sel)
shapeDag = OpenMaya.MDagPath()
sel.getDagPath(0, shapeDag)
# create the geometry iterator
geoIter = OpenMaya.MItGeometry(shapeDag)
# create a pointer object for the influence count of the MFnSkinCluster
infCount = OpenMaya.MScriptUtil()
infCountPtr = infCount.asUintPtr()
OpenMaya.MScriptUtil.setUint(infCountPtr, 0)
value = OpenMaya.MDoubleArray()
weightMap = []
infls= OpenMaya.MDagPathArray()
skinClusterFn.influenceObjects(infls)
while geoIter.isDone() == False:
skinClusterFn.getWeights(shapeDag, geoIter.currentItem(), value, infCountPtr)
vtx_data ={"idx": geoIter.index(),
"j":[],
"w":[]}
for j in range(0, infls.length()):
if value[j] > 0:
if skele_to_shadow:
jnt_idx = joints.index(skele_to_shadow[infls[j]])
else:
#node = cmds.listConnections(skinN + ".matrix[" + str(j) + "]",s=1,d=0)[0]
#jnt_idx = joints.index(node)
node = infls[j].fullPathName().rsplit("|",1)[1]
#print node
jnt_idx = joints.index(node)
#jnt_idx = j
weight= value[j]
vtx_data["j"].append(int(jnt_idx))
vtx_data["w"].append(float(weight))
currL = len(vtx_data["j"])
if currL>MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "joints got more than "+str(MAX_INFLUENCE) + " infs"
return;
if currL!= MAX_INFLUENCE:
# pad the data so it always has MAX_INFLUENCE elements
deltaSize = MAX_INFLUENCE - currL
vtx_data['j'].extend([int(0)]*deltaSize)
vtx_data['w'].extend([0.0]*deltaSize)
if len(vtx_data["j"]) != MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "wrong formatting after correction"
if len(vtx_data["w"]) != MAX_INFLUENCE:
print "vertex",vtx_data["idx"], "wrong formatting after correction"
weightMap.append(vtx_data)
geoIter.next()
cmds.undoInfo(closeChunk = 1)
print "------> WeightMap has been saved!"
return weightMap
def export_skin(root, skin_name, path, mesh , tootle_path=None, is_shadow=True):
data,joints = skeletonExporter.get_skeleton_data(root)
#print joints.index("L_EyeAim0")
if is_shadow:
print "----> Remapping to shadow skeleton"
shadow_to_skele, skele_to_shadow = map_shadow_to_skeleton(root)
data = getWeightsData(mesh,skin_name,skele_to_shadow, joints)
else :
data = getWeightsData(mesh,skin_name,None, joints)
full = {"type":"skinCluster",
"data":data,
"skeleton": "dogSkeleton"
}
if tootle_path != None:
#read in the tootle
print "---> remapping skin using tootle data"
t = open(tootle_path, 'r')
tootle_map = json.load(t)
newData = [0]*len(full["data"])
for i,d in enumerate(full["data"]):
new = tootle_map[str(i)]
newData[new] = d
full["data"] = newData
else:
print "skippping tootle"
to_save = json.dumps(full)
f = open( path, 'w')
f.write(to_save)
f.close()
print "saved to", path
if __name__ == "__main__" or __name__ == "__builtin__":
print "exporting skin"
root = "root"
skin = "skinCluster1"
path = r"E:\WORK_IN_PROGRESS\C\platfoorm\engine\misc\exporters\temp_data\mannequin_skin.json"
mesh = "mannequin"
tootle_path = r"E:\WORK_IN_PROGRESS\C\platfoorm\engine\misc\exporters\temp_data\mannequin.tootle"
tootle_path=None
export_skin(root, skin, path, mesh, tootle_path, False)
"""
data,joints = skeleton_exporter.get_skeleton_data(root)
shadow_to_skele, skele_to_shadow = map_shadow_to_skeleton(root)
data = getWeightsData(mesh,skin,skele_to_shadow, joints)
full = {"type":"skinCluster",
"data":data,
"skeleton": "dogSkeleton"
}
to_save = json.dumps(full)
f = open( path, 'w')
f.write(to_save)
f.close()
print "saved to", path
"""
|
45663
|
from unittest import TestCase, skipUnless, mock
from pya import *
import numpy as np
import time
class TestAserver(TestCase):
def setUp(self) -> None:
self.backend = DummyBackend()
self.sig = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 44100))
self.asine = Asig(self.sig, sr=44100, label="test_sine")
def test_default_server(self):
Aserver.startup_default_server(backend=self.backend, bs=512, channels=4)
s = Aserver.default
self.assertEqual(s, Aserver.default)
self.asine.play()
time.sleep(0.5)
s.stop()
self.assertGreater(len(s.stream.samples_out), 0)
sample = s.stream.samples_out[0]
self.assertEqual(sample.shape[0], 512)
self.assertEqual(sample.shape[1], 4)
self.assertAlmostEqual(np.max(sample), 1, places=2)
Aserver.shutdown_default_server()
self.assertIsNone(s.stream)
def test_play_float(self):
s = Aserver(backend=self.backend)
s.boot()
self.asine.play(server=s)
time.sleep(0.5)
s.stop()
self.assertGreater(len(s.stream.samples_out), 0)
sample = s.stream.samples_out[0]
self.assertEqual(sample.shape[0], s.bs)
self.assertEqual(sample.shape[1], s.channels)
self.assertAlmostEqual(np.max(sample), 1, places=2)
s.quit()
def test_repr(self):
s = Aserver(backend=self.backend)
s.boot()
print(s)
s.quit()
def test_get_devices(self):
s = Aserver(backend=self.backend)
d_in, d_out = s.get_devices(verbose=True)
self.assertListEqual(d_in, d_out)
self.assertListEqual(d_in, self.backend.dummy_devices)
def test_boot_twice(self):
s = Aserver(backend=self.backend)
s.boot()
self.assertEqual(s.boot(), -1)
s.quit()
def test_quit_not_booted(self):
s = Aserver(backend=self.backend)
self.assertEqual(s.quit(), -1)
def test_incompatible_backend(self):
s = Aserver(backend=self.backend)
sig = (np.sin(2 * np.pi * 440 * np.linspace(0, 1, 44100)) * np.iinfo(np.int16).max).astype(np.int16)
asine = Asig(sig, sr=44100)
s.boot()
asine.play(server=s)
s.quit()
|
45717
|
from detectors.detectsecrets import DetectSecrets
from detectors.gitsecrets import GitSecrets
from detectors.trufflehog import TruffleHog
# TODO: Turn this into a registry to match the notifiers pattern?
AvailableDetectors = {
'detect-secrets': DetectSecrets,
'git-secrets': GitSecrets,
'trufflehog': TruffleHog
}
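A hedged sketch of how such a mapping is typically consumed; the constructor call shown is hypothetical, since the detector classes' signatures are not part of the snippet.
# hypothetical lookup; 'trufflehog' is one of the keys defined above
detector_cls = AvailableDetectors.get('trufflehog')
if detector_cls is None:
    raise ValueError("unknown detector")
detector = detector_cls()  # constructor arguments, if any, depend on the concrete class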
|
45742
|
import torch
from torch import nn
from torch.nn import functional as F
from lib.utils import bounding_box_batch, get_member
from models.pose_discriminator import MIDisc, MIDiscConv1
from lib.utils import toggle_grad
from torch.optim import Adam
from collections import namedtuple
VGGOutput = namedtuple(
"VGGOutput",
["input", "relu1_2", "relu2_2", "relu3_2", "relu4_2", "relu5_2"],
)
def weight_decay(weights: list):
# reshaped = [weight.reshape(-1) for weight in weights]
tests = [
torch.dot(weight.reshape(-1), weight.reshape(-1)) for weight in weights
]
weight_norms = torch.stack(tests, dim=-1)
return torch.sum(weight_norms)
def latent_kl(prior_mean, posterior_mean):
"""
:param prior_mean:
:param posterior_mean:
:return:
"""
kl = 0.5 * torch.pow(prior_mean - posterior_mean, 2)
kl = torch.sum(kl, dim=[1, 2, 3])
kl = torch.mean(kl)
return kl
def aggregate_kl_loss(prior_means, posterior_means):
kl_loss = torch.sum(
torch.cat(
[
latent_kl(p, q).unsqueeze(dim=-1)
for p, q in zip(
list(prior_means.values()), list(posterior_means.values())
)
],
dim=-1,
)
)
return kl_loss
def compute_kl_loss(prior_means, posterior_means):
kl_loss = torch.sum(
torch.cat(
[
latent_kl(p, q).unsqueeze(dim=-1)
for p, q in zip(prior_means, posterior_means)
],
dim=-1,
)
)
return kl_loss
def compute_kl_with_prior(means, logstds):
kl_out = torch.mean(
torch.cat(
[
kl_loss(m.reshape(m.size(0),-1), l.reshape(l.size(0),-1)).unsqueeze(dim=-1)
for m, l in zip(means, logstds)
],
dim=-1,
)
)
return kl_out
def vgg_loss(custom_vgg, target, pred, weights=None):
"""
:param custom_vgg:
:param target:
:param pred:
:return:
"""
target_feats = custom_vgg(target)
pred_feats = custom_vgg(pred)
target_feats = VGGOutput(**target_feats)
pred_feats = VGGOutput(**pred_feats)
names = list(pred_feats._asdict().keys())
if weights is None:
losses = {}
for i, (tf, pf) in enumerate(zip(target_feats, pred_feats)):
loss = get_member(custom_vgg, "loss_weights")[i] * torch.mean(
torch.abs(tf - pf)
).unsqueeze(dim=-1)
losses.update({names[i]: loss})
else:
losses = {
names[0]: get_member(custom_vgg, "loss_weights")[0]
* torch.mean(weights * torch.abs(target_feats[0] - pred_feats[0]))
.unsqueeze(dim=-1)
.to(torch.float)
}
for i, (tf, pf) in enumerate(zip(target_feats[1:], pred_feats[1:])):
loss = get_member(custom_vgg, "loss_weights")[i + 1] * torch.mean(
torch.abs(tf - pf)
).unsqueeze(dim=-1)
losses.update({names[i + 1]: loss})
return losses
def zoom_loss(target, pred, kps, img_sizes, custom_vgg, spatial_size):
resized_pred = bounding_box_batch(kps, pred, img_sizes, spatial_size)
return vgg_loss(custom_vgg, target, resized_pred)
class GANLoss(nn.Module):
"""
The GAN loss; 'loss_type'-parameter defines the loss function which is actually computed
"""
def __init__(self, loss_type: str = "mse"):
super().__init__()
if loss_type == "vanilla":
self.loss = nn.BCEWithLogitsLoss()
elif loss_type == "mse":
self.loss = nn.MSELoss()
else:
raise ValueError(
f'The loss type for GANLoss must be either "vanilla" or "mse", but is actually {loss_type}.'
)
self.loss_type = loss_type
def forward(self, pred, target):
return self.loss(pred, target)
class TripletLoss(nn.Module):
def __init__(self, margin=0.2):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1) # .pow(.5)
distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
class SequentialDiscLoss(nn.Module):
def __init__(self, loss_type: str = "bce"):
super().__init__()
self.loss_type = loss_type
if loss_type == "bce":
self.loss = nn.BCEWithLogitsLoss()
elif loss_type == "mse":
loss_layers = [nn.Sigmoid(), nn.MSELoss()]
self.loss = nn.Sequential(*loss_layers)
else:
self.loss = None
assert self.loss_type in ["bce", "mse", "hinge"]
def forward(self, pred, target, mode="real"):
if self.loss_type in ["bce", "mse"]:
return self.loss(pred, target)
elif self.loss_type == "hinge":
assert mode in ["real", "fake", "gen"]
if mode == "real":
# discriminator training for real
return torch.mean(torch.nn.ReLU()(1.0 - pred))
elif mode == "fake":
# discriminator training for fake
return torch.mean(torch.nn.ReLU()(1.0 + pred))
else:
# generator training
return -torch.mean(pred)
else:
raise ValueError("Invalid loss type.")
class MILoss:
def __init__(self, input_dim, device,**kwargs):
n_layer = (
kwargs["n_layer_c"]
if not "n_layer_midisc" in kwargs
else kwargs["n_layer_midisc"]
)
nf_hidden = (
kwargs["dim_hidden_c"]
if not "nf_hidden_midisc" in kwargs
else kwargs["nf_hidden_midisc"]
)
if "conv_midisc" in kwargs and kwargs["conv_midisc"]:  # kwargs is a plain dict, so key access is required
self.disc = MIDiscConv1(n_layer, input_dim, nf_hidden)
print("Using convolutional mi disc.")
else:
self.disc = MIDisc(n_layer, input_dim, nf_hidden)
print("Using linear mi disc.")
self.disc.to(device)
self.loss = nn.BCEWithLogitsLoss(reduction="mean")
self.disc_opt = Adam(
params=[{"params": self.disc.parameters(), "name": "mi_disc"}],
lr=kwargs["lr_init"],
weight_decay=kwargs["weight_decay"],
)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.disc_opt, milestones=kwargs["tau"], gamma=kwargs["gamma"]
)
self.sigm = nn.Sigmoid()
def train_disc(self, zb_joint, zb_marg, seq_len=0):
# enable gradient
toggle_grad(self.disc, True)
self.disc.train()
self.disc_opt.zero_grad()
disc_joint = self.disc(zb_joint).squeeze()
joint_p = torch.mean(self.sigm(disc_joint))
out_dict = {"mi_true_p": joint_p.item()}
disc_marg = self.disc(zb_marg).squeeze()
marg_p = torch.mean(self.sigm(disc_marg))
out_dict.update({"mi_fake_p": marg_p.item()})
loss_joint = (
self.loss(disc_joint, torch.ones_like(disc_joint)) / seq_len
)
loss_marg = self.loss(disc_marg, torch.zeros_like(disc_marg))
out_dict.update({"mi_disc_loss_joint": loss_joint.item()})
out_dict.update({"mi_disc_loss_marg": loss_marg.item()})
loss = loss_joint + loss_marg
out_dict.update({"mi_disc_loss": loss.item()})
loss.backward(retain_graph=True)
self.disc_opt.step()
return out_dict
def train_gen(self, zb_joint, zb_marg):
# disable gradient
toggle_grad(self.disc, False)
self.disc.eval()
zb_joint.requires_grad_(True)
disc_joint = self.disc(zb_joint).squeeze()
zb_marg.requires_grad_(True)
disc_marg = self.disc(zb_marg).squeeze()
loss_joint = self.loss(disc_joint, torch.ones_like(disc_joint))
loss_marg = self.loss(disc_marg, torch.zeros_like(disc_marg))
return -(loss_joint + loss_marg)
def load(self, ckpt):
if ckpt is not None:
self.disc.load_state_dict(ckpt["mi_disc"])
self.disc_opt.load_state_dict(ckpt["mi_optimizer"])
def get_save_dict(self):
return {"mi_disc": self.disc, "mi_optimizer": self.disc_opt}
def kl_loss(mu, logstd):
# mu and logstd are b x k x d x d
# make them into b*d*d x k
dim = mu.shape[1]
std = torch.exp(logstd)
kl = torch.sum(-logstd + 0.5 * (std ** 2 + mu ** 2), dim=-1) - (0.5 * dim)
return kl.mean()
class FlowLoss(nn.Module):
def __init__(self,):
super().__init__()
# self.config = config
def forward(self, sample, logdet):
nll_loss = torch.mean(nll(sample))
assert len(logdet.shape) == 1
nlogdet_loss = -torch.mean(logdet)
loss = nll_loss + nlogdet_loss
reference_nll_loss = torch.mean(nll(torch.randn_like(sample)))
log = {
"flow_loss": loss.item(),
"reference_nll_loss": reference_nll_loss.item(),
"nlogdet_loss": nlogdet_loss.item(),
"nll_loss": nll_loss.item(),
}
return loss, log
class FlowLossUncond(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample, logdet):
nll_loss = torch.mean(nll(sample))
assert len(logdet.shape) == 1
nlogdet_loss = -torch.mean(logdet)
loss = nll_loss + nlogdet_loss
reference_nll_loss = torch.mean(nll(torch.randn_like(sample)))
log = {
"flow_loss": loss, "reference_nll_loss": reference_nll_loss,
"nlogdet_loss": nlogdet_loss, "nll_loss": nll_loss,
}
return loss, log
def nll(sample):
return 0.5 * torch.sum(torch.pow(sample, 2), dim=[1, 2, 3])
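A minimal runnable check, assuming torch (already imported above) and the classes above are in scope: with zero log-determinants the unconditional flow loss reduces to the mean standard-normal nll.
sample = torch.randn(8, 3, 16, 16)   # fake latent batch
logdet = torch.zeros(8)              # one log-determinant per batch element
loss, log = FlowLossUncond()(sample, logdet)
print(loss.item(), log["nll_loss"].item())  # equal here, since nlogdet_loss is 0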
|
45747
|
from titan.components import Base
class Test(Base):
@staticmethod
def default():
print("test")
|
45833
|
import pytest
import tempfile
from conftest import load_circuit_files
def test_load_files():
# load nodes file
net = load_circuit_files(data_files='examples/v1_nodes.h5', data_type_files='examples/v1_node_types.csv')
assert(net.nodes is not None)
assert(net.has_nodes)
assert(net.edges is None)
assert(not net.has_edges)
# load edges file
net = load_circuit_files(data_files='examples/v1_v1_edges.h5', data_type_files='examples/v1_v1_edge_types.csv')
assert(net.nodes is None)
assert(not net.has_nodes)
assert(net.edges is not None)
assert(net.has_edges)
# load nodes and edges
net = load_circuit_files(data_files=['examples/v1_nodes.h5', 'examples/v1_v1_edges.h5'],
data_type_files=['examples/v1_node_types.csv', 'examples/v1_v1_edge_types.csv'])
assert(net.nodes is not None)
assert(net.has_nodes)
assert(net.edges is not None)
assert(net.has_edges)
def test_version():
net = load_circuit_files(data_files=['examples/v1_nodes.h5', 'examples/v1_v1_edges.h5'],
data_type_files=['examples/v1_node_types.csv', 'examples/v1_v1_edge_types.csv'])
assert(net.version == '0.1')
def test_bad_magic():
import h5py
tmp_file, tmp_file_name = tempfile.mkstemp(suffix='.hdf5')
# no magic
with h5py.File(tmp_file_name, 'r+') as h5:
h5.create_group('nodes')
with pytest.raises(Exception):
load_circuit_files(data_files=tmp_file_name, data_type_files='examples/v1_node_types.csv')
# bad magic
with h5py.File(tmp_file_name, 'r+') as h5:
h5.attrs['magic'] = 0x0A7B
with pytest.raises(Exception):
load_circuit_files(data_files=tmp_file_name, data_type_files='examples/v1_node_types.csv')
def test_no_files():
with pytest.raises(Exception):
load_circuit_files(data_files=[], data_type_files=[])
def test_no_node_types():
with pytest.raises(Exception):
load_circuit_files(data_files='examples/v1_nodes.h5', data_type_files=[])
def test_mixed_files():
with pytest.raises(Exception):
load_circuit_files(data_files='examples/v1_nodes.h5', data_type_files='examples/v1_v1_edge_types.csv')
|
45843
|
from distriopt import VirtualNetwork
from distriopt.embedding.physical import PhysicalNetwork
from distriopt.embedding.algorithms import (
EmbedBalanced,
# EmbedILP,
EmbedPartition,
EmbedGreedy,
)
from distriopt.packing.algorithms import ( BestFitDopProduct,
FirstFitDecreasingPriority,
FirstFitOrderedDeviation )
from distriopt.packing import CloudInstance
from random import randint
import subprocess
from pathlib import Path
class DummyMapper(object):
def __init__(self, places={}):
self.places = places
def place(self, node):
return self.places[node]
def placeLink(self, link):
return ({}, {})
class RoundRobinMapper(DummyMapper):
def __init__(self, virtual_topo, physical_topo=[]):
self.physical = physical_topo
self.vNodes = virtual_topo.hosts()+virtual_topo.switches()
self.places = self.__places(self.vNodes, physical_topo)
def __places(self, vNodes, physical_topo):
places={}
i=0
for node in vNodes:
places[node] = physical_topo[i % len(physical_topo)]
i += 1
return places
def place(self, node):
return self.places[node]
class RandomMapper(DummyMapper):
def __init__(self, virtual_topo, physical_topo=[]):
self.physical = physical_topo
self.vNodes = virtual_topo.hosts()+virtual_topo.switches()
self.places = self.__places(self.vNodes, physical_topo)
def __places(self, vNodes, physical_topo):
places={}
for node in vNodes:
places[node] = physical_topo[randint(0,len(physical_topo)-1)]
return places
def place(self, node):
return self.places[node]
class MaxinetMapper(DummyMapper):
def __init__(self, virtual_topo, physical_topo=[], share_path="/Users/giuseppe/Desktop/algo_experiments/algo_experiments/distrinet/mininet/mininet/mapper/shares/equal10.txt"):
self.physical = physical_topo
self.virtual_network = virtual_topo
self.vNodes = virtual_topo.hosts()+virtual_topo.switches()
self.vHosts = virtual_topo.hosts()
self.vSwitches = virtual_topo.switches()
self.vlinks = virtual_topo.links()
self.metis_node_mapping = None
self.node_metis_mapping = None
self.metis_dict = None
maxinet_dict = self.convert_in_maxinet_dict()
# OK
metis_dict = self.convert_in_metis_dict(maxinet_dict=maxinet_dict)
print(metis_dict) # OK
self.create_metis_file(metis_dict=metis_dict, path="/tmp/metis_file") #OK
print("USING {}".format(share_path))
self.run_metis(graph_path="/tmp/metis_file", share_path=share_path) # OK
mapping = self.get_mapping(graph_path="/tmp/metis_file", share_path=share_path) # OK
print(mapping)
mapping_converted = self.convert_mapping(mapping) # OK
print("MAPPING CONVERTED")
print(mapping_converted)
complete_mapping = self.get_mapping_for_all_nodes(mapping_converted) # OK
print("COMPLETE MAPPING")
print(complete_mapping)
print(self.metis_node_mapping)
compute_nodes = sorted(self.physical)
mapping = complete_mapping
sorted_keys = sorted(mapping.keys(), key=lambda x: int(x), reverse=True)
physical_names_mapping = {phy_name: metis_name for phy_name, metis_name in
zip(compute_nodes, sorted_keys)}
metis_name_mapping = {physical_names_mapping[x]: x for x in physical_names_mapping.keys()}
mapping_with_physical_names = {metis_name_mapping[node]: mapping[node] for node in mapping.keys()}
print(mapping_with_physical_names)
self.places = self.__places(mapping_with_physical_names)
print("FINAL")
print(self.places)
def __places(self, mapping):
final = dict()
for physical, list_vnodes in mapping.items():
for v in list_vnodes:
final[v]=physical
return final
def get_mapping(self, graph_path, share_path):
gr_path = Path(graph_path)
if gr_path.is_file():
file_name = gr_path.name
else:
raise RuntimeError()
if Path(share_path).is_file():
physical_hosts = self.get_physical_hosts(share_path)
else:
raise RuntimeError()
mapping_file_name = file_name +".part."+ str(len(physical_hosts))
mapping_file_path = gr_path.parent / mapping_file_name
mapping = {host: [] for host in physical_hosts}
with open(mapping_file_path,"r") as file:
lines = list(map(lambda x:x.strip(), file.readlines()))
for c, m in enumerate(lines):
switch = c + 1
mapping[m].append(switch)
return mapping
def run_metis(self, graph_path, share_path):
n_physical_hosts = len(self.get_physical_hosts(share_path))
cmd=f"gpmetis -ptype=rb -tpwgts={str(share_path)} {str(graph_path)} {n_physical_hosts}"
output = subprocess.check_output(cmd, shell=True)
out = output.decode("utf-8")
return out
def get_mapping_for_all_nodes(self, mapping_node_names):
total_mapping={host: mapping_node_names[host] for host in mapping_node_names.keys()}
        for host in total_mapping.keys():
            # iterate over a copy, because the list is extended while we walk it
            for node in list(total_mapping[host]):
                total_mapping[host] += self.get_connected_hosts(node)
return total_mapping
def get_connected_hosts(self, node_name):
nodes = []
for node in self.getNeighbors(node_name):
if node in self.vHosts:
nodes.append(node)
return nodes
def convert_mapping(self, mapping):
mapping_node_names = {host: [] for host in mapping.keys()}
for host in mapping.keys():
mapping_node_names[host] = [self.metis_node_mapping[node] for node in mapping[host]]
return mapping_node_names
def create_metis_file(self, metis_dict, path):
nodes, edges = len(self.get_metis_nodes()), len(self.get_metis_edges())
sorted_keys = sorted(list(metis_dict.keys()))
metis_lines = [[nodes, edges, "011", "0"]]
for k in sorted_keys:
weight = metis_dict[k]["weight"]
edges = metis_dict[k]["edges"]
line = [weight] + edges
metis_lines.append(line)
with open(Path(path), "w") as file:
for line in metis_lines:
file.write(" ".join([str(x) for x in line]) + "\n")
return metis_lines
def get_physical_hosts(self, share_path):
with open(share_path, "r") as file:
lines = file.readlines()
lines = list(map(lambda x: x.strip(), lines))
while [] in lines:
lines.remove([])
hosts = [x.split('=')[0].strip() for x in lines]
return hosts
def get_metis_nodes(self):
return self.vSwitches
def get_metis_edges(self):
edges = []
for u, v in self.vlinks:
if u in self.vSwitches and v in self.vSwitches:
edges.append((u, v))
return edges
def getNeighbors(self, n):
links = self.vlinks
links = list(filter(lambda x: x[0] == n or x[1] == n, links))
neighbors = set([x[0] for x in links]+[x[1] for x in links] )
neighbors.remove(n)
return list(neighbors)
def convert_in_maxinet_dict(self):
maxinet_nodes = dict()
for n in self.vSwitches:
maxinet_nodes[n] = {"weight": 1, "connected_switches": []}
for n in maxinet_nodes.keys():
connected_nodes = self.getNeighbors(n)
for connected_node in connected_nodes:
if connected_node in self.vHosts:
maxinet_nodes[n]["weight"] += 1
else:
maxinet_nodes[n]["connected_switches"].append(connected_node)
return maxinet_nodes
def req_rate(self, n1, n2):
links = self.virtual_network.links(withInfo=True)
for u, v, d in links:
if (u, v) == (n1,n2) or (v,u) == (n1,n2):
return d["bw"]
raise ValueError("Link {}-{} does not exist")
def convert_in_metis_dict(self, maxinet_dict):
metis_node_mapping = {num+1: node for num, node in enumerate(maxinet_dict.keys())}
node_metis_mapping = {metis_node_mapping[num]: num for num in metis_node_mapping.keys()}
metis_dict = {num: {"weight": None, "edges": []} for num in metis_node_mapping.keys()}
for node in maxinet_dict.keys():
num = node_metis_mapping[node]
metis_dict[num]["weight"] = maxinet_dict[node]["weight"]
for neighboor in maxinet_dict[node]["connected_switches"]:
neighboor_mapped = node_metis_mapping[neighboor]
required_edge_rate = self.req_rate(node, neighboor)
metis_dict[num]["edges"] += [neighboor_mapped, required_edge_rate]
self.metis_node_mapping = metis_node_mapping
self.node_metis_mapping = node_metis_mapping
self.metis_dict = metis_dict
return metis_dict
class BlockMapper(DummyMapper):
def __init__(self, virtual_topo, physical_topo=[],block=10):
self.physical = physical_topo
try:
self.vNodes = zip(sorted(virtual_topo.hosts(), key= lambda x:int(x[1:])),sorted(virtual_topo.switches(), key= lambda x:int(x[1:])))
except:
print("Not a valid Mapper for this instance")
exit(1)
self.places = self.__places(self.vNodes, physical_topo,block)
def __places(self, vNodes, physical_topo,block):
places={}
vNodes= list(vNodes)
if len(physical_topo) < len(vNodes) / block:
raise Exception("Not a valid Mapper for this instance")
for i, (v, s) in enumerate(vNodes):
places[v] = physical_topo[i//block]
places[s] = physical_topo[i//block]
return places
def place(self, node):
return self.places[node]
class Mapper(object):
def __init__(self, virtual_topo, physical_topo, solver=EmbedGreedy):
""" virtual_topo: virtual topology to map
physical_topo: physical topology to map on
solver: solver class to use to solve the mapping"""
self.virtual_topo = VirtualNetwork.from_mininet(virtual_topo)
self.mininet_virtual=virtual_topo
self.physical_topo = PhysicalNetwork.from_files(physical_topo)
self.prob = None
self.solver = solver
self.solve()
self.places= self.__places()
def solve(self, solver=None):
""" Solve the mapping problem of the virtual topology on the physical
one using the specified solver
solver: solver class to use to solve the mapping
"""
if solver is not None:
self.solver = solver
self.prob = self.solver(virtual=self.virtual_topo, physical=self.physical_topo)
time_solution, status = self.prob.solve()
if status == "0" or status == 0:
raise Exception("Failed to solve")
elif status == "-1" or status == - 1:
raise Exception("Unfeasible Problem")
def __places(self):
places={}
vNodes=self.mininet_virtual.hosts()+self.mininet_virtual.switches()
for node in vNodes:
places[node]=self.place(node)
return places
def place(self, node):
""" Returns physical placement of the node
node: node in the virtual topology
return: name of the physical host to use
"""
if self.prob == None:
self.solve()
place = self.prob.solution.node_info(node)
return place
def placeLink(self, link):
""" Returns physical placement of the link
link: link in the virtual topology
returns: list of placements for the link
"""
if self.prob == None:
self.solve()
n1,n2=link
#p1,p2 = self.prob.solution.node_info(n1),self.prob.solution.node_info(n2)
return {},{}
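# Illustrative usage sketch (assumption: a Mininet-style virtual topology object
# exposing hosts()/switches()/links() and a physical topology spec readable by
# PhysicalNetwork.from_files are available; the names below are hypothetical and
# not part of this module):
#
#   mapper = Mapper(virtual_topo=my_virtual_topo,
#                   physical_topo="physical_topo.json",
#                   solver=EmbedGreedy)
#   print(mapper.place("h1"))             # physical host chosen for virtual node h1
#   print(mapper.placeLink(("h1", "s1")))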
class Packing(object):
def __init__(self, virtual_topo, cloud_prices,solver=BestFitDopProduct):
""" virtual_topo: virtual topology to map
physical_topo: physical topology to map on
solver: solver class to use to solve the mapping"""
self.virtual_topo = VirtualNetwork.from_mininet(virtual_topo)
self.cloud = CloudInstance.read_ec2_instances(vm_type=cloud_prices)
self.mininet_virtual=virtual_topo
self.prob = None
self.solver = solver
self.places=self.__places()
def solve(self, solver=None):
""" Solve the mapping problem of the virtual topology on the physical
one using the specified solver
solver: solver class to use to solve the mapping
"""
if solver is not None:
self.solver = solver
#virtual_network= VirtualNetwork.from_mininet(self.virtual_topo)
self.prob = self.solver(virtual=self.virtual_topo, physical=self.cloud)
time_solution, status = self.prob.solve()
if status == "0":
raise Exception("Failed to solve")
elif status == "-1":
raise Exception("Unfeasible Problem")
def __places(self):
places=dict()
vNodes=self.mininet_virtual.hosts()+self.mininet_virtual.switches()
for node in vNodes:
places[node]=self.place(node)
return places
def place(self, node):
""" Returns physical placement of the node
node: node in the virtual topology
return: name of the physical host to use
"""
if self.prob == None:
self.solve()
place = self.prob.solution.node_info(node)
return place
def placeLink(self, link):
""" Returns physical placement of the link
link: link in the virtual topology
returns: list of placements for the link
"""
if self.prob == None:
self.solve()
place = self.prob.solution.link_mapping[link]
return place
if __name__ == '__main__':
#physical = PhysicalNetwork.from_files("/Users/giuseppe/.distrinet/gros_partial")
virtual_topo = VirtualNetwork.create_fat_tree(k=2, density=2, req_cores=2, req_memory=100,
req_rate=100)
from distriopt.packing import CloudInstance
|
45846
|
import torch
import torch_quiver as torch_qv
import random
import numpy as np
import time
from typing import List
from quiver.shard_tensor import ShardTensor, ShardTensorConfig, Topo
from quiver.utils import reindex_feature
import torch.multiprocessing as mp
from torch.multiprocessing import Process
import os
import sys
import quiver
import torch.distributed as dist
__all__ = ["Feature"]
class Feature:
def __init__(self,
rank,
device_list,
device_cache_size=0,
cache_policy='device_replicate',
csr_topo=None):
self.device_cache_size = device_cache_size
self.cache_policy = cache_policy
self.device_list = device_list
self.device_tensor_list = {}
self.numa_tensor_list = {}
self.rank = rank
self.topo = Topo(self.device_list)
        self.csr_topo = csr_topo
        # feature_order is filled in by from_cpu_tensor() when a csr_topo is given
        self.feature_order = None
        self.ipc_handle_ = None
def cal_memory_budget_bytes(self, memory_budget):
if isinstance(memory_budget, int):
return memory_budget
elif isinstance(memory_budget, float):
memory_budget = int(memory_budget)
elif isinstance(memory_budget, str):
if memory_budget.upper().endswith(
"M") or memory_budget.upper().endswith("MB"):
end = -1 if memory_budget.upper().endswith("M") else -2
memory_budget = int(float(memory_budget[:end]) * 1024 * 1024)
elif memory_budget.upper().endswith(
"G") or memory_budget.upper().endswith("GB"):
end = -1 if memory_budget.upper().endswith("G") else -2
memory_budget = int(
float(memory_budget[:end]) * 1024 * 1024 * 1024)
else:
raise Exception("memory budget input is not valid")
return memory_budget
def cal_size(self, cpu_tensor, cache_memory_budget):
element_size = cpu_tensor.shape[1] * 4
cache_size = cache_memory_budget // element_size
return cache_size
def partition(self, cpu_tensor, cache_memory_budget):
cache_size = self.cal_size(cpu_tensor, cache_memory_budget)
return [cpu_tensor[:cache_size], cpu_tensor[cache_size:]]
def from_cpu_tensor(self, cpu_tensor):
if self.cache_policy == "device_replicate":
cache_memory_budget = self.cal_memory_budget_bytes(
self.device_cache_size)
shuffle_ratio = 0.0
else:
cache_memory_budget = self.cal_memory_budget_bytes(
self.device_cache_size) * len(self.topo.Numa2Device[0])
shuffle_ratio = self.cal_size(
cpu_tensor, cache_memory_budget) / cpu_tensor.size(0)
print(
f"LOG>>> {min(100, int(100 * cache_memory_budget / cpu_tensor.numel() / 4))}% data cached"
)
if self.csr_topo is not None:
print("Create")
cpu_tensor, self.csr_topo.feature_order = reindex_feature(
self.csr_topo, cpu_tensor, shuffle_ratio)
self.feature_order = self.csr_topo.feature_order.to(self.rank)
print("Done Create")
cache_part, self.cpu_part = self.partition(cpu_tensor,
cache_memory_budget)
self.cpu_part = self.cpu_part.clone()
if cache_part.shape[0] > 0 and self.cache_policy == "device_replicate":
for device in self.device_list:
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
shard_tensor.append(cache_part, device)
self.device_tensor_list[device] = shard_tensor
elif cache_part.shape[0] > 0:
numa0_device_list = self.topo.Numa2Device[0]
numa1_device_list = self.topo.Numa2Device[1]
block_size = self.cal_size(
cpu_tensor,
cache_memory_budget // len(self.topo.Numa2Device[0]))
if len(numa0_device_list) > 0:
print(
f"LOG>>> GPU {numa0_device_list} belong to the same NUMA Domain"
)
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
cur_pos = 0
for idx, device in enumerate(numa0_device_list):
if idx == len(numa0_device_list) - 1:
shard_tensor.append(cache_part[cur_pos:], device)
else:
shard_tensor.append(
cache_part[cur_pos:cur_pos + block_size], device)
cur_pos += block_size
self.numa_tensor_list[0] = shard_tensor
if len(numa1_device_list) > 0:
print(
f"LOG>>> GPU {numa1_device_list} belong to the same NUMA Domain"
)
shard_tensor = ShardTensor(self.rank, ShardTensorConfig({}))
cur_pos = 0
for idx, device in enumerate(numa1_device_list):
if idx == len(numa1_device_list) - 1:
shard_tensor.append(cache_part[cur_pos:], device)
else:
shard_tensor.append(
cache_part[cur_pos:cur_pos + block_size], device)
cur_pos += block_size
self.numa_tensor_list[1] = shard_tensor
        # build the CPU part of the tensor
if self.cpu_part.numel() > 0:
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list.get(
self.rank, None) or ShardTensor(self.rank,
ShardTensorConfig({}))
shard_tensor.append(self.cpu_part, -1)
self.device_tensor_list[self.rank] = shard_tensor
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list.get(
numa_id, None) or ShardTensor(self.rank,
ShardTensorConfig({}))
shard_tensor.append(self.cpu_part, -1)
self.numa_tensor_list[numa_id] = shard_tensor
def __getitem__(self, node_idx):
self.lazy_init_from_ipc_handle()
node_idx = node_idx.to(self.rank)
if self.feature_order is not None:
node_idx = self.feature_order[node_idx]
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor[node_idx]
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor[node_idx]
def size(self, dim):
self.lazy_init_from_ipc_handle()
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor.size(dim)
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor.size(dim)
@property
def shape(self):
self.lazy_init_from_ipc_handle()
if self.cache_policy == "device_replicate":
shard_tensor = self.device_tensor_list[self.rank]
return shard_tensor.shape
else:
numa_id = self.topo.get_numa_node(self.rank)
shard_tensor = self.numa_tensor_list[numa_id]
return shard_tensor.shape
@property
def ipc_handle(self):
return self.ipc_handle_
@ipc_handle.setter
def ipc_handle(self, ipc_handle):
self.ipc_handle_ = ipc_handle
def share_ipc(self):
gpu_ipc_handle_dict = {}
if self.cache_policy == "device_replicate":
for device in self.device_tensor_list:
gpu_ipc_handle_dict[device] = self.device_tensor_list[
device].share_ipc()[0]
else:
for numa_node in self.numa_tensor_list:
gpu_ipc_handle_dict[numa_node] = self.numa_tensor_list[
numa_node].share_ipc()[0]
return gpu_ipc_handle_dict, self.cpu_part, self.device_list, self.device_cache_size, self.cache_policy, self.csr_topo
def from_gpu_ipc_handle_dict(self, gpu_ipc_handle_dict, cpu_tensor):
if self.cache_policy == "device_replicate":
ipc_handle = gpu_ipc_handle_dict.get(
self.rank, []), cpu_tensor, ShardTensorConfig({})
shard_tensor = ShardTensor.new_from_share_ipc(
ipc_handle, self.rank)
self.device_tensor_list[self.rank] = shard_tensor
else:
numa_node = self.topo.get_numa_node(self.rank)
ipc_handle = gpu_ipc_handle_dict.get(
numa_node, []), cpu_tensor, ShardTensorConfig({})
shard_tensor = ShardTensor.new_from_share_ipc(
ipc_handle, self.rank)
self.numa_tensor_list[numa_node] = shard_tensor
self.cpu_part = cpu_tensor
@classmethod
def new_from_ipc_handle(cls, rank, ipc_handle):
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, csr_topo = ipc_handle
feature = cls(rank, device_list, device_cache_size, cache_policy)
feature.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict, cpu_part)
        if csr_topo is not None:
            feature.feature_order = csr_topo.feature_order.to(rank)
            feature.csr_topo = csr_topo
return feature
@classmethod
def lazy_from_ipc_handle(cls, ipc_handle):
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, _ = ipc_handle
feature = cls(device_list[0], device_list, device_cache_size,
cache_policy)
feature.ipc_handle = ipc_handle
return feature
def lazy_init_from_ipc_handle(self):
if self.ipc_handle is None:
return
self.rank = torch.cuda.current_device()
gpu_ipc_handle_dict, cpu_part, device_list, device_cache_size, cache_policy, csr_topo = self.ipc_handle
self.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict, cpu_part)
self.csr_topo = csr_topo
if csr_topo is not None:
self.feature_order = csr_topo.feature_order.to(self.rank)
self.ipc_handle = None
from multiprocessing.reduction import ForkingPickler
def rebuild_feature(ipc_handle):
print("check rebuild")
feature = Feature.lazy_from_ipc_handle(ipc_handle)
return feature
def reduce_feature(feature):
ipc_handle = feature.share_ipc()
return (rebuild_feature, (ipc_handle, ))
def rebuild_pyg_sampler(cls, ipc_handle):
sampler = cls.lazy_from_ipc_handle(ipc_handle)
return sampler
def reduce_pyg_sampler(sampler):
ipc_handle = sampler.share_ipc()
return (rebuild_pyg_sampler, (
type(sampler),
ipc_handle,
))
def init_reductions():
ForkingPickler.register(Feature, reduce_feature)
def test_feature_basic():
rank = 0
NUM_ELEMENT = 1000000
SAMPLE_SIZE = 80000
FEATURE_DIM = 600
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
host_indice = np.random.randint(0, 2 * NUM_ELEMENT - 1, (SAMPLE_SIZE, ))
indices = torch.from_numpy(host_indice).type(torch.long)
print("host data size", host_tensor.size * 4 // 1024 // 1024, "MB")
device_indices = indices.to(rank)
############################
# define a quiver.Feature
###########################
feature = quiver.Feature(rank=rank,
device_list=[0, 1, 2, 3],
device_cache_size="0.9G",
cache_policy="numa_replicate")
feature.from_cpu_tensor(tensor)
####################
# Indexing
####################
res = feature[device_indices]
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
res = res.cpu().numpy()
feature_gt = tensor[indices].numpy()
print("Correctness Check : ", np.array_equal(res, feature_gt))
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.size * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
def child_proc(rank, world_size, host_tensor, feature):
torch.cuda.set_device(rank)
print(
f"Process {os.getpid()}: check current device {torch.cuda.current_device()}"
)
NUM_ELEMENT = host_tensor.shape[0]
SAMPLE_SIZE = 80000
device_tensor = host_tensor.to(rank)
bandwidth = []
for _ in range(30):
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
torch.cuda.synchronize()
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
bandwidth.append(res.numel() * 4 / consumed_time / 1024 / 1024 / 1024)
assert torch.equal(res, device_tensor[device_indices])
print("Correctness check passed")
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel() * 4 / 1024 / 1024 / 1024}GB"
)
def test_ipc():
rank = 0
NUM_ELEMENT = 1000000
FEATURE_DIM = 600
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
print("host data size", host_tensor.size * 4 // 1024 // 1024, "MB")
############################
# define a quiver.Feature
###########################
feature = quiver.Feature(rank=rank,
device_list=[0, 1],
device_cache_size=0,
cache_policy="numa_replicate")
feature.from_cpu_tensor(tensor)
world_size = 2
mp.spawn(child_proc,
args=(world_size, tensor, feature),
nprocs=world_size,
join=True)
def child_proc_real_data(rank, feature, host_tensor):
NUM_ELEMENT = 2000000
SAMPLE_SIZE = 800000
bandwidth = []
torch.cuda.set_device(rank)
device_tensor = host_tensor.to(rank)
for _ in range(300):
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
torch.cuda.synchronize()
start = time.time()
res = feature[device_indices]
consumed_time = time.time() - start
bandwidth.append(res.numel() * 4 / consumed_time / 1024 / 1024 / 1024)
assert torch.equal(device_tensor[device_indices], res)
print("Correctness check passed")
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel() * 4 / 1024 / 1024 / 1024}GB"
)
def test_ipc_with_real_data():
from ogb.nodeproppred import PygNodePropPredDataset
root = "/data/data/products"
dataset = PygNodePropPredDataset('ogbn-products', root)
data = dataset[0]
world_size = torch.cuda.device_count()
##############################
# Create Sampler And Feature
##############################
csr_topo = quiver.CSRTopo(data.edge_index)
feature = torch.zeros(data.x.shape)
feature[:] = data.x
quiver_feature = Feature(rank=0,
device_list=list(range(world_size)),
device_cache_size="200M",
cache_policy="device_replicate",
csr_topo=csr_topo)
quiver_feature.from_cpu_tensor(feature)
print('Let\'s use', world_size, 'GPUs!')
mp.spawn(child_proc_real_data,
args=(quiver_feature, feature),
nprocs=world_size,
join=True)
def normal_test():
rank = 0
NUM_ELEMENT = 1000000
FEATURE_DIM = 600
SAMPLE_SIZE = 80000
#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)
host_tensor = np.random.randint(0,
high=10,
size=(2 * NUM_ELEMENT, FEATURE_DIM))
tensor = torch.from_numpy(host_tensor).type(torch.float32)
host_indice = np.random.randint(0, 2 * NUM_ELEMENT - 1, (SAMPLE_SIZE, ))
indices = torch.from_numpy(host_indice).type(torch.long)
tensor.to(rank)
torch.cuda.synchronize()
start = time.time()
feature = tensor[indices]
feature = feature.to(rank)
torch.cuda.synchronize()
consumed_time = time.time() - start
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {feature.numel() * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
def test_paper100M():
dataset = torch.load(
"/data/papers/ogbn_papers100M/quiver_preprocess/paper100M.pth")
csr_topo = dataset["csr_topo"]
feature = dataset["sorted_feature"]
NUM_ELEMENT = feature.shape[0]
SAMPLE_SIZE = 80000
world_size = 4
rank = 0
dataset["label"] = torch.from_numpy(dataset["label"])
dataset["num_features"] = feature.shape[1]
dataset["num_classes"] = 172
quiver_sampler = quiver.pyg.GraphSageSampler(csr_topo, [15, 10, 5],
0,
mode="UVA")
quiver_feature = quiver.Feature(rank=0,
device_list=list(range(world_size)),
device_cache_size="12G",
cache_policy="numa_replicate")
quiver_feature.from_cpu_tensor(feature)
device_indices = torch.randint(0,
NUM_ELEMENT - 1, (SAMPLE_SIZE, ),
device=rank)
res = quiver_feature[device_indices]
start = time.time()
res = quiver_feature[device_indices]
consumed_time = time.time() - start
print(
f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.numel() * 4 / consumed_time / 1024 / 1024 / 1024} GB/s, consumed {consumed_time}s"
)
if __name__ == "__main__":
mp.set_start_method("spawn")
torch_qv.init_p2p([0, 1, 2, 3])
test_paper100M()
#init_reductions()
#test_feature_basic()
#test_ipc()
#normal_test()
#test_ipc_with_real_data()
|
45955
|
import gputransform
import numpy as np
import numpy.testing as npt
import time
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# load test point cloud util
def load_pc_file(filename):
# returns Nx3 matrix
pc = np.fromfile(os.path.join("./", filename), dtype=np.float64)
if(pc.shape[0] != 4096*3):
print("pc shape:", pc.shape)
print("Error in pointcloud shape")
return np.array([])
pc = np.reshape(pc,(pc.shape[0]//3, 3))
return pc
# load test point cloud
sim_data_orig = load_pc_file("2.bin")
# visualize point cloud
x = sim_data_orig[...,0]
y = sim_data_orig[...,1]
z = sim_data_orig[...,2]
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(x, y, z)
plt.show()
plt.pause(0.1)
plt.close()
# prepare data for gpu process
sim_data_orig = sim_data_orig.astype(np.float32)
sim_data_orig = sim_data_orig[np.newaxis,:,...]
size = sim_data_orig.shape[1]
num_sector = 120
num_ring = 40
num_height = 20
max_length = 1
max_height = 1
num_in_voxel = 1
sim_data = sim_data_orig.transpose()
sim_data = sim_data.flatten()
# tic
time_start = time.time()
# gpu process
adder = gputransform.GPUTransformer(sim_data, size, max_length, max_height, num_ring, num_sector, num_height, num_in_voxel)
adder.transform()
point_t = adder.retreive()
# toc
time_end = time.time()
print('process cost',time_end - time_start,'s')
# visualize multi-layer scan context image
point_t = point_t.reshape(-1,3)
point_t = point_t[...,2]
point_t = point_t.reshape(20,40,120)
point_t = (point_t + 1.0) / 2.0 *255.0
for i in range(num_height):
plt.imshow(point_t[i,:,:])
plt.show()
plt.pause(0.3)
|
45999
|
from hibp import HIBP, AsyncHIBP
import time
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
if __name__ == '__main__':
    # random set of query parameters
names = ['adobe','ashleymadison', 'naughtyamerica', 'myspace']
accounts = ["ssgrn", "pegasos1","bar<PASSWORD>obama"]
domains = ['twitter.com', 'facebook.com','github.com','adobe.com']
# setup HIBP objects for request executions
reqs = [HIBP.get_breach(x) for x in names] \
+ [HIBP.get_account_breaches(x) for x in accounts] \
+ [HIBP.get_domain_breaches(x) for x in domains]
### SERIAL
start_time = time.time()
for req in reqs:
req.execute()
elapsed_time = time.time() - start_time
logging.info("serial impl took %.2f seconds" % elapsed_time)
### CONCURRENT
start_time = time.time()
async_reqs = AsyncHIBP().map(reqs)
elapsed_time = time.time() - start_time
logging.info("concurrent impl took %.2f seconds" % elapsed_time)
### LAZILY CONCURRENT
start_time = time.time()
async_reqs = AsyncHIBP().imap(reqs)
elapsed_time = time.time() - start_time
logging.info("lazily concurrent impl took %.2f seconds" % elapsed_time)
|
46026
|
from enum import Enum
from stories import story
# Base classes.
class ChildWithNull:
@story
def x(I):
I.one
class NextChildWithNull:
@story
def y(I):
I.two
class ParentWithNull:
@story
def a(I):
I.before
I.x
I.after
class SequenceParentWithNull:
@story
def a(I):
I.before
I.x
I.y
I.after
class ChildWithList:
@story
def x(I):
I.one
ChildWithList.x.failures(["foo", "bar", "baz"])
class NextChildWithList:
@story
def y(I):
I.two
NextChildWithList.y.failures(["spam", "ham", "eggs"])
class ParentWithList:
@story
def a(I):
I.before
I.x
I.after
ParentWithList.a.failures(["foo", "bar", "baz"])
class WideParentWithList:
@story
def a(I):
I.before
I.x
I.after
WideParentWithList.a.failures(["foo", "bar", "baz", "quiz"])
class ShrinkParentWithList:
@story
def a(I):
I.before
I.x
I.after
ShrinkParentWithList.a.failures(["foo", "quiz"])
class ChildWithEnum:
@story
def x(I):
I.one
@x.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class NextChildWithEnum:
@story
def y(I):
I.two
@y.failures
class Errors(Enum):
spam = 1
ham = 2
eggs = 3
class ParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ParentWithEnum.a.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class WideParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@WideParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
bar = 2
baz = 3
quiz = 4
class ShrinkParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ShrinkParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
quiz = 4
|
46031
|
from UE4Parse.BinaryReader import BinaryStream
class FIoDirectoryIndexEntry:
Name: int
FirstChildEntry: int
NextSiblingEntry: int
FirstFileEntry: int
def __init__(self, reader: BinaryStream):
self.Name = reader.readUInt32()
self.FirstChildEntry = reader.readUInt32()
self.NextSiblingEntry = reader.readUInt32()
self.FirstFileEntry = reader.readUInt32()
|
46053
|
from awscfncli2.runner import Boto3Profile
class TestStackSelector(object):
def test_update(self):
s1 = Boto3Profile('foo','bar')
s2 = Boto3Profile('foo', 'baz')
assert s1.region_name == 'bar'
s1.update(s2)
|
46056
|
import tensorflow as tf
from tensorflow.keras.layers import (
    BatchNormalization,
    LeakyReLU,
    ReLU,
    Activation,
    Conv1D,
    ELU,
    Add,
)
from functools import partial
from tensorflow.compat.v1.keras.initializers import he_uniform
def _get_conv_activation_layer(params):
"""
:param params:
:returns: Required Activation function.
"""
conv_activation = params.get('conv_activation')
if conv_activation == 'ReLU':
return ReLU()
elif conv_activation == 'ELU':
return ELU()
return LeakyReLU(0.2)
class UpSamplingLayer:
def __init__(self, channel_out, kernel_size=5, stride=1):
self.seq = tf.keras.Sequential()
self.seq.add(
tf.keras.layers.Conv1D(
channel_out,
kernel_size=kernel_size,
strides=stride,
padding='SAME',
dilation_rate=1,
)
)
self.seq.add(BatchNormalization(axis=-1))
self.seq.add(LeakyReLU(0.2))
def __call__(self, x, training=True):
return self.seq(x, training=training)
class Model:
def __init__(
self,
inputs,
training=True,
ksize=5,
n_layers=12,
channels_interval=24,
logging=True,
):
conv_activation_layer = _get_conv_activation_layer({})
kernel_initializer = he_uniform(seed=50)
conv1d_factory = partial(
Conv1D,
strides=(2),
padding='same',
kernel_initializer=kernel_initializer,
)
def resnet_block(input_tensor, filter_size):
res = conv1d_factory(
filter_size, (1), strides=(1), use_bias=False
)(input_tensor)
conv1 = conv1d_factory(filter_size, (5), strides=(1))(
input_tensor
)
batch1 = BatchNormalization(axis=-1)(conv1, training=training)
rel1 = conv_activation_layer(batch1)
conv2 = conv1d_factory(filter_size, (5), strides=(1))(rel1)
batch2 = BatchNormalization(axis=-1)(conv2, training=training)
resconnection = Add()([res, batch2])
rel2 = conv_activation_layer(resconnection)
return rel2
self.n_layers = n_layers
self.channels_interval = channels_interval
out_channels = [
i * self.channels_interval for i in range(1, self.n_layers + 1)
]
self.middle = tf.keras.Sequential()
self.middle.add(
tf.keras.layers.Conv1D(
self.n_layers * self.channels_interval,
kernel_size=15,
strides=1,
padding='SAME',
dilation_rate=1,
)
)
self.middle.add(BatchNormalization(axis=-1))
self.middle.add(LeakyReLU(0.2))
decoder_out_channels_list = out_channels[::-1]
self.decoder = []
for i in range(self.n_layers):
self.decoder.append(
UpSamplingLayer(channel_out=decoder_out_channels_list[i])
)
self.out = tf.keras.Sequential()
self.out.add(
tf.keras.layers.Conv1D(
1,
kernel_size=1,
strides=1,
padding='SAME',
dilation_rate=1,
)
)
self.out.add(Activation('tanh'))
tmp = []
o = inputs
for i in range(self.n_layers):
o = resnet_block(o, out_channels[i])
tmp.append(o)
o = o[:, ::2]
if logging:
print(o)
o = self.middle(o, training=training)
if logging:
print(o)
for i in range(self.n_layers):
o = tf.image.resize(
o, [tf.shape(o)[0], tf.shape(o)[1] * 2], method='nearest'
)
o = tf.concat([o, tmp[self.n_layers - i - 1]], axis=2)
o = self.decoder[i](o, training=training)
if logging:
print(o)
if logging:
print(o, inputs)
o = tf.concat([o, inputs], axis=2)
o = self.out(o, training=training)
self.logits = o
|
46068
|
import argparse
import os
import numpy as np
from torchdistill.datasets.transform import CustomCompose, CustomRandomResize
from torchdistill.datasets.util import load_coco_dataset, build_transform
from torchvision.datasets import ImageFolder, VOCSegmentation
from torchvision.transforms import transforms
from custom.transform import BPG
def get_argparser():
parser = argparse.ArgumentParser(description='BPG file size for ImageNet and COCO segmentation datasets')
parser.add_argument('--dataset', required=True, choices=['imagenet', 'coco_segment', 'pascal_segment'],
                        help='dataset name')
return parser
def compute_bpg_file_size_with_transform(dataset, quality):
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224)
])
bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
file_size_list = list()
for img in dataset:
img = transform(img[0])
img, file_size_kbyte = bpg_codec.run(img)
file_size_list.append(file_size_kbyte)
file_sizes = np.array(file_size_list)
print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_imagenet_dataset():
dataset = ImageFolder(root=os.path.expanduser('~/dataset/ilsvrc2012/val'))
compute_bpg_file_size_with_transform(dataset, 50)
compute_bpg_file_size_with_transform(dataset, 45)
compute_bpg_file_size_with_transform(dataset, 40)
compute_bpg_file_size_with_transform(dataset, 35)
compute_bpg_file_size_with_transform(dataset, 30)
compute_bpg_file_size_with_transform(dataset, 25)
compute_bpg_file_size_with_transform(dataset, 20)
compute_bpg_file_size_with_transform(dataset, 15)
compute_bpg_file_size_with_transform(dataset, 10)
compute_bpg_file_size_with_transform(dataset, 5)
compute_bpg_file_size_with_transform(dataset, 0)
def compute_bpg_file_size(dataset, quality):
file_size_list = list()
bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
for img in dataset:
img = img[0]
img, file_size_kbyte = bpg_codec.run(img)
file_size_list.append(file_size_kbyte)
file_sizes = np.array(file_size_list)
print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_cocosegment_dataset():
split_config = {
'images': '~/dataset/coco2017/val2017',
'annotations': '~/dataset/coco2017/annotations/instances_val2017.json',
'annotated_only': False,
'is_segment': True,
'transforms_params': [
{'type': 'CustomRandomResize', 'params': {'min_size': 520, 'max_size': 520}}
]
}
is_segment = split_config.get('is_segment', False)
compose_cls = CustomCompose if is_segment else None
transforms = build_transform(split_config.get('transforms_params', None), compose_cls=compose_cls)
dataset = load_coco_dataset(split_config['images'], split_config['annotations'],
split_config['annotated_only'], split_config.get('random_horizontal_flip', None),
is_segment, transforms, split_config.get('bpg_quality', None))
compute_bpg_file_size(dataset, 50)
compute_bpg_file_size(dataset, 45)
compute_bpg_file_size(dataset, 40)
compute_bpg_file_size(dataset, 35)
compute_bpg_file_size(dataset, 30)
compute_bpg_file_size(dataset, 25)
compute_bpg_file_size(dataset, 20)
compute_bpg_file_size(dataset, 15)
compute_bpg_file_size(dataset, 10)
compute_bpg_file_size(dataset, 5)
compute_bpg_file_size(dataset, 0)
def compute_bpg_file_size_with_transform_and_target(dataset, transform, quality):
bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
file_size_list = list()
for img in dataset:
img, _ = transform(img[0], img[1])
img, file_size_kbyte = bpg_codec.run(img)
file_size_list.append(file_size_kbyte)
file_sizes = np.array(file_size_list)
print('bpg quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_pascalsegment_dataset():
dataset = VOCSegmentation(root=os.path.expanduser('~/dataset/'), image_set='val', year='2012')
transform = CustomCompose([
CustomRandomResize(min_size=512, max_size=512)
])
compute_bpg_file_size_with_transform_and_target(dataset, transform, 50)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 45)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 40)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 35)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 30)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 25)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 20)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 15)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 10)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 5)
compute_bpg_file_size_with_transform_and_target(dataset, transform, 0)
if __name__ == '__main__':
argparser = get_argparser()
args = argparser.parse_args()
if args.dataset == 'imagenet':
compute_bpg_file_size_for_imagenet_dataset()
elif args.dataset == 'coco_segment':
compute_bpg_file_size_for_cocosegment_dataset()
else:
compute_bpg_file_size_for_pascalsegment_dataset()
|
46084
|
import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
imports = {
"2.7": ['pkg_resources'],
"3.4": [],
"3.5": ['pkg_resources'],
"3.6": [],
}
for imp in imports[pyver]:
subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
|
46087
|
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.interpolate import griddata
def fast_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None,
reinterp=None):
"""
Compute the sparse bi-dimensional histogram of two data samples where *x*,
and *y* are 1-D sequences of the same length. If *weights* is None
    (default), this is a histogram of the number of occurrences of the
observations at (x[i], y[i]).
If *weights* is specified, it specifies values at the coordinate (x[i],
y[i]). These values are accumulated for each bin and then reduced according
to *reduce_w* function, which defaults to numpy's sum function (np.sum).
(If *weights* is specified, it must also be a 1-D sequence of the same
length as *x* and *y*.)
Parameters
------
x: ndarray[ndim=1]
first data sample coordinates
y: ndarray[ndim=1]
second data sample coordinates
bins: int or [int, int]
int, the number of bins for the two dimensions (nx=ny=bins)
or [int, int], the number of bins in each dimension (nx, ny = bins)
weights: ndarray[ndim=1]
values *w_i* weighing each sample *(x_i, y_i)*
accumulated and reduced (using reduced_w) per bin
reduce_w: callable
function that will reduce the *weights* values accumulated per bin
defaults to numpy's sum function (np.sum)
NULL: value type
filling missing data value
    reinterp: str in {'linear', 'nearest', 'cubic'}, optional
        Method of interpolation.
        if set, reinterpolation is made using scipy.interpolate.griddata to
        fill missing data within the convex polygon that encloses the data
Returns
-------
B: ndarray[ndim=2]
bi-dimensional histogram
extent: tuple(4)
(xmin, xmax, ymin, ymax) extension of the histogram
steps: tuple(2)
(dx, dy) bin size in x and y direction
"""
# define the bins (do anything you want here but needs edges and sizes of
# the 2d bins)
try:
nx, ny = bins
except TypeError:
nx = ny = bins
# values you want to be reported
if weights is None:
weights = np.ones(x.size)
if reduce_w is None:
reduce_w = np.sum
else:
if not hasattr(reduce_w, '__call__'):
raise TypeError('reduce function is not callable')
# culling nans
finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))
_x = np.asarray(x)[finite_inds]
_y = np.asarray(y)[finite_inds]
_w = np.asarray(weights)[finite_inds]
if not (len(_x) == len(_y)) & (len(_y) == len(_w)):
raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))
xmin, xmax = _x.min(), _x.max()
ymin, ymax = _y.min(), _y.max()
dx = (xmax - xmin) / (nx - 1.0)
dy = (ymax - ymin) / (ny - 1.0)
# Basically, this is just doing what np.digitize does with one less copy
xyi = np.vstack((_x, _y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = np.floor(xyi, xyi).T
# xyi contains the bins of each point as a 2d array [(xi,yi)]
d = {}
for e, k in enumerate(xyi.T):
key = (k[0], k[1])
if key in d:
d[key].append(_w[e])
else:
d[key] = [_w[e]]
_xyi = np.array(list(d.keys())).T
_w = np.array([reduce_w(v) for v in d.values()])
# exploit a sparse coo_matrix to build the 2D histogram...
_grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))
if reinterp is None:
# convert sparse to array with filled value
# grid.toarray() does not account for filled value
# sparse.coo.coo_todense() does actually add the values to the existing
# ones, i.e. not what we want -> brute force
if NULL is None:
B = _grid.toarray()
        else:  # Brute force only when needed
B = np.zeros(_grid.shape, dtype=_grid.dtype)
B.fill(NULL)
for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):
B[y, x] = v
    else:  # reinterp
        xi = np.arange(nx, dtype=float)
        yi = np.arange(ny, dtype=float)
        # scipy.interpolate.griddata (which replaces the old mlab griddata)
        # takes the sample points, their values and the evaluation grid
        Xi, Yi = np.meshgrid(xi, yi)
        B = griddata(np.array([_grid.col.astype(float),
                               _grid.row.astype(float)]).T,
                     _grid.data,
                     (Xi, Yi), method=reinterp)
return B, (xmin, xmax, ymin, ymax), (dx, dy)
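# Illustrative usage sketch for fast_histogram2d (assumption: random toy data;
# this helper is not part of the original module API).
def _demo_fast_histogram2d():
    x = np.random.normal(size=10000)
    y = np.random.normal(size=10000)
    # 64x64 histogram of point counts, plus its extent and bin sizes
    B, extent, steps = fast_histogram2d(x, y, bins=64)
    print(B.shape, extent, steps)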
def bayesian_blocks(t):
"""Bayesian Blocks Implementation
By <NAME>. License: BSD
Based on algorithm outlined in
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
Parameters
----------
t : ndarray, length N
data to be histogrammed
Returns
-------
bins : ndarray
array containing the (N+1) bin edges
Notes
-----
This is an incomplete implementation: it may fail for some
datasets. Alternate fitness functions and prior forms can
be found in the paper listed above.
"""
# copy and sort the array
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# -----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
# -----------------------------------------------------------------
for K in range(N):
# Compute the width and count of the final bin for all possible
# locations of the K^th changepoint
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
# evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:K]
# find the max of the fitness: this is the K^th changepoint
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
# -----------------------------------------------------------------
# Recover changepoints by iteratively peeling off the last block
# -----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
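# Illustrative usage sketch (assumption: toy data drawn from two Gaussians to
# show the adaptive bin edges; not part of the original module).
def _demo_bayesian_blocks():
    t = np.concatenate([np.random.normal(0.0, 1.0, 500),
                        np.random.normal(5.0, 0.5, 500)])
    edges = bayesian_blocks(t)
    print(len(edges) - 1, "adaptive bins, edges:", edges)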
def optbins(data, method='freedman', ret='N'):
""" Determine the optimal binning of the data based on common estimators
and returns either the number of bins of the width to use.
inputs
------
data 1d dataset to estimate from
keywords
--------
method the method to use: str in {sturge, scott, freedman}
ret set to N will return the number of bins / edges
set to W will return the width
refs
----
* <NAME>. (1926)."The choice of a class interval". J. American
Statistical Association, 65-66
* <NAME>. (1979), "On optimal and data-based histograms".
Biometrika, 66, 605-610
* <NAME>.; <NAME>. (1981). "On the histogram as a density
estimator: L2 theory". Zeitschrift fur Wahrscheinlichkeitstheorie und
verwandte Gebiete, 57, 453-476
* <NAME>. et al (2012) "Studies in Astronomical Time Series Analysis.
VI. Bayesian Block Representations."
"""
x = np.asarray(data)
n = x.size
r = x.max() - x.min()
def sturge():
if (n <= 30):
print("Warning: Sturge estimator can perform poorly for small samples")
k = int(np.log(n) + 1)
h = r / k
return h, k
def scott():
h = 3.5 * np.std(x) * float(n) ** (-1. / 3.)
k = int(r / h)
return h, k
def freedman():
q = quantiles(x, [25, 75])
h = 2 * (q[75] - q[25]) * float(n) ** (-1. / 3.)
k = int(r / h)
return h, k
def bayesian():
r = bayesian_blocks(x)
return np.diff(r), r
m = {'sturge': sturge, 'scott': scott, 'freedman': freedman,
'bayesian': bayesian}
if method.lower() in m:
s = m[method.lower()]()
if ret.lower() == 'n':
return s[1]
elif ret.lower() == 'w':
return s[0]
else:
return None
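# Illustrative usage sketch comparing the estimators on the same toy sample
# (assumption: random data; not part of the original module).
def _demo_optbins():
    data = np.random.normal(size=2000)
    for method in ('sturge', 'scott', 'freedman'):
        print(method, 'bins:', optbins(data, method=method, ret='N'))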
def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):
"""computes quantiles from an array
Quantiles := points taken at regular intervals from the cumulative
distribution function (CDF) of a random variable. Dividing ordered data
into q essentially equal-sized data subsets is the motivation for
q-quantiles; the quantiles are the data values marking the boundaries
between consecutive subsets.
The quantile with a fraction 50 is called the median
(50% of the distribution)
Inputs:
x - variable to evaluate from
qlist - quantiles fraction to estimate (in %)
Outputs:
Returns a dictionary of requested quantiles from array
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort, then transpose back
sx = np.transpose(np.sort(np.transpose(x)))
else:
# Sort univariate node
sx = np.sort(x)
try:
# Generate specified quantiles
quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print("Too few elements for quantile calculation")
|
46119
|
import pygame
from pygame.locals import *
from paddle import Paddle
from ball import Ball
from inputs import handle_events, handle_input
from constants import SCREEN_WIDTH, SCREEN_HEIGHT, WHITE, RED
ball = None
left_paddle = None
right_paddle = None
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Python PONG")
clock = pygame.time.Clock()
done = [False]
is_game_over = [False]
def setup_game():
global ball
global left_paddle
global right_paddle
ball = Ball((SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2))
left_paddle = Paddle()
right_paddle = Paddle()
right_paddle.rect.x = SCREEN_WIDTH - right_paddle.rect.width
def draw_game_over():
font = pygame.font.Font("freesansbold.ttf", 32)
game_over = font.render("GAME OVER", True, RED)
game_over_rect = game_over.get_rect()
game_over_rect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)
screen.blit(game_over, game_over_rect)
def draw_game():
left_paddle.draw(screen)
right_paddle.draw(screen)
ball.draw(screen)
def draw():
screen.fill(WHITE)
if is_game_over[0]:
draw_game_over()
else:
draw_game()
pygame.display.flip()
def update():
handle_events(done)
if not is_game_over[0]:
handle_input(left_paddle, right_paddle)
ball.update(left_paddle, right_paddle, is_game_over)
setup_game()
while not done[0]:
clock.tick(30)
update()
draw()
pygame.quit()
|
46148
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T09GroupPublic(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T09GroupPublic, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'<EMAIL>',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.dog = hydroshare.create_account(
'<EMAIL>',
username='dog',
first_name='<NAME>',
last_name='last_name_dog',
superuser=False,
groups=[]
)
self.squirrels = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about chasing squirrels',
metadata=[],
)
self.holes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about storing bones in holes',
metadata=[],
)
# dog owns canines group
self.canines = self.dog.uaccess.create_group(
title='canines', description="We are the canines")
def test_public_resources(self):
""" public resources contain those resources that are public and discoverable """
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, []))
self.dog.uaccess.share_resource_with_group(self.squirrels, self.canines,
PrivilegeCodes.VIEW)
self.dog.uaccess.share_resource_with_group(self.holes, self.canines,
PrivilegeCodes.VIEW)
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, []))
self.holes.raccess.public = True
self.holes.raccess.discoverable = True
self.holes.raccess.save() # this avoids regular requirements for "public"
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, [self.holes]))
for r in res:
self.assertEqual(r.public, r.raccess.public)
self.assertEqual(r.discoverable, r.raccess.discoverable)
self.assertEqual(r.published, r.raccess.published)
self.assertEqual(r.group_name, self.canines.name)
self.assertEqual(r.group_id, self.canines.id)
self.squirrels.raccess.discoverable = True
self.squirrels.raccess.save()
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, [self.holes, self.squirrels]))
for r in res:
self.assertEqual(r.public, r.raccess.public)
self.assertEqual(r.discoverable, r.raccess.discoverable)
self.assertEqual(r.published, r.raccess.published)
self.assertEqual(r.group_name, self.canines.name)
self.assertEqual(r.group_id, self.canines.id)
|
46150
|
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import UnpivotStep
def execute_unpivot(
step: UnpivotStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
df_melted = df.melt(
id_vars=step.keep,
value_vars=step.unpivot,
var_name=step.unpivot_column_name,
value_name=step.value_column_name,
)
return df_melted.dropna(subset=[step.value_column_name]) if step.dropna else df_melted
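# Illustrative usage sketch (assumption: UnpivotStep can be constructed with these
# keyword arguments, which mirror the attributes read above but are not checked
# against the weaverbird pipeline schema).
def _demo_execute_unpivot():
    df = DataFrame({'city': ['Paris', 'Oslo'], '2020': [10, 20], '2021': [30, 40]})
    step = UnpivotStep(
        name='unpivot',
        keep=['city'],
        unpivot=['2020', '2021'],
        unpivot_column_name='year',
        value_column_name='value',
        dropna=True,
    )
    # one row per (city, year) pair, with the cell content in "value"
    return execute_unpivot(step, df)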
|
46161
|
from .grid_attention_example import run_grid_attention_example
from .region_attention_example import run_region_attention_example
|
46205
|
import datetime
import feedparser
import json
import os
import shutil
import sys
import time
from .common import p, FEEDS_FILE_NAME
from .config import TIMEZONE
def do(target_category=None, log=False):
def getFeedFromRSS(category, urls, show_author=False, log=False):
rslt = {}
for source, url in urls.items():
try:
if log:
sys.stdout.write(f"- {url}")
d = feedparser.parse(url)
if log:
sys.stdout.write(" - Done\n")
except:
sys.exit(" - Failed\n" if log else 0)
for feed in d.entries:
try:
at = datetime.datetime(*feed.published_parsed[:6]).replace(tzinfo=datetime.timezone.utc).astimezone(TIMEZONE)
except:
continue
pubDate = at.strftime("%H:%M" if at.date() == datetime.date.today() else "%b %d, %H:%M")
ts = int(time.mktime(feed.published_parsed))
entries = {
"id": ts,
"sourceName": source if not show_author else feed.author,
"pubDate": pubDate,
"timestamp": ts,
"url": feed.link,
"title": feed.title,
}
rslt[entries["id"]] = entries
rslt = [val for key, val in sorted(rslt.items(), reverse=True)]
rslt = {"entries": rslt, "created_at": int(time.time())}
with open(os.path.join(p["path_data"], f"rss_{category}.json"), "w", encoding="utf-8") as f:
f.write(json.dumps(rslt, ensure_ascii=False))
return rslt
if not os.path.isfile(FEEDS_FILE_NAME):
shutil.copyfile(os.path.join(os.path.dirname(os.path.abspath(__file__)), "feeds.json"), FEEDS_FILE_NAME)
with open(FEEDS_FILE_NAME, "r") as fp:
RSS = json.load(fp)
if target_category:
return getFeedFromRSS(target_category, RSS[target_category]["feeds"], show_author=RSS[target_category].get("show_author", False), log=log)
for category, d in RSS.items():
getFeedFromRSS(category, d["feeds"], show_author=d.get("show_author", False), log=log)
if __name__ == "__main__":
do()
|
46206
|
import math
import numpy as np
"""
This function calculates the roots of the quadratic inequality for the Rh reuse factor.
Parameters:
lx - list of input sizes of the lstms. The size of this list is equal to the number of layers.
lh - list of input sizes of the hidden layers. The size of this list is equal to the number of layers.
lt_sigma - the latency of the sigmoid/tanh functions.
lt_tail - the latency of the tail.
dsp_total - the total number of dsps
This returns the roots of the quadratic inequality.
"""
def reuse_factor(lx, lh, lt_sigma, lt_tail, dsp_total):
a = dsp_total - 4 * sum(lh)
b = dsp_total * (lt_sigma + lt_tail) - 4 * np.dot(lx, lh) - 4 * np.dot(lh, lh) - 4 * (lt_sigma + lt_tail) * sum(lh)
c = - 4 * (lt_sigma + lt_tail) * np.dot(lh, lh)
# print(a)
# print(b)
# print(c)
r_1 = (-b + math.sqrt(b**2 - 4*a*c)) / (2*a)
r_2 = (-b - math.sqrt(b**2 - 4*a*c)) / (2*a)
return r_1, r_2
print("ZYNQ")
print(reuse_factor([1,9],[9,9], 3,8,220))
print("lstm_ae_small exmaple")
print(reuse_factor([1,9],[9,9], 3,8,900))
print("\n")
print("KU115")
print("mnist 1/2 layers examples")
print(reuse_factor([28],[32], 3,8,5520))
print(reuse_factor([28,16],[16,16], 3,8,5520))
print("\n")
print("U250")
print("lstm_ae exmaple")
print(reuse_factor([1,32,8,8],[32,8,8,32], 3,8,12200))
|
46221
|
import numpy as np
import pytest
from tensorflow.keras import losses as losses_module
from tensorflow.keras import metrics as metrics_module
from scikeras.utils import loss_name, metric_name
class CustomLoss(losses_module.Loss):
pass
class CustomMetric(metrics_module.AUC):
pass
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
losses_module.categorical_crossentropy,
losses_module.CategoricalCrossentropy,
losses_module.CategoricalCrossentropy(),
],
)
def test_loss_invariance(obj):
"""Test to make sure loss_name returns same string no matter which object
is passed (str, function, class, type)"""
assert loss_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("obj", [CustomLoss, CustomLoss()])
def test_custom_loss(obj):
assert loss_name(obj) == "custom_loss"
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
metrics_module.categorical_crossentropy,
metrics_module.CategoricalCrossentropy,
metrics_module.CategoricalCrossentropy(),
],
)
def test_metric_invariance(obj):
"""Test to make sure same metric returned no matter which object passed"""
assert metric_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("loss", [object(), object, list()])
def test_loss_types(loss):
with pytest.raises(TypeError, match="``loss`` must be a"):
loss_name(loss)
def test_unknown_loss_raises():
with pytest.raises(ValueError, match="Unknown loss function"):
loss_name("unknown_loss")
@pytest.mark.parametrize("obj", [object(), object, list()])
def test_metric_types(obj):
with pytest.raises(TypeError, match="``metric`` must be a"):
metric_name(obj)
def test_unknown_metric():
with pytest.raises(ValueError, match="Unknown metric function"):
metric_name("unknown_metric")
@pytest.mark.parametrize("metric", [CustomMetric, CustomMetric()])
def test_custom_metric(metric):
assert metric_name(metric) == "custom_metric"
|
46233
|
import gym
import argparse
import tensorflow as tf
from tf_rl.common.memory import HER_replay_buffer
from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd
from tf_rl.common.params import ROBOTICS_ENV_LIST
from tf_rl.common.train import train_HER, train_HER_ray
from tf_rl.common.networks import HER_Actor as Actor, HER_Critic as Critic
from tf_rl.agents.HER import HER_DDPG as HER, HER_DDPG_debug as HER_debug
eager_setup()
"""
# defined in params.py
ROBOTICS_ENV_LIST = {
"FetchPickAndPlace-v1"
"FetchPush-v1"
"FetchReach-v1"
"FetchSlide-v1"
}
"""
parser = argparse.ArgumentParser()
parser.add_argument("--mode", default="MuJoCo", help="Task mode")
parser.add_argument("--env_name", default="FetchPush-v1", help="Env title")
parser.add_argument("--seed", default=123, type=int, help="seed for randomness")
parser.add_argument("--num_epochs", default=200, type=int, help="number of epochs in a training")
parser.add_argument("--num_cycles", default=50, type=int, help="number of cycles in epoch")
parser.add_argument("--num_episodes", default=2, type=int, help="number of episodes in cycle")
parser.add_argument("--num_steps", default=50, type=int, help="number of steps in an episode")
parser.add_argument("--replay_strategy", default="future", help="replay_strategy")
parser.add_argument("--replay_k", default=4, type=int, help="number of replay strategy")
parser.add_argument("--num_updates", default=40, type=int, help="number of updates in cycle")
parser.add_argument("--memory_size", default=1000000, type=int, help="memory size in a training")
parser.add_argument("--batch_size", default=256, type=int, help="batch size of each iteration of update")
parser.add_argument("--gamma", default=0.98, type=float, help="discount factor")
parser.add_argument("--tau", default=0.05, type=float, help="soft-update tau")
parser.add_argument("--action_l2", default=1.0, type=float, help="magnitude of L2 regularisation")
parser.add_argument("--noise_eps", default=0.2, type=float, help="magnitude of noise")
parser.add_argument("--random_eps", default=0.3, type=float, help="magnitude of randomness")
parser.add_argument("--debug_flg", default=False, type=bool, help="debug mode or not")
parser.add_argument("--google_colab", default=False, type=bool, help="if you are executing this on GoogleColab")
params = parser.parse_args()
params.goal = ROBOTICS_ENV_LIST[params.env_name]
params.test_episodes = 10
env = gym.make(params.env_name)
params.max_action = env.action_space.high[0]
params.num_action = env.action_space.shape[0]
# set seed
env.seed(params.seed)
tf.random.set_random_seed(params.seed)
# create a directory for log/model
params = create_log_model_directory(params, get_alg_name())
# get init obs for creating env_params
obs = env.reset()
# prep for basic stats
env_params = {
'obs': obs['observation'].shape[0],
'goal': obs['desired_goal'].shape[0],
'action': env.action_space.shape[0],
'action_max': env.action_space.high[0],
'max_timesteps': env._max_episode_steps
}
her_sample_func = her_sampler(params.replay_strategy, params.replay_k, env.compute_reward)
replay_buffer = HER_replay_buffer(env_params, params.memory_size, her_sample_func.sample_her_transitions)
summary_writer = tf.contrib.summary.create_file_writer(params.log_dir)
o_norm = RunningMeanStd(env_params['obs'])
g_norm = RunningMeanStd(env_params['goal'])
if params.debug_flg:
agent = HER_debug(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)
else:
agent = HER(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)
train_HER(agent, env, replay_buffer, summary_writer)
# train_HER_ray(agent, env, replay_buffer, summary_writer)
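# Example invocation (a sketch only; the script filename is an assumption, the flags come from
# the argparse definitions above):
#
#     python run_her.py --env_name FetchPush-v1 --num_epochs 50 --seed 123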
|
46248
|
from setuptools import setup, find_packages
setup(
name='shamir',
version='17.12.0',
url='https://github.com/kurtbrose/shamir',
author='<NAME>',
author_email='<EMAIL>',
decription="fast, secure, pure python shamir's secret sharing",
long_description = open('README.rst').read(),
py_modules=['shamir'],
)
|
46287
|
from multiprocessing import Pool
def _calc_epa_func(thermostat):
""" Takes an individual thermostat and runs the
calculate_epa_field_savings_metrics method. This method is necessary for
the multiprocessing pool as map / imap need a function to run on.
Parameters
----------
thermostat : thermostat
Returns
-------
results : results from running calculate_epa_field_savings_metrics
"""
results = thermostat.calculate_epa_field_savings_metrics()
return results
def multiple_thermostat_calculate_epa_field_savings_metrics(thermostats):
""" Takes a list of thermostats and uses Python's Multiprocessing module to
run as many processes in parallel as the system will allow.
Parameters
----------
thermostats : thermostats iterator
A list of the thermostats run the calculate_epa_field_savings_metrics
upon.
Returns
-------
metrics : list
Returns a list of the metrics calculated for the thermostats
"""
# Convert the thermostats iterator to a list
thermostats_list = list(thermostats)
pool = Pool()
results = pool.imap(_calc_epa_func, thermostats_list)
pool.close()
pool.join()
metrics_dict = {}
for output in results:
thermostat_id = output[0]['ct_identifier']
metrics_dict[thermostat_id] = []
for individual_output in output:
metrics_dict[thermostat_id].append(individual_output)
# Get the order of the thermostats from the original input so the output
# matches the order that was sent in
thermostat_ids = \
[thermostat.thermostat_id for thermostat in thermostats_list]
metrics = []
for thermostat_id in thermostat_ids:
try:
for metric in metrics_dict[thermostat_id]:
metrics.append(metric)
# Prevent duplicate thermostat IDs from being double-counted
metrics_dict.pop(thermostat_id, None)
# Trap for missing keys
except KeyError:
pass
return metrics
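# Usage sketch (hedged): assuming an iterable of thermostat objects that implement
# `calculate_epa_field_savings_metrics()` and expose `thermostat_id`, the parallel helper
# above can replace a serial loop over thermostats:
#
#     thermostats = load_thermostats_somehow()  # hypothetical loader returning an iterator
#     metrics = multiple_thermostat_calculate_epa_field_savings_metrics(thermostats)
#     print("computed", len(metrics), "metric rows")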
|
46310
|
import numpy as np
from sklearn.cluster import KMeans
from splearn.cluster import SparkKMeans
from splearn.utils.testing import SplearnTestCase, assert_array_almost_equal
class TestKMeans(SplearnTestCase):
def test_same_centroids(self):
X, y, X_rdd = self.make_blobs(centers=4, n_samples=200000)
local = KMeans(n_clusters=4, init='k-means++', random_state=42)
dist = SparkKMeans(n_clusters=4, init='k-means++', random_state=42)
local.fit(X)
dist.fit(X_rdd)
local_centers = np.sort(local.cluster_centers_, axis=0)
dist_centers = np.sort(dist.cluster_centers_, axis=0)
assert_array_almost_equal(local_centers, dist_centers, decimal=4)
|
46372
|
from builtins import str
import sadi
import rdflib
import setlr
from datetime import datetime
from .service import Service
from nanopub import Nanopublication
from datastore import create_id
import flask
from flask import render_template
from flask import render_template_string
import logging
import sys, traceback
import database
import tempfile
from depot.io.interfaces import StoredFile
from whyis.namespace import whyis
class GlobalChangeService(Service):
@property
def query_predicate(self):
return whyis.globalChangeQuery
|
46400
|
import os
import torch
import torchvision.datasets as datasets
from .imagenet import ImageNetSubsample, ImageNetSubsampleValClasses
import numpy as np
CLASS_SUBLIST = [
1, 2, 4, 6, 8, 9, 11, 13, 22, 23, 26, 29, 31, 39, 47, 63, 71, 76, 79, 84, 90, 94, 96, 97, 99, 100, 105, 107,
113, 122,
125, 130, 132, 144, 145, 147, 148, 150, 151, 155, 160, 161, 162, 163, 171, 172, 178, 187, 195, 199, 203,
207, 208, 219,
231, 232, 234, 235, 242, 245, 247, 250, 251, 254, 259, 260, 263, 265, 267, 269, 276, 277, 281, 288, 289,
291, 292, 293,
296, 299, 301, 308, 309, 310, 311, 314, 315, 319, 323, 327, 330, 334, 335, 337, 338, 340, 341, 344, 347,
353, 355, 361,
362, 365, 366, 367, 368, 372, 388, 390, 393, 397, 401, 407, 413, 414, 425, 428, 430, 435, 437, 441, 447,
448, 457, 462,
463, 469, 470, 471, 472, 476, 483, 487, 515, 546, 555, 558, 570, 579, 583, 587, 593, 594, 596, 609, 613,
617, 621, 629,
637, 657, 658, 701, 717, 724, 763, 768, 774, 776, 779, 780, 787, 805, 812, 815, 820, 824, 833, 847, 852,
866, 875, 883,
889, 895, 907, 928, 931, 932, 933, 934, 936, 937, 943, 945, 947, 948, 949, 951, 953, 954, 957, 963, 965,
967, 980, 981,
983, 988]
CLASS_SUBLIST_MASK = [(i in CLASS_SUBLIST) for i in range(1000)]
class ImageNetRValClasses(ImageNetSubsampleValClasses):
def get_class_sublist_and_mask(self):
return CLASS_SUBLIST, CLASS_SUBLIST_MASK
class ImageNetR(ImageNetSubsample):
def get_class_sublist_and_mask(self):
return CLASS_SUBLIST, CLASS_SUBLIST_MASK
def get_test_path(self):
return os.path.join(self.location, 'imagenet-r')
|
46408
|
from flask import request
from assemblyline.common.concurrency import execute_concurrently
from assemblyline.common.importing import module_attribute_by_name
from al_ui.api_base import api_login, make_api_response
from al_ui.apiv3 import core
from al_ui.config import LOGGER, config
from assemblyline.al.datasource.common import hash_type
SUB_API = 'hash_search'
hash_search_api = core.make_subapi_blueprint(SUB_API)
hash_search_api._doc = "Search hashes through multiple data sources"
class SkipDatasource(Exception):
pass
def create_query_datasource(ds):
def query_datasource(h, u):
return {
'error': None,
'items': ds.parse(ds.query(h, **u), **u)
}
return query_datasource
sources = {}
# noinspection PyBroadException
try:
for name, settings in config.datasources.iteritems():
name = name.lower()
classpath = 'unknown'
# noinspection PyBroadException
try:
classpath = settings['classpath']
cfg = settings['config']
if isinstance(cfg, basestring):
path = cfg
cfg = config
for point in path.split('.'):
if 'enabled' in cfg:
if not cfg['enabled']:
raise SkipDatasource()
cfg = cfg.get(point)
cls = module_attribute_by_name(classpath)
obj = cls(LOGGER, **cfg)
sources[name] = create_query_datasource(obj)
except SkipDatasource:
continue
except: # pylint: disable=W0702
LOGGER.exception(
"Problem creating %s datasource (%s)", name, classpath
)
except: # pylint: disable=W0702
LOGGER.exception("No datasources")
# noinspection PyUnusedLocal
@hash_search_api.route("/<file_hash>/", methods=["GET"])
@api_login(required_priv=['R'])
def search_hash(file_hash, *args, **kwargs):
"""
Search for a hash in multiple data sources as configured in the seed.
Variables:
value => Hash to search in the multiple data sources
[MD5, SHA1 or SHA256]
Arguments:(optional)
db => | separated list of data sources
    show_timers => Display the time it took to query each source
max_timeout => Maximum execution time for the call in seconds
Data Block:
None
API call examples:
/api/v3/hash_search/
/api/v3/hash_search/123456...654321/?db=nsrl|al&show_timers=true
Result example:
{ # Dictionary of:
"al": { # Data source queried
"error": null, # Error message returned by data source
"items": [ # List of items found in the data source
{"confirmed": true, # Is the maliciousness attribution confirmed or not
"data": # Raw data from the data source
"description": "", # Description of the findings
"malicious": false}, # Is the file found malicious or not
...
]
},
...
}
"""
user = kwargs['user']
if hash_type(file_hash) == "invalid":
return make_api_response("", "Invalid hash. This API only supports MD5, SHA1 and SHA256.", 400)
db_list = []
invalid_sources = []
db = request.args.get('db', None)
if db:
db_list = db.split("|")
invalid_sources = []
for x in db_list:
if x not in sources:
invalid_sources.append(x)
for x in invalid_sources:
db_list.remove(x)
show_timers = request.args.get('show_timers', False)
if show_timers:
show_timers = show_timers.lower() == 'true'
max_timeout = request.args.get('max_timeout', "2")
# noinspection PyBroadException
try:
max_timeout = float(max_timeout)
except: # pylint: disable=W0702
max_timeout = 2.0
if len(db_list) == 0 and len(invalid_sources) == 0:
db_list = sources.keys()
plan = [(sources[x], (file_hash.lower(), user), x) for x in db_list]
res = execute_concurrently(plan, calculate_timers=show_timers, max_timeout=max_timeout)
data = {}
for x in db_list:
if x not in res:
if x in res["_timeout_"]:
data[x] = {"items": [], "error": "Service reached the maximum execution time. [%s seconds]" %
max_timeout}
elif x in res["_exception_"]:
exception = res["_exception_"][x]
e = "%s: %s" % (exception.__class__.__name__, str(exception))
data[x] = {"items": [], "error": "Exception occured while querying datasource. [%s]" % e}
else:
data[x] = {"items": [], "error": "Service is currently not available."}
else:
data[x] = res[x]
if show_timers:
data['_timers_'] = res.get("_timers_", {})
return make_api_response(data)
# noinspection PyUnusedLocal
@hash_search_api.route("/list_data_sources/", methods=["GET"])
@api_login(audit=False, required_priv=['R'])
def list_data_sources(*args, **kwargs):
"""
List all available data sources to use the hash_search API
Variables:
None
Arguments:
None
Data Block:
None
Result example:
[ <list of sources> ]
"""
return make_api_response(sorted(sources.keys()))
|
46409
|
from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
from com.aspose.slides import XpsOptions
class Zoom:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithPresentation/Zoom/'
# Create an instance of Presentation class
pres = Presentation()
# Setting View Properties of Presentation
pres.getViewProperties().getSlideViewProperties().setScale(50) # zoom value in percentages for slide view
pres.getViewProperties().getNotesViewProperties().setScale(50) # .Scale = 50 //zoom value in percentages for notes view
# Save the presentation as a PPTX file
save_format = SaveFormat
pres.save(dataDir + "Zoom.pptx", save_format.Pptx)
print "Set zoom value, please check the output file."
if __name__ == '__main__':
Zoom()
|
46431
|
from django.db import models
from mayan.apps.testing.tests.base import BaseTestCase
from ..classes import QuerysetParametersSerializer
class QuerysetParametersSerializerTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.TestModelParent = self._create_test_model(
model_name='TestModelParent'
)
self.TestModelChild = self._create_test_model(
fields={
'parent': models.ForeignKey(
on_delete=models.CASCADE, related_name='children',
to='TestModelParent'
)
}, model_name='TestModelChild'
)
self._test_object_parent = self.TestModelParent.objects.create()
self.TestModelChild.objects.create(parent_id=self._test_object_parent.pk)
def _assertQuerysetEqual(self):
rebuilt_items = list(map(repr, self.queryset_rebuilt))
self.assertQuerysetEqual(
qs=self.queryset_original, values=rebuilt_items
)
def test_without_kwargs(self):
self.queryset_original = self.TestModelParent.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelParent, _method_name='all'
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
def test_foreign_key_model(self):
self.queryset_original = self.TestModelChild.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelChild, _method_name='filter',
parent=self._test_object_parent
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
def test_foreign_key_model_id_query(self):
self.queryset_original = self.TestModelChild.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelChild, _method_name='filter',
parent_id=self._test_object_parent.pk
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
|
46434
|
from django_tables2 import TemplateColumn
from service_catalog.models import GlobalHook
from Squest.utils.squest_table import SquestTable
class GlobalHookTable(SquestTable):
state = TemplateColumn(template_name='custom_columns/global_hook_state.html')
actions = TemplateColumn(template_name='custom_columns/global_hook_actions.html', orderable=False)
class Meta:
model = GlobalHook
attrs = {"id": "global_hook_table", "class": "table squest-pagination-tables"}
fields = ("name", "model", "state", "job_template", "actions")
|
46454
|
from HABApp.openhab.connection_handler import http_connection
from ._plugin import on_connect, on_disconnect, setup_plugins
log = http_connection.log
def setup():
from HABApp.runtime import shutdown
# initialize callbacks
http_connection.ON_CONNECTED = on_connect
http_connection.ON_DISCONNECTED = on_disconnect
# shutdown handler for connection
shutdown.register_func(http_connection.stop_connection, msg='Stopping openHAB connection')
# shutdown handler for plugins
shutdown.register_func(on_disconnect, msg='Stopping openHAB plugins')
# initialize all plugins
setup_plugins()
return None
async def start():
await http_connection.start_connection()
|
46461
|
import clockngpn.totp as totp
from clockngpn.proc_worker import Event, Broker, ProcWorkerEvent
from clockngpn.ttp import TocTocPorts, TocTocPortsWorker
from queue import Queue
import time
import os
import logging
from clockngpn.bidi import OTPBidi
import signal
import argparse
log = logging.getLogger(__name__)
def check_environment():
if not os.geteuid() == 0:
raise Exception("This program needs root for managing IPTABLES!")
try:
import iptc
except Exception as _:
if 'XTABLES_LIBDIR' not in os.environ:
os.environ['XTABLES_LIBDIR'] = '/usr/lib/x86_64-linux-gnu/xtables'
else:
raise Exception("Error, la variable XTABLES_LIBDIR está mal configurada")
# TODO: Move this into a class and build the main entry point with arg_parser
def main_server(secret, slot, address, ports, opened):
try:
check_environment()
except Exception as e:
log.error(e)
exit(-1)
log.debug("Secret: %s" % secret)
from clockngpn.port_manager import PortManagerWorker, PortManager
from clockngpn.firewall_manager import FirewallManager, FirewallManagerWorker
oq = Queue()
bq = Queue()
b = Broker(bq, oq)
fwmq = Queue()
b.add_client(fwmq)
fwm = FirewallManager()
fwmw = FirewallManagerWorker(fwmq, bq, fwm=fwm)
for port in opened:
fwm.open(port)
pmq = Queue()
b.add_client(pmq)
pm = PortManager(address, unmanaged_ports=opened)
pmw = PortManagerWorker(pmq, bq, pm=pm)
ttpq = Queue()
b.add_client(ttpq)
ttp = TocTocPorts(secret, destination=ports)
ttpw = TocTocPortsWorker(ttpq, bq, ttp)
fwmw.start()
pmw.start()
ttpw.start()
b.start()
    # TODO: Refactor this method
def end(signum, *args):
log.warning('Signal handler called with signal %s' % signum)
bq.put(Event(ProcWorkerEvent.END, None))
retry = 0
while retry <= 3:
if not fwmw.is_alive() and not pmw.is_alive() and not ttpw.is_alive() and not b.is_alive():
break
time.sleep(retry * 1)
if fwmw.is_alive():
log.warning("Bad killing thread fwmw")
if pmw.is_alive():
log.warning("Bad killing thread pmw")
if ttpw.is_alive():
log.warning("Bad killing thread ttpw")
if b.is_alive():
log.warning("Bad killing thread broker")
if fwmw.is_alive() or pmw.is_alive() or ttpw.is_alive() or b.is_alive():
exit(0)
signal.signal(signal.SIGINT, end)
signal.signal(signal.SIGSEGV, end)
signal.signal(signal.SIGFPE, end)
signal.signal(signal.SIGABRT, end)
signal.signal(signal.SIGBUS, end)
signal.signal(signal.SIGILL, end)
# TODO: Orchestrator class
def main():
log_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
'QUIET': logging.NOTSET
}
parser = argparse.ArgumentParser(description='Launch TOTP based port knocking protection')
parser.add_argument('-ts', '--time-slot', dest='slot', default=30, type=int, help='Time slot for TOTP')
parser.add_argument('-a', '--address', default='0.0.0.0', help="Address to protect")
parser.add_argument('-s', '--secret', help="Secret part of TOTP")
parser.add_argument('-p', '--protected-ports', type=int, default=[], action='append', help="Port which has to be protected")
parser.add_argument('-o', '--opened-ports', type=int, default=[], action='append', help="Port which should be opened")
parser.add_argument('--gen-secret', help="Generate random secret", action='store_true')
parser.add_argument('--clean-firewall', help="Clean firewall configuration (e.g., after a bad close)", action='store_true')
parser.add_argument('--log-level', default="DEBUG", help="Log level")
# parser.add_argument('--config-file')
args = parser.parse_args()
log_level = args.log_level
level = log_levels.get(log_level, logging.DEBUG)
logging.basicConfig(
level=level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
if args.clean_firewall:
try:
check_environment()
except Exception as e:
log.error(e)
exit(-1)
        from clockngpn.firewall_manager import FirewallManager
FirewallManager().clean_firewall()
elif args.gen_secret:
i_secret = totp.gen_secret()
otp_bidi = OTPBidi(i_secret)
print("TOTP generated secret: %s" % i_secret)
print(otp_bidi.generate())
elif args.secret:
i_secret = args.secret
try:
secret = totp.web_secret_2_bytes(i_secret)
except Exception as e:
log.error("Bad secret: Remember secret must be b32")
return
slot = args.slot
address = args.address
ports = args.protected_ports if args.protected_ports else []
opened = args.opened_ports
main_server(secret, slot, address, ports, opened)
else:
log.error("A secret is required to start")
parser.print_help()
return
if __name__ == '__main__':
main()
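# Example invocations (a sketch; the entry-point name is an assumption, the flags come from the
# parser above, and the secret shown is a placeholder):
#
#     sudo python -m clockngpn --gen-secret
#     sudo python -m clockngpn -s JBSWY3DPEHPK3PXP -p 22 -o 80 --log-level INFO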
|
46540
|
from typing import List, Optional # NOQA
import chainer
from chainer import cuda
from chainer import functions
from chainer import links
import numpy # NOQA
class Set2Set(chainer.Chain):
r"""MPNN subsubmodule for readout part.
See: <NAME>+, \
Order Matters: Sequence to sequence for sets. November 2015.
`arXiv:1511.06391 <https://arxiv.org/abs/1511.06391>`
Args:
in_channels (int): dimension of input feature vector
n_layers (int): number of LSTM layers
Returns (chainer.Variable):
Output feature vector: (minibatch, in_channels * 2)
"""
def __init__(self, in_channels, n_layers=1):
# type: (int, int) -> None
super(Set2Set, self).__init__()
with self.init_scope():
self.lstm_layer = links.NStepLSTM(
n_layers=n_layers,
in_size=in_channels * 2,
out_size=in_channels,
dropout=0)
self.in_channels = in_channels
self.n_layers = n_layers
self.hx = None # type: Optional[chainer.Variable]
self.cx = None # type: Optional[chainer.Variable]
self.q_star = None # type: Optional[List]
def __call__(self, h):
# type: (chainer.Variable) -> chainer.Variable
xp = cuda.get_array_module(h)
mb, node, ch = h.shape # type: int, int, int
if self.q_star is None:
self.q_star = [
xp.zeros((1, self.in_channels * 2)).astype('f')
for _ in range(mb)
]
self.hx, self.cx, q = self.lstm_layer(self.hx, self.cx, self.q_star)
# self.hx: (mb, mb, ch)
# self.cx: (mb, mb, ch)
# q: List[(1, ch) * mb]
q = functions.stack(q) # q: (mb, 1, ch)
q_ = functions.transpose(q, axes=(0, 2, 1)) # q_: (mb, ch, 1)
e = functions.matmul(h, q_) # e: (mb, node, 1)
a = functions.softmax(e) # a: (mb, node, 1)
a = functions.broadcast_to(a, h.shape) # a: (mb, node, ch)
r = functions.sum((a * h), axis=1, keepdims=True) # r: (mb, 1, ch)
q_star_ = functions.concat((q, r), axis=2) # q_star_: (mb, 1, ch*2)
self.q_star = functions.separate(q_star_)
return functions.reshape(q_star_, (mb, ch * 2))
def reset_state(self):
# type: () -> None
self.hx = None
self.cx = None
self.q_star = None
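# Minimal usage sketch (hedged; shapes follow the docstring above, data is random):
#
#     import numpy as np
#     readout = Set2Set(in_channels=16)
#     h = np.random.rand(8, 10, 16).astype('f')  # (minibatch, node, in_channels)
#     q_star = readout(h)                        # -> Variable of shape (8, 32)
#     readout.reset_state()                      # clear LSTM state between batches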
|
46545
|
from pydantic import UUID4, BaseModel, EmailStr
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
email: EmailStr = None
id: UUID4 = None
|
46579
|
from dataclasses import dataclass
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import \
DiversityFilterParameters
from reinvent_scoring.scoring.scoring_function_parameters import ScoringFunctionParameters
from running_modes.configurations.reinforcement_learning.inception_configuration import InceptionConfiguration
from running_modes.configurations.reinforcement_learning.reinforcement_learning_configuration import \
ReinforcementLearningConfiguration
@dataclass
class ReinforcementLearningComponents:
"""This class holds the necessary configuration components to run RL"""
reinforcement_learning: ReinforcementLearningConfiguration
scoring_function: ScoringFunctionParameters
diversity_filter: DiversityFilterParameters
inception: InceptionConfiguration
|
46625
|
from GPy.kern import Kern
from GPy.core.parameterization import Param
import numpy as np
import sys
from paramz.transformations import Logexp
from ..kernels.tree.C_tree_kernel import wrapper_raw_SubsetTreeKernel
class SubsetTreeKernel(Kern):
"""
The SST kernel by Moschitti(2006), with two hyperparameters (lambda and sigma).
small lambda restricts the influence of large fragments, sigma controls the sparsity (sigma=0 only allows fragments with terminal symbols)
    We calculate gradients w.r.t. kernel hyperparameters following Beck (2015)
This is mainly a wrapper for a Cython implementation (see C_tree_kernel.pyx).
The Cython kernel is stored on the "kernel" attribute.
    Following the GPy standard, we require input in the form of 2-d numpy arrays of strings with dtype=object
e.g
X=np.array([['(S (NP ns) (VP v))'],
['(S (NP n) (VP v))'],
['(S (NP (N a)) (VP (V c)))'],
['(S (NP (Det a) (N b)) (VP (V c)))'],
['(S (NP (ADJ colorless) (N ideas)) (VP (V sleep) (ADV furiously)))']],
dtype=object)
    Each individual string should be in the prolog format e.g. "(C (B c) (D a))" for
C
/ \
B D
| |
c a
"""
def __init__(self, _lambda=1, _sigma=1, normalize=True, active_dims=None):
super(SubsetTreeKernel, self).__init__(1, active_dims, 'sstk')
self._lambda = Param('Lambda', _lambda,Logexp())
self._sigma = Param('Sigma', _sigma,Logexp())
self.link_parameters(self._lambda, self._sigma)
self.normalize = normalize
self.kernel = wrapper_raw_SubsetTreeKernel(_lambda, _sigma, normalize)
def _get_params(self):
# return kernel parameter values
return np.hstack((self._lambda, self._sigma))
def _set_params(self, x):
# set kernel parameters
self._lambda = x[0]
self._sigma = x[1]
def _get_param_names(self):
# return parameter names
return ['Lambda', 'Sigma']
    def K(self, X, X2=None):
# calc the kernel for input X
# also calc the gradients w.r.t kernel parameters
self.kernel._lambda = self._lambda
self.kernel._sigma = self._sigma
result, dl, ds = self.kernel.K(X, X2)
self.dlambda = dl
self.dsigma = ds
return result
def Kdiag(self, X):
# Calc just the diagonal elements of a kernel matrix
self.kernel._lambda = self._lambda
self.kernel._sigma = self._sigma
if self.normalize:
# if normalizing then this will just be ones
return np.ones(X.shape[0])
else:
return self.kernel.Kdiag(X)
def dK_dtheta(self, dL_dK, X, X2):
        # return the kernel gradients w.r.t. kernel parameters over the dataset
self.K(X,X2)
return np.array([np.sum(self.dlambda * dL_dK),
np.sum(self.dsigma * dL_dK)])
def update_gradients_full(self, dL_dK, X, X2):
# update gradients for optimization of kernel parameters
self._lambda.gradient = np.sum(self.dlambda * dL_dK)
self._sigma.gradient = np.sum(self.dsigma * dL_dK)
if __name__ == "__main__":
#Simple Demo
X=np.array([['(S (NP ns) (VP v))'],
['(S (NP n) (VP v))'],
['(S (NP (N a)) (VP (V c)))'],
['(S (NP (Det a) (N b)) (VP (V c)))'],
['(S (NP (ADJ colorless) (N ideas)) (VP (V sleep) (ADV furiously)))']],
dtype=object)
kern = SubsetTreeKernel(_lambda=1)
print("test calculations with normalization")
print(str(kern.K(X))+"\n should be\n"+str(np.array([[ 1., 0.5, 0.10540926, 0.08333333, 0.06711561],
[ 0.5, 1., 0.10540926, 0.08333333, 0.06711561],
[ 0.10540926, 0.10540926, 1., 0.31622777, 0.04244764],
[ 0.08333333, 0.08333333, 0.31622777, 1., 0.0335578 ],
[ 0.06711561, 0.06711561, 0.04244764, 0.0335578, 1. ]])))
|
46713
|
import os
import pytest
import shutil
from cloudify_agent.api import exceptions
from cloudify_agent.api import utils
from cloudify_agent.tests.utils import get_daemon_storage
from cloudify_agent.tests import random_id
def test_new_initd(daemon_factory, agent_ssl_cert):
daemon_name = 'test-daemon-{0}'.format(random_id(with_prefix=False))
daemon = daemon_factory.new(
**get_daemon_params(daemon_name, agent_ssl_cert))
assert daemon_name == daemon.name
assert 'queue' == daemon.queue
assert '127.0.0.1' == daemon.rest_host
assert 'user' == daemon.user
assert agent_ssl_cert.local_cert_path() == daemon.local_rest_cert_file
def test_save_load_delete(daemon_factory, agent_ssl_cert):
daemon_name = 'test-daemon-{0}'.format(random_id(with_prefix=False))
daemon = daemon_factory.new(
**get_daemon_params(daemon_name, agent_ssl_cert))
daemon_factory.save(daemon)
loaded = daemon_factory.load(daemon_name)
assert 'init.d' == loaded.PROCESS_MANAGEMENT
assert daemon_name == loaded.name
assert 'queue' == loaded.queue
assert '127.0.0.1' == loaded.rest_host
assert 'user' == loaded.user
daemon_factory.delete(daemon.name)
pytest.raises(exceptions.DaemonNotFoundError,
daemon_factory.load, daemon.name)
def test_new_no_implementation(daemon_factory):
pytest.raises(exceptions.DaemonNotImplementedError,
daemon_factory.new,
process_management='no-impl')
def test_load_non_existing(daemon_factory):
pytest.raises(exceptions.DaemonNotFoundError,
daemon_factory.load,
'non_existing_name')
def test_load_all(daemon_factory, agent_ssl_cert, tmp_path):
def _save_daemon(name):
daemon_name = 'test-daemon-{0}'.format(random_id(with_prefix=False))
params = get_daemon_params(daemon_name, agent_ssl_cert).copy()
params['name'] = name
daemon = daemon_factory.new(**params)
daemon_factory.save(daemon)
if os.path.exists(get_daemon_storage(str(tmp_path))):
shutil.rmtree(get_daemon_storage(str(tmp_path)))
daemons = daemon_factory.load_all()
assert 0 == len(daemons)
_save_daemon(utils.internal.generate_agent_name())
_save_daemon(utils.internal.generate_agent_name())
_save_daemon(utils.internal.generate_agent_name())
daemons = daemon_factory.load_all()
assert 3 == len(daemons)
def test_new_existing_agent(daemon_factory, agent_ssl_cert):
daemon_name = 'test-daemon-{0}'.format(random_id(with_prefix=False))
daemon = daemon_factory.new(
**get_daemon_params(daemon_name, agent_ssl_cert))
daemon_factory.save(daemon)
# without no_overwrite, this will overwrite the existing daemon
daemon = daemon_factory.new(
**get_daemon_params(daemon_name, agent_ssl_cert))
pytest.raises(exceptions.DaemonAlreadyExistsError,
daemon_factory.new,
no_overwrite=True,
**get_daemon_params(daemon_name, agent_ssl_cert))
def get_daemon_params(name, ssl_cert):
return {
'process_management': 'init.d',
'name': name,
'queue': 'queue',
'rest_host': '127.0.0.1',
'broker_ip': '127.0.0.1',
'user': 'user',
'broker_url': '127.0.0.1',
'broker_ssl_enabled': True,
'local_rest_cert_file': ssl_cert.local_cert_path(),
}
|
46720
|
from .example_model import get_chain_model,get_trailer_model
from .trailer_printer import trailer_print,draw_rectangular_obstacle,draw_rectangular_obstacle_around_center
|
46724
|
import numpy as np
import pandas as pd
import pytest
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import TheilSenRegressor
from etna.analysis import get_residuals
from etna.analysis import plot_residuals
from etna.analysis import plot_trend
from etna.analysis.plotters import _get_labels_names
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.models import LinearPerSegmentModel
from etna.pipeline import Pipeline
from etna.transforms import BinsegTrendTransform
from etna.transforms import LagTransform
from etna.transforms import LinearTrendTransform
from etna.transforms import STLTransform
from etna.transforms import TheilSenTrendTransform
@pytest.fixture
def residuals():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df = pd.DataFrame(
{
"timestamp": timestamp.tolist() * 2,
"segment": ["segment_0"] * len(timestamp) + ["segment_1"] * len(timestamp),
"target": np.arange(len(timestamp)).tolist() + (np.arange(len(timestamp)) + 1).tolist(),
}
)
df_wide = TSDataset.to_dataset(df)
ts = TSDataset(df=df_wide, freq="D")
forecast_df = ts[timestamp[10:], :, :]
forecast_df.loc[:, pd.IndexSlice["segment_0", "target"]] = -1
forecast_df.loc[:, pd.IndexSlice["segment_1", "target"]] = 1
residuals_df = ts[timestamp[10:], :, :]
residuals_df.loc[:, pd.IndexSlice["segment_0", "target"]] += 1
residuals_df.loc[:, pd.IndexSlice["segment_1", "target"]] -= 1
return residuals_df, forecast_df, ts
def test_get_residuals(residuals):
"""Test that get_residuals finds residuals correctly."""
residuals_df, forecast_df, ts = residuals
actual_residuals = get_residuals(forecast_df=forecast_df, ts=ts)
assert actual_residuals.to_pandas().equals(residuals_df)
def test_get_residuals_not_matching_lengths(residuals):
"""Test that get_residuals fails to find residuals correctly if ts hasn't answers."""
residuals_df, forecast_df, ts = residuals
ts = TSDataset(df=ts[ts.index[:-10], :, :], freq="D")
with pytest.raises(KeyError):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_get_residuals_not_matching_segments(residuals):
"""Test that get_residuals fails to find residuals correctly if segments of dataset and forecast differ."""
residuals_df, forecast_df, ts = residuals
columns_frame = forecast_df.columns.to_frame()
columns_frame["segment"] = ["segment_0", "segment_3"]
forecast_df.columns = pd.MultiIndex.from_frame(columns_frame)
with pytest.raises(KeyError, match="Segments of `ts` and `forecast_df` should be the same"):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_plot_residuals_fails_unknown_feature(example_tsdf):
    """Test that plot_residuals fails if it meets an unknown feature."""
pipeline = Pipeline(
model=LinearPerSegmentModel(), transforms=[LagTransform(in_column="target", lags=[5, 6, 7])], horizon=5
)
metrics, forecast_df, info = pipeline.backtest(ts=example_tsdf, metrics=[MAE()], n_folds=3)
with pytest.raises(ValueError, match="Given feature isn't present in the dataset"):
plot_residuals(forecast_df=forecast_df, ts=example_tsdf, feature="unkown_feature")
@pytest.mark.parametrize(
"poly_degree, trend_transform_class",
(
[1, LinearTrendTransform],
[2, LinearTrendTransform],
[1, TheilSenTrendTransform],
[2, TheilSenTrendTransform],
),
)
def test_plot_trend(poly_degree, example_tsdf, trend_transform_class):
plot_trend(ts=example_tsdf, trend_transform=trend_transform_class(in_column="target", poly_degree=poly_degree))
@pytest.mark.parametrize("detrend_model", (TheilSenRegressor(), LinearRegression()))
def test_plot_bin_seg(example_tsdf, detrend_model):
plot_trend(ts=example_tsdf, trend_transform=BinsegTrendTransform(in_column="target", detrend_model=detrend_model))
@pytest.mark.parametrize("period", (7, 30))
def test_plot_stl(example_tsdf, period):
plot_trend(ts=example_tsdf, trend_transform=STLTransform(in_column="target", period=period))
@pytest.mark.parametrize(
"poly_degree, expect_values, trend_class",
(
[1, True, LinearTrendTransform],
[2, False, LinearTrendTransform],
[1, True, TheilSenTrendTransform],
[2, False, TheilSenTrendTransform],
),
)
def test_get_labels_names_linear_coeffs(example_tsdf, poly_degree, expect_values, trend_class):
ln_tr = trend_class(in_column="target", poly_degree=poly_degree)
example_tsdf.fit_transform([ln_tr])
segments = example_tsdf.segments
_, linear_coeffs = _get_labels_names([ln_tr], segments)
if expect_values:
assert list(linear_coeffs.values()) != ["", ""]
else:
assert list(linear_coeffs.values()) == ["", ""]
|
46738
|
from .response import process_response
from .response import get_response
from .app import run
# __all__ = ["process_response", "get_response"]
|
46743
|
sessions = [{
"1": {
"type": "session",
"source": {"id": "scope"},
"id": "1",
'profile': {"id": "1"}
}
}]
profiles = [
{"1": {'id': "1", "traits": {}}},
{"2": {'id': "2", "traits": {}}},
]
class MockStorageCrud:
def __init__(self, index, domain_class_ref, entity):
self.index = index
self.domain_class_ref = domain_class_ref
self.entity = entity
if index == 'session':
self.data = sessions
elif index == 'profile':
self.data = profiles
async def load(self):
for item in self.data:
if self.entity.id in item:
return self.domain_class_ref(**item[self.entity.id])
return None
async def save(self):
self.data.append({self.entity.id: self.entity.dict(exclude_unset=True)})
    async def delete(self):
        # The backing store is a list of single-key dicts, so remove the matching entry
        self.data[:] = [item for item in self.data if self.entity.id not in item]
class EntityStorageCrud:
def __init__(self, index, entity):
self.index = index
self.entity = entity
if index == 'session':
self.data = sessions
elif index == 'profile':
self.data = profiles
async def load(self, domain_class_ref):
for item in self.data:
if self.entity.id in item:
return domain_class_ref(**item[self.entity.id])
return None
async def save(self):
self.data.append({self.entity.id: self.entity.dict(exclude_unset=True)})
    async def delete(self):
        # The backing store is a list of single-key dicts, so remove the matching entry
        self.data[:] = [item for item in self.data if self.entity.id not in item]
|
46862
|
def extractExpandablefemaleBlogspotCom(item):
'''
DISABLED
Parser for 'expandablefemale.blogspot.com'
'''
return None
|
46869
|
import tqdm
import torch
from lav.lav_privileged import LAV
from lav.utils.datasets import get_data_loader
from lav.utils.logger import Logger
def main(args):
dmd = LAV(args)
data_loader = get_data_loader('bev', args)
logger = Logger('lav_bev', args)
save_dir = logger.save_dir
torch.manual_seed(args.seed)
# logger.watch_model(dmd.uniplanner)
global_it = 0
for epoch in range(args.num_epoch):
for data in tqdm.tqdm(data_loader, desc=f'Epoch {epoch}'):
opt_info = dmd.train_bev(*data)
if global_it % args.num_per_log == 0:
logger.log_bev_info(global_it, opt_info)
global_it += 1
dmd.bev_scheduler.step()
if (epoch+1) % args.num_per_save == 0:
bev_path = f'{save_dir}/bev_{epoch+1}.th'
torch.save(dmd.state_dict('bev'), bev_path)
print (f'save to {bev_path}')
logger.save([bev_path])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config-path', default='config.yaml')
parser.add_argument('--device', default='cuda', choices=['cuda', 'cpu'])
# Training misc
parser.add_argument('--num-epoch', type=int, default=160)
parser.add_argument('--num-per-log', type=int, default=100, help='log per iter')
parser.add_argument('--num-per-save', type=int, default=10, help='save per epoch')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--num-workers', type=int, default=16)
    # Reproducibility (still not fully deterministic due to CUDA/CuDNN)
parser.add_argument('--seed', type=int, default=2021)
args = parser.parse_args()
main(args)
|
46870
|
import torch.utils.model_zoo as model_zoo
from .registry import is_model, is_model_in_modules, model_entrypoint
from .helpers import load_checkpoint
from .layers import set_layer_config
def create_model(
model_name,
pretrained=False,
num_classes=1000,
in_chans=3,
checkpoint_path='',
scriptable=None,
exportable=None,
no_jit=None,
strict=True,
**kwargs):
"""Create a model
Args:
model_name (str): name of model to instantiate
pretrained (bool): load pretrained ImageNet-1k weights if true
num_classes (int): number of classes for final fully connected layer (default: 1000)
in_chans (int): number of input channels / colors (default: 3)
checkpoint_path (str): path of checkpoint to load after model is initialized
scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet)
exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet)
no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only)
Keyword Args:
drop_rate (float): dropout rate for training (default: 0.0)
global_pool (str): global pool type (default: 'avg')
**: other kwargs are model specific
"""
model_args = dict(pretrained=pretrained, num_classes=num_classes, in_chans=in_chans)
# Only EfficientNet and MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args
is_efficientnet = is_model_in_modules(model_name, ['efficientnet', 'mobilenetv3'])
is_resnet = is_model_in_modules(model_name, ['resnet', 'resnext'])
if not is_efficientnet:
kwargs.pop('bn_tf', None)
kwargs.pop('bn_momentum', None)
kwargs.pop('bn_eps', None)
if not is_resnet:
kwargs.pop('no_skip', None)
    if model_name not in ('resnetcustom', 'ecaresnetdcustom', 'ecaresnetcustom'):
        if 'resnet_structure' in kwargs:
            kwargs.pop('resnet_structure')
            kwargs.pop('resnet_block')
    if 'mobilenasnet' not in model_name:
        # These kwargs are only meaningful for mobilenasnet models; drop them for everything else.
        for key in ('heaviest_network', 'use_kernel_3', 'exp_r', 'depth', 'reduced_exp_ratio',
                    'use_dedicated_pwl_se', 'force_sync_gpu', 'no_privatized_bn',
                    'multipath_sampling', 'use_softmax', 'detach_gs', 'mobilenet_string',
                    'search_mode', 'no_swish', 'use_swish'):
            kwargs.pop(key, None)
# Parameters that aren't supported by all models should default to None in command line args,
# remove them if they are present and not set so that non-supporting models don't break.
if kwargs.get('drop_block_rate', None) is None:
kwargs.pop('drop_block_rate', None)
# handle backwards compat with drop_connect -> drop_path change
drop_connect_rate = kwargs.pop('drop_connect_rate', None)
if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None:
print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'."
" Setting drop_path to %f." % drop_connect_rate)
kwargs['drop_path_rate'] = drop_connect_rate
if kwargs.get('drop_path_rate', None) is None:
kwargs.pop('drop_path_rate', None)
with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
if is_model(model_name):
create_fn = model_entrypoint(model_name)
model = create_fn(**model_args, **kwargs)
else:
raise RuntimeError('Unknown model (%s)' % model_name)
if checkpoint_path:
load_checkpoint(model, checkpoint_path, strict=strict, use_ema=True)
return model
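# Hedged usage sketch (the model name is illustrative and must exist in the local registry):
#
#     model = create_model('resnet50', pretrained=False, num_classes=10, drop_rate=0.2)
#     model.eval()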
|
46937
|
from __future__ import print_function
import argparse
import glob
import io
import os
import subprocess as sp
import sys
from collections import defaultdict
from itertools import chain
import networkx as nx
import requests
import yaml
from conda_build import api
# ---------------------------------------------------------------------------------------------------------------------------------
## Global Variables
# ---------------------------------------------------------------------------------------------------------------------------------
REPODATA_URL = "https://conda.anaconda.org/{channel}/{subdir}/repodata.json"
REPODATA_LABELED_URL = "https://conda.anaconda.org/{channel}/label/{label}/{subdir}/repodata.json"
REPODATA_DEFAULTS_URL = "https://repo.anaconda.com/pkgs/main/{subdir}/repodata.json"
# ---------------------------------------------------------------------------------------------------------------------------------
## Argument Parser
# ---------------------------------------------------------------------------------------------------------------------------------
def arguments():
p = argparse.ArgumentParser(
description="Identify and build all ggd recipes that are not currently in any ggd conda channel"
)
req = p.add_argument_group("Required Arguments")
opt = p.add_argument_group("Optional Arguments")
req.add_argument(
"--recipe-dir",
metavar="Base Recipe Directory",
required=True,
help="(Required) The base recipe directory to start walking through to identify any ggd recipes. For example, the main 'recipes' folder in the ggd-recipes repo",
)
req.add_argument(
"--config-file",
metavar="Configuration File",
required=True,
help="(Required) Path to the configureation yaml file. This file should contain relevant information such as specific channels",
)
opt.add_argument(
"--packages",
metavar="Specific Packages to build",
nargs="*",
help="(Optional) A single or space seperated list of packages to build. Only these packages will be checked and potentially built",
)
opt.add_argument(
"--blacklist",
metavar="Black listed recipes",
help="(Optional) A file with recipes that are blacklisted. That is, recipes to skip build for. This file should contain a single recipe name per line. # comment lines will be ignored.",
)
opt.add_argument(
"--debug",
action="store_true",
help="(Optional) Whther or not to print the debug output from conad build to the screen.",
)
opt.add_argument(
"--force-build",
action="store_true",
help="(Optional) Whether or not to force all recipes being checked to be built or not. (Default = False).",
)
return p.parse_args()
# ---------------------------------------------------------------------------------------------------------------------------------
## Functions/methods
# ---------------------------------------------------------------------------------------------------------------------------------
def parse_backlist_file(file_path):
"""
parse_backlist_file
===================
Method to parse a file that is provided that represents recipes that should be blacklisted.
Any lines that start with a '#' or any empty lines will be skipped. This method will get all
other lines, and treat any recipes that match the recipes being checked as a blacklisted recipe.
Parameters:
-----------
1) file_path: (str) The file path to the blacklist file
Returns:
++++++++
1) A generator of blacklisted items
"""
assert os.path.isfile(
file_path
), ":ggd:build recipes: !!ERROR!! This blacklist file provided is not a file: '{}' please provide a correct file or remove the --backlist flag".foramt(
file_path
)
try:
with io.open(file_path, "rt", encoding="utf-8") as blist:
for line in blist:
if line[0] != "#" and line.strip():
yield os.path.basename(str(line.strip()))
except IOError as e:
print(
":ggd:build recipes: !!ERROR!! A problem occured trying to read the blacklist file. Fix the error and try again"
)
print(str(e))
sys.exit(1)
def get_all_recipes(base_recipe_folder, packages="*", exclude_recipes=set()):
"""
get_all_recipes
===============
Method to get all the ggd recipes from a base recipe directory. This method will walk through the directory and once it finds "meta.yaml" files it
    returns the directory as a recipe.
Parameters:
-----------
1) base_recipe_folder: (str) The directory path to the base recipe folder to search for recipes in
2) packages: (list) A specific package, a set of packages, or "*" for all packages to look for
Returns:
++++++++
1) A generator with the directory paths that represent the recipes
"""
## Identify if the packages are going to be filtered by a set of packages or not
filter_packages = False
if packages != "*":
filter_packages = True
exclude = False
if exclude_recipes:
exclude = True
    ## If the packages argument is a single package name, wrap it in a set
    if isinstance(packages, str):
        packages = {packages}
print(
":ggd:build recipes: Getting recipe(s) from '{recipe_dir}'. Recipes filtered by: '{p}'".format(
recipe_dir=base_recipe_folder, p=", ".join(list(packages))
)
)
## Dir path for walking over
for recipe_dir in glob.glob(os.path.join(base_recipe_folder, "*")):
## Identify all recipes with a meta.yaml file
for dir_path, dir_names, file_names in os.walk(recipe_dir):
## If a recipe has been found
if "meta.yaml" in file_names:
## Exclude any blacklisted recipes
if exclude:
if os.path.basename(dir_path) in exclude_recipes:
continue
## if filter by package
if filter_packages:
if os.path.basename(dir_path) in packages:
yield dir_path
## If not filtering by package
else:
yield dir_path
def load_config(config_file):
"""
load_config
===========
    Method to load the base config file for building recipes. The config file should be a yaml file
Parameters:
----------
    1) config_file: (str) The file path to the config file for building recipes
Returns:
++++++++
1) A list of channels in the config file
"""
try:
config_dict = yaml.safe_load(io.open(config_file, "rt", encoding="utf-8"))
except IOError as e:
print(
":ggd:build recipes: !!ERROR!! A problem occured trying to read the config file. Fix the error and try again"
)
print(str(e))
sys.exit(1)
channel_dict = config_dict["channels"]
return channel_dict
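# Sketch of the expected config layout (an assumption inferred from the "channels" key read
# above; the channel names are illustrative only):
#
#     # config.yaml
#     channels:
#       - ggd-genomics
#       - bioconda
#       - conda-forge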
def build_recipes(
recipe_list, check_channels, force=False, debug=False, n_workers=1, worker_offset=0
):
"""
build_recipes
=============
    Controller method used to prepare, check, build, and process each recipe in the recipe list. It will
build a DAG with Nodes as recipes and dependencies, and edges connecting recipe to dependencies. It
    removes any cyclic nodes that depend on each other, identifies new or updated recipes that need to
    be built, and builds them.
Parameters:
-----------
    1) recipe_list: (str) A list of recipes to check (The directory path of the recipe)
2) check_channels: (list) A list of channels to check against
    3) force: (bool) Whether or not to force the recipe to be built even if the same version and build exists in a channel being checked against (Default = False)
4) debug: (bool) Whether or not to run 'conda build' in the debug phase. (Default = False)
    5) n_workers: (int) The number of workers to use to create subdags. (Default = 1)
6) worker_offset: (int) The number to use to offset the n_workers used for subdag creation. (Default = 0)
Return:
+++++++
1) True if all recipes are checked and there are no errors. False otherwise
"""
if not recipe_list:
print(":ggd:build recipes: Nothing to be done")
return True
## create a dag
dag, name2recipe, recipe2name = build_recipe_dag(recipe_list)
if not dag:
print(":ggd:build recipes: Nothing to be done")
return True
## Remove cyclic dependencies in the build job
### If current build jobs depend on each other, can't build them
skip_dependent = defaultdict(list)
dag = remove_dag_cycles(dag, name2recipe, skip_dependent)
## Create subdag workers
subdag = get_subdags(dag, n_workers, worker_offset)
if not subdag:
print(":ggd:build recipes: Nothing to be done")
return True
print(
":ggd:build recipes: {} recipes to build and test: \n{}".format(
len(subdag), "\n".join(subdag.nodes())
)
)
## Filter recipes
filtered_recipes = [
(recipe, recipe2name[recipe])
for package in nx.topological_sort(subdag)
for recipe in name2recipe[package]
]
## Get the Repodata for each channel
repodata_by_channel, actualname_to_idname = get_repodata(check_channels)
## Remove defaults channel for now
if "defaults" in check_channels:
check_channels.remove("defaults")
## Check each recipe
built_recipes = []
skipped_recipes = []
failed_recipes = []
for recipe, name in filtered_recipes:
## Check if the recipe is being skipped
if name in skip_dependent:
print(
(
":ggd:build recipes: SKIPPING BUILD: skipping {} because it depends on {} "
" which failed build"
).format(recipe, skip_dependent[name])
)
skipped_recipes.append(recipe)
continue
print(":ggd:build recipes: Determining expected packages for {}".format(recipe))
        ## Check the recipe against each channel's repodata and, if present, whether its version/build is greater than what is already there
predicted_path = check_recipe_for_build(
recipe,
check_channels,
repodata_by_channel,
actualname_to_idname,
force=force,
)
## if no predicted path, skip building this recipe
if not predicted_path:
print(
":ggd:build recipes: Nothing to be done for recipe '{}'".format(recipe)
)
continue
## Build the recipe
success = conda_build_recipe(recipe, check_channels, predicted_path, debug)
## Check for a successful recipe build
if success:
built_recipes.append(recipe)
print(
":ggd:build recipes: Package recipe located at {}".format(
",".join(predicted_path)
)
)
else:
failed_recipes.append(recipe)
for pkg in nx.algorithms.descendants(subdag, name):
skip_dependent[pkg].append(recipe)
## Check for failed recipes
if failed_recipes:
print(
(
":ggd:build recipes: BUILD SUMMARY: of {} recipes, "
"{} failed and {} were skipped. "
).format(len(filtered_recipes), len(failed_recipes), len(skipped_recipes))
)
if built_recipes:
print(
(
":ggd:build recipes: BUILD SUMMARY: Although "
"the build process failed, there were {} recipes "
"built successfully."
).format(len(built_recipes))
)
for frecipe in failed_recipes:
print(":ggd:build recipes: BUILD SUMMARY: FAILED recipe {}".format(frecipe))
## Purge the builds
sp.check_call(["conda", "build", "purge"], stderr=sys.stderr, stdout=sys.stdout)
return False
## IF not failed recipes, prompt for a successful build
print(
":ggd:build recipes: BUILD SUMMARY: SUCCESSFULLY BUILT {} of {} recipes".format(
len(built_recipes), len(filtered_recipes)
)
)
return True
def conda_build_recipe(recipe, channels, predicted_path, debug=False):
"""
conda_build_recipe
==================
This method is used to build a single recipe using `conda build`
Parameters:
-----------
1) recipe: (str) The directory path to the recipe to build
2) channels: (list) A list of conda channels
3) predicted_path: (str) The file path to the predicted tarball file path once a recipe is built
4) debug: (bool) Whether or not to run `conda build` in debug mode. (Default = False)
Return:
+++++++
1) True if the recipe is successfully built, False otherwise
"""
print(":ggd:build recipe: BUILD STARTED for {}".format(recipe))
## set up args
args = ["--override-channels", "--no-anaconda-upload"]
    ## Add channels to args
for channel in channels + ["local"]:
args += ["-c", channel]
## Get config file
config = load_conda_build_config()
## Check for exclusions
for file_path in config.exclusive_config_files or []:
if file_path:
args += ["-e", file_path]
## check for additional configs
for file_path in config.variant_config_files or []:
if file_path:
args += ["-m", file_path]
## Get recipe path
recipe_path = os.path.join(recipe, "meta.yaml")
if debug:
cmd = ["conda", "build", "--debug"] + args + [recipe_path]
else:
cmd = ["conda", "build"] + args + [recipe_path]
## Run conda build
try:
sp.check_call(cmd, stderr=sys.stderr, stdout=sys.stdout)
except Exception as e:
print(":ggd:build recipes: Build failed for {}".format(recipe))
print(str(e))
return False
## Check that the predicted tarfile path was created
for ppath in predicted_path:
        if not os.path.isfile(ppath):
            print(
                ":ggd:build recipes: !!ERROR!! The predicted tarfile does not exist after building the recipe. The build failed"
            )
return False
print(":ggd:build recipes: BUILD SUCCESS: Successfully built {}".format(recipe))
return True
def get_repodata(check_channels):
"""
get_repodata
============
Method to get the conda repodata for a list of conda channels
Parameters:
-----------
1) check_channels: (list) A list of channels to check and get repodata for
Returns:
++++++++
1) A dictionary with keys as channels and values as the repodata for that channel starting at the "packages" key.
"""
print(":ggd:build recipes: Loading repodata for each channel from the config file")
## Load the repodata for each channel
repodata_by_channel = dict()
name2tar = defaultdict(lambda: defaultdict(set))
## Check each channel
for channel in check_channels:
## No repodata for default (local) channel
if channel == "defaults":
continue
## NOTE: Hardset to noarch right now. This might need to change in the future
repodata_url = REPODATA_URL.format(channel=channel, subdir="noarch")
## Get the repodata from the anaconda url
try:
repodata_json = requests.get(repodata_url).json()
except ValueError as e:
print(
":ggd:build recipes: !!ERROR!! A problem occured loading the repodata for the conda channel: '{}'".format(
channel
)
)
print(str(e))
sys.exit(1)
## Add to dict
repodata_by_channel[channel] = repodata_json["packages"]
for tar, pkg in repodata_json["packages"].items():
name = pkg["name"]
name2tar[channel][name].add(tar)
return (repodata_by_channel, name2tar)
def load_conda_build_config(platform=None, trim_skip=True):
"""
load_conda_build_config
=======================
Load conda build config while considering global pinnings from conda-forge.
Parameters:
-----------
1) platform: (str) The platform to use. Example: noarch, linux-64, etc. (Default = None)
2) trim_skip: (bool) What to set conda build config trim skip to. (Default = True)
Return:
++++++
1) The conda build config object
"""
config = api.Config(no_download_source=True, set_build_id=False)
## Hardset to the bioconda_utils-conda_build_config.yaml file in the .circleci dir
### Will need to change this later
if os.path.basename(os.getcwd()) == "ggd-recipes":
config.exclusive_config_files = [
os.path.join(
os.getcwd(), "bioconda_utils-conda_build_config.yaml"
)
]
else:
config.exclusive_config_files = []
for cfg in chain(config.exclusive_config_files, config.variant_config_files or []):
assert os.path.exists(cfg), "error: {0} does not exist".format(cfg)
if platform:
config.platform = platform
config.trim_skip = trim_skip
return config
def load_all_meta(recipe, config=None, finalize=True):
"""
load_all_meta
=============
For each environment, yield the rendered meta.yaml.
Parameters
----------
1) recipe: (str) The directory path to the recipe
2) config: (str) The config file. (Default = None)
3) finalize: (bool) If True, do a full conda-build render. Determines exact package builds
of build/host dependencies. It involves costly dependency resolution
via conda and also download of those packages (to inspect possible
run_exports). For fast-running tasks like linting, set to False.
Returns:
++++++++
1) A list of metadata for each matching recipe
"""
bypass_env_check = not finalize
return [
meta
for (meta, _, _) in api.render(
recipe, config=config, finalize=finalize, bypass_env_check=bypass_env_check,
)
]
def load_platform_metas(recipe, finalize=True):
"""
load_platform_metas
===================
    Method to load conda build config metadata based on the current platform
1) recipe: (str) The directory path to the recipe
2) finalize: (bool) Used for the load_all_meta() method. Whether or not to run finalize or not. (Default = True)
Return:
+++++++
1) The current system platform
    2) The platform-specific config info for load_all_meta()
"""
platform = os.environ.get("OSTYPE", sys.platform)
if platform.startswith("darwin"):
platform = "osx"
elif platform == "linux-gnu":
platform = "linux"
config = load_conda_build_config(platform=platform)
return (platform, load_all_meta(recipe, config=config, finalize=finalize))
def check_if_recipe_skippable(recipe, channels, repodata_dict, actualname_to_idname):
"""
check_if_recipe_skippable
=========================
Method used to check if a recipe should be skipped or not.
    Skip criteria include:
        - The version of the recipe in the channel repodata is greater than the query recipe's version.
        - The versions are equal and the repodata build number is greater than or equal to the query recipe's build number.
    Non-skip criteria include:
        - The opposite of the skip criteria.
        - The recipe is not present in any channel.
Parameters:
-----------
1) recipe: (str) The directory path to the query recipe
2) channels: (list) A list of channels to check against
3) repodata_dict: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict keyed by channel, then by recipe name, with sets of package tarball ids from repodata_dict as values. (From get_repodata() method)
Returns:
++++++++
- Return True if recipe building is skippable
- Return False if recipe building cannot be skipped
"""
platform, metas = load_platform_metas(recipe, finalize=False)
# The recipe likely defined skip: True
if not metas:
return True
## Get each packages name, version, and build number
packages = set(
(meta.name(), float(meta.version()), float(meta.build_number() or 0))
for meta in metas
)
for name, version, build_num in packages:
present = False
for c in channels:
## Check for the recipe in one of the channel's repodata
if name in actualname_to_idname[c].keys():
                ## Find the newest (highest version and build number) package
present = True
cur_version = -1.0
cur_build = -1.0
for pkg_tar in actualname_to_idname[c][name]:
repo_version = float(repodata_dict[c][pkg_tar]["version"])
repo_build_number = float(repodata_dict[c][pkg_tar]["build_number"])
## If version is greater than the previous version, reset values with this package
if repo_version > cur_version:
cur_version = repo_version
cur_build = repo_build_number
## If version is the same but the build number is greater, reset values with this package
                    elif repo_version == cur_version and repo_build_number > cur_build:
cur_build = repo_build_number
                ## Check whether the query package is newer than what is reported in the repodata
## If the query package's version is greater than the best in the repodata, update recipe
if cur_version < version:
return False
                ## If the query package has the same version but a greater build number than the best in the repodata, update recipe
elif cur_version == version and cur_build < build_num:
return False
## If package not already in the repodata
if not present:
return False
print(
":ggd:build recipes: FILTER: not building recipe {} because the version and/or build number match what is already in the channel and not forced".format(
recipe
)
)
return True
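# Worked example of the skip decision above (the package name and numbers are hypothetical):
#   best package for "hg38-gaps" in the channel repodata: version 1.0, build 2
#   query recipe:                                         version 1.0, build 3
#   -> cur_version == version and cur_build < build_num, so return False (rebuild).
#   If the query recipe were also version 1.0, build 2 (or lower), the loop would fall
#   through and the recipe would be reported as skippable (return True).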
def check_recipe_for_build(
recipe, check_channels, repodata_by_channel, actualname_to_idname, force=False
):
"""
check_recipe_for_build
======================
Method used to check if a recipe should be built or not
Parameters:
-----------
1) recipe: (str) The directory path for the recipe in question
2) check_channels: (list) A list of channels to check against
3) repodata_by_channel: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict keyed by channel, then by recipe name, with sets of package tarball ids from repodata_by_channel as values. (From get_repodata() method)
5) force: (bool) Whether or not to force a recipe to be built even if it should be skipped. "Force build" (Default = False)
Return:
+++++++
    - An empty list if the recipe should be skipped
    - A list of predicted tarball file paths for the built recipe if the recipe should be built
"""
if not force:
## Check for recipes that could be skipped
if check_if_recipe_skippable(
recipe, check_channels, repodata_by_channel, actualname_to_idname
):
# NB: If we skip early here, we don't detect possible divergent builds.
return []
## Use conda build to get meta info
platform, metas = load_platform_metas(recipe, finalize=True)
# The recipe likely defined skip: True
if not metas:
return []
## Get the predicted tarball path
predicted_tar_paths = list(
chain.from_iterable(api.get_output_file_paths(meta) for meta in metas)
)
## Return predicted tarball file path
return predicted_tar_paths
def remove_dag_cycles(dag, name2recipes, skip_dependent):
"""
remove_dag_cycles
=================
    Method to remove cycles in the dag. Cycles happen when multiple recipes (nodes) depend on each other.
Parameters:
-----------
    1) dag: (networkx.DiGraph() object) The dag created by build_recipe_dag()
    2) name2recipes: (dict) A dictionary where keys are recipe names and values are sets of recipe paths
3) skip_dependent: (dict) A dictionary with recipes that should be skipped. (To be filled with this method)
Returns:
++++++++
    1) An updated dag with cyclic nodes removed
"""
nodes_in_cycles = set()
for cycle in list(nx.simple_cycles(dag)):
print(
":ggd:build recipes: !!BUILD ERROR!! dependency cycle found for: {}".format(
cycle
)
)
nodes_in_cycles.update(cycle)
for name in sorted(nodes_in_cycles):
fail_recipes = sorted(name2recipes[name])
print(
(
":ggd:build recipes: !!BUILD ERROR!! cannot build recipes for {} since "
"it cyclically depends on other packages in the current build job. "
"Failed recipes: %s"
).format(name, fail_recipes)
)
for node in nx.algorithms.descendants(dag, name):
if node not in nodes_in_cycles:
                skip_dependent[node].extend(fail_recipes)
return dag.subgraph(name for name in dag if name not in nodes_in_cycles)
def get_subdags(dag, n_workers, worker_offset):
"""
get_subdags
===========
    Method to create subdags from the main dag based on the number of workers available
Parameters:
-----------
1) dag: (networkx.DiGraph() object) The recipe dag
2) n_workers: (int) The number of workers
3) worker_offset: (int) The worker offset
Returns:
++++++++
    1) The sub-DAGs assigned to this worker (as a single subgraph)
"""
if n_workers > 1 and worker_offset >= n_workers:
raise ValueError(
"n-workers is less than the worker-offset given! "
"Either decrease --n-workers or decrease --worker-offset!"
)
# Get connected subdags and sort by nodes
if n_workers > 1:
        root_nodes = sorted([k for k, v in dict(dag.in_degree()).items() if v == 0])
nodes = set()
found = set()
for idx, root_node in enumerate(root_nodes):
# Flatten the nested list
children = itertools.chain(*nx.dfs_successors(dag, root_node).values())
# This is the only obvious way of ensuring that all nodes are included
# in exactly 1 subgraph
found.add(root_node)
if idx % n_workers == worker_offset:
nodes.add(root_node)
for child in children:
if child not in found:
nodes.add(child)
found.add(child)
else:
for child in children:
found.add(child)
subdags = dag.subgraph(list(nodes))
print(
":ggd:build recipes: Building and testing sub-DAGs {} in each group of {}, which is {} packages".format(
worker_offset, n_workers, len(subdags.nodes())
)
)
else:
subdags = dag
return subdags
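# Worked example of the round-robin split above (hypothetical DAG):
#   sorted root nodes = ["a", "b", "c"], n_workers = 2
#   worker_offset 0 takes roots "a" and "c" (indices 0 and 2) plus any of their children
#   not already claimed; worker_offset 1 takes root "b" (index 1) and its unclaimed children.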
def build_recipe_dag(recipe_list, restricted=True):
"""
build_recipe_dag
================
    Method to build the DAG for recipes. Nodes represent recipes (and, if restricted is False, their dependencies),
    while edges connect each recipe node to its build/host dependencies.
Parameters:
-----------
    1) recipe_list: (list) A list of recipes to build the DAG for
2) restricted: (bool) Whether or not to restrict the final list of recipes to recipes only (True) or to include their deps as well (False)
Returns:
++++++++
1) The DAG
2) name2recipe_dict: (dict) A dictionary with names of recipes as keys, and sets of recipe paths as values
    3) recipe2name_dict: (dict) A dictionary with recipe paths as keys and recipe names as values
"""
print(":ggd:build recipes: Generating recipe DAG")
name2recipe_dict = defaultdict(set)
recipe2name_dict = defaultdict(str)
## Create a dag
dag = nx.DiGraph()
## For each recipe, update the dag and update the name2recipe_dict
for recipe in recipe_list:
recipe_path = os.path.join(recipe, "meta.yaml")
recipe_meta = yaml.safe_load(io.open(recipe_path, "rt", encoding="utf-8"))
## get a dictionary to match recipe name to recipe dir
recipe_name = recipe_meta["package"]["name"]
name2recipe_dict[recipe_name].update([recipe])
## create another dict for recipe to name
recipe2name_dict[recipe] = recipe_name
## Add name as a node to the graph
dag.add_node(recipe_name)
## Check deps
for recipe in recipe_list:
recipe_path = os.path.join(recipe, "meta.yaml")
recipe_meta = yaml.safe_load(io.open(recipe_path, "rt", encoding="utf-8"))
## Get deps
if (
"build" in recipe_meta["requirements"]
and recipe_meta["requirements"]["build"]
):
            ## If the build reqs are in the current recipe list or restricted is set to False, add the dep
build_reqs = [
x
for x in recipe_meta["requirements"]["build"]
if x in name2recipe_dict or not restricted
]
else:
build_reqs = []
if "run" in recipe_meta["requirements"] and recipe_meta["requirements"]["run"]:
run_reqs = [
x
for x in recipe_meta["requirements"]["run"]
if x in name2recipe_dict or not restricted
]
else:
run_reqs = []
if (
"host" in recipe_meta["requirements"]
and recipe_meta["requirements"]["host"]
):
host_reqs = [
x
for x in recipe_meta["requirements"]["host"]
if x in name2recipe_dict or not restricted
]
else:
host_reqs = []
## Add deps as edges to node
dag.add_edges_from((dep, recipe_name) for dep in set(build_reqs + host_reqs))
return (dag, name2recipe_dict, recipe2name_dict)
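# Illustrative edge direction (hypothetical recipes): if recipe "hg38-gaps" lists
# "hg38-chrom-sizes" under its build/host requirements and both are in recipe_list,
# the DAG gains the edge ("hg38-chrom-sizes", "hg38-gaps"), i.e. edges point from a
# dependency to the recipe that needs it.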
# ---------------------------------------------------------------------------------------------------------------------------------
## Main
# ---------------------------------------------------------------------------------------------------------------------------------
def main():
args = arguments()
## Get blacklisted recipes
blacklist_recipes = set()
if args.blacklist:
blacklist_recipes = set(parse_backlist_file(args.blacklist))
print(
":ggd:build recipes: The following recipes are being blacklisted: {}".format(
", ".join(list(blacklist_recipes))
)
)
## Get a list of ggd recipes
print(":ggd:build recipes: Gathering ggd recipes")
recipes = list(
get_all_recipes(
args.recipe_dir, args.packages if args.packages else "*", blacklist_recipes
)
)
print(":ggd:build recipes: Considering {} ggd recipes".format(len(recipes)))
## Load the configuration file
print(":ggd:build recipes: loading config file".format(len(recipes)))
channels = load_config(args.config_file)
print(
":ggd:build recipes: channels from config file: {}".format(", ".join(channels))
)
## Build the recipes
build_recipes(recipes, channels, debug=args.debug)
if __name__ == "__main__":
sys.exit(main() or 0)
|
46940
|
from django.test import TestCase
from corehq import toggles
from corehq.motech.dhis2.tasks import send_datasets_for_all_domains
class TestSendDatasetsForAllDomains(TestCase):
domain_name = 'does-not-exist'
def setUp(self):
toggles.DHIS2_INTEGRATION.set(
self.domain_name,
enabled=True,
namespace=toggles.NAMESPACE_DOMAIN
)
def tearDown(self):
toggles.DHIS2_INTEGRATION.set(
self.domain_name,
enabled=False,
namespace=toggles.NAMESPACE_DOMAIN
)
def test_check_domain_exists(self):
"""
send_datasets_for_all_domains() should not raise an AttributeError
if a domain does not exist
"""
send_datasets_for_all_domains()
|
46955
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from company.models import Company
from company.tasks import deploy_new_company
@receiver(post_save, sender=Company)
def company_created(sender, instance, created, **kwargs):
if created:
deploy_new_company.delay(instance.id)
|
47059
|
import datetime, os, pkg_resources, re, setuptools_scm
from .. import __name__ as package_name
try:
if int(os.environ.get("_ASTROPATH_VERSION_NO_GIT", 0)):
env_var_no_git = True
raise LookupError
env_var_no_git = False
astropathversion = "v"+setuptools_scm.get_version(root="../..", relative_to=__file__)
except LookupError:
try:
astropathversion = "v"+pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
astropathversion = "v0.0.0.dev0+g0000000.d"+datetime.date.today().strftime("%Y%m%d")
astropathversionmatch = re.match(r"v(?P<version>[0-9]+(?:\.[0-9]+)*)(?P<dev>\.dev[0-9]+\+g[0-9a-f]+)?(?P<date>\.d[0-9]+)?", astropathversion)
if not astropathversionmatch:
raise RuntimeError(f"got a version number '{astropathversion}' that doesn't match the desired regex")
|
47077
|
from pyparsing import LineEnd, LineStart, SkipTo, Regex
from regparser.grammar import atomic, unified
section = (
atomic.section_marker.copy().leaveWhitespace()
+ unified.part_section
+ SkipTo(LineEnd())
)
par = (
atomic.section.copy().leaveWhitespace()
+ unified.depth1_p
+ SkipTo(LineEnd())
)
marker_par = (
atomic.paragraph_marker.copy().leaveWhitespace()
+ atomic.section
+ unified.depth1_p
)
# This matches an appendix name in an appendix header. Here we'll match
# something with a dash in the appendix name (i.e. AA-1) but we'll
# remove the dash. The effect of this is that, for label purposes only,
# the appendix becomes known as 'AA1', and therefore we don't have weird
# label collisions with a node labeled '1' underneath the appendix.
appendix = (
atomic.appendix_marker.copy().leaveWhitespace()
+ Regex(r"[A-Z]+-?[0-9]*\b").setResultsName("appendix").setParseAction(
lambda r: r[0].replace('-', '')).setResultsName("appendix")
+ SkipTo(LineEnd())
)
parser = LineStart() + (section | marker_par | par | appendix)
|
47093
|
from enum import IntEnum
class States(IntEnum):
"""Enumerates the states a parsl task may be in.
These states occur inside the task record for a task inside
a `DataFlowKernel` and in the monitoring database.
In a single successful task execution, tasks will progress in this
sequence:
pending -> launched -> running -> exec_done
Other states represent deviations from this path, either due to
failures, or to deliberate changes to how tasks are executed (for
example due to join_app, or memoization).
All tasks should end up in one of the states listed in `FINAL_STATES`.
"""
unsched = -1
pending = 0
"""Task is known to parsl but cannot run yet. Usually, a task cannot
run because it is waiting for dependency tasks to complete.
"""
running = 2
"""Task is running on a resource. This state is special - a DFK task
record never goes to States.running state; but the monitoring database
may represent a task in this state based on non-DFK information received
from monitor_wrapper."""
exec_done = 3
"""Task has been executed successfully."""
failed = 4
"""Task has failed and no more attempts will be made to run it."""
dep_fail = 5
"""Dependencies of this task failed, so it is marked as failed without
even an attempt to launch it."""
launched = 7
"""Task has been passed to a `ParslExecutor` for execution."""
fail_retryable = 8
"""Task has failed, but can be retried"""
memo_done = 9
"""Task was found in the memoization table, so it is marked as done
without even an attempt to launch it."""
joining = 10
"""Task is a join_app, joining on internal tasks. The task has run its
own Python code, and is now waiting on other tasks before it can make
further progress (to a done/failed state)."""
FINAL_STATES = [States.exec_done, States.memo_done, States.failed, States.dep_fail]
"""States from which we will never move to another state, because the job has
either definitively completed or failed."""
FINAL_FAILURE_STATES = [States.failed, States.dep_fail]
"""States which are final and which indicate a failure. This must
be a subset of FINAL_STATES"""
|
47116
|
from plugin.core.environment import Environment
from ConfigParser import NoOptionError, NoSectionError, ParsingError, SafeConfigParser
import logging
import os
log = logging.getLogger(__name__)
CONFIGURATION_FILES = [
'advanced'
]
class ConfigurationFile(object):
def __init__(self, path):
self._path = path
self._relpath = os.path.relpath(self._path, Environment.path.plugin_support)
self._parser = None
self._error = False
def __getitem__(self, section):
# Ensure file is loaded
self.load()
# Construct section
return ConfigurationSection(self._parser, section)
def load(self):
if self._parser or self._error:
return
log.debug('Parsing configuration file: %r', self._relpath)
try:
self._parser = SafeConfigParser()
self._parser.read(self._path)
except ParsingError as ex:
log.info(ex.message)
self._parser = None
self._error = True
except Exception as ex:
log.warn('Unable to parse configuration file: %r - %s', self._relpath, ex, exc_info=True)
self._parser = None
self._error = True
class ConfigurationSection(object):
def __init__(self, parser, name):
self._parser = parser
self._name = name
def _get(self, func, key, default=None):
if not self._parser:
return default
if not self._parser.has_option(self._name, key):
return default
try:
return getattr(self._parser, func)(self._name, key)
except (NoSectionError, NoOptionError):
return default
def get(self, key, default=None):
return self._get('get', key, default)
def get_int(self, key, default=None):
return self._get('getint', key, default)
def get_float(self, key, default=None):
return self._get('getfloat', key, default)
def get_boolean(self, key, default=None):
return self._get('getboolean', key, default)
def __getitem__(self, key):
if not self._parser:
return None
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
if not self._parser:
return
self._parser.set(self._name, key, value)
class ConfigurationMeta(type):
def __new__(cls, name, parents, dct):
# Load configuration files
        for config_name in CONFIGURATION_FILES:
            # Build path
            path = os.path.join(Environment.path.plugin_data, '%s.ini' % config_name)
            # Parse configuration file
            dct[config_name] = ConfigurationFile(path)
# Construct object
return super(ConfigurationMeta, cls).__new__(cls, name, parents, dct)
class Configuration(object):
__metaclass__ = ConfigurationMeta
advanced = None
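# Hedged usage sketch (the section and option names below are illustrative):
#   value = Configuration.advanced['my_section'].get_int('my_option', default=10)
# The metaclass replaces `advanced` with a ConfigurationFile, indexing it returns a
# ConfigurationSection, and the get_* helpers fall back to `default` when the file,
# section or option is missing.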
|
47120
|
import pytest
import xarray as xr
from datatree.datatree import DataTree
from datatree.mapping import TreeIsomorphismError, check_isomorphic, map_over_subtree
from datatree.testing import assert_equal
from datatree.treenode import TreeNode
from .test_datatree import create_test_datatree
empty = xr.Dataset()
class TestCheckTreesIsomorphic:
def test_not_a_tree(self):
with pytest.raises(TypeError, match="not a tree"):
check_isomorphic("s", 1)
def test_different_widths(self):
dt1 = DataTree.from_dict(data_objects={"a": empty})
dt2 = DataTree.from_dict(data_objects={"b": empty, "c": empty})
expected_err_str = (
"Number of children on node 'root' of the left object: 1\n"
"Number of children on node 'root' of the right object: 2"
)
with pytest.raises(TreeIsomorphismError, match=expected_err_str):
check_isomorphic(dt1, dt2)
def test_different_heights(self):
dt1 = DataTree.from_dict(data_objects={"a": empty})
dt2 = DataTree.from_dict(data_objects={"b": empty, "b/c": empty})
expected_err_str = (
"Number of children on node 'root/a' of the left object: 0\n"
"Number of children on node 'root/b' of the right object: 1"
)
with pytest.raises(TreeIsomorphismError, match=expected_err_str):
check_isomorphic(dt1, dt2)
def test_names_different(self):
dt1 = DataTree.from_dict(data_objects={"a": xr.Dataset()})
dt2 = DataTree.from_dict(data_objects={"b": empty})
expected_err_str = (
"Node 'root/a' in the left object has name 'a'\n"
"Node 'root/b' in the right object has name 'b'"
)
with pytest.raises(TreeIsomorphismError, match=expected_err_str):
check_isomorphic(dt1, dt2, require_names_equal=True)
def test_isomorphic_names_equal(self):
dt1 = DataTree.from_dict(
data_objects={"a": empty, "b": empty, "b/c": empty, "b/d": empty}
)
dt2 = DataTree.from_dict(
data_objects={"a": empty, "b": empty, "b/c": empty, "b/d": empty}
)
check_isomorphic(dt1, dt2, require_names_equal=True)
def test_isomorphic_ordering(self):
dt1 = DataTree.from_dict(
data_objects={"a": empty, "b": empty, "b/d": empty, "b/c": empty}
)
dt2 = DataTree.from_dict(
data_objects={"a": empty, "b": empty, "b/c": empty, "b/d": empty}
)
check_isomorphic(dt1, dt2, require_names_equal=False)
def test_isomorphic_names_not_equal(self):
dt1 = DataTree.from_dict(
data_objects={"a": empty, "b": empty, "b/c": empty, "b/d": empty}
)
dt2 = DataTree.from_dict(
data_objects={"A": empty, "B": empty, "B/C": empty, "B/D": empty}
)
check_isomorphic(dt1, dt2)
def test_not_isomorphic_complex_tree(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
dt2.set_node("set1/set2", TreeNode("set3"))
with pytest.raises(TreeIsomorphismError, match="root/set1/set2"):
check_isomorphic(dt1, dt2)
def test_checking_from_root(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
dt1.parent = DataTree(name="real_root")
with pytest.raises(TreeIsomorphismError):
check_isomorphic(dt1, dt2, check_from_root=True)
class TestMapOverSubTree:
def test_no_trees_passed(self):
@map_over_subtree
def times_ten(ds):
return 10.0 * ds
with pytest.raises(TypeError, match="Must pass at least one tree"):
times_ten("dt")
def test_not_isomorphic(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
dt2["set4"] = None
@map_over_subtree
def times_ten(ds1, ds2):
return ds1 * ds2
with pytest.raises(TreeIsomorphismError):
times_ten(dt1, dt2)
def test_no_trees_returned(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
@map_over_subtree
def bad_func(ds1, ds2):
return None
with pytest.raises(TypeError, match="return value of None"):
bad_func(dt1, dt2)
def test_single_dt_arg(self):
dt = create_test_datatree()
@map_over_subtree
def times_ten(ds):
return 10.0 * ds
expected = create_test_datatree(modify=lambda ds: 10.0 * ds)
result_tree = times_ten(dt)
assert_equal(result_tree, expected)
def test_single_dt_arg_plus_args_and_kwargs(self):
dt = create_test_datatree()
@map_over_subtree
def multiply_then_add(ds, times, add=0.0):
return (times * ds) + add
expected = create_test_datatree(modify=lambda ds: (10.0 * ds) + 2.0)
result_tree = multiply_then_add(dt, 10.0, add=2.0)
assert_equal(result_tree, expected)
def test_multiple_dt_args(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
@map_over_subtree
def add(ds1, ds2):
return ds1 + ds2
expected = create_test_datatree(modify=lambda ds: 2.0 * ds)
result = add(dt1, dt2)
assert_equal(result, expected)
def test_dt_as_kwarg(self):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
@map_over_subtree
def add(ds1, value=0.0):
return ds1 + value
expected = create_test_datatree(modify=lambda ds: 2.0 * ds)
result = add(dt1, value=dt2)
assert_equal(result, expected)
def test_return_multiple_dts(self):
dt = create_test_datatree()
@map_over_subtree
def minmax(ds):
return ds.min(), ds.max()
dt_min, dt_max = minmax(dt)
expected_min = create_test_datatree(modify=lambda ds: ds.min())
assert_equal(dt_min, expected_min)
expected_max = create_test_datatree(modify=lambda ds: ds.max())
assert_equal(dt_max, expected_max)
def test_return_wrong_type(self):
dt1 = create_test_datatree()
@map_over_subtree
def bad_func(ds1):
return "string"
with pytest.raises(TypeError, match="not Dataset or DataArray"):
bad_func(dt1)
def test_return_tuple_of_wrong_types(self):
dt1 = create_test_datatree()
@map_over_subtree
def bad_func(ds1):
return xr.Dataset(), "string"
with pytest.raises(TypeError, match="not Dataset or DataArray"):
bad_func(dt1)
@pytest.mark.xfail
def test_return_inconsistent_number_of_results(self):
dt1 = create_test_datatree()
@map_over_subtree
def bad_func(ds):
# Datasets in create_test_datatree() have different numbers of dims
# TODO need to instead return different numbers of Dataset objects for this test to catch the intended error
return tuple(ds.dims)
with pytest.raises(TypeError, match="instead returns"):
bad_func(dt1)
def test_wrong_number_of_arguments_for_func(self):
dt = create_test_datatree()
@map_over_subtree
def times_ten(ds):
return 10.0 * ds
with pytest.raises(
TypeError, match="takes 1 positional argument but 2 were given"
):
times_ten(dt, dt)
def test_map_single_dataset_against_whole_tree(self):
dt = create_test_datatree()
@map_over_subtree
def nodewise_merge(node_ds, fixed_ds):
return xr.merge([node_ds, fixed_ds])
other_ds = xr.Dataset({"z": ("z", [0])})
expected = create_test_datatree(modify=lambda ds: xr.merge([ds, other_ds]))
result_tree = nodewise_merge(dt, other_ds)
assert_equal(result_tree, expected)
@pytest.mark.xfail
def test_trees_with_different_node_names(self):
# TODO test this after I've got good tests for renaming nodes
raise NotImplementedError
def test_dt_method(self):
dt = create_test_datatree()
def multiply_then_add(ds, times, add=0.0):
return times * ds + add
expected = create_test_datatree(modify=lambda ds: (10.0 * ds) + 2.0)
result_tree = dt.map_over_subtree(multiply_then_add, 10.0, add=2.0)
assert_equal(result_tree, expected)
@pytest.mark.xfail
class TestMapOverSubTreeInplace:
def test_map_over_subtree_inplace(self):
raise NotImplementedError
|
47146
|
from flask import render_template, url_for
from appname.mailers import Mailer
class InviteEmail(Mailer):
TEMPLATE = 'email/teams/invite.html'
def __init__(self, invite):
self.recipient = None
self.invite = invite
self.recipient_email = invite.invite_email or (invite.user and invite.user.email)
@property
def subject(self):
return ("{0} invited you to join their team on appname"
.format(self.invite.inviter.email))
def send(self):
link = url_for('auth.invite_page', invite_id=self.invite.id,
secret=self.invite.invite_secret, _external=True)
html_body = render_template(self.TEMPLATE, link=link, invite=self.invite)
return self.deliver_now(self.recipient_email, self.subject, html_body)
|
47172
|
import numpy as np
import unittest
from deepblast.dataset.alphabet import UniprotTokenizer
import numpy.testing as npt
class TestAlphabet(unittest.TestCase):
def test_tokenizer(self):
tokenizer = UniprotTokenizer(pad_ends=True)
res = tokenizer(b'ARNDCQEGHILKMFPSTWYVXOUBZ')
# Need to account for padding and offset
exp = np.array([20] + list(range(0, 21)) + [11, 4, 20, 20] + [20])
npt.assert_allclose(res, exp)
def test_tokenizer_encode(self):
tokenizer = UniprotTokenizer(pad_ends=True)
x = 'ARNDCQEGHILKMFPSTWYVXOUBZ'
x = str.encode(x)
res = tokenizer(x)
exp = np.array(
[20, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 11, 4, 20, 20, 20])
npt.assert_allclose(exp, res)
def test_tokenizer_encode_no_padding(self):
tokenizer = UniprotTokenizer(pad_ends=False)
x = 'ARNDCQEGHILKMFPSTWYVXOUBZ'
x = str.encode(x)
res = tokenizer(x)
exp = np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 11, 4, 20, 20])
npt.assert_allclose(exp, res)
if __name__ == '__main__':
unittest.main()
|
47189
|
import numpy as np
from scipy import signal
from .. import MaskSeparationBase
from ...core import utils
from ...core import constants
class Duet(MaskSeparationBase):
"""
The DUET algorithm was originally proposed by S.Rickard and F.Dietrich for DOA
estimation and further developed for BSS and demixing by <NAME>, S.Rickard,
and <NAME>.
DUET extracts sources using the symmetric attenuation and relative delay between
two channels. The symmetric attenuation is calculated from the ratio of the two
channels' stft amplitudes, and the delay is the arrival delay between the two
sensors used to record the audio signal. These two values are clustered as peaks on
a histogram to determine where each source occurs. This implementation of DUET
creates and returns Mask objects after the run() function, which can then be
applied to the original audio signal to extract each individual source.
References:
[1] Rickard, Scott. "The DUET blind source separation algorithm."
Blind Speech Separation. Springer Netherlands, 2007. 217-241.
[2] Yilmaz, Ozgur, and <NAME>. "Blind separation of speech mixtures
via time-frequency masking."
Signal Processing, IEEE transactions on 52.7 (2004): 1830-1847.
Args:
input_audio_signal (np.array): a 2-row Numpy matrix containing samples of the
two-channel mixture.
num_sources (int): Number of sources to find.
attenuation_min (int): Minimum distance in utils.find_peak_indices, change if
not enough peaks are identified.
attenuation_max (int): Used for creating a histogram without outliers.
num_attenuation_bins (int): Number of bins for attenuation.
delay_min (int): Lower bound on delay, used as minimum distance in
utils.find_peak_indices.
delay_max (int): Upper bound on delay, used for creating a histogram without
outliers.
num_delay_bins (int): Number of bins for delay.
peak_threshold (float): Value in [0, 1] for peak picking.
attenuation_min_distance (int): Minimum distance between peaks wrt attenuation.
delay_min_distance (int): Minimum distance between peaks wrt delay.
p (int): Weight the histogram with the symmetric attenuation estimator.
        q (int): Weight the histogram with the delay estimator.
Notes:
On page 8 of his paper, Rickard recommends p=1 and q=0 as a default starting
point and p=.5, q=0 if one source is more dominant.
Attributes:
stft_ch0 (np.array): A Numpy matrix containing the stft data of channel 0.
stft_ch1 (np.array): A Numpy matrix containing the stft data of channel 1.
frequency_matrix (np.array): A Numpy matrix containing the frequencies of
analysis.
symmetric_atn (np.array): A Numpy matrix containing the symmetric attenuation
between the two channels.
delay (np.array): A Numpy matrix containing the delay between the two channels.
num_time_bins (np.array): The number of time bins for the frequency matrix and
mask arrays.
num_frequency_bins (int): The number of frequency bins for the mask arrays.
attenuation_bins (int): A Numpy array containing the attenuation bins for the
histogram.
delay_bins (np.array): A Numpy array containing the delay bins for the histogram.
normalized_attenuation_delay_histogram (np.array): A normalized Numpy matrix
containing the attenuation delay histogram, which has peaks for each source.
attenuation_delay_histogram (np.array): A non-normalized Numpy matrix containing
the attenuation delay histogram, which has peaks for each source.
peak_indices (np.array): A Numpy array containing the indices of the peaks for
the histogram.
separated_sources (np.array): A Numpy array of arrays containing each
separated source.
"""
def __init__(self, input_audio_signal, num_sources,
attenuation_min=-3, attenuation_max=3, num_attenuation_bins=50,
delay_min=-3, delay_max=3, num_delay_bins=50,
peak_threshold=0.0, attenuation_min_distance=5, delay_min_distance=5,
p=1, q=0, mask_type='binary'):
super().__init__(
input_audio_signal=input_audio_signal,
mask_type=mask_type)
self.num_sources = num_sources
self.attenuation_min = attenuation_min
self.attenuation_max = attenuation_max
self.num_attenuation_bins = num_attenuation_bins
self.delay_min = delay_min
self.delay_max = delay_max
self.num_delay_bins = num_delay_bins
self.peak_threshold = peak_threshold
self.attenuation_min_distance = attenuation_min_distance
self.delay_min_distance = delay_min_distance
self.p = p
self.q = q
self.stft_ch0 = None
self.stft_ch1 = None
self.frequency_matrix = None
self.symmetric_atn = None
self.delay = None
self.num_time_bins = None
self.num_frequency_bins = None
self.attenuation_bins = None
self.delay_bins = None
self.normalized_attenuation_delay_histogram = None
self.attenuation_delay_histogram = None
self.peak_indices = None
self.delay_peak = None
self.atn_peak = None
self.separated_sources = None
def run(self):
""" Extracts N sources from a given stereo audio mixture (N sources captured via 2 sensors)
Returns:
computed_masks (np.array): A list of binary mask objects that can be used to extract the sources
Example:
.. code-block:: python
:linenos:
#Import input audio signal
input_file_name = '../Input/dev1_female3_inst_mix.wav'
signal = AudioSignal(path_to_input_file=input_file_name)
# Set up and run Duet
                duet = Duet(signal, num_sources=3, attenuation_min=-3, attenuation_max=3,
                            num_attenuation_bins=50, delay_min=-3, delay_max=3, num_delay_bins=50,
                            peak_threshold=0.2, attenuation_min_distance=5, delay_min_distance=5)
duet.run()
# plot histogram results
duet.plot(os.path.join('..', 'Output', 'duet_2d.png'))
duet.plot(os.path.join('..', 'Output', 'duet_3d.png'), three_d_plot=True)
# Create output file for each source found
output_name_stem = os.path.join('..', 'Output', 'duet_source')
i = 1
for s in duet.make_audio_signals():
output_file_name = f"{output_name_stem}{i}.wav"
s.write_audio_to_file(output_file_name)
i += 1
"""
self.result_masks = []
# Calculate the stft of both channels and create the frequency matrix (the matrix containing the
# frequencies of analysis of the Fourier transform)
self.stft_ch0, self.stft_ch1, self.frequency_matrix = self._compute_spectrogram(
self.sample_rate)
# Calculate the symmetric attenuation (alpha) and delay (delta) for each
# time-freq. point and return a matrix for each
self.symmetric_atn, self.delay = self._compute_atn_delay(
self.stft_ch0, self.stft_ch1, self.frequency_matrix)
# Make histogram of attenuation-delay values and get the center values for the bins in this histogram
self.normalized_attenuation_delay_histogram, self.attenuation_bins, self.delay_bins = (
self._make_histogram()
)
# Find the location of peaks in the attenuation-delay plane
self.peak_indices = utils.find_peak_indices(
self.normalized_attenuation_delay_histogram, self.num_sources,
threshold=self.peak_threshold,
min_dist=[self.attenuation_min_distance, self.delay_min_distance])
# compute delay_peak, attenuation peak, and attenuation/delay estimates
self.delay_peak, atn_delay_est, self.atn_peak = self._convert_peaks(
self.peak_indices)
# compute masks for separation
computed_masks = self._compute_masks()
return computed_masks
def _compute_spectrogram(self, sample_rate):
""" Creates the STFT matrices for channel 0 and 1, and computes the frequency matrix.
Parameter:
sample_rate (integer): sample rate
Returns:
stft_ch0 (np.matrix): a 2D Numpy matrix containing the stft of channel 0
stft_ch1 (np.matrix): a 2D Numpy matrix containing the stft of channel 1
wmat (np.matrix): a 2D Numpy matrix containing the frequencies of analysis of the Fourier transform
"""
# Compute the stft of the two channel mixtures
self.audio_signal.stft_params = self.stft_params
self.audio_signal.stft()
stft_ch0 = self.audio_signal.get_stft_channel(0)
stft_ch1 = self.audio_signal.get_stft_channel(1)
# Compute the freq. matrix for later use in phase calculations
n_time_bins = len(self.audio_signal.time_bins_vector)
wmat = np.array(np.tile(np.mat(
self.audio_signal.freq_vector).T, (1, n_time_bins))) * (
2 * np.pi / sample_rate)
wmat += constants.EPSILON
return stft_ch0, stft_ch1, wmat
@staticmethod
def _compute_atn_delay(stft_ch0, stft_ch1, frequency_matrix):
# Calculate the symmetric attenuation (alpha) and delay (delta) for each
# time-freq. point
inter_channel_ratio = (stft_ch1 + constants.EPSILON) / (stft_ch0 + constants.EPSILON)
attenuation = np.abs(inter_channel_ratio) # relative attenuation between the two channels
symmetric_attenuation = attenuation - 1 / attenuation # symmetric attenuation
relative_delay = -np.imag(np.log(inter_channel_ratio)) / (2 * np.pi * frequency_matrix) # relative delay
return symmetric_attenuation, relative_delay
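    # Worked example for a single (hypothetical) time-frequency bin: if stft_ch1/stft_ch0
    # has magnitude 2, the attenuation is 2 and the symmetric attenuation is 2 - 1/2 = 1.5;
    # the relative delay at that bin is the negated phase of the ratio divided by
    # (2 * pi * frequency_matrix[bin]), exactly as computed above.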
def _make_histogram(self):
"""Receives the stft of the two channel mixtures and the frequency matrix to a create
a smooth and normalized histogram.
Parameters:
stft_ch0 (complex np.array): a 2D Numpy matrix containing the stft of channel 0
stft_ch1 (complex np.array): a 2D Numpy matrix containing the stft of channel 1
symmetric_atn (np.array): the symmetric attenuation between two channels
delay (np.array): the time delay between 2 channels
wmat(np.array): a 2D Numpy matrix containing the frequency matrix of the signal
Returns:
histogram (np.array): a smooth and normalized histogram
atn_bins (np.array): The range of attenuation values distributed into bins
delay_bins (np.array): The range of delay values distributed into bins
"""
# calculate the weighted histogram
time_frequency_weights = (np.abs(self.stft_ch0) * np.abs(self.stft_ch1)) ** self.p * \
(np.abs(self.frequency_matrix)) ** self.q
# only consider time-freq. points yielding estimates in bounds
attenuation_premask = np.logical_and(self.attenuation_min < self.symmetric_atn,
self.symmetric_atn < self.attenuation_max)
delay_premask = np.logical_and(self.delay_min < self.delay, self.delay < self.delay_max)
attenuation_delay_premask = np.logical_and(attenuation_premask, delay_premask)
nonzero_premask = np.nonzero(attenuation_delay_premask)
symmetric_attenuation_vector = self.symmetric_atn[nonzero_premask]
delay_vector = self.delay[nonzero_premask]
time_frequency_weights_vector = time_frequency_weights[nonzero_premask]
bins_array = np.array([self.num_attenuation_bins, self.num_delay_bins])
range_array = np.array([[self.attenuation_min, self.attenuation_max], [self.delay_min, self.delay_max]])
# compute the histogram
histogram, atn_bins, delay_bins = np.histogram2d(symmetric_attenuation_vector, delay_vector,
bins=bins_array, range=range_array,
weights=time_frequency_weights_vector)
# Save non-normalized as an option for plotting later
self.attenuation_delay_histogram = histogram
# Scale histogram from 0 to 1
histogram /= histogram.max()
# smooth the normalized histogram - local average 3-by-3 neighboring bins
histogram = self._smooth_matrix(histogram, np.array([3]))
return histogram, atn_bins, delay_bins
def _convert_peaks(self, peak_indices):
"""Receives the attenuation and delay bins and computes the delay/attenuation
peaks based on the peak finder indices.
Returns:
delay_peak(np.array): The delay peaks determined from the histogram
atn_delay_est (np.array): The estimated symmetric attenuation and delay values
atn_peak (np.array): Attenuation converted from symmetric attenuation
"""
atn_indices = [x[0] for x in peak_indices]
delay_indices = [x[1] for x in peak_indices]
symmetric_atn_peak = self.attenuation_bins[atn_indices]
delay_peak = self.delay_bins[delay_indices]
atn_delay_est = np.column_stack((symmetric_atn_peak, delay_peak))
# convert symmetric_atn to atn_peak using formula from Rickard
atn_peak = (symmetric_atn_peak + np.sqrt(symmetric_atn_peak ** 2 + 4)) / 2
return delay_peak, atn_delay_est, atn_peak
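    # Worked example of the conversion above (hypothetical peak): a symmetric attenuation
    # peak of s = 1.5 maps to atn_peak = (1.5 + sqrt(1.5**2 + 4)) / 2 = 2.0, which undoes
    # the a - 1/a symmetrization used when the histogram was built.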
def _compute_masks(self):
"""Receives the attenuation and delay peaks and computes a mask to be applied to the signal for source
separation.
"""
# compute masks for separation
best_so_far = np.inf * np.ones_like(self.stft_ch0, dtype=float)
for i in range(0, self.num_sources):
mask_array = np.zeros_like(self.stft_ch0, dtype=bool)
phase = np.exp(-1j * self.frequency_matrix * self.delay_peak[i])
score = np.abs(self.atn_peak[i] * phase * self.stft_ch0 - self.stft_ch1) ** 2 / (1 + self.atn_peak[i] ** 2)
mask = (score < best_so_far)
mask_array[mask] = True
background_mask = self.mask_type(np.array(mask_array))
self.result_masks.append(background_mask)
self.result_masks[0].mask = np.logical_xor(self.result_masks[i].mask, self.result_masks[0].mask)
best_so_far[mask] = score[mask]
# Compute first mask based on what the other masks left remaining
self.result_masks[0].mask = np.logical_not(self.result_masks[0].mask)
return self.result_masks
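    # Descriptive note on the loop above: for each peak i, `mask` marks the time-frequency
    # bins whose model error `score` beats every previously tried peak. After the loop,
    # result_masks[0] is recomputed as the logical complement of the other peaks'
    # XOR-accumulated claims, i.e. (assuming each bin is re-claimed at most once) the bins
    # where no later peak improved on peak 0's fit.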
@staticmethod
def _smooth_matrix(matrix, kernel):
"""Performs two-dimensional convolution in order to smooth the values of matrix elements.
(similar to low-pass filtering)
Parameters:
matrix (np.array): a 2D Numpy matrix to be smoothed
kernel (np.array): a 2D Numpy matrix containing kernel values
Note:
if Kernel is of size 1 by 1 (scalar), a Kernel by Kernel matrix of 1/Kernel**2 will be used as the matrix
averaging kernel
Output:
smoothed_matrix (np.array): a 2D Numpy matrix containing a smoothed version of Mat (same size as Mat)
"""
# check the dimensions of the Kernel matrix and set the values of the averaging
# matrix, kernel_matrix
kernel_matrix = np.ones((kernel[0], kernel[0])) / kernel[0] ** 2
krow, kcol = np.shape(kernel_matrix)
# adjust the matrix dimension for convolution
copy_row = int(np.floor(krow / 2)) # number of rows to copy on top and bottom
copy_col = int(np.floor(kcol / 2)) # number of columns to copy on either side
# TODO: This is very ugly. Make this readable.
# form the augmented matrix (rows and columns added to top, bottom, and sides)
matrix = np.mat(matrix) # make sure Mat is a Numpy matrix
augmented_matrix = np.vstack(
[
np.hstack(
[matrix[0, 0] * np.ones((copy_row, copy_col)),
np.ones((copy_row, 1)) * matrix[0, :],
matrix[0, -1] * np.ones((copy_row, copy_col))
]),
np.hstack(
[matrix[:, 0] * np.ones((1, copy_col)),
matrix,
matrix[:, -1] * np.ones((1, copy_col))]),
np.hstack(
                    [matrix[-1, 0] * np.ones((copy_row, copy_col)),
np.ones((copy_row, 1)) * matrix[-1, :],
matrix[-1, -1] * np.ones((copy_row, copy_col))
]
)
]
)
# perform two-dimensional convolution between the input matrix and the kernel
        smoothed_matrix = signal.convolve2d(augmented_matrix, kernel_matrix[::-1, ::-1], mode='valid')
        return smoothed_matrix
|
47190
|
from __future__ import unicode_literals
import os
import django
from django.test import TestCase
from mock import call, patch
from storage.brokers.host_broker import HostBroker
from storage.delete_files_job import delete_files
from storage.test import utils as storage_test_utils
class TestDeleteFiles(TestCase):
def setUp(self):
django.setup()
self.broker = HostBroker()
self.broker.load_configuration({'type': HostBroker().broker_type, 'host_path': '/host/path'})
@patch('storage.brokers.host_broker.os.path.exists')
@patch('storage.brokers.host_broker.os.remove')
def test_delete_file(self, mock_remove, mock_exists):
"""Tests removing a file"""
def new_exists(path):
return True
mock_exists.side_effect = new_exists
volume_path = os.path.join('the', 'volume', 'path')
file_path_1 = os.path.join('my_dir', 'my_file.txt')
file_path_2 = os.path.join('my_dir', 'my_file.json')
full_path_file_1 = os.path.join(volume_path, file_path_1)
full_path_file_2 = os.path.join(volume_path, file_path_2)
file_1 = storage_test_utils.create_file(file_path=file_path_1)
file_2 = storage_test_utils.create_file(file_path=file_path_2)
# Call function
test_1 = delete_files([file_1], volume_path, self.broker)
self.assertEqual(test_1, None)
test_2 = delete_files([file_2], volume_path, self.broker)
self.assertEqual(test_2, None)
# Check results
two_calls = [call(full_path_file_1), call(full_path_file_2)]
mock_remove.assert_has_calls(two_calls)
|
47211
|
import pytest
from demo_project.main import app
from fastapi.testclient import TestClient
openapi_schema = {
'openapi': '3.0.2',
'info': {
'title': 'My Project',
'description': '## Welcome to my API! \n This is my description, written in `markdown`',
'version': '1.0.0',
},
'paths': {
'/api/v1/hello': {
'get': {
'tags': ['hello'],
'summary': 'Say hello',
'description': 'Wonder who we say hello to?',
'operationId': 'helloWorld',
'responses': {
'200': {
'description': 'Successful Response',
'content': {
'application/json': {'schema': {'$ref': '#/components/schemas/HelloWorldResponse'}}
},
}
},
'security': [{'Azure AD - PKCE, Single-tenant': []}],
}
},
'/api/v1/hello-multi-auth': {
'get': {
'tags': ['hello'],
'summary': 'Say hello with an API key',
'description': 'Wonder how this auth is done?',
'operationId': 'helloWorldApiKey',
'responses': {
'200': {
'description': 'Successful Response',
'content': {'application/json': {'schema': {'$ref': '#/components/schemas/TokenType'}}},
}
},
'security': [{'Azure AD - PKCE, Multi-tenant': []}, {'APIKeyHeader': []}],
}
},
},
'components': {
'schemas': {
'HelloWorldResponse': {
'title': 'HelloWorldResponse',
'required': ['hello', 'user'],
'type': 'object',
'properties': {
'hello': {'title': 'Hello', 'type': 'string', 'description': 'What we\'re saying hello to'},
'user': {
'title': 'User',
'allOf': [{'$ref': '#/components/schemas/User'}],
'description': 'The user object',
},
},
},
'TokenType': {
'title': 'TokenType',
'required': ['api_key', 'azure_auth'],
'type': 'object',
'properties': {
'api_key': {'title': 'Api Key', 'type': 'boolean', 'description': 'API key was used'},
'azure_auth': {'title': 'Azure Auth', 'type': 'boolean', 'description': 'Azure auth was used'},
},
},
'User': {
'title': 'User',
'required': ['aud', 'tid', 'claims', 'access_token'],
'type': 'object',
'properties': {
'aud': {'title': 'Aud', 'type': 'string', 'description': 'Audience'},
'tid': {'title': 'Tid', 'type': 'string', 'description': 'Tenant ID'},
'roles': {
'title': 'Roles',
'type': 'array',
'items': {'type': 'string'},
'description': 'Roles (Groups) the user has for this app',
'default': [],
},
'claims': {'title': 'Claims', 'type': 'object', 'description': 'The entire decoded token'},
'scp': {'title': 'Scp', 'type': 'string', 'description': 'Scope'},
'name': {'title': 'Name', 'type': 'string', 'description': 'Name'},
'access_token': {
'title': 'Access Token',
'type': 'string',
'description': 'The access_token. Can be used for fetching the Graph API',
},
},
},
},
'securitySchemes': {
'Azure AD - PKCE, Single-tenant': {
'type': 'oauth2',
'description': '`Leave client_secret blank`',
'flows': {
'authorizationCode': {
'scopes': {
'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation': '**No client secret needed, leave blank**'
},
'authorizationUrl': 'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/authorize',
'tokenUrl': 'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/token',
}
},
},
'Azure AD - PKCE, Multi-tenant': {
            'description': '`Leave client_secret blank`',
'flows': {
'authorizationCode': {
'authorizationUrl': 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize',
'scopes': {
'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation': 'User '
'impersonation'
},
'tokenUrl': 'https://login.microsoftonline.com/common/oauth2/v2.0/token',
}
},
'type': 'oauth2',
},
'APIKeyHeader': {'type': 'apiKey', 'in': 'header', 'name': 'TEST-API-KEY'},
},
},
}
@pytest.fixture
def test_client():
"""
Test client that does not run startup event.
    All these tests fail before we get to loading the OpenID Connect configuration.
"""
yield TestClient(app=app)
def test_openapi_schema(test_client):
response = test_client.get('api/v1/openapi.json')
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_no_token(test_client):
response = test_client.get('/api/v1/hello')
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Not authenticated'}
def test_incorrect_token(test_client):
response = test_client.get('/api/v1/hello', headers={'Authorization': 'Non-existent testtoken'})
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Not authenticated'}
def test_token(test_client):
response = test_client.get('/api/v1/hello', headers={'Authorization': 'Bearer '})
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Invalid token format'}
|
47214
|
import random
import sys
sys.path.append("../../")
from gfxlcd.driver.ssd1306.spi import SPI
from gfxlcd.driver.ssd1306.ssd1306 import SSD1306
def hole(x, y):
o.draw_pixel(x+1, y)
o.draw_pixel(x+2, y)
o.draw_pixel(x+3, y)
o.draw_pixel(x+1, y + 4)
o.draw_pixel(x+2, y + 4)
o.draw_pixel(x+3, y + 4)
o.draw_pixel(x, y + 1)
o.draw_pixel(x+4, y + 1)
o.draw_pixel(x, y + 2)
o.draw_pixel(x+4, y + 2)
o.draw_pixel(x, y + 3)
o.draw_pixel(x+4, y + 3)
drv = SPI()
o = SSD1306(128, 64, drv)
o.init()
o.auto_flush = False
for _ in range(0, 50):
hole(random.randint(2, 120), random.randint(2, 56))
hole(10, 10)
hole(15, 13)
hole(18, 23)
hole(40, 10)
o.flush(True)
# o.fill(0)
#
# o.fill(random.randint(0, 255))
#
# o.draw_pixels(2, 0, 128)
# o.draw_pixels(3, 0, 128)
# o.draw_pixels(7, 0, 128)
# o.draw_pixels(8, 0, 128)
# o.draw_pixels(1, 9, 7)
# o.draw_pixels(9, 9, 7)
# o.draw_pixels(2, 9, 8)
# o.draw_pixels(3, 9, 16)
# o.draw_pixels(4, 9, 33)
# o.draw_pixels(5, 9, 66)
# o.draw_pixels(6, 9, 33)
# o.draw_pixels(7, 9, 16)
# o.draw_pixels(8, 9, 8)
#
# o.draw_pixels(15, 9, 127)
# o.draw_pixels(16, 9, 65)
# o.draw_pixels(17, 9, 65)
# o.draw_pixels(18, 9, 62)
#
# o.draw_pixels(20, 9, 38)
# o.draw_pixels(21, 9, 73)
# o.draw_pixels(22, 9, 73)
# o.draw_pixels(23, 9, 50)
#
# o.draw_pixels(25, 9, 127)
# o.draw_pixels(26, 9, 9)
# o.draw_pixels(27, 9, 9)
# o.draw_pixels(28, 9, 6)
#
# o.draw_pixels(30, 9, 98)
# o.draw_pixels(31, 9, 81)
# o.draw_pixels(32, 9, 73)
# o.draw_pixels(33, 9, 70)
#
# o.draw_pixels(35, 9, 62)
# o.draw_pixels(36, 9, 65)
# o.draw_pixels(37, 9, 65)
# o.draw_pixels(38, 9, 62)
#
# o.draw_pixels(40, 9, 4)
# o.draw_pixels(41, 9, 2+64)
# o.draw_pixels(42, 9, 127)
# o.draw_pixels(43, 9, 64)
#
# o.draw_pixels(40, 9, 4)
# o.draw_pixels(41, 9, 2+64)
# o.draw_pixels(42, 9, 127)
# o.draw_pixels(43, 9, 64)
#
# o.draw_pixels(45, 9, 97)
# o.draw_pixels(46, 9, 25)
# o.draw_pixels(47, 9, 5)
# o.draw_pixels(48, 9, 3)
|
47232
|
from __future__ import annotations
import os
import signal
import subprocess
import sys
import time
from multiprocessing import cpu_count
from typing import List, Union
import click
from .__version__ import __version__
from .routing.commands import display_urls
from .utils import F, import_from_string, import_module
def execute(command: Union[List[str], str]) -> int:
if isinstance(command, str):
command = command.split(" ")
click.echo("Execute command: ", nl=False)
click.secho(" ".join(command), fg="green")
process = subprocess.Popen(command, shell=False)
def sigint_handler(signo, frame):
process.terminate()
process.wait()
signal.signal(signal.SIGTERM, sigint_handler)
while process.poll() is None:
time.sleep(1)
return process.returncode
@click.group(help=f"Index.py {__version__}")
def index_cli():
pass
try:
import hypercorn
except ImportError:
pass
else:
@click.command(help="use hypercorn to run Index.py application")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.option(
"--worker-class",
"-k",
default="asyncio",
type=click.Choice(["asyncio", "uvloop", "trio"]),
show_choices=True,
show_default=True,
)
@click.option(
"--configuration",
"-c",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.argument("application")
def hypercorn_cli(
worker_class: str,
configuration: str,
application: str,
bind: str,
log_level: str,
):
sys.path.insert(0, os.getcwd())
asgi_app = import_from_string(application)
config = hypercorn.Config()
if configuration is not None:
if configuration.endswith(".py"):
config.from_pyfile(configuration)
elif configuration.endswith(".toml"):
config.from_toml(configuration)
else:
click.secho(
"Please use configuration file path endswith `.py` or `.toml`.",
fg="red",
)
raise SystemExit(1)
config.bind = [bind]
config.loglevel = log_level.upper()
config.worker_class = worker_class
create_signal_handle = lambda shutdown_event: lambda sig, frame: (
setattr(asgi_app, "should_exit", True), # type: ignore
shutdown_event.set(),
)
if worker_class == "uvloop":
import uvloop
uvloop.install()
if worker_class in ("asyncio", "uvloop"):
import asyncio
from hypercorn.asyncio import serve
loop = asyncio.get_event_loop()
shutdown_event = asyncio.Event(loop=loop)
for sig in {signal.SIGINT, signal.SIGTERM}:
signal.signal(sig, create_signal_handle(shutdown_event))
loop.run_until_complete(
serve(asgi_app, config, shutdown_trigger=shutdown_event.wait) # type: ignore
)
else:
import trio
from hypercorn.trio import serve # type: ignore
shutdown_event = trio.Event()
for sig in {signal.SIGINT, signal.SIGTERM}:
signal.signal(sig, create_signal_handle(shutdown_event))
trio.run(serve(asgi_app, config, shutdown_trigger=shutdown_event.wait)) # type: ignore
index_cli.add_command(hypercorn_cli, name="hypercorn")
try:
import uvicorn
except ImportError:
pass
else:
from .applications import Index
# See https://stackoverflow.com/questions/58133694/graceful-shutdown-of-uvicorn-starlette-app-with-websockets
origin_handle_exit = uvicorn.Server.handle_exit
def handle_exit(self: uvicorn.Server, sig, frame):
application = self.config.loaded_app
while not isinstance(application, Index):
application = application.app
application.should_exit = True
return origin_handle_exit(self, sig, frame)
uvicorn.Server.handle_exit = handle_exit
@click.command(help="use uvicorn to run Index.py application")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option("--autoreload/--no-autoreload", default=True, show_default=True)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.argument("application")
def uvicorn_cli(application: str, bind: str, autoreload: bool, log_level: str):
sys.path.insert(0, os.getcwd())
if bind.startswith("unix:"):
bind_config = {"uds": bind[5:] | F(os.path.normpath) | F(os.path.abspath)}
if autoreload:
click.secho(
"Reload option doesnt work with unix sockets "
"in uvicorn: https://github.com/encode/uvicorn/issues/722",
fg="yellow",
)
elif bind.startswith("fd://"):
bind_config = {"fd": int(bind[5:])}
if autoreload:
click.secho(
"Reload option doesnt work with fd "
"in uvicorn: https://github.com/encode/uvicorn/issues/368",
fg="yellow",
)
else:
if ":" in bind:
host, port = bind.split(":")
bind_config = {"host": host, "port": int(port)}
else:
bind_config = {"host": bind, "port": 4190}
uvicorn.run(
application,
**bind_config,
log_level=log_level,
interface="asgi3",
lifespan="on",
reload=autoreload,
)
index_cli.add_command(uvicorn_cli, "uvicorn")
try:
import gunicorn
assert gunicorn.version_info > (20, 1)
del gunicorn
except ImportError:
pass
else:
MASTER_PID_FILE = ".gunicorn.pid"
def read_gunicorn_master_pid(pid_file: str = MASTER_PID_FILE) -> int:
try:
with open(os.path.join(os.getcwd(), pid_file), "r") as file:
return int(file.read())
except FileNotFoundError:
sys.exit(
(
f'File "{pid_file}" not found, '
+ "please make sure you have started gunicorn using the "
+ "`index-cli gunicorn start ...`."
)
)
@click.group(help="use gunicorn to run Index.py application")
def gunicorn_cli():
pass
@gunicorn_cli.command(help="Run gunicorn")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option("--autoreload/--no-autoreload", default=False, show_default=True)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.option("--workers", "-w", default=cpu_count(), show_default=True)
@click.option(
"--worker-class",
"-k",
default="uvicorn.workers.UvicornWorker",
show_default=True,
)
@click.option("--daemon", "-d", default=False, is_flag=True, show_default=True)
@click.option(
"--configuration",
"-c",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.argument("application")
def start(
workers: int,
worker_class: str,
daemon: bool,
configuration: str,
application: str,
bind: str,
autoreload: bool,
log_level: str,
):
command = (
f"{sys.executable} -m gunicorn -k {worker_class}"
+ f" --bind {bind}"
+ f" --chdir {os.getcwd()}"
+ f" --workers {workers}"
+ f" --pid {MASTER_PID_FILE}"
+ f" --log-level {log_level}"
)
args = command.split(" ")
if daemon:
args.extend("-D --log-file gunicorn.log".split(" "))
if autoreload:
args.append("--reload")
if configuration:
args.append("-c")
args.append(configuration.strip())
args.append(application)
execute(args)
# Gunicorn signal handler
# https://docs.gunicorn.org/en/stable/signals.html
@gunicorn_cli.command(help="Increment the number of processes by one")
def incr():
os.kill(read_gunicorn_master_pid(), signal.SIGTTIN)
@gunicorn_cli.command(help="Decrement the number of processes by one")
def decr():
os.kill(read_gunicorn_master_pid(), signal.SIGTTOU)
@gunicorn_cli.command(help="Stop gunicorn processes")
@click.option("--force", "-f", default=False, is_flag=True)
def stop(force):
os.kill(read_gunicorn_master_pid(), signal.SIGINT if force else signal.SIGTERM)
@gunicorn_cli.command(help="Reload configuration and recreate worker processes")
def reload():
os.kill(read_gunicorn_master_pid(), signal.SIGHUP)
@gunicorn_cli.command(help="Restart gunicorn master processes and worker processes")
@click.option("--force-stop", "-f", default=False, is_flag=True)
def restart(force_stop):
oldpid = read_gunicorn_master_pid()
os.kill(oldpid, signal.SIGUSR2)
# Waiting for starting new master process and worker processes
while not os.path.exists(os.path.join(os.getcwd(), MASTER_PID_FILE + ".2")):
time.sleep(0.5)
# Stop old master process and worker processes
os.kill(oldpid, signal.SIGINT if force_stop else signal.SIGTERM)
index_cli.add_command(gunicorn_cli, "gunicorn")
index_cli.add_command(display_urls, "display-urls")
import_module("commands")
|
47243
|
from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
import collections
import os
import yaml
from os.path import dirname, basename
from .env import environ
from ..log import logger
def reader(fn):
logger.debug('loading', f=fn)
try:
tmplenv = Environment(loader=FileSystemLoader(dirname(fn)))
tmpl = tmplenv.get_template(str(basename(fn)))
part = tmpl.render(**environ)
data = yaml.load(part)
return data
except TemplateNotFound:
logger.warn('Template not found', file=fn)
except Exception:
logger.exception('config')
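# Minimal usage sketch (illustrative; "config.yml" is a hypothetical Jinja2
# template rendered against `environ` before being parsed as YAML):
#   settings = reader("config.yml")   # returns the parsed data, or None on error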
|
47271
|
from datetime import datetime
import sys
sys.path.insert(1, r'C:\Users\ASUS\Desktop\sources\Telegram\werewolf\Darkhelper\2\V2\Databases')
from Databases.Groups import GroupsPlayersBase , GroupsBase , GroupsControlBase
from Databases.Groups.Bet import BetBase
from Databases.Users import AdminsBase
from Databases.Users.AfksBase import Set_All_Group_AFK_Zero
from Databases.Stats import AdminStatsBase , GroupStatsBase
from Classes.Statics import Statics
from Databases.Users.UsersBase import Show_Group_ALL_User_Points , Show_All_user_Points
from Databases.Users.ShekarsBase import Delete_Shekar
class Group:
def __init__(self,Chat_id : int):
Details=GroupsBase.Show_Group_Features(int(Chat_id))
self.All_Atrebeutes=Details
self.chat_id=int(Chat_id)
self.Main = int(Details['group_id'])
self.Support = int(Details['support_id'])
self.Subscription_Date=str(Details['tamdid_date'])
self.Deadline=int(Details['davazdah'])
self.Auto_Tag=int(Details['auto_tag'])
self.Auto_DeleteTag=int(Details['auto_del'])
self.Auto_Tag_Support=int(Details['auto_tag_sup'])
self.Auto_DeleteTag_Sup=int(Details['auto_del_sup'])
self.Alarm=int(Details['alarm'])
self.Bet=int(Details['bet'])
self.Least_State=int(Details['state'])
self.State_Lock=int(Details['state_lock'])
self.Warn=int(Details['warn'])
#--------------------------------------|
# 0 - onyx |
# 1 - werewolf |
# 2 - black |
self.Bot_Kind=int(Details['bot_kind'])#|
#--------------------------------------|
self.Mute_Fun=int(Details['fun_mute'])
self.Auto_nextGame=int(Details['auto_next_game'])
self.NextGame_Response=int(Details['next_game_response'])
self.emoji1=str(Details['emoji1'])
self.emoji2=str(Details['emoji2'])
self.emoji3=str(Details['emoji3'])
self.Sooti=int(Details['sooti'])
self.Admin_Alarm=int(Details['admin_Alarm'])
self.Ghaleb=str(Details['ghaleb'])
self.JoinTime_Alarm=int(Details['jointime_sup'])
self.Dead_NextGame=int(Details['dead_next'])
self.Shekar_Pin=int(Details['pin_shekar'])
self.Nazer_pin=int(Details['pin_nazer'])
self.List_Pin=int(Details['pin_list'])
self.Role_Saver=int(Details['role_saver'])
self.Questions=int(Details['questions'])
self.Bors=int(Details['bors'])
self.Message_State=int(Details['message_state'])
self.Next_Message_Id=int(Details['auto_next_message_id'])
self.is_Question_Sended=int(Details['question_sended'])
self.Auto_Start=int(Details['auto_start'])
self.Afk_Warn=int(Details['afk_warn'])
self.Is_Join_Time=int(Details['join_time'])
self.Is_Tagging=int(Details['is_tagging'])
self.Is_Time_For_Question=bool(Details['Its_Question_Time'])
self.Players_Lock_Only=int(Details['players_state_lock'])
#-----------------------------------------------------------
Controls=GroupsControlBase.Show_Group_Control_Features(self.Main)
self.All_Controls=Controls
self.Welcome_Turn=int(Controls['welcometurn'])
self.Anti_Spam=int(Controls['anti_spam'])
self.Anti_Robot=int(Controls['anti_robot'])
self.Anti_NFSW=int(Controls['fosh_filter'])
self.Anti_Tabchi=int(Controls['anti_tabchi'])
self.Channel =str(Controls['channel'])
self.Channel_Lock=int(Controls['channellock'])
self.Group_Lock=int(Controls['lock'])
self.Voice_Lock=int(Controls['voice_lock'])
self.Sticker_Lock=int(Controls['sticker_lock'])
self.Photo_Lock=int(Controls['photo_lock'])
self.Link_Lock=int(Controls['link_lock'])
self.Forward_Lock=int(Controls['forward_lock'])
self.Video_Lock=int(Controls['video_lock'])
self.Service_Lock=int(Controls['service_lock'])
self.Spam_Count=int(Controls['spam_count'])
self.Welcome=str(Controls['welcome'])
self.Channel_Text=str(Controls['channel_text'])
#-----------------------------------------porn
self.Porn=str(Controls['porn'])
#-----------------------------
Controls=Controls['Filters']
self.Porn_All_Filters=Controls
self.Porn_Dick_Filter=str(Controls['dick'])
self.Porn_Pussy_Filter=str(Controls['pussy'])
self.Porn_Coverd_Pussy_Filter=str(Controls['coveredpossy'])
self.Porn_FBoobs_Filter=str(Controls['fboobs'])
self.Porn_MBoobs_Filter=str(Controls['mboobs'])
self.Porn_CoveredBoobs_Filter=str(Controls['coveredboobs'])
self.Porn_Stomach_Filter=str(Controls['stomack'])
self.Porn_ZirBaghal_Filter=str(Controls['baghal'])
self.Porn_Ass_Filter=str(Controls['ass'])
self.Porn_Feet_Filter=str(Controls['feet'])
self.Porn_Covered_ASS_Filter=str(Controls['coveredass'])
#-----------------------------------------------------------------
@property
def All_Players(self):
return Show_All_user_Points()
@property
def All_Group_Players(self):
return Show_Group_ALL_User_Points(self.Main)
async def Get_Players_usernames(self,bot,lists):
for i in lists:
try:
user=await bot.get_users(i)
if user.username :
yield user.mention
except:pass
#-----------------------------------------------------------------
def __int__(self) -> int:
return int(self.Support)
def __str__(self) -> str:
return str(self.Main)
#-----------------------------------------------------------------
@property
def Show_Istagging(self):
return GroupsBase.Show_one_feature('is_tagging',self.chat_id)
@property
def Show_JoinTime(self):
return GroupsBase.Show_one_feature('join_time',self.chat_id)
@property
def Join_time_Started(self):
GroupsBase.Change_Group_Feature(self.Main , 'join_time' , 1)
return 1
@property
def Join_time_Finished(self):
GroupsBase.Change_Group_Feature(self.Main , 'join_time' , 0)
return 0
#-----------------------------------------------------------------
@property
def Show_All_Admins_Points(self):
return AdminStatsBase.Show_Gap_All_Admins_Points(self.Main)
@property
def Show_Today_Admins_Points(self):
return AdminStatsBase.Show_Gap_All_Admins_Points_Today(self.Main)
@property
def Admins(self):
admins=AdminsBase.Show_All_Admins(self.Main)
return [ admins , len(admins) ]
@property
def Show_Owner(self):
return int(AdminsBase.Show_Owner(self.Main))
#-----------------------------------------------------------------
@property
def Show_Emojis(self):
return [ self.emoji1 , self.emoji2 , self.emoji3 ]
@property
def Show_Welcome(self):
wel=self.Welcome
if wel == 'none':
return None
else:return wel
@property
def Show_Ghaleb(self):
ghlb=self.Ghaleb
if ghlb == 'none':
return None
else:return ghlb
@property
def Show_Channel(self):
chnl=GroupsControlBase.Show_Channel(self.Main)
if chnl == 'none':
return None
else:return chnl
@property
def Show_Next_Game_Text(self):
if self.Bot_Kind ==0:return ' /nextgame@OnyxWereBetaBot '
elif self.Bot_Kind ==1:return ' /nextgame@werewolfbot '
elif self.Bot_Kind ==2:return ' /nextgame@Blackwwrobot \n /nextgame@blackwerewolfbot '
#-----------------------------------------------------------------
def Turn_Welcome_Turn(self):
if self.Welcome_Turn:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'welcometurn' , x)
return x
def Turn_Covered_Ass_Filter_Lock(self):
if self.Porn_Covered_ASS_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredass' , x)
return x
def Turn_Dick_Filter_Lock(self):
if self.Porn_Dick_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'dick' , x)
return x
def Turn_pussy_Filter_Lock(self):
if self.Porn_Pussy_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'pussy' , x)
return x
def Turn_CoveredPussy_Filter_Lock(self):
        if self.Porn_Coverd_Pussy_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredpossy' , x)
return x
def Turn_FBoobs_Filter_Lock(self):
if self.Porn_FBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'fboobs' , x)
return x
def Turn_MBoobs_Filter_Lock(self):
if self.Porn_MBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'mboobs' , x)
return x
def Turn_Covers_Boobs_Filter_Lock(self):
if self.Porn_CoveredBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredboobs' , x)
return x
def Turn_Stomach_Filter_Lock(self):
if self.Porn_Stomach_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'stomack' , x)
return x
def Turn_ZirBaghal_Filter_Lock(self):
if self.Porn_ZirBaghal_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'baghal' , x)
return x
def Turn_Ass_Filter_Lock(self):
if self.Porn_Ass_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'ass' , x)
return x
def Turn_Feet_Filter_Lock(self):
if self.Porn_Feet_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'feet' , x)
return x
#-----------------------------------------------------------------
def Turn_Video_Lock(self):
if self.Video_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'video_lock' , x)
return x
def Turn_Service_Lock(self):
if self.Service_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'service_lock' , x)
return x
def Turn_Voice_Lock(self):
if self.Voice_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'voice_lock' , x)
return x
def Turn_Sticker_Lock(self):
if self.Sticker_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'sticker_lock' , x)
return x
def Turn_Photo_Lock(self):
if self.Photo_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'photo_lock' , x)
return x
def Turn_Link_Lock(self):
if self.Link_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'link_lock' , x)
return x
def Turn_Forward_Lock(self):
if self.Forward_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'forward_lock' , x)
return x
def Set_Anti_Spam(self,x):
        if self.Anti_Spam:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_spam' , x)
return x
def Turn_Anti_Robot(self):
if self.Anti_Robot:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_robot' , x)
return x
def Turn_Anti_Porn(self):
if self.Porn:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'porn' , x)
return x
def Turn_Anti_NFSW(self):
if self.Anti_NFSW:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'fosh_filter' , x)
return x
def Turn_Anti_Tabchi(self):
if self.Anti_Tabchi:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_tabchi' , x)
return x
def Set_Channel(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channel' , x)
return x
def Set_Channel_text(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channel_text' , x)
return x
def Set_Welcome(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'welcome' , x)
return x
def Set_Spam_Count(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'spam_count' , x)
return x
def Turn_Channel_Lock(self):
if self.Channel_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channellock' , x)
return x
def Turn_Lock(self):
if self.Group_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'lock' , x)
return x
#--------------------------------------------------------------------------
def Change_Message_State(self):
if self.Message_State:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'message_state' , x)
return x
def Change_Bors(self):
if self.Bors:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'bors' , x)
return x
def Change_Questions(self):
if self.Questions:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'questions' , x)
return x
def Change_Role_Saver(self):
if self.Role_Saver:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'role_saver' , x)
return x
def Change_Nazer_pin(self):
if self.Nazer_pin:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'pin_nazer' , x)
return x
def Change_Shekar_Pin(self):
if self.Shekar_Pin:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'pin_shekar' , x)
return x
def Change_Dead_NextGame(self):
if self.Dead_NextGame:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'dead_next' , x)
return x
def Change_JoinTime_Alarm(self):
if self.JoinTime_Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'jointime_sup' , x)
return x
def Set_Ghaleb(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'ghaleb' , x)
return x
def Set_Next_Message_Id(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'auto_next_message_id' , x)
return x
def Change_Afk_Warn(self):
if self.Afk_Warn:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'afk_warn' , x)
return x
def Change_Admin_Alarm(self):
if self.Admin_Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'admin_Alarm' , x)
return x
def Change_Sooti(self):
if self.Sooti:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'sooti' , x)
return x
def Set_emoji1(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji1' , x)
return x
def Set_emoji2(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji2' , x)
return x
def Set_emoji3(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji3' , x)
return x
def Change_NextGame_Response(self):
if self.NextGame_Response:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'next_game_response' , x)
return x
def DeadLine_Ends(self):
GroupsBase.Change_Group_Feature(self.Main , 'davazdah' , 0)
return True
def Change_Auto_NextGame(self):
if self.Auto_nextGame:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_next_game' , x)
return x
def Change_Mute_Fun(self):
if self.Mute_Fun:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'fun_mute' , x)
return x
def Change_Bot_Kind(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'bot_kind' , x)
return x
def Set_Warn(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'warn' , x)
return x
def Change_State_Lock(self,x):
GroupsBase.Change_Group_Feature(self.Main , 'state_lock' , x)
return x
def Set_State(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'state' , x)
return x
def Change_Bet(self):
if self.Bet:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'bet' , x)
return x
def Change_Auto_Tag(self):
if self.Auto_Tag:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_tag' , x)
return x
def Change_Auto_DeleteTag(self):
if self.Auto_DeleteTag:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_del' , x)
return x
def Change_Auto_Tag_Support(self):
if self.Auto_Tag_Support:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_tag_sup' , x)
return x
def Change_Auto_DeleteTag_Sup(self):
if self.Auto_DeleteTag_Sup:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_del_sup' , x)
return x
def Change_Alarm(self):
if self.Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'alarm' , x)
return x
def Change_Auto_Start(self):
        if self.Auto_Start:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_start' , x)
return x
def Tag_Started(self):
GroupsBase.Change_Group_Feature(self.Main , 'is_tagging' , 1)
return
def Tag_Stopped(self):
GroupsBase.Change_Group_Feature(self.Main , 'is_tagging' , 0)
return
#------------------------------------------------------------------------|
def Manual_Control_Change(self,row,amnt): #|
if amnt: #|
x=0 #|
else: #|
x=1 #|
GroupsControlBase.Change_Group_Control_Feature(self.Main , row , x) #|
return x #|
#|
def Manual_Change(self,row,amnt): #|
if amnt: #|
x=0 #|
else: #|
x=1 #|
GroupsBase.Change_Group_Feature(self.Main , row , x) #|
return x #|
#------------------------------------------------------------------------|
def Reset_AFKS(self):
Set_All_Group_AFK_Zero( self.Main )
return True
def END_Bet(self , team : int ):
x=BetBase.win( team , self.Main )
return x
def Game_Started(self,hash,Join_Time,players):
'time,players,main,hour,afk,hash,date'
GroupStatsBase.Add_Game(Join_Time,players,self.Main,int((datetime.now()).hour),hash)
return True
def Add_Game_AFk(self , hash):
GroupStatsBase.Add_AFK(self.Main , hash)
return True
@property
def Last_Match(self):
return GroupStatsBase.Show_Group_State_last_Game(self.Main)
@property
def Show_Games(self):
return GroupStatsBase.Show_Group_State_All_Time(self.Main)
@property
def Show_Games_Today(self):
return GroupStatsBase.Show_Group_State_Today(self.Main)
def Is_Expired(self):
        another_day = datetime.strptime(self.Subscription_Date, "%Y-%m-%d")
        Day = datetime.now()
if Day < another_day:
return False
else : return True
#-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0|
def Tamdid(self,Date=30): #0|
GroupsBase.Add_Group(self.Main , self.Support , Date) #0|
return #0|
#-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0|
def Question_Sended(self):
GroupsBase.Change_Group_Feature(self.Main , 'question_sended' , 1)
return 1
def Question_Answered(self):
GroupsBase.Change_Group_Feature(self.Main , 'question_sended' , 0)
return 0
#----------------------------------------------------------------------------
def Its_Question_time(self):
GroupsBase.Change_Group_Feature(self.Main , 'Its_Question_Time' , 1)
return 1
def Question_Time_Passes(self):
GroupsBase.Change_Group_Feature(self.Main , 'Its_Question_Time' , 0)
return 0
#-------------------------------------------------------------------------
@property
def Show_Players(self):
return GroupsPlayersBase.Show_Players(self.Main)
def Delete_Players(self):
GroupsPlayersBase.Delete_All_Players(self.Main )
return True
async def Add_Players(self,id_list,bot):
for i in id_list:
name=(await bot.get_users(i)).first_name
GroupsPlayersBase.insert_players(i,self.Main,name)
return True
@property
def Show_Deads_Name(self):
return GroupsPlayersBase.Show_Dead_Players_Name(self.chat_id)
@property
def Zerib(self):
plyrs=self.Show_Players
deads=0
alives=0
all=len(plyrs)
for i in plyrs:
if int(i[1])==1:alives+=1
else:deads+=1
if alives>=40:
zarib_bet=float((all+alives)/(deads+30))
elif alives>35:
zarib_bet=float((all+alives)/(deads+26))
elif alives>=30:
zarib_bet=float((all+alives)/(deads+25))
elif alives>=25:
zarib_bet=float((all+alives)/(deads+23))
elif alives>=15:
zarib_bet=float((all+alives)/(deads+21))
elif alives>=10:
zarib_bet=float((all+alives)/(deads+16))
elif alives>=5:
zarib_bet=float((all+alives)/(deads+13))
elif alives<5:
zarib_bet=0.01
return zarib_bet
@property
def Team_Zarib(self):
Zr=float(self.Zerib)
return [Zr * 0.70 ,Zr * 0.80 ,Zr * 0.90 ,Zr ,Zr * 1 ,Zr * 1.05 ,Zr * 1.5 ]
@property
def Group_Teams(self):
if self.Bot_Kind==0:
Role='🧛🏻♀️ ومپایر 🧛🏻♀️'
elif self.Bot_Kind==1:
Role='💖 لاور 💖'
else:
Role='💣 بمبر 💣'
return {0:'👩🏻🦰👨🏻🦱 روستا 👩🏻🦰👨🏻🦱'
,1:'👥 فرقه 👥'
,2:'🐺 گرگ 🐺'
,3:'🔪 قاتل 🔪'
,4:Role
,5:'🔥 آتش 🔥'
,6:'👺 منافق 👺'}
@property
def Start_Command(self):
if self.Bot_Kind ==0: return '/startmighty@OnyxWereBetaBot'
elif self.Bot_Kind ==1: return '/startchaos@werewolfbot'
elif self.Bot_Kind ==2: return '/startmighty@Blackwwrobot'
def delete(self):
GroupsBase.Delete_Group(self.Main)
return True
def Delete_Shekar(self):
Delete_Shekar(self.Main)
return True
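# Minimal usage sketch (illustrative only; the chat id is hypothetical and the
# Databases backends must already contain a row for this group):
#   group = Group(-1001234567890)
#   if group.Is_Expired():
#       group.delete()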
|
47295
|
import os
os.system("sudo apt-get update")
#os.system('sudo apt-get install openjdk-8-jre -y')
os.system('wget --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
os.system('tar -zxf jdk-8u131-linux-x64.tar.gz')
# `os.system("export ...")` only affects a short-lived subshell; set the
# variables on this process so later subprocesses (e.g. h2o's JVM) inherit them.
os.environ['JAVA_HOME'] = '/home/jupyter/Predict-Churn/jdk1.8.0_131/'
os.environ['PATH'] = os.environ['JAVA_HOME'] + 'bin:' + os.environ['PATH']
os.system('pip install http://h2o-release.s3.amazonaws.com/h2o/rel-ueno/5/Python/h2o-3.10.4.5-py2.py3-none-any.whl')
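# Illustrative follow-up once the wheel above has installed (h2o picks up the
# JAVA_HOME/PATH set earlier in this process):
#   import h2o
#   h2o.init()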
|
47320
|
import torch
import torch.nn.functional as F
from cogdl.utils import spmm
from . import BaseLayer
class GINELayer(BaseLayer):
r"""The modified GINConv operator from the `"Graph convolutions that can finally model local structure" paper
<https://arxiv.org/pdf/2011.15069.pdf>`__.
Parameters
----------
    apply_func : callable, optional
        Layer or function applied to update the node features.
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINELayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
# m = self.message(x[graph.edge_index[0]], graph.edge_attr)
# out = self.aggregate(graph, m)
out = spmm(graph, x)
out += (1 + self.eps) * x
if self.apply_func is not None:
out = self.apply_func(out)
return out
def message(self, x, attr):
return F.relu(x + attr)
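# Minimal usage sketch (illustrative; assumes a cogdl `Graph` object `g` whose
# node features `x` have width 16; the MLP below is an arbitrary example):
#   mlp = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
#   layer = GINELayer(apply_func=mlp, eps=0.0, train_eps=True)
#   out = layer(g, x)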
|
47338
|
class Class:
def __init__(self, name: str):
self.name = name
class Instance:
def __init__(self, cls: Class):
self.cls = cls
self._fields = {}
def get_attr(self, name: str):
if name not in self._fields:
            raise AttributeError(f"'{self.cls.name}' has no attribute '{name}'")
return self._fields[name]
def set_attr(self, name: str, value):
self._fields[name] = value
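
if __name__ == "__main__":
    # Small self-contained demo of the attribute model above (illustrative only).
    point_cls = Class("Point")
    p = Instance(point_cls)
    p.set_attr("x", 3)
    assert p.get_attr("x") == 3
    try:
        p.get_attr("y")
    except AttributeError as exc:
        print(exc)  # 'Point' has no attribute 'y'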
|
47339
|
import asyncio
from .utils import NamespacedClient
from .utils import _make_path
default = object()
def _decode_text(s):
return s
class CatClient(NamespacedClient):
@asyncio.coroutine
def aliases(self, *, name=default, h=default, help=default,
local=default, master_timeout=default, v=default):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cat-alias.html>`_
:arg name: A comma-separated list of alias names to return
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to
master node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if name is not default:
params['name'] = name
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'aliases', name),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def allocation(self, node_id=None, *,
h=default, help=default, local=default,
master_timeout=default, v=default):
"""
Allocation provides a snapshot of how shards have located around the
cluster and the state of disk usage.
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET',
_make_path('_cat', 'allocation', node_id),
params=params, decoder=_decode_text)
return data
@asyncio.coroutine
def count(self, index=None, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
Count provides quick access to the document count of the entire
cluster, or individual indices.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-count.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to
master node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'count', index),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def health(self, *, h=default, help=default, local=default,
master_timeout=default, ts=default, v=default):
"""
health is a terse, one-line representation of the same information from
:meth:`~elasticsearch.client.cluster.ClusterClient.health` API
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-health.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to
master node
:arg ts: Set to false to disable timestamping, default True
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if ts is not default:
params['ts'] = bool(ts)
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'health'),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def help(self, *, help=default):
"""A simple help for the cat api."""
params = {}
if help is not default:
params['help'] = bool(help)
_, data = yield from self.transport.perform_request(
'GET', '/_cat', params=params, decoder=_decode_text)
return data
@asyncio.coroutine
def indices(self, index=None, *, bytes=default, h=default, help=default,
local=default, master_timeout=default, pri=default, v=default):
"""
The indices command provides a cross-section of each index.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to
master node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if bytes is not default:
params['bytes'] = bytes
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if pri is not default:
params['pri'] = bool(pri)
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'indices', index),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def master(self, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
Displays the master's node ID, bound IP address, and node name.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-master.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to
master node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'master'),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def nodes(self, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
The nodes command shows the cluster topology.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-nodes.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', '/_cat/nodes',
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def recovery(self, index=None, *, bytes=default, h=default, help=default,
local=default, master_timeout=default, v=default):
"""
recovery is a view of shard replication.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-recovery.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if bytes is not default:
params['bytes'] = bytes
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'recovery', index),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def shards(self, index=None, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
The shards command is the detailed view of what nodes
contain which shards.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-shards.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'shards', index),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def segments(self, index=None, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
The segments command is the detailed view of Lucene segments per index.
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'segments', index),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def pending_tasks(self, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
pending_tasks provides the same information as the
:meth:`~elasticsearch.client.cluster.ClusterClient.pending_tasks` API
in a convenient tabular format.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-pending-tasks.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', '/_cat/pending_tasks',
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def thread_pool(self, *, full_id=default, h=default, help=default,
local=default, master_timeout=default, v=default):
"""
Get information about thread pools.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-thread-pool.html>`_
:arg full_id: Enables displaying the complete node ids (default:false)
:arg h: Comma-separated list of column names to display
:arg help: Return help information (default: 'false')
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers (default: 'false')
"""
params = {}
if full_id is not default:
params['full_id'] = bool(full_id)
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', '/_cat/thread_pool',
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def fielddata(self, *, fields=default, bytes=default, h=default,
help=default, local=default, master_timeout=default,
v=default):
"""
Shows information about currently loaded fielddata on a per-node basis.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-fielddata.html>`_
:arg fields: A comma-separated list of fields to return the fielddata
size
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information (default: 'false')
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers (default: 'false')
"""
params = {}
if fields is not default:
params['fields'] = fields
if bytes is not default:
params['bytes'] = bytes
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', _make_path('_cat', 'fielddata'),
params=params, decoder=_decode_text
)
return data
@asyncio.coroutine
def plugins(self, *, h=default, help=default, local=default,
master_timeout=default, v=default):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cat-plugins.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for master connection
node
:arg v: Verbose mode. Display column headers, default False
"""
params = {}
if h is not default:
params['h'] = h
if help is not default:
params['help'] = bool(help)
if local is not default:
params['local'] = bool(local)
if master_timeout is not default:
params['master_timeout'] = master_timeout
if v is not default:
params['v'] = bool(v)
_, data = yield from self.transport.perform_request(
'GET', '/_cat/plugins',
params=params, decoder=_decode_text
)
return data
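# Illustrative usage from inside a coroutine (a hedged sketch; assumes an
# aioes-style client that exposes this CatClient as `client.cat`):
#   health_line = yield from client.cat.health(v=True)
#   indices_table = yield from client.cat.indices(index="my-index", v=True)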
|
47357
|
from ics import Calendar, Event
from datetime import date, timedelta
from db import fetchall_dict
from rich import print
from flag import flag
c = Calendar()
def add_allday_event(c, event_start, event_name, event_description):
e = Event()
e.name = event_name
e.description = event_description
e.begin = event_start.isoformat()
e.end = (event_start + timedelta(days=1)).isoformat()
e.make_all_day()
c.events.add(e)
cities = fetchall_dict(
"""
select
u
, c.n
, concat_ws(', ', c.s,c.c) s
, v.video
from cities c
JOIN videos v ON v.id = c.id
order by (rank < 3500 and c.id < 3500) DESC
, ROW_NUMBER() OVER ( PARTITION BY u ) -- prefer to show many countries
, random()
--limit 2
"""
)
START_DATE = date.today()
for city in cities:
print(city)
add_allday_event(
c,
event_start=START_DATE,
event_name=flag(city["u"]) + " " + city["n"],
event_description=f"""{city["n"]} welcomes you !
{city["video"]}
{city["s"]}
""",
)
START_DATE += timedelta(2)
c.events
with open("my.ics", "w") as my_file:
my_file.writelines(c)
|
47358
|
import logging
import requests
from settings_csv import ALGO_NFDOMAINS
# API documentation: https://editor.swagger.io/?url=https://api.testnet.nf.domains/info/openapi3.yaml
class NFDomainsAPI:
session = requests.Session()
def get_address(self, name):
endpoint = f"nfd/{name}"
params = {"view": "brief"}
data, status_code = self._query(ALGO_NFDOMAINS, endpoint, params)
if status_code == 200:
# https://docs.nf.domains/docs/faq#how-do-i-set-my-address-to-resolve-my-nfd
# If present, use the primary/deposit address, otherwise resolve to the owner address
if "caAlgo" in data:
return data["caAlgo"][0]
else:
return data["owner"]
else:
return None
def _query(self, base_url, endpoint, params=None):
logging.info("Querying NFDomains endpoint %s...", endpoint)
url = f"{base_url}/{endpoint}"
response = self.session.get(url, params=params)
return response.json(), response.status_code
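# Illustrative usage (a hedged sketch; the NFD name below is hypothetical):
#   api = NFDomainsAPI()
#   address = api.get_address("example.algo")   # None if the lookup fails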
|
47378
|
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
url= input('Enter - ')
data= urllib.request.urlopen(url).read().decode()
#print(type('data'))
#data ='''<commentinfo>tag</commentinfo>'''
#print("~~~",data)
c=0
commentsinfo = ET.fromstring(data)  # root tag, i.e. "commentinfo" in this case
lst = commentsinfo.findall('comments/comment')  # and the path, i.e. "commentinfo/comments/comment"
#print("$$",type('lst'))
print('User count:', len(lst))
for item in lst:
#print('Name', item.find('name').text) #get the txt btw <name> tag
#print('count', item.find('count').text) #get the txt btw <count> tag
c=c+int(item.find('count').text)
print("sum :",c)
#http://py4e-data.dr-chuck.net/comments_42.xml in this case 2553
#http://py4e-data.dr-chuck.net/comments_967204.xml in this case 2212
|
47387
|
import normalize_sentences
import spacy
nlp = spacy.load('de_core_news_sm')
test_sentence = 'Der schlaue Fuchs sagte "Treffen um 16:20 Uhr!" aber war schon 20 Minuten früher da. Im Jahre 1995 schuf er das Gedicht.'
def test_sent(test_sentence):
result = normalize_sentences.normalize(nlp, test_sentence)
print(test_sentence, '->', result)
test_sent('Der schlaue Fuchs sagte "Treffen um 16:20 Uhr!" aber war schon 20 Minuten früher da. Im Jahre 1995 schuf er das Gedicht.')
test_sent('Er war von 1920 bis 1988 durchgehend beschäftigt.')
|
47443
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from openbook_notifications.models.notification import Notification
from openbook_posts.models import PostReaction
class PostReactionNotification(models.Model):
notification = GenericRelation(Notification, related_name='post_reaction_notifications')
post_reaction = models.ForeignKey(PostReaction, on_delete=models.CASCADE)
@classmethod
def create_post_reaction_notification(cls, post_reaction_id, owner_id):
post_reaction_notification = cls.objects.create(post_reaction_id=post_reaction_id)
Notification.create_notification(type=Notification.POST_REACTION,
content_object=post_reaction_notification,
owner_id=owner_id)
return post_reaction_notification
@classmethod
def delete_post_reaction_notification(cls, post_reaction_id, owner_id):
cls.objects.filter(post_reaction_id=post_reaction_id,
notification__owner_id=owner_id).delete()
@classmethod
def delete_post_reaction_notifications(cls, post_reaction_id):
cls.objects.filter(post_reaction_id=post_reaction_id).delete()
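# Illustrative usage (a hedged sketch; the ids below are hypothetical):
#   PostReactionNotification.create_post_reaction_notification(post_reaction_id=1, owner_id=2)
#   PostReactionNotification.delete_post_reaction_notification(post_reaction_id=1, owner_id=2)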
|
47504
|
import package
import helper
import package.assistant
#We expect that 'a' below will be 1 not a module.
from confused_elements import a
import sys
|
47543
|
import numpy as np
from .. import T
from ..layer import ShapedLayer
from ..initialization import initialize_weights
from .full import Linear
from .. import stats
__all__ = ['Gaussian', 'Bernoulli', 'IdentityVariance']
class Gaussian(Linear):
def __init__(self, *args, **kwargs):
self.cov_type = kwargs.pop('cov_type', 'diagonal')
self.min_stdev = kwargs.pop('min_stdev', 1e-2)
super(Gaussian, self).__init__(*args, **kwargs)
assert not self.elementwise
def initialize(self):
if not self.elementwise:
dim_in, dim_out = self.get_dim_in()[-1], self.get_dim_out()[-1]
left = initialize_weights(self.initialization, [dim_in, dim_out // 2])
right = T.zeros([dim_in, dim_out // 2])
self.create_parameter('W', [dim_in, dim_out], initial_value=(
T.concatenate([
right, left
], -1)
))
self.create_parameter('b', [dim_out], initial_value=np.zeros([dim_out]))
def get_dim_out(self):
return [self.dim_out[0] * 2]
def activate(self, X):
if self.cov_type == 'diagonal':
scale_diag, mu = T.split(X, 2, axis=-1)
if hasattr(self, 'min_stdev'):
scale_diag = T.softplus(scale_diag) + self.min_stdev
else:
scale_diag = T.softplus(scale_diag) + 1e-5
return stats.GaussianScaleDiag([scale_diag, mu], parameter_type='regular')
raise Exception("Undefined covariance type: %s" % self.cov_type)
def __str__(self):
return "Gaussian(%s)" % self.dim_out
class Bernoulli(Linear):
def __init__(self, *args, **kwargs):
self.parameter_type = kwargs.pop('parameter_type', 'natural')
super(Bernoulli, self).__init__(*args, **kwargs)
    def activate(self, X):
        # Elementwise and dense cases build the same distribution object here.
        return stats.Bernoulli(X, parameter_type=self.parameter_type)
def __str__(self):
return "Bernoulli(%s)" % self.dim_out
class IdentityVariance(ShapedLayer):
def __init__(self, variance=1e-4, *args, **kwargs):
self.variance = variance
super(IdentityVariance, self).__init__(*args, **kwargs)
def initialize(self):
pass
def get_parameters(self):
return []
def infer_shape(self, shape):
if shape is None: return
if self.elementwise:
self.dim_in = shape
self.dim_out = shape
return
if self.dim_in is None:
self.dim_in = shape
def forward(self, X):
return stats.GaussianScaleDiag([np.sqrt(self.variance) * T.ones_like(X), X])
|
47561
|
import pytest
from coloring import create_color
from coloring.consts import *
def test_create_color():
text = "Hello"
mycolor = create_color(120, 160, 200)
colored_text = mycolor(text)
assert colored_text == f"{CSI}38;2;120;160;200m{text}{RESET_COLOR}"
mycolor = create_color(128, 128, 128)
colored_text = mycolor(text)
assert colored_text == f"{CSI}38;2;128;128;128m{text}{RESET_COLOR}"
def test_create_color_bold():
text = "Hello"
mycolor = create_color(120, 160, 200, s="b")
colored_text = mycolor(text)
assert (
colored_text
== f"{CSI}38;2;120;160;200m{BOLD}{text}{RESET_BOLD_AND_DIM}{RESET_COLOR}"
)
def test_create_color_underline():
text = "Hello"
mycolor = create_color(120, 160, 200, s="u")
colored_text = mycolor(text)
assert (
colored_text
== f"{CSI}38;2;120;160;200m{UNDERLINE}{text}{RESET_UNDERLINE}{RESET_COLOR}"
)
def test_create_color_cross():
text = "Hello"
mycolor = create_color(120, 160, 200, s="c")
colored_text = mycolor(text)
assert (
colored_text == f"{CSI}38;2;120;160;200m{CROSS}{text}{RESET_CROSS}{RESET_COLOR}"
)
def test_create_color_style_only():
    # Bold style only, no color
text = "Hello"
mycolor = create_color(s="b")
colored_text = mycolor(text)
assert colored_text == f"{BOLD}{text}{RESET_BOLD_AND_DIM}"
def test_create_color_background():
    # Custom background color
text = "Hello"
mycolor = create_color(bg=(120, 160, 200))
colored_text = mycolor(text)
assert colored_text == f"{CSI}48;2;120;160;200m{text}{RESET_BACKGROUND}"
mycolor = create_color(bg=(128, 128, 128))
colored_text = mycolor(text)
assert colored_text == f"{CSI}48;2;128;128;128m{text}{RESET_BACKGROUND}"
def test_create_color_signature_error():
with pytest.raises(TypeError):
create_color(12, 12)
with pytest.raises(TypeError):
create_color(12, 12, "lol")
with pytest.raises(TypeError):
create_color(12)
|
47615
|
import os
import sys
import tempfile
import subprocess
import cv2
import pymesh
import numpy as np
import torch
import triangle as tr
from tridepth import BaseMesh
from tridepth.extractor import calculate_canny_edges
from tridepth.extractor import SVGReader
from tridepth.extractor import resolve_self_intersection, cleanup
from tridepth.extractor import add_frame
class Mesh2DExtractor:
def __init__(self, canny_params={"denoise": False}, at_params={"filter_itr": 4, "error_thresh": 0.01}):
self.canny_params = canny_params # TODO
self.autotrace_cmd = ['autotrace',
'--centerline',
'--remove-adjacent-corners',
'--filter-iterations', str(at_params["filter_itr"]),
'--error-threshold', str(at_params["error_thresh"]),
'--input-format=bmp',
'--output-format=svg']
def _execute_autotrace(self, filename, debug=False):
"""Execute autotrace with input (bmp-file)
- https://github.com/autotrace/autotrace
Returns:
svg_string: string starting from '<svg/>'
"""
# Execute autotrace
p = subprocess.Popen(self.autotrace_cmd + [filename], stdout=subprocess.PIPE)
# Read the converted svg contents
svg_string = p.communicate()[0]
if not len(svg_string):
print("autotrace_cmd: " + ' '.join(self.autotrace_cmd + [filename]), file=sys.stderr)
print("ERROR: returned nothing, leaving tmp bmp file around for you to debug", file=sys.stderr)
sys.exit(1)
else:
if debug:
print(filename)
sys.exit(1)
else:
os.unlink(filename) # Remove the tempolary file
return svg_string
def _read_polygon_from_svg(self, svg_string):
"""
"""
# Extract polygon information from svg-string
# - https://github.com/guyc/scadtrace/blob/master/svg.py
svg_reader = SVGReader(svg_string)
verts_2d, edges = svg_reader.run()
# Store polygons as wire-format (w/ cleaning)
# - https://github.com/PyMesh/PyMesh/blob/master/scripts/svg_to_mesh.py
if verts_2d.shape[0] == 0:
wires = pymesh.wires.WireNetwork.create_empty()
else:
wires = pymesh.wires.WireNetwork.create_from_data(verts_2d, edges)
wires = resolve_self_intersection(wires, min_edge_size=1.5)
wires = cleanup(wires)
return wires
def _triangulation(self, np_edge, wires, output_size, debug=False):
"""
"""
height, width = output_size
# We use cython wrapper of Triangle,
# since other implementations (Pymesh) can't output edges :(
# - https://github.com/drufat/triangle
input_dic = {}
input_dic["vertices"] = wires.vertices.copy()
input_dic["segments"] = wires.edges.copy()
# [Options]
# p: Triangulates a Planar Straight Line Graph.
# q: no angles smaller than 20 degrees
try:
t = tr.triangulate(input_dic, 'pq')
except:
import uuid
unique_filename = str(uuid.uuid4()) + ".png"
print(wires.vertices.shape, wires.edges.shape)
cv2.imwrite(unique_filename, np_edge)
exit()
if debug:
import matplotlib.pyplot as plt
plt.gca().invert_yaxis()
# plt.imshow(np_edge)
for edge in wires.edges:
v1x, v1y = wires.vertices[edge[0]]
v2x, v2y = wires.vertices[edge[1]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='r', linewidth=1.0)
for tri in t['triangles']:
v1x, v1y = t['vertices'][tri[0]]
v2x, v2y = t['vertices'][tri[1]]
v3x, v3y = t['vertices'][tri[2]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='black', linewidth=1.0)
plt.plot([v2x, v3x], [v2y, v3y], 'k-', color='black', linewidth=1.0)
plt.plot([v3x, v1x], [v3y, v1y], 'k-', color='black', linewidth=1.0)
plt.scatter(wires.vertices[:, 0], wires.vertices[:, 1], s=3.0, c="black")
plt.show()
print(t['vertices'].shape, t['triangles'].shape)
exit()
# Normalize (range=[0,1])
vertices = t["vertices"]
t["vertices"] = np.concatenate((vertices[:, :1] / width,
vertices[:, 1:2] / height,
vertices[:, 2:]), 1)
t["edgemap"] = np_edge
return t
def __call__(self, np_scene):
"""
Args:
np_scene: [H,W,3] (ndarray, uint8)
"""
height, width, _ = np_scene.shape
# Calculate canny edge
np_edge, _ = calculate_canny_edges(np_scene, denoise=self.canny_params["denoise"])
# Save into temp file as bmp-format
with tempfile.NamedTemporaryFile(suffix='.bmp', delete=False) as temp:
cv2.imwrite(temp.name, np_edge)
# Execute vectorization (by Autotrace)
svg_string = self._execute_autotrace(temp.name)
# Extract polygon information
wires = self._read_polygon_from_svg(svg_string)
# Triangulation
wires = add_frame(wires, output_size=(height, width))
mesh_dic = self._triangulation(np_edge, wires, output_size=(height, width))
# Finally integrate all the information, and create disconnected mesh
mesh = BaseMesh(mesh_dic)
return mesh
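# Illustrative usage (a hedged sketch; "scene.png" is a hypothetical image and
# the `autotrace` binary must be available on PATH):
#   extractor = Mesh2DExtractor()
#   mesh = extractor(cv2.imread("scene.png"))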
|
47628
|
import unittest
from records_mover.records.targets.spectrum import SpectrumRecordsTarget
from records_mover.records.existing_table_handling import ExistingTableHandling
from mock import Mock, patch, MagicMock
class TestSpectrum(unittest.TestCase):
@patch('records_mover.records.targets.spectrum.ParquetRecordsFormat')
def setUp(self,
mock_ParquetRecordsFormat):
mock_schema_name = 'myschema'
mock_table_name = 'mytable'
mock_url_resolver = Mock(name='url_resolver')
mock_db_driver = Mock(name='db_driver')
mock_spectrum_base_url = Mock(name='spectrum_base_url')
self.mock_driver = mock_db_driver.return_value
self.mock_db = MagicMock(name='db')
self.mock_driver.db_engine = self.mock_db
self.mock_output_loc = mock_url_resolver.directory_url.return_value.\
directory_in_this_directory.return_value.\
directory_in_this_directory.return_value.\
directory_in_this_directory.return_value
self.mock_output_loc.url = 's3://output-loc/'
self.mock_output_loc.scheme = 's3'
self.records_format = mock_ParquetRecordsFormat.return_value
self.target =\
SpectrumRecordsTarget(schema_name=mock_schema_name,
table_name=mock_table_name,
db_engine=self.mock_db,
db_driver=mock_db_driver,
url_resolver=mock_url_resolver,
spectrum_base_url=mock_spectrum_base_url,
spectrum_rdir_url=None,
existing_table_handling=ExistingTableHandling.DROP_AND_RECREATE)
mock_url_resolver.directory_url.assert_called_with(mock_spectrum_base_url)
def test_init(self):
self.assertEqual(self.target.records_format, self.records_format)
self.assertEqual(self.target.db, self.mock_db)
@patch('records_mover.records.targets.spectrum.quote_schema_and_table')
def test_pre_load_hook_preps_bucket_with_default_prep(self, mock_quote_schema_and_table):
mock_schema_and_table = mock_quote_schema_and_table.return_value
mock_cursor = self.target.driver.db_engine.connect.return_value.__enter__.return_value
self.target.pre_load_hook()
mock_quote_schema_and_table.assert_called_with(self.target.db,
self.target.schema_name,
self.target.table_name)
mock_cursor.execution_options.assert_called_with(isolation_level='AUTOCOMMIT')
mock_cursor.execute.assert_called_with(f"DROP TABLE IF EXISTS {mock_schema_and_table}")
self.mock_output_loc.purge_directory.assert_called_with()
@patch('records_mover.records.targets.spectrum.RecordsDirectory')
def test_records_directory(self, mock_RecordsDirectory):
out = self.target.records_directory()
mock_RecordsDirectory.assert_called_with(self.mock_output_loc)
self.assertEqual(out, mock_RecordsDirectory.return_value)
@patch('records_mover.records.targets.spectrum.CreateTable')
@patch('records_mover.records.targets.spectrum.Table')
@patch('records_mover.records.targets.spectrum.MetaData')
@patch('records_mover.records.targets.spectrum.RecordsDirectory')
def test_post_load_hook_creates_table(self,
mock_RecordsDirectory,
mock_MetaData,
mock_Table,
mock_CreateTable):
mock_num_rows_loaded = 123
mock_directory = mock_RecordsDirectory.return_value
mock_records_schema = mock_directory.load_schema_json_obj.return_value
mock_field = Mock(name='field')
mock_records_schema.fields = [mock_field]
mock_meta = mock_MetaData.return_value
mock_columns = [mock_field.to_sqlalchemy_column.return_value]
mock_table = mock_Table.return_value
mock_CreateTable.return_value = "SOME GENERATED CREATE TABLES STATEMENT "
mock_cursor = self.target.driver.db_engine.connect.return_value.__enter__.return_value
self.target.post_load_hook(num_rows_loaded=mock_num_rows_loaded)
mock_directory.load_schema_json_obj.assert_called_with()
mock_directory.get_manifest.assert_called_with()
mock_field.to_sqlalchemy_column.assert_called_with(self.mock_driver)
mock_Table.assert_called_with('mytable', mock_meta,
*mock_columns, prefixes=['EXTERNAL'], schema='myschema')
mock_CreateTable.assert_called_with(mock_table, bind=self.mock_driver.db_engine)
mock_cursor.execution_options.assert_called_with(isolation_level='AUTOCOMMIT')
mock_cursor.execute.assert_called_with("SOME GENERATED CREATE TABLES STATEMENT "
"STORED AS PARQUET\n"
"LOCATION 's3://output-loc/_manifest'\n\n"
"TABLE PROPERTIES ('numRows'='123')")
|
47732
|
from datetime import datetime
import logging
from bs4 import BeautifulSoup
from db.models import Victim
from net.proxy import Proxy
from .sitecrawler import SiteCrawler
import time
class Nefilim(SiteCrawler):
actor = "Nefilim"
def _handle_page(self, soup):
victim_list = soup.find_all("header", class_="entry-header")
for victim in victim_list:
victim_title = victim.find("h2", class_="entry-title").text.strip()
victim_name = victim_title[0:victim_title.find(". Part")]
meta = victim.find("div", class_="entry-meta")
published = meta.find("time", class_="entry-date").attrs["datetime"]
published_dt = datetime.strptime(
published.strip()[:-6], "%Y-%m-%dT%H:%M:%S")
victim_leak_site = meta.find("span", class_="posted-on").find("a").attrs["href"]
q = self.session.query(Victim).filter_by(
url=victim_leak_site, site=self.site)
if q.count() == 0:
# new victim
v = Victim(name=victim_name, url=victim_leak_site, published=published_dt,
first_seen=datetime.utcnow(), last_seen=datetime.utcnow(), site=self.site)
self.session.add(v)
self.new_victims.append(v)
else:
# already seen, update last_seen
v = q.first()
v.last_seen = datetime.utcnow()
self.current_victims.append(v)
self.session.commit()
# server was timing out so slows it down a bit
time.sleep(1.0)
def scrape_victims(self):
with Proxy() as p:
r = p.get(f"{self.url}", headers=self.headers)
soup = BeautifulSoup(r.content.decode(), "html.parser")
            # handle the first results page before walking the "previous" links
            self._handle_page(soup)
while True:
page_nav = soup.find("div", class_="nav-previous")
if page_nav is None:
break
url = page_nav.find("a").attrs["href"]
r = p.get(f"{url}", headers=self.headers)
soup = BeautifulSoup(r.content.decode(), "html.parser")
self._handle_page(soup)
|
47743
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import torch
import math
import copy
from .base_debugger import BaseDebugger
from models.utils import _tranpose_and_gather_feat, _gather_feat
from models.decode import _topk_original, _topk, _topk_channel, _nms
from datasets.dataset.utils import _bbox_overlaps
from utils.image import transform_preds
class TriCtdetDebugger(BaseDebugger):
def __init__(self, opt):
super(TriCtdetDebugger, self).__init__(opt)
def forward(self, images):
with torch.no_grad():
output = self.model(images)[-1]
tl = output['tl'].sigmoid_()
bl = output['bl'].sigmoid_()
br = output['br'].sigmoid_()
ct = output['ct'].sigmoid_()
tl_tag = output['tl_tag']
bl_tag = output['bl_tag']
br_tag = output['br_tag']
tl_reg = output['tl_reg']
bl_reg = output['bl_reg']
br_reg = output['br_reg']
ct_reg = output['ct_reg']
detections = {'tl_heatmap':tl, 'bl_heatmap':bl, 'br_heatmap':br, 'ct_heatmap':ct,
'tl_reg':tl_reg, 'bl_reg':bl_reg, 'br_reg':br_reg, 'ct_reg':ct_reg,
'tl_tag':tl_tag, 'bl_tag':bl_tag, 'br_tag':br_tag}
return detections
def debug(self, detections, targets, ae_threshold):
tl_heat = detections['tl_heatmap']
bl_heat = detections['bl_heatmap']
br_heat = detections['br_heatmap']
ct_heat = detections['ct_heatmap']
targets['tl_tag'] = targets['tl_tag'][targets['reg_mask']].unsqueeze(0)
targets['bl_tag'] = targets['bl_tag'][targets['reg_mask']].unsqueeze(0)
targets['br_tag'] = targets['br_tag'][targets['reg_mask']].unsqueeze(0)
targets['ct_tag'] = targets['ct_tag'][targets['reg_mask']].unsqueeze(0)
targets['tl_reg'] = targets['tl_reg'][targets['reg_mask']].unsqueeze(0)
targets['bl_reg'] = targets['bl_reg'][targets['reg_mask']].unsqueeze(0)
targets['br_reg'] = targets['br_reg'][targets['reg_mask']].unsqueeze(0)
targets['ct_reg'] = targets['ct_reg'][targets['reg_mask']].unsqueeze(0)
batch, cat, height, width = tl_heat.size()
# tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=256)
# bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=256)
# br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=256)
# ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=256)
tl_tag = detections['tl_tag']
bl_tag = detections['bl_tag']
br_tag = detections['br_tag']
tl_reg = detections['tl_reg']
bl_reg = detections['bl_reg']
br_reg = detections['br_reg']
ct_reg = detections['ct_reg']
# gather by gt
tl_tag = _tranpose_and_gather_feat(tl_tag, targets['tl_tag'].to(torch.device("cuda")))
bl_tag = _tranpose_and_gather_feat(bl_tag, targets['bl_tag'].to(torch.device("cuda")))
br_tag = _tranpose_and_gather_feat(br_tag, targets['br_tag'].to(torch.device("cuda")))
# gather by top k
# tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
# bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
# br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
avg_tag = (tl_tag + bl_tag + br_tag) / 3
dists_tl = torch.abs(avg_tag - tl_tag).to(torch.device("cpu")).numpy()
dists_bl = torch.abs(bl_tag - avg_tag).to(torch.device("cpu")).numpy()
dists_br = torch.abs(avg_tag - br_tag).to(torch.device("cpu")).numpy()
dists_avg = (dists_tl.sum() + dists_bl.sum() + dists_br.sum()) / dists_tl.shape[1] / 3
min_tl = dists_tl.min()
max_tl = dists_tl.max()
min_bl = dists_bl.min()
max_bl = dists_bl.max()
min_br = dists_br.min()
max_br = dists_br.max()
# gather by gt
tl_reg = _tranpose_and_gather_feat(tl_reg, targets['tl_tag'].to(torch.device("cuda")))
bl_reg = _tranpose_and_gather_feat(bl_reg, targets['bl_tag'].to(torch.device("cuda")))
br_reg = _tranpose_and_gather_feat(br_reg, targets['br_tag'].to(torch.device("cuda")))
ct_reg = _tranpose_and_gather_feat(ct_reg, targets['ct_tag'].to(torch.device("cuda")))
# reg_diff_tl = tl_reg - targets['tl_reg'].to(torch.device("cuda"))
# reg_diff_tl = torch.sqrt(reg_diff_tl[..., 0]*reg_diff_tl[..., 0] + reg_diff_tl[..., 1]*reg_diff_tl[..., 1])
# reg_diff_bl = bl_reg - targets['bl_reg'].to(torch.device("cuda"))
# reg_diff_bl = torch.sqrt(reg_diff_bl[..., 0] * reg_diff_bl[..., 0] + reg_diff_bl[..., 1] * reg_diff_bl[..., 1])
# reg_diff_br = br_reg - targets['br_reg'].to(torch.device("cuda"))
# reg_diff_br = torch.sqrt(reg_diff_br[..., 0] * reg_diff_br[..., 0] + reg_diff_br[..., 1] * reg_diff_br[..., 1])
# reg_diff_ct = ct_reg - targets['ct_reg'].to(torch.device("cuda"))
# reg_diff_ct = torch.sqrt(reg_diff_ct[..., 0] * reg_diff_ct[..., 0] + reg_diff_ct[..., 1] * reg_diff_ct[..., 1])
tl_xs = ((targets['tl_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
tl_ys = ((targets['tl_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
bl_xs = ((targets['bl_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
bl_ys = ((targets['bl_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
br_xs = ((targets['br_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
br_ys = ((targets['br_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
ct_xs = ((targets['ct_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
ct_ys = ((targets['ct_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
tl_xs_pr = (tl_xs + tl_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
tl_ys_pr = (tl_ys + tl_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
bl_xs_pr = (bl_xs + bl_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
bl_ys_pr = (bl_ys + bl_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
br_xs_pr = (br_xs + br_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
br_ys_pr = (br_ys + br_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
ct_xs_pr = (ct_xs + ct_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
ct_ys_pr = (ct_ys + ct_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
tl_xs_gt = (tl_xs + targets['tl_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
tl_ys_gt = (tl_ys + targets['tl_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bl_xs_gt = (bl_xs + targets['bl_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bl_ys_gt = (bl_ys + targets['bl_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
br_xs_gt = (br_xs + targets['br_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
br_ys_gt = (br_ys + targets['br_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
ct_xs_gt = (ct_xs + targets['ct_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
ct_ys_gt = (ct_ys + targets['ct_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bboxes_gt = targets['bbox'][targets['reg_mask']]
        nm_instances = tl_xs_pr.shape[0]
for i in range(nm_instances):
bbox_gt = bboxes_gt[i, :]
# prediction
bbox_coord_pr = []
tl_x_pr = tl_xs_pr[i]
tl_y_pr = tl_ys_pr[i]
bl_x_pr = bl_xs_pr[i]
bl_y_pr = bl_ys_pr[i]
br_x_pr = br_xs_pr[i]
br_y_pr = br_ys_pr[i]
# center
x_c = (tl_x_pr + br_x_pr) / 2.
y_c = (tl_y_pr + br_y_pr) / 2.
if bl_x_pr == br_x_pr:
p_y = tl_y_pr
p_x = br_x_pr
if br_y_pr > bl_y_pr:
angle = np.pi / 2.
else:
angle = -np.pi / 2.
elif bl_y_pr == br_y_pr:
p_x = tl_x_pr
p_y = br_y_pr
angle = 0.
else:
# angle
angle = math.atan2(-(br_y_pr - bl_y_pr), br_x_pr - bl_x_pr)
# find intersected point
                a = (br_y_pr - bl_y_pr) / (br_x_pr - bl_x_pr)
                b = br_y_pr - a * br_x_pr
                delta_x = br_x_pr - bl_x_pr
                delta_y = br_y_pr - bl_y_pr
                p_x = (delta_x * tl_x_pr + delta_y * tl_y_pr - delta_y * b) / (delta_x + delta_y * a)
p_y = a * p_x + b
# w, h
w = np.sqrt((br_x_pr - p_x) * (br_x_pr - p_x) + (br_y_pr - p_y) * (br_y_pr - p_y))
h = np.sqrt((tl_x_pr - p_x) * (tl_x_pr - p_x) + (tl_y_pr - p_y) * (tl_y_pr - p_y))
bbox_coord_pr.append([x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2, angle])
bbox_coord_pr = np.array(bbox_coord_pr)
# groundtruth
boxes_coord_gt = []
tl_x_gt = tl_xs_gt[i]
tl_y_gt = tl_ys_gt[i]
bl_x_gt = bl_xs_gt[i]
bl_y_gt = bl_ys_gt[i]
br_x_gt = br_xs_gt[i]
br_y_gt = br_ys_gt[i]
            if bl_x_gt == br_x_gt:
                # center (mirrors the prediction branch above)
                x_c = (tl_x_gt + br_x_gt) / 2.
                y_c = (tl_y_gt + br_y_gt) / 2.
                p_y = tl_y_gt
                p_x = bl_x_gt
                # a vertical bottom edge corresponds to a +/- 90 degree rotation
                if br_y_gt > bl_y_gt:
                    angle = np.pi / 2.
                else:
                    angle = -np.pi / 2.
else:
# center
x_c = (tl_x_gt + br_x_gt) / 2.
y_c = (tl_y_gt + br_y_gt) / 2.
# angle
angle = math.atan(-(br_y_gt - bl_y_gt)/(br_x_gt - bl_x_gt))
# find intersected point
a = (br_y_gt - bl_y_gt) / (br_x_gt - bl_x_gt)
b = br_y_gt - a * br_x_gt
delta_x = br_x_gt - bl_x_gt
delta_y = br_y_gt - bl_y_gt
p_x = (delta_x * tl_x_gt + delta_y * tl_y_gt - delta_y * b) / (delta_x + delta_y * a)
p_y = a * p_x + b
# w, h
w = np.sqrt((br_x_gt - p_x) * (br_x_gt - p_x) + (br_y_gt - p_y) * (br_y_gt - p_y))
h = np.sqrt((tl_x_gt - p_x) * (tl_x_gt - p_x) + (tl_y_gt - p_y) * (tl_y_gt - p_y))
boxes_coord_gt.append([x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2, angle])
boxes_coord_gt = np.array(boxes_coord_gt)
# print(np.array_equal(bbox_gt, boxes_coord_gt))
overlaps = _bbox_overlaps(np.ascontiguousarray(bbox_coord_pr[:, :4], dtype=np.float32),
np.ascontiguousarray(boxes_coord_gt[:, :4], dtype=np.float32),
bbox_coord_pr[:, -1], boxes_coord_gt[:, -1],
128, 128)
flag_suc = False
flag_exit = 0
for i in range(overlaps.shape[0]):
for j in range(overlaps.shape[1]):
value_overlap = overlaps[i, j]
angle_diff = math.fabs(bbox_coord_pr[i, -1] - boxes_coord_gt[j, -1])
if value_overlap > 0.25 and angle_diff < np.pi / 6:
flag_suc = True
flag_exit = 1
break
if flag_exit:
break
if flag_exit:
break
return min_tl, max_tl, min_bl, max_bl, min_br, max_br, dists_avg, flag_suc
def process(self, images, kernel=1, ae_threshold=1, K=100, num_dets=100):
with torch.no_grad():
output = self.model(images)[-1]
tl_heat = output['tl'].sigmoid_()
bl_heat = output['bl'].sigmoid_()
br_heat = output['br'].sigmoid_()
ct_heat = output['ct'].sigmoid_()
tl_tag = output['tl_tag']
bl_tag = output['bl_tag']
br_tag = output['br_tag']
tl_reg = output['tl_reg']
bl_reg = output['bl_reg']
br_reg = output['br_reg']
ct_reg = output['ct_reg']
batch, cat, height, width = tl_heat.size()
tl_heat = _nms(tl_heat, kernel=3)
bl_heat = _nms(bl_heat, kernel=3)
br_heat = _nms(br_heat, kernel=3)
ct_heat = _nms(ct_heat, kernel=3)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)
bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=K)
br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)
ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=K)
tl_ys = tl_ys.view(batch, K, 1, 1).expand(batch, K, K, K)
tl_xs = tl_xs.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_ys = bl_ys.view(batch, 1, K, 1).expand(batch, K, K, K)
bl_xs = bl_xs.view(batch, 1, K, 1).expand(batch, K, K, K)
br_ys = br_ys.view(batch, 1, 1, K).expand(batch, K, K, K)
br_xs = br_xs.view(batch, 1, 1, K).expand(batch, K, K, K)
ct_ys = ct_ys.view(batch, 1, K).expand(batch, K, K)
ct_xs = ct_xs.view(batch, 1, K).expand(batch, K, K)
if tl_reg is not None and bl_reg is not None and br_reg is not None:
tl_reg = _tranpose_and_gather_feat(tl_reg, tl_inds)
tl_reg = tl_reg.view(batch, K, 1, 1, 2)
bl_reg = _tranpose_and_gather_feat(bl_reg, bl_inds)
bl_reg = bl_reg.view(batch, 1, K, 1, 2)
br_reg = _tranpose_and_gather_feat(br_reg, br_inds)
br_reg = br_reg.view(batch, 1, 1, K, 2)
ct_reg = _tranpose_and_gather_feat(ct_reg, ct_inds)
ct_reg = ct_reg.view(batch, 1, K, 2)
tl_xs = tl_xs + tl_reg[..., 0]
tl_ys = tl_ys + tl_reg[..., 1]
bl_xs = bl_xs + bl_reg[..., 0]
bl_ys = bl_ys + bl_reg[..., 1]
br_xs = br_xs + br_reg[..., 0]
br_ys = br_ys + br_reg[..., 1]
ct_xs = ct_xs + ct_reg[..., 0]
ct_ys = ct_ys + ct_reg[..., 1]
# all possible boxes based on top k corners (ignoring class)
bboxes = torch.stack((tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys), dim=4)
tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
tl_tag = tl_tag.view(batch, K, 1, 1)
bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
bl_tag = bl_tag.view(batch, 1, K, 1)
br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
br_tag = br_tag.view(batch, 1, 1, K)
avg_tag = (tl_tag + bl_tag + br_tag) / 3
dists = (torch.abs(tl_tag - avg_tag) + torch.abs(bl_tag - avg_tag) + torch.abs(br_tag - avg_tag)) / 3
tl_scores = tl_scores.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_scores = bl_scores.view(batch, 1, K, 1).expand(batch, K, K, K)
br_scores = br_scores.view(batch, 1, 1, K).expand(batch, K, K, K)
# reject boxes based on corner scores
# sc_inds = (tl_scores < scores_thresh) | (bl_scores < scores_thresh) | (br_scores < scores_thresh)
scores = (tl_scores + bl_scores + br_scores) / 3
# reject boxes based on classes
tl_clses = tl_clses.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_clses = bl_clses.view(batch, 1, K, 1).expand(batch, K, K, K)
br_clses = br_clses.view(batch, 1, 1, K).expand(batch, K, K, K)
cls_inds = (tl_clses != bl_clses) | (bl_clses != br_clses) | (tl_clses != br_clses)
# reject boxes based on distances
dist_inds = (dists > ae_threshold)
scores[cls_inds] = -1
scores[dist_inds] = -1
# scores[sc_inds] = -1
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
bboxes = bboxes.view(batch, -1, 6)
bboxes = _gather_feat(bboxes, inds)
clses = bl_clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
tl_scores = tl_scores.contiguous().view(batch, -1, 1)
tl_scores = _gather_feat(tl_scores, inds).float()
bl_scores = bl_scores.contiguous().view(batch, -1, 1)
bl_scores = _gather_feat(bl_scores, inds).float()
br_scores = br_scores.contiguous().view(batch, -1, 1)
br_scores = _gather_feat(br_scores, inds).float()
ct_xs = ct_xs[:, 0, :]
ct_ys = ct_ys[:, 0, :]
centers = torch.cat([ct_xs.unsqueeze(2), ct_ys.unsqueeze(2), ct_clses.float().unsqueeze(2), ct_scores.unsqueeze(2)], dim=2)
detections = torch.cat([bboxes, scores, tl_scores, bl_scores, br_scores, clses], dim=2)
# tl_heat = output['tl'].sigmoid_()
# bl_heat = output['bl'].sigmoid_()
# br_heat = output['br'].sigmoid_()
# ct_heat = output['ct'].sigmoid_()
#
# tl_tag = output['tl_tag']
# bl_tag = output['bl_tag']
# br_tag = output['br_tag']
#
# tl_reg = output['tl_reg']
# bl_reg = output['bl_reg']
# br_reg = output['br_reg']
# ct_reg = output['ct_reg']
#
# kernel = self.opt.nms_kernel
# ae_threshold = self.opt.ae_threshold
# K = self.opt.K
#
# batch, cat, height, width = tl_heat.size()
#
# # perform nms on heatmaps
# tl_heat = _nms(tl_heat, kernel=kernel)
# bl_heat = _nms(bl_heat, kernel=kernel)
# br_heat = _nms(br_heat, kernel=kernel)
# ct_heat = _nms(ct_heat, kernel=kernel)
#
# tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)
# bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=K)
# br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)
# ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=K)
#
# tl_ys = tl_ys.view(batch, K, 1, 1).expand(batch, K, K, K)
# tl_xs = tl_xs.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_ys = bl_ys.view(batch, 1, K, 1).expand(batch, K, K, K)
# bl_xs = bl_xs.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_ys = br_ys.view(batch, 1, 1, K).expand(batch, K, K, K)
# br_xs = br_xs.view(batch, 1, 1, K).expand(batch, K, K, K)
# ct_ys = ct_ys.view(batch, 1, K).expand(batch, K, K)
# ct_xs = ct_xs.view(batch, 1, K).expand(batch, K, K)
#
# if tl_reg is not None and bl_reg is not None and br_reg is not None:
# tl_reg = _tranpose_and_gather_feat(tl_reg, tl_inds)
# tl_reg = tl_reg.view(batch, K, 1, 1, 2)
# bl_reg = _tranpose_and_gather_feat(bl_reg, bl_inds)
# bl_reg = bl_reg.view(batch, 1, K, 1, 2)
# br_reg = _tranpose_and_gather_feat(br_reg, br_inds)
# br_reg = br_reg.view(batch, 1, 1, K, 2)
# ct_reg = _tranpose_and_gather_feat(ct_reg, ct_inds)
# ct_reg = ct_reg.view(batch, 1, K, 2)
#
# tl_xs = tl_xs + tl_reg[..., 0]
# tl_ys = tl_ys + tl_reg[..., 1]
# bl_xs = bl_xs + bl_reg[..., 0]
# bl_ys = bl_ys + bl_reg[..., 1]
# br_xs = br_xs + br_reg[..., 0]
# br_ys = br_ys + br_reg[..., 1]
# ct_xs = ct_xs + ct_reg[..., 0]
# ct_ys = ct_ys + ct_reg[..., 1]
#
# # all possible boxes based on top k corners (ignoring class)
# bboxes = torch.stack((tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys), dim=4)
#
# tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
# tl_tag = tl_tag.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
# bl_tag = bl_tag.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
# br_tag = br_tag.view(batch, 1, 1, K).expand(batch, K, K, K)
# avg_tag = (tl_tag + bl_tag + br_tag) / 3
# dists = (torch.abs(tl_tag - avg_tag) + torch.abs(bl_tag - avg_tag) + torch.abs(br_tag - avg_tag)) / 3
#
# tl_scores = tl_scores.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_scores = bl_scores.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_scores = br_scores.view(batch, 1, 1, K).expand(batch, K, K, K)
# scores = (tl_scores + bl_scores + br_scores) / 3
#
# # reject boxes based on classes
# tl_clses = tl_clses.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_clses = bl_clses.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_clses = br_clses.view(batch, 1, 1, K).expand(batch, K, K, K)
# cls_inds = (tl_clses != bl_clses) | (bl_clses != br_clses) | (tl_clses != br_clses)
#
# # reject boxes based on distances
# dist_inds = (dists > ae_threshold)
#
# # instead of filtering prediction according to the out-of-bound rotation, do data augmentation to mirror groundtruth
#
# scores[cls_inds] = -1
# scores[dist_inds] = -1
#
# scores = scores.view(batch, -1)
# scores, inds = torch.topk(scores, 100)
# scores = scores.unsqueeze(2)
#
# bboxes = bboxes.view(batch, -1, 6)
# bboxes = _gather_feat(bboxes, inds)
#
# tl_tag = tl_tag.contiguous().view(batch, -1, 1)
# tl_tag = _gather_feat(tl_tag, inds)
# bl_tag = bl_tag.contiguous().view(batch, -1, 1)
# bl_tag = _gather_feat(bl_tag, inds)
# br_tag = br_tag.contiguous().view(batch, -1, 1)
# br_tag = _gather_feat(br_tag, inds)
# avg_tag = avg_tag.contiguous().view(batch, -1, 1)
# avg_tag = _gather_feat(avg_tag, inds)
#
# clses = bl_clses.contiguous().view(batch, -1, 1)
# clses = _gather_feat(clses, inds).float()
#
# tl_scores = tl_scores.contiguous().view(batch, -1, 1)
# tl_scores = _gather_feat(tl_scores, inds).float()
# bl_scores = bl_scores.contiguous().view(batch, -1, 1)
# bl_scores = _gather_feat(bl_scores, inds).float()
# br_scores = br_scores.contiguous().view(batch, -1, 1)
# br_scores = _gather_feat(br_scores, inds).float()
#
# ct_xs = ct_xs[:, 0, :]
# ct_ys = ct_ys[:, 0, :]
#
# centers = torch.cat(
# [ct_xs.unsqueeze(2), ct_ys.unsqueeze(2), ct_clses.float().unsqueeze(2), ct_scores.unsqueeze(2)], dim=2)
# detections = torch.cat([bboxes, scores, tl_scores, bl_scores, br_scores, clses, tl_tag, bl_tag, br_tag, avg_tag], dim=2)
return detections, centers
def post_process(self, detections, centers, num_classes, bbox_size_threshold, ori_threshold):
detections = detections.detach().cpu().numpy()
centers = centers.detach().cpu().numpy()
detections = detections.reshape(1, -1, detections.shape[2])
centers = centers.reshape(1, -1, centers.shape[2])
ret = []
for i in range(detections.shape[0]):
top_preds = {}
detections[i, :, 0:2] *= 4.
detections[i, :, 2:4] *= 4.
detections[i, :, 4:6] *= 4.
centers[i, :, 0:2] *= 4.
            # Discard bboxes whose central region contains no matching center point
detections = np.concatenate(detections, axis=1)
centers = np.concatenate(centers, axis=1)
# filter by orientation distance between quantized and continuous predicted angle
classes = detections[..., -1]
quant_ori = (5.0 * classes - 85.0) / 180 * np.pi
bl_x = detections[..., 2]
bl_y = detections[..., 3]
br_x = detections[..., 4]
br_y = detections[..., 5]
cont_ori = np.arctan(-(br_y - bl_y) / (br_x - bl_x))
dist_ori = np.fabs(quant_ori - cont_ori)
ori_ind = dist_ori < ori_threshold
valid_detections = detections[ori_ind]
valid_ind = valid_detections[:, 6] > -1
valid_detections = valid_detections[valid_ind]
# valid_ind = detections[:, 6] > -1
# valid_detections = detections[valid_ind]
box_width = np.sqrt(np.power(valid_detections[:, 2] - valid_detections[:, 4], 2) + \
np.power(valid_detections[:, 3] - valid_detections[:, 5], 2))
box_height = np.sqrt(np.power(valid_detections[:, 2] - valid_detections[:, 0], 2) + \
np.power(valid_detections[:, 3] - valid_detections[:, 1], 2))
s_ind = (box_width * box_height <= bbox_size_threshold)
l_ind = (box_width * box_height > bbox_size_threshold)
s_detections = valid_detections[s_ind]
l_detections = valid_detections[l_ind]
            # post-process small bounding boxes
s_tl_x = (2 * s_detections[:, 0] + s_detections[:, 4]) / 3
s_br_x = (s_detections[:, 0] + 2 * s_detections[:, 4]) / 3
s_tl_y = (2 * s_detections[:, 1] + s_detections[:, 5]) / 3
s_br_y = (s_detections[:, 1] + 2 * s_detections[:, 5]) / 3
s_temp_score = copy.copy(s_detections[:, 6])
s_detections[:, 6] = -1
center_x = centers[:, 0][:, np.newaxis]
center_y = centers[:, 1][:, np.newaxis]
s_tl_x = s_tl_x[np.newaxis, :]
s_br_x = s_br_x[np.newaxis, :]
s_tl_y = s_tl_y[np.newaxis, :]
s_br_y = s_br_y[np.newaxis, :]
ind_x1 = (center_x > s_tl_x) & (center_x < s_br_x)
ind_x2 = (center_x < s_tl_x) & (center_x > s_br_x)
ind_y1 = (center_y > s_tl_y) & (center_y < s_br_y)
ind_y2 = (center_y < s_tl_y) & (center_y > s_br_y)
ind_cls = (centers[:, 2][:, np.newaxis] - s_detections[:, -1][np.newaxis, :]) == 0
ind_s_new_score = np.max((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0))), axis=0) == 1
index_s_new_score = np.argmax((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0)))[:, ind_s_new_score], axis=0)
s_corner_score = s_temp_score[ind_s_new_score]
s_center_score = centers[index_s_new_score, 3]
s_detections[:, 6][ind_s_new_score] = (s_corner_score * 3 + s_center_score) / 4
            # post-process large bounding boxes
l_tl_x = (2 * l_detections[:, 0] + l_detections[:, 4]) / 3
l_br_x = (l_detections[:, 0] + 2 * l_detections[:, 4]) / 3
l_tl_y = (2 * l_detections[:, 1] + l_detections[:, 5]) / 3
l_br_y = (l_detections[:, 1] + 2 * l_detections[:, 5]) / 3
l_temp_score = copy.copy(l_detections[:, 6])
l_detections[:, 6] = -1
center_x = centers[:, 0][:, np.newaxis]
center_y = centers[:, 1][:, np.newaxis]
l_tl_x = l_tl_x[np.newaxis, :]
l_br_x = l_br_x[np.newaxis, :]
l_tl_y = l_tl_y[np.newaxis, :]
l_br_y = l_br_y[np.newaxis, :]
ind_x1 = (center_x > l_tl_x) & (center_x < l_br_x)
ind_x2 = (center_x < l_tl_x) & (center_x > l_br_x)
ind_y1 = (center_y > l_tl_y) & (center_y < l_br_y)
ind_y2 = (center_y < l_tl_y) & (center_y > l_br_y)
ind_cls = (centers[:, 2][:, np.newaxis] - l_detections[:, -1][np.newaxis, :]) == 0
ind_l_new_score = np.max((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0))), axis=0) == 1
index_l_new_score = np.argmax((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0)))[:, ind_l_new_score], axis=0)
l_corner_score = l_temp_score[ind_l_new_score]
l_center_score = centers[index_l_new_score, 3]
l_detections[:, 6][ind_l_new_score] = (l_corner_score * 3 + l_center_score) / 4
detections = np.concatenate([l_detections, s_detections], axis=0)
detections = detections[np.argsort(-detections[:, 6])]
classes = detections[..., -1]
# reject detections with negative scores
keep_inds = (detections[:, 6] > -1)
detections = detections[keep_inds]
classes = classes[keep_inds]
detections = np.expand_dims(detections, axis=0)
for j in range(num_classes):
inds = (classes == j)
top_preds[j + 1] = detections[i, inds, :].astype(np.float32).tolist()
ret.append(top_preds)
for j in range(1, num_classes + 1):
ret[0][j] = np.array(ret[0][j], dtype=np.float32).reshape(-1, 11)
return ret[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 6] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 6] >= thresh)
results[j] = results[j][keep_inds]
return results
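# Hedged sketch (not part of the original debugger): the corner geometry used in debug()
# above, factored into a standalone helper. Given the top-left, bottom-left and
# bottom-right corners of a rotated box, the bottom edge bl->br gives the angle, and the
# foot of the perpendicular from tl onto that edge gives the box width and height.
def _example_box_from_corners(tl, bl, br):
    tl_x, tl_y = tl
    bl_x, bl_y = bl
    br_x, br_y = br
    x_c = (tl_x + br_x) / 2.
    y_c = (tl_y + br_y) / 2.
    angle = math.atan2(-(br_y - bl_y), br_x - bl_x)
    delta_x, delta_y = br_x - bl_x, br_y - bl_y
    # foot of the perpendicular from tl onto the line through bl and br
    t = ((tl_x - bl_x) * delta_x + (tl_y - bl_y) * delta_y) / (delta_x ** 2 + delta_y ** 2)
    p_x, p_y = bl_x + t * delta_x, bl_y + t * delta_y
    w = np.sqrt((br_x - p_x) ** 2 + (br_y - p_y) ** 2)
    h = np.sqrt((tl_x - p_x) ** 2 + (tl_y - p_y) ** 2)
    return [x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2, angle]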
47788
from rest_framework import serializers
from .models import NoticeProfile
class NoticeSerializer(serializers.ModelSerializer):
class Meta:
model = NoticeProfile
fields = "__all__"
47797
from __future__ import annotations
import asyncio
import typing
import types
import pandas as pd
import tooltime
from ctc import evm
from ctc import spec
async def async_get_lending_flows(
wallet: spec.Address,
pool_token: spec.ERC20Reference,
protocol: typing.Literal['aave', 'compound', 'rari'],
wallet_deposits: spec.DataFrame | None = None,
deposits: spec.DataFrame | None = None,
wallet_withdrawals: spec.DataFrame | None = None,
withdrawals: spec.DataFrame | None = None,
include_latest: bool = True,
provider: spec.ProviderSpec = None,
replace_symbols: bool = True,
normalize: bool = True,
include_rewards: bool = True,
) -> spec.DataFrame:
if protocol == 'aave':
from ctc.protocols import aave_v2_utils
protocol_module: types.ModuleType = aave_v2_utils
elif protocol == 'compound':
from ctc.protocols import compound_utils
protocol_module = compound_utils
elif protocol == 'rari':
from ctc.protocols import rari_utils
protocol_module = rari_utils
else:
raise Exception('unknown protocol: ' + str(protocol))
df = await _async_create_raw_wallet_flows_df(
wallet=wallet,
wallet_deposits=wallet_deposits,
deposits=deposits,
wallet_withdrawals=wallet_withdrawals,
withdrawals=withdrawals,
include_latest=include_latest,
provider=provider,
)
underlying = await protocol_module.async_get_underlying_asset(
pool_token=pool_token,
provider=provider,
)
# add time data
blocks = df.index.values
blocks_before = blocks - 1
# queue tasks
timestamps_coroutine = evm.async_get_block_timestamps(
blocks=blocks,
provider=provider,
)
timestamps_task = asyncio.create_task(timestamps_coroutine)
pool_token_balances_before_coroutine = (
evm.async_get_erc20_balance_of_by_block(
token=pool_token,
address=wallet,
blocks=blocks_before,
provider=provider,
)
)
pool_token_balances_before_task = asyncio.create_task(
pool_token_balances_before_coroutine
)
pool_token_balances_after_coroutine = (
evm.async_get_erc20_balance_of_by_block(
token=pool_token,
address=wallet,
blocks=blocks,
provider=provider,
)
)
pool_token_balances_after_task = asyncio.create_task(
pool_token_balances_after_coroutine
)
asset_prices_coroutine = protocol_module.async_get_asset_price_by_block(
asset=underlying,
blocks=blocks,
provider=provider,
)
asset_prices_task = asyncio.create_task(asset_prices_coroutine)
# queue optional tasks
if include_rewards:
reward_coroutine = protocol_module.async_compute_wallet_rewards(
wallet=wallet,
blocks=blocks,
provider=provider,
replace_symbol=replace_symbols,
)
reward_task = asyncio.create_task(reward_coroutine)
if normalize:
decimals_coroutine = evm.async_get_erc20_decimals(
underlying,
provider=provider,
)
decimals_task = asyncio.create_task(decimals_coroutine)
if replace_symbols:
underlying_symbol_coroutine = evm.async_get_erc20_symbol(
underlying,
provider=provider,
)
underlying_symbol_task = asyncio.create_task(
underlying_symbol_coroutine
)
pool_token_coroutine = evm.async_get_erc20_symbol(
pool_token,
provider=provider,
)
pool_token_symbol_task = asyncio.create_task(pool_token_coroutine)
# normalize deposits and withdrawals
if normalize:
decimals = await decimals_task
df['asset_deposit'] /= 10 ** decimals
df['asset_withdrawal'] /= 10 ** decimals
# compute time columns
timestamps = await timestamps_task
df.insert(loc=0, column='timestamp', value=timestamps) # type: ignore
df.insert(
loc=1,
column='time',
value=df['timestamp'].map(tooltime.timestamp_to_iso),
)
# add pool token balances
df['pool_token_balance_before'] = await pool_token_balances_before_task
df['pool_token_balance_after'] = await pool_token_balances_after_task
# add underlying balances
df['asset_balance_before'] = df['pool_token_balance_before']
df['asset_balance_after'] = df['pool_token_balance_after']
# add asset price
df['asset_price'] = await asset_prices_task
df['asset_balance_usd'] = df['asset_balance_after'] * df['asset_price']
    # add rewards
    if include_rewards:
        rewards = await reward_task
        for key, value in rewards.items():
            df[key] = value
# replace symbols
if replace_symbols:
rename_columns = {}
underlying_symbol = await underlying_symbol_task
pool_token_symbol = await pool_token_symbol_task
for column in df.columns:
if 'asset' in column:
rename_columns[column] = column.replace(
'asset', underlying_symbol
)
if 'pool_token' in column:
rename_columns[column] = column.replace(
'pool_token', pool_token_symbol
)
df = df.rename(columns=rename_columns)
return df
async def _async_create_raw_wallet_flows_df(
wallet: spec.Address,
wallet_deposits: spec.DataFrame | None = None,
deposits: spec.DataFrame | None = None,
wallet_withdrawals: spec.DataFrame | None = None,
withdrawals: spec.DataFrame | None = None,
include_latest: bool = True,
provider: spec.ProviderSpec = None,
) -> spec.DataFrame:
from ctc.protocols import aave_v2_utils
no_deposits = wallet_deposits is None and deposits is None
no_withdrawals = wallet_withdrawals is None and withdrawals is None
    if no_deposits and not no_withdrawals:
        deposits = await aave_v2_utils.async_get_deposits(provider=provider)
    elif not no_deposits and no_withdrawals:
        withdrawals = await aave_v2_utils.async_get_withdrawals(provider=provider)
elif no_deposits and no_withdrawals:
deposits, withdrawals = await asyncio.gather(
aave_v2_utils.async_get_deposits(provider=provider),
aave_v2_utils.async_get_withdrawals(provider=provider),
)
wallet = wallet.lower()
if wallet_deposits is None:
if deposits is None:
raise Exception('could not determine deposits')
wallet_deposits = deposits[deposits['arg__user'] == wallet]
if isinstance(wallet_deposits.index, pd.MultiIndex):
wallet_deposits = wallet_deposits.groupby(level='block_number').sum()
if isinstance(wallet_deposits, pd.DataFrame):
wallet_deposits_series = wallet_deposits['arg__amount']
if wallet_withdrawals is None:
if withdrawals is None:
raise Exception('could not determine withdrawals')
wallet_withdrawals = withdrawals[withdrawals['arg__user'] == wallet]
if isinstance(wallet_withdrawals.index, pd.MultiIndex):
wallet_withdrawals = wallet_withdrawals.groupby(
level='block_number'
).sum()
if isinstance(wallet_withdrawals, pd.DataFrame):
wallet_withdrawals_series = wallet_withdrawals['arg__amount']
raw_data = {
'asset_deposit': wallet_deposits_series,
'asset_withdrawal': wallet_withdrawals_series,
}
raw_df = pd.DataFrame(raw_data)
raw_df = raw_df.fillna(0)
if include_latest:
block = await evm.async_get_latest_block_number(provider=provider)
raw_df.loc[block] = [0, 0]
return raw_df
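# Hedged sketch (not part of ctc): the concurrency pattern used in async_get_lending_flows
# above, reduced to a minimal form. Coroutines are wrapped in tasks up front so they run
# concurrently, and each task is awaited only when its result is needed. The fetcher
# arguments are hypothetical stand-ins, not real ctc functions.
async def _example_queue_then_await(fetch_timestamps, fetch_balances):
    timestamps_task = asyncio.create_task(fetch_timestamps())
    balances_task = asyncio.create_task(fetch_balances())
    # other work (or more task creation) can happen here while both tasks run
    timestamps = await timestamps_task
    balances = await balances_task
    return timestamps, balances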
47801
from os.path import abspath, dirname, join
WORLDGEN_ROOT_PATH = abspath(join(dirname(__file__), '..', '..'))
def worldgen_path(*args):
"""
Returns an absolute path from a path relative to the mujoco_worldgen repository
root directory.
"""
return join(WORLDGEN_ROOT_PATH, *args)
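# Usage sketch (the subdirectories are hypothetical): worldgen_path('assets', 'stls')
# returns '<WORLDGEN_ROOT_PATH>/assets/stls', independent of the caller's working directory.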
47822
import argparse
import json
from tqdm import tqdm
from common.dataset.reader import JSONLineReader
from common.util.log_helper import LogHelper
def _sent_to_str(sent):
return sent[-2] + "$$$" + str(sent[-1])
def _replace_sent_with_str(sent, string):
segments = string.split(r"$$$")
if len(segments) != 2:
raise Exception("Illegal string: " + string)
sent[-2] = segments[0]
sent[-1] = int(segments[1])
return sent
def _build_new_sent_with_str(string, num_of_segments=2):
if num_of_segments == 2:
sent = ["", -1]
elif num_of_segments == 4:
sent = [-1, -1, "", -1]
else:
raise Exception("Illegal num_of_segments: " + str(num_of_segments))
return _replace_sent_with_str(sent, string)
def _sents_from_evidences(evidences):
sents = set()
for evidence in evidences:
for s in evidence:
sent = _sent_to_str(s)
sents.add(sent)
return sents
def _fill_pred_sents_with_gold(pred_sents, gold_sents, max_sent):
selected_sents = pred_sents[:max_sent]
neg_indices = []
for i, selected in enumerate(selected_sents):
key_selected = _sent_to_str(selected)
if key_selected in gold_sents:
gold_sents.remove(key_selected)
else:
neg_indices.append(i)
if len(gold_sents) == 0:
return selected_sents
if len(selected_sents) <= max_sent:
for _ in range(max_sent - len(selected_sents)):
selected_sents.append(_build_new_sent_with_str(gold_sents.pop()))
if len(gold_sents) == 0:
return selected_sents
if len(neg_indices) > 0:
neg_indices = reversed(neg_indices)
for i in neg_indices:
sent = selected_sents[i]
selected_sents[i] = _replace_sent_with_str(sent, gold_sents.pop())
if len(gold_sents) == 0:
return selected_sents
if len(gold_sents) > 0:
        logger.warning(str(len(gold_sents)) +
                       " gold sentences cannot be filled into prediction")
return selected_sents
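# Hedged worked example for _fill_pred_sents_with_gold (values are made up). With
# max_sent=3,
#   pred_sents = [["PageA", 0], ["PageB", 2], ["PageC", 5]]
#   gold_sents = {"PageB$$$2", "PageD$$$1"}
# "PageB$$$2" is already predicted, so it is removed from gold_sents; the leftover
# "PageD$$$1" then overwrites the last remaining negative (["PageC", 5]), giving
#   [["PageA", 0], ["PageB", 2], ["PageD", 1]]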
if __name__ == '__main__':
LogHelper.setup()
logger = LogHelper.get_logger('fill_gold_sentences')
parser = argparse.ArgumentParser()
parser.add_argument(
'--input', help='/path/to/input/file', required=True)
parser.add_argument(
'--output', help='/path/to/output/file', required=True)
parser.add_argument(
'--max-sent', type=int, help='Maximal number of sentences per claim', default=10)
args = parser.parse_args()
jlr = JSONLineReader()
data = jlr.read(args.input)
with open(args.output, "w+") as output_file:
        for instance in tqdm(data):
            if instance['verifiable'] != 'NOT VERIFIABLE':
                pred_sents = instance['predicted_sentences']
                gold_evidences = instance['evidence']
                gold_sents = _sents_from_evidences(gold_evidences)
                filled_pred_sents = _fill_pred_sents_with_gold(
                    pred_sents, gold_sents, args.max_sent)
                instance['predicted_sentences'] = filled_pred_sents
            output_file.write(json.dumps(instance) + "\n")
47846
def test_evens():
yield check_even_cls
class Test(object):
def test_evens(self):
yield check_even_cls
class Check(object):
def __call__(self):
pass
check_even_cls = Check()
47851
import numpy as np
import os
import tensorflow as tf
EPS = 1e-8
def placeholder(dim=None):
return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))
def placeholders(*args):
return [placeholder(dim) for dim in args]
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
init_fn = tf.keras.initializers.Orthogonal(1.0)
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation, kernel_initializer=init_fn)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation, kernel_initializer=init_fn)
def get_vars(scope):
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
def gaussian_likelihood(x, mu, log_std):
pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))
return tf.reduce_sum(pre_sum, axis=1)
def clip_but_pass_gradient(x, l=-1., u=1.):
clip_up = tf.cast(x > u, tf.float32)
clip_low = tf.cast(x < l, tf.float32)
return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low)
"""
Policies
"""
def gumbel_policy(x, act_dim, hidden_sizes, activation):
# policy network outputs
net = mlp(x, list(hidden_sizes), activation, activation)
logits = tf.layers.dense(net, act_dim, activation='linear')
    # action and log action probabilities (log_softmax handles numerical stability)
action_probs = tf.nn.softmax([logits], axis=-1)
log_action_probs = tf.nn.log_softmax([logits], axis=-1)
# policy with no noise
mu = tf.argmax(logits, axis=-1)
# add gumbel noise to action distributions
temperature = tf.convert_to_tensor(1.0) # 0 --> argmax, inf --> uniform
uniform_noise = tf.random_uniform(shape=tf.shape(logits),
minval=np.finfo(np.float32).tiny, # (0,1) range
maxval=1.)
gumbel_noise = -tf.log(-tf.log(uniform_noise))
noisy_logits = logits + gumbel_noise
pi_dist = tf.nn.softmax(noisy_logits / temperature[..., tf.newaxis])
    # don't use tf.dist.relaxedCategorical for log_prob; it seems to give wrong results
logp_pi = -tf.reduce_sum(-pi_dist * tf.nn.log_softmax(logits, axis=-1), axis=1)
return mu, pi_dist, logp_pi
"""
Actor-Critics
"""
def a_out_mlp_actor_critic(x, a, hidden_sizes=[400,300], activation=tf.nn.relu, policy=gumbel_policy):
act_dim = a.shape.as_list()[-1]
with tf.variable_scope('pi'):
mu, pi_dist, logp_pi = policy(x, act_dim, hidden_sizes, activation)
# vfs
with tf.variable_scope('q1'):
q1 = mlp(x, list(hidden_sizes)+[act_dim], activation, None)
q1_a = tf.reduce_sum(tf.multiply(q1, a), axis=1)
with tf.variable_scope('q2'):
q2 = mlp(x, list(hidden_sizes)+[act_dim], activation, None)
q2_a = tf.reduce_sum(tf.multiply(q2, a), axis=1)
return mu, pi_dist, logp_pi, q1_a, q2_a
def a_in_mlp_actor_critic(x, a, hidden_sizes=[400,300], activation=tf.nn.relu, policy=gumbel_policy):
act_dim = a.shape.as_list()[-1]
with tf.variable_scope('pi'):
mu, pi_dist, logp_pi = policy(x, act_dim, hidden_sizes, activation)
# vfs
with tf.variable_scope('q1'):
q1_a = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
with tf.variable_scope('q2'):
q2_a = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
return mu, pi_dist, logp_pi, q1_a, q2_a
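# Hedged sketch (not part of the original module): the Gumbel trick used in gumbel_policy
# above, written with plain numpy. Adding -log(-log(U)) noise to the logits and taking the
# argmax draws exact samples from the categorical distribution; dividing the noisy logits
# by a temperature before a softmax gives the relaxed, differentiable version used above.
def _example_gumbel_softmax_np(logits, temperature=1.0):
    uniform = np.random.uniform(low=np.finfo(np.float32).tiny, high=1.0, size=logits.shape)
    gumbel = -np.log(-np.log(uniform))
    noisy = (logits + gumbel) / temperature
    exp = np.exp(noisy - noisy.max(axis=-1, keepdims=True))
    return exp / exp.sum(axis=-1, keepdims=True)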
47857
import mock
def test_setup(GPIO, spidev):
from unicornhatmini import UnicornHATMini
unicornhatmini = UnicornHATMini()
spidev.SpiDev.assert_has_calls((
mock.call(0, 0),
mock.call(0, 1)
), any_order=True)
GPIO.setwarnings.assert_called_once_with(False)
GPIO.setmode.assert_called_once_with(GPIO.BCM)
del unicornhatmini
def test_shutdown(GPIO, spidev, atexit):
from unicornhatmini import UnicornHATMini
unicornhatmini = UnicornHATMini()
atexit.register.assert_called_once_with(unicornhatmini._exit)
unicornhatmini._exit()
47858
import time
from ..base import order as od
from .api import BybitApi
class BybitOrderManager(od.OrderManagerBase):
def __init__(self, api, ws=None, retention=60):
super().__init__(api, ws, retention)
self.ws.subscribe('execution', self.__on_events, True)
self.ws.subscribe('position', self.__on_events, True)
self.ws.subscribe('order', self.__on_events, True)
def _generate_order_object(self, e):
info = e.info
if e.type != od.EVENT_OPEN:
self.log.warning(f'event for unknown order: {e}')
return None
api = BybitApi.ccxt_instance()
symbol = api.markets_by_id[info['symbol']]['symbol']
return od.Order(
symbol, info['order_type'].lower(), info['side'].lower(),
info['qty'], float(info['price']))
def __on_events(self, msg):
topic = msg['topic']
for e in msg['data']:
oe = od.OrderEvent()
oe.info = e
oe.ts = time.time()
if topic == 'order':
oe.id = e['order_id']
st = e['order_status']
if st == 'New':
oe.type = od.EVENT_OPEN
elif st == 'Filled':
oe.type = od.EVENT_CLOSE
elif st in ['Cancelled', 'Rejected']:
oe.type = od.EVENT_CANCEL
else: # ignore(PartiallyFilled, Created, PendingCancel)
continue
elif topic == 'execution':
oe.type = od.EVENT_EXECUTION
oe.id = e['order_id']
oe.price = float(e['price'])
size = e['exec_qty']
oe.size = -size if e['side'] == 'Sell' else size
oe.fee = float(e['exec_fee']) * size
elif topic == 'position':
break
else:
assert False
self._handle_order_event(oe)
class BybitPositionGroup(od.PositionGroupBase):
INVERSE = True
class BybitOrderGroup(od.OrderGroupBase):
PositionGroup = BybitPositionGroup
class BybitOrderGroupManager(od.OrderGroupManagerBase):
OrderGroup = BybitOrderGroup
# Future
class BybitUsdtOrderManager(BybitOrderManager):
pass
class BybitUsdtPositionGroup(BybitPositionGroup):
INVERSE = False
class BybitUsdtOrderGroup(BybitOrderGroup):
PositionGroup = BybitUsdtPositionGroup
class BybitUsdtOrderGroupManager(BybitOrderGroupManager):
OrderGroup = BybitUsdtOrderGroup
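# Hedged example (values are made up; keys follow the handler above): the shape of a
# websocket message consumed by BybitOrderManager.__on_events for the 'order' topic.
_EXAMPLE_ORDER_MESSAGE = {
    'topic': 'order',
    'data': [{
        'order_id': 'abc-123',
        'order_status': 'Filled',
        'symbol': 'BTCUSD',
        'order_type': 'Limit',
        'side': 'Buy',
        'qty': 100,
        'price': '30000',
    }],
}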
47859
import os
import wget
import paddle
from .tokenizer import Tokenizer
from .model import CLIP
from paddle.vision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
tokenizer = Tokenizer()
def get_transforms(image_resolution):
transforms = Compose([
Resize(image_resolution, interpolation='bicubic'),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
])
return transforms
def clip_rn50():
model = CLIP(
embed_dim=1024,
image_resolution=224,
vision_layers=(3, 4, 6, 3),
vision_width=64,
vision_patch_size=None,
context_length=77,
vocab_size=49408,
transformer_width=512,
transformer_heads=8,
transformer_layers=12
)
return model, get_transforms(224)
def clip_rn101():
model = CLIP(
embed_dim=512,
image_resolution=224,
vision_layers=(3, 4, 23, 3),
vision_width=64,
vision_patch_size=None,
context_length=77,
vocab_size=49408,
transformer_width=512,
transformer_heads=8,
transformer_layers=12
)
return model, get_transforms(224)
def clip_rn50x4():
model = CLIP(
embed_dim=640,
image_resolution=288,
vision_layers=(4, 6, 10, 6),
vision_width=80,
vision_patch_size=None,
context_length=77,
vocab_size=49408,
transformer_width=640,
transformer_heads=10,
transformer_layers=12
)
return model, get_transforms(288)
def clip_vit_b_32():
model = CLIP(
embed_dim=512,
image_resolution=224,
vision_layers=12,
vision_width=768,
vision_patch_size=32,
context_length=77,
vocab_size=49408,
transformer_width=512,
transformer_heads=8,
transformer_layers=12
)
return model, get_transforms(224)
def tokenize(texts, context_length=77):
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = tokenizer.encoder["<|startoftext|>"]
eot_token = tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] +
tokenizer.encode(text) + [eot_token] for text in texts]
result = paddle.zeros((len(all_tokens), context_length), dtype='int64')
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(
f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = paddle.to_tensor(tokens)
return result
model_dict = {
'RN50': [clip_rn50, r'https://bj.bcebos.com/v1/ai-studio-online/6ffc89246e974a809e6e4b40fdb58063a112a0153e674dae8ed5b6dfe5d46d86?responseContentDisposition=attachment%3B%20filename%3DRN50.pdparams', 'RN50.pdparams'],
'RN50x4': [clip_rn50x4, r'https://bj.bcebos.com/v1/ai-studio-online/9f874e0174da48ffbd7c17e77b1fb278632620a9995e476ba873e334caec9037?responseContentDisposition=attachment%3B%20filename%3DRN50x4.pdparams', 'RN50x4.pdparams'],
'RN101': [clip_rn101, r'https://bj.bcebos.com/v1/ai-studio-online/484592d98c584785bc8f6f9f7badbf4a9fb7a96f6102470697ed974e8eeee2a9?responseContentDisposition=attachment%3B%20filename%3DRN101.pdparams', 'RN101.pdparams'],
'ViT_B_32': [clip_vit_b_32, r'https://bj.bcebos.com/v1/ai-studio-online/eb5e4dbf1ec142caa003a27cefd510ef46a8a6c3932a4d60bfecb3f3ab746c02?responseContentDisposition=attachment%3B%20filename%3DViT-B-32.pdparams', 'ViT-B-32.pdparams']
}
def load_model(model_name, pretrained=False):
model_fn, url, file_name = model_dict[model_name]
model, transforms = model_fn()
if pretrained:
model_path = os.path.join('pretrained_models', file_name)
if not os.path.isfile(model_path):
if not os.path.exists('pretrained_models'):
os.mkdir('pretrained_models')
wget.download(url, out=model_path)
params = paddle.load(model_path)
model.set_dict(params)
model.eval()
return model, transforms
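# Hedged usage sketch (not part of the original module): builds an un-pretrained model
# defined above and tokenizes two prompts. With pretrained=True, load_model would also
# download and load the published weights listed in model_dict.
def _example_clip_usage():
    model, transforms = load_model('ViT_B_32', pretrained=False)
    text = tokenize(["a photo of a cat", "a photo of a dog"])  # tensor of shape [2, 77]
    return model, transforms, text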
47902
import abc
import numbers
from typing import Union
import numpy as np
from river import base, optim, utils
VectorLike = Union[utils.VectorDict, np.ndarray]
__all__ = ["Initializer", "Scheduler", "Optimizer", "Loss"]
class Initializer(base.Base, abc.ABC):
"""An initializer is used to set initial weights in a model."""
@abc.abstractmethod
def __call__(self, shape=1):
"""Returns a fresh set of weights.
Parameters
----------
shape
Indicates how many weights to return. If `1`, then a single scalar value will be
returned.
"""
class Scheduler(base.Base, abc.ABC):
"""Can be used to program the learning rate schedule of an `optim.base.Optimizer`."""
@abc.abstractmethod
def get(self, t: int) -> float:
"""Returns the learning rate at a given iteration.
Parameters
----------
t
The iteration number.
"""
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
class Optimizer(base.Base):
"""Optimizer interface.
Every optimizer inherits from this base interface.
Parameters
----------
    lr
        The learning rate, either a constant or a `Scheduler`.
Attributes
----------
learning_rate : float
Returns the current learning rate value.
"""
def __init__(self, lr: Union[Scheduler, float]):
if isinstance(lr, numbers.Number):
lr = optim.schedulers.Constant(lr)
self.lr = lr
self.n_iterations = 0
@property
def learning_rate(self) -> float:
return self.lr.get(self.n_iterations)
    def look_ahead(self, w: dict) -> dict:
        """Updates a weight vector before a prediction is made.
        Parameters
        ----------
        w
            A dictionary of weight parameters. The weights are modified in-place.
        Returns
        -------
        The updated weights.
        """
        return w
def _step_with_dict(self, w: dict, g: dict) -> dict:
raise NotImplementedError
def _step_with_vector(self, w: VectorLike, g: VectorLike) -> VectorLike:
raise NotImplementedError
def step(
self, w: Union[dict, VectorLike], g: Union[dict, VectorLike]
) -> Union[dict, VectorLike]:
"""Updates a weight vector given a gradient.
Parameters
----------
w
A vector-like object containing weights. The weights are modified in-place.
g
A vector-like object of gradients.
Returns
-------
The updated weights.
"""
if isinstance(w, VectorLike.__args__) and isinstance(g, VectorLike.__args__):
try:
w = self._step_with_vector(w, g)
self.n_iterations += 1
return w
except NotImplementedError:
pass
w = self._step_with_dict(w, g)
self.n_iterations += 1
return w
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
class Loss(base.Base, abc.ABC):
"""Base class for all loss functions."""
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
@abc.abstractmethod
def __call__(self, y_true, y_pred):
"""Returns the loss.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The loss(es).
"""
@abc.abstractmethod
def gradient(self, y_true, y_pred):
"""Return the gradient with respect to y_pred.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The gradient(s).
"""
@abc.abstractmethod
def mean_func(self, y_pred):
"""Mean function.
This is the inverse of the link function. Typically, a loss function takes as input the raw
output of a model. In the case of classification, the raw output would be logits. The mean
function can be used to convert the raw output into a value that makes sense to the user,
such as a probability.
Parameters
----------
y_pred
Raw prediction(s).
Returns
-------
The adjusted prediction(s).
References
----------
[^1]: [Wikipedia section on link and mean function](https://www.wikiwand.com/en/Generalized_linear_model#/Link_function)
"""