max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
Introducing_Trinket_M0/Trinket_AnalogIn.py | joewalk102/Adafruit_Learning_System_Guides | 665 | 11078855 | # Trinket IO demo - analog inputs
import time
import board
from analogio import AnalogIn
analog0in = AnalogIn(board.D0)
analog1in = AnalogIn(board.D1)
analog2in = AnalogIn(board.D2)
analog3in = AnalogIn(board.D3)
analog4in = AnalogIn(board.D4)
def getVoltage(pin):
return (pin.value * 3.3) / 65536
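# Worked example of the conversion above (illustrative, not part of the original guide):
# a mid-scale reading of 32768 from the 16-bit ADC maps to
# (32768 * 3.3) / 65536 = 1.65 V, i.e. half of the 3.3 V reference.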
while True:
print("D0: %0.2f \t D1: %0.2f \t D2: %0.2f \t D3: %0.2f \t D4: %0.2f" %
(getVoltage(analog0in),
getVoltage(analog1in),
getVoltage(analog2in),
getVoltage(analog3in),
getVoltage(analog4in)))
time.sleep(0.1)
|
automl/cvt_hparam_file.py | HatsuneMiku4/PocketFlow | 2,724 | 11078871 | # Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert AutoML-generated hyper-parameter file to PocketFlow-compatible format."""
import sys
# file paths
assert len(sys.argv) == 2, '[HELP] python cvt_hparam_file.py <file_path>'
file_path = sys.argv[1]
# read hyper-parameters' values from file
with open(file_path, 'r') as i_file:
# obtain raw hyper-parameters' values
for i_line in i_file:
sub_strs = i_line.split()
name, val = sub_strs[0], float(sub_strs[2])
if name == 'ws_prune_ratio_exp':
ws_prune_ratio_exp = val
elif name == 'ws_iter_ratio_beg':
ws_iter_ratio_beg = val
elif name == 'ws_iter_ratio_end':
ws_iter_ratio_end = val
elif name == 'ws_update_mask_step':
ws_update_mask_step = val
# make sure <iter_ratio_beg> is smaller than <iter_ratio_end>
ws_iter_ratio_end = ws_iter_ratio_beg + ws_iter_ratio_end * (1.0 - ws_iter_ratio_beg)
# write hyper-parameters' values to file
output_str = ''
output_str += ' --ws_prune_ratio_exp %.4f' % ws_prune_ratio_exp
output_str += ' --ws_iter_ratio_beg %.4f' % ws_iter_ratio_beg
output_str += ' --ws_iter_ratio_end %.4f' % ws_iter_ratio_end
output_str += ' --ws_update_mask_step %d' % int(ws_update_mask_step)
print(output_str)
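# Example (illustrative; the exact separator token in the AutoML file is an assumption --
# the parser above only requires "name <sep> value" on each line):
#   ws_prune_ratio_exp : 2.5
#   ws_iter_ratio_beg : 0.1
#   ws_iter_ratio_end : 0.5
#   ws_update_mask_step : 500
# The remapping turns ws_iter_ratio_end into 0.1 + 0.5 * (1.0 - 0.1) = 0.55, so the script prints:
#   --ws_prune_ratio_exp 2.5000 --ws_iter_ratio_beg 0.1000 --ws_iter_ratio_end 0.5500 --ws_update_mask_step 500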
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GLES2/NV/non_square_matrices.py | ShujaKhalid/deep-rl | 210 | 11078884 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_non_square_matrices'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_non_square_matrices',error_checker=_errors._error_checker)
GL_FLOAT_MAT2x3_NV=_C('GL_FLOAT_MAT2x3_NV',0x8B65)
GL_FLOAT_MAT2x4_NV=_C('GL_FLOAT_MAT2x4_NV',0x8B66)
GL_FLOAT_MAT3x2_NV=_C('GL_FLOAT_MAT3x2_NV',0x8B67)
GL_FLOAT_MAT3x4_NV=_C('GL_FLOAT_MAT3x4_NV',0x8B68)
GL_FLOAT_MAT4x2_NV=_C('GL_FLOAT_MAT4x2_NV',0x8B69)
GL_FLOAT_MAT4x3_NV=_C('GL_FLOAT_MAT4x3_NV',0x8B6A)
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix2x3fvNV(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix2x4fvNV(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix3x2fvNV(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix3x4fvNV(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix4x2fvNV(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix4x3fvNV(location,count,transpose,value):pass
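# Usage sketch (illustrative; assumes a live GLES2 context, a linked shader `program`, and a
# uniform named "u_mat2x3" -- none of these are defined in this autogenerated module):
# from OpenGL.GLES2 import glGetUniformLocation, GL_FALSE
# loc = glGetUniformLocation(program, b"u_mat2x3")
# glUniformMatrix2x3fvNV(loc, 1, GL_FALSE, [1.0, 0.0, 0.0, 1.0, 0.0, 0.0])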
|
Chapter10/unittest_demo/test_mycalc.py | JTamarit/Tkinter_libro | 173 | 11078915 | import mycalc
import unittest
from unittest.mock import Mock, patch
class TestMyCalc(unittest.TestCase):
def setUp(self):
self.mycalc1_0 = mycalc.MyCalc(1, 0)
self.mycalc36_12 = mycalc.MyCalc(36, 12)
def test_add(self):
self.assertEqual(self.mycalc1_0.add(), 1)
self.assertEqual(self.mycalc36_12.add(), 48)
def test_mod_divide(self):
self.assertEqual(self.mycalc36_12.mod_divide(), (3, 0))
self.assertRaises(ValueError, self.mycalc1_0.mod_divide)
with self.assertRaises(ValueError):
self.mycalc1_0.mod_divide()
def test_rand_between(self):
# not a good way to do it:
rv = self.mycalc1_0.rand_between()
self.assertLessEqual(rv, 1)
self.assertGreaterEqual(rv, 0)
# better, but clumsy
fakerandom = Mock(return_value=.5)
orig_random = mycalc.random.random
mycalc.random.random = fakerandom
rv = self.mycalc1_0.rand_between()
self.assertEqual(rv, 0.5)
mycalc.random.random = orig_random
# clean and neat
with patch('mycalc.random.random') as fakerandom:
fakerandom.return_value = 0.5
rv = self.mycalc1_0.rand_between()
self.assertEqual(rv, 0.5)
@patch('mycalc.random.random')
def test_rand_between2(self, fakerandom):
fakerandom.return_value = 0.5
rv = self.mycalc1_0.rand_between()
self.assertEqual(rv, 0.5)
if __name__ == '__main__':
unittest.main()
|
tests/test_bigoven.py | mathiazom/recipe-scrapers | 811 | 11078962 | from recipe_scrapers.bigoven import BigOven
from tests import ScraperTest
class TestBigOven(ScraperTest):
scraper_class = BigOven
def test_host(self):
self.assertEqual("bigoven.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.bigoven.com/recipe/no-knead-herb-focaccia/2719857",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "No-Knead Herb Focaccia")
def test_total_time(self):
self.assertEqual(720, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("24 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://photos.bigoven.com/recipe/hero/no-knead-herb-focaccia-79674b.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"1 packet active dry yeast ; (or 2 ¼ teaspoons)",
"2 cups warm water ; divided",
"2 teaspoons granulated sugar",
"5 ½ cups bread flour",
"¼ cup extra-virgin olive oil ; (plus more to grease pans and dough)",
"1 tablespoon sea salt (or kosher salt)",
"flaky sea salt ; (like Maldon)",
"¼ cup extra-virgin olive oil",
"4 sprigs fresh rosemary leaves",
"3 sprigs fresh thyme leaves",
"2 sprigs fresh oregano ; (or various herbs of choice)",
"¼ teaspoon crushed red pepper",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"\n".join(
[
"Herb Topping:",
"In a small bowl, combine olive oil, herbs, and crushed red pepper. Set aside until ready.",
"For the dough:",
"1. In a small saucepan, warm 1 cup water to a lukewarm temperature of 105°F to 110°F on an instant-read thermometer. Pour water into the bowl of a stand mixer and gently whisk in yeast and sugar to dissolve. Allow the yeast to bloom briefly, about 5-10 minutes.",
"2. Add remaining cup water, flour, and sea salt. With the dough hook attachment on lowest speed, mix until a loose, craggy dough forms and then increase speed to medium-high and mix until a smooth dough begins to wrap around the hook, about 5 to 7 minutes. Turn off the mixer, cover bowl with a damp towel, and allow dough to rest for 10 to 15 minutes. Resume mixing on medium-high for 10 to 15 minutes until dough becomes very elastic and begins tugging away from the sides of the bowl. Dough will be quite sticky, but resist the urge to add additional flour -- dough.",
"3. For the first rise, drizzle ¼ cup olive oil into the mixing bowl and swirl the sides of the bowl to coat as well as the surface of the dough. Cover and allow to rise in a warm place until doubled in size, about 90 to 120 minutes.",
"4. Use this time to prepare your herb oil and prep your decorative toppings, if using.",
"5. For the second rise, drizzle another ¼ cup olive oil on a half sheet pan (or two quarter sheet pans). Then, with well-oiled hands, gingerly raise the dough from the mixing bowl and allow its weight to stretch the dough down upon itself. Repeat this stretching technique 4 to 6 times to help encourage extra rise. Transfer the dough to the sheet pan and stretch in all directions to coax it into rectangular submission. It will likely not comply straight away. Cover with oiled plastic wrap and, after a brief rest (about 10 to 15 minutes), stretch the dough a second time.",
"6. If you plan to delay baking, now is the time to cover the sheet pan with oiled plastic wrap and place in the refrigerator for up to 24 hours. Bring to room temperature about an hour before baking so that the dough has doubled in height.",
"7. While the dough is doubling, preheat the oven to 450°F. Arrange the oven racks to both their highest and lowest positions.",
"8. Once risen and ready to bake, uncover the dough and, with well-oiled hands, dimple the dough by plunging fingers spread wide downward into the bottom of the pan. Bubbles are good, but especially large ones can be gently deflated.",
"9. Drizzle the focaccia as desired. We recommend a bare minimum of ¼ extra-virgin olive oil and a generous flurry of flaky sea salt, but encourage you to gild the lily here with herbs and garlic aplenty.",
"10. Place focaccia on the lowest rack and bake until the edges begin to pull away from the sides and corners of the pan, about 15 to 20 minutes. Transfer to the top rack and continue baking until the top is golden brown and bubbles are very well browned, about 5 minutes. Cool in the pan briefly then transfer to a cooling rack until completely cooled.",
"Possible toppings to decorate:",
"red onions",
"shallots",
"chives",
"fresh sage",
"fresh thyme",
"fresh oregano",
"fresh dill",
"fresh parsley",
"fresh basil",
"mix of mini sweet peppers",
"bell peppers",
"banana peppers",
"jalapeno peppers",
"cherry tomatoes",
"kalamata olives, pitted",
]
),
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual({"count": 1, "rating": 5.0}, self.harvester_class.ratings())
|
mindinsight/mindconverter/forward_call.py | fapbatista/mindinsight | 216 | 11079000 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Find out forward functions of script file"""
import ast
import pasta
class ForwardCall(ast.NodeVisitor):
"""
AST visitor that processes forward calls.
Find the sub functions called by the forward function in the script file.
"""
def __init__(self, ast_tree):
self._tree = ast_tree
self._name_stack = []
self._forward_stack = []
self.calls = {} # key is function name, value is forward function ast node.
self._function_list = {} # key is function name, value is function ast node.
self.process()
def process(self):
"""visit ast tree to find the forward functions."""
self.visit(self._tree)
# the first pass collects all functions; reset all state except _function_list before the second pass
self._name_stack.clear()
self._forward_stack.clear()
self.calls.clear()
self.visit(self._tree)
def get_current_namespace(self):
"""Get the namespace when visit the AST node"""
namespace = '.'.join(self._name_stack)
return namespace
@classmethod
def get_call_name(cls, node):
"""Get functional call name."""
if not isinstance(node, ast.Call):
return None
return pasta.dump(node.func)
def visit_ClassDef(self, node):
"""Callback function when visit AST tree"""
self._name_stack.append(node.name)
self.generic_visit(node)
self._name_stack.pop()
def visit_FunctionDef(self, node):
"""Callback function when visit AST tree"""
namespace = self.get_current_namespace()
if namespace:
func_name = f'{namespace}.{node.name}'
else:
func_name = node.name
is_in_chain = func_name in self.calls or node.name == 'forward'
if is_in_chain:
self._forward_stack.append(func_name)
if node.name == 'forward':
self.calls.update({func_name: node})
self._function_list.update({func_name: node})
self.generic_visit(node)
if is_in_chain:
self._forward_stack.pop()
def visit_Call(self, node):
"""Callback function when visit AST tree"""
for arg in node.args:
self.visit(arg)
for keyword in node.keywords:
self.visit(keyword.value)
func_name = self.get_call_name(node)
if isinstance(node.func, ast.Name):
if func_name not in ['super', 'str', 'repr']:
if self._forward_stack:
self.calls.update({func_name: self._function_list.get(func_name)})
self.visit(node.func)
else:
if self._forward_stack:
if func_name.startswith('self.'):
whole_name = f'{self.get_current_namespace()}.{func_name.split(".")[-1]}'
self.calls.update({whole_name: self._function_list.get(whole_name)})
else:
self.calls.update({func_name: self._function_list.get(func_name)})
self.visit(node.func)
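# Usage sketch (illustrative; the PyTorch-style module below is hypothetical):
# import pasta
# source = (
#     "class Net:\n"
#     "    def forward(self, x):\n"
#     "        return self.block(x)\n"
#     "    def block(self, x):\n"
#     "        return x\n"
# )
# finder = ForwardCall(pasta.parse(source))
# print(sorted(finder.calls.keys()))  # expected: ['Net.block', 'Net.forward']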
|
peering/migrations/0060_auto_20200718_0023.py | schiederme/peering-manager | 173 | 11079035 | # Generated by Django 3.0.8 on 2020-07-17 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("peering", "0059_router_last_deployment_id")]
operations = [
migrations.AlterField(
model_name="router",
name="napalm_password",
field=models.CharField(blank=True, max_length=256, null=True),
),
migrations.AlterField(
model_name="router",
name="napalm_username",
field=models.CharField(blank=True, max_length=256, null=True),
),
]
|
carball/analysis/stats/possession/turnovers.py | unitedroguegg/carball | 119 | 11079095 | from typing import Dict
import pandas as pd
from carball.generated.api.stats.events_pb2 import Hit
from ....analysis.constants.field_constants import FieldConstants
from ....analysis.stats.stats import HitStat
from ....generated.api import game_pb2
from ....generated.api.player_pb2 import Player
from ....json_parser.game import Game
class TurnoverStat(HitStat):
field_constants = FieldConstants()
def initialize_hit_stat(self, game: Game, player_map: Dict[str, Player], data_frame: pd.DataFrame):
pass
def calculate_next_hit_stat(self, game: Game, proto_game: game_pb2.Game, saltie_hit: Hit, next_saltie_hit: Hit,
player_map: Dict[str, Player], hit_index: int):
hits = proto_game.game_stats.hits
hit_player = player_map[saltie_hit.player_id.id]
second_hit_player = player_map[next_saltie_hit.player_id.id]
# If there is a goal between 2nd hit and 3rd hit abort check
if not next_saltie_hit.HasField("next_hit_frame_number") or hit_index + 2 >= len(hits):
return
third_hit_player = player_map[hits[hit_index + 2].player_id.id]
if hit_player.is_orange != second_hit_player.is_orange and hit_player.is_orange != third_hit_player.is_orange:
# this is a turnover!
# if the hit occurred on the same half as my team
my_half = (saltie_hit.ball_data.pos_y > 0) == hit_player.is_orange
neutral_zone = self.field_constants.get_neutral_zone(saltie_hit.ball_data)
self.assign_turnover(hit_player.stats.possession, my_half, neutral_zone)
self.assign_turnover(proto_game.teams[hit_player.is_orange].stats.possession,
my_half, neutral_zone)
second_hit_player.stats.possession.won_turnovers += 1
proto_game.teams[second_hit_player.is_orange].stats.possession.won_turnovers += 1
def assign_turnover(self, possession_proto, is_turnover_my_half, is_neutral):
possession_proto.turnovers += 1
if is_turnover_my_half and not is_neutral:
possession_proto.turnovers_on_my_half += 1
elif not is_neutral:
possession_proto.turnovers_on_their_half += 1
|
utils/lrn_rate_utils.py | siddsax/PocketFlow | 2,724 | 11079096 | # Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for learning rates."""
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def setup_lrn_rate_piecewise_constant(global_step, batch_size, idxs_epoch, decay_rates):
"""Setup the learning rate with piecewise constant strategy.
Args:
* global_step: training iteration counter
* batch_size: number of samples in each mini-batch
* idxs_epoch: indices of epochs at which to decay the learning rate
* decay_rates: list of decaying rates
Returns:
* lrn_rate: learning rate
"""
# adjust interval endpoints w.r.t. FLAGS.nb_epochs_rat
idxs_epoch = [idx_epoch * FLAGS.nb_epochs_rat for idx_epoch in idxs_epoch]
# setup learning rate with the piecewise constant strategy
lrn_rate_init = FLAGS.lrn_rate_init * batch_size / FLAGS.batch_size_norm
nb_batches_per_epoch = float(FLAGS.nb_smpls_train) / batch_size
bnds = [int(nb_batches_per_epoch * idx_epoch) for idx_epoch in idxs_epoch]
vals = [lrn_rate_init * decay_rate for decay_rate in decay_rates]
lrn_rate = tf.train.piecewise_constant(global_step, bnds, vals)
return lrn_rate
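# Worked example (illustrative FLAGS values, not PocketFlow defaults): with lrn_rate_init=0.1,
# batch_size_norm=128, batch_size=256, nb_smpls_train=50000, nb_epochs_rat=1.0,
# idxs_epoch=[30, 60] and decay_rates=[1.0, 0.1, 0.01], the schedule starts at 0.2
# (linearly scaled by batch size), drops to 0.02 after epoch 30 and to 0.002 after epoch 60.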
def setup_lrn_rate_exponential_decay(global_step, batch_size, epoch_step, decay_rate):
"""Setup the learning rate with exponential decaying strategy.
Args:
* global_step: training iteration counter
* batch_size: number of samples in each mini-batch
* epoch_step: epoch step-size for applying the decaying step
* decay_rate: decaying rate
Returns:
* lrn_rate: learning rate
"""
# adjust the step size & decaying rate w.r.t. FLAGS.nb_epochs_rat
epoch_step *= FLAGS.nb_epochs_rat
# setup learning rate with the exponential decay strategy
lrn_rate_init = FLAGS.lrn_rate_init * batch_size / FLAGS.batch_size_norm
batch_step = int(FLAGS.nb_smpls_train * epoch_step / batch_size)
lrn_rate = tf.train.exponential_decay(
lrn_rate_init, tf.cast(global_step, tf.int32), batch_step, decay_rate, staircase=True)
return lrn_rate
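# Worked example (illustrative): with lrn_rate_init=0.1, batch_size equal to batch_size_norm,
# nb_smpls_train=50000, epoch_step=1, nb_epochs_rat=1.0 and decay_rate=0.96, the learning rate
# is multiplied by 0.96 once per training epoch (staircase=True keeps it constant within an epoch).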
|
packages/fetchai/skills/http_echo/handlers.py | bryanchriswhite/agents-aea | 126 | 11079127 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the handler for the 'http_echo' skill."""
import json
from typing import cast
from aea.protocols.base import Message
from aea.skills.base import Handler
from packages.fetchai.protocols.default import DefaultMessage
from packages.fetchai.protocols.http.message import HttpMessage
from packages.fetchai.skills.http_echo.dialogues import (
DefaultDialogues,
HttpDialogue,
HttpDialogues,
)
class HttpHandler(Handler):
"""This implements the echo handler."""
SUPPORTED_PROTOCOL = HttpMessage.protocol_id
def setup(self) -> None:
"""Implement the setup."""
def handle(self, message: Message) -> None:
"""
Implement the reaction to an envelope.
:param message: the message
"""
http_msg = cast(HttpMessage, message)
# recover dialogue
http_dialogues = cast(HttpDialogues, self.context.http_dialogues)
http_dialogue = cast(HttpDialogue, http_dialogues.update(http_msg))
if http_dialogue is None:
self._handle_unidentified_dialogue(http_msg)
return
# handle message
if http_msg.performative == HttpMessage.Performative.REQUEST:
self._handle_request(http_msg, http_dialogue)
else:
self._handle_invalid(http_msg, http_dialogue)
def _handle_unidentified_dialogue(self, http_msg: HttpMessage) -> None:
"""
Handle an unidentified dialogue.
:param http_msg: the message
"""
self.context.logger.info(
"received invalid http message={}, unidentified dialogue.".format(http_msg)
)
default_dialogues = cast(DefaultDialogues, self.context.default_dialogues)
default_msg, _ = default_dialogues.create(
counterparty=http_msg.sender,
performative=DefaultMessage.Performative.ERROR,
error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE,
error_msg="Invalid dialogue.",
error_data={"http_message": http_msg.encode()},
)
self.context.outbox.put_message(message=default_msg)
def _handle_request(
self, http_msg: HttpMessage, http_dialogue: HttpDialogue
) -> None:
"""
Handle a Http request.
:param http_msg: the http message
:param http_dialogue: the http dialogue
"""
self.context.logger.info(
"received http request with method={}, url={} and body={!r}".format(
http_msg.method, http_msg.url, http_msg.body,
)
)
if http_msg.method == "get":
self._handle_get(http_msg, http_dialogue)
elif http_msg.method == "post":
self._handle_post(http_msg, http_dialogue)
def _handle_get(self, http_msg: HttpMessage, http_dialogue: HttpDialogue) -> None:
"""
Handle a Http request of verb GET.
:param http_msg: the http message
:param http_dialogue: the http dialogue
"""
http_response = http_dialogue.reply(
performative=HttpMessage.Performative.RESPONSE,
target_message=http_msg,
version=http_msg.version,
status_code=200,
status_text="Success",
headers=http_msg.headers,
body=json.dumps({"tom": {"type": "cat", "age": 10}}).encode("utf-8"),
)
self.context.logger.info("responding with: {}".format(http_response))
self.context.outbox.put_message(message=http_response)
def _handle_post(self, http_msg: HttpMessage, http_dialogue: HttpDialogue) -> None:
"""
Handle a Http request of verb POST.
:param http_msg: the http message
:param http_dialogue: the http dialogue
"""
http_response = http_dialogue.reply(
performative=HttpMessage.Performative.RESPONSE,
target_message=http_msg,
version=http_msg.version,
status_code=200,
status_text="Success",
headers=http_msg.headers,
body=http_msg.body,
)
self.context.logger.info("responding with: {}".format(http_response))
self.context.outbox.put_message(message=http_response)
def _handle_invalid(
self, http_msg: HttpMessage, http_dialogue: HttpDialogue
) -> None:
"""
Handle an invalid http message.
:param http_msg: the http message
:param http_dialogue: the http dialogue
"""
self.context.logger.warning(
"cannot handle http message of performative={} in dialogue={}.".format(
http_msg.performative, http_dialogue
)
)
def teardown(self) -> None:
"""Implement the handler teardown."""
|
tests/r/test_quakes.py | hajime9652/observations | 199 | 11079131 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.quakes import quakes
def test_quakes():
"""Test module quakes.py by downloading
quakes.csv and testing shape of
extracted data has 1000 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = quakes(test_path)
try:
assert x_train.shape == (1000, 5)
except:
shutil.rmtree(test_path)
raise
|
simulation/decai/simulation/contract/incentive/tests/test_prediction_market.py | boost-entropy-python/0xDeCA10B | 445 | 11079135 | import unittest
from collections import defaultdict
from typing import cast
from injector import Injector
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.incentive.prediction_market import MarketPhase, \
PredictionMarket, PredictionMarketImModule
from decai.simulation.contract.objects import Msg, TimeMock
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.simple_data_loader import SimpleDataModule
from decai.simulation.logging_module import LoggingModule
class TestPredictionMarket(unittest.TestCase):
def test_market_like_original_paper(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=False,
group_contributions=False,
reset_model_during_reward_phase=False,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
self.assertEqual(im.min_stake, cost, "Cost should be the minimum stake because of the options passed in.")
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
# Sometimes the bad contributor happens to get some value but not much.
self.assertAlmostEqual(balances[bad_contributor_address], initial_bad_balance, delta=2,
msg=f"The bad contributor should lose funds.\n"
f"Balances: {balances.get_all()}")
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
def test_market(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertLess(balances[bad_contributor_address], initial_bad_balance)
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_bad_balance - total_deposits[bad_contributor_address],
balances[bad_contributor_address],
"The bad contributor should lose all of their deposits.")
def test_report(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
time_method = inj.get(TimeMock)
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes,
min_length_s, min_num_contributions)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
stored_data = None
for i in range(min_num_contributions):
time_method.add_time(60)
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
if stored_data is None:
stored_data = StoredData(classification, time_method(), contributor, cost, cost)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
time_method.add_time(60)
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
time_method.add_time(60)
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
# Get some stored data.
# Make sure reporting doesn't work yet.
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
self.assertEqual(0, reward, "There should be no reward yet.")
time_method.add_time(im.any_address_claim_wait_time_s)
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(bad_contributor_address, None, 0, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# The bad contributor profited because they reported the good contributor.
self.assertGreater(balances[bad_contributor_address], initial_bad_balance)
self.assertLess(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[good_contributor_address], balances[bad_contributor_address])
self.assertLessEqual(balances[bad_contributor_address] - balances[good_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_good_balance - total_deposits[good_contributor_address],
balances[good_contributor_address],
"The good contributor should lose all of their deposits.")
|
sdk/search/azure-search-documents/azure/search/documents/aio/_timer.py | rsdoherty/azure-sdk-for-python | 2,728 | 11079137 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import asyncio
class Timer:
def __init__(self, timeout, callback):
self._timeout = timeout
self._callback = callback
self._task = asyncio.ensure_future(self._job())
async def _job(self):
await asyncio.sleep(self._timeout)
await self._callback()
def cancel(self):
self._task.cancel()
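# Usage sketch (illustrative; requires a running asyncio event loop):
# async def _on_timeout():
#     print("timer fired")
# timer = Timer(5, _on_timeout)   # schedules _on_timeout() to run once after 5 seconds
# timer.cancel()                  # call before it fires to abort the callback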
|
qt4i/driver/tools/dt.py | beijixing0202/QT4i | 209 | 11079144 | # -*- coding:utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''DeviceTools
'''
from __future__ import absolute_import, print_function
import fcntl
import json
import os
import pkg_resources
import re
import subprocess
import shutil
import time
import datetime
import base64
from six.moves.http_client import HTTPConnection
from six import PY3
from six import with_metaclass
import six
import threading
from qt4i.driver.tools import mobiledevice
from qt4i.driver.tools.mobiledevice import InstallationProxy
from qt4i.driver.tools.sched import PortManager
from qt4i.driver.util import zip_decompress
from qt4i.driver.tools.mobiledevice import SandboxClient
from testbase.util import Singleton
from testbase.util import Timeout
from qt4i.driver.util._process import Process
# mount root path
MOUNT_ROOT_PATH = os.path.expanduser('~')
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
QT4I_CACHE_ROOT = os.path.join(os.path.expanduser('~'), "qt4icache")
if not os.path.exists(QT4I_CACHE_ROOT):
os.makedirs(QT4I_CACHE_ROOT)
class Flock(object):
def __init__(self, filepath=os.path.join(QT4I_CACHE_ROOT, 'lock')):
self._filepath = filepath
self._fd = open(self._filepath, 'w')
def __enter__(self):
fcntl.flock(self._fd, fcntl.LOCK_EX)
return self
def __exit__(self, *args):
fcntl.flock(self._fd, fcntl.LOCK_UN)
try:
self._fd.close()
os.unlink(self._filepath)
except:
pass
self._fd = None
def func_retry_wrap(func):
def _wrap_func(*args, **kwargs):
'''Retry the wrapped call up to 3 times, sleeping 10 seconds after each failed attempt.
'''
for _ in range(3):
try:
return func(*args, **kwargs)
except:
time.sleep(10)
else:
raise
return _wrap_func
class DT(with_metaclass(Singleton, object)):
'''
DeviceTools
'''
FBSIMCTL_DEFAULT_PATH = '/cores/fbsimctl/fbsimctl'
def __init__(self):
self.xcode_version = DT.get_xcode_version()
self.udid = None
self.bundle_id = None
self.sc = None
if self.compare_xcode_version("11.0") < 0:
self._init_fbsimctl()
def _init_fbsimctl(self):
try:
fbsimctl_zip = pkg_resources.resource_filename("qt4i", "driver/tools/fbsimctl/fbsimctl.zip") #@UndefinedVariable
if not os.path.exists(fbsimctl_zip):
raise Exception('fbsimctl not found')
self.fbsimctl = DT.FBSIMCTL_DEFAULT_PATH
except:
try:
os.environ['PATH'] += ':/usr/local/bin'
result = subprocess.check_output("which fbsimctl", env=os.environ, shell=True, stderr=subprocess.STDOUT, )
if PY3:
result = result.decode()
self.fbsimctl = result.split('\n')[0]
except subprocess.CalledProcessError:
raise Exception('fbsimctl not found, use brew install')
if self.fbsimctl == DT.FBSIMCTL_DEFAULT_PATH and not os.path.exists(self.fbsimctl): # extract fbsimctl to the default path
fbsimctl_root_path = os.path.split(DT.FBSIMCTL_DEFAULT_PATH)[0]
zip_decompress(fbsimctl_zip, fbsimctl_root_path)
os.chmod(self.fbsimctl, 0o755)
fbsimctl_fmk_path = os.path.split(DT.FBSIMCTL_DEFAULT_PATH)[0]
for root, dirs, _files in os.walk(fbsimctl_fmk_path):
if root.endswith('.framework'):
if 'Headers' not in dirs: # add symlinks for Headers/Modules/Resources to the framework
framework_name = os.path.splitext(os.path.basename(root))[0]
for item in ['Headers', 'Modules', 'Resources', framework_name]:
item_path = os.path.join(root, item)
if os.path.exists(item_path) and not os.path.islink(item_path):
os.remove(item_path)
os.symlink('Versions/Current/%s' % item, item_path)
if root.endswith('Versions'):
if 'Current' not in dirs:
current_path = os.path.join(root, 'Current')
if os.path.exists(current_path) and not os.path.islink(current_path):
os.remove(current_path)
os.symlink('A', current_path)
@staticmethod
def get_xcode_version():
'''Query the Xcode version
:returns: str
'''
version = "7.3"
try:
xcode_info = subprocess.check_output("xcodebuild -version", shell=True, stderr=subprocess.STDOUT)
if PY3:
xcode_info = xcode_info.decode()
version = re.match(r'Xcode\s(\d+\.\d+)', xcode_info.split('\n')[0]).group(1)
except subprocess.CalledProcessError as e:
raise Exception('get_xcode_version error:%s' % e.output)
return version
@staticmethod
def compare_xcode_version(version):
'''Compare the Xcode version on the current Mac with the given version
current version > version: return 1
current version = version: return 0
current version < version: return -1
'''
current_version = DT.get_xcode_version()
return DT.compare_version(current_version, version)
@staticmethod
def compare_version(version_a, version_b):
'''
version_a > version_b return 1
version_a = version_b return 0
version_a < version_b return -1
'''
version_reg = r'^\d*\.\d*\.?\d*$'
if not re.match(version_reg, version_a):
raise Exception('version_a invalid:%s' % version_a)
elif not re.match(version_reg, version_b):
raise Exception('version_b invalid:%s' % version_b)
a = version_a.split('.')
b = version_b.split('.')
length = min(len(a), len(b))
for i in range(length):
if int(a[i]) < int(b[i]):
return -1
if int(a[i]) > int(b[i]):
return 1
if len(a) > len(b):
return 1
elif len(a) < len(b):
return -1
else:
return 0
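# Examples (illustrative):
# DT.compare_version('9.3', '9.3') == 0
# DT.compare_version('11.0', '9.3') == 1
# DT.compare_version('8.4.1', '8.4') == 1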
def get_real_devices(self):
'''Query the physical devices currently connected to this Mac (real devices only, no simulators)
:returns: list[dict] or None (unexpected cases)
'''
devices = self.get_devices()
real_devices = []
for dev in devices:
if not dev['simulator']:
real_devices.append(dev)
return real_devices
def _get_simulators_below_xcode_7(self):
devices = []
try:
cmd = "xcrun simctl list devices"
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if PY3:
result = result.decode()
dev_flag = False
for line in result.split("\n"):
if line.startswith("-- "):
if line.startswith("-- iOS"):
dev_flag = True
dev_version = re.match(r'--\siOS\s(\d+\.\d)\s--', line).group(1)
else:
dev_flag = False
continue
if dev_flag:
ret = re.match(r'\s{4}(.+)\s\(([0-9A-F\-]+)\)\s\((.+)\)', line)
if ret:
device = {}
device["udid"] = ret.group(2)
device["name"] = ret.group(1)
device["ios"] = dev_version
device["state"] = ret.group(3)
device["simulator"] = True
devices.append(device)
except subprocess.CalledProcessError as e:
raise Exception('_get_simulators_below_xcode_7 error:%s' % e.output)
return devices
def _get_simulators_above_xcode_7(self):
devices = []
try:
cmd = "xcrun simctl list -j devices"
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if PY3:
result = result.decode()
json_devices = json.loads(result, encoding='utf-8')["devices"]
for k in json_devices:
if k.startswith("iOS"):
ios_version = re.match(r'iOS\s(\d+\.\d+)', k).group(1)
for dev in json_devices[k]:
dev["ios"] = ios_version
dev["simulator"] = True
devices.append(dev)
except subprocess.CalledProcessError as e:
raise Exception('_get_simulators_above_xcode_7 error:%s' % e.output)
return devices
def get_simulators(self):
'''Query the installed simulators (querying only simulators is faster when real devices are not needed)
:returns: list[dict] or None (unexpected cases)
'''
if self.xcode_version < "7.0":
return self._get_simulators_below_xcode_7()
else:
return self._get_simulators_above_xcode_7()
def _list_devices(self):
'''Spawn the process that lists devices
'''
self._list_devices_process = subprocess.Popen("instruments -s devices",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
self._dev_info, _ = self._list_devices_process.communicate()
def get_devices(self):
'''Query all available devices on this Mac (real devices and simulators; results are cached)
:returns: list[dict]
'''
if self.compare_xcode_version("11.0")>=0:
return self._get_devices_by_ins()
return self._get_device_by_fbsimctl()
def get_device_by_name(self, _name):
'''Query an available device on this Mac by name (real devices and simulators)
:param _name: device name (regular expressions supported), e.g. iPhone 5s (8.1 Simulator)
:type _name: str
:returns: dict or None
'''
_devices = self.get_devices()
for _item in _devices:
if _name == _item["name"] or re.match(_name, _item["name"]):
return _item
def get_device_by_udid(self, _device_udid):
'''Query a device on this Mac by its udid (real devices and simulators)
:param _device_udid: udid of the device
:type _device_udid: str
:returns: dict or None
'''
_devices = self.get_devices()
for _item in _devices:
if _device_udid == _item["udid"]:
return _item
def is_simulator(self, udid):
'''Check whether the device is a simulator
'''
device = self.get_device_by_udid(udid)
if device:
return device['simulator']
return False
def check_device_udid_is_valid(self, _device_udid):
'''Check whether the udid is valid (real devices and simulators)
:param _device_udid: udid of the real device or simulator
:type _device_udid: str
:returns: bool
'''
device = self.get_device_by_udid(_device_udid)
return bool(device)
def get_simulator_by_name(self, _name):
'''Query a simulator by name (querying only simulators is much faster when real devices are not needed)
:param _name: simulator name (regular expressions supported), e.g. iPhone 5s (8.1 Simulator)
:type _name: str
:returns: dict or None
'''
_simulators = self.get_simulators()
if _simulators:
for _item in _simulators:
if _name == _item["name"] or re.match(_name, _item["name"]):
return _item
def get_simulator_by_udid(self, _device_udid):
'''Query a simulator by its udid (querying only simulators is much faster when real devices are not needed)
:param _device_udid: udid of the simulator
:type _device_udid: str
:returns: dict or None
'''
_simulators = self.get_simulators()
if _simulators:
for _item in _simulators:
if _device_udid == _item["udid"]:
return _item
def get_default_simulator(self):
'''Query the default simulator (only one simulator instance can run, so a default exists; there is no such notion for real devices)
:returns: dict or None (unexpected cases)
'''
raise Exception("get_default_simulator unsupported")
def set_default_simulator(self, _device_udid):
'''Set the default simulator
:param _device_udid: udid of the simulator
:type _device_udid: str
:returns: bool
'''
raise Exception("set_default_simulator unsupported")
def app_info(self, _file_path):
'''Get information about an app package (either IPA or APP)
:param _file_path: path of the IPA or APP package (note: the path must be accessible from this Mac)
:type _file_path: str
:returns: dict or None
'''
raise Exception("app_info unsupported")
def _modify_wi_port(self, udid):
port = PortManager.get_port('web', udid)
if self.xcode_version.startswith('9'):
wi_plist_path = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/Library/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/System/Library/LaunchDaemons/com.apple.webinspectord.plist'
else:
wi_plist_path = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/LaunchDaemons/com.apple.webinspectord.plist'
cmd = ['/usr/libexec/PlistBuddy', '-c', 'Set :Sockets:com.apple.webinspectord.socket:SockServiceName %s' % port, wi_plist_path]
subprocess.call(cmd)
def start_simulator(self, udid=None):
# check whether the simulator has booted successfully
def check_started():
for s in self.get_simulators():
if s["state"] == "Booted" and (s["udid"] == udid or udid is None):
return True
return False
if check_started():
return True
simulator = "Simulator"
if self.compare_xcode_version("7.0") < 0:
simulator = "iOS Simulator"
if udid:
cmd = ['xcrun', 'simctl', 'boot', udid]
if self.compare_xcode_version("9.0") < 0:
cmd = [self.fbsimctl, udid, 'boot']
else:
cmd = "open -a \"%s\"" % simulator
if self.compare_xcode_version("9.3") >=0:
ret = subprocess.call(cmd, close_fds=True)
if ret != 0:
result = False
else:
result = Timeout(20, 0.05).check(check_started, True)
else:
with Flock():
self._modify_wi_port(udid)
ret = subprocess.call(cmd, close_fds=True)
if ret != 0:
result = False
else:
result = Timeout(20, 0.05).check(check_started, True)
return result
@func_retry_wrap
def _download_http_file(self, url, target_path):
'''Download a file over HTTP
:param url: HTTP URL
:type url: string
:param target_path: local path to download to
:type target_path: string
'''
url0 = url
if url[:7] == 'http://':
url = url[7:]
pos = url.find('/')
host = url[:pos]
page = url[pos:]
conn = HTTPConnection(host, port=80, timeout=60) # 60-second timeout
conn.request('GET', page)
res = conn.getresponse()
if res.status != 200:
raise RuntimeError('failed to access %s [HTTP status code: %s]' % (url0, res.status))
data = res.read()
conn.close()
f = open(target_path, 'wb')
f.write(data)
f.close()
def _download_package(self, src, dstdir):
'''Download the install package
:param src: source path
:param dstdir: local directory
:returns: filepath - path of the install package
'''
http_prefix = u'http://'
https_prefix = u'https://'
if isinstance(src, six.text_type):
src = src.encode('utf-8')
if PY3:
import urllib.parse as parse
else:
import urllib as parse
src = parse.quote(src, ":?=/")
if src.startswith(http_prefix) or src.startswith(https_prefix):
'''The source path is an http or https server path
'''
if src.rfind('?') == -1:
filename = src[src.rfind('/') + 1:]
else:
filename = src[src.rfind('/') + 1:src.rfind('?')]
filepath = os.path.join(dstdir, filename)
self._download_http_file(src, filepath)
elif os.path.isfile(src):
'''The source path is a local path
'''
filepath = src
else:
raise RuntimeError("file path not supported, %s" % src.encode('utf8'))
return filepath
def _extract_tgz(self, filepath, dstpath):
'''Extract a tgz package
:param filepath: path of the tgz package
:type filepath: string
:param dstpath: path to extract to
:type dstpath: string
'''
items = os.listdir(dstpath)
# first remove any existing .app in the directory to avoid version conflicts
for it in items:
if it.endswith('.app'):
shutil.rmtree(os.path.join(dstpath, it))
zip_decompress(filepath, dstpath)
items = os.listdir(dstpath)
for it in items:
if it.endswith('.app'):
return os.path.join(dstpath, it)
def _extract_zip(self, filepath, dstpath):
'''Extract a zip package
:param filepath: path of the zip package
:type filepath: string
:param dstpath: path to extract to
:type dstpath: string
'''
items = os.listdir(dstpath)
# first remove any existing .app in the directory to avoid version conflicts
for it in items:
if it.endswith('.app'):
shutil.rmtree(os.path.join(dstpath, it))
zip_decompress(filepath, dstpath)
items = os.listdir(dstpath)
for it in items:
if it.endswith('.app'):
return os.path.join(dstpath, it)
def _prepare_package(self, _file_path, pkgcachedir):
'''Prepare the install package
:param _file_path: path of the install package
:type _file_path: string
:param pkgcachedir: package cache directory
:type pkgcachedir: string
:returns: str - local path of the install package
'''
if not os.path.isdir(pkgcachedir):
os.mkdir(pkgcachedir)
if not isinstance(_file_path, six.text_type):
try:
_file_path = _file_path.decode('utf8')
except:
_file_path = _file_path.decode('gbk')
filepath = self._download_package(_file_path, pkgcachedir)
if filepath.endswith('.ipa'):
return filepath
elif filepath.endswith('.tar.gz'):
return self._extract_tgz(filepath, pkgcachedir)
elif filepath.endswith('.zip'):
return self._extract_zip(filepath, pkgcachedir)
else:
raise RuntimeError('unknown type of package: %s' % _file_path.encode('utf8'))
def _install_for_simulator(self, udid, app_path):
try:
cmd = "xcrun simctl install %s %s" % (udid, app_path)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return (True, "")
except subprocess.CalledProcessError as e:
return (False, e.output)
def install(self, _file_path, _device_udid=None):
'''Install an IPA on a real device or an APP on a simulator, selected by udid (note: an IPA built for real devices cannot be installed or used on a simulator; IPA and APP packages are not interchangeable)
:param _file_path: path of the IPA or APP package (note: the path must be accessible from this Mac)
:type _file_path: str
:param _device_udid: udid of the device (if None, an IPA is installed on the first connected real device and an APP on the default simulator)
:type _device_udid: str
:returns: bool
'''
self.install_error = ""
from qt4i.driver.tools import logger
log = logger.get_logger("task")
if not _file_path:
return True
pkgcachedir = os.path.join(os.environ['HOME'],'pkg_cache')
if not os.path.isdir(pkgcachedir):
os.mkdir(pkgcachedir)
pkgcachedir = os.path.join(pkgcachedir, str(_device_udid))
file_path = self._prepare_package(_file_path, pkgcachedir)
if os.path.exists(file_path) == False :
self.install_error = 'file does not exist'
return False
if file_path.endswith("ipa"):
if self.is_simulator(_device_udid):
log.warn('Failed to install an ipa to a simulator.')
return False
ip = InstallationProxy(_device_udid)
ret = ip.install(file_path)
else:
if not self.is_simulator(_device_udid):
log.warn('Failed to install an app to a real device.')
return False
self.start_simulator(_device_udid)
if _device_udid is None:
_device_udid = "booted"
ret = self._install_for_simulator(_device_udid, file_path)
if ret[0]:
if os.path.exists(pkgcachedir):
shutil.rmtree(pkgcachedir)
else:
log.debug('install error: %s' % str(ret[1]))
self.install_error = ret[1]
return ret[0]
def _uninstall_for_simulator(self, udid, bundle_id):
try:
cmd = "xcrun simctl uninstall %s %s" % (udid, bundle_id)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return (True, "")
except subprocess.CalledProcessError as e:
return (False, e.output)
def uninstall(self, _bundle_id, _device_udid=None):
'''Uninstall the app with the given bundle_id from the device selected by udid
:param _bundle_id: bundle_id of the app, e.g. com.tencent.qq.dailybuild.test
:type _bundle_id: str
:param _device_udid: udid of the device (if None, the app is uninstalled from the first connected real device, or from the default simulator when no real device is connected)
:type _device_udid: str
:returns: bool
'''
self.uninstall_error = ""
simulator_flag = False
if _device_udid:
for device in self.get_devices():
if _device_udid == device["udid"]:
simulator_flag = device["simulator"]
break
else:
devices = self.get_real_devices()
if len(devices) > 0:
_device_udid = devices[0]["udid"]
else:
devices = self.get_simulators()
if len(devices) > 0:
_device_udid = devices[0]["udid"]
simulator_flag = True
if _device_udid is None:
return False
if simulator_flag:
self.start_simulator(_device_udid)
ret = self._uninstall_for_simulator(_device_udid, _bundle_id)
else:
ip = InstallationProxy(_device_udid)
ret = ip.uninstall(_bundle_id)
self.uninstall_error = ret[1]
return ret[0]
def _shutdown_simulator(self, udid):
cmd = ["xcrun", "simctl", "shutdown", udid]
if self.compare_xcode_version("11.0") < 0:
cmd = [self.fbsimctl, udid, 'shutdown']
return True if subprocess.call(cmd, close_fds=True) == 0 else False
def reboot(self, udid):
if self.is_simulator(udid):
self._shutdown_simulator(udid)
self.start_simulator(udid)
else:
mobiledevice.reboot(udid)
def get_crash_log(self, udid, procname):
        '''Get the latest crash log of the given process
        :param udid: udid of the device
        :type udid: str
        :param procname: process name of the app (can be checked in Xcode)
        :type procname: str
        :return: string or None - content of the crash log
        '''
log = None
if self.is_simulator(udid):
root_path = os.path.join(os.path.expanduser('~'), 'Library/Logs/DiagnosticReports')
for crash_log in os.listdir(root_path):
if crash_log.startswith(procname) and crash_log.endswith('crash'):
crash_log_path = os.path.join(root_path, crash_log)
with open(crash_log_path, "rb") as fd:
log = fd.read().decode("utf-8")
if udid in log:
os.remove(crash_log_path)
return log
else:
log = None
else:
logfile = mobiledevice.get_crash_log(procname, udid)
if logfile and os.path.isfile(logfile):
with open(logfile, "rb") as fd:
log = fd.read().decode("utf-8")
return log
def _convert_timestamp(self, format_time):
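        # Parses timestamps of the form 'Oct 21 12:00:00 2015' (the slice taken
        # out of the MobileInstallation log lines below) into epoch seconds.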
parsed_time = time.strptime(format_time, "%b %d %H:%M:%S %Y")
return time.mktime(parsed_time)
def _get_simulator_app_data_path(self, udid, bundle_id):
        try:  # first try simctl to get the sandbox path; if that fails, fall back to parsing the system install logs
cmd = "xcrun simctl get_app_container %s %s data" % (udid, bundle_id)
app_path = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, close_fds=True)
if PY3:
app_path = app_path.decode()
return app_path[:-1]
except subprocess.CalledProcessError:
pass
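        # Fallback: scan the simulator's MobileInstallation logs for the most
        # recent container entry of this bundle and extract the data path from it.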
install_log_path = os.path.join(os.path.expanduser('~'), 'Library/Logs/CoreSimulator/%s/MobileInstallation' % udid)
apps_path = os.path.join(os.path.expanduser('~'), 'Library/Developer/CoreSimulator/Devices/%s/data/Containers/Data/Application' % udid)
app_path = None
timestamp = 0
for logfile in os.listdir(install_log_path):
if logfile.startswith('mobile_installation.log'):
with open(os.path.join(install_log_path, logfile), "rb") as fd:
for line in fd:
if 'Made container live for %s at %s' % (bundle_id, apps_path) in line :
line_timestamp = self._convert_timestamp(line[4:24])
if line_timestamp > timestamp:
app_path = line
timestamp = line_timestamp
elif 'Data container for %s is now at %s' % (bundle_id, apps_path) in line:
line_timestamp = self._convert_timestamp(line[4:24])
if line_timestamp > timestamp:
app_path = line
timestamp = line_timestamp
return app_path[app_path.find(apps_path):-1]
def push_file(self, udid, bundle_id, localpath, remotepath):
        '''Copy a file from the local Mac into the given directory of the iOS device sandbox
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle id of the app (if not provided, the file is imported into the device's system media library instead of an app sandbox)
        :type bundle_id: str
        :param localpath: file path on the Mac
        :type localpath: str
        :param remotepath: directory in the sandbox, e.g. /Library/Caches/test/
        :type remotepath: str
        :returns: boolean
        '''
if bundle_id:
if self.is_simulator(udid):
sandbox_root = self._get_simulator_app_data_path(udid, bundle_id)
remotepath = remotepath[1:] if remotepath.startswith('/') else remotepath
remote_file = os.path.join(sandbox_root, remotepath)
shutil.copy(localpath, remote_file)
return True
else:
return mobiledevice.push_file(bundle_id, localpath, remotepath, udid)
else:
if self.is_simulator(udid):
cmd = 'xcrun simctl addmedia %s %s' % (udid, localpath)
process = subprocess.Popen(cmd, shell=True, stderr=subprocess.STDOUT)
process.communicate()
return process.poll() == 0
else:
raise Exception("This operation is not supported by the device")
def pull_file(self, udid, bundle_id, remotepath, localpath='/tmp', is_dir=False, is_delete = True):
        '''Copy files from the given sandbox path on the device to the local Mac
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle id of the app
        :type bundle_id: str
        :param remotepath: directory or file in the sandbox, e.g. /Library/Caches/test/
        :type remotepath: str
        :param localpath: local directory
        :type localpath: str
        :param is_dir: whether remotepath is a directory (defaults to a single file)
        :type is_dir: bool
        :param is_delete: whether to delete the source files after copying
        :type is_delete: bool
        :returns: list or None
        '''
if self.is_simulator(udid):
local_files = []
sandbox_root = self._get_simulator_app_data_path(udid, bundle_id)
remotepath = remotepath[1:] if remotepath.startswith('/') else remotepath
remote_file = os.path.join(sandbox_root, remotepath)
if is_dir:
for root, _, files in os.walk(remote_file):
for f in files:
if udid:
(short_name, extension) = os.path.splitext(f)
new_f = '%s-%s%s' % (short_name, udid, extension)
filename = os.path.join(root, f)
local_file = os.path.join(localpath, new_f)
shutil.copy(filename, local_file)
local_files.append(local_file)
if is_delete:
os.remove(filename)
else:
if udid:
(short_name, extension) = os.path.splitext(os.path.basename(remotepath))
new_f = '%s-%s%s' % (short_name, udid, extension)
local_file = os.path.join(localpath, new_f)
shutil.copy(remote_file, local_file)
local_files.append(local_file)
if is_delete:
os.remove(remote_file)
return local_files
else:
return mobiledevice.pull_file(bundle_id, remotepath, localpath, udid, is_dir, is_delete)
def remove_files(self, udid, bundle_id, file_path):
        '''Delete a file or directory of the given app on the device
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle id of the app
        :type bundle_id: str
        :param file_path: file or directory to delete, e.g. /Documents/test.log
        :type file_path: str
        '''
if self.is_simulator(udid):
sandbox_root = self._get_simulator_app_data_path(udid, bundle_id)
file_path = file_path[1:] if file_path.startswith('/') else file_path
file_path = os.path.join(sandbox_root, file_path)
if os.path.isfile(file_path):
os.remove(file_path)
elif os.path.exists(file_path):
shutil.rmtree(file_path)
else:
mobiledevice.remove_files(bundle_id, file_path, udid)
def list_apps_with_fbsimctl(self, udid, app_type):
try:
apps_info =[]
cmd = "%s --json %s list_apps" % (self.fbsimctl, udid)
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if PY3:
result = result.decode()
result = result.split('\n')
for line in result:
if 'list_apps' in line:
line = json.loads(line, encoding='utf-8')
if line['event_type'] == 'discrete':
apps_info = line['subject']
break
apps = []
for app_info in apps_info:
if app_type == 'all':
apps.append({app_info['bundle']['bundle_id']:app_info['bundle']['name']})
elif app_info['install_type'] == app_type:
apps.append({app_info['bundle']['bundle_id']:app_info['bundle']['name']})
return apps
except subprocess.CalledProcessError as e:
raise Exception('list_apps for simulator error:%s' % e.output)
def list_apps_with_xcrun(self, udid, app_type):
try:
apps_info = []
cmd = "xcrun simctl listapps %s" % udid
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if PY3:
result = result.decode()
print(result)
app_types = re.findall(r'ApplicationType\s=\s(.*?);\n', result)
bundle_ids = re.findall(r'CFBundleIdentifier\s=\s"(.*?)";\n', result)
bundle_names = re.findall(r'CFBundleName\s=\s(.*?);\n', result)
list = []
for i in range(len(app_types)):
dic = {}
if app_type.lower() == 'all':
dic[str(bundle_ids[i].strip())] = str(bundle_names[i])
list.append(dic)
elif app_types[i].lower() == app_type.lower():
dic[str(bundle_ids[i].strip())] = str(bundle_names[i])
list.append(dic)
return list
except subprocess.CalledProcessError as e:
raise Exception('list_apps for simulator error:%s' % e.output)
def list_apps(self, udid, app_type):
        '''Get the list of apps on the device
        :param udid: udid of the device
        :type udid: str
        :param app_type: type of apps to list (user/system/all)
        :type app_type: str
        :returns: list
        '''
if self.is_simulator(udid):
if self.compare_xcode_version("11.0") >= 0:
self.start_simulator(udid)
return self.list_apps_with_xcrun(udid, app_type)
return self.list_apps_with_fbsimctl(udid, app_type)
else:
return InstallationProxy(udid).list_apps(app_type)
def get_syslog(self, watchtime, logFile, processName, udid):
        '''Fetch the device syslog
        :param watchtime: watch duration in seconds
        :type watchtime: int
        :param logFile: output log file name
        :type logFile: str
        :param processName: process (service) name
        :type processName: str
        :param udid: udid of the device
        :type udid: str
        '''
if self.is_simulator(udid):
root_path = os.path.join(os.path.expanduser('~'), 'Library/Logs/CoreSimulator/')
syslog_path = os.path.join(os.path.join(root_path, udid),'system.log')
with open(syslog_path,"rb") as fd:
lines = fd.readlines()
pattern = re.compile(r'\d{2}:\d{2}:\d{2}')
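            # Each syslog line begins with an HH:MM:SS timestamp; the loop below
            # walks backwards from the newest line and stops once it falls
            # outside the requested watch window.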
time_end = pattern.search(lines[-1]).group()
timestamp = int(time_end[0:2])*3600 + int(time_end[3:5])*60 + int(time_end[6:8])
a = datetime.datetime.utcfromtimestamp(timestamp)
b = datetime.timedelta(seconds=watchtime)
start = (str(a-b))[-8:]
time_start = int(start[6:8])+60*int(start[3:5])+3600*int(start[0:2])
log = []
for i in range(len(lines)):
log.append(lines[len(lines)-i-1])
if pattern.search(lines[len(lines)-i-1]):
tm = pattern.search(lines[len(lines)-i-1]).group()
time_now = int(tm[6:8])+60*int(tm[3:5])+3600*int(tm[0:2])
if (time_now < time_start):
break
with open(logFile, 'a') as f:
for i in log:
f.write(i.replace("\x00", ""))
else:
mobiledevice.get_syslog(watchtime, logFile, processName, udid)
def update_sandbox_client(self, udid, bundle_id):
        '''Get (or refresh) the SandboxClient object, avoiding the StartServiceError caused by creating new clients repeatedly
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle_id of the app
        :type bundle_id: str
        '''
if not self.udid or not self.bundle_id:
self.udid = udid
self.bundle_id = bundle_id
self.sc = SandboxClient(udid, bundle_id)
elif self.udid != udid or self.bundle_id != bundle_id:
self.close_sandbox_client()
self.sc = SandboxClient(udid, bundle_id)
else:
try:
                # call a SandboxClient API method to check whether the object is still usable
self.sc.read_directory('/')
except:
self.sc = SandboxClient(udid, bundle_id)
def close_sandbox_client(self):
        '''Destroy the SandboxClient object
        '''
self.bundle_id = None
self.udid = None
self.sc.close()
def get_sandbox_path_files(self, udid, bundle_id, file_path):
        '''List the files under a sandbox path on a real device or a simulator
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle_id of the app
        :type bundle_id: str
        :param file_path: sandbox directory
        :type file_path: str
        '''
file_path = file_path.encode('utf-8')
sandbox_tree = []
tmp_dict = {}
if self.is_simulator(udid):
sandbox_root = self._get_simulator_app_data_path(udid, bundle_id)
file_path = sandbox_root if file_path == '/' else os.path.join(sandbox_root, file_path)
for l in os.listdir(file_path):
tmp_dict['path'] = os.path.join(file_path, l)
tmp_dict['is_dir'] = os.path.isdir(tmp_dict['path'])
sandbox_tree.append(tmp_dict)
tmp_dict = {}
else:
self.update_sandbox_client(udid, bundle_id)
for l in self.sc.read_directory(file_path):
if l not in ('.', '..'):
tmp_dict['path'] = os.path.join(file_path, l)
info = self.sc.get_file_info(tmp_dict['path'])
tmp_dict['is_dir'] = (info != None and info['st_ifmt'] == 'S_IFDIR')
sandbox_tree.append(tmp_dict)
tmp_dict = {}
return sandbox_tree
def is_sandbox_path_dir(self, udid, bundle_id, file_path):
        '''Check whether a sandbox path is a directory
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle_id of the app
        :type bundle_id: str
        :param file_path: sandbox path
        :type file_path: str
        '''
file_path = file_path.encode('utf-8')
if self.is_simulator(udid):
sandbox_root = self._get_simulator_app_data_path(udid, bundle_id)
file_path = sandbox_root if file_path == '/' else file_path
return os.path.isdir(file_path)
else:
self.update_sandbox_client(udid, bundle_id)
info = self.sc.get_file_info(file_path)
return (info != None and info['st_ifmt'] == 'S_IFDIR')
def get_sandbox_file_content(self, udid, bundle_id, file_path):
        '''Get the content of a text file in the sandbox
        :param udid: udid of the device
        :type udid: str
        :param bundle_id: bundle_id of the app
        :type bundle_id: str
        :param file_path: sandbox file path
        :type file_path: str
        '''
file_path = file_path.encode('utf-8')
if self.is_simulator(udid):
if os.path.exists(file_path):
with open(file_path, "rb") as fd:
return base64.b64encode(fd.read())
else:
raise Exception('file(%s) does not exist' % file_path)
else:
self.update_sandbox_client(udid, bundle_id)
content = self.sc.get_file_contents(file_path)
if content:
return base64.b64encode(content)
def _get_device_by_fbsimctl(self):
        '''Get the device list
        '''
devices = []
cmd = "%s --json list" % self.fbsimctl
try:
res = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if PY3:
res = res.decode()
for dev in res.split('\n'):
if dev == '' :
continue
dev = json.loads(dev)['subject']
if 'name' not in dev:
continue
name = dev['name']
if 'Apple Watch' in name:
continue
if 'Apple TV' in name:
continue
arch = dev['arch']
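                # fbsimctl reports the architecture: arm64 entries are treated
                # as real devices, anything else as a simulator.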
if arch.startswith('arm64'):
dev[u'simulator'] = False
else :
dev[u'simulator'] = True
ios = dev['os']
dev[u'ios'] = ios.split(' ')[1].strip()
devices.append(dev)
except subprocess.CalledProcessError:
pass
return devices
def _get_devices_by_ins(self):
        '''Query all available devices on the current Mac (real devices and simulators; results are cached)
        :returns: list[dict]
        '''
devices = []
self._dev_info = ''
        with Flock(os.path.join(QT4I_CACHE_ROOT,'listdevices_lock')):  # use a file lock
            for i in range(3):  # retry up to 3 times
thread = threading.Thread(target=self._list_devices)
thread.daemon = True
thread.start()
thread.join(10)
if thread.is_alive():
if not hasattr(self._list_devices_process, 'returncode'):
self._list_devices_process.terminate()
Process().kill_process_by_name(
                        'DTServiceHub')  # running the instruments command via subprocess leaves an orphan DTServiceHub process that must be killed
if i == 2:
raise RuntimeError('List devices timeout 5s!')
else:
Process().kill_process_by_name(
                        'DTServiceHub')  # running the instruments command via subprocess leaves an orphan DTServiceHub process that must be killed
for line in self._dev_info.split('\n'):
if 'Apple Watch' in line:
continue
if 'Apple TV' in line:
continue
matched = re.match(r'(.+)\s\((\d+\.\d+.*)\)\s\[([\w-]+)\](.*)', line)
if matched:
device = {}
device["name"] = matched.group(1)
device["udid"] = matched.group(3)
if self.compare_xcode_version("7.0") < 0:
version_type = matched.group(2).split(' ')
device["ios"] = version_type[0]
device["simulator"] = True if len(version_type) > 1 else False
else:
device["ios"] = matched.group(2)
device["simulator"] = True if matched.group(4) else False
devices.append(device)
return devices
|
unfurl/parsers/parse_ksuid.py | jakuta-tech/unfurl | 449 | 11079152 | <reponame>jakuta-tech/unfurl<filename>unfurl/parsers/parse_ksuid.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
ksuid_edge = {
'color': {
'color': 'orange'
},
'title': 'KSUID Parsing Functions',
'label': 'KSUID'
}
EPOCH_OFFSET = 1400000000 # Offset from January 1, 1970, so that the lifespan of KSUIDs will be considerably longer
TIMESTAMP_LENGTH = 4  # the first 4 bytes of the decoded KSUID store the timestamp
KSUID_LENGTH = 27 # length of KSUID after encoding
CHARSET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
BASE = 62
def run(unfurl, node):
# From https://github.com/segmentio/ksuid:
# KSUID is for K-Sortable Unique IDentifier. It's a way to generate globally unique IDs
# similar to RFC 4122 UUIDs, but contain a time component so they can be "roughly" sorted
# by time of creation. The remainder of the KSUID is randomly generated bytes.
#
# Ref:
# - https://github.com/segmentio/ksuid
# - https://github.com/saresend/KSUID
if node.data_type == 'ksuid':
decoded_str_b62_to_bytes = decodebytes(node.value)
time_part_in_bytes = decoded_str_b62_to_bytes[0:TIMESTAMP_LENGTH]
timestamp = EPOCH_OFFSET + int.from_bytes(time_part_in_bytes, 'big', signed=False)
random_payload = decoded_str_b62_to_bytes[TIMESTAMP_LENGTH:]
unfurl.add_to_queue(
data_type='epoch-seconds', key=None, value=timestamp, label=f'Timestamp: {timestamp}',
parent_id=node.node_id, incoming_edge_config=ksuid_edge)
unfurl.add_to_queue(
data_type='descriptor', key=None, value=random_payload,
label=f'Randomly generated payload: {random_payload}',
parent_id=node.node_id, incoming_edge_config=ksuid_edge)
else:
m = re.match(r'([a-zA-Z0-9]{27})', str(node.value))
if m and len(node.value) == 27:
# If timestamp component between 2014 and 2027
if '090000l1tmebfs0000000000000' < str(node.value) < '3WgEPTl1tmebfsQzFP4bxwgy80V':
unfurl.add_to_queue(
data_type='ksuid', key=None, value=node.value, label=f'KSUID: {node.value}',
hover='KSUID are identifiers that are comprised of a timestamp and a random number. '
'<a href="https://github.com/segmentio/ksuid" target="_blank">[ref]</a>',
parent_id=node.node_id, incoming_edge_config=ksuid_edge,
extra_options={'widthConstraint': {'maximum': 300}})
def decode_b62(b):
"""Decodes a base62 encoded value ``b``."""
if b.startswith("0z"):
b = b[2:]
l, i, v = len(b), 0, 0
for x in b:
v += _value(x) * (BASE ** (l - (i + 1)))
i += 1
return v
def decodebytes(s):
"""Decodes a string of base62 data into a bytes object.
:param s: A string to be decoded in base62
:rtype: bytes
"""
decoded = decode_b62(s)
buf = bytearray()
while decoded > 0:
buf.append(decoded & 0xff)
decoded //= 256
buf.reverse()
return bytes(buf)
def _value(ch):
"""Decodes an individual digit of a base62 encoded string."""
try:
return CHARSET.index(ch)
except ValueError:
raise ValueError("base62: Invalid character (%s)" % ch)
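

# Illustrative usage sketch (not part of the original parser). The KSUID below
# is just an example value; any 27-character base62 string decodes the same way.
if __name__ == '__main__':
    sample = '0ujsswThIGTUYm2K8FjOOfXtY1K'  # example KSUID string
    raw = decodebytes(sample)
    timestamp = EPOCH_OFFSET + int.from_bytes(raw[:TIMESTAMP_LENGTH], 'big', signed=False)
    print('timestamp (epoch seconds):', timestamp)
    print('random payload:', raw[TIMESTAMP_LENGTH:])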
|
observations/r/nwtco.py | hajime9652/observations | 199 | 11079190 | <filename>observations/r/nwtco.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def nwtco(path):
"""Data from the National Wilm's Tumor Study
Measurement error example. Tumor histology predicts survival, but
prediction is stronger with central lab histology than with the local
institution determination.
A data frame with 4028 observations on the following 9 variables.
`seqno`
id number
`instit`
Histology from local institution
`histol`
Histology from central lab
`stage`
Disease stage
`study`
study
`rel`
indicator for relapse
`edrel`
time to relapse
`age`
age in months
`in.subcohort`
Included in the subcohort for the example in the paper
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `nwtco.csv`.
Returns:
Tuple of np.ndarray `x_train` with 4028 rows and 9 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'nwtco.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/survival/nwtco.csv'
maybe_download_and_extract(path, url,
save_file_name='nwtco.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
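

# Usage sketch (illustrative; the cache directory below is an assumption --
# the CSV is downloaded into it on first use):
if __name__ == '__main__':
  x_train, metadata = nwtco('~/observations_data')
  print(x_train.shape)              # (4028, 9) according to the docstring
  print(list(metadata['columns']))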
|
tests/test_02_dxf_graphics/test_223_ray.py | jkjt/ezdxf | 515 | 11079205 | # Copyright (c) 2019 <NAME>
# License: MIT License
# created 2019-03-05
import pytest
from ezdxf.entities.xline import Ray
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
RAY = """0
RAY
5
0
330
0
100
AcDbEntity
8
0
100
AcDbRay
10
0.0
20
0.0
30
0.0
11
1.0
21
0.0
31
0.0
"""
@pytest.fixture
def entity():
return Ray.from_text(RAY)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert "RAY" in ENTITY_CLASSES
def test_default_init():
entity = Ray()
assert entity.dxftype() == "RAY"
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = Ray.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": 7,
"start": (1, 2, 3),
"unit_vector": (4, 5, 6),
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
assert entity.dxf.start == (1, 2, 3)
assert entity.dxf.unit_vector == (4, 5, 6)
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.color == 256, "default color is 256 (by layer)"
assert entity.dxf.start == (0, 0, 0)
assert entity.dxf.unit_vector == (1, 0, 0)
def test_write_dxf():
entity = Ray.from_text(RAY)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(RAY)
assert result == expected
|
lyvi/metadata.py | randrej/lyvi | 127 | 11079219 | <reponame>randrej/lyvi
# Copyright (c) 2013 <NAME> <ok100 at openmailbox dot org>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
"""Metadata-related code."""
import os
import random
from threading import Lock
import plyr
import lyvi
class Metadata:
"""A class which holds metadata for the currently playing song."""
artist = None
album = None
title = None
file = None
_lyrics = None
_artistbio = None
_guitartabs = None
_backdrops = None
_cover = None
@property
def lyrics(self):
return self._lyrics
@lyrics.setter
def lyrics(self, value):
"""Update ui and save the lyrics."""
self._lyrics = value
lyvi.ui.update()
if lyvi.ui.autoscroll:
lyvi.ui.autoscroll.reset()
if lyvi.config['save_lyrics']:
self.save('lyrics', lyvi.config['save_lyrics'])
@property
def artistbio(self):
return self._artistbio
@artistbio.setter
def artistbio(self, value):
"""Update UI."""
self._artistbio = value
lyvi.ui.update()
@property
def guitartabs(self):
return self._guitartabs
@guitartabs.setter
def guitartabs(self, value):
"""Update UI."""
self._guitartabs = value
lyvi.ui.update()
@property
def backdrops(self):
return self._backdrops
@backdrops.setter
def backdrops(self, value):
"""Update background."""
self._backdrops = value
if lyvi.bg:
lyvi.bg.update()
@property
def cover(self):
return self._cover
@cover.setter
def cover(self, value):
"""Update background and save the cover."""
self._cover = value
if lyvi.bg:
lyvi.bg.update()
if lyvi.config['save_cover']:
self.save('cover', lyvi.config['save_cover_filename'])
def __init__(self):
"""Initialize the class."""
cache_dir = os.path.join(os.environ['HOME'], '.local/share/lyvi')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache = plyr.Database(cache_dir)
self.lock = Lock()
def set_tags(self):
"""Set all tag properties to the actual values."""
self.artist = lyvi.player.artist
self.title = lyvi.player.title
self.album = lyvi.player.album
self.file = lyvi.player.file
def reset_tags(self):
"""Set all tag and metadata properties to None."""
self.artist = self.title = self.album = self.file = None
self.lyrics = self.artistbio = self.guitartabs = None
self.backdrops = self.cover = None
def delete(self, type, artist, title, album):
"""Delete metadata from the cache.
Keyword arguments:
type -- type of the metadata
artist -- artist tag
title -- title tag
album -- album tag
"""
if artist and title and album:
self.cache.delete(plyr.Query(get_type=type, artist=artist, title=title, album=album))
def save(self, type, file):
"""Save the given metadata type.
Keyword arguments:
type -- type of the metadata
file -- path to the file metadata will be saved to
Some special substrings can be used in the filename:
<filename> -- name of the current song without extension
<songdir> -- directory containing the current song
<artist> -- artist of the current song
<title> -- title of the current song
<album> -- album of the current song
"""
data = getattr(self, type)
if self.file and data and data != 'Searching...':
for k, v in {
'<filename>': os.path.splitext(os.path.basename(self.file))[0],
'<songdir>': os.path.dirname(self.file),
'<artist>': self.artist,
'<title>': self.title,
'<album>': self.album
}.items():
file = file.replace(k, v)
if not os.path.exists(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
if not os.path.exists(file):
mode = 'wb' if isinstance(data, bytes) else 'w'
with open(file, mode) as f:
f.write(data)
def _query(self, type, normalize=True, number=1):
"""Return a list containing results from glyr.Query,
or None if some tags are missing.
Keyword arguments:
type -- type of the metadata
normalize -- whether the search strings should be normalized by glyr
"""
try:
query = plyr.Query(
number=number,
parallel=20,
get_type=type,
artist=self.artist,
title=self.title,
album=self.album
)
except AttributeError: # Missing tags?
return None
query.useragent = lyvi.USERAGENT
query.database = self.cache
if not normalize:
query.normalize = ('none', 'artist', 'album', 'title')
return query.commit()
def get(self, type):
"""Download and set the metadata for the given property.
Keyword arguments:
type -- type of the metadata
"""
if lyvi.ui.view == type:
lyvi.ui.home()
artist = self.artist
title = self.title
number = 1
if type in ('lyrics', 'artistbio', 'guitartabs'):
setattr(self, type, 'Searching...')
elif type in ('backdrops', 'cover'):
setattr(self, type, None)
if type == 'backdrops':
number = 20
items = (self._query(type, number=number)
or self._query(type, number=number, normalize=False))
data = None
if items:
if type == 'backdrops':
data = random.choice(items).data
elif type == 'cover':
data = items[0].data
else:
data = items[0].data.decode()
with self.lock:
if artist == self.artist and title == self.title:
setattr(self, type, data)
|
blender/arm/logicnode/deprecated/LN_pause_action.py | onelsonic/armory | 2,583 | 11079225 | from arm.logicnode.arm_nodes import *
@deprecated('Set Action Paused')
class PauseActionNode(ArmLogicTreeNode):
"""Pauses the given action."""
bl_idname = 'LNPauseActionNode'
bl_label = 'Pause Action'
bl_description = "Please use the \"Set Action Paused\" node instead"
arm_category = 'Animation'
arm_version = 2
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmNodeSocketObject', 'Object')
self.add_output('ArmNodeSocketAction', 'Out')
|
benchmark/sudoku/or-tools.py | matbesancon/ConstraintSolver.jl | 120 | 11079238 | <gh_stars>100-1000
import sys
import time
import numpy as np
from ortools.sat.python import cp_model
def from_file(filename, sep='\n'):
"Parse a file into a list of strings, separated by sep."
lines = open(filename).read().strip().split(sep)
grids = []
for line in lines:
line = line.replace(".","0")
grid = list(line)
grid = list(map(int, grid))
grid = np.reshape(grid, (9,9))
grids.append(grid.tolist())
return grids
def solve(pidx, problem):
n = 9
# Create model
model = cp_model.CpModel()
# variables
x = {}
for i in range(n):
for j in range(n):
x[i, j] = model.NewIntVar(1, n, "x[%i,%i]" % (i, j))
x_flat = [x[i, j] for i in range(n) for j in range(n)]
# all rows and columns must be unique
for i in range(n):
row = [x[i, j] for j in range(n)]
model.AddAllDifferent(row)
col = [x[j, i] for j in range(n)]
model.AddAllDifferent(col)
# cells
    for i in range(3):
        for j in range(3):
cell = [x[r, c]
for r in range(i * 3, i * 3 + 3)
for c in range(j * 3, j * 3 + 3)]
model.AddAllDifferent(cell)
for i in range(n):
for j in range(n):
if problem[i][j]:
model.Add(x[i, j] == problem[i][j])
# search and solution
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
status = solver.Solve(model)
print(str(pidx)+",",solver.WallTime())
if __name__ == "__main__":
grids = from_file("top95.txt")
i = 0
for grid in grids:
solve(i, grid)
i += 1 |
scripts/data_acquisition.py | judexzhu/dzhops | 202 | 11079262 | <reponame>judexzhu/dzhops
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Guibin created on 2015/10/21
# version: 0.01
from __future__ import division
import MySQLdb
import subprocess
import math
from decimal import Decimal
# mysql connection settings (placeholders -- fill in before use)
host = 'DB_HOST'        # database host
db = 'DB_NAME'          # database name
user = 'DB_USER'        # database user
pw = 'DB_PASSWORD'      # database password
port = 3306             # database port (placeholder; set to your MySQL port)
# snmp command
sysone = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.10.1.3.1 |cut -d ':' -f 4|tr -d '[:blank:]'"
sysfive = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.10.1.3.2 |cut -d ':' -f 4|tr -d '[:blank:]'"
sysfifteen = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.10.1.3.3 |cut -d ':' -f 4|tr -d '[:blank:]'"
cpuidle = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.11.11.0 |cut -d ':' -f 4|tr -d '[:blank:]'"
memtotal = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.4.5.0 |cut -d ':' -f 4|tr -d '[:blank:]'"
memfree = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.4.6.0 |cut -d ':' -f 4|tr -d '[:blank:]'"
disktotal = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.9.1.6.1 |cut -d ':' -f 4|tr -d '[:blank:]'"
diskused = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.9.1.8.1 |cut -d ':' -f 4|tr -d '[:blank:]'"
diskperc = "snmpget -v 2c -c public localhost .1.3.6.1.4.1.2021.9.1.9.1 |cut -d ':' -f 4|tr -d '[:blank:]'"
# exec function
def ExecSnmp(getsnmp):
child = subprocess.Popen(getsnmp, shell=True, stdout=subprocess.PIPE)
child.wait()
middle_res = child.stdout.read()
result = middle_res.strip('\n')
return result
def SaveMy(sql):
conn = MySQLdb.connect(
host = host,
user = user,
passwd = pw,
db = db,
port = port,
charset='utf8')
cursor = conn.cursor()
try:
cursor.execute(sql)
conn.commit()
except MySQLdb.Error,e:
# Rollback in case there is any error
#mysqlErro = "Mysql Error %d: %s" % (e.args[0], e.args[1])
conn.rollback()
#get monitor data
sysload1 = ExecSnmp(sysone)
sysload5 = ExecSnmp(sysfive)
sysload15 = ExecSnmp(sysfifteen)
cpuidl = ExecSnmp(cpuidle)
cpuused = str(100 - int(cpuidl))
memtol = ExecSnmp(memtotal)
memfre = ExecSnmp(memfree)
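# The memory values appear to come back with a 'kB' suffix (e.g. '16331120kB'),
# hence the [:-2] slices below strip the unit before converting to int.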
memperc = str(100 - int(round(int(memfre[:-2])/int(memtol[:-2])*100)))
memtolg = str(int(math.ceil(int(memtol[:-2])/1024/1024)))
memusdg = Decimal(str(round((int(memtol[:-2]) - int(memfre[:-2]))/1024/1024,2)))
disktol = ExecSnmp(disktotal)
diskusd = ExecSnmp(diskused)
diskpr = ExecSnmp(diskperc)
dktotal = str(int(math.ceil(int(disktol)/1024/1024)))
dkused = Decimal(str(round(int(diskusd)/1024/1024,1)))
#save to mysql
serv_sql = '''INSERT INTO `index_servstatus`
(`nowtime`,`sysone`,`sysfive`,`sysfifteen`,`cpuperc`,`memtotal`,`memused`,`memperc`,`disktotal`,`diskused`,`diskperc`)
VALUES
(now(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
conn = MySQLdb.connect(
host = host,
user = user,
passwd = pw,
db = db,
port = port,
charset='utf8')
cursor = conn.cursor()
try:
cursor.execute(serv_sql, (sysload1,sysload5,sysload15,cpuused,memtolg,memusdg,memperc,dktotal,dkused,diskpr))
conn.commit()
except MySQLdb.Error,e:
# Rollback in case there is any error
#mysqlErro = "Mysql Error %d: %s" % (e.args[0], e.args[1])
conn.rollback()
#print 'system load : %s, %s, %s' % (sysload1, sysload5, sysload15)
#print 'CPU used perc: %d%%' % cpuused
#print 'Mem used perc: %d%%' % memperc
#print 'Mem : %s/%d' % (memusdg, memtolg)
#print 'Disk : %s/%d %s%%' % (dktotal, dkused, diskpr)
|
tests/test_recurrence.py | mraihan19/AgentNet | 337 | 11079271 | """
tests for recurrence class
"""
import numpy as np
import theano
import agentnet
from agentnet.memory import RNNCell,GRUCell, LSTMCell
import lasagne
from lasagne.layers import *
def test_recurrence():
"""minimalstic test"""
sequence = InputLayer((None, None, 3), name='input sequence')
initial = InputLayer((None, 10), name='gru zero tick')
# step
inp = InputLayer((None, 3))
prev_gru = InputLayer((None, 10))
gru = GRUCell(prev_gru, inp, name='rnn')
rec = agentnet.Recurrence(input_sequences={inp: sequence},
state_variables={gru: prev_gru},
state_init={gru: initial}, # defaults to zeros
unroll_scan=False)
weights = get_all_params(rec)
gru_states = rec[gru]
run = theano.function([sequence.input_var, initial.input_var], get_output(gru_states), )
assert tuple(run(np.random.randn(5, 25, 3), np.random.randn(5, 10)).shape) == (5, 25, 10)
def test_recurrence_larger():
"""larger recurrence"""
sequence = InputLayer((None, None, 3), name='input sequence')
initial_cell = InputLayer((None, 20), name='lstm cell zero tick')
# step
inp = InputLayer((None, 3))
prev_rnn = InputLayer((None, 10))
rnn = RNNCell(prev_rnn, inp, name='rnn')
prev_lstm_cell = InputLayer((None,20)) #lstm cell
prev_lstm_hid = InputLayer((None, 20)) #lstm output
lstm_cell,lstm_hid = LSTMCell(prev_lstm_cell,prev_lstm_hid,input_or_inputs=rnn)
lstm_hid = DropoutLayer(lstm_hid,p=0.5) #dropout hid, but not cell. Just to check it works
from collections import OrderedDict #one can use regular dict but that causes a warning
rec = agentnet.Recurrence(input_sequences={inp: sequence},
state_variables=OrderedDict({rnn: prev_rnn,
lstm_hid:prev_lstm_hid,
lstm_cell:prev_lstm_cell
}),
state_init={lstm_cell: initial_cell}, # defaults to zeros
unroll_scan=False)
weights = get_all_params(rec)
rnn_states = rec[rnn]
lstm_cell_states = rec[lstm_cell]
lstm_hid_states = rec[lstm_hid]
run = theano.function([sequence.input_var, initial_cell.input_var],
get_output([rnn_states,lstm_cell_states,lstm_hid_states]),
updates = rec.get_automatic_updates() #if any randomness is used AND unroll_scan,
# one has to pass automatic updates
)
out = run(np.random.randn(5, 25, 3), np.random.randn(5, 20))
assert tuple(out[0].shape) == (5, 25, 10) #rnn
assert tuple(out[1].shape) == (5, 25, 20) #lstm cell
assert tuple(out[2].shape) == (5, 25, 20) #lstm hid (aka output)
def test_recurrence_substituted():
"""test whether it is possible to use intermediate layers as recurrence inputs"""
sequence = InputLayer((None, None, 3), name='input sequence')
sequence_intermediate = InputLayer((None, None, 5), name='intermediate values sequence')
initial = InputLayer((None, 10), name='gru zero tick')
# step
inp = InputLayer((None, 3),name='input')
intermediate = DenseLayer(inp,5,name='intermediate')
prev_gru = InputLayer((None, 10),name='prev rnn')
gru = GRUCell(prev_gru, intermediate, name='rnn')
#regular recurrence, provide inputs, intermediate is computed regularly
rec = agentnet.Recurrence(input_sequences={inp: sequence},
state_variables={gru: prev_gru},
state_init={gru: initial}, # defaults to zeros
unroll_scan=False)
weights = get_all_params(rec)
assert intermediate.b in weights
gru_states = rec[gru]
run = theano.function([sequence.input_var, initial.input_var], get_output(gru_states), )
assert tuple(run(np.random.randn(5, 25, 3), np.random.randn(5, 10)).shape) == (5, 25, 10)
#recurrence with substituted intermediate values
rec2= agentnet.Recurrence(input_sequences={intermediate: sequence_intermediate},
state_variables={gru: prev_gru},
state_init={gru: initial}, # defaults to zeros
unroll_scan=False)
weights2 = get_all_params(rec2)
assert intermediate.b not in weights2
gru_states2 = rec2[gru]
run = theano.function([sequence_intermediate.input_var, initial.input_var], get_output(gru_states2), )
assert tuple(run(np.random.randn(5, 25, 5), np.random.randn(5, 10)).shape) == (5, 25, 10)
def test_recurrence_mask():
"""test mask_input"""
np.random.seed(1337)
sequence = InputLayer((None, None, 2), name='input sequence')
mask = InputLayer((None, None), name="rnn mask [batch,tick]")
# step
inp = InputLayer((None, 2))
prev_rnn = InputLayer((None, 3))
rnn = RNNCell(prev_rnn, inp, name='rnn',
nonlinearity=lasagne.nonlinearities.linear,
b=lasagne.init.Constant(100.0)) # init with positive constant to make sure hiddens change
out = DenseLayer(rnn,num_units=10,nonlinearity=lasagne.nonlinearities.softmax)
rec = agentnet.Recurrence(input_sequences={inp: sequence},
state_variables={rnn: prev_rnn},
tracked_outputs=[out],
unroll_scan=False,
mask_input=mask)
rnn_states = rec[rnn]
outs = rec[out]
run = theano.function([sequence.input_var, mask.input_var], get_output([rnn_states,outs]))
seq = np.random.randn(4, 5, 2)
mask = np.zeros([4, 5])
mask[:2, :3] = 1
mask[2:, 2:] = 1
h_seq, out_seq = run(seq, mask)
assert tuple(h_seq.shape) == (4, 5, 3)
assert tuple(out_seq.shape) == (4,5,10)
diff_out = np.diff(h_seq, axis=1)
assert np.all(np.diff(h_seq, axis=1)[:2, 2:] == 0)
assert np.all(np.diff(h_seq, axis=1)[:2, :2] != 0)
assert np.all(np.diff(h_seq, axis=1)[2:, 1:] != 0)
assert np.all(np.diff(h_seq, axis=1)[2:, :1] == 0)
|
tests/forte/processors/stave_processor_test.py | zhanyuanucb/forte | 163 | 11079294 | # Copyright 2021 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for stave processor.
"""
import os
import sys
import json
import unittest
import threading
from typing import Any, Dict, Iterator, Optional, Type, Set, List
from forte.common import ProcessorConfigError
from forte.data.data_pack import DataPack
from forte.pipeline import Pipeline
from forte.data.readers import OntonotesReader
from forte.processors.base import PackProcessor, RequestPackingProcessor
from forte.processors.base.batch_processor import Predictor
from ft.onto.base_ontology import Token, Sentence, EntityMention, RelationLink
from forte.processors.stave import StaveProcessor
from stave_backend.lib.stave_project import StaveProjectReader
class TestStaveProcessor(unittest.TestCase):
def setUp(self):
self._port: int = 8880
_file_dir_path: str = os.path.dirname(__file__)
self._project_name: str = "serialization_pipeline_test"
self._dataset_dir: str = os.path.abspath(
os.path.join(
_file_dir_path, "../../../", "data_samples/ontonotes/00/"
)
)
self._test_specs_dir: str = os.path.abspath(
os.path.join(_file_dir_path, "../data/ontology/test_specs/")
)
self._stave_processor = StaveProcessor()
self.pl = Pipeline[DataPack](
ontology_file=os.path.join(
self._test_specs_dir, "test_stave_ontology.json"
)
)
self.pl.set_reader(OntonotesReader())
def test_stave_viewer(self):
"""
        Test Stave in viewer mode. Project data, including metadata and
textpacks, will be dumped to local disk. Consistency checking
is performed here to verify the dumped project data.
"""
self.pl.add(
self._stave_processor,
config={
"project_name": self._project_name,
"server_thread_daemon": True,
},
)
self.pl.run(self._dataset_dir)
project_reader = StaveProjectReader(
project_path=self._stave_processor.configs.project_path
)
self.assertEqual(project_reader.project_name, self._project_name)
self.assertEqual(
project_reader.project_type,
self._stave_processor.configs.project_type,
)
self.assertEqual(
project_reader.ontology,
self._stave_processor.resources.get("onto_specs_dict"),
)
self.assertEqual(
project_reader.project_configs,
self._stave_processor.configs.project_configs.todict(),
)
# Check default project configuration
with open(
os.path.join(
self._test_specs_dir, "test_project_configuration.json"
),
"r",
) as f:
target_configs = json.load(f)
self.assertEqual(
target_configs,
project_reader.project_configs,
)
# Check the number of newly created documents
count, index = 0, 0
while True:
next_index = project_reader.get_next_index(index)
if next_index == index:
break
count += 1
index = next_index
self.assertEqual(count + 1, len(os.listdir(self._dataset_dir)))
def test_projecttype_exception(self):
"""
Check the validation of `project_type` config.
"""
self.pl.add(
self._stave_processor,
config={
"port": self._port,
"project_type": "multi_pack",
"server_thread_daemon": True,
},
)
with self.assertRaises(ProcessorConfigError) as context:
self.pl.run(self._dataset_dir)
def test_resources_exception(self):
"""
Check exception raised when ontology is not correctly
configured in pipeline.
"""
with self.assertRaises(ProcessorConfigError) as context:
self.pl.resource.remove("onto_specs_path")
self.pl.resource.remove("onto_specs_dict")
self.pl.add(
self._stave_processor,
config={"port": self._port, "server_thread_daemon": True},
)
self.pl.run(self._dataset_dir)
if __name__ == "__main__":
unittest.main()
|
py/tests/test_crashprobe.py | unclead/symbolic | 300 | 11079302 | <filename>py/tests/test_crashprobe.py
import os
import json
import pprint
import pytest
from symbolic import arch_get_ip_reg_name
TEST_PARAMETER = [
("1.4.1", "release", "arm64"),
("1.4.1", "release", "armv7"),
("1.4.1", "release", "x86_64"),
("1.4.1", "debug", "arm64"),
("1.4.1", "debug", "armv7"),
("1.4.1", "debug", "x86_64"),
]
def basename(x):
if x is not None:
return os.path.basename(x)
def _load_dsyms_and_symbolize_stacktrace(
filename, version, build, arch, res_path, make_report_sym
):
path = os.path.join(res_path, "ext", version, build, arch, filename)
if not os.path.isfile(path):
pytest.skip("not test file found")
with open(path) as f:
report = json.load(f)
bt = None
dsym_paths = []
dsyms_folder = os.path.join(res_path, "ext", version, build, "dSYMs")
for file in os.listdir(dsyms_folder):
if file.endswith(".dSYM"):
dsym_paths.append(os.path.join(dsyms_folder, file))
rep = make_report_sym(dsym_paths, report["debug_meta"]["images"])
exc = report["exception"]["values"][0]
stacktrace = exc["stacktrace"]
meta = {"arch": arch}
if "mechanism" in exc:
if "posix_signal" in exc["mechanism"]:
meta["signal"] = exc["mechanism"]["posix_signal"]["signal"]
if "registers" in stacktrace:
ip_reg = arch_get_ip_reg_name(arch)
if ip_reg:
meta["ip_reg"] = stacktrace["registers"].get(ip_reg)
bt = rep.symbolize_backtrace(stacktrace["frames"][::-1], meta=meta)
return bt, report
def _filter_system_frames(bt):
new_bt = []
for frame in bt:
if any(
p in frame["package"] for p in ("CrashProbeiOS", "CrashLibiOS")
) and "main.m" not in (frame.get("filename") or ""):
new_bt.append(frame)
return new_bt
def _test_doCrash_call(bt, index=1):
assert bt[index]["function"] == "-[CRLDetailViewController doCrash]"
assert basename(bt[index]["filename"]) == "CRLDetailViewController.m"
assert bt[index]["line"] == 53
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_pthread_list_lock_report(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Crash with _pthread_list_lock held.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/01/
# -[CRLCrashAsyncSafeThread crash] (CRLCrashAsyncSafeThread.m:41)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashAsyncSafeThread crash]"
assert basename(bt[0]["filename"]) == "CRLCrashAsyncSafeThread.m"
assert bt[0]["line"] == 41
_test_doCrash_call(bt)
@pytest.mark.xfail(reason="C++ Exception handling doesn't work")
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_throw_c_pp_exception(res_path, make_report_sym, version, build, arch):
# http://www.crashprobe.com/ios/02/
# Fails on every crash reporter
raise Exception("Fails on every crash reporter")
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_throw_objective_c_exception(res_path, version, build, arch, make_report_sym):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Throw Objective-C exception.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/03/
# NSGenericException: An uncaught exception! SCREAM.
# -[CRLCrashObjCException crash] (CRLCrashObjCException.m:41)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashObjCException crash]"
assert basename(bt[0]["filename"]) == "CRLCrashObjCException.m"
assert bt[0]["line"] == 41
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_access_a_non_object_as_an_object(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Access a non-object as an object.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/04/
# -[CRLCrashNSLog crash] (CRLCrashNSLog.m:41)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashNSLog crash]"
assert basename(bt[0]["filename"]) == "CRLCrashNSLog.m"
assert bt[0]["line"] == 41
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_crash_inside_objc_msg_send(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Crash inside objc_msgSend().json",
version,
build,
arch,
res_path,
make_report_sym,
)
if arch == "x86_64":
pytest.xfail("bad data from kscrash")
# http://www.crashprobe.com/ios/05/
# -[CRLCrashObjCMsgSend crash] (CRLCrashObjCMsgSend.m:47)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashObjCMsgSend crash]"
assert basename(bt[0]["filename"]) == "CRLCrashObjCMsgSend.m"
assert bt[0]["line"] == 47
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_message_a_released_object(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Message a released object.json",
version,
build,
arch,
res_path,
make_report_sym,
)
if arch == "x86_64":
pytest.xfail("bad data from kscrash")
# http://www.crashprobe.com/ios/06/
# -[CRLCrashReleasedObject crash]_block_invoke (CRLCrashReleasedObject.m:51-53)
# -[CRLCrashReleasedObject crash] (CRLCrashReleasedObject.m:49)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "__31-[CRLCrashReleasedObject crash]_block_invoke"
assert basename(bt[0]["filename"]) == "CRLCrashReleasedObject.m"
assert bt[0]["line"] == (arch == "arm64" and 51 or 53)
assert bt[1]["function"] == "-[CRLCrashReleasedObject crash]"
assert basename(bt[1]["filename"]) == "CRLCrashReleasedObject.m"
assert bt[1]["line"] == 49
_test_doCrash_call(bt, 2)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_write_to_a_read_only_page(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Write to a read-only page.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/07/
# -[CRLCrashROPage crash] (CRLCrashROPage.m:42)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashROPage crash]"
assert basename(bt[0]["filename"]) == "CRLCrashROPage.m"
assert bt[0]["line"] == 42
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_execute_a_privileged_instruction(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Execute a privileged instruction.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/08/
# ARMv7: -[CRLCrashPrivInst crash] (CRLCrashPrivInst.m:42)
# ARM64: -[CRLCrashPrivInst crash] (CRLCrashPrivInst.m:52)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashPrivInst crash]"
assert basename(bt[0]["filename"]) == "CRLCrashPrivInst.m"
if arch == "arm64":
assert bt[0]["line"] == 52
elif arch == "armv7":
assert bt[0]["line"] == 42
elif arch == "x86_64":
assert bt[0]["line"] == 40
else:
assert False
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_execute_an_undefined_instruction(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Execute an undefined instruction.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/09/
# ARMv7: -[CRLCrashUndefInst crash] (CRLCrashUndefInst.m:42)
# ARM64: -[CRLCrashUndefInst crash] (CRLCrashUndefInst.m:50)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashUndefInst crash]"
assert basename(bt[0]["filename"]) == "CRLCrashUndefInst.m"
if arch == "arm64":
assert bt[0]["line"] == 50
elif arch == "armv7":
assert bt[0]["line"] == 42
elif arch == "x86_64":
assert bt[0]["line"] == 40
else:
assert False
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_dereference_a_null_pointer(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Dereference a NULL pointer.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/10/
# -[CRLCrashNULL crash] (CRLCrashNULL.m:37)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashNULL crash]"
assert basename(bt[0]["filename"]) == "CRLCrashNULL.m"
assert bt[0]["line"] == 37
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_dereference_a_bad_pointer(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Dereference a bad pointer.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/11/
# ARMv7: -[CRLCrashGarbage crash] (CRLCrashGarbage.m:48)
# ARM64: -[CRLCrashGarbage crash] (CRLCrashGarbage.m:52)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashGarbage crash]"
assert basename(bt[0]["filename"]) == "CRLCrashGarbage.m"
assert bt[0]["line"] == arch == "arm64" and 52 or 48
# TODO check here we have one more frame on arm64 from kscrash
_test_doCrash_call(bt, arch == "arm64" and 2 or 1)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
@pytest.mark.bad_crashprobe
def test_jump_into_an_nx_page(res_path, make_report_sym, version, build, arch):
# Note mitsuhiko: this test does not actually do what the text says.
# Nothing here is jumping to an NX page, instead the compiler will
# emit a "brk #0x1" for the call to the null pointer function.
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Jump into an NX page.json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/12/
# -[CRLCrashNXPage crash] (CRLCrashNXPage.m:37)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashNXPage crash]"
# This is what crashprobe actually expects but that information is not
# actually in the debug files.
if 0:
assert basename(bt[0]["filename"]) == "CRLCrashNXPage.m"
assert bt[0]["line"] == 37
# So let's assert for the second best
else:
assert basename(bt[0]["filename"]) is None
assert bt[0]["line"] in (None, 0)
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_stack_overflow(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Stack overflow.json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/13/
# -[CRLCrashStackGuard crash] (CRLCrashStackGuard.m:38) or line 39
# -[CRLCrashStackGuard crash] (CRLCrashStackGuard.m:39)
# ...
# -[CRLCrashStackGuard crash] (CRLCrashStackGuard.m:39)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashStackGuard crash]"
assert basename(bt[0]["filename"]) == "CRLCrashStackGuard.m"
if arch == "x86_64":
# Let's just say good enough
assert bt[0]["line"] == 39
else:
assert bt[0]["line"] == 38
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
@pytest.mark.bad_crashprobe
def test_call_builtin_trap(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Call __builtin_trap().json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/14/
# -[CRLCrashTrap crash] (CRLCrashTrap.m:37)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashTrap crash]"
assert basename(bt[0]["filename"]) == "CRLCrashTrap.m"
# Crashprobe (as well as the sourcecode) expects 37 here. This is
# obviously what is expected but if you look into the dsym file you
# can see that for the given address the information says it would be
# in line 35. On x86 we however see the correct result.
assert bt[0]["line"] in (35, 37)
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_call_abort(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Call abort().json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/15/
# -[CRLCrashAbort crash] (CRLCrashAbort.m:37)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashAbort crash]"
assert basename(bt[0]["filename"]) == "CRLCrashAbort.m"
assert bt[0]["line"] == 37
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_corrupt_malloc_s_internal_tracking_information(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Corrupt malloc()'s internal tracking information.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/16/
# -[CRLCrashCorruptMalloc crash] (CRLCrashCorruptMalloc.m:46)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashCorruptMalloc crash]"
assert basename(bt[0]["filename"]) == "CRLCrashCorruptMalloc.m"
assert bt[0]["line"] == 46
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_corrupt_the_objective_c_runtime_s_structures(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Corrupt the Objective-C runtime's structures.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/17/
# -[CRLCrashCorruptObjC crash] (CRLCrashCorruptObjC.m:70)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashCorruptObjC crash]"
assert basename(bt[0]["filename"]) == "CRLCrashCorruptObjC.m"
assert bt[0]["line"] == 70
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
@pytest.mark.xfail(reason="KSCrash does not support dwarf unwinding")
def test_dwarf_unwinding(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"DWARF Unwinding.json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/18/
# CRLFramelessDWARF_test_crash (CRLFramelessDWARF.m:35)
# -[CRLFramelessDWARF crash] (CRLFramelessDWARF.m:49)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert len(bt) > 3
assert bt[2]["function"] == "-[CRLFramelessDWARF crash]"
assert basename(bt[2]["filename"]) == "CRLFramelessDWARF.m"
assert bt[2]["line"] == 49
assert bt[4]["function"] == "CRLFramelessDWARF_test_crash"
assert basename(["filename"]) == "CRLFramelessDWARF.m"
assert bt[4]["line"] == 35
_test_doCrash_call(bt)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_overwrite_link_register_then_crash(
res_path, make_report_sym, version, build, arch
):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Overwrite link register, then crash.json",
version,
build,
arch,
res_path,
make_report_sym,
)
# http://www.crashprobe.com/ios/19/
# -[CRLCrashOverwriteLinkRegister crash] (CRLCrashOverwriteLinkRegister.m:53)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert bt[0]["function"] == "-[CRLCrashOverwriteLinkRegister crash]"
assert basename(bt[0]["filename"]) == "CRLCrashOverwriteLinkRegister.m"
assert bt[0]["line"] == 53
_test_doCrash_call(bt, -1)
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_smash_the_bottom_of_the_stack(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Smash the bottom of the stack.json",
version,
build,
arch,
res_path,
make_report_sym,
)
if arch == "arm64":
pytest.xfail("This test fails everywhere in arm64")
# http://www.crashprobe.com/ios/20/
# -[CRLCrashSmashStackBottom crash] (CRLCrashSmashStackBottom.m:54)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert len(bt) > 0
assert bt[0]["function"] == "-[CRLCrashSmashStackBottom crash]"
assert basename(bt[0]["filename"]) == "CRLCrashSmashStackBottom.m"
# This is slightly wrong on x86 currently
if arch == "x86_64":
assert bt[0]["line"] == 55
else:
assert bt[0]["line"] == 54
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
def test_smash_the_top_of_the_stack(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Smash the top of the stack.json",
version,
build,
arch,
res_path,
make_report_sym,
)
if arch == "arm64":
pytest.xfail("This test fails everywhere in arm64")
if arch == "x86_64":
pytest.xfail("This test fails on x86_64")
# http://www.crashprobe.com/ios/21/
# -[CRLCrashSmashStackTop crash] (CRLCrashSmashStackTop.m:54)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
assert len(bt) > 0
assert bt[0]["function"] == "-[CRLCrashSmashStackTop crash]"
assert basename(bt[0]["filename"]) == "CRLCrashSmashStackTop.m"
assert bt[0]["line"] == 54
@pytest.mark.parametrize("version, build, arch", TEST_PARAMETER)
@pytest.mark.bad_crashprobe
def test_swift(res_path, make_report_sym, version, build, arch):
bt, report = _load_dsyms_and_symbolize_stacktrace(
"Swift.json", version, build, arch, res_path, make_report_sym
)
# http://www.crashprobe.com/ios/22/
# @objc CrashLibiOS.CRLCrashSwift.crash (CrashLibiOS.CRLCrashSwift)() -> () (CRLCrashSwift.swift:36)
# -[CRLDetailViewController doCrash] (CRLDetailViewController.m:53)
assert bt is not None
bt = _filter_system_frames(bt)
pprint.pprint(bt)
# XCode compiled with a wrong name for ARM
# We are testing explicitly here to also catch demangler regressions
if arch == "x86_64":
assert bt[0]["function"] == "CRLCrashSwift.crash()"
else:
assert bt[0]["function"] == "crash"
assert bt[0]["line"] == 36
assert basename(bt[0]["filename"]) == "CRLCrashSwift.swift"
assert bt[1]["function"] == "@objc CRLCrashSwift.crash()"
assert basename(bt[1]["filename"]) == "CRLCrashSwift.swift"
_test_doCrash_call(bt, 2)
|
codeformatter/scssformatter.py | ephenyxshop/sublimetext-codeformatter | 676 | 11079358 | <filename>codeformatter/scssformatter.py
# @author <NAME>
# @copyright No Copyright, use it and modify for betterment
# This is a modified version of cssformatter.py
import re
import scssbeautifier
class ScssFormatter:
def __init__(self, formatter):
self.formatter = formatter
self.opts = formatter.settings.get('codeformatter_scss_options')
def format(self, text):
text = text.decode('utf-8')
stderr = ''
stdout = ''
options = scssbeautifier.default_options()
if ('indent_size' in self.opts and self.opts['indent_size']):
options.indent_size = self.opts['indent_size']
else:
options.indent_size = 4
if ('indent_char' in self.opts and self.opts['indent_char']):
options.indent_char = self.opts['indent_char']
else:
options.indent_char = ' '
if ('indent_with_tabs' in self.opts and self.opts['indent_with_tabs']):
options.indent_with_tabs = True
else:
options.indent_with_tabs = False
if (
'selector_separator_newline' in self.opts and
self.opts['selector_separator_newline']
):
options.selector_separator_newline = True
else:
options.selector_separator_newline = False
if ('end_with_newline' in self.opts and self.opts['end_with_newline']):
options.end_with_newline = True
else:
options.end_with_newline = False
if ('eol' in self.opts and self.opts['eol']):
options.eol = self.opts['eol']
else:
options.eol = '\n'
if ('space_around_combinator' in self.opts and self.opts['space_around_combinator']):
options.space_around_combinator = True
else:
options.space_around_combinator = False
if ('newline_between_rules' in self.opts and self.opts['newline_between_rules']):
options.newline_between_rules = True
else:
options.newline_between_rules = False
try:
stdout = scssbeautifier.beautify(text, options)
except Exception as e:
stderr = str(e)
if (not stderr and not stdout):
stderr = 'Formatting error!'
return stdout, stderr
def format_on_save_enabled(self, file_name):
format_on_save = False
if ('format_on_save' in self.opts and self.opts['format_on_save']):
format_on_save = self.opts['format_on_save']
if (isinstance(format_on_save, str)):
format_on_save = re.search(format_on_save, file_name) is not None
return format_on_save
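# Hedged usage sketch (not part of the original plugin): a minimal stand-in for
# the Sublime Text formatter object this class normally receives, just to show
# how `format` consumes the option dictionary. The option values are arbitrary.
if __name__ == '__main__':
    class _StubSettings(object):
        def get(self, key):
            return {'indent_size': 2, 'end_with_newline': True}
    class _StubFormatter(object):
        settings = _StubSettings()
    out, err = ScssFormatter(_StubFormatter()).format(b'a{color:red;}')
    print(out if not err else err)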
|
tests/unit_tests/explainer/test_smart_state.py | amnaabbassi/shapash | 1,665 | 11079402 | <reponame>amnaabbassi/shapash<filename>tests/unit_tests/explainer/test_smart_state.py<gh_stars>1000+
"""
Unit test smart state
"""
import unittest
from unittest.mock import patch, Mock
import pandas as pd
import numpy as np
from shapash.explainer.smart_state import SmartState
class TestSmartState(unittest.TestCase):
"""
    Unit tests for the SmartState class (shapash.explainer.smart_state).
"""
def test_validate_contributions_1(self):
"""
Unit test validate contributions
"""
state = SmartState()
x_pred = Mock()
contributions = pd.DataFrame(
[[2, 1],
[8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = contributions
output = state.validate_contributions(contributions, x_pred)
assert not pd.testing.assert_frame_equal(expected_output, output)
def test_validate_contributions_2(self):
"""
Unit test validate contributions 2
"""
state = SmartState()
contributions = np.array([[2, 1], [8, 4]])
x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = pd.DataFrame(
[[2, 1],
[8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
output = state.validate_contributions(contributions, x_pred)
assert not pd.testing.assert_frame_equal(expected_output, output)
@patch('shapash.explainer.smart_state.inverse_transform_contributions')
def test_inverse_transform_contributions(self, mock_inverse_transform_contributions):
"""
Unit test inverse transform contributions
Parameters
----------
        mock_inverse_transform_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.inverse_transform_contributions
"""
state = SmartState()
state.inverse_transform_contributions(Mock(), Mock())
mock_inverse_transform_contributions.assert_called()
def test_check_contributions_1(self):
"""
Unit test check contributions 1
"""
state = SmartState()
contributions = pd.DataFrame(
[[-0.2, 0.1],
[0.8, -0.4],
[0.5, -0.7]],
)
x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
)
assert not state.check_contributions(contributions, x_pred)
def test_check_contributions_2(self):
"""
Unit test check contributions 2
"""
state = SmartState()
contributions = pd.DataFrame(
[[-0.2, 0.1],
[0.8, -0.4]],
index=['row_1', 'row_2']
)
x_pred = pd.DataFrame(
[[1, 2],
[3, 4]]
)
assert not state.check_contributions(contributions, x_pred)
def test_check_contributions_3(self):
"""
Unit test check contributions 3
"""
state = SmartState()
contributions = pd.DataFrame(
[[-0.2, 0.1],
[0.8, -0.4]],
columns=['col_1', 'col_2'],
)
x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
)
assert not state.check_contributions(contributions, x_pred)
def test_check_contributions_4(self):
"""
Unit test check contributions 4
"""
state = SmartState()
contributions = pd.DataFrame(
[[-0.2, 0.1],
[0.8, -0.4]],
columns=['col_1', 'col_2'],
index=['row_1', 'row_2']
)
x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['col_1', 'col_2'],
index=['row_1', 'row_2']
)
assert state.check_contributions(contributions, x_pred)
@patch('shapash.explainer.smart_state.rank_contributions')
def test_rank_contributions(self, mock_rank_contributions):
"""
Unit test rank contributions
Parameters
----------
        mock_rank_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.rank_contributions
"""
state = SmartState()
state.rank_contributions(Mock(), Mock())
mock_rank_contributions.assert_called()
def test_assign_contributions_1(self):
"""
Unit test assign contributions 1
"""
state = SmartState()
output = state.assign_contributions([1, 2, 3])
expected = {
'contrib_sorted': 1,
'x_sorted': 2,
'var_dict': 3
}
self.assertDictEqual(output, expected)
def test_assign_contributions_2(self):
"""
Unit test assign contributions 2
"""
state = SmartState()
ranked = [1, 2]
with self.assertRaises(ValueError):
state.assign_contributions(ranked)
@patch('shapash.explainer.smart_state.cap_contributions')
def test_cap_contributions(self, mock_cap_contributions):
"""
Unit test cap contributions
Parameters
----------
        mock_cap_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.cap_contributions
"""
state = SmartState()
state.cap_contributions(Mock(), Mock())
mock_cap_contributions.assert_called()
@patch('shapash.explainer.smart_state.hide_contributions')
def test_hide_contributions(self, mock_hide_contributions):
"""
Unit test hide contributions
Parameters
----------
        mock_hide_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.hide_contributions
"""
state = SmartState()
state.hide_contributions(Mock(), Mock())
mock_hide_contributions.assert_called()
@patch('shapash.explainer.smart_state.sign_contributions')
def test_sign_contributions(self, mock_sign_contributions):
"""
Unit test sign contributions
Parameters
----------
        mock_sign_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.sign_contributions
"""
state = SmartState()
state.sign_contributions(Mock(), Mock())
mock_sign_contributions.assert_called()
@patch('shapash.explainer.smart_state.cutoff_contributions')
def test_cutoff_contributions(self, mock_cutoff_contributions):
"""
Unit test cutoff contributions
Parameters
----------
        mock_cutoff_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.cutoff_contributions
"""
state = SmartState()
state.cutoff_contributions(Mock(), Mock())
mock_cutoff_contributions.assert_called()
@patch('shapash.explainer.smart_state.combine_masks')
def test_combine_masks(self, mock_combine_masks):
"""
Unit test combine masks
Parameters
----------
        mock_combine_masks : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.combine_masks
"""
state = SmartState()
state.combine_masks(Mock())
mock_combine_masks.assert_called()
@patch('shapash.explainer.smart_state.compute_masked_contributions')
def test_compute_masked_contributions(self, mock_compute_masked_contributions):
"""
Unit test compute masked contributions
Parameters
----------
        mock_compute_masked_contributions : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.compute_masked_contributions
"""
state = SmartState()
state.compute_masked_contributions(Mock(), Mock())
mock_compute_masked_contributions.assert_called()
@patch('shapash.explainer.smart_state.init_mask')
def test_init_mask(self, mock_init_mask):
"""
Unit test init mask
Parameters
----------
        mock_init_mask : unittest.mock.MagicMock
            mock for the patched shapash.explainer.smart_state.init_mask
"""
state = SmartState()
state.init_mask(Mock())
mock_init_mask.assert_called()
def test_summarize_1(self):
"""
Unit test summarize 1
"""
state = SmartState()
contrib_sorted = pd.DataFrame(
[[0.32230754, 0.1550689, 0.10183475, 0.05471339],
[-0.58547512, -0.37050409, -0.07249285, 0.00171975],
[-0.48666675, 0.25507156, -0.16968889, 0.0757443]],
columns=['contribution_0', 'contribution_1', 'contribution_2', 'contribution_3'],
index=[0, 1, 2]
)
var_dict = pd.DataFrame(
[[1, 0, 2, 3],
[1, 0, 3, 2],
[1, 0, 2, 3]],
columns=['feature_0', 'feature_1', 'feature_2', 'feature_3'],
index=[0, 1, 2]
)
x_sorted = pd.DataFrame(
[[1., 3., 22., 1.],
[2., 1., 2., 38.],
[2., 3., 26., 1.]],
columns=['feature_0', 'feature_1', 'feature_2', 'feature_3'],
index=[0, 1, 2]
)
mask = pd.DataFrame(
[[True, True, False, False],
[True, True, False, False],
[True, True, False, False]],
columns=['contribution_0', 'contribution_1', 'contribution_2', 'contribution_3'],
index=[0, 1, 2]
)
columns_dict = {0: 'Pclass', 1: 'Sex', 2: 'Age', 3: 'Embarked'}
features_dict = {'Pclass': 'Pclass', 'Sex': 'Sex', 'Age': 'Age', 'Embarked': 'Embarked'}
output = state.summarize(contrib_sorted, var_dict, x_sorted, mask, columns_dict, features_dict)
expected = pd.DataFrame(
[['Sex', 1.0, 0.32230754, 'Pclass', 3.0, 0.1550689],
['Sex', 2.0, -0.58547512, 'Pclass', 1.0, -0.37050409],
['Sex', 2.0, -0.48666675, 'Pclass', 3.0, 0.25507156]],
columns=['feature_1', 'value_1', 'contribution_1', 'feature_2', 'value_2',
'contribution_2'],
index=[0, 1, 2],
dtype=object
)
assert not pd.testing.assert_frame_equal(expected, output)
@patch('shapash.explainer.smart_state.compute_features_import')
def test_compute_features_import(self, mock_compute_features_import):
"""
Unit test compute features import
"""
state = SmartState()
state.compute_features_import(Mock())
mock_compute_features_import.assert_called()
@patch('shapash.explainer.smart_state.group_contributions')
def test_compute_grouped_contributions(self, mock_group_contributions):
"""
Unit test compute features groups contributions
"""
state = SmartState()
state.compute_grouped_contributions(Mock(), {})
mock_group_contributions.assert_called()
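# Convenience runner (an addition, not part of the original shapash test file):
# lets the module be executed directly as well as through pytest.
if __name__ == '__main__':
    unittest.main()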
|
docs/tutorials/notebook_tests.py | RdecKa/bark | 174 | 11079411 | import unittest
import glob
import os
import os.path
class NotebookTests(unittest.TestCase):
@unittest.skip
def test_run_converted_notebook(self):
notebook_list = glob.glob("docs/tutorials/*.ipynb")
exclude_notebooks_list = ["02_maps", "05_Benchmarking"]
for full_notebook_name in notebook_list:
notebook_name_ipynb = os.path.basename(full_notebook_name)
notebook_name = notebook_name_ipynb.split('.')[0]
print(f"Running notebook {notebook_name}.")
if notebook_name in exclude_notebooks_list:
print(f"Skipping notebook {notebook_name}")
else:
new_py_file_name = "notebook_unittest_" + notebook_name
output = os.system(
"jupyter nbconvert --to script --output " + new_py_file_name + " docs/tutorials/" + notebook_name + ".ipynb")
exec(open("docs/tutorials/" + new_py_file_name + ".py").read())
if __name__ == '__main__':
unittest.main()
|
src/bindings/build.py | isabella232/pynacl | 756 | 11079427 | # Copyright 2013 <NAME> and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os.path
import sys
from cffi import FFI
__all__ = ["ffi"]
HEADERS = glob.glob(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "*.h")
)
MINIMAL_HEADERS = glob.glob(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "minimal", "*.h")
)
# Build our FFI instance
ffi = FFI()
for header in HEADERS:
with open(header) as hfile:
ffi.cdef(hfile.read())
source = []
# SODIUM_STATIC controls the visibility of symbols in the headers. (see
# export.h in the libsodium source tree). If you do not set SODIUM_STATIC
# when linking against the static library in Windows then the compile will
# fail with no symbols found.
if os.getenv("PYNACL_SODIUM_STATIC") is not None:
source.append("#define SODIUM_STATIC")
source.append("#include <sodium.h>")
for header in MINIMAL_HEADERS:
with open(header) as hfile:
source.append(hfile.read())
if sys.platform == "win32":
libraries = ["libsodium"]
else:
libraries = ["sodium"]
# Set our source so that we can actually build our bindings to sodium.
ffi.set_source("_sodium", "\n".join(source), libraries=libraries)
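# Hedged build sketch (not part of the original PyNaCl sources): compile the
# extension directly with cffi's out-of-line API, assuming libsodium headers
# and libraries are discoverable on this machine.
if __name__ == "__main__":
    ffi.compile(verbose=True)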
|
corehq/apps/userreports/transforms/custom/numeric.py | dimagilg/commcare-hq | 471 | 11079477 | def get_short_decimal_display(num):
try:
if num % 1 == 0:
return int(num)
return round(num, 2)
except:
return num
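# Hedged quick check (not part of the original CommCare HQ module): sample
# inputs and the values this transform is expected to produce; all made up.
if __name__ == '__main__':
    assert get_short_decimal_display(3.0) == 3
    assert get_short_decimal_display(2.456) == 2.46
    assert get_short_decimal_display("n/a") == "n/a"   # non-numeric input passes through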
|
flash/pointcloud/segmentation/open3d_ml/sequences_dataset.py | Actis92/lightning-flash | 1,457 | 11079492 | <filename>flash/pointcloud/segmentation/open3d_ml/sequences_dataset.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import basename, dirname, exists, isdir, isfile, join, split
import numpy as np
import yaml
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import Dataset
from flash.core.utilities.imports import _POINTCLOUD_AVAILABLE
if _POINTCLOUD_AVAILABLE:
from open3d._ml3d.datasets.utils import DataProcessing
from open3d._ml3d.utils.config import Config
class SequencesDataset(Dataset):
def __init__(
self,
data,
cache_dir="./logs/cache",
use_cache=False,
num_points=65536,
ignored_label_inds=[0],
predicting=False,
**kwargs,
):
super().__init__()
self.name = "Dataset"
self.ignored_label_inds = ignored_label_inds
kwargs["cache_dir"] = cache_dir
kwargs["use_cache"] = use_cache
kwargs["num_points"] = num_points
kwargs["ignored_label_inds"] = ignored_label_inds
self.cfg = Config(kwargs)
self.predicting = predicting
if not predicting:
self.on_fit(data)
else:
self.on_predict(data)
@property
def color_map(self):
return self.meta["color_map"]
def on_fit(self, dataset_path):
self.split = basename(dataset_path)
self.load_meta(dirname(dataset_path))
self.dataset_path = dataset_path
self.label_to_names = self.get_label_to_names()
self.num_classes = len(self.label_to_names) - len(self.ignored_label_inds)
self.make_datasets()
def load_meta(self, root_dir):
meta_file = join(root_dir, "meta.yaml")
if not exists(meta_file):
raise MisconfigurationException(
f"The {root_dir} should contain a `meta.yaml` file about the pointcloud sequences."
)
with open(meta_file) as f:
self.meta = yaml.safe_load(f)
self.label_to_names = self.get_label_to_names()
self.num_classes = len(self.label_to_names)
with open(meta_file) as f:
self.meta = yaml.safe_load(f)
remap_dict_val = self.meta["learning_map"]
max_key = max(remap_dict_val.keys())
remap_lut_val = np.zeros((max_key + 100), dtype=np.int32)
remap_lut_val[list(remap_dict_val.keys())] = list(remap_dict_val.values())
self.remap_lut_val = remap_lut_val
def make_datasets(self):
self.path_list = []
for seq in os.listdir(self.dataset_path):
sequence_path = join(self.dataset_path, seq)
directories = [f for f in os.listdir(sequence_path) if isdir(join(sequence_path, f)) and f != "labels"]
assert len(directories) == 1
scan_dir = join(sequence_path, directories[0])
for scan_name in os.listdir(scan_dir):
self.path_list.append(join(scan_dir, scan_name))
def on_predict(self, data):
if isinstance(data, list):
if not all(isfile(p) for p in data):
raise MisconfigurationException("The predict input data takes only a list of paths or a directory.")
root_dir = split(data[0])[0]
elif isinstance(data, str):
if not isdir(data) and not isfile(data):
raise MisconfigurationException("The predict input data takes only a list of paths or a directory.")
if isdir(data):
root_dir = data
data = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if ".bin" in f]
elif isfile(data):
root_dir = dirname(data)
data = [data]
else:
raise MisconfigurationException("The predict input data takes only a list of paths or a directory.")
else:
raise MisconfigurationException("The predict input data takes only a list of paths or a directory.")
self.path_list = data
self.split = "predict"
self.load_meta(root_dir)
def get_label_to_names(self):
"""Returns a label to names dictonary object.
Returns:
A dict where keys are label numbers and
values are the corresponding names.
"""
return self.meta["label_to_names"]
def __getitem__(self, index):
data = self.get_data(index)
data["attr"] = self.get_attr(index)
return data
def get_data(self, idx):
pc_path = self.path_list[idx]
points = DataProcessing.load_pc_kitti(pc_path)
dir, file = split(pc_path)
if self.predicting:
label_path = join(dir, file[:-4] + ".label")
else:
label_path = join(dir, "../labels", file[:-4] + ".label")
if not exists(label_path):
labels = np.zeros(np.shape(points)[0], dtype=np.int32)
if self.split not in ["test", "all"]:
raise FileNotFoundError(f" Label file {label_path} not found")
else:
labels = DataProcessing.load_label_kitti(label_path, self.remap_lut_val).astype(np.int32)
data = {
"point": points[:, 0:3],
"feat": None,
"label": labels,
}
return data
def get_attr(self, idx):
pc_path = self.path_list[idx]
dir, file = split(pc_path)
_, seq = split(split(dir)[0])
name = f"{seq}_{file[:-4]}"
pc_path = str(pc_path)
attr = {"idx": idx, "name": name, "path": pc_path, "split": self.split}
return attr
def __len__(self):
return len(self.path_list)
def get_split(self, *_):
return self
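if __name__ == "__main__":
    # Hedged usage sketch (not part of the original Lightning Flash code): how
    # the dataset is typically instantiated for training. The path below is a
    # placeholder; the folder must contain a meta.yaml (with label_to_names,
    # learning_map and color_map keys) next to the sequence sub-folders.
    dataset = SequencesDataset("/path/to/SemanticKITTI/train", num_points=65536)
    print(len(dataset), "scans,", dataset.num_classes, "classes")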
|
ritsar/phsTools.py | mtho1/RITSAR | 133 | 11079516 | #Include dependencies
import numpy as np
from numpy import exp, pi
from numpy.linalg import norm
from . import signal as sig
from . signal import RECT
def simulate_phs(platform, points = [[0,0,0]], amplitudes = [1]):
##############################################################################
# #
    #  This function takes a list of target locations and amplitudes and       #
    #  simulates the demodulated signal, which is returned to the caller (the  #
    #  optional save to './phase_history.npy' is left commented out below).    #
# #
##############################################################################
#Retrieve relevent parameters
c = 3.0e8
gamma = platform['chirprate']
f_0 = platform['f_0']
t = platform['t']
pos = platform['pos']
npulses = platform['npulses']
nsamples= platform['nsamples']
T_p = platform['T_p']
#Simulate the phase history for each pulse, for each point
phs = np.zeros([npulses, nsamples])+0j
for i in range(npulses):
print('simulating pulse %i'%(i+1))
R_0 = norm(pos[i])
j=0
for p in points:
R_t = norm(pos[i]-p)
dr = R_t-R_0
phase = pi*gamma*(2*dr/c)**2-\
2*pi*(f_0+gamma*t)*2*dr/c
phs[i,:] += amplitudes[j]*exp(1j*phase)*RECT((t-2*dr/c),T_p)
j+=1
#np.save('./phase_history.npy', phs)
return(phs)
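def _example_platform(npulses=128, nsamples=512):
    """
    Hedged helper (not part of the original RITSAR module): builds a toy
    'platform' dictionary with the keys the functions in this file read
    (chirprate, f_0, t, pos, npulses, nsamples, T_p, delta_r, k_r). All of
    the numbers below are placeholders, not a meaningful radar configuration.
    """
    t = np.linspace(-1.0e-4, 1.0e-4, nsamples)
    pos = np.zeros([npulses, 3])
    pos[:, 1] = 1.0e4                                   # constant stand-off along y [m]
    k_r = np.tile(np.linspace(418.0, 420.0, nsamples), [npulses, 1])
    return {'chirprate': 5.0e12, 'f_0': 10.0e9, 't': t, 'pos': pos,
            'npulses': npulses, 'nsamples': nsamples, 'T_p': 2.0e-4,
            'delta_r': 0.1, 'k_r': k_r}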
def RVP_correct(phs, platform):
##############################################################################
# #
# Corrects Residual Video Phase using the formulation in Carrera Appendix C #
# #
##############################################################################
#Retrieve relevent parameters
c = 3.0e8
gamma = platform['chirprate']
nsamples= platform['nsamples']
npulses = platform['npulses']
dr = platform['delta_r']
#Calculate frequency sample locations w.r.t. demodulated fast time
f_t = np.linspace(-nsamples/2, nsamples/2, nsamples)*\
2*gamma/c*dr
#Calculate correction factor
S_c = exp(-1j*pi*f_t**2/gamma)
S_c2 = np.tile(S_c,[npulses,1])
#Filter original signal
PHS = sig.ft(phs)
phs_corr = sig.ift(PHS*S_c2)
return (phs_corr)
def phs_to_const_ref(phs, platform, upchirp = 1):
##############################################################################
# #
# This program converts a phase history that was demodulated using a pulse #
# dependant range to scene center to a phase history that is demodulated #
# using a fixed reference. The fixed reference is defined as the minimum #
# range to scene center. #
# #
##############################################################################
#Retrieve relevent parameters
c = 3.0e8
f0 = platform['f_0']
gamma = platform['chirprate']
pos = platform['pos']
t = platform['t']
npulses = platform['npulses']
#Define ranges to scene center and new reference range
#using Carrera's notation
R_a = norm(pos, axis = -1)
R_a = np.array([R_a]).T
R_s = R_a.min()
DR = R_a-R_s
#Derive fast time using constant reference
t = np.tile(t,(npulses,1))
#Remove original reference and incorporate new reference into phs
sgn = (-1)**(upchirp)
phs = phs*exp(sgn*1j*4*pi*gamma/c*(f0/gamma+t)*DR)
return(phs)
def reMoComp(phs, platform, center = np.array([0,0,0])):
##############################################################################
# #
# This is the re-motion compensation algorithm. It re-motion compensates #
# the phase history to a new scene center. The "center" argument is the #
# 3D vector (in meters) that points to the new scene center using the old #
# scene center as the origin. #
# #
##############################################################################
#Retrieve relevent parameters
k_r = platform['k_r']
pos = platform['pos']
R0 = np.array([norm(pos, axis = -1)]).T
RC = np.array([norm(pos-center, axis = -1)]).T
dr = R0-RC
remocomp = np.exp(-1j*k_r*dr)
phs = phs*remocomp
return(phs) |
release/stubs.min/Autodesk/__init__.py | htlcnn/ironpython-stubs | 182 | 11079536 | <gh_stars>100-1000
# encoding: utf-8
# module Autodesk
# from RevitAPI,Version=172.16.31.10,Culture=neutral,PublicKeyToken=null,RevitAPIUI,Version=172.16.31.10,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# no classes
# variables with complex values
|
milano/search_algorithms/gp/spearmint/utils.py | NVIDIA/Milano | 145 | 11079580 | <gh_stars>100-1000
# ##
# # Copyright (C) 2012 <NAME>, <NAME> and <NAME>
# #
# # This code is written for research and educational purposes only to
# # supplement the paper entitled "Practical Bayesian Optimization of
# # Machine Learning Algorithms" by Snoek, Larochelle and Adams Advances
# # in Neural Information Processing Systems, 2012
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
# #
# # This program is distributed in the hope that it will be useful, but
# # WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# # General Public License for more details.
# #
# # You should have received a copy of the GNU General Public License
# # along with this program. If not, see
# # <http://www.gnu.org/licenses/>.
# This code was modified to be compatible with NVAML project
import numpy as np
import numpy.random as npr
from .sobol_lib import i4_sobol_generate
def slice_sample(init_x, logprob, sigma=1.0, step_out=True, max_steps_out=1000,
compwise=False, verbose=False):
def direction_slice(direction, init_x):
def dir_logprob(z):
return logprob(direction * z + init_x)
upper = sigma * npr.rand()
lower = upper - sigma
llh_s = np.log(npr.rand()) + dir_logprob(0.0)
l_steps_out = 0
u_steps_out = 0
if step_out:
while dir_logprob(lower) > llh_s and l_steps_out < max_steps_out:
l_steps_out += 1
lower -= sigma
while dir_logprob(upper) > llh_s and u_steps_out < max_steps_out:
u_steps_out += 1
upper += sigma
steps_in = 0
while True:
steps_in += 1
new_z = (upper - lower) * npr.rand() + lower
new_llh = dir_logprob(new_z)
if np.isnan(new_llh):
print(new_z, direction * new_z + init_x, new_llh, llh_s, init_x,
logprob(init_x))
raise Exception("Slice sampler got a NaN")
if new_llh > llh_s:
break
elif new_z < 0:
lower = new_z
elif new_z > 0:
upper = new_z
else:
raise Exception("Slice sampler shrank to zero!")
if verbose:
print("Steps Out:", l_steps_out, u_steps_out, " Steps In:", steps_in)
return new_z * direction + init_x
if not init_x.shape:
init_x = np.array([init_x])
dims = init_x.shape[0]
if compwise:
ordering = np.arange(dims)
npr.shuffle(ordering)
cur_x = init_x.copy()
for d in ordering:
direction = np.zeros((dims))
direction[d] = 1.0
cur_x = direction_slice(direction, cur_x)
return cur_x
else:
direction = npr.randn(dims)
direction = direction / np.sqrt(np.sum(direction ** 2))
return direction_slice(direction, init_x)
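# Hedged usage sketch (not part of the original Spearmint code): a few
# slice-sampling steps on a standard normal log-density, purely illustrative.
if __name__ == "__main__":
  x = np.zeros(2)
  for _ in range(5):
    x = slice_sample(x, lambda v: -0.5 * np.sum(v ** 2), sigma=1.0)
  print("sample after 5 slice steps:", x)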
class Parameter:
def __init__(self):
    self.type = []
    self.name = []
self.min = []
self.max = []
self.options = []
self.int_val = []
self.dbl_val = []
self.str_val = []
class GridMap:
def __init__(self, variables, grid_size):
self.variables = variables
self.cardinality = 0
# Count the total number of dimensions and roll into new format.
for variable in variables:
self.cardinality += variable['size']
# Get a list of candidate experiments generated from a sobol sequence
def hypercube_grid(self, size, seed):
# Generate from a sobol sequence
sobol_grid = np.transpose(i4_sobol_generate(self.cardinality, size, seed))
return sobol_grid
# Convert a variable to the unit hypercube
# Takes a single variable encoded as a list, assuming the ordering is
# the same as specified in the configuration file
def to_unit(self, v):
unit = np.zeros(self.cardinality)
index = 0
for variable in self.variables:
# param.name = variable['name']
if variable['type'] == 'int':
for dd in range(variable['size']):
unit[index] = self._index_unmap(float(v.pop(0)) - variable['min'], (
variable['max'] - variable['min']) + 1)
index += 1
elif variable['type'] == 'float':
for dd in range(variable['size']):
unit[index] = (float(v.pop(0)) - variable['min']) / (
variable['max'] - variable['min'])
index += 1
elif variable['type'] == 'enum':
for dd in range(variable['size']):
unit[index] = variable['options'].index(v.pop(0))
index += 1
# TODO: add log_float if this function is going to be used
else:
raise Exception("Unknown parameter type.")
if len(v) > 0:
raise Exception("Too many variables passed to parser")
return unit
def unit_to_list(self, u):
params = self.get_params(u)
paramlist = []
for p in params:
if p.type == 'int':
for v in p.int_val:
paramlist.append(v)
if p.type == 'float':
for v in p.dbl_val:
paramlist.append(v)
if p.type == 'enum':
for v in p.str_val:
paramlist.append(v)
return paramlist
def get_params(self, u):
if u.shape[0] != self.cardinality:
raise Exception("Hypercube dimensionality is incorrect.")
params = []
index = 0
for variable in self.variables:
param = Parameter()
param.name = variable['name']
if variable['type'] == 'int':
param.type = 'int'
for dd in range(variable['size']):
param.int_val.append(
variable['min'] + self._index_map(u[index], variable['max'] -
variable['min'] + 1)
)
index += 1
elif variable['type'] == 'float':
param.type = 'float'
for dd in range(variable['size']):
val = variable['min'] + u[index] * (variable['max'] - variable['min'])
val = variable['min'] if val < variable['min'] else val
val = variable['max'] if val > variable['max'] else val
param.dbl_val.append(val)
index += 1
elif variable['type'] == 'log_float':
param.type = 'float'
for dd in range(variable['size']):
val = np.log(variable['min']) + u[index] * (np.log(variable['max']) - np.log(variable['min']))
val = np.log(variable['min']) if val < np.log(variable['min']) else val
val = np.log(variable['max']) if val > np.log(variable['max']) else val
param.dbl_val.append(np.exp(val))
index += 1
elif variable['type'] == 'enum':
param.type = 'enum'
for dd in range(variable['size']):
ii = self._index_map(u[index], len(variable['options']))
index += 1
param.str_val.append(variable['options'][ii])
else:
raise Exception("Unknown parameter type.")
params.append(param)
return params
def card(self):
return self.cardinality
def _index_map(self, u, items):
return int(np.floor((1 - np.finfo(float).eps) * u * float(items)))
def _index_unmap(self, u, items):
return float(float(u) / float(items)) |
gaphor/ui/tests/test_notifier.py | bertob/gaphor | 867 | 11079584 | import pytest
from gaphor.ui.event import Notification
from gaphor.ui.mainwindow import new_builder
from gaphor.ui.notification import InAppNotifier
@pytest.fixture
def builder():
return new_builder()
@pytest.fixture
def in_app_notifier(builder):
return InAppNotifier(builder)
def test_notifier_has_widgets(in_app_notifier):
assert in_app_notifier.revealer
assert in_app_notifier.message_label
def test_notifier_can_handle_message(in_app_notifier):
in_app_notifier.handle(Notification("a message"))
assert in_app_notifier.revealer.get_reveal_child()
|
parl/remote/zmq_utils.py | lp2333/PARL | 3,172 | 11079585 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
from parl.remote import remote_constants
def create_server_socket(ctx, heartbeat_timeout=False):
"""Create a server socket with a random port (support raising timeout exception).
Args:
ctx(zmq.Context()): context of zmq
        heartbeat_timeout(bool): whether to set the receive timeout
            (HEARTBEAT_RCVTIMEO_S) on the server socket. (The default value is False)
Returns:
socket(zmq.Context().socket): socket of the server.
port(int): port of the server socket.
"""
socket = ctx.socket(zmq.REP)
if heartbeat_timeout:
socket.setsockopt(zmq.RCVTIMEO,
remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
socket.linger = 0
port = socket.bind_to_random_port(addr="tcp://*")
return socket, port
def create_client_socket(ctx, server_socket_address, heartbeat_timeout=False):
"""Create a client socket to connect the `server_socket_address`
(support raising timeout exception).
Args:
ctx(zmq.Context()): context of zmq
server_socket_address(str): address of server socket
        heartbeat_timeout(bool): whether to set the receive timeout
            (HEARTBEAT_RCVTIMEO_S) on the client socket, i.e. while waiting
            for the reply. (The default value is False)
Returns:
socket(zmq.Context().socket): socket of the client.
"""
socket = ctx.socket(zmq.REQ)
if heartbeat_timeout:
socket.setsockopt(zmq.RCVTIMEO,
remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
socket.linger = 0
socket.connect("tcp://{}".format(server_socket_address))
return socket
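if __name__ == '__main__':
    # Hedged usage sketch (not part of the original PARL module): pair the two
    # helpers above inside one process and exchange a single REQ/REP message.
    # The payload bytes are arbitrary.
    ctx = zmq.Context()
    server_socket, port = create_server_socket(ctx)
    client_socket = create_client_socket(ctx, 'localhost:{}'.format(port))
    client_socket.send(b'ping')
    print(server_socket.recv())   # b'ping'
    server_socket.send(b'pong')
    print(client_socket.recv())   # b'pong'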
|
setup.py | teleological/bulbs | 234 | 11079605 | """
Bulbs
-----
Bulbs is a Python persistence framework for graph databases that
connects to Neo4j Server, Rexster, OrientDB, Lightsocket, and more.
"""
import sys
from setuptools import Command, setup, find_packages
class run_audit(Command):
"""Audits source code using PyFlakes for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit source code with PyFlakes"
user_options = []
def initialize_options(self):
all = None
def finalize_options(self):
pass
def run(self):
import os, sys
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print("Audit requires PyFlakes installed in your system.")
sys.exit(-1)
dirs = ['bulbs', 'tests']
# Add example directories
#for dir in ['blog',]:
# dirs.append(os.path.join('examples', dir))
# TODO: Add test subdirectories
warns = 0
for dir in dirs:
for filename in os.listdir(dir):
if filename.endswith('.py') and filename != '__init__.py':
warns += flakes.checkPath(os.path.join(dir, filename))
if warns > 0:
print(("Audit finished with total %d warnings." % warns))
else:
print("No problems found in sourcecode.")
def run_tests():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'tests'))
from bulbs_tests import suite
return suite()
# Python 3
install_requires = ['distribute', 'httplib2>=0.7.2', 'pyyaml>=3.10', 'six', 'omnijson']
if sys.version < '3':
install_requires.append('python-dateutil==1.5')
else:
# argparse is in 3.2 but not 3.1
install_requires.append('argparse')
install_requires.append('python-dateutil>=2')
setup (
name = 'bulbs',
version = '0.3.29',
url = 'https://github.com/espeed/bulbs',
license = 'BSD',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'A Python persistence framework for graph databases that '
'connects to Neo4j Server, Rexster, OrientDB, Lightsocket.',
long_description = __doc__,
keywords = "graph database DB persistence framework rexster gremlin cypher neo4j orientdb",
packages = find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=install_requires,
classifiers = [
"Programming Language :: Python",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Distributed Computing",
],
cmdclass={'audit': run_audit},
test_suite='__main__.run_tests'
)
|
infra/bots/assets/mockery/create.py | travisleithead/skia | 6,304 | 11079615 | #!/usr/bin/env python
#
# Copyright 2020 Google LLC.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import subprocess
import os
URL = "https://github.com/vektra/mockery/releases/download/v2.4.0/mockery_2.4.0_Linux_x86_64.tar.gz"
def create_asset(target_dir):
"""Create the asset."""
os.chdir(target_dir)
output = subprocess.check_output(["wget", URL, "--output-document=mockery.tar.gz"])
print(output)
output = subprocess.check_output(["tar", "-xvf", "mockery.tar.gz"])
print(output)
os.remove("mockery.tar.gz")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
convlab2/policy/vhus/util.py | Malavikka/ConvLab-2 | 339 | 11079653 | <reponame>Malavikka/ConvLab-2
from copy import deepcopy
import numpy as np
import torch
def padding(old, l):
"""
pad a list of different lens "old" to the same len "l"
"""
new = deepcopy(old)
for i, j in enumerate(new):
new[i] += [0] * (l - len(j))
new[i] = j[:l]
return new
def padding_data(data):
batch_goals, batch_usrdas, batch_sysdas = deepcopy(data)
batch_input = {}
posts_length = []
posts = []
origin_responses = []
origin_responses_length = []
goals_length = []
goals = []
terminated = []
''' start padding '''
max_goal_length = max([len(sess_goal) for sess_goal in batch_goals]) # G
sentence_num = [len(sess) for sess in batch_sysdas]
# usr begins the session
max_sentence_num = max(max(sentence_num)-1, 1) # S
# goal & terminated
for i, l in enumerate(sentence_num):
goals_length += [len(batch_goals[i])] * l
goals_padded = list(batch_goals[i]) + [0] * (max_goal_length - len(batch_goals[i]))
goals += [goals_padded] * l
terminated += [0] * (l-1) + [1]
# usr
for sess in batch_usrdas:
origin_responses_length += [len(sen) for sen in sess]
max_response_length = max(origin_responses_length) # R
for sess in batch_usrdas:
origin_responses += padding(sess, max_response_length)
# sys
for sess in batch_sysdas:
sen_length = [len(sen) for sen in sess]
for j in range(len(sen_length)):
if j == 0:
posts_length.append(np.array([1] + [0] * (max_sentence_num - 1)))
else:
posts_length.append(np.array(sen_length[:j] + [0] * (max_sentence_num - j)))
posts_length = np.array(posts_length)
max_post_length = np.max(posts_length) # P
for sess in batch_sysdas:
sen_padded = padding(sess, max_post_length)
for j, sen in enumerate(sess):
if j == 0:
post_single = np.zeros([max_sentence_num, max_post_length], np.int)
else:
post_single = posts[-1].copy()
post_single[j-1, :] = sen_padded[j-1]
posts.append(post_single)
''' end padding '''
batch_input['origin_responses'] = torch.LongTensor(origin_responses) # [B, R]
batch_input['origin_responses_length'] = torch.LongTensor(origin_responses_length) #[B]
batch_input['posts_length'] = torch.LongTensor(posts_length) # [B, S]
batch_input['posts'] = torch.LongTensor(posts) # [B, S, P]
batch_input['goals_length'] = torch.LongTensor(goals_length) # [B]
batch_input['goals'] = torch.LongTensor(goals) # [B, G]
batch_input['terminated'] = torch.Tensor(terminated) # [B]
return batch_input
def kl_gaussian(argu):
recog_mu, recog_logvar, prior_mu, prior_logvar = argu
# find the KL divergence between two Gaussian distribution
loss = 1.0 + (recog_logvar - prior_logvar)
loss -= (recog_logvar.exp() + torch.pow(recog_mu - prior_mu, 2)) / prior_logvar.exp()
kl_loss = -0.5 * loss.sum(dim=1)
avg_kl_loss = kl_loss.mean()
return avg_kl_loss
def capital(da):
for d_i in da:
pairs = da[d_i]
for s_v in pairs:
if s_v[0] != 'none':
s_v[0] = s_v[0].capitalize()
da_new = {}
for d_i in da:
d, i = d_i.split('-')
if d != 'general':
d = d.capitalize()
i = i.capitalize()
da_new['-'.join((d, i))] = da[d_i]
return da_new
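if __name__ == '__main__':
    # Hedged quick check (not part of the original ConvLab-2 module): toy inputs
    # illustrating `padding` and `capital`; all values below are made up.
    assert padding([[5, 6, 7], [8]], 4) == [[5, 6, 7, 0], [8, 0, 0, 0]]
    print(capital({'general-thank': [['none', 'none']],
                   'restaurant-inform': [['food', 'italian']]}))
    # -> {'general-Thank': [['none', 'none']], 'Restaurant-Inform': [['Food', 'italian']]}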
|
service/provider/TextImgProvider.py | BboyHanat/TextGenerator | 166 | 11079661 | <reponame>BboyHanat/TextGenerator
import os
import time
from typing import List
from constant import const
from utils.decorator import singleton
from utils.random_tools import Random
from core.element.CharImg import CharImg
from core.element.TextImg import create, gen_batch_char_obj, TYPE_ORIENTATION_HORIZONTAL, TYPE_ORIENTATION_VERTICAL, \
TYPE_ALIGN_MODEL_C, TYPE_ALIGN_MODEL_B, TYPE_ALIGN_MODEL_T
from core.layout.strategy.HorizontalStrategy import HorizontalStrategy
from core.layout.strategy.VerticalStrategy import VerticalStrategy
from core.layout.strategy.HorizontalFlowStrategy import HorizontalFlowStrategy
from core.layout.strategy.VerticalFlowStrategy import VerticalFlowStrategy
from core.layout.strategy.CustomizationStrategy1 import CustomizationStrategy1
from core.layout import TextBlock, NextBlockGenerator
from utils import font_tool
import numpy as np
import cv2
def list_font_path(font_file_dir):
"""
获取所有的字体文件路径
:param font_file_dir: 字体文件存放路径
:return:
"""
assert os.path.exists(font_file_dir), "font_file_dir is not exist, please check: {font_file_dir}".format(
font_file_dir=font_file_dir)
path_list = []
for item in os.listdir(font_file_dir):
path = os.path.join(font_file_dir, item)
path_list.append(path)
return path_list
@singleton
class TextImgProvider(NextBlockGenerator):
def __init__(self, font_file_dir, text_img_output_dir, text_img_info_output_dir, font_min_size, font_max_size,
use_char_common_color_probability,
char_common_color_list,
char_border_width,
char_border_color,
auto_padding_to_ratio=0.0,
seed=time.time()):
"""
        Initialize the text-image provider.
        :param font_file_dir: directory of the font files
        :param text_img_output_dir: output directory for the generated text images
        :param text_img_info_output_dir: output directory for the text image metadata
        :param font_min_size: minimum font size of the text
        :param use_char_common_color_probability
        :param char_common_color_list
        :param char_border_width: width of the character border
        :param char_border_color: color of the character border
        :param auto_padding_to_ratio: automatically pad to the given aspect ratio; <= 0 disables auto padding (w/h for horizontal layout, h/w for vertical layout)
:param seed:
"""
os.makedirs(text_img_output_dir, exist_ok=True)
os.makedirs(text_img_info_output_dir, exist_ok=True)
if not seed:
seed = time.time()
self.font_file_list = list_font_path(font_file_dir)
self._font_index = 0
self.text_img_output_dir = text_img_output_dir
self.text_img_info_output_dir = text_img_info_output_dir
self.font_min_size = font_min_size
self.font_max_size = font_max_size
self.use_char_common_color_probability = use_char_common_color_probability
self.char_common_color_list = char_common_color_list
self.char_border_width = char_border_width
self.char_border_color = eval(char_border_color) if type(char_border_color) is str else char_border_color
self.auto_padding_to_ratio = auto_padding_to_ratio
Random.shuffle(self.font_file_list, seed)
def next_font_path(self):
"""
        Get the path of the next font file.
:return:
"""
font_path = self.font_file_list[self._font_index]
self._font_index += 1
if self._font_index >= len(self.font_file_list):
self._font_index = 0
return font_path
def gen_text_img(self, text: str,
font_path,
color=const.COLOR_BLACK,
font_size=14,
border_width=0,
border_color=const.COLOR_TRANSPARENT,
orientation=TYPE_ORIENTATION_HORIZONTAL,
padding=(0, 0, 0, 0),
align_mode=TYPE_ALIGN_MODEL_C,
auto_padding_to_ratio=0.0):
char_obj_list = gen_batch_char_obj(text=text, color=color, font_size=font_size, border_width=border_width,
border_color=border_color)
text_img = create(char_obj_list=char_obj_list,
orientation=orientation,
align_mode=align_mode,
padding=padding,
auto_padding_to_ratio=auto_padding_to_ratio,
font_path=font_path,
text_img_output_dir=self.text_img_output_dir,
text_img_info_output_dir=self.text_img_info_output_dir)
return text_img
def gen_complex_text_img(self, char_obj_list: List[CharImg],
font_path,
orientation=TYPE_ORIENTATION_HORIZONTAL,
align_mode=TYPE_ALIGN_MODEL_C):
"""
生成复杂的文本图片
:param char_obj_list:
:param font_path:
:param orientation:
:param align_mode:
:return:
"""
text_img = create(char_obj_list=char_obj_list,
orientation=orientation,
align_mode=align_mode,
font_path=font_path,
text_img_output_dir=self.text_img_output_dir,
text_img_info_output_dir=self.text_img_info_output_dir)
return text_img
def get_fontcolor(self, bg_img):
"""
get font color by mean
:param bg_img:
:return:
"""
char_common_color_list = self.char_common_color_list
if Random.random_float(0, 1) <= self.use_char_common_color_probability and char_common_color_list:
return eval(Random.random_choice_list(char_common_color_list))
else:
image = np.asarray(bg_img)
lab_image = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
bg = lab_image[:, :, 0]
l_mean = np.mean(bg)
new_l = Random.random_int(0, 127 - 80) if l_mean > 127 else Random.random_int(127 + 80, 255)
new_a = Random.random_int(0, 255)
new_b = Random.random_int(0, 255)
lab_rgb = np.asarray([[[new_l, new_a, new_b]]], np.uint8)
rbg = cv2.cvtColor(lab_rgb, cv2.COLOR_Lab2RGB)
r = rbg[0, 0, 0]
g = rbg[0, 0, 1]
b = rbg[0, 0, 2]
return (r, g, b, 255)
def auto_gen_next_img(self, width, height, strategy, bg_img, block_list):
"""
        Automatically generate the next text image to paste onto the background.
:return:
"""
from service import text_provider
text = "".join(text_provider.gen.__next__())
fp = self.next_font_path()
if isinstance(strategy, HorizontalStrategy):
orientation = TYPE_ORIENTATION_VERTICAL
elif isinstance(strategy, VerticalStrategy):
orientation = TYPE_ORIENTATION_HORIZONTAL
elif isinstance(strategy, HorizontalFlowStrategy):
orientation = TYPE_ORIENTATION_HORIZONTAL
elif isinstance(strategy, VerticalFlowStrategy):
orientation = TYPE_ORIENTATION_VERTICAL
elif isinstance(strategy, CustomizationStrategy1):
if block_list:
orientation = TYPE_ORIENTATION_HORIZONTAL
else:
orientation = TYPE_ORIENTATION_VERTICAL
else:
orientation = Random.random_choice_list(
[TYPE_ORIENTATION_VERTICAL, TYPE_ORIENTATION_HORIZONTAL, TYPE_ORIENTATION_HORIZONTAL])
        # choose the font size
if self.font_max_size != 'vaild':
font_size = Random.random_int(self.font_min_size, self.font_max_size)
else:
v = min(width, height)
font_size = Random.random_int(v // 20, v // 10)
font_size = self.font_min_size if font_size < self.font_min_size else font_size
        # drop characters that the chosen font cannot render
text = "".join(filter(lambda c: font_tool.check(c, font_path=fp), text))
if len(text) >= 2:
            # generate the text image
align = Random.random_choice_list(
[TYPE_ALIGN_MODEL_B, TYPE_ALIGN_MODEL_T, TYPE_ALIGN_MODEL_C])
text_img = self.gen_text_img(text,
font_size=font_size,
border_width=self.char_border_width,
border_color=self.char_border_color,
color=self.get_fontcolor(bg_img),
orientation=orientation,
align_mode=align,
font_path=fp,
auto_padding_to_ratio=self.auto_padding_to_ratio)
return text_img
def auto_gen_next_img_block(self, width, height, strategy, bg_img, block_list, rotate_angle):
next_img = self.auto_gen_next_img(width=width,
height=height,
strategy=strategy,
bg_img=bg_img,
block_list=block_list)
if next_img:
return TextBlock(text_img=next_img, margin=10, rotate_angle=rotate_angle)
if __name__ == '__main__':
    # usage example
from service import init_config
init_config()
from service import text_img_provider
    # get the path of a font file
fp = text_img_provider.next_font_path()
    # export the text image
p = text_img_provider.gen_text_img("hello world", color=const.COLOR_BLUE, font_path=fp)
p.export()
# p.show()
    # build a text image from individual characters
l = []
l.extend(gen_batch_char_obj("你好啊", const.COLOR_BLUE, font_size=24))
l.extend(gen_batch_char_obj(" 渣 男 ", const.COLOR_GREEN, font_size=28))
r = text_img_provider.gen_complex_text_img(l, font_path=fp)
r.show()
    # get the size of the text region
bg_w, bg_h = text_img_provider.calc_bg_size(fp, orientation=TYPE_ORIENTATION_HORIZONTAL, char_obj_list=l,
spacing_rate=0.1)
print(bg_w)
print(bg_h)
|
plugin.py | leontrolski/dnjs | 125 | 11079667 | <reponame>leontrolski/dnjs
import icdiff
from prettyprinter import install_extras, pformat
install_extras()
def pretty_compare(config, op, left, right):
very_verbose = config.option.verbose >= 2
if not very_verbose:
return None
if op != "==":
return None
try:
if abs(left + right) < 100:
return None
except TypeError:
pass
try:
pretty_left = pformat(
left, indent=4, width=80, sort_dict_keys=True
).splitlines()
pretty_right = pformat(
right, indent=4, width=80, sort_dict_keys=True
).splitlines()
differ = icdiff.ConsoleDiff(cols=160, tabsize=4)
icdiff_lines = list(
differ.make_table(pretty_left, pretty_right, context=False)
)
return (
["equals failed"]
+ ["<left>".center(79) + "|" + "<right>".center(80)]
+ ["-" * 160]
+ [icdiff.color_codes["none"] + l for l in icdiff_lines]
)
except Exception:
return None
def pytest_assertrepr_compare(config, op, left, right):
return pretty_compare(config, op, left, right) |
lldb/packages/Python/lldbsuite/test/functionalities/postmortem/wow64_minidump/TestWow64MiniDump.py | dan-zheng/llvm-project | 765 | 11079673 | """
Test basics of a mini dump taken of a 32-bit process running in WoW64
WoW64 is the subsystem that lets 32-bit processes run in 64-bit Windows. If you
capture a mini dump of a process running under WoW64 with a 64-bit debugger, you
end up with a dump of the WoW64 layer. In that case, LLDB must do extra work to
get the 32-bit register contexts.
"""
from __future__ import print_function
from six import iteritems
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class Wow64MiniDumpTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_wow64_mini_dump(self):
"""Test that lldb can read the process information from the minidump."""
# target create -c fizzbuzz_wow64.dmp
target = self.dbg.CreateTarget("")
process = target.LoadCore("fizzbuzz_wow64.dmp")
self.assertTrue(process, PROCESS_IS_VALID)
self.assertEqual(process.GetNumThreads(), 1)
self.assertEqual(process.GetProcessID(), 0x1E9C)
def test_thread_info_in_wow64_mini_dump(self):
"""Test that lldb can read the thread information from the minidump."""
# target create -c fizzbuzz_wow64.dmp
target = self.dbg.CreateTarget("")
process = target.LoadCore("fizzbuzz_wow64.dmp")
# This process crashed due to an access violation (0xc0000005), but the
# minidump doesn't have an exception record--perhaps the crash handler
# ate it.
# TODO: See if we can recover the exception information from the TEB,
# which, according to Windbg, has a pointer to an exception list.
# In the dump, none of the threads are stopped, so we cannot use
# lldbutil.get_stopped_thread.
thread = process.GetThreadAtIndex(0)
self.assertEqual(thread.GetStopReason(), lldb.eStopReasonNone)
def test_stack_info_in_wow64_mini_dump(self):
"""Test that we can see a trivial stack in a VS-generate mini dump."""
# target create -c fizzbuzz_no_heap.dmp
target = self.dbg.CreateTarget("")
process = target.LoadCore("fizzbuzz_wow64.dmp")
self.assertGreaterEqual(process.GetNumThreads(), 1)
# This process crashed due to an access violation (0xc0000005), but the
# minidump doesn't have an exception record--perhaps the crash handler
# ate it.
# TODO: See if we can recover the exception information from the TEB,
# which, according to Windbg, has a pointer to an exception list.
# In the dump, none of the threads are stopped, so we cannot use
# lldbutil.get_stopped_thread.
thread = process.GetThreadAtIndex(0)
# The crash is in main, so there should be at least one frame on the
# stack.
self.assertGreaterEqual(thread.GetNumFrames(), 1)
frame = thread.GetFrameAtIndex(0)
self.assertTrue(frame.IsValid())
pc = frame.GetPC()
eip = frame.FindRegister("pc")
self.assertTrue(eip.IsValid())
self.assertEqual(pc, eip.GetValueAsUnsigned())
|
jmilkfansblog/api/wsgi_app.py | xiaoyh121/program | 176 | 11079694 | import pecan
from jmilkfansblog.api import config as api_config
def get_pecan_config():
"""Load the Pecan config from config.py."""
filename = api_config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(config=None):
"""Create a WSGI application object."""
if not config:
config = get_pecan_config()
# Setup the hooks for WSGI Application(Like Middleware in Paste).
# EG. app_hooks = [hooks.DBHook()]
app_hooks = []
# Setup the config for WSGI Application.
app_conf = dict(config.app)
# Create and init the WSGI Application.
app = pecan.make_app(
app_conf.pop('root'),
logging=getattr(config, 'logging', {}),
hooks=app_hooks,
**app_conf
)
return app
def app_factory(global_config, **local_conf):
return setup_app()
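if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): serve the Pecan app
    # with the stdlib WSGI server for a quick local check; host/port are arbitrary.
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8080, setup_app()).serve_forever()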
|
sentence_lstm.py | msi1427/sentence-classification | 204 | 11079711 | <reponame>msi1427/sentence-classification
'''
Written by <NAME>
Last Edit: January 2, 2018
For use on austingwalters.com
An LSTM based RNN to classify
of the common sentance types:
Question, Statement, Command, Exclamation
'''
from __future__ import print_function
import numpy as np
import keras
from sentence_types import load_encoded_data
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras.preprocessing.text import Tokenizer
max_words = 10000
maxlen = 500
embedding_dims = 150
batch_size = 150
epochs = 3
x_train, x_test, y_train, y_test = load_encoded_data(data_split=0.8,
embedding_name="data/default",
pos_tags=True)
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
print('Convert class vector to binary class matrix '
'(for use with categorical_crossentropy)')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('Constructing model!')
model = Sequential()
model.add(Embedding(max_words, embedding_dims))
model.add(LSTM(embedding_dims, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
print('Training... Grab a coffee')
model.fit(x_train, y_train, batch_size=batch_size,
epochs=epochs, validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test,batch_size=batch_size)
print('Test accuracy:', score[1])
|
recognition/arcface_paddle/static/backbones/iresnet.py | qaz734913414/insightface | 12,377 | 11079726 | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from collections import OrderedDict
__all__ = [
"FresResNet", "FresResNet50", "FresResNet100", "FresResNet101",
"FresResNet152"
]
class FresResNet(object):
def __init__(self,
layers=50,
num_features=512,
is_train=True,
fp16=False,
fc_type='E',
dropout=0.4):
super(FresResNet, self).__init__()
self.layers = layers
self.num_features = num_features
self.fc_type = fc_type
self.input_dict = OrderedDict()
self.output_dict = OrderedDict()
image = paddle.static.data(
name='image',
shape=[-1, 3, 112, 112],
dtype='float16' if fp16 else 'float32')
self.input_dict['image'] = image
if is_train:
label = paddle.static.data(name='label', shape=[-1], dtype='int32')
self.input_dict['label'] = label
supported_layers = [50, 100, 101, 152]
assert layers in supported_layers, \
"supported layers {}, but given {}".format(supported_layers, layers)
if layers == 50:
units = [3, 4, 14, 3]
elif layers == 100:
units = [3, 13, 30, 3]
elif layers == 101:
units = [3, 4, 23, 3]
elif layers == 152:
units = [3, 8, 36, 3]
filter_list = [64, 64, 128, 256, 512]
num_stages = 4
input_blob = paddle.static.nn.conv2d(
input=image,
num_filters=filter_list[0],
filter_size=3,
stride=1,
padding=1,
groups=1,
param_attr=paddle.ParamAttr(),
bias_attr=False)
input_blob = paddle.static.nn.batch_norm(
input=input_blob,
act=None,
epsilon=1e-05,
momentum=0.9,
is_test=False if is_train else True)
# input_blob = paddle.nn.functional.relu6(input_blob)
input_blob = paddle.static.nn.prelu(
input_blob,
mode="all",
param_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(0.25)))
for i in range(num_stages):
for j in range(units[i]):
input_blob = self.residual_unit_v3(
input_blob,
filter_list[i + 1],
3,
2 if j == 0 else 1,
1,
is_train, )
fc1 = self.get_fc1(input_blob, is_train, dropout)
self.output_dict['feature'] = fc1
def residual_unit_v3(self, in_data, num_filter, filter_size, stride, pad,
is_train):
bn1 = paddle.static.nn.batch_norm(
input=in_data,
act=None,
epsilon=1e-05,
momentum=0.9,
is_test=False if is_train else True)
conv1 = paddle.static.nn.conv2d(
input=bn1,
num_filters=num_filter,
filter_size=filter_size,
stride=1,
padding=1,
groups=1,
param_attr=paddle.ParamAttr(),
bias_attr=False)
bn2 = paddle.static.nn.batch_norm(
input=conv1,
act=None,
epsilon=1e-05,
momentum=0.9,
is_test=False if is_train else True)
# prelu = paddle.nn.functional.relu6(bn2)
prelu = paddle.static.nn.prelu(
bn2,
mode="all",
param_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(0.25)))
conv2 = paddle.static.nn.conv2d(
input=prelu,
num_filters=num_filter,
filter_size=filter_size,
stride=stride,
padding=pad,
groups=1,
param_attr=paddle.ParamAttr(),
bias_attr=False)
bn3 = paddle.static.nn.batch_norm(
input=conv2,
act=None,
epsilon=1e-05,
momentum=0.9,
is_test=False if is_train else True)
if stride == 1:
input_blob = in_data
else:
input_blob = paddle.static.nn.conv2d(
input=in_data,
num_filters=num_filter,
filter_size=1,
stride=stride,
padding=0,
groups=1,
param_attr=paddle.ParamAttr(),
bias_attr=False)
input_blob = paddle.static.nn.batch_norm(
input=input_blob,
act=None,
epsilon=1e-05,
momentum=0.9,
is_test=False if is_train else True)
identity = paddle.add(bn3, input_blob)
return identity
def get_fc1(self, last_conv, is_train, dropout=0.4):
body = last_conv
if self.fc_type == "Z":
body = paddle.static.nn.batch_norm(
input=body,
act=None,
epsilon=1e-05,
is_test=False if is_train else True)
if dropout > 0:
body = paddle.nn.functional.dropout(
x=body,
p=dropout,
training=is_train,
mode='upscale_in_train')
fc1 = body
elif self.fc_type == "E":
body = paddle.static.nn.batch_norm(
input=body,
act=None,
epsilon=1e-05,
is_test=False if is_train else True)
if dropout > 0:
body = paddle.nn.functional.dropout(
x=body,
p=dropout,
training=is_train,
mode='upscale_in_train')
fc1 = paddle.static.nn.fc(
x=body,
size=self.num_features,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(
fan_in=0.0)),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant()))
fc1 = paddle.static.nn.batch_norm(
input=fc1,
act=None,
epsilon=1e-05,
is_test=False if is_train else True)
elif self.fc_type == "FC":
body = paddle.static.nn.batch_norm(
input=body,
act=None,
epsilon=1e-05,
is_test=False if is_train else True)
fc1 = paddle.static.nn.fc(
x=body,
size=self.num_features,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(
fan_in=0.0)),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant()))
fc1 = paddle.static.nn.batch_norm(
input=fc1,
act=None,
epsilon=1e-05,
is_test=False if is_train else True)
return fc1
def FresResNet50(**args):
model = FresResNet(layers=50, **args)
return model
def FresResNet100(**args):
model = FresResNet(layers=100, **args)
return model
def FresResNet101(**args):
model = FresResNet(layers=101, **args)
return model
def FresResNet152(**args):
model = FresResNet(layers=152, **args)
return model
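# Minimal construction sketch (illustrative only): assumes a Paddle 2.x static
# graph session; the feature size and eval mode chosen below are arbitrary.
if __name__ == "__main__":
    import paddle
    paddle.enable_static()
    main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        # builds the 50-layer backbone graph and records its feed/fetch tensors
        model = FresResNet50(num_features=512, is_train=False)
    print(list(model.input_dict.keys()), list(model.output_dict.keys()))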
|
__scraping__/steamdb.info/main.py | whitmans-max/python-examples | 140 | 11079737 | #!/usr/bin/env python3
# date: 2020.05.17
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
}
result = requests.get("https://steamdb.info/upcoming/free/", headers=headers)
soup = BeautifulSoup(result.content, 'lxml')
#print(result.content)
urls = []
for td_tag in soup.find_all('td'):
a_tag = td_tag.find('a')
if a_tag:
urls.append(a_tag.attrs['href'])
print(urls)
|
tools/perf/benchmarks/wasmpspdfkit.py | zealoussnow/chromium | 14,668 | 11079740 | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The WebAssembly benchmark of PSPDFKit
The PSPDFKit benchmark measures rendering of, and interactions on, a PDF file.
"""
from telemetry import benchmark
from telemetry.web_perf import timeline_based_measurement
import page_sets
from benchmarks import press
@benchmark.Info(emails=['<EMAIL>'],
component='Blink>JavaScript>WebAssembly')
class WasmPsPdfKit(press._PressBenchmark): # pylint: disable=protected-access
@classmethod
def Name(cls):
return 'wasmpspdfkit'
def CreateStorySet(self, options):
return page_sets.WasmPsPdfKitStorySet()
def CreateCoreTimelineBasedMeasurementOptions(self):
options = timeline_based_measurement.Options()
options.ExtendTraceCategoryFilter(['v8.wasm'])
options.ExtendTimelineBasedMetric(['wasmMetric'])
return options
|
starry/_core/ops/limbdark/base_op.py | shashankdholakia/starry | 116 | 11079748 | # -*- coding: utf-8 -*-
from ....compat import COp
from ....starry_version import __version__
import sys
import pkg_resources
__all__ = ["LimbDarkBaseOp"]
class LimbDarkBaseOp(COp):
__props__ = ()
func_file = None
func_name = None
def __init__(self):
super(LimbDarkBaseOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self, *args, **kwargs):
if "dev" in __version__:
return ()
return tuple(map(int, __version__.split(".")))
def c_headers(self, *args, **kwargs):
return [
"theano_helpers.h",
"ellip.h",
"limbdark.h",
"utils.h",
"vector",
]
def c_header_dirs(self, *args, **kwargs):
dirs = [
pkg_resources.resource_filename("starry", "_core/ops/lib/include")
]
dirs += [
pkg_resources.resource_filename(
"starry", "_core/ops/lib/vendor/eigen_3.3.5"
)
]
return dirs
def c_compile_args(self, *args, **kwargs):
opts = ["-std=c++11", "-O2", "-DNDEBUG"]
if sys.platform == "darwin":
opts += ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
return opts
def perform(self, *args):
raise NotImplementedError("Only C op is implemented")
|
nimfa/methods/factorization/lfnmf.py | askerdb/nimfa | 325 | 11079760 | """
#######################################
Lfnmf (``methods.factorization.lfnmf``)
#######################################
**Fisher Nonnegative Matrix Factorization for learning Local features (LFNMF)**
[Wang2004]_.
LFNMF is based on nonnegative matrix factorization (NMF), which allows only
additive combinations of nonnegative basis components. The NMF bases are
spatially global, whereas local bases would be preferred. Li [Li2001]_ proposed
local nonnegative matrix factorization (LNFM) to achieve a localized NMF
representation by adding three constraints to enforce spatial locality:
minimize the number of basis components required to represent target matrix;
minimize redundancy between different bases by making different bases as
orthogonal as possible; maximize the total activity on each component, i. e. the
total squared projection coefficients summed over all training images.
However, LNMF does not encode discrimination information for a classification
problem.
LFNMF can produce both additive and spatially localized basis components as LNMF
and it also encodes characteristics of Fisher linear discriminant analysis (FLDA).
The main idea of LFNMF is to add Fisher constraint to the original NMF. Because
the columns of the mixture matrix (H) have a one-to-one correspondence with the
columns of the target matrix (V), between class scatter of H is maximized and
within class scatter of H is minimized.
Example usages are pattern recognition problems in classification, feature
generation and extraction for diagnostic classification purposes, face
recognition etc.
.. literalinclude:: /code/snippet_lfnmf.py
"""
from nimfa.models import *
from nimfa.utils import *
from nimfa.utils.linalg import *
__all__ = ['Lfnmf']
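# A minimal usage sketch (assumptions: this class is normally driven through the
# top-level ``nimfa.Lfnmf`` factory, and the random data and parameter values
# below are illustrative only):
#
#     import numpy as np
#     import nimfa
#
#     V = np.random.rand(40, 100)              # nonnegative target matrix
#     lfnmf = nimfa.Lfnmf(V, rank=10, max_iter=20, alpha=0.01)
#     fit = lfnmf()                            # runs factorize()
#     W, H = fit.basis(), fit.coef()           # basis and mixture (encoding) matrices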
class Lfnmf(nmf_std.Nmf_std):
"""
:param V: The target matrix to estimate.
:type V: Instance of the :class:`scipy.sparse` sparse matrices types,
:class:`numpy.ndarray`, :class:`numpy.matrix` or tuple of instances of
the latter classes.
:param seed: Specify method to seed the computation of a factorization. If
specified :param:`W` and :param:`H` seeding must be None. If neither seeding
method or initial fixed factorization is specified, random initialization is
used.
:type seed: `str` naming the method or :class:`methods.seeding.nndsvd.Nndsvd`
or None
:param W: Specify initial factorization of basis matrix W. Default is None.
When specified, :param:`seed` must be None.
:type W: :class:`scipy.sparse` or :class:`numpy.ndarray` or
:class:`numpy.matrix` or None
:param H: Specify initial factorization of mixture matrix H. Default is None.
When specified, :param:`seed` must be None.
:type H: Instance of the :class:`scipy.sparse` sparse matrices types,
:class:`numpy.ndarray`, :class:`numpy.matrix`, tuple of instances of the
latter classes or None
:param rank: The factorization rank to achieve. Default is 30.
:type rank: `int`
:param n_run: It specifies the number of runs of the algorithm. Default is
1. If multiple runs are performed, fitted factorization model with the
lowest objective function value is retained.
:type n_run: `int`
:param callback: Pass a callback function that is called after each run when
performing multiple runs. This is useful if one wants to save summary
measures or process the result before it gets discarded. The callback
function is called with only one argument :class:`models.mf_fit.Mf_fit` that
contains the fitted model. Default is None.
:type callback: `function`
:param callback_init: Pass a callback function that is called after each
initialization of the matrix factors. In case of multiple runs the function
is called before each run (more precisely after initialization and before
the factorization of each run). In case of single run, the passed callback
function is called after the only initialization of the matrix factors.
This is useful if one wants to obtain the initialized matrix factors for
further analysis or additional info about initialized factorization model.
The callback function is called with only one argument
:class:`models.mf_fit.Mf_fit` that (among others) contains also initialized
matrix factors. Default is None.
:type callback_init: `function`
:param track_factor: When :param:`track_factor` is specified, the fitted
factorization model is tracked during multiple runs of the algorithm. This
option is taken into account only when multiple runs are executed
(:param:`n_run` > 1). From each run of the factorization all matrix factors
are retained, which can be very space consuming. If space is the problem
setting the callback function with :param:`callback` is advised which is
executed after each run. Tracking is useful for performing some quality or
performance measures (e.g. cophenetic correlation, consensus matrix,
dispersion). By default fitted model is not tracked.
:type track_factor: `bool`
:param track_error: Tracking the residuals error. Only the residuals from
each iteration of the factorization are retained. Error tracking is not
space consuming. By default residuals are not tracked and only the final
residuals are saved. It can be used for plotting the trajectory of the
residuals.
:type track_error: `bool`
:param alpha: Weight used to minimize within class
scatter and maximize between class scatter of the encoding mixture matrix.
The objective function is the constrained divergence, which is the standard
Lee's divergence rule with added terms ``alpha`` * S_w - ``alpha`` * S_h,
where S_w and S_h are within class and between class
scatter, respectively. It should be nonnegative. Default value is 0.01.
:type alpha: `float`
**Stopping criterion**
Factorization terminates if any of specified criteria is satisfied.
:param max_iter: Maximum number of factorization iterations. Note that the
number of iterations depends on the speed of method convergence. Default
is 30.
:type max_iter: `int`
:param min_residuals: Minimal required improvement of the residuals from the
previous iteration. They are computed between the target matrix and its MF
estimate using the objective function associated to the MF algorithm.
Default is None.
:type min_residuals: `float`
:param test_conv: It indicates how often convergence test is done. By
default convergence is tested each iteration.
:type test_conv: `int`
"""
def __init__(self, V, seed=None, W=None, H=None, rank=30, max_iter=30,
min_residuals=1e-5, test_conv=None, n_run=1, callback=None,
callback_init=None, track_factor=False,
track_error=False, alpha=0.01, **options):
self.name = "lfnmf"
self.aseeds = ["random", "fixed", "nndsvd", "random_c", "random_vcol"]
nmf_std.Nmf_std.__init__(self, vars())
self.tracker = mf_track.Mf_track() if self.track_factor and self.n_run > 1 \
or self.track_error else None
def factorize(self):
"""
Compute matrix factorization.
Return fitted factorization model.
"""
for run in range(self.n_run):
self.W, self.H = self.seed.initialize(
self.V, self.rank, self.options)
self.Sw, self.Sb = np.mat(
np.zeros((1, 1))), np.mat(np.zeros((1, 1)))
p_obj = c_obj = sys.float_info.max
best_obj = c_obj if run == 0 else best_obj
iter = 0
if self.callback_init:
self.final_obj = c_obj
self.n_iter = iter
mffit = mf_fit.Mf_fit(self)
self.callback_init(mffit)
while self.is_satisfied(p_obj, c_obj, iter):
p_obj = c_obj if not self.test_conv or iter % self.test_conv == 0 else p_obj
self.update()
iter += 1
c_obj = self.objective(
) if not self.test_conv or iter % self.test_conv == 0 else c_obj
if self.track_error:
self.tracker.track_error(run, c_obj)
if self.callback:
self.final_obj = c_obj
self.n_iter = iter
mffit = mf_fit.Mf_fit(self)
self.callback(mffit)
if self.track_factor:
self.tracker.track_factor(
run, W=self.W, H=self.H, final_obj=c_obj, n_iter=iter)
# if multiple runs are performed, fitted factorization model with
# the lowest objective function value is retained
if c_obj <= best_obj or run == 0:
best_obj = c_obj
self.n_iter = iter
self.final_obj = c_obj
mffit = mf_fit.Mf_fit(copy.deepcopy(self))
mffit.fit.tracker = self.tracker
return mffit
def is_satisfied(self, p_obj, c_obj, iter):
"""
Compute the satisfiability of the stopping criteria based on stopping
parameters and objective function value.
Return logical value denoting factorization continuation.
:param p_obj: Objective function value from previous iteration.
:type p_obj: `float`
:param c_obj: Current objective function value.
:type c_obj: `float`
:param iter: Current iteration number.
:type iter: `int`
"""
if self.max_iter and self.max_iter <= iter:
return False
if self.test_conv and iter % self.test_conv != 0:
return True
if self.min_residuals and iter > 0 and p_obj - c_obj < self.min_residuals:
return False
if iter > 0 and c_obj > p_obj:
return False
return True
def update(self):
"""Update basis and mixture matrix."""
_, idxH = argmax(self.H, axis=0)
c2m, avgs = self._encoding(idxH)
C = len(c2m)
ksi = 1.
# update mixture matrix H
for k in range(self.H.shape[0]):
for l in range(self.H.shape[1]):
n_r = len(c2m[idxH[0, l]])
u_c = avgs[idxH[0, l]][k, 0]
t_1 = (2 * u_c - 1.) / (4 * ksi)
t_2 = (1. - 2 * u_c) ** 2 + 8 * ksi * self.H[k, l] * sum(self.W[i, k] * self.V[i, l] /
(dot(self.W[i, :], self.H[:, l])[0, 0] + 1e-5) for i in range(self.W.shape[0]))
self.H[k, l] = t_1 + sqrt(t_2) / (4 * ksi)
# update basis matrix W
for i in range(self.W.shape[0]):
for k in range(self.W.shape[1]):
w_1 = sum(self.H[k, j] * self.V[i, j] / (dot(self.W[i, :], self.H[:, j])[0, 0] + 1e-5)
                          for j in range(self.V.shape[1]))
self.W[i, k] = self.W[i, k] * w_1 / self.H[k, :].sum()
W2 = repmat(self.W.sum(axis=0), self.V.shape[0], 1)
self.W = elop(self.W, W2, div)
# update within class scatter and between class
self.Sw = sum(sum(dot(self.H[:, c2m[i][j]] - avgs[i], (self.H[:, c2m[i][j]] - avgs[i]).T)
for j in range(len(c2m[i]))) for i in c2m)
avgs_t = np.mat(np.zeros((self.rank, 1)))
for k in avgs:
avgs_t += avgs[k]
avgs_t /= len(avgs)
self.Sb = sum(dot(avgs[i] - avgs_t, (avgs[i] - avgs_t).T) for i in c2m)
def _encoding(self, idxH):
"""Compute class membership and mean class value of encoding (mixture) matrix H."""
c2m = {}
avgs = {}
for i in range(idxH.shape[1]):
# group columns of encoding matrix H by class membership
c2m.setdefault(idxH[0, i], [])
c2m[idxH[0, i]].append(i)
# compute mean value of class idx in encoding matrix H
avgs.setdefault(idxH[0, i], np.mat(np.zeros((self.rank, 1))))
avgs[idxH[0, i]] += self.H[:, i]
for k in avgs:
avgs[k] /= len(c2m[k])
return c2m, avgs
def objective(self):
"""
Compute constrained divergence of target matrix from its NMF estimate
with additional factors of between class scatter and within class
scatter of the mixture matrix (H).
"""
Va = dot(self.W, self.H)
return (multiply(self.V, elop(self.V, Va, np.log)) - self.V + Va).sum() + self.alpha * np.trace(self.Sw) - self.alpha * np.trace(self.Sb)
def __str__(self):
return self.name
def __repr__(self):
return self.name
|
scripts/automation/trex_control_plane/interactive/trex/wireless/trex_wireless_traffic_handler_rpc.py | timgates42/trex-core | 956 | 11079763 | from .trex_wireless_rpc_message import *
class TrafficHandlerCall(RPCMessage):
    """Represents a Remote Call from WirelessManager to a TrafficHandler."""
TYPE = "cmd"
NUM_STATES = 3
NAME = None # should be defined for subclasses
def __init__(self, *args):
"""Create a TrafficHandlerCall.
Args:
            args: arguments to pass to the remote method; the method name is
                taken from the class-level ``NAME`` attribute
"""
super().__init__(TrafficHandlerCall.TYPE)
self.id = self.create_unique_id()
self.args = args
def __getstate__(self):
"""Return state values to be pickled."""
return (self.id, type(self).NAME, self.args) + super().__getstate__()
def __setstate__(self, state):
"""Restore state from the unpickled state values."""
super().__setstate__(state[TrafficHandlerCall.NUM_STATES:])
self.id, type(
self).NAME, self.args = state[:TrafficHandlerCall.NUM_STATES]
class TrafficHandlerCall_stop(TrafficHandlerCall):
"""RPC Call to TrafficHandler for method 'stop'.
See TrafficHandler for documentation.
"""
NAME = "stop"
def __init__(self):
super().__init__()
class TrafficHandlerCall_route_macs(TrafficHandlerCall):
"""RPC Call to TrafficHandler for method 'route_macs'.
See TrafficHandler for documentation.
"""
NAME = "route_macs"
def __init__(self, mac_to_connection_map):
assert(isinstance(mac_to_connection_map, dict))
super().__init__(mac_to_connection_map)
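# Minimal illustration (the MAC address and connection index are made up): a call
# object carries a unique id plus its class-level NAME and args, and serializes
# through the (id, NAME, args) + parent-state tuple built by __getstate__.
if __name__ == "__main__":
    call = TrafficHandlerCall_route_macs({"aa:bb:cc:dd:ee:01": 0})
    print(call.NAME, call.args)
    print(call.__getstate__())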
|
catalyst/metrics/__init__.py | tadejsv/catalyst | 206 | 11079770 | # flake8: noqa
# import order:
# functional
# core metrics
# metrics
from catalyst.metrics.functional import *
from catalyst.metrics._metric import (
ICallbackBatchMetric,
ICallbackLoaderMetric,
IMetric,
)
from catalyst.metrics._accumulative import AccumulativeMetric
from catalyst.metrics._additive import AdditiveMetric, AdditiveValueMetric
from catalyst.metrics._confusion_matrix import ConfusionMatrixMetric
from catalyst.metrics._functional_metric import (
FunctionalBatchMetric,
FunctionalLoaderMetric,
)
from catalyst.metrics._topk_metric import TopKMetric
from catalyst.metrics._accuracy import AccuracyMetric, MultilabelAccuracyMetric
from catalyst.metrics._auc import AUCMetric
from catalyst.metrics._classification import (
BinaryPrecisionRecallF1Metric,
MulticlassPrecisionRecallF1SupportMetric,
MultilabelPrecisionRecallF1SupportMetric,
)
from catalyst.metrics._cmc_score import CMCMetric, ReidCMCMetric
from catalyst.metrics._hitrate import HitrateMetric
from catalyst.metrics._map import MAPMetric
from catalyst.metrics._mrr import MRRMetric
from catalyst.metrics._ndcg import NDCGMetric
from catalyst.metrics._r2_squared import R2Squared
from catalyst.metrics._segmentation import (
RegionBasedMetric,
IOUMetric,
DiceMetric,
TrevskyMetric,
)
|
tempest/api/compute/admin/test_networks.py | rishabh20111990/tempest | 254 | 11079791 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class NetworksTest(base.BaseV2ComputeAdminTest):
"""Tests Nova Networks API that usually requires admin privileges.
API docs:
https://docs.openstack.org/api-ref/compute/#networks-os-networks-deprecated
"""
max_microversion = '2.35'
@classmethod
def setup_clients(cls):
super(NetworksTest, cls).setup_clients()
cls.client = cls.os_admin.compute_networks_client
@decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
def test_get_network(self):
"""Test getting network from nova side"""
networks = self.client.list_networks()['networks']
if CONF.compute.fixed_network_name:
configured_network = [x for x in networks if x['label'] ==
CONF.compute.fixed_network_name]
self.assertEqual(1, len(configured_network),
"{0} networks with label {1}".format(
len(configured_network),
CONF.compute.fixed_network_name))
elif CONF.network.public_network_id:
configured_network = [x for x in networks if x['id'] ==
CONF.network.public_network_id]
else:
raise self.skipException(
"Environment has no known-for-sure existing network.")
configured_network = configured_network[0]
network = (self.client.show_network(configured_network['id'])
['network'])
self.assertEqual(configured_network['label'], network['label'])
@decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
def test_list_all_networks(self):
"""Test getting all networks from nova side"""
networks = self.client.list_networks()['networks']
# Check the configured network is in the list
if CONF.compute.fixed_network_name:
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks])
else:
network_labels = [x['label'] for x in networks]
self.assertNotEmpty(network_labels)
|
tests/http_parser_test.py | Adrijaned/weechat-matrix | 773 | 11079792 | import html.entities
from hypothesis import given
from hypothesis.strategies import sampled_from
from matrix.colors import MatrixHtmlParser
try:
# python 3
html_entities = [(name, char, ord(char))
for name, char in html.entities.html5.items()
if not name.endswith(';')]
except AttributeError:
# python 2
html_entities = [(name, unichr(codepoint), codepoint)
for name, codepoint
in html.entities.name2codepoint.items()]
@given(sampled_from(html_entities))
def test_html_named_entity_parsing(entitydef):
name = entitydef[0]
character = entitydef[1]
parser = MatrixHtmlParser()
assert parser.unescape('&{};'.format(name)) == character
@given(sampled_from(html_entities))
def test_html_numeric_reference_parsing(entitydef):
character = entitydef[1]
num = entitydef[2]
parser = MatrixHtmlParser()
assert parser.unescape('&#{};'.format(num)) == character
@given(sampled_from(html_entities))
def test_html_entityref_reconstruction_from_name(entitydef):
name = entitydef[0]
parser = MatrixHtmlParser()
parser.handle_entityref(name)
s = parser.get_substrings()
assert s[0].text == parser.unescape('&{};'.format(name)) and len(s) == 1
@given(sampled_from(html_entities))
def test_html_charref_reconstruction_from_name(entitydef):
num = entitydef[2]
parser = MatrixHtmlParser()
parser.handle_charref(num)
s = parser.get_substrings()
assert s[0].text == parser.unescape('&#{};'.format(num)) and len(s) == 1
def test_parsing_of_escaped_brackets():
p = MatrixHtmlParser()
p.feed('<pre><code><faketag></code></pre>')
s = p.get_substrings()
assert s[0].text == '<faketag>' and len(s) == 1
|
scripts/external_libs/scapy-2.4.5/scapy/config.py | dariusgrassi/trex-core | 250 | 11079794 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import atexit
import copy
import functools
import os
import re
import socket
import sys
import time
import warnings
import scapy
from scapy import VERSION
from scapy.base_classes import BasePacket
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
from scapy.compat import (
Any,
Callable,
DecoratorCallable,
Dict,
Iterator,
List,
NoReturn,
Optional,
Set,
Type,
Tuple,
Union,
TYPE_CHECKING,
)
from types import ModuleType
if TYPE_CHECKING:
# Do not import at runtime
from scapy.packet import Packet
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
# type: (ConfClass) -> None
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
# type: () -> str
return str(self)
def __str__(self):
# type: () -> str
s = ""
dkeys = self.__class__.__dict__.copy()
dkeys.update(self.__dict__)
keys = sorted(dkeys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self,
name, # type: str
default, # type: Any
hook, # type: Callable[..., Any]
args=None, # type: Optional[List[Any]]
kargs=None # type: Optional[Dict[str, Any]]
):
# type: (...) -> None
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
# type: (Conf, Optional[type]) -> Any
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
# type: (Conf, str, bool) -> None
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
# type: (Conf, Any) -> None
old = getattr(obj, self.intname, self.default)
val = self.hook(self.name, val, old, *self.args, **self.kargs)
setattr(obj, self.intname, val)
def _readonly(name):
# type: (str) -> NoReturn
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
class ProgPath(ConfClass):
_default = "<System default>"
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
# type: () -> None
self.fields = set() # type: Set[Any]
self.layers = set() # type: Set[Any]
@staticmethod
def _is_field(f):
# type: (Any) -> bool
return hasattr(f, "owners")
def _recalc_layer_list(self):
# type: () -> None
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
# type: (*Any) -> None
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
# type: (*Any) -> None
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
# type: (Any) -> bool
if isinstance(elt, BasePacket):
return elt in self.layers
return elt in self.fields
def __repr__(self):
# type: () -> str
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
# type: () -> None
self.num2layer = {} # type: Dict[int, Type[Packet]]
self.layer2num = {} # type: Dict[Type[Packet], int]
def register(self, num, layer):
# type: (int, Type[Packet]) -> None
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
# type: (int, Type[Packet]) -> None
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
# type: (int, Type[Packet]) -> None
self.layer2num[layer] = num
def __getitem__(self, item):
# type: (Union[int, Type[Packet]]) -> Union[int, Type[Packet]]
if isinstance(item, int):
return self.num2layer[item]
else:
return self.layer2num[item]
def __contains__(self, item):
# type: (Union[int, Type[Packet]]) -> bool
if isinstance(item, int):
return item in self.num2layer
else:
return item in self.layer2num
def get(self,
item, # type: Union[int, Type[Packet]]
default=None, # type: Optional[Type[Packet]]
):
# type: (...) -> Optional[Union[int, Type[Packet]]]
return self[item] if item in self else default
def __repr__(self):
# type: () -> str
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(List[Type['scapy.packet.Packet']]):
def __init__(self):
# type: () -> None
list.__init__(self)
self.ldict = {} # type: Dict[str, List[Type[Packet]]]
self.filtered = False
self._backup_dict = {} # type: Dict[Type[Packet], List[Tuple[Dict[str, Any], Type[Packet]]]] # noqa: E501
def __repr__(self):
# type: () -> str
return "\n".join("%-20s: %s" % (layer.__name__, layer.name)
for layer in self)
def register(self, layer):
# type: (Type[Packet]) -> None
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
# type: () -> List[Tuple[str, str]]
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
def filter(self, items):
# type: (List[Type[Packet]]) -> None
"""Disable dissection of unused layers to speed up dissection"""
if self.filtered:
raise ValueError("Already filtered. Please disable it first")
for lay in six.itervalues(self.ldict):
for cls in lay:
if cls not in self._backup_dict:
self._backup_dict[cls] = cls.payload_guess[:]
cls.payload_guess = [
y for y in cls.payload_guess if y[1] in items
]
self.filtered = True
def unfilter(self):
# type: () -> None
"""Re-enable dissection for all layers"""
if not self.filtered:
raise ValueError("Not filtered. Please filter first")
for lay in six.itervalues(self.ldict):
for cls in lay:
cls.payload_guess = self._backup_dict[cls]
self._backup_dict.clear()
self.filtered = False
class CommandsList(List[Callable[..., Any]]):
def __repr__(self):
# type: () -> str
s = []
for li in sorted(self, key=lambda x: x.__name__):
doc = li.__doc__.split("\n")[0] if li.__doc__ else "--"
s.append("%-20s: %s" % (li.__name__, doc))
return "\n".join(s)
def register(self, cmd):
# type: (DecoratorCallable) -> DecoratorCallable
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
# type: () -> None
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(Dict[str, Any], object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
# type: (str, Optional[int]) -> None
self.timeout = timeout
self.name = name
self._timetable = {} # type: Dict[str, float]
def flush(self):
# type: () -> None
CacheInstance.__init__(
self,
name=self.name,
timeout=self.timeout
)
def __getitem__(self, item):
# type: (str) -> Any
if item in self.__slots__:
return object.__getattribute__(self, item)
val = super(CacheInstance, self).__getitem__(item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# type: (str, Optional[Any]) -> Any
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
# type: (str, str) -> None
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
super(CacheInstance, self).__setitem__(item, v)
def update(self, # type: ignore
other, # type: Any
**kwargs # type: Any
):
# type: (...) -> None
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
# type: () -> Iterator[Tuple[str, Any]]
if self.timeout is None:
return six.iteritems(self.__dict__) # type: ignore
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
# type: () -> Iterator[str]
if self.timeout is None:
return six.iterkeys(self.__dict__) # type: ignore
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
# type: () -> Iterator[str]
return self.iterkeys()
def itervalues(self):
# type: () -> Iterator[Tuple[str, Any]]
if self.timeout is None:
return six.itervalues(self.__dict__) # type: ignore
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
# type: () -> Any
if self.timeout is None:
return super(CacheInstance, self).items()
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
# type: () -> Any
if self.timeout is None:
return super(CacheInstance, self).keys()
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
# type: () -> Any
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
# type: () -> int
if self.timeout is None:
return super(CacheInstance, self).__len__()
return len(self.keys())
def summary(self):
# type: () -> str
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
# type: () -> str
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
def copy(self):
# type: () -> CacheInstance
return copy.copy(self)
class NetCache:
def __init__(self):
# type: () -> None
self._caches_list = [] # type: List[CacheInstance]
def add_cache(self, cache):
# type: (CacheInstance) -> None
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
# type: (str, Optional[int]) -> CacheInstance
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
return c
def __delattr__(self, attr):
# type: (str) -> NoReturn
raise AttributeError("Cannot delete attributes")
def update(self, other):
# type: (NetCache) -> None
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
# type: () -> None
for c in self._caches_list:
c.flush()
def __repr__(self):
# type: () -> str
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
# type: (ModuleType, Tuple[int, ...]) -> bool
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags_r = re.match(
version_regexp,
getattr(module, "__version__", "")
)
if not version_tags_r:
return False
version_tags_i = version_tags_r.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags_i)
return bool(version_tags >= minver)
def isCryptographyValid():
# type: () -> bool
"""
Check if the cryptography module >= 2.0.0 is present. This is the minimum
version for most usages in Scapy.
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0, 0))
def isCryptographyAdvanced():
# type: () -> bool
"""
Check if the cryptography module is present, and if it supports X25519,
ChaCha20Poly1305 and such.
Notes:
- cryptography >= 2.0 is required
- OpenSSL >= 1.1.0 is required
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
# type: () -> bool
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val, old):
# type: (str, Any, Any) -> Any
"""Change the current prompt theme"""
Interceptor.set_from_hook(conf, attr, val)
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(
get_ipython() # type: ignore
)
except NameError:
pass
return getattr(conf, attr, old)
def _set_conf_sockets():
# type: () -> None
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap:
try:
from scapy.arch.libpcap import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial( # type: ignore
L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
conf.ifaces.reload()
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial( # type: ignore
L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
conf.ifaces.reload()
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial( # type: ignore
L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
conf.ifaces.reload()
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
conf.ifaces.reload()
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val, old):
# type: (str, bool, bool) -> Any
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
Interceptor.set_from_hook(conf, attr, val)
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
return getattr(conf, attr)
def _loglevel_changer(attr, val, old):
# type: (str, int, int) -> int
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
return val
def _iface_changer(attr, val, old):
# type: (str, Any, Any) -> 'scapy.interfaces.NetworkInterface'
"""Resolves the interface in conf.iface"""
if isinstance(val, str):
from scapy.interfaces import resolve_iface
iface = resolve_iface(val)
if old and iface.dummy:
warning(
"This interface is not specified in any provider ! "
"See conf.ifaces output"
)
return iface
return val # type: ignore
class Conf(ConfClass):
"""
This object contains the configuration of Scapy.
"""
version = ReadOnlyAttribute("version", VERSION)
session = "" #: filename where the session will be saved
interactive = False
#: can be "ipython", "python" or "auto". Default: Auto
interactive_shell = ""
#: if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
stealth = "not implemented"
#: selects the default output interface for srp() and sendp().
iface = Interceptor("iface", None, _iface_changer) # type: 'scapy.interfaces.NetworkInterface' # type: ignore # noqa: E501
layers = LayersList()
commands = CommandsList() # type: CommandsList
ASN1_default_codec = None #: Codec used by default for ASN1 objects
AS_resolver = None #: choose the AS resolver class to use
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
#: if 0, doesn't check that IPID matches between IP sent and
#: ICMP IP citation received
#: if 1, checks that they either are equal or byte swapped
#: equals (bug in some IP stacks)
#: if 2, strictly checks that they are equals
checkIPID = False
#: if 1, checks IP src in IP and ICMP IP citation match
#: (bug in some NAT stacks)
checkIPsrc = True
checkIPaddr = True
#: if True, checks that IP-in-IP layers match. If False, do
#: not check IP layers that encapsulates another IP layer
checkIPinIP = True
#: if 1, also check that TCP seq and ack match the
#: ones in ICMP citation
check_TCPerror_seqack = False
verb = 2 #: level of verbosity, from 0 (almost mute) to 3 (verbose)
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
#: default mode for listening socket (to get answers if you
#: spoof on a lan)
promisc = True
sniff_promisc = 1 #: default mode for sniff()
raw_layer = None # type: Type[Packet]
raw_summary = False
padding_layer = None # type: Type[Packet]
default_l2 = None # type: Type[Packet]
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None # type: Type[scapy.supersocket.SuperSocket]
L3socket6 = None # type: Type[scapy.supersocket.SuperSocket]
L2socket = None # type: Type[scapy.supersocket.SuperSocket]
L2listen = None # type: Type[scapy.supersocket.SuperSocket]
BTsocket = None # type: Type[scapy.supersocket.SuperSocket]
USBsocket = None # type: Type[scapy.supersocket.SuperSocket]
min_pkt_size = 60
#: holds MIB direct access dictionary
mib = None # type: 'scapy.asn1.mib.MIBDict'
bufsize = 2**16
#: history file
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
#: includes padding in disassembled packets
padding = 1
#: BPF filter for packets to ignore
except_filter = ""
#: bpf filter added to every sniffing socket to exclude traffic
#: from analysis
filter = ""
#: when 1, store received packet that are not matched into `debug.recv`
debug_match = False
#: When 1, print some TLS session secrets when they are computed.
debug_tls = False
wepkey = ""
#: holds the Scapy interface list and manager
ifaces = None # type: 'scapy.interfaces.NetworkInterfaceDict'
#: holds the cache of interfaces loaded from Libpcap
cache_pcapiflist = {} # type: Dict[str, Tuple[str, List[str], int]]
neighbor = None # type: 'scapy.layers.l2.Neighbor'
# `neighbor` will be filed by scapy.layers.l2
#: holds the Scapy IPv4 routing table and provides methods to
#: manipulate it
route = None # type: 'scapy.route.Route'
# `route` will be filed by route.py
#: holds the Scapy IPv6 routing table and provides methods to
#: manipulate it
route6 = None # type: 'scapy.route6.Route6'
manufdb = None # type: 'scapy.data.ManufDA'
# 'route6' will be filed by route6.py
teredoPrefix = "" # type: str
teredoServerPort = None # type: int
auto_fragment = True
#: raise exception when a packet dissector raises an exception
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
#: how much time between warnings from the same place
warning_threshold = 5
prog = ProgPath()
#: holds list of fields for which resolution should be done
resolve = Resolve()
#: holds list of enum fields for which conversion to string
#: should NOT be done
noenum = Resolve()
emph = Emphasize()
#: read only attribute to show if PyPy is in use
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
#: use libpcap integration or not. Changing this value will update
#: the conf.L[2/3] sockets
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_LIBPCAP", "").lower().startswith("y"),
_socket_changer
)
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
#: path or list of paths where extensions are to be looked for
extensions_paths = "."
stats_classic_protocols = [] # type: List[Type[Packet]]
stats_dot11_protocols = [] # type: List[Type[Packet]]
temp_files = [] # type: List[str]
netcache = NetCache()
geoip_city = None
# can, tls, http and a few others are not loaded by default
load_layers = [
'bluetooth',
'bluetooth4LE',
'dhcp',
'dhcp6',
'dns',
'dot11',
'dot15d4',
'eap',
'gprs',
'hsrp',
'inet',
'inet6',
'ipsec',
'ir',
'isakmp',
'l2',
'l2tp',
'llmnr',
'lltd',
'mgcp',
'mobileip',
'netbios',
'netflow',
'ntp',
'ppi',
'ppp',
'pptp',
'radius',
'rip',
'rtp',
'sctp',
'sixlowpan',
'skinny',
'smb',
'smb2',
'snmp',
'tftp',
'vrrp',
'vxlan',
'x509',
'zigbee'
]
#: a dict which can be used by contrib layers to store local
#: configuration
contribs = dict() # type: Dict[str, Any]
crypto_valid = isCryptographyValid()
crypto_valid_advanced = isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
#: how often to check for new packets.
#: Defaults to 0.05s.
recv_poll_rate = 0.05
#: When True, raise exception if no dst MAC found otherwise broadcast.
#: Default is False.
raise_no_dst_mac = False
loopback_name = "lo" if LINUX else "lo0"
def __getattribute__(self, attr):
# type: (str) -> Any
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
if attr == "iface6":
warnings.warn(
"conf.iface6 is deprecated in favor of conf.iface",
DeprecationWarning
)
attr = "iface"
return object.__getattribute__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf() # type: Conf
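# Typical interactive usage (illustrative values; attribute writes go through the
# Interceptor hooks defined above, e.g. setting use_pcap swaps the socket classes):
#
#     from scapy.config import conf
#     conf.verb = 0            # silence send/receive summaries
#     conf.use_pcap = True     # switch L2/L3 sockets to the libpcap provider
#     print(conf.iface)        # default interface, resolved by _iface_changer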
def crypto_validator(func):
# type: (DecoratorCallable) -> DecoratorCallable
"""
    This is a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
# type: (*Any, **Any) -> Any
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in # type: ignore
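# Illustrative use of the decorator (the class below is hypothetical, not part of
# scapy): a guarded method raises ImportError whenever conf.crypto_valid is False.
#
#     class MyKeyExchange:
#         @crypto_validator
#         def derive(self, priv, pub):
#             ...  # body that relies on the cryptography package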
def scapy_delete_temp_files():
# type: () -> None
for f in conf.temp_files:
try:
os.unlink(f)
except Exception:
pass
del conf.temp_files[:]
atexit.register(scapy_delete_temp_files)
|
qiskit/transpiler/passes/layout/vf2_post_layout.py | t-imamichi/qiskit-core | 1,456 | 11079807 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""VF2PostLayout pass to find a layout after transpile using subgraph isomorphism"""
from enum import Enum
import logging
import time
from retworkx import PyDiGraph, vf2_mapping, PyGraph
from qiskit.transpiler.layout import Layout
from qiskit.transpiler.basepasses import AnalysisPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.providers.exceptions import BackendPropertyError
from qiskit.transpiler.passes.layout import vf2_utils
logger = logging.getLogger(__name__)
class VF2PostLayoutStopReason(Enum):
"""Stop reasons for VF2PostLayout pass."""
SOLUTION_FOUND = "solution found"
NO_SOLUTION_FOUND = "nonexistent solution"
MORE_THAN_2Q = ">2q gates in basis"
def _target_match(node_a, node_b):
# Node A is the set of operations in the target. Node B is the count dict
    # of operations on the node or edge in the circuit.
if isinstance(node_a, set):
return node_a.issuperset(node_b.keys())
# Node A is the count dict of operations on the node or edge in the circuit
# Node B is the set of operations in the target on the same qubit(s).
else:
return set(node_a).issubset(node_b)
class VF2PostLayout(AnalysisPass):
"""A pass for choosing a Layout after transpilation of a circuit onto a
Coupling graph, as a subgraph isomorphism problem, solved by VF2++.
    Unlike the :class:`~.VF2Layout` transpiler pass, which is designed to find an
    initial layout for a circuit early in the transpilation pipeline, this transpiler
    pass is designed to try and find a better layout after transpilation is complete.
The initial layout phase of the transpiler doesn't have as much information available
as we do after transpilation. This pass is designed to be paired in a similar pipeline
as the layout passes. This pass will strip any idle wires from the circuit, use VF2
to find a subgraph in the coupling graph for the circuit to run on with better fidelity
and then update the circuit layout to use the new qubits.
    If a solution is found, that means there is a "perfect layout" and that no
    further swap mapping or routing is needed. If a solution is found, the layout
will be set in the property set as ``property_set['layout']``. However, if no
solution is found, no ``property_set['layout']`` is set. The stopping reason is
set in ``property_set['VF2PostLayout_stop_reason']`` in all the cases and will be
one of the values enumerated in ``VF2PostLayoutStopReason`` which has the
following values:
* ``"solution found"``: If a perfect layout was found.
* ``"nonexistent solution"``: If no perfect layout was found.
    * ``">2q gates in basis"``: If the circuit contains operations acting on
      more than two qubits, which VF2PostLayout cannot handle.
"""
def __init__(
self,
target=None,
coupling_map=None,
properties=None,
seed=None,
call_limit=None,
time_limit=None,
strict_direction=True,
):
"""Initialize a ``VF2PostLayout`` pass instance
Args:
target (Target): A target representing the backend device to run ``VF2PostLayout`` on.
If specified it will supersede a set value for ``properties`` and
``coupling_map``.
coupling_map (CouplingMap): Directed graph representing a coupling map.
properties (BackendProperties): The backend properties for the backend. If
:meth:`~qiskit.providers.models.BackendProperties.readout_error` is available
it is used to score the layout.
seed (int): Sets the seed of the PRNG. -1 Means no node shuffling.
call_limit (int): The number of state visits to attempt in each execution of
VF2.
time_limit (float): The total time limit in seconds to run ``VF2PostLayout``
strict_direction (bool): Whether the pass is configured to follow
the strict direction in the coupling graph. If this is set to
false, the pass will treat any edge in the coupling graph as
a weak edge and the interaction graph will be undirected. For
the purposes of evaluating layouts the avg error rate for
each qubit and 2q link will be used. This enables the pass to be
run prior to basis translation and work with any 1q and 2q operations.
However, if ``strict_direction=True`` the pass expects the input
:class:`~.DAGCircuit` object to :meth:`~.VF2PostLayout.run` to be in
the target set of instructions.
Raises:
            TypeError: At runtime, if neither ``coupling_map`` nor ``target`` is provided.
"""
super().__init__()
self.target = target
self.coupling_map = coupling_map
self.properties = properties
self.call_limit = call_limit
self.time_limit = time_limit
self.seed = seed
self.strict_direction = strict_direction
self.avg_error_map = None
def run(self, dag):
"""run the layout method"""
if self.target is None and (self.coupling_map is None or self.properties is None):
raise TranspilerError(
"A target must be specified or a coupling map and properties must be provided"
)
if not self.strict_direction and self.avg_error_map is None:
self.avg_error_map = vf2_utils.build_average_error_map(
self.target, self.properties, self.coupling_map
)
result = vf2_utils.build_interaction_graph(dag, self.strict_direction)
if result is None:
self.property_set["VF2PostLayout_stop_reason"] = VF2PostLayoutStopReason.MORE_THAN_2Q
return
im_graph, im_graph_node_map, reverse_im_graph_node_map = result
if self.target is not None:
if self.strict_direction:
cm_graph = PyDiGraph(multigraph=False)
else:
cm_graph = PyGraph(multigraph=False)
cm_graph.add_nodes_from(
[self.target.operation_names_for_qargs((i,)) for i in range(self.target.num_qubits)]
)
for qargs in self.target.qargs:
len_args = len(qargs)
# If qargs == 1 we already populated it and if qargs > 2 there are no instructions
# using those in the circuit because we'd have already returned by this point
if len_args == 2:
cm_graph.add_edge(
qargs[0], qargs[1], self.target.operation_names_for_qargs(qargs)
)
cm_nodes = list(cm_graph.node_indexes())
else:
cm_graph, cm_nodes = vf2_utils.shuffle_coupling_graph(
self.coupling_map, self.seed, self.strict_direction
)
logger.debug("Running VF2 to find post transpile mappings")
if self.target and self.strict_direction:
mappings = vf2_mapping(
cm_graph,
im_graph,
node_matcher=_target_match,
edge_matcher=_target_match,
subgraph=True,
id_order=False,
induced=False,
call_limit=self.call_limit,
)
else:
mappings = vf2_mapping(
cm_graph,
im_graph,
subgraph=True,
id_order=False,
induced=False,
call_limit=self.call_limit,
)
chosen_layout = None
initial_layout = Layout(dict(enumerate(dag.qubits)))
try:
if self.strict_direction:
chosen_layout_score = self._score_layout(
initial_layout, im_graph_node_map, reverse_im_graph_node_map, im_graph
)
else:
chosen_layout_score = vf2_utils.score_layout(
self.avg_error_map,
initial_layout,
im_graph_node_map,
reverse_im_graph_node_map,
im_graph,
self.strict_direction,
)
# Circuit not in basis so we have nothing to compare against return here
except KeyError:
self.property_set[
"VF2PostLayout_stop_reason"
] = VF2PostLayoutStopReason.NO_SOLUTION_FOUND
return
logger.debug("Initial layout has score %s", chosen_layout_score)
start_time = time.time()
trials = 0
for mapping in mappings:
trials += 1
logger.debug("Running trial: %s", trials)
stop_reason = VF2PostLayoutStopReason.SOLUTION_FOUND
layout = Layout(
{reverse_im_graph_node_map[im_i]: cm_nodes[cm_i] for cm_i, im_i in mapping.items()}
)
if self.strict_direction:
layout_score = self._score_layout(
layout, im_graph_node_map, reverse_im_graph_node_map, im_graph
)
else:
layout_score = vf2_utils.score_layout(
self.avg_error_map,
layout,
im_graph_node_map,
reverse_im_graph_node_map,
im_graph,
self.strict_direction,
)
logger.debug("Trial %s has score %s", trials, layout_score)
if layout_score < chosen_layout_score:
logger.debug(
"Found layout %s has a lower score (%s) than previous best %s (%s)",
layout,
layout_score,
chosen_layout,
chosen_layout_score,
)
chosen_layout = layout
chosen_layout_score = layout_score
elapsed_time = time.time() - start_time
if self.time_limit is not None and elapsed_time >= self.time_limit:
logger.debug(
"VFPostLayout has taken %s which exceeds configured max time: %s",
elapsed_time,
self.time_limit,
)
break
if chosen_layout is None:
stop_reason = VF2PostLayoutStopReason.NO_SOLUTION_FOUND
else:
existing_layout = self.property_set["layout"]
            # If there are any ancillas in the initial layout, map them back to the final layout output
if existing_layout is not None and len(existing_layout) > len(chosen_layout):
virtual_bits = chosen_layout.get_virtual_bits()
used_bits = set(virtual_bits.values())
num_qubits = len(cm_graph)
for bit in dag.qubits:
if len(chosen_layout) == len(existing_layout):
break
if bit not in virtual_bits:
for i in range(num_qubits):
if i not in used_bits:
used_bits.add(i)
chosen_layout.add(bit, i)
break
self.property_set["post_layout"] = chosen_layout
self.property_set["VF2PostLayout_stop_reason"] = stop_reason
def _score_layout(self, layout, bit_map, reverse_bit_map, im_graph):
bits = layout.get_virtual_bits()
fidelity = 1
if self.target is not None:
for bit, node_index in bit_map.items():
gate_counts = im_graph[node_index]
for gate, count in gate_counts.items():
if self.target[gate] is not None and None not in self.target[gate]:
props = self.target[gate][(bits[bit],)]
if props is not None and props.error is not None:
fidelity *= (1 - props.error) ** count
for edge in im_graph.edge_index_map().values():
qargs = (bits[reverse_bit_map[edge[0]]], bits[reverse_bit_map[edge[1]]])
gate_counts = edge[2]
for gate, count in gate_counts.items():
if self.target[gate] is not None and None not in self.target[gate]:
props = self.target[gate][qargs]
if props is not None and props.error is not None:
fidelity *= (1 - props.error) ** count
else:
for bit, node_index in bit_map.items():
gate_counts = im_graph[node_index]
for gate, count in gate_counts.items():
if gate == "measure":
try:
fidelity *= (1 - self.properties.readout_error(bits[bit])) ** count
except BackendPropertyError:
pass
else:
try:
fidelity *= (1 - self.properties.gate_error(gate, bits[bit])) ** count
except BackendPropertyError:
pass
for edge in im_graph.edge_index_map().values():
qargs = (bits[reverse_bit_map[edge[0]]], bits[reverse_bit_map[edge[1]]])
gate_counts = edge[2]
for gate, count in gate_counts.items():
try:
fidelity *= (1 - self.properties.gate_error(gate, qargs)) ** count
except BackendPropertyError:
pass
return 1 - fidelity
|
DQMOffline/JetMET/python/METAnalyzer_cff.py | ckamtsikis/cmssw | 852 | 11079809 | import FWCore.ParameterSet.Config as cms
from DQMOffline.JetMET.metDQMConfig_cfi import *
METAnalyzerSequence = cms.Sequence(METAnalyzer)
|
src/genie/libs/parser/iosxe/tests/ShowSpanningTreeSummary/cli/equal/golden_output_mstp_single_mst_expected.py | balmasea/genieparser | 204 | 11079822 | expected_output = {
"backbone_fast": False,
"bpdu_filter": False,
"bpdu_guard": False,
"bridge_assurance": True,
"configured_pathcost": {"method": "short", "operational_value": "long"},
"etherchannel_misconfig_guard": True,
"extended_system_id": True,
"loop_guard": False,
"mode": {
"mst": {
"MST0": {
"blocking": 3,
"forwarding": 0,
"learning": 0,
"listening": 0,
"stp_active": 3,
}
}
},
"portfast_default": False,
"pvst_simulation": True,
"root_bridge_for": "MST0",
"total_statistics": {
"blockings": 3,
"forwardings": 0,
"learnings": 0,
"listenings": 0,
"num_of_msts": 1,
"stp_actives": 3,
},
"uplink_fast": False,
}
|
overloading.py | gimntut/overloading.py | 122 | 11079827 | """
--------------
overloading.py
--------------
Function overloading for Python 3
* Project repository: https://github.com/bintoro/overloading.py
* Documentation: https://overloading.readthedocs.org/
Copyright © 2014–2016 <NAME>. Released under the MIT License.
"""
__version__ = '0.5.0'
__all__ = ['overload', 'overloaded', 'overloads']
import ast
from collections import Counter, defaultdict, namedtuple
from functools import partial, reduce
import inspect
from itertools import chain
import operator
import re
import sys
from types import FunctionType
try:
import typing
except ImportError:
typing = None
if sys.version_info < (3, 2):
raise ImportError("Module 'overloading' requires Python version 3.2 or higher.")
DEBUG = False
######
##
## Public interface
##
def overload(func):
"""
May be used as a shortcut for ``overloaded`` and ``overloads(f)``
when the overloaded function `f` can be automatically identified.
"""
if sys.version_info < (3, 3):
raise OverloadingError("The 'overload' syntax requires Python version 3.3 or higher.")
fn = unwrap(func)
ensure_function(fn)
fname = get_full_name(fn)
if fname.find('<locals>') >= 0:
raise OverloadingError("The 'overload' syntax cannot be used with nested functions. "
"Decorators must use functools.wraps().")
try:
return register(__registry[fname], func)
except KeyError:
__registry[fname] = overloaded(func)
return __registry[fname]
def overloaded(func):
"""
Introduces a new overloaded function and registers its first implementation.
"""
fn = unwrap(func)
ensure_function(fn)
def dispatcher(*args, **kwargs):
resolved = None
if dispatcher.__complex_parameters:
cache_key_pos = []
cache_key_kw = []
for argset in (0, 1) if kwargs else (0,):
if argset == 0:
arg_pairs = enumerate(args)
complexity_mapping = dispatcher.__complex_positions
else:
arg_pairs = kwargs.items()
complexity_mapping = dispatcher.__complex_parameters
for id, arg in arg_pairs:
type_ = type(arg)
element_type = None
if id in complexity_mapping:
try:
element = next(iter(arg))
except TypeError:
pass
except StopIteration:
element_type = _empty
else:
complexity = complexity_mapping[id]
if complexity & 8 and isinstance(arg, tuple):
element_type = tuple(type(el) for el in arg)
elif complexity & 4 and hasattr(arg, 'keys'):
element_type = (type(element), type(arg[element]))
else:
element_type = type(element)
if argset == 0:
cache_key_pos.append((type_, element_type))
else:
cache_key_kw.append((id, type_, element_type))
else:
cache_key_pos = (type(arg) for arg in args)
cache_key_kw = ((name, type(arg)) for (name, arg) in kwargs.items()) if kwargs else None
cache_key = (tuple(cache_key_pos),
tuple(sorted(cache_key_kw)) if kwargs else None)
try:
resolved = dispatcher.__cache[cache_key]
except KeyError:
resolved = find(dispatcher, args, kwargs)
if resolved:
dispatcher.__cache[cache_key] = resolved
if resolved:
before = dispatcher.__hooks['before']
after = dispatcher.__hooks['after']
if before:
before(*args, **kwargs)
result = resolved(*args, **kwargs)
if after:
after(*args, **kwargs)
return result
else:
return error(dispatcher.__name__)
dispatcher.__dict__.update(
__functions = [],
__hooks = {'before': None, 'after': None},
__cache = {},
__complex_positions = {},
__complex_parameters = {},
__maxlen = 0,
)
for attr in ('__module__', '__name__', '__qualname__', '__doc__'):
setattr(dispatcher, attr, getattr(fn, attr, None))
if is_void(fn):
update_docstring(dispatcher, fn)
return dispatcher
else:
update_docstring(dispatcher)
return register(dispatcher, func)
def overloads(dispatcher, hook=None):
"""
Returns a callable that registers its argument as an implementation
of a previously declared overloaded function.
"""
return partial(register, dispatcher, hook=hook)
######
##
## Private interface
##
__registry = {}
FunctionInfo = namedtuple('FunctionInfo', ('func', 'signature'))
Signature = namedtuple('Signature', ('parameters', 'types', 'complexity', 'defaults', 'required',
'has_varargs', 'has_varkw', 'has_kwonly'))
_empty = object()
def register(dispatcher, func, *, hook=None):
"""
Registers `func` as an implementation on `dispatcher`.
"""
wrapper = None
if isinstance(func, (classmethod, staticmethod)):
wrapper = type(func)
func = func.__func__
ensure_function(func)
if isinstance(dispatcher, (classmethod, staticmethod)):
wrapper = None
dp = unwrap(dispatcher)
try:
dp.__functions
except AttributeError:
raise OverloadingError("%r has not been set up as an overloaded function." % dispatcher)
fn = unwrap(func)
if hook:
dp.__hooks[hook] = func
else:
signature = get_signature(fn)
for i, type_ in enumerate(signature.types):
if not isinstance(type_, type):
raise OverloadingError(
"Failed to overload function '{0}': parameter '{1}' has "
"an annotation that is not a type."
.format(dp.__name__, signature.parameters[i]))
for fninfo in dp.__functions:
dup_sig = sig_cmp(signature, fninfo.signature)
if dup_sig and signature.has_varargs == fninfo.signature.has_varargs:
raise OverloadingError(
"Failed to overload function '{0}': non-unique signature ({1})."
.format(dp.__name__, str.join(', ', (_repr(t) for t in dup_sig))))
# All clear; register the function.
dp.__functions.append(FunctionInfo(func, signature))
dp.__cache.clear()
dp.__maxlen = max(dp.__maxlen, len(signature.parameters))
if typing:
# For each parameter position and name, compute a bitwise union of complexity
# values over all registered signatures. Retain the result for parameters where
# a nonzero value occurs at least twice and at least one of those values is >= 2.
# Such parameters require deep type-checking during function resolution.
position_values = defaultdict(lambda: 0)
keyword_values = defaultdict(lambda: 0)
position_counter = Counter()
keyword_counter = Counter()
for fninfo in dp.__functions:
sig = fninfo.signature
complex_positions = {i: v for i, v in enumerate(sig.complexity) if v}
complex_keywords = {p: v for p, v in zip(sig.parameters, sig.complexity) if v}
for i, v in complex_positions.items():
position_values[i] |= v
for p, v in complex_keywords.items():
keyword_values[p] |= v
position_counter.update(complex_positions.keys())
keyword_counter.update(complex_keywords.keys())
dp.__complex_positions = {
i: v for i, v in position_values.items() if v >= 2 and position_counter[i] > 1}
dp.__complex_parameters = {
p: v for p, v in keyword_values.items() if v >= 2 and keyword_counter[p] > 1}
if wrapper is None:
wrapper = lambda x: x
if func.__name__ == dp.__name__:
# The returned function is going to be bound to the invocation name
# in the calling scope, so keep returning the dispatcher.
return wrapper(dispatcher)
else:
return wrapper(func)
Match = namedtuple('Match', 'score, func, sig')
SP_REGULAR = 5
SP_ABSTRACT = 4
SP_TYPING = 3
SP_GENERIC = 2
def find(dispatcher, args, kwargs):
"""
Given the arguments contained in `args` and `kwargs`, returns the best match
from the list of implementations registered on `dispatcher`.
"""
matches = []
full_args = args
full_kwargs = kwargs
for func, sig in dispatcher.__functions:
params = sig.parameters
param_count = len(params)
# Filter out arguments that will be consumed by catch-all parameters
# or by keyword-only parameters.
if sig.has_varargs:
args = full_args[:param_count]
else:
args = full_args
if sig.has_varkw or sig.has_kwonly:
kwargs = {kw: full_kwargs[kw] for kw in params if kw in full_kwargs}
else:
kwargs = full_kwargs
kwarg_set = set(kwargs)
arg_count = len(args) + len(kwargs)
optional_count = len(sig.defaults)
required_count = param_count - optional_count
# Consider candidate functions that satisfy basic conditions:
# - argument count matches signature
# - all keyword arguments are recognized.
if not 0 <= param_count - arg_count <= optional_count:
continue
if kwargs and not kwarg_set <= set(params):
continue
if kwargs and args and kwarg_set & set(params[:len(args)]):
raise TypeError("%s() got multiple values for the same parameter"
% dispatcher.__name__)
arg_score = arg_count # >= 0
type_score = 0
specificity_score = [None] * dispatcher.__maxlen
sig_score = required_count
var_score = -sig.has_varargs
indexed_kwargs = ((params.index(k), v) for k, v in kwargs.items()) if kwargs else ()
for param_pos, value in chain(enumerate(args), indexed_kwargs):
param_name = params[param_pos]
if value is None and sig.defaults.get(param_name, _empty) is None:
expected_type = type(None)
else:
expected_type = sig.types[param_pos]
specificity = compare(value, expected_type)
if specificity[0] == -1:
break
specificity_score[param_pos] = specificity
type_score += 1
else:
score = (arg_score, type_score, specificity_score, sig_score, var_score)
matches.append(Match(score, func, sig))
if matches:
if len(matches) > 1:
matches.sort(key=lambda m: m.score, reverse=True)
if DEBUG:
assert matches[0].score > matches[1].score
return matches[0].func
else:
return None
def compare(value, expected_type):
if expected_type is AnyType:
return (0,)
type_ = type(value)
if not issubclass(type_, expected_type):
# Discard immediately on type mismatch.
return (-1,)
type_tier = SP_REGULAR
type_specificity = 0
param_specificity = 0
mro_rank = 0
params = None
if typing and isinstance(expected_type, typing.UnionMeta):
types = [t for t in expected_type.__union_params__ if issubclass(type_, t)]
if len(types) > 1:
return max(map(partial(compare, value), types))
else:
expected_type = types[0]
if typing and isinstance(expected_type, (typing.TypingMeta, GenericWrapperMeta)):
type_tier = SP_TYPING
match = False
if isinstance(expected_type, typing.TupleMeta):
params = expected_type.__tuple_params__
if params:
if expected_type.__tuple_use_ellipsis__:
match = len(value) == 0 or issubclass(type(value[0]), params[0])
else:
match = len(value) == len(params) and \
all(issubclass(type(v), t) for v, t in zip(value, params))
param_specificity = 100
else:
match = True
elif isinstance(expected_type, GenericWrapperMeta):
type_tier = SP_GENERIC
type_specificity = len(expected_type.type.__mro__)
interface = expected_type.interface
params = expected_type.parameters
if expected_type.complexity > 1:
# Type-check the contents.
if interface is typing.Mapping:
if len(value) == 0:
match = True
else:
key = next(iter(value))
item_types = (type(key), type(value[key]))
elif interface is typing.Iterable:
try:
item_types = (type(next(iter(value))),)
except StopIteration:
match = True
else:
# Type-checking not implemented.
match = True
if not match:
type_vars = expected_type.type_vars
for item_type, param, type_var in zip(item_types, params, type_vars):
if isinstance(param, typing.TypeVar):
type_var = param
if type_var.__constraints__:
param = type_var.__constraints__
direct_match = item_type in param
elif type_var.__bound__:
param = type_var.__bound__
direct_match = item_type is param
else:
direct_match = True
elif param is AnyType:
direct_match = True
else:
direct_match = item_type is param
match = direct_match or \
type_var.__covariant__ and issubclass(item_type, param) or \
type_var.__contravariant__ and issubclass(param, item_type)
if not match:
break
else:
# No constrained parameters
match = True
else:
match = True
if not match:
return (-1,)
if params:
param_specificity += (sum(len(p.__mro__) for p in params if p is not AnyType)
/ len(params))
if inspect.isabstract(expected_type):
type_tier = SP_ABSTRACT
try:
mro_rank = 100 - type_.__mro__.index(expected_type)
except ValueError:
pass
if type_specificity == 0:
type_specificity = len(expected_type.__mro__)
if params:
return (mro_rank, type_tier, type_specificity, param_specificity)
else:
return (mro_rank, type_tier, type_specificity)
def get_signature(func):
"""
Gathers information about the call signature of `func`.
"""
code = func.__code__
# Names of regular parameters
parameters = tuple(code.co_varnames[:code.co_argcount])
# Flags
has_varargs = bool(code.co_flags & inspect.CO_VARARGS)
has_varkw = bool(code.co_flags & inspect.CO_VARKEYWORDS)
has_kwonly = bool(code.co_kwonlyargcount)
# A mapping of parameter names to default values
default_values = func.__defaults__ or ()
defaults = dict(zip(parameters[-len(default_values):], default_values))
# Type annotations for all parameters
type_hints = typing.get_type_hints(func) if typing else func.__annotations__
types = tuple(normalize_type(type_hints.get(param, AnyType)) for param in parameters)
# Type annotations for required parameters
required = types[:-len(defaults)] if defaults else types
# Complexity
complexity = tuple(map(type_complexity, types)) if typing else None
return Signature(parameters, types, complexity, defaults, required,
has_varargs, has_varkw, has_kwonly)
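# Informal example of what get_signature produces, for a hypothetical function
#     def f(a: int, b='x', *args, **kwargs): ...
# parameters == ('a', 'b'), types == (int, AnyType), defaults == {'b': 'x'},
# required == (int,), has_varargs is True, has_varkw is True, has_kwonly is False.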
def iter_types(types):
for type_ in types:
if type_ is AnyType:
pass
elif issubclass(type_, typing.Union):
for t in iter_types(type_.__union_params__):
yield t
else:
yield type_
def normalize_type(type_, level=0):
"""
Reduces an arbitrarily complex type declaration into something manageable.
"""
if not typing or not isinstance(type_, typing.TypingMeta) or type_ is AnyType:
return type_
if isinstance(type_, typing.TypeVar):
if type_.__constraints__ or type_.__bound__:
return type_
else:
return AnyType
if issubclass(type_, typing.Union):
if not type_.__union_params__:
raise OverloadingError("typing.Union must be parameterized")
return typing.Union[tuple(normalize_type(t, level) for t in type_.__union_params__)]
if issubclass(type_, typing.Tuple):
params = type_.__tuple_params__
if level > 0 or params is None:
return typing.Tuple
elif type_.__tuple_use_ellipsis__:
return typing.Tuple[normalize_type(params[0], level + 1), ...]
else:
return typing.Tuple[tuple(normalize_type(t, level + 1) for t in params)]
if issubclass(type_, typing.Callable):
return typing.Callable
if isinstance(type_, typing.GenericMeta):
base = find_base_generic(type_)
if base is typing.Generic:
return type_
else:
return GenericWrapper(type_, base, level > 0)
raise OverloadingError("%r not supported yet" % type_)
class GenericWrapperMeta(type):
def __new__(mcs, name, bases, attrs, type_=None, base=None, simplify=False):
cls = super().__new__(mcs, name, bases, attrs)
if type_ is None:
return cls
if base is None:
base = find_base_generic(type_)
if simplify:
type_ = first_origin(type_)
cls.type = type_
cls.base = base
if issubclass(base, typing.Mapping):
cls.interface = typing.Mapping
elif issubclass(base, typing.Iterable):
cls.interface = typing.Iterable
else:
cls.interface = None
cls.derive_configuration()
cls.complexity = type_complexity(cls)
return cls
def __init__(cls, *_):
pass
def __call__(cls, type_, base=None, simplify=False):
return cls.__class__(cls.__name__, (), {}, type_, base, simplify)
def __eq__(cls, other):
if isinstance(other, GenericWrapperMeta):
return cls.type == other.type
elif isinstance(other, typing.GenericMeta):
return cls.type == other
else:
return False
def __hash__(cls):
return hash(cls.type)
def __repr__(cls):
return repr(cls.type)
def __instancecheck__(cls, obj):
return cls.type.__instancecheck__(obj)
def __subclasscheck__(cls, other):
return cls.type.__subclasscheck__(other)
def derive_configuration(cls):
"""
Collect the nearest type variables and effective parameters from the type,
its bases, and their origins as necessary.
"""
base_params = cls.base.__parameters__
if hasattr(cls.type, '__args__'):
# typing as of commit abefbe4
tvars = {p: p for p in base_params}
types = {}
for t in iter_generic_bases(cls.type):
if t is cls.base:
type_vars = tuple(tvars[p] for p in base_params)
parameters = (types.get(tvar, tvar) for tvar in type_vars)
break
if t.__args__:
for arg, tvar in zip(t.__args__, t.__origin__.__parameters__):
if isinstance(arg, typing.TypeVar):
tvars[tvar] = tvars.get(arg, arg)
else:
types[tvar] = arg
else:
# typing 3.5.0
tvars = [None] * len(base_params)
for t in iter_generic_bases(cls.type):
for i, p in enumerate(t.__parameters__):
if tvars[i] is None and isinstance(p, typing.TypeVar):
tvars[i] = p
if all(tvars):
type_vars = tvars
parameters = cls.type.__parameters__
break
cls.type_vars = type_vars
cls.parameters = tuple(normalize_type(p, 1) for p in parameters)
class GenericWrapper(metaclass=GenericWrapperMeta):
pass
def type_complexity(type_):
"""Computes an indicator for the complexity of `type_`.
If the return value is 0, the supplied type is not parameterizable.
Otherwise, set bits in the return value denote the following features:
- bit 0: The type could be parameterized but is not.
- bit 1: The type represents an iterable container with 1 constrained type parameter.
- bit 2: The type represents a mapping with a constrained value type (2 parameters).
- bit 3: The type represents an n-tuple (n parameters).
Since these features are mutually exclusive, only a `Union` can have more than one bit set.
"""
if (not typing
or not isinstance(type_, (typing.TypingMeta, GenericWrapperMeta))
or type_ is AnyType):
return 0
if issubclass(type_, typing.Union):
return reduce(operator.or_, map(type_complexity, type_.__union_params__))
if issubclass(type_, typing.Tuple):
if type_.__tuple_params__ is None:
return 1
elif type_.__tuple_use_ellipsis__:
return 2
else:
return 8
if isinstance(type_, GenericWrapperMeta):
type_count = 0
for p in reversed(type_.parameters):
if type_count > 0:
type_count += 1
if p is AnyType:
continue
if not isinstance(p, typing.TypeVar) or p.__constraints__ or p.__bound__:
type_count += 1
return 1 << min(type_count, 2)
return 0
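# Rough illustrations of the bit scheme above (a sketch; exact values assume
# typing 3.5.x semantics): a bare List normalizes to a parameterizable-but-
# unparameterized generic -> 1, List[int] -> 2 (iterable with one constrained
# parameter), Dict[str, int] -> 4 (mapping with a constrained value type), and
# Tuple[int, str] -> 8 (n-tuple).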
def first_origin(type_):
while type_.__origin__:
type_ = type_.__origin__
return type_
def find_base_generic(type_):
"""Locates the underlying generic whose structure and behavior are known.
For example, the base generic of a type that inherits from `typing.Mapping[T, int]`
is `typing.Mapping`.
"""
for t in type_.__mro__:
if t.__module__ == typing.__name__:
return first_origin(t)
def iter_generic_bases(type_):
"""Iterates over all generics `type_` derives from, including origins.
This function is only necessary because, in typing 3.5.0, a generic doesn't
get included in the list of bases when it constructs a parameterized version
of itself. This was fixed in aab2c59; now it would be enough to just iterate
over the MRO.
"""
for t in type_.__mro__:
if not isinstance(t, typing.GenericMeta):
continue
yield t
t = t.__origin__
while t:
yield t
t = t.__origin__
def sig_cmp(sig1, sig2):
"""
Compares two normalized type signatures for validation purposes.
"""
types1 = sig1.required
types2 = sig2.required
if len(types1) != len(types2):
return False
dup_pos = []
dup_kw = {}
for t1, t2 in zip(types1, types2):
match = type_cmp(t1, t2)
if match:
dup_pos.append(match)
else:
break
else:
return tuple(dup_pos)
kw_range = slice(len(dup_pos), len(types1))
kwds1 = sig1.parameters[kw_range]
kwds2 = sig2.parameters[kw_range]
if set(kwds1) != set(kwds2):
return False
kwtypes1 = dict(zip(sig1.parameters, types1))
kwtypes2 = dict(zip(sig2.parameters, types2))
for kw in kwds1:
match = type_cmp(kwtypes1[kw], kwtypes2[kw])
if match:
dup_kw[kw] = match
else:
break
else:
return tuple(dup_pos), dup_kw
return False
def type_cmp(t1, t2):
if t1 is AnyType and t2 is not AnyType:
return False
if t2 is AnyType and t1 is not AnyType:
return False
if t1 == t2:
return t1
if typing:
if isinstance(t1, typing.UnionMeta) and isinstance(t2, typing.UnionMeta):
common = t1.__union_set_params__ & t2.__union_set_params__
if common:
return next(iter(common))
elif isinstance(t1, typing.UnionMeta) and t2 in t1.__union_params__:
return t2
elif isinstance(t2, typing.UnionMeta) and t1 in t2.__union_params__:
return t1
return False
class AnyTypeMeta(type):
def __subclasscheck__(cls, other):
if not isinstance(other, type):
return super().__subclasscheck__(other)
return True
class AnyType(metaclass=AnyTypeMeta):
pass
if typing:
AnyType = typing.Any
def error(name):
"""
Raises a `TypeError` when a call to an overloaded function
doesn't match any implementation.
"""
raise TypeError("Invalid type or number of arguments{0}."
.format(" when calling '%s'" % name if name else ''))
class OverloadingError(Exception):
"""Raised during function setup when something goes wrong"""
pass
def unwrap(func):
while hasattr(func, '__func__'):
func = func.__func__
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
return func
def ensure_function(func):
if not isinstance(func, FunctionType):
raise OverloadingError("%r is not a function." % func)
def is_void(func):
"""
Determines if a function is a void function, i.e., one whose body contains
nothing but a docstring or an ellipsis. A void function can be used to introduce
an overloaded function without actually registering an implementation.
"""
try:
source = dedent(inspect.getsource(func))
except (OSError, IOError):
return False
fdef = next(ast.iter_child_nodes(ast.parse(source)))
return (
type(fdef) is ast.FunctionDef and len(fdef.body) == 1 and
type(fdef.body[0]) is ast.Expr and
type(fdef.body[0].value) in {ast.Str, ast.Ellipsis})
def update_docstring(dispatcher, func=None):
"""
Inserts a call signature at the beginning of the docstring on `dispatcher`.
The signature is taken from `func` if provided; otherwise `(...)` is used.
"""
doc = dispatcher.__doc__ or ''
if inspect.cleandoc(doc).startswith('%s(' % dispatcher.__name__):
return
sig = '(...)'
if func and func.__code__.co_argcount:
argspec = inspect.getfullargspec(func) # pylint: disable=deprecated-method
if argspec.args and argspec.args[0] in {'self', 'cls'}:
argspec.args.pop(0)
if any(argspec):
sig = inspect.formatargspec(*argspec) # pylint: disable=deprecated-method
sig = re.sub(r' at 0x[0-9a-f]{8,16}(?=>)', '', sig)
sep = '\n' if doc.startswith('\n') else '\n\n'
dispatcher.__doc__ = dispatcher.__name__ + sig + sep + doc
def get_full_name(obj):
return obj.__module__ + '.' + obj.__qualname__
def _repr(type_):
if type_ is AnyType:
return '<any type>'
return repr(type_)
def dedent(text):
indent = re.match(r'\s*', text).group()
if indent:
text = re.sub('^' + indent, '', text, flags=re.M)
return text
|
code/create_ppt.py | solicia-xu/pbpython | 1,846 | 11079901 | """
See http://pbpython.com/creating-powerpoint.html for details on this script
Requires https://python-pptx.readthedocs.org/en/latest/index.html
Example program showing how to read in Excel, process with pandas and
output to a PowerPoint file.
"""
from __future__ import print_function
from pptx import Presentation
from pptx.util import Inches
import argparse
import pandas as pd
import numpy as np
from datetime import date
import matplotlib.pyplot as plt
import seaborn as sns
def df_to_table(slide, df, left, top, width, height, colnames=None):
"""Converts a Pandas DataFrame to a PowerPoint table on the given
Slide of a PowerPoint presentation.
The table is a standard Powerpoint table, and can easily be modified with the Powerpoint tools,
for example: resizing columns, changing formatting etc.
Arguments:
- slide: slide object from the python-pptx library containing the slide on which you want the table to appear
- df: Pandas DataFrame with the data
Optional arguments:
     - colnames: list of column headings to use in place of df.columns
    Adapted from https://github.com/robintw/PandasToPowerpoint/blob/master/PandasToPowerpoint.py
"""
rows, cols = df.shape
res = slide.shapes.add_table(rows + 1, cols, left, top, width, height)
if colnames is None:
colnames = list(df.columns)
# Insert the column names
for col_index, col_name in enumerate(colnames):
# Column names can be tuples
if not isinstance(col_name, str):
col_name = " ".join(col_name)
res.table.cell(0, col_index).text = col_name
m = df.as_matrix()
for row in range(rows):
for col in range(cols):
val = m[row, col]
text = str(val)
res.table.cell(row + 1, col).text = text
def parse_args():
""" Setup the input and output arguments for the script
Return the parsed input and output files
"""
parser = argparse.ArgumentParser(description='Create ppt report')
parser.add_argument('infile',
type=argparse.FileType('r'),
help='Powerpoint file used as the template')
parser.add_argument('report',
type=argparse.FileType('r'),
help='Excel file containing the raw report data')
parser.add_argument('outfile',
type=argparse.FileType('w'),
help='Output powerpoint report file')
return parser.parse_args()
def create_pivot(df, index_list=["Manager", "Rep", "Product"],
value_list=["Price", "Quantity"]):
"""
Take a DataFrame and create a pivot table
Return it as a DataFrame pivot table
"""
table = pd.pivot_table(df, index=index_list,
values=value_list,
aggfunc=[np.sum, np.mean], fill_value=0)
return table
def create_chart(df, filename):
""" Create a simple bar chart saved to the filename based on the dataframe
passed to the function
"""
df['total'] = df['Quantity'] * df['Price']
final_plot = df.groupby('Name')['total'].sum().order().plot(kind='barh')
fig = final_plot.get_figure()
fig.set_size_inches(6, 4.5)
fig.savefig(filename, bbox_inches='tight', dpi=600)
def create_ppt(input, output, report_data, chart):
""" Take the input powerpoint file and use it as the template for the output
file.
"""
prs = Presentation(input)
# Use the output from analyze_ppt to understand which layouts and placeholders
# to use
# Create a title slide first
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
title = slide.shapes.title
subtitle = slide.placeholders[1]
title.text = "Quarterly Report"
subtitle.text = "Generated on {:%m-%d-%Y}".format(date.today())
# Create the summary graph
graph_slide_layout = prs.slide_layouts[8]
slide = prs.slides.add_slide(graph_slide_layout)
title = slide.shapes.title
title.text = "Sales by account"
placeholder = slide.placeholders[1]
pic = placeholder.insert_picture(chart)
subtitle = slide.placeholders[2]
subtitle.text = "Results consistent with last quarter"
# Create a slide for each manager
for manager in report_data.index.get_level_values(0).unique():
#print(report_data.xs(manager, level=0).reset_index())
slide = prs.slides.add_slide(prs.slide_layouts[2])
title = slide.shapes.title
title.text = "Report for {}".format(manager)
top = Inches(1.5)
left = Inches(0.25)
width = Inches(9.25)
height = Inches(5.0)
# Flatten the pivot table by resetting the index
# Create a table on the slide
df_to_table(slide, report_data.xs(manager, level=0).reset_index(),
left, top, width, height)
prs.save(output)
if __name__ == "__main__":
args = parse_args()
df = pd.read_excel(args.report.name)
report_data = create_pivot(df)
create_chart(df, "report-image.png")
create_ppt(args.infile.name, args.outfile.name, report_data, "report-image.png")
|
contrib/AutoNUE/tools/IDD_labeling.py | JamesLim-sy/PaddleSeg | 4,708 | 11079957 | import os
import numpy as np
import cv2
from PIL import Image
from paddleseg import utils
import xml.dom.minidom
def mkdir(path):
sub_dir = os.path.dirname(path)
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
def get_image_list(image_path):
"""Get image list"""
valid_suffix = [
'.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png'
]
image_list = []
image_dir = None
if os.path.isfile(image_path):
if os.path.splitext(image_path)[-1] in valid_suffix:
image_list.append(image_path)
elif os.path.isdir(image_path):
image_dir = image_path
for root, dirs, files in os.walk(image_path):
for f in files:
if '.ipynb_checkpoints' in root:
continue
if os.path.splitext(f)[-1] in valid_suffix:
image_list.append(os.path.join(root.split('/')[-1], f))
else:
raise FileNotFoundError(
'`--image_path` is not found. it should be an image file or a directory including images'
)
if len(image_list) == 0:
raise RuntimeError('There are not image file in `--image_path`')
return image_list, image_dir
def refine_pred():
image_list, image_dir = get_image_list(
'detection_out/pseudo_color_prediction')
for ii in image_list:
name_pred = 'detection_out/pseudo_color_prediction/' + ii
name_label = 'data/IDD_Detection/Annotations/all/' + ii[:-3] + 'xml'
pred = np.array(Image.open(name_pred)).astype(np.float32)
if not os.path.exists(name_label):
pred_mask = utils.visualize.get_pseudo_color_map(pred)
pred_saved_path = 'detect_out/pred_refine/' + ii
mkdir(pred_saved_path)
pred_mask.save(pred_saved_path)
continue
dom = xml.dom.minidom.parse(name_label)
root = dom.documentElement
objects = root.getElementsByTagName("object")
for item in objects:
name = item.getElementsByTagName("name")[0]
if name.firstChild.data == 'traffic sign' or name.firstChild.data == 'traffic light':
print(ii)
xmin = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'xmin')[0].firstChild.data)
ymin = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'ymin')[0].firstChild.data)
xmax = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'xmax')[0].firstChild.data)
ymax = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'ymax')[0].firstChild.data)
if name.firstChild.data == 'traffic sign':
pred[ymin:ymax, xmin:xmax] = 18
elif name.firstChild.data == 'traffic light':
pred[ymin:ymax, xmin:xmax] = 19
pred_mask = utils.visualize.get_pseudo_color_map(pred)
pred_saved_path = 'detect_out/pred_refine/' + ii
mkdir(pred_saved_path)
pred_mask.save(pred_saved_path)
def test():
path = '/Users/liliulei/Downloads/IDD_Detection/JPEGImages/frontNear/'
image_list, image_dir = get_image_list(path)
for ii in image_list:
        name_xml = '/Users/liliulei/Downloads/IDD_Detection/Annotations/frontNear/' + ii[:-3] + 'xml'
image = cv2.imread(path + ii)
# print(image.shape)
(h, w) = image.shape[0:2]
pred = np.zeros_like(image)
dom = xml.dom.minidom.parse(name_xml)
root = dom.documentElement
objects = root.getElementsByTagName("object")
for item in objects:
name = item.getElementsByTagName("name")[0]
print(name.firstChild.data)
if name.firstChild.data == 'traffic sign' or name.firstChild.data == 'traffic light':
xmin = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'xmin')[0].firstChild.data)
ymin = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'ymin')[0].firstChild.data)
xmax = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'xmax')[0].firstChild.data)
ymax = int(
item.getElementsByTagName('bndbox')[0].getElementsByTagName(
'ymax')[0].firstChild.data)
if name.firstChild.data == 'traffic sign':
pred[ymin:ymax, xmin:xmax, 0] = 255
elif name.firstChild.data == 'traffic light':
pred[ymin:ymax, xmin:xmax, 1] = 255
new_im = image * 0.5 + pred * 0.5
cv2.imwrite(ii.split('/')[-1][:-3] + 'png', new_im)
refine_pred()
|
Python/Tests/TestData/SendToInteractive/Delayed.py | techkey/PTVS | 404 | 11079961 | import time
if True:
time.sleep(5)
pass
if True:
x = 1
y = 2
print('hi')
|
vumi/transports/cellulant/tests/test_cellulant.py | seidu626/vumi | 199 | 11079969 | from urllib import urlencode
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import VumiTestCase
from vumi.transports.cellulant import CellulantTransport, CellulantError
from vumi.message import TransportUserMessage
from vumi.utils import http_request
from vumi.transports.tests.helpers import TransportHelper
class TestCellulantTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.config = {
'web_port': 0,
'web_path': '/api/v1/ussd/cellulant/',
'ussd_session_timeout': 60,
}
self.tx_helper = self.add_helper(TransportHelper(CellulantTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url(
self.config['web_path'])
yield self.transport.session_manager.redis._purge_all() # just in case
def mk_request(self, **params):
defaults = {
'MSISDN': '27761234567',
'INPUT': '',
'opCode': 'BEG',
'ABORT': '0',
'sessionID': '1',
}
defaults.update(params)
return http_request('%s?%s' % (self.transport_url,
urlencode(defaults)), data='', method='GET')
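    # For illustration, the defaults above yield a GET against the transport URL of
    # roughly this form (parameter order may vary):
    #   /api/v1/ussd/cellulant/?MSISDN=27761234567&INPUT=&opCode=BEG&ABORT=0&sessionID=1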
@inlineCallbacks
def test_redis_caching(self):
# delete the key that shouldn't exist (in case of testing real redis)
yield self.transport.session_manager.redis.delete("msisdn:123")
tx = self.transport
val = yield tx.get_ussd_for_msisdn_session("msisdn", "123")
self.assertEqual(None, val)
yield tx.set_ussd_for_msisdn_session("msisdn", "123", "*bar#")
val = yield tx.get_ussd_for_msisdn_session("msisdn", "123")
self.assertEqual("*bar#", val)
@inlineCallbacks
def test_inbound_begin(self):
deferred = self.mk_request(INPUT="*120*1#")
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['content'], None)
self.assertEqual(msg['to_addr'], '*120*1#')
self.assertEqual(msg['from_addr'], '27761234567'),
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['transport_metadata'], {
'session_id': '1',
})
yield self.tx_helper.make_dispatch_reply(msg, "ussd message")
response = yield deferred
self.assertEqual(response, '1|ussd message|null|null|null|null')
@inlineCallbacks
def test_inbound_resume_and_reply_with_end(self):
# first pre-populate the redis datastore to simulate prior BEG message
yield self.transport.set_ussd_for_msisdn_session(
'27761234567',
'1',
'*120*VERY_FAKE_CODE#',
)
deferred = self.mk_request(INPUT='hi', opCode='')
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(msg['content'], 'hi')
self.assertEqual(msg['to_addr'], '*120*VERY_FAKE_CODE#')
self.assertEqual(msg['from_addr'], '27761234567')
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['transport_metadata'], {
'session_id': '1',
})
yield self.tx_helper.make_dispatch_reply(
msg, "hello world", continue_session=False)
response = yield deferred
self.assertEqual(response, '1|hello world|null|null|end|null')
@inlineCallbacks
def test_inbound_resume_with_failed_to_addr_lookup(self):
deferred = self.mk_request(MSISDN='123456', INPUT='hi', opCode='')
response = yield deferred
self.assertEqual(response, '')
[f] = self.flushLoggedErrors(CellulantError)
self.assertTrue(str(f.value).startswith(
"Failed redis USSD to_addr lookup for {"))
@inlineCallbacks
def test_inbound_abort_opcode(self):
# first pre-populate the redis datastore to simulate prior BEG message
yield self.transport.set_ussd_for_msisdn_session(
'27761234567',
'1',
'*120*VERY_FAKE_CODE#',
)
# this one should return immediately with a blank
# as there isn't going to be a sensible response
resp = yield self.mk_request(opCode='ABO')
self.assertEqual(resp, '')
[msg] = yield self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_CLOSE)
@inlineCallbacks
def test_inbound_abort_field(self):
# should also return immediately
resp = yield self.mk_request(ABORT=1)
self.assertEqual(resp, '')
[msg] = yield self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_CLOSE)
@inlineCallbacks
def test_nack(self):
msg = yield self.tx_helper.make_dispatch_outbound("foo")
[nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'], 'Missing fields: in_reply_to')
|
compiler/verify/none.py | im-world/OpenRAM | 335 | 11079972 | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
"""
This is a DRC/LVS/PEX interface file for the case with no DRC/LVS tools.
"""
import debug
# Only print the warning once.
drc_warned = False
lvs_warned = False
pex_warned = False
def write_drc_script(cell_name, gds_name, extract, final_verification=False, output_path=None, sp_name=None):
debug.error("Cannot write DRC script for unknown tool", -1)
def run_drc(cell_name, gds_name, sp_name, extract=False, final_verification=False, output_path=None):
global drc_warned
if not drc_warned:
debug.error("DRC unable to run.", -1)
drc_warned=True
# Since we warned, return a failing test.
return 1
def write_lvs_script(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
pass
def run_lvs(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
global lvs_warned
if not lvs_warned:
debug.error("LVS unable to run.", -1)
lvs_warned=True
# Since we warned, return a failing test.
return 1
def run_pex(name, gds_name, sp_name, output=None, final_verification=False, output_path=None):
global pex_warned
if not pex_warned:
debug.error("PEX unable to run.", -1)
pex_warned=True
# Since we warned, return a failing test.
return 1
def print_drc_stats():
pass
def print_lvs_stats():
pass
def print_pex_stats():
pass
|
jointD/test.py | thunlp/JointNRE | 186 | 11079974 | import tensorflow as tf
import numpy as np
import time
import datetime
import os
import network
import json
import sys
from sklearn.metrics import average_precision_score
import ctypes
export_path = "../data/"
word_vec = np.load(export_path + 'vec.npy')
f = open(export_path + "config", 'r')
config = json.loads(f.read())
f.close()
ll = ctypes.cdll.LoadLibrary
lib = ll("./init.so")
lib.setInPath("../data/")
lib.init()
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('nbatch_kg',100,'entity numbers used each training time')
tf.app.flags.DEFINE_float('margin',1.0,'margin used in the knowledge graph embedding loss')
tf.app.flags.DEFINE_float('learning_rate_kg',0.001,'learning rate for kg')
tf.app.flags.DEFINE_integer('ent_total',lib.getEntityTotal(),'total of entities')
tf.app.flags.DEFINE_integer('rel_total',lib.getRelationTotal(),'total of relations')
tf.app.flags.DEFINE_integer('tri_total',lib.getTripleTotal(),'total of triples')
tf.app.flags.DEFINE_integer('katt_flag', 1, '1 for katt, 0 for att')
tf.app.flags.DEFINE_string('model', 'cnn', 'neural models to encode sentences')
tf.app.flags.DEFINE_integer('max_length',config['fixlen'],'maximum of number of words in one sentence')
tf.app.flags.DEFINE_integer('pos_num', config['maxlen'] * 2 + 1,'number of position embedding vectors')
tf.app.flags.DEFINE_integer('num_classes', config['textual_rel_total'],'maximum of relations')
tf.app.flags.DEFINE_integer('hidden_size',230,'hidden feature size')
tf.app.flags.DEFINE_integer('pos_size',5,'position embedding size')
tf.app.flags.DEFINE_integer('max_epoch',30,'maximum of training epochs')
tf.app.flags.DEFINE_integer('batch_size',131*2,'entity numbers used each training time')
tf.app.flags.DEFINE_float('learning_rate',0.1,'learning rate for the text relation extraction model')
tf.app.flags.DEFINE_float('weight_decay',0.00001,'weight_decay')
tf.app.flags.DEFINE_float('keep_prob',1.0,'dropout rate')
tf.app.flags.DEFINE_integer('test_batch_size',131*2,'entity numbers used each test time')
tf.app.flags.DEFINE_string('checkpoint_path','./model/','path to store model')
def make_shape(array,last_dim):
output = []
for i in array:
for j in i:
output.append(j)
output = np.array(output)
if np.shape(output)[-1]==last_dim:
return output
else:
print 'Make Shape Error!'
def main(_):
print 'reading word embedding'
word_vec = np.load(export_path + 'vec.npy')
print 'reading test data'
test_instance_triple = np.load(export_path + 'test_instance_triple.npy')
test_instance_scope = np.load(export_path + 'test_instance_scope.npy')
test_len = np.load(export_path + 'test_len.npy')
test_label = np.load(export_path + 'test_label.npy')
test_word = np.load(export_path + 'test_word.npy')
test_pos1 = np.load(export_path + 'test_pos1.npy')
test_pos2 = np.load(export_path + 'test_pos2.npy')
test_mask = np.load(export_path + 'test_mask.npy')
test_head = np.load(export_path + 'test_head.npy')
test_tail = np.load(export_path + 'test_tail.npy')
print 'reading finished'
print 'mentions : %d' % (len(test_instance_triple))
print 'sentences : %d' % (len(test_len))
print 'relations : %d' % (FLAGS.num_classes)
print 'word size : %d' % (len(word_vec[0]))
print 'position size : %d' % (FLAGS.pos_size)
print 'hidden size : %d' % (FLAGS.hidden_size)
print 'reading finished'
print 'building network...'
sess = tf.Session()
if FLAGS.model.lower() == "cnn":
model = network.CNN(is_training = False, word_embeddings = word_vec)
elif FLAGS.model.lower() == "pcnn":
model = network.PCNN(is_training = False, word_embeddings = word_vec)
elif FLAGS.model.lower() == "lstm":
model = network.RNN(is_training = False, word_embeddings = word_vec, cell_name = "LSTM", simple_position = True)
elif FLAGS.model.lower() == "gru":
model = network.RNN(is_training = False, word_embeddings = word_vec, cell_name = "GRU", simple_position = True)
elif FLAGS.model.lower() == "bi-lstm" or FLAGS.model.lower() == "bilstm":
model = network.BiRNN(is_training = False, word_embeddings = word_vec, cell_name = "LSTM", simple_position = True)
elif FLAGS.model.lower() == "bi-gru" or FLAGS.model.lower() == "bigru":
model = network.BiRNN(is_training = False, word_embeddings = word_vec, cell_name = "GRU", simple_position = True)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
def test_step(head, tail, word, pos1, pos2, mask, leng, label_index, label, scope):
feed_dict = {
model.head_index: head,
model.tail_index: tail,
model.word: word,
model.pos1: pos1,
model.pos2: pos2,
model.mask: mask,
model.len : leng,
model.label_index: label_index,
model.label: label,
model.scope: scope,
model.keep_prob: FLAGS.keep_prob
}
output = sess.run(model.test_output, feed_dict)
return output
f = open('results.txt','w')
f.write('iteration\taverage precision\n')
for iters in range(1,30):
print iters
saver.restore(sess, FLAGS.checkpoint_path + FLAGS.model+str(FLAGS.katt_flag)+"-"+str(3664*iters))
stack_output = []
stack_label = []
iteration = len(test_instance_scope)/FLAGS.test_batch_size
for i in range(iteration):
temp_str= 'running '+str(i)+'/'+str(iteration)+'...'
sys.stdout.write(temp_str+'\r')
sys.stdout.flush()
input_scope = test_instance_scope[i * FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size]
index = []
scope = [0]
label = []
for num in input_scope:
index = index + range(num[0], num[1] + 1)
label.append(test_label[num[0]])
scope.append(scope[len(scope)-1] + num[1] - num[0] + 1)
label_ = np.zeros((FLAGS.test_batch_size, FLAGS.num_classes))
label_[np.arange(FLAGS.test_batch_size), label] = 1
output = test_step(test_head[index], test_tail[index], test_word[index,:], test_pos1[index,:], test_pos2[index,:], test_mask[index,:], test_len[index], test_label[index], label_, np.array(scope))
stack_output.append(output)
stack_label.append(label_)
print 'evaluating...'
stack_output = np.concatenate(stack_output, axis=0)
stack_label = np.concatenate(stack_label, axis = 0)
exclude_na_flatten_output = stack_output[:,1:]
exclude_na_flatten_label = stack_label[:,1:]
print exclude_na_flatten_output.shape
print exclude_na_flatten_label.shape
average_precision = average_precision_score(exclude_na_flatten_label,exclude_na_flatten_output, average = "micro")
np.save('./'+FLAGS.model+'+sen_att_all_prob_'+str(iters)+'.npy', exclude_na_flatten_output)
np.save('./'+FLAGS.model+'+sen_att_all_label_'+str(iters)+'.npy',exclude_na_flatten_label)
print 'pr: '+str(average_precision)
f.write(str(average_precision)+'\n')
f.close()
if __name__ == "__main__":
tf.app.run()
|
CalibTracker/SiStripCommon/test/testProduceCalibrationTree_cfg.py | ckamtsikis/cmssw | 852 | 11079984 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patInputFiles_cff import filesRelValTTbarPileUpGENSIMRECO
import FWCore.ParameterSet.VarParsing as VarParsing
from CalibTracker.SiStripCommon.shallowTree_test_template import *
###################################################################
def alterTriggersForUnitTest(process):
###################################################################
'''
These modification as necessary only in order to run on a
RelVal MC in which the physics menu is not simulated!
'''
if(hasattr(process,'AAGFilter')):
process.AAGFilter.triggerConditions = ["HLT_Random_v*"]
if(hasattr(process,'IsolatedMuonFilter')):
process.IsolatedMuonFilter.triggerConditions = ["MC_IsoMu_v*"]
return process
###################################################################
# Setup 'standard' options
###################################################################
options = VarParsing.VarParsing()
options.register('conditionGT',
"auto:phase1_2017_realistic",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"condition global tag for the job (\"auto:phase1_2017_realistic\" is default)")
options.register('conditionOverwrite',
"",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"configuration to overwrite the condition into the GT (\"\" is default)")
options.register('inputCollection',
"generalTracks",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"collections to be used for input (\"generalTracks\" is default)")
options.register('inputFiles',
filesRelValTTbarPileUpGENSIMRECO,
VarParsing.VarParsing.multiplicity.list,
VarParsing.VarParsing.varType.string,
"file to process")
options.register('outputFile',
"calibTreeTest.root",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"name for the output root file (\"calibTreeTest.root\" is default)")
options.register('maxEvents',
-1,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"number of events to process (\"-1\" for all)")
options.register('unitTest',
False,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.bool,
"is for unit test?")
options.parseArguments()
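# The registered VarParsing options are supplied as key=value pairs on the cmsRun
# command line, e.g. (values are illustrative):
#   cmsRun testProduceCalibrationTree_cfg.py unitTest=True maxEvents=10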
print("conditionGT : ", options.conditionGT)
print("conditionOverwrite: ", options.conditionOverwrite)
print("inputCollection : ", options.inputCollection)
print("maxEvents : ", options.maxEvents)
print("outputFile : ", options.outputFile)
print("inputFiles : ", options.inputFiles)
process = cms.Process('CALIB')
#from CalibTracker.SiStripChannelGain.ntuple_cff import *
process.load('CalibTracker.Configuration.setupCalibrationTree_cff')
process.load('CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi') #event Info
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi') #event Info
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.conditionGT, options.conditionOverwrite)
process.load('FWCore.MessageService.MessageLogger_cfi')
if(options.unitTest):
process.MessageLogger.cerr.FwkReport.reportEvery = 1
else:
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.load('Configuration.StandardSequences.Services_cff')
process.add_( cms.Service( "TFileService",
fileName = cms.string( options.outputFile ),
closeFileFast = cms.untracked.bool(True) ) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )
#import runs
process.source = cms.Source (
"PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles[0])
)
#definition of input collection
process.CalibrationTracks.src = cms.InputTag( options.inputCollection )
process.shallowTracks.Tracks = cms.InputTag( options.inputCollection )
#process.shallowGainCalibrationAllBunch = 'ALCARECOSiStripCalMinBias' #cms.InputTag( options.inputCollection )
#process.shallowGainCalibrationAllBunch0T = 'ALCARECOSiStripCalMinBias' #cms.InputTag( options.inputCollection )
# BSCNoBeamHalo selection (Not to use for Cosmic Runs) --- OUTDATED!!!
## process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
## process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
## process.L1T1=process.hltLevel1GTSeed.clone()
## process.L1T1.L1TechTriggerSeeding = cms.bool(True)
## process.L1T1.L1SeedsLogicalExpression = cms.string('(40 OR 41) AND NOT (36 OR 37 OR 38 OR 39)')
compressionSettings = 201
process.EventInfo = cms.EDAnalyzer("ShallowTree",
CompressionSettings = process.gainCalibrationTreeStdBunch.CompressionSettings,
outputCommands = cms.untracked.vstring('drop *',
'keep *_shallowEventRun_*_*',
)
)
process.gainCalibrationTreeStdBunch.CompressionSettings = cms.untracked.int32(compressionSettings)
process.gainCalibrationTreeStdBunch0T.CompressionSettings = cms.untracked.int32(compressionSettings)
process.gainCalibrationTreeIsoMuon.CompressionSettings = cms.untracked.int32(compressionSettings)
process.gainCalibrationTreeIsoMuon0T.CompressionSettings = cms.untracked.int32(compressionSettings)
process.gainCalibrationTreeAagBunch.CompressionSettings = cms.untracked.int32(compressionSettings)
process.gainCalibrationTreeAagBunch0T.CompressionSettings = cms.untracked.int32(compressionSettings)
#process.TkCalPath = cms.Path(process.L1T1*process.TkCalFullSequence)
### if it is a unit test run, do not look for not existing triggers
### e.g. no AAG in MC
if(options.unitTest):
alterTriggersForUnitTest(process)
process.TkCalPath_StdBunch = cms.Path(process.TkCalSeq_StdBunch*process.shallowEventRun*process.EventInfo)
process.TkCalPath_StdBunch0T = cms.Path(process.TkCalSeq_StdBunch0T*process.shallowEventRun*process.EventInfo)
process.TkCalPath_IsoMuon = cms.Path(process.TkCalSeq_IsoMuon*process.shallowEventRun*process.EventInfo)
process.TkCalPath_IsoMuon0T = cms.Path(process.TkCalSeq_IsoMuon0T*process.shallowEventRun*process.EventInfo)
process.TkCalPath_AagBunch = cms.Path(process.TkCalSeq_AagBunch*process.shallowEventRun*process.EventInfo)
process.TkCalPath_AagBunch0T = cms.Path(process.TkCalSeq_AagBunch0T*process.shallowEventRun*process.EventInfo)
process.schedule = cms.Schedule( process.TkCalPath_StdBunch,
process.TkCalPath_StdBunch0T,
process.TkCalPath_IsoMuon,
process.TkCalPath_IsoMuon0T,
process.TkCalPath_AagBunch,
process.TkCalPath_AagBunch0T,
)
process.options = cms.untracked.PSet(
#wantSummary = cms.untracked.bool(True),
Rethrow = cms.untracked.vstring('OtherCMS',
'StdException',
'Unknown',
'BadAlloc',
'BadExceptionType',
'ProductNotFound',
'DictionaryNotFound',
'InsertFailure',
'Configuration',
'LogicError',
'UnimplementedFeature',
'InvalidReference',
'NullPointerError',
'NoProductSpecified',
'EventTimeout',
'EventCorruption',
'ScheduleExecutionFailure',
'EventProcessorFailure',
'FileInPathError',
'FileOpenError',
'FileReadError',
'FatalRootError',
'MismatchedInputFiles',
'ProductDoesNotSupportViews',
'ProductDoesNotSupportPtr',
'NotFound')
)
process.Timing = cms.Service("Timing",
summaryOnly = cms.untracked.bool(True),
useJobReport = cms.untracked.bool(True)
)
'''
if(options.unitTest):
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
ignoreTotal = cms.untracked.int32(1),
moduleMemorySummary = cms.untracked.bool(True)
)
'''
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(4)
process.options.numberOfStreams=cms.untracked.uint32(0)
#process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(2) ## not yet
|
rmgpy/test_data/testing_database/kinetics/families/Singlet_Val6_to_triplet/training/reactions.py | tza0035/RMG-Py | 250 | 11080015 | #!/usr/bin/env python
# encoding: utf-8
name = "Singlet_Val6_to_triplet/training"
shortDesc = "Kinetics used to train group additivity values"
longDesc = """
"""
entry(
index = 1,
label = "O2(S) => O2(T)",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(4.5E+10, 's^-1'), n=0, Ea=(397, 'cal/mol')),
rank = 1,
shortDesc = """""",
longDesc =
"""
taken from:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
Evaluated Kinetic and Photochemical Data for Atmospheric Chemistry: Supplement IV.
IUPAC Subcommittee on Gas Kinetic Data Evaluation for Atmospheric Chemistry
Journal of Physical and Chemical Reference Data 21, 1125 (1992)
doi: 10.1063/1.555918
Adjusted to a first order reaction at 1 atm by alongd:
n/V = P/RT = 1 bar / (83 cm^3 bar K^-1 mol^-1 * 300 K) = 4E-05 mol cm^-3
1.81E+06 mol cm^-3 S^-1 / 4E-05 mol cm^-3 = 4.5E+10 s^-1
Original reaction is O2(1D) + M => O2 + M
""",
)
|
tests/test_io.py | natphi/eemeter | 161 | 11080022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gzip
from pkg_resources import resource_filename, resource_stream
from tempfile import TemporaryFile
import pandas as pd
import pytest
from eemeter import (
meter_data_from_csv,
meter_data_from_json,
meter_data_to_csv,
temperature_data_from_csv,
temperature_data_from_json,
temperature_data_to_csv,
)
def test_meter_data_from_csv(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
meter_data_filename = meter_item["meter_data_filename"]
fname = resource_filename("eemeter.samples", meter_data_filename)
with gzip.open(fname) as f:
meter_data = meter_data_from_csv(f)
assert meter_data.shape == (810, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_csv_gzipped(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
meter_data_filename = meter_item["meter_data_filename"]
with resource_stream("eemeter.samples", meter_data_filename) as f:
meter_data = meter_data_from_csv(f, gzipped=True)
assert meter_data.shape == (810, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_csv_with_tz(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
meter_data_filename = meter_item["meter_data_filename"]
with resource_stream("eemeter.samples", meter_data_filename) as f:
meter_data = meter_data_from_csv(f, gzipped=True, tz="US/Eastern")
assert meter_data.shape == (810, 1)
assert meter_data.index.tz.zone == "US/Eastern"
assert meter_data.index.freq is None
def test_meter_data_from_csv_hourly_freq(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
meter_data_filename = meter_item["meter_data_filename"]
with resource_stream("eemeter.samples", meter_data_filename) as f:
meter_data = meter_data_from_csv(f, gzipped=True, freq="hourly")
assert meter_data.shape == (19417, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq == "H"
def test_meter_data_from_csv_daily_freq(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
meter_data_filename = meter_item["meter_data_filename"]
with resource_stream("eemeter.samples", meter_data_filename) as f:
meter_data = meter_data_from_csv(f, gzipped=True, freq="daily")
assert meter_data.shape == (810, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq == "D"
def test_meter_data_from_csv_custom_columns(sample_metadata):
with TemporaryFile() as f:
f.write(b"start_custom,kWh\n" b"2017-01-01T00:00:00,10\n")
f.seek(0)
meter_data = meter_data_from_csv(f, start_col="start_custom", value_col="kWh")
assert meter_data.shape == (1, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_none(sample_metadata):
data = None
meter_data = meter_data_from_json(data)
assert meter_data.shape == (0, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_orient_list(sample_metadata):
data = [["2017-01-01T00:00:00Z", 11], ["2017-01-02T00:00:00Z", 10]]
meter_data = meter_data_from_json(data, orient="list")
assert meter_data.shape == (2, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_orient_list_empty(sample_metadata):
data = []
meter_data = meter_data_from_json(data)
assert meter_data.shape == (0, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_orient_records(sample_metadata):
data = [
{"start": "2017-01-01T00:00:00Z", "value": 11},
{"start": "2017-01-02T00:00:00Z", "value": ""},
{"start": "2017-01-03T00:00:00Z", "value": 10},
]
meter_data = meter_data_from_json(data, orient="records")
assert meter_data.shape == (3, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_orient_records_empty(sample_metadata):
data = []
meter_data = meter_data_from_json(data, orient="records")
assert meter_data.shape == (0, 1)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
def test_meter_data_from_json_orient_records_with_estimated_true(sample_metadata):
data = [
{"start": "2017-01-01T00:00:00Z", "value": 11, "estimated": True},
{"start": "2017-01-02T00:00:00Z", "value": 10, "estimated": "true"},
{"start": "2017-01-03T00:00:00Z", "value": 10, "estimated": "True"},
{"start": "2017-01-04T00:00:00Z", "value": 10, "estimated": "1"},
{"start": "2017-01-05T00:00:00Z", "value": 10, "estimated": 1},
]
meter_data = meter_data_from_json(data, orient="records")
assert meter_data.shape == (5, 2)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
assert meter_data.estimated.sum() == 5
def test_meter_data_from_json_orient_records_with_estimated_false(sample_metadata):
data = [
{"start": "2017-01-01T00:00:00Z", "value": 10, "estimated": False},
{"start": "2017-01-02T00:00:00Z", "value": 10, "estimated": "false"},
{"start": "2017-01-03T00:00:00Z", "value": 10, "estimated": "False"},
{"start": "2017-01-04T00:00:00Z", "value": 10, "estimated": ""},
{"start": "2017-01-05T00:00:00Z", "value": 10, "estimated": None},
{"start": "2017-01-05T00:00:00Z", "value": 10},
]
meter_data = meter_data_from_json(data, orient="records")
assert meter_data.shape == (6, 2)
assert meter_data.index.tz.zone == "UTC"
assert meter_data.index.freq is None
assert meter_data.estimated.sum() == 0
def test_meter_data_from_json_bad_orient(sample_metadata):
data = [["2017-01-01T00:00:00Z", 11], ["2017-01-02T00:00:00Z", 10]]
with pytest.raises(ValueError):
meter_data_from_json(data, orient="NOT_ALLOWED")
def test_meter_data_to_csv(sample_metadata):
df = pd.DataFrame(
{"value": [5]}, index=pd.to_datetime(["2017-01-01T00:00:00Z"], utc=True)
)
with TemporaryFile("w+") as f:
meter_data_to_csv(df, f)
f.seek(0)
assert f.read() == ("start,value\n" "2017-01-01 00:00:00+00:00,5\n")
def test_temperature_data_from_csv(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
temperature_filename = meter_item["temperature_filename"]
fname = resource_filename("eemeter.samples", temperature_filename)
with gzip.open(fname) as f:
temperature_data = temperature_data_from_csv(f)
assert temperature_data.shape == (19417,)
assert temperature_data.index.tz.zone == "UTC"
assert temperature_data.index.freq is None
def test_temperature_data_from_csv_gzipped(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
temperature_filename = meter_item["temperature_filename"]
with resource_stream("eemeter.samples", temperature_filename) as f:
temperature_data = temperature_data_from_csv(f, gzipped=True)
assert temperature_data.shape == (19417,)
assert temperature_data.index.tz.zone == "UTC"
assert temperature_data.index.freq is None
def test_temperature_data_from_csv_with_tz(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
temperature_filename = meter_item["temperature_filename"]
with resource_stream("eemeter.samples", temperature_filename) as f:
temperature_data = temperature_data_from_csv(f, gzipped=True, tz="US/Eastern")
assert temperature_data.shape == (19417,)
assert temperature_data.index.tz.zone == "US/Eastern"
assert temperature_data.index.freq is None
def test_temperature_data_from_csv_hourly_freq(sample_metadata):
meter_item = sample_metadata["il-electricity-cdd-hdd-daily"]
temperature_filename = meter_item["temperature_filename"]
with resource_stream("eemeter.samples", temperature_filename) as f:
temperature_data = temperature_data_from_csv(f, gzipped=True, freq="hourly")
assert temperature_data.shape == (19417,)
assert temperature_data.index.tz.zone == "UTC"
assert temperature_data.index.freq == "H"
def test_temperature_data_from_csv_custom_columns(sample_metadata):
with TemporaryFile() as f:
f.write(b"dt_custom,tempC\n" b"2017-01-01T00:00:00,10\n")
f.seek(0)
temperature_data = temperature_data_from_csv(
f, date_col="dt_custom", temp_col="tempC"
)
assert temperature_data.shape == (1,)
assert temperature_data.index.tz.zone == "UTC"
assert temperature_data.index.freq is None
def test_temperature_data_from_json_orient_list(sample_metadata):
data = [["2017-01-01T00:00:00Z", 11], ["2017-01-02T00:00:00Z", 10]]
temperature_data = temperature_data_from_json(data, orient="list")
assert temperature_data.shape == (2,)
assert temperature_data.index.tz.zone == "UTC"
assert temperature_data.index.freq is None
def test_temperature_data_from_json_bad_orient(sample_metadata):
data = [["2017-01-01T00:00:00Z", 11], ["2017-01-02T00:00:00Z", 10]]
with pytest.raises(ValueError):
temperature_data_from_json(data, orient="NOT_ALLOWED")
def test_temperature_data_to_csv(sample_metadata):
series = pd.Series(10, index=pd.to_datetime(["2017-01-01T00:00:00Z"], utc=True))
with TemporaryFile("w+") as f:
temperature_data_to_csv(series, f)
f.seek(0)
assert f.read() == ("dt,temperature\n" "2017-01-01 00:00:00+00:00,10\n")
|
aiogram/types/mixins.py | twistfire92/aiogram | 2,744 | 11080042 | import os
import pathlib
from io import IOBase
from typing import Union, Optional
from aiogram.utils.deprecated import warn_deprecated
class Downloadable:
"""
Mixin for files
"""
async def download(
self,
destination=None,
timeout=30,
chunk_size=65536,
seek=True,
make_dirs=True,
*,
destination_dir: Optional[Union[str, pathlib.Path]] = None,
destination_file: Optional[Union[str, pathlib.Path, IOBase]] = None
):
"""
Download file
        At most one of the parameters ``destination_dir`` and ``destination_file`` can be used.
        :param destination: deprecated, use ``destination_dir`` or ``destination_file`` instead
:param timeout: Integer
:param chunk_size: Integer
:param seek: Boolean - go to start of file when downloading is finished.
:param make_dirs: Make dirs if not exist
:param destination_dir: directory for saving files
:param destination_file: path to the file or instance of :class:`io.IOBase`. For e. g. :class:`io.BytesIO`
:return: destination
"""
if destination:
warn_deprecated(
"destination parameter is deprecated, please use destination_dir or destination_file."
)
if destination_dir and destination_file:
raise ValueError(
"Use only one of the parameters: destination_dir or destination_file."
)
file, destination = await self._prepare_destination(
destination,
destination_dir,
destination_file,
make_dirs
)
return await self.bot.download_file(
file_path=file.file_path,
destination=destination,
timeout=timeout,
chunk_size=chunk_size,
seek=seek,
)
async def _prepare_destination(self, dest, destination_dir, destination_file, make_dirs):
file = await self.get_file()
if not(any((dest, destination_dir, destination_file))):
destination = file.file_path
elif dest: # backward compatibility
if isinstance(dest, IOBase):
return file, dest
if isinstance(dest, (str, pathlib.Path)) and os.path.isdir(dest):
destination = os.path.join(dest, file.file_path)
else:
destination = dest
elif destination_dir:
if isinstance(destination_dir, (str, pathlib.Path)):
destination = os.path.join(destination_dir, file.file_path)
else:
raise TypeError("destination_dir must be str or pathlib.Path")
else:
if isinstance(destination_file, IOBase):
return file, destination_file
elif isinstance(destination_file, (str, pathlib.Path)):
destination = destination_file
else:
raise TypeError("destination_file must be str, pathlib.Path or io.IOBase type")
if make_dirs and os.path.dirname(destination):
os.makedirs(os.path.dirname(destination), exist_ok=True)
return file, destination
async def get_file(self):
"""
Get file information
:return: :obj:`aiogram.types.File`
"""
if hasattr(self, 'file_path'):
return self
else:
return await self.bot.get_file(self.file_id)
async def get_url(self):
"""
Get file url.
Attention!!
        This method has security vulnerabilities, because the result
        contains the bot's *access token* in plain form. Use at your own risk!
:return: url
"""
file = await self.get_file()
return self.bot.get_file_url(file.file_path)
def __hash__(self):
return hash(self.file_id)
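# A minimal usage sketch, kept as a comment for illustration only. It assumes an
# aiogram v2 handler in which `message.document` is a type that mixes in
# Downloadable (e.g. Document); the handler name and the "downloads" directory
# are arbitrary examples, not part of this module.
#
#   async def _example_save_document(message):
#       # save under ./downloads/<file_path reported by Telegram>
#       await message.document.download(destination_dir="downloads")
#
#       # or stream the bytes into an in-memory buffer instead of the filesystem
#       buffer = io.BytesIO()
#       await message.document.download(destination_file=buffer)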
|
astroquery/hitran/__init__.py | hdevillepoix/astroquery | 577 | 11080063 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
HITRAN Catalog Query Tool
-------------------------
:Author: <NAME> (<EMAIL>)
"""
import os
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astroquery.hitran`.
"""
query_url = _config.ConfigItem('http://hitran.org/lbl/api',
'HITRAN web interface URL.')
timeout = _config.ConfigItem(60,
'Time limit for connecting to HITRAN server.')
data_dir = os.path.join(os.path.dirname(__file__), 'data')
formatfile = _config.ConfigItem(os.path.join(data_dir, 'readme.txt'),
'Format file.')
conf = Conf()
from .core import Hitran, HitranClass
__all__ = ['Hitran', 'HitranClass', 'conf']
|
test_regression.py | morenoh149/deeplearning | 122 | 11080094 | <gh_stars>100-1000
from unittest import TestCase
from regression import *
class TestRegression(TestCase):
def test_train_linear_regression(self):
# Without bias
X = np.array([[-1.0], [1.0]])
y = np.array([-1.5, 1.5])
linear_regression = LinearRegression(X, y, False)
np.testing.assert_almost_equal(linear_regression.predict(X), y)
self.assertAlmostEqual(linear_regression.error(X, y), 0.0)
# With bias
X = np.array([[-1.0], [1.0]])
y = np.array([0.5, 2.5])
linear_regression = LinearRegression(X, y, True)
np.testing.assert_almost_equal(linear_regression.predict(X), y)
self.assertAlmostEqual(linear_regression.error(X, y), 0.0)
# With polynomial
X = np.array([[1.0], [2.0], [3.0]])
y = np.array([1.0, 4.0, 9.0])
linear_regression = LinearRegression(X, y, True, 2)
np.testing.assert_almost_equal(linear_regression.predict(X), y)
self.assertAlmostEqual(linear_regression.error(X, y), 0.0)
# Overfitting
X_train = np.array([[1.0], [2.0], [3.0]])
y_train = np.array([1.0, 4.0, 9.0])
X_test = np.array([[0.0], [1.5], [4.0]])
y_test = np.array([0.0, 2.25, 16.0])
linear_regression = LinearRegression(X_train, y_train, False, 9)
self.assertAlmostEqual(linear_regression.error(X_train, y_train), 0.0)
self.assertGreater(linear_regression.error(X_test, y_test), 0.0)
# Underfitting
linear_regression = LinearRegression(X_train, y_train, False, 1)
self.assertGreater(linear_regression.error(X_train, y_train), 0.0)
self.assertGreater(linear_regression.error(X_test, y_test), 0.0)
# Best capacity
linear_regression = LinearRegression(X_train, y_train, False, 2)
self.assertAlmostEqual(linear_regression.error(X_train, y_train), 0.0)
self.assertAlmostEqual(linear_regression.error(X_test, y_test), 0.0)
# Regularization
linear_regression = LinearRegression(X_train, y_train, False, 4, 1.0)
self.assertLess(linear_regression.error(X_train, y_train), 0.01)
self.assertLess(linear_regression.error(X_test, y_test), 1.0)
def test_hyper_linear_regression(self):
# Tuning hyper parameter with test data
X_train = np.array([[1.0], [2.0], [3.0]])
y_train = np.array([1.0, 4.0, 9.0])
X_test = np.array([[0.0], [1.5], [4.0]])
y_test = np.array([0.0, 2.25, 16.0])
hyper_linear_regression = HyperLinearRegression(X_train, y_train)
self.assertAlmostEqual(hyper_linear_regression.error(X_train, y_train), 0.0)
self.assertEqual(hyper_linear_regression.model.degree, 2)
self.assertAlmostEqual(hyper_linear_regression.error(X_test, y_test), 0.0)
def test_nearest_neighbor(self):
# Solving regression problem with nearest neighbor approach
X = np.array([[-1.0], [1.0]])
y = np.array([-1.5, 1.5])
nearest_neighbor = NearestNeighbor(X, y)
np.testing.assert_almost_equal(nearest_neighbor.predict(X), y)
self.assertAlmostEqual(nearest_neighbor.error(X, y), 0.0)
X_train = np.array([[1.0], [2.0], [3.0]])
y_train = np.array([1.0, 4.0, 9.0])
X_test = np.array([[0.0], [1.5], [4.0]])
y_test = np.array([0.0, 2.25, 16.0])
nearest_neighbor = NearestNeighbor(X_train, y_train)
np.testing.assert_almost_equal(nearest_neighbor.predict(X_train), y_train)
self.assertAlmostEqual(nearest_neighbor.error(X_train, y_train), 0.0)
self.assertGreater(nearest_neighbor.error(X_test, y_test), 0.0)
|
ursina/shaders/screenspace_shaders/camera_vertical_blur.py | bt530/ursina | 1,431 | 11080098 | <gh_stars>1000+
from ursina import Shader
camera_vertical_blur_shader = Shader(
fragment='''
#version 430
uniform sampler2D tex;
in vec2 uv;
uniform float blur_size;
out vec4 color;
void main() {
color = texture(tex, uv).rgba;
vec4 col = vec4(0.);
for(float index=0; index<10; index++) {
// add color at position to color
vec2 offset_uv = uv + vec2(0, (index/9 - 0.5) * blur_size);
col += texture(tex, offset_uv);
}
col = col / 10;
col = 1-((1-color)*(1-col));
color = mix(color, vec4(col.rgb, 1), blur_size*10);
}
''',
default_input=dict(
blur_size = .1
))
if __name__ == '__main__':
from ursina import *
app = Ursina()
window.color = color._16
e = Entity(model='sphere', color=color.orange)
e = Entity(model='cube', y=-1)
camera.shader = camera_vertical_blur_shader
slider = ThinSlider(max=.1, dynamic=True, position=(-.25, -.45))
def set_blur():
print(slider.value)
camera.set_shader_input("blur_size", slider.value)
def update():
camera.set_shader_input('blur_size', mouse.x)
slider.on_value_changed = set_blur
EditorCamera()
app.run()
|
pipeline/elastic.py | cqkenuo/w12scan | 864 | 11080119 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/28 6:14 PM
# @Author : w8ay
# @File : elastic.py
import os
import sys
import time
from datetime import datetime
import requests
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Date, Integer, Keyword, Text, Document, InnerDoc, Nested, Search
from elasticsearch_dsl.connections import connections
try:
import config
except ModuleNotFoundError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from config import ELASTICSEARCH_HOSTS, ELASTICSEARCH_AUTH
connections.create_connection(hosts=ELASTICSEARCH_HOSTS, http_auth=ELASTICSEARCH_AUTH)
es = Elasticsearch(ELASTICSEARCH_HOSTS, http_auth=ELASTICSEARCH_AUTH)
class Location(InnerDoc):
country_id = Keyword()
country = Keyword()
region = Keyword()
class Info(InnerDoc):
extrainfo = Text()
name = Keyword()
port = Integer()
product = Text()
version = Keyword()
class Ips(Document):
location = Nested(Location)
infos = Nested(Info)
target = Keyword()
published_from = Date()
class Index:
name = 'w12scan'
settings = {
"number_of_shards": 2,
}
class Meta:
doc_type = 'ips'
def save(self, **kwargs):
if not self.published_from:
self.published_from = datetime.now()
return super().save(**kwargs)
class Domains(Document):
status_code = Integer()
title = Text()
headers = Text()
body = Text()
Server = Text()
ip = Keyword()
url = Keyword()
CMS = Keyword()
published_from = Date()
class Index:
name = 'w12scan'
settings = {
"number_of_shards": 2,
}
class Meta:
doc_type = 'domains'
def save(self, **kwargs):
if not self.published_from:
self.published_from = datetime.now()
return super().save(**kwargs)
def es_search_ip(ip, deduplicat=False):
_q = {
"query": {
"match": {
"target": ip
}
},
"sort": {
"published_from": {"order": "desc"}
}
}
if deduplicat:
_q["collapse"] = {
"field": "target"
}
s = Search(using=es, index='w12scan', doc_type="ips").from_dict(_q)
if s.count() > 0:
if deduplicat:
return list(s)[0]
else:
return list(s)
return False
def es_search_ip_by_id(id):
_q = {
"query": {
"match": {
"_id": id
}
}
}
s = Search(using=es, index='w12scan').from_dict(_q)
dd = s.execute().to_dict().get("hits")
if dd:
dd = dd.get("hits")
else:
return False
return dd
def es_search_domain_by_url(target):
payload = {
"query": {
"match": {
"url": target
}
},
"sort": {
"published_from": {
"order": "desc"
}
}
}
s = Search(using=es, index='w12scan', doc_type='domains').from_dict(payload)
return list(s)
def es_search_domain_by_ip(ip, deduplicat=False):
payload = {
"query": {
"match": {
"ip": ip
}
}
}
if deduplicat:
payload["collapse"] = {
"field": "url"
}
payload["sort"] = {
"published_from": {"order": "desc"}
}
s = Search(using=es, index='w12scan', doc_type='domains').from_dict(payload)
res = s.execute()
union_domains = []
for hit in res:
cid = hit.meta.id
d = hit.to_dict()
domain = d["url"]
if isinstance(domain, list):
domain = domain[0]
title = d.get("title", "")
union_domains.append({"id": cid, "url": domain, "title": title})
return union_domains
def count_app():
payload = {
"size": 0,
"aggs": {
"genres": {
"terms": {
"field": "app.keyword",
"size": 8
}
}
}
}
s = Search(using=es, index='w12scan', doc_type="domains").from_dict(payload)
res = s.execute().to_dict()
try:
r = res["aggregations"]["genres"]["buckets"]
except KeyError:
r = None
return r
def count_country():
payload = {"size": 0,
"aggs": {
"location": {
"nested": {
"path": "location"
},
"aggs": {
"country": {
"terms": {
"field": "location.country_id",
"size": 8
}
}
}
}
}
}
s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
res = s.execute().to_dict()
try:
r = res["aggregations"]["location"]["country"]["buckets"]
except KeyError:
r = None
return r
def count_name(size=10):
payload = {"size": 0,
"aggs": {
"infos": {
"nested": {
"path": "infos"
},
"aggs": {
"name": {
"terms": {
"field": "infos.name",
"size": size
}
}
}
}
}
}
s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
res = s.execute().to_dict()
try:
r = res["aggregations"]["infos"]["name"]["buckets"]
except KeyError:
r = None
return r
def count_port(size=10):
payload = {"size": 0,
"aggs": {
"infos": {
"nested": {
"path": "infos"
},
"aggs": {
"port": {
"terms": {
"field": "infos.port",
"size": size
}
}
}
}
}
}
s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
res = s.execute().to_dict()
try:
r = res["aggregations"]["infos"]["port"]["buckets"]
except KeyError:
r = None
return r
def total_data():
ips = Search(using=es, index='w12scan', doc_type='ips')
domains = Search(using=es, index='w12scan', doc_type='domains')
return ips.count(), domains.count()
def total_bug():
payload = {"query": {"exists": {"field": "bugs"}
}, "size": 0
}
s = Search(using=es, index='w12scan').from_dict(payload)
res = s.execute().to_dict()
return res["hits"]["total"]
def get_bug_count(doc_type, key):
payload = {'query': {'bool': {'must': [{'exists': {'field': 'bugs.{0}'.format(key)}}]}}, 'from': 0, 'size': 20,
'sort': {'published_from': {'order': 'desc'}}}
s = Search(using=es, index='w12scan', doc_type=doc_type).from_dict(payload)
res = s.count()
return res
if __name__ == '__main__':
while 1:
try:
r = requests.get("http://" + ELASTICSEARCH_HOSTS[0], auth=ELASTICSEARCH_AUTH)
if r.status_code != 200:
continue
except:
print("retrying...")
time.sleep(2)
continue
try:
Ips.init()
Domains.init()
break
except:
time.sleep(1)
continue
|
tests/syntax/test_basic.py | abol-karimi/Scenic | 141 | 11080134 |
import pytest
import scenic
from scenic.core.errors import InvalidScenarioError, RuntimeParseError
from scenic.core.object_types import Object
from tests.utils import compileScenic, sampleScene, sampleEgo, sampleParamPFrom
def test_empty():
with pytest.raises(InvalidScenarioError):
compileScenic('')
def test_minimal():
scenario = compileScenic('ego = Object')
assert len(scenario.objects) == 1
obj = scenario.objects[0]
assert type(obj) is Object
assert obj is scenario.egoObject
assert len(scenario.params) == 0
assert len(scenario.requirements) == 0
scene = sampleScene(scenario, maxIterations=1)
assert len(scene.objects) == 1
obj = scene.objects[0]
assert type(obj) is Object
assert obj is scene.egoObject
assert len(scene.params) == 0
def test_ego_second():
scenario = compileScenic('Object\n' 'ego = Object at 0 @ -5')
assert len(scenario.objects) == 2
obj = scenario.objects[0]
assert obj is scenario.egoObject
scene = sampleScene(scenario, maxIterations=1)
assert len(scene.objects) == 2
obj = scene.objects[0]
assert obj is scene.egoObject
def test_ego_nonobject():
with pytest.raises(RuntimeParseError):
compileScenic('ego = Point')
with pytest.raises(RuntimeParseError):
compileScenic('ego = dict()')
def test_ego_undefined():
with pytest.raises(RuntimeParseError):
compileScenic('x = ego\n' 'ego = Object')
def test_noninterference():
scenario = compileScenic('ego = Object')
assert len(scenario.objects) == 1
ego1 = scenario.egoObject
for i in range(5):
scene = sampleScene(scenario, maxIterations=1)
scenario = compileScenic('ego = Object')
assert len(scenario.objects) == 1
ego2 = scenario.egoObject
assert ego1 is not ego2
def test_param():
p = sampleParamPFrom('ego = Object\n' 'param p = Range(3, 5)')
assert 3 <= p <= 5
p = sampleParamPFrom('ego = Object\n' 'param p = [1, 4, 9]')
assert type(p) is list
assert p == [1, 4, 9]
p = sampleParamPFrom('ego = Object\n' 'param p = (1, 4)')
assert type(p) is tuple
assert p == (1, 4)
def test_quoted_param():
p = sampleParamPFrom('ego = Object\n' 'param "p" = Range(3, 5)')
assert 3 <= p <= 5
def test_mutate():
scenario = compileScenic("""
ego = Object at 3@1, facing 0
mutate
""")
ego1 = sampleEgo(scenario)
assert ego1.position.x != pytest.approx(3)
assert ego1.position.y != pytest.approx(1)
assert ego1.heading != pytest.approx(0)
def test_verbose():
for verb in range(4):
scenic.syntax.translator.verbosity = verb
compileScenic('ego = Object')
scenic.syntax.translator.verbosity = 1
def test_dump_python():
scenic.syntax.translator.dumpTranslatedPython = True
compileScenic('ego = Object')
scenic.syntax.translator.dumpTranslatedPython = False
scenic.syntax.translator.dumpFinalAST = True
compileScenic('ego = Object')
scenic.syntax.translator.dumpFinalAST = False
|
scripts/gen_crops_from_obj_tracker_logs.py | mageofboy/pylot | 231 | 11080136 | <filename>scripts/gen_crops_from_obj_tracker_logs.py<gh_stars>100-1000
"""
Takes in a folder of observations (center-[timestep].png images,
mot-[timestep].txt tracker logs) and creates and saves crops of the bounding
boxes. Useful for training the feature extractor model for DeepSORT tracker.
Example usage:
python gen_crops_from_obj_tracker_logs.py --data sample_data --out crops
"""
import cv2
import glob
import json
import numpy as np
import os
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_bool("bbox_json", False,
"If True, generate crops from bbox json log files \
instead of mot text log files.")
flags.DEFINE_string("data", "data", "Path to data folder.")
flags.DEFINE_string("out", "crops", "Path to dir for output data.")
def merge_bbox_json_files(dir_path, result_file="combined_logs.txt"):
"""
Merges individual bbox json files into one txt file of detections.
These files contain lists of detections, with each detection being
a 4-item list with format: [class label, detailed label, id, bbox].
The bbox has format [[xmin, ymin], [xmax, ymax]].
Args:
dir_path: Path to directory that holds bboxes-[timestep].json files.
result_file: Name of output file containing merged log files.
"""
merged_mot_text = []
bbox_json_logs = sorted(glob.glob(dir_path + "/bboxes*"),
key=lambda line: int(line.split("bboxes-")[1][:-5])) # sort by timestep
print("Found {} bbox json files.".format(len(bbox_json_logs)))
for log in bbox_json_logs:
timestamp = log.split("bboxes-")[1][:-5]
with open(log, "r") as f:
data = json.load(f)
for detection in data:
general_label, detailed_label, obj_id, bbox_coords = detection
obj_id = "+".join([detailed_label, str(obj_id)])
x, y = bbox_coords[0]
w, h = bbox_coords[1][0] - x, bbox_coords[1][1] - y
mot_text_line = ",".join([timestamp, obj_id, str(x), str(y), str(w), str(h)])
merged_mot_text.append(mot_text_line)
with open(result_file, "w") as f:
f.write('\n'.join(merged_mot_text))
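# Illustrative sketch of the data flow above (hypothetical values, not taken from
# a real log): a file named bboxes-100.json containing the single detection
#   [["vehicle", "car", 3, [[10, 20], [110, 220]]]]
# would be written to the merged log as the line
#   100,car+3,10,20,100,200
# i.e. timestamp, detailed_label+id, x, y, w, h.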
def merge_mot_txt_files(dir_path, result_file="combined_logs.txt"):
"""
Merges individual mot-format txt files into one txt file of detections.
Args:
dir_path: Path to directory that holds mot-[timestep].txt files.
result_file: Name of output file containing merged log files.
"""
relevant_files = sorted(
glob.glob(dir_path + "/*.txt"),
key=lambda line: int(line.split("mot-")[1][:-4])) # sort by timestamp
print("Found {} mot txt files.".format(len(relevant_files)))
with open(result_file, "w") as combined_file:
combined_text = []
for f in relevant_files:
with open(f, "r") as sub_file:
combined_text.extend(sub_file.readlines())
combined_file.writelines(combined_text)
def get_crops(mot_detections_file, imgs_path, out_dir, area_tol=1500):
"""
Uses detections/bboxes from mot_detections_file to crop bboxes from
corresponding images at imgs_path. Only saves crops with area > area_tol.
Writes new log file that is filtered to only contain the saved crops.
"""
with open(mot_detections_file, "r") as f:
mot_data = f.readlines()
kept_crops_infos = []
for line in mot_data:
info = line.split(",")
timestamp, obj_id, x, y, w, h = info[0], info[1], int(info[2]), int(
info[3]), int(info[4]), int(info[5])
img = cv2.imread(imgs_path + "/center-{}.png".format(timestamp))
crop = img[y:y + h, x:x + w, :]
if h * w >= area_tol:
cv2.imwrite(out_dir + "/crop-{}-{}.png".format(timestamp, obj_id),
crop)
kept_crops_infos.append(line)
print("Keeping {}/{} crops with area >= {}".format(len(kept_crops_infos),
len(mot_data),
area_tol))
with open(out_dir + "/filtered_crops_logs.txt", "w") as f:
f.writelines(kept_crops_infos)
def convert_crops_to_pytorch_imagefolder_structure(crops_dir):
"""
Converts crops to training and test set (~90/10 split).
All crops for a certain object id are grouped into 1 directory.
(i.e., crops/train/object_id/all_crops_of_this_object_id.png)
"""
files = glob.glob(crops_dir + "/*.png")
obj_id_to_crops = {}
for f in files:
obj_id = f.split(".png")[0].split("-")[-1]
if obj_id not in obj_id_to_crops:
obj_id_to_crops[obj_id] = {f}
else:
obj_id_to_crops[obj_id].add(f)
os.mkdir(crops_dir + "/train")
os.mkdir(crops_dir + "/test")
for obj_id in obj_id_to_crops:
os.mkdir(crops_dir + "/train/" + obj_id)
os.mkdir(crops_dir + "/test/" + obj_id)
for f in obj_id_to_crops[obj_id]:
img_name = f.split("/")[-1]
if np.random.randint(0, 10):
os.rename(f, crops_dir + "/train/" + obj_id + "/" + img_name)
else:
os.rename(f, crops_dir + "/test/" + obj_id + "/" + img_name)
def main(_):
log_file_path = FLAGS.data + "/combined_logs.txt"
if FLAGS.bbox_json:
merge_bbox_json_files(FLAGS.data, result_file=log_file_path)
else:
merge_mot_txt_files(FLAGS.data, result_file=log_file_path)
get_crops(log_file_path, FLAGS.data, FLAGS.out)
convert_crops_to_pytorch_imagefolder_structure(FLAGS.out)
if __name__ == "__main__":
app.run(main)
|
tests/test_swagger.py | Vitalts/sanic-openapi | 236 | 11080151 | <filename>tests/test_swagger.py<gh_stars>100-1000
import pytest
from sanic import Blueprint
from sanic.constants import HTTP_METHODS
from sanic.response import text
from sanic.views import CompositionView, HTTPMethodView
METHODS = [method.lower() for method in HTTP_METHODS]
class SimpleView(HTTPMethodView):
def get(self, request):
return text("I am get method")
def post(self, request):
return text("I am post method")
def put(self, request):
return text("I am put method")
def patch(self, request):
return text("I am patch method")
def delete(self, request):
return text("I am delete method")
def head(self, request):
return text("I am head method")
def options(self, request):
return text("I am options method")
def get_handler(request):
return text("I am a get method")
view = CompositionView()
view.add(["GET"], get_handler)
view.add(["POST", "PUT"], lambda request: text("I am a post/put method"))
def test_swagger_endpoint(app):
_, response = app.test_client.get("/swagger/")
assert response.status == 200
assert (
response.content_type == "text/html" # pre sanic21.3
or response.content_type == "text/html; charset=utf-8"
) # post sanic21.3
def test_swagger_endpoint_redirect(app):
_, response = app.test_client.get("/swagger")
assert response.status == 200
assert (
response.content_type == "text/html" # pre sanic21.3
or response.content_type == "text/html; charset=utf-8"
) # post sanic21.3
assert len(response.history) == 1
status = getattr(
response.history[0],
"status",
getattr(response.history[0], "status_code", None),
) # For request-async compatibility
assert status == 302
assert str(response.history[0].url).endswith("/swagger")
@pytest.mark.skip(
reason="https://github.com/sanic-org/sanic-openapi/pull/111#pullrequestreview-255118509" # noqa
)
def test_swagger_json(app):
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert swagger_json.get("swagger") == "2.0"
assert swagger_json.get("definitions") == {}
assert swagger_json.get("tags") == []
assert swagger_json.get("paths") == {}
@pytest.mark.parametrize("method", METHODS)
def test_document_route(app, method):
@app.route("/", methods=[method])
def test(request):
return text("test")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
# sanic 21.3 changes the route.name to include the app name
assert "test" in swagger_json["paths"]["/"][method]["operationId"]
swagger_json["paths"]["/"][method]["operationId"] = "test"
assert swagger_json["paths"] == {
"/": {
method: {
"operationId": "test",
"consumes": ["application/json"],
"produces": ["application/json"],
"parameters": [],
"responses": {"200": {"description": "OK"}},
}
}
}
@pytest.mark.parametrize("method", METHODS)
def test_document_blueprint_route(app, method):
bp = Blueprint("test")
@bp.route("/", methods=[method])
def test(request):
return text("test")
app.blueprint(bp)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert {"name": "test"} in swagger_json["tags"]
# sanic 21.3 changes the route.name to include the app name
assert "test.test" in swagger_json["paths"]["/"][method]["operationId"]
swagger_json["paths"]["/"][method]["operationId"] = "test.test"
assert swagger_json["paths"] == {
"/": {
method: {
"operationId": "test.test",
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["test"],
"parameters": [],
"responses": {"200": {"description": "OK"}},
}
}
}
def test_class_based_view(app):
"""
In sanic_openapi/swagger.py#n124, class based view will not document
endpoint with options method.
"""
app.add_route(SimpleView.as_view(), "/")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert sorted(set(METHODS)) == sorted(
set(swagger_json["paths"]["/"].keys())
)
def test_blueprint_class_based_view(app):
bp = Blueprint("test")
bp.add_route(SimpleView.as_view(), "/")
app.blueprint(bp)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert sorted(set(METHODS)) == sorted(
set(swagger_json["paths"]["/"].keys())
)
assert {"name": "test"} in swagger_json["tags"]
def test_document_compositionview(app):
app.add_route(view, "/")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert set(swagger_json["paths"]["/"].keys()) == set(
["get", "post", "put"]
)
assert {"name": "test"} in swagger_json["tags"]
@pytest.mark.skip(reason="Not support now.")
def test_document_blueprint_compositionview(app):
bp = Blueprint("test")
bp.add_route(view, "/")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert set(swagger_json["paths"]["/"].keys()) == set(
["get", "post", "put"]
)
def test_swagger_ui_config(app):
_, response = app.test_client.get("/swagger/swagger-config")
assert response.status == 200
assert response.content_type == "application/json"
swagger_config = response.json
assert swagger_config == {}
swagger_ui_configuration = {
"validatorUrl": None, # Disable Swagger validator
"displayRequestDuration": True,
"docExpansion": "full",
}
app.config.SWAGGER_UI_CONFIGURATION = swagger_ui_configuration
_, response = app.test_client.get("/swagger/swagger-config")
assert response.status == 200
assert response.content_type == "application/json"
swagger_config = response.json
assert swagger_config == swagger_ui_configuration
@pytest.mark.parametrize(
"configs",
[
{
"API_HOST": "http://0.0.0.0",
"API_BASEPATH": "/api",
"API_VERSION": "0.1.0",
"API_TITLE": "Sanic OpenAPI test",
"API_DESCRIPTION": "The API doc",
"API_TERMS_OF_SERVICE": "Use with caution!",
"API_CONTACT_EMAIL": "<EMAIL>",
"API_LICENSE_NAME": "MIT",
"API_LICENSE_URL": "https://choosealicense.com/licenses/mit/",
},
{
"API_HOST": "http://test.sanic-openapi",
"API_BASEPATH": "/api_test",
"API_VERSION": None,
"API_TITLE": None,
"API_DESCRIPTION": None,
"API_TERMS_OF_SERVICE": None,
"API_CONTACT_EMAIL": None,
"API_LICENSE_NAME": None,
"API_LICENSE_URL": None,
},
],
)
def test_configs(app, configs):
app.config.update(configs)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert swagger_json["host"] == configs["API_HOST"]
assert swagger_json["basePath"] == configs["API_BASEPATH"]
info = swagger_json.get("info")
assert isinstance(info, dict)
assert info["version"] == configs["API_VERSION"]
assert info["title"] == configs["API_TITLE"]
assert info["description"] == configs["API_DESCRIPTION"]
assert info["termsOfService"] == configs["API_TERMS_OF_SERVICE"]
assert info["contact"]["email"] == configs["API_CONTACT_EMAIL"]
assert info["license"]["name"] == configs["API_LICENSE_NAME"]
assert info["license"]["url"] == configs["API_LICENSE_URL"]
def test_skip_static_file(app):
app.static("/static", __file__)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert "/static" not in swagger_json["paths"]
def test_uri_parsed(app):
@app.get("/<name>")
def test(request, name):
return text(name)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert "/{name}" in swagger_json["paths"]
def test_route_filter_all(app):
app.config.update({"API_URI_FILTER": "all"})
@app.get("/test")
def test(request):
return text("test")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert "/test" in swagger_json["paths"]
assert "/test/" in swagger_json["paths"]
def test_route_filter_default(app):
app.config.update({"API_URI_FILTER": "slash"})
@app.get("/test")
def test(request):
return text("test")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert "/test" not in swagger_json["paths"]
assert "/test/" in swagger_json["paths"]
def test_route_filter_slash(app):
@app.get("/test")
def test(request):
return text("test")
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
assert "/test" in swagger_json["paths"]
assert "/test/" not in swagger_json["paths"]
|
awacs/ecr.py | cloudtools/awacs | 358 | 11080165 | <reponame>cloudtools/awacs
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Elastic Container Registry"
prefix = "ecr"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchCheckLayerAvailability = Action("BatchCheckLayerAvailability")
BatchDeleteImage = Action("BatchDeleteImage")
BatchGetImage = Action("BatchGetImage")
CompleteLayerUpload = Action("CompleteLayerUpload")
CreateRepository = Action("CreateRepository")
DeleteLifecyclePolicy = Action("DeleteLifecyclePolicy")
DeleteRegistryPolicy = Action("DeleteRegistryPolicy")
DeleteRepository = Action("DeleteRepository")
DeleteRepositoryPolicy = Action("DeleteRepositoryPolicy")
DescribeImageReplicationStatus = Action("DescribeImageReplicationStatus")
DescribeImageScanFindings = Action("DescribeImageScanFindings")
DescribeImages = Action("DescribeImages")
DescribeRegistry = Action("DescribeRegistry")
DescribeRepositories = Action("DescribeRepositories")
GetAuthorizationToken = Action("GetAuthorizationToken")
GetDownloadUrlForLayer = Action("GetDownloadUrlForLayer")
GetLifecyclePolicy = Action("GetLifecyclePolicy")
GetLifecyclePolicyPreview = Action("GetLifecyclePolicyPreview")
GetRegistryPolicy = Action("GetRegistryPolicy")
GetRepositoryPolicy = Action("GetRepositoryPolicy")
InitiateLayerUpload = Action("InitiateLayerUpload")
ListImages = Action("ListImages")
ListTagsForResource = Action("ListTagsForResource")
PutImage = Action("PutImage")
PutImageScanningConfiguration = Action("PutImageScanningConfiguration")
PutImageTagMutability = Action("PutImageTagMutability")
PutLifecyclePolicy = Action("PutLifecyclePolicy")
PutRegistryPolicy = Action("PutRegistryPolicy")
PutReplicationConfiguration = Action("PutReplicationConfiguration")
ReplicateImage = Action("ReplicateImage")
SetRepositoryPolicy = Action("SetRepositoryPolicy")
StartImageScan = Action("StartImageScan")
StartLifecyclePolicyPreview = Action("StartLifecyclePolicyPreview")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UploadLayerPart = Action("UploadLayerPart")
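# A short usage sketch, kept as a comment: it assumes the Statement/Allow/
# PolicyDocument helpers from awacs.aws and uses a made-up account id and
# repository name purely for illustration.
#
#   from awacs.aws import Allow, PolicyDocument, Statement
#   from awacs import ecr
#
#   pull_policy = PolicyDocument(
#       Version="2012-10-17",
#       Statement=[
#           Statement(
#               Effect=Allow,
#               Action=[ecr.GetAuthorizationToken, ecr.BatchGetImage,
#                       ecr.GetDownloadUrlForLayer],
#               Resource=["arn:aws:ecr:us-east-1:123456789012:repository/example"],
#           )
#       ],
#   )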
|
example/dotenv_loaded_if_enabled/app.py | RonnyPfannschmidt/dynaconf | 2,293 | 11080166 | from dynaconf import Dynaconf
settings = Dynaconf(settings_file="settings.toml", load_dotenv=True)
assert settings.name == "Bruno"
assert "FOO" in settings
assert settings.foo == "NAR"
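# A sketch of the companion files this example presumably relies on (their real
# contents live alongside this script in the dynaconf repository, not here):
#   settings.toml  ->  name = "Bruno"
#   .env           ->  DYNACONF_FOO=NAR   (picked up via dynaconf's default DYNACONF_ prefix)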
|
mediasoup-client/deps/webrtc/src/build/fuchsia/update_sdk.py | skgwazap/mediasoup-client-android | 128 | 11080201 | <filename>mediasoup-client/deps/webrtc/src/build/fuchsia/update_sdk.py
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Fuchsia SDK to the given revision. Should be used in a 'hooks_os'
entry so that it only runs when .gclient's target_os includes 'fuchsia'."""
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
from common import GetHostOsFromPlatform, GetHostArchFromPlatform
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build'))
import find_depot_tools
SDK_SUBDIRS = ["arch", "pkg", "qemu", "sysroot", "target",
"toolchain_libs", "tools"]
EXTRA_SDK_HASH_PREFIX = ''
def GetSdkGeneration(hash):
if not hash:
return None
cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'), 'ls',
'-L', GetBucketForPlatform() + hash]
sdk_details = subprocess.check_output(cmd)
m = re.search('Generation:\s*(\d*)', sdk_details)
if not m:
return None
return int(m.group(1))
def GetSdkHashForPlatform():
filename = '{platform}.sdk.sha1'.format(platform = GetHostOsFromPlatform())
# Get the hash of the SDK in chromium.
sdk_hash = None
hash_file = os.path.join(os.path.dirname(__file__), filename)
with open(hash_file, 'r') as f:
sdk_hash = f.read().strip()
# Get the hash of the SDK with the extra prefix.
extra_sdk_hash = None
if EXTRA_SDK_HASH_PREFIX:
extra_hash_file = os.path.join(os.path.dirname(__file__),
EXTRA_SDK_HASH_PREFIX + filename)
with open(extra_hash_file, 'r') as f:
extra_sdk_hash = f.read().strip()
# If both files are empty, return an error.
if not sdk_hash and not extra_sdk_hash:
print >>sys.stderr, 'No SHA1 found in {} or {}'.format(
hash_file, extra_hash_file)
return 1
# Return the newer SDK based on the generation number.
sdk_generation = GetSdkGeneration(sdk_hash)
extra_sdk_generation = GetSdkGeneration(extra_sdk_hash)
if extra_sdk_generation > sdk_generation:
return extra_sdk_hash
return sdk_hash
def GetBucketForPlatform():
return 'gs://fuchsia/sdk/core/{platform}-amd64/'.format(
platform = GetHostOsFromPlatform())
def EnsureDirExists(path):
if not os.path.exists(path):
print 'Creating directory %s' % path
os.makedirs(path)
# Removes previous SDK from the specified path if it's detected there.
def Cleanup(path):
hash_file = os.path.join(path, '.hash')
if os.path.exists(hash_file):
print 'Removing old SDK from %s.' % path
for d in SDK_SUBDIRS:
to_remove = os.path.join(path, d)
if os.path.isdir(to_remove):
shutil.rmtree(to_remove)
os.remove(hash_file)
# Updates the modification timestamps of |path| and its contents to the
# current time.
def UpdateTimestampsRecursive(path):
for root, dirs, files in os.walk(path):
for f in files:
os.utime(os.path.join(root, f), None)
for d in dirs:
os.utime(os.path.join(root, d), None)
def main():
if len(sys.argv) != 1:
print >>sys.stderr, 'usage: %s' % sys.argv[0]
return 1
# Quietly exit if there's no SDK support for this platform.
try:
GetHostOsFromPlatform()
except:
return 0
# Previously SDK was unpacked in //third_party/fuchsia-sdk instead of
# //third_party/fuchsia-sdk/sdk . Remove the old files if they are still
# there.
sdk_root = os.path.join(REPOSITORY_ROOT, 'third_party', 'fuchsia-sdk')
Cleanup(sdk_root)
sdk_hash = GetSdkHashForPlatform()
if not sdk_hash:
return 1
output_dir = os.path.join(sdk_root, 'sdk')
hash_filename = os.path.join(output_dir, '.hash')
if os.path.exists(hash_filename):
with open(hash_filename, 'r') as f:
if f.read().strip() == sdk_hash:
# Nothing to do. Generate sdk/BUILD.gn anyways, in case the conversion
# script changed.
subprocess.check_call([os.path.join(sdk_root, 'gen_build_defs.py')])
return 0
print 'Downloading SDK %s...' % sdk_hash
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
fd, tmp = tempfile.mkstemp()
os.close(fd)
try:
cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),
'cp', GetBucketForPlatform() + sdk_hash, tmp]
subprocess.check_call(cmd)
with open(tmp, 'rb') as f:
EnsureDirExists(output_dir)
tarfile.open(mode='r:gz', fileobj=f).extractall(path=output_dir)
finally:
os.remove(tmp)
# Generate sdk/BUILD.gn.
subprocess.check_call([os.path.join(sdk_root, 'gen_build_defs.py')])
with open(hash_filename, 'w') as f:
f.write(sdk_hash)
UpdateTimestampsRecursive(output_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
|
shared/small_roots/ernst.py | jvdsn/crypto-attacks | 139 | 11080212 | import logging
from math import gcd
from sage.all import RR
from sage.all import ZZ
from shared import small_roots
def integer_trivariate_1(f, m, t, W, X, Y, Z, check_bounds=True, roots_method="resultants"):
"""
Computes small integer roots of a trivariate polynomial.
More information: Ernst M. et al., "Partial Key Exposure Attacks on RSA Up to Full Size Exponents" (Section 4.1.1)
:param f: the polynomial
:param m: the parameter m
:param t: the parameter t
:param W: the parameter W
:param X: an approximate bound on the x roots
:param Y: an approximate bound on the y roots
:param Z: an approximate bound on the z roots
:param check_bounds: whether or not we should check bounds (default: True)
:param roots_method: the method to use to find roots (default: "resultants")
    :return: a generator generating small roots (tuples of x, y and z roots) of the polynomial
"""
pr = f.parent()
x, y, z = pr.gens()
tau = t / m
if check_bounds and RR(X) ** (1 + 3 * tau) * RR(Y) ** (2 + 3 * tau) * RR(Z) ** (1 + 3 * tau + 3 * tau ** 2) > RR(W) ** (1 + 3 * tau):
logging.debug(f"Bound check failed for m = {m}, t = {t}")
return
R = f.constant_coefficient()
while gcd(R, X) != 1:
X += 1
while gcd(R, Y) != 1:
Y += 1
while gcd(R, Z) != 1:
Z += 1
while gcd(R, W) != 1:
W += 1
n = (X * Y) ** m * Z ** (m + t) * W
assert gcd(R, n) == 1
f_ = (pow(R, -1, n) * f % n).change_ring(ZZ)
logging.debug("Generating shifts...")
shifts = set()
monomials = set()
for i in range(m + 1):
for j in range(m - i + 1):
for k in range(j + 1):
g = x ** i * y ** j * z ** k * f_ * X ** (m - i) * Y ** (m - j) * Z ** (m + t - k)
shifts.add(g)
monomials.update(g.monomials())
for k in range(j + 1, j + t + 1):
h = x ** i * y ** j * z ** k * f_ * X ** (m - i) * Y ** (m - j) * Z ** (m + t - k)
shifts.add(h)
monomials.update(h.monomials())
for i in range(m + 2):
j = m + 1 - i
for k in range(j + 1):
g_ = n * x ** i * y ** j * z ** k
shifts.add(g_)
monomials.update(g_.monomials())
for k in range(j + 1, j + t + 1):
h_ = n * x ** i * y ** j * z ** k
shifts.add(h_)
monomials.update(h_.monomials())
L = small_roots.fill_lattice(shifts, monomials, [X, Y, Z])
L = small_roots.reduce(L)
polynomials = small_roots.reconstruct_polynomials(L, monomials, [X, Y, Z])
for roots in small_roots.find_roots(f, polynomials, pr, method=roots_method):
yield roots[x], roots[y], roots[z]
def integer_trivariate_2(f, m, t, W, X, Y, Z, check_bounds=True, roots_method="resultants"):
"""
Computes small integer roots of a trivariate polynomial.
More information: Ernst M. et al., "Partial Key Exposure Attacks on RSA Up to Full Size Exponents" (Section 4.1.2)
:param f: the polynomial
:param m: the parameter m
:param t: the parameter t
:param W: the parameter W
:param X: an approximate bound on the x roots
:param Y: an approximate bound on the y roots
:param Z: an approximate bound on the z roots
:param check_bounds: whether or not we should check bounds (default: True)
:param roots_method: the method to use to find roots (default: "resultants")
    :return: a generator generating small roots (tuples of x, y and z roots) of the polynomial
"""
pr = f.parent()
x, y, z = pr.gens()
tau = t / m
if check_bounds and RR(X) ** (2 + 3 * tau) * RR(Y) ** (3 + 6 * tau + 3 * tau ** 2) * RR(Z) ** (3 + 3 * tau) > RR(W) ** (2 + 3 * tau):
logging.debug(f"Bound check failed for m = {m}, t = {t}")
return
R = f.constant_coefficient()
while gcd(R, X) != 1:
X += 1
while gcd(R, Y) != 1:
Y += 1
while gcd(R, Z) != 1:
Z += 1
while gcd(R, W) != 1:
W += 1
n = X ** m * Y ** (m + t) * Z ** m * W
assert gcd(R, n) == 1
f_ = (pow(R, -1, n) * f % n).change_ring(ZZ)
logging.debug("Generating shifts...")
shifts = set()
monomials = set()
for i in range(m + 1):
for j in range(m - i + 1):
for k in range(m - i + 1):
g = x ** i * y ** j * z ** k * f_ * X ** (m - i) * Y ** (m + t - j) * Z ** (m - k)
shifts.add(g)
monomials.update(g.monomials())
for j in range(m - i + 1, m - i + t + 1):
for k in range(m - i + 1):
h = x ** i * y ** j * z ** k * f_ * X ** (m - i) * Y ** (m + t - j) * Z ** (m - k)
shifts.add(h)
monomials.update(h.monomials())
for i in range(m + 2):
for j in range(m + t + 2 - i):
k = m + 1 - i
g_ = n * x ** i * y ** j * z ** k
shifts.add(g_)
monomials.update(g_.monomials())
for i in range(m + 1):
j = m + t + 1 - i
for k in range(m - i + 1):
h_ = n * x ** i * y ** j * z ** k
shifts.add(h_)
monomials.update(h_.monomials())
L = small_roots.fill_lattice(shifts, monomials, [X, Y, Z])
L = small_roots.reduce(L)
polynomials = small_roots.reconstruct_polynomials(L, monomials, [X, Y, Z])
for roots in small_roots.find_roots(f, polynomials, pr, method=roots_method):
yield roots[x], roots[y], roots[z]
|
cumulusci/robotframework/pageobjects/PageObjects.py | SalesforceFoundation/CumulusCI | 109 | 11080225 | <reponame>SalesforceFoundation/CumulusCI
import inspect
import sys
from pathlib import Path
import robot.utils
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from cumulusci.robotframework.pageobjects.baseobjects import BasePage
from cumulusci.robotframework.utils import capture_screenshot_on_error
def get_keyword_names(obj):
"""Returns a list of method names for the given object
This excludes methods that begin with an underscore, and
also excludes the special method `get_keyword_names`.
"""
names = [
member[0]
for member in inspect.getmembers(obj, inspect.isroutine)
if (not member[0].startswith("_")) and member[0] != "get_keyword_names"
]
return names
def pageobject(page_type, object_name=None):
"""A decorator to designate a class as a page object"""
BuiltIn().log("importing page object {} {}".format(page_type, object_name), "DEBUG")
def wrapper(cls):
key = (page_type, object_name if object_name else "")
PageObjects.registry[key] = cls
cls._page_type = page_type
if getattr(cls, "_object_name", None) is None:
cls._object_name = object_name
else:
# this page object uses an alias for the object (ie: the name
# and _object_name do not match). Let's add this object name
# into the registry so it can be called with either the alias
# or the actual object name
alias_key = (page_type, cls._object_name)
PageObjects.registry[alias_key] = cls
return cls
return wrapper
class PageObjects(object):
"""Keyword library for importing and using page objects
When importing, you can include one or more paths to python
files that define page objects. For example, if you have a set
of classes in robot/HEDA/resources/PageObjects.py, you can import
this library into a test case like this:
| Library cumulusci.robotframework.PageObjects
| ... robot/HEDA/resources/PageObjects.py
Page object classes need to use the @pageobject decorator from
cumulusci.robotframework.pageobjects. The decorator takes two
parameters: page_type and object_name. Both are arbitrary strings,
but together should uniquely identify a collection of keywords for
a page or objects on a page.
Examples of page_type are Listing, Home, Detail, etc. Object types
can be actual object types (Contact), custom object
(Custom_object__c) or a logical name for a type of page (eg:
AppointmentManager).
Example:
| from cumulusci.robotframework.pageobjects import BasePage
| from cumulusci.robotframework.pageobjects import pageobject
| ...
| @pageobject(page_type="Detail", object_name="Custom__c")
| class CustomDetailPage(BasePage):
| ...
"""
ROBOT_LIBRARY_SCOPE = "TEST SUITE"
registry = {}
def __init__(self, *args):
self.builtin = BuiltIn()
logger.debug("initializing PageObjects...")
importer = robot.utils.Importer()
for file_path in args:
path = self._find_file_in_pythonpath(file_path)
if path:
try:
importer.import_class_or_module_by_path(str(path.resolve()))
logger.debug(f"imported page object from {path}")
except Exception as e:
raise ImportError(
f"Unable to import page object '{file_path}': ({e})", path=path
)
else:
raise ImportError(f"Unable to find page object file '{file_path}'")
self.current_page_object = None
# Start with this library at the front of the library search order;
# that may change as page objects are loaded.
try:
self.builtin.set_library_search_order("PageObjects")
except RobotNotRunningError:
# this should only happen when trying to load this library
# via the robot_libdoc task, in which case we don't care
# whether this throws an error or not.
pass
def _find_file_in_pythonpath(self, filename):
for directory in sys.path:
path = Path(directory) / filename
if path.exists():
return path
return None
@classmethod
def _reset(cls):
"""Reset the internal data structures used to manage page objects
This is to aid testing. It probably shouldn't be used at any other time.
"""
for pobj in cls.registry.values():
if pobj.__module__ in sys.modules:
del sys.modules[pobj.__module__]
cls.registry = {}
@property
def selenium(self):
return self.builtin.get_library_instance("SeleniumLibrary")
def __getattr__(self, name):
"""Return the keyword from the current page object
This method is required by robot's dynamic library api
"""
if self.current_page_object is None:
raise AttributeError(name)
return getattr(self.current_page_object, name)
def get_keyword_names(self):
"""
This method is required by robot's dynamic library api
"""
names = get_keyword_names(self)
if self.current_page_object is not None:
names = names + get_keyword_names(self.current_page_object)
return names
def log_page_object_keywords(self):
"""Logs page objects and their keywords for all page objects
which have been imported into the current suite.
"""
for key in sorted(self.registry.keys()):
pobj = self.registry[key]
keywords = get_keyword_names(pobj)
logger.info("{}: {}".format(key, ", ".join(keywords)))
def get_page_object(self, page_type, object_name):
"""Return an instance of a page object
This is useful if you want to call a single page object method
from some other keyword without having to go to another page
or load the page object into a page.
This works a lot like robot's built-in "get library instance"
keyword, but you can specify the page object by page type
and object name rather than the library name, and it will
autoload the appropriate library (assuming its module has
been imported).
"""
if (page_type, object_name) in self.registry:
cls = self.registry[(page_type, object_name)]
logger.debug(f"using page object class {cls}")
instance = cls()
instance._libname = instance.__class__.__name__
else:
# Page object has not been registered. Try to find
# an appropriate generic class. For example, if
# the requested page is "Listing", "Contact", look
# for a "ListingPage" class. If we find it, we'll
# create a library named "ContactListingPage"
instance = None
for subclass in BasePage.__subclasses__():
if getattr(subclass, "_page_type", None) == page_type:
instance = subclass(object_name)
instance._libname = "{}{}Page".format(
object_name, page_type
)  # eg: ContactListingPage
break
if instance is None:
raise Exception(
"Unable to find a page object for '{} {}'".format(
page_type, object_name
)
)
try:
pobj = self.builtin.get_library_instance(instance._libname)
except Exception:
# Hasn't been imported. Attempt to import it with the given name
# for the given object; If this fails, just let it bubble up
# because there's nothing else we can do.
self.builtin.import_library(
"cumulusci.robotframework.pageobjects._PageObjectLibrary",
instance,
instance._libname,
"WITH NAME",
instance._libname,
)
# sure would be nice if import_library returned the instance. Just sayin'.
pobj = self.builtin.get_library_instance(instance._libname)
return pobj
@capture_screenshot_on_error
def go_to_page(self, page_type, object_name, *args, **kwargs):
"""Go to the page of the given page object.
The URL will be computed from the page_type and object_name
associated with the object, plus possibly additional arguments.
Different pages support different additional arguments. For
example, a Listing page supports the keyword argument `filter_name`,
and a Detail page can be given an object id, or parameters for
looking up the object id.
If this keyword is able to navigate to a page, the keyword
`load page object` will automatically be called to load the keywords
for the page.
Custom page objects may define the function `_go_to_page`,
which will be passed in all of the keyword arguments from this
keyword. This allows each page object to define its own URL
mapping using whatever algorithm it chooses. The only
requirement of the function is that it should compute an
appropriate url and then call `self.selenium.go_to` with the
URL.
It is also recommended that the keyword wait until it knows
that the page has finished rendering before returning (eg: by
calling `self.salesforce.wait_until_loading_is_complete()`)
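Example (illustrative; ${contact id} is a placeholder for a record id):
| Go to page  Listing  Contact  filter_name=Recent
| Go to page  Detail  Contact  ${contact id}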
"""
pobj = self.get_page_object(page_type, object_name)
pobj._go_to_page(*args, **kwargs)
self._set_current_page_object(pobj)
@capture_screenshot_on_error
def current_page_should_be(self, page_type, object_name, **kwargs):
"""Verifies that the page appears to be the requested page
If the page matches the given page object or contains the
given page object, the keyword will pass.
When this keyword is called, it will try to get the page
object for the given page_type and object_name, and call the
method `_is_current_page`.
Custom page objects may define this function in whatever
manner is necessary to determine that the current page is or
contains the given page object. The only requirement is that
this function raise an exception if it determines the current
page either doesn't represent the page object or doesn't
contain the page object.
The default implementation of the function uses the page URL
and compares it to a pattern based off of the page_type and
object_name.
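Example (illustrative):
| Current page should be  Listing  Contact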
"""
pobj = self.get_page_object(page_type, object_name)
pobj._is_current_page(**kwargs)
self.load_page_object(page_type, object_name)
def load_page_object(self, page_type, object_name=None):
"""Load the keywords for the page object identified by the type and object name
The page type / object name pair must have been registered
using the cumulusci.robotframework.pageobject decorator.
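Example (illustrative):
| Load page object  Listing  Contact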
"""
pobj = self.get_page_object(page_type, object_name)
self._set_current_page_object(pobj)
return pobj
@capture_screenshot_on_error
def wait_for_modal(self, page_type, object_name, expected_heading=None, **kwargs):
"""Wait for the given page object modal to appear.
This will wait for modal to appear. If an expected heading
is provided, it will also validate that the modal has the
expected heading.
Example:
| Wait for modal to appear New Contact expected_heading=New Contact
"""
pobj = self.get_page_object(page_type, object_name)
pobj._wait_to_appear(expected_heading=expected_heading)
self._set_current_page_object(pobj)
# Ideally we would wait for something, but I can't figure out
# what that would be. A knowledge article simply suggests
# to wait a second.
# https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1
self.builtin.sleep("1 second")
return pobj
@capture_screenshot_on_error
def wait_for_page_object(self, page_type, object_name, **kwargs):
"""Wait for an element represented by a page object to appear on the page.
The associated page object will be loaded after the element appears.
page_type represents the page type (Home, Detail, etc.) and
object_name represents the name of an object (Contact,
Organization, etc)
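Example (illustrative):
| Wait for page object  Detail  Contact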
"""
pobj = self.get_page_object(page_type, object_name)
pobj._wait_to_appear()
self._set_current_page_object(pobj)
return pobj
def _set_current_page_object(self, pobj):
"""This does the work of importing the keywords for the given page object
Multiple page objects may be loaded. Each page object will be added
to the front of robot's library search order. Note: this search order
gets reset at the start of every suite.
"""
self.current_page_object = pobj
libname = pobj._libname
old_order = list(self.builtin.set_library_search_order())
if libname in old_order:
old_order.remove(libname)
new_order = [libname] + old_order
self.builtin.log("new search order: {}".format(new_order), "DEBUG")
self.builtin.set_library_search_order(*new_order)
return pobj
|
tests/test_update_face_name.py | starenka/xhtml2pdf | 1,016 | 11080229 | <reponame>starenka/xhtml2pdf<gh_stars>1000+
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
from unittest import TestCase
import html5lib
from xhtml2pdf.document import pisaDocument
from xhtml2pdf.w3c.cssDOMElementInterface import CSSDOMElementInterface
__doc__ = """
TTFWithSameFaceName provides auxiliary functions to check that the correct
font style is chosen when we use TTF files that share the same face name.
By default the last @font-face font-family is applied to all of the text, so to
avoid this issue we have to add a "#" at the beginning of the font-family value.
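For example (illustrative CSS only, not used by this test):
@font-face {font-family: "#Noto_Bold"; src: url('NotoSans-Bold.ttf');}
.classBold {font-family: "#Noto_Bold";}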
"""
class TTFWithSameFaceName(TestCase):
tests_folder = os.path.dirname(os.path.realpath(__file__))
ttf_pathR = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Regular.ttf')
ttf_pathB = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Bold.ttf')
ttf_pathI = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Italic.ttf')
ttf_pathBI = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-BoldItalic.ttf')
ff_R = "@font-face {{font-family: Noto_Regular; src: url(\'{ttf}\');}}".format(ttf=ttf_pathR)
ff_B = "@font-face {{font-family: Noto_Bold; src: url(\'{ttf}\');}}".format(ttf=ttf_pathB)
ff_I = "@font-face {{font-family: Noto_Italic; src: url(\'{ttf}\');}}".format(ttf=ttf_pathI)
ff_BI = "@font-face {{font-family: Noto_BoldItalic; src: url(\'{ttf}\');}}".format(ttf=ttf_pathBI)
css_R = ".classRegular{font-family: Noto_Regular;}"
css_B = ".classBold{font-family: Noto_Bold;}"
css_I = ".classItalic{font-family: Noto_Italic;}"
css_BI = ".classBoldItalic{font-family: Noto_BoldItalic;}"
HTML_CONTENT = u"""
<html>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style type="text/css">
{ff_R}
{ff_B}
{ff_I}
{ff_BI}
{css_R}
{css_B}
{css_I}
{css_BI}
</style>
</head>
<body>
<span class="classRegular">My custom regular font type</span>
<span class="classBold">My custom bold font type</span>
<span class="classItalic">My custom italic font type</span>
<span class="classBoldItalic">My custom bold-italic font type</span>
</body>
</html>
"""
html = HTML_CONTENT.format(ff_R=ff_R, ff_B=ff_B, ff_I=ff_I, ff_BI=ff_BI,
css_R=css_R, css_B=css_B, css_I=css_I, css_BI=css_BI)
def test_check_updated_face_name(self):
"""
Check that the font-family value in the generated PDF and the
font-family from the HTML element are the same.
"""
# Create the pisaDocument in memory from the HTML
with io.BytesIO() as pdf_file:
pisa_doc = pisaDocument(src=self.html,
dest=pdf_file)
# Parse HTML
parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
document = parser.parse(self.html)
for spanElement in document.getElementsByTagName("span"):
spanElement = CSSDOMElementInterface(spanElement)
rules = pisa_doc.cssCascade.findCSSRulesFor(spanElement, "font-family")
font_family_html = rules[0][1].get('font-family').lower()
# Test if font-family of custom @font-face was added to the pisaDocument
self.assertIsNotNone(pisa_doc.fontList.get(font_family_html))
|
tests/apps/good_flow_app/migrations/0042_add_hash_index.py | 15five/django-pg-zero-downtime-migrations | 376 | 11080235 | # Generated by Django 3.0a1 on 2019-10-14 19:48
import django.contrib.postgres.indexes
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('good_flow_app', '0041_drop_gist_index_with_condition'),
]
operations = [
migrations.AddIndex(
model_name='testtable',
index=django.contrib.postgres.indexes.HashIndex(fields=['test_field_int'], name='test_index'),
),
]
|
wordai/api/apis.py | senghoo/wordai | 115 | 11080276 | import hashlib
import json
from datetime import datetime, timedelta
import wordai.models as models
from flask import Blueprint
from flask_jwt_extended import (JWTManager, create_access_token,
create_refresh_token, get_jwt_identity,
get_raw_jwt, jwt_refresh_token_required,
jwt_required)
from flask_restful import Api, Resource, abort, reqparse, request
from jsonschema import validate
blueprint = Blueprint('profile', __name__,
template_folder='templates',
static_folder='static')
api = Api(blueprint)
class api_register(object):
def __init__(self, path):
self.path = path
def __call__(self, cls):
api.add_resource(cls, self.path)
return cls
def admin_required(f):
def __inner__(self, *args, **kwargs):
identify = get_jwt_identity()
user = models.User.find_by_username(identify)
if user and user.role == 'admin':
return f(self, user, *args, **kwargs)
return {
'message': 'Not found',
}, 404
return jwt_required(__inner__)
def user_required(f):
def __inner__(self, *args, **kwargs):
identify = get_jwt_identity()
user = models.User.find_by_username(identify)
if user and user.role in ['admin', 'user'] :
return f(self, user, *args, **kwargs)
return {
'message': 'Not found',
}, 404
return jwt_required(__inner__)
user_parser = reqparse.RequestParser()
user_parser.add_argument('username', help='This username cannot be blank', required=True)
user_parser.add_argument('password', help='This password cannot be blank', required=True)
@api_register("/registration")
class UserRegistration(Resource):
def post(self):
return {'message': 'User registration'}
@api_register("/login")
class UserLogin(Resource):
def post(self):
data = user_parser.parse_args()
current_user = models.User.check_user(data['username'], data['password'])
if not current_user:
abort(401)
return {
'message': 'User {} doesn\'t exist'.format(data['username']),
}
access_token = create_access_token(identity=data['username'])
refresh_token = create_refresh_token(identity=data['username'])
return {
'message': 'Logged in as {}'.format(current_user.username),
'role': current_user.role,
'access_token': access_token,
'refresh_token': refresh_token
}
@api_register("/token/refresh")
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
if current_user:
access_token = create_access_token(identity=current_user)
return {
'access_token': access_token}
abort(401)
return {'message': 'invalid refresh token'}
@api_register("/wordlist")
class WordListList(Resource):
@user_required
def get(self, user):
return [json.loads(x.to_json()) for x in user.wordlists()]
@user_required
def put(self, user):
schema = {
"type": "array",
"items": {"type": "string"},
"uniqueItems": True
}
try:
body = request.json
validate(instance=body, schema=schema)
wordok, not_has, wnot_has = models.WordList.check_word(*body)
defines = models.Word.search_words(*wordok)
return {
"defines": {w['word']: w for w in json.loads(defines.to_json())},
"not_dict": wnot_has,
"not_sentence": not_has,
}
except Exception as err:
return {
"message": "invalid request body",
"error": str(err)
}, 422
@user_required
def post(self, user):
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"words": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": True
}
}
}
try:
body = request.json
validate(instance=body, schema=schema)
wordok, not_has, wnot_has = models.WordList.check_word(*body['words'])
body['words'] = list(wordok)
wordlist = models.WordList(**body)
wordlist.user = user
wordlist.save()
return {
"message": "ok",
"has": list(wordok),
"not_dict": wnot_has,
"not_sentence": not_has,
}
except Exception as err:
return {
"message": "invalid request body",
"error": str(err)
}, 422
@api_register("/wordlist/<string:lid>")
class WordListItem(Resource):
@user_required
def get(self, user, lid):
print(lid)
return json.loads(user.wordlists().filter(id=lid).first().to_json())
@user_required
def put(self, user, lid):
wordlist = models.WordList.objects(user=user, id=lid).first()
if not wordlist:
return {
"message": "wordlist not exists",
}, 404
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"words": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": True
}
}
}
try:
body = request.json
validate(instance=body, schema=schema)
wordok, not_has, wnot_has = models.WordList.check_word(*body['words'])
wordlist.words = wordok
wordlist.name = body['name']
wordlist.description = body['description']
wordlist.user = user
wordlist.save()
return {
"message": "ok",
"has": list(wordok),
"not_dict": wnot_has,
"not_sentence": not_has,
}
except Exception as err:
return {
"message": "invalid request body",
"error": str(err)
}, 422
@user_required
def delete(self, user, lid):
wordlist = models.WordList.objects(user=user, id=lid).first()
if not wordlist:
return {
"message": "wordlist not exists",
}, 404
wordlist.delete()
@api_register("/user/wordlist")
class UserWordList(Resource):
@user_required
def get(self, user):
if not user.wordlist:
return {
"message", "wordlist not set"
}, 404
data = json.loads(user.wordlist.to_json())
return {
"message": "ok",
"wordlist": data['id'],
"wordlist_name": data['name']
}
@user_required
def post(self, user):
parser = reqparse.RequestParser()
parser.add_argument('wordlist', help='This wordlist cannot be blank', required=True)
wordlist_id = parser.parse_args()
wordlist = models.WordList.objects(id=wordlist_id['wordlist']).first()
user.wordlist = wordlist
user.save()
return {
"message": "ok",
"wordlist": wordlist.name
}
@api_register("/learn/word")
class LearnNext(Resource):
@user_required
def get(self, user):
ex = user.next_exercise()
if ex:
sentence_id = json.loads(ex.sentence.to_json())['id']
word_id = json.loads(ex.word.to_json())['id']
return {
"id": word_id,
"word": ex.word.word,
"message": "ok",
"cloze": ex.cloze,
"cn": ex.sentence.chn,
"sid": sentence_id,
"answers": [a for a in ex.answers],
"check": [hashlib.sha1((a+sentence_id+word_id).encode()).hexdigest() for a in ex.answers]
}
else:
return {
"message": "no word need exercise"
}, 404
@user_required
def post(self, user):
parser = reqparse.RequestParser()
parser.add_argument('id', help='This answers cannot be blank', required=True)
parser.add_argument('sid', help='This answers cannot be blank', required=True)
parser.add_argument('answers', help='This answers cannot be blank', required=True,action='append')
parser.add_argument('check', help='This answer_check cannot be blank', required=True, action='append')
data = parser.parse_args()
word_id = data['id']
word = models.Word.objects(id=word_id).first()
if not word:
return {
"message": "word not exist"
}, 404
sentence_id = data['sid']
answers = data['answers']
check = data['check']
check_res = [hashlib.sha1((a+sentence_id+word_id).encode()).hexdigest() for a in answers]
result = check == check_res
slog = models.SentenceLog(sentence=sentence_id, result=result, time=datetime.utcnow())
models.ExerciseLog.objects(user=user, word=word).update_one(
push__sentences=slog, wordname=word.word,
upsert=True)
log = models.ExerciseLog.objects(user=user, word=word).first()
log.calucate_review()
log.save()
return {
"message": "ok",
"result": result,
}
@api_register("/dictionary/<string:word>")
class Dictionary(Resource):
@user_required
def get(self, user, word):
define = models.Word.objects(word=word).first()
if define:
return json.loads(define.to_json())
else:
return {"message": "not found"}, 404
@api_register("/wordlist/learned")
class WordlistLearned(Resource):
@user_required
def get(self, user):
words = user.wordlist.user_learned(user).only("wordname", "review")
return json.loads(words.to_json())
@api_register("/wordlist/to_learn")
class WordlistToLearn(Resource):
@user_required
def get(self, user):
words = user.wordlist.user_to_learn(user)
return words
@api_register("/statistic/learn")
class StatisticLearn(Resource):
@user_required
def get(self, user):
return {
'exercise': models.ExerciseLog.exercise_count(
user,
datetime.now()-timedelta(days=7),
datetime.now()+timedelta(days=7)
),
'review': models.ExerciseLog.review_count(
user,
datetime.now()-timedelta(days=7),
datetime.now()+timedelta(days=7)
)
}
|
pyne/dbgen/ndsfpy.py | nsryan2/pyne | 182 | 11080306 | """This module provides a way to grab and store raw data for fission product yields
from the NDS library at the IAEA. For more information, please visit their website:
https://www-nds.iaea.org/sgnucdat/index.htm or
https://www-nds.iaea.org/sgnucdat/c2.htm. Please contact the NDS at
<EMAIL> with questions about the data itself.
The copyright for the data parsed here is held by the IAEA and is made available
under the following conditions:
**Disclaimer:** Distributed data products contain consensus values of physical
constants. However, neither the network centre nor the IAEA guarantees the
accuracy of such data products or their suitability for particular applied
scientific purposes.
**Copyright:** One may use or reproduce data and information from this site with
an appropriate acknowledgement to the source of data. One may not charge any
subsequent fee for these data.
"""
from __future__ import print_function, division
import os
import shutil
from pyne.utils import QA_warn
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import numpy as np
import numpy.lib.recfunctions
import tables as tb
from pyne import nucname
from pyne.dbgen.api import BASIC_FILTERS
QA_warn(__name__)
def readtable(i, spdat):
"""
Reads in a set of 5 html tables and returns corresponding yield data
"""
parent = getdata(i, spdat)[0]
pfinal = (parent.split('<strong>')[1]).split('</strong>')[0]
pid = conv_to_id(pfinal)
fpdata = getdata(i + 1, spdat)
dt = np.dtype([('from_nuc', 'i4'), ('to_nuc', 'i4'),
('yield_thermal', float), ('yield_thermal_err', float),
('yield_fast', float), ('yield_fast_err', float),
('yield_14MeV', float), ('yield_14MeV_err', float)
])
dfinal = np.zeros((len(fpdata),), dtype=dt)
for index, item in enumerate(fpdata):
dfinal[index]['from_nuc'] = pid
dfinal[index]['to_nuc'] = conv_to_id(item)
thermaldata = getdata(i + 2, spdat)
for index, item in enumerate(thermaldata):
dat, err = conv_to_num(item)
dfinal[index]['yield_thermal'] = dat
dfinal[index]['yield_thermal_err'] = err
fastdata = getdata(i + 3, spdat)
for index, item in enumerate(fastdata):
dat, err = conv_to_num(item)
dfinal[index]['yield_fast'] = dat
dfinal[index]['yield_fast_err'] = err
dtdata = getdata(i + 4, spdat)
for index, item in enumerate(dtdata):
dat, err = conv_to_num(item)
dfinal[index]['yield_14MeV'] = dat
dfinal[index]['yield_14MeV_err'] = err
return dfinal
def conv_to_id(nuc):
"""Converts html nuclide names to nuclide ids
"""
parts = nuc.split('-')
return nucname.id(parts[1] + parts[2])
def conv_to_num(dstring):
"""Converts html number and error to floats
"""
if dstring == '-':
return 0, 0
dat, err = dstring.split('±')
if '<sup>' in dat:
dat = parse_num(dat)
else:
dat = float(dat)
if '<sup>' in err:
err = parse_num(err)
else:
err = float(err)
return dat, err
def parse_num(dst):
"""Converts html numbers with exponents to floats
"""
nums = dst.split('x')
base = float(nums[0])
exp = (nums[1].split('<sup>')[1]).split('</sup>')[0]
return base * 10 ** float(exp)
def getpoint(line):
"""Gets data entries from html lines
"""
spline = line.split('<tr><td class="xl28b"> ')
if len(spline) > 1:
data = spline[1].split('</td></tr>')[0]
else:
data = None
return data
def getdata(i, spdat):
"""Gets the data from the nds html table
"""
lines = spdat[i].splitlines()
dlist = []
for line in lines:
d = getpoint(line)
if d is None:
continue
dlist.append(d)
return dlist
def make_fpy_table(nuc_data, build_dir=""):
"""Adds the NDS fission yields to the nuc_data library.
Parameters
----------
nuc_data : str
Path to nuclide data file.
"""
build_filename = os.path.join(build_dir, 'nds-fpyield.html')
with open(build_filename, 'rb') as f:
raw_data = f.read().decode('iso-8859-1')
spdat = raw_data.split('<table>')
alldata = []
for i in range(1, 31, 5):
alldata.append(readtable(i, spdat))
alldata = numpy.lib.recfunctions.stack_arrays(alldata, asrecarray=True)
db = tb.open_file(nuc_data, 'a', filters=BASIC_FILTERS)
if not hasattr(db.root, 'neutron'):
neutron_group = db.create_group('/', 'neutron', 'Neutron Data')
fpy_table = db.create_table('/neutron/', 'nds_fission_products', alldata,
'NDS Fission Product Yields, percent [unitless]')
fpy_table.flush()
db.close()
def grab_fpy(build_dir='', file_out='nds-fpyield.html'):
"""Grabs the NDS fission product yields from the IAEA website
"""
build_filename = os.path.join(build_dir, file_out)
local_filename = os.path.join(os.path.dirname(__file__), file_out)
if os.path.exists(local_filename):
shutil.copy(local_filename, build_filename)
return
nist = urllib2.urlopen('https://www-nds.iaea.org/sgnucdat/c3.htm')
with open(build_filename, 'wb') as f:
f.write(nist.read())
def make_fpy(args):
"""Controller function for NDS fission products."""
nuc_data, build_dir = args.nuc_data, args.build_dir
# Check that the table exists
with tb.open_file(nuc_data, 'a', filters=BASIC_FILTERS) as f:
if hasattr(f.root, 'neutron') and hasattr(f.root.neutron,
'nds_fission_products'):
print('skipping NDS fission product yield table creation; '
'already exists.')
return
print('Grabbing NDS fission product yield data.')
grab_fpy(build_dir)
print('Making NDS fission product yield table.')
make_fpy_table(nuc_data, build_dir)
|
tinyquery/repeated_util.py | graingert/tinyquery | 104 | 11080342 | <gh_stars>100-1000
"""Helper functions for dealing with repeated fields.
It comes up in a few places that we need to flatten or unflatten repeated
columns when using them in conjunction with other repeated or scalar fields.
These functions allow us to flatten into non-repeated columns to apply various
operations and then unflatten back into repeated columns afterwards.
"""
from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
"""Rebuild a repeated column from flattened results.
Args:
repetitions: a list of how many repeated values go in a row for
each of the rows to process.
values: a list of all the values that need to be packed into lists
result: a (partial) result list to which the rows will be appended.
Returns:
a list of lists of values representing len(repetitions) rows, each
with a number of values corresponding to that row's entry in
repetitions
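For example (illustrative):
rebuild_column_values([2, 0, 1], [1, 2, None, 3], []) returns
[[1, 2], [], [3]]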
"""
if len(repetitions) == 0:
return result
curr_repetition = repetitions[0]
# For rows with no values, we supplied a None, so we need to pop
# off one value no matter what. If that value is None, we go back
# to an empty list, otherwise we put the value in a list.
curr_values = normalize_repeated_null(values[:max(curr_repetition, 1)])
return rebuild_column_values(
repetitions[1:],
values[max(curr_repetition, 1):],
result + [curr_values])
def normalize_column_to_length(col, desired_count):
"""Given the value(s) for a column, normalize to a desired length.
If `col` is a scalar, it's duplicated in a list the desired number of
times. If `col` is a list, it must have 0, 1, or the desired number of
elements, in which cases `None` or the single element is duplicated, or
the original list is returned.
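For example (illustrative): normalize_column_to_length('x', 2) returns
['x', 'x'], and normalize_column_to_length([7], 3) returns [7, 7, 7].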
"""
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count
def flatten_column_values(repeated_column_indices, column_values):
"""Take a list of columns and flatten them.
We need to accomplish three things during the flattening:
1. Flatten out any repeated fields.
2. Keep track of how many repeated values were in each row so that we
can go back to the repeated form afterwards
3. If there are other columns, duplicate their values so that we have
the same number of entries in all columns after flattening.
Args:
repeated_column_indices: the indices of the columns that
are repeated; if there's more than one repeated column, this
function assumes that we've already checked that the lengths of
these columns will match up, or that they have 0 or 1 element.
column_values: a list containing a list for each column's values.
Returns:
(repetition_counts, flattened_columns): a tuple
repetition_counts: a list containing one number per row,
representing the number of repeated values in that row
flattened_columns: a list containing one list for each column's
values. The list for each column will not contain nested
lists.
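For example (illustrative), with the column at index 0 repeated and a
second scalar column:
flatten_column_values([0], [[[1, 2], [3]], ['a', 'b']]) returns
([2, 1], [[1, 2, 3], ['a', 'a', 'b']])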
"""
# wrapping in list for python 3 support
rows = list(zip(*column_values))
repetition_counts = [
max(max(len(row[idx]) for idx in repeated_column_indices), 1)
for row in rows
]
rows_with_repetition_normalized = [
[
normalize_column_to_length(col, count)
for col in row
]
for row, count in zip(rows, repetition_counts)
]
normalized_columns = zip(*rows_with_repetition_normalized)
flattened_columns = [
[val for arr in col for val in arr]
for col in normalized_columns]
return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
"""Determine if we could select col along with ref_col.
We assume ref_col is repeated. In tinyquery this is allowable if any of
the following is true:
- col is not repeated
- col is repeated but every row has only 0 or 1 element
- col is repeated but every row with more than 1 element matches the number
of elements in ref_col
"""
if col.mode != tq_modes.REPEATED:
return True
ref_counts = [len(val) for val in ref_col.values]
counts = [len(val) for val in col.values]
return all(
rc == c or c in (0, 1) or rc in (0, 1)
for rc, c in zip(ref_counts, counts))
def normalize_repeated_null(value):
"""Normalze the way we represent null in repeated fields.
There are three equivalent options: `None`, [], and `[None]`. We chose [] to be
the standard for repeated fields, so this turns any of these into [].
"""
if value is None or value == [None]:
return []
return value
|
ForgeSVN/forgesvn/tests/model/test_repository.py | rohankumardubey/allura | 113 | 11080349 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os
import shutil
import unittest
from unittest import skipUnless
import pkg_resources
from itertools import count, product
from datetime import datetime
from zipfile import ZipFile
from io import BytesIO
from collections import defaultdict
from tg import tmpl_context as c, app_globals as g
import mock
from alluratest.tools import assert_equal, assert_in
from datadiff.tools import assert_equals
import tg
import ming
from ming.base import Object
from ming.orm import session, ThreadLocalORMSession
from testfixtures import TempDirectory
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.model.repo_refresh import send_notifications
from allura.lib import helpers as h
from allura.webhooks import RepoPushWebhookSender
from allura.tests.model.test_repo import RepoImplTestBase
from forgesvn import model as SM
from forgesvn.model.svn import svn_path_exists
from forgesvn.tests import with_svn
from allura.tests.decorators import with_tool
import six
from io import open
from six.moves import range
class TestNewRepo(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_last_commit_for(self):
tree = self.rev.tree
for row in tree.ls():
assert row['last_commit']['author'] is not None
def test_commit(self):
latest_rev = 7
assert self.rev.primary() is self.rev
assert self.rev.index_id().startswith('allura/model/repo/Commit#')
self.rev.author_url
self.rev.committer_url
assert_equal(self.rev.tree._id, self.rev.tree_id)
assert_equal(self.rev.shorthand_id(), '[r{}]'.format(latest_rev))
assert_equal(self.rev.symbolic_ids, ([], []))
assert_equal(self.rev.url(), '/p/test/src/{}/'.format(latest_rev))
all_cis = list(self.repo.log(self.rev._id, limit=25))
assert_equal(len(all_cis), latest_rev)
self.rev.tree.ls()
assert_equal(self.rev.tree.readme(), ('README', 'This is readme\nAnother Line\n'))
assert_equal(self.rev.tree.path(), '/')
assert_equal(self.rev.tree.url(), '/p/test/src/{}/tree/'.format(latest_rev))
self.rev.tree.by_name['README']
assert self.rev.tree.is_blob('README') is True
assert_equal(self.rev.tree['a']['b']['c'].ls(), [])
self.assertRaises(KeyError, lambda: self.rev.tree['a']['b']['d'])
assert_equal(self.rev.authored_user, None)
assert_equal(self.rev.committed_user, None)
assert_equal(
sorted(self.rev.webhook_info.keys()),
sorted(['id', 'url', 'timestamp', 'message', 'author',
'committer', 'added', 'removed', 'renamed', 'modified', 'copied']))
class TestSVNRepo(unittest.TestCase, RepoImplTestBase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
@with_tool('test', 'SVN', 'svn-tags', 'SVN with tags')
def setup_with_tools(self):
setup_global_objects()
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
with h.push_context('test', 'src', neighborhood='Projects'):
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
with h.push_context('test', 'svn-tags', neighborhood='Projects'):
c.app.repo.name = 'testsvn-trunk-tags-branches'
c.app.repo.fs_path = repo_dir
self.svn_tags = c.app.repo
self.svn_tags.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src', neighborhood='Projects')
def test_init(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
shutil.rmtree(dirname)
def test_fork(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
hook_data = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n',
hook_data)
self.assertIn('exec $DIR/post-commit-user "$@"\n', hook_data)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
@mock.patch('forgesvn.model.svn.tg')
def test_can_hotcopy(self, tg):
from forgesvn.model.svn import SVNImplementation
func = SVNImplementation.can_hotcopy
obj = mock.Mock(spec=SVNImplementation)
for combo in product(
['file:///myfile', 'http://myfile'],
[True, False],
['version 1.7', 'version 1.6', 'version 2.0.3']):
source_url = combo[0]
tg.config = {'scm.svn.hotcopy': combo[1]}
stdout = combo[2]
obj.check_call.return_value = stdout, '', 0
expected = (source_url.startswith('file://') and
tg.config['scm.svn.hotcopy'] and
stdout != 'version 1.6')
result = func(obj, source_url)
assert result == expected
@mock.patch('forgesvn.model.svn.g.post_event')
def test_clone(self, post_event):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
c = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n', c)
self.assertIn('exec $DIR/post-commit-user "$@"\n', c)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'SVN Repository', i
def test_log_id_only(self):
entries = list(self.repo.log(id_only=True, limit=25))
assert_equal(entries, [7, 6, 5, 4, 3, 2, 1])
def test_log(self):
entries = list(self.repo.log(id_only=False, limit=25))
assert_equal(entries[len(entries)-6:], # only 6, so this test doesn't have to change when commits added
[
{'parents': [5],
'refs': [],
'committed': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind', 'email': ''},
'message': '',
'rename_details': {},
'id': 6,
'authored': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind',
'email': ''
}, 'size': None},
{'parents': [4],
'refs': [],
'committed': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'message': 'Copied a => b',
'rename_details': {},
'id': 5,
'authored': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [3],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'message': 'Remove hello.txt',
'rename_details': {},
'id': 4,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [2],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'message': 'Modify readme',
'rename_details': {},
'id': 3,
'authored':
{'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [1],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'message': 'Add path',
'rename_details': {},
'id': 2,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'message': 'Create readme',
'rename_details': {},
'id': 1,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'size': None}])
def test_log_file(self):
entries = list(self.repo.log(path='/README', id_only=False, limit=25))
assert_equal(entries, [
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'id': 3,
'message': 'Modify readme',
'parents': [2],
'refs': [],
'size': 28,
'rename_details': {}},
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'id': 1,
'message': 'Create readme',
'parents': [],
'refs': [],
'size': 15,
'rename_details': {}},
])
def test_is_file(self):
assert self.repo.is_file('/README')
assert not self.repo.is_file('/a')
def test_paged_diffs(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
self.assertEqual(entry.diffs, entry.paged_diffs())
self.assertEqual(entry.diffs, entry.paged_diffs(start=0))
added_expected = entry.diffs.added[1:3]
expected = dict(
copied=[], changed=[], removed=[], renamed=[],
added=added_expected, total=4)
actual = entry.paged_diffs(start=1, end=3)
self.assertEqual(expected, actual)
fake_id = self.repo._impl._oid(100)
empty = M.repository.Commit(_id=fake_id, repo=self.repo).paged_diffs()
self.assertEqual(sorted(actual.keys()), sorted(empty.keys()))
def test_diff_create_file(self):
entry = self.repo.commit(next(self.repo.log(1, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=[], added=['/README'], total=1))
def test_diff_create_path(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
actual = entry.diffs
actual.added = sorted(actual.added)
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], removed=[], renamed=[],
added=sorted([
'/a', '/a/b', '/a/b/c',
'/a/b/c/hello.txt']), total=4))
def test_diff_modify_file(self):
entry = self.repo.commit(next(self.repo.log(3, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=['/README'], renamed=[],
removed=[], added=[], total=1))
def test_diff_delete(self):
entry = self.repo.commit(next(self.repo.log(4, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=['/a/b/c/hello.txt'], added=[], total=1))
def test_diff_copy(self):
entry = self.repo.commit(next(self.repo.log(5, id_only=True, limit=1)))
assert_equals(dict(entry.diffs), dict(
copied=[{'new': '/b', 'old': '/a', 'ratio': 1}], renamed=[],
changed=[], removed=[], added=[], total=1))
def test_commit(self):
entry = self.repo.commit(1)
assert entry.committed.name == 'rick446'
assert entry.message
def test_svn_path_exists(self):
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
assert svn_path_exists("file://%s/a" % repo_path)
assert svn_path_exists("file://%s" % repo_path)
assert not svn_path_exists("file://%s/badpath" % repo_path)
with mock.patch('forgesvn.model.svn.pysvn') as pysvn:
svn_path_exists('dummy')
pysvn.Client.return_value.info2.assert_called_once_with(
'dummy',
revision=pysvn.Revision.return_value,
recurse=False)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball(self):
tmpdir = tg.config['scm.repos.tarball.root']
assert_equal(self.repo.tarball_path,
os.path.join(tmpdir, 'svn/t/te/test/testsvn'))
assert_equal(self.repo.tarball_url('1'),
'file:///svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(
os.path.join(tmpdir, "svn/t/te/test/testsvn/test-src-r1.zip"))
tarball_zip = ZipFile(
os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip'), 'r')
assert_equal(tarball_zip.namelist(),
['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(self.repo.tarball_path.encode('utf-8'),
ignore_errors=True)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_paths(self):
rev = '19'
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
# a tag
self.svn_tags.tarball(rev, '/tags/tag-1.0/')
fn = tarball_path + 'test-svn-tags-r19-tags-tag-1.0.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tag_content = sorted(['test-svn-tags-r19-tags-tag-1.0/',
'test-svn-tags-r19-tags-tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags-tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
# a directory (of tags)
self.svn_tags.tarball(rev, '/tags/')
fn = tarball_path + 'test-svn-tags-r19-tags.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tags_content = sorted(['test-svn-tags-r19-tags/',
'test-svn-tags-r19-tags/tag-1.0/',
'test-svn-tags-r19-tags/tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags/tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tags_content)
os.remove(fn)
# no path, but there are trunk in the repo
# expect snapshot of trunk
self.svn_tags.tarball(rev)
fn = tarball_path + 'test-svn-tags-r19-trunk.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
trunk_content = sorted(['test-svn-tags-r19-trunk/',
'test-svn-tags-r19-trunk/aaa.txt',
'test-svn-tags-r19-trunk/bbb.txt',
'test-svn-tags-r19-trunk/ccc.txt',
'test-svn-tags-r19-trunk/README'])
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, and no trunk dir
# expect snapshot of repo root
h.set_context('test', 'src', neighborhood='Projects')
fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(snapshot.namelist(), ['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
ignore_errors=True)
shutil.rmtree(tarball_path, ignore_errors=True)
def test_is_empty(self):
assert not self.repo.is_empty()
with TempDirectory() as d:
repo2 = SM.Repository(
name='test',
fs_path=d.path,
url_path='/test/',
tool='svn',
status='creating')
repo2.init()
assert repo2.is_empty()
repo2.refresh()
ThreadLocalORMSession.flush_all()
assert repo2.is_empty()
def test_webhook_payload(self):
sender = RepoPushWebhookSender()
all_commits = list(self.repo.all_commit_ids())
start = len(all_commits) - 6 # only get a few so test doesn't have to change after new testdata commits
cids = all_commits[start:start+2]
payload = sender.get_payload(commit_ids=cids)
expected_payload = {
'size': 2,
'after': 'r6',
'before': 'r4',
'commits': [{
'id': 'r6',
'url': 'http://localhost/p/test/src/6/',
'timestamp': datetime(2013, 11, 8, 13, 38, 11, 152000),
'message': '',
'author': {'name': 'coldmind',
'email': '',
'username': ''},
'committer': {'name': 'coldmind',
'email': '',
'username': ''},
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'modified': [],
'copied': [],
'renamed': [],
}, {
'id': 'r5',
'url': 'http://localhost/p/test/src/5/',
'timestamp': datetime(2010, 11, 18, 20, 14, 21, 515000),
'message': 'Copied a => b',
'author': {'name': 'rick446',
'email': '',
'username': ''},
'committer': {'name': 'rick446',
'email': '',
'username': ''},
'added': [],
'removed': [],
'modified': [],
'copied': [
{'new': '/b', 'old': '/a', 'ratio': 1},
],
'renamed': [],
}],
'repository': {
'name': 'SVN',
'full_name': '/p/test/src/',
'url': 'http://localhost/p/test/src/',
},
}
assert_equals(payload, expected_payload)
class TestSVNRev(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_url(self):
assert self.rev.url().endswith('/1/')
def test_primary(self):
assert self.rev.primary() == self.rev
def test_shorthand(self):
assert self.rev.shorthand_id() == '[r1]'
def test_diff(self):
diffs = (self.rev.diffs.added
+ self.rev.diffs.removed
+ self.rev.diffs.changed
+ self.rev.diffs.copied)
for d in diffs:
print(d)
def _oid(self, rev_id):
return '%s:%s' % (self.repo._id, rev_id)
def test_log(self):
# path only
commits = list(self.repo.log(self.repo.head, id_only=True, limit=25))
assert_equal(commits, [7, 6, 5, 4, 3, 2, 1])
commits = list(self.repo.log(self.repo.head, 'README', id_only=True, limit=25))
assert_equal(commits, [3, 1])
commits = list(self.repo.log(1, 'README', id_only=True, limit=25))
assert_equal(commits, [1])
commits = list(self.repo.log(self.repo.head, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [4, 2])
commits = list(self.repo.log(3, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [2])
assert_equal(
list(self.repo.log(self.repo.head, 'does/not/exist', id_only=True, limit=25)), [])
def test_notification_email(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
ThreadLocalORMSession.flush_all()
send_notifications(self.repo, [self.repo.rev_to_commit_id(1)])
ThreadLocalORMSession.flush_all()
n = M.Notification.query.find({'subject': '[test:src] New commit [r1] by rick446'}).first()
assert n
assert_in('By rick446', n.text)
assert_in('Create readme', n.text)
class _Test(unittest.TestCase):
idgen = ('obj_%d' % i for i in count())
def _make_tree(self, object_id, **kwargs):
t, isnew = M.repository.Tree.upsert(object_id)
repo = getattr(self, 'repo', None)
t.repo = repo
for k, v in six.iteritems(kwargs):
if isinstance(v, six.string_types):
obj = M.repository.Blob(
t, k, next(self.idgen))
t.blob_ids.append(Object(
name=k, id=obj._id))
else:
obj = self._make_tree(next(self.idgen), **v)
t.tree_ids.append(Object(
name=k, id=obj._id))
session(t).flush()
return t
def _make_commit(self, object_id, **tree_parts):
ci, isnew = M.repository.Commit.upsert(object_id)
if isnew:
ci.committed.email = c.user.email_addresses[0]
ci.authored.email = c.user.email_addresses[0]
dt = datetime.utcnow()
# BSON datetime resolution is to 1 millisecond, not 1 microsecond
# like Python. Round this now so it'll match the value that's
# pulled from MongoDB in the tests.
ci.authored.date = dt.replace(microsecond=dt.microsecond // 1000 * 1000)
ci.message = 'summary\n\ndescription'
ci.set_context(self.repo)
ci.tree_id = 't_' + object_id
ci.tree = self._make_tree(ci.tree_id, **tree_parts)
return ci, isnew
def _make_log(self, ci):
session(ci).flush(ci)
def setUp(self):
setup_basic_test()
setup_global_objects()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.prefix = tg.config.get('scm.repos.root', '/')
class _TestWithRepo(_Test):
def setUp(self):
super(_TestWithRepo, self).setUp()
h.set_context('test', neighborhood='Projects')
c.project.install_app('svn', 'test1')
h.set_context('test', 'test1', neighborhood='Projects')
self.repo = M.Repository(name='test1', tool='svn')
self.repo._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo._impl.shorthand_for_commit = M.RepositoryImplementation.shorthand_for_commit
self.repo._impl.url_for_commit = (
lambda *a, **kw: M.RepositoryImplementation.url_for_commit(
self.repo._impl, *a, **kw))
self.repo._impl._repo = self.repo
self.repo._impl.all_commit_ids = lambda *a, **kw: []
self.repo._impl.commit().symbolic_ids = None
ThreadLocalORMSession.flush_all()
class _TestWithRepoAndCommit(_TestWithRepo):
def setUp(self):
super(_TestWithRepoAndCommit, self).setUp()
self.ci, isnew = self._make_commit('foo')
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class TestRepo(_TestWithRepo):
def test_create(self):
assert self.repo.fs_path == os.path.join(self.prefix, 'svn/p/test/')
assert self.repo.url_path == '/p/test/'
assert self.repo.full_fs_path == os.path.join(
self.prefix, 'svn/p/test/test1')
def test_passthrough(self):
argless = ['init']
for fn in argless:
getattr(self.repo, fn)()
getattr(self.repo._impl, fn).assert_called_with()
unary = ['commit', 'open_blob']
for fn in unary:
getattr(self.repo, fn)('foo')
getattr(self.repo._impl, fn).assert_called_with('foo')
def test_shorthand_for_commit(self):
self.assertEqual(
self.repo.shorthand_for_commit('a' * 40),
'[aaaaaa]')
def test_url_for_commit(self):
self.assertEqual(
self.repo.url_for_commit('a' * 40),
'/p/test/test1/ci/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
@mock.patch('allura.model.repository.g.post_event')
def test_init_as_clone(self, post_event):
self.repo.init_as_clone('srcpath', 'srcname', 'srcurl')
assert self.repo.upstream_repo.name == 'srcname'
assert self.repo.upstream_repo.url == 'srcurl'
assert self.repo._impl.clone_from.called_with('srcpath')
post_event.assert_called_once_with('repo_cloned', 'srcurl', 'srcpath')
def test_latest(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
assert self.repo.latest() is ci
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'Repository', i
assert i['name_s'] == 'test1', i
def test_scm_host_url(self):
assert_equal(self.repo.clone_url('rw', 'nobody'),
'svn+ssh://nobody@localhost:8022/scm-repo/p/test/test1/')
assert_equal(self.repo.clone_url('https', 'nobody'),
'https://nobody@localhost:8022/scm-repo/p/test/test1/')
with h.push_config(self.repo.app.config.options, external_checkout_url='https://[email protected]/'):
assert_equal(self.repo.clone_url('https', 'user'),
'https://[email protected]/')
def test_guess_type(self):
assert self.repo.guess_type('foo.txt') == ('text/plain', None)
assert self.repo.guess_type('foo.gbaer') == (
'application/octet-stream', None)
assert self.repo.guess_type('foo.html') == ('text/html', None)
assert self.repo.guess_type('.gitignore') == ('text/plain', None)
def test_refresh(self):
committer_name = '<NAME>'
committer_email = '<EMAIL>'
ci = mock.Mock()
ci.authored.name = committer_name
ci.committed.name = committer_name
ci.committed.email = committer_email
ci.author_url = '/u/test-committer/'
ci.activity_name = '[deadbeef]'
ci.activity_url = 'url'
ci.activity_extras = {}
del ci.node_id
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo._impl.all_commit_ids = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.symbolics_for_commit = mock.Mock(
return_value=[['master', 'branch'], []])
def refresh_commit_info(oid, seen, lazy=False):
M.repository.CommitDoc(dict(
authored=dict(
name=committer_name,
date=datetime(2010, 10, 8, 15, 32, 48, 0),
email=committer_email),
_id=oid)).m.insert()
self.repo._impl.refresh_commit_info = refresh_commit_info
_id = lambda oid: getattr(oid, '_id', str(oid))
self.repo.shorthand_for_commit = lambda oid: '[' + _id(oid) + ']'
self.repo.url_for_commit = lambda oid: '/ci/' + _id(oid) + '/'
self.repo.refresh()
ThreadLocalORMSession.flush_all()
notifications = M.Notification.query.find().all()
for n in notifications:
if '100 new commits' in n.subject:
assert_in('By Test Committer on 10/08/2010 15:32', n.text)
assert_in('http://localhost/ci/foo99/', n.text)
break
else:
assert False, 'Did not find notification'
assert M.Feed.query.find(dict(
author_name=committer_name)).count() == 100
def test_refresh_private(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
# make unreadable by *anonymous, so additional notification logic
# executes
self.repo.acl = []
c.project.acl = []
self.repo.refresh()
def test_push_upstream_context(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
with self.repo.push_upstream_context():
assert c.project.shortname == 'test'
finally:
M.Project.app_instance = old_app_instance
def test_pending_upstream_merges(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
self.repo.pending_upstream_merges()
finally:
M.Project.app_instance = old_app_instance
class TestRepoObject(_TestWithRepoAndCommit):
def test_upsert(self):
obj0, isnew0 = M.repository.Tree.upsert('foo1')
obj1, isnew1 = M.repository.Tree.upsert('foo1')
assert obj0 is obj1
assert isnew0 and not isnew1
def test_artifact_methods(self):
assert self.ci.index_id(
) == 'allura/model/repo/Commit#foo', self.ci.index_id()
assert self.ci.primary() is self.ci, self.ci.primary()
class TestCommit(_TestWithRepo):
def setUp(self):
super(TestCommit, self).setUp()
self.ci, isnew = self._make_commit(
'foo',
a=dict(
a=dict(
a='',
b='',),
b=''))
self.tree = self.ci.tree
impl = M.RepositoryImplementation()
impl._repo = self.repo
self.repo._impl.shorthand_for_commit = impl.shorthand_for_commit
self.repo._impl.url_for_commit = impl.url_for_commit
def test_upsert(self):
obj0, isnew0 = M.repository.Commit.upsert('foo')
obj1, isnew1 = M.repository.Commit.upsert('foo')
assert obj0 is obj1
assert not isnew1
u = M.User.by_username('test-admin')
assert self.ci.author_url == u.url()
assert self.ci.committer_url == u.url()
assert self.ci.tree is self.tree
assert self.ci.summary == 'summary'
assert self.ci.shorthand_id() == '[foo]'
assert self.ci.url() == '/p/test/test1/ci/foo/'
def test_get_path(self):
b = self.ci.get_path('a/a/a')
assert isinstance(b, M.repository.Blob)
x = self.ci.get_path('a/a')
assert isinstance(x, M.repository.Tree)
def _unique_blobs(self):
def counter():
counter.i += 1
return counter.i
counter.i = 0
blobs = defaultdict(counter)
return lambda blob: BytesIO(str(blobs[blob.path()]))
def test_diffs_file_renames(self):
def open_blob(blob):
blobs = {
'a': 'Leia',
'/b/a/a': 'Darth Vader',
'/b/a/b': 'Luke Skywalker',
'/b/b': 'Death Star will destroy you',
'/b/c': 'Luke Skywalker', # moved from /b/a/b
# moved from /b/b and modified
'/b/a/z': 'Death Star will destroy you\nALL',
}
return BytesIO(blobs.get(blob.path(), ''))
self.repo._impl.open_blob = open_blob
self.repo._impl.commit = mock.Mock(return_value=self.ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'changed': [],
'copied': [],
'renamed': [],
'removed': [],
'total': 5,
}
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit(
'bar',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'],
'renamed': [],
'copied': [],
'changed': [],
'removed': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'total': 10,
}
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
z=''),
c=''))
ci.parent_ids = ['bar']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b/c', 'b/a/z'],
'removed': ['/b/a/b', 'b/b'],
'changed': [],
'copied': [
{
'new': 'b/c',
'old': 'b/a/b',
'ratio': 1,
'diff': '',
},
{
'new': 'b/a/z',
'old': 'b/b',
'ratio': 1,
'diff': '',
},
],
'renamed': [],
'total': 2
}
assert_equal(ci.diffs.added, ['b/a/z', 'b/c'])
assert_equal(ci.diffs.changed, [])
assert_equal(ci.diffs.removed, ['/b/a/b', 'b/b'])
# see mock for open_blob
assert_equal(len(ci.diffs.copied), 2)
assert_equal(ci.diffs.copied[1]['old'], 'b/a/b')
assert_equal(ci.diffs.copied[1]['new'], 'b/c')
assert_equal(ci.diffs.copied[1]['ratio'], 1)
assert_equal(ci.diffs.copied[1]['diff'], '')
assert_equal(ci.diffs.copied[0]['old'], 'b/b')
assert_equal(ci.diffs.copied[0]['new'], 'b/a/z')
def test_context(self):
self.ci.context()
class TestRename(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn-rename'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_log_file_with_rename(self):
entry = list(self.repo.log(path='/dir/b.txt', id_only=False, limit=1))[0]
assert_equal(entry['id'], 3)
assert_equal(entry['rename_details']['path'], '/dir/a.txt')
assert_equal(
entry['rename_details']['commit_url'],
self.repo.url_for_commit(2) # previous revision
)
def test_check_changed_path(self):
changed_path = {'copyfrom_path': '/test/path', 'path': '/test/path2'}
result = self.repo._impl._check_changed_path(
changed_path, '/test/path2/file.txt')
assert_equal({'path': '/test/path2/file.txt',
'copyfrom_path': '/test/path/file.txt'}, result)
class TestDirectRepoAccess(object):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_paged_diffs(self):
_id = self.repo._impl._oid(6)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'changed': [],
'copied': [],
'renamed': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(2)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/a', '/a/b', '/a/b/c', '/a/b/c/hello.txt'],
'removed': [],
'changed': [],
'renamed': [],
'copied': [],
'total': 4,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(3)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': [],
'renamed': [],
'changed': ['/README'],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(4)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': ['/a/b/c/hello.txt'],
'changed': [],
'renamed': [],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
|
mindsdb/api/http/namespaces/configs/datasources.py | yarenty/mindsdb | 261 | 11080395 | <gh_stars>100-1000
from flask_restx import Namespace
ns_conf = Namespace('datasources', description='Data sources')
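# Sketch of how this namespace is typically wired up elsewhere (hypothetical
# snippet, not part of this module): the flask_restx Api object registers it via
# api.add_namespace(ns_conf), and resource classes attach with @ns_conf.route('/...').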
|
RecoLuminosity/LumiProducer/test/analysis/test/crab3_template.py | ckamtsikis/cmssw | 852 | 11080400 | <filename>RecoLuminosity/LumiProducer/test/analysis/test/crab3_template.py
from WMCore.Configuration import Configuration
config = Configuration()
#name='Pt15to30'
config.section_("General")
config.General.requestName = 'PCC_ZeroBias_DataCert_150820'
config.General.workArea = 'taskManagement'
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'Run_PixVertex_LS.py'
config.JobType.allowUndistributedCMSSW = True
#config.JobType.inputFiles = ['dttf_config.db']
config.section_("Data")
config.Data.inputDataset = '/ZeroBias/Run2015C-LumiPixelsMinBias-PromptReco-v1/ALCARECO'
config.Data.lumiMask = 'jsondummy_254227_254459.txt'
config.Data.ignoreLocality = True
#useParent = True
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.publication = False
config.Data.unitsPerJob = 10
#config.Data.totalUnits = -1
#config.Data.publishDbsUrl = 'test'
config.Data.publishDataName = 'PCC_ZeroBias_DataCert_150820'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
config.Site.whitelist=['T2_FR_CCIN2P3','T2_IT_Pisa','T2_UK_London_IC','T2_HU_Budapest']
#config.Site.whitelist=['T2_FR_CCIN2P3']
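# Usage note (not part of the original template): a configuration like this is
# normally submitted with the CRAB3 client after sourcing the CRAB environment,
# e.g. `crab submit -c crab3_template.py`.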
|
Chapter05/Ch5/ttk_inheritance.py | henrryyanez/Tkinter-GUI-Programming-by-Example | 127 | 11080401 | <gh_stars>100-1000
import tkinter as tk
import tkinter.ttk as ttk
win = tk.Tk()
regular_button = ttk.Button(win, text="regular button")
small_button = ttk.Button(win, text="small button", style="small.TButton")
big_button = ttk.Button(win, text="big button", style="big.TButton")
big_dangerous_button = ttk.Button(win, text="big dangerous", style="danger.big.TButton")
small_dangerous_button = ttk.Button(win, text="small dangerous", style="danger.small.TButton")
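# Note: ttk style names are hierarchical. "danger.big.TButton" inherits from
# "big.TButton", which in turn inherits from the base "TButton" style, so the
# two "dangerous" buttons pick up both the font-size and the colour overrides
# configured below.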
style = ttk.Style()
style.configure('TButton', foreground="blue4")
style.configure('small.TButton', font=(None, 7))
style.configure('big.TButton', font=(None, 20))
style.configure('danger.small.TButton', foreground="red")
style.configure('danger.big.TButton', foreground="dark red")
regular_button.pack(padx=50, pady=50)
small_button.pack(padx=50, pady=50)
big_button.pack(padx=50, pady=50)
big_dangerous_button.pack(padx=50, pady=50)
small_dangerous_button.pack(padx=50, pady=50)
win.mainloop()
|
Chapter04/tf2/autograph.py | PacktPublishing/Hands-On-Neural-Networks-with-TensorFlow-2.0 | 112 | 11080415 | <filename>Chapter04/tf2/autograph.py
import tensorflow as tf
@tf.function
def f():
for i in range(10):
print(i)
f()
f()
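# Note (editor sketch, not in the original sample): inside @tf.function, Python's
# print() runs only while the function is being traced, so the loop above prints
# 0-9 for the first call and nothing for the second. The hypothetical variant
# below uses tf.range and tf.print so the loop stays in the graph and prints on
# every call.
@tf.function
def g():
    for i in tf.range(10):  # autograph converts this into a graph-level loop
        tf.print(i)
g()
g()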
|
mpf/platforms/lisy/__init__.py | Scottacus64/mpf | 163 | 11080419 | """LISY System 1/80 platform."""
|
language/serene/training.py | Xtuden-com/language | 1,199 | 11080423 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training manager for fever code."""
import json
import os
from typing import Any, Dict, List, Optional, Text
from absl import logging
import dataclasses
from language.serene import callbacks
from language.serene import fever_tfds
from language.serene import layers
from language.serene import losses
from language.serene import model
from language.serene import preprocessing
from language.serene import tokenizers
from language.serene import util
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tqdm
from official.common import distribute_utils
@dataclasses.dataclass
class ModelConfig:
"""Typed parameters for model."""
fever_experiment_id: int
model_checkpoint: Text
dataset: Text
buffer_size: int
batch_size: int
word_emb_size: int
hidden_size: int
learning_rate: float
positive_class_weight: Optional[float]
max_epochs: int
dropout: float
activation: Text
use_batch_norm: bool
# Model Choice: two_tower or one_tower (not implemented yet).
model: Text
# Preprocessing
tokenizer: Text # EG: Convert strings to list of strings.
text_encoder: Text # EG: Convert list of strings to integers.
basic_lowercase: bool
# Embedder + Contextualizer
embedder: Text
contextualizer: Text
context_num_layers: int
tied_encoders: bool
bidirectional: bool
bert_model_name: Text
bert_max_seq_length: int
bert_vocab_path: Text
bert_model_path: Text
bert_trainable: bool
bert_dropout: float
# Neural Module Configuration
matcher: Text
matcher_hidden_size: int
projection_dim: int
fever_dev_path: Text
max_evidence: int
max_claim_tokens: int
max_evidence_tokens: int
# Whether to include the title/sentence_id in evidence encoding.
include_title: bool
include_sentence_id: bool
n_similar_negatives: int
n_background_negatives: int
scrape_type: Text
include_not_enough_info: bool
title_in_scoring: bool
classify_claim: bool
claim_loss_weight: float
def validate(self):
"""Validate that the arguments to the config are correct, error if not."""
if self.tokenizer not in ['bert', 'basic']:
raise ValueError(f'Invalid tokenizer: "{self.tokenizer}"')
if self.text_encoder not in ['bert', 'basic']:
raise ValueError(f'Invalid text encoder: "{self.text_encoder}"')
if self.matcher not in layers.matcher_registry:
raise ValueError(f'Invalid matcher: "{self.matcher}"')
if self.contextualizer not in ['bert', 'rnn', 'lstm', 'gru']:
raise ValueError(f'Invalid contextualizer: "{self.contextualizer}"')
if self.model not in ['one_tower', 'two_tower']:
raise ValueError(f'Invalid model: "{self.model}"')
if self.bert_model_name not in ['base', 'large']:
      raise ValueError(f'Invalid bert model: "{self.bert_model_name}"')
if self.embedder not in ['classic_embedder', 'bert_embedder']:
raise ValueError(f'Invalid embedder: "{self.embedder}"')
@classmethod
def from_dict(cls, params):
return ModelConfig(**params)
@classmethod
def from_file(cls,
file_path,
overrides = None):
with util.safe_open(file_path) as f:
params: Dict[Text, Any] = json.load(f)
if overrides is not None:
params.update(overrides)
return ModelConfig.from_dict(params)
def save(self, file_path):
with util.safe_open(file_path, 'w') as f:
json.dump(self.asdict(), f)
def asdict(self):
return dataclasses.asdict(self)
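# Example sketch (hypothetical path/values, not part of the original module):
# loading a config from disk while overriding a couple of fields, then checking
# the combination is valid before handing it to the Trainer below.
# config = ModelConfig.from_file(
#     '/path/to/model_config.json',
#     overrides={'batch_size': 32, 'learning_rate': 1e-4})
# config.validate()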
class Trainer:
"""Training wrapper around keras to manage vocab/saving/dataset creation.
The primary methods of this class are:
- train()
- predict()
- embed()
- save()
- load()
The intended use of this is
> trainer = Trainer(my_config)
> trainer.train()
The following methods are primarily for converting TFDS to tf.data.Dataset
for keras training
- _build_tokenizer()
- _build_encoder()
- _encode_and_batch()
- _batch_dataset()
- _encode_dataset()
- _build_vocab()
- _tokenize_example()
These are utilities for embedding different TFDSs
- embed_wiki_dataset()
- embed_claim_dataset()
The following methods deal with preparing the keras model for training
- _compile(): Compile model uner right scope, create callbacks, glue losses
to model
- _build_callbacks(): Keras callbacks
"""
def __init__(
self,
model_config,
debug = False,
tpu = None,
distribution_strategy = None,
tb_log_dir = None):
"""Configure the trainer.
Args:
model_config: ModelConfig parameters for training
debug: Enables certain debug behaviors like dataset subsampling
tpu: The TPU to use or None otherwise
distribution_strategy: Parallel training strategy
tb_log_dir: The directory for Tensorboard to log to
"""
self._debug = debug
if debug:
logging.info('Debug mode enabled on trainer')
self._tpu = tpu
self._distribution_strategy = distribution_strategy
self._tb_log_dir = tb_log_dir
self._strategy: Optional[tf.distribute.Strategy] = None
self._model_config = model_config
self._vocab: Optional[List[Text]] = None
self._vocab_stats: Dict[Text, Any] = {}
self._class_stats: Dict[int, int] = {0: 0, 1: 0}
# Whitespace tokenizer
self._tokenizer: Optional[tokenizers.Tokenizer] = None
self._encoder: Optional[preprocessing.FeverTextEncoder] = None
self._model: Optional[tf.keras.Model] = None
self._inner_model: Optional[tf.keras.Model] = None
def save(self):
"""Persist the encoder and the model to disk.
"""
if self._model is None or self._encoder is None:
raise ValueError('Model and encoder cannot be None')
else:
self._encoder.save_to_file(
# This is a prefix, which converts to: mydir/text_encoder.tokens
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
self._model.save_weights(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
@classmethod
def load(cls,
model_checkpoint,
model_config_overrides = None,
**kwargs):
"""Load the model, its tokenizer, and weights from the checkpoint.
Args:
model_checkpoint: Checkpoint to restore from, from .save()
model_config_overrides: Extra args for ModelConfig
**kwargs: Passed through to trainer, used for overriding checkpoint
Returns:
A model in the same state as just before it was saved with .save()
"""
# pylint: disable=protected-access
model_config = ModelConfig.from_file(
os.path.join(model_checkpoint, 'model_config.json'),
overrides=model_config_overrides)
trainer = Trainer(model_config=model_config, **kwargs)
trainer._tokenizer = trainer._build_tokenizer()
encoder_path = os.path.join(model_checkpoint, 'text_encoder')
if model_config.text_encoder == 'bert':
trainer._encoder = preprocessing.BertTextEncoder.load_from_file(
encoder_path)
elif model_config.text_encoder == 'basic':
trainer._encoder = preprocessing.BasicTextEncoder.load_from_file(
encoder_path)
else:
raise ValueError('Invalid text encoder')
trainer._compile()
if trainer._model is None:
raise ValueError('Model does not exist despite being compiled')
trainer._model.load_weights(os.path.join(model_checkpoint, 'best_model.tf'))
return trainer
def _save_model_config(self):
"""Save only the Model configuration to disk."""
logging.info('Saving config to: %s/model_config.json',
self._model_config.model_checkpoint)
self._model_config.save(
os.path.join(self._model_config.model_checkpoint, 'model_config.json'))
def _save_encoder(self):
"""Save only the text encoder to disk."""
self._encoder.save_to_file(
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
@property
def vocab_size(self):
if self._encoder is None:
raise ValueError('Model has not been build, so no vocab size')
else:
return self._encoder.vocab_size
def _init_strategy(self):
"""Initialize the distribution strategy (e.g. TPU/GPU/Mirrored)."""
if self._strategy is None:
if self._tpu is not None:
resolver = distribute_utils.tpu_initialize(self._tpu)
self._strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif self._distribution_strategy is None or self._distribution_strategy == 'default':
self._strategy = tf.distribute.get_strategy()
elif self._distribution_strategy == 'cpu':
self._strategy = tf.distribute.OneDeviceStrategy('/device:cpu:0')
else:
if self._distribution_strategy == 'mirrored':
self._strategy = tf.distribute.MirroredStrategy()
else:
raise ValueError(
f'Invalid distribution strategy="{self._distribution_strategy}"')
def _build_tokenizer(self):
"""Build the correct tokenizer depending on model encoder.
Returns:
Tokenizer for model
"""
if self._model_config.tokenizer == 'basic':
base_tokenizer = tfds.deprecated.text.Tokenizer()
return tokenizers.ReservedTokenizer(
tokenizer=base_tokenizer, reserved_re=preprocessing.SEPARATOR_RE)
elif self._model_config.tokenizer == 'bert':
return tokenizers.BertTokenizer(
vocab_file=self._model_config.bert_vocab_path, do_lower_case=True)
else:
raise ValueError('Invalid tokenizer')
def _build_encoder(self, vocab,
tokenizer):
"""Build the encoder using the given vocab and tokenizer.
Args:
vocab: Vocab to build encoder from
tokenizer: Tokenizer to build encoder from
Returns:
The built text encoder
"""
if self._model_config.text_encoder == 'basic':
return preprocessing.BasicTextEncoder(
vocab_list=vocab,
tokenizer=tokenizer,
lowercase=self._model_config.basic_lowercase,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
max_claim_tokens=self._model_config.max_claim_tokens,
max_evidence_tokens=self._model_config.max_evidence_tokens,
)
elif self._model_config.text_encoder == 'bert':
return preprocessing.BertTextEncoder(
tokenizer=tokenizer,
max_seq_length=self._model_config.bert_max_seq_length,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
)
def _encode_and_batch(self,
dataset,
train=False,
filter_claims=True,
filter_evidence=True):
"""Convert a tensorflow dataset of unbatched, text examples to TF batches.
Args:
dataset: TF Dataset to transform
train: Whether to encode as training dataset
filter_claims: Whether to filter zero length claims
filter_evidence: Whether to filter zero length evidence
Returns:
encoded and batched dataset for keras fit
"""
encoded = self._encode_dataset(
dataset, filter_claims=filter_claims, filter_evidence=filter_evidence)
if train:
encoded = encoded.shuffle(
self._model_config.buffer_size, reshuffle_each_iteration=False)
batched = self._batch_dataset(encoded)
return batched
def _compile(self):
"""Compile the keras model using the correct scope."""
# pylint: disable=protected-access
self._init_strategy()
with self._strategy.scope():
if self._model_config.model == 'two_tower':
module_model = model.TwoTowerRanker(
self.vocab_size,
activation=self._model_config.activation,
matcher_name=self._model_config.matcher,
word_emb_size=self._model_config.word_emb_size,
hidden_size=self._model_config.hidden_size,
dropout=self._model_config.dropout,
use_batch_norm=self._model_config.use_batch_norm,
contextualizer=self._model_config.contextualizer,
context_num_layers=self._model_config.context_num_layers,
bidirectional=self._model_config.bidirectional,
tied_encoders=self._model_config.tied_encoders,
embedder_name=self._model_config.embedder,
matcher_hidden_size=self._model_config.matcher_hidden_size,
bert_model_name=self._model_config.bert_model_name,
bert_model_path=self._model_config.bert_model_path,
bert_trainable=self._model_config.bert_trainable,
bert_dropout=self._model_config.bert_dropout,
projection_dim=self._model_config.projection_dim,
classify_claim=self._model_config.classify_claim,
)
self._inner_model = module_model
# This hackery is necessary since keras doesn't handle dictionary inputs
# well, so we have to manually specify input/output output shapes. Since
# this is dependent on the model (e.g., bert vs other), let the encoder
# yield this.
inputs = self._encoder.compute_input_shapes()
outputs = module_model(inputs)
module_model.input_names = sorted(inputs.keys())
module_model._feed_input_names = sorted(inputs.keys())
module_model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model = tf.keras.Model(inputs=inputs, outputs=outputs)
self._model.input_names = sorted(inputs.keys())
self._model._feed_input_names = sorted(inputs.keys())
self._model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model.summary(line_length=500)
elif self._model_config.model == 'one_tower':
raise NotImplementedError()
else:
raise ValueError('Invalid model')
metrics = {}
evidence_metrics = [
tf.keras.metrics.BinaryAccuracy(name='accuracy'),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.TruePositives(name='tp'),
tf.keras.metrics.FalsePositives(name='fp'),
tf.keras.metrics.TrueNegatives(name='tn'),
        tf.keras.metrics.FalseNegatives(name='fn'),
]
metrics['evidence_matching'] = evidence_metrics
loss = {}
loss['evidence_matching'] = losses.WeightedBinaryCrossentropyFromProbs(
positive_class_weight=self._model_config.positive_class_weight)
loss_weights = {
'evidence_matching': 1.0,
'claim_classification': self._model_config.claim_loss_weight
}
if self._model_config.classify_claim:
# TODO(perodriguez): add claim classifier metrics
claim_metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
]
metrics['claim_classification'] = claim_metrics
      loss['claim_classification'] = (
          tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False))
else:
loss['claim_classification'] = losses.ZeroLoss()
metrics['claim_classification'] = []
self._model.compile(
loss=loss,
optimizer=tf.keras.optimizers.Adam(self._model_config.learning_rate),
metrics=metrics,
loss_weights=loss_weights,
)
def train(self,
*,
epochs = None,
steps_per_epoch = None,
validation_steps = None):
"""Prepare the dataset, callbacks, and model, then train/save it.
Args:
epochs: The number of epochs to train for, if None then default to
early stopping (useful for debugging)
steps_per_epoch: How many training steps to take, if None default to
normal training (useful for debugging)
      validation_steps: How many validation steps to take, if None default to
normal training (useful for debugging)
"""
logging.info('Preparing model with config:\n%s', self._model_config)
with util.log_time('Initial dataset read'):
builder = fever_tfds.FeverEvidence(
data_dir=self._model_config.dataset,
n_similar_negatives=self._model_config.n_similar_negatives,
n_background_negatives=self._model_config.n_background_negatives,
train_scrape_type=self._model_config.scrape_type,
include_not_enough_info=self._model_config.include_not_enough_info,
title_in_scoring=self._model_config.title_in_scoring,
)
# Cache here to prevent hitting remote fs again
train_dataset = (builder.as_dataset(split='train')).cache()
val_dataset = builder.as_dataset(split='validation').cache()
if self._debug:
train_dataset = train_dataset.take(1000)
if self._debug:
val_dataset = val_dataset.take(200)
self._tokenizer = self._build_tokenizer()
self._vocab = list(self._build_vocab(train_dataset))
self._encoder = self._build_encoder(self._vocab, self._tokenizer)
train_batched = self._encode_and_batch(train_dataset, train=True)
val_batched = self._encode_and_batch(val_dataset, train=False)
# Cache the batch creation, but not the batchwise shuffle.
train_batched = train_batched.cache().shuffle(
100,
reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)
# Cache the batched validation data.
val_batched = val_batched.cache().prefetch(tf.data.experimental.AUTOTUNE)
self._compile()
model_callbacks = self._build_callbacks(val_batched)
# Save enough to reconstruct anything except for the model.
# The model itself is saved with the ModelCheckpoint callback.
self._save_model_config()
self._save_encoder()
if epochs is None:
epochs = self._model_config.max_epochs
self._model.fit(
train_batched,
validation_data=val_batched,
callbacks=model_callbacks,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
logging.info('Model Summary:\n%s', self._model.summary())
# First load the best model.
logging.info('Loading best model weights')
self._model.load_weights(self.model_weight_path)
logging.info('Saving dev predictions from best model')
self._save_dev_predictions(val_batched)
@property
def model_weight_path(self):
return os.path.join(self._model_config.model_checkpoint, 'best_model.tf')
def _save_dev_predictions(self, val_batched):
"""Save model predictions for the dev set.
This is used to compute Fever F1 as stopping metric
Args:
val_batched: The batched validation set.
"""
unbatched = val_batched.unbatch()
model_predictions = self._model.predict(val_batched)
claim_probs = model_predictions['claim_classification']
evidence_probs = model_predictions['evidence_matching']
predictions = []
# Extra _ is the label, which we don't need
for (ex, _), claim_prob, evidence_prob in tqdm.tqdm(
zip(unbatched, claim_probs, evidence_probs), mininterval=5):
predictions.append({
'claim_prob': claim_prob.tolist(),
'evidence_prob': evidence_prob.tolist(),
'metadata': json.loads(ex['metadata'].numpy().decode('utf8'))
})
pred_path = os.path.join(self._model_config.model_checkpoint,
'val_predictions.json')
with util.safe_open(pred_path, 'w') as f:
json.dump({'predictions': predictions}, f)
def predict(self, examples):
"""Given examples in JSON format, predict evidence relevance.
Args:
examples: List of claim/evidence pairs to rank
Returns:
Scalar scores for each pair
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for ex in examples:
stacked['claim_text'].append(ex['claim_text'])
stacked['evidence_text'].append(ex['evidence_text'])
stacked['metadata'].append(ex['metadata'])
stacked['label'].append(ex['label'])
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
preds = []
for batch in batched_examples:
# model.predict() is broken after model load so we have to do this
# manually.
preds.append(self._model(batch))
return np.vstack(preds).reshape(-1).tolist()
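  # Example sketch (hypothetical values, not part of the original module): the
  # dicts passed to predict() mirror the keys stacked above.
  # scores = trainer.predict([{
  #     'claim_text': 'Some claim.',
  #     'evidence_text': 'Some candidate evidence sentence.',
  #     'metadata': '{}',
  #     'label': 0,
  # }])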
def embed(self, examples, *, as_claim,
as_evidence): # Checker .tolist() -> Any
"""Embed a list of evidence text.
Args:
examples: A list of evidence text to embed.
as_claim: Whether to embed examples as claims
as_evidence: Whether to embed examples as evidence
Returns:
A list of embeddings, one for each evidence text.
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for text in examples:
      # Dummy value to make sure tokenizing works.
if as_claim:
stacked['claim_text'].append(text)
else:
stacked['claim_text'].append('a')
if as_evidence:
stacked['evidence_text'].append(text)
else:
stacked['evidence_text'].append('a')
stacked['metadata'].append('')
stacked['label'].append(tf.constant(0, dtype=tf.int64))
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_preds = []
ev_preds = []
for batch in batched_examples:
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
claim_encoding, ev_encoding = self._model(
inputs, embed_claim=as_claim, embed_evidence=as_evidence)
claim_preds.append(claim_encoding)
ev_preds.append(ev_encoding)
return np.vstack(claim_preds).tolist(), np.vstack(ev_preds).tolist()
def embed_wiki_dataset(self, dataset):
"""Embed the wikipedia/evidence only dataset.
Args:
dataset: The wikipedia only dataset (e.g. wiki_tfds.py)
Returns:
Aligned wikipedia_urls, sentence_ids, and embeddings of model
"""
# map_fn and tf_map_fn transform the dataset to the same format as
# tfds_evidence/the one the model expects
def map_fn(text, wikipedia_url, sentence_id):
return ('a', text, wikipedia_url, str(sentence_id),
json.dumps({
'sentence_id': int(sentence_id.numpy()),
'wikipedia_url': wikipedia_url.numpy().decode('utf8')
}))
def tf_map_fn(example):
tensors = tf.py_function(
map_fn,
inp=[
example['text'], example['wikipedia_url'], example['sentence_id']
],
Tout=(tf.string, tf.string, tf.string, tf.string, tf.string))
return {
'claim_text': tensors[0],
'evidence_text': tensors[1],
'wikipedia_url': tensors[2],
'sentence_id': tensors[3],
'claim_label': tf.constant(0, dtype=tf.int64),
'evidence_label': tf.constant(0, dtype=tf.int64),
'metadata': tensors[4]
}
formatted_ds = dataset.map(tf_map_fn)
batched_examples = self._encode_and_batch(
formatted_ds, filter_claims=False, filter_evidence=False)
preds = []
wikipedia_urls = []
sentence_ids = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
_, ev_encoding = self._inner_model(
inputs, embed_claim=False, embed_evidence=True)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
wikipedia_urls.append(key['wikipedia_url'])
sentence_ids.append(key['sentence_id'])
preds.append(ev_encoding)
return np.array(wikipedia_urls), np.array(sentence_ids), np.vstack(preds)
def embed_claim_dataset(self, dataset):
"""Embed the claim only dataset and save them with claim_ids.
Args:
dataset: The claims only dataset (e.g. claim_tfds.py)
Returns:
Aligned claim ids and embeddings from the model
"""
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_ids = []
embeddings = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
# Cannot use self._model since it does not take extra arguments. Since
      # we're not using the keras API (namely .predict()), we can just use the
# underlying model stored in self._inner_model.
claim_encoding, _ = self._inner_model(
inputs, embed_claim=True, embed_evidence=False)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
claim_ids.append(int(key['claim_id']))
embeddings.append(claim_encoding)
return np.array(claim_ids), np.vstack(embeddings)
def _build_callbacks(self, val_batched):
"""Build the callbacks used during training."""
cns_model_checkpoint = util.safe_path(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
model_callbacks = [
# Note: Order matters here, particularly that FeverMetricsCallback
# comes before tensorboard so it can write to the log dictionary
# and TB picks it up.
callbacks.FeverMetricsCallback(
validation_batched=val_batched,
debug=self._debug,
fever_dev_path=self._model_config.fever_dev_path,
max_evidence=self._model_config.max_evidence,
checkpoint_dir=self._model_config.model_checkpoint,
),
# TODO(perodriguez): Determine a better thing to stop on
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=.001,
patience=3,
verbose=1,
mode='min'),
# TODO(perodriguez): Determine a better thing to save on
# Checkpointing also needs to know about fever recall.
tf.keras.callbacks.ModelCheckpoint(
filepath=cns_model_checkpoint,
save_best_only=True,
monitor='val_loss',
mode='min',
verbose=1,
# There is no support for GRU/LSTM Dropout with normal save
save_weights_only=True,
),
]
if self._tb_log_dir is not None:
model_callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=self._tb_log_dir))
return model_callbacks
def _batch_dataset(self, dataset):
"""Batch the dataset depending on what model is used.
Args:
dataset: A dataset to batch
Returns:
A batched dataset with correct padding shapes.
"""
return dataset.padded_batch(
batch_size=self._model_config.batch_size,
padded_shapes=(
self._encoder.padded_shapes(),
# Must match losses in training.py
{
'claim_classification': [],
'evidence_matching': []
}))
def _encode_dataset(self,
dataset,
filter_claims=True,
filter_evidence=True):
"""Convert the tfds dataset to numbers by tokenizing/embedding."""
encode = self._encoder.build_encoder_fn()
encoded_data = dataset.map(
encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if filter_claims:
encoded_data = encoded_data.filter(preprocessing.filter_claim_fn)
if filter_evidence:
encoded_data = encoded_data.filter(preprocessing.filter_evidence_fn)
return encoded_data
def _build_vocab(self, dataset):
"""Build the vocabulary and encoder from the dataset.
Args:
dataset: The dataset to build vocab from.
Returns:
The vocabulary in the dataset, or empty vocab if using bert
"""
# If we are using bert, then we do not need to build the vocab
# since its already defined
if self._model_config.tokenizer == 'bert' and self._model_config.text_encoder == 'bert':
logging.info('Using bert, skipping vocabulary creation')
return set()
if self._tokenizer is None:
raise ValueError('Cannot build vocab without a tokenizer.')
claim_lengths = []
evidence_lengths = []
vocab = set()
for example in tqdm.tqdm(dataset, mininterval=5):
tokenized_claim, tokenized_evidence = self._tokenize_example(example)
claim_lengths.append(len(tokenized_claim))
evidence_lengths.append(len(tokenized_evidence))
vocab.update(tokenized_claim)
vocab.update(tokenized_evidence)
logging.info('Build vocab of size (without padding): %s', len(vocab))
logging.info('Claim length statistics')
logging.info('Max: %s', max(claim_lengths))
logging.info('Min: %s', min(claim_lengths))
claim_percentiles = np.percentile(claim_lengths, [50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(claim_percentiles))
logging.info('Evidence length statistics')
logging.info('Max: %s', max(evidence_lengths))
logging.info('Min: %s', min(evidence_lengths))
evidence_percentiles = np.percentile(evidence_lengths,
[50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(evidence_percentiles))
self._vocab_stats['claim_max'] = max(claim_lengths)
self._vocab_stats['claim_min'] = min(claim_lengths)
self._vocab_stats['claim_percentiles'] = claim_percentiles
self._vocab_stats['evidence_max'] = max(evidence_lengths)
self._vocab_stats['evidence_min'] = min(evidence_lengths)
self._vocab_stats['evidence_percentiles'] = evidence_percentiles
return vocab
def _tokenize_example(self, example):
tokenized_claim = self._tokenizer.tokenize(
example['claim_text'].numpy().decode('utf8'))
tokenized_evidence = self._tokenizer.tokenize(
example['evidence_text'].numpy().decode('utf8'))
return tokenized_claim, tokenized_evidence
|
iot/api-client/end_to_end_example/cloudiot_pubsub_example_server.py | yshalabi/python-docs-samples | 5,938 | 11080435 | <gh_stars>1000+
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Sample server that pushes configuration to Google Cloud IoT devices.
This example represents a server that consumes telemetry data from multiple
Cloud IoT devices. The devices report telemetry data, which the server consumes
from a Cloud Pub/Sub topic. The server then decides whether to turn on or off
individual devices fans.
This example requires the Google Cloud Pub/Sub client library. Install it with
$ pip install --upgrade google-cloud-pubsub
If you are running this example from a Compute Engine VM, you will have to
enable the Cloud Pub/Sub API for your project, which you can do from the Cloud
Console. Create a pubsub topic, for example
projects/my-project-id/topics/my-topic-name, and a subscription, for example
projects/my-project-id/subscriptions/my-topic-subscription.
You can then run the example with
$ python cloudiot_pubsub_example_server.py \
--project_id=my-project-id \
--pubsub_subscription=my-topic-subscription \
"""
import argparse
import base64
import json
import os
import sys
from threading import Lock
import time
from google.cloud import pubsub
from google.oauth2 import service_account
from googleapiclient import discovery
from googleapiclient.errors import HttpError
API_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
API_VERSION = 'v1'
DISCOVERY_API = 'https://cloudiot.googleapis.com/$discovery/rest'
SERVICE_NAME = 'cloudiot'
class Server(object):
"""Represents the state of the server."""
def __init__(self, service_account_json):
credentials = service_account.Credentials.from_service_account_file(
service_account_json).with_scopes(API_SCOPES)
if not credentials:
            sys.exit('Could not load service account credentials '
'from {}'.format(service_account_json))
discovery_url = '{}?version={}'.format(DISCOVERY_API, API_VERSION)
self._service = discovery.build(
SERVICE_NAME,
API_VERSION,
discoveryServiceUrl=discovery_url,
credentials=credentials,
cache_discovery=False)
# Used to serialize the calls to the
# modifyCloudToDeviceConfig REST method. This is needed
# because the google-api-python-client library is built on top
# of the httplib2 library, which is not thread-safe. For more
# details, see: https://developers.google.com/
# api-client-library/python/guide/thread_safety
self._update_config_mutex = Lock()
def _update_device_config(self, project_id, region, registry_id, device_id,
data):
"""Push the data to the given device as configuration."""
config_data = None
print('The device ({}) has a temperature '
'of: {}'.format(device_id, data['temperature']))
if data['temperature'] < 0:
# Turn off the fan.
config_data = {'fan_on': False}
print('Setting fan state for device', device_id, 'to off.')
elif data['temperature'] > 10:
# Turn on the fan
config_data = {'fan_on': True}
print('Setting fan state for device', device_id, 'to on.')
else:
# Temperature is OK, don't need to push a new config.
return
config_data_json = json.dumps(config_data)
body = {
# The device configuration specifies a version to update, which
# can be used to avoid having configuration updates race. In this
# case, you use the special value of 0, which tells Cloud IoT to
# always update the config.
'version_to_update': 0,
# The data is passed as raw bytes, so you encode it as base64.
# Note that the device will receive the decoded string, so you
# do not need to base64 decode the string on the device.
'binary_data': base64.b64encode(
config_data_json.encode('utf-8')).decode('ascii')
}
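        # With the body above, the device ultimately receives the decoded JSON
        # string, e.g. '{"fan_on": true}' (the value depends on the reported
        # temperature).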
device_name = ('projects/{}/locations/{}/registries/{}/'
'devices/{}'.format(
project_id,
region,
registry_id,
device_id))
request = self._service.projects().locations().registries().devices(
).modifyCloudToDeviceConfig(name=device_name, body=body)
# The http call for the device config change is thread-locked so
# that there aren't competing threads simultaneously using the
# httplib2 library, which is not thread-safe.
self._update_config_mutex.acquire()
try:
request.execute()
except HttpError as e:
            # If the server responds with an HttpError, log it here, but
# continue so that the message does not stay NACK'ed on the
# pubsub channel.
print('Error executing ModifyCloudToDeviceConfig: {}'.format(e))
finally:
self._update_config_mutex.release()
def run(self, project_id, pubsub_subscription):
"""The main loop. Consumes messages from the
Pub/Sub subscription.
"""
subscriber = pubsub.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id,
pubsub_subscription)
def callback(message):
"""Logic executed when a message is received from
subscribed topic.
"""
try:
data = json.loads(message.data.decode('utf-8'))
except ValueError as e:
print('Loading Payload ({}) threw an Exception: {}.'.format(
message.data, e))
message.ack()
return
# Get the registry id and device id from the attributes. These are
# automatically supplied by IoT, and allow the server to determine
# which device sent the event.
device_project_id = message.attributes['projectId']
device_registry_id = message.attributes['deviceRegistryId']
device_id = message.attributes['deviceId']
device_region = message.attributes['deviceRegistryLocation']
# Send the config to the device.
self._update_device_config(
device_project_id,
device_region,
device_registry_id,
device_id,
data)
# Acknowledge the consumed message. This will ensure that they
# are not redelivered to this subscription.
message.ack()
print('Listening for messages on {}'.format(subscription_path))
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking, so keep the main thread from
# exiting to allow it to process messages in the background.
while True:
time.sleep(60)
def parse_command_line_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Example of Google Cloud IoT registry and '
'device management.')
# Required arguments
parser.add_argument(
'--project_id',
default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
required=True,
help='GCP cloud project name.')
parser.add_argument(
'--pubsub_subscription',
required=True,
help='Google Cloud Pub/Sub subscription name.')
# Optional arguments
parser.add_argument(
'--service_account_json',
default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
help='Path to service account json file.')
return parser.parse_args()
def main():
args = parse_command_line_args()
server = Server(args.service_account_json)
server.run(args.project_id, args.pubsub_subscription)
if __name__ == '__main__':
main()
|
pontoon/terminology/views.py | foss4/pontoon | 1,145 | 11080449 | from django.http import JsonResponse, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.decorators import method_decorator
from django.views.decorators.http import condition
from django.views.generic import ListView
from pontoon.base.models import Locale
from pontoon.base.utils import require_AJAX
from pontoon.terminology import utils
from pontoon.terminology.models import Term, TermTranslation
@require_AJAX
def get_terms(request):
"""Retrieve terms for given source string and Locale."""
try:
source_string = request.GET["source_string"]
locale_code = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
locale = get_object_or_404(Locale, code=locale_code)
payload = []
for term in Term.objects.for_string(source_string):
data = {
"text": term.text,
"part_of_speech": term.part_of_speech,
"definition": term.definition,
"usage": term.usage,
"translation": term.translation(locale),
"entity_id": term.entity_id,
}
payload.append(data)
return JsonResponse(payload, safe=False)
@method_decorator(condition(etag_func=None), name="dispatch")
class DownloadTerminologyViewV2(ListView):
def get_tbx_file_content(self, term_translations, locale_code):
return utils.build_tbx_v2_file(term_translations, locale_code)
def dispatch(self, request, locale, *args, **kwargs):
locale = get_object_or_404(Locale, code=locale)
term_translations = TermTranslation.objects.filter(
locale=locale
).prefetch_related("term")
content = self.get_tbx_file_content(term_translations, locale.code)
response = StreamingHttpResponse(content, content_type="text/xml")
response["Content-Disposition"] = 'attachment; filename="{locale}.tbx"'.format(
locale=locale.code
)
return response
class DownloadTerminologyViewV3(DownloadTerminologyViewV2):
def get_tbx_file_content(self, term_translations, locale_code):
return utils.build_tbx_v3_file(term_translations, locale_code)
|
src/nsupdate/main/migrations/0011_auto_20170526_2113.py | mirzazulfan/nsupdate.info | 774 | 11080456 | <filename>src/nsupdate/main/migrations/0011_auto_20170526_2113.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('main', '0010_auto_20151229_1717'),
]
operations = [
migrations.AlterField(
model_name='domain',
name='name',
field=models.CharField(help_text='Name of the zone where dynamic hosts may get added', unique=True, max_length=255, verbose_name='name', validators=[django.core.validators.RegexValidator(regex=b'([a-zA-Z0-9-_]+\\.)+[a-zA-Z0-9-_]{2,}', message='Invalid domain name')]),
),
]
|
tensorflow2/tf2cv/models/inceptionresnetv1.py | naviocean/imgclsmob | 2,649 | 11080476 | """
InceptionResNetV1 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, BatchNorm, conv1x1, conv1x1_block, conv3x3_block, Concurrent, flatten,\
is_channels_first, SimpleSequential
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(nn.Layer):
"""
InceptionResNetV1 type Inception-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_eps : float
Small float added to variance in Batch norm.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_eps,
data_format="channels_last",
**kwargs):
super(InceptionAUnit, self).__init__(**kwargs)
self.scale = 0.17
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_eps=bn_eps,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:3],
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1),
bn_eps=bn_eps,
data_format=data_format,
name="branch2"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[3:6],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
bn_eps=bn_eps,
data_format=data_format,
name="branch3"))
conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True,
data_format=data_format,
name="conv")
self.activ = nn.ReLU()
def call(self, x, training=None):
identity = x
x = self.branches(x, training=training)
x = self.conv(x, training=training)
x = self.scale * x + identity
x = self.activ(x)
return x
class InceptionBUnit(nn.Layer):
"""
InceptionResNetV1 type Inception-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_eps : float
Small float added to variance in Batch norm.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_eps,
data_format="channels_last",
**kwargs):
super(InceptionBUnit, self).__init__(**kwargs)
self.scale = 0.10
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_eps=bn_eps,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
bn_eps=bn_eps,
data_format=data_format,
name="branch2"))
conv_in_channels = out_channels_list[0] + out_channels_list[3]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True,
data_format=data_format,
name="conv")
self.activ = nn.ReLU()
def call(self, x, training=None):
identity = x
x = self.branches(x, training=training)
x = self.conv(x, training=training)
x = self.scale * x + identity
x = self.activ(x)
return x
class InceptionCUnit(nn.Layer):
"""
InceptionResNetV1 type Inception-C unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_eps : float
Small float added to variance in Batch norm.
scale : float, default 1.0
Scale value for residual branch.
activate : bool, default True
Whether activate the convolution block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_eps,
scale=0.2,
activate=True,
data_format="channels_last",
**kwargs):
super(InceptionCUnit, self).__init__(**kwargs)
self.activate = activate
self.scale = scale
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_eps=bn_eps,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, (1, 3), (3, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 1), (1, 0)),
bn_eps=bn_eps,
data_format=data_format,
name="branch2"))
conv_in_channels = out_channels_list[0] + out_channels_list[3]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True,
data_format=data_format,
name="conv")
if self.activate:
self.activ = nn.ReLU()
def call(self, x, training=None):
identity = x
x = self.branches(x, training=training)
x = self.conv(x, training=training)
x = self.scale * x + identity
if self.activate:
x = self.activ(x)
return x
class ReductionAUnit(nn.Layer):
"""
InceptionResNetV1 type Reduction-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_eps : float
Small float added to variance in Batch norm.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_eps,
data_format="channels_last",
**kwargs):
super(ReductionAUnit, self).__init__(**kwargs)
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[0:1],
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,),
bn_eps=bn_eps,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_eps=bn_eps,
data_format=data_format,
name="branch2"))
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch3"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class ReductionBUnit(nn.Layer):
"""
InceptionResNetV1 type Reduction-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_eps : float
Small float added to variance in Batch norm.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_eps,
data_format="channels_last",
**kwargs):
super(ReductionBUnit, self).__init__(**kwargs)
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[0:2],
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_eps=bn_eps,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[2:4],
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_eps=bn_eps,
data_format=data_format,
name="branch2"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[4:7],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_eps=bn_eps,
data_format=data_format,
name="branch3"))
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch4"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptInitBlock(nn.Layer):
"""
InceptionResNetV1 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_eps : float
Small float added to variance in Batch norm.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
bn_eps,
in_channels,
data_format="channels_last",
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_eps=bn_eps,
data_format=data_format,
name="conv1")
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
strides=1,
padding=0,
bn_eps=bn_eps,
data_format=data_format,
name="conv2")
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=1,
bn_eps=bn_eps,
data_format=data_format,
name="conv3")
self.pool = MaxPool2d(
pool_size=3,
strides=2,
padding=0,
data_format=data_format,
name="pool")
self.conv4 = conv1x1_block(
in_channels=64,
out_channels=80,
strides=1,
padding=0,
bn_eps=bn_eps,
data_format=data_format,
name="conv4")
self.conv5 = conv3x3_block(
in_channels=80,
out_channels=192,
strides=1,
padding=0,
bn_eps=bn_eps,
data_format=data_format,
name="conv5")
self.conv6 = conv3x3_block(
in_channels=192,
out_channels=256,
strides=2,
padding=0,
bn_eps=bn_eps,
data_format=data_format,
name="conv6")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
x = self.pool(x)
x = self.conv4(x, training=training)
x = self.conv5(x, training=training)
x = self.conv6(x, training=training)
return x
class InceptHead(nn.Layer):
"""
InceptionResNetV1 specific classification block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_eps : float
Small float added to variance in Batch norm.
dropout_rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
bn_eps,
dropout_rate,
classes,
data_format="channels_last",
**kwargs):
super(InceptHead, self).__init__(**kwargs)
self.data_format = data_format
self.use_dropout = (dropout_rate != 0.0)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(
rate=dropout_rate,
name="dropout")
self.fc1 = nn.Dense(
units=512,
input_dim=in_channels,
use_bias=False,
name="fc1")
self.bn = BatchNorm(
epsilon=bn_eps,
data_format=data_format,
name="bn")
self.fc2 = nn.Dense(
units=classes,
input_dim=512,
name="fc2")
def call(self, x, training=None):
x = flatten(x, self.data_format)
if self.use_dropout:
x = self.dropout(x, training=training)
x = self.fc1(x)
x = self.bn(x, training=training)
x = self.fc2(x)
return x
class InceptionResNetV1(tf.keras.Model):
"""
InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
dropout_rate : float, default 0.0
Fraction of the input units to drop. Must be a number between 0 and 1.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
dropout_rate=0.0,
bn_eps=1e-5,
in_channels=3,
in_size=(299, 299),
classes=1000,
data_format="channels_last",
**kwargs):
super(InceptionResNetV1, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
layers = [5, 11, 7]
in_channels_list = [256, 896, 1792]
normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
self.features = SimpleSequential(name="features")
self.features.add(InceptInitBlock(
in_channels=in_channels,
bn_eps=bn_eps,
data_format=data_format,
name="init_block"))
in_channels = in_channels_list[0]
for i, layers_per_stage in enumerate(layers):
stage = SimpleSequential(name="stage{}".format(i + 1))
for j in range(layers_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
out_channels_list_per_stage = reduction_out_channels_list[i - 1]
else:
unit = normal_units[i]
out_channels_list_per_stage = normal_out_channels_list[i]
if (i == len(layers) - 1) and (j == layers_per_stage - 1):
unit_kwargs = {"scale": 1.0, "activate": False}
else:
unit_kwargs = {}
stage.add(unit(
in_channels=in_channels,
out_channels_list=out_channels_list_per_stage,
bn_eps=bn_eps,
data_format=data_format,
name="unit{}".format(j + 1),
**unit_kwargs))
if (j == 0) and (i != 0):
in_channels = in_channels_list[i]
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=8,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = InceptHead(
in_channels=in_channels,
bn_eps=bn_eps,
dropout_rate=dropout_rate,
classes=classes,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = self.output1(x, training=training)
return x
def get_inceptionresnetv1(model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create InceptionResNetV1 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = InceptionResNetV1(**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def inceptionresnetv1(**kwargs):
"""
InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv1(model_name="inceptionresnetv1", bn_eps=1e-3, **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
pretrained = False
models = [
inceptionresnetv1,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionresnetv1 or weight_count == 23995624)
if __name__ == "__main__":
_test()
|
validation_tests/analytical_exact/transcritical_without_shock/analytical_without_shock.py | samcom12/anuga_core | 136 | 11080478 | """
Transcritical flow over a bump without a shock.
Ref1: Houghton & Kasahara, Nonlinear shallow fluid flow over an isolated ridge.
Comm. Pure and Applied Math. DOI:10.1002/cpa.3160210103
Ref2: Delestre et al, 2012, SWASHES: a compilation of shallow water
analytic solutions..., Int J Numer Meth Fluids, DOI:10.1002/fld.3741
<NAME>, ANU 2012
"""
from numpy import zeros, linspace
from scipy.optimize import fsolve
from pylab import plot, show
from anuga import g
q0 = 1.53 # This is the imposed momentum
h_d = 0.66 # This is the water height downstream
def analytic_sol(x):
def elevation(x):
z_b = zeros(len(x))
for i in range(len(x)):
if (8.0 <= x[i] <= 12.0):
z_b[i] = 0.2 - 0.05*(x[i]-10.0)**2.0
else:
z_b[i] = 0.0
return z_b
z = elevation(x)
    zM = max(z)
    def find_hM(hM):  # find the water height at the maximum of the bump
return h_d**3 + (-q0**2/(2*g*hM**2)-hM-zM)*h_d**2 + q0**2/(2*g)
hM = fsolve(find_hM, 0.5)
def find_h(h): #to find the water height at every spatial point after hM is found
return h**3 + (zb-q0**2/(2*g*hM**2)-hM-zM)*h**2 + q0**2/(2*g)
h = zeros(len(x))
for i in range(len(x)):
zb = z[i]
#h[i] = fsolve(find_h, 1.0)
if x[i] < 10:
h[i] = fsolve(find_h, 1.0)
else:
h[i] = fsolve(find_h, 0.4)
return h, z
##N = 401
##L = 25.
##x = linspace(0.0,L,N)
##h,z=analytic_sol(x)
##plot(x,h+z, x,z)
##plot(x, 1.53/h)
##show()
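# Minimal usage sketch (not part of the original script): evaluate the
# analytical solution and the Froude number Fr = q0/(h*sqrt(g*h)) to confirm
# that the flow passes from subcritical (Fr < 1) upstream of the bump crest
# to supercritical (Fr > 1) downstream of it.
if __name__ == "__main__":
    from numpy import sqrt
    x = linspace(0.0, 25.0, 401)
    h, z = analytic_sol(x)
    Fr = q0/(h*sqrt(g*h))
    print("stage range:", (h + z).min(), (h + z).max())
    print("Froude number range:", Fr.min(), Fr.max())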
|
homeassistant/components/lutron/switch.py | learn-home-automation/core | 22,481 | 11080487 | """Support for Lutron switches."""
from homeassistant.components.switch import SwitchEntity
from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron switches."""
devs = []
# Add Lutron Switches
for (area_name, device) in hass.data[LUTRON_DEVICES]["switch"]:
dev = LutronSwitch(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
# Add the indicator LEDs for scenes (keypad buttons)
for scene_data in hass.data[LUTRON_DEVICES]["scene"]:
(area_name, keypad_name, scene, led) = scene_data
if led is not None:
led = LutronLed(
area_name, keypad_name, scene, led, hass.data[LUTRON_CONTROLLER]
)
devs.append(led)
add_entities(devs, True)
class LutronSwitch(LutronDevice, SwitchEntity):
"""Representation of a Lutron Switch."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the switch."""
self._prev_state = None
super().__init__(area_name, lutron_device, controller)
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._lutron_device.level = 100
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._lutron_device.level = 0
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {"lutron_integration_id": self._lutron_device.id}
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_level() > 0
def update(self):
"""Call when forcing a refresh of the device."""
if self._prev_state is None:
self._prev_state = self._lutron_device.level > 0
class LutronLed(LutronDevice, SwitchEntity):
"""Representation of a Lutron Keypad LED."""
def __init__(self, area_name, keypad_name, scene_device, led_device, controller):
"""Initialize the switch."""
self._keypad_name = keypad_name
self._scene_name = scene_device.name
super().__init__(area_name, led_device, controller)
def turn_on(self, **kwargs):
"""Turn the LED on."""
self._lutron_device.state = 1
def turn_off(self, **kwargs):
"""Turn the LED off."""
self._lutron_device.state = 0
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
"keypad": self._keypad_name,
"scene": self._scene_name,
"led": self._lutron_device.name,
}
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_state
@property
def name(self):
"""Return the name of the LED."""
return f"{self._area_name} {self._keypad_name}: {self._scene_name} LED"
def update(self):
"""Call when forcing a refresh of the device."""
if self._lutron_device.last_state is not None:
return
# The following property getter actually triggers an update in Lutron
self._lutron_device.state # pylint: disable=pointless-statement
|
884 Uncommon Words from Two Sentences.py | krishna13052001/LeetCode | 872 | 11080502 | #!/usr/bin/python3
"""
We are given two sentences A and B. (A sentence is a string of space separated
words. Each word consists only of lowercase letters.)
A word is uncommon if it appears exactly once in one of the sentences, and does
not appear in the other sentence.
Return a list of all uncommon words.
You may return the list in any order.
Example 1:
Input: A = "this apple is sweet", B = "this apple is sour"
Output: ["sweet","sour"]
Example 2:
Input: A = "apple apple", B = "banana"
Output: ["banana"]
Note:
0 <= A.length <= 200
0 <= B.length <= 200
A and B both contain only spaces and lowercase letters.
"""
from typing import List
from collections import Counter
class Solution:
def uncommonFromSentences(self, A: str, B: str) -> List[str]:
"""
        Count words across both sentences; a word is uncommon iff its
        combined count is exactly one.
"""
c = Counter(A.split()) + Counter(B.split())
ret = [
k
for k, v in c.items()
if v == 1
]
return ret
    def uncommonFromSentences_complex(self, A: str, B: str) -> List[str]:
"""
        Count each sentence separately; keep words seen exactly once in one
        counter and absent from the other.
"""
c_A, c_B = Counter(A.split()), Counter(B.split())
ret = []
for k, v in c_A.items():
if v == 1 and k not in c_B:
ret.append(k)
for k, v in c_B.items():
if v == 1 and k not in c_A:
ret.append(k)
return ret
def uncommonFromSentences_error(self, A: str, B: str) -> List[str]:
"""
set difference
"""
s_A, s_B = set(A.split()), set(B.split())
return list(
(s_A - s_B) | (s_B - s_A)
)
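# Minimal self-check added for illustration (not part of the original
# solution); the expected outputs follow the two examples in the docstring.
if __name__ == "__main__":
    sol = Solution()
    assert sorted(sol.uncommonFromSentences("this apple is sweet", "this apple is sour")) == ["sour", "sweet"]
    assert sol.uncommonFromSentences("apple apple", "banana") == ["banana"]
    print("all examples passed")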
|
src/pytorch_adapt/layers/plus_residual.py | KevinMusgrave/pytorch-adapt | 131 | 11080521 | import torch
class PlusResidual(torch.nn.Module):
"""
Wraps a layer such that the forward pass returns
```x + self.layer(x)```
"""
def __init__(self, layer: torch.nn.Module):
"""
Arguments:
layer: The layer to be wrapped.
"""
super().__init__()
self.layer = layer
def forward(self, x):
""""""
return x + self.layer(x)
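# Illustrative usage sketch (not part of the library): wrapping an arbitrary
# torch.nn.Linear layer gives a module that computes x + Linear(x).
if __name__ == "__main__":
    block = PlusResidual(torch.nn.Linear(8, 8))
    out = block(torch.randn(4, 8))
    print(out.shape)  # torch.Size([4, 8])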
|
neurokit/materials/__init__.py | ucohen/NeuroKit.py | 338 | 11080532 | """
materials submodule.
"""
import inspect
class Path:
def materials():
return(inspect.getfile(Path).split("__init__")[0]) |