Dataset columns:
  max_stars_repo_path   string, length 4 to 245
  max_stars_repo_name   string, length 7 to 115
  max_stars_count       int64, 101 to 368k
  id                    string, length 2 to 8
  content               string, length 6 to 1.03M
tests/trac/test-trac-0132.py
eLBati/pyxb
123
12776697
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
    logging.basicConfig()
_log = logging.getLogger(__name__)

import sys
import pyxb
import unittest


class TestTrac0132 (unittest.TestCase):
    message = 'bad character \u2620'

    def testDecode (self):
        e = pyxb.PyXBException(self.message)
        if sys.version_info[:2] > (2, 4):
            self.assertEqual(self.message, e.args[0])

if __name__ == '__main__':
    unittest.main()
server/plugins/gatekeeper/gatekeeper.py
nathandarnell/sal
215
12776699
<gh_stars>100-1000
from django.db.models import Q

import sal.plugin


TITLES = {
    'ok': 'Machines with Gatekeeper enabled',
    'alert': 'Machines without Gatekeeper enabled',
    'unknown': 'Machines with unknown Gatekeeper status'}
PLUGIN_Q = Q(pluginscriptsubmission__plugin='Gatekeeper')
SCRIPT_Q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name='Gatekeeper')


class Gatekeeper(sal.plugin.Widget):

    supported_os_families = [sal.plugin.OSFamilies.darwin]

    def get_context(self, queryset, **kwargs):
        queryset = queryset.filter(os_family='Darwin')
        context = self.super_get_context(queryset, **kwargs)
        context['ok'] = self._filter(queryset, 'ok').count()
        context['alert'] = self._filter(queryset, 'alert').count()
        context['unknown'] = queryset.count() - context['ok'] - context['alert']
        return context

    def filter(self, machines, data):
        if data not in TITLES:
            return None, None
        return self._filter(machines, data), TITLES[data]

    def _filter(self, machines, data):
        machines = machines.filter(os_family='Darwin')
        if data == 'ok':
            machines = (
                machines
                .filter(PLUGIN_Q, SCRIPT_Q,
                        pluginscriptsubmission__pluginscriptrow__pluginscript_data='Enabled'))
        elif data == 'alert':
            machines = (
                machines
                .filter(PLUGIN_Q, SCRIPT_Q,
                        pluginscriptsubmission__pluginscriptrow__pluginscript_data='Disabled'))
        elif data == 'unknown':
            machines = (
                machines
                .exclude(pk__in=self._filter(machines, 'ok').values('pk'))
                .exclude(pk__in=self._filter(machines, 'alert').values('pk')))
        return machines
comicolorization/extensions/__init__.py
DwangoMediaVillage/Comicolorization
122
12776700
<gh_stars>100-1000
from .save_images import SaveGeneratedImageExtension, SaveRawImageExtension
bumblebee_status/util/algorithm.py
rosalogia/bumblebee-status
1,089
12776702
import copy


def merge(target, *args):
    """Merges arbitrary data - copied from http://blog.impressiver.com/post/31434674390/deep-merge-multiple-python-dicts

    :param target: the data structure to fill
    :param args: a list of data structures to merge into target
    :return: target, with all data in args merged into it
    :rtype: whatever type was originally passed in
    """
    if len(args) > 1:
        for item in args:
            merge(target, item)
        return target

    item = args[0]
    if not isinstance(item, dict):
        return item
    for key, value in item.items():
        if key in target and isinstance(target[key], dict):
            merge(target[key], value)
        else:
            if key not in target:
                target[key] = copy.deepcopy(value)
    return target

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
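A brief usage sketch for the merge helper above (the dictionaries are invented for illustration): keys missing from target are deep-copied in, while values target already has are left untouched.

    defaults = {"ui": {"theme": "dark", "font": 12}}
    user = {"ui": {"font": 14}, "lang": "en"}
    merge(user, defaults)
    print(user)   # {'ui': {'font': 14, 'theme': 'dark'}, 'lang': 'en'}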
alex/applications/PublicTransportInfoCS/slu/dailogregclassifier/download_models.py
oplatek/alex
184
12776760
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-

if __name__ == '__main__':
    import autopath

from alex.utils.config import online_update

if __name__ == '__main__':
    online_update("applications/PublicTransportInfoCS/slu/dailogregclassifier/dailogreg.nbl.model.all")
tests/test_table_input.py
abcnishant007/sklearn-evaluation
351
12776817
<filename>tests/test_table_input.py
from unittest import TestCase

from sklearn_evaluation import table


class TestMissingInput(TestCase):
    def test_feature_importances(self):
        with self.assertRaisesRegex(ValueError, "needed to tabulate"):
            table.feature_importances(None)
text_analysis_tools/api/sentiment/sentiment.py
yu3peng/text_analysis_tools
149
12776822
# -*- coding: utf-8 -*-

import os
import json

import jieba.analyse
import jieba

CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
sentiment_path = os.path.join(CURRENT_PATH, 'data', 'sentimentDict.json')
stopwords_path = os.path.join(CURRENT_PATH, 'data', 'stopwords.txt.json')
degree_path = os.path.join(CURRENT_PATH, 'data', 'degreeDict.json')
not_path = os.path.join(CURRENT_PATH, 'data', 'notDict.json')
jieba_dic_path = os.path.join(CURRENT_PATH, 'data', 'jieba.dic')

# Load the sentiment word dictionary into jieba
jieba.load_userdict(jieba_dic_path)


class SentimentAnalysis():
    def __init__(self):
        self.sentiment_score_dic = self.load_json(sentiment_path)
        self.degree_score = self.load_json(degree_path)
        self.notwords = self.load_json(not_path)

    def load_json(self, json_file_path):
        with open(json_file_path, 'r', encoding='utf-8') as f:
            # the original passed encoding= to json.loads, which Python 3.9+ rejects
            return json.loads(f.read())

    def analysis(self, sentence):
        words = jieba.lcut(sentence)
        score = self.sentiment_score_dic.get(words[0], 0)
        if len(words) > 1:
            score += self.sentiment_score_dic.get(words[1], 0) * self.notwords.get(words[0], 1) * self.degree_score.get(words[0], 1)
        if len(words) > 2:
            for i in range(2, len(words)):
                score += self.sentiment_score_dic.get(words[i], 0) * self.notwords.get(words[i-1], 1) * \
                         self.degree_score.get(words[i-1], 1) * self.degree_score.get(words[i-2], 1) * \
                         self.notwords.get(words[i-2], 1)
        if score < 0:
            return {'negative': score}
        if score > 0:
            return {'positive': score}
        return {'middle': score}
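A hedged usage sketch for SentimentAnalysis above; the sentences and the resulting scores are purely illustrative and depend on the bundled dictionaries being present:

    sa = SentimentAnalysis()
    print(sa.analysis("这个产品很好"))   # e.g. {'positive': 2.0}; the exact value depends on the dictionaries
    print(sa.analysis("太差了"))         # e.g. {'negative': -4.0}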
cctbx/sgtbx/direct_space_asu/plane_group_reference_table.py
dperl-sol/cctbx_project
155
12776844
<filename>cctbx/sgtbx/direct_space_asu/plane_group_reference_table.py from __future__ import absolute_import, division, print_function from cctbx.sgtbx.direct_space_asu import direct_space_asu from cctbx.sgtbx.direct_space_asu.short_cuts import * from six.moves import range def asu_01(): # p_1 (s.g. 1) return (direct_space_asu('P 1') & x0 & +x1 & y0 & +y1 & z0 & +z1 ) def asu_02(): # p_2 (s.g. 3) return (direct_space_asu('P 2') & x0(y2) & x2(y2) & y0 & +y1 & z0 & +z1 ) def asu_03(): # p_m (s.g. 6) return (direct_space_asu('P -2x') & x0 & x2 & y0 & +y1 & z0 & +z1 ) def asu_04(): # p_g (s.g. 7) return (direct_space_asu('P -2xb') & x0(+y2) & x2(+y2) & y0 & +y1 & z0 & +z1 ) def asu_05(): # c_m (s.g. 8) return (direct_space_asu('C -2x') & x0 & x2 & y0 & +y2 & z0 & +z1 ) def asu_06(): # p_2_m_m (s.g. 25) return (direct_space_asu('P 2 -2') & x0 & x2 & y0 & y2 & z0 & +z1 ) def asu_07(): # p_2_m_g (s.g. 28) return (direct_space_asu('P 2 -2a') & x0(y2) & x4 & y0 & +y1 & z0 & +z1 ) def asu_08(): # p_2_g_g (s.g. 32) return (direct_space_asu('P 2 -2ab') & x0 & x2(-y0) & y0 & +y2 & z0 & +z1 ) def asu_09(): # c_2_m_m (s.g. 35) return (direct_space_asu('C 2 -2') & x0 & x4(y4) & y0 & y2 & z0 & +z1 ) def asu_10(): # p_4 (s.g. 75) return (direct_space_asu('P 4') & x0(-y0) & x2 & y0 & y2(-x2) & z0 & +z1 ) def asu_11(): # p_4_m_m (s.g. 99) return (direct_space_asu('P 4 -2') & x0 & y2 & -p0 & z0 & +z1 ) def asu_12(): # p_4_g_m (s.g. 100) return (direct_space_asu('P 4 -2ab') & x0(-y0) & y0 & m2 & z0 & +z1 ) def asu_13(): # p_3 (s.g. 143) return (direct_space_asu('P 3') & x0(-y0) & y0 & k1 & m1(-h1 | -k1) & h1 & z0 & +z1 ) def asu_14(): # p_3_m_1 (s.g. 156) return (direct_space_asu('P 3 -2"') & h0 & m1 & k0 & z0 & +z1 ) def asu_15(): # p_3_1_m (s.g. 157) return (direct_space_asu('P 3 -2') & y0 & k1 & m1(y3) & p0 & z0 & +z1 ) def asu_16(): # p_6 (s.g. 168) return (direct_space_asu('P 6') & y0 & k1 & m1(y3) & p0(-y0) & z0 & +z1 ) def asu_17(): # p_6_m_m (s.g. 183) return (direct_space_asu('P 6 -2') & y0 & k1 & -h0 & z0 & +z1 ) def get_asu(point_group_number): return eval("asu_%02d" % point_group_number)() if (__name__ == "__main__"): for i in range(1,17+1): get_asu(i).show_summary()
search_insert_position/solution.py
mahimadubey/leetcode-python
528
12776852
# -*- coding: utf-8 -*-

"""
Given a sorted array and a target value, return the index if the target is
found. If not, return the index where it would be if it were inserted in
order. You may assume no duplicates in the array.

Here are a few examples.
[1,3,5,6], 5 → 2
[1,3,5,6], 2 → 1
[1,3,5,6], 7 → 4
[1,3,5,6], 0 → 0
"""


class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        n = len(nums)
        if not nums:
            return 0
        else:
            left = 0
            right = n - 1
            while left <= right:
                # integer division; the original '/' yields a float index on Python 3
                mid = (left + right) // 2
                if nums[mid] == target:
                    return mid
                elif (mid < n - 1 and nums[mid] < target
                        and nums[mid + 1] > target):
                    return mid + 1
                elif target < nums[mid]:
                    right = mid - 1
                else:
                    left = mid + 1
            if left > n - 1:
                return n
            elif right < 0:
                return 0


a = [1, 3, 5, 6]
s = Solution()
print(s.searchInsert(a, 5))
print(s.searchInsert(a, 2))
print(s.searchInsert(a, 7))
print(s.searchInsert(a, 0))
thespian/system/transport/test/test_resultcallback.py
dendron2000/Thespian
210
12776871
from thespian.system.transport import ResultCallback from datetime import datetime, timedelta from time import sleep class TestUnitResultCallback(object): def _good(self, result, value): if not hasattr(self, 'goods'): self.goods = [] self.goods.append( (result, value) ) def _fail(self, result, value): if not hasattr(self, 'fails'): self.fails = [] self.fails.append( (result, value) ) def testGoodCallback(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(True, 5) assert self.goods == [(True, 5)] assert self.fails == [] def testFailCallback(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(False, 9) assert self.goods == [] assert self.fails == [(False, 9)] def testGoodCallbackReCall(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(True, 5) assert self.goods == [(True, 5)] assert self.fails == [] rc.resultCallback(True, 4) assert self.goods == [(True, 5)] assert self.fails == [] def testFailCallbackReCall(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(False, 9) assert self.goods == [] assert self.fails == [(False, 9)] rc.resultCallback(False, 8) assert self.goods == [] assert self.fails == [(False, 9)] def testGoodCallbackReCallFail(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(True, 5) assert self.goods == [(True, 5)] assert self.fails == [] rc.resultCallback(False, 4) assert self.goods == [(True, 5)] assert self.fails == [] def testFailCallbackReCallGood(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc.resultCallback(False, 9) assert self.goods == [] assert self.fails == [(False, 9)] rc.resultCallback(True, 8) assert self.goods == [] assert self.fails == [(False, 9)] def testManyGoodCallbacks(self): self.goods = [] self.fails = [] rc = [ResultCallback(self._good, self._fail) for N in range(20)] for num,each in enumerate(rc): each.resultCallback(True, num) assert self.goods == [(True, N) for N in range(20)] assert self.fails == [] def testManyFailCallbacks(self): self.goods = [] self.fails = [] rc = [ResultCallback(self._good, self._fail) for N in range(20)] for num,each in enumerate(rc): each.resultCallback(False, num) assert self.goods == [] assert self.fails == [(False, N) for N in range(20)] def testManyGoodAndFailCallbacks(self): self.goods = [] self.fails = [] rc = [ResultCallback(self._good, self._fail) for N in range(20)] for num,each in enumerate(rc): each.resultCallback(0 == num % 3, num) assert self.goods == [(True, N) for N in range(20) if N % 3 == 0] assert self.fails == [(False, N) for N in range(20) if N % 3] def testChainedGoodCallbacks(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc3.resultCallback(True, 'good') assert self.goods == [(True, 'good')] * 3 assert self.fails == [] def testChainedFailCallbacks(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc3.resultCallback(False, 'oops') assert self.goods == [] assert self.fails == [(False, 'oops')] * 3 def testChainedGoodCallbacksDoNotDuplicate(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, 
self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc2.resultCallback(True, 'ok') assert self.goods == [(True, 'ok'), (True, 'ok')] assert self.fails == [] rc3.resultCallback(True, 'good') assert self.goods == [(True, 'ok'), (True, 'ok'), (True, 'good')] assert self.fails == [] def testChainedFailCallbacksDoNotDuplicate(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc2.resultCallback(False, 'bad') assert self.goods == [] assert self.fails == [(False, 'bad'), (False, 'bad')] rc3.resultCallback(False, 'oops') assert self.goods == [] assert self.fails == [(False, 'bad'), (False, 'bad'), (False, 'oops')] def testChainedGoodCallbacksDoNotDuplicateOnFail(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc2.resultCallback(True, 'ok') assert self.goods == [(True, 'ok'), (True, 'ok')] assert self.fails == [] rc3.resultCallback(False, 'bad') assert self.goods == [(True, 'ok'), (True, 'ok')] assert self.fails == [(False, 'bad')] def testChainedFailCallbacksDoNotDuplicateOnGood(self): self.goods = [] self.fails = [] rc = ResultCallback(self._good, self._fail) rc2 = ResultCallback(self._good, self._fail, rc) rc3 = ResultCallback(self._good, self._fail, rc2) rc2.resultCallback(False, 'bad') assert self.goods == [] assert self.fails == [(False, 'bad'), (False, 'bad')] rc3.resultCallback(True, 'yippee') assert self.goods == [(True, 'yippee')] assert self.fails == [(False, 'bad'), (False, 'bad')]
pykeyvi/src/converters/__init__.py
remusao/keyvi
147
12776876
from .pykeyvi_autowrap_conversion_providers import *
from autowrap.ConversionProvider import special_converters


def register_converters():
    special_converters.append(MatchIteratorPairConverter())
evidently/telemetry/__init__.py
alex-zenml/evidently
2,212
12776888
<reponame>alex-zenml/evidently
<filename>evidently/telemetry/__init__.py
from .sender import TelemetrySender
mindarmour/adv_robustness/defenses/__init__.py
hboshnak/mindarmour
139
12776896
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module includes classical defense algorithms for defending against
adversarial examples and enhancing model security and trustworthiness.
"""
from .adversarial_defense import AdversarialDefense
from .adversarial_defense import AdversarialDefenseWithAttacks
from .adversarial_defense import EnsembleAdversarialDefense
from .natural_adversarial_defense import NaturalAdversarialDefense
from .projected_adversarial_defense import ProjectedAdversarialDefense

__all__ = ['AdversarialDefense',
           'AdversarialDefenseWithAttacks',
           'NaturalAdversarialDefense',
           'ProjectedAdversarialDefense',
           'EnsembleAdversarialDefense']
recipes/Python/496825_Game_theory_payoff_matrix_solver/recipe-496825.py
tdiprima/code
2,023
12776901
'''
Approximate the strategy oddments for 2 person zero-sum games of perfect information.

Applies the iterative solution method described by <NAME> in his classic
book, The Compleat Strategyst, ISBN 0-486-25101-2.
See chapter 5, page 180 for details.
'''

from operator import add, neg


def solve(payoff_matrix, iterations=100):
    'Return the oddments (mixed strategy ratios) for a given payoff matrix'
    transpose = zip(*payoff_matrix)
    numrows = len(payoff_matrix)
    numcols = len(transpose)
    row_cum_payoff = [0] * numrows
    col_cum_payoff = [0] * numcols
    colpos = range(numcols)
    rowpos = map(neg, xrange(numrows))
    colcnt = [0] * numcols
    rowcnt = [0] * numrows
    active = 0
    for i in xrange(iterations):
        rowcnt[active] += 1
        col_cum_payoff = map(add, payoff_matrix[active], col_cum_payoff)
        active = min(zip(col_cum_payoff, colpos))[1]
        colcnt[active] += 1
        row_cum_payoff = map(add, transpose[active], row_cum_payoff)
        active = -max(zip(row_cum_payoff, rowpos))[1]
    value_of_game = (max(row_cum_payoff) + min(col_cum_payoff)) / 2.0 / iterations
    return rowcnt, colcnt, value_of_game

###########################################
# Example solutions to two pay-off matrices

print solve([[2,3,1,4], [1,2,5,4], [2,3,4,1], [4,2,2,2]])   # Example on page 185
print solve([[4,0,2], [6,7,1]])                             # Exercise 2 number 3
src/genie/libs/parser/iosxe/tests/ShowEthernetServiceInstance/cli/equal/golden_output_1_expected.py
balmasea/genieparser
204
12776912
<filename>src/genie/libs/parser/iosxe/tests/ShowEthernetServiceInstance/cli/equal/golden_output_1_expected.py
expected_output = {
    "service_instance": {
        501: {
            "interfaces": {
                "TenGigabitEthernet0/3/0": {"state": "Up", "type": "Static"},
                "TenGigabitEthernet0/1/0": {"state": "Up", "type": "Static"},
            }
        },
        502: {
            "interfaces": {"TenGigabitEthernet0/3/0": {"state": "Up", "type": "Static"}}
        },
    }
}
setup.py
SeanNobel/d4rl-pybullet
130
12776953
<gh_stars>100-1000
from setuptools import setup, find_packages

setup(name="d4rl_pybullet",
      version="0.1",
      license="MIT",
      description="Datasets for data-driven deep reinforcement learning with Pybullet environments",
      url="https://github.com/takuseno/d4rl-pybullet",
      install_requires=["gym", "pybullet", "h5py"],
      packages=["d4rl_pybullet"])
repokid/plugin.py
boost-entropy-repos-org/repokid
999
12776958
# Copyright 2021 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import logging
from typing import Any
from typing import Dict
from typing import Optional

from repokid import CONFIG
from repokid.types import RepokidConfig

logger = logging.getLogger("repokid")


class RepokidPlugin:
    def __init__(self, config: Optional[RepokidConfig] = None):
        if config:
            self.config = config
        else:
            self.config = CONFIG


class M_A(type):
    pass


class Singleton(M_A):
    _instances: Dict[str, Singleton] = {}

    def __call__(cls, *args: Any, **kwargs: Any) -> Singleton:
        if cls.__name__ not in cls._instances:
            cls._instances[cls.__name__] = super(Singleton, cls).__call__(
                *args, **kwargs
            )
        return cls._instances[cls.__name__]
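A small, hypothetical illustration of the Singleton metaclass above (ConnectionPool is an invented class, not part of repokid): instances are cached per class name, so repeated construction returns the first instance and later constructor arguments are ignored.

    class ConnectionPool(metaclass=Singleton):
        def __init__(self, size: int = 4) -> None:
            self.size = size

    p1 = ConnectionPool()
    p2 = ConnectionPool(size=8)
    print(p1 is p2)   # True: the second call returns the cached instance
    print(p2.size)    # 4: __init__ only ran on the first construction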
gaphor/C4Model/c4model.py
mrmonkington/gaphor
867
12776963
<reponame>mrmonkington/gaphor
# This file is generated by profile_coder.py. DO NOT EDIT!

from __future__ import annotations

from gaphor.core.modeling.properties import (
    association,
    attribute,
    relation_many,
    relation_one,
)
from gaphor.UML import Actor, Package


class C4Container(Package):
    description: attribute[str]
    location: attribute[str]
    ownerContainer: relation_one[C4Container]
    owningContainer: relation_many[C4Container]
    technology: attribute[str]
    type: attribute[str]


class C4Database(C4Container):
    pass


class C4Person(Actor):
    description: attribute[str]
    location: attribute[str]


C4Container.description = attribute("description", str)
C4Container.location = attribute("location", str)
C4Container.ownerContainer = association(
    "ownerContainer", C4Container, upper=1, opposite="owningContainer"
)
C4Container.owningContainer = association(
    "owningContainer", C4Container, composite=True, opposite="ownerContainer"
)
C4Container.technology = attribute("technology", str)
C4Container.type = attribute("type", str)
C4Person.description = attribute("description", str)
C4Person.location = attribute("location", str)
C4Container.namespace.subsets.add(C4Container.ownerContainer)  # type: ignore[attr-defined]
C4Container.ownedMember.subsets.add(C4Container.owningContainer)  # type: ignore[attr-defined]
src/layoutparser/elements/utils.py
frankiert/layout-parser
2,931
12776964
<reponame>frankiert/layout-parser # Copyright 2021 The Layout Parser team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Union, Dict, Dict, Any, Optional, Tuple import numpy as np from PIL import Image def cvt_coordinates_to_points(coords: Tuple[float, float, float, float]) -> np.ndarray: x_1, y_1, x_2, y_2 = coords return np.array( [ [x_1, y_1], # Top Left [x_2, y_1], # Top Right [x_2, y_2], # Bottom Right [x_1, y_2], # Bottom Left ] ) def cvt_points_to_coordinates(points: np.ndarray) -> Tuple[float, float, float, float]: x_1 = points[:, 0].min() y_1 = points[:, 1].min() x_2 = points[:, 0].max() y_2 = points[:, 1].max() return (x_1, y_1, x_2, y_2) def perspective_transformation( M: np.ndarray, points: np.ndarray, is_inv: bool = False ) -> np.ndarray: if is_inv: M = np.linalg.inv(M) src_mid = np.hstack([points, np.ones((points.shape[0], 1))]).T # 3x4 dst_mid = np.matmul(M, src_mid) dst = (dst_mid / dst_mid[-1]).T[:, :2] # 4x2 return dst def vertice_in_polygon(vertice: np.ndarray, polygon_points: np.ndarray) -> bool: # The polygon_points are ordered clockwise # The implementation is based on the algorithm from # https://demonstrations.wolfram.com/AnEfficientTestForAPointToBeInAConvexPolygon/ points = polygon_points - vertice # shift the coordinates origin to the vertice edges = np.append(points, points[0:1, :], axis=0) return all([np.linalg.det([e1, e2]) >= 0 for e1, e2 in zip(edges, edges[1:])]) # If the points are ordered clockwise, the det should <=0 def polygon_area(xs: np.ndarray, ys: np.ndarray) -> float: """Calculate the area of polygons using `Shoelace Formula <https://en.wikipedia.org/wiki/Shoelace_formula>`_. Args: xs (`np.ndarray`): The x coordinates of the points ys (`np.ndarray`): The y coordinates of the points """ # Refer to: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates # The formula is equivalent to the original one indicated in the wikipedia # page. return 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))
tools/xdl/xdlrcviz.py
leonardt/magma
167
12777011
<filename>tools/xdl/xdlrcviz.py import sexpr import sys import os from pprint import pprint from subprocess import Popen, PIPE fname = sys.argv[1] name = os.path.basename(fname).split('.')[0] file = open(fname) source = "" for line in file.readlines(): if line[0] != "#": source += line sexpr.input(source) s = sexpr.parse() while len(s) == 1: s = s[0] table = {} for x in s: table[x[0]] = x[1:] class Element(): def __init__(self,name): self.name = name self.cfg = [] self.inputs = [] self.outputs = [] def canelide(self): if len(self.cfg) == 0: if len(self.inputs) == 0 and len(self.outputs) == 1: return self.outputs[0] == self.name elif len(self.inputs) == 1 and len(self.outputs) == 0: return self.inputs[0] == self.name return False class Primitive(): def __init__(self,sexpr): self.name = sexpr[1] #pprint(sexpr) input,output = Element("input"),Element("output") self.elements = [ input, output ] self.connections = {} # (e0,outputpin,e1,inputpin) => true for i in sexpr[4:]: if i[0] == "pin": if i[3] == "input": input.outputs.append(i[2]) self.connections[ ("input",i[2],i[1],i[2]) ] = True else: output.inputs.append(i[2]) self.connections[ (i[1],i[2],"output",i[2]) ] = True elif i[0] == "element": e = Element(i[1]) self.elements.append(e) for ii in i[2:]: if isinstance(ii,list): if ii[0] == "pin": getattr(e,ii[2]+"s").append(ii[1]) elif ii[0] == "conn": if ii[3] == "==>": self.connections[ (ii[1],ii[2],ii[4],ii[5]) ] = True else: self.connections[ (ii[4],ii[5],ii[1],ii[2]) ] = True elif ii[0] == "cfg": e.cfg = ii[1:] def save(self): print("Saving %s" % self.name) p = Popen(["dot","-Tpdf","-o","%s_%s.pdf" % (self.name,name)], stdin=PIPE) f = p.stdin def write(s): f.write(s) if self.name == "PCIE_3_0": sys.stdout.write(s) write("digraph G {\n") write(" graph [rankdir = LR];\n") write(" node[shape=record];\n") for e in self.elements: def namefmt(xs): return "|".join([ "<%s>%s" % (x,x) for x in xs]) def quote(x): return """ \\"%s\\" """ % x.replace("<","\\<").replace(">","\\>").replace("|","\\|") cfgstring = '\\n'.join([quote(x) for x in e.cfg]) if e.canelide(): write(""" %s[label="<%s>%s"];\n""" % (e.name,e.name,e.name)) else: write(""" %s[label="{ {%s} | %s\\n%s | {%s} }"];\n""" % (e.name,namefmt(e.inputs),e.name,cfgstring,namefmt(e.outputs))) for t in self.connections.keys(): write(" %s:%s -> %s:%s;\n" % t) write("}") f.close() if p.wait() != 0: raise for i in table["primitive_defs"]: if i[0] == "primitive_def": p = Primitive(i) try: p.save() except: print("Failed to save %s" % p.name)
microraiden/examples/ticker_client.py
andrevmatos/microraiden
417
12777043
<filename>microraiden/examples/ticker_client.py from tkinter import ttk import tkinter import logging import gevent import click import sys from microraiden import Session from microraiden import utils log = logging.getLogger(__name__) class ETHTickerClient(ttk.Frame): def __init__( self, sender_privkey: str, session: Session = None, poll_interval: float = 5 ) -> None: self.poll_interval = poll_interval self.root = tkinter.Tk() ttk.Frame.__init__(self, self.root) self.root.title('µRaiden ETH Ticker') self.root.protocol('WM_DELETE_WINDOW', self.close) self.pack() self.pricevar = tkinter.StringVar(value='0.00 USD') ttk.Label(self, textvariable=self.pricevar, font=('Helvetica', '72')).pack() if session is None: self.session = Session( private_key=sender_privkey, close_channel_on_exit=True, endpoint_url='http://localhost:5000' ) else: self.session = session self.active_query = False self.running = False def run(self): self.running = True self.root.after(0, self.query_price) self.root.mainloop() def query_price(self): if not self.running: return self.active_query = True response = self.session.get('http://localhost:5000/ETHUSD') if response: price = float(response.json()['last_price']) log.info('New price received: {:.2f} USD'.format(price)) self.pricevar.set('{:.2f} USD'.format(price)) else: log.warning('No response.') if self.running: self.root.after(int(self.poll_interval * 1000), self.query_price) self.active_query = False def close(self): log.info('Shutting down gracefully.') self.running = False self.root.destroy() # Sloppy handling of thread joining but works for this small demo. while self.active_query: gevent.sleep(1) self.session.close() @click.command() @click.option( '--private-key', required=True, help='Path to private key file of the proxy', type=click.Path(exists=True, dir_okay=False, resolve_path=True) ) @click.option( '--private-key-password-file', default=None, help='Path to file containing password for the JSON-encoded private key', type=click.Path(exists=True, dir_okay=False, resolve_path=True) ) def main( private_key, private_key_password_file, ): private_key = utils.get_private_key(private_key, private_key_password_file) if private_key is None: sys.exit(1) ticker = None try: ticker = ETHTickerClient(private_key) ticker.run() except KeyboardInterrupt: if ticker: ticker.close() if __name__ == '__main__': from gevent import monkey monkey.patch_all() logging.basicConfig(level=logging.INFO) main()
openapi_core/contrib/flask/responses.py
Yarn-e/openapi-core
160
12777048
<reponame>Yarn-e/openapi-core
"""OpenAPI core contrib flask responses module"""
from werkzeug.datastructures import Headers

from openapi_core.validation.response.datatypes import OpenAPIResponse


class FlaskOpenAPIResponseFactory:

    @classmethod
    def create(cls, response):
        header = Headers(response.headers)
        return OpenAPIResponse(
            data=response.data,
            status_code=response._status_code,
            headers=header,
            mimetype=response.mimetype,
        )
system/tasks.py
topicgit/seal
132
12777085
import logging
import requests
import json
from celery import shared_task
from system.models import Users
from seal import settings

logger = logging.getLogger('system_celery')


@shared_task
def system_demo(one):
    # With time zones enabled, Django stores UTC in the database and converts
    # to UTC+8 when values are accessed; Celery picks up the time zone automatically.
    from django.utils import timezone
    for i in Users.objects.all():
        print(i.last_login)  # Read directly this is still UTC (unconverted); be careful if you process it
        print(timezone.localtime(i.last_login).strftime("%Y-%m-%d %H:%M:%S"))  # Formatted as local time
    print("celery periodic task demo, runs once a minute", one)
    return


@shared_task
def ding_ding_to_info(content, type=None):
    """
    DingTalk notification hook, called asynchronously: ding_ding_to_info.delay("alert 1")
    :param content: message text
    :param type:
    :return:
    """
    web_hook_url = getattr(settings, 'web_hook_url'),
    headers = {'content-type': 'application/json'}
    data = {
        "msgtype": "text",
        "text": {
            "content": content
        },
        "at": {
            "atMobiles": [
            ],
        }
    }
    try:
        r = requests.post(web_hook_url[0], data=json.dumps(data), headers=headers)
        print(r.text)
    except Exception as e:
        logger.error(e)
utils/decoder/model.py
gaoyiyeah/ZASR_tensorflow
115
12777098
"""Contains DeepSpeech2 model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import time import logging import gzip import copy import numpy as np import inspect from utils.decoder.swig_wrapper import Scorer from utils.decoder.swig_wrapper import ctc_greedy_decoder from utils.decoder.swig_wrapper import ctc_beam_search_decoder_batch class LM_decoder(object): def __init__(self, beam_alpha, beam_beta, language_model_path, vocab_list): """Initialize the external scorer. :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param language_model_path: Filepath for language model. If it is empty, the external scorer will be set to None, and the decoding method will be pure beam search without scorer. :type language_model_path: basestring|None :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list """ if language_model_path != '': print("begin to initialize the external scorer " "for decoding") self._ext_scorer = Scorer(beam_alpha, beam_beta, language_model_path, vocab_list) lm_char_based = self._ext_scorer.is_character_based() lm_max_order = self._ext_scorer.get_max_order() lm_dict_size = self._ext_scorer.get_dict_size() print("language model: " "is_character_based = %d," % lm_char_based + " max_order = %d," % lm_max_order + " dict_size = %d" % lm_dict_size) print("end initializing scorer") else: self._ext_scorer = None print("no language model provided, " "decoding by pure beam search without scorer.") def decode_batch_beam_search(self, probs_split, beam_alpha, beam_beta, beam_size, cutoff_prob, cutoff_top_n, vocab_list, num_processes): """Decode by beam search for a batch of probs matrix input. :param probs_split: List of 2-D probability matrix, and each consists of prob vectors for one speech utterancce. :param probs_split: List of matrix :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param beam_size: Width for Beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in vocabulary will be used in beam search, default 40. :type cutoff_top_n: int :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list :param num_processes: Number of processes (CPU) for decoder. :type num_processes: int :return: List of transcription texts. :rtype: List of basestring """ if self._ext_scorer != None: self._ext_scorer.reset_params(beam_alpha, beam_beta) # beam search decode num_processes = min(num_processes, np.shape(probs_split)[0]) beam_search_results = ctc_beam_search_decoder_batch( probs_split=probs_split, vocabulary=vocab_list, beam_size=beam_size, num_processes=num_processes, ext_scoring_func=self._ext_scorer, cutoff_prob=cutoff_prob, cutoff_top_n=cutoff_top_n) results = [result[0][1] for result in beam_search_results] return results def _adapt_feeding_dict(self, feeding_dict): """Adapt feeding dict according to network struct. To remove impacts from padding part, we add scale_sub_region layer and sub_seq layer. For sub_seq layer, 'sequence_offset' and 'sequence_length' fields are appended. 
For each scale_sub_region layer 'convN_index_range' field is appended. :param feeding_dict: Feeding is a map of field name and tuple index of the data that reader returns. :type feeding_dict: dict|list :return: Adapted feeding dict. :rtype: dict|list """ adapted_feeding_dict = copy.deepcopy(feeding_dict) if isinstance(feeding_dict, dict): adapted_feeding_dict["sequence_offset"] = len(adapted_feeding_dict) adapted_feeding_dict["sequence_length"] = len(adapted_feeding_dict) for i in xrange(self._num_conv_layers): adapted_feeding_dict["conv%d_index_range" %i] = \ len(adapted_feeding_dict) elif isinstance(feeding_dict, list): adapted_feeding_dict.append("sequence_offset") adapted_feeding_dict.append("sequence_length") for i in xrange(self._num_conv_layers): adapted_feeding_dict.append("conv%d_index_range" % i) else: raise ValueError("Type of feeding_dict is %s, not supported." % type(feeding_dict)) return adapted_feeding_dict def _adapt_data(self, data): """Adapt data according to network struct. For each convolution layer in the conv_group, to remove impacts from padding data, we can multiply zero to the padding part of the outputs of each batch normalization layer. We add a scale_sub_region layer after each batch normalization layer to reset the padding data. For rnn layers, to remove impacts from padding data, we can truncate the padding part before output data feeded into the first rnn layer. We use sub_seq layer to achieve this. :param data: Data from data_provider. :type data: list|function :return: Adapted data. :rtype: list|function """ def adapt_instance(instance): if len(instance) < 2 or len(instance) > 3: raise ValueError("Size of instance should be 2 or 3.") padded_audio = instance[0] text = instance[1] # no padding part if len(instance) == 2: audio_len = padded_audio.shape[1] else: audio_len = instance[2] adapted_instance = [padded_audio, text] # Stride size for conv0 is (3, 2) # Stride size for conv1 to convN is (1, 2) # Same as the network, hard-coded here padded_conv0_h = (padded_audio.shape[0] - 1) // 2 + 1 padded_conv0_w = (padded_audio.shape[1] - 1) // 3 + 1 valid_w = (audio_len - 1) // 3 + 1 adapted_instance += [ [0], # sequence offset, always 0 [valid_w], # valid sequence length # Index ranges for channel, height and width # Please refer scale_sub_region layer to see details [1, 32, 1, padded_conv0_h, valid_w + 1, padded_conv0_w] ] pre_padded_h = padded_conv0_h for i in xrange(self._num_conv_layers - 1): padded_h = (pre_padded_h - 1) // 2 + 1 pre_padded_h = padded_h adapted_instance += [ [1, 32, 1, padded_h, valid_w + 1, padded_conv0_w] ] return adapted_instance if isinstance(data, list): return map(adapt_instance, data) elif inspect.isgeneratorfunction(data): def adapted_reader(): for instance in data(): yield map(adapt_instance, instance) return adapted_reader else: raise ValueError("Type of data is %s, not supported." % type(data)) def _create_parameters(self, model_path=None): """Load or create model parameters.""" if model_path is None: self._parameters = paddle.parameters.create(self._loss) else: self._parameters = paddle.parameters.Parameters.from_tar( gzip.open(model_path)) def _create_network(self, vocab_size, num_conv_layers, num_rnn_layers, rnn_layer_size, use_gru, share_rnn_weights): """Create data layers and model network.""" # paddle.data_type.dense_array is used for variable batch input. # The size 161 * 161 is only an placeholder value and the real shape # of input batch data will be induced during training. 
audio_data = paddle.layer.data( name="audio_spectrogram", type=paddle.data_type.dense_array(161 * 161)) text_data = paddle.layer.data( name="transcript_text", type=paddle.data_type.integer_value_sequence(vocab_size)) seq_offset_data = paddle.layer.data( name='sequence_offset', type=paddle.data_type.integer_value_sequence(1)) seq_len_data = paddle.layer.data( name='sequence_length', type=paddle.data_type.integer_value_sequence(1)) index_range_datas = [] for i in xrange(num_rnn_layers): index_range_datas.append( paddle.layer.data( name='conv%d_index_range' % i, type=paddle.data_type.dense_vector(6))) self._log_probs, self._loss = deep_speech_v2_network( audio_data=audio_data, text_data=text_data, seq_offset_data=seq_offset_data, seq_len_data=seq_len_data, index_range_datas=index_range_datas, dict_size=vocab_size, num_conv_layers=num_conv_layers, num_rnn_layers=num_rnn_layers, rnn_size=rnn_layer_size, use_gru=use_gru, share_rnn_weights=share_rnn_weights)
tests/core/pyspec/eth2spec/utils/hash_function.py
MicahZoltu/eth2.0-specs
2,161
12777113
from hashlib import sha256
from remerkleable.byte_arrays import Bytes32
from typing import Union

ZERO_BYTES32 = b'\x00' * 32


def hash(x: Union[bytes, bytearray, memoryview]) -> Bytes32:
    return Bytes32(sha256(x).digest())
tfjs_graph_converter/quirks.py
httpsgithu/tfjs-to-tf
114
12777127
# SPDX-License-Identifier: MIT # Copyright © 2020 <NAME> """Functions to fix various known issues with exported TFJS models""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import base64 from typing import Any, Dict, List, Optional import tfjs_graph_converter.common as common def _find_if_has_key(obj: Dict[str, Any], key: str, of_type: Optional[type] = None) -> List[Any]: """ Recursively find all objects with a given key in a dictionary Args: obj: Dictionary to search key: Key to find of_type: [optional] Type of the referenced item Returns: List of all objects that contain an item with the given key and matching type """ def get_children(item: Any) -> List[Any]: return [val for val in item.values() if isinstance(val, dict)] found = [] stack = get_children(obj) while len(stack) > 0: item = stack.pop() if key in item and (of_type is None or isinstance(item[key], of_type)): found.append(item) stack.extend(get_children(item)) return found def _convert_string_attrs(node: Dict[str, Any]) -> None: """ Deep search string attributes (labelled "s" in GraphDef proto) and convert ascii code lists to base64-encoded strings if necessary """ attr_key = common.TFJS_NODE_ATTR_KEY str_key = common.TFJS_ATTR_STRING_VALUE_KEY # some layers (e.g. PReLU) don't contain the `attr` key, # so test for its presence attrs: list = [] if attr_key in node: attrs = _find_if_has_key(node[attr_key], key=str_key, of_type=list) for attr in attrs: array = attr[str_key] # check if conversion is actually necessary if (len(array) > 0) and isinstance(array, list) \ and isinstance(array[0], int): string = ''.join(map(chr, array)) binary = string.encode('utf8') attr[str_key] = base64.encodebytes(binary) elif len(array) == 0: attr[str_key] = None def _fix_dilation_attrs(node: Dict[str, Any]) -> None: """ Search dilations-attribute and convert misaligned dilation rates if necessary see https://github.com/patlevin/tfjs-to-tf/issues/1 """ path = ['attr', 'dilations', 'list'] values = node found = True for key in path: if key in values: values = values[key] else: found = False break # if dilations are present, they're stored in 'values' now ints = common.TFJS_ATTR_INT_VALUE_KEY if found and ints in values and isinstance(values[ints], list): value = values[ints] if len(value) != 4: # must be NCHW-formatted 4D tensor or else TF can't handle it raise ValueError("Unsupported 'dilations'-attribute in node " f'{node[common.TFJS_NAME_KEY]}') # check for [>1,>1,1,1], which is likely a mistranslated [1,>1,>1,1] if int(value[0], 10) > 1: values[ints] = ['1', value[0], value[1], '1'] def fix_node_attributes(message_dict: Dict[str, Any]) -> Dict[str, Any]: """ Fix various known issues found "in the wild": • Node attributes in deserialised JSON may contain strings as lists of ascii codes when the TF GraphDef proto expects base64 encoded strings • 'dilation' attributes may be misaligned in a way unsupported by TF Further fixes will be added as issues are reported. Args: message_dict: Graph model formatted as parsed JSON dictionary Returns: Updated message dictionary with fixes applied if necessary """ if common.TFJS_NODE_KEY in message_dict: nodes = message_dict[common.TFJS_NODE_KEY] for node in nodes: _convert_string_attrs(node) _fix_dilation_attrs(node) return message_dict
spectre/trading/stopmodel.py
rajach/spectre
302
12777154
<filename>spectre/trading/stopmodel.py """ @author: Heerozh (<NAME>) @copyright: Copyright 2019-2020, Heerozh. All rights reserved. @license: Apache 2.0 @email: <EMAIL> """ import math def sign(x): return math.copysign(1, x) class PriceTracker: def __init__(self, current_price, recorder=max): self.last_price = current_price self.recorder = recorder self.recorded_price = current_price self.tracking_position = None def update_price(self, last_price): self.recorded_price = self.recorder(self.recorded_price, last_price) self.last_price = last_price def process_split(self, inverse_ratio: float): self.recorded_price /= inverse_ratio # ----------------------------------------------------------------------------- class StopTracker(PriceTracker): def __init__(self, current_price, stop_price, callback): super().__init__(current_price, lambda _, x: x) self._stop_price = stop_price self.stop_loss = stop_price < current_price self.callback = callback @property def stop_price(self): return self._stop_price def fire(self, *args): if callable(self.callback): return self.callback(*args) else: return self.callback def check_trigger(self, *args): if self.stop_loss: if self.last_price <= self.stop_price: return self.fire(*args) else: if self.last_price >= self.stop_price: return self.fire(*args) return False class StopModel: def __init__(self, ratio: float, callback=None): self.ratio = ratio self.callback = callback def new_tracker(self, current_price, inverse): if inverse: stop_price = current_price * (1 - self.ratio) else: stop_price = current_price * (1 + self.ratio) return StopTracker(current_price, stop_price, self.callback) # ----------------------------------------------------------------------------- class TrailingStopTracker(StopTracker): def __init__(self, current_price, ratio, callback): self.ratio = ratio stop_price = current_price * (1 + self.ratio) StopTracker.__init__(self, current_price, stop_price, callback=callback) PriceTracker.__init__(self, current_price, recorder=max if ratio < 0 else min) @property def stop_price(self): return self.recorded_price * (1 + self.ratio) class TrailingStopModel(StopModel): """ Unlike trailing stop order, the ratio in this model is relative to the highest / lowest price, so -0.1 means stop price is 90% of the highest price from now to the future; 0.1 means stop price is 110% of the lowest price from now to the future. 
""" def new_tracker(self, current_price, inverse): ratio = -self.ratio if inverse else self.ratio return TrailingStopTracker(current_price, ratio, self.callback) # ----------------------------------------------------------------------------- class DecayTrailingStopTracker(TrailingStopTracker): def __init__(self, current_price, ratio, target, decay_rate, max_decay, callback): self.initial_ratio = ratio self.max_decay = max_decay self.decay_rate = decay_rate self.target = target super().__init__(current_price, ratio, callback) @property def current(self): raise NotImplementedError("abstractmethod") @property def stop_price(self): decay = max(self.decay_rate ** (self.current / self.target), self.max_decay) self.ratio = self.initial_ratio * decay return self.recorded_price * (1 + self.ratio) class PnLDecayTrailingStopTracker(DecayTrailingStopTracker): @property def current(self): pos = self.tracking_position pnl = (self.recorded_price / pos.average_price - 1) * sign(pos.shares) pnl = max(pnl, 0) if self.target > 0 else min(pnl, 0) return pnl class PnLDecayTrailingStopModel(StopModel): """ Exponential decay to the stop ratio: `ratio * decay_rate ^ (PnL% / PnL_target%)`. If it's stop gain model, `PnL_target` should be Loss Target (negative). So, the lower the `ratio` when PnL% approaches the target, and if PnL% exceeds PnL_target%, any small opposite changes will trigger stop. """ def __init__(self, ratio: float, pnl_target: float, callback=None, decay_rate=0.05, max_decay=0): super().__init__(ratio, callback) self.decay_rate = decay_rate self.pnl_target = pnl_target self.max_decay = max_decay def new_tracker(self, current_price, inverse): ratio = -self.ratio if inverse else self.ratio return PnLDecayTrailingStopTracker( current_price, ratio, self.pnl_target, self.decay_rate, self.max_decay, self.callback) class TimeDecayTrailingStopTracker(DecayTrailingStopTracker): @property def current(self): pos = self.tracking_position return pos.period class TimeDecayTrailingStopModel(StopModel): def __init__(self, ratio: float, period_target: 'pd.Timedelta', callback=None, decay_rate=0.05, max_decay=0): super().__init__(ratio, callback) self.decay_rate = decay_rate self.period_target = period_target self.max_decay = max_decay def new_tracker(self, current_price, inverse): ratio = -self.ratio if inverse else self.ratio return TimeDecayTrailingStopTracker( current_price, ratio, self.period_target, self.decay_rate, self.max_decay, self.callback)
conan/recipes/android-sdk-tools/conanfile.py
alexa/aac-sdk
139
12777170
<reponame>alexa/aac-sdk from conans import ConanFile, tools, RunEnvironment import os, logging class AndroidSdkToolsConanFile(ConanFile): name = "android-sdk-tools" version = "4.0" user = "aac-sdk" channel = "stable" no_copy_source = True exports_sources = ["cmake-wrapper.cmd", "cmake-wrapper"] settings = "os", "arch", "compiler", "build_type" requires = ["zulu-openjdk/11.0.8"] options = { "sdk_version": "ANY", "ndk_version": "ANY", "android_stl": ["c++_shared","c++_static"] } default_options = { "sdk_version": "7302050", "ndk_version": "20.0.5594570", "android_stl": "c++_shared" } @staticmethod def chmod_plus_x(filename): if os.name == "posix": os.chmod(filename, os.stat(filename).st_mode | 0o111) def fix_permissions(self,root_folder): if os.name != "posix": return for root, _, files in os.walk(root_folder): for filename in files: filename = os.path.join(root, filename) with open(filename, "rb") as f: sig = f.read(4) if type(sig) is str: sig = [ord(s) for s in sig] else: sig = [s for s in sig] if len(sig) > 2 and sig[0] == 0x23 and sig[1] == 0x21: logging.info(f"chmod on script file: {filename}") self.chmod_plus_x(filename) elif sig == [0x7F, 0x45, 0x4C, 0x46]: logging.info(f"chmod on ELF file: {filename}") self.chmod_plus_x(filename) elif sig == [0xCA, 0xFE, 0xBA, 0xBE] or \ sig == [0xBE, 0xBA, 0xFE, 0xCA] or \ sig == [0xFE, 0xED, 0xFA, 0xCF] or \ sig == [0xCF, 0xFA, 0xED, 0xFE] or \ sig == [0xFE, 0xEF, 0xFA, 0xCE] or \ sig == [0xCE, 0xFA, 0xED, 0xFE]: logging.info(f"chmod on Mach-O file: {filename}") self.chmod_plus_x(filename) @property def _build_os(self): settings_build = getattr(self,"settings_build",None) return settings_build.os if settings_build else self.settings.os def source(self): if self._build_os == "Macos": package = f"commandlinetools-mac-{self.options.sdk_version}_latest" elif self._build_os == "Linux": package = f"commandlinetools-linux-{self.options.sdk_version}_latest" else: raise Exception( f"settings.os not supported: {self._build_os}" ) #download the command line tools package tools.get( f"https://dl.google.com/android/repository/{package}.zip" ) def package(self): self.copy( "*", src="cmdline-tools", dst="cmdline-tools" ) self.copy( "cmake-wrapper.cmd" ) self.copy( "cmake-wrapper" ) # fix executable permisions for command line tools self.fix_permissions(self.package_folder) # check the license -- needs to be accepted once sdk_manager = os.path.join( self.package_folder, "cmdline-tools", "bin", "sdkmanager" ) auto_accept_licenses = os.getenv("BUILDER_ACCEPT_LICENSES", "False").lower() == "true" env_run = RunEnvironment(self) with tools.environment_append( env_run.vars ): # check the license -- needs to be accepted once check_yes_opt = f"yes | {sdk_manager}" if auto_accept_licenses else sdk_manager self.run( f"{check_yes_opt} --sdk_root={self.package_folder} --licenses", run_environment=True ) # install android sdk self.run( f"{sdk_manager} --sdk_root={self.package_folder} 'platform-tools' 'platforms;android-{self.settings_target.os.api_level}'", run_environment=True ) # install android ndk self.run( f"{sdk_manager} --sdk_root={self.package_folder} --install 'ndk;{self.options.ndk_version}'", run_environment=True ) @property def _platform(self): return {"Windows": "windows", "Macos": "darwin", "Linux": "linux"}.get(str(self._build_os)) @property def _android_abi(self): return {"x86": "x86", "x86_64": "x86_64", "armv7hf": "armeabi-v7a", "armv8": "arm64-v8a"}.get(str(self.settings_target.arch)) @property def _llvm_triplet(self): arch = {'armv7hf': 'arm', 'armv8': 
'aarch64', 'x86': 'i686', 'x86_64': 'x86_64'}.get(str(self.settings_target.arch)) abi = 'androideabi' if self.settings_target.arch == 'armv7hf' else 'android' return f"{arch}-linux-{abi}" @property def _clang_triplet(self): arch = {'armv7hf': 'armv7a', 'armv8': 'aarch64', 'x86': 'i686', 'x86_64': 'x86_64'}.get(str(self.settings_target.arch)) abi = 'androideabi' if self.settings_target.arch == 'armv7hf' else 'android' return f"{arch}-linux-{abi}" @property def _sdk_home(self): return os.path.join( self.package_folder ) @property def _ndk_home(self): return os.path.join( self.package_folder, "ndk", str(self.options.ndk_version) ) @property def _ndk_root(self): return os.path.join(self._ndk_home, "toolchains", "llvm", "prebuilt", f"{self._platform}-x86_64") def _tool_name(self, tool): if 'clang' in tool: suffix = '.cmd' if self._build_os == 'Windows' else '' return f"{self._clang_triplet}{self.settings_target.os.api_level}-{tool}{suffix}" else: suffix = '.exe' if self._build_os == 'Windows' else '' return f"{self._llvm_triplet}-{tool}{suffix}" def _define_tool_var(self, name, value): ndk_bin = os.path.join(self._ndk_root, 'bin') path = os.path.join(ndk_bin, self._tool_name(value)) logging.info(f"Creating {name} environment variable: {path}") return path def package_info(self): # set the android sdk environment variables logging.info(f"Creating ANDROID_SDK_ROOT environment variable: {self._sdk_home}") self.env_info.ANDROID_SDK_ROOT = self._sdk_home # test shall pass, so this runs also in the build as build requirement context # ndk-build: https://developer.android.com/ndk/guides/ndk-build self.env_info.PATH.append( self._ndk_home ) # You should use the ANDROID_NDK_ROOT environment variable to indicate where the NDK is located. # That's what most NDK-related scripts use (inside the NDK, and outside of it). # https://groups.google.com/g/android-ndk/c/qZjhOaynHXc logging.info(f"Creating ANDROID_NDK_ROOT environment variable: {self._ndk_home}") self.env_info.ANDROID_NDK_ROOT = self._ndk_home # Gradle is complaining about the ANDROID_NDK_HOME environment variable: # WARNING: Support for ANDROID_NDK_HOME is deprecated and will be removed in the future. # Use android.ndkVersion in build.gradle instead. 
# logging.info(f"Creating ANDROID_NDK_HOME environment variable: {self._ndk_home}") # self.env_info.ANDROID_NDK_HOME = self._ndk_home logging.info(f"Creating NDK_ROOT environment variable: {self._ndk_root}") self.env_info.NDK_ROOT = self._ndk_root logging.info(f"Creating CHOST environment variable: {self._llvm_triplet}") self.env_info.CHOST = self._llvm_triplet ndk_sysroot = os.path.join(self._ndk_root, 'sysroot') logging.info(f"Creating CONAN_CMAKE_FIND_ROOT_PATH environment variable: {ndk_sysroot}") self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = ndk_sysroot logging.info(f"Creating SYSROOT environment variable: {ndk_sysroot}") self.env_info.SYSROOT = ndk_sysroot logging.info(f"Creating self.cpp_info.sysroot: {ndk_sysroot}") self.cpp_info.sysroot = ndk_sysroot logging.info(f"Creating ANDROID_NATIVE_API_LEVEL environment variable: {self.settings_target.os.api_level}") self.env_info.ANDROID_NATIVE_API_LEVEL = str(self.settings_target.os.api_level) self.chmod_plus_x(os.path.join(self.package_folder, "cmake-wrapper")) cmake_wrapper = "cmake-wrapper.cmd" if self._build_os == "Windows" else "cmake-wrapper" cmake_wrapper = os.path.join(self.package_folder, cmake_wrapper) logging.info(f"Creating CONAN_CMAKE_PROGRAM environment variable: {cmake_wrapper}") self.env_info.CONAN_CMAKE_PROGRAM = cmake_wrapper toolchain = os.path.join(self._ndk_home, "build", "cmake", "android.toolchain.cmake") logging.info(f"Creating CONAN_CMAKE_TOOLCHAIN_FILE environment variable: {toolchain}") self.env_info.CONAN_CMAKE_TOOLCHAIN_FILE = toolchain self.env_info.CC = self._define_tool_var('CC', 'clang') self.env_info.CXX = self._define_tool_var('CXX', 'clang++') self.env_info.LD = self._define_tool_var('LD', 'ld') self.env_info.AR = self._define_tool_var('AR', 'ar') self.env_info.AS = self._define_tool_var('AS', 'as') self.env_info.RANLIB = self._define_tool_var('RANLIB', 'ranlib') self.env_info.STRIP = self._define_tool_var('STRIP', 'strip') self.env_info.ADDR2LINE = self._define_tool_var('ADDR2LINE', 'addr2line') self.env_info.NM = self._define_tool_var('NM', 'nm') self.env_info.OBJCOPY = self._define_tool_var('OBJCOPY', 'objcopy') self.env_info.OBJDUMP = self._define_tool_var('OBJDUMP', 'objdump') self.env_info.READELF = self._define_tool_var('READELF', 'readelf') self.env_info.ELFEDIT = self._define_tool_var('ELFEDIT', 'elfedit') self.env_info.ANDROID_PLATFORM = f"android-{self.settings_target.os.api_level}" self.env_info.ANDROID_TOOLCHAIN = "clang" self.env_info.ANDROID_ABI = self._android_abi self.env_info.ANDROID_STL = f"{self.options.android_stl}" # set the stl shared lib path if specified by the android_stl option if self.options.android_stl == "c++_shared": self.env_info.ANDROID_STL_SHARED_LIB = f"{os.path.join(ndk_sysroot, 'usr', 'lib', self._llvm_triplet, 'libc++_shared.so')}" logging.info(f"Creating ANDROID_STL_SHARED_LIB environment variable: {self.env_info.ANDROID_STL_SHARED_LIB}") self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "BOTH" self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = "BOTH" self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = "BOTH" self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = "BOTH"
Skoarcery/factoary/Code_Parser_Py.py
sofakid/Skoarcery
343
12777248
<filename>Skoarcery/factoary/Code_Parser_Py.py import unittest from Skoarcery import langoids, terminals, nonterminals, dragonsets, parsetable, emissions from Skoarcery.langoids import Terminal, Nonterminal class Code_Parser_Py(unittest.TestCase): def setUp(self): terminals.init() nonterminals.init() langoids.init() dragonsets.init() parsetable.init() emissions.init() def test_PY_rdpp(self): from Skoarcery.dragonsets import FIRST, FOLLOW from Skoarcery.terminals import Empty fd = open("SkoarPyon/rdpp.py", "w") PY = emissions.PY PY.fd = fd # Header # Imports # class SkoarParseException # class SkoarParser: # __init__ # fail self.code_start() PY.tab += 1 N = nonterminals.nonterminals.values() # precompute desirables PY.method("init_desirables") for A in N: R = A.production_rules PY.nl() PY.cmt(str(A)) # each production for P in R: if P.derives_empty: continue # A -> alpha alpha = P.production desires = FIRST(alpha) if Empty in desires: desires.discard(Empty) desires.update(FOLLOW(A)) i = 0 n = len(desires) PY.dict_set("self.desirables", str(P), "[", end="") for toke in desires: PY.raw(toke.toker_name) i += 1 if i != n: if i % 5 == 0: PY.raw(",\n") PY.stmt(" ", end="") else: PY.raw(", ") else: PY.raw("]\n") PY.end() # write each nonterminal as a function for A in N: R = A.production_rules #PY.cmt(str(A)) PY.stmt("def " + A.name + "(self, parent):") PY.tab += 1 PY.stmt("self.tab += 1") if A.intermediate: PY.var("noad", "parent") else: PY.var("noad", PY.v_new("SkoarNoad", PY.v_sym(A.name), "parent")) PY.nl() #PY.code_line("print('" + A.name + "')") for P in R: if P.derives_empty: continue # A -> alpha alpha = P.production PY.stmt("desires = " + PY.v_dict_get("self.desirables", str(P))) PY.if_("self.toker.sees(desires)") #PY.print(str(P)) for x in alpha: if isinstance(x, Terminal): PY.stmt("noad.add_toke('" + x.toker_name + "', self.toker.burn(" + x.toker_name + "))") #PY.print("burning: " + x.name) else: if x.intermediate: PY.stmt("self." + x.name + "(noad)") else: PY.stmt("noad.add_noad(self." + x.name + "(noad))") else: PY.return_("noad") PY.tab -= 1 PY.nl() if A.derives_empty: PY.cmt("<e>") #PY.print("burning empty") PY.return_("noad") else: PY.cmt("Error State") PY.stmt("self.fail()") PY.tab -= 1 PY.nl() PY.tab -= 1 fd.close() def code_start(self): from Skoarcery.terminals import Empty PY = emissions.PY PY.file_header("rdpp", "PyRDPP - Create Recursive Descent Predictive Parser") s = "from Skoarcery.SkoarPyon.apparatus import SkoarNoad\n"\ "from Skoarcery.SkoarPyon.lex import " T = terminals.tokens.values() n = len(T) i = 0 for t in T: if t == Empty: n -= 1 continue s += t.toker_name i += 1 if i < n: if i % 5 == 0: s += ", \\\n " else: s += ", " PY.raw(s + """ class SkoarParseException(Exception): pass class SkoarParser: def __init__(self, runtime): self.runtime = runtime self.toker = runtime.toker self.tab = 0 self.desirables = dict() self.init_desirables() def fail(self): self.toker.dump() raise SkoarParseException @property def tabby(self): if self.tab == 0: return "" return ("{:>" + str(self.tab * 2) + "}").format(" ") def print(self, line, end): print(self.tabby + line, end=end) """)
httpolice/syntax/rfc3986.py
vfaronov/httpolice
1,027
12777281
from httpolice.citation import RFC from httpolice.parse import (auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst) from httpolice.syntax.common import ALPHA, DIGIT, HEXDIG pct_encoded = '%' + HEXDIG + HEXDIG > auto sub_delims = (literal('!') | '$' | '&' | "'" | '(' | ')' | '*' | '+' | ',' | ';' | '=') > auto unreserved = ALPHA | DIGIT | '-' | '.' | '_' | '~' > auto pchar = unreserved | sub_delims | ':' | '@' | pct_encoded > auto segment = string(pchar) > auto segment_nz = string1(pchar) > auto segment_nz_nc = string1(unreserved | sub_delims | '@' | pct_encoded) > auto scheme = ALPHA + string(ALPHA | DIGIT | '+' | '-' | '.') > pivot userinfo = string(unreserved | sub_delims | ':' | pct_encoded) > pivot dec_octet = (DIGIT | octet_range(0x31, 0x39) + DIGIT | '1' + DIGIT + DIGIT | '2' + octet_range(0x30, 0x34) + DIGIT | '25' + octet_range(0x30, 0x35)) > auto IPv4address = (dec_octet + '.' + dec_octet + '.' + dec_octet + '.' + dec_octet) > pivot h16 = string_times(1, 4, HEXDIG) > auto ls32 = (h16 + ':' + h16) | IPv4address > auto IPv6address = ( string_times(6, 6, h16 + ':') + ls32 | '::' + string_times(5, 5, h16 + ':') + ls32 | maybe_str(h16) + '::' + string_times(4, 4, h16 + ':') + ls32 | maybe_str(string_times(0, 1, h16 + ':') + h16) + '::' + string_times(3, 3, h16 + ':') + ls32 | maybe_str(string_times(0, 2, h16 + ':') + h16) + '::' + string_times(2, 2, h16 + ':') + ls32 | maybe_str(string_times(0, 3, h16 + ':') + h16) + '::' + h16 + ':' + ls32 | maybe_str(string_times(0, 4, h16 + ':') + h16) + '::' + ls32 | maybe_str(string_times(0, 5, h16 + ':') + h16) + '::' + h16 | maybe_str(string_times(0, 6, h16 + ':') + h16) + '::' ) > pivot IPvFuture = ('v' + string1(HEXDIG) + '.' + string1(unreserved | sub_delims | ':')) > pivot # As updated by RFC 6874 ZoneID = string1(unreserved | pct_encoded) > pivot IPv6addrz = IPv6address + '%25' + ZoneID > pivot IP_literal = '[' + (IPv6address | IPv6addrz | IPvFuture) + ']' > pivot reg_name = string(unreserved | sub_delims | pct_encoded) > pivot host = IP_literal | IPv4address | reg_name > pivot port = string(DIGIT) > pivot authority = maybe_str(userinfo + '@') + host + maybe_str(':' + port) > pivot path_abempty = string('/' + segment) > auto path_absolute = '/' + maybe_str(segment_nz + string('/' + segment)) > auto path_noscheme = segment_nz_nc + string('/' + segment) > auto path_rootless = segment_nz + string('/' + segment) > auto path_empty = subst(u'') << empty > auto hier_part = ('//' + authority + path_abempty | path_absolute | path_rootless | path_empty) > pivot query = string(pchar | '/' | '?') > pivot fragment = string(pchar | '/' | '?') > pivot absolute_URI = scheme + ':' + hier_part + maybe_str('?' + query) > pivot relative_part = ('//' + authority + path_abempty | path_absolute | path_noscheme | path_empty) > pivot URI = (scheme + ':' + hier_part + maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot relative_ref = (relative_part + maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot URI_reference = URI | relative_ref > pivot fill_names(globals(), RFC(3986))
plot_sweep.py
yinghai/benchmark
384
12777283
<filename>plot_sweep.py import argparse import json # import pandas as pd import os # import sys # import re import yaml import itertools # from bokeh.layouts import column, row, layout, gridplot # from bokeh.plotting import figure, output_file, show # from bokeh.sampledata.autompg import autompg # from bokeh.transform import jitter from bokeh.palettes import Category10 from bokeh.models import HoverTool, Div, Range1d, HoverTool from bokeh.plotting import figure, output_file, show # from bokeh.models import Legend # from bokeh.models import ColumnDataSource, CategoricalTicker, Div # from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn # from bokeh.transform import jitter from collections import defaultdict from datetime import datetime as dt from torchbenchmark.util.data import load_data_dir, load_data_files from torchbenchmark.score.compute_score import TorchBenchScore TORCHBENCH_SCORE_VERSION = "v1" if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("data_dir", nargs='+', help="One or more directories containing benchmark json files. " "Each directory will be plotted as a separate series. " "By default, the first file in the first directory will be used" " to generate a score configuration with a target of 1000," " and everything else will be relative to that.") parser.add_argument("--output_html", default='plot.html', help="html file to write") parser.add_argument("--plot_all", action='store_true', help="Plots the scores for each configuration") parser.add_argument("--reference_json", required=True, help="file defining score norm values, usually first json in first data_dir") args = parser.parse_args() plot_height = 800 plot_width = 1000 assert len(args.data_dir) > 0, "Must provide at least one data directory" compare_datasets = [load_data_dir(d, most_recent_files=-1) for d in args.data_dir] with open(args.reference_json) as f: ref_data = json.load(f) plot_all = args.plot_all score_config = TorchBenchScore(ref_data=ref_data, version=TORCHBENCH_SCORE_VERSION) p = figure(plot_width=plot_width, plot_height=plot_height, x_axis_type='datetime') xs = [] ys = [] zs = [] max_score = 0 for d in compare_datasets: scores = {} scores_db = defaultdict(list) for i in range(len(d._json_raw)): data = d._json_raw[i] pytorch_ver = data['machine_info']['pytorch_version'] # Slice the portion after '+' pytorch_ver_cuda_loc = pytorch_ver.rfind('+') pytorch_ver = pytorch_ver[:pytorch_ver_cuda_loc] date = dt.strptime(pytorch_ver[pytorch_ver.index("dev") + len("dev"):], "%Y%m%d") score = score_config.compute_score(data) scores[date] = score dates = [] total_scores = [] all_scores = [] for date in sorted(scores.keys()): dates.append(date) total_scores.append(scores[date]["total"]) max_score = max(max_score, max(total_scores)) all_scores.append(scores[date]) xs.append(dates) ys.append(total_scores) if plot_all: zs.append(all_scores) colors = itertools.cycle(Category10[10]) basenames = map(os.path.basename, args.data_dir) if plot_all: for x, z in zip(xs, zs): basename = next(basenames) color = next(colors) configs = z[0].keys() for config in configs: if not ("subscore" in config or "total" in config): continue color = next(colors) scores = [] for s in z: scores.append(s[config]) p.line(x, scores, color=color, line_width=2, legend_label=basename + '-' + config) p.legend.click_policy = "hide" else: for x, y, color in zip(xs, ys, colors): p.line(x, y, color=color, line_width=2, legend_label=next(basenames)) for x, y, color in zip(xs, ys, 
colors): p.circle(x, y, color=color) p.legend.location = "bottom_right" p.y_range = Range1d(0, max_score * 1.25) p.add_tools(HoverTool( tooltips=[ ('date', '@x{%F}'), ('score', '@y{0.00 a}'), ], formatters={ '@x': 'datetime', '@y': 'numeral', }, )) output_file(args.output_html) show(p)
package/awesome_streamlit/experiments/__init__.py
R-fred/awesome-streamlit
1,194
12777301
"""Imports that should be exposed outside the package""" from .hello_world import write as write_hello_world
algs4/symbol_graph.py
dumpmemory/algs4-py
230
12777330
<reponame>dumpmemory/algs4-py """ Execution: python symbol_graph.py filename.txt delimiter Data files: https://algs4.cs.princeton.edu/41graph/routes.txt https://algs4.cs.princeton.edu/41graph/movies.txt https://algs4.cs.princeton.edu/41graph/moviestiny.txt https://algs4.cs.princeton.edu/41graph/moviesG.txt https://algs4.cs.princeton.edu/41graph/moviestopGrossing.txt % python symbol_graph.py routes.txt " " JFK MCO ATL ORD LAX PHX LAS % python symbol_graph.py movies.txt "/" Tin Men (1987) Hershey, Barbara Geppi, Cindy <NAME> (II) Herr, Marcia ... Blumenfeld, Alan DeBoy, David Bacon, Kevin Woodsman, The (2004) Wild Things (1998) Where the Truth Lies (2005) Tremors (1990) ... Apollo 13 (1995) Animal House (1978) Assumes that input file is encoded using UTF-8. % iconv -f ISO-8859-1 -t UTF-8 movies-iso8859.txt > movies.txt """ from algs4.st import ST from algs4.graph import Graph class SymbolGraph: def __init__(self, stream, sp): self.st = ST() for line in open(stream): a = line.strip().split(sp) for i in range(len(a)): if not self.st.contains(a[i]): self.st.put(a[i], self.st.size()) self.keys = ["" for _ in range(self.st.size())] for key in self.st.keys(): self.keys[self.st.get(key)] = key self.G = Graph(self.st.size()) for line in open(stream): a = line.strip().split(sp) v = self.st.get(a[0]) for i in range(1, len(a)): self.G.add_edge(v, self.st.get(a[i])) def contains(self, s): return self.st.contains(s) def index(self, s): return self.st.get(s) def name(self, v): return self.keys[v] def graph(self): return self.G if __name__ == "__main__": import sys filename, delimiter = sys.argv[1], sys.argv[2] sg = SymbolGraph(filename, delimiter) graph = sg.graph() for line in sys.stdin: source = line.strip() if sg.contains(source): s = sg.index(source) for v in graph.adj[s]: print(" ", sg.name(v), end='') else: print("input not contains source: ", source)
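A minimal interactive usage sketch for the SymbolGraph class above, assuming the space-delimited routes.txt from the docstring; the file path, import path, and "JFK" key are illustrative only.

# Hypothetical use of SymbolGraph outside the __main__ block (import path assumed from the repo layout).
from algs4.symbol_graph import SymbolGraph

sg = SymbolGraph("routes.txt", " ")
graph = sg.graph()
if sg.contains("JFK"):
    # print every airport adjacent to JFK in the symbol graph
    for v in graph.adj[sg.index("JFK")]:
        print(" ", sg.name(v))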
python/pmercury/protocols/dhcp.py
raj-apoorv/mercury
299
12777333
""" Copyright (c) 2019 Cisco Systems, Inc. All rights reserved. License at https://github.com/cisco/mercury/blob/master/LICENSE """ import os import sys import functools from socket import AF_INET, AF_INET6, inet_ntop sys.path.append(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../') from pmercury.protocols.protocol import Protocol MAX_CACHED_RESULTS = 2**24 class DHCP(Protocol): def __init__(self, fp_database=None, config=None): # populate fingerprint databases self.fp_db = None DHCP.static_data = set([0x35, 0x37]) DHCP.contextual_data = {0x03: ('router',lambda x: inet_ntop(AF_INET, x)), 0x06: ('domain_name_server',lambda x: inet_ntop(AF_INET, x)), 0x0c: ('hostname',lambda x: x.decode()), 0x0f: ('domain_name',lambda x: x.decode()), 0x32: ('requested_ip',lambda x: inet_ntop(AF_INET, x)), 0x3c: ('vendor_class_id',lambda x: x.decode())} @staticmethod def proto_identify(data, offset, data_len): if data_len < 230: return False if (data[offset] != 0x01 or data[offset+236] != 0x63 or data[offset+237] != 0x82 or data[offset+238] != 0x53 or data[offset+239] != 0x63): return False return True @staticmethod def fingerprint(data, offset, data_len): hardware_address_length = data[offset + 2] cmac = data[offset+28:offset+28+hardware_address_length].hex() context = [{'name': 'client_mac_address', 'data': '%s' % ':'.join(a+b for a,b in zip(cmac[::2], cmac[1::2]))}] offset += 240 fp_ = '(' while offset < data_len: kind = data[offset] if kind == 0xff or kind == 0x00: # End / Padding fp_ += '(%02x)' % kind break length = data[offset+1] if kind in DHCP.contextual_data: name_, transform_ = DHCP.contextual_data[kind] context.append({'name':name_, 'data':transform_(data[offset+2:offset+2+length])}) if offset+length+2 >= data_len: return None if kind not in DHCP.static_data: fp_ += '(%02x)' % kind offset += length+2 continue fp_ += '(%s)' % data[offset:offset+2+length].hex() offset += length+2 fp_ += ')' return fp_, context
astroNN/__init__.py
igomezv/astroNN
156
12777350
r""" Deep Learning for Astronomers with Tensorflow """ from pkg_resources import get_distribution version = __version__ = get_distribution('astroNN').version
renderer/render_utils.py
archonic/frankmocap
1,612
12777443
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np # vertices: frames x meshVerNum x 3 # trifaces: facePolygonNum x 3 = 22800 x 3 def ComputeNormal(vertices, trifaces): if vertices.shape[0] > 5000: print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) ) return #compute vertex Normals for all frames U = vertices[:,trifaces[:,1],:] - vertices[:,trifaces[:,0],:] #frames x faceNum x 3 V = vertices[:,trifaces[:,2],:] - vertices[:,trifaces[:,1],:] #frames x faceNum x 3 originalShape = U.shape #remember: frames x faceNum x 3 U = np.reshape(U, [-1,3]) V = np.reshape(V, [-1,3]) faceNormals = np.cross(U,V) #frames x 13776 x 3 from sklearn.preprocessing import normalize if np.isnan(np.max(faceNormals)): print('ComputeNormal: Warning nan is detected {0}') return faceNormals = normalize(faceNormals) faceNormals = np.reshape(faceNormals, originalShape) if False: #Slow version vertex_normals = np.zeros(vertices.shape) #(frames x 11510) x 3 for fIdx, vIdx in enumerate(trifaces[:,0]): vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] for fIdx, vIdx in enumerate(trifaces[:,1]): vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] for fIdx, vIdx in enumerate(trifaces[:,2]): vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] else: #Faster version # Computing vertex normals, much faster (and obscure) replacement index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T index_sorted = index[index[:,0].argsort()] vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0], np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0], return_counts=True)[1])[:-1])))[None, :] vertex_normals = vertex_normals.astype(np.float64) originalShape = vertex_normals.shape vertex_normals = np.reshape(vertex_normals, [-1,3]) vertex_normals = normalize(vertex_normals) vertex_normals = np.reshape(vertex_normals,originalShape) return vertex_normals def ComputeNormal_gpu(vertices, trifaces): import torch import torch.nn.functional as F if vertices.shape[0] > 5000: print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) ) return #compute vertex Normals for all frames #trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda() vertices_cuda = torch.from_numpy(vertices.astype(np.float32)).cuda() U_cuda = vertices_cuda[:,trifaces[:,1],:] - vertices_cuda[:,trifaces[:,0],:] #frames x faceNum x 3 V_cuda = vertices_cuda[:,trifaces[:,2],:] - vertices_cuda[:,trifaces[:,1],:] #frames x faceNum x 3 originalShape = list(U_cuda.size()) #remember: frames x faceNum x 3 U_cuda = torch.reshape(U_cuda, [-1,3])#.astype(np.float32) V_cuda = torch.reshape(V_cuda, [-1,3])#.astype(np.float32) faceNormals = U_cuda.cross(V_cuda) faceNormals = F.normalize(faceNormals,dim=1) faceNormals = torch.reshape(faceNormals, originalShape) # trifaces has duplicated vertex index, so cannot be parallazied # vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3 # for fIdx, vIdx in enumerate(trifaces[:,0]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # for fIdx, vIdx in enumerate(trifaces[:,1]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # for fIdx, vIdx in enumerate(trifaces[:,2]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # Computing vertex normals, much faster (and obscure) replacement index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T index_sorted = index[index[:,0].argsort()] vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0], np.concatenate(([0], 
np.cumsum(np.unique(index_sorted[:, 0], return_counts=True)[1])[:-1])))[None, :] vertex_normals = torch.from_numpy(vertex_normals).float().cuda() vertex_normals = F.normalize(vertex_normals,dim=2) vertex_normals = vertex_normals.data.cpu().numpy() #(batch, chunksize, dim) return vertex_normals
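The np.add.reduceat replacement used above is admittedly obscure; a minimal, self-contained sketch of the same accumulation pattern on a toy mesh (two triangles sharing an edge, one frame, made-up normals) might look like this.

# Toy illustration of the vertex-normal accumulation trick from ComputeNormal.
import numpy as np

trifaces = np.array([[0, 1, 2], [2, 1, 3]])          # 2 triangles over 4 vertices
face_normals = np.array([[0.0, 0.0, 1.0],            # one (already normalized) normal per face
                         [0.0, 1.0, 0.0]])[None, :]  # shape: frames x faceNum x 3

# Pair every vertex index with the face it belongs to, then sort by vertex index.
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:, 0].argsort()]

# Sum the face normals of all faces touching each vertex in a single reduceat call.
counts = np.unique(index_sorted[:, 0], return_counts=True)[1]
starts = np.concatenate(([0], np.cumsum(counts)[:-1]))
vertex_normals = np.add.reduceat(face_normals[:, index_sorted[:, 1], :][0], starts)[None, :]

print(vertex_normals)  # vertex 0 -> face 0 only; vertices 1 and 2 -> both faces; vertex 3 -> face 1 only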
rlgraph/agents/sac_agent.py
RLGraph/RLGraph
290
12777480
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import, division, print_function import numpy as np from rlgraph import get_backend from rlgraph.agents import Agent from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace from rlgraph.spaces.space_utils import sanity_check_space from rlgraph.utils import RLGraphError from rlgraph.utils.decorators import rlgraph_api, graph_fn from rlgraph.utils.ops import flatten_op, DataOpTuple from rlgraph.utils.util import strip_list, force_list if get_backend() == "tf": import tensorflow as tf elif get_backend() == "pytorch": import torch class SyncSpecification(object): """Describes a synchronization schedule, used to update the target value weights. The target values are gradually updates using exponential moving average as suggested by the paper.""" def __init__(self, sync_interval=None, sync_tau=None): """ Arguments: sync_interval: How often to update the target. sync_tau: The smoothing constant to use in the averaging. Setting to 1 replaces the values each iteration. """ self.sync_interval = sync_interval self.sync_tau = sync_tau class SACAgentComponent(Component): def __init__(self, agent, policy, q_function, preprocessor, memory, discount, initial_alpha, target_entropy, optimizer, vf_optimizer, alpha_optimizer, q_sync_spec, num_q_functions=2): super(SACAgentComponent, self).__init__(nesting_level=0) self.agent = agent self._policy = policy self._preprocessor = preprocessor self._memory = memory self._q_functions = [q_function] self._q_functions += [q_function.copy(scope="{}-{}".format(q_function.scope, i + 1), trainable=True) for i in range(num_q_functions - 1)] # Set number of return values for get_q_values graph_fn. self.graph_fn_num_outputs["_graph_fn_get_q_values"] = num_q_functions for q in self._q_functions: # TODO: is there a better way to do this? if "synchronizable" not in q.sub_components: q.add_components(Synchronizable(), expose_apis="sync") self._target_q_functions = [q.copy(scope="target-" + q.scope, trainable=True) for q in self._q_functions] for target_q in self._target_q_functions: # TODO: is there a better way to do this? 
if "synchronizable" not in target_q.sub_components: target_q.add_components(Synchronizable(), expose_apis="sync") self._optimizer = optimizer self.vf_optimizer = vf_optimizer self.alpha_optimizer = alpha_optimizer self.initial_alpha = initial_alpha self.log_alpha = None self.target_entropy = target_entropy self.loss_function = SACLossFunction(target_entropy=target_entropy, discount=discount, num_q_functions=num_q_functions) memory_items = ["states", "actions", "rewards", "next_states", "terminals"] self._merger = ContainerMerger(*memory_items) q_names = ["q_{}".format(i) for i in range(len(self._q_functions))] self._q_vars_merger = ContainerMerger(*q_names, scope="q_vars_merger") self.add_components(policy, preprocessor, memory, self._merger, self.loss_function, optimizer, vf_optimizer, self._q_vars_merger) # , self._q_vars_splitter) self.add_components(*self._q_functions) self.add_components(*self._target_q_functions) if self.alpha_optimizer is not None: self.add_components(self.alpha_optimizer) self.steps_since_last_sync = None self.q_sync_spec = q_sync_spec self.env_action_space = None self.episode_reward = None def check_input_spaces(self, input_spaces, action_space=None): for s in ["states", "actions", "env_actions", "preprocessed_states", "rewards", "terminals"]: sanity_check_space(input_spaces[s], must_have_batch_rank=True) self.env_action_space = input_spaces["env_actions"].flatten() def create_variables(self, input_spaces, action_space=None): self.steps_since_last_sync = self.get_variable("steps_since_last_sync", dtype="int", initializer=0) self.log_alpha = self.get_variable("log_alpha", dtype="float", initializer=np.log(self.initial_alpha)) self.episode_reward = self.get_variable("episode_reward", shape=(), initializer=0.0) @rlgraph_api def get_policy_weights(self): return self._policy.variables() @rlgraph_api def get_q_weights(self): merged_weights = self._q_vars_merger.merge(*[q.variables() for q in self._q_functions]) return merged_weights @rlgraph_api(must_be_complete=False) def set_policy_weights(self, weights): return self._policy.sync(weights) """ TODO: need to define the input space @rlgraph_api(must_be_complete=False) def set_q_weights(self, q_weights): split_weights = self._q_vars_splitter.call(q_weights) assert len(split_weights) == len(self._q_functions) update_ops = [q.sync(q_weights) for q_weights, q in zip(split_weights, self._q_functions)] update_ops.extend([q.sync(q_weights) for q_weights, q in zip(split_weights, self._target_q_functions)]) return tuple(update_ops) """ @rlgraph_api def preprocess_states(self, states): return self._preprocessor.preprocess(states) @rlgraph_api def insert_records(self, preprocessed_states, env_actions, rewards, next_states, terminals): records = self._merger.merge(preprocessed_states, env_actions, rewards, next_states, terminals) return self._memory.insert_records(records) @rlgraph_api def update_from_memory(self, batch_size=64, time_percentage=None): records, sample_indices, importance_weights = self._memory.get_records(batch_size) result = self.update_from_external_batch( records["states"], records["actions"], records["rewards"], records["terminals"], records["next_states"], importance_weights, time_percentage ) if isinstance(self._memory, PrioritizedReplay): update_pr_step_op = self._memory.update_records(sample_indices, result["critic_loss_per_item"]) result["update_pr_step_op"] = update_pr_step_op return result @rlgraph_api def update_from_external_batch( self, preprocessed_states, env_actions, rewards, terminals, next_states, 
importance_weights, time_percentage=None ): actions = self._graph_fn_one_hot(env_actions) actor_loss, actor_loss_per_item, critic_loss, critic_loss_per_item, alpha_loss, alpha_loss_per_item = \ self.get_losses(preprocessed_states, actions, rewards, terminals, next_states, importance_weights) policy_vars = self._policy.variables() q_vars = [q_func.variables() for q_func in self._q_functions] merged_q_vars = self._q_vars_merger.merge(*q_vars) critic_step_op = self.vf_optimizer.step(merged_q_vars, critic_loss, critic_loss_per_item, time_percentage) actor_step_op = self._optimizer.step(policy_vars, actor_loss, actor_loss_per_item, time_percentage) if self.target_entropy is not None: alpha_step_op = self._graph_fn_update_alpha(alpha_loss, alpha_loss_per_item, time_percentage) else: alpha_step_op = self._graph_fn_no_op() # TODO: optimizer for alpha sync_op = self.sync_targets() # Increase the global training step counter. alpha_step_op = self._graph_fn_training_step(alpha_step_op) return dict( actor_step_op=actor_step_op, critic_step_op=critic_step_op, sync_op=sync_op, alpha_step_op=alpha_step_op, actor_loss=actor_loss, actor_loss_per_item=actor_loss_per_item, critic_loss=critic_loss, critic_loss_per_item=critic_loss_per_item, alpha_loss=alpha_loss, alpha_loss_per_item=alpha_loss_per_item ) @graph_fn(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True) def _graph_fn_one_hot(self, key, env_actions): if isinstance(self.env_action_space[key], IntBox): env_actions = tf.one_hot(env_actions, depth=self.env_action_space[key].num_categories, axis=-1) return env_actions @graph_fn(requires_variable_completeness=True) def _graph_fn_update_alpha(self, alpha_loss, alpha_loss_per_item, time_percentage=None): alpha_step_op = self.alpha_optimizer.step( DataOpTuple([self.log_alpha]), alpha_loss, alpha_loss_per_item, time_percentage ) return alpha_step_op @rlgraph_api # `returns` are determined in ctor def _graph_fn_get_q_values(self, preprocessed_states, actions, target=False): backend = get_backend() flat_actions = flatten_op(actions) actions = [] for flat_key, action_component in self._policy.action_space.flatten().items(): actions.append(flat_actions[flat_key]) if backend == "tf": actions = tf.concat(actions, axis=-1) elif backend == "pytorch": actions = torch.cat(actions, dim=-1) q_funcs = self._q_functions if target is False else self._target_q_functions # We do not concat states yet because we might pass states through a conv stack before merging it # with actions. 
return tuple(q.state_action_value(preprocessed_states, actions) for q in q_funcs) @rlgraph_api def get_losses(self, preprocessed_states, actions, rewards, terminals, next_states, importance_weights): # TODO: internal states samples_next = self._policy.get_action_and_log_likelihood(next_states, deterministic=False) next_sampled_actions = samples_next["action"] log_probs_next_sampled = samples_next["log_likelihood"] q_values_next_sampled = self.get_q_values( next_states, next_sampled_actions, target=True ) q_values = self.get_q_values(preprocessed_states, actions) samples = self._policy.get_action_and_log_likelihood(preprocessed_states, deterministic=False) sampled_actions = samples["action"] log_probs_sampled = samples["log_likelihood"] q_values_sampled = self.get_q_values(preprocessed_states, sampled_actions) alpha = self._graph_fn_compute_alpha() return self.loss_function.loss( alpha, log_probs_next_sampled, q_values_next_sampled, q_values, log_probs_sampled, q_values_sampled, rewards, terminals ) @rlgraph_api def get_preprocessed_state_and_action(self, states, deterministic=False): preprocessed_states = self._preprocessor.preprocess(states) return self.action_from_preprocessed_state(preprocessed_states, deterministic) @rlgraph_api def action_from_preprocessed_state(self, preprocessed_states, deterministic=False): out = self._policy.get_action(preprocessed_states, deterministic=deterministic) return out["action"], preprocessed_states @rlgraph_api(requires_variable_completeness=True) def reset_targets(self): ops = (target_q.sync(q.variables()) for q, target_q in zip(self._q_functions, self._target_q_functions)) return tuple(ops) @rlgraph_api(requires_variable_completeness=True) def sync_targets(self): should_sync = self._graph_fn_get_should_sync() return self._graph_fn_sync(should_sync) @rlgraph_api def get_memory_size(self): return self._memory.get_size() @graph_fn def _graph_fn_compute_alpha(self): backend = get_backend() if backend == "tf": return tf.exp(self.log_alpha) elif backend == "pytorch": return torch.exp(self.log_alpha) # TODO: Move this into generic AgentRootComponent. 
@graph_fn def _graph_fn_training_step(self, other_step_op=None): if self.agent is not None: add_op = tf.assign_add(self.agent.graph_executor.global_training_timestep, 1) op_list = [add_op] + [other_step_op] if other_step_op is not None else [] with tf.control_dependencies(op_list): return tf.no_op() if other_step_op is None else other_step_op else: return tf.no_op() if other_step_op is None else other_step_op @graph_fn(returns=1, requires_variable_completeness=True) def _graph_fn_get_should_sync(self): if get_backend() == "tf": inc_op = tf.assign_add(self.steps_since_last_sync, 1) should_sync = inc_op >= self.q_sync_spec.sync_interval def reset_op(): op = tf.assign(self.steps_since_last_sync, 0) with tf.control_dependencies([op]): return tf.no_op() sync_op = tf.cond( pred=inc_op >= self.q_sync_spec.sync_interval, true_fn=reset_op, false_fn=tf.no_op ) with tf.control_dependencies([sync_op]): return tf.identity(should_sync) else: raise NotImplementedError("TODO") @graph_fn(returns=1, requires_variable_completeness=True) def _graph_fn_sync(self, should_sync): assign_ops = [] tau = self.q_sync_spec.sync_tau if tau != 1.0: all_source_vars = [source.get_variables(collections=None, custom_scope_separator="-") for source in self._q_functions] all_dest_vars = [destination.get_variables(collections=None, custom_scope_separator="-") for destination in self._target_q_functions] for source_vars, dest_vars in zip(all_source_vars, all_dest_vars): for (source_key, source_var), (dest_key, dest_var) in zip(sorted(source_vars.items()), sorted(dest_vars.items())): assign_ops.append(tf.assign(dest_var, tau * source_var + (1.0 - tau) * dest_var)) else: all_source_vars = [source.variables() for source in self._q_functions] for source_vars, destination in zip(all_source_vars, self._target_q_functions): assign_ops.append(destination.sync(source_vars)) assert len(assign_ops) > 0 grouped_op = tf.group(assign_ops) def assign_op(): # Make sure we are returning no_op as opposed to reference with tf.control_dependencies([grouped_op]): return tf.no_op() cond_assign_op = tf.cond(should_sync, true_fn=assign_op, false_fn=tf.no_op) with tf.control_dependencies([cond_assign_op]): return tf.no_op() @graph_fn def _graph_fn_no_op(self): return tf.no_op() @rlgraph_api def get_global_timestep(self): return self.read_variable(self.agent.graph_executor.global_timestep) @rlgraph_api def _graph_fn_update_global_timestep(self, increment): if get_backend() == "tf": add_op = tf.assign_add(self.agent.graph_executor.global_timestep, increment) return add_op elif get_backend == "pytorch": self.agent.graph_executor.global_timestep += increment return self.agent.graph_executor.global_timestep @rlgraph_api def _graph_fn_get_episode_reward(self): return self.episode_reward @rlgraph_api def _graph_fn_set_episode_reward(self, episode_reward): return tf.assign(self.episode_reward, episode_reward) class SACAgent(Agent): def __init__( self, state_space, action_space, discount=0.98, preprocessing_spec=None, network_spec=None, internal_states_space=None, policy_spec=None, value_function_spec=None, execution_spec=None, optimizer_spec=None, value_function_optimizer_spec=None, observe_spec=None, update_spec=None, summary_spec=None, saver_spec=None, auto_build=True, name="sac-agent", double_q=True, initial_alpha=1.0, gumbel_softmax_temperature=1.0, target_entropy=None, memory_spec=None, value_function_sync_spec=None ): """ This is an implementation of the Soft-Actor Critic algorithm. 
Paper: http://arxiv.org/abs/1801.01290 Args: state_space (Union[dict,Space]): Spec dict for the state Space or a direct Space object. action_space (Union[dict,Space]): Spec dict for the action Space or a direct Space object. preprocessing_spec (Optional[list,PreprocessorStack]): The spec list for the different necessary states preprocessing steps or a PreprocessorStack object itself. discount (float): The discount factor (gamma). network_spec (Optional[list,NeuralNetwork]): Spec list for a NeuralNetwork Component or the NeuralNetwork object itself. internal_states_space (Optional[Union[dict,Space]]): Spec dict for the internal-states Space or a direct Space object for the Space(s) of the internal (RNN) states. policy_spec (Optional[dict]): An optional dict for further kwargs passing into the Policy c'tor. value_function_spec (list, dict, ValueFunction): Neural network specification for baseline or instance of ValueFunction. execution_spec (Optional[dict,Execution]): The spec-dict specifying execution settings. optimizer_spec (Optional[dict,Optimizer]): The spec-dict to create the Optimizer for this Agent. value_function_optimizer_spec (dict): Optimizer config for value function optimizer. If None, the optimizer spec for the policy is used (same learning rate and optimizer type). observe_spec (Optional[dict]): Spec-dict to specify `Agent.observe()` settings. update_spec (Optional[dict]): Spec-dict to specify `Agent.update()` settings. summary_spec (Optional[dict]): Spec-dict to specify summary settings. saver_spec (Optional[dict]): Spec-dict to specify saver settings. auto_build (Optional[bool]): If True (default), immediately builds the graph using the agent's graph builder. If false, users must separately call agent.build(). Useful for debugging or analyzing components before building. name (str): Some name for this Agent object. double_q (bool): Whether to train two q networks independently. initial_alpha (float): "The temperature parameter α determines the relative importance of the entropy term against the reward". gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used for discrete actions. memory_spec (Optional[dict,Memory]): The spec for the Memory to use for the DQN algorithm. update_spec (dict): Here we can have sync_interval or sync_tau (for the value network update). """ # If VF spec is a network spec, wrap with SAC vf type. The VF must concatenate actions and states, # which can require splitting the network in the case of e.g. conv-inputs. if isinstance(value_function_spec, list): value_function_spec = dict(type="sac_value_function", network_spec=value_function_spec) self.logger.info("Using default SAC value function.") elif isinstance(value_function_spec, ValueFunction): self.logger.info("Using value function object {}".format(ValueFunction)) if policy_spec is None: # Continuous action space: Use squashed normal. # Discrete: Gumbel-softmax. 
policy_spec = dict(deterministic=False, distributions_spec=dict( bounded_distribution_type="squashed", discrete_distribution_type="gumbel_softmax", gumbel_softmax_temperature=gumbel_softmax_temperature )) super(SACAgent, self).__init__( state_space=state_space, action_space=action_space, discount=discount, preprocessing_spec=preprocessing_spec, network_spec=network_spec, internal_states_space=internal_states_space, policy_spec=policy_spec, value_function_spec=value_function_spec, execution_spec=execution_spec, optimizer_spec=optimizer_spec, value_function_optimizer_spec=value_function_optimizer_spec, observe_spec=observe_spec, update_spec=update_spec, summary_spec=summary_spec, saver_spec=saver_spec, auto_build=auto_build, name=name ) self.double_q = double_q self.target_entropy = target_entropy self.initial_alpha = initial_alpha # Assert that the synch interval is a multiple of the update_interval. if "sync_interval" in self.update_spec: if self.update_spec["sync_interval"] / self.update_spec["update_interval"] != \ self.update_spec["sync_interval"] // self.update_spec["update_interval"]: raise RLGraphError( "ERROR: sync_interval ({}) must be multiple of update_interval " "({})!".format(self.update_spec["sync_interval"], self.update_spec["update_interval"]) ) elif "sync_tau" in self.update_spec: if self.update_spec["sync_tau"] <= 0 or self.update_spec["sync_tau"] > 1.0: raise RLGraphError( "sync_tau ({}) must be in interval (0.0, 1.0]!".format(self.update_spec["sync_tau"]) ) else: self.update_spec["sync_tau"] = 0.005 # The value mentioned in the paper # Extend input Space definitions to this Agent's specific API-methods. preprocessed_state_space = self.preprocessed_state_space.with_batch_rank() reward_space = FloatBox(add_batch_rank=True) terminal_space = BoolBox(add_batch_rank=True) #self.iterations = self.update_spec["num_iterations"] self.batch_size = self.update_spec["batch_size"] float_action_space = self.action_space.with_batch_rank().map( mapping=lambda flat_key, space: space.as_one_hot_float_space() if isinstance(space, IntBox) else space ) self.input_spaces.update(dict( env_actions=self.action_space.with_batch_rank(), actions=float_action_space, preprocessed_states=preprocessed_state_space, rewards=reward_space, terminals=terminal_space, next_states=preprocessed_state_space, states=self.state_space.with_batch_rank(add_batch_rank=True), batch_size=int, importance_weights=FloatBox(add_batch_rank=True), deterministic=bool, weights="variables:{}".format(self.policy.scope) )) if value_function_sync_spec is None: value_function_sync_spec = SyncSpecification( sync_interval=self.update_spec["sync_interval"] // self.update_spec["update_interval"], sync_tau=self.update_spec["sync_tau"] if "sync_tau" in self.update_spec else 5e-3 ) self.memory = Memory.from_spec(memory_spec) self.alpha_optimizer = self.optimizer.copy(scope="alpha-" + self.optimizer.scope) if self.target_entropy is not None else None self.root_component = SACAgentComponent( agent=self, policy=self.policy, q_function=self.value_function, preprocessor=self.preprocessor, memory=self.memory, discount=self.discount, initial_alpha=self.initial_alpha, target_entropy=target_entropy, optimizer=self.optimizer, vf_optimizer=self.value_function_optimizer, alpha_optimizer=self.alpha_optimizer, q_sync_spec=value_function_sync_spec, num_q_functions=2 if self.double_q is True else 1 ) extra_optimizers = [self.value_function_optimizer] if self.alpha_optimizer is not None: extra_optimizers.append(self.alpha_optimizer) self.build_options = 
dict(optimizers=extra_optimizers) if self.auto_build: self._build_graph( [self.root_component], self.input_spaces, optimizer=self.optimizer, batch_size=self.update_spec["batch_size"], build_options=self.build_options ) self.graph_built = True def set_weights(self, policy_weights, value_function_weights=None): # TODO: Overrides parent but should this be policy of value function? return self.graph_executor.execute((self.root_component.set_policy_weights, policy_weights)) def get_weights(self): return dict(policy_weights=self.graph_executor.execute(self.root_component.get_policy_weights)) def get_action(self, states, internals=None, use_exploration=True, apply_preprocessing=True, extra_returns=None, time_percentage=None): # TODO: common pattern - move to Agent """ Args: extra_returns (Optional[Set[str],str]): Optional string or set of strings for additional return values (besides the actions). Possible values are: - 'preprocessed_states': The preprocessed states after passing the given states through the preprocessor stack. - 'internal_states': The internal states returned by the RNNs in the NN pipeline. - 'used_exploration': Whether epsilon- or noise-based exploration was used or not. Returns: tuple or single value depending on `extra_returns`: - action - the preprocessed states """ extra_returns = {extra_returns} if isinstance(extra_returns, str) else (extra_returns or set()) # States come in without preprocessing -> use state space. if apply_preprocessing: call_method = self.root_component.get_preprocessed_state_and_action batched_states, remove_batch_rank = self.state_space.force_batch(states) else: call_method = self.root_component.action_from_preprocessed_state batched_states = states remove_batch_rank = False #remove_batch_rank = batched_states.ndim == np.asarray(states).ndim + 1 # Increase timesteps by the batch size (number of states in batch). batch_size = len(batched_states) self.timesteps += batch_size # Control, which return value to "pull" (depending on `additional_returns`). return_ops = [0, 1] if "preprocessed_states" in extra_returns else [0] ret = force_list(self.graph_executor.execute(( call_method, [batched_states, not use_exploration], # deterministic = not use_exploration # 0=preprocessed_states, 1=action return_ops ))) # Convert Gumble (relaxed one-hot) sample back into int type for all discrete composite actions. if isinstance(self.action_space, ContainerSpace): ret[0] = ret[0].map( mapping=lambda key, action: np.argmax(action, axis=-1).astype(action.dtype) if isinstance(self.flat_action_space[key], IntBox) else action ) elif isinstance(self.action_space, IntBox): ret[0] = np.argmax(ret[0], axis=-1).astype(self.action_space.dtype) if remove_batch_rank: ret[0] = strip_list(ret[0]) if "preprocessed_states" in extra_returns: return ret[0], ret[1] else: return ret[0] def _observe_graph(self, preprocessed_states, actions, internals, rewards, next_states, terminals): self.graph_executor.execute((self.root_component.insert_records, [preprocessed_states, actions, rewards, next_states, terminals])) def update(self, batch=None, time_percentage=None, **kwargs): if batch is None: size = self.graph_executor.execute(self.root_component.get_memory_size) # TODO: is this necessary? 
if size < self.batch_size: return 0.0, 0.0, 0.0 ret = self.graph_executor.execute((self.root_component.update_from_memory, [self.batch_size, time_percentage])) else: ret = self.graph_executor.execute((self.root_component.update_from_external_batch, [ batch["states"], batch["actions"], batch["rewards"], batch["terminals"], batch["next_states"], batch["importance_weights"], time_percentage ])) return ret["actor_loss"], ret["actor_loss_per_item"], ret["critic_loss"], ret["alpha_loss"] def reset(self): """ Resets our preprocessor, but only if it contains stateful PreprocessLayer Components (meaning the PreprocessorStack has at least one variable defined). """ if self.preprocessing_required and len(self.preprocessor.variables) > 0: self.graph_executor.execute("reset_preprocessor") self.graph_executor.execute(self.root_component.reset_targets) def __repr__(self): return "SACAgent(double-q={}, initial-alpha={}, target-entropy={})".format( self.double_q, self.initial_alpha, self.target_entropy )
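The soft target update that SyncSpecification and _graph_fn_sync describe is an exponential moving average over weights; a framework-free numpy sketch of the same rule follows (tau matches the default noted above, the toy weight values are made up).

# Minimal sketch of the "sync_tau" soft update applied to the target Q-network weights.
import numpy as np

tau = 0.005                                   # paper default, as set in update_spec above
source_w = np.array([1.0, 2.0, 3.0])          # current Q-network weights (illustrative)
target_w = np.array([0.0, 0.0, 0.0])          # target Q-network weights (illustrative)

# Each sync step nudges the target toward the source:
target_w = tau * source_w + (1.0 - tau) * target_w
print(target_w)  # -> [0.005 0.01  0.015]

# With tau == 1.0 the update degenerates to a hard copy, matching the "else" branch in _graph_fn_sync.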
python/chartParsing.py
pramitmallick/spinn
103
12777498
""" Artifical test for chart parsing """ from random import shuffle import numpy as np import string def generate_string(length): letters = list(string.ascii_lowercase) + list(string.ascii_uppercase) shuffle(letters) output = [] for i in range(length): output.append(letters[i]) return output sen_length = 25 sentence = generate_string(sen_length) # Compose : [A, B] = (A) + (B) = (AB) # Combine : ((AB)C), (A(BC)) = (ABC) # A + B = (AB) # (AB) + C = ((AB)C) def compose(l, r): return "(" + l + r + ")" def combine(list_versions): return list_versions[0] #return list_versions[0].replace("(","").replace(")","") def compute_compositions(sent): length = len(sent) -1 l_hiddens = sent[:-1] l_cells = sent[:-1] r_hiddens = sent[1:] r_cells = sent[1:] chart = [] masks = [] choices = [] """ layer_0 = [] for i in range(len(sent)): layer_0.append((sent[i], sent[i])) chart = [layer_0] """ chart = [sent] # list or tuple. w/e masks = [np.zeros(len(sent))] choices = [sent] for row in range(1, len(sent)): chart.append([]) masks.append([]) choices.append([]) for col in range(len(sent) - row): chart[row].append(None) masks[row].append(None) choices[row].append(None) for row in range(1, len(sent)): # = len(l_hiddens) for col in range(len(sent) - row): versions = [] for i in range(row): #print row, col, chart[row-i-1][col], chart[i][row+col-i] versions.append(compose(chart[row-i-1][col], chart[i][row+col-i])) chart[row][col] = combine(versions) choices[row][col] = versions l = len(versions) rand_pos = np.random.randint(l) mask = np.zeros(l) mask[rand_pos] += 1 masks[row][col] = mask return chart, masks, choices chart, mask, choices = compute_compositions(sentence) """ for row in len(choices): for col in len(choices[row]): pick = choices[row][col][int(np.where(mask[row][col])[0])] """ print choices[-1][-1][int(np.where(mask[-1][-1])[0])]
deinkscape.py
Emoji-COLRv0/emojitwo
313
12777543
#!/usr/bin/env python3 # -*- mode: python; coding: utf-8 -*- # By HarJIT in 2020. MIT/Expat licence. import os, xml.dom.minidom, shutil, re, glob svgpresattrs = ("alignment-baseline", "baseline-shift", "clip", "clip-path", "clip-rule", "color", "color-interpolation", "color-interpolation-filters", "color-profile", "color-rendering", "cursor", "direction", "display", "dominant-baseline", "enable-background", "fill", "fill-opacity", "fill-rule", "filter", "flood-color", "flood-opacity", "font-family", "font-size", "font-size-adjust", "font-stretch", "font-style", "font-variant", "font-weight", "glyph-orientation-horizontal", "glyph-orientation-vertical", "image-rendering", "kerning", "letter-spacing", "lighting-color", "marker-end", "marker-mid", "marker-start", "mask", "opacity", "overflow", "pointer-events", "shape-rendering", "solid-color", "solid-opacity", "stop-color", "stop-opacity", "stroke", "stroke-dasharray", "stroke-dashoffset", "stroke-linecap", "stroke-linejoin", "stroke-miterlimit", "stroke-opacity", "stroke-width", "text-anchor", "text-decoration", "text-rendering", "transform", "unicode-bidi", "vector-effect", "visibility", "word-spacing", "writing-mode") needlessline = re.compile("(?m)^\s*\n") def has_real_dc(document): if document.getElementsByTagName("cc:license"): return True elif document.getElementsByTagName("cc:License"): return True elif document.getElementsByTagName("dc:contributor"): return True elif document.getElementsByTagName("cc:Agent"): return True elif document.getElementsByTagName("cc:permits"): return True elif document.getElementsByTagName("cc:requires"): return True return False for pn in glob.glob("**/*.svg", recursive=True): i = os.path.basename(pn) if "draft" in i.casefold(): continue document = xml.dom.minidom.parse(pn) changed = False keep_metadata = has_real_dc(document) retain_ns = ["xmlns:xlink"] if keep_metadata: retain_ns.extend(["xmlns:rdf", "xmlns:cc", "xmlns:dc"]) for element in document.getElementsByTagName("*"): if element.nodeName == "metadata" and not keep_metadata: print(i, "removing", element.nodeName) changed = True element.parentNode.removeChild(element) elif element.nodeName == "defs": if (not element.childNodes) or (len(element.childNodes) == 1 and element.firstChild.nodeName == "#text" and not element.firstChild.wholeText.strip()): print(i, "removing", element.nodeName) changed = True element.parentNode.removeChild(element) elif element.nodeName.startswith(("inkscape:", "sodipodi:")): print(i, "removing", element.nodeName) changed = True element.parentNode.removeChild(element) # if element.hasAttribute("style"): # Rip SVG pres. attributes out of inline CSS, replacing any overridden attributes # Note: this will bork on quoted ; in values, which I don't expect to occur. 
stylelist = element.getAttribute("style").strip(";").split(";") styleout = "" for style in stylelist: if ":" not in style: continue # nvm name, val = style.split(":", 1) if name in svgpresattrs: print(i, "attributising", name) changed = True element.setAttribute(name.strip(), val.strip()) elif "inkscape" in name: print(i, "removing", name) changed = True pass else: print(i, "retaining", name) changed = True styleout += style + ";" if not styleout: element.removeAttribute("style") else: element.setAttribute("style", styleout) for attr in list(element.attributes.keys())[:]: if attr.startswith("stroke-") and not element.hasAttribute("stroke") and not (element.nodeName == "g"): print(i, "removing", attr) changed = True element.removeAttribute(attr) elif attr.startswith("inkscape:") or attr.startswith("sodipodi:"): print(i, "removing", attr) changed = True element.removeAttribute(attr) elif attr.startswith("xmlns:") and attr not in retain_ns: print(i, "removing", attr) changed = True element.removeAttribute(attr) elif (element.nodeName == "svg") and (attr == "version"): print(i, "removing", attr) changed = True element.removeAttribute("version") elif attr == "fill-opacity" and element.getAttribute("fill-opacity") == "1": print(i, "removing", attr) changed = True element.removeAttribute("fill-opacity") if element.hasAttribute("stroke"): print(i, "has stroke") if element.hasAttribute("id") and ((not element.parentNode) or element.parentNode.nodeName != "defs"): # Autogenerated ID rubbish if re.compile(r"^{}\d+$".format(element.nodeName)).match(element.getAttribute("id")): print(i, "removing ID", element.getAttribute("id")) changed = True element.removeAttribute("id") if changed: shutil.move(pn, pn + "~") with open(pn, "w") as f: x = document.toxml().replace("<?xml version=\"1.0\" ?>", "") f.write("".join(needlessline.split(x))) os.unlink(pn + "~")
tests/test_portfolio_handler.py
ivanliu1989/qstrader
113
12777565
import datetime from decimal import Decimal import unittest from qstrader.event import FillEvent, OrderEvent, SignalEvent from qstrader.portfolio_handler import PortfolioHandler from qstrader.price_handler.base import AbstractTickPriceHandler from qstrader.compat import queue class PriceHandlerMock(AbstractTickPriceHandler): def __init__(self): pass def get_best_bid_ask(self, ticker): prices = { "MSFT": (Decimal("50.28"), Decimal("50.31")), "GOOG": (Decimal("705.46"), Decimal("705.46")), "AMZN": (Decimal("564.14"), Decimal("565.14")), } return prices[ticker] class PositionSizerMock(object): def __init__(self): pass def size_order(self, portfolio, initial_order): """ This PositionSizerMock object simply modifies the quantity to be 100 of any share transacted. """ initial_order.quantity = 100 return initial_order class RiskManagerMock(object): def __init__(self): pass def refine_orders(self, portfolio, sized_order): """ This RiskManagerMock object simply lets the sized order through, creates the corresponding OrderEvent object and adds it to a list. """ order_event = OrderEvent( sized_order.ticker, sized_order.action, sized_order.quantity ) return [order_event] class TestSimpleSignalOrderFillCycleForPortfolioHandler(unittest.TestCase): """ Tests a simple Signal, Order and Fill cycle for the PortfolioHandler. This is, in effect, a sanity check. """ def setUp(self): """ Set up the PortfolioHandler object supplying it with $500,000.00 USD in initial cash. """ initial_cash = Decimal("500000.00") events_queue = queue.Queue() price_handler = PriceHandlerMock() position_sizer = PositionSizerMock() risk_manager = RiskManagerMock() # Create the PortfolioHandler object from the rest self.portfolio_handler = PortfolioHandler( initial_cash, events_queue, price_handler, position_sizer, risk_manager ) def test_create_order_from_signal_basic_check(self): """ Tests the "_create_order_from_signal" method as a basic sanity check. """ signal_event = SignalEvent("MSFT", "BOT") order = self.portfolio_handler._create_order_from_signal(signal_event) self.assertEqual(order.ticker, "MSFT") self.assertEqual(order.action, "BOT") self.assertEqual(order.quantity, 0) def test_place_orders_onto_queue_basic_check(self): """ Tests the "_place_orders_onto_queue" method as a basic sanity check. """ order = OrderEvent("MSFT", "BOT", 100) order_list = [order] self.portfolio_handler._place_orders_onto_queue(order_list) ret_order = self.portfolio_handler.events_queue.get() self.assertEqual(ret_order.ticker, "MSFT") self.assertEqual(ret_order.action, "BOT") self.assertEqual(ret_order.quantity, 100) def test_convert_fill_to_portfolio_update_basic_check(self): """ Tests the "_convert_fill_to_portfolio_update" method as a basic sanity check. """ fill_event_buy = FillEvent( datetime.datetime.utcnow(), "MSFT", "BOT", 100, "ARCA", Decimal("50.25"), Decimal("1.00") ) self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_buy) # Check the Portfolio values within the PortfolioHandler port = self.portfolio_handler.portfolio self.assertEqual(port.cur_cash, Decimal("494974.00")) # TODO: Finish this off and check it works via Interactive Brokers fill_event_sell = FillEvent( datetime.datetime.utcnow(), "MSFT", "SLD", 100, "ARCA", Decimal("50.25"), Decimal("1.00") ) self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_sell) def test_on_signal_basic_check(self): """ Tests the "on_signal" method as a basic sanity check. 
""" signal_event = SignalEvent("MSFT", "BOT") self.portfolio_handler.on_signal(signal_event) ret_order = self.portfolio_handler.events_queue.get() self.assertEqual(ret_order.ticker, "MSFT") self.assertEqual(ret_order.action, "BOT") self.assertEqual(ret_order.quantity, 100) if __name__ == "__main__": unittest.main()
babble/__init__.py
billchenxi/babble
130
12777587
from .explanation import Explanation
from .parsing import Rule, Grammar, Parse, SemanticParser
from .filter_bank import FilterBank
from .utils import ExplanationIO, link_explanation_candidates
from .babbler import Babbler, BabbleStream
src/bkl/interpreter/__init__.py
johnwbyrd/brakefile
118
12777595
#
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2008-2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

"""
This module contains the very core of Bakefile -- the interpreter,
:class:`bkl.interpreter.Interpreter`, and its supporting classes.
"""

import logging

import bkl.parser
import bkl.model
import bkl.api
import bkl.expr
import passes

from builder import Builder
from bkl.error import Error, warning
from bkl.parser import parse_file

logger = logging.getLogger("bkl.interpreter")


class Interpreter(object):
    """
    The interpreter is responsible for doing everything necessary to
    "translate" input ``.bkl`` files into generated native makefiles. This
    includes building a project model from the input, checking it for
    correctness, optimizing it and creating outputs for all enabled toolsets.

    :class:`Interpreter` provides both a high-level interface for single-call
    usage (see :meth:`process`) and other methods with finer granularity that
    allow you to inspect individual steps (most useful for the test suite).

    .. attribute:: model

       Model of the project, as :class:`bkl.model.Project`. Its state always
       reflects the current state of processing.

    .. attribute:: toolsets_to_use

       Set of toolsets to generate for. This list may contain only a subset of
       toolsets the bakefile is written for and may even contain toolsets not
       specified in the bakefile.

       If :const:`None` (the default), then the toolsets listed in the
       bakefile are used.
    """

    def __init__(self):
        self.model = bkl.model.Project()
        self.toolsets_to_use = None

    def limit_toolsets(self, toolsets):
        """Sets :attr:`toolsets_to_use`."""
        self.toolsets_to_use = set(toolsets)

    def process(self, ast):
        """
        Interprets input file and generates the outputs.

        :param ast: AST of the input file, as returned by
               :func:`bkl.parser.parse_file`.

        Processing is done in several phases:

        1. Basic model is built (see :class:`bkl.interpreter.builder.Builder`).
           No optimizations or checks are performed at this point.

        2. Several generic optimization and checking passes are run on the
           model. Among other things, type correctness and other constraints
           are checked, variables are substituted and evaluated.

        3. The model is split into several copies, one per output toolset.

        4. Further optimization passes are done.

        5. Output files are generated.

        Step 1 is done by :meth:`add_module`. Steps 2-4 are done by
        :meth:`finalize` and step 5 is implemented in :meth:`generate`.
        """
        self.add_module(ast, self.model)
        self.finalize()
        self.generate()

    def process_file(self, filename):
        """Like :meth:`process()`, but takes filename as its argument."""
        self.process(parse_file(filename))

    def add_module(self, ast, parent):
        """
        Adds parsed AST to the model, without doing any optimizations. May be
        called more than once, with different parsed files.

        :param ast: AST of the input file, as returned by
               :func:`bkl.parser.parse_file`.
        """
        logger.info("processing %s", ast.filename)

        submodules = []
        b = Builder(on_submodule=lambda fn, pos: submodules.append((fn,pos)))

        module = b.create_model(ast, parent)

        while submodules:
            sub_filename, sub_pos = submodules[0]
            submodules.pop(0)
            try:
                sub_ast = parse_file(sub_filename)
            except IOError as e:
                if e.filename:
                    msg = "%s: %s" % (e.strerror, e.filename)
                else:
                    msg = e.strerror
                raise Error(msg, pos=sub_pos)
            self.add_module(sub_ast, module)

    def _call_custom_steps(self, model, func):
        for step in bkl.api.CustomStep.all():
            logger.debug("invoking custom step %s.%s()", step.name, func)
            getattr(step, func)(model)

    def finalize(self):
        """
        Finalizes the model, i.e. checks it for validity, optimizes, creates
        per-toolset models etc.
        """
        logger.debug("finalizing the model")

        # call any custom steps first:
        self._call_custom_steps(self.model, "finalize")

        # then apply standard processing:
        passes.detect_potential_problems(self.model)
        passes.normalize_and_validate_bool_subexpressions(self.model)
        passes.normalize_vars(self.model)
        passes.validate_vars(self.model)
        passes.normalize_paths_in_model(self.model, toolset=None)
        passes.simplify_exprs(self.model)

    def finalize_for_toolset(self, toolset_model, toolset):
        """
        Finalizes after "toolset" variable was set.
        """
        passes.remove_disabled_model_parts(toolset_model, toolset)

        # TODO: do this in finalize() instead
        passes.make_variables_for_missing_props(toolset_model, toolset)

        passes.eliminate_superfluous_conditionals(toolset_model)

        # This is done second time here (in addition to finalize()) to deal
        # with paths added by make_variables_for_missing_props() and paths with
        # @builddir (which is toolset specific and couldn't be resolved
        # earlier). Ideally we wouldn't do it, but hopefully it's not all that
        # inefficient, as no real work is done for paths that are already
        # normalized:
        passes.normalize_paths_in_model(toolset_model, toolset)

    def make_toolset_specific_model(self, toolset, skip_making_copy=False):
        """
        Returns toolset-specific model, i.e. one that works only with
        *toolset*, has the ``toolset`` property set to it. The caller
        still needs to call finalize_for_toolset() on it.
        """
        if skip_making_copy:
            model = self.model
        else:
            model = self.model.clone()

        # don't use Variable.from_property(), because it's read-only
        model.add_variable(bkl.model.Variable.from_property(
            model.get_prop("toolset"),
            bkl.expr.LiteralExpr(toolset)))
        return model

    def generate(self):
        """
        Generates output files.
        """
        # collect all requested toolsets:
        toolsets = set()
        for module in self.model.modules:
            module_toolsets = module.get_variable("toolsets")
            if module_toolsets:
                toolsets.update(module_toolsets.value.as_py())
        if self.toolsets_to_use:
            for t in self.toolsets_to_use:
                if t not in toolsets:
                    try:
                        bkl.api.Toolset.get(t)
                    except KeyError:
                        raise Error("unknown toolset \"%s\" given on command line" % t)
                    warning("toolset \"%s\" is not supported by the project, there may be issues", t)
                    # Add the forced toolset to all submodules:
                    for module in self.model.modules:
                        module_toolsets = module.get_variable("toolsets")
                        if module_toolsets:
                            module_toolsets.value.items.append(bkl.expr.LiteralExpr(t))
            toolsets = self.toolsets_to_use
        toolsets = list(toolsets)
        logger.debug("toolsets to generate for: %s", toolsets)

        if not toolsets:
            raise Error("nothing to generate, \"toolsets\" property is empty")

        # call any custom steps first:
        self._call_custom_steps(self.model, "generate")

        # and generate the outputs (notice that we can avoid making a
        # (expensive!) deepcopy of the model for one of the toolsets and can
        # reuse the current model):
        for toolset in toolsets[:-1]:
            self.generate_for_toolset(toolset)
        self.generate_for_toolset(toolsets[-1], skip_making_copy=True)

    def generate_for_toolset(self, toolset, skip_making_copy=False):
        """
        Generates output for given *toolset*.
        """
        logger.debug("****** preparing model for toolset %s ******", toolset)
        model = self.make_toolset_specific_model(toolset, skip_making_copy)
        self.finalize_for_toolset(model, toolset)

        logger.debug("****** generating for toolset %s ********", toolset)
        bkl.api.Toolset.get(toolset).generate(model)
example/issues/449_django_lazy_path/pulpsettings.py
sephiartlist/dynaconf
2,293
12777602
REST_FRAMEWORK__DEFAULT_AUTHENTICATION_CLASSES = (
    "rest_framework.authentication.SessionAuthentication",
    "pulpcore.app.authentication.PulpRemoteUserAuthentication",
    "foo_bar1",
)
minpy/numpy/random.py
yuhonghong66/minpy
1,271
12777621
<reponame>yuhonghong66/minpy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Mock numpy random module """
#pylint: disable= invalid-name
from __future__ import absolute_import

import sys

from minpy.numpy.mocking import Module

_old = {
    '__name__' : __name__,
}

sys.modules[__name__] = Module(_old, 'random')
pyatlas/unit_tests/test_identifier_converters.py
yazad3/atlas
188
12777627
import unittest

from pyatlas import identifier_converters


class IdentifierConvertersTest(unittest.TestCase):
    def setUp(self):
        pass

    def test_osm_conversion(self):
        atlas_id = 222222000000
        osm_id = 222222
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = 123001002
        osm_id = 123
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = 3101220
        osm_id = 3
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = -222222000001
        osm_id = 222222
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

    def test_country_code_conversion(self):
        atlas_id = 222222000000
        country_code = 0
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = 123001002
        country_code = 1
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = 3101220
        country_code = 101
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = -222222002001
        country_code = 2
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

    def test_way_section_conversion(self):
        atlas_id = 222222000000
        way_section = 0
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = 123001002
        way_section = 2
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = 3101220
        way_section = 220
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = -222222002001
        way_section = 1
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
videos/HomeworkVol03/678-widcardw.py
AStarySky/manim_sandbox
366
12777683
<gh_stars>100-1000 # from widcardw from manimlib.imports import * class Test6(Scene): CONFIG = {"camera_config": {"background_color": "#ffffff"}} def construct(self): circle0 = Circle(radius=1.5, stroke_color="#559944", plot_depth=-2) doto = Dot(ORIGIN, color="#000000") texto = TexMobject("O", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6).next_to( doto, RIGHT+DOWN, buff=SMALL_BUFF) self.play(ShowCreation(circle0)) self.play(Write(doto), Write(texto)) dota = Dot(np.array([3.2, 0, 0]), color="#000000", plot_depth=1) texta = TexMobject("A", color="#000000").next_to( dota, RIGHT+DOWN, buff=SMALL_BUFF) self.play(Write(dota), Write(texta)) t = ValueTracker(2) dotb = Dot(color="#bb3333", plot_depth=1).add_updater(lambda b: b.move_to(np.array([ 1.5*np.cos(t.get_value()), 1.5*np.sin(t.get_value()), 0 ]))) textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6).add_updater( lambda b: b.next_to(dotb, UP+LEFT, buff=SMALL_BUFF)) self.play(Write(dotb), Write(textb)) self.wait(0.2) l_ab = DashedLine(color="#bb7755", stroke_width=1.5, plot_depth=0).add_updater( lambda l: l.put_start_and_end_on(dota.get_center(), dotb.get_center())) self.play(ShowCreation(l_ab)) self.wait(0.2) self.play(t.increment_value, 1, rate_func=smooth) self.play(t.increment_value, -3, rate_func=smooth) l_b = Line(LEFT, RIGHT).add_updater(lambda l: l.become( Line(color="#55aaee", plot_depth=0).rotate(l_ab.get_angle()+PI/2, about_point=l_ab.get_start()) .move_to(l_ab.get_end()).scale(20) )) dotc = Dot(stroke_opacity=0, fill_opacity=0).add_updater( lambda d: d.move_to(l_b.get_start())) self.play(ShowCreation(l_b)) self.add(dotc) anglea = Angle(dota, dotb, dotc)\ .add_updater(lambda a: a.become(Angle(dota, dotb, dotc, color="#E65A4C"))) self.play(ShowCreation(anglea)) for i in range(50): self.play(t.increment_value, TAU/50, rate_func=linear, run_time=0.12) l_b.clear_updaters() l_b.plot_depth = -1 l_bc = l_b.copy().set_stroke(width=1.5, color="#00aaff") self.add(l_bc) l_b.add_updater(lambda l: l.become( Line(color="#55aaee", plot_depth=0).rotate(l_ab.get_angle()+PI/2, about_point=l_ab.get_start()) .move_to(l_ab.get_end()).scale(20) )) self.add(l_b) anglea.clear_updaters() l_b.clear_updaters() self.play(FadeOut(anglea), FadeOut(l_b)) self.wait(3) class Test7(Scene): CONFIG = {"camera_config": {"background_color": "#ffffff"}} def construct(self): t = ValueTracker(0) doto = Dot(DOWN*0.6, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.5) dotp = Dot(np.array([0, -2.7, 0]), color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.5) dota = Dot(color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.5).add_updater(lambda d: d.move_to(np.array([ doto.get_center()[0]+np.cos(t.get_value()), doto.get_center()[1]+np.sin(t.get_value()), 0 ]))) cira = Circle().add_updater(lambda c: c.become( Circle(radius=get_line_long(dotp.get_center(), dota.get_center()), color="#559944").move_to(dota.get_center()) )) texto = TexMobject( "O", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\ .scale(0.7).next_to(doto, DOWN+RIGHT, buff=SMALL_BUFF) textp = TexMobject( "P", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\ .scale(0.7).next_to(dotp, DOWN+LEFT, buff=SMALL_BUFF) texta = TexMobject( "A", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\ 
.scale(0.7).add_updater(lambda a: a.next_to(dota, DOWN+LEFT, buff=SMALL_BUFF)) ciro = Circle(radius=1, color="#bb7755").move_to(doto.get_center()) dotpc = Dot(color="#000000").scale(0.5).move_to(dotp.get_center()) l_pa = DashedLine(color="#55bb33", stroke_width=1.5).add_updater(lambda l: l.put_start_and_end_on( dota.get_center(), dotpc.get_center())) self.play(ShowCreation(ciro), Write(doto), Write(texto)) self.play(Write(dotp), Write(textp)) self.wait(0.3) self.play(Write(dota), Write(texta)) self.add(dotpc) self.play(ShowCreation(l_pa)) path = TracedPath(dotpc.get_center, stroke_color="#559944", stroke_width=3) self.add(path) self.play(Rotating(dotpc, about_point=dota.get_center()), run_time=1.8, rate_func=smooth) # self.play(ShowCreation(cira)) l_pa.clear_updaters() self.remove(dotpc, path) self.play(FadeOut(l_pa), FadeIn(cira)) self.play(t.increment_value, -PI/2) self.wait(0.3) for i in range(40): self.play(t.increment_value, TAU/40, rate_func=linear, run_time=0.2) cira.clear_updaters() ciracpy = cira.copy().set_color("#9944bb").set_stroke(width=1.5) self.add(ciracpy) cira.add_updater(lambda c: c.become( Circle(radius=get_line_long(dotp.get_center(), dota.get_center()), color="#559944").move_to(dota.get_center()) )) self.add(cira) #attention: get_line_long is defined by Shy_Vector #if it does not work, you can turn to "get_norm(...)" cira.clear_updaters() self.play(FadeOut(cira)) self.wait(2.5) class Test8(Scene): CONFIG = {"camera_config": {"background_color": "#ffffff"}} def construct(self): doto = Dot(color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.7) dota = Dot(LEFT*1.8, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.7) dotb = Dot(RIGHT*1.8, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.7) texto = TexMobject("O", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).scale(0.7).next_to(doto, RIGHT+DOWN, buff=SMALL_BUFF) texta = TexMobject("A", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).scale(0.7).next_to(dota, LEFT, buff=SMALL_BUFF) textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).scale(0.7).next_to(dotb, RIGHT, buff=SMALL_BUFF) ciro = Circle(radius=1.8, color="#559944") l_ab = Line(LEFT*1.8, RIGHT*1.8, color="#4488dd") self.play(ShowCreation(ciro), Write(doto), Write(texto)) self.play(ShowCreation(l_ab), *[Write(obj) for obj in [dota, dotb, texta, textb]]) self.wait(0.3) t = ValueTracker(1) dotp = Dot(color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2).scale(0.7)\ .add_updater(lambda d: d.move_to(np.array([ 1.8*np.cos(t.get_value()), 1.8*np.sin(t.get_value()), 0 ]))) textp = TexMobject("P", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).scale(0.7)\ .add_updater(lambda p: p.next_to(dotp, UP+RIGHT, buff=SMALL_BUFF)) self.play(Write(dotp), Write(textp)) self.wait(0.2) cirp = Circle(radius=2).add_updater(lambda c: c.become( Circle(radius=abs(dotp.get_center()[1]), color="#dd7766") .move_to(dotp.get_center()) )) self.play(ShowCreation(cirp)) self.play(t.increment_value, 1) self.play(t.increment_value, -2) self.wait(0.2) for i in range(40): self.play(t.increment_value, TAU/40, rate_func=linear, run_time=0.2) cirp.clear_updaters() cirpc = cirp.copy().set_stroke(width=1.5, color="#715582") 
self.add(cirpc) cirp.add_updater(lambda c: c.become( Circle(radius=abs(dotp.get_center()[1]), color="#dd7766") .move_to(dotp.get_center()))) self.add(cirp) cirp.clear_updaters() textp.clear_updaters() dotp.clear_updaters() self.wait() self.play(*[FadeOut(obj) for obj in [doto, dota, dotb, texta, textb, textp, textp, dotp, l_ab, ciro, texto]]) self.wait(2) ''' to be completed... class Test5(Scene): CONFIG = {"camera_config": {"background_color": "#ffffff"}} def construct(self): dotb = Dot(LEFT*2, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) dotc = Dot(RIGHT*2, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) dota = Dot(LEFT*2+UP*1.3, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) texta = TexMobject("A", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dota, UP+LEFT, buff=SMALL_BUFF) textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dotb, LEFT+DOWN, buff=SMALL_BUFF) textc = TexMobject("C", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dotc, RIGHT+DOWN, buff=SMALL_BUFF) l_ab = Line(color="#559944")\ .put_start_and_end_on(dota.get_center(), dotb.get_center()) l_bc = Line(color="#559944")\ .put_start_and_end_on(dotc.get_center(), dotb.get_center()) self.play(*[ShowCreation(obj) for obj in [l_ab, l_bc, dota, dotb, dotc]]) self.play(*[Write(obj) for obj in [texta, textb, textc]]) self.wait(0.3) t = ValueTracker(0) def p_pos(t): return np.array([0, 0, 0]) dotp = Dot(color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2)\ .add_updater(lambda d: d.move_to())'''
assertpy/__init__.py
santunioni/assertpy
246
12777688
from __future__ import absolute_import

from .assertpy import assert_that, assert_warn, soft_assertions, fail, soft_fail, add_extension, remove_extension, WarningLoggingAdapter, __version__
from .file import contents_of
rl_agents/trainer/logger.py
neskoc/rl-agents
342
12777691
<reponame>neskoc/rl-agents<gh_stars>100-1000
import json
import logging.config
from pathlib import Path

import gym

from rl_agents.configuration import Configurable

logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {
            "format": "[%(levelname)s] %(message)s "
        },
        "detailed": {
            "format": "[%(name)s:%(levelname)s] %(message)s "
        }
    },
    "handlers": {
        "default": {
            "level": "INFO",
            "formatter": "standard",
            "class": "logging.StreamHandler"
        }
    },
    "loggers": {
        "": {
            "handlers": [
                "default"
            ],
            "level": "DEBUG",
            "propagate": True
        }
    }
}


def configure(config={}, gym_level=gym.logger.INFO):
    """
    Configure logging.

    Update the default configuration by a configuration file.
    Also configure the gym logger.

    :param config: logging configuration, or path to a configuration file
    :param gym_level: desired level for gym logger
    """
    if config:
        if isinstance(config, str):
            with Path(config).open() as f:
                config = json.load(f)
        Configurable.rec_update(logging_config, config)
    logging.config.dictConfig(logging_config)
    gym.logger.set_level(gym_level)


def add_file_handler(file_path):
    """
    Add a file handler to the root logger.

    :param Path file_path: log file path
    """
    configure({
        "handlers": {
            file_path.name: {
                "class": "logging.FileHandler",
                "filename": file_path,
                "level": "DEBUG",
                "formatter": "detailed",
                "mode": 'w'
            }
        },
        "loggers": {
            "": {
                "handlers": [
                    file_path.name,
                    *logging_config["handlers"]
                ]
            }
        }
    })
gpflow/utilities/utilities.py
HarrySpearing/GPflow
1,724
12777735
# Copyright 2017-2021 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is deprecated, and is only provided for backwards compatibility.
It will be removed in GPflow 2.3.
"""
from deprecated import deprecated

from . import misc, traversal

__all__ = []


def _create_module_redirects(m):
    for name in m.__all__:
        func = getattr(m, name)
        assert callable(func), "all names exported by misc and traversal should be functions"
        deprecated_func = deprecated(
            reason="The gpflow.utilities.utilities module is deprecated and will "
            f"be removed in GPflow 2.3; use gpflow.utilities.{name} instead."
        )(func)
        globals()[name] = deprecated_func
        __all__.append(name)


_create_module_redirects(misc)
_create_module_redirects(traversal)
del _create_module_redirects, misc, traversal
tests/java/org/python/indexer/data/mod2.py
jeff5/jython-whinchat
577
12777762
<reponame>jeff5/jython-whinchat<filename>tests/java/org/python/indexer/data/mod2.py
import distutils.command

def mod2test():
    return dir(distutils)
eraserhead/request_storage.py
yozlet/django-eraserhead
216
12777792
# encoding: utf-8

from __future__ import print_function

import term
import humanfriendly


class RequestStorage(object):
    """ Stores statistics about single request """

    def __init__(self):
        self.queryset_stats = []

    def add_queryset_storage_instance(self, queryset_storage):
        self.queryset_stats.append(queryset_storage)

    @property
    def total_wasted_memory(self):
        wasted_memory = 0
        for qs_storage in self.queryset_stats:
            wasted_memory += qs_storage.total_wasted_memory
        return wasted_memory

    # Stats print methods

    def print_stats(self):
        """ Display statistics of current request """
        if not self.queryset_stats:
            return

        term.writeLine("\n\t ERASERHEAD STATS \n", term.bold, term.reverse)

        for queryset_storage in self.queryset_stats:
            queryset_storage.print_stats()
            print()

        term.write("\t TOTAL WASTED MEMORY: ", term.bold, term.reverse)
        term.write(" {}".format(humanfriendly.format_size(self.total_wasted_memory)), term.red)
        print()
setup.py
rvega/isobar
241
12777806
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(
    name='isobar',
    version='0.1.1',
    description='A Python library to express and manipulate musical patterns',
    long_description = open("README.md", "r").read(),
    long_description_content_type = "text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/ideoforms/isobar',
    packages=find_packages(),
    install_requires=['python-osc', 'mido', 'python-rtmidi'],
    keywords=['sound', 'music', 'composition'],
    classifiers=[
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Artistic Software',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers'
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-timeout']
)
alphamind/tests/portfolio/test_optimizers.py
rongliang-tech/alpha-mind
186
12777819
<gh_stars>100-1000 # -*- coding: utf-8 -*- """ Created on 2017-11-1 @author: cheng.li """ import unittest import numpy as np from alphamind.portfolio.optimizers import LPOptimizer from alphamind.portfolio.optimizers import QuadraticOptimizer from alphamind.portfolio.optimizers import TargetVolOptimizer class TestOptimizers(unittest.TestCase): def test_lpoptimizer(self): er = np.array([-1., -2.]) lower_bound = np.array([0., 0.2]) upper_bound = np.array([1., 0.8]) optimizer = LPOptimizer(objective=-er, cons_matrix=np.array([[1., 1., 1., 1.]]), lbound=lower_bound, ubound=upper_bound) self.assertAlmostEqual(optimizer.feval(), 1.2) np.testing.assert_array_almost_equal(optimizer.x_value(), [0.8, 0.2]) def test_qpoptimizer(self): er = np.array([0.01, 0.02, 0.03]) cov = np.array([[0.02, 0.01, 0.02], [0.01, 0.02, 0.03], [0.02, 0.03, 0.02]]) ids_var = np.diag([0.01, 0.02, 0.03]) cov += ids_var lbound = np.array([0., 0., 0.]) ubound = np.array([0.4, 0.4, 0.5]) cons = np.array([[1., 1., 1.], [1., 0., 1.]]) clbound = np.array([1., 0.3]) cubound = np.array([1., 0.7]) cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1) optimizer = QuadraticOptimizer(objective=-er, cov=cov, lbound=lbound, ubound=ubound, cons_matrix=cons_matrix) # check against matlab result np.testing.assert_array_almost_equal(optimizer.x_value(), [0.2, 0.3, 0.5], 4) def test_qpoptimizer_with_factor_model(self): er = np.array([0.1, 0.2, 0.3]) lbound = np.array([0.0, 0.0, 0.0]) ubound = np.array([1.0, 1.0, 1.0]) factor_var = np.array([[0.5, -0.3], [-0.3, 0.7]]) factor_load = np.array([[0.8, 0.2], [0.5, 0.5], [0.2, 0.8]]) idsync = np.array([0.1, 0.3, 0.2]) cons = np.array([[1., 1., 1.]]) clbound = np.array([1.]) cubound = np.array([1.]) cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1) optimizer = QuadraticOptimizer(objective=-er, lbound=lbound, ubound=ubound, factor_cov=factor_var, factor_load=factor_load, factor_special=idsync, cons_matrix=cons_matrix) # check against cvxpy result np.testing.assert_array_almost_equal(optimizer.x_value(), [0.2866857, 0.21416417, 0.49915014], 4) def test_qpoptimizer_with_identity_matrix(self): er = np.array([-0.02, 0.01, 0.03]) cov = np.diag([1., 1., 1.]) optimizer = QuadraticOptimizer(objective=-er, cov=cov) np.testing.assert_array_almost_equal(optimizer.x_value(), [-0.02, 0.01, 0.03], 4) def test_target_vol_optimizer_without_cons(self): er = np.array([0.1, 0.2, 0.3]) cov = np.array([[0.05, 0.01, 0.02], [0.01, 0.06, 0.03], [0.02, 0.03, 0.07]]) lbound = np.array([-0.3, -0.3, -0.3]) ubound = np.array([0.5, 0.5, 0.5]) target_vol = 0.1 optimizer = TargetVolOptimizer(objective=-er, cov=cov, lbound=lbound, ubound=ubound, target_vol=target_vol) # check against known good result np.testing.assert_array_almost_equal(optimizer.x_value(), [.0231776, 0.1274768, 0.30130881], 4) def test_target_vol_optimizer_with_cons(self): er = np.array([0.1, 0.2, 0.3]) cov = np.array([[0.05, 0.01, 0.02], [0.01, 0.06, 0.03], [0.02, 0.03, 0.07]]) lbound = np.array([-0.3, -0.3, -0.3]) ubound = np.array([0.5, 0.5, 0.5]) cons = np.array([[1., 1., 1.]]) clbound = np.array([0.]) cubound = np.array([0.]) cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1) target_vol = 0.1 optimizer = TargetVolOptimizer(objective=-er, cov=cov, lbound=lbound, ubound=ubound, target_vol=target_vol, cons_matrix=cons_matrix) # check against known good result np.testing.assert_array_almost_equal(optimizer.x_value(), [-0.3, 
-0.10919033, 0.40919033], 4) def test_target_vol_optimizer_with_factor_model(self): er = np.array([0.1, 0.2, 0.3]) lbound = np.array([0.0, 0.0, 0.0]) ubound = np.array([1.0, 1.0, 1.0]) factor_var = np.array([[0.5, -0.3], [-0.3, 0.7]]) factor_load = np.array([[0.8, 0.2], [0.5, 0.5], [0.2, 0.8]]) idsync = np.array([0.1, 0.3, 0.2]) cons = np.array([[1., 1., 1.]]) clbound = np.array([1.]) cubound = np.array([1.]) target_vol = 0.5 cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1) optimizer = TargetVolOptimizer(objective=-er, factor_cov=factor_var, factor_load=factor_load, factor_special=idsync, lbound=lbound, ubound=ubound, target_vol=target_vol, cons_matrix=cons_matrix) # check against cvxpy result np.testing.assert_array_almost_equal(optimizer.x_value(), [0.26595552, 0.21675092, 0.51729356], 4) def test_target_vol_with_cons_and_ieq(self): er = np.array([0.1, 0.2, 0.3]) cov = np.array([[0.05, 0.01, 0.02], [0.01, 0.06, 0.03], [0.02, 0.03, 0.07]]) lbound = np.array([-0.3, -0.3, -0.3]) ubound = np.array([0.5, 0.5, 0.5]) cons = np.array([[1., 1., 1.]]) clbound = np.array([0.]) cubound = np.array([0.]) target_vol = 0.1 cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1) optimizer = TargetVolOptimizer(objective=-er, cov=cov, lbound=lbound, ubound=ubound, target_vol=target_vol, cons_matrix=cons_matrix) # check against known good result np.testing.assert_array_almost_equal(optimizer.x_value(), [-0.3, -0.10919033, 0.40919033], 4) if __name__ == '__main__': unittest.main()
scripts/estimate_median_freq.py
rocksat/jsis3d
180
12777846
import os
import sys
import h5py
import argparse
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('--root', help='path to root directory')
args = parser.parse_args()

root = args.root
fname = os.path.join(root, 'metadata/train.txt')
flist = [os.path.join(root, 'h5', line.strip())
         for line in open(fname, 'r')]

fname = os.path.join(root, 'metadata', 'classes.txt')
classes = [line.strip() for line in open(fname, 'r')]
num_classes = len(classes)

sizes = np.zeros(num_classes)
total = np.zeros(num_classes)
for fname in flist:
    print('> Processing {}...'.format(fname))
    fin = h5py.File(fname)
    coords = fin['coords'][:]
    points = fin['points'][:]
    labels = fin['labels'][:]
    labels = labels.reshape(-1, 2)
    num_points = labels.shape[0]
    for i in range(num_classes):
        indices = (labels[:, 0] == i)
        size = np.sum(indices)
        sizes[i] += size
        if size == 0:
            continue
        total[i] += num_points

freq = sizes / total
weight = np.median(freq) / freq

fname = os.path.join(root, 'metadata', 'weight.txt')
print('> Saving statistics to {}...'.format(fname))
np.savetxt(fname, weight, fmt='%f')
third_party/blink/renderer/build/scripts/blinkbuild/name_style_converter_test.py
zipated/src
2,151
12777848
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=import-error,print-statement,relative-import,protected-access """Unit tests for name_style_converter.py.""" import unittest from name_style_converter import NameStyleConverter from name_style_converter import tokenize_name class SmartTokenizerTest(unittest.TestCase): def test_simple_cases(self): self.assertEqual(tokenize_name('foo'), ['foo']) self.assertEqual(tokenize_name('fooBar'), ['foo', 'Bar']) self.assertEqual(tokenize_name('fooBarBaz'), ['foo', 'Bar', 'Baz']) self.assertEqual(tokenize_name('Baz'), ['Baz']) self.assertEqual(tokenize_name(''), []) self.assertEqual(tokenize_name('FOO'), ['FOO']) self.assertEqual(tokenize_name('foo2'), ['foo', '2']) def test_tricky_cases(self): self.assertEqual(tokenize_name('XMLHttpRequest'), ['XML', 'Http', 'Request']) self.assertEqual(tokenize_name('HTMLElement'), ['HTML', 'Element']) self.assertEqual(tokenize_name('WebGLRenderingContext'), ['WebGL', 'Rendering', 'Context']) self.assertEqual(tokenize_name('CanvasRenderingContext2D'), ['Canvas', 'Rendering', 'Context', '2D']) self.assertEqual(tokenize_name('CanvasRenderingContext2DAPITest'), ['Canvas', 'Rendering', 'Context', '2D', 'API', 'Test']) self.assertEqual(tokenize_name('SVGSVGElement'), ['SVG', 'SVG', 'Element']) self.assertEqual(tokenize_name('CanvasRenderingContext2D'), ['Canvas', 'Rendering', 'Context', '2D']) self.assertEqual(tokenize_name('CSSURLImageValue'), ['CSS', 'URL', 'Image', 'Value']) self.assertEqual(tokenize_name('CSSPropertyAPID'), ['CSS', 'Property', 'API', 'D']) self.assertEqual(tokenize_name('AXARIAGridCell'), ['AX', 'ARIA', 'Grid', 'Cell']) self.assertEqual(tokenize_name('CDATASection'), ['CDATA', 'Section']) self.assertEqual(tokenize_name('ASCIICType'), ['ASCII', 'CType']) self.assertEqual(tokenize_name('CString'), ['CString']) self.assertEqual(tokenize_name('HTMLDListElement'), ['HTML', 'DList', 'Element']) self.assertEqual(tokenize_name('HTMLOListElement'), ['HTML', 'OList', 'Element']) self.assertEqual(tokenize_name('HTMLIFrameElement'), ['HTML', 'IFrame', 'Element']) self.assertEqual(tokenize_name('HTMLPlugInElement'), ['HTML', 'PlugIn', 'Element']) # No special handling for OptGroup, FieldSet, and TextArea. self.assertEqual(tokenize_name('HTMLOptGroupElement'), ['HTML', 'Opt', 'Group', 'Element']) self.assertEqual(tokenize_name('HTMLFieldSetElement'), ['HTML', 'Field', 'Set', 'Element']) self.assertEqual(tokenize_name('HTMLTextAreaElement'), ['HTML', 'Text', 'Area', 'Element']) self.assertEqual(tokenize_name('Path2D'), ['Path', '2D']) self.assertEqual(tokenize_name('Point2D'), ['Point', '2D']) self.assertEqual(tokenize_name('CanvasRenderingContext2DState'), ['Canvas', 'Rendering', 'Context', '2D', 'State']) self.assertEqual(tokenize_name('Accelerated2dCanvas'), ['Accelerated', '2d', 'Canvas']) self.assertEqual(tokenize_name('RTCDTMFSender'), ['RTC', 'DTMF', 'Sender']) self.assertEqual(tokenize_name('WebGLCompressedTextureS3TCsRGB'), ['WebGL', 'Compressed', 'Texture', 'S3TC', 'sRGB']) self.assertEqual(tokenize_name('WebGL2CompressedTextureETC1'), ['WebGL2', 'Compressed', 'Texture', 'ETC1']) self.assertEqual(tokenize_name('EXTsRGB'), ['EXT', 'sRGB']) # 'PVRTC' contains a special token 'RTC', but it should be a # single token. 
self.assertEqual(tokenize_name('WebGLCompressedTexturePVRTC'), ['WebGL', 'Compressed', 'Texture', 'PVRTC']) self.assertEqual(tokenize_name('SVGFEBlendElement'), ['SVG', 'FE', 'Blend', 'Element']) self.assertEqual(tokenize_name('SVGMPathElement'), ['SVG', 'MPath', 'Element']) self.assertEqual(tokenize_name('SVGTSpanElement'), ['SVG', 'TSpan', 'Element']) self.assertEqual(tokenize_name('SVGURIReference'), ['SVG', 'URI', 'Reference']) self.assertEqual(tokenize_name('UTF16TextIterator'), ['UTF16', 'Text', 'Iterator']) self.assertEqual(tokenize_name('UTF8Decoder'), ['UTF8', 'Decoder']) self.assertEqual(tokenize_name('Uint8Array'), ['Uint8', 'Array']) self.assertEqual(tokenize_name('DOMWindowBase64'), ['DOM', 'Window', 'Base64']) self.assertEqual(tokenize_name('TextCodecLatin1'), ['Text', 'Codec', 'Latin1']) self.assertEqual(tokenize_name('V8BindingForCore'), ['V8', 'Binding', 'For', 'Core']) self.assertEqual(tokenize_name('V8DOMRect'), ['V8', 'DOM', 'Rect']) self.assertEqual(tokenize_name('String16MojomTraits'), ['String16', 'Mojom', 'Traits']) self.assertEqual(tokenize_name('V0InsertionPoint'), ['V0', 'Insertion', 'Point']) self.assertEqual(tokenize_name('ShadowDOMV0Test'), ['Shadow', 'DOM', 'V0', 'Test']) self.assertEqual(tokenize_name('ElementShadowV0'), ['Element', 'Shadow', 'V0']) self.assertEqual(tokenize_name('StubChromeClientForSPv2'), ['Stub', 'Chrome', 'Client', 'For', 'SPv2']) self.assertEqual(tokenize_name('SQLiteAuthorizer'), ['SQLite', 'Authorizer']) self.assertEqual(tokenize_name('XPathEvaluator'), ['XPath', 'Evaluator']) self.assertEqual(tokenize_name('IsXHTMLDocument'), ['Is', 'XHTML', 'Document']) self.assertEqual(tokenize_name('isHTMLDocument'), ['is', 'HTML', 'Document']) self.assertEqual(tokenize_name('matrix3d'), ['matrix', '3d']) def test_ignoring_characters(self): self.assertEqual(tokenize_name('Animation.idl'), ['Animation', 'idl']) self.assertEqual(tokenize_name('-webkit-appearance'), ['webkit', 'appearance']) self.assertEqual(tokenize_name(' foo_bar!#"$'), ['foo', 'bar']) class NameStyleConverterTest(unittest.TestCase): def test_snake_case(self): converter = NameStyleConverter('HTMLElement') self.assertEqual(converter.to_snake_case(), 'html_element') def test_upper_camel_case(self): converter = NameStyleConverter('someSuperThing') self.assertEqual(converter.to_upper_camel_case(), 'SomeSuperThing') converter = NameStyleConverter('SVGElement') self.assertEqual(converter.to_upper_camel_case(), 'SVGElement') converter = NameStyleConverter('cssExternalScannerPreload') self.assertEqual(converter.to_upper_camel_case(), 'CSSExternalScannerPreload') converter = NameStyleConverter('xpathExpression') self.assertEqual(converter.to_upper_camel_case(), 'XPathExpression') converter = NameStyleConverter('feDropShadow') self.assertEqual(converter.to_upper_camel_case(), 'FEDropShadow') def test_lower_camel_case(self): converter = NameStyleConverter('someSuperThing') self.assertEqual(converter.to_lower_camel_case(), 'someSuperThing') converter = NameStyleConverter('SVGElement') self.assertEqual(converter.to_lower_camel_case(), 'svgElement') converter = NameStyleConverter('documentURI') self.assertEqual(converter.to_lower_camel_case(), 'documentURI') converter = NameStyleConverter('-webkit-margin-start') self.assertEqual(converter.to_lower_camel_case(), 'webkitMarginStart') converter = NameStyleConverter('Accelerated2dCanvas') self.assertEqual(converter.to_lower_camel_case(), 'accelerated2dCanvas') def test_macro_case(self): converter = NameStyleConverter('WebGLBaz2D') 
self.assertEqual(converter.to_macro_case(), 'WEBGL_BAZ_2D') def test_all_cases(self): converter = NameStyleConverter('SVGScriptElement') self.assertEqual(converter.to_all_cases(), { 'snake_case': 'svg_script_element', 'upper_camel_case': 'SVGScriptElement', 'macro_case': 'SVG_SCRIPT_ELEMENT', })
python/test/cuda/test_large_blocks.py
daniel-falk/nnabla-ext-cuda
103
12777865
<gh_stars>100-1000
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F


@pytest.mark.parametrize("m", [1, 2, 3])
def test_cuda_large_blocks(cuda_test_opts, m):
    if cuda_test_opts.disable_test_large_blocks:
        pytest.skip('`--disable-test-large-blocks` is passed')
    CUDA_THREAD_PER_BLOCK = 512
    CUDA_MAX_BLOCKS = 65536
    size = CUDA_MAX_BLOCKS * CUDA_THREAD_PER_BLOCK * m + 3
    print("Variable size:", size)
    x = np.zeros((size,), np.float32)
    v = nn.Variable(x.shape)
    v.d = x
    from nnabla.ext_utils import get_extension_context
    with nn.context_scope(get_extension_context('cuda')):
        y = F.relu(v)
        y.forward()
tests/test_0231-indexform.py
BioGeek/awkward-1.0
519
12777887
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

from __future__ import absolute_import

import pytest  # noqa: F401
import numpy as np  # noqa: F401
import awkward as ak  # noqa: F401


def test():
    for itype in ["i8", "u8", "i32", "u32", "i64"]:
        form = ak.forms.ListOffsetForm(itype, ak.forms.EmptyForm())
        assert form.offsets == itype
src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/agent_collect.py
Yanci0/openGauss-server
360
12777926
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName     : agent_collect.py
# Version      :
# Date         : 2021-4-7
# Description  : Receives and stores agent data.
#############################################################################

try:
    import sys
    import os
    from flask import request, Response
    from flask_restful import Resource

    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../"))
    from common.logger import CreateLogger
    from service.datafactory.storage.insert_data_to_database import SaveData
except ImportError as err:
    sys.exit("agent_collect.py: Failed to import module: %s." % str(err))

LOGGER = CreateLogger("debug", "server.log").create_log()


class ResponseTuple:
    """
    This class is used for generating a response tuple.
    """

    @staticmethod
    def success(result=None):
        if result is None:
            return {"status": "success"}, 200
        return {"status": "success", "result": result}

    @staticmethod
    def error(msg="", status_code=400):
        return {"status": "error", "msg": msg}, status_code


class Source(Resource):
    """
    This class is used for acquiring metric data from agent and
    save data in sqlite database.
    """

    def __init__(self):
        pass

    @staticmethod
    def post():
        content = request.json
        client_ip = request.remote_addr
        LOGGER.info("Successfully received request from: %s." % client_ip)
        try:
            insert_db = SaveData(LOGGER)
            insert_db.run(content)
            return ResponseTuple.success()
        except Exception as e:
            return ResponseTuple.error(msg=str(e), status_code=Response.status_code)

    @staticmethod
    def get():
        return ResponseTuple.success(result="Server service is normal.")

    @staticmethod
    def delete():
        return ResponseTuple.error(status_code=400)
sgit/commit.py
russelldavis/SublimeGit
310
12777946
<gh_stars>100-1000 # coding: utf-8 from functools import partial import sublime from sublime_plugin import WindowCommand, TextCommand, EventListener from .util import find_view_by_settings, noop, get_setting from .cmd import GitCmd from .helpers import GitStatusHelper from .status import GIT_WORKING_DIR_CLEAN GIT_COMMIT_VIEW_TITLE = "COMMIT_EDITMSG" GIT_COMMIT_VIEW_SYNTAX = 'Packages/SublimeGit/syntax/SublimeGit Commit Message.tmLanguage' GIT_NOTHING_STAGED = u'No changes added to commit. Use s on files/sections in the status view to stage changes.' GIT_COMMIT_TEMPLATE = u"""{old_msg} # Please enter the commit message for your changes. Lines starting # with '#' will be ignored, and an empty message aborts the commit. {status}""" GIT_AMEND_PUSHED = (u"It is discouraged to rewrite history which has already been pushed. " u"Are you sure you want to amend the commit?") CUT_LINE = u"------------------------ >8 ------------------------\n" CUT_EXPLANATION = u"# Do not touch the line above.\n# Everything below will be removed.\n" class GitCommit(object): windows = {} class GitCommitWindowCmd(GitCmd, GitStatusHelper): @property def is_verbose(self): return get_setting('git_commit_verbose', False) def get_commit_template(self, repo, add=False, amend=False): cmd = ['commit', '--dry-run', '--status', '--all' if add else None, '--amend' if amend else None, '--verbose' if self.is_verbose else None] exit, stdout, stderr = self.git(cmd, cwd=repo) stderr = stderr.strip() if stderr: for line in stderr.splitlines(): stdout += "# %s\n" % line old_msg = '' if amend: old_msg = self.git_lines(['rev-list', '--format=%B', '--max-count=1', 'HEAD'], cwd=repo) old_msg = "%s\n" % "\n".join(old_msg[1:]) if self.is_verbose and CUT_LINE not in stdout: comments = [] other = [] for line in stdout.splitlines(): if line.startswith('#'): comments.append(line) else: other.append(line) status = "\n".join(comments) status += "\n# %s" % CUT_LINE status += CUT_EXPLANATION status += "\n".join(other) else: status = stdout return GIT_COMMIT_TEMPLATE.format(status=status, old_msg=old_msg) def show_commit_panel(self, content): panel = self.window.get_output_panel('git-commit') panel.run_command('git_panel_write', {'content': content}) self.window.run_command('show_panel', {'panel': 'output.git-commit'}) class GitCommitCommand(WindowCommand, GitCommitWindowCmd): """ Documentation coming soon. """ def run(self, add=False): repo = self.get_repo() if not repo: return staged = self.has_staged_changes(repo) dirty = self.has_unstaged_changes(repo) if not add and not staged: return sublime.error_message(GIT_NOTHING_STAGED) elif add and (not staged and not dirty): return sublime.error_message(GIT_WORKING_DIR_CLEAN) view = find_view_by_settings(self.window, git_view='commit', git_repo=repo) if not view: view = self.window.new_file() view.set_name(GIT_COMMIT_VIEW_TITLE) view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX) view.set_scratch(True) view.settings().set('git_view', 'commit') view.settings().set('git_repo', repo) GitCommit.windows[view.id()] = (self.window, add, False) self.window.focus_view(view) template = self.get_commit_template(repo, add=add) view.run_command('git_commit_template', {'template': template}) class GitCommitAmendCommand(GitCommitWindowCmd, WindowCommand): """ Documentation coming soon. 
""" def run(self): repo = self.get_repo() if not repo: return unpushed = self.git_exit_code(['diff', '--exit-code', '--quiet', '@{upstream}..'], cwd=repo) if unpushed == 0: if not sublime.ok_cancel_dialog(GIT_AMEND_PUSHED, 'Amend commit'): return view = find_view_by_settings(self.window, git_view='commit', git_repo=repo) if not view: view = self.window.new_file() view.set_name(GIT_COMMIT_VIEW_TITLE) view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX) view.set_scratch(True) view.settings().set('git_view', 'commit') view.settings().set('git_repo', repo) GitCommit.windows[view.id()] = (self.window, False, True) self.window.focus_view(view) template = self.get_commit_template(repo, amend=True) view.run_command('git_commit_template', {'template': template}) class GitCommitTemplateCommand(TextCommand): def is_visible(self): return False def run(self, edit, template=''): if self.view.size() > 0: self.view.erase(edit, sublime.Region(0, self.view.size())) self.view.insert(edit, 0, template) self.view.sel().clear() self.view.sel().add(sublime.Region(0)) class GitCommitEventListener(EventListener): _lpop = False def mark_pedantic(self, view): if view.settings().get('git_view') == 'commit' or view.file_name() == 'COMMIT_EDITMSG': # Header lines should be a max of 50 chars view.erase_regions('git-commit.header') firstline = view.line(view.text_point(0, 0)) if firstline.end() > 50 and not view.substr(firstline).startswith('#'): view.add_regions('git-commit.header', [sublime.Region(50, firstline.end())], 'invalid', 'dot') # The second line should be empty view.erase_regions('git-commit.line2') secondline = view.line(view.text_point(1, 0)) if secondline.end() - secondline.begin() > 0 and not view.substr(secondline).startswith('#'): view.add_regions('git-commit.line2', [secondline], 'invalid', 'dot') # Other lines should be at most 72 chars view.erase_regions('git-commit.others') for l in view.lines(sublime.Region(view.text_point(2, 0), view.size())): if view.substr(l).startswith('#'): break if l.end() - l.begin() > 72: view.add_regions('git-commit.others', [sublime.Region(l.begin() + 72, l.end())], 'invalid', 'dot') def on_activated(self, view): if sublime.version() < '3000' and get_setting('git_commit_pedantic') is True: self.mark_pedantic(view) def on_modified(self, view): if sublime.version() < '3000' and get_setting('git_commit_pedantic') is True: self.mark_pedantic(view) def on_modified_async(self, view): if get_setting('git_commit_pedantic') is True: self.mark_pedantic(view) def on_activated_async(self, view): if get_setting('git_commit_pedantic') is True: self.mark_pedantic(view) def on_close(self, view): if view.settings().get('git_view') == 'commit' and view.id() in GitCommit.windows: message = view.substr(sublime.Region(0, view.size())) window, add, amend = GitCommit.windows[view.id()] repo = view.settings().get('git_repo') window.run_command('git_commit_perform', {'message': message, 'add': add, 'amend': amend, 'repo': repo}) class GitCommitPerformCommand(WindowCommand, GitCommitWindowCmd): def run(self, repo, message, add=False, amend=False): cmd = ['commit', '--cleanup=strip', '--all' if add else None, '--amend' if amend else None, '--verbose' if self.is_verbose else None, '-F', '-'] exit, stdout, stderr = self.git(cmd, stdin=message, cwd=repo) self.show_commit_panel(stdout if exit == 0 else stderr) self.window.run_command('git_status', {'refresh_only': True}) def is_visible(self): return False class GitCommitSaveCommand(TextCommand): def is_visible(self): return False def run(self, edit): if 
self.view.settings().get('git_view') == 'commit' and self.view.id() in GitCommit.windows: return self.view.run_command('save') class GitQuickCommitCommand(WindowCommand, GitCommitWindowCmd): """ Quickly commit changes with a one-line commit message. If there are any staged changes, only those changes will be added. If there are no staged changes, any changed files that git know about will be added in the commit. If the working directory is clean, an error will be shown indicating it. After entering the commit message, press enter to commit, or esc to cancel. An empty commit message will also result in the commit being cancelled. """ def run(self): repo = self.get_repo() if not repo: return staged = self.has_staged_changes(repo) dirty = self.has_unstaged_changes(repo) if not staged and not dirty: sublime.error_message(GIT_WORKING_DIR_CLEAN.capitalize()) return self.window.show_input_panel("Commit message:", '', partial(self.on_commit_message, repo), noop, noop) def on_commit_message(self, repo, msg=None): if not msg: msg = '' cmd = ['commit', '-F', '-'] if self.has_staged_changes(repo) else ['commit', '-a', '-F', '-'] stdout = self.git_string(cmd, stdin=msg, cwd=repo) self.show_commit_panel(stdout) self.window.run_command('git_status', {'refresh_only': True}) class GitQuickCommitCurrentFileCommand(TextCommand, GitCmd, GitStatusHelper): """ Documentation coming soon. """ def run(self, edit): filename = self.view.file_name() if not filename: sublime.error_message("Cannot commit a file which has not been saved.") return repo = self.get_repo() if not repo: return if not self.file_in_git(repo, filename): if sublime.ok_cancel_dialog("The file %s is not tracked by git. Do you want to add it?" % filename, "Add file"): exit, stdout, stderr = self.git(['add', '--force', '--', filename], cwd=repo) if exit == 0: sublime.status_message('Added %s' % filename) else: sublime.error_message('git error: %s' % stderr) else: return self.view.window().show_input_panel("Commit message:", '', partial(self.on_commit_message, repo, filename), noop, noop) def on_commit_message(self, repo, filename, msg=None): if not msg: msg = '' # run command cmd = ['commit', '-F', '-', '--only', '--', filename] stdout = self.git_string(cmd, stdin=msg, cwd=repo) # show output panel panel = self.view.window().get_output_panel('git-commit') panel.run_command('git_panel_write', {'content': stdout}) self.view.window().run_command('show_panel', {'panel': 'output.git-commit'}) # update status if necessary self.view.window().run_command('git_status', {'refresh_only': True})
src/seedwork/infrastructure/logging.py
Ermlab/python-ddd
308
12777968
from pythonjsonlogger import jsonlogger from datetime import datetime import logging from logging import Logger from logging.config import dictConfig from seedwork.utils.functional import SimpleLazyObject from seedwork.infrastructure.request_context import request_context class RequestContextFilter(logging.Filter): """ "Provides correlation id parameter for the logger""" def __init__(self, name: str, request_context) -> None: super().__init__(name=name) self.request_context = request_context def filter(self, record): record.correlation_id = self.request_context.correlation_id.get() return True class ElkJsonFormatter(jsonlogger.JsonFormatter): """ ELK stack-compatibile formatter """ def add_fields(self, log_record, record, message_dict): super(ElkJsonFormatter, self).add_fields(log_record, record, message_dict) log_record["@timestamp"] = datetime.now().isoformat() log_record["level"] = record.levelname log_record["logger"] = record.name class LoggerFactory: _configured = False @classmethod def configure( cls, logger_name="app", log_filename="./logs.json", request_context=request_context, ): cls.logger_name = logger_name cls.log_filename = log_filename cls.request_context = request_context cls._configured = True @classmethod def create_logger(cls): """ Returns a logger instance, based on a configuration options """ if not cls._configured: cls.configure() logging_config = { "version": 1, "disable_existing_loggers": False, "formatters": { "default": { # exact format is not important, this is the minimum information "format": "%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s", }, "colored": { "()": "colorlog.ColoredFormatter", "format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s", "log_colors": { "DEBUG": "white", "INFO": "green", "WARNING": "yellow", "ERROR": "red", "CRITICAL": "red,bold", }, }, "colored_db": { "()": "colorlog.ColoredFormatter", "format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s", "log_colors": { "DEBUG": "purple", "INFO": "green", "WARNING": "yellow", "ERROR": "red", "CRITICAL": "red,bold", }, }, "json_formatter": { "()": "seedwork.infrastructure.logging.ElkJsonFormatter", }, }, "handlers": { # console logs to stderr "console": { "class": "logging.StreamHandler", "formatter": "default", }, "colored_console": { "class": "colorlog.StreamHandler", "formatter": "colored", }, "colored_console_db": { "class": "colorlog.StreamHandler", "formatter": "colored_db", }, "file_handler": { "class": "logging.handlers.RotatingFileHandler", "filename": cls.log_filename, "formatter": "json_formatter", } if cls.log_filename else None, # Add Handler for Sentry for `warning` and above # 'sentry': { # 'level': 'WARNING', # 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', # }, }, "loggers": { cls.logger_name: { "level": "DEBUG", "handlers": ["colored_console", "file_handler"], # , 'sentry'], }, # Prevent noisy modules from logging to Sentry "noisy_module": { "level": "ERROR", "handlers": ["console"], "propagate": False, }, }, } dictConfig(logging_config) logger = logging.getLogger(name=cls.logger_name) logger.addFilter( RequestContextFilter( name=cls.logger_name, request_context=cls.request_context ) ) return logger """ We are making logger globally available, but to make it configurable logger lazy-evaluated. Use `LoggerFactory.configure()` to configure the logger prior to its usage """ logger = SimpleLazyObject(LoggerFactory.create_logger)
homeassistant/components/wiffi/wiffi_strings.py
domwillcode/home-assistant
30,023
12777989
"""Definition of string used in wiffi json telegrams.""" # units of measurement WIFFI_UOM_TEMP_CELSIUS = "gradC" WIFFI_UOM_DEGREE = "grad" WIFFI_UOM_PERCENT = "%" WIFFI_UOM_MILLI_BAR = "mb" WIFFI_UOM_LUX = "lux"
uasyncio/test_readexactly.py
Carglglz/micropython-lib
126
12778021
<reponame>Carglglz/micropython-lib<filename>uasyncio/test_readexactly.py<gh_stars>100-1000
from uasyncio import StreamReader


class MockSock:

    def __init__(self, data_list):
        self.data = data_list

    def read(self, sz):
        try:
            return self.data.pop(0)
        except IndexError:
            return b""


mock = MockSock([
    b"123", b"234", b"5",
    b"a", b"b", b"c", b"d", b"e",
])


def func():
    sr = StreamReader(mock)
    assert await sr.readexactly(3) == b"123"
    assert await sr.readexactly(4) == b"2345"
    assert await sr.readexactly(5) == b"abcde"
    # This isn't how it should be, but the current behavior
    assert await sr.readexactly(10) == b""

for i in func():
    pass
examples/urlopen.py
mikelolasagasti/bandit
4,016
12778084
''' Example dangerous usage of urllib[2] opener functions

The urllib and urllib2 opener functions and object can open http, ftp,
and file urls. Often, the ability to open file urls is overlooked leading
to code that can unexpectedly open files on the local server. This
could be used by an attacker to leak information about the server.
'''

import urllib
import urllib2

# Python 3
import urllib.request

# Six
import six


def test_urlopen():
    # urllib
    url = urllib.quote('file:///bin/ls')
    urllib.urlopen(url, 'blah', 32)
    urllib.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # urllib2
    handler = urllib2.HTTPBasicAuthHandler()
    handler.add_password(realm='test', uri='http://mysite.com', user='bob')
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    urllib2.urlopen('file:///bin/ls')
    urllib2.Request('file:///bin/ls')

    # Python 3
    urllib.request.urlopen('file:///bin/ls')
    urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # Six
    six.moves.urllib.request.urlopen('file:///bin/ls')
    six.moves.urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = six.moves.urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = six.moves.urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
python_web/config.py
LouisYZK/Frodo
123
12778096
import os
import configparser
import yaml
import ast
from pathlib import Path

HERE = Path(__file__).parent.absolute()
print(HERE)
config_dir = HERE / 'config/config.ini.model'
config = configparser.ConfigParser()
config.read(config_dir)

ACCESS_TOKEN_EXPIRE_MINUTES = config.get('security', 'access_token_expire_minutes')
JWT_ALGORITHM = config.get('security', 'jwt_algorithm')
OAUTH_REDIRECT_PATH = config.get('github', 'oauth_redirect_path')
REDIRECT_URI = config.get('github', 'redirect_uri')
CLIENT_ID = config.get('github', 'client_id')
CLIENT_SECRET = config.get('github', 'client_secret')
HOST_PATH = config.get('global', 'host_path')
WEB_PORT = config.get('port', "fastapi")

# DB_URL = os.getenv('DB_URL', config.get('database', 'db_url'))
db_host = config.get('database', 'host')
db_username = config.get('database', 'username')
db_pwd = config.get('database', 'password')
db_port = config.get('database', 'port')
db = config.get('database', 'db')
charset = config.get('database', 'charset')
DB_URL = f'mysql+pymysql://{db_username}:{db_pwd}@{db_host}:{db_port}/{db}?charset={charset}'
print(DB_URL)

REDIS_URL = os.getenv('REDIS_URL', config.get('redis', 'redis_url'))
DEBUG = os.getenv('DEBUG', config.get('global', 'debug')).lower() \
    in ('true', 'y', 'yes', '1')

WTF_CSRF_SECRET_KEY = 123
AUTH_LOGIN_ENDPOINT = 'index.login'

MEMCACHED_HOST = os.getenv('MEMCACHED_HOST', config.get('memcached', 'memcached_host'))
MEMCACHED_PORT = config.get('memcached', 'memcached_port')

oauth_redirect_path = '/oauth'
redirect_uri = 'http://127.0.0.1:8000/oauth'
client_id = "098a2e6da880878e05da"
client_secret = "<KEY>"

REACT_PROMPT = '喜欢这篇文章吗? 记得给我留言或订阅哦'
PLOAD_FOLDER = HERE / 'static/upload'
AUTHOR = 'zhikai'
SITE_TITLE = 'Zhikai-Yang Space'
PER_PAGE = 10
GOOGLE_ANALYTICS = ''
SENTRY_DSN = ''
REQUEST_TIMEOUT = 15
SHOW_PAGEVIEW = True
PERMALINK_TYPE = 'slug'  # 可选 id、slug、title

# [(Endpoint, Name, IconName, Color), ...]
# SITE_NAV_MENUS = [('blog.index', '首页'), ('blog.topics', '专题'),
#                   ('blog.archives', '归档'), ('blog.tags', '标签'),
#                   ('index.search', '搜索'), ('/page/aboutme', '关于我'),
#                   ('index.feed', 'RSS', 'rss', '#fc6423')]
SITE_NAV_MENUS = [('blog.index', '首页'), ('blog.activities', '动态'),
                  ('blog.tags', '标签'), ('index.search', '搜索'),
                  ('blog.archives', '归档'), ('/post/aboutme', '关于我')
                  ]
BEIAN_ID = ''
JWT_SECRET = config.get('security', 'jwt_secret')
EXPIRATION_DELTA = 60 * 60
WTF_CSRF_ENABLED = False

MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 465
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
BLOG_URL = 'https://example.com'
UPLOAD_FOLDER = HERE / 'static/upload'

# Redis sentinel
REDIS_SENTINEL_SERVICE_HOST = None
REDIS_SENTINEL_SERVICE_PORT = 26379

SHOW_AUTHOR = True


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self


try:
    with open(HERE / 'config.yaml') as f:
        yaml_content = f.read()
    partials = AttrDict(yaml.load(yaml_content)).partials
    USE_YAML = True
except FileNotFoundError:
    USE_YAML = False
    partials = {}

try:
    from local_settings import *  # noqa
except ImportError:
    pass

K_POST = 1001
K_COMMENT = 1002

ONE_MINUTE = 60
ONE_HOUR = ONE_MINUTE * 60
ONE_DAY = ONE_HOUR * 24

K_STATUS = 1003
K_ACTIVITY = 1004

CDN_DOMAIN = ''
USE_FFMPEG = False

STATIC_FILE_TYPES = ('jpg', 'png', 'webp', 'gif', 'mp4', 'css', 'js')
vedastr/metrics/__init__.py
csmasters/vedastr
475
12778112
from .accuracy import Accuracy
from .builder import build_metric
alipay/aop/api/domain/ApInvoiceBillLinkOrderRequest.py
antopen/alipay-sdk-python-all
213
12778115
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi


class ApInvoiceBillLinkOrderRequest(object):

    def __init__(self):
        self._amt = None
        self._daily_bill_dimension = None
        self._monthly_bill_no = None

    @property
    def amt(self):
        return self._amt

    @amt.setter
    def amt(self, value):
        if isinstance(value, MultiCurrencyMoneyOpenApi):
            self._amt = value
        else:
            self._amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)

    @property
    def daily_bill_dimension(self):
        return self._daily_bill_dimension

    @daily_bill_dimension.setter
    def daily_bill_dimension(self, value):
        self._daily_bill_dimension = value

    @property
    def monthly_bill_no(self):
        return self._monthly_bill_no

    @monthly_bill_no.setter
    def monthly_bill_no(self, value):
        self._monthly_bill_no = value

    def to_alipay_dict(self):
        params = dict()
        if self.amt:
            if hasattr(self.amt, 'to_alipay_dict'):
                params['amt'] = self.amt.to_alipay_dict()
            else:
                params['amt'] = self.amt
        if self.daily_bill_dimension:
            if hasattr(self.daily_bill_dimension, 'to_alipay_dict'):
                params['daily_bill_dimension'] = self.daily_bill_dimension.to_alipay_dict()
            else:
                params['daily_bill_dimension'] = self.daily_bill_dimension
        if self.monthly_bill_no:
            if hasattr(self.monthly_bill_no, 'to_alipay_dict'):
                params['monthly_bill_no'] = self.monthly_bill_no.to_alipay_dict()
            else:
                params['monthly_bill_no'] = self.monthly_bill_no
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = ApInvoiceBillLinkOrderRequest()
        if 'amt' in d:
            o.amt = d['amt']
        if 'daily_bill_dimension' in d:
            o.daily_bill_dimension = d['daily_bill_dimension']
        if 'monthly_bill_no' in d:
            o.monthly_bill_no = d['monthly_bill_no']
        return o
packages/pyright-internal/src/tests/samples/paramSpec4.py
Jasha10/pyright
3,934
12778116
# This sample tests the type checker's handling of ParamSpec
# and Concatenate as described in PEP 612.

from typing import Callable, Concatenate, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


class Request:
    ...


def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]:
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        return f(Request(), *args, **kwargs)

    return inner


@with_request
def takes_int_str(request: Request, x: int, y: str) -> int:
    # use request
    return x + 7


takes_int_str(1, "A")

# This should generate an error because the first arg
# is the incorrect type.
takes_int_str("B", "A")

# This should generate an error because there are too
# many parameters.
takes_int_str(1, "A", 2)


# This should generate an error because a ParamSpec can appear
# only within the last type arg for Concatenate
def decorator1(f: Callable[Concatenate[P, P], int]) -> Callable[P, int]:
    ...


# This should generate an error because the last type arg
# for Concatenate should be a ParamSpec.
def decorator2(f: Callable[Concatenate[int, int], int]) -> Callable[P, int]:
    ...


# This should generate an error because Concatenate is missing
# its type arguments.
def decorator3(f: Callable[Concatenate, int]) -> Callable[P, int]:
    ...


def decorator4(func: Callable[P, None]) -> Callable[Concatenate[int, P], None]:
    def wrapper(x: int, /, *args: P.args, **kwargs: P.kwargs) -> None:
        ...

    return wrapper


def func1(func: Callable[Concatenate[int, P], None]) -> Callable[P, None]:
    ...


def func2(a: int, b: str, c: str) -> None:
    ...


def func3(a: int, /, b: str, c: str) -> None:
    ...


def func4(a: int, b: str, /, c: str) -> None:
    ...


v1 = func1(func2)
reveal_type(v1, expected_text="(b: str, c: str) -> None")

v2 = func1(func3)
reveal_type(v2, expected_text="(b: str, c: str) -> None")

v3 = func1(func4)
reveal_type(v3, expected_text="(b: str, /, c: str) -> None")


def func5(__fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
    ...


def func6(name: str, *args: str):
    ...


v5 = func5(func6, "a", "b", "c")

# This should generate an error because 1 isn't assignable to str.
v6 = func5(func6, "a", "b", "c", 1)


def func7(name: str, **kwargs: str):
    ...


v7 = func5(func7, "a", b="b", c="c")

# This should generate an error because 1 isn't assignable to str.
v8 = func5(func7, "a", b="b", c=1)
pyjobs/core/migrations/0015_job_ad_interested.py
Mdslino/PyJobs
132
12778121
<filename>pyjobs/core/migrations/0015_job_ad_interested.py<gh_stars>100-1000 # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-11-04 18:42 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("core", "0014_auto_20180511_2122")] operations = [ migrations.AddField( model_name="job", name="ad_interested", field=models.BooleanField( default=False, verbose_name="Interessado em ficar em destaque no PyJobs?", ), ) ]
apps/panel/migrations/0004_log.py
ivall/IVmonitor
190
12778152
# Generated by Django 3.0.7 on 2021-02-05 09:15 from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('panel', '0003_auto_20210205_0955'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('status', models.CharField(max_length=4)), ('time', models.DateTimeField(default=django.utils.timezone.now)), ('monitor_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='panel.MonitorObject')), ], ), ]
hummingbot/strategy/dev_0_hello_world/start.py
cardosofede/hummingbot
542
12778239
<reponame>cardosofede/hummingbot #!/usr/bin/env python from hummingbot.strategy.dev_0_hello_world.dev_0_hello_world_config_map import dev_0_hello_world_config_map from hummingbot.strategy.dev_0_hello_world import HelloWorldStrategy def start(self): try: exchange = dev_0_hello_world_config_map.get("exchange").value.lower() trading_pair = dev_0_hello_world_config_map.get("trading_pair").value asset = dev_0_hello_world_config_map.get("asset").value self._initialize_markets([(exchange, [trading_pair])]) exchange = self.markets[exchange] self.strategy = HelloWorldStrategy(exchange=exchange, trading_pair=trading_pair, asset=asset, ) except Exception as e: self.notify(str(e)) self.logger().error("Unknown error during initialization.", exc_info=True)
tests/unit/test_modulegraph/testpkg-packages/pkg/__init__.py
hawkhai/pyinstaller
9,267
12778243
<reponame>hawkhai/pyinstaller """ pkg.init """
dataparser/queue/finder.py
idxn/sublime-robot-framework-assistant
103
12778260
import os
import fnmatch


def finder(path, ext):
    """Returns files from path by extension"""
    l = []
    if not ext.startswith('*.'):
        ext = '*.{0}'.format(ext)
    for path, dirs, files in os.walk(os.path.abspath(path)):
        for f in fnmatch.filter(files, ext):
            l.append(os.path.join(path, f))
    return l
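# Illustrative usage only (directory and extension below are made up, not part of
# the original module): collect every *.robot file under the current directory.
if __name__ == '__main__':
    for robot_file in finder('.', 'robot'):
        print(robot_file)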
ote_sdk/ote_sdk/entities/coordinate.py
ntyukaev/training_extensions
775
12778267
<reponame>ntyukaev/training_extensions
"""This module implements the Coordinate entity"""

# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

from typing import Tuple


class Coordinate:
    """
    Represents a 2D-coordinate with an x-position and a y-position.

    NB most coordinates are normalized (between 0.0 and 1.0)

    :param x: x-coordinate
    :param y: y-coordinate
    """

    __slots__ = ["x", "y"]

    def __init__(self, x: float, y: float):
        self.x = x
        self.y = y

    def __repr__(self):
        return f"Coordinate(x={self.x}, y={self.y})"

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __hash__(self):
        return hash(str(self))

    def as_tuple(self) -> Tuple[float, float]:
        """
        Convert the coordinates to a pair (x,y)
        """
        return self.x, self.y

    def as_int_tuple(self) -> Tuple[int, int]:
        """
        Convert the coordinates to a pair of integer coordinates (x,y)
        """
        return int(self.x), int(self.y)
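# Illustrative usage sketch (the values below are made up, not from the original file);
# it shows the normalized-coordinate helpers defined above.
if __name__ == "__main__":
    c = Coordinate(x=0.25, y=0.5)
    print(c)                 # Coordinate(x=0.25, y=0.5)
    print(c.as_tuple())      # (0.25, 0.5)
    print(c.as_int_tuple())  # (0, 0)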
kashgari/layers/__init__.py
SharpKoi/Kashgari
2,422
12778272
# encoding: utf-8

# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz

# file: __init__.py
# time: 7:39 p.m.

from typing import Dict, Any

from tensorflow import keras

from .conditional_random_field import KConditionalRandomField
from .behdanau_attention import BahdanauAttention  # type: ignore

L = keras.layers

L.BahdanauAttention = BahdanauAttention
L.KConditionalRandomField = KConditionalRandomField


def resigter_custom_layers(custom_objects: Dict[str, Any]) -> Dict[str, Any]:
    custom_objects['KConditionalRandomField'] = KConditionalRandomField
    custom_objects['BahdanauAttention'] = BahdanauAttention
    return custom_objects


if __name__ == "__main__":
    pass
leet/array/maxSubArrayLen.py
peterlamar/python-cp-cheatsheet
140
12778287
from typing import List


class Solution:
    # Maximum Size Subarray Sum Equals k
    def maxSubArrayLen(self, nums: List[int], k: int) -> int:
        hm = {0: -1}
        ps = 0
        rtn = 0
        for i in range(len(nums)):
            ps += nums[i]
            if ps not in hm:
                hm[ps] = i
            if ps - k in hm:
                rtn = max(rtn, i - hm[ps - k])
        return rtn

    # Same idea written more compactly with enumerate (this definition shadows
    # the one above when the class body is evaluated).
    def maxSubArrayLen(self, nums: List[int], k: int) -> int:
        hm = {0: -1}
        ps = rtn = 0
        for i, n in enumerate(nums):
            ps += n
            if ps not in hm:
                hm[ps] = i
            if ps - k in hm:
                rtn = max(rtn, i - hm[ps - k])
        return rtn
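# Quick sanity check with a made-up example (not part of the original file):
# the longest subarray of [1, -1, 5, -2, 3] summing to k=3 is [1, -1, 5, -2], length 4.
if __name__ == "__main__":
    print(Solution().maxSubArrayLen([1, -1, 5, -2, 3], 3))  # expected: 4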
faker/providers/lorem/tl_PH/__init__.py
jacksmith15/faker
12,077
12778301
<reponame>jacksmith15/faker<gh_stars>1000+ from ..fil_PH import Provider as FilPhProvider class Provider(FilPhProvider): """Implement lorem provider for ``tl_PH`` locale. There is no difference from the |FilPhLoremProvider|. .. |FilPhLoremProvider| replace:: :meth:`FilPhLoremProvider <faker.providers.lorem.fil_PH.Provider>` """ pass
sampling_free/modeling/generalized_rcnn/rpn/retinanet/__init__.py
ChenJoya/sampling-free
266
12778302
<reponame>ChenJoya/sampling-free from .retinanet import build_retinanet
trackml/score.py
victor-estrade/trackml-library
166
12778315
"""TrackML scoring metric""" __authors__ = ['<NAME>', '<NAME>', '<NAME>', '<NAME>'] import numpy import pandas def _analyze_tracks(truth, submission): """Compute the majority particle, hit counts, and weight for each track. Parameters ---------- truth : pandas.DataFrame Truth information. Must have hit_id, particle_id, and weight columns. submission : pandas.DataFrame Proposed hit/track association. Must have hit_id and track_id columns. Returns ------- pandas.DataFrame Contains track_id, nhits, major_particle_id, major_particle_nhits, major_nhits, and major_weight columns. """ # true number of hits for each particle_id particles_nhits = truth['particle_id'].value_counts(sort=False) total_weight = truth['weight'].sum() # combined event with minimal reconstructed and truth information event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']], submission[['hit_id', 'track_id']], on=['hit_id'], how='left', validate='one_to_one') event.drop('hit_id', axis=1, inplace=True) event.sort_values(by=['track_id', 'particle_id'], inplace=True) # ASSUMPTIONs: 0 <= track_id, 0 <= particle_id tracks = [] # running sum for the reconstructed track we are currently in rec_track_id = -1 rec_nhits = 0 # running sum for the particle we are currently in (in this track_id) cur_particle_id = -1 cur_nhits = 0 cur_weight = 0 # majority particle with most hits up to now (in this track_id) maj_particle_id = -1 maj_nhits = 0 maj_weight = 0 for hit in event.itertuples(index=False): # we reached the next track so we need to finish the current one if (rec_track_id != -1) and (rec_track_id != hit.track_id): # could be that the current particle is the majority one if maj_nhits < cur_nhits: maj_particle_id = cur_particle_id maj_nhits = cur_nhits maj_weight = cur_weight # store values for this track tracks.append((rec_track_id, rec_nhits, maj_particle_id, particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight)) # setup running values for next track (or first) if rec_track_id != hit.track_id: rec_track_id = hit.track_id rec_nhits = 1 cur_particle_id = hit.particle_id cur_nhits = 1 cur_weight = hit.weight maj_particle_id = -1 maj_nhits = 0 maj_weights = 0 continue # hit is part of the current reconstructed track rec_nhits += 1 # reached new particle within the same reconstructed track if cur_particle_id != hit.particle_id: # check if last particle has more hits than the majority one # if yes, set the last particle as the new majority particle if maj_nhits < cur_nhits: maj_particle_id = cur_particle_id maj_nhits = cur_nhits maj_weight = cur_weight # reset runnig values for current particle cur_particle_id = hit.particle_id cur_nhits = 1 cur_weight = hit.weight # hit belongs to the same particle within the same reconstructed track else: cur_nhits += 1 cur_weight += hit.weight # last track is not handled inside the loop if maj_nhits < cur_nhits: maj_particle_id = cur_particle_id maj_nhits = cur_nhits maj_weight = cur_weight # store values for the last track tracks.append((rec_track_id, rec_nhits, maj_particle_id, particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight)) cols = ['track_id', 'nhits', 'major_particle_id', 'major_particle_nhits', 'major_nhits', 'major_weight'] return pandas.DataFrame.from_records(tracks, columns=cols) def score_event(truth, submission): """Compute the TrackML event score for a single event. Parameters ---------- truth : pandas.DataFrame Truth information. Must have hit_id, particle_id, and weight columns. 
submission : pandas.DataFrame Proposed hit/track association. Must have hit_id and track_id columns. """ tracks = _analyze_tracks(truth, submission) purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits']) purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits']) good_track = (0.5 < purity_rec) & (0.5 < purity_maj) return tracks['major_weight'][good_track].sum()
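# Minimal sanity check with toy data (hit ids, particle ids, and weights below are
# made up, not from the original file); a perfect reconstruction of both particles
# should score 1.0.
if __name__ == '__main__':
    truth = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'particle_id': [7, 7, 8, 8],
        'weight': [0.25, 0.25, 0.25, 0.25],
    })
    submission = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'track_id': [0, 0, 1, 1],
    })
    print(score_event(truth, submission))  # expected: 1.0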
exercises/zh/solution_03_09_01.py
Jette16/spacy-course
2,085
12778375
from spacy.lang.zh import Chinese
from spacy.tokens import Token

nlp = Chinese()

# Register the token extension attribute "is_country" with a default value of False
Token.set_extension("is_country", default=False)

# Process the text and set the is_country attribute of the token "新加坡" (Singapore) to True
doc = nlp("我住在新加坡。")
doc[3]._.is_country = True

# Print the token text and the is_country attribute for all tokens
print([(token.text, token._.is_country) for token in doc])
examples/worker/simplejob.py
pooya/disco
786
12778443
from disco.job import SimpleJob


class SimpleJob(SimpleJob):
    def map(self, worker, task, **jobargs):
        worker.output(task, partition=None).file.append('hello world!')

    def reduce(self, worker, task, **jobargs):
        worker.output(task, partition=None).file.append('goodbye world!')
tests/seahub/views/sysadmin/test_sysadmin.py
samuelduann/seahub
420
12778455
<filename>tests/seahub/views/sysadmin/test_sysadmin.py<gh_stars>100-1000 import os import openpyxl from io import BytesIO from mock import patch from django.urls import reverse from seahub.base.accounts import User from seahub.options.models import (UserOptions, KEY_FORCE_PASSWD_CHANGE) from seahub.test_utils import BaseTestCase from seahub.utils.ms_excel import write_xls as real_write_xls import pytest pytestmark = pytest.mark.django_db from seaserv import ccnet_threaded_rpc class BatchUserMakeAdminTest(BaseTestCase): def setUp(self): self.login_as(self.admin) def test_can_make_admins(self): resp = self.client.post( reverse('batch_user_make_admin'), { 'set_admin_emails': self.user.username }, HTTP_X_REQUESTED_WITH='XMLHttpRequest' ) old_passwd = <PASSWORD> self.assertContains(resp, '"success": true') u = User.objects.get(email=self.user.username) assert u.is_staff is True assert u.enc_password == <PASSWORD> # class UserMakeAdminTest(TestCase, Fixtures): # def test_can_make_admin(self): # self.client.post( # reverse('auth_login'), {'username': self.admin.username, # 'password': '<PASSWORD>'} # ) # resp = self.client.get( # reverse('user_make_admin', args=[self.user.id]) # ) # old_passwd = <PASSWORD> # self.assertEqual(302, resp.status_code) # u = User.objects.get(email=self.user.username) # assert u.is_staff is True # assert u.enc_password == <PASSWORD> class UserRemoveTest(BaseTestCase): def setUp(self): self.login_as(self.admin) def test_can_remove(self): # create one user username = self.user.username resp = self.client.post( reverse('user_remove', args=[username]) ) self.assertEqual(302, resp.status_code) assert len(ccnet_threaded_rpc.search_emailusers('DB', username, -1, -1)) == 0 class SudoModeTest(BaseTestCase): def test_normal_user_raise_404(self): self.login_as(self.user) resp = self.client.get(reverse('sys_sudo_mode')) self.assertEqual(404, resp.status_code) def test_admin_get(self): self.login_as(self.admin) resp = self.client.get(reverse('sys_sudo_mode')) self.assertEqual(200, resp.status_code) self.assertTemplateUsed('sysadmin/sudo_mode.html') def test_admin_post(self): self.login_as(self.admin) resp = self.client.post(reverse('sys_sudo_mode'), { 'username': self.admin.username, 'password': <PASSWORD>, }) self.assertEqual(302, resp.status_code) self.assertRedirects(resp, reverse('sys_info')) class SysGroupAdminExportExcelTest(BaseTestCase): def setUp(self): self.login_as(self.admin) def test_can_export_excel(self): resp = self.client.get(reverse('sys_group_admin_export_excel')) self.assertEqual(200, resp.status_code) assert 'application/ms-excel' in resp.headers['content-type'] class SysUserAdminExportExcelTest(BaseTestCase): def setUp(self): self.login_as(self.admin) def test_can_export_excel(self): resp = self.client.get(reverse('sys_useradmin_export_excel')) self.assertEqual(200, resp.status_code) assert 'application/ms-excel' in resp.headers['content-type'] def write_xls(self, sheet_name, head, data_list): assert 'Role' in head return real_write_xls(sheet_name, head, data_list) @patch('seahub.views.sysadmin.write_xls') @patch('seahub.views.sysadmin.is_pro_version') def test_can_export_excel_in_pro(self, mock_is_pro_version, mock_write_xls): mock_is_pro_version.return_value = True mock_write_xls.side_effect = self.write_xls # mock_write_xls.assert_called_once() resp = self.client.get(reverse('sys_useradmin_export_excel')) self.assertEqual(200, resp.status_code) assert 'application/ms-excel' in resp.headers['content-type'] class BatchAddUserHelpTest(BaseTestCase): def 
setUp(self): self.login_as(self.admin) def test_can_get_excel(self): resp = self.client.get(reverse('batch_add_user_example')+"?type=xlsx") assert resp.status_code == 200 def test_validate_excel(self): resp = self.client.get(reverse('batch_add_user_example')+"?type=xlsx") wb = openpyxl.load_workbook(filename=BytesIO(resp.content), read_only=True) assert wb.sheetnames[0] == 'sample' rows = wb.worksheets[0].rows i = 0 next(rows) for r in rows: assert r[0].value == 'test' + str(i) + '@example.com' assert r[1].value == '123456' assert r[2].value == 'test' + str(i) assert r[3].value == 'default' assert r[4].value == '1000' i += 1
saleor/plugins/webhook/const.py
victor-abz/saleor
1,392
12778477
CACHE_EXCLUDED_SHIPPING_KEY = "webhook_exclude_shipping_id_"
CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3
EXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2
workflow/__main__.py
trecvt-oss/workflow
183
12778504
<gh_stars>100-1000 #!/usr/bin/python # -*- coding: utf-8 -*- # License: 3-clause BSD License # Author: <NAME> <<EMAIL>> # Read-more: https://github.com/mdipierro/workflow import sys, os, shelve, glob, time, shlex, subprocess, logging, re, optparse re_line = re.compile('(?P<n>\w+):\s*(?P<p>.+?)\s*(\[(?P<dt>\w+)\]\s*)?:\s*(?P<c>.*)\s*(?P<a>\&)?') def daemonize(): if os.fork()==0: os.setsid() if os.fork()==0: return os._exit(0) def load_config(config_filename,data): if not os.path.exists(config_filename): return (None,0) config_mt = os.path.getmtime(config_filename) config = [] print '-'*10+' loading rules '+'-'*10 lines = open(config_filename,'r').read() for line in lines.replace('\\\n','\n').split('\n'): if not line.startswith('#') and ':' in line: match = re_line.match(line) if match: print line name = match.group('n') pattern = match.group('p') dt = eval((match.group('dt') or '1')\ .replace('s','*1').replace('m','*60')\ .replace('h','*3600').replace('d','*24*3600')\ .replace('w','*7*24*3600')) command = match.group('c') ampersand = match.group('a') config.append((name,pattern,dt,command,ampersand)) if not name in data: data[name]=[] print '-'*35 return config, config_mt def workflow(options): folder = options.folder or './' logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(levelname)-8s: %(message)s', datefmt='%m-%d %H:%M', filename=options.logfile) config_filename = options.config or os.path.join(folder,'workflow.config') cache_filename = options.cache or os.path.join(folder,'workflow.cache') data = shelve.open(cache_filename) config, config_mt = load_config(config_filename,data) processes = {} while config: pause = True if config_mt < os.path.getmtime(config_filename): config, config_mt = load_config(config_filename,data) if not config: return for clear in glob.glob('.workflow.*.clear'): rule = clear[10:-6] logging.info('clearing rule "%s"' % rule) for key in data.get(rule,[]): if key in data: del data[key] os.unlink(clear) for name,pattern,dt,action,ampersand in config: filenames = glob.glob(pattern) for filename in filenames: mt = os.path.getmtime(filename) if mt > time.time()-dt: continue pid_file = filename+'.%s.pid' % name log_file = filename+'.%s.out' % name err_file = filename+'.%s.err' % name key = re.sub('\s+',' ',pattern+'='+filename+':'+action).strip() if not (os.path.exists(pid_file) or os.path.exists(err_file)): if data.get(key,None)!=mt: command = action.replace(options.name,filename) logging.info('%s -> %s' % (filename, command)) wlg = open(log_file,'wb') process = subprocess.Popen(command,stdout=wlg, stderr=wlg,shell=True) open(pid_file,'w').write(str(process.pid)) processes[pid_file] = (filename,command,process) if not ampersand: process.wait() if pid_file in processes and processes[pid_file][2].poll()==0: filename, command, process = processes[pid_file] returncode = process.returncode if returncode !=0: open(err_file,'w').write(str(returncode)) logging.error('%s -> %s' % (filename, command)) else: data[key] = mt data[name] = data[name]+[key] del processes[pid_file] os.remove(pid_file) pause = False elif os.path.exists(pid_file) and not pid_file in processes: os.remove(pid_file) pause = False if pause: time.sleep(options.sleep) def main(): usage = """ 1. read docs: https://github.com/mdipierro/workflow 2. create a file workflow.config 3. 
run workflow.py """ version = "0.1" parser = optparse.OptionParser(usage, None, optparse.Option, version) parser.add_option("-s", "--sleep", dest="sleep", default=1, help="sleep interval") parser.add_option("-c", "--clear", dest="clear", default=None, help="clear rule") parser.add_option("-n", "--name", dest="name", default='$0', help="name") parser.add_option("-f", "--folder", dest="folder", default='./', help="folder for workflow") parser.add_option("-d", "--daemonize", dest="daemonize", default=False, action="store_true", help="runs as daemon") parser.add_option("-x", "--config", dest="config", default=None, help="path of the config filename "\ +"(default=workflow.config)") parser.add_option("-y", "--cache", dest="cache", default=None, help="path of the cache filename "\ +"(default=workflow.cache)") parser.add_option("-l", "--logfile", dest="logfile", default=None, help="path of the logfile "\ +"(default=/var/tmp/workflow.log when daemonized)") (options, args) = parser.parse_args() if options.clear: open('.workflow.%s.clear' % options.clear,'wb').write(time.ctime()) return if options.daemonize: options.logfile = options.logfile or '/var/tmp/workflow.log' daemonize() try: workflow(options) except KeyboardInterrupt: return if __name__=='__main__': main()
mae_envs/modules/__init__.py
bglick13/multi-agent-emergence-environments
1,317
12778514
from .module import *
from .util import *
mopidy_mopify/mem.py
dirkgroenen/mopidy-mopify
504
12778515
<reponame>dirkgroenen/mopidy-mopify
queuemanager = None
localfiles = None
house_robber_ii/solution.py
mahimadubey/leetcode-python
528
12778542
""" Note: This is an extension of House Robber. After robbing those houses on that street, the thief has found himself a new place for his thievery so that he will not get too much attention. This time, all houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, the security system for these houses remain the same as for those in the previous street. Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police. """ class Solution(object): def rob(self, nums): """ :type nums: List[int] :rtype: int """ n = len(nums) if n == 0: return 0 elif n == 1: return nums[0] return max(self.rob_aux(nums, 0), self.rob_aux(nums, 1)) def rob_aux(self, nums, left): n = len(nums) - 1 t = [0 for i in range(n + 1)] if n == 0: return t[n] t[1] = nums[left] if n <= 1: return t[n] t[2] = max(nums[left: left + 2]) for i in range(3, n + 1): t[i] = max(t[i - 2] + nums[left + i - 1], t[i - 1]) return t[n] a1 = [1] a2 = [4, 1, 6, 10, 5, 13, 2, 7] s = Solution() print(s.rob(a1)) print(s.rob(a2))
spyder_terminal/server/rest/__init__.py
mrclary/spyder-terminal
169
12778546
# -*- coding: utf-8 -*-
"""
rest module.
=========

Provides:
1. Asynchronous execution of JSON services

How to use the documentation
----------------------------
Documentation is available in one form: docstrings provided with the code

Copyright (c) 2016, <NAME>.
MIT, see LICENSE for more details.
"""

from . import term_rest

term_rest
experiments/launcher_exp2_collect.py
MenshovSergey/DetectChess
144
12778568
<filename>experiments/launcher_exp2_collect.py import os import pandas as pd from os2d.utils.logger import extract_value_from_os2d_binary_log, mAP_percent_to_points if __name__ == "__main__": config_path = os.path.dirname(os.path.abspath(__file__)) config_job_name = "exp2" log_path = os.path.abspath(os.path.join(config_path, "..", "output/exp2")) def get_result(job_type, # "v1" or "v2" sub_index, backbone_arch, init_model_nickname, random_seed, ): job_name = f"{config_job_name}.{sub_index}.{job_type}_seed{random_seed}" log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname log_folder = os.path.join(log_path, log_folder) data_file = os.path.join(log_folder, "train_log.pkl") return mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "[email protected]_grozi-val-new-cl", reduce="max")),\ mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "[email protected]_grozi-val-new-cl", reduce="first")) table = pd.DataFrame(columns=["arch", "init", "v1-train", "v2-init", "v2-train"]) random_seed = 0 for i, arch, init in zip(range(10), ["ResNet50"] * 5 + ["ResNet101"] * 5, ["fromScratch", "imageNetPth", "imageNetCaffe2", "imageNetCaffe2GroupNorm", "cocoMaskrcnnFpn", "imageNetPth", "imageNetCaffe2", "buildingsCirtorch", "cocoMaskrcnnFpn", "pascalWeakalign"] ): val_train_v1, val_init_v1 = get_result("v1", i, arch, init, random_seed) val_train_v2, val_init_v2 = get_result("v2", i, arch, init, random_seed) table = table.append({"arch":arch, "init":init, "v1-train":val_train_v1, "v2-init":val_init_v2, "v2-train":val_train_v2}, ignore_index=True) print(table, sep='\n')
orion/app.py
brian123zx/orion-server
120
12778580
from flask import Flask from flask import jsonify from flask import request from flask_cors import CORS from raven.contrib.flask import Sentry from orion.context import Context from orion.handlers import handler_classes def init_app(app): """ Statefully initialize the Flask application. This involves creating a sever-side application context and adding route definitions for all endpoint handlers. :param app: Uninitialized Flask application instance. :return: Server-side application context. """ ctx = Context(app) CORS(app, supports_credentials=True, origins=[ctx.config.get_value('frontend_url')]) sentry_dsn = ctx.config.get_value('sentry_dsn') if sentry_dsn: Sentry(dsn=sentry_dsn).init_app(app) def map_handler_func(HandlerClass): """ Create all necessary params for adding this route to the Flask server. :param HandlerClass: Handler class to prepare. :return: A tuple of (path, name, view_func, methods) for this handler. """ def handler_wrapper(*args, **kwargs): # Provide an abstraction for supplying the handler with request JSON. data = request.get_json(force=True, silent=True) or {} handler = HandlerClass(ctx, data) resp_json, status = handler.run(*args, **kwargs) return jsonify(resp_json), status return HandlerClass.path, HandlerClass.__name__, handler_wrapper, HandlerClass.methods for rule, endpoint, view_func, methods in map(map_handler_func, handler_classes): app.add_url_rule( rule=rule, endpoint=endpoint, view_func=view_func, methods=methods, ) return ctx def create_app(): """ Create a fully initialized Flask application instance for this server. :return: The initialized Flask application instance. """ app = Flask('orion') ctx = init_app(app) app.ctx = ctx return app
saleor/graphql/order/mutations/fulfillment_refund_and_return_product_base.py
eanknd/saleor
1,392
12778585
from typing import Optional import graphene from django.core.exceptions import ValidationError from ....giftcard.utils import order_has_gift_card_lines from ....order import FulfillmentLineData from ....order import models as order_models from ....order.error_codes import OrderErrorCode from ....order.fetch import OrderLineInfo from ....payment.models import TransactionItem from ...core.mutations import BaseMutation from ..types import FulfillmentLine, OrderLine class FulfillmentRefundAndReturnProductBase(BaseMutation): class Meta: abstract = True @classmethod def clean_order_payment(cls, payment, cleaned_input): if not payment or not payment.can_refund(): raise ValidationError( { "order": ValidationError( "Order cannot be refunded.", code=OrderErrorCode.CANNOT_REFUND.value, ) } ) cleaned_input["payment"] = payment @classmethod def clean_amount_to_refund( cls, order, amount_to_refund, charged_value, cleaned_input ): if amount_to_refund is not None: if order_has_gift_card_lines(order): raise ValidationError( { "amount_to_refund": ValidationError( ( "Cannot specified amount to refund when order has " "gift card lines." ), code=OrderErrorCode.CANNOT_REFUND.value, ) } ) if amount_to_refund > charged_value: raise ValidationError( { "amount_to_refund": ValidationError( ( "The amountToRefund is greater than the maximal " "possible amount to refund." ), code=OrderErrorCode.CANNOT_REFUND.value, ), } ) cleaned_input["amount_to_refund"] = amount_to_refund @classmethod def _raise_error_for_line(cls, msg, type, line_id, field_name, code=None): line_global_id = graphene.Node.to_global_id(type, line_id) if not code: code = OrderErrorCode.INVALID_QUANTITY.value raise ValidationError( { field_name: ValidationError( msg, code=code, params={field_name: line_global_id}, ) } ) @classmethod def raise_error_for_payment_error(cls, transactions: Optional[TransactionItem]): if transactions: code = OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK.value msg = "No app or plugin is configured to handle payment action requests." else: msg = "The refund operation is not available yet." 
code = OrderErrorCode.CANNOT_REFUND.value raise ValidationError( msg, code=code, ) @classmethod def clean_fulfillment_lines( cls, fulfillment_lines_data, cleaned_input, whitelisted_statuses ): fulfillment_lines = cls.get_nodes_or_error( [line["fulfillment_line_id"] for line in fulfillment_lines_data], field="fulfillment_lines", only_type=FulfillmentLine, qs=order_models.FulfillmentLine.objects.prefetch_related( "fulfillment", "order_line" ), ) fulfillment_lines = list(fulfillment_lines) cleaned_fulfillment_lines = [] for line, line_data in zip(fulfillment_lines, fulfillment_lines_data): quantity = line_data["quantity"] if line.order_line.is_gift_card: cls._raise_error_for_line( "Cannot refund or return gift card line.", "FulfillmentLine", line.pk, "fulfillment_line_id", OrderErrorCode.GIFT_CARD_LINE.value, ) if line.quantity < quantity: cls._raise_error_for_line( "Provided quantity is bigger than quantity from " "fulfillment line", "FulfillmentLine", line.pk, "fulfillment_line_id", ) if line.fulfillment.status not in whitelisted_statuses: allowed_statuses_str = ", ".join(whitelisted_statuses) cls._raise_error_for_line( f"Unable to process action for fulfillmentLine with different " f"status than {allowed_statuses_str}.", "FulfillmentLine", line.pk, "fulfillment_line_id", code=OrderErrorCode.INVALID.value, ) replace = line_data.get("replace", False) if replace and not line.order_line.variant_id: cls._raise_error_for_line( "Unable to replace line as the assigned product doesn't exist.", "OrderLine", line.pk, "order_line_id", ) cleaned_fulfillment_lines.append( FulfillmentLineData( line=line, quantity=quantity, replace=replace, ) ) cleaned_input["fulfillment_lines"] = cleaned_fulfillment_lines @classmethod def clean_lines(cls, lines_data, cleaned_input): order_lines = cls.get_nodes_or_error( [line["order_line_id"] for line in lines_data], field="order_lines", only_type=OrderLine, qs=order_models.OrderLine.objects.prefetch_related( "fulfillment_lines__fulfillment", "variant", "allocations" ), ) order_lines = list(order_lines) cleaned_order_lines = [] for line, line_data in zip(order_lines, lines_data): quantity = line_data["quantity"] if line.is_gift_card: cls._raise_error_for_line( "Cannot refund or return gift card line.", "OrderLine", line.pk, "order_line_id", OrderErrorCode.GIFT_CARD_LINE.value, ) if line.quantity < quantity: cls._raise_error_for_line( "Provided quantity is bigger than quantity from order line.", "OrderLine", line.pk, "order_line_id", ) quantity_ready_to_move = line.quantity_unfulfilled if quantity_ready_to_move < quantity: cls._raise_error_for_line( "Provided quantity is bigger than unfulfilled quantity.", "OrderLine", line.pk, "order_line_id", ) variant = line.variant replace = line_data.get("replace", False) if replace and not line.variant_id: cls._raise_error_for_line( "Unable to replace line as the assigned product doesn't exist.", "OrderLine", line.pk, "order_line_id", ) cleaned_order_lines.append( OrderLineInfo( line=line, quantity=quantity, variant=variant, replace=replace ) ) cleaned_input["order_lines"] = cleaned_order_lines
tuyaha/devices/switch.py
PaulAnnekov/tuya-ha
153
12778604
from tuyaha.devices.base import TuyaDevice


class TuyaSwitch(TuyaDevice):
    def turn_on(self):
        if self._control_device("turnOnOff", {"value": "1"}):
            self._update_data("state", True)

    def turn_off(self):
        if self._control_device("turnOnOff", {"value": "0"}):
            self._update_data("state", False)

    def update(self, use_discovery=True):
        # Pass the caller's flag through instead of always forcing discovery.
        return self._update(use_discovery=use_discovery)
tools/polly/bin/detail/osx_dev_root.py
Kondr11/LABA7
861
12778606
# Copyright (c) 2015, <NAME>
# All rights reserved.

import os
import re


def get(osx_version):
    dev_dir = re.sub(r'\.', '_', osx_version)
    dev_dir = 'OSX_{}_DEVELOPER_DIR'.format(dev_dir)
    return os.getenv(dev_dir)
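# Illustrative usage only (not part of the original file): with, say,
# OSX_10_15_DEVELOPER_DIR exported in the environment, get('10.15') returns
# its value; otherwise it returns None.
if __name__ == '__main__':
    print(get('10.15'))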
libsaas/services/uservoice/comments.py
MidtownFellowship/libsaas
155
12778628
from libsaas import http, parsers from libsaas.services import base from . import resource, flags class CommentsBase(resource.UserVoiceTextResource): path = 'comments' def wrap_object(self, name): return {'comment': {'text': name}} class Comments(CommentsBase): def create(self, obj): raise base.MethodNotSupported() class ForumSuggestionComment(CommentsBase): @base.resource(flags.SuggestionCommentFlags) def flags(self): """ Return the resource corresponding to all the flags of this comment. """ return flags.SuggestionCommentFlags(self) class ForumSuggestionComments(CommentsBase): @base.apimethod def get(self, page=None, per_page=None, filter=None, sort=None): """ Fetch comments on this suggestion. :var page: Where should paging start. If left as `None`, the first page is returned. :vartype page: int :var per_page: How many objects sould be returned. If left as `None`, 10 objects are returned. :vartype per_page: int :var filter: The kind of comments to return, see upstream documentation for possible values. :vartype filter: str :var sort: How should the returned collection be sorted. Refer to upstream documentation for possible values. :vartype sort: str """ params = base.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json class UserComments(CommentsBase): def create(self, obj): raise base.MethodNotSupported() @base.apimethod def get(self, page=None, per_page=None, filter=None, sort=None): """ Fetch comments from this user. :var page: Where should paging start. If left as `None`, the first page is returned. :vartype page: int :var per_page: How many objects sould be returned. If left as `None`, 10 objects are returned. :vartype per_page: int :var filter: The kind of comments to return, see upstream documentation for possible values. :vartype filter: str :var sort: How should the returned collection be sorted. Refer to upstream documentation for possible values. :vartype sort: str """ params = base.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json
backend/api/views/task.py
skaghzz/doccano
3,989
12778629
<filename>backend/api/views/task.py
from celery.result import AsyncResult
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView


class TaskStatus(APIView):
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        task = AsyncResult(kwargs['task_id'])
        ready = task.ready()
        error = ready and not task.successful()
        return Response({
            'ready': ready,
            'result': task.result if ready and not error else None,
            'error': {'text': str(task.result)} if error else None,
        })
docs/examples/cpu_temperature_bar_graph.py
NotBobTheBuilder/gpiozero
743
12778671
<reponame>NotBobTheBuilder/gpiozero<filename>docs/examples/cpu_temperature_bar_graph.py
from gpiozero import LEDBarGraph, CPUTemperature
from signal import pause

cpu = CPUTemperature(min_temp=50, max_temp=90)
leds = LEDBarGraph(2, 3, 4, 5, 6, 7, 8, pwm=True)

leds.source = cpu

pause()
src/opendr/perception/activity_recognition/x3d/algorithm/x3d.py
makistsantekidis/opendr
217
12778704
<gh_stars>100-1000 """ Adapted from: https://github.com/facebookresearch/SlowFast """ import math import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from .head_helper import X3DHead from .resnet_helper import ResStage from .stem_helper import VideoModelStem import pytorch_lightning as pl class X3D(pl.LightningModule): """ X3D model, adapted from https://github.com/facebookresearch/SlowFast <NAME>. "X3D: Expanding Architectures for Efficient Video Recognition." https://arxiv.org/abs/2004.04730 """ def __init__( self, dim_in: int, image_size: int, frames_per_clip: int, num_classes: int, conv1_dim: int, conv5_dim: int, num_groups: int, width_per_group: int, width_factor: float, depth_factor: float, bottleneck_factor: float, use_channelwise_3x3x3: bool, dropout_rate: float, head_activation: str, head_batchnorm: bool, fc_std_init: float, final_batchnorm_zero_init: bool, loss_name="cross_entropy", ): super().__init__() self.norm_module = torch.nn.BatchNorm3d self.loss_name = loss_name exp_stage = 2.0 self.dim_conv1 = conv1_dim self.dim_res2 = ( _round_width(self.dim_conv1, exp_stage, divisor=8) if False # hparams.X3D.SCALE_RES2 else self.dim_conv1 ) self.dim_res3 = _round_width(self.dim_res2, exp_stage, divisor=8) self.dim_res4 = _round_width(self.dim_res3, exp_stage, divisor=8) self.dim_res5 = _round_width(self.dim_res4, exp_stage, divisor=8) self.block_basis = [ # blocks, c, stride [1, self.dim_res2, 2], [2, self.dim_res3, 2], [5, self.dim_res4, 2], [3, self.dim_res5, 2], ] num_groups = num_groups width_per_group = width_per_group dim_inner = num_groups * width_per_group w_mul = width_factor d_mul = depth_factor dim_res1 = _round_width(self.dim_conv1, w_mul) # Basis of temporal kernel sizes for each of the stage. temp_kernel = [ [[5]], # conv1 temporal kernels. [[3]], # res2 temporal kernels. [[3]], # res3 temporal kernels. [[3]], # res4 temporal kernels. [[3]], # res5 temporal kernels. 
] self.s1 = VideoModelStem( dim_in=[dim_in], dim_out=[dim_res1], kernel=[temp_kernel[0][0] + [3, 3]], stride=[[1, 2, 2]], padding=[[temp_kernel[0][0][0] // 2, 1, 1]], norm_module=self.norm_module, stem_func_name="x3d_stem", ) # blob_in = s1 dim_in = dim_res1 dim_out = dim_in for stage, block in enumerate(self.block_basis): dim_out = _round_width(block[1], w_mul) dim_inner = int(bottleneck_factor * dim_out) n_rep = _round_repeats(block[0], d_mul) prefix = "s{}".format(stage + 2) # start w res2 to follow convention s = ResStage( dim_in=[dim_in], dim_out=[dim_out], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=[block[2]], num_blocks=[n_rep], num_groups=[dim_inner] if use_channelwise_3x3x3 else [num_groups], num_block_temp_kernel=[n_rep], nonlocal_inds=[[]], nonlocal_group=[1], nonlocal_pool=[[1, 2, 2], [1, 2, 2]], instantiation="dot_product", trans_func_name="x3d_transform", stride_1x1=False, norm_module=self.norm_module, dilation=[1], drop_connect_rate=0.0, ) dim_in = dim_out self.add_module(prefix, s) spat_sz = int(math.ceil(image_size / 32.0)) self.head = X3DHead( dim_in=dim_out, dim_inner=dim_inner, dim_out=conv5_dim, num_classes=num_classes, pool_size=(frames_per_clip, spat_sz, spat_sz), dropout_rate=dropout_rate, act_func=head_activation, bn_lin5_on=bool(head_batchnorm), ) init_weights(self, fc_std_init, bool(final_batchnorm_zero_init)) def forward(self, x: Tensor): # The original slowfast code was set up to use multiple paths, wrap the input x = [x] # type:ignore for module in self.children(): x = module(x) return x def training_step(self, batch, batch_idx): x, y = batch x = self.forward(x) loss = getattr(F, self.loss_name, F.cross_entropy)(x, y) self.log('train/loss', loss) self.log('train/acc', _accuracy(x, y)) return loss def validation_step(self, batch, batch_idx): x, y = batch x = self.forward(x) loss = getattr(F, self.loss_name, F.cross_entropy)(x, y) self.log('val/loss', loss) self.log('val/acc', _accuracy(x, y)) return loss def test_step(self, batch, batch_idx): x, y = batch x = self.forward(x) loss = getattr(F, self.loss_name, F.cross_entropy)(x, y) self.log('test/loss', loss) self.log('test/acc', _accuracy(x, y)) return loss def _accuracy(x: Tensor, y: Tensor): return torch.sum(x.argmax(dim=1) == y) / len(y) def _round_width(width, multiplier, min_depth=8, divisor=8): """Round width of filters based on width multiplier.""" if not multiplier: return width width *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(width + divisor / 2) // divisor * divisor) if new_filters < 0.9 * width: new_filters += divisor return int(new_filters) def _round_repeats(repeats, multiplier): """Round number of layers based on depth multiplier.""" multiplier = multiplier if not multiplier: return repeats return int(math.ceil(multiplier * repeats)) def c2_msra_fill(module: nn.Module) -> None: """ Initialize `module.weight` using the "MSRAFill" implemented in Caffe2. Also initializes `module.bias` to 0. Args: module (torch.nn.Module): module to initialize. """ # pyre-ignore nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: # pyre-ignore nn.init.constant_(module.bias, 0) def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True): """ Performs ResNet style weight initialization. Args: fc_init_std (float): the expected standard deviation for fc layer. zero_init_final_bn (bool): if True, zero initialize the final bn for every bottleneck. 
""" for m in model.modules(): if isinstance(m, nn.Conv3d): """ Follow the initialization method proposed in: {He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level performance on imagenet classification." arXiv preprint arXiv:1502.01852 (2015)} """ c2_msra_fill(m) elif isinstance(m, nn.BatchNorm3d): if ( hasattr(m, "transform_final_bn") and m.transform_final_bn and zero_init_final_bn ): batchnorm_weight = 0.0 else: batchnorm_weight = 1.0 if m.weight is not None: m.weight.data.fill_(batchnorm_weight) if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.Linear): m.weight.data.normal_(mean=0.0, std=fc_init_std) if m.bias is not None: m.bias.data.zero_()
app/settings/arq.py
leosussan/fastapi-gino-arq-postgres
289
12778706
<filename>app/settings/arq.py
from arq.connections import RedisSettings

from .globals import REDIS_IP, REDIS_PORT

settings = RedisSettings(host=REDIS_IP, port=REDIS_PORT)
zinnia/migrations/__init__.py
Boondockers-Welcome/django-blog-zinnia
1,522
12778732
"""Migrations for Zinnia"""
openbb_terminal/dashboards/widget_helpers.py
tehcoderer/GamestonkTerminal
255
12778771
"""Widgets Helper Library. A library of `ipywidgets` wrappers for notebook based reports and voila dashboards. The library includes both python code and html/css/js elements that can be found in the `./widgets` folder. """ import os from jinja2 import Template def stylesheet(): """Load a default CSS stylesheet from file.""" with open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "style.css") ) as f: style = f.read() return style def price_card(ticker: str, price: str, price_color: str = "neutral_color") -> str: """Prepare a styled HTML element of a 128 by 128 price card. Parameters ---------- ticker : str Instrument ticker for the price card price : str Instrument price as a string price_color : str, optional The color of the price. Accepts "up_color", "down_color" and default "neutral_color" Returns ------- str HTML code as string """ with open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "card.j2") ) as f: template = Template(f.read()) card = template.render(ticker=ticker, price=price, price_color=price_color) return card
corehq/apps/sms/tests/opt_tests.py
akashkj/commcare-hq
471
12778795
from django.test import TestCase from corehq.apps.accounting.models import SoftwarePlanEdition from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin from corehq.apps.accounting.utils import clear_plan_version_cache from corehq.apps.domain.models import Domain from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend from corehq.apps.sms.api import incoming, send_sms_to_verified_number from corehq.apps.sms.messages import MSG_OPTED_IN, MSG_OPTED_OUT, get_message from corehq.apps.sms.models import SMS, PhoneBlacklist, PhoneNumber, SQLMobileBackendMapping, SQLMobileBackend from corehq.apps.sms.tests.util import ( delete_domain_phone_numbers, setup_default_sms_test_backend, ) from corehq.form_processor.tests.utils import FormProcessorTestUtils class OptTestCase(DomainSubscriptionMixin, TestCase): @classmethod def setUpClass(cls): super(OptTestCase, cls).setUpClass() cls.domain = 'opt-test' cls.domain_obj = Domain(name=cls.domain) cls.domain_obj.sms_case_registration_enabled = True cls.domain_obj.save() cls.setup_subscription(cls.domain, SoftwarePlanEdition.ADVANCED) cls.backend, cls.backend_mapping = setup_default_sms_test_backend() cls.custom_backend = SQLTestSMSBackend.objects.create( name='MOBILE_BACKEND_CUSTOM_TEST', is_global=True, hq_api_id=SQLTestSMSBackend.get_api_id(), opt_in_keywords=['RESTART'], opt_out_keywords=['RESTOP'] ) cls.custom_backend_mapping = SQLMobileBackendMapping.objects.create( is_global=True, backend_type=SQLMobileBackend.SMS, prefix='1', backend=cls.custom_backend, ) @classmethod def tearDownClass(cls): cls.backend_mapping.delete() cls.backend.delete() cls.custom_backend_mapping.delete() cls.custom_backend.delete() FormProcessorTestUtils.delete_all_cases(cls.domain) cls.teardown_subscriptions() cls.domain_obj.delete() clear_plan_version_cache() super(OptTestCase, cls).tearDownClass() def tearDown(self): PhoneBlacklist.objects.all().delete() SMS.objects.filter(domain=self.domain).delete() delete_domain_phone_numbers(self.domain) def get_last_sms(self, phone_number): return SMS.objects.filter(domain=self.domain, phone_number=phone_number).order_by('-date')[0] def test_opt_out_and_opt_in(self): self.assertEqual(PhoneBlacklist.objects.count(), 0) incoming('99912345678', 'join opt-test', 'GVI') v = PhoneNumber.get_two_way_number('99912345678') self.assertIsNotNone(v) incoming('99912345678', 'stop', 'GVI') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='99912345678') self.assertFalse(phone_number.send_sms) self.assertEqual(phone_number.domain, self.domain) self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp) self.assertIsNone(phone_number.last_sms_opt_in_timestamp) sms = self.get_last_sms('+99912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, get_message(MSG_OPTED_OUT, context=('START',))) incoming('99912345678', 'start', 'GVI') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='99912345678') self.assertTrue(phone_number.send_sms) self.assertEqual(phone_number.domain, self.domain) self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp) self.assertIsNotNone(phone_number.last_sms_opt_in_timestamp) sms = self.get_last_sms('+99912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, get_message(MSG_OPTED_IN, context=('STOP',))) def test_sending_to_opted_out_number(self): self.assertEqual(PhoneBlacklist.objects.count(), 0) incoming('99912345678', 'join opt-test', 'GVI') v = 
PhoneNumber.get_two_way_number('99912345678') self.assertIsNotNone(v) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+99912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') incoming('99912345678', 'stop', 'GVI') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='99912345678') self.assertFalse(phone_number.send_sms) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+99912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') self.assertTrue(sms.error) self.assertEqual(sms.system_error_message, SMS.ERROR_PHONE_NUMBER_OPTED_OUT) incoming('99912345678', 'start', 'GVI') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='99912345678') self.assertTrue(phone_number.send_sms) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+99912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') self.assertFalse(sms.error) self.assertIsNone(sms.system_error_message) def test_custom_opt_keywords(self): self.assertEqual(PhoneBlacklist.objects.count(), 0) incoming('19912345678', 'join opt-test', 'TEST') v = PhoneNumber.get_two_way_number('19912345678') self.assertIsNotNone(v) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+19912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') incoming('19912345678', 'restop', 'TEST') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='19912345678') self.assertFalse(phone_number.send_sms) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+19912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') self.assertTrue(sms.error) self.assertEqual(sms.system_error_message, SMS.ERROR_PHONE_NUMBER_OPTED_OUT) incoming('19912345678', 'restart', 'TEST') self.assertEqual(PhoneBlacklist.objects.count(), 1) phone_number = PhoneBlacklist.objects.get(phone_number='19912345678') self.assertTrue(phone_number.send_sms) send_sms_to_verified_number(v, 'hello') sms = self.get_last_sms('+19912345678') self.assertEqual(sms.direction, 'O') self.assertEqual(sms.text, 'hello') self.assertFalse(sms.error) self.assertIsNone(sms.system_error_message)