Columns: id — string (length 1 to 265) · text — string (length 6 to 5.19M) · dataset_id — string (7 classes)
182835
# Copyright (C) 2015-2016 The bitcoin-blockchain-parser developers
#
# This file is part of bitcoin-blockchain-parser.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of bitcoin-blockchain-parser, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.

import os
import unittest
from binascii import a2b_hex, b2a_hex

from blockchain_parser.transaction import Transaction
from .utils import read_test_data


class TestTransaction(unittest.TestCase):
    def test_rbf(self):
        data = a2b_hex("01000000019222bbb054bb9f94571dfe769af5866835f2a97e8839"
                       "59fa757de4064bed8bca01000000035101b1000000000100000000"
                       "00000000016a01000000")
        tx = Transaction(data)
        self.assertTrue(tx.uses_replace_by_fee())

        coinbase = a2b_hex("01000000010000000000000000000000000000000000000000"
                           "000000000000000000000000ffffffff4203c8e405fabe6d6d"
                           "98b0e98e3809941f1fd8cafe7c8236e27b8d1a776b1835aa54"
                           "8bb84fe5b5f3d7010000000000000002650300aaa757eb0000"
                           "002f736c7573682f0000000001baa98396000000001976a914"
                           "7c154ed1dc59609e3d26abb2df2ea3d587cd8c4188ac000000"
                           "00")
        tx = Transaction(coinbase)
        self.assertTrue(tx.is_coinbase())
        self.assertFalse(tx.uses_replace_by_fee())

    def test_bip69(self):
        non_compliant = read_test_data("bip69_false.txt")
        tx = Transaction(non_compliant)
        self.assertFalse(tx.uses_bip69())

        compliant = read_test_data("bip69_true.txt")
        tx = Transaction(compliant)
        self.assertTrue(tx.uses_bip69())

    def test_bech32_p2wpkh(self):
        tx = Transaction(read_test_data("bech32_p2wpkh.txt"))
        self.assertEqual(["3BBqfnaPbgi5KWECWdFpvryUfw7QatWy37"],
                         [a.address for a in tx.outputs[0].addresses])
        self.assertEqual(["bc1q4z0874xmfxe3xeqknulgnqhukhfjwh5tvjrr2x"],
                         [a.address for a in tx.outputs[1].addresses])

    def test_bech32_p2wsh(self):
        tx = Transaction(read_test_data("bech32_p2wsh.txt"))
        self.assertEqual(["3GMKKFPNUg13VktgihUD8QfXVQRBdoDNDf"],
                         [a.address for a in tx.outputs[0].addresses])
        self.assertEqual(
            ["bc1qday7wsftyv4r6qkpn8907s8fy3kexkny0xwrd8d4wlk06zffzyuqpp629n"],
            [a.address for a in tx.outputs[1].addresses])

    def test_segwit(self):
        tx = Transaction(read_test_data("segwit.txt"))
        self.assertTrue(tx.is_segwit)
        id = "22116f1d76ab425ddc6d10d184331e70e080dd6275d7aa90237ceb648dc38224"
        self.assertTrue(tx.txid == id)
        h = "1eac09f372a8c13bb7dea6bd66ee71a6bcc469b57b35c1e394ad7eb7c107c507"
        self.assertTrue(tx.hash == h)

        segwit_input = tx.inputs[0]
        self.assertTrue(len(segwit_input.witnesses) == 4)
        self.assertTrue(len(segwit_input.witnesses[0]) == 0)

        wit_1 = "3045022100bc2ba8808127f8a74beed6dfa1b9fe54675c55aab85a61d7" \
                "a74c15b993e67e5f02204dada4e15f0b4e659dae7bf0d0f648010d1f2b" \
                "665f587a35eb6f22e44194952301"
        wit_2 = "3045022100f4c7ec7c2064fe2cc4389733ac0a57d8080a62180a004b02" \
                "a19b89267113a17f022004ee9fdb081359c549ee42ffb58279363563ea" \
                "f0191cd8b2b0ceebf62146b50b01"
        wit_3 = "5221022b003d276bce58bef509bdcd9cf7e156f0eae18e1175815282e6" \
                "5e7da788bb5b21035c58f2f60ecf38c9c8b9d1316b662627ec672f5fd9" \
                "12b1a2cc28d0b9b00575fd2103c96d495bfdd5ba4145e3e046fee45e84" \
                "a8a48ad05bd8dbb395c011a32cf9f88053ae"
        parsed_1 = b2a_hex(segwit_input.witnesses[1]).decode("utf-8")
        parsed_2 = b2a_hex(segwit_input.witnesses[2]).decode("utf-8")
        parsed_3 = b2a_hex(segwit_input.witnesses[3]).decode("utf-8")
        self.assertEqual(parsed_1, wit_1)
        self.assertEqual(parsed_2, wit_2)
        self.assertEqual(parsed_3, wit_3)

    def test_vsize(self):
        segwit_tx = Transaction(read_test_data("size_segwit.txt"))
        non_segwit_tx = Transaction(read_test_data("size_non_segwit.txt"))
        self.assertEqual(non_segwit_tx.vsize, non_segwit_tx.size)
        self.assertEqual(non_segwit_tx.vsize, 189)
        self.assertNotEqual(segwit_tx.vsize, segwit_tx.size)
        self.assertEqual(segwit_tx.vsize, 208)
        self.assertEqual(segwit_tx.size, 373)

    def test_large(self):
        data = read_test_data("large_tx.txt")
        tx = Transaction(data)
        self.assertTrue(
            tx.hash == "29a3efd3ef04f9153d47a990bd7b048a4b2d213daa"
                       "a5fb8ed670fb85f13bdbcf")
        self.assertTrue(tx.size == len(tx.hex))

    def test_incomplete(self):
        data = read_test_data("invalid_tx.txt")
        self.assertRaises(Exception, Transaction, data)

    def test_unknown_scripts(self):
        data = read_test_data("scripts_invalid.txt")
        tx = Transaction(data)
        for output in tx.outputs:
            self.assertEqual([], output.addresses)
StarcoderdataPython
1661952
from can_tools.scrapers.official.AZ.counties.maricopa_vaccine import (
    ArizonaMaricopaVaccine,
)
StarcoderdataPython
56703
<reponame>MyDataShare/access-gateway<gh_stars>1-10
from unittest import main

from .helpers import api, test_base

# Payload is Eino Leino's Finnish poem "Nocturne", used verbatim as test data.
text = """
Nocturne

Ruislinnun laulu korvissani,
tähkäpäiden päällä täysi kuu;
kesä-yön on onni omanani,
kaskisavuun laaksot verhouu.
En ma iloitse, en sure, huokaa;
mutta metsän tummuus mulle tuokaa,
puunto pilven, johon päivä hukkuu,
siinto vaaran tuulisen, mi nukkuu,
tuoksut vanamon ja varjot veen;
niistä sydämeni laulun teen.

Sulle laulan neiti, kesäheinä,
sydämeni suuri hiljaisuus,
uskontoni, soipa säveleinä,
tammenlehvä-seppel vehryt, uus.
En ma enää aja virvatulta,
onpa kädessäni onnen kulta;
pienentyy mun ympär’ elon piiri;
aika seisoo, nukkuu tuuliviiri;
edessäni hämäräinen tie
tuntemattomahan tupaan vie.

<NAME>
"""


class RouteTextPayloadCases:
    class RouteTextPayloadBase(test_base.TestBase):
        def test_text_in_json(self):
            json = self.res.json()
            arg = 'text'
            self.assertIn(arg, json)
            self.assertEqual(json[arg], text)


class TestRouteTextPayloadPost(RouteTextPayloadCases.RouteTextPayloadBase,
                               test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.post(
            cls.v['AGW'] + '/test_route_text_payload_post',
            text=text
        )


class TestRouteTextPayloadPut(RouteTextPayloadCases.RouteTextPayloadBase,
                              test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.put(
            cls.v['AGW'] + '/test_route_text_payload_put',
            text=text
        )


class TestRouteTextPayloadPatch(RouteTextPayloadCases.RouteTextPayloadBase,
                                test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.patch(
            cls.v['AGW'] + '/test_route_text_payload_patch',
            text=text
        )


if __name__ == '__main__':
    main()
StarcoderdataPython
97880
from collections import Counter


def day6(n_days: int) -> int:
    with open("input.txt") as f:
        fishes = map(int, f.readline().split(","))

    # Bucket fish by timer value instead of tracking each fish individually.
    # (Loop variable renamed from `f` to avoid shadowing the file handle.)
    n_fish_per_day = Counter()
    for fish in fishes:
        n_fish_per_day[fish] += 1

    # simulate days
    for _ in range(n_days):
        new_n_fish_per_day = {}
        for i in range(1, 9):
            new_n_fish_per_day[i - 1] = n_fish_per_day[i]
        n_new_fish = n_fish_per_day[0]
        new_n_fish_per_day[6] += n_new_fish  # parents reset their timer to 6
        new_n_fish_per_day[8] = n_new_fish   # newborns start at timer 8
        n_fish_per_day = new_n_fish_per_day

    # count
    return sum(n_fish_per_day[i] for i in range(0, 9))


print("part1:", day6(80))
print("part2:", day6(256))
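# Illustrative sketch (not part of the original solution): one simulation
# step of the bucket recurrence above, on a hypothetical starting state.
# Timers shift down one slot; the count at 0 respawns at 6 and also spawns
# an equal number of newborns at 8, so the population grows by that count.
_state = {0: 2, 1: 1, 6: 3}  # hypothetical: 2 fish at timer 0, 1 at 1, 3 at 6
_next = {i - 1: _state.get(i, 0) for i in range(1, 9)}
_next[6] += _state.get(0, 0)
_next[8] = _state.get(0, 0)
assert sum(_next.values()) == sum(_state.values()) + _state.get(0, 0)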
StarcoderdataPython
42394
<filename>configs/_base_/explain/count_concepts.py
concept_detector_cfg = dict(
    quantile_threshold=0.99,
    with_bboxes=True,
    count_disjoint=True,
)
target_layer = 'layer3.5'
StarcoderdataPython
3320769
<gh_stars>10-100
# coding: utf-8

"""JSON read/write plugin."""

import re
import types

import ujson
import requests

from girlfriend.util.lang import args2fields
from girlfriend.util.resource import HTTP_SCHEMA
from girlfriend.plugin.data import (
    AbstractDataReader,
    AbstractDataWriter
)
from girlfriend.exception import InvalidArgumentException


class JSONReaderPlugin(object):
    """Loads JSON objects from files or web URLs and converts their format.
    Supports the common JSON file layouts.
    """

    name = "read_json"

    def execute(self, context, *json_readers):
        return [reader(context) for reader in json_readers]


JSON_REGEX = re.compile(r"\{.*?\}")


class JSONR(AbstractDataReader):

    @args2fields()
    def __init__(self, path, style,
                 record_handler=None, record_filter=None, result_wrapper=None,
                 variable=None):
        """
        :param context the context object
        :param path load path; either a file path or a web URL
        :param style JSON data layout; the accepted values are:
               1. line: each line of the file is a single JSON object
               2. array: the file content is one JSON array
               3. extract:property — the file is one JSON object, and only
                  the given part of it is extracted for processing
               4. block: objects written as blocks, not confined to one line
        :param record_handler record handler; every record arrives as a dict
               and can be wrapped by this function; if it returns None the
               record is skipped
        :param record_filter record filter
        :param result_wrapper wraps the final result
        :param variable name of the context variable to store the result in;
               if None, the return value is handed to the framework to keep
        """
        pass  # fields are assigned by the args2fields decorator

    def __call__(self, context):
        result = []
        # Line-by-line loading from a file
        if self._style == "line" and not self._path.startswith(HTTP_SCHEMA):
            with open(self._path, "r") as f:
                for line in f:
                    line = line.strip()
                    if not line or line.startswith(("#", "//", ";")):
                        continue
                    record = ujson.loads(line)
                    self._handle_record(record, result.append)
        else:
            json_content = None
            # Load the JSON document from its source
            if self._path.startswith(HTTP_SCHEMA):
                json_content = requests.get(self._path).text
            else:
                with open(self._path, "r") as f:
                    json_content = f.read()
            json_content = json_content.strip()

            # Read line by line
            if self._style == "line":
                for line in json_content.splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    record = ujson.loads(line)
                    self._handle_record(record, result.append)

            # Read block by block
            if self._style == "block":
                json_buffer = []
                in_block = False
                for char in json_content:
                    if char == "{":
                        in_block = True
                        json_buffer.append(char)
                    elif char == "}" and in_block:
                        json_buffer.append(char)
                        try:
                            record = ujson.loads("".join(json_buffer))
                        except ValueError:
                            continue
                        else:
                            self._handle_record(record, result.append)
                            json_buffer = []
                            in_block = False
                    elif in_block:
                        json_buffer.append(char)

            # Read as an array
            elif self._style == "array":
                json_array = ujson.loads(json_content)
                for record in json_array:
                    self._handle_record(record, result.append)

            # Use the property extractor
            elif self._style.startswith("extract:"):
                json_obj = ujson.loads(json_content)
                keys = self._style[len("extract:"):].split(".")
                for key in keys:
                    json_obj = json_obj[key]
                for record in json_obj:
                    self._handle_record(record, result.append)

        return self._handle_result(context, result)


class JSONWriterPlugin(object):

    name = "write_json"

    def execute(self, context, *json_writers):
        for json_writer in json_writers:
            json_writer(context)


class JSONW(AbstractDataWriter):

    @args2fields()
    def __init__(self, path, style, object,
                 record_handler=None, record_filter=None,
                 http_method="post", http_field=None, variable=None):
        """
        :param path write path; a file path by default, but if it starts with
               HTTP or HTTPS the data will be POSTed to that address
        :param style output layout: line - one record per line,
               array - written as a JSON array,
               object - written as a single object
        :param table the object to operate on; either a concrete object or
               the name of a variable in the context
        :param record_handler record handler; use it for format conversions,
               e.g. turning datetime objects into strings
        :param record_filter record filter
        :param http_method HTTP method for writing; POST by default, PUT may
               be specified
        :param variable context variable to store the JSON text in
        """
        pass  # fields are assigned by the args2fields decorator

    def __call__(self, context):
        if (self._style == "line" and self._path and
                not self._path.startswith(HTTP_SCHEMA)):
            with open(self._path, "w") as f:
                for row in self._object:
                    row = self._handle_record(row)
                    f.write(ujson.dumps(row) + "\n")
            return

        # JSON text
        json_text = ""

        if isinstance(self._object, types.FunctionType):
            self._object = self._object(context)
        elif isinstance(self._object, types.StringTypes):
            self._object = context[self._object]

        if self._style == "object":
            json_text = ujson.dumps(self._object)

        result = []
        for row in self._object:
            row = self._handle_record(row, result.append)

        # array layout: dump directly
        if self._style == "array":
            json_text = ujson.dumps(result)

        # line layout
        if self._style == "line":
            string_buffer = []
            for row in self._object:
                row = self._handle_record(row)
                string_buffer.append(ujson.dumps(row))
            json_text = "\n".join(string_buffer)

        if self._path is None:
            if self._variable:
                context[self._variable] = json_text
                return
            else:
                raise InvalidArgumentException(
                    u"When path is None, a valid variable must be specified")

        if self._path.startswith(HTTP_SCHEMA):
            if self._http_method.lower() == "post":
                if self._http_field:
                    requests.post(
                        self._path, data={self._http_field: json_text})
                elif self._style == "line":
                    requests.post(self._path, data=json_text)
                else:
                    requests.post(self._path, json=json_text)
            elif self._http_method.lower() == "put":
                requests.put(self._path, json=json_text)
        else:
            with open(self._path, "w") as f:
                f.write(json_text)

        if self._variable:
            context[self._variable] = json_text
StarcoderdataPython
107071
<gh_stars>0
# Generated by Django 3.0.6 on 2020-06-03 08:23

from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course', '0006_auto_20200601_1325'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='course',
            name='subscribed_users',
        ),
        migrations.RemoveField(
            model_name='course',
            name='tutor',
        ),
        migrations.AddField(
            model_name='course',
            name='tutor',
            field=models.ManyToManyField(related_name='courses_asigned', to=settings.AUTH_USER_MODEL),
        ),
    ]
StarcoderdataPython
3243855
<gh_stars>10-100
# -*- coding: utf-8 -*-

"""Module containing the logic for the lambda nuke entry-points."""
StarcoderdataPython
115644
<reponame>siemens/python-cybox
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

import cybox
import cybox.bindings.cybox_core as core_binding
from cybox.common import VocabString
from cybox.core import Object


class AssociationType(VocabString):
    _XSI_TYPE = 'cyboxVocabs:ActionObjectAssociationTypeVocab-1.0'


class AssociatedObject(Object):
    """The CybOX Associated Object element.

    Currently only supports the id, association_type and ObjectProperties
    properties.
    """
    superclass = Object

    def __init__(self, defined_object=None, type_=None, association_type=None):
        super(AssociatedObject, self).__init__(defined_object, type_)
        self.association_type = association_type

    def to_obj(self, return_obj=None, ns_info=None):
        self._collect_ns_info(ns_info)
        obj = super(AssociatedObject, self).to_obj(return_obj=core_binding.AssociatedObjectType(), ns_info=ns_info)

        if self.association_type is not None:
            obj.Association_Type = self.association_type.to_obj(ns_info=ns_info)

        return obj

    def to_dict(self):
        object_dict = super(AssociatedObject, self).to_dict()

        if self.association_type is not None:
            object_dict['association_type'] = self.association_type.to_dict()

        return object_dict

    @staticmethod
    def from_obj(object_obj):
        if not object_obj:
            return None

        obj = Object.from_obj(object_obj, AssociatedObject())
        obj.association_type = AssociationType.from_obj(object_obj.Association_Type)
        return obj

    @staticmethod
    def from_dict(object_dict):
        if not object_dict:
            return None

        obj = Object.from_dict(object_dict, AssociatedObject())
        obj.association_type = AssociationType.from_dict(object_dict.get('association_type', None))
        return obj
StarcoderdataPython
3354700
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 3 19:37:10 2017

@author: dalonlobo
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Label-encode the categorical column (index 3), then one-hot encode it.
# NOTE: OneHotEncoder's categorical_features argument was removed in
# scikit-learn 0.22; this script targets older versions.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
X = onehotencoder.fit_transform(X).toarray()

# Drop the first dummy column to avoid the dummy variable trap.
X = X[:, 1:]

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=0)

from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

y_pred = regressor.predict(X_test)
regressor.score(X_test, y_test)
StarcoderdataPython
3310483
<reponame>gustavs408650/looker_sdk_30
from __future__ import absolute_import

# flake8: noqa

# import apis into api package
from looker_client_30.api.api_auth_api import ApiAuthApi
from looker_client_30.api.auth_api import AuthApi
from looker_client_30.api.config_api import ConfigApi
from looker_client_30.api.connection_api import ConnectionApi
from looker_client_30.api.content_api import ContentApi
from looker_client_30.api.dashboard_api import DashboardApi
from looker_client_30.api.data_action_api import DataActionApi
from looker_client_30.api.datagroup_api import DatagroupApi
from looker_client_30.api.group_api import GroupApi
from looker_client_30.api.homepage_api import HomepageApi
from looker_client_30.api.integration_api import IntegrationApi
from looker_client_30.api.look_api import LookApi
from looker_client_30.api.lookml_model_api import LookmlModelApi
from looker_client_30.api.project_api import ProjectApi
from looker_client_30.api.query_api import QueryApi
from looker_client_30.api.render_task_api import RenderTaskApi
from looker_client_30.api.role_api import RoleApi
from looker_client_30.api.running_queries_api import RunningQueriesApi
from looker_client_30.api.scheduled_plan_api import ScheduledPlanApi
from looker_client_30.api.session_api import SessionApi
from looker_client_30.api.space_api import SpaceApi
from looker_client_30.api.sql_query_api import SqlQueryApi
from looker_client_30.api.user_api import UserApi
from looker_client_30.api.user_attribute_api import UserAttributeApi
from looker_client_30.api.workspace_api import WorkspaceApi
StarcoderdataPython
38068
import requests
import functools

from django.shortcuts import get_object_or_404

from db_query.models import PersistentQuery


def apply_middleware(raw_data, exec_sql_fn, *args, **kw_args):
    return adapt_bundle(raw_data[0], exec_sql_fn)


def adapt_bundle(raw_data, exec_sql_fn):
    return {
        "id": raw_data.get("id"),
        "form-title": raw_data.get("titulo_do_form"),
        "bundled-tables": adapt_bundled_tables(raw_data.get("bundled-tables"), exec_sql_fn),
    }


def adapt_bundled_tables(bundled_tables, exec_sql_fn):
    return [adapt_bundled_table(table, exec_sql_fn) for table in bundled_tables]


def adapt_bundled_table(table, exec_sql_fn):
    raw_params = params_to_dict(table.get("parametros").split("\n"))
    return {
        "tab-title": raw_params.get("TITULO_ABA"),
        "master": raw_params.get("PAINEL_MASTER") == "S",
        "complex-id": raw_params.get("COMPLEXA_ID"),
        "detail": raw_params.get("DETAIL") == "S",
        "related-fields": [c.strip() for c in raw_params.get("COLUNAS_DETAIL", "").split(",")],
        "master-fields": [c.strip() for c in raw_params.get("COLUNAS_MASTER", "").split(",")],
        "bundle-actions": parse_bundle_actions(raw_params.get("BUNDLE_ACTIONS", "")),
        "definition": fix_none_results(get_child_definition(raw_params.get("COMPLEXA_ID"))),
    }


def parse_bundle_actions(raw_actions):
    try:
        return [parse_bundle_action(raw_action) for raw_action in raw_actions.split("~")]
    except (IndexError, AttributeError):
        return []


def parse_bundle_action(raw_action):
    """
    Parse action attributes such as
    caption:Recalcular;type:primary;action:reglass_cotacoes.recalcular;enabled-states:@view,pending
    """

    def parse_array_attr(attr):
        print(attr)
        if attr[0] != "@":
            return attr
        return attr[1:].split(",")

    def parse_attr(attrs, attr):
        attr_parts = attr.split(":")
        return dict(attrs, **{attr_parts[0]: parse_array_attr(attr_parts[1])})

    return functools.reduce(
        parse_attr, [action_attrs for action_attrs in raw_action.split(";")], {}
    )


def get_complex_definition(raw_params):
    if raw_params.get("DETAIL") != "S":
        return None

    complex_id = raw_params.get("COMPLEXA_ID")
    related_fields = [c.strip() for c in raw_params.get("COLUNAS_DETAIL", "").split(",")]
    master_fields = [c.strip() for c in raw_params.get("COLUNAS_MASTER", "").split(",")]


# TODO: do this in a better way
HOST = "http://localhost:8000"
PATH = "/api/query/persistent/complex-tables/?id={id}&middleware=complex_forms&depth=1"
BASE_URL = HOST + PATH


def get_child_definition(complex_id):
    r = requests.get(BASE_URL.format(id=complex_id))
    if r.status_code == 200:
        return r.json().get("data")
    return {}


def fix_none_results(data):
    if isinstance(data, dict):
        return {k: fix_none_results(v) for k, v in data.items()}
    if isinstance(data, list):
        return [fix_none_results(v) for v in data]
    if data == "None":
        return None
    return data


def params_to_dict(params):
    def split_param(param):
        p = param.split("=")
        if len(p) < 2:
            return [param, None]
        return p[0], p[1].strip()

    return {p[0]: p[1] for p in map(split_param, params)}


# "COMPLEXA_ID": None,
# "TITULO_ABA": "tab-title",
# "PAINEL_MASTER": "master",
# "DETAIL": None,
# "COLUNAS_DETAIL": "related-columns",
# "COLUNAS_MASTER": "master-columns",
# "DETAIL_PAGINA_MASTER": ,
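# Illustrative sketch (not part of the original module): what
# parse_bundle_action produces for the example string in its docstring.
# The expected dict shown here is an assumption based on reading the code.
example = "caption:Recalcular;type:primary;action:reglass_cotacoes.recalcular;enabled-states:@view,pending"
parsed = parse_bundle_action(example)
# parsed == {
#     "caption": "Recalcular",
#     "type": "primary",
#     "action": "reglass_cotacoes.recalcular",
#     "enabled-states": ["view", "pending"],  # "@"-prefixed values split on ","
# }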
StarcoderdataPython
57051
import gtfs_kit

from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from representation.dataset_infos import DatasetInfos
from requests.exceptions import MissingSchema
from pandas.errors import ParserError

GTFS_TYPE = "GTFS"
GBFS_TYPE = "GBFS"


def build_representation(dataset_type, dataset_infos):
    """Dataset representation builder function.
    The factory builds and returns a dataset representation according to the dataset type.
    :param dataset_type: The type of the dataset, either GTFS or GBFS.
    :param dataset_infos: The processing infos of the dataset.
    """
    if not isinstance(dataset_infos, DatasetInfos):
        raise TypeError("Dataset infos must be a valid DatasetInfos.")
    representation = None
    if dataset_type == GTFS_TYPE:
        representation = build_gtfs_representation(dataset_infos)
    elif dataset_type == GBFS_TYPE:
        representation = build_gbfs_representation(dataset_infos)
    return representation


def build_gtfs_representation(dataset_infos):
    try:
        dataset = gtfs_kit.read_feed(dataset_infos.zip_path, dist_units="km")
    except TypeError as te:
        raise TypeError(
            f"Exception '{te}' occurred while reading the GTFS dataset with the GTFS kit library. "
            f"The dataset must be a valid GTFS zip file or URL.\n"
        )
    except MissingSchema as ms:
        raise MissingSchema(
            f"Exception '{ms}' occurred while opening the GTFS dataset with the GTFS kit library. "
            f"The dataset must be a valid GTFS zip file or URL.\n"
        )
    except ParserError as pe:
        print(
            f"Exception {pe} found while reading the dataset. "
            f"Continuing adding dataset infos to database without metadata"
        )
        return None
    metadata = GtfsMetadata(dataset_infos)
    representation = GtfsRepresentation(
        dataset_infos.source_entity_code, dataset, metadata
    )
    return representation


def build_gbfs_representation(dataset_infos):
    raise NotImplementedError
StarcoderdataPython
3378994
<filename>homeworks/aleksey_gukov/lesson13/level01.py<gh_stars>0
# Note: identifiers are transliterated Russian (spisok="list",
# slovar="dict", chast="part", head="header").
def get_headers(http_resp):
    spisok = http_resp.split("\n")
    del spisok[0]
    del spisok[spisok.index("") :]
    slovar = {}
    for chast in spisok:
        head = chast.split(": ")
        slovar[head[0]] = head[1]
    return slovar
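# Illustrative usage sketch (not part of the original homework): a minimal
# HTTP response with the separators get_headers expects — "\n" line endings
# and a blank line before the body. The sample values are made up.
sample_resp = (
    "HTTP/1.1 200 OK\n"
    "Content-Type: text/html\n"
    "Content-Length: 42\n"
    "\n"
    "<html>...</html>"
)
assert get_headers(sample_resp) == {
    "Content-Type": "text/html",
    "Content-Length": "42",
}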
StarcoderdataPython
50430
<filename>koapy/cli/utils/credential.py
import os

import click

from koapy.config import (
    config,
    config_from_dict,
    default_user_config_path,
    save_config,
    user_config,
)


def prompt_credential():
    credential = config.get("koapy.backend.kiwoom_open_api_plus.credential")

    default_user_id = credential["user_id"]
    default_user_password = credential["user_password"]
    default_server = "simulation" if credential["is_simulation"] else "real"
    default_cert_password = credential["cert_password"]

    user_id = click.prompt("User ID", default=default_user_id)
    user_password = click.prompt(
        "User Password",
        hide_input=True,
        default=default_user_password,
        show_default=False,
    )
    is_simulation = (
        click.prompt(
            "Server Type",
            type=click.Choice(["real", "simulation"], case_sensitive=False),
            default=default_server,
        )
        == "simulation"
    )
    if is_simulation:
        cert_password = <PASSWORD>
    else:
        cert_password = click.prompt(
            "Cert Password",
            hide_input=True,
            default=default_cert_password,
            show_default=False,
        )

    account_count = click.prompt("Account Count", type=int, default=1)
    account_passwords = {}
    for _ in range(account_count):
        account_number = click.prompt("Account Number", default="0000000000")
        account_password = click.prompt(
            "Account Password",
            hide_input=True,
            default="<PASSWORD>",
            show_default=False,
        )
        account_passwords[account_number] = account_password

    credential = {
        "user_id": user_id,
        "user_password": <PASSWORD>,
        "cert_password": <PASSWORD>_password,
        "is_simulation": is_simulation,
        "account_passwords": account_passwords,
    }
    credential = config_from_dict(credential)

    return credential


def get_credential(interactive=False):
    if not interactive:
        credential = config.get("koapy.backend.kiwoom_open_api_plus.credential")
    else:
        credential = prompt_credential()
        save_credential = (
            click.prompt(
                "Save credential info into a config file?",
                type=click.Choice(["y", "n"], case_sensitive=False),
                default="n",
            )
            == "y"
        )
        if save_credential:
            config_path = click.prompt(
                "Path to save config file", default=default_user_config_path
            )
            if os.path.exists(config_path):
                should_write = (
                    click.prompt(
                        "The file already exists, overwrite?",
                        type=click.Choice(["y", "n"], case_sensitive=False),
                        default="n",
                    )
                    == "y"
                )
            else:
                should_write = True
            if should_write:
                user_config.put(
                    "koapy.backend.kiwoom_open_api_plus.credential", credential
                )
                save_config(config_path, user_config)
    return credential
StarcoderdataPython
3237994
<filename>hydrus/client/gui/search/ClientGUIPredicatesOR.py
import typing

from qtpy import QtWidgets as QW

from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientSearch
from hydrus.client.gui import QtPorting as QP

# ultimately, rewrite acread to be two classes, acread and acreadthatsupportsor
# and then this guy only imports the base class, and only the supportsor will know about this
# otherwise we have jank imports, and also nested OR lmao
# also this should take file and tag domain

class ORPredicateControl( QW.QWidget ):

    def __init__( self, parent: QW.QWidget, predicate: ClientSearch.Predicate ):

        QW.QWidget.__init__( self, parent )

        from hydrus.client.gui.search import ClientGUIACDropdown

        if predicate.GetType() != ClientSearch.PREDICATE_TYPE_OR_CONTAINER:

            raise Exception( 'Launched an ORPredicateControl without an OR Pred!' )

        predicates = predicate.GetValue()

        page_key = HydrusData.GenerateKey()

        file_search_context = ClientSearch.FileSearchContext( file_service_key = CC.LOCAL_FILE_SERVICE_KEY, predicates = predicates )

        self._search_control = ClientGUIACDropdown.AutoCompleteDropdownTagsRead( self, page_key, file_search_context, hide_favourites_edit_actions = True )

        vbox = QP.VBoxLayout()

        QP.AddToLayout( vbox, self._search_control, CC.FLAGS_CENTER_PERPENDICULAR )

        self.setLayout( vbox )

    def CheckValid( self ):

        try:

            predicates = self.GetPredicates()

        except Exception as e:

            raise HydrusExceptions.VetoException( str( e ) )

    def GetPredicates( self ):

        or_sub_predicates = self._search_control.GetPredicates()

        if len( or_sub_predicates ) == 0:

            return []

        elif len( or_sub_predicates ) == 1:

            return or_sub_predicates

        or_predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_OR_CONTAINER, or_sub_predicates )

        return [ or_predicate ]
StarcoderdataPython
1719872
<filename>optapy-core/tests/test_user_error.py<gh_stars>10-100
import optapy
import optapy.score
import optapy.config
import optapy.constraint

from optapy.types import Duration
import pytest
import re


@optapy.planning_entity
class Entity:
    def __init__(self, value=None):
        self.value = value

    @optapy.planning_variable(str, value_range_provider_refs=['value_range'])
    def get_value(self):
        return self.value

    def set_value(self, value):
        self.value = value


@optapy.planning_solution
class Solution:
    def __init__(self, entity_list, value_list, score=None):
        self.entity_list = entity_list
        self.value_list = value_list
        self.score = score

    @optapy.planning_entity_collection_property(Entity)
    def get_entity_list(self):
        return self.entity_list

    @optapy.problem_fact_collection_property(str)
    @optapy.value_range_provider(range_id='value_range')
    def get_value_list(self):
        return self.value_list

    @optapy.planning_score(optapy.score.SimpleScore)
    def get_score(self):
        return self.score

    def set_score(self, score):
        self.score = score


@optapy.constraint_provider
def my_constraints(constraint_factory):
    return [
        constraint_factory.forEach(optapy.get_class(Entity))
        .penalize('Penalize each entity', optapy.score.SimpleScore.ONE,
                  lambda entity: 'TEN')
    ]


def test_non_planning_solution_being_passed_to_solve():
    solver_config = optapy.config.solver.SolverConfig()
    solver_config.withSolutionClass(optapy.get_class(Solution)).withEntityClasses(optapy.get_class(Entity)) \
        .withConstraintProviderClass(optapy.get_class(my_constraints))
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    with pytest.raises(ValueError, match=re.escape(
            f'A problem was not passed to solve (parameter problem was ({None})). Maybe '
            f'pass an instance of a class annotated with @planning_solution to solve?'
    )):
        solver.solve(None)


def test_none_passed_to_solve():
    solver_config = optapy.config.solver.SolverConfig()
    solver_config.withSolutionClass(optapy.get_class(Solution)).withEntityClasses(optapy.get_class(Entity)) \
        .withConstraintProviderClass(optapy.get_class(my_constraints))
    problem = 10
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    with pytest.raises(ValueError, match=re.escape(
            f'The problem ({problem}) is not an instance of a @planning_solution class. '
            f'Maybe decorate the problem class ({type(problem)}) with @planning_solution?'
    )):
        solver.solve(10)


def test_bad_return_type():
    solver_config = optapy.config.solver.SolverConfig()
    solver_config.withSolutionClass(optapy.get_class(Solution)) \
        .withEntityClasses([optapy.get_class(Entity)]) \
        .withConstraintProviderClass(optapy.get_class(my_constraints)) \
        .withTerminationSpentLimit(optapy.types.Duration.ofSeconds(1))
    problem = Solution([Entity()], ['1', '2', '3'])
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    with pytest.raises(RuntimeError, match=r'An error occurred during solving. This can occur when.*'):
        solver.solve(problem)
StarcoderdataPython
151464
import argparse
import copy
import json
import logging
import os
from collections import defaultdict
from operator import add
from typing import List

import numpy as np
import torch
from sklearn import metrics
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataloader import DataLoader

from training.models import RNNClassifier


class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, np.float32):
            return obj.item()
        return json.JSONEncoder.default(self, obj)


class ArgsStruct:
    def __init__(self, **entries):
        self.__dict__.update(entries)


def load_architecture(device: torch.device, args: argparse.Namespace):
    model = RNNClassifier(arch=args.arch,
                          static_input_size=args.static_input_size,
                          dynamic_input_size=args.dynamic_input_size,
                          static_embedding_size=args.static_embedding_size,
                          hidden_size=args.hidden_size,
                          dropout=args.dropout,
                          rnn_layers=args.rnn_layers,
                          bidirectional=args.bidirectional,
                          use_attention=args.use_attention,
                          attention_type=args.attention_type,
                          attention_fields=args.attention_fields,
                          device=device,
                          fc_layers=args.fc_layers,
                          use_prior_prob_label=args.use_prior_prob_label)
    model.to(device)
    return model


def deterministic(seed):
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)


def evaluate(data_loader: DataLoader, models: List[torch.nn.Module], device: torch.device,
             subset_name: str, criterion, logistic_threshold: float, exp_dir: str,
             metric='accuracy', max_seq: int = -1, aggregate: str = 'add',
             aggregate_or_th: float = 0.5):
    assert aggregate in ['add', 'or']
    assert aggregate_or_th > 0 and aggregate_or_th <= 1
    assert metric in ['accuracy', 'f1']
    metric_name = metric
    if metric == 'accuracy':
        metric_score = metrics.accuracy_score
        metric_args = {}
        metric_other_name, metric_other_args, metric_other_score = 'f1', {'average': 'macro'}, metrics.f1_score
    else:
        metric_score = metrics.f1_score
        metric_args = {'average': 'macro'}
        metric_other_name, metric_other_args, metric_other_score = 'accuracy', {}, metrics.accuracy_score

    # deterministic(seed)  # seed should NOT be used here (TODO review)
    total = 0
    loss_total = 0
    [model.eval() for model in models]
    with torch.no_grad():
        predictions = [defaultdict(list) for _ in range(len(models))]
        patient_ids_from_start = defaultdict(list)
        corrects = defaultdict(list)
        first = True
        for model_idx, model in enumerate(models):
            for data in data_loader:
                patient_ids, static_data, dynamic_data, lengths, labels = \
                    data[0], data[1].to(device), data[2], data[3].to(device), data[4].to(device)
                if max_seq != -1:
                    new_dynamic_data = []
                    for data in dynamic_data:
                        new_dynamic_data.append(data[len(data) - max_seq:] if len(data) > max_seq else data)
                    dynamic_data = new_dynamic_data
                # TO FIX: the padding removes one sequence from the list!
                dynamic_data_padded = pad_sequence(dynamic_data, batch_first=True, padding_value=0).to(device)
                effective_lengths = torch.ones(dynamic_data_padded.shape[0]).long().to(device)
                c_lengths = torch.tensor(list(range(dynamic_data_padded.shape[1]))).long().to(device)
                outputs = torch.zeros(dynamic_data_padded.shape[0]).to(device)
                hidden = model.init_hidden(dynamic_data_padded.shape[0])
                max_seq_step = dynamic_data_padded.shape[1]
                dynamic_data_history = torch.zeros(len(data[0]), dynamic_data_padded.shape[1],
                                                   model.hidden_size).to(device)
                for seq_step in range(max_seq_step):
                    events = dynamic_data_padded[:, seq_step, :]
                    non_zero = (effective_lengths != 0).nonzero().squeeze()
                    static = static_data[non_zero]
                    lens = effective_lengths[non_zero]
                    evs = events[non_zero]
                    if len(lens.shape) != 1:
                        static = static.unsqueeze(dim=0)
                        lens = lens.unsqueeze(dim=0)
                        evs = evs.unsqueeze(dim=0)
                    evs = evs.unsqueeze(dim=1)
                    if model.arch != 'lstm':
                        if len(non_zero.shape) == 0:
                            outputs[non_zero], hidden[:, non_zero:non_zero + 1, :], dynamic_data_event, _, _ = model(
                                (static, evs, lens, hidden, dynamic_data_history), seq_step)
                        else:
                            outputs[non_zero], hidden[:, non_zero, :], dynamic_data_event, _, _ = model(
                                (static, evs, lens, hidden, dynamic_data_history), seq_step)
                    else:
                        outputs[non_zero], h, dynamic_data_event, _, _ = model(
                            (static, evs, lens, hidden, dynamic_data_history), seq_step)
                        if len(non_zero.shape) == 0:
                            hidden[0][:, non_zero:non_zero + 1, :] = h[0]
                            hidden[1][:, non_zero:non_zero + 1, :] = h[1]
                        else:
                            hidden[0][:, non_zero, :] = h[0]
                            hidden[1][:, non_zero, :] = h[1]

                    # append predictions
                    non_zero_indexes = non_zero.tolist() if isinstance(non_zero.tolist(), list) \
                        else [non_zero.tolist()]
                    # append predictions and patient ids from start (left-aligned sequences)
                    for pred_idx in non_zero_indexes:
                        pred = torch.sigmoid(outputs[pred_idx]).clone().data
                        pred_seq_len = lengths.tolist()[pred_idx] - 1
                        predictions[model_idx][seq_step].append(pred)
                        # furthermore, store the patient_ids for each step
                        pid = patient_ids[pred_idx]
                        patient_ids_from_start[seq_step].append(int(pid))
                    dynamic_data_history[:, seq_step, :] = dynamic_data_event
                    if first:
                        outs = labels[non_zero].clone().data.tolist()
                        outs = outs if isinstance(outs, list) else [outs]
                        for label in outs:
                            corrects[seq_step].append(label)
                        total += 1 if len(non_zero.shape) == 0 else len(non_zero)
                    if outputs[non_zero].size():
                        if criterion.__class__.__name__ == 'BCEWithLogitsLoss':
                            loss_total += criterion(outputs[non_zero].clone(), labels[non_zero].float())
                        else:
                            loss_total += criterion(torch.sigmoid(outputs[non_zero]).clone(),
                                                    labels[non_zero].float())
                    effective_lengths = (c_lengths[seq_step] < lengths - 1).long()
            first = False

    loss_total /= len(models)

    # compute predictions from start and from end (right-aligned sequences)
    # using the sequence length for each prediction
    max_steps = len(predictions[0].keys())

    # Compute voted predictions
    def aggregate_or(votes):
        return (1 if len(list(filter(lambda x: x == 1, votes))) / len(votes) >= aggregate_or_th else 0,
                sum(votes) / len(votes))

    predicted = defaultdict(list)
    predicted_probs = defaultdict(list)
    for step in range(max_steps):
        # for each step, sum the prediction of each model in the ensemble
        preds_votes = []
        if aggregate == 'add':
            for model_idx in range(len(predictions)):
                if len(preds_votes) == 0:
                    preds_votes = [pred.tolist() for pred in predictions[model_idx][step]]
                else:
                    preds_votes = list(map(add, preds_votes,
                                           [pred.tolist() for pred in predictions[model_idx][step]]))
            predicted[step] = [1 if pred >= logistic_threshold * len(models) else 0 for pred in preds_votes]
            predicted_probs[step] = preds_votes
        else:
            preds_votes_to_aggregate = []
            for model_idx in range(len(predictions)):
                if len(preds_votes_to_aggregate) == 0:
                    preds_votes_to_aggregate = [pred.tolist() for pred in predictions[model_idx][step]]
                    preds_votes_to_aggregate = [[1 if pred >= logistic_threshold else 0
                                                 for pred in preds_votes_to_aggregate]]
                else:
                    new_votes = [pred.tolist() for pred in predictions[model_idx][step]]
                    new_votes = [1 if pred >= logistic_threshold else 0 for pred in new_votes]
                    preds_votes_to_aggregate.append(new_votes)
            pred_probs_or = []
            for idx_pred_ in range(len(preds_votes_to_aggregate[0])):
                preds_votes.append(aggregate_or([preds[idx_pred_] for preds in preds_votes_to_aggregate]))
            for idx_pred_vote, pred_vote in enumerate(preds_votes):
                decision, probs = pred_vote
                preds_votes[idx_pred_vote] = decision
                pred_probs_or.append(probs)
            predicted[step] = preds_votes
            predicted_probs[step] = pred_probs_or

    predictions = dict()
    prediction_probs = dict()
    labels = dict()
    for step in predicted.keys():
        lista_ids = patient_ids_from_start[step]
        lista_labels = corrects[step]
        lista_predicciones = predicted[step]
        lista_probs = predicted_probs[step]
        for id, label, prediction, prob in zip(lista_ids, lista_labels, lista_predicciones, lista_probs):
            if step == 0:
                predictions[id] = []
                prediction_probs[id] = []
                labels[id] = label
            predictions[id].append(prediction)
            prediction_probs[id].append(prob)

    predicted_from_end = defaultdict(list)
    predicted_probs_from_end = defaultdict(list)
    patient_ids_from_end = defaultdict(list)
    corrects_from_end = defaultdict(list)
    predictions_copy = copy.deepcopy(predictions)
    predictions_probs_copy = copy.deepcopy(prediction_probs)
    for step in range(max_steps):
        y_pred = []
        y_pred_probs = []
        y_true = []
        patient_ids_step = []
        for id in predictions_copy:
            if len(predictions_copy[id]) > 0:
                last_prediction = predictions_copy[id].pop()
                y_pred.append(last_prediction)
                y_pred_probs.append(predictions_probs_copy[id].pop())
                y_true.append(labels[id])
                patient_ids_step.append(id)
        patient_ids_from_end[step] = patient_ids_step
        predicted_from_end[step] = y_pred
        predicted_probs_from_end[step] = y_pred_probs
        corrects_from_end[step] = y_true

    # write predictions and correct labels to disk
    eval_preds = {"predictions_from_start": predicted,
                  "predictions_from_end": predicted_from_end,
                  "patient_ids_from_start": patient_ids_from_start,
                  "patient_ids_from_end": patient_ids_from_end,
                  "predicted_probs_from_start": predicted_probs,
                  "predicted_probs_from_end": predicted_probs_from_end,
                  "labels": corrects,
                  "labels_from_end": corrects_from_end}
    with open(os.path.join(exp_dir, 'eval_preds_' + subset_name + '.json'), 'w') as pn:
        json.dump(eval_preds, pn, cls=NumpyEncoder)

    # Compute evaluation metrics and write report
    eval_metrics = {"from_start": defaultdict(), "from_end": defaultdict(),
                    f"{metric_name}_avg_weighted": defaultdict()}
    for step in range(max_steps):
        # mean over all the correct predictions at a given step
        assert (len(predicted[step]) == len(corrects[step]) and
                len(predicted_from_end[step]) == len(corrects_from_end[step])), \
            'number of labels different from number of predictions'
        eval_metrics["from_start"][step] = {
            metric_name: metric_score(corrects[step], predicted[step], **metric_args),
            metric_other_name: metric_other_score(corrects[step], predicted[step], **metric_other_args),
            "sensitivity": metrics.recall_score(corrects[step], predicted[step]),
            "corrects": f'{metrics.accuracy_score(corrects[step], predicted[step], normalize=False)}',
            "examples": f'{len(predicted[step])}'}
        eval_metrics["from_end"][step] = {
            metric_name: metric_score(corrects_from_end[step], predicted_from_end[step], **metric_args),
            metric_other_name: metric_other_score(corrects_from_end[step], predicted_from_end[step],
                                                  **metric_other_args),
            "sensitivity": metrics.recall_score(corrects_from_end[step], predicted_from_end[step]),
            "corrects": f'{metrics.accuracy_score(corrects_from_end[step], predicted_from_end[step], normalize=False)}',
            "examples": f'{len(predicted_from_end[step])}'}

    predicted_all_scores = []
    for step in range(max_steps):
        predicted_all_scores.extend(predicted_probs[step])
    predicted_all = []
    for step in range(max_steps):
        predicted_all.extend(predicted[step])
    predicted_all_from_end = []
    for step in range(max_steps):
        predicted_all_from_end.extend(predicted_from_end[step])
    corrects_all = []
    for step in range(max_steps):
        corrects_all.extend(corrects[step])
    corrects_all_from_end = []
    for step in range(max_steps):
        corrects_all_from_end.extend(corrects_from_end[step])

    eval_metrics[f"{metric_name}_avg_weighted"] = metric_score(corrects_all, predicted_all, **metric_args)
    eval_metrics[f"{metric_name}_avg_weighted_from_end"] = metric_score(corrects_all_from_end,
                                                                        predicted_all_from_end, **metric_args)
    eval_metrics[f"{metric_other_name}_avg_weighted"] = metric_other_score(corrects_all, predicted_all,
                                                                           **metric_other_args)
    eval_metrics[f"{metric_other_name}_avg_weighted_from_end"] = metric_other_score(corrects_all_from_end,
                                                                                    predicted_all_from_end,
                                                                                    **metric_other_args)
    eval_metrics["auc"] = metrics.roc_auc_score(corrects_all, predicted_all_scores)
    tn, fp, fn, tp = metrics.confusion_matrix(corrects_all, predicted_all).ravel()
    specificity = tn / (tn + fp)
    eval_metrics["sensitivity"] = metrics.recall_score(corrects_all, predicted_all)
    eval_metrics["sensitivity_from_end"] = metrics.recall_score(corrects_all_from_end, predicted_all_from_end)
    eval_metrics["specificity"] = specificity

    # TODO: encapsulate the evaluation report in a function
    eval_report = '\t'.join(['days from hospitalization', 'corrects', 'examples',
                             f'{metric_name} per day', f'{metric_name} average weighted',
                             f'{metric_other_name} per day', f'{metric_other_name} average weighted',
                             'sensitivity per day', 'sensitivity average'])
    for step in range(max_steps):
        eval_report += '\t'.join([f'\n{step}',
                                  f'{eval_metrics["from_start"][step]["corrects"]}',
                                  f'{eval_metrics["from_start"][step]["examples"]}',
                                  f'{eval_metrics["from_start"][step][f"{metric_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_name}_avg_weighted"] * 100:.2f}%',
                                  f'{eval_metrics["from_start"][step][f"{metric_other_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_other_name}_avg_weighted"] * 100:.2f}%',
                                  f'{eval_metrics["from_start"][step]["sensitivity"] * 100:.2f}%',
                                  f'{eval_metrics["sensitivity"] * 100:.2f}%'])
    eval_report += '\n'
    eval_report += '\t'.join(['days before discharge', 'corrects', 'examples',
                              f'{metric_name} per day', f'{metric_name} average weighted',
                              f'{metric_other_name} per day', f'{metric_other_name} average weighted',
                              'sensitivity per day', 'sensitivity average'])
    for step in range(max_steps):
        eval_report += '\t'.join([f'\n{step}',
                                  f'{eval_metrics["from_end"][step]["corrects"]}',
                                  f'{eval_metrics["from_end"][step]["examples"]}',
                                  f'{eval_metrics["from_end"][step][f"{metric_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_name}_avg_weighted_from_end"] * 100:.2f}%',
                                  f'{eval_metrics["from_end"][step][f"{metric_other_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_other_name}_avg_weighted_from_end"] * 100:.2f}%',
                                  f'{eval_metrics["from_end"][step]["sensitivity"] * 100:.2f}%',
                                  f'{eval_metrics["sensitivity_from_end"] * 100:.2f}%'])
    logging.info(eval_report)
    with open(os.path.join(exp_dir, 'eval_report_' + subset_name + '.csv'), 'w') as fn:
        fn.writelines(eval_report)
    logging.info(f"{metric_name.upper()} GLOBAL {subset_name}: " +
                 f'{eval_metrics[f"{metric_name}_avg_weighted"] * 100:.4f}%')
    output_table = {
        f'{metric_name}_avg_weighted': eval_metrics[f'{metric_name}_avg_weighted'],
        f'{metric_other_name}_avg_weighted': eval_metrics[f'{metric_other_name}_avg_weighted'],
        'sensitivity': eval_metrics['sensitivity'],
        'specificity': eval_metrics['specificity'],
        'auc': eval_metrics['auc']
    }
    return float(loss_total), output_table
StarcoderdataPython
111532
<filename>crawler_from_scratch/start.py
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_Getting_Started.ipynb (unless otherwise specified).

__all__ = []
StarcoderdataPython
1770421
import sys

import numpy as np

from thyme import *
from thyme.parsers.extxyz import write

if __name__ == "__main__":

    # trjs = from_file(sys.argv[1])
    # trjs.save(sys.argv[2]+".pickle")
    # trjs.save(sys.argv[2]+"_padded_mat.npz")

    trj = Trajectory.from_file(
        sys.argv[1],
        update_dict={
            CELL: [12.817769235175424, 24.094028158633765, 122.0],
            FIXED_ATTRS: [CELL],
            PER_FRAME_ATTRS: [POSITION, TOTAL_ENERGY, "label"],
            SPECIES: ["Au"] * 144,
        },
        mapping={
            POSITION: "xyz",
            TOTAL_ENERGY: "pe",
        },
    )
    trj.include_frames(np.arange(0, len(trj), 100))
    # print(trj)
    # write(sys.argv[2], trj, append=False)
    trj.save(sys.argv[2])
StarcoderdataPython
25192
<filename>tests/fdb.py<gh_stars>0
import RPi.GPIO as GPIO  # Import Raspberry Pi GPIO library
from datetime import datetime
import time
import json
import paho.mqtt.client as mqtt


def button_callback(channel):
    print(str(datetime.now()) + " Button was pushed!")
    trigger()
    time.sleep(2)


# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))

    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("$SYS/#")


# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))


def trigger():
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message

    client.connect("mqtt.iglor.es", 8080, 60)

    payload = {
        "data": "bomb"
    }
    print("lalas")
    # Serialize the dict before publishing; the original passed bytes(payload),
    # which raises TypeError on a dict.
    client.publish("3522109c644e08605c46308a880dcb7d/smartphone",
                   payload=json.dumps(payload), qos=0, retain=False)
    time.sleep(0.5)


GPIO.setwarnings(False)  # Ignore warning for now
GPIO.setmode(GPIO.BOARD)  # Use physical pin numbering
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # Set pin 10 to be an input pin, pulled low (off)
GPIO.add_event_detect(10, GPIO.RISING, callback=button_callback)  # Setup event on pin 10 rising edge

message = input("Press enter to quit\n\n")  # Run until someone presses enter
GPIO.cleanup()  # Clean up
StarcoderdataPython
155871
from __future__ import division

from scipy.stats import norm
from pandas import read_excel
import numpy as np


# Transform to normal distribution #
def allnorm(x, y):
    # Ensure array arithmetic below works for plain-list inputs.
    x = np.asarray(x)
    y = np.asarray(y)
    sample = len(x)

    # Estimate norm parameters #
    phat1 = norm.fit(x, loc=0, scale=1)
    meanx = phat1[0]
    sigmax = phat1[1]
    phat2 = norm.fit(y, loc=0, scale=1)
    meany = phat2[0]
    sigmay = phat2[1]

    # Save frequent calculations #
    x_minus_mean_x = x - meanx
    y_minus_mean_y = y - meany
    sigmax_pow_3 = sigmax ** 3
    sigmax_pow_2 = sigmax ** 2
    sigmay_pow_3 = sigmay ** 3
    sigmay_pow_2 = sigmay ** 2
    minus_sample = -sample

    # Calculate hessian matrix of log-likelihood #
    hes_normx = np.array([[minus_sample / (sigmax_pow_2),
                           -2 * np.sum(x_minus_mean_x) / (sigmax_pow_3)],
                          [-2 * np.sum(x_minus_mean_x) / (sigmax_pow_3),
                           (sample / (sigmax_pow_2)) - (3 * np.sum((x_minus_mean_x) ** 2) / (sigmax ** 4))]
                          ])
    # Note: the original used x - meany in the off-diagonal term below; the
    # Hessian is symmetric, so it must mirror the y-based term above.
    hes_normy = np.array([[minus_sample / (sigmay_pow_2),
                           -2 * np.sum(y_minus_mean_y) / (sigmay_pow_3)],
                          [-2 * np.sum(y_minus_mean_y) / (sigmay_pow_3),
                           (sample / (sigmay_pow_2)) - (3 * np.sum((y_minus_mean_y) ** 2) / sigmay ** 4)]
                          ])

    # Calculate cumulative of x and y #
    u = norm.cdf(x_minus_mean_x / sigmax, loc=0, scale=1)
    v = norm.cdf(y_minus_mean_y / sigmay, loc=0, scale=1)

    # Fix output #
    zeros_tmp = np.zeros((2, 2))
    new_hes_normx = np.concatenate((hes_normx, zeros_tmp), axis=1)
    new_hes_normy = np.concatenate((zeros_tmp, hes_normy), axis=1)
    hes_norm = np.concatenate((new_hes_normx, new_hes_normy), axis=0)
    sigma = [sigmax, sigmay, meanx, meany]

    # Fix overflow #
    for i in range(len(u)):
        if u[i] == 1:
            u[i] = 0.99999999
        if v[i] == 1:
            v[i] = 0.99999999

    result = {"sigma": sigma,
              "hes_norm": hes_norm,
              "u": u,
              "v": v
              }
    return result


# Test #
if __name__ == "__main__":
    df = read_excel("/home/petropoulakis/Desktop/artificial_data.xlsx", sheet_name='Sheet1')
    x = []
    y = []
    for index, row in df.iterrows():
        x.append([float(row['x'])])
        y.append([float(row['y'])])

    result = allnorm(x, y)
    print(result['sigma'])
    print(result['hes_norm'])
    print(result['u'][:5])
    print(result['v'][:5])
StarcoderdataPython
3397124
<reponame>sns-sdks/python-twitter-premium
from dataclasses import dataclass, field
from typing import Optional, List

from .base import BaseModel


@dataclass
class Hashtag(BaseModel):
    """
    A class representing hashtag object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#hashtags
    """

    text: Optional[str] = field(default=None)
    indices: Optional[List[int]] = field(default=None, repr=False)


@dataclass
class Size(BaseModel):
    """
    A class representing size object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#size
    """

    w: Optional[int] = field(default=None)
    h: Optional[int] = field(default=None)
    resize: Optional[str] = field(default=None)


@dataclass
class Sizes(BaseModel):
    """
    A class representing sizes object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#media-size
    """

    thumb: Optional[Size] = field(default=None)
    small: Optional[Size] = field(default=None, repr=False)
    medium: Optional[Size] = field(default=None, repr=False)
    large: Optional[Size] = field(default=None, repr=False)


@dataclass
class Variant(BaseModel):
    bitrate: Optional[int] = field(default=None, repr=False)
    content_type: Optional[str] = field(default=None, repr=False)
    url: Optional[str] = field(default=None, repr=False)


@dataclass
class VideoInfo(BaseModel):
    """
    A class representing video object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/extended-entities-object
    """

    aspect_ratio: Optional[List[int]] = field(default=None, repr=False)
    duration_millis: Optional[int] = field(default=None, repr=False)
    variants: Optional[List[Variant]] = field(default=None, repr=False)


@dataclass
class AdditionalMediaInfo(BaseModel):
    """
    A class representing additional media object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/extended-entities-object
    """

    title: Optional[str] = field(default=None, repr=False)
    description: Optional[str] = field(default=None, repr=False)
    embeddable: Optional[bool] = field(default=None, repr=False)
    monetizable: Optional[bool] = field(default=None, repr=False)


@dataclass
class Media(BaseModel):
    """
    A class representing media object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#media
    """

    display_url: Optional[str] = field(default=None, repr=False)
    expanded_url: Optional[str] = field(default=None, repr=False)
    id: Optional[int] = field(default=None, repr=False)
    id_str: Optional[str] = field(default=None)
    indices: Optional[List[int]] = field(default=None, repr=False)
    media_url: Optional[str] = field(default=None, repr=False)
    media_url_https: Optional[str] = field(default=None, repr=False)
    type: Optional[str] = field(default=None)
    url: Optional[str] = field(default=None, repr=False)
    sizes: Optional[Sizes] = field(default=None, repr=False)
    source_status_id: Optional[int] = field(default=None, repr=False)
    source_status_id_str: Optional[str] = field(default=None, repr=False)
    video_info: Optional[VideoInfo] = field(default=None, repr=False)
    additional_media_info: Optional[AdditionalMediaInfo] = field(default=None, repr=False)


@dataclass
class Url(BaseModel):
    """
    A class representing url object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#urls
    """

    display_url: Optional[str] = field(default=None, repr=False)
    expanded_url: Optional[str] = field(default=None, repr=False)
    url: Optional[str] = field(default=None)
    indices: Optional[List[int]] = field(default=None, repr=False)
    # Extends fields
    status: Optional[int] = field(default=None, repr=False)
    title: Optional[str] = field(default=None, repr=False)
    description: Optional[str] = field(default=None, repr=False)


@dataclass
class UserMention(BaseModel):
    """
    A class representing user mention object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#mentions
    """

    id: Optional[int] = field(default=None, repr=False)
    id_str: Optional[str] = field(default=None)
    indices: Optional[List[int]] = field(default=None, repr=False)
    name: Optional[str] = field(default=None)
    screen_name: Optional[str] = field(default=None)


@dataclass
class Symbol(BaseModel):
    """
    A class representing symbol object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#symbols
    """

    indices: Optional[List[int]] = field(default=None, repr=False)
    text: Optional[str] = field(default=None)


@dataclass
class PollOption(BaseModel):
    position: Optional[int] = field(default=None)
    text: Optional[str] = field(default=None)


@dataclass
class Poll(BaseModel):
    """
    A class representing poll object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#polls
    """

    options: Optional[List[PollOption]] = field(default=None, repr=False)
    end_datetime: Optional[str] = field(default=None)
    duration_minutes: Optional[int] = field(default=None)


@dataclass
class Entities(BaseModel):
    """
    A class representing entities object.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object
    """

    hashtags: Optional[List[Hashtag]] = field(default=None, repr=False)
    media: Optional[List[Media]] = field(default=None, repr=False)
    urls: Optional[List[Url]] = field(default=None, repr=False)
    user_mentions: Optional[List[UserMention]] = field(default=None, repr=False)
    symbols: Optional[List[Symbol]] = field(default=None, repr=False)
    polls: Optional[List[Poll]] = field(default=None, repr=False)


@dataclass
class ExtendedEntities(Entities):
    """
    extended entities has same struct as entities.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/extended-entities-object
    """

    ...


@dataclass
class UserEntitiesUrl(BaseModel):
    urls: Optional[List[Url]] = field(default=None, repr=False)


@dataclass
class UserEntitiesDescription(BaseModel):
    description: Optional[List[Url]] = field(default=None, repr=False)


@dataclass
class UserEntities(BaseModel):
    """
    A class representing user entities object. It has a bit different for tweet entities.

    Refer: https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/entities-object#entities-user
    """

    url: Optional[UserEntitiesUrl] = field(default=None, repr=False)
    description: Optional[UserEntitiesDescription] = field(default=None, repr=False)
StarcoderdataPython
1770223
<gh_stars>0
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from util import not_none

from concourse.model.base import (
    AttributeSpec,
    Trait,
    TraitTransformer,
)

ATTRIBUTES = (
    AttributeSpec.optional(
        name='suppress_parallel_execution',
        default=None,
        doc='whether parallel executions of the same job should be prevented',
        type=bool,
    ),
)


class SchedulingTrait(Trait):
    def _attribute_specs(self):
        return ATTRIBUTES

    def _defaults_dict(self):
        return AttributeSpec.defaults_dict(ATTRIBUTES)

    def _optional_attributes(self):
        return set(AttributeSpec.optional_attr_names(ATTRIBUTES))

    # XXX: merge this with cron-trait
    def transformer(self):
        return SchedulingTraitTransformer()

    def suppress_parallel_execution(self):
        return self.raw.get('suppress_parallel_execution', None)


class SchedulingTraitTransformer(TraitTransformer):
    name = 'scheduling'

    def process_pipeline_args(self, pipeline_args: 'JobVariant'):
        # no-op
        pass
StarcoderdataPython
3392585
# datetime2/modern.py

# ISO calendar and Internet time

# Copyright (c) 2013-2022 <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name(s) of the copyright holders nor the names of its
#   contributors may be used to endorse or promote products derived from this
#   software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

__author__ = "<NAME> <francescor2010 at yahoo.it>"

__all__ = ["IsoCalendar"]

import bisect
from fractions import Fraction
from math import floor

from datetime2 import verify_fractional_value

_long_years = frozenset(
    [4, 9, 15, 20, 26, 32, 37, 43, 48, 54,
     60, 65, 71, 76, 82, 88, 93, 99, 105, 111,
     116, 122, 128, 133, 139, 144, 150, 156, 161, 167,
     172, 178, 184, 189, 195, 201, 207, 212, 218, 224,
     229, 235, 240, 246, 252, 257, 263, 268, 274, 280,
     285, 291, 296, 303, 308, 314, 320, 325, 331, 336,
     342, 348, 353, 359, 364, 370, 376, 381, 387, 392, 398]
)

_weeks_in_previous_years = [0]
for year_index in range(1, 400):
    _weeks_in_previous_years.append(_weeks_in_previous_years[-1] + (52 if year_index not in _long_years else 53))

# This code pretty prints the week-in-previous-years list
# for row in range(20):
#     print('{:3d}: {}'.format(row * 20, " ".join(['{:5d}'.format(_weeks_in_previous_years[y + row * 20]) for y in range(20)])))


##############################################################################
# Iso calendar
#
class IsoCalendar:
    def __init__(self, year, week, day):
        if not isinstance(year, int) or not isinstance(week, int) or not isinstance(day, int):
            raise TypeError("integer argument expected")
        if week < 1 or week > IsoCalendar.weeks_in_year(year):
            raise ValueError(f"Week must be between 1 and number of weeks in year, while it is {week}.")
        if day < 1 or day > 7:
            raise ValueError(f"Day must be between 1 and 7, while it is {day}.")
        self._year = year
        self._week = week
        self._day = day
        self._rata_die = None

    @property
    def year(self):
        return self._year

    @property
    def week(self):
        return self._week

    @property
    def day(self):
        return self._day

    @classmethod
    def from_rata_die(cls, day_count):
        if not isinstance(day_count, int):
            raise TypeError("integer argument expected")
        week_no_less_1, day_less_1 = divmod(day_count - 1, 7)  # ranges: week_no_less_1: free, day_less_1: 0..6
        four_hundred_years, no_of_weeks_in_400 = divmod(week_no_less_1, 20871)  # ranges: four_hundred_years: free, no_of_weeks_in_400: 0..20870
        year_in_400 = bisect.bisect_right(_weeks_in_previous_years, no_of_weeks_in_400)  # range: year_in_400: 1..400
        year = year_in_400 + four_hundred_years * 400
        week = no_of_weeks_in_400 - _weeks_in_previous_years[year_in_400 - 1] + 1
        day = day_less_1 + 1
        iso_day = cls(year, week, day)
        iso_day._rata_die = day_count
        return iso_day

    @staticmethod
    def is_long_year(year):
        return year % 400 in _long_years

    @staticmethod
    def weeks_in_year(year):
        return 52 if year % 400 not in _long_years else 53

    def to_rata_die(self):
        if self._rata_die is None:
            y400, year_in_400 = divmod(self.year - 1, 400)
            self._rata_die = (y400 * 146097 + 7 * (_weeks_in_previous_years[year_in_400] + self.week - 1) + self.day)
        return self._rata_die

    def day_of_year(self):
        return 7 * (self.week - 1) + self.day

    def replace(self, *, year=None, week=None, day=None):
        if year is None:
            year = self.year
        if week is None:
            week = self.week
        if day is None:
            day = self.day
        return type(self)(year, week, day)

    def __repr__(self):
        return f"datetime2.modern.{type(self).__name__}({self.year}, {self.week}, {self.day})"

    def __str__(self):
        if self.year >= 0:
            return f"{self.year:04d}-W{self.week:02d}-{self.day:1d}"
        else:
            return f"{self.year:05d}-W{self.week:02d}-{self.day:1d}"

    name_weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]

    format_functions = {
        "a": lambda self: IsoCalendar.name_weekdays[self.day - 1][:3],
        "A": lambda self: IsoCalendar.name_weekdays[self.day - 1],
        "j": lambda self: f"{self.day_of_year():03d}",
        "w": lambda self: f"{self.day:1d}",
        "W": lambda self: f"{(self.day_of_year() + 7 - self.day) // 7:02d}",
        "y": lambda self: f"{self.year:03d}"[-2:],
        "Y": lambda self: f"{self.year:04d}" if self.year >= 0 else f"-{-self.year:04d}"
    }

    def cformat(self, format_string):
        if not isinstance(format_string, str):
            raise TypeError("Format must be specified with string.")
        output_pieces = []
        for format_chunk in format_string.split("%%"):
            format_parts = format_chunk.split("%")
            chunk_pieces = [format_parts[0]]
            for part in format_parts[1:]:
                if part == "":  # special case: last char is '%'
                    value = "%"
                else:
                    try:
                        value = self.format_functions[part[0]](self)
                    except KeyError:
                        value = "%" + part[0]
                chunk_pieces.append(value)
                chunk_pieces.append(part[1:])
            output_pieces.append("".join(chunk_pieces))
        return "%".join(output_pieces)


##############################################################################
# Internet time representation
#
class InternetTime:
    def __init__(self, beat):
        try:
            beat_fraction = verify_fractional_value(beat, min=0, max_excl=1000)
        except TypeError as exc:
            raise TypeError("beat is not a valid fractional value") from exc
        except ValueError as exc:
            raise ValueError("beat must be equal or greater than 0 and less than 1000.") from exc
        self._beat = beat_fraction

    @property
    def beat(self):
        return self._beat

    @classmethod
    def from_time_pair(cls, day_frac, utcoffset):
        day_frac_valid = verify_fractional_value(day_frac, min=0, max_excl=1, strict=True)
        if utcoffset is None:
            raise TypeError("Internet time can only be used for aware Time instances.")
        utcoffset_valid = verify_fractional_value(utcoffset, min=-1, max=1, strict=True)
        utc_time = day_frac_valid - utcoffset_valid
        beat = (utc_time - floor(utc_time)) * 1000
        internet = cls(beat)
        return internet

    def to_time_pair(self):
        return self._beat / 1000, Fraction(-1, 24)

    def __repr__(self):
        return f"datetime2.modern.{type(self).__name__}({self.beat!r})"

    def __str__(self):
        return f"@{int(self.beat):03d}"

    format_functions = {
        "b": lambda self: f"{int(self.beat):03d}",
        "f": lambda self: f"{int((self.beat - int(self.beat)) * 1000):03d}"
    }

    def cformat(self, format_string):
        if not isinstance(format_string, str):
            raise TypeError("Format must be specified with string.")
        output_pieces = []
        for format_chunk in format_string.split("%%"):
            format_parts = format_chunk.split("%")
            chunk_pieces = [format_parts[0]]
            for part in format_parts[1:]:
                if part == "":  # special case: last char is '%'
                    value = "%"
                else:
                    try:
                        value = self.format_functions[part[0]](self)
                    except KeyError:
                        value = "%" + part[0]
                chunk_pieces.append(value)
                chunk_pieces.append(part[1:])
            output_pieces.append("".join(chunk_pieces))
        return "%".join(output_pieces)
from django.contrib import admin
from spinach import Batch

from . import models
from .tasks import tasks


@admin.register(models.Feed)
class FeedAdmin(admin.ModelAdmin):
    list_display = ('name', 'domain', 'last_fetched_at', 'last_failure')
    actions = ['sync']

    def sync(self, request, queryset):
        batch = Batch()
        for feed in queryset:
            batch.schedule('synchronize_feed', feed.id, force=True)
        tasks.schedule_batch(batch)
    sync.short_description = 'Synchronize feed'


@admin.register(models.Article)
class ArticleAdmin(admin.ModelAdmin):
    pass


@admin.register(models.Attachment)
class AttachmentAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'size_in_bytes', 'duration')
    readonly_fields = ('article',)


@admin.register(models.ReaderProfile)
class ProfileAdmin(admin.ModelAdmin):
    pass


@admin.register(models.Subscription)
class SubscriptionAdmin(admin.ModelAdmin):
    pass


@admin.register(models.Board)
class BoardAdmin(admin.ModelAdmin):
    pass


@admin.register(models.CachedImage)
class CachedImageAdmin(admin.ModelAdmin):
    list_display = ('id', 'uri', 'format', 'resolution', 'is_tracking_pixel', 'created_at')
    readonly_fields = ('id', 'uri', 'format', 'resolution', 'size_in_bytes',
                       'failure_reason', 'created_at', 'image_tag')

# -*- coding: utf-8 -*-

# Spider for 91 buddha
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spiders.html

import scrapy
import logging
import random
from buddha_item import BuddhaItem
from utils.data_store import DataStore
import math
import time

date_string = time.strftime("%Y_%m_%d", time.localtime())
logging.basicConfig(
    filename=('buddha_%s.log' % (date_string)),
    level=logging.DEBUG,
    filemode='w')
logger = logging.getLogger(__name__)


class BuddhaSpider(scrapy.Spider):

    name = "buddha"

    start_urls = [
        'http://91porn.com/v.php?next=watch',   # all videos
        'http://91porn.com/v.php?category=rf'   # recently featured
    ]
    # start_urls = ['https://www.zhihu.com/signin']
    # start_urls = ['https://twitter.com/']

    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip,deflate",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 "
                      "Safari/537.36"
    }

    cookies = {
        "watch_times": "1"
    }

    def __init__(self, type):
        # scrapy's CLI (`-a type=0`) passes arguments as strings, so coerce
        # to int; the comparisons below are against the integers 0 and 1.
        self.type = int(type)
        super(BuddhaSpider, self).__init__()

    def start_requests(self):
        logger.info("Buddha - Start, Type: %s" % self.type)
        if self.type == 0:
            url = self.start_urls[0]
        elif self.type == 1:
            url = self.start_urls[1]
        return [
            scrapy.Request(
                url=url,
                callback=self.parse_last_page),
        ]

    def parse_last_page(self, response):
        xpath_str = '//*[@class="videopaging"]/text()'
        item = response.xpath(xpath_str).extract()[0]
        total_count = item.split(' ')[-1]
        last_page_num = math.ceil(int(total_count) / 20)
        logger.info("Videos Total Count: %s, Pages Total Count: %s" % (total_count, last_page_num))
        url = response.url + '&page=%s' % last_page_num
        yield scrapy.Request(
            url=url,
            callback=self.parse,
            dont_filter=True)
        yield scrapy.Request(
            url=url,
            callback=self.parse_previous_page,
            dont_filter=True)

    def parse(self, response):
        logger.info("Buddha - Parse : %s" % (response.url))
        xpath_str = '//*[@class="listchannel"]/a[@target="blank"]/@href'
        for href in response.xpath(xpath_str).extract():
            logger.info("Request Detail: %s" % (href))
            # check whether this viewkey is already stored
            viewkey = self._viewkey_from_url(href)
            ds = DataStore()
            (exists, rf) = ds.buddha_exists(viewkey)
            ds.close()
            if self.type == 0 and exists:
                # full crawl and the record exists: skip
                logger.warning("Ignore, View: %s exists" % (viewkey))
                continue
            elif self.type == 1 and exists and rf == 1:
                # featured crawl, record exists and is already marked featured: skip
                logger.warning("Ignore, View: %s exists" % (viewkey))
                continue
            random_ip = str(random.randint(0, 255)) + "." + \
                str(random.randint(0, 255)) + "." + \
                str(random.randint(0, 255)) + "." + \
                str(random.randint(0, 255))
            self.headers["X-Forwarded-For"] = random_ip
            yield scrapy.Request(
                url=href,
                headers=self.headers,
                cookies=self.cookies,
                callback=self.parse_detail)
        # filename = 'buddha.html'
        # with open(filename, 'wb') as response_file:
        #     response_file.write(response.body)

    def parse_detail(self, response):
        buddha = BuddhaItem()
        try:
            name = response.xpath(
                '//div[@id="viewvideo-title"]/text()').extract()[0]
            name = "".join(name.split())
            logger.info("Buddha - Parse Detail: %s" % (name))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, name parse error" % (response.url))
            name = ''
        buddha["name"] = name
        buddha["url"] = response.url

        viewkey = self._viewkey_from_url(response.url)
        buddha["viewkey"] = viewkey

        try:
            download_url = response.xpath(
                '//video[@id="vid"]/source/@src').extract()[0]
            download_url = "".join(download_url.split())
            logger.info("Buddha - Parse Detail: %s" % (download_url))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, download_url parse error" % (response.url))
            download_url = ''
        buddha["download_url"] = download_url

        try:
            image_url = response.xpath(
                '//div[@class="example-video-container"]/video/@poster').extract()[0]
            logger.info("Buddha - Parse Detail: %s" % (image_url))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, image_url parse error" % (response.url))
            image_url = ''
        buddha["image_url"] = image_url

        try:
            duration = response.xpath(
                '//div[@class="boxPart"]/text()').extract()[1]
            duration = "".join(duration.split())
            logger.info("Buddha - Parse Detail: %s" % (duration))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, duration parse error" % (response.url))
            duration = ''
        buddha["duration"] = duration

        try:
            points = response.xpath(
                '//div[@class="boxPart"]/text()').extract()[-1]
            points = "".join(points.split())
            logger.info("Buddha - Parse Detail: %s" % (points))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, points parse error" % (response.url))
            points = ''
        buddha["points"] = points

        try:
            add_time = response.xpath(
                '//div[@id="videodetails-content"]/span[@class="title"]/text()').extract()[0]
            add_time = "".join(add_time.split())
            logger.info("Buddha - Parse Detail: %s" % (add_time))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, add_time parse error" % (response.url))
            add_time = ''
        buddha["add_time"] = add_time

        try:
            author = response.xpath(
                '//div[@id="videodetails-content"]/a/span[@class="title"]/text()').extract()[0]
            author = "".join(author.split())
            logger.info("Buddha - Parse Detail: %s" % (author))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, author parse error" % (response.url))
            author = ''
        buddha["author"] = author

        try:
            more = response.xpath(
                '//span[@class="more"]/text()').extract()[0]
            more = "".join("".join(more).split())
            # logger.info("Buddha - Parse Detail: %s" % (more))
        except (ValueError, IndexError):
            logger.error("Buddha - Parse Detail Error: %s, more parse error" % (response.url))
            more = ''
        desc = more
        logger.info("Buddha - Parse Detail: %s" % (desc))
        buddha["desc"] = desc

        buddha["rf"] = 0
        if self.type == 1:
            logger.info("Buddha - Parse Detail rf : %s" % (buddha["rf"]))
            buddha["rf"] = 1

        # logger.info("Buddha - Parse Detail: %s" % (buddha))
        yield buddha
        # filename = 'buddha_detail_%s.html' % int(time.time())
        # with open(filename, 'wb') as response_file:
        #     response_file.write(response.body)

    def parse_previous_page(self, response):
        xpath_str = '//*[@id="paging"]/div/form/a/@href'
        next_url = response.urljoin(
            response.xpath(xpath_str).extract()[0])
        logger.info("Buddha - Parse Previous Page : %s" % (next_url))
        yield scrapy.Request(
            url=next_url,
            callback=self.parse,
            dont_filter=True)
        pagingnav = response.xpath(
            '//*[@id="paging"]/div/form/span[@class="pagingnav"]/text()').extract()[0]
        if pagingnav != '1':
            yield scrapy.Request(
                url=next_url,
                callback=self.parse_previous_page,
                dont_filter=True)
        else:
            # the first page has been reached: stop
            logger.info("Buddha - End With Page : %s " % (pagingnav))

    def _viewkey_from_url(self, url):
        key = 'viewkey='
        viewkey = ''
        if key in url:
            start = url.index(key) + len(key)
            end = start + 20
            viewkey = url[start:end]
        return viewkey

    def closed(self, spider):
        # second param is the instance of the spider about to be closed.
        logger.info("Buddha - Closed")
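
# --- Hypothetical launcher (illustrative; not part of the original project) ---
# Runs the spider in-process via Scrapy's CrawlerProcess instead of the
# `scrapy crawl buddha -a type=0` command line; both paths work because
# __init__ coerces `type` to int.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess()
    process.crawl(BuddhaSpider, type=0)
    process.start()
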
import pathlib
import shutil
import tempfile

from pymongo import MongoClient

from ted_sws import config
from ted_sws.core.model.manifestation import XMLManifestation
from ted_sws.core.model.notice import Notice
from ted_sws.data_manager.adapters.mapping_suite_repository import TRANSFORM_PACKAGE_NAME, VALIDATE_PACKAGE_NAME, \
    SPARQL_PACKAGE_NAME, METADATA_FILE_NAME, RESOURCES_PACKAGE_NAME, SHACL_PACKAGE_NAME, TEST_DATA_PACKAGE_NAME, \
    MAPPINGS_PACKAGE_NAME, MappingSuiteRepositoryInFileSystem, MappingSuiteRepositoryMongoDB
from ted_sws.data_manager.adapters.notice_repository import NoticeRepository
from ted_sws.mapping_suite_processor.adapters.github_package_downloader import GitHubMappingSuitePackageDownloader
from ted_sws.mapping_suite_processor.services.conceptual_mapping_files_injection import \
    mapping_suite_processor_inject_resources, mapping_suite_processor_inject_rml_modules, \
    mapping_suite_processor_inject_shacl_shapes, \
    mapping_suite_processor_inject_sparql_queries
from ted_sws.mapping_suite_processor.services.conceptual_mapping_generate_metadata import \
    mapping_suite_processor_generate_metadata
from ted_sws.mapping_suite_processor.services.conceptual_mapping_generate_sparql_queries import \
    mapping_suite_processor_generate_sparql_queries
from ted_sws.resources import RESOURCES_PATH

CONCEPTUAL_MAPPINGS_FILE_NAME = "conceptual_mappings.xlsx"
CONCEPTUAL_MAPPINGS_ASSERTIONS = "cm_assertions"
SHACL_SHAPE_INJECTION_FOLDER = "ap_data_shape"
SHACL_SHAPE_RESOURCES_FOLDER = "shacl_shapes"
SHACL_SHAPE_FILE_NAME = "ePO_shacl_shapes.rdf"
MAPPING_FILES_RESOURCES_FOLDER = "mapping_files"
RML_MODULES_FOLDER = "rml_modules"
SPARQL_QUERIES_RESOURCES_FOLDER = "queries"
SPARQL_QUERIES_INJECTION_FOLDER = "business_queries"
PROD_ARCHIVE_SUFFIX = "prod"
DEMO_ARCHIVE_SUFFIX = "demo"


def mapping_suite_processor_zip_package(mapping_suite_package_path: pathlib.Path, prod_version: bool = False):
    """
    This function archives a package and puts a suffix in the name of the archive.
    :param mapping_suite_package_path:
    :param prod_version:
    :return:
    """
    archive_name_suffix = PROD_ARCHIVE_SUFFIX if prod_version else DEMO_ARCHIVE_SUFFIX
    tmp_folder_path = mapping_suite_package_path.parent / f"{mapping_suite_package_path.stem}-{archive_name_suffix}"
    output_archive_file_name = mapping_suite_package_path.parent / f"{mapping_suite_package_path.stem}-{archive_name_suffix}"
    shutil.copytree(mapping_suite_package_path, tmp_folder_path, dirs_exist_ok=True)
    if prod_version:
        shutil.rmtree(tmp_folder_path / TEST_DATA_PACKAGE_NAME)
    shutil.make_archive(str(output_archive_file_name), 'zip', tmp_folder_path)
    shutil.rmtree(tmp_folder_path)


def mapping_suite_processor_expand_package(mapping_suite_package_path: pathlib.Path):
    """
    This function reads data from conceptual_mappings.xlsx and expands the provided package.
    :param mapping_suite_package_path:
    :return:
    """
    conceptual_mappings_file_path = mapping_suite_package_path / TRANSFORM_PACKAGE_NAME / CONCEPTUAL_MAPPINGS_FILE_NAME
    cm_sparql_folder_path = mapping_suite_package_path / VALIDATE_PACKAGE_NAME / SPARQL_PACKAGE_NAME / CONCEPTUAL_MAPPINGS_ASSERTIONS
    metadata_file_path = mapping_suite_package_path / METADATA_FILE_NAME
    resources_folder_path = mapping_suite_package_path / TRANSFORM_PACKAGE_NAME / RESOURCES_PACKAGE_NAME
    mapping_files_resources_folder_path = RESOURCES_PATH / MAPPING_FILES_RESOURCES_FOLDER
    rml_modules_folder_path = mapping_suite_package_path / TRANSFORM_PACKAGE_NAME / MAPPINGS_PACKAGE_NAME
    mapping_files_rml_modules_folder_path = RESOURCES_PATH / RML_MODULES_FOLDER
    shacl_shape_file_path = RESOURCES_PATH / SHACL_SHAPE_RESOURCES_FOLDER / SHACL_SHAPE_FILE_NAME
    shacl_shape_injection_folder = mapping_suite_package_path / VALIDATE_PACKAGE_NAME / SHACL_PACKAGE_NAME / SHACL_SHAPE_INJECTION_FOLDER
    sparql_queries_resources_folder_path = RESOURCES_PATH / SPARQL_QUERIES_RESOURCES_FOLDER
    sparql_queries_injection_folder = mapping_suite_package_path / VALIDATE_PACKAGE_NAME / SPARQL_PACKAGE_NAME / SPARQL_QUERIES_INJECTION_FOLDER

    shacl_shape_injection_folder.mkdir(parents=True, exist_ok=True)
    cm_sparql_folder_path.mkdir(parents=True, exist_ok=True)
    resources_folder_path.mkdir(parents=True, exist_ok=True)
    rml_modules_folder_path.mkdir(parents=True, exist_ok=True)

    mapping_suite_processor_generate_sparql_queries(conceptual_mappings_file_path=conceptual_mappings_file_path,
                                                    output_sparql_queries_folder_path=cm_sparql_folder_path)
    mapping_suite_processor_generate_metadata(conceptual_mappings_file_path=conceptual_mappings_file_path,
                                              output_metadata_file_path=metadata_file_path)
    mapping_suite_processor_inject_resources(conceptual_mappings_file_path=conceptual_mappings_file_path,
                                             resources_folder_path=mapping_files_resources_folder_path,
                                             output_resources_folder_path=resources_folder_path)
    mapping_suite_processor_inject_rml_modules(conceptual_mappings_file_path=conceptual_mappings_file_path,
                                               rml_modules_folder_path=mapping_files_rml_modules_folder_path,
                                               output_rml_modules_folder_path=rml_modules_folder_path)
    mapping_suite_processor_inject_shacl_shapes(shacl_shape_file_path=shacl_shape_file_path,
                                                output_shacl_shape_folder_path=shacl_shape_injection_folder)
    mapping_suite_processor_inject_sparql_queries(sparql_queries_folder_path=sparql_queries_resources_folder_path,
                                                  output_sparql_queries_folder_path=sparql_queries_injection_folder)

    mapping_suite_processor_zip_package(mapping_suite_package_path=mapping_suite_package_path)
    mapping_suite_processor_zip_package(mapping_suite_package_path=mapping_suite_package_path, prod_version=True)


def mapping_suite_processor_load_package_in_mongo_db(mapping_suite_package_path: pathlib.Path,
                                                     mongodb_client: MongoClient,
                                                     load_test_data: bool = False):
    """
    This feature allows you to upload a mapping suite package to MongoDB.
    :param mapping_suite_package_path:
    :param mongodb_client:
    :param load_test_data:
    :return:
    """
    mapping_suite_repository_path = mapping_suite_package_path.parent
    mapping_suite_package_name = mapping_suite_package_path.name
    mapping_suite_repository_in_file_system = MappingSuiteRepositoryInFileSystem(
        repository_path=mapping_suite_repository_path)
    mapping_suite_in_memory = mapping_suite_repository_in_file_system.get(reference=mapping_suite_package_name)
    if load_test_data:
        tests_data = mapping_suite_in_memory.transformation_test_data.test_data
        notice_repository = NoticeRepository(mongodb_client=mongodb_client)
        for test_data in tests_data:
            notice_repository.add(notice=Notice(ted_id=test_data.file_name.split(".")[0],
                                                xml_manifestation=XMLManifestation(object_data=test_data.file_content)))
    mapping_suite_repository_mongo_db = MappingSuiteRepositoryMongoDB(mongodb_client=mongodb_client)
    mapping_suite_repository_mongo_db.add(mapping_suite=mapping_suite_in_memory)


def mapping_suite_processor_from_github_expand_and_load_package_in_mongo_db(mapping_suite_package_name: str,
                                                                            mongodb_client: MongoClient,
                                                                            load_test_data: bool = False):
    """
    This feature is intended to download a mapping_suite_package from GitHub and process it for upload to MongoDB.
    :param mapping_suite_package_name:
    :param mongodb_client:
    :param load_test_data:
    :return:
    """
    mapping_suite_package_downloader = GitHubMappingSuitePackageDownloader(
        github_repository_url=config.GITHUB_TED_SWS_ARTEFACTS_URL)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_dir_path = pathlib.Path(tmp_dir)
        mapping_suite_package_downloader.download(mapping_suite_package_name=mapping_suite_package_name,
                                                  output_mapping_suite_package_path=tmp_dir_path)
        mapping_suite_package_path = tmp_dir_path / mapping_suite_package_name
        mapping_suite_processor_expand_package(mapping_suite_package_path=mapping_suite_package_path)
        mapping_suite_processor_load_package_in_mongo_db(mapping_suite_package_path=mapping_suite_package_path,
                                                         mongodb_client=mongodb_client,
                                                         load_test_data=load_test_data)
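
# --- Usage sketch (package name and connection string are placeholders) ---
# One-call pipeline: download a mapping suite from the configured GitHub
# repository, expand it, and load it (plus its test notices) into MongoDB.
if __name__ == "__main__":
    mongodb_client = MongoClient("mongodb://localhost:27017/")
    mapping_suite_processor_from_github_expand_and_load_package_in_mongo_db(
        mapping_suite_package_name="package_F03",
        mongodb_client=mongodb_client,
        load_test_data=True,
    )
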
# tirkarthi/odin-ai

from __future__ import absolute_import, division, print_function

import os
import time

import numpy as np
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow import keras

from odin import visual as vs
from odin.bay import kl_divergence

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
sns.set()


# ===========================================================================
# Helper functions
# ===========================================================================
def minimize(loss_func, params, verbose=False, print_params=True, learning_rate=0.1, epochs=500):
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    benchmark = []
    history = []
    for i in range(epochs):
        start_time = time.time()
        with tf.GradientTape() as tape:
            tape.watch(params)
            loss = tf.reduce_mean(loss_func())
        grad = tape.gradient(loss, params)
        benchmark.append(time.time() - start_time)
        if verbose and (i + 1) % (epochs // 2) == 0:
            print("#%-4d Loss:%.4f (%.2f sec/100)" % (i + 1, loss, np.mean(benchmark) * 100))
            if print_params:
                for p in params:
                    print(' * %s: %s' % (p.name, str(p.numpy())))
        history.append([loss.numpy()] + [p.numpy() for p in params])
        opt.apply_gradients(grads_and_vars=zip(grad, params))
    return history


create_posterior = lambda: tfp.distributions.Normal(
    loc=tf.Variable(0., dtype='float32', trainable=True, name='loc'),
    scale=tf.Variable(1., dtype='float32', trainable=True, name='scale'),
    name='Normal')

# NOTE: it is important to spread the loc values wide enough to prevent mode
# collapse; however, the scale must be small enough not to explode the gradients
create_mixture_posterior = lambda n, loc_min=0, loc_max=100: \
    tfp.distributions.MixtureSameFamily(
        mixture_distribution=tfp.distributions.Categorical(probs=[1. / n] * n),
        components_distribution=tfp.distributions.Normal(
            loc=tf.Variable(np.linspace(loc_min, loc_max, n), dtype='float32',
                            trainable=True, name='loc'),
            scale=tf.Variable([1.] * n, dtype='float32', trainable=True, name='scale')))


def plot_posteriors(posterior, prior, n=1000):
    # this is a very hard-coded function
    plt.figure(figsize=(12, 8))
    sns.kdeplot(prior.sample(int(n)).numpy(), label="Prior")
    for post, analytic, reverse, sample_shape in posterior:
        sns.kdeplot(post.sample(int(n)).numpy(),
                    linestyle='-' if reverse else '--',
                    label='%s-%s mcmc:%d' % ('KL(q||p)' if reverse else 'KL(p||q)',
                                             'A' if analytic else 'S', sample_shape))


def plot_histories(posterior, histories):
    plt.figure(figsize=(24, 5))
    for idx, (post, analytic, reverse, sample_shape) in enumerate(posterior):
        ax = plt.subplot(1, len(posterior), idx + 1)
        hist = histories[idx]
        name = '%s-%s mcmc:%d' % ('KL(q||p)' if reverse else 'KL(p||q)',
                                  'A' if analytic else 'S', sample_shape)
        loc = np.asarray([i[1] for i in hist])
        plt.plot(loc, label='loc', linestyle='-' if reverse else '--')
        scale = np.asarray([i[2] for i in hist])
        plt.plot(scale, label='scale', linestyle='-' if reverse else '--')
        plt.legend()
        ax = ax.twinx()
        plt.plot([i[0] for i in hist], label='loss', color='r')
        plt.title(name)
    plt.tight_layout()


# ===========================================================================
# Can a deep network fix posterior mode collapse due to loc initialization?
# * An appropriate learning rate is essential
# * A high number of components helps, but not too high
# * A too-deep network will overfit to the first components
# * If the input features are useless, a deep network cannot help
# * Maximum likelihood might end up with more modes
# ===========================================================================
prior = tfp.distributions.MixtureSameFamily(
    mixture_distribution=tfp.distributions.Categorical(probs=[1.0 / 3] * 3),
    components_distribution=tfp.distributions.Normal(loc=[0, 25, 80], scale=[1, 12, 4]))
n_components = 3
X = np.zeros(shape=(1, n_components)).astype('float32')
X = np.linspace(0, 80, num=n_components, dtype='float32')[None, :]
# X = np.random.rand(1, 3).astype('float32')
outputs = {}
for reverse in (True, False):
    loc = keras.Sequential([
        keras.layers.Dense(16, activation='relu', input_shape=(n_components,)),
        keras.layers.Dense(n_components, activation='linear', input_shape=(n_components,)),
    ])
    scale = tf.Variable([1.] * n_components, dtype='float32', trainable=True, name='scale')
    history = minimize(
        lambda: kl_divergence(
            tfp.distributions.MixtureSameFamily(
                mixture_distribution=tfp.distributions.Categorical(
                    probs=[1. / n_components] * n_components),
                components_distribution=tfp.distributions.Normal(loc=loc(X), scale=scale)),
            prior,
            reverse=reverse,
            q_sample=100),
        params=loc.trainable_variables + [scale],
        verbose=True,
        print_params=False,
        learning_rate=0.01,
        epochs=1200)
    posterior = tfp.distributions.MixtureSameFamily(
        mixture_distribution=tfp.distributions.Categorical(
            probs=[1. / n_components] * n_components),
        components_distribution=tfp.distributions.Normal(loc=loc(X), scale=scale))
    outputs[reverse] = [posterior, history]

plt.figure(figsize=(18, 8))
plt.subplot(1, 2, 1)
sns.kdeplot(prior.sample(10000).numpy(), label='Prior')
sns.kdeplot(outputs[True][0].sample(10000).numpy().ravel(), label='Posterior-KL(q||p)')
sns.kdeplot(outputs[False][0].sample(10000).numpy().ravel(), label='Posterior-KL(p||q)', linestyle='--')
plt.legend()
ax = plt.subplot(1, 2, 2)
l1 = plt.plot([i[0] for i in outputs[True][1]], label='KL(q||p)')
ax.twinx()
l2 = plt.plot([i[0] for i in outputs[False][1]], label='KL(p||q)', linestyle='--')
plt.title("KL loss")
plt.legend(handles=[l1[0], l2[0]])

# ===========================================================================
# Mixture with Mixture Posterior
# ===========================================================================
prior = tfp.distributions.MixtureSameFamily(
    mixture_distribution=tfp.distributions.Categorical(probs=[1.0 / 3] * 3),
    components_distribution=tfp.distributions.Normal(loc=[0, 32, 80], scale=[1, 12, 4]))
for n in [2, 3, 5]:
    # analytic, reverse, nmcmc
    posterior = [
        (create_mixture_posterior(n=n), False, True, 1),
        (create_mixture_posterior(n=n), False, False, 1),
        (create_mixture_posterior(n=n), False, True, 100),
        (create_mixture_posterior(n=n), False, False, 100),
    ]
    histories = []
    for post, analytic, reverse, sample_shape in posterior:
        print("Training:", analytic, reverse, sample_shape)
        h = minimize(lambda: kl_divergence(q=post, p=prior, analytic=analytic,
                                           reverse=reverse, q_sample=sample_shape),
                     [post.components_distribution.loc,
                      post.components_distribution.scale],
                     verbose=False)
        histories.append(h)
    # for a more complicated distribution, more samples are needed
    plot_posteriors(posterior, prior, n=10000)
    plt.title("Prior:3-mixture Posterior:%d-mixture" % n)
    plot_histories(posterior, histories)
vs.plot_save()
exit()

# ===========================================================================
# Mixture with Normal Posterior
# ===========================================================================
prior = tfp.distributions.MixtureSameFamily(
    mixture_distribution=tfp.distributions.Categorical(probs=[0.5, 0.5]),
    components_distribution=tfp.distributions.Normal(loc=[2, 20], scale=[1, 4]))
posterior = [
    (create_posterior(), False, True, 1),  # analytic, reverse, nmcmc
    (create_posterior(), False, False, 1),
    (create_posterior(), False, True, 100),
    (create_posterior(), False, False, 100),
]
histories = []
for post, analytic, reverse, sample_shape in posterior:
    print("Training:", analytic, reverse, sample_shape)
    h = minimize(lambda: kl_divergence(q=post, p=prior, analytic=analytic,
                                       reverse=reverse, q_sample=sample_shape),
                 [post.loc, post.scale], verbose=False)
    histories.append(h)
plot_posteriors(posterior, prior)
plt.title("Prior:2-mixture Posterior:Normal")
plot_histories(posterior, histories)

# ===========================================================================
# Simple distribution
# ===========================================================================
prior = tfp.distributions.Normal(loc=8, scale=12)
posterior = [
    (create_posterior(), True, True, 1),  # analytic, reverse, nmcmc
    (create_posterior(), True, False, 1),
    (create_posterior(), False, True, 1),
    (create_posterior(), False, True, 100),
    (create_posterior(), False, False, 1),
    (create_posterior(), False, False, 100)
]
histories = []
for post, analytic, reverse, sample_shape in posterior:
    print("Training:", analytic, reverse, sample_shape)
    h = minimize(lambda: kl_divergence(q=post, p=prior, analytic=analytic,
                                       reverse=reverse, q_sample=sample_shape),
                 [post.loc, post.scale], verbose=False)
    histories.append(h)
plot_posteriors(posterior, prior)
plt.title("Prior:Normal Posterior:Normal")
plot_histories(posterior, histories)
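
# --- Illustrative addition (not part of the original experiments) ---
# One un-optimized evaluation of each KL direction for a fresh posterior
# against the Normal prior above, using the same kl_divergence helper that
# minimize() wraps; like the sections below exit(), this is unreachable
# when the script is run top-to-bottom.
q_check = create_posterior()
for reverse_direction in (True, False):
    loss = tf.reduce_mean(kl_divergence(q=q_check, p=prior, analytic=False,
                                        reverse=reverse_direction, q_sample=100))
    print("reverse=%s  sampled KL estimate=%.4f" % (reverse_direction, float(loss)))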
# coding: utf-8
from __future__ import unicode_literals

import unittest
import responses

from admitad.items import Coupons, CouponsForWebsite, CouponsCategories
from admitad.tests.base import BaseTestCase


class CouponsTestCase(BaseTestCase):

    def test_get_coupons_request(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(Coupons.URL, params={
                    'campaign': [1, 5, 6],
                    'campaign_category': [11, 12],
                    'category': [22, 23],
                    'type': 'some',
                    'limit': 10,
                    'offset': 0,
                    'order_by': ['name', '-rating']
                }),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.Coupons.get(
                campaign=[1, 5, 6], campaign_category=[11, 12],
                category=[22, 23], type='some', limit=10, offset=0,
                order_by=['name', '-rating'])
            self.assertIn('status', result)

    def test_get_coupons_request_with_id(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(Coupons.SINGLE_URL, coupon_id=42),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.Coupons.getOne(42)
            self.assertIn('status', result)


class CouponsForWebsiteTestCase(BaseTestCase):

    def test_get_coupons_for_website_request(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(CouponsForWebsite.URL, website_id=1, params={
                    'campaign': [1, 5, 6],
                    'campaign_category': [11, 12],
                    'category': [22, 23],
                    'type': 'some',
                    'limit': 10,
                    'offset': 0,
                    'order_by': ['name', '-rating']
                }),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.CouponsForWebsite.get(
                1, campaign=[1, 5, 6], campaign_category=[11, 12],
                category=[22, 23], type='some', limit=10, offset=0,
                order_by=['name', '-rating'])
            self.assertIn('status', result)

    def test_get_coupons_for_website_request_with_id(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(CouponsForWebsite.SINGLE_URL, website_id=10, campaign_id=20),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.CouponsForWebsite.getOne(10, 20)
            self.assertIn('status', result)


class CouponsCategoriesTestCase(BaseTestCase):

    def test_get_categories_request(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(CouponsCategories.URL, params={
                    'limit': 10,
                    'offset': 0
                }),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.CouponsCategories.get(limit=10, offset=0)
            self.assertIn('status', result)

    def test_get_category_with_id_request(self):
        with responses.RequestsMock() as resp:
            resp.add(
                resp.GET,
                self.prepare_url(CouponsCategories.SINGLE_URL, coupon_category_id=200),
                match_querystring=True,
                json={'status': 'ok'},
                status=200
            )
            result = self.client.CouponsCategories.getOne(200)
            self.assertIn('status', result)


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
import optparse

from rq import use_connection, Queue
from rq import dummy


def parse_args():
    parser = optparse.OptionParser()
    parser.add_option('-n', '--count', type='int', dest='count', default=1)
    opts, args = parser.parse_args()
    return (opts, args, parser)


def main():
    import sys
    sys.path.insert(0, '.')

    opts, args, parser = parse_args()

    use_connection()

    queues = ('default', 'high', 'low')

    sample_calls = [
        (dummy.do_nothing, [], {}),
        (dummy.sleep, [1], {}),
        (dummy.fib, [8], {}),           # normal result
        (dummy.fib, [24], {}),          # takes pretty long
        (dummy.div_by_zero, [], {}),    # 5 / 0 => div by zero exc
        (dummy.random_failure, [], {}), # simulate random failure (handy for requeue testing)
    ]

    for i in range(opts.count):
        import random
        f, args, kwargs = random.choice(sample_calls)
        q = Queue(random.choice(queues))
        q.enqueue(f, *args, **kwargs)

    # q = Queue('foo')
    # q.enqueue(do_nothing)
    # q.enqueue(sleep, 3)
    # q = Queue('bar')
    # q.enqueue(yield_stuff)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)
    # q.enqueue(do_nothing)


if __name__ == '__main__':
    main()
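
# --- Companion sketch (not part of the original example script) ---
# Drains the queues that main() fills; equivalent to running
# `rq worker default high low` from the command line. Building Queue
# objects explicitly keeps this compatible with the legacy use_connection()
# style the script already uses.
def drain_queues():
    from rq import Worker
    use_connection()
    Worker([Queue(name) for name in ('default', 'high', 'low')]).work(burst=True)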
import json, os, signal, time, traceback, shutil
import curses
import cbpro
import multiprocessing

from pygotrader import arguments, config, order_handler, pygo_order_book, tui, algorithm_handler
from pkg_resources import Requirement, resource_filename
# from profiling.tracing import TracingProfiler


class CustomExit(Exception):
    """Custom class to exit program"""
    pass


def signal_handler(signum, frame):
    """Wrapper to handle break signals from the user"""
    # print('Caught signal %d' % signum)
    raise CustomExit


def pause():
    """Lazy wrapper for a cli pause ¯\_(ツ)_/¯ """
    program_pause = input("Press the <ENTER> key to continue...")


def create_namespace(my_manager):
    """Initialize and construct the shared namespace

    As mentioned elsewhere, lists and dicts need to be created from the
    Manager class, otherwise updates to them won't propagate. Remember,
    this is more of a helper class, not one to do serious work with.
    Operations can be expensive. And custom classes can't be used.

    Order format for placement into the buy and sell order queue lists:
    {'order':'buy','type':'market','product':'BTC','size':0.1,'price':1.00}

    Order format for placement into the cancel order queue lists:
    {'order':'cancel','order_id':1111111}
    """
    ns = my_manager.Namespace()
    ns.exchange_order_matches = my_manager.list()
    ns.buy_order_queue = my_manager.list()
    ns.sell_order_queue = my_manager.list()
    ns.cancel_order_queue = my_manager.list()
    ns.my_orders = my_manager.dict()
    ns.last_match = 0.00
    ns.highest_bid = 0.00
    ns.lowest_ask = 0.00
    ns.message = ''
    ns.ui_asks = my_manager.list()
    ns.ui_bids = my_manager.list()
    for x in range(0, 10):
        ns.ui_asks.insert(x, {'price': 0.00, 'depth': 0.00})
        ns.ui_bids.insert(x, {'price': 0.00, 'depth': 0.00})
    return ns


def main():
    """Entry point for the program

    Create the objects that are going to run the various parts of the
    program. Threads/processes are not directly created here. That's been
    left for the individual classes.

    Variables of note:
    ns - a multiprocessing.Manager.Namespace for sharing data between
    threads and processes

    Objects of note:
    MyConfig - loads and stores arguments passed in as well as user config
    information stored in outside files
    PygoOrderBook - the order book that uses a websocket to pull data from
    the exchange. Does not actually place orders.
    AuthenticatedClient - this has the secrets for the user loaded from the
    external config files. This is the class that actually places the
    orders that are organized and called by the OrderHandler class.
    OrderHandler - separate-running process to place and cancel orders
    through functions in the AuthenticatedClient class.

    TODO:
    - Add the ability to create an external config class via an install
      function or some kind of config function within the tui or cli
    - Add the ability to export data in real-time to a database
    """
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    """
    The multiprocessing Manager namespace seems like the easiest/least-risky
    way of sharing data across processes. However, it requires use of dicts
    and lists from the Manager class.

    Important notes:
    1) Operations on this data structure can be expensive. Reads are cheap,
    and simple writes aren't too bad, but, for example, deleting a
    Manager.list() data structure and creating a new one can take a few
    hundredths of a second. It's better to do work on a local data structure
    and then copy it over to the shared namespace.
    2) Namespaces don't work with deep data structures, i.e. a custom class.
    You can do a dict inside a list, or vice-versa, but that's about it.
    """
    my_manager = multiprocessing.Manager()
    ns = create_namespace(my_manager)

    view_mode = True

    try:
        # profiler = TracingProfiler()
        # profiler.start()
        argument_parser = arguments.create_parser()
        args = argument_parser.parse_args()
        if args.config:
            view_mode = False
        my_config = config.MyConfig(exchange=args.exchange, product=args.product)
        if not view_mode:
            my_config.load_config(args.config)
            my_authenticated_client = my_config.get_coinbase_authenticated_client()
            if not os.path.exists(args.algorithm_file):
                filename = resource_filename(Requirement.parse("pygotrader"), "pygotrader/algorithm.py")
                print(f"Copying sample algorithm file to ./algorithm.py...")
                shutil.copyfile(filename, "./algorithm.py")
                time.sleep(2)
            my_order_handler = order_handler.OrderHandler(my_authenticated_client, ns)
            my_order_handler.start()
        else:
            my_authenticated_client = None
            my_order_handler = None
            ns.message = 'Running in view mode'

        my_order_book = pygo_order_book.PygoOrderBook(ns, product_id=my_config.product, url=my_config.websocket_url)
        my_order_book.start()
        while not my_order_book.has_started:
            time.sleep(0.1)

        mytui = tui.Menu(ns, my_order_book, my_authenticated_client, my_order_handler,
                         algorithm_file=args.algorithm_file)
        curses.wrapper(mytui.start)

    except CustomExit:
        my_order_book.close()
        if not view_mode:
            my_order_handler.close()
        # profiler.stop()
        # profiler.run_viewer()

    except Exception as e:
        print(traceback.format_exc())


if __name__ == "__main__":
    main()
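
# --- Illustrative producer (not part of the original module) ---
# Pushes one order onto the shared queue using exactly the dict format
# documented in create_namespace(); the OrderHandler process is the consumer.
def queue_sample_buy(ns):
    ns.buy_order_queue.append(
        {'order': 'buy', 'type': 'market', 'product': 'BTC',
         'size': 0.1, 'price': 1.00})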
""" Encapsulates external dependencies to retrieve hardware metadata """ import logging from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Dict, Iterable, List, Optional from codecarbon.core.gpu import get_gpu_details from codecarbon.core.units import Power logger = logging.getLogger(__name__) @dataclass class BaseHardware(ABC): @property @abstractmethod def total_power(self) -> Power: pass @dataclass class GPU(BaseHardware): num_gpus: int gpu_ids: Optional[List] def __repr__(self) -> str: return super().__repr__() + " ({})".format( ", ".join([d["name"] for d in get_gpu_details()]) ) def _get_power_for_gpus(self, gpu_ids: Iterable[int]) -> Power: """ Get total power consumed by specific GPUs identified by `gpu_ids` :param gpu_ids: :return: """ all_gpu_details: List[Dict] = get_gpu_details() return Power.from_milli_watts( sum( [ gpu_details["power_usage"] for idx, gpu_details in enumerate(all_gpu_details) if idx in gpu_ids ] ) ) @property def total_power(self) -> Power: if self.gpu_ids is not None: gpu_ids = self.gpu_ids assert set(gpu_ids).issubset( set(range(self.num_gpus)) ), f"Unknown GPU ids {gpu_ids}" else: gpu_ids = set(range(self.num_gpus)) return self._get_power_for_gpus(gpu_ids=gpu_ids) @classmethod def from_utils(cls, gpu_ids: Optional[List] = None) -> "GPU": return cls(num_gpus=len(get_gpu_details()), gpu_ids=gpu_ids) @dataclass class CPU(BaseHardware): @property def total_power(self) -> Power: pass
# Samteymoori/pepper

from build import PEPPER
from collections import defaultdict


def pileup_from_reads(reference, start_pos, end_pos, reads):
    longest_insert_count = defaultdict(int)
    insert_dict = defaultdict(lambda: defaultdict(int))
    base_dict = defaultdict(lambda: defaultdict(int))
    read_start_pos = defaultdict(int)
    read_end_pos = defaultdict(int)
    query_names = list()

    for read in reads:
        read_start_pos[read.query_name] = read.pos
        read_end_pos[read.query_name] = read.pos_end
        cigar_tuples = read.cigar_tuples
        read_index = 0
        reference_position = read.pos
        query_names.append(read.query_name)

        for cigar_tup in cigar_tuples:
            # match
            if cigar_tup.cigar_op == 0:
                for i in range(cigar_tup.cigar_len):
                    base_dict[read.query_name][reference_position] = read.sequence[read_index]
                    read_index += 1
                    reference_position += 1
            # insert
            if cigar_tup.cigar_op == 1:
                longest_insert_count[reference_position] = max(longest_insert_count[reference_position],
                                                               cigar_tup.cigar_len)
                in_allele = ""
                for i in range(cigar_tup.cigar_len):
                    in_allele += read.sequence[read_index]
                    read_index += 1
                insert_dict[read.query_name][reference_position] = in_allele
            # delete
            if cigar_tup.cigar_op == 2:
                for i in range(cigar_tup.cigar_len):
                    base_dict[read.query_name][reference_position] = '*'
                    reference_position += 1
            # soft-clip
            if cigar_tup.cigar_op == 4:
                read_index += cigar_tup.cigar_len

    for i in range(start_pos, end_pos):
        ref_base = reference[i - start_pos]
        print(ref_base, end='')
        if longest_insert_count[i]:
            print("*" * longest_insert_count[i], end='')
    print()

    for read_name in query_names:
        for i in range(start_pos, end_pos):
            # if read_start_pos[read_name] < i:
            #     print(' ', end='')
            #     continue
            # if read_end_pos[read_name] < i:
            #     break
            if base_dict[read_name][i]:
                read_base = base_dict[read_name][i]
            else:
                read_base = ' '
            print(read_base, end='')
            if insert_dict[read_name][i] and i < read_end_pos[read_name]:
                print(insert_dict[read_name][i], end='')
                star_needed = longest_insert_count[i] - int(len(insert_dict[read_name][i]))
                if star_needed > 0:
                    print("*" * star_needed, end='')
            elif longest_insert_count[i] and i < read_end_pos[read_name]:
                print("*" * longest_insert_count[i], end='')
        print(read_name)
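
# --- Hypothetical invocation (left commented out: the read objects must come
# from PEPPER's own BAM bindings, and the handler/method names below are
# illustrative assumptions, not a confirmed API) ---
# bam = PEPPER.BAM_handler("reads.bam")
# reads = bam.get_reads("chr1", 100000, 100500)
# pileup_from_reads(ref_seq, 100000, 100500, reads)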
# thismachinekillszombies/game_machine

from .vector_sprite import VectorSprite
from pygame import Rect


class Rectangle(VectorSprite):

    def __init__(self, coords, size, visible=True, interactive=True,
                 line_colour=(0, 0, 0, 255), line_width=1,
                 fill_colour=(255, 255, 255, 255)):
        super(Rectangle, self).__init__(coords, visible, interactive, line_colour, line_width, fill_colour)
        self._size = (size[0] + line_width, size[1] + line_width)

    def over(self, coords):
        return self.in_bounds(coords)

    def fill_surface(self):
        super(Rectangle, self).fill_surface()
        if self._fill_colour is not None:
            self._surface.fill(self._fill_colour)
        if self._line_width > 0 and self._line_colour is not None:
            self._surface.fill(self._line_colour, Rect(0, 0, self._size[0], self._line_width))
            self._surface.fill(self._line_colour, Rect(0, 0, self._line_width, self._size[1]))
            self._surface.fill(self._line_colour, Rect(self._size[0] - self._line_width, 0, self._line_width, self._size[1]))
            self._surface.fill(self._line_colour, Rect(0, self._size[1] - self._line_width, self._size[0], self._line_width))

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, size):
        if size is None:
            self._size = None
        else:
            self._size = (size[0] + self._line_width, size[1] + self._line_width)
        self._surface = None

    @property
    def height(self):
        return self._size[1]

    @height.setter
    def height(self, height):
        self.size = [self.width, height]

    @property
    def width(self):
        return self._size[0]

    @width.setter
    def width(self, width):
        self.size = [width, self.height]
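
# --- Usage sketch (left commented out: the VectorSprite base class must
# allocate the backing surface before fill_surface() can run) ---
# Note a quirk of the setters: the size/width/height getters already include
# line_width, and the size setter adds line_width again, so repeated
# assignments grow the rectangle by one border width each time.
# r = Rectangle((10, 10), (100, 50), line_width=2,
#               fill_colour=(200, 200, 200, 255))
# print(r.size)   # -> (102, 52)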
# IanShoe/serverless-leo


class Config:
    LOG_NAME = 'leosdk'
    LOG_LEVEL = 'DEBUG'
    LOG_FORMAT = '%(asctime)s %(name)s [%(levelname)s] %(message)s'
    BUS_REGION = 'us-west-2'
    AWS_PROFILE = 'leo_test'
    WRITER = 'STREAM'
    # WRITER = 'STORAGE'
    # WRITER = 'BATCH'


class DevBus(Config):
    REGION = 'us-west-2'
    ARCHIVE = 'DevBus-LeoArchive-XYZABC456123'
    CRON = 'DevBus-LeoCron-XYZABC456123'
    EVENT = 'DevBus-LeoEvent-XYZABC456123'
    STREAM = 'DevBus-LeoKinesisStream-XYZABC456123'
    STREAM_MAX_RECORD_SIZE = 1024 * 900
    STREAM_MAX_BATCH_SIZE = 1024 * 900
    STREAM_MAX_BATCH_RECORDS = 1000
    STREAM_MAX_BATCH_AGE = 200
    STREAM_MAX_UPLOAD_ATTEMPTS = 10
    BATCH = 'DevBus-LeoFirehoseStream-XYZABC456123'
    BATCH_MAX_RECORD_SIZE = 1024 * 4900
    BATCH_MAX_SIZE = 1024 * 3900
    BATCH_MAX_RECORDS = 1000
    BATCH_MAX_AGE = 60
    BATCH_MAX_UPLOAD_ATTEMPTS = 10
    STORAGE = 'devbus-leos3-xyzabc456123'
    STORAGE_MAX_RECORD_SIZE = 1024 * 4900
    STORAGE_MAX_BATCH_SIZE = 1024 * 3900
    STORAGE_MAX_BATCH_RECORDS = 4000
    STORAGE_MAX_BATCH_AGE = 6000
    STORAGE_MAX_UPLOAD_ATTEMPTS = 10
    SETTINGS = 'DevBus-LeoSettings-XYZABC456123'
    SYSTEM = 'DevBus-LeoSystem-XYZABC456123'


class ProdBus(Config):
    REGION = 'us-west-2'
    ARCHIVE = 'DevBus-LeoArchive-XYZABC456123'
    CRON = 'DevBus-LeoCron-XYZABC456123'
    EVENT = 'DevBus-LeoEvent-XYZABC456123'
    STREAM = 'DevBus-LeoKinesisStream-XYZABC456123'
    STREAM_MAX_RECORD_SIZE = 1024 * 900
    STREAM_MAX_BATCH_SIZE = 1024 * 900
    STREAM_MAX_BATCH_RECORDS = 1000
    STREAM_MAX_BATCH_AGE = 200
    STREAM_MAX_UPLOAD_ATTEMPTS = 10
    BATCH = 'DevBus-LeoFirehoseStream-XYZABC456123'
    BATCH_MAX_RECORD_SIZE = 1024 * 4900
    BATCH_MAX_SIZE = 1024 * 3900
    BATCH_MAX_RECORDS = 1000
    BATCH_MAX_AGE = 600
    BATCH_MAX_UPLOAD_ATTEMPTS = 10
    STORAGE = 'devbus-leos3-xyzabc456123'
    STORAGE_MAX_RECORD_SIZE = 1024 * 4900
    STORAGE_MAX_BATCH_SIZE = 1024 * 3900
    STORAGE_MAX_BATCH_RECORDS = 4000
    STORAGE_MAX_BATCH_AGE = 6000
    STORAGE_MAX_UPLOAD_ATTEMPTS = 10
    SETTINGS = 'DevBus-LeoSettings-XYZABC456123'
    SYSTEM = 'DevBus-LeoSystem-XYZABC456123'
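
# --- Illustrative helper (not part of the original config) ---
# Pick a bus configuration by environment name; note that, as committed,
# ProdBus still points at the DevBus-* resource names and differs from
# DevBus only in BATCH_MAX_AGE (600 vs 60).
def bus_for(env: str):
    return ProdBus if env == 'prod' else DevBus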
<filename>Python/libraries/recognizers-date-time/recognizers_date_time/resources/english_date_time.py # ------------------------------------------------------------------------------ # <auto-generated> # This code was generated by a tool. # Changes to this file may cause incorrect behavior and will be lost if # the code is regenerated. # </auto-generated> # # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # ------------------------------------------------------------------------------ from .base_date_time import BaseDateTime # pylint: disable=line-too-long class EnglishDateTime: LangMarker = 'Eng' CheckBothBeforeAfter = False TillRegex = f'(?<till>\\b(to|(un)?till?|thru|through)\\b(\\s+the\\b)?|{BaseDateTime.RangeConnectorSymbolRegex})' RangeConnectorRegex = f'(?<and>\\b(and|through|to)\\b(\\s+the\\b)?|{BaseDateTime.RangeConnectorSymbolRegex})' LastNegPrefix = f'(?<!(w(ill|ould|on\\s*\'\\s*t)|m(ay|ight|ust)|sh(all|ould(n\\s*\'\\s*t)?)|c(an(\\s*\'\\s*t|not)?|ould(n\\s*\'\\s*t)?))(\\s+not)?\\s+)' RelativeRegex = f'\\b(?<order>following|next|(up)?coming|this|{LastNegPrefix}last|past|previous|current|the)\\b' StrictRelativeRegex = f'\\b(?<order>following|next|(up)?coming|this|{LastNegPrefix}last|past|previous|current)\\b' UpcomingPrefixRegex = f'((this\\s+)?((up)?coming))' NextPrefixRegex = f'\\b(following|next|{UpcomingPrefixRegex})\\b' AfterNextSuffixRegex = f'\\b(after\\s+(the\\s+)?next)\\b' PastPrefixRegex = f'((this\\s+)?past)\\b' PreviousPrefixRegex = f'({LastNegPrefix}last|previous|{PastPrefixRegex})\\b' ThisPrefixRegex = f'(this|current)\\b' RangePrefixRegex = f'(from|between)' CenturySuffixRegex = f'(^century)\\b' ReferencePrefixRegex = f'(that|same)\\b' FutureSuffixRegex = f'\\b(in\\s+the\\s+)?(future|hence)\\b' DayRegex = f'(the\\s*)?(?<!(\\d+:?|\\$)\\s*)(?<day>(?:3[0-1]|[1-2]\\d|0?[1-9])(?:th|nd|rd|st)?)(?=\\b|t)' ImplicitDayRegex = f'(the\\s*)?(?<day>(?:3[0-1]|[0-2]?\\d)(?:th|nd|rd|st))\\b' MonthNumRegex = f'(?<month>1[0-2]|(0)?[1-9])\\b' WrittenOneToNineRegex = f'(?:one|two|three|four|five|six|seven|eight|nine)' WrittenElevenToNineteenRegex = f'(?:eleven|twelve|(?:thir|four|fif|six|seven|eigh|nine)teen)' WrittenTensRegex = f'(?:ten|twenty|thirty|fou?rty|fifty|sixty|seventy|eighty|ninety)' WrittenNumRegex = f'(?:{WrittenOneToNineRegex}|{WrittenElevenToNineteenRegex}|{WrittenTensRegex}(\\s+{WrittenOneToNineRegex})?)' WrittenCenturyFullYearRegex = f'(?:(one|two)\\s+thousand(\\s+and)?(\\s+{WrittenOneToNineRegex}\\s+hundred(\\s+and)?)?)' WrittenCenturyOrdinalYearRegex = f'(?:twenty(\\s+(one|two))?|ten|eleven|twelve|thirteen|fifteen|eigthteen|(?:four|six|seven|nine)(teen)?|one|two|three|five|eight)' CenturyRegex = f'\\b(?<century>{WrittenCenturyFullYearRegex}|{WrittenCenturyOrdinalYearRegex}(\\s+hundred)?(\\s+and)?)\\b' LastTwoYearNumRegex = f'(?:zero\\s+{WrittenOneToNineRegex}|{WrittenElevenToNineteenRegex}|{WrittenTensRegex}(\\s+{WrittenOneToNineRegex})?)' FullTextYearRegex = f'\\b((?<firsttwoyearnum>{CenturyRegex})\\s+(?<lasttwoyearnum>{LastTwoYearNumRegex})\\b|\\b(?<firsttwoyearnum>{WrittenCenturyFullYearRegex}|{WrittenCenturyOrdinalYearRegex}\\s+hundred(\\s+and)?))\\b' OclockRegex = f'(?<oclock>o\\s*((’|‘|\')\\s*)?clock|sharp)' SpecialDescRegex = f'((?<ipm>)p\\b)' AmDescRegex = f'(?:{BaseDateTime.BaseAmDescRegex})' PmDescRegex = f'(:?{BaseDateTime.BasePmDescRegex})' AmPmDescRegex = f'(:?{BaseDateTime.BaseAmPmDescRegex})' DescRegex = 
f'(:?(:?({OclockRegex}\\s+)?(?<desc>({AmPmDescRegex}|{AmDescRegex}|{PmDescRegex}|{SpecialDescRegex})))|{OclockRegex})' OfPrepositionRegex = f'(\\bof\\b)' TwoDigitYearRegex = f'\\b(?<![$])(?<year>([0-9]\\d))(?!(\\s*((\\:\\d)|{AmDescRegex}|{PmDescRegex}|\\.\\d)))\\b' YearRegex = f'(?:{BaseDateTime.FourDigitYearRegex}|{FullTextYearRegex})' WeekDayRegex = f'\\b(?<weekday>(?:sun|mon|tues?|thurs?|fri)(day)?|thu|wedn(esday)?|weds?|sat(urday)?)s?\\b' SingleWeekDayRegex = f'\\b(?<weekday>(?<!(easter|palm)\\s+)sunday|(?<!easter\\s+)saturday|(?<!(easter|cyber)\\s+)monday|mon|(?<!black\\s+)friday|fri|(?:tues?|thurs?)(day)?|thu|wedn(esday)?|weds?|((?<=on\\s+)(sat|sun)))\\b' RelativeMonthRegex = f'(?<relmonth>((day\\s+)?of\\s+)?{RelativeRegex}\\s+month)\\b' MonthRegex = f'\\b(?<month>apr(il)?|aug(ust)?|dec(ember)?|feb(ruary)?|jan(uary)?|july?|june?|mar(ch)?|may|nov(ember)?|oct(ober)?|sept(ember)?|sep)(?!\\p{{L}})' WrittenMonthRegex = f'(((the\\s+)?month of\\s+)?{MonthRegex})' MonthSuffixRegex = f'(?<msuf>(?:(in|of|on)\\s+)?({RelativeMonthRegex}|{WrittenMonthRegex}))' DateUnitRegex = f'(?<unit>decades?|years?|months?|weeks?|(?<business>(business\\s+|week\\s*))?days?|fortnights?|weekends?|(?<=\\s+\\d{{1,4}})[ymwd])\\b' DateTokenPrefix = 'on ' TimeTokenPrefix = 'at ' TokenBeforeDate = 'on ' TokenBeforeTime = 'at ' HalfTokenRegex = f'^(half)' QuarterTokenRegex = f'^((a\\s+)?quarter)' ThreeQuarterTokenRegex = f'^(three\\s+quarters?)' ToTokenRegex = f'\\b(to)$' FromRegex = f'\\b(from(\\s+the)?)$' BetweenTokenRegex = f'\\b(between(\\s+the)?)$' SimpleCasesRegex = f'\\b({RangePrefixRegex}\\s+)?({DayRegex})\\s*{TillRegex}\\s*({DayRegex}\\s+{MonthSuffixRegex}|{MonthSuffixRegex}\\s+{DayRegex})((\\s+|\\s*,\\s*){YearRegex})?\\b' MonthFrontSimpleCasesRegex = f'\\b({RangePrefixRegex}\\s+)?{MonthSuffixRegex}\\s+((from)\\s+)?({DayRegex})\\s*{TillRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*){YearRegex})?\\b' MonthFrontBetweenRegex = f'\\b{MonthSuffixRegex}\\s+(between\\s+)({DayRegex})\\s*{RangeConnectorRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*){YearRegex})?\\b' BetweenRegex = f'\\b(between\\s+)({DayRegex})\\s*{RangeConnectorRegex}\\s*({DayRegex})\\s+{MonthSuffixRegex}((\\s+|\\s*,\\s*){YearRegex})?\\b' MonthWithYear = f'\\b(({WrittenMonthRegex}[\\.]?(\\s*)[/\\\\\\-\\.,]?(\\s+(of|in))?(\\s*)({YearRegex}|(?<order>following|next|last|this)\\s+year))|(({YearRegex}|(?<order>following|next|last|this)\\s+year)(\\s*),?(\\s*){WrittenMonthRegex}))\\b' SpecialYearPrefixes = f'(calendar|(?<special>fiscal|school))' OneWordPeriodRegex = f'\\b((((the\\s+)?month of\\s+)?({StrictRelativeRegex}\\s+)?{MonthRegex})|(month|year) to date|(?<toDate>((un)?till?|to)\\s+date)|({RelativeRegex}\\s+)?(my\\s+)?((?<business>working\\s+week|workweek)|week(end)?|month|fortnight|(({SpecialYearPrefixes}\\s+)?year))(?!((\\s+of)?\\s+\\d+(?!({BaseDateTime.BaseAmDescRegex}|{BaseDateTime.BasePmDescRegex}))|\\s+to\\s+date))(\\s+{AfterNextSuffixRegex})?)\\b' MonthNumWithYear = f'\\b(({BaseDateTime.FourDigitYearRegex}(\\s*)[/\\-\\.](\\s*){MonthNumRegex})|({MonthNumRegex}(\\s*)[/\\-](\\s*){BaseDateTime.FourDigitYearRegex}))\\b' WeekOfMonthRegex = f'\\b(?<wom>(the\\s+)?(?<cardinal>first|1st|second|2nd|third|3rd|fourth|4th|fifth|5th|last)\\s+week\\s+{MonthSuffixRegex}(\\s+{BaseDateTime.FourDigitYearRegex}|{RelativeRegex}\\s+year)?)\\b' WeekOfYearRegex = f'\\b(?<woy>(the\\s+)?(?<cardinal>first|1st|second|2nd|third|3rd|fourth|4th|fifth|5th|last)\\s+week(\\s+of)?\\s+({YearRegex}|{RelativeRegex}\\s+year))\\b' FollowedDateUnit = f'^\\s*{DateUnitRegex}' NumberCombinedWithDateUnit = 
f'\\b(?<num>\\d+(\\.\\d*)?){DateUnitRegex}' QuarterTermRegex = f'\\b(((?<cardinal>first|1st|second|2nd|third|3rd|fourth|4th)[ -]+quarter)|(q(?<number>[1-4])))\\b' RelativeQuarterTermRegex = f'\\b(?<orderQuarter>{StrictRelativeRegex})\\s+quarter\\b' QuarterRegex = f'((the\\s+)?{QuarterTermRegex}(?:((\\s+of)?\\s+|\\s*[,-]\\s*)({YearRegex}|{RelativeRegex}\\s+year))?)|{RelativeQuarterTermRegex}' QuarterRegexYearFront = f'(?:{YearRegex}|{RelativeRegex}\\s+year)(\'s)?(?:\\s*-\\s*|\\s+(the\\s+)?)?{QuarterTermRegex}' HalfYearTermRegex = f'(?<cardinal>first|1st|second|2nd)\\s+half' HalfYearFrontRegex = f'(?<year>((1[5-9]|20)\\d{{2}})|2100)(\\s*-\\s*|\\s+(the\\s+)?)?h(?<number>[1-2])' HalfYearBackRegex = f'(the\\s+)?(h(?<number>[1-2])|({HalfYearTermRegex}))(\\s+of|\\s*,\\s*)?\\s+({YearRegex})' HalfYearRelativeRegex = f'(the\\s+)?{HalfYearTermRegex}(\\s+of|\\s*,\\s*)?\\s+({RelativeRegex}\\s+year)' AllHalfYearRegex = f'({HalfYearFrontRegex})|({HalfYearBackRegex})|({HalfYearRelativeRegex})' EarlyPrefixRegex = f'\\b(?<EarlyPrefix>early|beginning of|start of|(?<RelEarly>earlier(\\s+in)?))\\b' MidPrefixRegex = f'\\b(?<MidPrefix>mid-?|middle of)\\b' LaterPrefixRegex = f'\\b(?<LatePrefix>late|end of|(?<RelLate>later(\\s+in)?))\\b' PrefixPeriodRegex = f'({EarlyPrefixRegex}|{MidPrefixRegex}|{LaterPrefixRegex})' PrefixDayRegex = f'\\b((?<EarlyPrefix>earl(y|ier))|(?<MidPrefix>mid(dle)?)|(?<LatePrefix>later?))(\\s+in)?(\\s+the\\s+day)?$' SeasonDescRegex = f'(?<seas>spring|summer|fall|autumn|winter)' SeasonRegex = f'\\b(?<season>({PrefixPeriodRegex}\\s+)?({RelativeRegex}\\s+)?{SeasonDescRegex}((\\s+of|\\s*,\\s*)?\\s+({YearRegex}|{RelativeRegex}\\s+year))?)\\b' WhichWeekRegex = f'\\b(week)(\\s*)(?<number>5[0-3]|[1-4]\\d|0?[1-9])\\b' WeekOfRegex = f'(the\\s+)?((week)(\\s+(of|(commencing|starting|beginning)(\\s+on)?))|w/c)(\\s+the)?' 
MonthOfRegex = f'(month)(\\s*)(of)'
DateYearRegex = f'(?<year>{BaseDateTime.FourDigitYearRegex}|(?<!,\\s?){TwoDigitYearRegex}|{TwoDigitYearRegex}(?=(\\.(?!\\d)|[?!;]|$)))'
YearSuffix = f'((,|\\sof)?\\s*({DateYearRegex}|{FullTextYearRegex}))'
OnRegex = f'(?<=\\bon\\s+)({DayRegex}s?)\\b'
RelaxedOnRegex = f'(?<=\\b(on|at|in)\\s+)((?<day>(3[0-1]|[0-2]?\\d)(?:th|nd|rd|st))s?)\\b'
PrefixWeekDayRegex = f'(\\s*((,?\\s*on)|[-—–]))'
ThisRegex = f'\\b(this(\\s*week{PrefixWeekDayRegex}?)?\\s*{WeekDayRegex})|({WeekDayRegex}((\\s+of)?\\s+this\\s*week))\\b'
LastDateRegex = f'\\b({PreviousPrefixRegex}(\\s*week{PrefixWeekDayRegex}?)?\\s*{WeekDayRegex})|({WeekDayRegex}(\\s+(of\\s+)?last\\s*week))\\b'
NextDateRegex = f'\\b({NextPrefixRegex}(\\s*week{PrefixWeekDayRegex}?)?\\s*{WeekDayRegex})|((on\\s+)?{WeekDayRegex}((\\s+of)?\\s+(the\\s+following|(the\\s+)?next)\\s*week))\\b'
SpecialDayRegex = f'\\b((the\\s+)?day before yesterday|(the\\s+)?day after (tomorrow|tmr)|the\\s+day\\s+(before|after)(?!=\\s+day)|((the\\s+)?({RelativeRegex}|my)\\s+day)|yesterday|tomorrow|tmr|today|otd)\\b'
SpecialDayWithNumRegex = f'\\b((?<number>{WrittenNumRegex})\\s+days?\\s+from\\s+(?<day>yesterday|tomorrow|tmr|today))\\b'
RelativeDayRegex = f'\\b(((the\\s+)?{RelativeRegex}\\s+day))\\b'
SetWeekDayRegex = f'\\b(?<prefix>on\\s+)?(?<weekday>morning|afternoon|evening|night|(sun|mon|tues|wednes|thurs|fri|satur)day)s\\b'
WeekDayOfMonthRegex = f'(?<wom>(the\\s+)?(?<cardinal>first|1st|second|2nd|third|3rd|fourth|4th|fifth|5th|last)\\s+(week\\s+{MonthSuffixRegex}[\\.]?\\s+(on\\s+)?{WeekDayRegex}|{WeekDayRegex}\\s+{MonthSuffixRegex}))'
RelativeWeekDayRegex = f'\\b({WrittenNumRegex}\\s+{WeekDayRegex}\\s+(from\\s+now|later))\\b'
SpecialDate = f'(?=\\b(on|at)\\s+the\\s+){DayRegex}\\b'
DatePreposition = f'\\b(on|in)'
DateExtractorYearTermRegex = f'(\\s+|\\s*[/\\\\.,-]\\s*|\\s+of\\s+){DateYearRegex}'
DayPrefix = f'\\b({WeekDayRegex}|{SpecialDayRegex})\\b'
DateExtractor1 = f'\\b({DayPrefix}\\s*[,-]?\\s*)?(({MonthRegex}[\\.]?\\s*[/\\\\.,-]?\\s*{DayRegex})|(\\({MonthRegex}\\s*[-./]\\s*{DayRegex}\\)))(\\s*\\(\\s*{DayPrefix}\\s*\\))?({DateExtractorYearTermRegex}\\b)?'
DateExtractor3 = f'\\b({DayPrefix}(\\s+|\\s*,\\s*))?({DayRegex}[\\.]?(\\s+|\\s*[-,/]\\s*|\\s+of\\s+){MonthRegex}[\\.]?((\\s+in)?{DateExtractorYearTermRegex})?|{BaseDateTime.FourDigitYearRegex}\\s*[-./]?\\s*(the\\s+)?(?<day>(?:3[0-1]|[1-2]\\d|0?[1-9])(?:th|nd|rd|st)?)[\\.]?(\\s+|\\s*[-,/]\\s*|\\s+of\\s+){MonthRegex}[\\.]?)\\b'
DateExtractor4 = f'\\b{MonthNumRegex}\\s*[/\\\\\\-]\\s*{DayRegex}[\\.]?\\s*[/\\\\\\-]\\s*{DateYearRegex}'
DateExtractor5 = f'\\b({DayPrefix}(\\s*,)?\\s+)?{DayRegex}\\s*[/\\\\\\-\\.]\\s*({MonthNumRegex}|{MonthRegex})\\s*[/\\\\\\-\\.]\\s*{DateYearRegex}(?!\\s*[/\\\\\\-\\.]\\s*\\d+)'
DateExtractor6 = f'(?<={DatePreposition}\\s+)({StrictRelativeRegex}\\s+)?({DayPrefix}\\s+)?{MonthNumRegex}[\\-\\.]{DayRegex}(?![%]){BaseDateTime.CheckDecimalRegex}\\b'
DateExtractor7L = f'\\b({DayPrefix}(\\s*,)?\\s+)?{MonthNumRegex}\\s*/\\s*{DayRegex}{DateExtractorYearTermRegex}(?![%])\\b'
DateExtractor7S = f'\\b({DayPrefix}(\\s*,)?\\s+)?{MonthNumRegex}\\s*/\\s*{DayRegex}(?![%]){BaseDateTime.CheckDecimalRegex}\\b'
DateExtractor8 = f'(?<={DatePreposition}\\s+)({StrictRelativeRegex}\\s+)?({DayPrefix}\\s+)?{DayRegex}[\\\\\\-]{MonthNumRegex}(?![%]){BaseDateTime.CheckDecimalRegex}\\b'
DateExtractor9L = f'\\b({DayPrefix}(\\s*,)?\\s+)?{DayRegex}\\s*/\\s*{MonthNumRegex}{DateExtractorYearTermRegex}(?![%])\\b'
DateExtractor9S = f'\\b({DayPrefix}(\\s*,)?\\s+)?{DayRegex}\\s*/\\s*{MonthNumRegex}{BaseDateTime.CheckDecimalRegex}(?![%])\\b'
DateExtractorA = f'\\b({DayPrefix}(\\s*,)?\\s+)?(({BaseDateTime.FourDigitYearRegex}\\s*[/\\\\\\-\\.]\\s*({MonthNumRegex}|{MonthRegex})\\s*[/\\\\\\-\\.]\\s*{DayRegex})|({MonthRegex}\\s*[/\\\\\\-\\.]\\s*{BaseDateTime.FourDigitYearRegex}\\s*[/\\\\\\-\\.]\\s*(the\\s+)?(?<day>(?:3[0-1]|[1-2]\\d|0?[1-9])(?:th|nd|rd|st)?))|({DayRegex}\\s*[/\\\\\\-\\.]\\s*{BaseDateTime.FourDigitYearRegex}\\s*[/\\\\\\-\\.]\\s*{MonthRegex}))'
OfMonth = f'^\\s*(day\\s+)?of\\s*{MonthRegex}'
MonthEnd = f'{MonthRegex}\\s*(the)?\\s*$'
WeekDayEnd = f'(this\\s+)?{WeekDayRegex}\\s*,?\\s*$'
WeekDayStart = f'^[\\.]'
RangeUnitRegex = f'\\b(?<unit>years?|months?|weeks?|fortnights?)\\b'
HourNumRegex = f'\\b(?<hournum>zero|one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve)\\b'
MinuteNumRegex = f'(((?<tens>twenty|thirty|fou?rty|fifty)(\\s*-?\\s*))?(?<minnum>one|two|three|four|five|six|seven|eight|nine)|(?<minnum>ten|eleven|twelve|thirteen|fifteen|eighteen|(four|six|seven|nine)(teen)|twenty|thirty|forty|fifty))'
DeltaMinuteNumRegex = f'(((?<tens>twenty|thirty|fou?rty|fifty)(\\s*-?\\s*))?(?<deltaminnum>one|two|three|four|five|six|seven|eight|nine)|(?<deltaminnum>ten|eleven|twelve|thirteen|fifteen|eighteen|(four|six|seven|nine)(teen)|twenty|thirty|forty|fifty))'
PmRegex = f'(?<pm>(((?:at|in|around|circa|on|for)\\s+(the\\s+)?)?(afternoon|evening|midnight|lunchtime))|((at|in|around|on|for)\\s+(the\\s+)?night))'
PmRegexFull = f'(?<pm>((?:at|in|around|circa|on|for)\\s+(the\\s+)?)?(afternoon|evening|(mid)?night|lunchtime))'
AmRegex = f'(?<am>((?:at|in|around|circa|on|for)\\s+(the\\s+)?)?(morning))'
LunchRegex = f'\\blunchtime\\b'
NightRegex = f'\\b(mid)?night\\b'
CommonDatePrefixRegex = f'^[\\.]'
LessThanOneHour = f'(?<lth>(a\\s+)?quarter|three quarter(s)?|half( an hour)?|{BaseDateTime.DeltaMinuteRegex}(\\s+(minutes?|mins?))|{DeltaMinuteNumRegex}(\\s+(minutes?|mins?)))'
WrittenTimeRegex = f'(?<writtentime>{HourNumRegex}\\s+{MinuteNumRegex}(\\s+(minutes?|mins?))?)'
TimePrefix = f'(?<prefix>{LessThanOneHour}\\s+(past|to))'
TimeSuffix = f'(?<suffix>{AmRegex}|{PmRegex}|{OclockRegex})'
TimeSuffixFull = f'(?<suffix>{AmRegex}|{PmRegexFull}|{OclockRegex})'
BasicTime = f'\\b(?<basictime>{WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}:{BaseDateTime.MinuteRegex}(:{BaseDateTime.SecondRegex})?|{BaseDateTime.HourRegex}(?![%\\d]))'
MidnightRegex = f'(?<midnight>mid\\s*(-\\s*)?night)'
MidmorningRegex = f'(?<midmorning>mid\\s*(-\\s*)?morning)'
MidafternoonRegex = f'(?<midafternoon>mid\\s*(-\\s*)?afternoon)'
MiddayRegex = f'(?<midday>mid\\s*(-\\s*)?day|((12\\s)?noon))'
MidTimeRegex = f'(?<mid>({MidnightRegex}|{MidmorningRegex}|{MidafternoonRegex}|{MiddayRegex}))'
AtRegex = f'\\b(?:(?:(?<=\\b(at|(at)?\\s*around|circa)\\s+)(?:{WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}(?!\\.\\d)(\\s*((?<iam>a)|(?<ipm>p)))?|{MidTimeRegex}))|{MidTimeRegex})\\b'
IshRegex = f'\\b({BaseDateTime.HourRegex}(-|——)?ish|noon(ish)?)\\b'
TimeUnitRegex = f'([^a-z]{{1,}}|\\b)(?<unit>h(ou)?rs?|h|min(ute)?s?|sec(ond)?s?)\\b'
RestrictedTimeUnitRegex = f'(?<unit>hour|minute)\\b'
FivesRegex = f'(?<tens>(?:fifteen|(?:twen|thir|fou?r|fif)ty(\\s*five)?|ten|five))\\b'
HourRegex = f'\\b{BaseDateTime.HourRegex}'
PeriodHourNumRegex = f'\\b(?<hour>twenty(\\s+(one|two|three|four))?|eleven|twelve|thirteen|fifteen|eighteen|(four|six|seven|nine)(teen)?|zero|one|two|three|five|eight|ten)\\b'
ConnectNumRegex = f'\\b{BaseDateTime.HourRegex}(?<min>[0-5][0-9])\\s*{DescRegex}'
TimeRegexWithDotConnector = f'({BaseDateTime.HourRegex}(\\s*\\.\\s*){BaseDateTime.MinuteRegex})'
TimeRegex1 = f'\\b({TimePrefix}\\s+)?({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex})(\\s*|[.]){DescRegex}'
TimeRegex2 = f'(\\b{TimePrefix}\\s+)?(t)?{BaseDateTime.HourRegex}(\\s*)?:(\\s*)?{BaseDateTime.MinuteRegex}((\\s*)?:(\\s*)?{BaseDateTime.SecondRegex})?(?<iam>a)?((\\s*{DescRegex})|\\b)'
TimeRegex3 = f'(\\b{TimePrefix}\\s+)?{BaseDateTime.HourRegex}\\.{BaseDateTime.MinuteRegex}(\\s*{DescRegex})'
TimeRegex4 = f'\\b{TimePrefix}\\s+{BasicTime}(\\s*{DescRegex})?\\s+{TimeSuffix}\\b'
TimeRegex5 = f'\\b{TimePrefix}\\s+{BasicTime}((\\s*{DescRegex})|\\b)'
TimeRegex6 = f'({BasicTime})(\\s*{DescRegex})?\\s+{TimeSuffix}\\b'
TimeRegex7 = f'\\b{TimeSuffixFull}\\s+(at\\s+)?{BasicTime}((\\s*{DescRegex})|\\b)'
TimeRegex8 = f'.^'
TimeRegex9 = f'\\b{PeriodHourNumRegex}(\\s+|-){FivesRegex}((\\s*{DescRegex})|\\b)'
TimeRegex10 = f'\\b({TimePrefix}\\s+)?{BaseDateTime.HourRegex}(\\s*h\\s*){BaseDateTime.MinuteRegex}(\\s*{DescRegex})?'
TimeRegex11 = f'\\b((?:({TimeTokenPrefix})?{TimeRegexWithDotConnector}(\\s*{DescRegex}))|(?:(?:{TimeTokenPrefix}{TimeRegexWithDotConnector})(?!\\s*per\\s*cent|%)))'
FirstTimeRegexInTimeRange = f'\\b{TimeRegexWithDotConnector}(\\s*{DescRegex})?'
PureNumFromTo = f'({RangePrefixRegex}\\s+)?({HourRegex}|{PeriodHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?\\s*{TillRegex}\\s*({HourRegex}|{PeriodHourNumRegex})(?<rightDesc>\\s*({PmRegex}|{AmRegex}|{DescRegex}))?'
PureNumBetweenAnd = f'(between\\s+)(({BaseDateTime.TwoDigitHourRegex}{BaseDateTime.TwoDigitMinuteRegex})|{HourRegex}|{PeriodHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?\\s*{RangeConnectorRegex}\\s*(({BaseDateTime.TwoDigitHourRegex}{BaseDateTime.TwoDigitMinuteRegex})|{HourRegex}|{PeriodHourNumRegex})(?<rightDesc>\\s*({PmRegex}|{AmRegex}|{DescRegex}))?'
SpecificTimeFromTo = f'({RangePrefixRegex}\\s+)?(?<time1>(({TimeRegex2}|{FirstTimeRegexInTimeRange})|({HourRegex}|{PeriodHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?))\\s*{TillRegex}\\s*(?<time2>(({TimeRegex2}|{TimeRegexWithDotConnector}(?<rightDesc>\\s*{DescRegex}))|({HourRegex}|{PeriodHourNumRegex})(\\s*(?<rightDesc>{DescRegex}))?))'
SpecificTimeBetweenAnd = f'(between\\s+)(?<time1>(({TimeRegex2}|{FirstTimeRegexInTimeRange})|({HourRegex}|{PeriodHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?))\\s*{RangeConnectorRegex}\\s*(?<time2>(({TimeRegex2}|{TimeRegexWithDotConnector}(?<rightDesc>\\s*{DescRegex}))|({HourRegex}|{PeriodHourNumRegex})(\\s*(?<rightDesc>{DescRegex}))?))'
SuffixAfterRegex = f'\\b(((at)\\s)?(or|and)\\s+(above|after|later|greater)(?!\\s+than))\\b'
PrepositionRegex = f'(?<prep>^(,\\s*)?(at|on|of)(\\s+the)?$)'
LaterEarlyRegex = f'((?<early>earl(y|ier)(\\s+|-))|(?<late>late(r?\\s+|-)))'
MealTimeRegex = f'\\b(at\\s+)?(?<mealTime>breakfast|brunch|lunch(\\s*time)?|dinner(\\s*time)?|supper)\\b'
UnspecificTimePeriodRegex = f'({MealTimeRegex})'
TimeOfDayRegex = f'\\b(?<timeOfDay>((((in\\s+the\\s+)?{LaterEarlyRegex}?(in(\\s+the)?\\s+)?(morning|afternoon|night|evening)))|{MealTimeRegex}|(((in\\s+(the)?\\s+)?)(daytime|business\\s+hour)))s?)\\b'
SpecificTimeOfDayRegex = f'\\b(({StrictRelativeRegex}\\s+{TimeOfDayRegex})\\b|\\btoni(ght|te))s?\\b'
TimeFollowedUnit = f'^\\s*{TimeUnitRegex}'
TimeNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\.\\d*)?){TimeUnitRegex}'
BusinessHourSplitStrings = [r'business', r'hour']
NowRegex = f'\\b(?<now>(right\\s+)?now|as\\s+soon\\s+as\\s+possible|asap|recently|previously|at\\s+(present|this\\s+time|th(e|is)\\s+minute|the\\s+(moment|present\\s+time)))\\b'
NowParseRegex = f'\\b({NowRegex}|^(date)$)\\b'
SuffixRegex = f'^\\s*(in the\\s+)?(morning|afternoon|evening|night)\\b'
NonTimeContextTokens = f'(building)'
DateTimeTimeOfDayRegex = f'\\b(?<timeOfDay>morning|(?<pm>afternoon|night|evening))\\b'
DateTimeSpecificTimeOfDayRegex = f'\\b(({RelativeRegex}\\s+{DateTimeTimeOfDayRegex})\\b|\\btoni(ght|te))\\b'
TimeOfTodayAfterRegex = f'^\\s*(,\\s*)?(in\\s+)?{DateTimeSpecificTimeOfDayRegex}'
TimeOfTodayBeforeRegex = f'{DateTimeSpecificTimeOfDayRegex}(\\s*,)?(\\s+(at|around|circa|in|on))?\\s*$'
SimpleTimeOfTodayAfterRegex = f'(?<!{NonTimeContextTokens}\\s*)\\b({HourNumRegex}|{BaseDateTime.HourRegex})\\s*(,\\s*)?(in\\s+)?{DateTimeSpecificTimeOfDayRegex}\\b'
SimpleTimeOfTodayBeforeRegex = f'\\b{DateTimeSpecificTimeOfDayRegex}(\\s*,)?(\\s+(at|around|circa))?\\s*({HourNumRegex}|{BaseDateTime.HourRegex})\\b'
SpecificEndOfRegex = f'(the\\s+)?end of(\\s+the)?\\s*$'
UnspecificEndOfRegex = f'\\b(the\\s+)?(eod|(end\\s+of\\s+day))\\b'
UnspecificEndOfRangeRegex = f'\\b(eoy)\\b'
PeriodTimeOfDayRegex = f'\\b((in\\s+(the)?\\s+)?{LaterEarlyRegex}?(this\\s+)?{DateTimeTimeOfDayRegex})\\b'
PeriodSpecificTimeOfDayRegex = f'\\b({LaterEarlyRegex}?this\\s+{DateTimeTimeOfDayRegex}|({StrictRelativeRegex}\\s+{PeriodTimeOfDayRegex})\\b|\\btoni(ght|te))\\b'
PeriodTimeOfDayWithDateRegex = f'\\b(({PeriodTimeOfDayRegex}(\\s+(on|of))?))\\b'
LessThanRegex = f'\\b(less\\s+than)\\b'
MoreThanRegex = f'\\b(more\\s+than)\\b'
DurationUnitRegex = f'(?<unit>{DateUnitRegex}|h(ou)?rs?|h|min(ute)?s?|sec(ond)?s?|nights?)\\b'
SuffixAndRegex = f'(?<suffix>\\s*(and)\\s+(an?\\s+)?(?<suffix_num>half|quarter))'
PeriodicRegex = f'\\b(?<periodic>((?<multiplier>semi|bi|tri)(\\s*|-))?(daily|monthly|weekly|quarterly|yearly|annual(ly)?))\\b'
EachUnitRegex = f'\\b(?<each>(each|every|any|once an?)(?<other>\\s+other)?\\s+({DurationUnitRegex}|(?<specialUnit>quarters?|weekends?)|{WeekDayRegex})|(?<specialUnit>weekends))'
EachPrefixRegex = f'\\b(?<each>(each|every|once an?)\\s*$)'
SetEachRegex = f'\\b(?<each>(each|every)(?<other>\\s+other)?\\s*)(?!the|that)\\b'
SetLastRegex = f'(?<last>following|next|upcoming|this|{LastNegPrefix}last|past|previous|current)'
EachDayRegex = f'^\\s*(each|every)\\s*day\\b'
DurationFollowedUnit = f'(^\\s*{DurationUnitRegex}\\s+{SuffixAndRegex})|(^\\s*{SuffixAndRegex}?(\\s+|-)?{DurationUnitRegex})'
NumberCombinedWithDurationUnit = f'\\b(?<num>\\d+(\\.\\d*)?)(-)?{DurationUnitRegex}'
AnUnitRegex = f'(\\b((?<half>(half)\\s+)?an?|another)|(?<half>(1/2|½|half)))\\s+{DurationUnitRegex}'
DuringRegex = f'\\b(for|during)\\s+the\\s+(?<unit>year|month|week|day|fortnight)\\b'
AllRegex = f'\\b(?<all>(all|full|whole)(\\s+|-)(?<unit>year|month|week|day|fortnight))\\b'
HalfRegex = f'((an?\\s*)|\\b)(?<half>half\\s+(?<unit>year|month|week|fortnight|day|hour))\\b'
ConjunctionRegex = f'\\b((and(\\s+for)?)|with)\\b'
HolidayList1 = f'(?<holiday>mardi gras|(washington|mao)\'s birthday|juneteenth|(jubilee|freedom)(\\s+day)|chinese new year|(new\\s+(years\'|year\\s*\'s|years?)\\s+eve)|(new\\s+(years\'|year\\s*\'s|years?)(\\s+day)?)|may\\s*day|yuan dan|christmas eve|(christmas|xmas)(\\s+day)?|black friday|yuandan|easter(\\s+(sunday|saturday|monday))?|clean monday|ash wednesday|palm sunday|maundy thursday|good friday|white\\s+(sunday|monday)|trinity sunday|pentecost|corpus christi|cyber monday)'
HolidayList2 = f'(?<holiday>(thanks\\s*giving|all saint\'s|white lover|s(?:ain)?t?(\\.)?\\s+(?:patrick|george)(?:\')?(?:s)?|us independence|all hallow|all souls|guy fawkes|cinco de mayo|halloween|qingming|dragon boat|april fools|tomb\\s*sweeping)(\\s+day)?)'
HolidayList3 = f'(?<holiday>(?:independence|presidents(?:\')?|mlk|martin luther king( jr)?|canberra|ascension|columbus|tree( planting)?|arbor|labou?r|((international|int\'?l)\\s+)?workers\'?|mother\'?s?|father\'?s?|female|women(\'s)?|single|teacher\'?s|youth|children|girls|lovers?|earth|inauguration|groundhog|valentine\'?s|baptiste|bastille|veterans(?:\')?|memorial|mid[ \\-]autumn|moon|spring|lantern)\\s+day)'
HolidayRegex = f'\\b(({StrictRelativeRegex}\\s+({HolidayList1}|{HolidayList2}|{HolidayList3}))|(({HolidayList1}|{HolidayList2}|{HolidayList3})(\\s+(of\\s+)?({YearRegex}|{RelativeRegex}\\s+year))?))\\b'
AMTimeRegex = f'(?<am>morning)'
PMTimeRegex = f'\\b(?<pm>afternoon|evening|night)\\b'
NightTimeRegex = f'(night)'
NowTimeRegex = f'(now|at\\s+(present|this\\s+time|th(e|is)\\s+minute|the\\s+(moment|present\\s+time)))'
RecentlyTimeRegex = f'(recently|previously)'
AsapTimeRegex = f'(as soon as possible|asap)'
InclusiveModPrepositions = f'(?<include>((on|in|at)\\s+or\\s+)|(\\s+or\\s+(on|in|at)))'
AroundRegex = f'(?:\\b(?:around|circa)\\s*?\\b)(\\s+the)?'
BeforeRegex = f'((\\b{InclusiveModPrepositions}?(?:before|in\\s+advance\\s+of|prior\\s+to|(no\\s+later|earlier|sooner)\\s+than|ending\\s+(with|on)|by|(un)?till?|(?<include>as\\s+late\\s+as)){InclusiveModPrepositions}?\\b\\s*?)|(?<!\\w|>)((?<include><\\s*=)|<))(\\s+the)?'
AfterRegex = f'((\\b{InclusiveModPrepositions}?((after|(starting|beginning)(\\s+on)?(?!\\sfrom)|(?<!no\\s+)later than)|(year greater than))(?!\\s+or equal to){InclusiveModPrepositions}?\\b\\s*?)|(?<!\\w|<)((?<include>>\\s*=)|>))(\\s+the)?'
SinceRegex = f'(?:(?:\\b(?:since|after\\s+or\\s+equal\\s+to|starting\\s+(?:from|on|with)|as\\s+early\\s+as|(any\\s+time\\s+)from)\\b\\s*?)|(?<!\\w|<)(>=))(\\s+the)?'
SinceRegexExp = f'({SinceRegex}|\\bfrom(\\s+the)?\\b)'
AgoRegex = f'\\b(ago|before\\s+(?<day>yesterday|today))\\b'
LaterRegex = f'\\b(?:later(?!((\\s+in)?\\s*{OneWordPeriodRegex})|(\\s+{TimeOfDayRegex})|\\s+than\\b)|from now|(from|after)\\s+(?<day>tomorrow|tmr|today))\\b'
BeforeAfterRegex = f'\\b((?<before>before)|(?<after>from|after))\\b'
InConnectorRegex = f'\\b(in)\\b'
SinceYearSuffixRegex = f'(^\\s*{SinceRegex}(\\s*(the\\s+)?year\\s*)?{YearSuffix})'
WithinNextPrefixRegex = f'\\b(within(\\s+the)?(\\s+(?<next>{NextPrefixRegex}))?)\\b'
TodayNowRegex = f'\\b(today|now)\\b'
MorningStartEndRegex = f'(^(morning|{AmDescRegex}))|((morning|{AmDescRegex})$)'
AfternoonStartEndRegex = f'(^(afternoon|{PmDescRegex}))|((afternoon|{PmDescRegex})$)'
EveningStartEndRegex = f'(^(evening))|((evening)$)'
NightStartEndRegex = f'(^(over|to)?ni(ght|te))|((over|to)?ni(ght|te)$)'
InexactNumberRegex = f'\\b((a\\s+)?few|some|several|(?<NumTwoTerm>(a\\s+)?couple(\\s+of)?))\\b'
InexactNumberUnitRegex = f'({InexactNumberRegex})\\s+({DurationUnitRegex})'
RelativeTimeUnitRegex = f'(?:(?:(?:{NextPrefixRegex}|{PreviousPrefixRegex}|{ThisPrefixRegex})\\s+({TimeUnitRegex}))|((the|my))\\s+({RestrictedTimeUnitRegex}))'
RelativeDurationUnitRegex = f'(?:(?:(?<=({NextPrefixRegex}|{PreviousPrefixRegex}|{ThisPrefixRegex})\\s+)({DurationUnitRegex}))|((the|my))\\s+({RestrictedTimeUnitRegex}))'
ReferenceDatePeriodRegex = f'\\b{ReferencePrefixRegex}\\s+(?<duration>week(end)?|fortnight|month|year|decade)\\b'
ConnectorRegex = f'^(-|,|for|t|around|circa|@)$'
FromToRegex = f'(\\b(from).+(to|and|or)\\b.+)'
SingleAmbiguousMonthRegex = f'^(the\\s+)?(may|march)$'
SingleAmbiguousTermsRegex = f'^(the\\s+)?(day|week|month|year)$'
UnspecificDatePeriodRegex = f'^(week|fortnight|month|year)$'
PrepositionSuffixRegex = f'\\b(on|in|at|around|circa|from|to)$'
FlexibleDayRegex = f'(?<DayOfMonth>([A-Za-z]+\\s)?[A-Za-z\\d]+)'
ForTheRegex = f'\\b((((?<=for\\s+)the\\s+{FlexibleDayRegex})|((?<=on\\s+)(the\\s+)?{FlexibleDayRegex}(?<=(st|nd|rd|th))))(?<end>\\s*(,|\\.(?!\\d)|!|\\?|$)))'
WeekDayAndDayOfMonthRegex = f'\\b{WeekDayRegex}\\s+(the\\s+{FlexibleDayRegex})\\b'
WeekDayAndDayRegex = f'\\b{WeekDayRegex}\\s+(?!(the)){DayRegex}(?!([-:]|(\\s+({AmDescRegex}|{PmDescRegex}|{OclockRegex}))))\\b'
RestOfDateRegex = f'\\b(rest|remaining)\\s+(of\\s+)?((the|my|this|current)\\s+)?(?<duration>week|fortnight|month|year|decade)\\b'
RestOfDateTimeRegex = f'\\b(rest|remaining)\\s+(of\\s+)?((the|my|this|current)\\s+)?(?<unit>day)\\b'
AmbiguousRangeModifierPrefix = f'(from)'
NumberEndingPattern = f'^(?:\\s+(?<meeting>meeting|appointment|conference|((skype|teams|zoom|facetime)\\s+)?call)\\s+to\\s+(?<newTime>{PeriodHourNumRegex}|{HourRegex})([\\.]?$|(\\.,|,|!|\\?)))'
OneOnOneRegex = f'\\b(1\\s*:\\s*1(?!\\d))|(one (on )?one|one\\s*-\\s*one|one\\s*:\\s*one)\\b'
LaterEarlyPeriodRegex = f'\\b(({PrefixPeriodRegex})\\s*\\b\\s*(?<suffix>{OneWordPeriodRegex}|(?<FourDigitYear>{BaseDateTime.FourDigitYearRegex}))|({UnspecificEndOfRangeRegex}))\\b'
WeekWithWeekDayRangeRegex = f'\\b((?<week>({NextPrefixRegex}|{PreviousPrefixRegex}|this)\\s+week)((\\s+between\\s+{WeekDayRegex}\\s+and\\s+{WeekDayRegex})|(\\s+from\\s+{WeekDayRegex}\\s+to\\s+{WeekDayRegex})))\\b'
GeneralEndingRegex = f'^\\s*((\\.,)|\\.|,|!|\\?)?\\s*$'
MiddlePauseRegex = f'\\s*(,)\\s*'
DurationConnectorRegex = f'^\\s*(?<connector>\\s+|and|,)\\s*$'
PrefixArticleRegex = f'\\bthe\\s+'
OrRegex = f'\\s*((\\b|,\\s*)(or|and)\\b|,)\\s*'
SpecialYearTermsRegex = f'\\b((({SpecialYearPrefixes}\\s+)?year)|(cy|(?<special>fy|sy)))'
YearPlusNumberRegex = f'\\b({SpecialYearTermsRegex}\\s*((?<year>(\\d{{2,4}}))|{FullTextYearRegex}))\\b'
NumberAsTimeRegex = f'\\b({WrittenTimeRegex}|{PeriodHourNumRegex}|{BaseDateTime.HourRegex})\\b'
TimeBeforeAfterRegex = f'\\b(((?<=\\b(before|no later than|by|after)\\s+)({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}|{MidTimeRegex}))|{MidTimeRegex})\\b'
DateNumberConnectorRegex = f'^\\s*(?<connector>\\s+at)\\s*$'
DecadeRegex = f'(?<decade>(?:nough|twen|thir|fou?r|fif|six|seven|eigh|nine)ties|two\\s+thousands)'
DecadeWithCenturyRegex = f'(the\\s+)?(((?<century>\\d|1\\d|2\\d)?(\')?(?<decade>\\d0)(\')?(\\s)?s\\b)|(({CenturyRegex}(\\s+|-)(and\\s+)?)?{DecadeRegex})|({CenturyRegex}(\\s+|-)(and\\s+)?(?<decade>tens|hundreds)))'
RelativeDecadeRegex = f'\\b((the\\s+)?{RelativeRegex}\\s+((?<number>[\\w,]+)\\s+)?decades?)\\b'
YearPeriodRegex = f'((((from|during|in)\\s+)?{YearRegex}\\s*({TillRegex})\\s*{YearRegex})|(((between)\\s+){YearRegex}\\s*({RangeConnectorRegex})\\s*{YearRegex}))'
StrictTillRegex = f'(?<till>\\b(to|(un)?till?|thru|through)\\b|{BaseDateTime.RangeConnectorSymbolRegex}(?!\\s*(h[1-2]|q[1-4])(?!(\\s+of|\\s*,\\s*))))'
StrictRangeConnectorRegex = f'(?<and>\\b(and|through|to)\\b|{BaseDateTime.RangeConnectorSymbolRegex}(?!\\s*(h[1-2]|q[1-4])(?!(\\s+of|\\s*,\\s*))))'
StartMiddleEndRegex = f'\\b((?<StartOf>((the\\s+)?(start|beginning)\\s+of\\s+)?)(?<MiddleOf>((the\\s+)?middle\\s+of\\s+)?)(?<EndOf>((the\\s+)?end\\s+of\\s+)?))'
ComplexDatePeriodRegex = f'(?:((from|during|in)\\s+)?{StartMiddleEndRegex}(?<start>.+)\\s*({StrictTillRegex})\\s*{StartMiddleEndRegex}(?<end>.+)|((between)\\s+){StartMiddleEndRegex}(?<start>.+)\\s*({StrictRangeConnectorRegex})\\s*{StartMiddleEndRegex}(?<end>.+))'
FailFastRegex = f'{BaseDateTime.DeltaMinuteRegex}|\\b(?:{BaseDateTime.BaseAmDescRegex}|{BaseDateTime.BasePmDescRegex})|{BaseDateTime.BaseAmPmDescRegex}|\\b(?:zero|{WrittenOneToNineRegex}|{WrittenElevenToNineteenRegex}|{WrittenTensRegex}|{WrittenMonthRegex}|{SeasonDescRegex}|{DecadeRegex}|centur(y|ies)|weekends?|quarters?|hal(f|ves)|yesterday|to(morrow|day|night)|tmr|noonish|\\d(-|——)?ish|((the\\s+\\w*)|\\d)(th|rd|nd|st)|(mid\\s*(-\\s*)?)?(night|morning|afternoon|day)s?|evenings?|noon|lunch(time)?|dinner(time)?|(day|night)time|overnight|dawn|dusk|sunset|hours?|hrs?|h|minutes?|mins?|seconds?|secs?|eo[dmy]|mardi[ -]?gras|birthday|eve|christmas|xmas|thanksgiving|halloween|yuandan|easter|yuan dan|april fools|cinco de mayo|all (hallow|souls)|guy fawkes|(st )?patrick|hundreds?|noughties|aughts|thousands?)\\b|{WeekDayRegex}|{SetWeekDayRegex}|{NowRegex}|{PeriodicRegex}|\\b({DateUnitRegex}|{ImplicitDayRegex})'
UnitMap = dict([("decades", "10Y"), ("decade", "10Y"), ("years", "Y"), ("year", "Y"), ("months", "MON"), ("month", "MON"), ("quarters", "3MON"), ("quarter", "3MON"), ("semesters", "6MON"), ("semestres", "6MON"), ("semester", "6MON"), ("semestre", "6MON"), ("weeks", "W"), ("week", "W"), ("weekends", "WE"), ("weekend", "WE"), ("fortnights", "2W"), ("fortnight", "2W"), ("weekdays", "D"), ("weekday", "D"), ("days", "D"), ("day", "D"), ("nights", "D"), ("night", "D"), ("hours", "H"), ("hour", "H"), ("hrs", "H"), ("hr", "H"), ("h", "H"), ("minutes", "M"), ("minute", "M"), ("mins", "M"), ("min", "M"), ("seconds", "S"), ("second", "S"), ("secs", "S"), ("sec", "S")])
UnitValueMap = dict([("decades", 315360000), ("decade", 315360000), ("years", 31536000), ("year", 31536000), ("months", 2592000), ("month", 2592000), ("fortnights", 1209600), ("fortnight", 1209600), ("weekends", 172800), ("weekend", 172800), ("weeks", 604800), ("week", 604800), ("days", 86400), ("day", 86400), ("nights", 86400), ("night", 86400), ("hours", 3600), ("hour", 3600), ("hrs", 3600), ("hr", 3600), ("h", 3600), ("minutes", 60), ("minute", 60), ("mins", 60), ("min", 60), ("seconds", 1), ("second", 1), ("secs", 1), ("sec", 1)])
SpecialYearPrefixesMap = dict([("fiscal", "FY"), ("school", "SY"), ("fy", "FY"), ("sy", "SY")])
SeasonMap = dict([("spring", "SP"), ("summer", "SU"), ("fall", "FA"), ("autumn", "FA"), ("winter", "WI")])
SeasonValueMap = dict([("SP", 3), ("SU", 6), ("FA", 9), ("WI", 12)])
CardinalMap = dict([("first", 1), ("1st", 1), ("second", 2), ("2nd", 2), ("third", 3), ("3rd", 3), ("fourth", 4), ("4th", 4), ("fifth", 5), ("5th", 5)])
DayOfWeek = dict([("monday", 1), ("tuesday", 2), ("wednesday", 3), ("thursday", 4), ("friday", 5), ("saturday", 6), ("sunday", 0), ("mon", 1), ("tue", 2), ("tues", 2), ("wed", 3), ("wedn", 3), ("weds", 3), ("thu", 4), ("thur", 4), ("thurs", 4), ("fri", 5), ("sat", 6), ("sun", 0)])
MonthOfYear = dict([("january", 1), ("february", 2), ("march", 3), ("april", 4), ("may", 5), ("june", 6), ("july", 7), ("august", 8), ("september", 9), ("october", 10), ("november", 11), ("december", 12), ("jan", 1), ("feb", 2), ("mar", 3), ("apr", 4), ("jun", 6), ("jul", 7), ("aug", 8), ("sep", 9), ("sept", 9), ("oct", 10), ("nov", 11), ("dec", 12), ("1", 1), ("2", 2), ("3", 3), ("4", 4), ("5", 5), ("6", 6), ("7", 7), ("8", 8), ("9", 9), ("10", 10), ("11", 11), ("12", 12), ("01", 1), ("02", 2), ("03", 3), ("04", 4), ("05", 5), ("06", 6), ("07", 7), ("08", 8), ("09", 9)])
Numbers = dict([("zero", 0), ("one", 1), ("a", 1), ("an", 1), ("two", 2), ("three", 3), ("four", 4), ("five", 5), ("six", 6), ("seven", 7), ("eight", 8), ("nine", 9), ("ten", 10), ("eleven", 11), ("twelve", 12), ("thirteen", 13), ("fourteen", 14), ("fifteen", 15), ("sixteen", 16), ("seventeen", 17), ("eighteen", 18), ("nineteen", 19), ("twenty", 20), ("twenty one", 21), ("twenty two", 22), ("twenty three", 23), ("twenty four", 24), ("twenty five", 25), ("twenty six", 26), ("twenty seven", 27), ("twenty eight", 28), ("twenty nine", 29), ("thirty", 30), ("thirty one", 31), ("thirty two", 32), ("thirty three", 33), ("thirty four", 34), ("thirty five", 35), ("thirty six", 36), ("thirty seven", 37), ("thirty eight", 38), ("thirty nine", 39), ("forty", 40), ("forty one", 41), ("forty two", 42), ("forty three", 43), ("forty four", 44), ("forty five", 45), ("forty six", 46), ("forty seven", 47), ("forty eight", 48), ("forty nine", 49), ("fifty", 50), ("fifty one", 51), ("fifty two", 52), ("fifty three", 53), ("fifty four", 54), ("fifty five", 55), ("fifty six", 56), ("fifty seven", 57), ("fifty eight", 58), ("fifty nine", 59), ("sixty", 60), ("sixty one", 61), ("sixty two", 62), ("sixty three", 63), ("sixty four", 64), ("sixty five", 65), ("sixty six", 66), ("sixty seven", 67), ("sixty eight", 68), ("sixty nine", 69), ("seventy", 70), ("seventy one", 71), ("seventy two", 72), ("seventy three", 73), ("seventy four", 74), ("seventy five", 75), ("seventy six", 76), ("seventy seven", 77), ("seventy eight", 78), ("seventy nine", 79), ("eighty", 80), ("eighty one", 81), ("eighty two", 82), ("eighty three", 83), ("eighty four", 84), ("eighty five", 85), ("eighty six", 86), ("eighty seven", 87), ("eighty eight", 88), ("eighty nine", 89), ("ninety", 90), ("ninety one", 91), ("ninety two", 92), ("ninety three", 93), ("ninety four", 94), ("ninety five", 95), ("ninety six", 96), ("ninety seven", 97), ("ninety eight", 98), ("ninety nine", 99), ("one hundred", 100)])
DayOfMonth = dict([("1st", 1), ("1th", 1), ("2nd", 2), ("2th", 2), ("3rd", 3), ("3th", 3), ("4th", 4), ("5th", 5), ("6th", 6), ("7th", 7), ("8th", 8), ("9th", 9), ("10th", 10), ("11th", 11), ("11st", 11), ("12th", 12), ("12nd", 12), ("13th", 13), ("13rd", 13), ("14th", 14), ("15th", 15), ("16th", 16), ("17th", 17), ("18th", 18), ("19th", 19), ("20th", 20), ("21st", 21), ("21th", 21), ("22nd", 22), ("22th", 22), ("23rd", 23), ("23th", 23), ("24th", 24), ("25th", 25), ("26th", 26), ("27th", 27), ("28th", 28), ("29th", 29), ("30th", 30), ("31st", 31), ("01st", 1), ("01th", 1), ("02nd", 2), ("02th", 2), ("03rd", 3), ("03th", 3), ("04th", 4), ("05th", 5), ("06th", 6), ("07th", 7), ("08th", 8), ("09th", 9)])
DoubleNumbers = dict([("half", 0.5), ("quarter", 0.25)])
HolidayNames = dict([("easterday", ["easterday", "easter", "eastersunday"]), ("ashwednesday", ["ashwednesday"]), ("palmsunday", ["palmsunday"]), ("maundythursday", ["maundythursday"]), ("goodfriday", ["goodfriday"]), ("eastersaturday", ["eastersaturday"]), ("eastermonday", ["eastermonday"]), ("ascensionday", ["ascensionday"]), ("whitesunday", ["whitesunday", "pentecost", "pentecostday"]), ("whitemonday", ["whitemonday"]), ("trinitysunday", ["trinitysunday"]), ("corpuschristi", ["corpuschristi"]), ("earthday", ["earthday"]), ("fathers", ["fatherday", "fathersday"]), ("mothers", ["motherday", "mothersday"]), ("thanksgiving", ["thanksgivingday", "thanksgiving"]), ("blackfriday", ["blackfriday"]), ("cybermonday", ["cybermonday"]), ("martinlutherking", ["mlkday", "martinlutherkingday", "martinlutherkingjrday"]), ("washingtonsbirthday", ["washingtonsbirthday", "washingtonbirthday", "presidentsday"]), ("canberra", ["canberraday"]), ("labour", ["labourday", "laborday"]), ("columbus", ["columbusday"]), ("memorial", ["memorialday"]), ("yuandan", ["yuandan"]), ("maosbirthday", ["maosbirthday"]), ("teachersday", ["teachersday", "teacherday"]), ("singleday", ["singleday"]), ("allsaintsday", ["allsaintsday"]), ("youthday", ["youthday"]), ("childrenday", ["childrenday", "childday"]), ("femaleday", ["femaleday"]), ("treeplantingday", ["treeplantingday"]), ("arborday", ["arborday"]), ("girlsday", ["girlsday"]), ("whiteloverday", ["whiteloverday"]), ("loverday", ["loverday", "loversday"]), ("christmas", ["christmasday", "christmas"]), ("xmas", ["xmasday", "xmas"]), ("newyear", ["newyear"]), ("newyearday", ["newyearday"]), ("newyearsday", ["newyearsday"]), ("inaugurationday", ["inaugurationday"]), ("groundhougday", ["groundhougday"]), ("valentinesday", ["valentinesday"]), ("stpatrickday", ["stpatrickday", "stpatricksday", "stpatrick"]), ("aprilfools", ["aprilfools"]), ("stgeorgeday", ["stgeorgeday"]), ("mayday", ["mayday", "intlworkersday", "internationalworkersday", "workersday"]), ("cincodemayoday", ["cincodemayoday"]), ("baptisteday", ["baptisteday"]), ("usindependenceday", ["usindependenceday"]), ("independenceday", ["independenceday"]), ("bastilleday", ["bastilleday"]), ("halloweenday", ["halloweenday", "halloween"]), ("allhallowday", ["allhallowday"]), ("allsoulsday", ["allsoulsday"]), ("guyfawkesday", ["guyfawkesday"]), ("veteransday", ["veteransday"]), ("christmaseve", ["christmaseve"]), ("newyeareve", ["newyearseve", "newyeareve"]), ("juneteenth", ["juneteenth", "freedomday", "jubileeday"])])
WrittenDecades = dict([("hundreds", 0), ("tens", 10), ("twenties", 20), ("thirties", 30), ("forties", 40), ("fifties", 50), ("sixties", 60), ("seventies", 70), ("eighties", 80), ("nineties", 90)])
SpecialDecadeCases = dict([("noughties", 2000), ("aughts", 2000), ("two thousands", 2000)])
DefaultLanguageFallback = 'MDY'
SuperfluousWordList = [r'preferably', r'how about', r'maybe', r'perhaps', r'say', r'like']
DurationDateRestrictions = [r'today', r'now']
AmbiguityFiltersDict = dict([("^\\d{4}$", "(\\d\\.\\d{4}|\\d{4}\\.\\d)"), ("^(morning|afternoon|evening|night|day)\\b", "\\b(good\\s+(morning|afternoon|evening|night|day))|(nighty\\s+night)\\b"), ("\\bnow\\b", "\\b(^now,)|\\b((is|are)\\s+now\\s+for|for\\s+now)\\b"), ("\\bmay\\b", "\\b((((!|\\.|\\?|,|;|)\\s+|^)may i)|(i|you|he|she|we|they)\\s+may|(may\\s+((((also|not|(also not)|well)\\s+)?(be|ask|contain|constitute|e-?mail|take|have|result|involve|get|work|reply|differ))|(or may not))))\\b"), ("\\b(a|one) second\\b", "\\b(?<!an?\\s+)(a|one) second (round|time)\\b"), ("\\b(breakfast|brunch|lunch(time)?|dinner(time)?|supper)$", "(?<!\\b(at|before|after|around|circa)\\b\\s)(breakfast|brunch|lunch|dinner|supper)(?!\\s*time)"), ("^\\d+m$", "^\\d+m$"), ("^(apr|aug|dec|feb|jan|jul|jun|mar|may|nov|oct|sept?)$", "([$%£&!?@#])(apr|aug|dec|feb|jan|jul|jun|mar|may|nov|oct|sept?)|(apr|aug|dec|feb|jan|jul|jun|mar|may|nov|oct|sept?)([$%£&@#])")])
MorningTermList = [r'morning']
AfternoonTermList = [r'afternoon']
EveningTermList = [r'evening']
MealtimeBreakfastTermList = [r'breakfast']
MealtimeBrunchTermList = [r'brunch']
MealtimeLunchTermList = [r'lunch', r'lunchtime']
MealtimeDinnerTermList = [r'dinner', r'dinnertime', r'supper']
DaytimeTermList = [r'daytime']
NightTermList = [r'night']
SameDayTerms = [r'today', r'otd']
PlusOneDayTerms = [r'tomorrow', r'tmr', r'day after']
MinusOneDayTerms = [r'yesterday', r'day before']
PlusTwoDayTerms = [r'day after tomorrow', r'day after tmr']
MinusTwoDayTerms = [r'day before yesterday']
FutureTerms = [r'this', r'next']
LastCardinalTerms = [r'last']
MonthTerms = [r'month']
MonthToDateTerms = [r'month to date']
WeekendTerms = [r'weekend']
WeekTerms = [r'week']
FortnightTerms = [r'fortnight', r'fourtenight']
YearTerms = [r'year']
GenericYearTerms = [r'y']
YearToDateTerms = [r'year to date']
DoubleMultiplierRegex = f'^(bi)(-|\\s)?'
HalfMultiplierRegex = f'^(semi)(-|\\s)?'
DayTypeRegex = f'((week)?da(il)?ys?)$'
WeekTypeRegex = f'(week(s|ly)?)$'
WeekendTypeRegex = f'(weekends?)$'
MonthTypeRegex = f'(month(s|ly)?)$'
QuarterTypeRegex = f'(quarter(s|ly)?)$'
YearTypeRegex = f'((years?|annual)(ly)?)$'
# pylint: enable=line-too-long
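# Usage note: the `(?<name>...)` group syntax above is .NET-style and is not
# accepted by the stdlib `re` module; the third-party `regex` package does
# support it (assumption: that package is the intended consumer, as in the
# recognizers-text projects). A quick self-contained check with one of the
# constants that has no `{...}` dependencies:
#
#     import regex
#
#     m = regex.search(RelaxedOnRegex, 'see you on 21st')
#     assert m is not None and m.group('day') == '21st'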
StarcoderdataPython
1693396
import bentoml
import pandas as pd
import numpy as np
from bentoml.artifact import PickleArtifact
# from bentoml.adapters import DataframeInput
from bentoml.handlers import DataframeHandler
from bentoml.handlers import JsonHandler


@bentoml.ver(1, 0)
@bentoml.artifacts([
    PickleArtifact("knn"),
    PickleArtifact("index_map"),
    PickleArtifact("cluster_path"),
    PickleArtifact("pop_matrix"),
])
class ClusteredKNN(bentoml.BentoService):

    def get_index(self, item):
        if item in self.artifacts.index_map:
            return self.artifacts.index_map[item]
        else:
            return 0

    def setup_scores(self, features, n_neighbors):
        # get indexes of neighbors
        neighbors_idxs = self.artifacts.knn.kneighbors(X=features, n_neighbors=n_neighbors, return_distance=False)
        # get clusters of neighbors
        knclusters = self.artifacts.cluster_path.labels_[neighbors_idxs]
        # create an array with the number of item interactions per cluster (per item)
        clicks = [self.artifacts.pop_matrix[c] for c in knclusters]
        clicks = np.asarray(clicks[0])
        # mean over the number of interactions to create a weighted score
        self.mean_scores = np.mean(clicks, axis=0)

    def get_score(self, index):
        # `== 0` instead of `is 0`: identity comparison with an int literal
        # only works by accident of interning
        if index == 0:
            return -1
        else:
            return self.mean_scores[index]

    @bentoml.api(JsonHandler)
    def rank(self, sample):
        n_neighbors = 10
        articles = sample['Article_List']
        indexed_articles = [self.get_index(art) for art in articles]
        user_features = sample['User_Features']
        self.setup_scores(np.asarray([user_features]), n_neighbors)
        scores = [self.get_score(idx) for idx in indexed_articles]
        output = [item for score, item in sorted(zip(scores, articles), reverse=True)]
        return {
            "articles": output,
            "scores": sorted(scores, reverse=True)
        }
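# A minimal packaging sketch (assumption: BentoML 0.x, matching the
# `bentoml.handlers` imports above; `knn_model`, `index_map`, `cluster_model`
# and `pop_matrix` are placeholder objects trained elsewhere):
#
#     service = ClusteredKNN()
#     service.pack("knn", knn_model)
#     service.pack("index_map", index_map)
#     service.pack("cluster_path", cluster_model)
#     service.pack("pop_matrix", pop_matrix)
#     saved_path = service.save()
#     # then serve it, e.g.: bentoml serve ClusteredKNN:latest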
StarcoderdataPython
3330898
# random_user_agent.py
from enums import OS, Chipset, Browser, Language
import random
from datetime import date, timedelta, datetime
from copy import deepcopy


def _random_date(start: date, end: date):
    """Generate a random date between `start` and `end`"""
    return start + timedelta(
        # Get a random amount of seconds between `start` and `end`
        seconds=random.randint(0, int((end - start).total_seconds())),
    )


class _State:
    os = None
    chipset = None
    browser = None
    language = None


class RandomUserAgent:
    """
    Generates a random user agent to be used in HTTP requests as your browser.

    Defaults: completely randomized
    """

    def __init__(self):
        self._state = _State()

    ##################################################################
    # Quick access
    ##################################################################
    @property
    def os(self):
        return self._state.os

    @property
    def language(self):
        return self._state.language

    @property
    def browser(self):
        return self._state.browser

    @property
    def chipset(self):
        return self._state.chipset

    ##################################################################
    # select OS
    ##################################################################
    def linux(self):
        self._state.os = OS.Linux
        return self

    def windows(self):
        self._state.os = OS.Windows
        return self

    def macosx(self):
        self._state.os = OS.MacOSX
        return self

    def android(self):
        self._state.os = OS.Android
        return self

    def ios(self):
        self._state.os = OS.iOS
        return self

    ##################################################################
    # select chipset
    ##################################################################
    def x86(self):
        self._state.chipset = Chipset.x86
        return self

    def x64(self):
        self._state.chipset = Chipset.x64
        return self

    def intel(self):
        self._state.chipset = Chipset.Intel
        return self

    def ppc(self):
        self._state.chipset = Chipset.PPC
        return self

    def uintel(self):
        self._state.chipset = Chipset.UIntel
        return self

    def uppc(self):
        self._state.chipset = Chipset.UPPC
        return self

    ##################################################################
    # Select browser
    ##################################################################
    def firefox(self):
        self._state.browser = Browser.Firefox
        return self

    def safari(self):
        self._state.browser = Browser.Safari
        return self

    def iexplorer(self):
        self._state.browser = Browser.IExplorer
        return self

    def opera(self):
        self._state.browser = Browser.Opera
        return self

    def chrome(self):
        self._state.browser = Browser.Chrome
        return self

    ##################################################################
    def set_language(self, lang: Language):
        self._state.language = lang
        return self

    ##################################################################
    def _validate(self):
        try:
            Chipset.check_if_ok_for_os(self.chipset, self.os)
            return True
        except ValueError:
            pass
        return False

    def _randomize_os(self):
        if self._state.os:
            return
        while True:
            self._state.os = random.choice(list(OS.__members__.values()))
            # If no chipset is chosen yet, any OS is fine; otherwise keep
            # drawing until the pair validates. (The original condition
            # `self._state.chipset and self._validate()` looped forever
            # whenever the chipset was still unset.)
            if not self._state.chipset or self._validate():
                return

    def _randomize_chipset(self):
        if self._state.chipset:
            return
        while True:
            self._state.chipset = random.choice(list(Chipset.__members__.values()))
            if self._validate():
                return

    def _randomize_browser(self):
        if self._state.browser:
            return
        self._state.browser = random.choice(list(Browser.__members__.values()))

    def _randomize_language(self):
        if self._state.language:
            return
        self._state.language = random.choice(list(Language.__members__.values()))

    def _randomize_firefox(self):
        ua = "Mozilla/5.0 "
        random_date = _random_date(date(2011, 1, 1), datetime.now().date()).strftime('%Y%m%d')
        ver = [
            f'Gecko/{random_date} Firefox/{random.randint(5, 7)}.0',
            f'Gecko/{random_date} Firefox/{random.randint(5, 7)}.0.1',
            f'Gecko/{random_date} Firefox/3.6.{random.randint(1, 20)}',
            f'Gecko/{random_date} Firefox/3.8',
        ]
        if self.os == OS.Windows:
            ua += f'(Windows NT {random.randint(5, 6)}.{random.randint(0, 1)}; '
            ua += self.language.value + '; '
            ua += f'rv:1.9.{random.randint(0, 2)}.20) '
            ua += random.choice(ver)
        elif self.os == OS.Linux:
            ua += f'(X11; Linux {self.chipset.value}; '
            ua += f'rv:{random.randint(5, 7)}.0) '
            ua += random.choice(ver)
        elif self.os == OS.MacOSX:
            ua += f'(Macintosh; {self.chipset.value} '
            ua += f'Mac OS X 10_{random.randint(5, 7)}_{random.randint(0, 9)} '
            ua += f'rv:{random.randint(2, 6)}.0) '
            ua += random.choice(ver)
        else:
            raise NotImplementedError
        return ua

    def _randomize_safari(self):
        ua = 'Mozilla/5.0 '
        saf = f'{random.randint(531, 535)}.{random.randint(1, 50)}.{random.randint(1, 7)}'
        if random.randint(0, 1) == 0:
            ver = f'{random.randint(4, 5)}.{random.randint(0, 1)}'
        else:
            ver = f'{random.randint(4, 5)}.0.{random.randint(1, 5)}'
        if self.os == OS.Windows:
            ua += f'(Windows; U; Windows NT {random.randint(5, 6)}.{random.randint(0, 1)}) '
            ua += f'AppleWebKit/{saf} (KHTML, like Gecko) '
            ua += f'Version/{ver} '
            ua += f'Safari/{saf}'
        elif self.os == OS.MacOSX:
            ua += f'(Macintosh; U; {self.chipset.value} '
            ua += f'Mac OS X 10_{random.randint(5, 7)}_{random.randint(0, 9)} '
            ua += f'rv:{random.randint(2, 6)}.0; '
            ua += f'{self.language.value}) '
            ua += f'AppleWebKit/{saf} (KHTML, like Gecko) '
            ua += f'Version/{ver} '
            ua += f'Safari/{saf}'
        else:
            raise NotImplementedError
        return ua

    def _randomize_iexplorer(self):
        ua = f'Mozilla/{random.randint(4, 5)}.0 '
        ie_extra = [
            '',
            f'; .NET CLR 1.1.{random.randint(4320, 4325)}',
            '; WOW64',
        ]
        if self.os == OS.Windows:
            ua += f'(compatible; '
            ua += f'MSIE {random.randint(5, 9)}.0; '
            ua += f'Windows NT {random.randint(5, 6)}.{random.randint(0, 1)}; '
            ua += f'Trident/{random.randint(3, 5)}.{random.randint(0, 1)})'
            ua += random.choice(ie_extra)
        else:
            raise NotImplementedError
        return ua

    def _randomize_opera(self):
        ua = f'Opera/{random.randint(8, 9)}.{random.randint(10, 99)} '
        op_extra = [
            '',
            f'; .NET CLR 1.1.{random.randint(4320, 4325)}',
            '; WOW64',
        ]
        if self.os == OS.Linux:
            ua += f'(X11; Linux {self.chipset.value}; U; '
            ua += f'{self.language.value}) '
            ua += f'Presto/2.9.{random.randint(160, 190)} '
            ua += f'Version/{random.randint(10, 12)}.00'
            ua += random.choice(op_extra)
        elif self.os == OS.Windows:
            ua += f'(Windows NT {random.randint(5, 6)}.{random.randint(0, 1)}; U; '
            ua += f'{self.language.value}) '
            ua += f'Presto/2.9.{random.randint(160, 190)} '
            ua += f'Version/{random.randint(10, 12)}.00'
            ua += random.choice(op_extra)
        else:
            raise NotImplementedError
        return ua

    def _randomize_chrome(self):
        # trailing space added: the original 'Mozilla/5.0' concatenated
        # directly onto the platform token
        ua = 'Mozilla/5.0 '
        saf = f'{random.randint(531, 536)}.{random.randint(0, 2)}'
        if self.os == OS.Linux:
            ua += f'(X11; Linux {self.chipset.value}) '
            ua += f'AppleWebKit/{saf} '
            ua += f'(KHTML, like Gecko) Chrome/{random.randint(13, 15)}.0.{random.randint(800, 899)}.0 '
            ua += f'Safari/{saf}'
        elif self.os == OS.Windows:
            ua += f'(Windows NT {random.randint(5, 6)}.{random.randint(0, 1)}) '
            ua += f'AppleWebKit/{saf} '
            ua += f'(KHTML, like Gecko) '
            ua += f'Chrome/{random.randint(13, 15)}.0.{random.randint(800, 899)}.0 '
            ua += f'Safari/{saf}'
        elif self.os == OS.MacOSX:
            ua += f'(Macintosh; U; {self.chipset.value} Mac OS X '
            ua += f'10_{random.randint(5, 7)}_{random.randint(0, 9)}) '
            ua += f'AppleWebKit/{saf} '
            ua += f'(KHTML, like Gecko) '
            ua += f'Chrome/{random.randint(13, 15)}.0.{random.randint(800, 899)}.0 '
            ua += f'Safari/{saf}'
        else:
            raise NotImplementedError
        return ua

    def build(self):
        current_state = deepcopy(self._state)
        error_counter = 20
        while error_counter > 0:
            # Keep trying until you get a decent combination.
            try:
                self._randomize_os()
                self._randomize_chipset()
                self._randomize_browser()
                self._randomize_language()
                if not self._validate():
                    # invalid OS/chipset pair: restore and retry (the original
                    # fell through and returned an unvalidated combination)
                    error_counter -= 1
                    self._state = deepcopy(current_state)
                    continue
                # Call it like _randomize_firefox
                return getattr(self, f'_randomize_{self._state.browser.name.lower()}')()
            except NotImplementedError:
                error_counter -= 1
                # restore previous state to try something else
                self._state = deepcopy(current_state)
        raise ValueError("Invalid combination passed. Can't handle this!")


if __name__ == '__main__':
    rua = RandomUserAgent()
    print(rua.linux().firefox().build())
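# The `enums` module imported above is not part of this file. A minimal sketch
# of what the code assumes it provides; member names are taken from how they
# are used here, and the OS/chipset pairing table is a guess, not the
# project's actual rules.
#
#     from enum import Enum
#
#     class OS(Enum):
#         Linux = 'Linux'
#         Windows = 'Windows'
#         MacOSX = 'MacOSX'
#         Android = 'Android'
#         iOS = 'iOS'
#
#     class Chipset(Enum):
#         x86 = 'i686'
#         x64 = 'x86_64'
#         Intel = 'Intel'
#         PPC = 'PPC'
#         UIntel = 'U; Intel'
#         UPPC = 'U; PPC'
#
#         @staticmethod
#         def check_if_ok_for_os(chipset, os):
#             # hypothetical pairing table; raise ValueError on a bad combo
#             allowed = {
#                 OS.Linux: {Chipset.x86, Chipset.x64},
#                 OS.Windows: {Chipset.x86, Chipset.x64},
#                 OS.MacOSX: {Chipset.Intel, Chipset.PPC,
#                             Chipset.UIntel, Chipset.UPPC},
#             }
#             if chipset is not None and os is not None \
#                     and chipset not in allowed.get(os, set()):
#                 raise ValueError(f'{chipset} is not valid for {os}')
#
#     class Browser(Enum):
#         Firefox = 'Firefox'
#         Safari = 'Safari'
#         IExplorer = 'IExplorer'
#         Opera = 'Opera'
#         Chrome = 'Chrome'
#
#     class Language(Enum):
#         EN_US = 'en-US'
#         EN_GB = 'en-GB'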
StarcoderdataPython
3370016
from foundations_spec import *


class DockerTestMixin(object):

    @let
    def docker_client(self):
        import docker
        return docker.from_env()

    def _create_temp_directories(self, *directory_names):
        import tempfile

        self._temp_directories = {}

        for directory_name in directory_names:
            self._temp_directories[directory_name] = tempfile.mkdtemp()

    def _cleanup_temp_directories(self):
        import shutil

        for directory_name in self._temp_directories:
            shutil.rmtree(self._temp_directories[directory_name], ignore_errors=True)
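# A hypothetical spec using the mixin (assumption: foundations_spec exports a
# `Spec` base class plus `set_up`/`tear_down` decorators alongside `let`;
# names below are illustrative, not from the original repo):
#
#     class ExampleDockerSpec(Spec, DockerTestMixin):
#
#         @set_up
#         def set_up(self):
#             self._create_temp_directories('data', 'logs')
#
#         @tear_down
#         def tear_down(self):
#             self._cleanup_temp_directories()
#
#         def test_echo_in_container(self):
#             output = self.docker_client.containers.run(
#                 'alpine', ['echo', 'hello'], remove=True)
#             self.assertEqual(b'hello\n', output)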
StarcoderdataPython
3279943
from dataclasses import asdict

from aiohttp import web
from schematics.exceptions import DataError
from asyncpg.exceptions import UniqueViolationError

from src import db
from src.models.user import User
from src.models.paginator import Paginator
from src.models.game import State
from src.encrypt import encrypt_jwt, is_same_messages
from src.decorators import auth_required


async def ping(request: web.Request) -> web.Response:
    return web.json_response({'message': 'pong'})


async def registration(request: web.Request) -> web.Response:
    data = await request.json()
    user = User(data)
    try:
        user.validate()
        await db.create_user(request.app['pool'], user)
    except DataError as e:
        return web.json_response(e.to_primitive(), status=400)
    except UniqueViolationError:
        return web.json_response({'message': 'user with that name already exists'}, status=409)
    return web.json_response({'message': 'OK'}, status=201)


async def login(request: web.Request) -> web.Response:
    data = await request.json()
    remember = data.pop('remember', False)
    user = User(data)
    try:
        user.validate()
    except DataError as e:
        return web.json_response(e.to_primitive(), status=400)

    stored_user = await db.get_user(request.app['pool'], user)
    if stored_user is None:
        return web.json_response({'message': 'user does not exist'}, status=404)

    if not is_same_messages(stored_user.password, user.password):
        return web.json_response({'message': 'wrong password'}, status=400)

    response = web.json_response({'message': 'OK'}, status=200)
    response.set_cookie(
        name='token',
        value=encrypt_jwt(id=stored_user.id, username=stored_user.username),
        httponly=True,
        max_age=None if remember else 3600 * 24 * 7
    )
    return response


async def logout(request: web.Request) -> web.Response:
    response = web.json_response({'message': 'OK'})
    response.del_cookie(name='token')
    return response


@auth_required
async def create_game(request: web.Request) -> web.Response:
    game = await db.create_game(request.app['pool'], request.user)
    return web.json_response(asdict(game), status=201)


@auth_required
async def login_game(request: web.Request) -> web.Response:
    game = await db.get_game(request.app['pool'], int(request.match_info['_id']))
    if game is None:
        return web.json_response({'message': 'game not found'}, status=404)
    if game.current_state != State.PENDING.value:
        return web.json_response({'message': 'invalid state'}, status=400)

    game.set_opponent(request.user)
    await db.update_game(request.app['pool'], game)
    return web.json_response(asdict(game), status=200)


async def get_game(request: web.Request) -> web.Response:
    game = await db.get_game(request.app['pool'], int(request.match_info['_id']))
    if game is None:
        return web.json_response({'message': 'game not found'}, status=404)
    return web.json_response(asdict(game), status=200)


async def get_games(request: web.Request) -> web.Response:
    count_games = await db.get_total_games(request.app['pool'])
    paginator = Paginator(
        count_games,
        request.query.get('page', '1'),
        request.query.get('limit', '10')
    )
    games = await db.get_game_list(request.app['pool'], paginator.page, paginator.limit)
    return web.json_response(
        {
            'games': [game.to_json() for game in games],
            'paginator': paginator.to_json()
        },
        status=200
    )
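# A minimal wiring sketch showing how these handlers could be registered on an
# aiohttp application. The route paths are assumptions for illustration; the
# project's actual routing table is not part of this file.
def create_app(pool) -> web.Application:
    app = web.Application()
    app['pool'] = pool  # asyncpg pool, as the handlers above expect
    app.add_routes([
        web.get('/ping', ping),
        web.post('/registration', registration),
        web.post('/login', login),
        web.post('/logout', logout),
        web.post('/games', create_game),
        web.post('/games/{_id}/login', login_game),
        web.get('/games/{_id}', get_game),
        web.get('/games', get_games),
    ])
    return app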
StarcoderdataPython
1765494
def error(text):
    print("\033[31m" + text + "\033[0m")


def warning(text):
    print("\033[33m" + text + "\033[0m")


def info(text):
    print("\033[32m" + text + "\033[0m")
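# Demo: each helper wraps the text in an ANSI SGR color code
# (31 = red, 33 = yellow, 32 = green) and resets styling with \033[0m.
if __name__ == '__main__':
    error('something failed')
    warning('double-check this')
    info('all good')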
StarcoderdataPython
3376513
from sqlalchemy import Column, String

from app.db.base_class import Base


class User(Base):
    username = Column(String, primary_key=True, index=True)
    full_name = Column(String)
    hashed_password = Column(String)
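# `app.db.base_class.Base` is not shown here; a common sketch for this import
# path (assumption: a declarative base that derives __tablename__ from the
# class name, as in the FastAPI full-stack template):
#
#     from sqlalchemy.ext.declarative import as_declarative, declared_attr
#
#     @as_declarative()
#     class Base:
#
#         @declared_attr
#         def __tablename__(cls) -> str:
#             return cls.__name__.lower()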
StarcoderdataPython
1699172
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Tests for scripts/master/master_gen.py."""

import os
import tempfile
import unittest

# This adjusts sys.path, so it must be imported before the other modules.
import test_env  # pylint: disable=W0403

from master import master_gen


SAMPLE_WATERFALL_PYL = """\
{
  "master_base_class": "_FakeMasterBase",
  "master_port": 20999,
  "master_port_alt": 40999,
  "bot_port": 30999,
  "templates": ["templates"],

  "builders": {
    "Test Linux": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_recipe",
      "scheduler": "test_repo",
      "bot_pools": ["main"],
      "botbuilddir": "test"
    },
    "Test Linux Timeouts": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_recipe",
      "scheduler": "test_repo",
      "bot_pools": ["main"],
      "botbuilddir": "test",
      "builder_timeout_s": 7200,
      "no_output_timeout_s": 3600,
    },
    "Test Linux Remote Run": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_recipe",
      "scheduler": "test_repo",
      "bot_pools": ["main"],
      "botbuilddir": "test",
      "use_remote_run": True,
    },
    "Test Linux Nightly": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_nightly_recipe",
      "scheduler": "nightly",
      "bot_pools": ["main"],
      "botbuilddir": "test",
      "category": "0nightly"
    }
  },

  "schedulers": {
    "nightly": {
      "type": "cron",
      "hour": 4,
      "minute": 15,
    },
    "test_repo": {
      "type": "git_poller",
      "git_repo_url": "https://chromium.googlesource.com/test/test.git",
    },
  },

  "bot_pools": {
    "main": {
      "bot_data": {
        "bits": 64,
        "os": "linux",
        "version": "precise"
      },
      "bots": ["vm9999-m1"],
    },
  },
}
"""


SAMPLE_TRYSERVER_PYL = """\
{
  "master_base_class": "_FakeMasterBase",
  "master_port": 20999,
  "master_port_alt": 40999,
  "bot_port": 30999,
  "buildbucket_bucket": "fake_bucket",
  "service_account_file": "fake_service_account",
  "templates": ["templates"],

  "builders": {
    "Test Linux": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_recipe",
      "scheduler": None,
      "bot_pools": ["main"],
      "botbuilddir": "test"
    }
  },

  "schedulers": {},

  "bot_pools": {
    "main": {
      "bot_data": {
        "bits": 64,
        "os": "linux",
        "version": "precise"
      },
      "bots": ["vm{9998..9999}-m1"],
    },
  },
}
"""


# This class fakes the base class from master_site_config.py.
class _FakeMasterBase(object):
  in_production = False
  is_production_host = False


# This class fakes the actual master class in master_site_config.py.
class _FakeMaster(_FakeMasterBase):
  project_name = 'test'
  master_port = '20999'
  slave_port = '30999'
  master_port_alt = '40999'
  buildbot_url = 'https://build.chromium.org/p/test'
  buildbucket_bucket = None
  service_account_file = None


class PopulateBuildmasterConfigTest(unittest.TestCase):
  def verify_timeouts(self, builder, expected_builder_timeout=None,
                      expected_no_output_timeout=2400):
    steps = builder['factory'].steps
    self.assertEqual(1, len(steps))
    step_dict = steps[0][1]
    self.assertEqual(step_dict['maxTime'], expected_builder_timeout)
    self.assertEqual(step_dict['timeout'], expected_no_output_timeout)

  def test_waterfall(self):
    try:
      fp = tempfile.NamedTemporaryFile(delete=False)
      fp.write(SAMPLE_WATERFALL_PYL)
      fp.close()

      c = {}
      master_gen.PopulateBuildmasterConfig(c, fp.name, _FakeMaster)

      c['builders'] = sorted(c['builders'])
      self.assertEqual(len(c['builders']), 4)
      self.assertEqual(c['builders'][0]['name'], 'Test Linux')
      self.verify_timeouts(c['builders'][0])

      self.assertEqual(c['builders'][1]['name'], 'Test Linux Timeouts')
      self.verify_timeouts(c['builders'][1], 7200, 3600)

      self.assertEqual(c['builders'][2]['name'], 'Test Linux Remote Run')
      self.verify_timeouts(c['builders'][2])

      self.assertEqual(len(c['change_source']), 1)
      self.assertEqual(len(c['schedulers']), 2)
    finally:
      os.remove(fp.name)

  def test_tryservers(self):
    try:
      fp = tempfile.NamedTemporaryFile(delete=False)
      fp.write(SAMPLE_TRYSERVER_PYL)
      fp.close()

      c = {}
      master_gen.PopulateBuildmasterConfig(c, fp.name, _FakeMaster)
      self.assertEqual(len(c['builders']), 1)
      self.assertEqual(c['builders'][0]['name'], 'Test Linux')
      self.assertEqual(set(s.slavename for s in c['slaves']),
                       set(['vm9998-m1', 'vm9999-m1']))
      self.assertEqual(len(c['change_source']), 0)
      self.assertEqual(len(c['schedulers']), 0)
    finally:
      os.remove(fp.name)


# TODO: Remove this code once all of the builders.pyl formats have
# been upgraded to the new nomenclature.
OLD_TRYSERVER_PYL = """\
{
  "master_base_class": "_FakeMasterBase",
  "master_port": 20999,
  "master_port_alt": 40999,
  "slave_port": 30999,
  "buildbucket_bucket": "fake_bucket",
  "service_account_file": "fake_service_account",
  "templates": ["templates"],

  "builders": {
    "Test Linux": {
      "properties": {
        "config": "Release"
      },
      "recipe": "test_recipe",
      "scheduler": None,
      "slave_pools": ["main"],
      "slavebuilddir": "test"
    }
  },

  "schedulers": {},

  "slave_pools": {
    "main": {
      "slave_data": {
        "bits": 64,
        "os": "linux",
        "version": "precise"
      },
      "slaves": ["vm9999-m1"],
    },
  },
}
"""


class OldNomenclature(unittest.TestCase):
  def test_old_nomenclature(self):
    try:
      fp = tempfile.NamedTemporaryFile(delete=False)
      fp.write(OLD_TRYSERVER_PYL)
      fp.close()

      c = {}
      master_gen.PopulateBuildmasterConfig(c, fp.name, _FakeMaster)
      self.assertEqual(len(c['builders']), 1)
      self.assertEqual(c['builders'][0]['name'], 'Test Linux')
      self.assertEqual(len(c['change_source']), 0)
      self.assertEqual(len(c['schedulers']), 0)
    finally:
      os.remove(fp.name)


if __name__ == '__main__':
  unittest.TestCase.maxDiff = None
  unittest.main()
StarcoderdataPython
168925
# --*-- coding : utf-8 --*--
"""Author: Trinity Core Team

MIT License

Copyright (c) 2018 Trinity

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""

import unittest


class Web3ClientTestFactory(unittest.TestCase):
    """ Test Suite for Web3 Client methods """

    def setUp(self):
        self.ta_wallet_1 = ''
        self.ta_wallet_key_1 = ''
        self.ta_wallet_2 = ''
        self.ta_wallet_key_2 = ''

    def tearDown(self):
        pass

    def test_estimate_gas_for_increaseApproval(self):
        pass

    def test_estimate_gas_for_setSettleTimeout(self):
        pass

    def test_estimate_gas_for_setToken(self):
        pass

    def test_estimate_gas_for_deposit(self):
        pass

    def test_estimate_gas_for_updateDeposit(self):
        pass

    def test_estimate_gas_for_quickCloseChannel(self):
        pass

    def test_estimate_gas_for_withdrawBalance(self):
        pass

    def test_estimate_gas_for_closeChannel(self):
        pass

    def test_estimate_gas_for_updateTransaction(self):
        pass

    def test_estimate_gas_for_settleTransaction(self):
        pass

    def test_estimate_gas_for_withdraw(self):
        pass

    def test_estimate_gas_for_withdrawSettle(self):
        pass
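# A hedged sketch of how one of these stubs might be filled in with web3.py
# (v5-style `estimateGas`). The Trinity channel contract, its ABI and the
# wallet addresses are not part of this file, so `contract` and the argument
# list below are assumptions for illustration only:
#
#     def test_estimate_gas_for_deposit(self):
#         gas = contract.functions.deposit(
#             self.ta_wallet_1, self.ta_wallet_2, 10
#         ).estimateGas({'from': self.ta_wallet_1})
#         self.assertGreater(gas, 21000)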
StarcoderdataPython
1621542
# crisiscleanup/calls/migrations/0017_AdditionalModelUpdates.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-07 04:23
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('calls', '0016_FleshOutModels'),
    ]

    operations = [
        migrations.AlterField(
            model_name='call',
            name='call_start',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='call',
            name='duration',
            field=models.PositiveIntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='last_used_state',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
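# Applying this migration follows the standard Django workflow, e.g.:
#
#     python manage.py migrate calls 0017_AdditionalModelUpdates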
StarcoderdataPython
1670605
#!/usr/bin/env python3
############################################################################################
#                                                                                          #
#       Program purpose: Select the odd items of a list.                                   #
#       Program Author : <NAME> <<EMAIL>>                                                  #
#       Creation Date  : November 18, 2019                                                 #
#                                                                                          #
############################################################################################
import random


def random_int_list(low: int, high: int, list_size: int) -> list:
    if list_size < 0:
        raise ValueError('Invalid list size')
    return [random.randint(low, high) for _ in range(list_size)]


def get_odd_items(some_list: list) -> list:
    return some_list[::2]


if __name__ == "__main__":
    new_list_data = random_int_list(low=0, high=15, list_size=10)
    print(f'Generated list data: {new_list_data}')
    print(f'  Odd items in list: {get_odd_items(some_list=new_list_data)}')
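# Note on the slicing: `some_list[::2]` keeps indices 0, 2, 4, ..., i.e. the
# elements at odd *positions* (1st, 3rd, 5th, ...). For example:
#
#     >>> get_odd_items([10, 11, 12, 13, 14])
#     [10, 12, 14]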
StarcoderdataPython
1765170
"""
Optimal binning algorithm 2D.
"""

# <NAME> <<EMAIL>>
# Copyright (C) 2021

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from mpl_toolkits.axes_grid1 import make_axes_locatable

from ...formatting import dataframe_to_string
from ..binning_statistics import _check_build_parameters
from ..binning_statistics import _check_is_built
from ..binning_statistics import BinningTable
from ..metrics import bayesian_probability
from ..metrics import binning_quality_score
from ..metrics import chi2_cramer_v
from ..metrics import frequentist_pvalue
from ..metrics import hhi
from ..metrics import gini
from ..metrics import hellinger
from ..metrics import jeffrey
from ..metrics import jensen_shannon
from ..metrics import triangular


def _bin_fmt(bin, show_digits):
    if np.isinf(bin[0]):
        return "({0:.{2}f}, {1:.{2}f})".format(bin[0], bin[1], show_digits)
    else:
        return "[{0:.{2}f}, {1:.{2}f})".format(bin[0], bin[1], show_digits)


def bin_xy_str_format(bins_x, bins_y, show_digits):
    show_digits = 2 if show_digits is None else show_digits

    bins_xy = []
    for bx, by in zip(bins_x, bins_y):
        _bx = _bin_fmt(bx, show_digits)
        _by = _bin_fmt(by, show_digits)
        bins_xy.append(r"{} $\cup$ {}".format(_bx, _by))

    return bins_xy


def bin_str_format(bins, show_digits):
    show_digits = 2 if show_digits is None else show_digits

    bin_str = []
    for bin in bins:
        bin_str.append(_bin_fmt(bin, show_digits))

    return bin_str


class BinningTable2D(BinningTable):
    """Binning table to summarize optimal binning of two numerical variables
    with respect to a binary target.

    Parameters
    ----------
    name_x : str, optional (default="")
        The name of variable x.

    name_y : str, optional (default="")
        The name of variable y.

    dtype_x : str, optional (default="numerical")
        The data type of variable x. Supported data type is "numerical" for
        continuous and ordinal variables.

    dtype_y : str, optional (default="numerical")
        The data type of variable y. Supported data type is "numerical" for
        continuous and ordinal variables.

    splits_x : numpy.ndarray
        List of split points for variable x.

    splits_y : numpy.ndarray
        List of split points for variable y.

    m : int
        Number of rows of the 2D array.

    n : int
        Number of columns of the 2D array.

    n_nonevent : numpy.ndarray
        Number of non-events.

    n_event : numpy.ndarray
        Number of events.

    D : numpy.ndarray
        Event rate 2D array.

    P : numpy.ndarray
        Records 2D array.

    Warning
    -------
    This class is not intended to be instantiated by the user. It is
    preferable to use the class returned by the property ``binning_table``
    available in all optimal binning classes.
    """
    def __init__(self, name_x, name_y, dtype_x, dtype_y, splits_x, splits_y,
                 m, n, n_nonevent, n_event, D, P):
        self.name_x = name_x
        self.name_y = name_y
        self.dtype_x = dtype_x
        self.dtype_y = dtype_y
        self.splits_x = splits_x
        self.splits_y = splits_y
        self.m = m
        self.n = n
        self.n_nonevent = n_nonevent
        self.n_event = n_event
        self.D = D
        self.P = P

        self._is_built = False
        self._is_analyzed = False

    def build(self, show_digits=2, show_bin_xy=False, add_totals=True):
        """Build the binning table.

        Parameters
        ----------
        show_digits : int, optional (default=2)
            The number of significant digits of the bin column.

        show_bin_xy : bool (default=False)
            Whether to show a single bin column with x and y.

        add_totals : bool (default=True)
            Whether to add a last row with totals.

        Returns
        -------
        binning_table : pandas.DataFrame
        """
        _check_build_parameters(show_digits, add_totals)

        if not isinstance(show_bin_xy, bool):
            raise TypeError("show_bin_xy must be a boolean; got {}."
                            .format(show_bin_xy))

        n_nonevent = self.n_nonevent
        n_event = self.n_event

        n_records = n_event + n_nonevent
        t_n_nonevent = n_nonevent.sum()
        t_n_event = n_event.sum()
        t_n_records = t_n_nonevent + t_n_event
        t_event_rate = t_n_event / t_n_records

        p_records = n_records / t_n_records
        p_event = n_event / t_n_event
        p_nonevent = n_nonevent / t_n_nonevent

        mask = (n_event > 0) & (n_nonevent > 0)
        event_rate = np.zeros(len(n_records))
        woe = np.zeros(len(n_records))
        iv = np.zeros(len(n_records))
        js = np.zeros(len(n_records))

        # Compute weight of evidence and event rate
        event_rate[mask] = n_event[mask] / n_records[mask]
        constant = np.log(t_n_event / t_n_nonevent)
        woe[mask] = np.log(1 / event_rate[mask] - 1) + constant
        W = np.log(1 / self.D - 1) + constant

        # Compute Gini
        self._gini = gini(self.n_event, self.n_nonevent)

        # Compute divergence measures
        p_ev = p_event[mask]
        p_nev = p_nonevent[mask]

        iv[mask] = jeffrey(p_ev, p_nev, return_sum=False)
        js[mask] = jensen_shannon(p_ev, p_nev, return_sum=False)
        t_iv = iv.sum()
        t_js = js.sum()

        self._iv = t_iv
        self._js = t_js
        self._hellinger = hellinger(p_ev, p_nev, return_sum=True)
        self._triangular = triangular(p_ev, p_nev, return_sum=True)

        # Keep data for plotting
        self._n_records = n_records
        self._event_rate = event_rate
        self._woe = woe
        self._W = W

        # Compute KS
        self._ks = np.abs(p_event.cumsum() - p_nonevent.cumsum()).max()

        # Compute HHI
        self._hhi = hhi(p_records)
        self._hhi_norm = hhi(p_records, normalized=True)

        # Compute paths. This is required for both plot and analysis
        # paths x: horizontal
        self._paths_x = []
        for i in range(self.m):
            path = tuple(dict.fromkeys(self.P[i, :]))
            if path not in self._paths_x:
                self._paths_x.append(path)

        # paths y: vertical
        self._paths_y = []
        for j in range(self.n):
            path = tuple(dict.fromkeys(self.P[:, j]))
            if path not in self._paths_y:
                self._paths_y.append(path)

        if show_bin_xy:
            bin_xy_str = bin_xy_str_format(self.splits_x, self.splits_y,
                                           show_digits)
            bin_xy_str.extend(["Special", "Missing"])

            df = pd.DataFrame({
                "Bin": bin_xy_str,
                "Count": n_records,
                "Count (%)": p_records,
                "Non-event": n_nonevent,
                "Event": n_event,
                "Event rate": event_rate,
                "WoE": woe,
                "IV": iv,
                "JS": js
                })
        else:
            bin_x_str = bin_str_format(self.splits_x, show_digits)
            bin_y_str = bin_str_format(self.splits_y, show_digits)

            bin_x_str.extend(["Special", "Missing"])
            bin_y_str.extend(["Special", "Missing"])

            df = pd.DataFrame({
                "Bin x": bin_x_str,
                "Bin y": bin_y_str,
                "Count": n_records,
                "Count (%)": p_records,
                "Non-event": n_nonevent,
                "Event": n_event,
                "Event rate": event_rate,
                "WoE": woe,
                "IV": iv,
                "JS": js
                })

        if add_totals:
            if show_bin_xy:
                totals = ["", t_n_records, 1, t_n_nonevent, t_n_event,
                          t_event_rate, "", t_iv, t_js]
            else:
                totals = ["", "", t_n_records, 1, t_n_nonevent, t_n_event,
                          t_event_rate, "", t_iv, t_js]

            df.loc["Totals"] = totals

        self._is_built = True

        return df

    def plot(self, metric="woe", savefig=None):
        """Plot the binning table.

        Visualize the non-event and event count, and the Weight of Evidence
        or the event rate for each bin.

        Parameters
        ----------
        metric : str, optional (default="woe")
            Supported metrics are "woe" to show the Weight of Evidence (WoE)
            measure and "event_rate" to show the event rate.

        savefig : str or None (default=None)
            Path to save the plot figure.
        """
        _check_is_built(self)

        if metric not in ("event_rate", "woe"):
            raise ValueError('Invalid value for metric. Allowed string '
                             'values are "event_rate" and "woe".')

        if metric == "woe":
            metric_values = self._woe
            metric_matrix = self._W
            metric_label = "WoE"
        elif metric == "event_rate":
            metric_values = self._event_rate
            metric_matrix = self.D
            metric_label = "Event rate"

        fig, ax = plt.subplots(figsize=(7, 7))

        divider = make_axes_locatable(ax)
        axtop = divider.append_axes("top", size=2.5, pad=0.1, sharex=ax)
        axright = divider.append_axes("right", size=2.5, pad=0.1, sharey=ax)
        # Hide x labels and tick labels for top plots and y ticks for
        # right plots.

        # Position [0, 0]
        for path in self._paths_x:
            er = sum([[metric_values[p]] * np.count_nonzero(
                self.P == p, axis=1).max() for p in path], [])
            er = er + [er[-1]]
            axtop.step(np.arange(self.n + 1) - 0.5, er, label=path,
                       where="post")

        for i in range(self.n):
            axtop.axvline(i + 0.5, color="grey", linestyle="--", alpha=0.5)

        axtop.get_xaxis().set_visible(False)
        axtop.set_ylabel(metric_label, fontsize=12)

        # Position [1, 0]
        pos = ax.matshow(metric_matrix, cmap=plt.cm.bwr)
        for j in range(self.n):
            for i in range(self.m):
                c = int(self.P[i, j])
                ax.text(j, i, str(c), va='center', ha='center')

        fig.colorbar(pos, ax=ax, orientation="horizontal",
                     fraction=0.025, pad=0.125)

        ax.xaxis.set_label_position("bottom")
        ax.xaxis.tick_bottom()
        ax.set_ylabel("Bin ID - y ({})".format(self.name_x), fontsize=12)
        ax.set_xlabel("Bin ID - x ({})".format(self.name_y), fontsize=12)

        # Position [1, 1]
        for path in self._paths_y:
            er = sum([[metric_values[p]] * (np.count_nonzero(
                self.P == p, axis=0).max()) for p in path], [])
            er = er + [er[-1]]
            axright.step(er, np.arange(self.m + 1) - 0.5, label=path,
                         where="pre")

        for j in range(self.m):
            axright.axhline(j - 0.5, color="grey", linestyle="--", alpha=0.5)

        axright.get_yaxis().set_visible(False)
        axright.set_xlabel(metric_label, fontsize=12)

        # adjust margins
        axright.margins(y=0)
        axtop.margins(x=0)
        plt.tight_layout()

        axtop.legend(bbox_to_anchor=(1, 1))
        axright.legend(bbox_to_anchor=(1, 1))

        if savefig is None:
            plt.show()
        else:
            if not isinstance(savefig, str):
                raise TypeError("savefig must be a string path; got {}."
                                .format(savefig))
            plt.savefig(savefig)
            plt.close()

    def analysis(self, pvalue_test="chi2", n_samples=100, print_output=True):
        """Binning table analysis.

        Statistical analysis of the binning table, computing the statistics
        Gini index, Information Value (IV), Jensen-Shannon divergence, and
        the quality score. Additionally, several statistical significance
        tests between consecutive bins of the contingency table are
        performed: a frequentist test using the Chi-square test or the
        Fisher's exact test, and a Bayesian A/B test using the beta
        distribution as a conjugate prior of the Bernoulli distribution.

        Parameters
        ----------
        pvalue_test : str, optional (default="chi2")
            The statistical test. Supported test are "chi2" to choose the
            Chi-square test and "fisher" to choose the Fisher exact test.

        n_samples : int, optional (default=100)
            The number of samples to run the Bayesian A/B testing between
            consecutive bins to compute the probability of the event rate of
            bin A being greater than the event rate of bin B.

        print_output : bool (default=True)
            Whether to print analysis information.

        Notes
        -----
        The Chi-square test uses `scipy.stats.chi2_contingency
        <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.
        chi2_contingency.html>`_, and the Fisher exact test uses
        `scipy.stats.fisher_exact <https://docs.scipy.org/doc/scipy/reference/
        generated/scipy.stats.fisher_exact.html>`_.
        """
        pairs = set()
        for path in self._paths_x:
            tpairs = tuple(zip(path[:-1], path[1:]))
            for tp in tpairs:
                pairs.add(tp)

        for path in self._paths_y:
            tpairs = tuple(zip(path[:-1], path[1:]))
            for tp in tpairs:
                pairs.add(tp)

        pairs = sorted(pairs)

        # Significance tests
        n_bins = len(self._n_records)
        n_metric = n_bins - 2

        n_nev = self.n_nonevent[:n_metric]
        n_ev = self.n_event[:n_metric]

        if len(n_nev) >= 2:
            chi2, cramer_v = chi2_cramer_v(n_nev, n_ev)
        else:
            cramer_v = 0

        t_statistics = []
        p_values = []
        p_a_b = []
        p_b_a = []
        for pair in pairs:
            obs = np.array([n_nev[list(pair)], n_ev[list(pair)]])
            t_statistic, p_value = frequentist_pvalue(obs, pvalue_test)
            pab, pba = bayesian_probability(obs, n_samples)

            p_a_b.append(pab)
            p_b_a.append(pba)

            t_statistics.append(t_statistic)
            p_values.append(p_value)

        df_tests = pd.DataFrame({
                "Bin A": np.array([p[0] for p in pairs]),
                "Bin B": np.array([p[1] for p in pairs]),
                "t-statistic": t_statistics,
                "p-value": p_values,
                "P[A > B]": p_a_b,
                "P[B > A]": p_b_a
            })

        if pvalue_test == "fisher":
            df_tests.rename(columns={"t-statistic": "odd ratio"},
                            inplace=True)

        tab = 4
        if len(df_tests):
            df_tests_string = dataframe_to_string(df_tests, tab)
        else:
            df_tests_string = " " * tab + "None"

        # Quality score
        self._quality_score = binning_quality_score(self._iv, p_values,
                                                    self._hhi_norm)

        report = (
            "------------------------------------------------\n"
            "OptimalBinning: Binary Binning Table 2D Analysis\n"
            "------------------------------------------------\n"
            "\n"
            "    General metrics"
            "\n\n"
            "    Gini index          {:>15.8f}\n"
            "    IV (Jeffrey)        {:>15.8f}\n"
            "    JS (Jensen-Shannon) {:>15.8f}\n"
            "    Hellinger           {:>15.8f}\n"
            "    Triangular          {:>15.8f}\n"
            "    KS                  {:>15.8f}\n"
            "    HHI                 {:>15.8f}\n"
            "    HHI (normalized)    {:>15.8f}\n"
            "    Cramer's V          {:>15.8f}\n"
            "    Quality score       {:>15.8f}\n"
            "\n"
            "    Significance tests\n\n{}\n"
            ).format(self._gini, self._iv, self._js, self._hellinger,
                     self._triangular, self._ks, self._hhi, self._hhi_norm,
                     cramer_v, self._quality_score, df_tests_string)

        if print_output:
            print(report)

        self._is_analyzed = True
from collections import namedtuple from collections import defaultdict from biicode.common.model.brl.cell_name import CellName from biicode.common.model.symbolic.block_version import BlockVersion from biicode.common.utils.serializer import DictDeserializer, SetDeserializer from biicode.common.model.brl.block_cell_name import BlockCellName from biicode.common.model.resource import ResourceDeserializer from biicode.common.model.cells import CellDeserializer from biicode.common.model.content import ContentDeserializer from biicode.common.model.id import ID from biicode.common.model.declare.declaration import Declaration import copy class VersionDict(defaultdict): def __init__(self, items_type): super(VersionDict, self).__init__(items_type) def explode(self): items_type = self.default_factory() if isinstance(items_type, (set, list, tuple)): result = [] for k, v in self.iteritems(): result.extend([Reference(k, x) for x in v]) return result elif isinstance(items_type, dict): result = {} for k, v in self.iteritems(): for k2, v2 in v.iteritems(): result[Reference(k, k2)] = v2 return result raise ValueError('This type of VersionDict cannot be exploded') def __repr__(self): result = [str(self.__class__.__name__)] for k, v in self.iteritems(): result.append('%s: %s' % (k, v)) return ', '.join(result) class AbsoluteReferences(VersionDict): """{block_version: set(BlockCellName)} """ def __init__(self): super(AbsoluteReferences, self).__init__(set) class References(VersionDict): '''Dict of block_version -> Set[CellName]. It can also be {block_version: Set of BlockCellName}, for Dependencies translating with DependencyTranslator ''' def __init__(self): super(References, self).__init__(set) def add(self, reference): self[reference.block_version].add(reference.ref) def __deepcopy__(self, memo): '''this method is necessary for deepcopy in memory caches, defaultdict deepcopy __init__ signature is incompatible with current''' r = References() for key, values in self.iteritems(): r[key] = copy.deepcopy(values) return r @staticmethod def deserialize(data): if data is None: return None d = DictDeserializer(BlockVersion, SetDeserializer(CellName)).deserialize(data) result = References() result.update(d) return result class ReferencedResources(VersionDict): '''The dict items are dict {CellName: Resource(Cell, Content)}''' def __init__(self): super(ReferencedResources, self).__init__(dict) @staticmethod def deserialize(data): d = DictDeserializer(BlockVersion, DictDeserializer(CellName, ResourceDeserializer(CellDeserializer(ID), ContentDeserializer(ID)))).deserialize(data) result = ReferencedResources() result.update(d) return result def __add__(self, other): '''adds two referencedResources, for example localDb+remotes for building Snapshot of dependencies''' result = ReferencedResources() for version, deps in self.iteritems(): result[version].update(deps) for version, deps in other.iteritems(): result[version].update(deps) return result class ReferencedDependencies(VersionDict): '''The dict items are dict{Declaration: set(BlockCellName)}''' def __init__(self): super(ReferencedDependencies, self).__init__(lambda: defaultdict(set)) @staticmethod def deserialize(data): d = DictDeserializer(BlockVersion, DictDeserializer(Declaration, SetDeserializer(BlockCellName))).deserialize(data) result = ReferencedDependencies() result.update(d) return result class Reference(namedtuple('Reference', ['block_version', 'ref'])): '''Ref can only be a single ref ''' def __repr__(self): return "%s/%s" % (self[0], self[1]) @staticmethod 
def deserialize(data): return Reference(BlockVersion.deserialize(data[0]), CellName.deserialize(data[1])) def serialize(self): return (self.block_version.serialize(), self.ref) def block_cell_name(self): '''assuming that ref is a single CellName''' return self.block_version.block_name + self.ref
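# Added sketch (not from biicode): exercises References/Reference with plain
# strings standing in for real BlockVersion and CellName instances, purely to
# show the add()/explode() contract.
if __name__ == "__main__":
    refs = References()
    refs.add(Reference("user/block: 4", "main.cpp"))
    refs.add(Reference("user/block: 4", "util.h"))
    # explode() flattens {version: {cell, ...}} into [Reference(version, cell), ...]
    for ref in refs.explode():
        print(ref.block_version, ref.ref)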
import os from gppylib.commands.base import Command, REMOTE from gppylib.commands.unix import getLocalHostname gphome = os.environ.get('GPHOME') def get_command(local, command, hostname): if local: cmd = Command(hostname, cmdStr=command) else: cmd = Command(hostname, cmdStr=command, ctxt=REMOTE, remoteHost=hostname) return cmd def get_host_for_command(local, cmd): if local: cmd = Command(name='get the hostname', cmdStr='hostname') cmd.run(validateAfter=True) results = cmd.get_results() return results.stdout.strip() else: return cmd.remoteHost def get_copy_command(local, host, datafile, tmpdir): if local: cmd_str = 'mv -f %s %s/%s.data' % (datafile, tmpdir, host) else: cmd_str = 'scp %s:%s %s/%s.data' % (host, datafile, tmpdir, host) return Command(host, cmd_str)
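# Added sketch: how the helpers above compose. The hostname 'seg1' and the
# paths are hypothetical, the Command objects are only constructed (not run),
# and the .cmdStr attribute is assumed from gppylib's Command base class.
if __name__ == "__main__":
    local_cmd = get_command(True, 'du -sh /data', 'seg1')
    remote_cmd = get_command(False, 'du -sh /data', 'seg1')  # wrapped via ctxt=REMOTE
    copy_cmd = get_copy_command(False, 'seg1', '/tmp/gp.data', '/tmp/collect')
    print(local_cmd.cmdStr)
    print(remote_cmd.cmdStr)
    print(copy_cmd.cmdStr)  # 'scp seg1:/tmp/gp.data /tmp/collect/seg1.data'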
""" ________ ___ __ ________ ________ ___ ___ ________ _______ ________ |\ ____\|\ \ |\ \|\ __ \|\ __ \ |\ \|\ \|\ ____\|\ ___ \ |\ __ \ \ \ \___|\ \ \ \ \ \ \ \|\ \ \ \|\ \ \ \ \\\ \ \ \___|\ \ __/|\ \ \|\ \ \ \_____ \ \ \ __\ \ \ \ __ \ \ ____\ \ \ \\\ \ \_____ \ \ \_|/_\ \ _ _\ \|____|\ \ \ \|\__\_\ \ \ \ \ \ \ \___| \ \ \\\ \|____|\ \ \ \_|\ \ \ \\ \| ____\_\ \ \____________\ \__\ \__\ \__\ \ \_______\____\_\ \ \_______\ \__\\ _\ |\_________\|____________|\|__|\|__|\|__| \|_______|\_________\|_______|\|__|\|__| \|_________| \|_________| """ __title__ = "Django Swap User" __version__ = "0.9.8" __author__ = "<NAME>" __license__ = "MIT" __copyright__ = "Copyright 2022 © <NAME>" VERSION = __version__ default_app_config = "swap_user.apps.DjangoSwapUser"
<reponame>acyrl/edward2 # coding=utf-8 # Copyright 2020 The Edward2 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Uncertainty-based convolutional layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from edward2.tensorflow import constraints from edward2.tensorflow import generated_random_variables from edward2.tensorflow import initializers from edward2.tensorflow import random_variable from edward2.tensorflow import regularizers from edward2.tensorflow.layers import utils import tensorflow.compat.v2 as tf @utils.add_weight class Conv2DReparameterization(tf.keras.layers.Conv2D): """2D convolution layer (e.g. spatial convolution over images). The layer computes a variational Bayesian approximation to the distribution over convolutional layers, ``` p(outputs | inputs) = int conv2d(inputs; weights, bias) p(weights, bias) dweights dbias. ``` It does this with a stochastic forward pass, sampling from learnable distributions on the kernel and bias. Gradients with respect to the distributions' learnable parameters backpropagate via reparameterization. Minimizing cross-entropy plus the layer's losses performs variational minimum description length, i.e., it minimizes an upper bound to the negative marginal likelihood. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='trainable_normal', bias_initializer='zeros', kernel_regularizer='normal_kl_divergence', bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2DReparameterization, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) def call_weights(self): """Calls any weights if the initializer is itself a layer.""" if isinstance(self.kernel_initializer, tf.keras.layers.Layer): self.kernel = self.kernel_initializer(self.kernel.shape, self.dtype) if isinstance(self.bias_initializer, tf.keras.layers.Layer): self.bias = self.bias_initializer(self.bias.shape, self.dtype) def call(self, *args, **kwargs): self.call_weights() kwargs.pop('training', None) return super(Conv2DReparameterization, self).call(*args, **kwargs) class Conv2DFlipout(Conv2DReparameterization): """2D convolution layer (e.g. spatial convolution over images). 
The layer computes a variational Bayesian approximation to the distribution over convolutional layers, ``` p(outputs | inputs) = int conv2d(inputs; weights, bias) p(weights, bias) dweights dbias. ``` It does this with a stochastic forward pass, sampling from learnable distributions on the kernel and bias. Gradients with respect to the distributions' learnable parameters backpropagate via reparameterization. Minimizing cross-entropy plus the layer's losses performs variational minimum description length, i.e., it minimizes an upper bound to the negative marginal likelihood. This layer uses the Flipout estimator (Wen et al., 2018) for integrating with respect to the `kernel`. Namely, it applies pseudo-independent weight perturbations via independent sign flips for each example, enabling variance reduction over independent weight perturbations. For this estimator to work, the `kernel` random variable must be able to decompose as a sum of its mean and a perturbation distribution; the perturbation distribution must be independent across weight elements and symmetric around zero (for example, a fully factorized Gaussian). """ def call(self, inputs): if not isinstance(self.kernel, random_variable.RandomVariable): return super(Conv2DFlipout, self).call(inputs) self.call_weights() outputs = self._apply_kernel(inputs) if self.use_bias: if self.data_format == 'channels_first': outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW') else: outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC') if self.activation is not None: outputs = self.activation(outputs) return outputs def _apply_kernel(self, inputs): input_shape = tf.shape(inputs) batch_dim = input_shape[0] if self._convolution_op is None: padding = self.padding if self.padding == 'causal': padding = 'valid' if not isinstance(padding, (list, tuple)): padding = padding.upper() self._convolution_op = functools.partial( tf.nn.convolution, strides=self.strides, padding=padding, data_format='NHWC' if self.data_format == 'channels_last' else 'NCHW', dilations=self.dilation_rate) if self.data_format == 'channels_first': channels = input_shape[1] sign_input_shape = [batch_dim, channels, 1, 1] sign_output_shape = [batch_dim, self.filters, 1, 1] else: channels = input_shape[-1] sign_input_shape = [batch_dim, 1, 1, channels] sign_output_shape = [batch_dim, 1, 1, self.filters] sign_input = tf.cast(2 * tf.random.uniform(sign_input_shape, minval=0, maxval=2, dtype=tf.int32) - 1, inputs.dtype) sign_output = tf.cast(2 * tf.random.uniform(sign_output_shape, minval=0, maxval=2, dtype=tf.int32) - 1, inputs.dtype) kernel_mean = self.kernel.distribution.mean() perturbation = self.kernel - kernel_mean outputs = self._convolution_op(inputs, kernel_mean) outputs += self._convolution_op(inputs * sign_input, perturbation) * sign_output return outputs class Conv2DHierarchical(Conv2DFlipout): """2D convolution layer with hierarchical distributions. The layer computes a variational Bayesian approximation to the distribution over convolutional layers, and where the distribution over weights involves a hierarchical distribution with hidden unit noise coupling vectors of the kernel weight matrix (Louizos et al., 2017), ``` p(outputs | inputs) = int conv2d(inputs; new_kernel, bias) p(kernel, local_scales, global_scale, bias) dkernel dlocal_scales dglobal_scale dbias. ``` It does this with a stochastic forward pass, sampling from learnable distributions on the kernel and bias. 
The kernel is written in non-centered parameterization where ``` new_kernel[i, j] = kernel[i, j] * local_scale[j] * global_scale. ``` That is, there is "local" multiplicative noise which couples weights for each output filter. There is also a "global" multiplicative noise which couples the entire weight matrix. By default, the weights are normally distributed and the local and global noises are half-Cauchy distributed; this makes the kernel a horseshoe distribution (Carvalho et al., 2009; <NAME> Scott, 2012). The estimation uses Flipout for variance reduction with respect to sampling the full weights. Gradients with respect to the distributions' learnable parameters backpropagate via reparameterization. Minimizing cross-entropy plus the layer's losses performs variational minimum description length, i.e., it minimizes an upper bound to the negative marginal likelihood. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='trainable_normal', bias_initializer='zeros', local_scale_initializer='trainable_half_cauchy', global_scale_initializer='trainable_half_cauchy', kernel_regularizer='normal_kl_divergence', bias_regularizer=None, local_scale_regularizer='half_cauchy_kl_divergence', global_scale_regularizer=regularizers.HalfCauchyKLDivergence( scale=1e-5), activity_regularizer=None, kernel_constraint=None, bias_constraint=None, local_scale_constraint='softplus', global_scale_constraint='softplus', **kwargs): self.local_scale_initializer = initializers.get(local_scale_initializer) self.global_scale_initializer = initializers.get(global_scale_initializer) self.local_scale_regularizer = regularizers.get(local_scale_regularizer) self.global_scale_regularizer = regularizers.get(global_scale_regularizer) self.local_scale_constraint = constraints.get(local_scale_constraint) self.global_scale_constraint = constraints.get(global_scale_constraint) super(Conv2DHierarchical, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) def build(self, input_shape): self.local_scale = self.add_weight( shape=(self.filters,), name='local_scale', initializer=self.local_scale_initializer, regularizer=self.local_scale_regularizer, constraint=self.local_scale_constraint) self.global_scale = self.add_weight( shape=(), name='global_scale', initializer=self.global_scale_initializer, regularizer=self.global_scale_regularizer, constraint=self.global_scale_constraint) super(Conv2DHierarchical, self).build(input_shape) def call_weights(self): """Calls any weights if the initializer is itself a layer.""" if isinstance(self.local_scale_initializer, tf.keras.layers.Layer): self.local_scale = self.local_scale_initializer(self.local_scale.shape, self.dtype) if isinstance(self.global_scale_initializer, tf.keras.layers.Layer): self.global_scale = self.global_scale_initializer(self.global_scale.shape, self.dtype) super(Conv2DHierarchical, self).call_weights() def _apply_kernel(self, inputs): outputs = 
super(Conv2DHierarchical, self)._apply_kernel(inputs) if self.data_format == 'channels_first': local_scale = tf.reshape(self.local_scale, [1, -1, 1, 1]) else: local_scale = tf.reshape(self.local_scale, [1, 1, 1, -1]) # TODO(trandustin): Figure out what to set local/global scales to at test # time. Means don't exist for Half-Cauchy approximate posteriors. outputs *= local_scale * self.global_scale return outputs class Conv2DVariationalDropout(Conv2DReparameterization): """2D convolution layer with variational dropout (Kingma et al., 2015). Implementation follows the additive parameterization of Molchanov et al. (2017). """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='trainable_normal', bias_initializer='zeros', kernel_regularizer='log_uniform_kl_divergence', bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2DVariationalDropout, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) def call(self, inputs, training=None): if not isinstance(self.kernel, random_variable.RandomVariable): return super(Conv2DVariationalDropout, self).call(inputs) self.call_weights() if training is None: training = tf.keras.backend.learning_phase() if self._convolution_op is None: padding = self.padding if self.padding == 'causal': padding = 'valid' if not isinstance(padding, (list, tuple)): padding = padding.upper() self._convolution_op = functools.partial( tf.nn.convolution, strides=self.strides, padding=padding, data_format='NHWC' if self.data_format == 'channels_last' else 'NCHW', dilations=self.dilation_rate) def dropped_inputs(): """Forward pass with dropout.""" # Clip magnitude of dropout rate, where we get the dropout rate alpha from # the additive parameterization (Molchanov et al., 2017): for weight ~ # Normal(mu, sigma**2), the variance `sigma**2 = alpha * mu**2`. mean = self.kernel.distribution.mean() log_variance = tf.math.log(self.kernel.distribution.variance()) log_alpha = log_variance - tf.math.log(tf.square(mean) + tf.keras.backend.epsilon()) log_alpha = tf.clip_by_value(log_alpha, -8., 8.) log_variance = log_alpha + tf.math.log(tf.square(mean) + tf.keras.backend.epsilon()) means = self._convolution_op(inputs, mean) stddevs = tf.sqrt( self._convolution_op(tf.square(inputs), tf.exp(log_variance)) + tf.keras.backend.epsilon()) if self.use_bias: if self.data_format == 'channels_first': means = tf.nn.bias_add(means, self.bias, data_format='NCHW') else: means = tf.nn.bias_add(means, self.bias, data_format='NHWC') outputs = generated_random_variables.Normal(loc=means, scale=stddevs) if self.activation is not None: outputs = self.activation(outputs) return outputs # Following tf.keras.Dropout, only apply variational dropout if training # flag is True. 
training_value = utils.smart_constant_value(training) if training_value is not None: if training_value: return dropped_inputs() else: return super(Conv2DVariationalDropout, self).call(inputs) return tf.cond( pred=training, true_fn=dropped_inputs, false_fn=lambda: super(Conv2DVariationalDropout, self).call(inputs)) class Conv2DBatchEnsemble(tf.keras.layers.Layer): """A batch ensemble convolutional layer.""" def __init__(self, filters, kernel_size, ensemble_size=4, alpha_initializer=tf.keras.initializers.Ones(), gamma_initializer=tf.keras.initializers.Ones(), strides=(1, 1), padding='valid', data_format='channels_last', activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2DBatchEnsemble, self).__init__(**kwargs) self.filters = filters self.kernel_size = kernel_size self.data_format = data_format self.ensemble_size = ensemble_size self.alpha_initializer = alpha_initializer self.gamma_initializer = gamma_initializer self.use_bias = use_bias self.activation = tf.keras.activations.get(activation) self.conv2d = tf.keras.layers.Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=None, use_bias=False, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint) def build(self, input_shape): input_shape = tf.TensorShape(input_shape) if self.data_format == 'channels_first': input_channel = input_shape[1] elif self.data_format == 'channels_last': input_channel = input_shape[-1] self.alpha = self.add_weight( 'alpha', shape=[self.ensemble_size, input_channel], initializer=self.alpha_initializer, trainable=True, dtype=self.dtype) self.gamma = self.add_weight( 'gamma', shape=[self.ensemble_size, self.filters], initializer=self.gamma_initializer, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( name='bias', shape=[self.ensemble_size, self.filters], initializer=tf.keras.initializers.Zeros(), trainable=True, dtype=self.dtype) else: self.bias = None self.built = True def call(self, inputs): axis_change = -1 if self.data_format == 'channels_first' else 1 batch_size = tf.shape(inputs)[0] input_dim = self.alpha.shape[-1] examples_per_model = batch_size // self.ensemble_size alpha = tf.reshape(tf.tile(self.alpha, [1, examples_per_model]), [batch_size, input_dim]) gamma = tf.reshape(tf.tile(self.gamma, [1, examples_per_model]), [batch_size, self.filters]) alpha = tf.expand_dims(alpha, axis=axis_change) alpha = tf.expand_dims(alpha, axis=axis_change) gamma = tf.expand_dims(gamma, axis=axis_change) gamma = tf.expand_dims(gamma, axis=axis_change) outputs = self.conv2d(inputs*alpha) * gamma if self.use_bias: bias = tf.reshape(tf.tile(self.bias, [1, examples_per_model]), [batch_size, self.filters]) bias = tf.expand_dims(bias, axis=axis_change) bias = tf.expand_dims(bias, axis=axis_change) outputs += bias if self.activation is not None: outputs = self.activation(outputs) return outputs def get_config(self): config = { 'ensemble_size': self.ensemble_size, 'random_sign_init': self.random_sign_init, 'alpha_initializer': tf.keras.initializers.serialize( self.alpha_initializer), 'gamma_initializer': tf.keras.initializers.serialize( 
            self.gamma_initializer),
        'activation': tf.keras.activations.serialize(self.activation),
        'use_bias': self.use_bias,
    }
    base_config = super(Conv2DBatchEnsemble, self).get_config()
    conv_config = self.conv2d.get_config()
    return dict(
        list(base_config.items()) + list(conv_config.items()) +
        list(config.items()))
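# Added usage sketch (not from Edward2): a BatchEnsemble layer flattens the
# ensemble into the batch dimension, so the batch size must be divisible by
# `ensemble_size`; here 8 examples feed 4 members with 2 examples each.
def _conv2d_batch_ensemble_demo():
  layer = Conv2DBatchEnsemble(filters=8, kernel_size=3, ensemble_size=4,
                              padding='same')
  inputs = tf.random.normal([8, 32, 32, 3])  # 2 examples per ensemble member
  outputs = layer(inputs)
  assert outputs.shape == (8, 32, 32, 8)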
<filename>May 2019/bubble_sort.py numbers = [2, 4, 9, 1, 7, 6, 5, 8, 3] def bubble_sort(array): search_length = len(array)-1 swapped = True #Keeps iterating, comparing and swapping until a complete loop without swapping is detected while swapped == True: swapped = False for index in range(search_length): if array[index] > array[index+1]: array[index+1], array[index] = array[index], array[index+1] swapped = True search_length -= 1 return array print(bubble_sort(numbers))
<filename>data/split_coco.py import json import numpy as np with open("data/widgets/annotations/all.json") as f: all_annotations = json.load(f) samples_count = len(all_annotations["annotations"]) validation_ids = np.random.choice(samples_count, size=62, replace=False) valid_dict = all_annotations.copy() valid_dict["annotations"] = [valid_dict["annotations"][i] for i in validation_ids] train_dict = all_annotations.copy() train_dict["annotations"] = [train_dict["annotations"][i] for i in range(samples_count) if i not in validation_ids] with open("data/widgets/annotations/widgets-train.json", "w") as f: json.dump(train_dict, f) with open("data/widgets/annotations/widgets-val.json", "w") as f: json.dump(valid_dict, f)
""" Views (JSON objects) for Annotator storage backend """ import datetime import jwt import logging import os from collections import OrderedDict import django_filters.rest_framework from django.http import HttpResponse, JsonResponse from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.shortcuts import render from rest_framework import viewsets from rest_framework.response import Response from rest_framework.pagination import LimitOffsetPagination from . import settings from .models import Annotation from .serializers import UserSerializer, AnnotationSerializer LOG = logging.getLogger(__name__) CONSUMER_KEY = os.environ.get('CONSUMER_KEY') CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET') CONSUMER_TTL = 86400 def generate_token(user_id): return jwt.encode({ 'consumerKey': CONSUMER_KEY, 'userId': user_id, 'issuedAt': _now().isoformat() + 'Z', 'ttl': CONSUMER_TTL }, CONSUMER_SECRET) def _now(): return datetime.datetime.utcnow().replace(microsecond=0) @login_required def profile(request): return HttpResponse('foo') @login_required def root(request): return JsonResponse(settings.ANNOTATOR_API) @login_required def token(request): return(HttpResponse(generate_token(request.user.username))) def jsfile(request): return render(request, 'anno2.js', {'url': os.environ.get('DJANGO_HOST')}) class LimitOffsetTotalRowsPagination(LimitOffsetPagination): def get_paginated_response(self, data): return Response(OrderedDict([ ('total', self.count), ('next', self.get_next_link()), ('previous', self.get_previous_link()), ('rows', data) ])) class SearchViewSet(viewsets.ModelViewSet): serializer_class = AnnotationSerializer filter_backends = (django_filters.rest_framework.DjangoFilterBackend,) pagination_class = LimitOffsetTotalRowsPagination def get_queryset(self): queryset = Annotation.objects.all() # TODO repeat for all possible queries? text = self.request.query_params.get('text', None) if text is not None: queryset = queryset.filter(text=text) quote = self.request.query_params.get('quote', None) if quote is not None: queryset = queryset.filter(quote=quote) uri = self.request.query_params.get('uri', None) if uri is not None: queryset = queryset.filter(uri=uri) return queryset class AnnotationViewSet(viewsets.ModelViewSet): serializer_class = AnnotationSerializer queryset = Annotation.objects.all() class UserViewSet(viewsets.ReadOnlyModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = User.objects.all().order_by('-date_joined') serializer_class = UserSerializer
from django.core.urlresolvers import resolve from django.test import TestCase from django.http import HttpRequest, QueryDict from minimal_django import index, urlpatterns class HomePageTest(TestCase): def test_root_url_resolves_to_home_page_view(self): found = resolve('/', urlconf = urlpatterns) self.assertEqual(found.func, index) def test_home_page_returns_correct_html(self): request = HttpRequest() response = index(request) self.assertIn(b'Hello', response.content) # self.assertTrue(response.content.startswith(b'<html>')) # self.assertIn(b'<title>Artist Search API</title>', response.content) # self.assertTrue(response.content.endswith(b'</html>'))
<reponame>achimmihca/DedoMouse<filename>src/gui/VideoCaptureThread.py #!/usr/bin/env python import time from typing import Any import logging import cv2 from PySide6.QtCore import QThread from PySide6.QtGui import QImage, QPixmap from PySide6.QtWidgets import QLabel from common.Config import Config from common.Vector import Vector from common.WebcamControl import WebcamControl class VideoCaptureThread(QThread): def __init__(self, config: Config, webcam_control: WebcamControl, video_display_label: QLabel) -> None: QThread.__init__(self) self.config = config self.webcam_control = webcam_control self.video_display_label = video_display_label self.log = logging.getLogger(self.__class__.__name__) def run(self) -> None: self.log.info("started VideoCaptureThread") while self.config.running.value: self.webcam_control.frame_analyzed_callbacks.append(self.display_video_frame) self.webcam_control.restart_video_capture_callbacks.append(self.on_video_capture_restart) try: video_capture_error_message = self.webcam_control.start_video_capture() if video_capture_error_message is not None: self.video_display_label.setText(video_capture_error_message) except Exception: error_message = ":(\n\nError during video capture or processing.\nCheck log file for further information." self.log.exception(error_message) self.video_display_label.setText(error_message) # Wait until video capture should be restarted while self.config.running.value and not self.webcam_control.is_restart_video_capture: time.sleep(0.1) def display_video_frame(self, frame: Any, frame_size: Vector) -> None: try: label_width = self.video_display_label.width() label_height = self.video_display_label.height() scale_x = label_width / frame_size.x scale_y = label_height / frame_size.y scale = min(scale_x, scale_y) frame = cv2.resize(frame, None, fx=scale, fy=scale) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_display_label.setPixmap(QPixmap.fromImage(image)) except Exception: self.log.exception(f"Could not display video.") def on_video_capture_restart(self) -> None: self.video_display_label.setText("Restarting camera\nwith changed settings...")
<filename>UML2ER/contracts/HContract06_CompleteLHS.py<gh_stars>1-10 from core.himesis import Himesis, HimesisPreConditionPatternLHS import uuid class HContract06_CompleteLHS(HimesisPreConditionPatternLHS): def __init__(self): """ Creates the himesis graph representing the AToM3 model HContract06_CompleteLHS """ # Flag this instance as compiled now self.is_compiled = True super(HContract06_CompleteLHS, self).__init__(name='HContract06_CompleteLHS', num_nodes=0, edges=[]) # Add the edges self.add_edges([]) # Set the graph attributes self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule'] self["MT_constraint__"] = """return True""" self["name"] = """""" self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HContract06_CompleteLHS') self["equations"] = [] # Set the node attributes # match class Class(Class) node self.add_node() self.vs[0]["MT_pre__attr1"] = """return True""" self.vs[0]["MT_label__"] = """1""" self.vs[0]["mm__"] = """MT_pre__Class""" self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Class') # apply class EntityType(EntityType) node self.add_node() self.vs[1]["MT_pre__attr1"] = """return True""" self.vs[1]["MT_label__"] = """2""" self.vs[1]["mm__"] = """MT_pre__EntityType""" self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EntityType') # trace association null--trace-->nullnode self.add_node() self.vs[2]["MT_label__"] = """3""" self.vs[2]["mm__"] = """MT_pre__trace_link""" self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EntityTypeassoc2Class') # Add the edges self.add_edges([ (1,2), # apply class null(Class) -> backward_association (2,0), # backward_associationnull -> match_class null(Class) ]) # define evaluation methods for each match class. def eval_attr11(self, attr_value, this): return True # define evaluation methods for each apply class. def eval_attr12(self, attr_value, this): return True # define evaluation methods for each match association. # define evaluation methods for each apply association. def constraint(self, PreNode, graph): return True
from .relationship import Relationship from .base import BaseRelationship
<filename>run_in_cron.py import run run.job()
<reponame>experiencedft/defisaver-sim from modules.readConfig import readConfig from modules.optimize import optimizeRatioContinuous underlying_return, time_period, volatility = readConfig("Continuous limit optimization parameters") optimal_ratio, optimal_return = optimizeRatioContinuous(underlying_return, time_period, volatility) print("Optimal leverage ratio = ", optimal_ratio) print("Optimal return = ", optimal_return) print("Relative performance to underlying = ", optimal_return/underlying_return) if optimal_return/underlying_return > 1: print("BETTER THAN HOLDING \n") else: print("WORSE THAN HOLDING \n")
<reponame>marissawalker/spectre<filename>tests/Unit/Evolution/Systems/RelativisticEuler/Valencia/TestFunctions.py # Distributed under the MIT License. # See LICENSE.txt for details. import numpy as np # Functions for testing Characteristics.cpp def characteristic_speeds(lapse, shift, spatial_velocity, spatial_velocity_sqrd, sound_speed_sqrd, normal_oneform): normal_velocity = np.dot(spatial_velocity, normal_oneform) normal_shift = np.dot(shift, normal_oneform) prefactor = lapse / (1.0 - spatial_velocity_sqrd * sound_speed_sqrd) first_term = prefactor * normal_velocity * (1.0 - sound_speed_sqrd) second_term = (prefactor * np.sqrt(sound_speed_sqrd) * np.sqrt((1.0 - spatial_velocity_sqrd) * (1.0 - spatial_velocity_sqrd * sound_speed_sqrd - normal_velocity * normal_velocity * (1.0 - sound_speed_sqrd)))) result = [first_term - second_term - normal_shift] for i in range(0, spatial_velocity.size): result.append(lapse * normal_velocity - normal_shift) result.append(first_term + second_term - normal_shift) return result # End functions for testing Characteristics.cpp # Functions for testing ConservativeFromPrimitive.cpp def tilde_d(rest_mass_density, specific_internal_energy, spatial_velocity_oneform, spatial_velocity_squared, lorentz_factor, specific_enthalpy, pressure, sqrt_det_spatial_metric): return lorentz_factor * rest_mass_density * sqrt_det_spatial_metric def tilde_tau(rest_mass_density, specific_internal_energy, spatial_velocity_oneform, spatial_velocity_squared, lorentz_factor, specific_enthalpy, pressure, sqrt_det_spatial_metric): return ((pressure * spatial_velocity_squared + (lorentz_factor/(1.0 + lorentz_factor) * spatial_velocity_squared + specific_internal_energy) * rest_mass_density) * lorentz_factor**2 * sqrt_det_spatial_metric) def tilde_s(rest_mass_density, specific_internal_energy, spatial_velocity_oneform, spatial_velocity_squared, lorentz_factor, specific_enthalpy, pressure, sqrt_det_spatial_metric): return (spatial_velocity_oneform * lorentz_factor**2 * specific_enthalpy * rest_mass_density * sqrt_det_spatial_metric) # End functions for testing ConservativeFromPrimitive.cpp # Functions for testing Equations.cpp def source_tilde_d(tilde_d, tilde_tau, tilde_s, spatial_velocity, pressure, lapse, d_lapse, d_shift, d_spatial_metric, inv_spatial_metric, sqrt_det_spatial_metric, extrinsic_curvature): return 0.0 def source_tilde_tau(tilde_d, tilde_tau, tilde_s, spatial_velocity, pressure, lapse, d_lapse, d_shift, d_spatial_metric, inv_spatial_metric, sqrt_det_spatial_metric, extrinsic_curvature): upper_tilde_s = np.einsum("a, ia", tilde_s, inv_spatial_metric) densitized_stress = (0.5 * np.outer(upper_tilde_s, spatial_velocity) + 0.5 * np.outer(spatial_velocity, upper_tilde_s) + sqrt_det_spatial_metric * pressure * inv_spatial_metric) return (lapse * np.einsum("ab, ab", densitized_stress, extrinsic_curvature) - np.einsum("ab, ab", inv_spatial_metric, np.outer(tilde_s, d_lapse))) def source_tilde_s(tilde_d, tilde_tau, tilde_s, spatial_velocity, pressure, lapse, d_lapse, d_shift, d_spatial_metric, inv_spatial_metric, sqrt_det_spatial_metric, extrinsic_curvature): upper_tilde_s = np.einsum("a, ia", tilde_s, inv_spatial_metric) densitized_stress = (np.outer(upper_tilde_s, spatial_velocity) + sqrt_det_spatial_metric * pressure * inv_spatial_metric) return (np.einsum("a, ia", tilde_s, d_shift) - d_lapse * (tilde_tau + tilde_d) + 0.5 * lapse * np.einsum("ab, iab", densitized_stress, d_spatial_metric)) # End functions for testing Equations.cpp # Functions for testing Fluxes.cpp def 
tilde_d_flux(tilde_d, tilde_tau, tilde_s, lapse, shift, sqrt_det_spatial_metric, pressure, spatial_velocity): return tilde_d * (lapse * spatial_velocity - shift) def tilde_tau_flux(tilde_d, tilde_tau, tilde_s, lapse, shift, sqrt_det_spatial_metric, pressure, spatial_velocity): return (sqrt_det_spatial_metric * lapse * pressure * spatial_velocity + tilde_tau * (lapse * spatial_velocity - shift)) def tilde_s_flux(tilde_d, tilde_tau, tilde_s, lapse, shift, sqrt_det_spatial_metric, pressure, spatial_velocity): result = np.outer(lapse * spatial_velocity - shift, tilde_s) result += (sqrt_det_spatial_metric * lapse * pressure * np.identity(shift.size)) return result # End functions for testing Fluxes.cpp
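# Added numerical sketch: tilde_d_flux above with flat-space values
# (lapse = 1, zero shift, unit determinant) reduces to tilde_d * spatial_velocity.
if __name__ == "__main__":
    v = np.array([0.3, 0.0, 0.0])
    flux = tilde_d_flux(2.0, 1.0, np.array([0.1, 0.0, 0.0]),
                        1.0, np.zeros(3), 1.0, 0.0, v)
    assert np.allclose(flux, 2.0 * v)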
# -*- coding: utf-8 -*- import sys from PySide2 import QtWidgets from PySide2.QtTest import QTest from numpy import pi from Tests.GUI import gui_option # Set unit as [m] from pyleecan.Classes.LamSlotMag import LamSlotMag from pyleecan.Classes.SlotCirc import SlotCirc from pyleecan.GUI.Dialog.DMachineSetup.SMSlot.WSlotCirc.WSlotCirc import WSlotCirc import pytest class TestPMSlot10(object): """Test that the widget PMSlot10 behave like it should""" def setup_method(self): self.test_obj = LamSlotMag(Rint=0.1, Rext=0.2) self.test_obj.slot = SlotCirc(H0=10e-3, W0=45e-3) self.widget = WSlotCirc(self.test_obj) @classmethod def setup_class(cls): """Start the app for the test""" print("\nStart Test TestPMSlot10") if not QtWidgets.QApplication.instance(): cls.app = QtWidgets.QApplication(sys.argv) else: cls.app = QtWidgets.QApplication.instance() @classmethod def teardown_class(cls): """Exit the app after the test""" cls.app.quit() def test_init(self): """Check that the Widget spinbox initialise to the lamination value""" assert self.widget.lf_H0.value() == 0.01 assert self.widget.lf_W0.value() == 0.045 def test_set_W0(self): """Check that the Widget allow to update W0""" # Check Unit assert self.widget.unit_W0.text() == "[m]" # Change value in GUI self.widget.lf_W0.clear() QTest.keyClicks(self.widget.lf_W0, "0.31") self.widget.lf_W0.editingFinished.emit() # To trigger the slot assert self.widget.slot.W0 == 0.31 assert self.test_obj.slot.W0 == 0.31 def test_set_H0(self): """Check that the Widget allow to update H0""" # Check Unit assert self.widget.unit_H0.text() == "[m]" # Change value in GUI self.widget.lf_H0.clear() QTest.keyClicks(self.widget.lf_H0, "0.34") self.widget.lf_H0.editingFinished.emit() # To trigger the slot assert self.widget.slot.H0 == 0.34 assert self.test_obj.slot.H0 == 0.34 def test_output_txt(self): """Check that the Output text is computed and correct""" self.test_obj.slot = SlotCirc(H0=10e-3, W0=45e-3) self.widget = WSlotCirc(self.test_obj) assert self.widget.w_out.out_slot_height.text() == "Slot height: 0.01127 [m]" def test_check(self): """Check that the check is working correctly""" self.test_obj = LamSlotMag(Rint=0.1, Rext=0.2) # H0 self.test_obj.slot = SlotCirc(H0=None, W0=0.10) self.widget = WSlotCirc(self.test_obj) assert self.widget.check(self.test_obj) == "You must set H0 !" # W0 self.test_obj.slot = SlotCirc(H0=0.10, W0=None) assert self.widget.check(self.test_obj) == "You must set W0 !" if __name__ == "__main__": a = TestPMSlot10() a.setup_class() a.setup_method() a.test_init() a.test_output_txt() a.teardown_class() print("Done")
""" Parse Velodyne VLP-16 data """ import struct import math from osgar.node import Node LASER_ANGLES = [-15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15] NUM_LASERS = 16 def parse_packet(data, offset_step=100): assert len(data) == 1206, len(data) assert offset_step % 100 == 0, offset_step # must be divisible by 100 timestamp, factory = struct.unpack_from("<IH", data, offset=1200) assert factory == 0x2237, hex(factory) # 0x22=VLP-16, 0x37=Strongest Return # time = timestamp/1000000.0 # if self.time is not None: # lost_packets = int(round((time - self.time)/EXPECTED_PACKET_TIME)) - 1 # else: # lost_packets = 0 # self.time = time # if lost_packets > 0 and (self.last_blocked is None or self.time > self.last_blocked + EXPECTED_SCAN_DURATION): # self.last_blocked = self.time + EXPECTED_SCAN_DURATION # self.scan_index += 1 # print("DROPPED index", self.scan_index) # if self.last_blocked is not None and self.time < self.last_blocked: # return # to catch up-to-date packets again ... ret = [] for offset in range(0, 1200, offset_step): # 100 bytes per one reading flag, azi = struct.unpack_from("<HH", data, offset) assert flag == 0xEEFF, hex(flag) azimuth = azi/100.0 # H-distance (2mm step), B-reflectivity (0 arr = struct.unpack_from('<' + "HB"*32, data, offset + 4) dist = [] for i in range(NUM_LASERS): dist.append(arr[i*2] * 0.002) # so now we have azimuth and NUM_LASERS distance readings for d, beta_deg in zip(dist, LASER_ANGLES): beta = math.radians(beta_deg) x = d * math.cos(azimuth) * math.cos(beta) y = d * math.sin(azimuth) * math.cos(beta) z = d * math.sin(beta) ret.append([x, y, z]) return ret class Velodyne(Node): def __init__(self, config, bus): bus.register('raw', 'xyz') super().__init__(config, bus) self.offset_step = config.get('offset_step', 200) # skip every second packet (we need 1deg resolution input 0.4) assert self.offset_step % 100 == 0, self.offset_step # must be divisible by 100 def update(self): channel = super().update() assert channel == 'raw', channel self.publish('xyz', parse_packet(self.raw, offset_step=self.offset_step)) # vim: expandtab sw=4 ts=4
<gh_stars>0 import json from channels.generic.websocket import AsyncWebsocketConsumer from channels.db import database_sync_to_async from .models import Chat, ChatRoom class ChatConsumer(AsyncWebsocketConsumer): async def connect(self): self.room_name = self.scope['url_route']['kwargs']['room_name'] self.room_group_name = 'chat_%s' % self.room_name await self.channel_layer.group_add( self.room_group_name, self.channel_name ) await self.accept() async def disconnect(self, close_code): await self.channel_layer.group_discard( self.room_group_name, self.channel_name ) async def receive(self, text_data): text_data_json = json.loads(text_data) message = text_data_json['message'] self.user_id = self.scope['user'].id # Find room object room = await database_sync_to_async(ChatRoom.objects.get)(name=self.room_name) # Create new chat object chat = Chat( content=message, user=self.scope['user'], room=room ) await database_sync_to_async(chat.save)() await self.channel_layer.group_send( self.room_group_name, { 'type': 'chat_message', 'message': message, 'user_id': self.user_id }) async def chat_message(self, event): message = event['message'] user_id = event['user_id'] await self.send(text_data=json.dumps({ 'message': message, 'user_id': user_id }))
<reponame>AlexScheller/dark-chess from flask import Blueprint main = Blueprint('main', __name__) from dark_chess_app.modules.main import routes
from django.contrib import admin from .models import Profile, Tweet, ScheduledTweet admin.site.register(Profile) admin.site.register(Tweet) admin.site.register(ScheduledTweet)
<filename>models/__init__.py from .gazeflow import Glow
<gh_stars>0 """ models.py """ import uuid from django.contrib.auth.models import AbstractUser from django.db import models from django.utils.translation import ugettext_lazy as _ from .managers import AppUserManager def user_directory_path(instance, filename): # file will be uploaded to MEDIA_ROOT/user_id/<filename> return '{0}/{1}'.format(instance.username, filename) class User(AbstractUser): # fields removed from base user model first_name = None last_name = None SEX_CHOICES = ( ('Male', 'Male'), ('Female', 'Female'), ) slug = models.SlugField(unique=True, default=uuid.uuid1, blank=True) username = models.CharField(max_length=30, unique=True, blank=True) email = models.EmailField(_('email address'), unique=True, blank=True) photo = models.ImageField( upload_to=user_directory_path, null=True, blank=True) name = models.CharField('full name', max_length=255, blank=True) date_of_birth = models.DateField(_('date of birth'), null=True, blank=True) sex = models.CharField( max_length=6, choices=SEX_CHOICES, null=True, blank=True) is_member = models.BooleanField(default=False, blank=True) # contacts = models.ManyToManyField('self', related_name='contacts', symmetrical=False) USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['name', 'email'] objects = AppUserManager() class Meta: verbose_name = _('user') verbose_name_plural = _('users') def get_full_name(self): return self.name def get_short_name(self): return self.username def __str__(self): return self.username
<reponame>parikshitgupta1/leetcode class Solution(object): def leastInterval(self, tasks, n): frequencies = {} output = 0 if n == 0: return len(tasks) for k in tasks: frequencies[k] = frequencies.get(k,0)+1 max_value = max(frequencies.values()) max_value_occurrences = 0 for value in frequencies.values(): if value == max_value: max_value_occurrences += 1 return max((max_value-1)*(n+1)+max_value_occurrences, len(tasks))
<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- import os import configparser from oger.ctrl.router import Router, PipelineServer from oger.ctrl.run import run as og_run import pandas as pd SETTINGS_FILENAME = 'settings.ini' def create_settings_file(path: str, ont: str = 'ALL') -> None: """ Creates the settings.ini file for OGER to get parameters. - Parameters: - path - path of the 'nlp' folder - ont - the ontology to be used as dictionary ['ALL', 'ENVO', 'CHEBI'] - The 'Shared' section declares global variables that can be used in other sections e.g. Data root. root = location of the working directory accessed in other sections using => ${Shared:root}/ - Input formats accepted: txt, txt_json, bioc_xml, bioc_json, conll, pubmed, pxml, pxml.gz, pmc, nxml, pubtator, pubtator_fbk, becalmabstracts, becalmpatents - Two iter-modes available: [collection or document] document:- 'n' input files = 'n' output files (provided every file has ontology terms) collection:- n input files = 1 output file - Export formats possible: tsv, txt, text_tsv, xml, text_xml, bioc_xml, bioc_json, bionlp, bionlp.ann, brat, brat.ann, conll, pubtator, pubanno_json, pubtator, pubtator_fbk, europepmc, europepmc.zip, odin, becalm_tsv, becalm_json These can be passed as a list for multiple outputs too. - Multiple Termlists can be declared in separate sections e.g. [Termlist1], [Termlist2] ...[Termlistn] with each having their own paths """ config = configparser.ConfigParser() config['Section'] = {} config['Shared'] = {} # Settings required by OGER config['Main'] = { 'input-directory' : os.path.join(path,'input'), 'output-directory' : os.path.join(path,'output'), 'pointer-type' : 'glob', 'pointers' : '*.tsv', 'iter-mode' : 'collection', 'article-format' : 'txt_tsv', 'export_format': 'tsv', 'termlist_stopwords': os.path.join(path,'stopwords','stopwords.txt') } if ont == 'ENVO': config.set('Main','termlist_path', os.path.join(path,'terms/envo_termlist.tsv')) elif ont == 'CHEBI': config.set('Main','termlist_path', os.path.join(path,'terms/chebi_termlist.tsv')) else: config.set('Main', 'termlist1_path', os.path.join(path,'terms/envo_termlist.tsv')) config.set('Main', 'termlist2_path', os.path.join(path,'terms/chebi_termlist.tsv')) # This is how OGER prescribes in it's test file but above works too. '''config['Termlist1'] = { 'path' : os.path.join(path,'terms/envo_termlist.tsv') } config['Termlist2'] = { 'path' : os.path.join(path,'terms/chebi_termlist.tsv') }''' # Write with open(os.path.join(path, SETTINGS_FILENAME), 'w') as settings_file: config.write(settings_file) def prep_nlp_input(path: str, columns: list)-> str: ''' Arguments: path - path to the file which has text to be analyzed columns - The first column HAS to be an id column. ''' df = pd.read_csv(path, sep=',', low_memory=False, usecols=columns) sub_df = df.dropna() # Hacky way of creating i/p files to run OGER '''for idx, row in sub_df.T.iteritems(): new_file = 'nlp/input/'+str(row[0])+'.txt' path_to_new_file = os.path.abspath(os.path.join(os.path.dirname(path),'..',new_file)) if os.path.exists(path_to_new_file): mode = 'a' else: mode = 'w' with open(path_to_new_file, mode) as txt_file: txt_file.write(row[1])''' # New way of doing this : PR submitted to Ontogene for merging code. 
fn = 'nlp' nlp_input = os.path.abspath(os.path.join(os.path.dirname(path),'..','nlp/input/'+fn+'.tsv')) sub_df.to_csv(nlp_input, sep='\t', index=False) return fn def run_oger(path: str , input_file_name: str , n_workers :int = 1 ) -> pd.DataFrame: config = configparser.ConfigParser() config.read(os.path.join(path, SETTINGS_FILENAME)) sections = config._sections settings = sections['Main'] settings['n_workers'] = n_workers og_run(**settings) df = process_oger_output(path, input_file_name) return df def process_oger_output(path: str, input_file_name: str) -> pd.DataFrame: """ The OGER output is a TSV which is imported and only the terms that occurred in the text file are considered and a dataframe of relevant information is returned """ cols = ['TaxId', 'Biolink', 'BeginTerm', 'EndTerm', 'TokenizedTerm', 'PreferredTerm', \ 'CURIE', 'NaN1', 'SentenceID', 'NaN2', 'UMLS_CUI'] df = pd.read_csv(os.path.join(path, 'output',input_file_name+'.tsv'), sep='\t', names=cols) sub_df = df[['TaxId', 'Biolink','TokenizedTerm', 'PreferredTerm', 'CURIE']] interested_df = sub_df.loc[(df['TokenizedTerm'] == df['PreferredTerm'].str.replace(r"\(.*\)",""))] interested_df = interested_df.drop(columns = ['PreferredTerm']).drop_duplicates() interested_df.to_csv(os.path.join(path, 'output',input_file_name +'Filtered.tsv'), sep='\t', index=False) return interested_df
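# Added end-to-end sketch: 'data/corpus.csv' and its column names are
# hypothetical, and the nlp/ directory layout (input/, output/, terms/,
# stopwords/) is assumed to exist as the settings above expect.
if __name__ == "__main__":
    nlp_dir = os.path.abspath('nlp')
    create_settings_file(nlp_dir, ont='ENVO')
    input_name = prep_nlp_input('data/corpus.csv', columns=['id', 'text'])
    results = run_oger(nlp_dir, input_name, n_workers=2)
    print(results.head())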
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import datetime
import httplib
import mock
import unittest

import webapp2
import webtest

from google.appengine.ext import ndb

from dashboard import update_dashboard_stats
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.services import gerrit_service
from dashboard.pinpoint.models.change import change as change_module
from dashboard.pinpoint.models.change import commit
from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import job_state
from dashboard.pinpoint.models.quest import execution_test
from dashboard.pinpoint.models.quest import quest
from dashboard.pinpoint import test

_RESULTS_BY_CHANGE = {
    'chromium@aaaaaaa': [1, 1, 1, 1],
    'chromium@bbbbbbb': [5, 5, 5, 5]
}


class _QuestStub(quest.Quest):

  def __str__(self):
    return 'Quest'

  def Start(self, change):
    return ExecutionResults(change)

  @classmethod
  def FromDict(cls, arguments):
    return cls


class ExecutionResults(execution_test._ExecutionStub):

  def __init__(self, c):
    super(ExecutionResults, self).__init__()
    self._result_for_test = _RESULTS_BY_CHANGE[str(c)]

  def _Poll(self):
    self._Complete(
        result_arguments={'arg key': 'arg value'},
        result_values=self._result_for_test)


def _StubFunc(*args, **kwargs):
  del args
  del kwargs


@ndb.tasklet
def _FakeTasklet(*args):
  del args


class UpdateDashboardStatsTest(test.TestCase):

  def setUp(self):
    super(UpdateDashboardStatsTest, self).setUp()

    app = webapp2.WSGIApplication([
        ('/update_dashboard_stats',
         update_dashboard_stats.UpdateDashboardStatsHandler)
    ])
    self.testapp = webtest.TestApp(app)

  def _CreateJob(self, hash1, hash2, comparison_mode, created, bug_id,
                 exception=None, arguments=None):
    old_commit = commit.Commit('chromium', hash1)
    change_a = change_module.Change((old_commit,))

    old_commit = commit.Commit('chromium', hash2)
    change_b = change_module.Change((old_commit,))

    job = job_module.Job.New(
        (_QuestStub(),), (change_a, change_b),
        comparison_mode=comparison_mode,
        bug_id=bug_id,
        arguments=arguments)
    job.created = created
    job.exception = exception
    job.state.ScheduleWork()
    job.state.Explore()
    job.put()
    return job

  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointJobs',
                     mock.MagicMock(side_effect=_FakeTasklet))
  @mock.patch.object(update_dashboard_stats.deferred, 'defer')
  def testPost_ProcessAlerts_Success(self, mock_defer):
    created = datetime.datetime.now() - datetime.timedelta(hours=1)
    # sheriff = ndb.Key('Sheriff', 'Chromium Perf Sheriff')
    anomaly_entity = anomaly.Anomaly(
        test=utils.TestKey('M/B/suite'),
        timestamp=created)  # , sheriff=sheriff)
    anomaly_entity.put()

    self.testapp.get('/update_dashboard_stats')
    self.assertTrue(mock_defer.called)

  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointJobs', _StubFunc)
  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointStats', _StubFunc)
  def testPost_ProcessAlerts_NoAlerts(self):
    created = datetime.datetime.now() - datetime.timedelta(days=2)
    # sheriff = ndb.Key('Sheriff', 'Chromium Perf Sheriff')
    anomaly_entity = anomaly.Anomaly(
        test=utils.TestKey('M/B/suite'),
        timestamp=created)  # , sheriff=sheriff)
    anomaly_entity.put()

    self.testapp.get('/update_dashboard_stats')
    self.ExecuteDeferredTasks('default', recurse=False)

    patcher = mock.patch('update_dashboard_stats.deferred.defer')
    self.addCleanup(patcher.stop)
    mock_defer = patcher.start()
    self.assertFalse(mock_defer.called)

  @mock.patch.object(update_dashboard_stats, '_ProcessAlerts', _StubFunc)
  @mock.patch.object(change_module.Change, 'Midpoint',
                     mock.MagicMock(side_effect=commit.NonLinearError))
  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointJobs', _StubFunc)
  def testPost_ProcessPinpointStats_Success(self):
    created = datetime.datetime.now() - datetime.timedelta(hours=12)
    j = self._CreateJob(
        'aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created, 12345,
        arguments={
            'configuration': 'bot1',
            'benchmark': 'suite1'
        })
    j.updated = created + datetime.timedelta(hours=1)
    j.put()

    created = datetime.datetime.now() - datetime.timedelta(hours=12)
    j = self._CreateJob(
        'aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created, 12345,
        arguments={
            'configuration': 'bot2',
            'benchmark': 'suite2'
        })
    j.updated = created + datetime.timedelta(hours=1)
    j.put()

    self.testapp.get('/update_dashboard_stats')

    patcher = mock.patch('update_dashboard_stats.deferred.defer')
    self.addCleanup(patcher.stop)
    mock_defer = patcher.start()
    self.ExecuteDeferredTasks('default', recurse=False)
    self.assertTrue(mock_defer.called)

  @mock.patch.object(update_dashboard_stats, '_ProcessAlerts',
                     mock.MagicMock(side_effect=_FakeTasklet))
  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointStats',
                     mock.MagicMock(side_effect=_FakeTasklet))
  @mock.patch.object(change_module.Change, 'Midpoint',
                     mock.MagicMock(side_effect=commit.NonLinearError))
  @mock.patch.object(update_dashboard_stats.deferred, 'defer')
  def testPost_ProcessPinpoint_Success(self, mock_defer):
    created = datetime.datetime.now() - datetime.timedelta(days=1)
    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created,
                    12345)
    anomaly_entity = anomaly.Anomaly(
        test=utils.TestKey('M/B/S'), bug_id=12345, timestamp=created)
    anomaly_entity.put()

    self.testapp.get('/update_dashboard_stats')
    self.assertTrue(mock_defer.called)

  @mock.patch.object(gerrit_service, 'GetChange',
                     mock.MagicMock(side_effect=httplib.HTTPException))
  @mock.patch.object(update_dashboard_stats, '_ProcessAlerts', _StubFunc)
  @mock.patch.object(update_dashboard_stats, '_ProcessPinpointStats', _StubFunc)
  @mock.patch.object(change_module.Change, 'Midpoint',
                     mock.MagicMock(side_effect=commit.NonLinearError))
  def testPost_ProcessPinpoint_NoResults(self):
    created = datetime.datetime.now() - datetime.timedelta(days=1)
    anomaly_entity = anomaly.Anomaly(
        test=utils.TestKey('M/B/S'), bug_id=12345, timestamp=created)
    anomaly_entity.put()

    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.FUNCTIONAL, created,
                    12345)

    created = datetime.datetime.now() - datetime.timedelta(days=15)
    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created,
                    12345)

    created = datetime.datetime.now() - datetime.timedelta(days=1)
    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created,
                    None)

    created = datetime.datetime.now() - datetime.timedelta(days=1)
    self._CreateJob('aaaaaaaa', 'aaaaaaaa', job_state.PERFORMANCE, created,
                    12345)

    created = datetime.datetime.now() - datetime.timedelta(days=1)
    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created,
                    12345, 'foo')

    way_too_old = datetime.datetime(year=2000, month=1, day=1)
    anomaly_entity = anomaly.Anomaly(
        test=utils.TestKey('M/B/S'), bug_id=1, timestamp=way_too_old)
    anomaly_entity.put()

    created = datetime.datetime.now() - datetime.timedelta(days=1)
    self._CreateJob('aaaaaaaa', 'bbbbbbbb', job_state.PERFORMANCE, created, 1)

    self.testapp.get('/update_dashboard_stats')

    patcher = mock.patch('update_dashboard_stats.deferred.defer')
    self.addCleanup(patcher.stop)
    mock_defer = patcher.start()
    self.assertFalse(mock_defer.called)


if __name__ == '__main__':
  unittest.main()
StarcoderdataPython
3360417
<gh_stars>1-10
from unittest.mock import Mock, patch
from weakref import ref

import pytest

try:
    from django.db import models

    # from parasolr.django.indexing import ModelIndexable
    from parasolr.django.signals import IndexableSignalHandler
    from parasolr.django.tests import test_models
except ImportError:
    IndexableSignalHandler = None

from parasolr.tests.utils import skipif_django, skipif_no_django


def setup_module():
    # connect indexing signal handlers for this test module only
    if IndexableSignalHandler:
        IndexableSignalHandler.connect()


def teardown_module():
    # disconnect indexing signal handlers
    if IndexableSignalHandler:
        IndexableSignalHandler.disconnect()


@skipif_django
def test_no_django_indexable():
    # should not be defined when django is not installed
    with pytest.raises(ImportError):
        from parasolr.django.signals import IndexableSignalHandler


@skipif_no_django
class TestIndexableSignalHandler:

    def test_connect(self):
        # check that signal handlers are connected as expected
        # - model save and delete
        post_save_handlers = [item[1] for item in
                              models.signals.post_save.receivers]
        assert ref(IndexableSignalHandler.handle_save) in post_save_handlers
        post_del_handlers = [item[1] for item in
                             models.signals.post_delete.receivers]
        assert ref(IndexableSignalHandler.handle_delete) in post_del_handlers
        # many to many
        m2m_handlers = [item[1] for item in
                        models.signals.m2m_changed.receivers]
        assert ref(IndexableSignalHandler.handle_relation_change) \
            in m2m_handlers

        # testing related handlers based on test models
        post_save_handlers = [item[1] for item in
                              models.signals.post_save.receivers]
        assert ref(test_models.signal_method) in post_save_handlers
        pre_del_handlers = [item[1] for item in
                            models.signals.pre_delete.receivers]
        assert ref(test_models.signal_method) in pre_del_handlers

    def test_handle_save(self):
        instance = test_models.IndexItem()
        with patch.object(instance, 'index') as mockindex:
            # call directly
            IndexableSignalHandler.handle_save(Mock(), instance)
            mockindex.assert_any_call()

            # call via signal
            mockindex.reset_mock()
            models.signals.post_save.send(test_models.IndexItem,
                                          instance=instance)
            mockindex.assert_any_call()

        # non-indexable object should be ignored
        nonindexable = Mock()
        IndexableSignalHandler.handle_save(Mock(), nonindexable)
        nonindexable.index.assert_not_called()

    def test_handle_delete(self):
        with patch.object(test_models.IndexItem, 'remove_from_index') as \
                mock_rmindex:
            instance = test_models.IndexItem()
            IndexableSignalHandler.handle_delete(Mock(), instance)
            mock_rmindex.assert_called_with()

        # non-indexable object should be ignored
        nonindexable = Mock()
        IndexableSignalHandler.handle_delete(Mock(), nonindexable)
        nonindexable.remove_from_index.assert_not_called()

    @pytest.mark.django_db
    def test_handle_relation_change(self):
        instance = test_models.IndexItem()
        with patch.object(instance, 'index') as mockindex:
            # call directly - supported actions
            for action in ['post_add', 'post_remove', 'post_clear']:
                mockindex.reset_mock()
                IndexableSignalHandler.handle_relation_change(
                    test_models.IndexItem, instance, action)
                mockindex.assert_any_call()

            # if action is not one we care about, should be ignored
            mockindex.reset_mock()
            IndexableSignalHandler.handle_relation_change(
                test_models.IndexItem, instance, 'pre_remove')
            mockindex.assert_not_called()

        # non-indexable object should be ignored
        nonindexable = Mock()
        IndexableSignalHandler.handle_relation_change(
            Mock(), nonindexable, 'post_add')
        nonindexable.index.assert_not_called()
StarcoderdataPython
1785094
from flask import Blueprint, request, jsonify, session, render_template
from flask_login import login_required

api = Blueprint('api', __name__)


@api.route('/articles', methods=['GET'])
def get_all_articles():
    pass


@api.route('/article', methods=['POST'])
def create_article():
    pass


@api.route('/article/<string:article_name>', methods=['GET', 'PUT', 'DELETE'])
def article(article_name):
    pass


# Note: @login_required must sit *below* @api.route. Decorators apply
# bottom-up, so the route has to register the already-wrapped view; with
# @login_required on top (as originally written) the check never runs.
@api.route('/subscribers', methods=['GET'])
@login_required
def get_subscribers():
    pass


@api.route('/subscriber', methods=['POST'])
def add_subscriber():
    pass


@api.route('/subscriber/<uuid:id>', methods=['DELETE'])
def delete_subscriber(id):
    pass


@api.route('/tags', methods=['GET'])
@login_required
def get_tags():
    pass


@api.route('/tag', methods=['POST'])
@login_required
def add_tag():
    pass


@api.route('/tag/<uuid:id>', methods=['DELETE'])
def delete_tag(id):
    pass


@api.route('/short_url', methods=['POST'])
def create_short_url():
    pass


@api.route('/config', methods=['GET', 'POST', 'PATCH'])
@login_required
def config():
    pass
StarcoderdataPython
133188
import sys

import ctypes_scanner

# PID to scan comes from the command line; the pattern is a raw string so the
# backslash escape is passed through to the regex engine unchanged.
pidint = int(sys.argv[1])
toto = ctypes_scanner.GetRegexMatches(pidint, r"http://[a-zA-Z_0-9\.]*")
print(toto)
print(len(toto))
StarcoderdataPython
1740609
<reponame>genkosta/social-network
# Generated by Django 2.0.6 on 2018-06-26 08:38

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import social_network.core.models


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0008_post_is_disable'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to=social_network.core.models.make_upload_path, validators=[social_network.core.models.validate_image], verbose_name='Image')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Profile',
                'verbose_name_plural': 'Profiles',
                'ordering': ('-pk',),
            },
        ),
    ]
StarcoderdataPython
162807
from ..utils.error import Error


class Validator(object):
    def __init__(self):
        pass

    def validate(self, model):
        self.validate_php_functional(model)

    @staticmethod
    def validate_php_functional(model):
        for cls in model.classes:
            for member in cls.members:
                if member.type == 'map':
                    key_type = member.template_args[0].type
                    if model.has_class(key_type) and model.get_class(key_type).type != 'enum':
                        value_type = member.template_args[1].type
                        Error.exit(Error.OBJECT_IS_KEY_OF_MAP, cls.name,
                                   key_type, value_type, member.name)
                if cls.type == 'enum' and member.initial_value is not None:
                    if '|' in member.initial_value or \
                            '&' in member.initial_value or \
                            '^' in member.initial_value or \
                            '~' in member.initial_value:
                        Error.exit(Error.ENUM_CANNOT_BE_COMBINATED, cls.name,
                                   member.name, member.initial_value)
StarcoderdataPython
3272574
# Methods to help parse files for private keys to servers and safes.
# Used in Bot.py files to read in the user's key-values.

from .globals import *
from utils.Keychain import Keychain


def parseKeychainFromFile(filename, folder="keys"):
    """
    Parses the bot's private information into a dictionary of {key: value}
    pairs, which is used to return a `Keychain` object.
    """
    filepath = folder + "/" + filename
    print(f"{CM_UTILS}attempting to read keys from {filepath}...")

    # Required bot startup parameters
    print(f"{CM_UTILS}required values in expected file format:")
    header = f"============> {filepath} <============"
    print(header)
    for rk in REQUIRED:
        print(f'{rk}{KF_SPLIT}"{rk}"')
    print('=' * len(header))

    # File parsing
    try:
        # Initialize the read keys dictionary
        keychain = Keychain()

        # Go through keys file line-by-line and add to keysDict.
        # Also for each collected key parameter, check it off the required list.
        with open(filepath, 'r+') as keyFile:
            for line in keyFile.readlines():
                ls = line.strip().split(KF_SPLIT)
                key = ls[0].strip()
                val = ls[1].strip()
                keychain.addKeyValue(key, val)

        # Not all required
        if not keychain.hasAllRequired():
            print(f"{CM_UTILS}[ERROR]: Did not read all required parameters.")
            print(">Please double-check your key file and try again.")
            print(">Quitting...")
            quit()

        # All required parameters met. Lock the keychain and return data for bot.
        keychain.lockKeys()
        return keychain

    # Catch file errors or other strange beasts
    except Exception as e:
        print(f"{CM_UTILS}[ERROR] - There was an error reading data from the file.")
        print(">Check your file input.")
        print(f">{e}")
        quit()
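
# Added sketch (not part of the original module): a self-contained
# illustration of the line format parseKeychainFromFile expects. The real
# separator KF_SPLIT lives in .globals; ":" here is an assumption, and the
# key names are hypothetical. Only the split logic is demonstrated, not the
# Keychain API itself.
_example_lines = ["token : abc123", "server : xyz"]
_parsed = {}
for _line in _example_lines:
    _key, _val = (part.strip() for part in _line.strip().split(":", 1))
    _parsed[_key] = _val
assert _parsed == {"token": "abc123", "server": "xyz"}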
StarcoderdataPython
4812263
<gh_stars>0
### HELPERS ###

def to_dec(bin, val=0):
    if len(bin) == 0:
        return val
    return to_dec(bin[1:], val + (int(bin[0]) * 2**(len(bin) - 1)))

#print(to_dec("0"))      # 0
#print(to_dec("1"))      # 1
#print(to_dec("10110"))  # 22
#print(to_dec("01001"))  # 9

### SOL ###

f = open("p1.txt", "r")
bits = f.readlines()
f.close()

gamma = ""
epsilon = ""
for i in range(0, len(bits[0]) - 1):
    zeros = 0
    ones = 0
    for b in bits:
        if b[i] == "1":
            ones += 1
        else:  # b[i] == "0"
            zeros += 1
    if ones > zeros:
        gamma += "1"
        epsilon += "0"
    else:  # zeros < ones
        gamma += "0"
        epsilon += "1"
    # Problem does not specify what to do if equal

print("gamma:", gamma, "=", to_dec(gamma))
print("epsilon:", epsilon, "=", to_dec(epsilon))
print("answer:", to_dec(gamma) * to_dec(epsilon))
print("\n\n")

bits = [b.strip("\n") for b in bits]
oxygen = bits.copy()
co2 = bits.copy()

for i in range(0, len(bits[0])):
    zeros = []
    ones = []
    if len(oxygen) > 1:
        for b in oxygen:
            if b[i] == "1":
                ones.append(b)
            else:  # b[i] == "0"
                zeros.append(b)
        if len(ones) >= len(zeros):
            oxygen = ones.copy()
        else:  # len(ones) < len(zeros)
            oxygen = zeros.copy()

    zeros = []
    ones = []
    if len(co2) > 1:
        for b in co2:
            if b[i] == "1":
                ones.append(b)
            else:  # b[i] == "0"
                zeros.append(b)
        if len(zeros) <= len(ones):
            co2 = zeros.copy()
        else:  # len(zeros) > len(ones)
            co2 = ones.copy()

print("oxygen:", oxygen[0], "=", to_dec(oxygen[0]))
print("co2:", co2[0], "=", to_dec(co2[0]))
print("answer:", to_dec(oxygen[0]) * to_dec(co2[0]))
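
# Added check (not in the original solution): to_dec implements ordinary
# binary-to-decimal conversion, so it should agree with Python's built-in
# int(s, 2) on any bit string.
for _s in ("0", "1", "10110", "01001"):
    assert to_dec(_s) == int(_s, 2)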
StarcoderdataPython
144524
# -*- coding: utf-8 -*-
from __future__ import print_function

import torch
import torch.nn.functional as F
import spdnn

torch.manual_seed(7)
input = torch.randn(2, 3, 3, 3, requires_grad=True).cuda()
weight = torch.randn(3, 3, 2, 2, requires_grad=True).cuda()
print('input shape: ', input.shape)
print('weights shape: ', weight.shape)


def test_conv():
    output = F.conv2d(input, weight)
    print('output shape: ', output.shape)
    return output


def test_conv_grad():
    grad_output = torch.randn(output.shape).cuda()
    print('grad_output: ', grad_output)
    grad_weight = F.grad.conv2d_weight(input, weight.shape, grad_output)
    print('grad_weight: ', grad_weight)
    grad_input = F.grad.conv2d_input(input.shape, weight, grad_output)
    print('grad_input: ', grad_input)


def conv2d(x, w, stride=1, padding=0, dilation=1):
    input_size = (x.shape[2], x.shape[3])
    kernel_size = (w.shape[2], w.shape[3])
    inp_unf = F.unfold(x, kernel_size)
    out_unf = w.view(w.size(0), -1).matmul(inp_unf)
    height = (input_size[0] + 2*padding - dilation*(kernel_size[0]-1) - 1)//stride + 1
    width = (input_size[1] + 2*padding - dilation*(kernel_size[1]-1) - 1)//stride + 1
    output_size = (height, width)
    output = out_unf.view(out_unf.shape[0], out_unf.shape[1],
                          output_size[0], output_size[1])
    return output


def conv2d_grad_input(input_shape, w, grad, stride=1, padding=0, dilation=1):
    # Sketch only, as in the original: ignores stride/padding/dilation and
    # only swaps the in/out channel dims of the kernel.
    input_size = (input_shape[2], input_shape[3])
    kernel_size = (w.shape[2], w.shape[3])
    return conv2d(grad, w.transpose(1, 2))


def test_conv2gemm():
    kernel_size = (weight.shape[2], weight.shape[3])
    inp_unf = F.unfold(input, kernel_size)
    print('inp_unf shape: ', inp_unf.shape)
    #out_unf = inp_unf.transpose(1, 2).matmul(weight.view(weight.size(0), -1).t()).transpose(1, 2)
    # .mm only handles 2-D tensors; matmul broadcasts over the batch dim
    out_unf = weight.view(weight.size(0), -1).matmul(inp_unf)
    #w = weight.view(2, -1)
    #print('weight shape: ', weight.shape)
    #print('w shape: ', w.shape)
    #out_unf = w.mm(inp_unf)
    print('out_unf shape: ', out_unf.shape)
    #output = F.fold(out_unf, (2,2), kernel_size)
    output_size = (2, 2)
    output = out_unf.view(out_unf.shape[0], out_unf.shape[1],
                          output_size[0], output_size[1])
    print('output shape: ', output.shape)
    return output


if __name__ == '__main__':
    output = test_conv()
    output2 = conv2d(input, weight)
    print('diff: ', (output - output2).norm())
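
# Added check (not in the original file): a CPU-only sketch verifying that the
# unfold-based conv2d above matches F.conv2d in the stride-1, no-padding case
# it supports. For a 3x3 input and a 2x2 kernel the output-size formula gives
# (3 + 0 - 1*(2-1) - 1)//1 + 1 = 2 per spatial dimension. Wrapped in a
# function so it can run without a GPU; underscore-prefixed names are new.
def _check_conv2d_matches():
    _x = torch.randn(2, 3, 3, 3)
    _w = torch.randn(3, 3, 2, 2)
    _ref = F.conv2d(_x, _w)
    _ours = conv2d(_x, _w)
    assert _ref.shape == _ours.shape == (2, 3, 2, 2)
    assert torch.allclose(_ref, _ours, atol=1e-5)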
StarcoderdataPython
112657
'''
https://docs.python.org/3/reference/datamodel.html#object.__getitem__
'''


class XXX(object):
    def __init__(self):
        self.data = {int(i): str(i) for i in range(3)}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError
        return self.data[index], index


x = XXX()
for v, i in x:
    print(v, i)
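
# Added note: the loop above relies on the legacy sequence-iteration protocol.
# When a class defines __getitem__ but not __iter__, iter() builds an iterator
# that calls __getitem__ with 0, 1, 2, ... until IndexError is raised.
_it = iter(XXX())
assert next(_it) == ('0', 0)
assert next(_it) == ('1', 1)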
StarcoderdataPython
47945
import numpy as np
import scipy as sp
from scipy.sparse.linalg import LinearOperator, lgmres, gmres
import tensornetwork as tn

import jax_vumps.numpy_backend.contractions as ct
# import jax_vumps.numpy_backend.mps_linalg as mps_linalg


def LH_linear_operator(A_L, lR):
    """
    Return, as a LinearOperator, the LHS of the equation found by summing
    the geometric series for the left environment Hamiltonian.
    """
    chi = A_L.shape[1]
    Id = np.eye(chi, dtype=A_L.dtype)

    def matvec(v):
        v = v.reshape((chi, chi))
        Th_v = ct.XopL(A_L, X=v)
        vR = ct.proj(v, lR) * Id
        v = v - Th_v + vR
        v = v.flatten()
        return v

    op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_L.dtype)
    return op


def call_solver(op, hI, params, x0, tol):
    """
    Code used by both solve_for_RH and solve_for_LH to call the sparse
    solver.
    """
    if x0 is not None:
        x0 = x0.flatten()

    if params["solver"] == "gmres":
        x, info = gmres(op,
                        hI.flatten(),
                        tol=tol,
                        restart=params["n_krylov"],
                        maxiter=params["max_restarts"],
                        x0=x0)
    elif params["solver"] == "lgmres":
        x, info = lgmres(op,
                         hI.flatten(),
                         tol=tol,
                         maxiter=params["maxiter"],
                         inner_m=params["inner_m"],
                         outer_k=params["outer_k"],
                         x0=x0)
    new_hI = x.reshape(hI.shape)
    return (new_hI, info)


def outermat(A, B):
    chi = A.shape[0]
    contract = [A, B]
    idxs = [[-2, -1], [-3, -4]]
    return tn.ncon(contract, idxs, backend="numpy").reshape((chi**2, chi**2))


def dense_LH_op(A_L, lR):
    chi = A_L.shape[1]
    eye = np.eye(chi, dtype=A_L.dtype)
    term1 = outermat(eye, eye)
    term2 = ct.tmdense(A_L).reshape((chi**2, chi**2))
    term3 = outermat(eye, lR)
    mat = term1 - term2 + term3
    mat = mat.T
    return mat


def prepare_for_LH_solve(A_L, H, lR):
    hL_bare = ct.compute_hL(A_L, H)
    hL_div = ct.proj(hL_bare, lR) * np.eye(hL_bare.shape[0])
    hL = hL_bare - hL_div
    return hL


def solve_for_LH(A_L, H, lR, params, delta, oldLH=None, dense=False):
    """
    Find the renormalized left environment Hamiltonian using a sparse
    solver.
    """
    hL = prepare_for_LH_solve(A_L, H, lR)
    chi = hL.shape[0]
    tol = params["tol_coef"] * delta
    if dense:
        mat = dense_LH_op(A_L, lR)
        op = LH_linear_operator(A_L, lR)
        LH = sp.linalg.solve(mat.T, hL.reshape((chi**2)))
        LH = LH.reshape((chi, chi))
    else:
        op = LH_linear_operator(A_L, lR)
        LH, info = call_solver(op, hL, params, oldLH, tol)
        if info != 0:
            print("Warning: Hleft solution failed with code: " + str(info))
    return LH


def RH_linear_operator(A_R, rL):
    """
    Return, as a LinearOperator, the LHS of the equation found by summing
    the geometric series for the right environment Hamiltonian.
    """
    chi = A_R.shape[1]
    Id = np.eye(chi, dtype=A_R.dtype)

    def matvec(v):
        v = v.reshape((chi, chi))
        Th_v = ct.XopR(A_R, X=v)
        Lv = ct.proj(rL, v) * Id
        v = v - Th_v + Lv
        v = v.flatten()
        return v

    op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_R.dtype)
    return op


def solve_for_RH(A_R, H, rL, params, delta, oldRH=None):
    """
    Find the renormalized right environment Hamiltonian using a sparse
    solver.
    """
    hR_bare = ct.compute_hR(A_R, H)
    hR_div = ct.proj(rL, hR_bare) * np.eye(hR_bare.shape[0])
    hR = hR_bare - hR_div
    op = RH_linear_operator(A_R, rL)
    tol = params["tol_coef"] * delta
    RH, info = call_solver(op, hR, params, oldRH, tol)
    if info != 0:
        print("Warning: RH solution failed with code: " + str(info))
    # RHL = np.abs(ct.proj(rL, RH))
    # if RHL > 1E-6:
    #     print("Warning: large <L|RH> = ", str(RHL))
    return RH
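
# Added sketch (assumptions flagged): a numpy-only illustration of the map
# that LH_linear_operator's matvec applies, v -> v - T_L(v) + <v, lR> * I.
# _XopL and _proj stand in for ct.XopL and ct.proj; their einsum definitions
# are assumptions about the contraction conventions, since the real
# implementations live in jax_vumps.numpy_backend.contractions.
def _XopL(A_L, X):
    # assumed left transfer action: sum_s A_L[s]^dagger @ X @ A_L[s]
    return np.einsum('sab,ac,scd->bd', A_L.conj(), X, A_L)

def _proj(X, Y):
    # assumed overlap: Tr(X Y)
    return np.trace(X @ Y)

def _lh_matvec_demo(chi=3, d=2, seed=0):
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(d, chi, chi))
    v = rng.normal(size=(chi, chi))
    lR = rng.normal(size=(chi, chi))
    out = v - _XopL(A, v) + _proj(v, lR) * np.eye(chi)
    assert out.shape == (chi, chi)
    return out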
StarcoderdataPython
1630145
""" TIFF image parser. Authors: <NAME>, <NAME>, <NAME> Creation date: 30 september 2006 """ from hachoir.parser import Parser from hachoir.field import SeekableFieldSet, RootSeekableFieldSet, Bytes from hachoir.core.endian import LITTLE_ENDIAN, BIG_ENDIAN from hachoir.parser.image.exif import TIFF, IFD def getStrips(ifd): data = {} for i, entry in enumerate(ifd.array('entry')): data[entry['tag'].display] = entry # image data if "StripOffsets" in data and "StripByteCounts" in data: offs = ifd.getEntryValues(data["StripOffsets"]) bytes = ifd.getEntryValues(data["StripByteCounts"]) for off, byte in zip(offs, bytes): yield off.value, byte.value # image data if "TileOffsets" in data and "TileByteCounts" in data: offs = ifd.getEntryValues(data["TileOffsets"]) bytes = ifd.getEntryValues(data["TileByteCounts"]) for off, byte in zip(offs, bytes): yield off.value, byte.value class ImageFile(SeekableFieldSet): def __init__(self, parent, name, description, ifd): SeekableFieldSet.__init__(self, parent, name, description, None) self._ifd = ifd def createFields(self): for off, byte in getStrips(self._ifd): self.seekByte(off, relative=False) field = Bytes(self, "strip[]", byte) yield field class TiffFile(RootSeekableFieldSet, Parser): PARSER_TAGS = { "id": "tiff", "category": "image", "file_ext": ("tif", "tiff"), "mime": ("image/tiff",), "min_size": 8 * 8, "magic": ((b"II\x2A\0", 0), (b"MM\0\x2A", 0)), "description": "TIFF picture" } # Correct endian is set in constructor endian = LITTLE_ENDIAN def __init__(self, stream, **args): RootSeekableFieldSet.__init__( self, None, "root", stream, None, stream.askSize(self)) if self.stream.readBytes(0, 2) == b"MM": self.endian = BIG_ENDIAN Parser.__init__(self, stream, **args) def validate(self): endian = self.stream.readBytes(0, 2) if endian not in (b"MM", b"II"): return "Invalid endian (%r)" % endian if self["version"].value != 42: return "Unknown TIFF version" return True def createFields(self): yield from TIFF(self) for ifd in self: if not isinstance(ifd, IFD): continue offs = (off for off, byte in getStrips(ifd)) self.seekByte(min(offs), relative=False) image = ImageFile(self, "image[]", "Image File", ifd) yield image
StarcoderdataPython
183277
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials

execfile("config.py")


# Core Reporting API
def get_core_reporting_api_service():
    scope = 'https://www.googleapis.com/auth/analytics.readonly'
    key_file_location = config['client_secret_file_name']

    # Authenticate and construct service.
    service = get_service(
        api_name='analytics',
        api_version='v3',
        scopes=[scope],
        key_file_location=key_file_location)
    return service


# Common Utilities
def get_service(api_name, api_version, scopes, key_file_location):
    """Get a service that communicates to a Google API.

    Args:
        api_name: The name of the api to connect to.
        api_version: The api version to connect to.
        scopes: A list of auth scopes to authorize for the application.
        key_file_location: The path to a valid service account JSON key file.

    Returns:
        A service that is connected to the specified API.
    """
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        key_file_location, scopes=scopes)

    # Build the service object.
    service = build(api_name, api_version, credentials=credentials)
    return service


def get_the_first_profile_id(service):
    accounts = service.management().accounts().list().execute()

    if accounts.get('items'):
        account = accounts.get('items')[0].get('id')
        properties = service.management().webproperties().list(
            accountId=account).execute()

        if properties.get('items'):
            property = properties.get('items')[0].get('id')
            profiles = service.management().profiles().list(
                accountId=account, webPropertyId=property).execute()

            if profiles.get('items'):
                return profiles.get('items')[0].get('id')

    return None
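
# Added usage sketch tying the helpers together. It assumes config.py defines
# config['client_secret_file_name'] and that the service account has access to
# at least one Analytics account; the function name is new.
def _print_first_profile():
    service = get_core_reporting_api_service()
    profile_id = get_the_first_profile_id(service)
    print("First profile id: %s" % profile_id)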
StarcoderdataPython
87418
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.

from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_cqpid', [dirname(__file__)])
        except ImportError:
            import _cqpid
            return _cqpid
        if fp is not None:
            try:
                _mod = imp.load_module('_cqpid', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _cqpid = swig_import_helper()
    del swig_import_helper
else:
    import _cqpid
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)

def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)

def _swig_getattr(self,class_type,name):
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)

def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0

MessagingError = _cqpid.MessagingError
LinkError = _cqpid.LinkError
AddressError = _cqpid.AddressError
ResolutionError = _cqpid.ResolutionError
AssertionFailed = _cqpid.AssertionFailed
NotFound = _cqpid.NotFound
InvalidOption = _cqpid.InvalidOption
MalformedAddress = _cqpid.MalformedAddress
ReceiverError = _cqpid.ReceiverError
FetchError = _cqpid.FetchError
Empty = _cqpid.Empty
SenderError = _cqpid.SenderError
SendError = _cqpid.SendError
TargetCapacityExceeded = _cqpid.TargetCapacityExceeded
ConnectionError = _cqpid.ConnectionError
ConnectError = _cqpid.ConnectError
SessionError = _cqpid.SessionError
TransactionError = _cqpid.TransactionError
TransactionAborted = _cqpid.TransactionAborted
UnauthorizedAccess = _cqpid.UnauthorizedAccess

class Address(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Address, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Address, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Address(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Address
    __del__ = lambda self : None;
    def getName(self): return _cqpid.Address_getName(self)
    def setName(self, *args): return _cqpid.Address_setName(self, *args)
    def getSubject(self): return _cqpid.Address_getSubject(self)
    def setSubject(self, *args): return _cqpid.Address_setSubject(self, *args)
    def getOptions(self, *args): return _cqpid.Address_getOptions(self, *args)
    def setOptions(self, *args): return _cqpid.Address_setOptions(self, *args)
    def getType(self): return _cqpid.Address_getType(self)
    def setType(self, *args): return _cqpid.Address_setType(self, *args)
    def str(self): return _cqpid.Address_str(self)
    def __nonzero__(self):
        return _cqpid.Address___nonzero__(self)
    __bool__ = __nonzero__
Address_swigregister = _cqpid.Address_swigregister
Address_swigregister(Address)

class Duration(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Duration, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Duration, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Duration(*args)
        try: self.this.append(this)
        except: self.this = this
    def getMilliseconds(self): return _cqpid.Duration_getMilliseconds(self)
    __swig_destroy__ = _cqpid.delete_Duration
    __del__ = lambda self : None;
Duration_swigregister = _cqpid.Duration_swigregister
Duration_swigregister(Duration)
cvar = _cqpid.cvar
Duration.FOREVER = _cqpid.cvar.Duration_FOREVER
Duration.IMMEDIATE = _cqpid.cvar.Duration_IMMEDIATE
Duration.SECOND = _cqpid.cvar.Duration_SECOND
Duration.MINUTE = _cqpid.cvar.Duration_MINUTE

def __eq__(*args):
    return _cqpid.__eq__(*args)
__eq__ = _cqpid.__eq__

def __ne__(*args):
    return _cqpid.__ne__(*args)
__ne__ = _cqpid.__ne__

class Message(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Message, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Message, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Message(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Message
    __del__ = lambda self : None;
    def _setReplyTo(self, *args): return _cqpid.Message__setReplyTo(self, *args)
    def _getReplyTo(self): return _cqpid.Message__getReplyTo(self)
    def setSubject(self, *args): return _cqpid.Message_setSubject(self, *args)
    def getSubject(self): return _cqpid.Message_getSubject(self)
    def setContentType(self, *args): return _cqpid.Message_setContentType(self, *args)
    def getContentType(self): return _cqpid.Message_getContentType(self)
    def setMessageId(self, *args): return _cqpid.Message_setMessageId(self, *args)
    def getMessageId(self): return _cqpid.Message_getMessageId(self)
    def setUserId(self, *args): return _cqpid.Message_setUserId(self, *args)
    def getUserId(self): return _cqpid.Message_getUserId(self)
    def setCorrelationId(self, *args): return _cqpid.Message_setCorrelationId(self, *args)
    def getCorrelationId(self): return _cqpid.Message_getCorrelationId(self)
    def setPriority(self, *args): return _cqpid.Message_setPriority(self, *args)
    def getPriority(self): return _cqpid.Message_getPriority(self)
    def _setTtl(self, *args): return _cqpid.Message__setTtl(self, *args)
    def _getTtl(self): return _cqpid.Message__getTtl(self)
    def setDurable(self, *args): return _cqpid.Message_setDurable(self, *args)
    def getDurable(self): return _cqpid.Message_getDurable(self)
    def getRedelivered(self): return _cqpid.Message_getRedelivered(self)
    def setRedelivered(self, *args): return _cqpid.Message_setRedelivered(self, *args)
    def getProperties(self, *args): return _cqpid.Message_getProperties(self, *args)
    def setContent(self, *args): return _cqpid.Message_setContent(self, *args)
    def getContent(self): return _cqpid.Message_getContent(self)
    def getContentPtr(self): return _cqpid.Message_getContentPtr(self)
    def getContentSize(self): return _cqpid.Message_getContentSize(self)
    def setProperty(self, *args): return _cqpid.Message_setProperty(self, *args)

    # UNSPECIFIED was module level before, but I do not
    # know how to insert python code at the top of the module.
    # (A bare "%pythoncode" inserts at the end.
    UNSPECIFIED=object()

    def __init__(self, content=None, content_type=UNSPECIFIED, id=None,
                 subject=None, user_id=None, reply_to=None,
                 correlation_id=None, durable=None, priority=None,
                 ttl=None, properties=None):
        this = _cqpid.new_Message('')
        try: self.this.append(this)
        except: self.this = this
        if content :
            self.content = content
        if content_type != UNSPECIFIED :
            self.content_type = content_type
        if id is not None :
            self.id = id
        if subject is not None :
            self.subject = subject
        if user_id is not None :
            self.user_id = user_id
        if reply_to is not None :
            self.reply_to = reply_to
        if correlation_id is not None :
            self.correlation_id = correlation_id
        if durable is not None :
            self.durable = durable
        if priority is not None :
            self.priority = priority
        if ttl is not None :
            self.ttl = ttl
        if properties is not None :
            # Can't set properties via (inst).getProperties, because
            # the typemaps make a copy of the underlying properties.
            # Instead, set via setProperty for the time-being
            for k, v in properties.iteritems() :
                self.setProperty(k, v)

    def _get_content(self) :
        if self.content_type == "amqp/list" :
            return decodeList(self)
        if self.content_type == "amqp/map" :
            return decodeMap(self)
        return self.getContent()
    def _set_content(self, content) :
        if isinstance(content, basestring) :
            self.setContent(content)
        elif isinstance(content, list) or isinstance(content, dict) :
            encode(content, self)
        else :
            # Not a type we can handle.  Try setting it anyway,
            # although this will probably lead to a swig error
            self.setContent(content)
    __swig_getmethods__["content"] = _get_content
    __swig_setmethods__["content"] = _set_content
    if _newclass: content = property(_get_content, _set_content)

    __swig_getmethods__["content_type"] = getContentType
    __swig_setmethods__["content_type"] = setContentType
    if _newclass: content_type = property(getContentType, setContentType)

    __swig_getmethods__["id"] = getMessageId
    __swig_setmethods__["id"] = setMessageId
    if _newclass: id = property(getMessageId, setMessageId)

    __swig_getmethods__["subject"] = getSubject
    __swig_setmethods__["subject"] = setSubject
    if _newclass: subject = property(getSubject, setSubject)

    __swig_getmethods__["priority"] = getPriority
    __swig_setmethods__["priority"] = setPriority
    if _newclass: priority = property(getPriority, setPriority)

    def getTtl(self) :
        return self._getTtl().getMilliseconds()/1000.0
    def setTtl(self, duration) :
        self._setTtl(Duration(int(1000*duration)))
    __swig_getmethods__["ttl"] = getTtl
    __swig_setmethods__["ttl"] = setTtl
    if _newclass: ttl = property(getTtl, setTtl)

    __swig_getmethods__["user_id"] = getUserId
    __swig_setmethods__["user_id"] = setUserId
    if _newclass: user_id = property(getUserId, setUserId)

    __swig_getmethods__["correlation_id"] = getCorrelationId
    __swig_setmethods__["correlation_id"] = setCorrelationId
    if _newclass: correlation_id = property(getCorrelationId, setCorrelationId)

    __swig_getmethods__["redelivered"] = getRedelivered
    __swig_setmethods__["redelivered"] = setRedelivered
    if _newclass: redelivered = property(getRedelivered, setRedelivered)

    __swig_getmethods__["durable"] = getDurable
    __swig_setmethods__["durable"] = setDurable
    if _newclass: durable = property(getDurable, setDurable)

    __swig_getmethods__["properties"] = getProperties
    if _newclass: properties = property(getProperties)

    def getReplyTo(self) :
        return self._getReplyTo().str()
    def setReplyTo(self, address_str) :
        self._setReplyTo(Address(address_str))
    __swig_getmethods__["reply_to"] = getReplyTo
    __swig_setmethods__["reply_to"] = setReplyTo
    if _newclass: reply_to = property(getReplyTo, setReplyTo)

    def __repr__(self):
        args = []
        for name in ["id", "subject", "user_id", "reply_to",
                     "correlation_id", "priority", "ttl",
                     "durable", "redelivered", "properties",
                     "content_type"] :
            value = getattr(self, name)
            if value : args.append("%s=%r" % (name, value))
        if self.content is not None:
            if args:
                args.append("content=%r" % self.content)
            else:
                args.append(repr(self.content))
        return "Message(%s)" % ", ".join(args)

Message_swigregister = _cqpid.Message_swigregister
Message_swigregister(Message)

def __mul__(*args):
    return _cqpid.__mul__(*args)
__mul__ = _cqpid.__mul__

class EncodingException(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, EncodingException, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, EncodingException, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_EncodingException(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_EncodingException
    __del__ = lambda self : None;
EncodingException_swigregister = _cqpid.EncodingException_swigregister
EncodingException_swigregister(EncodingException)

class Receiver(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Receiver, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Receiver, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Receiver(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Receiver
    __del__ = lambda self : None;
    def get(self, *args): return _cqpid.Receiver_get(self, *args)
    def _fetch(self, *args): return _cqpid.Receiver__fetch(self, *args)
    def setCapacity(self, *args): return _cqpid.Receiver_setCapacity(self, *args)
    def getCapacity(self): return _cqpid.Receiver_getCapacity(self)
    def available(self): return _cqpid.Receiver_available(self)
    def unsettled(self): return _cqpid.Receiver_unsettled(self)
    def close(self): return _cqpid.Receiver_close(self)
    def isClosed(self): return _cqpid.Receiver_isClosed(self)
    def getName(self): return _cqpid.Receiver_getName(self)
    def getSession(self): return _cqpid.Receiver_getSession(self)

    __swig_getmethods__["capacity"] = getCapacity
    __swig_setmethods__["capacity"] = setCapacity
    if _newclass: capacity = property(getCapacity, setCapacity)

    __swig_getmethods__["session"] = getSession
    if _newclass: session = property(getSession)

    def fetch(self, timeout=None) :
        if timeout is None :
            return self._fetch()
        else :
            # Python API uses timeouts in seconds,
            # but C++ API uses milliseconds
            return self._fetch(Duration(int(1000*timeout)))

Receiver_swigregister = _cqpid.Receiver_swigregister
Receiver_swigregister(Receiver)

def decode(*args):
    return _cqpid.decode(*args)
decode = _cqpid.decode

def encode(*args):
    return _cqpid.encode(*args)
encode = _cqpid.encode

class Sender(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Sender, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Sender, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Sender(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Sender
    __del__ = lambda self : None;
    def _send(self, *args): return _cqpid.Sender__send(self, *args)
    def close(self): return _cqpid.Sender_close(self)
    def setCapacity(self, *args): return _cqpid.Sender_setCapacity(self, *args)
    def getCapacity(self): return _cqpid.Sender_getCapacity(self)
    def unsettled(self): return _cqpid.Sender_unsettled(self)
    def available(self): return _cqpid.Sender_available(self)
    def getName(self): return _cqpid.Sender_getName(self)
    def getSession(self): return _cqpid.Sender_getSession(self)

    def send(self, object, sync=True) :
        if isinstance(object, Message):
            message = object
        else:
            message = Message(object)
        return self._send(message, sync)

    __swig_getmethods__["capacity"] = getCapacity
    __swig_setmethods__["capacity"] = setCapacity
    if _newclass: capacity = property(getCapacity, setCapacity)

    __swig_getmethods__["session"] = getSession
    if _newclass: session = property(getSession)

Sender_swigregister = _cqpid.Sender_swigregister
Sender_swigregister(Sender)

class Session(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Session, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Session, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Session(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Session
    __del__ = lambda self : None;
    def close(self): return _cqpid.Session_close(self)
    def commit(self): return _cqpid.Session_commit(self)
    def rollback(self): return _cqpid.Session_rollback(self)
    def _acknowledge_all(self, sync = False): return _cqpid.Session__acknowledge_all(self, sync)
    def _acknowledge_msg(self, *args): return _cqpid.Session__acknowledge_msg(self, *args)
    def acknowledge(self, *args): return _cqpid.Session_acknowledge(self, *args)
    def acknowledgeUpTo(self, *args): return _cqpid.Session_acknowledgeUpTo(self, *args)
    def reject(self, *args): return _cqpid.Session_reject(self, *args)
    def release(self, *args): return _cqpid.Session_release(self, *args)
    def sync(self, block = True): return _cqpid.Session_sync(self, block)
    def getReceivable(self): return _cqpid.Session_getReceivable(self)
    def getUnsettledAcks(self): return _cqpid.Session_getUnsettledAcks(self)
    def nextReceiver(self, *args): return _cqpid.Session_nextReceiver(self, *args)
    def sender(self, *args): return _cqpid.Session_sender(self, *args)
    def receiver(self, *args): return _cqpid.Session_receiver(self, *args)
    def getSender(self, *args): return _cqpid.Session_getSender(self, *args)
    def getReceiver(self, *args): return _cqpid.Session_getReceiver(self, *args)
    def getConnection(self): return _cqpid.Session_getConnection(self)
    def hasError(self): return _cqpid.Session_hasError(self)
    def checkError(self): return _cqpid.Session_checkError(self)

    def acknowledge(self, message=None, disposition=None, sync=True) :
        if disposition :
            raise Exception("SWIG does not support dispositions yet. Use "
                            "Session.reject and Session.release instead")
        if message :
            self._acknowledge_msg(message, sync)
        else :
            self._acknowledge_all(sync)

    __swig_getmethods__["connection"] = getConnection
    if _newclass: connection = property(getConnection)

Session_swigregister = _cqpid.Session_swigregister
Session_swigregister(Session)

class Connection(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Connection, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Connection, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_Connection(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_Connection
    __del__ = lambda self : None;
    def setOption(self, *args): return _cqpid.Connection_setOption(self, *args)
    def open(self): return _cqpid.Connection_open(self)
    def opened(self): return _cqpid.Connection_opened(self)
    def isOpen(self): return _cqpid.Connection_isOpen(self)
    def close(self): return _cqpid.Connection_close(self)
    def createTransactionalSession(self, *args): return _cqpid.Connection_createTransactionalSession(self, *args)
    def createSession(self, *args): return _cqpid.Connection_createSession(self, *args)
    def getSession(self, *args): return _cqpid.Connection_getSession(self, *args)
    def getAuthenticatedUsername(self): return _cqpid.Connection_getAuthenticatedUsername(self)

    # Handle the different options by converting underscores to hyphens.
    # Also, the sasl_mechanisms option in Python has no direct
    # equivalent in C++, so we will translate them to sasl_mechanism
    # when possible.
    def __init__(self, url=None, **options):
        if url:
            args = [url]
        else:
            args = []
        if options :
            if "sasl_mechanisms" in options :
                if ' ' in options.get("sasl_mechanisms",'') :
                    raise Exception(
                        "C++ Connection objects are unable to handle "
                        "multiple sasl-mechanisms")
                options["sasl_mechanism"] = options.pop("sasl_mechanisms")
            args.append(options)
        this = _cqpid.new_Connection(*args)
        try: self.this.append(this)
        except: self.this = this

    def _session(self, *args): return _cqpid.Connection__session(self, *args)

    def session(self, name=None, transactional=False) :
        if name is None :
            name = ''
        return self._session(name, transactional)

    @staticmethod
    def establish(url=None, **options) :
        conn = Connection(url, **options)
        conn.open()
        return conn

Connection_swigregister = _cqpid.Connection_swigregister
Connection_swigregister(Connection)

class FailoverUpdates(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FailoverUpdates, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FailoverUpdates, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _cqpid.new_FailoverUpdates(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _cqpid.delete_FailoverUpdates
    __del__ = lambda self : None;
FailoverUpdates_swigregister = _cqpid.FailoverUpdates_swigregister
FailoverUpdates_swigregister(FailoverUpdates)

def decodeMap(*args):
    return _cqpid.decodeMap(*args)
decodeMap = _cqpid.decodeMap

def decodeList(*args):
    return _cqpid.decodeList(*args)
decodeList = _cqpid.decodeList

# Bring into module scope
UNSPECIFIED = Message.UNSPECIFIED

# This file is compatible with both classic and new-style classes.
4804249
<reponame>Mario263/Hacktoberfest_2021
# Python3 code to demonstrate working of
# Maximum and Minimum K elements in Tuple
# Using sorted() + loop

# initializing tuple
test_tup = (5, 20, 3, 7, 6, 8)

# printing original tuple
print("The original tuple is : " + str(test_tup))

# initializing K
K = 2

# Maximum and Minimum K elements in Tuple
# Using sorted() + loop
res = []
test_tup = list(sorted(test_tup))
for idx, val in enumerate(test_tup):
    if idx < K or idx >= len(test_tup) - K:
        res.append(val)
res = tuple(res)

# printing result
print("The extracted values : " + str(res))
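
# Added note: because the tuple is sorted first, the same result can be taken
# directly with slicing instead of the index loop above.
_srt = sorted((5, 20, 3, 7, 6, 8))
assert tuple(_srt[:2] + _srt[-2:]) == (3, 5, 8, 20)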
StarcoderdataPython
124338
<reponame>dr-guangtou/asap
#!/usr/bin/env python
"""This script will read the dark matter particle table for the SMDPL
simulation, and downsample it for our model.
"""

import os
import argparse

import numpy as np
import pandas as pd

S18A_RAND_COLS = [
    'object_id', 'ra', 'dec', 'coord', 'skymap_id', 'tract', 'patch',
    'patch_s', 'parent_id', 'nchild', 'isprimary', 'adjust_density',
    'detect_ispatchinner', 'detect_istractinner',
    'g_pix_variance', 'g_sky_mean', 'g_sky_std', 'g_inputcount_value',
    'g_inputcount_flag', 'g_inputcount_flag_noinputs',
    'g_inputcount_flag_badcentroid', 'g_pixelflags',
    'g_pixelflags_offimage', 'g_pixelflags_edge', 'g_pixelflags_bad',
    'g_pixelflags_interpolatedcenter', 'g_pixelflags_saturatedcenter',
    'g_pixelflags_crcenter', 'g_pixelflags_suspectcenter',
    'g_pixelflags_bright_objectcenter', 'g_sdssshape_psf_shape11',
    'g_sdssshape_psf_shape22', 'g_sdssshape_psf_shape12',
    'r_pix_variance', 'r_sky_mean', 'r_sky_std', 'r_inputcount_value',
    'r_inputcount_flag', 'r_inputcount_flag_noinputs',
    'r_inputcount_flag_badcentroid', 'r_pixelflags',
    'r_pixelflags_offimage', 'r_pixelflags_edge', 'r_pixelflags_bad',
    'r_pixelflags_interpolatedcenter', 'r_pixelflags_saturatedcenter',
    'r_pixelflags_crcenter', 'r_pixelflags_suspectcenter',
    'r_pixelflags_bright_objectcenter', 'r_sdssshape_psf_shape11',
    'r_sdssshape_psf_shape22', 'r_sdssshape_psf_shape12',
    'i_pix_variance', 'i_sky_mean', 'i_sky_std', 'i_inputcount_value',
    'i_inputcount_flag', 'i_inputcount_flag_noinputs',
    'i_inputcount_flag_badcentroid', 'i_pixelflags',
    'i_pixelflags_offimage', 'i_pixelflags_edge', 'i_pixelflags_bad',
    'i_pixelflags_interpolatedcenter', 'i_pixelflags_saturatedcenter',
    'i_pixelflags_crcenter', 'i_pixelflags_suspectcenter',
    'i_pixelflags_bright_objectcenter', 'i_sdssshape_psf_shape11',
    'i_sdssshape_psf_shape22', 'i_sdssshape_psf_shape12',
    'z_pix_variance', 'z_sky_mean', 'z_sky_std', 'z_inputcount_value',
    'z_inputcount_flag', 'z_inputcount_flag_noinputs',
    'z_inputcount_flag_badcentroid', 'z_pixelflags',
    'z_pixelflags_offimage', 'z_pixelflags_edge', 'z_pixelflags_bad',
    'z_pixelflags_interpolatedcenter', 'z_pixelflags_saturatedcenter',
    'z_pixelflags_crcenter', 'z_pixelflags_suspectcenter',
    'z_pixelflags_bright_objectcenter', 'z_sdssshape_psf_shape11',
    'z_sdssshape_psf_shape22', 'z_sdssshape_psf_shape12',
    'y_pix_variance', 'y_sky_mean', 'y_sky_std', 'y_inputcount_value',
    'y_inputcount_flag', 'y_inputcount_flag_noinputs',
    'y_inputcount_flag_badcentroid', 'y_pixelflags',
    'y_pixelflags_offimage', 'y_pixelflags_edge', 'y_pixelflags_bad',
    'y_pixelflags_interpolatedcenter', 'y_pixelflags_saturatedcenter',
    'y_pixelflags_crcenter', 'y_pixelflags_suspectcenter',
    'y_pixelflags_bright_objectcenter', 'y_sdssshape_psf_shape11',
    'y_sdssshape_psf_shape22', 'y_sdssshape_psf_shape12'
]

S18A_RAND_USE = [0, 1, 2, 5, 6, 10, 11, 17, 36, 55, 74, 93]

S18A_RAND_DTYPE = [
    ("object_id", "int32"),
    ("ra", "float64"),
    ("dec", "float64"),
    ("tract", "int32"),
    ("patch", "int32"),
    ("isprimary", "bool"),
    ("adjust_density", "float64"),
    ("g_inputcount_value", "float32"),
    ("r_inputcount_value", "float32"),
    ("i_inputcount_value", "float32"),
    ("z_inputcount_value", "float32"),
    ("y_inputcount_value", "float32")
]


def downsample_randoms(rand_file, chunksize=1e6, seed=95064, whitespace=False,
                       downsample=False, verbose=True, gzip=False):
    """Down-sample the random catalogs from the HSC S18A data."""
    if not os.path.isfile(rand_file):
        raise IOError("# Can not find the particle table : %s" % rand_file)

    if gzip:
        rand_pre = rand_file.replace('.csv.gz', '')
    else:
        rand_pre = rand_file.replace('.csv', '')

    # Reduce the number of columns and save as a numpy array
    rand_out = rand_pre + ".npy"
    rand_out_downsample = rand_pre + "_downsample.npy"
    if verbose:
        print("# Save the downsampled catalog to : %s" % rand_out)

    # Read the data
    rand_pchunks = pd.read_csv(
        rand_file, usecols=S18A_RAND_USE, delim_whitespace=whitespace,
        index_col=False, chunksize=chunksize)

    rand_pdframe = pd.concat(rand_pchunks)
    rand_pdframe.rename(columns={'# object_id': 'object_id'}, inplace=True)
    rand_array = rand_pdframe.to_records(index=False)

    # Save the result
    if verbose:
        print("# There are %d randoms in the file: %s" % (len(rand_array), rand_file))
    np.save(rand_out, rand_array)

    # Downsample
    if downsample:
        np.random.seed(seed)
        n_rand = int(len(rand_array) / 10)
        rand_downsample = np.random.choice(rand_array, n_rand, replace=False)
        # Save the result
        np.save(rand_out_downsample, rand_downsample)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        'rand_file', type=str,
        help=('The particle catalog.'))

    parser.add_argument(
        '-c', '--chunk', dest='chunksize', type=int, default=1e6,
        help=('Size of the chunk when reading in the catalog.'))

    parser.add_argument(
        '-g', '--gzip', dest='gzip',
        help=('Whether the file is in .csv.gz format'),
        action="store_true", default=False)

    parser.add_argument(
        '-s', '--seed', dest='seed', help='Random seed',
        type=int, default=95064)

    parser.add_argument(
        '-v', '--verbose', dest='verbose',
        help=('Blah, blah...'),
        action="store_true", default=False)

    args = parser.parse_args()

    downsample_randoms(
        args.rand_file, chunksize=args.chunksize, seed=args.seed,
        verbose=args.verbose, gzip=args.gzip)
StarcoderdataPython
12436
<reponame>qingyunha/boltdb<gh_stars>1-10
import os
import unittest
import tempfile

from boltdb import BoltDB


class TestFree(unittest.TestCase):

    def setUp(self):
        self.db = BoltDB(tempfile.mktemp())

    def tearDown(self):
        os.unlink(self.db.filename)

    def test_free(self):
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(self.db.freelist.ids, [3])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(self.db.freelist.ids, [4])

    def test_free2(self):
        self.assertEqual(self.db.freepages(), [2])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(sorted(self.db.freepages()), [2, 3])
        with self.db.update() as tx:
            b = tx.bucket()
            b.put(b"foo", b"bar")
        self.assertEqual(sorted(self.db.freepages()), [2, 4])
StarcoderdataPython
32291
from dissononce.dh import private


class PrivateKey(private.PrivateKey):
    pass
StarcoderdataPython
1706699
<filename>warehouse/legacy/tables.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: Tables that exist here should not be used anywhere, they only exist
#       here for migration support with alembic. If any of these tables end up
#       being used they should be moved outside of warehouse.legacy. The goal
#       is that once the legacy PyPI code base is gone, that these tables
#       can just be deleted and a migration made to drop them from the
#       database.

from citext import CIText
from sqlalchemy import (
    Column, ForeignKey, Index, Table, UniqueConstraint, DateTime, Text,
)

from warehouse import db


# TODO: Once https://github.com/pypa/warehouse/issues/3632 is solved, then we
#       should be able to get rid of this table too, however keeping it around
#       for now to aid in the resolution of that issue.
rego_otk = Table(
    "rego_otk",
    db.metadata,
    Column("name", CIText(), ForeignKey("accounts_user.username", ondelete="CASCADE")),
    Column("otk", Text()),
    Column("date", DateTime(timezone=False)),
    UniqueConstraint("otk", name="rego_otk_unique"),
)

Index("rego_otk_name_idx", rego_otk.c.name)
Index("rego_otk_otk_idx", rego_otk.c.otk)
StarcoderdataPython
1655915
<gh_stars>0
import os
import glob
import argparse
import zipfile

import imageio
import tqdm
import numpy as np
import cv2

import tracker


def create_gifs(model, anno_path, zip_path, gif_path, dim=(408, 360)):
    z = zipfile.ZipFile(os.path.join(zip_path))
    names = [name for name in z.namelist() if name.endswith('.jpg')]
    names.sort(key=lambda s: int(s[:-4]))
    image_gen = (cv2.imdecode(np.frombuffer(z.read(name), np.uint8), 1)
                 for name in names)

    success_images = []
    original_images = []
    precision_images = []
    with open(anno_path, "r") as f:
        first_bbox = tuple(map(float, f.readline().split(",")))
        for image, (_, bbox) in tqdm.tqdm(model.predict_frames(image_gen, bbox=first_bbox)):
            anno_bbox = tuple(map(float, f.readline().split(",")))
            p1 = (int(anno_bbox[0]), int(anno_bbox[1]))
            p2 = (int(anno_bbox[0] + anno_bbox[2]), int(anno_bbox[1] + anno_bbox[3]))
            m1 = (int(bbox[0]), int(bbox[1]))
            m2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            c1 = (int(anno_bbox[0] + anno_bbox[2] // 2), int(anno_bbox[1] + anno_bbox[3] // 2))
            c2 = (int(bbox[0] + bbox[2] // 2), int(bbox[1] + bbox[3] // 2))

            original = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            original_images.append(original)

            copy = original.copy()
            cv2.rectangle(copy, p1, p2, (0, 255, 0), 2, 1)
            cv2.rectangle(copy, m1, m2, (0, 0, 255), 2, 1)

            # cover success image
            success_image = np.zeros(copy.shape, np.uint8)
            minx = max(p1[0], m1[0])
            miny = max(p1[1], m1[1])
            maxx = min(m2[0], p2[0])
            maxy = min(m2[1], p2[1])
            cv2.rectangle(success_image, p1, p2, (200, 200, 0), -1)
            cv2.rectangle(success_image, m1, m2, (200, 200, 0), -1)
            cv2.rectangle(success_image, (minx, miny), (maxx, maxy), (0, 200, 200), -1)
            cv2.rectangle(success_image, p1, p2, (0, 255, 0), 2, 1)
            cv2.rectangle(success_image, m1, m2, (0, 0, 255), 2, 1)
            alpha = 0.6
            overlay = cv2.addWeighted(success_image, alpha, copy, 1 - alpha, gamma=0)
            success_images.append(success_image)

            # precision
            precision_image = np.zeros(original.shape, dtype=np.uint8)
            prec_copy = original.copy()
            cv2.rectangle(precision_image, p1, p2, (0, 255, 0), 1)
            cv2.rectangle(precision_image, m1, m2, (0, 0, 255), 1)
            cv2.line(precision_image, c1, c2, (255, 0, 0), 5, 1)
            cv2.rectangle(original, p1, p2, (0, 255, 0), 2, 1)
            cv2.rectangle(original, m1, m2, (0, 0, 255), 2, 1)
            overlay = cv2.addWeighted(precision_image, alpha, prec_copy, 1 - alpha, gamma=0)
            precision_images.append(precision_image)

    imageio.mimsave(gif_path + "_success.gif", success_images)
    imageio.mimsave(gif_path + "_precision.gif", precision_images)
    imageio.mimsave(gif_path + "_original.gif", original_images)


def create_video(model, anno_path, zip_path, vid_path, dim=(480, 360), model_type="CSRT"):
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    out = cv2.VideoWriter(vid_path, fourcc, 20, dim)
    z = zipfile.ZipFile(os.path.join(zip_path))
    names = [name for name in z.namelist() if name.endswith('.jpg')]
    names.sort(key=lambda s: int(s[:-4]))
    image_gen = (cv2.imdecode(np.frombuffer(z.read(name), np.uint8), 1)
                 for name in names)

    with open(anno_path, "r") as f:
        first_bbox = tuple(map(float, f.readline().split(",")))
        for image, (_, bbox) in tqdm.tqdm(model.predict_frames(image_gen, bbox=first_bbox)):
            anno_bbox = tuple(map(float, f.readline().split(",")))
            p1 = (int(anno_bbox[0]), int(anno_bbox[1]))
            p2 = (int(anno_bbox[0] + anno_bbox[2]), int(anno_bbox[1] + anno_bbox[3]))
            cv2.rectangle(image, p1, p2, (0, 255, 0), 2, 1)
            cv2.putText(image, 'Grnd Truth', (p1[0], p1[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)
            cv2.putText(image, model_type, (p2[0] - 40, p2[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
            out.write(image)
    out.release()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--zippath", help="zipfile of images",
                        type=str, required=True)
    parser.add_argument("-a", "--annopath", help="path to annotation file",
                        type=str, required=True)
    parser.add_argument("-v", "--vidpath", help="video out filepath",
                        type=str, default="summary.csv")
    parser.add_argument("-m", "--model",
                        help="model type ('CSRT', 'KCF', 'GOTURN')",
                        type=str, choices=['CSRT', 'KCF', 'GOTURN'],
                        required=True)
    parser.add_argument("-g", "--gif", action='store_true')
    args = parser.parse_args()

    model = tracker.factory(args.model, timed=True)
    if not args.gif:
        create_video(model, args.annopath, args.zippath, args.vidpath,
                     model_type=args.model)
    else:
        create_gifs(model, args.annopath, args.zippath, args.vidpath)
StarcoderdataPython
1727631
<gh_stars>0
# Importing modules
import pandas as pd
import json
from datetime import datetime
from kafka import KafkaConsumer
from pyspark.ml.feature import VectorAssembler
from time import sleep

# Declaring consumer connection
try:
    consumer = KafkaConsumer('stock_prices',
                             bootstrap_servers=['localhost:9092'])
except:
    print('connection error')


# getting data and predicting result using the model
def stock_prediction(sqlContext, load_model):
    try:
        for msg in consumer:
            res = json.loads(msg.value.decode('utf-8'))
            dlist = list(res.values())
            pd_df = pd.DataFrame([dlist],
                                 columns=['Open', 'Close', 'Volume', 'High', 'Low'])
            pd_df = pd_df.astype(float)
            spark_df = sqlContext.createDataFrame(pd_df)
            vectorAssembler = VectorAssembler(inputCols=['Open', 'High', 'Low'],
                                              outputCol='features')
            df_vect = vectorAssembler.transform(spark_df)
            df_vect_features = df_vect.select(['features', 'Close'])
            predictions = load_model.transform(df_vect_features)
            predictions.select("prediction", "Close", "features").show()
            predict_value = predictions.select('prediction').collect()[0].__getitem__("prediction")
            close_value = predictions.select('Close').collect()[0].__getitem__('Close')
            print(msg.key)
            date_time = msg.key.decode('utf-8')
            return round(predict_value, 4), close_value, date_time
    except:
        print('Debug the above lines of code')
StarcoderdataPython
92566
<reponame>Limmen/open_spiel<filename>open_spiel/python/games/optimal_stopping_game_approx_exp.py
import math
import threading
import time
from typing import List, Tuple

import gym
import numpy as np
import torch
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor

from open_spiel.python.games.optimal_stopping_game_config_sequential import OptimalStoppingGameConfigSequential
from open_spiel.python.games.optimal_stopping_game_util import OptimalStoppingGameUtil


class OptimalStoppingGameApproxExp:
    """
    Class for computing exploitability of a strategy profile (pi_1, pi_2)
    """

    def __init__(self, pi_1, pi_2, config: OptimalStoppingGameConfigSequential, seed: int,
                 br_training_timesteps=30000, br_evaluate_timesteps=1000, br_net_num_layers=3,
                 br_net_num_hidden_neurons=128, br_learning_rate=3e-4, br_batch_size=64,
                 br_steps_between_updates=2048, br_training_device_str: str = "cpu"):
        """
        Initializes the object

        :param pi_1: the defender NFSP strategy
        :param pi_2: the attacker NFSP strategy
        :param config: the game configuration
        :param seed: the random seed
        :param br_training_timesteps: the number of time-steps to use when approximating best response strategies
        :param br_evaluate_timesteps: number of time-steps for evaluating a learned BR policy
        :param br_net_num_layers: number of hidden layers of the NN to learn an approximate BR
        :param br_net_num_hidden_neurons: number of hidden neurons per layer of the NN to learn an approximate BR
        :param br_learning_rate: the learning rate for learning approximate best response strategies
        :param br_batch_size: the batch size for learning best response strategies
        :param br_steps_between_updates: the number of steps between each update during learning of best
                                         response strategies
        :param br_training_device_str: the device for the training of BR strategies
        """
        self.pi_1 = pi_1
        self.pi_2 = pi_2
        self.config = config
        self.br_training_device_str = br_training_device_str
        self.attacker_mdp = self._get_attacker_mdp()
        self.defender_pomdp = self._get_defender_pomdp()
        self.seed = seed
        self.br_timesteps = br_training_timesteps
        self.br_evaluate_timesteps = br_evaluate_timesteps
        self.br_net_num_layers = br_net_num_layers
        self.br_net_num_hidden_neurons = br_net_num_hidden_neurons
        self.br_learning_rate = br_learning_rate
        self.br_batch_size = br_batch_size
        self.br_steps_between_updates = br_steps_between_updates

    def _get_attacker_mdp(self) -> gym.Env:
        """
        :return: the attacker MDP for calculating a best response strategy
        """
        env = StoppingGameAttackerMDPEnv(config=self.config, pi_1=self.pi_1, pi_2=self.pi_2,
                                         device_str=self.br_training_device_str)
        return env

    def _get_defender_pomdp(self) -> gym.Env:
        """
        :return: the defender POMDP for calculating a best response strategy
        """
        env = StoppingGameDefenderPOMDPEnv(config=self.config, pi_1=self.pi_1, pi_2=self.pi_2)
        return env

    def approx_exploitability(self) -> float:
        """
        :return: approximate exploitability of pi_1 and pi_2
        """
        print("--- Calculating approximate exploitability ---")
        avg_attacker_br_R = self.attacker_br_avg_reward()
        avg_defender_br_R = self.defender_br_avg_reward()
        approx_expl = abs(avg_attacker_br_R + avg_defender_br_R) / 2
        return approx_expl

    def attacker_br_avg_reward(self) -> float:
        """
        Learns an approximate best response strategy of the attacker and returns its average reward

        :return: the average reward of the approximate best response strategy
        """
        policy_kwargs = dict(net_arch=[self.br_net_num_hidden_neurons] * self.br_net_num_layers)
        env = Monitor(self.attacker_mdp)
        model = PPO("MlpPolicy", env, verbose=0, policy_kwargs=policy_kwargs,
                    n_steps=self.br_steps_between_updates, batch_size=self.br_batch_size,
                    learning_rate=self.br_learning_rate, seed=self.seed,
                    device=self.br_training_device_str, gamma=1)
        self.attacker_mdp.ppo_pi_2 = model
        print(" ** Starting training of an approximate best response strategy of the attacker ** ")
        progress_thread = ProgressThread(env=env, max_steps=self.br_timesteps)
        progress_thread.start()
        model.learn(total_timesteps=self.br_timesteps)
        progress_thread.running = False
        print("** Training of an approximate best response strategy of the attacker complete **")
        obs = env.reset()
        r = 0
        returns = []
        for i in range(self.br_evaluate_timesteps):
            action, _states = model.predict(obs, deterministic=True)
            obs, reward, done, info = env.step(action)
            r += reward
            if done:
                returns.append(r)
                r = 0
                obs = env.reset()
        avg_R = -np.mean(returns)
        print("Attacker approximate best response AVG Return:{}".format(avg_R))
        return float(avg_R)

    def defender_br_avg_reward(self) -> float:
        """
        Learns an approximate best response strategy of the defender and returns its average reward

        :return: the average reward of the approximate best response strategy
        """
        policy_kwargs = dict(net_arch=[self.br_net_num_hidden_neurons] * self.br_net_num_layers)
        env = Monitor(self.defender_pomdp)
        model = PPO("MlpPolicy", env, verbose=0, policy_kwargs=policy_kwargs,
                    n_steps=self.br_steps_between_updates, batch_size=self.br_batch_size,
                    learning_rate=self.br_learning_rate, seed=self.seed,
                    device=self.br_training_device_str, gamma=1)
        print("** Starting training of an approximate best response strategy of the defender **")
        progress_thread = ProgressThread(env=env, max_steps=self.br_timesteps)
        progress_thread.start()
        model.learn(total_timesteps=self.br_timesteps)
        progress_thread.running = False
        print("** Training of an approximate best response strategy of the defender complete **")
        obs = env.reset()
        r = 0
        returns = []
        for i in range(self.br_evaluate_timesteps):
            action, _states = model.predict(obs, deterministic=True)
            obs, reward, done, info = env.step(action)
            r += reward
            if done:
                returns.append(r)
                r = 0
                obs = env.reset()
        avg_R = np.mean(returns)
        print("Defender approximate best response AVG Return:{}".format(avg_R))
        return float(avg_R)


class StoppingGameAttackerMDPEnv(gym.Env):
    """
    MDP where the attacker faces a static defender policy.
    The optimal policy in this MDP is a best response strategy of the attacker
    """

    def __init__(self, config: OptimalStoppingGameConfigSequential, pi_1, pi_2, device_str):
        """
        Initializes the environment

        :param config: the environment configuration
        :param pi_1: NFSP policy of the defender
        :param pi_2: NFSP policy of the attacker
        """
        self.config = config
        self.l = config.L
        self.s0 = 0
        self.b0 = config.initial_belief
        self.pi_1 = pi_1
        self.pi_2 = pi_2
        self.observation_space = gym.spaces.Box(low=np.array([0, 0, 0]),
                                                high=np.array([self.config.L, 1, 2]),
                                                dtype=np.float32, shape=(3,))
        self.action_space = gym.spaces.Discrete(2)
        self.num_actions = 2
        self.t = 0
        self.ppo_pi_2 = None
        self.device_str = device_str
        self.device = torch.device(self.device_str)

    def get_attacker_dist(self, obs):
        obs = np.array([obs])
        actions, values, log_prob = self.ppo_pi_2.policy.forward(obs=torch.tensor(obs).to(self.device))
        action = actions[0]
        if action == 1:
            stop_prob = math.exp(log_prob)
        else:
            stop_prob = 1 - math.exp(log_prob)
        return [1 - stop_prob, stop_prob]

    def get_attacker_stage_policy_avg(self) -> List:
        """
        Extracts the stage policy from pi_2

        :return: the attacker's stage policy
        """
        pi_2_stage = np.zeros((3, 2)).tolist()
        pi_2_stage[-1] = [0.5] * 2
        for s in range(2):
            o = [self.l, self.b[1], s]
            pi_2_stage[s] = self.get_attacker_dist(obs=o)
        return pi_2_stage

    def step(self, a2) -> Tuple[np.ndarray, float, bool, dict]:
        """
        Takes a step in the environment

        :param a2: the attacker's action
        :return: o, r, done, info
        """
        done = False
        a1 = self.defender_action()
        r = -self.config.R[self.l - 1][a1][a2][self.s]
        T = self.config.T[self.l - 1]
        self.s = self.sample_next_state(a1=a1, a2=a2, T=T)
        o = max(self.config.O)
        if self.s == 2 or self.t >= self.config.T_max:
            done = True
        else:
            o = self.sample_next_observation(a1=a1, a2=a2)
            pi_2_stage = self.get_attacker_stage_policy_avg()
            self.b = OptimalStoppingGameUtil.next_belief(o=o, a1=a1, b=self.b, pi_2=pi_2_stage,
                                                         config=self.config, l=self.l, a2=a2)
        self.l = self.l - a1
        info = {"o": o, "s": self.s}
        self.t += 1
        return np.array([self.l, self.b[1], self.s]), r, done, info

    def defender_action(self) -> int:
        """
        Samples a defender action from a static policy

        :return: the sampled defender action
        """
        stop_prob = self.pi_1._act([self.l, self.b[1], self.b[1]], legal_actions=[0, 1])[1][1]
        if np.random.rand() <= stop_prob:
            return 1
        else:
            return 0

    def sample_next_state(self, a1: int, a2: int, T: np.ndarray) -> int:
        """
        Samples the next state

        :param a1: action of the defender
        :param a2: action of the attacker
        :param T: the transition tensor
        :return: the next state
        """
        state_probs = []
        for s_prime in self.config.S:
            state_probs.append(T[a1][a2][self.s][s_prime])
        s_prime = np.random.choice(np.arange(0, len(self.config.S)), p=state_probs)
        return s_prime

    def sample_next_observation(self, a1: int, a2: int) -> int:
        """
        Samples the next observation

        :param a1: the action of the defender
        :param a2: the action of the attacker
        :return: the next observation
        """
        observation_probs = []
        for o in self.config.O:
            observation_probs.append(self.config.Z[a1][a2][self.s][o])
        o = np.random.choice(np.arange(0, len(self.config.O)), p=observation_probs)
        return o

    def reset(self) -> np.ndarray:
        """
        Resets the environment

        :return: the initial observation
        """
        self.s = 0
        self.b = self.config.initial_belief
        self.l = self.config.L
        self.t = 0
        return np.array([self.l, self.b0[1], self.s])

    def render(self):
        raise NotImplementedError("not supported")


class StoppingGameDefenderPOMDPEnv(gym.Env):
    """
    POMDP where the defender faces a static attacker policy.
    The optimal policy in this POMDP is a best response strategy of the defender
    """

    def __init__(self, config: OptimalStoppingGameConfigSequential, pi_1, pi_2):
        """
        Initializes the game

        :param config: the game configuration
        :param pi_1: the defender NFSP policy
        :param pi_2: the attacker NFSP policy
        """
        self.config = config
        self.l = config.L
        self.s0 = 0
        self.b0 = config.initial_belief
        self.pi_1 = pi_1
        self.pi_2 = pi_2
        self.t = 0
        self.observation_space = gym.spaces.Box(low=np.array([0, 0, 0]),
                                                high=np.array([self.config.L, 1, 1]),
                                                dtype=np.float32, shape=(3,))
        self.action_space = gym.spaces.Discrete(2)

    def get_attacker_stage_policy_avg(self):
        """
        Extracts the stage policy from pi_2

        :return: the attacker's stage policy
        """
        pi_2_stage = np.zeros((3, 2)).tolist()
        pi_2_stage[-1] = [0.5] * 2
        for s in range(2):
            o = [self.l, self.b[1], s]
            pi_2_stage[s] = self.pi_2._act(o, legal_actions=[0, 1])[1]
        return pi_2_stage

    def step(self, a1) -> Tuple[np.ndarray, float, bool, dict]:
        """
        Steps the environment

        :param a1: the defender action
        :return: o, r, done, info
        """
        done = False
        a2 = self.attacker_action()
        r = self.config.R[self.l - 1][a1][a2][self.s]
        T = self.config.T[self.l - 1]
        self.s = self.sample_next_state(a1=a1, a2=a2, T=T)
        o = max(self.config.O)
        if self.s == 2 or self.t >= self.config.T_max:
            done = True
        else:
            o = self.sample_next_observation(a1=a1, a2=a2)
            pi_2_stage = self.get_attacker_stage_policy_avg()
            self.b = OptimalStoppingGameUtil.next_belief(o=o, a1=a1, b=self.b, pi_2=pi_2_stage,
                                                         config=self.config, l=self.l, a2=a2)
        self.l = self.l - a1
        info = {"o": o, "s": self.s}
        self.t += 1
        return np.array([self.l, self.b[1], self.b[1]]), r, done, info

    def attacker_action(self) -> int:
        """
        Samples an attacker action from a static policy

        :return: the sampled attacker action
        """
        stop_prob = self.pi_2._act([self.l, self.b[1], self.s], legal_actions=[0, 1])[1][1]
        if np.random.rand() < stop_prob:
            return 1
        else:
            return 0

    def sample_next_state(self, a1: int, a2: int, T: np.ndarray) -> int:
        """
        Samples the next state

        :param a1: action of the defender
        :param a2: action of the attacker
        :param T: the transition tensor
        :return: the next state
        """
        state_probs = []
        for s_prime in self.config.S:
            state_probs.append(T[a1][a2][self.s][s_prime])
        s_prime = np.random.choice(np.arange(0, len(self.config.S)), p=state_probs)
        return s_prime

    def sample_next_observation(self, a1: int, a2: int) -> int:
        """
        Samples the next observation

        :param a1: the action of the defender
        :param a2: the action of the attacker
        :return: the next observation
        """
        observation_probs = []
        for o in self.config.O:
            observation_probs.append(self.config.Z[a1][a2][self.s][o])
        o = np.random.choice(np.arange(0, len(self.config.O)), p=observation_probs)
        return o

    def reset(self) -> np.ndarray:
        """
        Resets the environment

        :return: the initial observation
        """
        self.s = 0
        self.b = self.config.initial_belief
        self.l = self.config.L
        self.t = 0
        return np.array([self.l, self.b0[1], self.s])

    def render(self):
        raise NotImplementedError("not supported")


class ProgressThread(threading.Thread):

    def __init__(self, env, max_steps):
        threading.Thread.__init__(self)
        self.env = env
        self.max_steps = max_steps
        self.running = True

    def run(self) -> None:
        while self.running:
            time.sleep(5)
            if self.running:
                print("Learning a best response strategy, progress:{}%".format(
                    int(100 * round(self.env.total_steps / self.max_steps, 2))))
StarcoderdataPython
1770777
class AnalysisType(object):
    """
    Base class for the different analysis types
    """

    def __init__(self, structure_model, name="DefaultAnalysisType"):
        self.name = name

        # the structure model - geometry and physics - has the Dirichlet BC
        # for the bottom node included
        self.structure_model = structure_model

        self.displacement = None
        self.rotation = None
        self.force = None
        self.reaction = None
        self.moment = None

    def solve(self):
        """
        Solve for something
        """
        print("Solving for something in AnalysisType base class \n")

    def postprocess(self):
        """
        Postprocess something
        """
        print("Postprocessing in AnalysisType base class \n")
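# Sketch of how a concrete analysis would specialize the base class
# (hypothetical subclass; the solver details depend on the structure model):
#
#   class StaticAnalysis(AnalysisType):
#       def __init__(self, structure_model):
#           super().__init__(structure_model, name="StaticAnalysis")
#
#       def solve(self):
#           # assemble stiffness K and load vector f, solve K u = f,
#           # and store u in self.displacement
#           ...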
StarcoderdataPython
3380555
<gh_stars>0 """ Copyright (c) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pathlib import Path import pickle from functools import partial from collections import OrderedDict import numpy as np from ..base_evaluator import BaseEvaluator from ..quantization_model_evaluator import create_dataset_attributes from ...adapters import create_adapter from ...config import ConfigError from ...launcher import create_launcher from ...utils import contains_all, contains_any, extract_image_representations, get_path from ...progress_reporters import ProgressReporter from ...logging import print_info def generate_name(prefix, with_prefix, layer_name): return prefix + layer_name if with_prefix else layer_name.split(prefix)[-1] class SuperResolutionFeedbackEvaluator(BaseEvaluator): def __init__(self, dataset_config, launcher, model): self.dataset_config = dataset_config self.preprocessing_executor = None self.preprocessor = None self.dataset = None self.postprocessor = None self.metric_executor = None self.launcher = launcher self.srmodel = model self._metrics_results = [] @classmethod def from_configs(cls, config, delayed_model_loading=False): dataset_config = config['datasets'] launcher_config = config['launchers'][0] if launcher_config['framework'] == 'dlsdk' and 'device' not in launcher_config: launcher_config['device'] = 'CPU' launcher = create_launcher(launcher_config, delayed_model_loading=True) model = SRFModel( config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'), delayed_model_loading ) return cls(dataset_config, launcher, model) def process_dataset( self, subset=None, num_images=None, check_progress=False, dataset_tag='', output_callback=None, allow_pairwise_subset=False, dump_prediction_to_annotation=False, calculate_metrics=True, **kwargs): if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag): self.select_dataset(dataset_tag) self._annotations, self._predictions = [], [] self._create_subset(subset, num_images, allow_pairwise_subset) metric_config = self.configure_intermediate_metrics_results(kwargs) compute_intermediate_metric_res, metric_interval, ignore_results_formatting = metric_config if 'progress_reporter' in kwargs: _progress_reporter = kwargs['progress_reporter'] _progress_reporter.reset(self.dataset.size) else: _progress_reporter = None if not check_progress else self._create_progress_reporter( check_progress, self.dataset.size ) self.srmodel.init_feedback(self.dataset.data_reader) for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset): self.srmodel.fill_feedback(batch_inputs) batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation) batch_inputs_extr, _ = extract_image_representations(batch_inputs) callback = None if callback: callback = partial(output_callback, metrics_result=None, element_identifiers=batch_identifiers, dataset_indices=batch_input_ids) batch_raw_prediction, batch_prediction = self.srmodel.predict( batch_identifiers, 
batch_inputs_extr, callback=callback ) annotation, prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction) self.srmodel.feedback(prediction) metrics_result = None if self.metric_executor and calculate_metrics: metrics_result, _ = self.metric_executor.update_metrics_on_batch( batch_input_ids, annotation, prediction ) if self.metric_executor.need_store_predictions: self._annotations.extend(annotation) self._predictions.extend(prediction) if output_callback: output_callback( batch_raw_prediction[0], metrics_result=metrics_result, element_identifiers=batch_identifiers, dataset_indices=batch_input_ids ) if _progress_reporter: _progress_reporter.update(batch_id, len(prediction)) if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0: self.compute_metrics( print_results=True, ignore_results_formatting=ignore_results_formatting ) if _progress_reporter: _progress_reporter.finish() if self.srmodel.store_predictions: self.srmodel.save_predictions() def compute_metrics(self, print_results=True, ignore_results_formatting=False): if self._metrics_results: del self._metrics_results self._metrics_results = [] for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics( self._annotations, self._predictions): self._metrics_results.append(evaluated_metric) if print_results: result_presenter.write_result(evaluated_metric, ignore_results_formatting) return self._metrics_results def extract_metrics_results(self, print_results=True, ignore_results_formatting=False): if not self._metrics_results: self.compute_metrics(False, ignore_results_formatting) result_presenters = self.metric_executor.get_metric_presenters() extracted_results, extracted_meta = [], [] for presenter, metric_result in zip(result_presenters, self._metrics_results): result, metadata = presenter.extract_result(metric_result) if isinstance(result, list): extracted_results.extend(result) extracted_meta.extend(metadata) else: extracted_results.append(result) extracted_meta.append(metadata) if print_results: presenter.write_result(metric_result, ignore_results_formatting) return extracted_results, extracted_meta def print_metrics_results(self, ignore_results_formatting=False): if not self._metrics_results: self.compute_metrics(True, ignore_results_formatting) return result_presenters = self.metric_executor.get_metric_presenters() for presenter, metric_result in zip(result_presenters, self._metrics_results): presenter.write_result(metric_result, ignore_results_formatting) @property def dataset_size(self): return self.dataset.size def release(self): self.srmodel.release() self.launcher.release() def reset(self): if self.metric_executor: self.metric_executor.reset() if hasattr(self, '_annotations'): del self._annotations del self._predictions del self._input_ids del self._metrics_results self._annotations = [] self._predictions = [] self._input_ids = [] self._metrics_results = [] if self.dataset: self.dataset.reset(self.postprocessor.has_processors) @staticmethod def get_processing_info(config): module_specific_params = config.get('module_config') model_name = config['name'] dataset_config = module_specific_params['datasets'][0] launcher_config = module_specific_params['launchers'][0] return ( model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'), dataset_config['name'] ) def _create_subset(self, subset=None, num_images=None, allow_pairwise=False): if self.dataset.batch is None: self.dataset.batch = 1 if subset is not None: 
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise) elif num_images is not None: self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise) @staticmethod def configure_intermediate_metrics_results(config): compute_intermediate_metric_res = config.get('intermediate_metrics_results', False) metric_interval, ignore_results_formatting = None, None if compute_intermediate_metric_res: metric_interval = config.get('metrics_interval', 1000) ignore_results_formatting = config.get('ignore_results_formatting', False) return compute_intermediate_metric_res, metric_interval, ignore_results_formatting def load_network(self, network=None): self.srmodel.load_network(network, self.launcher) def load_network_from_ir(self, models_list): self.srmodel.load_model(models_list, self.launcher) def get_network(self): return self.srmodel.get_network() def get_metrics_attributes(self): if not self.metric_executor: return {} return self.metric_executor.get_metrics_attributes() def register_metric(self, metric_config): if isinstance(metric_config, str): self.metric_executor.register_metric({'type': metric_config}) elif isinstance(metric_config, dict): self.metric_executor.register_metric(metric_config) else: raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config))) def register_postprocessor(self, postprocessing_config): pass def register_dumped_annotations(self): pass def select_dataset(self, dataset_tag): if self.dataset is not None and isinstance(self.dataset_config, list): return dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag) self.dataset, self.metric_executor, self.preprocessor, self.postprocessor = dataset_attributes @staticmethod def _create_progress_reporter(check_progress, dataset_size): pr_kwargs = {} if isinstance(check_progress, int) and not isinstance(check_progress, bool): pr_kwargs = {"print_interval": check_progress} return ProgressReporter.provide('print', dataset_size, **pr_kwargs) class BaseModel: def __init__(self, network_info, launcher, delayed_model_loading=False): self.network_info = network_info self.launcher = launcher def predict(self, identifiers, input_data): raise NotImplementedError def release(self): pass # pylint: disable=E0203 class BaseDLSDKModel: def print_input_output_info(self): print_info('{} - Input info:'.format(self.default_model_suffix)) has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info') if self.network: if has_info: network_inputs = OrderedDict( [(name, data.input_data) for name, data in self.network.input_info.items()] ) else: network_inputs = self.network.inputs network_outputs = self.network.outputs else: if has_info: network_inputs = OrderedDict([ (name, data.input_data) for name, data in self.exec_network.input_info.items() ]) else: network_inputs = self.exec_network.inputs network_outputs = self.exec_network.outputs for name, input_info in network_inputs.items(): print_info('\tLayer name: {}'.format(name)) print_info('\tprecision: {}'.format(input_info.precision)) print_info('\tshape {}\n'.format(input_info.shape)) print_info('{} - Output info'.format(self.default_model_suffix)) for name, output_info in network_outputs.items(): print_info('\tLayer name: {}'.format(name)) print_info('\tprecision: {}'.format(output_info.precision)) print_info('\tshape: {}\n'.format(output_info.shape)) def automatic_model_search(self, network_info): model = Path(network_info['srmodel']) if model.is_dir(): is_blob = network_info.get('_model_is_blob') if 
is_blob: model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix))) if not model_list: model_list = list(model.glob('*.blob')) else: model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix))) blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix))) if not model_list and not blob_list: model_list = list(model.glob('*.xml')) blob_list = list(model.glob('*.blob')) if not model_list: model_list = blob_list if not model_list: raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix)) if len(model_list) > 1: raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix)) model = model_list[0] print_info('{} - Found model: {}'.format(self.default_model_suffix, model)) if model.suffix == '.blob': return model, None weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin'))) print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights)) return model, weights def load_network(self, network, launcher): self.network = network self.exec_network = launcher.ie_core.load_network(network, launcher.device) def update_inputs_outputs_info(self): raise NotImplementedError def load_model(self, network_info, launcher, log=False): model, weights = self.automatic_model_search(network_info) if weights is not None: self.network = launcher.read_network(str(model), str(weights)) self.exec_network = launcher.ie_core.load_network(self.network, launcher.device) else: self.exec_network = launcher.ie_core.import_network(str(model)) self.update_inputs_outputs_info() if log: self.print_input_output_info() def create_model(model_config, launcher, delayed_model_loading=False): launcher_model_mapping = { 'dlsdk': ModelDLSDKModel, 'tf': ModelTFModel, } framework = launcher.config['framework'] if 'predictions' in model_config and not model_config.get('store_predictions', False): framework = 'dummy' model_class = launcher_model_mapping.get(framework) if not model_class: raise ValueError('model for framework {} is not supported'.format(framework)) return model_class(model_config, launcher, delayed_model_loading) class SRFModel(BaseModel): def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False): super().__init__(network_info, launcher) if models_args and not delayed_model_loading: model = network_info.get('srmodel', {}) if not contains_any(model, ['model', 'onnx_model']) and models_args: model['srmodel'] = models_args[0] model['_model_is_blob'] = is_blob network_info.update({'sr_model': model}) if not contains_all(network_info, ['srmodel']) and not delayed_model_loading: raise ConfigError('network_info should contain srmodel field') self.srmodel = create_model(network_info['srmodel'], launcher, delayed_model_loading) self.feedback = self.srmodel.feedback self.init_feedback = self.srmodel.init_feedback self.fill_feedback = self.srmodel.fill_feedback self.store_predictions = network_info['srmodel'].get('store_predictions', False) self._predictions = [] if self.store_predictions else None self._part_by_name = {'srmodel': self.srmodel} self._raw_outs = OrderedDict() def predict(self, identifiers, input_data, callback=None): predictions, raw_outputs = [], [] for data in input_data: output, prediction = self.srmodel.predict(identifiers, data) if self.store_predictions: self._predictions.append(prediction) raw_outputs.append(output) predictions.append(prediction) return raw_outputs, predictions def reset(self): self.processing_frames_buffer = 
[] if self._predictions is not None: self._predictions = [] def release(self): self.srmodel.release() def save_predictions(self): if self._predictions is not None: prediction_file = Path(self.network_info['srmodel'].get('predictions', 'model_predictions.pickle')) with prediction_file.open('wb') as file: pickle.dump(self._predictions, file) def load_network(self, network_list, launcher): for network_dict in network_list: self._part_by_name[network_dict['name']].load_network(network_dict['srmodel'], launcher) self.update_inputs_outputs_info() def load_model(self, network_list, launcher): for network_dict in network_list: self._part_by_name[network_dict['name']].load_model(network_dict, launcher) self.update_inputs_outputs_info() def _add_raw_predictions(self, prediction): for key, output in prediction.items(): if key not in self._raw_outs: self._raw_outs[key] = [] self._raw_outs[key].append(output) def get_network(self): return [{'name': 'srmodel', 'model': self.srmodel.network}] class FeedbackMixin: def configure_feedback(self): self._idx_to_name = {} self._name_to_idx = {} self._feedback_name = self.network_info['feedback_input'] self._feedback_data = {self._feedback_name: None} self._first_step = True self._inputs = self.network_info['inputs'] self._feedback_inputs = {self._feedback_name: [t for t in self._inputs if t['name'] == self._feedback_name][0]} for input_info in self._inputs: idx = int(input_info['value']) self._idx_to_name[idx] = input_info['name'] self._name_to_idx[input_info['name']] = idx self._feedback_idx = self._name_to_idx[self._feedback_name] def init_feedback(self, reader): info = self._feedback_inputs[self._feedback_name] self._feedback_data[self._feedback_name] = reader.read(info['initializer']) def feedback(self, data): data = data[0] self._feedback_data[self._feedback_name] = data[0].value def fill_feedback(self, data): data[0].data[self._feedback_idx] = self._feedback_data[self._feedback_name] return data class ModelDLSDKModel(BaseModel, BaseDLSDKModel, FeedbackMixin): default_model_suffix = 'srmodel' def __init__(self, network_info, launcher, delayed_model_loading=False): super().__init__(network_info, launcher) self.input_blob, self.output_blob = None, None self.with_prefix = None if not delayed_model_loading: self.load_model(network_info, launcher, log=True) self.adapter = create_adapter(network_info.get('adapter', 'super_resolution')) self.configure_feedback() def predict(self, identifiers, input_data): input_data = self.fit_to_input(input_data) raw_result = self.exec_network.infer(input_data) result = self.adapter.process([raw_result], identifiers, [{}]) return raw_result, result def release(self): del self.exec_network del self.launcher def fit_to_input(self, input_data): has_info = hasattr(self.exec_network, 'input_info') if has_info: input_info = self.exec_network.input_info else: input_info = self.exec_network.inputs fitted = {} for name, info in input_info.items(): data = input_data[self._name_to_idx[name]] data = np.expand_dims(data, axis=0) data = np.transpose(data, [0, 3, 1, 2]) assert tuple(info.input_data.shape) == np.shape(data) fitted[name] = data return fitted def update_inputs_outputs_info(self): has_info = hasattr(self.exec_network, 'input_info') input_info = self.exec_network.input_info if has_info else self.exec_network.inputs input_blob = next(iter(input_info)) with_prefix = input_blob.startswith(self.default_model_suffix + '_') if (with_prefix != self.with_prefix) and with_prefix: self.network_info['feedback_input'] = 
'_'.join([self.default_model_suffix, self.network_info['feedback_input']]) for inp in self.network_info['inputs']: inp['name'] = '_'.join([self.default_model_suffix, inp['name']]) if 'blob' in inp.keys(): inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']]) self.network_info['adapter']['target_out'] = '_'.join([self.default_model_suffix, self.network_info['adapter']['target_out']]) self.with_prefix = with_prefix class ModelTFModel(BaseModel, FeedbackMixin): default_model_suffix = 'srmodel' def __init__(self, network_info, launcher, *args, **kwargs): super().__init__(network_info, launcher) model = self.automatic_model_search(network_info) self.inference_session = launcher.create_inference_session(str(model)) self.adapter = create_adapter(network_info.get('adapter', 'super_resolution')) self.configure_feedback() def predict(self, identifiers, input_data): input_data = self.fit_to_input(input_data) raw_result = self.inference_session.predict([input_data]) result = self.adapter.process(raw_result, identifiers, [{}]) return raw_result, result def fit_to_input(self, input_data): fitted = {} for idx, data in enumerate(input_data): name = self._idx_to_name[idx] data = np.expand_dims(data, axis=0) fitted[name] = data return fitted def release(self): del self.inference_session @staticmethod def automatic_model_search(network_info): model = Path(network_info['model']) return model
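# This evaluator is normally driven by the accuracy-checker framework from a
# YAML configuration rather than instantiated by hand. A rough sketch, assuming
# a config dict with 'datasets', 'launchers' and 'network_info' sections:
#
#   evaluator = SuperResolutionFeedbackEvaluator.from_configs(config)
#   evaluator.process_dataset(check_progress=True)
#   metrics, meta = evaluator.extract_metrics_results()
#   evaluator.release()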
StarcoderdataPython
3276640
from typing import Any

from src.Shared.InterfaceAdapters.IFilter import IFilter


class Filter(IFilter):
    _filters: dict

    def __init__(self, filter: dict):
        self._filters = filter
        defaultFilters: Any = self.getDefaultFilters()

        for valueKey in defaultFilters:
            self._filters.update(valueKey)

    def get(self, key: str) -> str:
        return self._filters.get(key) if self.has(key) else None

    def has(self, key: str) -> bool:
        return key in self._filters

    def isEmpty(self) -> bool:
        # the original returned True when filters were present, i.e. the
        # inverse of what the method name promises
        return not self._filters

    def values(self) -> dict:
        return self._filters
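# Usage sketch (hypothetical; IFilter and its getDefaultFilters() hook are
# defined elsewhere in this project, so a concrete subclass is assumed):
#
#   f = ProductFilter({"status": "active"})
#   f.has("status")      # -> True
#   f.get("status")      # -> "active"
#   f.isEmpty()          # -> False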
StarcoderdataPython
180845
<gh_stars>1-10
#!/home/adam/Documents/revkit-1.3/python
#!/usr/bin/python

# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys, os
sys.path.append(os.path.dirname(sys.path[0]))
from revkit import *

opts = program_options()
opts.add_option( "spec", "specification of a circuit" ) \
    .add_option( "impl", "implementation of a circuit" )

opts.parse( sys.argv )

if not opts.good() or not opts.is_set( "spec" ) or not opts.is_set( "impl" ):
    print opts
    exit( 1 )

spec = circuit()
impl = circuit()

read_realization( spec, opts["spec"] ) or sys.exit( "Cannot read " + opts["spec"] )
read_realization( impl, opts["impl"] ) or sys.exit( "Cannot read " + opts["impl"] )

r = equivalence_check( spec, impl )

length = max( map( len, r.keys() ) )
string = '{0:' + str( length + 1 ) + 's} {1:s} '
for key in r.keys():
    print string.format( key + ":", str( r[key] ) )

#print statistics
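# Example invocation (file names are hypothetical; the option syntax follows
# the RevKit program_options convention):
#
#   ./equivalence_check.py --spec adder_spec.real --impl adder_impl.real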
StarcoderdataPython
3229050
import argparse
import csv

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import regularizers

from config import hidden_layer_size, n_params


def load_dataset(file_path):
    print("Loading {}".format(file_path))
    with open(file_path, 'r') as csv_file:
        reader = csv.reader(csv_file)
        csv_data = np.array([list(map(float, line)) for line in reader])
    return csv_data


def get_model():
    model = tf.keras.Sequential([
        Dense(hidden_layer_size, input_shape=(n_params,),
              kernel_regularizer=regularizers.l2(0.001)),
        Activation('relu'),
        Dense(hidden_layer_size, kernel_regularizer=regularizers.l2(0.001)),
        Activation('relu'),
        Dense(1),
        Activation('sigmoid')
    ])
    model.compile(optimizer="rmsprop", loss='mse', metrics=['mse'])
    return model


def get_args():
    parser = argparse.ArgumentParser(
        description='Training or inference using a neural network model.')
    parser.add_argument("-p", "--plot", action="store_true",
                        help="Save training curve in a picture")
    parser.add_argument("-w", "--weights", default="weights.h5",
                        help="File to hold the weights")
    parser.add_argument("-i", "--iterations", type=int, default=250,
                        help="Number of iterations (epochs)")
    parser.add_argument("command", choices=["fit", "predict"],
                        help="Fit the model or predict")
    return parser.parse_args()


def main():
    args = get_args()
    model = get_model()
    if args.command == "fit":
        training_csv = load_dataset("training_set.csv")
        np.random.shuffle(training_csv)
        training_data = training_csv[:, :-1]
        training_labels = training_csv[:, -1]
        history = model.fit(training_data, training_labels, epochs=args.iterations,
                            verbose=2, validation_split=0.3, shuffle=True)
        if args.plot:
            plt.plot(history.history['mse'])
            plt.plot(history.history['val_mse'])
            plt.title('training curve')
            plt.ylabel('mean squared error')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper right')
            plt.savefig("training_curve.png")
            print("Training curve saved in training_curve.png")
        model.save_weights(args.weights)
        print("Weights saved in {}".format(args.weights))
    elif args.command == "predict":
        print("Loading weights from {}".format(args.weights))
        model.load_weights(args.weights)
        inference_csv = load_dataset("inference_set.csv")
        inference_features = inference_csv[:, 1:]
        print("Starting inference")
        predictions = model.predict_on_batch(inference_features).numpy()
        with open('game_list.csv', 'r') as csv_file:
            reader = csv.reader(csv_file)
            game_list = list(reader)
        names_dic = {int(detail[0]): detail[1] for detail in game_list}
        del game_list
        # rank game ids by predicted score, highest first
        id_score = sorted(([int(inference_csv[i, 0]), predictions[i, 0]]
                           for i in range(inference_csv.shape[0])),
                          key=lambda x: -x[1])
        for i in range(len(id_score)):
            id_score[i].append(names_dic[id_score[i][0]])
        with open("recommended.csv", "w") as csv_file:
            writer = csv.writer(csv_file, lineterminator="\n")
            writer.writerows(id_score)
        print("Results written in recommended.csv")
    else:
        print("Unrecognized command: {}".format(args.command))
        exit(1)


if __name__ == "__main__":
    main()
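# Example invocations (the script name is hypothetical; training_set.csv,
# inference_set.csv and game_list.csv are expected in the working directory):
#
#   python recommender.py fit -i 300 -p
#   python recommender.py predict -w weights.h5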
StarcoderdataPython
181014
from django.contrib import admin
from django.urls import reverse
from django.utils.safestring import mark_safe

from .models import *


# Register your models here.
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    list_display = [field.name for field in Product._meta.fields]


@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
    list_display = [field.name for field in Review._meta.fields]


def order_pdf(obj):
    url = reverse('orders:admin_order_pdf', args=[obj._id])
    return mark_safe(f'<a href="{url}">PDF</a>')


order_pdf.short_description = 'Invoice'


@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    list_display = [field.name for field in Order._meta.fields]
    list_display.append(order_pdf)


@admin.register(OrderItem)
class OrderItemAdmin(admin.ModelAdmin):
    list_display = [field.name for field in OrderItem._meta.fields]


@admin.register(ShippingAddress)
class ShippingAddressAdmin(admin.ModelAdmin):
    list_display = [field.name for field in ShippingAddress._meta.fields]
StarcoderdataPython
4837224
<reponame>saavpedia/python<gh_stars>0
#!/usr/bin/env python
################################################################################
# Copyright 2018 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import os
import math
import shutil
import sqlite3
import urllib2


class SQLite3(object):

    def __init__(self):
        self.__itsCursor = None
        self.__itsVersionList = [
            {'date': '2018.05.04',
             'size': 10753868800,
             'folder': '2018.05.04.sqlite.split',
             'unit_size': 10485760}
        ]

    def __load(self):
        theBasePath = os.path.dirname(os.path.realpath(__file__))
        theDBFilePath = theBasePath + '/saavpedia.db'
        if os.path.exists(theDBFilePath):
            if os.path.isfile(theDBFilePath) and \
                    os.path.getsize(theDBFilePath) == self.__itsVersionList[-1]['size']:
                self.__itsConnection = sqlite3.connect(theDBFilePath)
                self.__itsCursor = self.__itsConnection.cursor()
                return False
        self.__saveDB(theDBFilePath)
        self.__itsCursor = None
        return True

    def __saveDB(self, theDBFilePath):
        theLastVersionInfo = self.__itsVersionList[-1]
        theUnitSize = theLastVersionInfo['unit_size']
        theNumOfSplitFiles = int(math.ceil(theLastVersionInfo['size'] / (theUnitSize * 1.0)))
        theWriter = open(theDBFilePath, 'wb')
        theTempFileList = []
        theTempFolder = os.path.dirname(theDBFilePath) + os.sep + 'tmp'
        if not os.path.exists(theTempFolder):
            os.makedirs(theTempFolder)
        # download each split chunk, skipping chunks that are already complete
        for idx in range(theNumOfSplitFiles):
            theTempFilePath = '{0}{1}SAAVpedia.sqlite.{2}.db'.format(theTempFolder, os.sep, str(idx))
            theTempFileList.append(theTempFilePath)
            if (not os.path.exists(theTempFilePath)) or (os.path.getsize(theTempFilePath) != theUnitSize):
                print 'Downloading SAAVpedia.sqlite.{0}.db - {1:.2f}%'.format(
                    idx, (idx + 1.0) / theNumOfSplitFiles * 100.0)
                theTempWriter = open(theTempFilePath, 'wb')
                theURL = 'https://github.com/saavpedia/python/blob/master/SAAVpedia/db/{0}/SAAVpediaData.sqlite.db.{1}.kbsi?raw=true'.format(
                    theLastVersionInfo['folder'], idx)
                theData = urllib2.urlopen(theURL).read()
                theTempWriter.write(theData)
                theTempWriter.close()
        print 'Download is completed.'
        # concatenate the chunks into the final database file
        theCount = 0
        for ithDBFile in theTempFileList:
            print 'Generating SAAVpedia DB... - {0:.2f}%'.format(
                (theCount + 1.0) / theNumOfSplitFiles * 100.0)
            with open(ithDBFile, 'rb') as theReader:
                theWriter.write(theReader.read())
            theCount += 1
        theWriter.close()
        print 'Removing temporary files...'
        shutil.rmtree(theTempFolder)
        print 'SAAVpedia initialization is completed.'

    def load(self):
        try:
            self.__load()
            return False
        except Exception as e:
            print str(e)
            return True

    def open(self, theDBFilePath):
        try:
            self.__itsConnection = sqlite3.connect(theDBFilePath)
            self.__itsCursor = self.__itsConnection.cursor()
            return False
        except Exception:
            return True

    def close(self):
        if self.__itsCursor is not None:
            self.__itsCursor.close()
            self.__itsCursor = None

    def execute(self, theCommand):
        if self.__itsCursor is not None:
            return self.__itsCursor.execute(theCommand)
        return None


if __name__ == '__main__':
    theSQLite = SQLite3()
    theSQLite.load()
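# Example query once the database has been fetched (table names are
# hypothetical; inspect sqlite_master for the real schema). Note that load()
# returns False on success:
#
#   theSQLite = SQLite3()
#   if not theSQLite.load():
#       theCursor = theSQLite.execute("SELECT name FROM sqlite_master WHERE type='table'")
#       print theCursor.fetchall()
#       theSQLite.close()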
StarcoderdataPython