max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
tests/agent/samples/process_agent.py | bbhunter/ostorlab | 113 | 12676587 |
"""Sample agents that implements the process method, listen to ping messages and sends them back."""
import datetime
import logging
from ostorlab.agent import agent, message as agent_message
logger = logging.getLogger(__name__)
class ProcessTestAgent(agent.Agent):
"""Custom agent implementation."""
message = None
def process(self, message: agent_message.Message) -> None:
"""Receives ping messages and sends new ones.
Args:
message: message from bus
Returns:
None
"""
logger.info('received message')
self.message = message
self.emit('v3.healthcheck.ping', {'body': f'from test agent at {datetime.datetime.now()}'})
# process_agent = ProcessTestAgent(
# definitions.AgentDefinition(name='process_test_agent', in_selectors=['v3.healthcheck.ping'],
# out_selectors=['v3.healthcheck.ping']),
# definitions.AgentInstanceSettings(
# bus_url='amqp://guest:guest@localhost:5672/', bus_exchange_topic='ostorlab_test', healthcheck_port=5302))
#
# process_agent.run()
ProcessTestAgent.main()
|
tests/unit/test_expression_tree/test_printing/test_print_name.py | manjunathnilugal/PyBaMM | 330 | 12676589 | """
Tests for print_name.py
"""
import unittest
import pybamm
class TestPrintName(unittest.TestCase):
def test_prettify_print_name(self):
param = pybamm.LithiumIonParameters()
param1 = pybamm.standard_variables
param2 = pybamm.LeadAcidParameters()
# Test PRINT_NAME_OVERRIDES
self.assertEqual(param.timescale.print_name, r"\tau")
# Test superscripts
self.assertEqual(param.U_n_ref.print_name, r"U_{n}^{ref}")
# Test subscripts
self.assertEqual(param.a_R_p.print_name, r"a_{R\,p}")
# Test dim and dimensional
self.assertEqual(param.j0_n_ref_dimensional.print_name, r"\hat{j0}_{n}^{ref}")
self.assertEqual(param.C_dl_n_dimensional.print_name, r"\hat{C}_{dl\,n}")
# Test bar
self.assertEqual(param1.c_s_n_xav.print_name, r"\bar{c}_{s\,n}")
# Test greek letters
self.assertEqual(param2.delta.print_name, r"\delta")
# Test new_copy()
x_n = pybamm.standard_spatial_vars.x_n
a_n = param2.a_n(x_n)
a_n.new_copy()
# Test eps
self.assertEqual(param1.eps_n.print_name, r"\epsilon_n")
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
utils/check_dependencies.py | aguilajesus/dtformats | 702 | 12676592 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to check for the availability and version of dependencies."""
import sys
# Change PYTHONPATH to include dependencies.
sys.path.insert(0, '.')
import utils.dependencies # pylint: disable=wrong-import-position
if __name__ == '__main__':
dependency_helper = utils.dependencies.DependencyHelper()
if not dependency_helper.CheckDependencies():
sys.exit(1)
|
Question_81_90/answers/answer_85.py | nuck555/ImageProcessing100Wen | 3,482 | 12676596 | import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
# Decrease color
def dic_color(img):
img //= 63
img = img * 64 + 32
return img
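# dic_color quantizes each channel onto four bin centres (32, 96, 160, 224):
# v // 63 picks a bin and bin * 64 + 32 is its centre, e.g. 100 -> 96, 200 -> 224
# (assuming uint8 input as returned by cv2.imread).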
# Database
def get_DB():
# get training image path
train = glob("dataset/train_*")
train.sort()
# prepare database
db = np.zeros((len(train), 13), dtype=np.int32)
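    # db layout: columns 0-3 hold channel-0 (B) bin counts, 4-7 channel-1 (G),
    # 8-11 channel-2 (R), and the last column stores the class label.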
# prepare path database
pdb = []
# each image
for i, path in enumerate(train):
# read image
img = dic_color(cv2.imread(path))
        # get histogram
for j in range(4):
db[i, j] = len(np.where(img[..., 0] == (64 * j + 32))[0])
db[i, j+4] = len(np.where(img[..., 1] == (64 * j + 32))[0])
db[i, j+8] = len(np.where(img[..., 2] == (64 * j + 32))[0])
# get class
if 'akahara' in path:
cls = 0
elif 'madara' in path:
cls = 1
# store class label
db[i, -1] = cls
# store image path
pdb.append(path)
return db, pdb
# test
def test_DB(db, pdb):
# get test image path
test = glob("dataset/test_*")
test.sort()
success_num = 0.
# each image
for path in test:
# read image
img = dic_color(cv2.imread(path))
# get histogram
hist = np.zeros(12, dtype=np.int32)
for j in range(4):
hist[j] = len(np.where(img[..., 0] == (64 * j + 32))[0])
hist[j+4] = len(np.where(img[..., 1] == (64 * j + 32))[0])
hist[j+8] = len(np.where(img[..., 2] == (64 * j + 32))[0])
# get histogram difference
difs = np.abs(db[:, :12] - hist)
difs = np.sum(difs, axis=1)
# get argmin of difference
pred_i = np.argmin(difs)
# get prediction label
pred = db[pred_i, -1]
if pred == 0:
pl = "akahara"
elif pred == 1:
pl = "madara"
print(path, "is similar >>", pdb[pred_i], " Pred >>", pl)
db, pdb = get_DB()
test_DB(db, pdb) |
vint/linting/formatter/statistic_formatter.py | mosheavni/vint | 538 | 12676667 | from typing import List, Dict, Any # noqa: F401
from vint.linting.formatter.formatter import Formatter
class StatisticFormatter(Formatter):
def format_violations(self, violations): # type: (List[Dict[str, Any]]) -> str
violations_count = len(violations)
output = super(StatisticFormatter, self).format_violations(violations) + '\n'
return output + 'Total violations: {count}'.format(count=violations_count)
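        # e.g. for three violations the report ends with the line "Total violations: 3".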
|
reddit2telegram/channels/r_biganimetiddies/app.py | mainyordle/reddit2telegram | 187 | 12676678 | #encoding:utf-8
# Write the subreddit name here. Like this one for /r/BigAnimeTiddies.
subreddit = 'BigAnimeTiddies'
# This is for your public telegram channel.
t_channel = '@r_BigAnimeTiddies'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
tests/test_implementations/test_sqlalchemy/api_test/test_delete_many_api.py | aebrahim/FastAPIQuickCRUD | 123 | 12676685 | import json
from collections import OrderedDict
from starlette.testclient import TestClient
from src.fastapi_quickcrud import sqlalchemy_to_pydantic
from src.fastapi_quickcrud.crud_router import crud_router_builder
from src.fastapi_quickcrud.misc.type import CrudMethods
from tests.test_implementations.test_sqlalchemy.api_test import get_transaction_session, app, UntitledTable256
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
test_create_one = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_creation_one",
tags=["test"]
)
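# The same pattern repeats below: generate a pydantic model for a single CRUD method,
# then mount a dedicated router for it under its own prefix.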
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_MANY,
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
test_create_many = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_creation_many",
tags=["test"]
)
# Response Mode Test
# response_many = create_many_response_model['__root__'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in response_many.items():
# assert not v.required
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.POST_REDIRECT_GET
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# post_redirect_get_model = api_model[CrudMethods.POST_REDIRECT_GET].__dict__
# assert post_redirect_get_model['requestModel'] or post_redirect_get_model['responseModel']
# post_redirect_get_request_model = deepcopy(post_redirect_get_model['requestModel'].__dict__['__fields__'])
# post_redirect_get_response_model = deepcopy(post_redirect_get_model['responseModel'].__dict__['__fields__'])
# Request Model Test
# for k, v in post_redirect_get_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Model Test
# for k, v in post_redirect_get_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# for k, v in post_redirect_get_response_model.items():
# assert v.required
test_post_and_redirect_get = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_post_direct_get",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.FIND_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_get_data = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.DELETE_MANY
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_delete_data = crud_router_builder(db_session=get_transaction_session,
db_model=UntitledTable256,
crud_models=UntitledTable256Model,
prefix="/test_delete_many",
tags=["test"]
)
[app.include_router(i) for i in [test_post_and_redirect_get, test_delete_data, test_create_one, test_create_many, test_get_data]]
client = TestClient(app)
primary_key_name = UntitledTable256.primary_key_of_table
unique_fields = UntitledTable256.unique_fields
def test_create_many_and_delete_many():
headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
}
data = { "insert": [ { "bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z", "timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string", "array_value": [ 0 ],
"array_str__value": [ "string" ], "time_value": "18:18:18" , "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string", "time_value": "18:18:18",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "time_value": "18:18:18", "timetz_value": "18:18:18+00:00"},
] }
response = client.post('/test_creation_many', headers=headers, data=json.dumps(data))
assert response.status_code == 201
insert_response_data = response.json()
primary_key_list = [i[primary_key_name] for i in insert_response_data]
min_key = min(primary_key_list)
max_key = max(primary_key_list)
params = {"primary_key____from": min_key,
"primary_key____to": max_key,
"bool_value____list":True,
"char_value____str": 'string%',
"char_value____str_____matching_pattern": 'case_sensitive',
"date_value____from": "2021-07-22",
"date_value____to": "2021-07-25",
"float4_value____from": -1,
"float4_value____to": 2,
"float4_value____list": 0,
"float8_value____from": -1,
"float8_value____to": 2,
"float8_value____list": 0,
"int2_value____from": -1,
"int2_value____to": 9,
"int2_value____list": 0,
"int4_value____from": -1,
"int4_value____to": 9,
"int4_value____list": 0,
"int8_value____from": -1,
"int8_value____to": 9,
"int8_value____list": 0,
"interval_value____from": -1,
"interval_value____to": 9,
"interval_value____list": 0,
"numeric_value____from": -1,
"numeric_value____to": 9,
"numeric_value____list": 0,
"text_value____list": "string",
"time_value____from": '18:18:18',
"time_value____to": '18:18:18',
"time_value____list": '18:18:18',
"timestamp_value_value____from": "2021-07-24T02:54:53.285",
"timestamp_value_value____to": "2021-07-24T02:54:53.285",
"timestamp_value_value____list": "2021-07-24T02:54:53.285",
"timestamptz_value_value____from": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____to": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____list": "2021-07-24T02:54:53.285Z",
"uuid_value_value____list": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"time_value____from": '18:18:18+00:00',
"time_value____to": '18:18:18+00:00',
"time_value____list": '18:18:18+00:00',
"varchar_value____str": 'string',
"varchar_value____str_____matching_pattern": 'case_sensitive',
"varchar_value____list": 'string',
}
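    # The ____from/____to suffixes express range filters and ____list exact matches;
    # combined, they are intended to select exactly the three rows inserted above.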
from urllib.parse import urlencode
query_string = urlencode(OrderedDict(**params))
response = client.delete(f'/test_delete_many?{query_string}')
assert response.status_code == 200
assert response.headers['x-total-count'] == '3'
def test_create_many_and_delete_many_but_not_found():
headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
}
data = { "insert": [ { "bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285", "timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string", "array_value": [ 0 ],
"array_str__value": [ "string" ], "time_value": "18:18:18" , "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string", "time_value": "18:18:18",
"timestamp_value": "2021-07-24T02:54:53.285",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "time_value": "18:18:18", "timetz_value": "18:18:18+00:00"},
] }
response = client.post('/test_creation_many', headers=headers, data=json.dumps(data))
assert response.status_code == 201
insert_response_data = response.json()
primary_key_list = [i[primary_key_name] for i in insert_response_data]
min_key = min(primary_key_list)
max_key = max(primary_key_list)
params = {"primary_key____from": min_key,
"primary_key____to": max_key,
"bool_value____list":True,
"char_value____str": 'string%',
"char_value____str_____matching_pattern": 'case_sensitive',
"date_value____from": "2021-07-22",
"date_value____to": "2021-07-25",
"float4_value____from": -1,
"float4_value____to": 2,
"float4_value____list": 0,
"float8_value____from": -1,
"float8_value____to": 2,
"float8_value____list": 0,
"int2_value____from": -1,
"int2_value____to": 9,
"int2_value____list": 0,
"int4_value____from": -1,
"int4_value____to": 9,
"int4_value____list": 0,
"int8_value____from": -1,
"int8_value____to": 9,
"int8_value____list": 0,
"interval_value____from": -1,
"interval_value____to": 9,
"interval_value____list": 0,
"numeric_value____from": -1,
"numeric_value____to": 9,
"numeric_value____list": 0,
"text_value____list": "string",
"time_value____from": '10:18:18',
"time_value____to": '12:18:18',
"time_value____list": '12:18:18',
"timestamp_value_value____from": "2021-07-24T02:54:53.285",
"timestamp_value_value____to": "2021-07-24T02:54:53.285",
"timestamp_value_value____list": "2021-07-24T02:54:53.285",
"timestamptz_value_value____from": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____to": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____list": "2021-07-24T02:54:53.285Z",
"uuid_value_value____list": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"timez_value____from": '18:18:18+00:00',
"timez_value____to": '18:18:18+00:00',
"timez_value____list": '18:18:18+00:00',
"varchar_value____str": 'string',
"varchar_value____str_____matching_pattern": 'case_sensitive',
"varchar_value____list": 'string',
}
from urllib.parse import urlencode
query_string = urlencode(OrderedDict(**params))
response = client.delete(f'/test_delete_many?{query_string}')
assert response.status_code == 204
|
scripts/generate_requirements_rtd.py | sreeja97/pystiche | 129 | 12676696 | import configparser
import re
from os import path
try:
import light_the_torch as ltt
import yaml
assert ltt.__version__ >= "0.2"
except (ImportError, AssertionError):
msg = "Please install pyyaml and light-the-torch>=0.2 prior to running this."
raise RuntimeError(msg)
DEPS_SUBSTITUTION_PATTERN = re.compile(r"\{\[(?P<section>[a-zA-Z\-]+)\]deps\}")
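# Matches tox-style substitutions such as "{[docs-common]deps}", capturing the
# referenced section name ("docs-common") so its deps can be expanded recursively.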
def main(
root=".", file=path.join("docs", "requirements-rtd.txt"),
):
python_version = extract_python_version_from_rtd_config(root)
deps = extract_docs_deps_from_tox_config(root)
deps.extend(find_pytorch_wheel_links(root, python_version))
with open(file, "w") as fh:
fh.write("\n".join(deps) + "\n")
def extract_python_version_from_rtd_config(root, file=".readthedocs.yml"):
with open(path.join(root, file)) as fh:
data = yaml.load(fh, Loader=yaml.FullLoader)
return str(data["python"]["version"])
def extract_docs_deps_from_tox_config(root, file="tox.ini", section="docs-common"):
config = configparser.ConfigParser()
config.read(path.join(root, file))
deps = []
sections = [section]
for section in sections:
for dep in config[section]["deps"].strip().split("\n"):
match = DEPS_SUBSTITUTION_PATTERN.match(dep)
if match is None:
deps.append(dep)
else:
sections.append(match.group("section"))
return deps
def find_pytorch_wheel_links(
root, python_version, computation_backend="cpu", platform="linux_x86_64",
):
return ltt.find_links(
[root],
computation_backend=computation_backend,
python_version=python_version,
platform=platform,
)
if __name__ == "__main__":
project_root = path.abspath(path.join(path.dirname(__file__), ".."))
main(project_root)
|
gryphon/execution/controllers/balance.py | qiquanzhijia/gryphon | 1,109 | 12676697 | from gryphon.execution.lib.exchange_color import exchange_color
from gryphon.lib.exchange.exchange_factory import *
from gryphon.lib.logger import get_logger
from gryphon.lib.models.exchange import Balance
from gryphon.lib import session
logger = get_logger(__name__)
def balance_requests(exchanges):
balance_requests = []
for exchange in exchanges:
balance_requests.append(exchange.get_balance_req())
return balance_requests
def balance_responses(exchanges, balance_requests):
"""
    This function uses environment variables to set
    minimum balances for an exchange.
    Format: {{exchange.name}}_MINIMUM_USD
Examples: BITSTAMP_MINIMUM_USD, CAVIRTEX_MINIMUM_BTC
"""
balances = {}
balances['system'] = Balance()
for exchange in exchanges:
req = balance_requests.pop(0)
balances[exchange.name] = exchange.get_balance_resp(req)
balances['system']['USD'] += balances[exchange.name].fiat().to('USD')
balances['system']['BTC'] += balances[exchange.name]['BTC']
return balances
def get_db_balances(exchanges):
db = session.get_a_trading_db_mysql_session()
db_balances = {}
db_balances['system'] = Balance()
try:
for exchange in exchanges:
exchange_data = exchange.exchange_account_db_object(db)
db_balances[exchange.name] = exchange_data.balance
db_balances['system']['USD'] += db_balances[exchange.name].fiat().to('USD')
db_balances['system']['BTC'] += db_balances[exchange.name]['BTC']
finally:
db.close()
return db_balances
def format_balances(exchange_balances, db_balances):
output_string = u"\n{0:15} : {1:15} | {2:15} || {3:15} | {4:15}\n".format("Balances", "FIAT", "BTC", "dbFIAT", "dbBTC")
for name, balance in sorted(exchange_balances.iteritems()):
db_balance = db_balances[name]
chunk = u"{0:15} : {1:15} | {2:15.8f} || {3:15} | {4:15.8f}\n".format(
name,
balance.fiat(),
balance['BTC'].amount,
db_balance.fiat(),
db_balance['BTC'].amount
)
chunk = exchange_color(chunk, name)
output_string += chunk
return output_string
def balance(exchange_name):
if exchange_name:
exchange = make_exchange_from_key(exchange_name)
exchanges = [exchange]
else:
exchanges = all_exchanges()
brs = balance_requests(exchanges)
balances = balance_responses(exchanges, brs)
db_balances = get_db_balances(exchanges)
print(format_balances(balances, db_balances))
|
prometheus_fastapi_instrumentator/__init__.py | chbndrhnns/prometheus-fastapi-instrumentator | 223 | 12676707 |
# Copyright © 2020 <NAME> <<EMAIL>>
# Licensed under Apache License 2.0 <http://www.apache.org/licenses/LICENSE-2.0>
from .instrumentation import PrometheusFastApiInstrumentator
Instrumentator = PrometheusFastApiInstrumentator
|
titus/titus/inspector/defs.py | jmilleralpine/hadrian | 127 | 12676710 | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import os.path
import readline
import traceback
import re
import sys
import subprocess
import glob
import math
import titus.inspector.parser as parser
from titus.reader import jsonToAst
from titus.genpy import PFAEngine
from titus.errors import AvroException, SchemaParseException, PFAException
CONFIG_DIRECTORY = "~/.pfa"
CONFIG_DIRECTORY_EXISTS = True
if not os.path.exists(os.path.expanduser(CONFIG_DIRECTORY)):
if raw_input("Create {0} for configuration files? (Y/n): ".format(CONFIG_DIRECTORY)).upper().strip() in ("Y", ""):
os.mkdir(os.path.expanduser(CONFIG_DIRECTORY))
else:
CONFIG_DIRECTORY_EXISTS = False
CONFIG_COMPLETER_DELIMS = " \t[],:="
readline.set_completer_delims(CONFIG_COMPLETER_DELIMS)
readline.parse_and_bind("tab: complete")
readline.parse_and_bind(r'"\eh": backward-kill-word')
readline.parse_and_bind(r'"\ep": history-search-backward')
readline.parse_and_bind(r'"\en": history-search-forward')
class TabCompleter(object):
"""Handles tab-completion in pfainspector."""
def __init__(self, mode):
""":type mode: titus.inspector.defs.Mode
:param mode: the pfainspector mode in which this tab completer is active
"""
self.mode = mode
self.candidates = []
def complete(self, text, state):
""":type text: string
:param text: partial text to complete
:type state: integer
:param state: the number of times the user has pressed tab. If ``0``, generate a new list of candidates; otherwise, use the old one
:rtype: list of strings
:return: set of completions
"""
if state == 0:
line = readline.get_line_buffer()
begin = readline.get_begidx()
end = readline.get_endidx()
established = line[:begin]
active = line[begin:end]
self.candidates = self.mode.complete(established, active)
try:
return self.candidates[state]
except IndexError:
return None
class InspectorError(Exception):
"""Exception encountered in pfainspector."""
pass
class Gadget(object):
"""Trait for pfainspector gadgets."""
def __init__(self, mode):
self.commandGroup = None
def do(*args):
"""Helper function for chaining function calls in a Python lambda expression.
Part of a poor-man's functional programming suite used to define several pfainspector commands as one-liners.
:type args: anything
:param args: functions that have already been evaluated
:rtype: anything
:return: the last argument
"""
return args[-1]
def maybe(action, exceptions):
"""Helper function for try-catch logic in a Python lambda expression.
Part of a poor-man's functional programming suite used to define several pfainspector commands as one-liners.
:type action: callable
:param action: function to call
:type exceptions: list of (class, string)
:param exceptions: exception classes to catch and their corresponding pfainspector messages
:rtype: anything
:return: result of calling ``action``
"""
try:
action()
except Exception as err:
for cls, message in exceptions:
if isinstance(err, cls):
raise InspectorError(message)
raise
def switch(*pairs):
"""Helper function for cond-logic in a Python lambda expression.
Part of a poor-man's functional programming suite used to define several pfainspector commands as one-liners.
:type pairs: callables
:param pairs: sequence of predicate1, consequent1, predicate2, consequent2, ..., alternate; the predicates will each be called in turn until one returns ``True``, and then the corresponding consequent will be called *or* the alternate will be called if none of the predicates return ``True``
"""
if len(pairs) % 2 != 1 or len(pairs) < 3:
raise TypeError
for predicate, consequent in zip(pairs[:-1][::2], pairs[:-1][1::2]):
if callable(predicate):
predicate = predicate()
if predicate:
if callable(consequent):
consequent = consequent()
return consequent
    alternate = pairs[-1]
    if callable(alternate):
        alternate = alternate()
    return alternate
def exception(x):
"""Helper function for raising an exception in a Python lambda expression.
Part of a poor-man's functional programming suite used to define several pfainspector commands as one-liners.
:type x: ``Exception``
:param x: exception to raise (from a function call)
"""
raise x
def getwords(text):
"""Parse a (partial?) command line into a syntax tree.
:type text: string
:param text: text to parse
:rtype: list of titus.inspector.parser.Ast
:return: abstract syntax tree for a pfainspector command line
"""
try:
return parser.parser.parse(text)
except parser.ParserError as err:
raise InspectorError(str(err))
def getcomplete(established):
"""Get the result of tab completion.
:type established: string
:param established: text that has been established and is not subject to completion (though it may influence completion as a context)
:rtype: list of titus.inspector.parser.Ast
:return: abstract syntax tree of the established part of a pfainspector command line
"""
if established.strip() == "":
return []
else:
return parser.parser.parse(established) # let it fail; completer ignores exceptions
def pathcomplete(established, active):
"""Tab completion routine for filesystem paths.
:type established: string
:param established: text that has been established and is not subject to completion (everything up to the last ``/``)
:type active: string
:param active: text to be completed (anything after the last ``/``)
:rtype: list of strings
:return: list of possible completions
"""
base, last = os.path.split(os.path.expanduser(active))
if base == "":
base = "."
if os.path.isdir(base):
if last == "":
condition = lambda x: not x.startswith(".")
else:
condition = lambda x: x.startswith(last)
def finish(x):
if " " in x:
quote = '"'
else:
quote = ""
if os.path.isdir(x):
return quote + os.path.join(x, "")
elif os.path.exists(x):
return quote + x + " "
return [finish(os.path.join(base, x) if base != "." else x) for x in os.listdir(base) if condition(x)]
def extcomplete(node, items):
"""Tab completion routine for JSON extraction (everything after an opening square bracket).
:type established: string
:param established: text that has been established and is not subject to completion (everything up to the last ``,``)
:type active: string
:param active: text to be completed (anything after the last ``,``)
:rtype: list of strings
:return: list of possible completions
"""
for item in items:
if isinstance(item, (parser.Word, parser.String)) and isinstance(node, dict):
if item.text in node:
node = node[item.text]
else:
return []
elif isinstance(item, parser.Integer) and isinstance(node, (list, tuple)):
if item.num < len(node):
node = node[item.num]
else:
return []
else:
return []
def quote(x):
if " " in x:
return '"' + x + '"'
else:
return x
if isinstance(node, dict):
return [quote(x) + (", " if isinstance(node[x], (list, tuple, dict)) else "]") for x in node]
elif isinstance(node, (list, tuple)):
formatter = "%%0%dd" % int(math.ceil(math.log10(len(node))))
return [quote(formatter % x) + (", " if isinstance(node[x], (list, tuple, dict)) else "]") for x in xrange(len(node))]
else:
return []
def extaction(args0, node, items):
"""Action for pfainspector extensions (depend on specific commands).
:type args0: titus.inspector.parser.Extract
:param args0: abstract syntax tree representation of the object to extract (word before opening square bracket)
:type node: Pythonized JSON
:param node: JSON node from which to extract subobjects
:type items: (integer, titus.inspector.parser.FilePath)
:param items: extraction path (everything between square brakets, possibly still open)
:rtype: Pythonized JSON
:return: result of extracting subobjects
"""
for index, item in enumerate(items):
if isinstance(item, (parser.Word, parser.String)):
try:
node = node[item.text]
except (KeyError, TypeError):
raise InspectorError("{0} has no key {1}".format(args0.strto(index), str(item)))
elif isinstance(item, parser.Integer):
try:
node = node[item.num]
except (IndexError, TypeError):
raise InspectorError("{0} has no index {1}".format(args0.strto(index), str(item)))
else:
raise InspectorError("syntax error: {0} should not appear in extractor".format(item))
return node
def run(command, *args):
"""Helper function to run a subprocess in a Python lambda expression.
:type command: string
:param command: external command to run
:type args: strings
:param args: arguments for the external command
:rtype: string
:return: result of running the command
"""
whole = [command]
for arg in args:
g = glob.glob(arg)
if len(g) > 0:
whole.extend(g)
else:
whole.append(arg)
proc = subprocess.Popen(whole, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returnCode = proc.wait()
output = proc.stdout.read()
if returnCode != 0:
output += "\nsubprocesses failed with exit code {0}".format(returnCode)
return output
def pipe(command):
"""Helper function to create a subprocess.Popen as a pipe.
:type command: list of strings
:param command: external command with arguments
:rtype: subprocess.Popen object
:return: piped subprocess (for further processing)
"""
return subprocess.Popen(command, stdin=subprocess.PIPE)
def pipewait(proc):
"""Helper function to wait for a subprocess.Popen to complete.
:type proc: subprocess.Popen
:param proc: process to wait for
:rtype: ``None``
:return: nothing; if the process's return code is not ``0``, a warning message is printed to standard output
"""
proc.stdin.close()
returnCode = proc.wait()
if returnCode != 0:
print "\nsubprocesses failed with exit code {0}".format(returnCode)
class Model(object):
"""A loaded JSON or PFA file.
Always has an ``obj`` member, representing the Pythonized JSON that was loaded from the file.
Lazy-evaluates an ``engineConfig`` member the first time it is requested. This is a titus.pfaast.EngineConfig representing the abstract syntax tree of the PFA file. If the PFA file contains an error, attempting to access the ``engineConfig`` property yields an exception (titus.errors.AvroException, titus.errors.SchemaParseException, or titus.errors.PFAException).
    Lazy-evaluates an ``engine`` member the first time it is requested. This is a titus.genpy.PFAEngine representing an executable scoring engine. If the PFA file contains an error, attempting to access the ``engine`` property yields an exception (titus.errors.AvroException, titus.errors.SchemaParseException, or titus.errors.PFAException).
"""
def __init__(self, obj):
self.obj = obj
self._engineConfig = None
self._engine = None
def reset(self):
self._engineConfig = None
self._engine = None
@property
def engineConfig(self):
if self._engineConfig is None:
try:
self._engineConfig = jsonToAst(self.obj)
except (AvroException, SchemaParseException, PFAException) as err:
raise InspectorError(str(err))
return self._engineConfig
@property
def engine(self):
if self._engine is None:
try:
self._engine, = PFAEngine.fromAst(self.engineConfig)
except (AvroException, SchemaParseException, PFAException) as err:
raise InspectorError(str(err))
return self._engine
def __repr__(self):
return "Model(" + repr(self.obj) + ")"
class Mode(object):
"""A mode of operation for the pfainspector.
This concrete base class is a read-eval-print loop for responding to the user's commands.
"""
def __init__(self):
"""Create the main mode.
If titus.inspector.defs.CONFIG_DIRECTORY_EXISTS is ``True``, get the readline history file from the user's titus.inspector.defs.CONFIG_DIRECTORY.
"""
if CONFIG_DIRECTORY_EXISTS:
self.historyPath = os.path.join(os.path.expanduser(CONFIG_DIRECTORY), self.historyFileName)
if not os.path.exists(self.historyPath):
open(self.historyPath, "w").close()
self.active = True
self.tabCompleter = TabCompleter(self)
readline.read_history_file(self.historyPath)
readline.set_completer(self.tabCompleter.complete)
def writehistory():
if self.active:
readline.write_history_file(self.historyPath)
atexit.register(writehistory)
def loop(self):
"""Main loop: attempts to evaluate commands.
An ``EOFError`` exception (user typed control-D on an empty line) quits the pfainspector.
A ``KeyboardInterrupt`` exception (user typed control-C at any time) jumps to an empty command-line state.
Any titus.inspector.defs.InspectorError results in the error's message being printed on the screen.
Any other exception results in a full stack trace being printed on the screen.
"""
while True:
try:
line = raw_input(self.prompt)
if line.strip() != "":
self.action(line)
except EOFError:
print
sys.exit(0)
except KeyboardInterrupt:
print "(use control-D or exit to quit)"
except InspectorError as err:
print err
except Exception as err:
traceback.print_exc()
def pause(self):
"""Write the current history to the history file and stop readline."""
readline.write_history_file(self.historyPath)
readline.clear_history()
readline.set_completer()
self.active = False
def resume(self):
"""Read the history file and restart readline."""
self.active = True
readline.read_history_file(self.historyPath)
readline.set_completer(self.tabCompleter.complete)
class Command(object):
"""Trait for pfainspector commands."""
def __init__(self):
pass
def syntaxError(self):
"""General behavior for syntax errors in the command parser: raise a titus.inspector.defs.InspectorError."""
if self.syntax is None:
raise InspectorError("syntax error in {0}".format(self.name))
else:
raise InspectorError("syntax error, should be: {0}".format(self.syntax))
class SimpleCommand(Command):
"""A pfainspector command whose action can be expressed as a Python lambda expression."""
def __init__(self, name, action, minargs=None, maxargs=None, completer=None, help=None, syntax=None):
"""Create a SimpleCommand.
:type name: string
:param name: name of the command
:type action: callable
:param action: action to perform, usually supplied as a Python lambda expression.
:type minargs: non-negative integer or ``None``
:param minargs: if provided, a minimum legal number of arguments for the command
:type maxargs: non-negative integer or ``None``
:param maxargs: if provided, a maximum legal number of arguments for the command
:type completer: callable
:param completer: function to call to complete the arguments of the command
:type help: string
:param help: message to the user describing the purpose of this command
:type syntax: string
:param syntax: message to the user specifying the syntax of this command
"""
self.name = name
self._action = action
self.minargs = minargs
self.maxargs = maxargs
self.completer = completer
self.syntax = syntax
self.help = help + "\n " + syntax
def complete(self, established, active):
"""Handle tab-complete for this command's arguments.
:type established: string
:param established: part of the text that has been established
:type active: string
:param active: part of the text to be completed
:rtype: list of strings
:return: potential completions
"""
out = []
if self.completer is None:
pass
elif callable(self.completer):
out.extend(self.completer(established, active))
else:
            for completer in self.completer:
out.extend(completer(established, active))
return out
def action(self, args):
"""Perform the action associated with this command.
:type args: list of titus.inspector.parser.Ast
:param args: arguments passed to the command
:rtype: ``None``
:return: nothing; results must be printed to the screen
"""
if self.help is not None and len(args) == 1 and args[0] == parser.Word("help"):
print self.help
else:
for arg in args:
if not isinstance(arg, parser.FilePath):
raise InspectorError("arguments must all be words, not {0}".format(arg))
if self.minargs is not None and len(args) < self.minargs:
raise InspectorError("too few arguments (at least {0} are required)".format(self.minargs))
            if self.maxargs is not None and len(args) > self.maxargs:
                raise InspectorError("too many arguments (at most {0} are allowed)".format(self.maxargs))
out = self._action(*[x.text for x in args])
if out is not None:
print out
class CommandGroup(Command):
"""A pfainspector command that defers to a group of subcommands."""
def __init__(self, name, commands):
""":type name: string
:param name: name of the group
:type commands: list of titus.inspector.defs.Command
:param commands: commands in this group
"""
self.name = name
self.commands = dict((x.name, x) for x in commands)
if name is None:
self.help = "Commands:\n"
else:
self.help = "{0} gadget (type '{1} help' for details)\nSubcommands under {2}:\n".format(name, name, name)
for x in commands:
self.help += " {0:<20s} {1}\n".format(x.name, x.help.split("\n")[0] if x.help is not None else "")
self.help = self.help.strip()
def complete(self, established, active):
"""Handle tab-complete for the command group: either expanding the subcommand name or deferring to the subcommand's ``complete`` method.
:type established: string
:param established: part of the text that has been established
:type active: string
:param active: part of the text to be completed
:rtype: list of strings
:return: potential completions
"""
word = re.match(r'''\s*([A-Za-z][A-Za-z0-9_]*)\s*''', established)
if word is None:
return [x + " " for x in self.commands if x.startswith(active)]
else:
try:
command = self.commands[word.groups()[0]]
except KeyError:
return []
else:
return command.complete(established[word.end():], active)
def action(self, args):
"""Perform the action associated associated with this command group: descend into the subcommand and call its ``action`` method.
:type args: list of titus.inspector.parser.Ast
:param args: arguments passed to the command
:rtype: ``None``
:return: nothing; results must be printed to the screen
"""
if len(args) == 0:
if self.name is not None:
raise InspectorError("command {0} requires a subcommand".format(self.name))
elif len(args) == 1 and args[0] == parser.Word("help"):
print self.help
elif isinstance(args[0], parser.Word):
try:
command = self.commands[args[0].text]
except KeyError:
if args[0] == parser.Word("help"):
raise InspectorError("help commands should end with the word 'help'")
if self.name is None:
raise InspectorError("no command named {0}".format(args[0].text))
else:
raise InspectorError("command {0} has no subcommand {1}".format(self.name, args[0].text))
else:
command.action(args[1:])
else:
if self.name is None:
raise InspectorError("command must be a word, not {0}".format(args[0]))
else:
raise InspectorError("subcommand of {0} must be a word, not {1}".format(self.name, args[0]))
|
lib/python2.7/site-packages/django/utils/feedgenerator.py | bop/bauhaus | 285 | 12676719 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
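    # e.g. a naive datetime(2002, 9, 8, 0, 0, 1) renders as 'Sun, 08 Sep 2002 00:00:01 -0000'.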
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
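    # e.g. get_tag_uri('http://example.com/posts/1#c3', datetime.datetime(2024, 1, 2))
    # yields 'tag:example.com,2024-01-02:/posts/1/c3'.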
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement("updated", rfc3339_date(item['pubdate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
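# Rough usage sketch (illustrative values only; the keyword arguments follow the
# SyndicationFeed API defined earlier in this module):
#   feed = Atom1Feed(title="Example feed", link="https://example.com/",
#                    description="demo entries", language="en")
#   feed.add_item(title="First entry", link="https://example.com/1",
#                 description="entry body")
#   with open("feed.xml", "w") as fh:
#       feed.write(fh, "utf-8")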
|
skhep/utils/exceptions.py | AdvaitDhingra/scikit-hep | 150 | 12676740 | <filename>skhep/utils/exceptions.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Submodule for useful exceptions
===============================
.. note:: not meant for user code in general, though possible.
"""
# Definition of handy colours for printing
_default = "\x1b[00m"
_green = "\x1b[01;32m"
_red = "\x1b[01;31m"
class InvalidOperationError(Exception):
"""Exception class for meaningless operations."""
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        # Exception instances no longer expose .message on Python 3, so keep a
        # copy of the first argument for the coloured __str__ below.
        self.message = args[0] if args else ""
def __str__(self):
"""String representation."""
return _red + self.message + _default
class SkhepTypeError(Exception):
"""Exception class for non-instantiable classes."""
def __init__(self, name):
Exception.__init__(self, name)
self.message = "'{0}' is an abstract base class. Instantiate one of its subclasses instead.".format(
name
)
def __str__(self):
"""String representation."""
return _red + self.message + _default
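# Illustrative use of these exceptions (names and messages are examples only):
#   raise SkhepTypeError("Vector3D")
#   raise InvalidOperationError("operation not defined for this object")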
|
cubi_tk/__init__.py | eudesbarbosa/cubi-tk | 132 | 12676747 | <gh_stars>100-1000
from ._version import get_versions # type: ignore
__version__ = get_versions()["version"]
del get_versions
|
src/ralph/data_importer/fields.py | pinoatrome/ralph | 1,668 | 12676751 | import logging
from djmoney.money import Money
from import_export import fields
from ralph.settings import DEFAULT_CURRENCY_CODE
logger = logging.getLogger(__name__)
class ThroughField(fields.Field):
def __init__(
self, through_model, through_from_field_name, through_to_field_name,
attribute=None, column_name=None, widget=None,
readonly=False
):
"""
Field for through django model import/export
Args:
through_model: Django through model for M2M relation
            through_from_field_name: field name of the model that is
                currently imported
            through_to_field_name: field name of the model which is added
                as ManyToMany
            attribute: string of either an instance attribute or callable
                off the object
            column_name: lets you provide how this field is named
                in the data source
            widget: defines the widget that will be used to represent field
                data in export
            readonly: boolean value; if True, this field will not be
                assigned to the object during import
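        Example (illustrative sketch; the resource, models and widget below
        are assumed names, not taken from this module)::
            class BaseObjectLicenceResource(resources.ModelResource):
                licence = ThroughField(
                    through_model=BaseObjectLicence,
                    through_from_field_name='base_object',
                    through_to_field_name='licence',
                    attribute='licence',
                    column_name='licence',
                    widget=widgets.ManyToManyWidget(model=Licence),
                )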
"""
self.through_model = through_model
self.through_from_field_name = through_from_field_name
self.through_to_field_name = through_to_field_name
super().__init__(attribute, column_name, widget, readonly)
def save(self, obj, data):
if not self.readonly:
value = data.get(self.column_name)
current = set(self.widget.clean(value))
# filter old assignments to obj by through_model
old_objs = set([
getattr(i, self.through_to_field_name) for i in
self.through_model.objects.filter(
**{self.through_from_field_name: obj}
).select_related(
self.through_to_field_name
)
])
to_add = current - old_objs
to_remove = old_objs - current
to_add_list = []
for i in to_add:
logger.info('Adding %s to %s/%s assignments',
i.pk, self.through_model, obj.pk
)
to_add_list.append(self.through_model(
**{
self.through_from_field_name: obj,
self.through_to_field_name: i
}
))
if to_add_list:
self.through_model.objects.bulk_create(to_add_list)
if to_remove:
logger.warning(
'Removing assignments from %s/%s: %s',
self.through_model, obj.pk, [i.pk for i in to_remove]
)
self.through_model.objects.filter(
**{
self.through_from_field_name: obj,
'{}__in'.format(self.through_to_field_name): to_remove
}
).delete()
class PriceField(fields.Field):
def save(self, obj, data):
price = Money(
data['price'],
data.get('price_currency', DEFAULT_CURRENCY_CODE)
)
setattr(obj, 'price', price)
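# Example row handled by PriceField.save (values are illustrative):
#   {'price': '49.90', 'price_currency': 'PLN'} -> obj.price == Money('49.90', 'PLN')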
|
lte/gateway/c/core/oai/tasks/s1ap/messages/asn1/asn1tostruct.py | Aitend/magma | 849 | 12676755 | <reponame>Aitend/magma<gh_stars>100-1000
#
# Copyright (c) 2015, EURECOM (www.eurecom.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import datetime
import getopt
import getpass
import os
import re
import string
import sys
version = "1.0.2"
lines = ""
iesDefs = {}
ieofielist = {}
choicelist = {}
choiceiesDefs = {}
outdir = './'
filenames = []
verbosity = 0
prefix = ""
FAIL = '\033[91m'
WARN = '\033[93m'
ENDC = '\033[0m'
fileprefix = ""
fileprefix_first_upper = ""
def printFail(string):
sys.stderr.write(FAIL + string + ENDC + "\n")
def printWarning(string):
print(WARN + string + ENDC)
def printDebug(string):
if verbosity > 0:
print(string)
def outputHeaderToFile(f, filename):
now = datetime.datetime.now()
f.write("""/*
* Copyright (c) 2015, EURECOM (www.eurecom.fr)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those
* of the authors and should not be interpreted as representing official policies,
* either expressed or implied, of the FreeBSD Project.
*/
""")
f.write("/*******************************************************************************\n")
f.write(" * This file had been created by asn1tostruct.py script v%s\n" % (version))
f.write(" * Please do not modify this file but regenerate it via script.\n")
f.write(" * Created on: %s by %s\n * from %s\n" % (str(now), getpass.getuser(), filenames))
f.write(" ******************************************************************************/\n")
def lowerFirstCamelWord(word):
""" puts the first word in a CamelCase Word in lowercase.
I.e. CustomerID becomes customerID, XMLInfoTest becomes xmlInfoTest
"""
newstr = ''
swapped = word.swapcase()
idx = 0
# if it's all-caps, return an all-lowered version
lowered = word.lower()
if swapped == lowered:
return lowered
for c in swapped:
if c in string.ascii_lowercase:
newstr += c
idx += 1
else:
break
if idx < 2:
newstr += word[idx:]
else:
newstr = newstr[:-1] + word[idx - 1:]
return newstr
def usage():
print("Python parser for asn1 v%s" % (version))
print("Usage: python asn1tostruct.py [options]")
print("Available options:")
print("-d Enable script debug")
print("-f [file] Input file to parse")
print("-o [dir] Output files to given directory")
print("-h Print this help and return")
try:
opts, args = getopt.getopt(sys.argv[1:], "df:ho:", ["debug", "file", "help", "outdir"])
except getopt.GetoptError as err:
# print help information and exit:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-f", "--file"):
filenames.append(a)
if o in ("-d", "--debug"):
verbosity = 1
if o in ("-o", "--outdir"):
outdir = a
if outdir.rfind('/') != len(outdir):
outdir += '/'
if o in ("-h", "--help"):
usage()
sys.exit(2)
for filename in filenames:
file = open(filename, 'r')
for line in file:
# Removing any comment
if line.find('--') >= 0:
line = line[:line.find('--')]
# Removing any carriage return
lines += re.sub('\r', '', line)
for m in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+SEQUENCE\s+\(\s*SIZE\s*\(\s*\d+\s*\.\.\s*[0-9a-zA-Z-]+\s*\)\s*\)\s*OF\s+[a-zA-Z-]+\s*\{\s*\{\s*([0-9a-zA-Z-]+)\s*\}\s*\}', lines, re.MULTILINE):
ieofielist[m[0]] = m[1]
for m in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+E-RAB-IE-ContainerList\s*\{\s*\{\s*([a-zA-Z0-9-]+)\s*\}\s*\}', lines, re.MULTILINE):
ieofielist[m[0]] = m[1]
for m in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+CHOICE\s*\{', lines, re.MULTILINE):
choicelist[m] = m
for i in re.findall(r'([a-zA-Z0-9-]+)\s+([A-Z0-9-]+)\s*::=\s*\{\s+([\,\|\{\}\t\n\.{3}\ \-a-zA-Z0-9]+)\s+}\n', lines, re.MULTILINE):
ies = []
maxLength = 0
# TODO: handle extensions
if i[1].find('EXTENSION') >= 0:
continue
if fileprefix == "":
fileprefix = i[1][:i[1].find('-')].lower()
for j in re.findall(r'\s*\{\s*([a-zA-Z0-9-\ \t]+)\s*\}\s*[\|,]*', i[2], re.MULTILINE):
for k in re.findall(r'ID\s*([a-zA-Z0-9\-]+)\s*CRITICALITY\s*([a-zA-Z0-9\-]+)\s+[A-Z]+\s+([a-zA-Z0-9\-]+)\s*PRESENCE\s*([a-zA-Z0-9\-]+)', j, re.MULTILINE):
printDebug("Got new ie for message " + i[0] + ": " + str(k))
if len(k[2]) > maxLength:
maxLength = len(k[2])
ies.append(k)
if len(ies) > 0:
iesDefs[i[0]] = {"length": maxLength, "ies": ies}
else:
printWarning("Didn't find any information element for message: " + i[0])
for i in re.findall(r'([a-zA-Z0-9-]+)\s*::=\s*CHOICE\s*\{\s+([\,\|\t\n\.{3}\ \-a-zA-Z0-9]+)\s+}\n', lines, re.MULTILINE):
choiceies = []
for j in re.findall(r'\s*([a-zA-Z0-9-\ \t\n]+)\s*[\|,]*', i[1], re.MULTILINE):
for k in re.findall(r'([a-zA-Z0-9\-]+)\s*([a-zA-Z0-9\-]+)', j, re.MULTILINE):
printDebug("Got new ie for message " + i[0] + ": " + str(k))
choiceies.append(k)
if len(choiceies) > 0:
choiceiesDefs[i[0]] = {"ies": choiceies}
else:
printWarning("Didn't find any information element for message: " + i[0])
if len(iesDefs) == 0:
printFail("No Information Element parsed, exiting")
sys.exit(0)
fileprefix_first_upper = fileprefix[0].upper() + fileprefix[1:]
f = open(outdir + fileprefix + '_ies_defs.h', 'w')
outputHeaderToFile(f, filename)
f.write("#include \"%s_common.h\"\n\n" % (fileprefix))
f.write("#ifndef %s_IES_DEFS_H_\n#define %s_IES_DEFS_H_\n\n" % (fileprefix.upper(), fileprefix.upper()))
f.write("/* Define the version of script used to generate this file */\n")
f.write("#define %s_SCRIPT_VERSION (%s)\n\n" % (fileprefix.upper(), re.sub('\.', '', version)))
for key in iesDefs:
if key not in ieofielist.values():
continue
for (i, j) in ieofielist.items():
if j == key:
break
f.write("typedef struct %sIEs_s {\n" % (re.sub('-', '_', i)))
f.write(" A_SEQUENCE_OF(struct %s_s) %s;\n" % (re.sub('IEs', '', re.sub('-', '_', ieofielist[i])), lowerFirstCamelWord(re.sub('IEs', '', re.sub('-', '_', ieofielist[i])))))
f.write("} %sIEs_t;\n\n" % (re.sub('-', '_', i)))
for key in iesDefs:
keyupperunderscore = re.sub('-', '_', key.upper())
keylowerunderscore = re.sub('-', '_', key.lower())
shift = 0
if len(iesDefs[key]["ies"]) == 0:
continue
# Presence mask
for ie in iesDefs[key]["ies"]:
ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper()
if ie[3] == "optional" or ie[3] == "conditional":
f.write(
"#define {0:<{pad}} {1}\n".format(
"%s_%s_PRESENT" % (keyupperunderscore, ieupperunderscore), "(1 << %d)" % shift,
pad=iesDefs[key]["length"] + len(keyupperunderscore) + 9,
),
)
shift += 1
if (shift > 0):
f.write("\n")
f.write("typedef struct %s_s {\n" % (re.sub('-', '_', key)))
if (shift > 0):
f.write(" {0:<{pad}} {1};\n".format("uint16_t", "presenceMask", pad=iesDefs[key]["length"] + 2))
for ie in iesDefs[key]["ies"]:
ieunderscore = re.sub('-', '_', ie[2])
iename = re.sub('id-', '', ie[0])
ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename))
if ie[2] in ieofielist:
f.write(" %sIEs_t %s;" % (re.sub('-', '_', ie[2]), ienameunderscore))
else:
f.write(" {0:<{pad}} {1};".format("%s_t" % ieunderscore, ienameunderscore, pad=iesDefs[key]["length"] + 2))
if ie[3] == "optional":
f.write(" ///< Optional field")
elif ie[3] == "conditional":
f.write(" ///< Conditional field")
f.write("\n")
f.write("} %s_t;\n\n" % (re.sub('-', '_', key)))
f.write("typedef struct %s_message_s {\n" % (fileprefix))
f.write(" %s_ProcedureCode_t procedureCode;\n" % (fileprefix_first_upper))
f.write(" %s_Criticality_t criticality;\n" % (fileprefix_first_upper))
f.write(" uint8_t direction;\n")
f.write(" union {\n")
messageList = list(iesDefs.keys())
messageList.sort()
for message in messageList:
if message in ieofielist.values():
continue
if len(iesDefs[message]["ies"]) == 0:
continue
f.write(" %s_t %s;\n" % (re.sub('-', '_', message), lowerFirstCamelWord(re.sub('-', '_', message))))
f.write(" } msg;\n")
f.write("} %s_message;\n\n" % (fileprefix))
for key in iesDefs:
if key in ieofielist.values():
continue
structName = re.sub('ies', '', key)
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', re.sub('-IEs', '', key)))
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
keylowerunderscore = re.sub('-', '_', key.lower())
firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
f.write("/** \\brief Decode function for %s ies.\n" % (key))
if len(iesDefs[key]["ies"]) != 0:
f.write(" * \\param %s Pointer to ASN1 structure in which data will be stored\n" % (lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" * \\param any_p Pointer to the ANY value to decode.\n")
f.write(" **/\n")
f.write("int %s_decode_%s(\n" % (fileprefix, keylowerunderscore))
if len(iesDefs[key]["ies"]) != 0:
f.write(" %s_t *%s,\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" ANY_t *any_p);\n\n")
if len(iesDefs[key]["ies"]) == 0:
continue
f.write("/** \\brief Encode function for %s ies.\n" % (key))
f.write(" * \\param %s Pointer to the ASN1 structure.\n" % (firstlower))
f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" **/\n")
f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower())))
f.write(" %s_t *%s,\n" % (asn1cStruct, firstlower))
f.write(" %s_t *%s);\n\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key))))
for key in iesDefs:
if key not in ieofielist.values():
continue
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key))
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
f.write("/** \\brief Encode function for %s ies.\n" % (key))
f.write(" * \\param %s Pointer to the ASN1 structure.\n" % (firstlower))
f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" **/\n")
f.write("int %s_encode_%s(\n" % (fileprefix, firstlower.lower()))
f.write(" %s_t *%s,\n" % (asn1cStruct, firstlower))
f.write(" %sIEs_t *%sIEs);\n\n" % (asn1cStruct, firstlower))
f.write("/** \\brief Decode function for %s ies.\n" % (key))
f.write(" * \\param any_p Pointer to the ANY value to decode.\n")
f.write(" * \\param callback Callback function called when any_p is successfully decoded.\n")
f.write(" **/\n")
f.write("int %s_decode_%s(\n" % (fileprefix, firstlower.lower()))
f.write(" %sIEs_t *%sIEs,\n" % (asn1cStruct, firstlower))
f.write(" %s_t *%s);\n\n" % (asn1cStruct, lowerFirstCamelWord(asn1cStruct)))
for key in choiceiesDefs:
if key not in choicelist.values():
continue
keyname = re.sub('IEs', '', key)
f.write("/** \\brief Decode function for %s ies.\n" % (key))
f.write(" * \\param %s_p pointer to buffer to decode.\n" % (lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" * \\param %s pointer to store the value after decode.\n" % (lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" **/\n")
f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', keyname).lower()))
f.write(" %s_t *%s,\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" %s_t *%s_p);\n\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '', keyname))))
for key in iesDefs:
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key))
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
if key in ieofielist.values():
f.write("/** \\brief Display %s encapsulated IE using XER encoding.\n" % (asn1cStruct))
f.write(" * \\param %s Pointer to the IES structure.\n" % (lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" * \\param file File descriptor to write output.\n")
f.write(" **/\n")
f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('item', 'list', firstlower.lower())))
f.write(" asn_app_consume_bytes_f *cb,\n")
f.write(" void *app_key,\n")
f.write(" %sIEs_t *%sIEs);\n\n" % (re.sub('item', 'list', asn1cStruct), firstlower))
else:
f.write("/** \\brief Display %s message using XER encoding.\n" % (asn1cStruct))
f.write(" * \\param message_p Pointer to root message.\n")
f.write(" * \\param file File descriptor to write output.\n")
f.write(" **/\n")
f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, firstlower.lower()))
f.write(" asn_app_consume_bytes_f *cb,\n")
f.write(" void *app_key,\n")
f.write(" %s_message *message_p);\n\n" % (fileprefix))
f.write("int %s_xer__print2sp(const void *buffer, size_t size, void *app_key);\n\n" % (fileprefix.lower()))
f.write("int %s_xer__print2fp(const void *buffer, size_t size, void *app_key);\n\n" % (fileprefix.lower()))
f.write("extern size_t %s_string_total_size;\n\n" % (fileprefix.lower()))
for key in iesDefs:
if len(iesDefs[key]["ies"]) == 0:
continue
keyupperunderscore = re.sub('-', '_', key.upper())
keylowerunderscore = re.sub('-', '_', key.lower())
structName = re.sub('ies', '', key, flags=re.IGNORECASE)
f.write("int free_%s(\n" % (re.sub('-', '_', structName.lower())))
f.write(" %s_t *%s);\n\n" % (prefix + re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key))))
f.write("#endif /* %s_IES_DEFS_H_ */\n\n" % (fileprefix.upper()))
# Generate Decode functions
f = open(outdir + fileprefix + '_decoder.c', 'w')
outputHeaderToFile(f, filename)
f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n#include \"log.h\"\n\n" % (fileprefix, fileprefix))
for key in iesDefs:
if key in ieofielist.values():
continue
structName = re.sub('ies', '', key)
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key))
if asn1cStruct.rfind('_') == len(asn1cStruct) - 1:
asn1cStruct = asn1cStruct[:-1]
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
ielistname = re.sub('UE', 'ue', asn1cStruct)
ielistnamefirstlower = ielistname[:1].lower() + ielistname[1:]
asn1cStructfirstlower = asn1cStruct[:1].lower() + asn1cStruct[1:]
keyName = re.sub('-', '_', key)
keyupperunderscore = keyName.upper()
firstlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
iesaccess = ""
if key not in ieofielist.values():
iesaccess = "%s_ies." % (firstlower)
f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower())))
if len(iesDefs[key]["ies"]) != 0:
f.write(" %s_t *%s,\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" ANY_t *any_p) {\n\n")
f.write(" %s_t %s;\n %s_t *%s_p = &%s;\n" % (asn1cStruct, asn1cStructfirstlower, asn1cStruct, asn1cStructfirstlower, asn1cStructfirstlower))
f.write(" int i, decoded = 0;\n")
if len(iesDefs[key]["ies"]) != 0:
f.write(" int tempDecoded = 0;\n")
f.write(" assert(any_p != NULL);\n")
if len(iesDefs[key]["ies"]) != 0:
f.write(" assert(%s != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" memset(%s, 0, sizeof(%s_t));\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), prefix + re.sub('-', '_', key)))
f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding message %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n\n" % (fileprefix.upper(), re.sub('-', '_', keyName)))
f.write(" ANY_to_type_aper(any_p, &asn_DEF_%s, (void**)&%s_p);\n\n" % (asn1cStruct, asn1cStructfirstlower))
f.write(" for (i = 0; i < %s_p->%slist.count; i++) {\n" % (asn1cStructfirstlower, iesaccess))
f.write(" %s_IE_t *ie_p;\n" % (fileprefix[0].upper() + fileprefix[1:]))
f.write(" ie_p = %s_p->%slist.array[i];\n" % (asn1cStructfirstlower, iesaccess))
f.write(" switch(ie_p->id) {\n")
for ie in iesDefs[key]["ies"]:
iename = re.sub('id-', '', ie[0])
ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename))
ienameunderscorefirstlower = lowerFirstCamelWord(ienameunderscore)
ietypesubst = re.sub('-', '', ie[2])
ietypeunderscore = re.sub('-', '_', ie[2])
ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper()
if ie[3] == "optional":
f.write(" /* Optional field */\n")
elif ie[3] == "conditional":
f.write(" /* Conditional field */\n")
f.write(" case %s_ProtocolIE_ID_%s:\n" % (fileprefix_first_upper, re.sub('-', '_', ie[0])))
f.write(" {\n")
f.write(" %s_t *%s_p = NULL;\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst)))
if ie[3] != "mandatory":
f.write(" %s->presenceMask |= %s_%s_PRESENT;\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), keyupperunderscore, ieupperunderscore))
f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst)))
f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n" % (lowerFirstCamelWord(ietypesubst)))
f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of IE %s failed\\n\");\n" % (fileprefix.upper(), ienameunderscore))
f.write(" if (%s_p)\n" % (lowerFirstCamelWord(ietypesubst)))
f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst)))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" decoded += tempDecoded;\n")
f.write(" if (asn1_xer_print)\n")
f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n" % (ietypeunderscore, lowerFirstCamelWord(ietypesubst)))
if ie[2] in (list(ieofielist.keys()) + list(choicelist.keys())):
if ie[2] in choicelist.keys():
f.write(" if (%s_decode_%s(&%s->%s, %s_p) < 0) {\n" % (fileprefix, ietypeunderscore.lower(), lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore, lowerFirstCamelWord(ietypesubst)))
f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n" % (fileprefix.upper(), lowerFirstCamelWord(ietypesubst)))
f.write(" }\n")
f.write(
" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" %
(ietypeunderscore, lowerFirstCamelWord(ietypesubst)),
)
elif ie[2] in ieofielist.keys():
f.write(" if (%s_decode_%s(&%s->%s, %s_p) < 0) {\n" % (fileprefix, ietypeunderscore.lower(), lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore, lowerFirstCamelWord(ietypesubst)))
f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n" % (fileprefix.upper(), lowerFirstCamelWord(ietypesubst)))
f.write(" }\n")
f.write(
" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" %
(ietypeunderscore, lowerFirstCamelWord(ietypesubst)),
)
else:
f.write(" memcpy(&%s->%s, %s_p, sizeof(%s_t));\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore, lowerFirstCamelWord(ietypesubst), ietypeunderscore))
f.write(" FREEMEM(%s_p);\n" % (lowerFirstCamelWord(ietypesubst)))
f.write(" %s_p = NULL;\n" % (lowerFirstCamelWord(ietypesubst)))
f.write(" } break;\n")
f.write(" default:\n")
f.write(" OAILOG_ERROR (LOG_%s, \"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n" % (fileprefix.upper(), re.sub('-', '_', structName.lower())))
f.write(" }\n")
f.write(" }\n")
f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (asn1cStruct, asn1cStructfirstlower))
f.write(" return decoded;\n")
f.write("}\n\n")
# Generate free functions for encapsulated IEs
for key in iesDefs:
if key not in ieofielist.values():
continue
if len(iesDefs[key]["ies"]) == 0:
continue
# TODO: Check if the encapsulated IE also contains further encap.
ie = iesDefs[key]["ies"][0]
ietypeunderscore = prefix + re.sub('-', '_', ie[2])
keyname = re.sub('IEs', '', re.sub('Item', 'List', key))
iesStructName = lowerFirstCamelWord(re.sub('Item', 'List', re.sub('-', '_', key)))
f.write("int free_%s(\n" % (re.sub('-', '_', keyname).lower()))
f.write(" %sIEs_t *%s) {\n\n" % (re.sub('-', '_', keyname), iesStructName))
f.write(" assert(%s != NULL);\n\n" % (iesStructName))
f.write(
" for (int i = 0; i < %s->%s.count; i++) {\n" %
(
iesStructName,
re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))),
),
)
f.write(
" ASN_STRUCT_FREE(asn_DEF_%s, %s->%s.array[i]);\n" %
(
ietypeunderscore,
iesStructName,
re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))),
),
)
f.write(" }\n")
f.write(
" free(%s->%s.array);\n" %
(iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))),
)
f.write(" return 0;\n")
f.write("}\n\n")
for key in iesDefs:
if len(iesDefs[key]["ies"]) == 0:
continue
keyupperunderscore = re.sub('-', '_', key.upper())
keylowerunderscore = re.sub('-', '_', key.lower())
structName = re.sub('ies', '', key, flags=re.IGNORECASE)
f.write("int free_%s(\n" % (re.sub('-', '_', structName.lower())))
f.write(
" %s_t *%s) {\n\n" % (
prefix + re.sub('-', '_', key),
lowerFirstCamelWord(re.sub('-', '_', key)),
),
)
for ie in iesDefs[key]["ies"]:
ietypeunderscore = prefix + re.sub('-', '_', ie[2])
ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper()
if ie[3] != "mandatory":
if ie[3] == "optional":
f.write(" /* Optional field */\n")
elif ie[3] == "conditional":
f.write(" /* Conditional field */\n")
f.write(" if ((%s->presenceMask & %s_%s_PRESENT)\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), keyupperunderscore, ieupperunderscore))
f.write(" == %s_%s_PRESENT) \n " % (keyupperunderscore, ieupperunderscore))
iename = re.sub('id-', '', ie[0])
ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename))
# Check if this is an encapsulated IE, if so call the free function.
if ie[2] in ieofielist.keys():
keyname = re.sub('IEs', '', re.sub('Item', 'List', ie[2]))
f.write(
" free_%s(&%s->%s);\n" % (
re.sub('-', '_', keyname).lower(),
lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore,
),
)
else:
f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s->%s);\n" % (ietypeunderscore, lowerFirstCamelWord(re.sub('-', '_', key)), ienameunderscore))
f.write(" return 0;\n")
f.write("}\n\n")
for key in iesDefs:
if key not in ieofielist.values():
continue
keyname = re.sub('IEs', '', re.sub('Item', 'List', key))
f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', keyname).lower()))
f.write(" %sIEs_t *%sIEs,\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" %s_t *%s) {\n\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" int i, decoded = 0;\n")
f.write(" int tempDecoded = 0;\n\n")
f.write(" assert(%s != NULL);\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" assert(%sIEs != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" for (i = 0; i < %s->list.count; i++) {\n" % (lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" %s_IE_t *ie_p = %s->list.array[i];\n" % (fileprefix[0].upper() + fileprefix[1:], lowerFirstCamelWord(re.sub('-', '_', keyname))))
f.write(" switch (ie_p->id) {\n")
for ie in iesDefs[key]["ies"]:
iename = re.sub('id-', '', ie[0])
ienameunderscore = lowerFirstCamelWord(re.sub('-', '_', iename))
f.write(" case %s_ProtocolIE_ID_%s:\n" % (fileprefix_first_upper, re.sub('-', '_', ie[0])))
f.write(" {\n")
f.write(" %s_t *%s_p = NULL;\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of IE %s for message %s failed\\n\");\n" % (fileprefix.upper(), ienameunderscore, re.sub('-', '_', keyname)))
f.write(" if (%s_p)\n" % (lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" decoded += tempDecoded;\n")
f.write(" if (asn1_xer_print)\n")
f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n" % (re.sub('-', '_', ie[2]), lowerFirstCamelWord(re.sub('-', '', ie[2]))))
f.write(
" ASN_SEQUENCE_ADD(&%sIEs->%s, %s_p);\n" % (
lowerFirstCamelWord(re.sub('-', '_', keyname)),
re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))), lowerFirstCamelWord(re.sub('-', '', ie[2])),
),
)
f.write(" } break;\n")
f.write(" default:\n")
f.write(" OAILOG_ERROR (LOG_%s, \"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n" % (fileprefix.upper(), re.sub('-', '_', structName.lower())))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" }\n")
f.write(" return decoded;\n")
f.write("}\n\n")
for key in choiceiesDefs:
if key not in choicelist.values():
continue
keyname = re.sub('IEs', '', key)
f.write("int %s_decode_%s(\n" % (fileprefix, re.sub('-', '_', keyname).lower()))
f.write(" %s_t *%s,\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" %s_t *%s_p) {\n\n" % (re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" assert(%s_p != NULL);\n" % (lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" assert(%s != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding choice %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n" % (fileprefix.upper(), re.sub('-', '_', keyname)))
f.write(" %s->present = %s_p->present;\n\n" % (lowerFirstCamelWord(re.sub('-', '', keyname)), lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" switch (%s_p->present) {\n" % (lowerFirstCamelWord(re.sub('-', '', keyname))))
for ie in choiceiesDefs[key]["ies"]:
iename = re.sub('-', '_', ie[0])
f.write(" case %s_PR_%s:\n" % (re.sub('-', '_', keyname), iename))
f.write(" {\n")
if ie[1] in ieofielist.keys():
ienameunderscore = re.sub('-', '_', ie[1])
f.write(" if (%s_decode_%s((%sIEs_t *)&%s->choice.%s, &%s_p->choice.%s) < 0) {\n" % (fileprefix, ienameunderscore.lower(), ienameunderscore, lowerFirstCamelWord(re.sub('-', '', keyname)), iename, lowerFirstCamelWord(re.sub('-', '', keyname)), iename))
f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n" % (fileprefix.upper(), lowerFirstCamelWord(ienameunderscore)))
f.write(" return -1;\n")
f.write(" }\n")
else:
f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n" % (fileprefix.upper(), re.sub('-', '_', iename)))
f.write(" memcpy(%s, %s_p, sizeof(%s_t));\n" % (lowerFirstCamelWord(re.sub('-', '', keyname)), lowerFirstCamelWord(re.sub('-', '', keyname)), re.sub('-', '_', keyname)))
f.write(" } break;\n")
f.write(" default:\n")
f.write(" OAILOG_ERROR (LOG_%s, \"Unknown choice type (%%d) for %s\\n\", (int)%s_p->present);\n" % (fileprefix.upper(), re.sub('-', '_', keyname), lowerFirstCamelWord(re.sub('-', '', keyname))))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" return 0;\n")
f.write("}\n\n")
# Generate IES Encode functions
f = open(outdir + fileprefix + '_encoder.c', 'w')
outputHeaderToFile(f, filename)
f.write("#include \"%s_common.h\"\n" % (fileprefix))
f.write("#include \"%s_ies_defs.h\"\n\n" % (fileprefix))
for key in iesDefs:
if key in ieofielist.values():
continue
structName = re.sub('ies', '', key)
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', key))
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
if asn1cStruct.rfind('_') == len(asn1cStruct) - 1:
asn1cStruct = asn1cStruct[:-1]
asn1cStructfirstlower = asn1cStruct[:1].lower() + asn1cStruct[1:]
firstwordlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
iesaccess = ""
if key not in ieofielist.values():
iesaccess = "%s_ies." % (firstwordlower)
keyName = re.sub('-', '_', key)
keyupperunderscore = keyName.upper()
# No IE to encode...
if len(iesDefs[key]["ies"]) == 0:
continue
f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', structName.lower())))
f.write(" %s_t *%s,\n" % (asn1cStruct, firstwordlower))
f.write(" %s_t *%s) {\n\n" % (re.sub('-', '_', key), lowerFirstCamelWord(re.sub('-', '_', key))))
f.write(" %s_IE_t *ie;\n\n" % (fileprefix_first_upper))
f.write(" assert(%s != NULL);\n" % (firstwordlower));
f.write(" assert(%s != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', key))));
for ie in iesDefs[key]["ies"]:
iename = re.sub('-', '_', re.sub('id-', '', ie[0]))
ienameunderscore = re.sub('-', '_', iename)
ienamefirstwordlower = lowerFirstCamelWord(iename)
ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper()
ietypeunderscore = re.sub('-', '_', ie[2])
if ie[3] != "mandatory":
if ie[3] == "optional":
f.write(" /* Optional field */\n")
elif ie[3] == "conditional":
f.write(" /* Conditional field */\n")
f.write(" if (%s->presenceMask & %s_%s_PRESENT) {\n\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), keyupperunderscore, ieupperunderscore))
#f.write(" == %s_%s_PRESENT) {\n" % (keyupperunderscore, ieupperunderscore))
if ie[2] in ieofielist.keys():
f.write(" %s_t %s;\n" % (ietypeunderscore, ienamefirstwordlower))
f.write(" memset(&%s, 0, sizeof(%s_t));\n" % (ienamefirstwordlower, ietypeunderscore))
f.write("\n")
f.write(" if (%s_encode_%s(&%s, &%s->%s) < 0) return -1;\n" % (fileprefix, ietypeunderscore.lower(), ienamefirstwordlower, lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower))
f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0])))
f.write(" %s_Criticality_%s,\n" % (fileprefix_first_upper, ie[1]))
f.write(" &asn_DEF_%s,\n" % (ietypeunderscore))
#f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower))
if ie[2] in ieofielist.keys():
f.write(" &%s)) == NULL) {\n" % (ienamefirstwordlower))
else:
f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n" % (firstwordlower, iesaccess))
if ie[2] in ieofielist.keys():
f.write(" /* Free any dynamic allocation that is no more used */\n")
f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s);\n" % (ietypeunderscore, ienamefirstwordlower))
f.write(" }\n\n")
else:
if ie[2] in ieofielist.keys():
f.write(" %s_t %s;\n\n" % (ietypeunderscore, ienamefirstwordlower))
f.write(" memset(&%s, 0, sizeof(%s_t));\n" % (ienamefirstwordlower, ietypeunderscore))
f.write("\n")
f.write(" if (%s_encode_%s(&%s, &%s->%s) < 0) return -1;\n" % (fileprefix, ietypeunderscore.lower(), ienamefirstwordlower, lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower))
f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0])))
f.write(" %s_Criticality_%s,\n" % (fileprefix_first_upper, ie[1]))
f.write(" &asn_DEF_%s,\n" % (ietypeunderscore))
if ie[2] in ieofielist.keys():
f.write(" &%s)) == NULL) {\n" % (ienamefirstwordlower))
else:
f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n\n" % (firstwordlower, iesaccess))
if ie[2] in ieofielist.keys():
f.write(" /* Free any dynamic allocation that is no more used */\n")
f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s);\n\n" % (ietypeunderscore, ienamefirstwordlower))
f.write(" return 0;\n")
f.write("}\n\n")
for (key, value) in iesDefs.items():
if key not in ieofielist.values():
continue
ie = value["ies"][0]
ietypeunderscore = re.sub('-', '_', ie[2])
asn1cStruct = re.sub('-', '_', re.sub('IEs', '', re.sub('-IEs', '', key)))
asn1cStruct = re.sub('Item', 'List', asn1cStruct)
firstwordlower = re.sub('Item', 'List', re.sub('enb', 'eNB', lowerFirstCamelWord(asn1cStruct)))
for (i, j) in ieofielist.items():
if j == key:
break
f.write("int %s_encode_%s(\n" % (fileprefix, re.sub('-', '_', i).lower()))
f.write(" %s_t *%s,\n" % (asn1cStruct, firstwordlower))
f.write(" %sIEs_t *%sIEs) {\n\n" % (re.sub('-', '_', i), lowerFirstCamelWord(re.sub('-', '_', i))))
f.write(" int i;\n")
f.write(" %s_IE_t *ie;\n\n" % (fileprefix_first_upper))
f.write(" assert(%s != NULL);\n" % (firstwordlower));
f.write(" assert(%sIEs != NULL);\n\n" % (lowerFirstCamelWord(re.sub('-', '_', i))));
f.write(" for (i = 0; i < %sIEs->%s.count; i++) {\n" % (firstwordlower, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))))
f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n" % (fileprefix, fileprefix_first_upper, re.sub('-', '_', ie[0])))
f.write(" %s_Criticality_%s,\n" % (fileprefix_first_upper, ie[1]))
f.write(" &asn_DEF_%s,\n" % (ietypeunderscore))
f.write(" %sIEs->%s.array[i])) == NULL) {\n" % (firstwordlower, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))))
f.write(" return -1;\n")
f.write(" }\n")
f.write(" ASN_SEQUENCE_ADD(&%s->list, ie);\n" % (firstwordlower))
f.write(" }\n")
f.write(" return 0;\n")
f.write("}\n\n")
# Generate xer print functions
f = open(outdir + fileprefix + '_xer_print.c', 'w')
outputHeaderToFile(f, filename)
f.write("#include <stdlib.h>\n")
f.write("#include <stdio.h>\n\n")
f.write("#include <asn_application.h>\n#include <asn_internal.h>\n\n")
f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n\n" % (fileprefix, fileprefix))
f.write("size_t %s_string_total_size = 0;\n\n" % (fileprefix.lower()))
f.write(
"""int
%s_xer__print2fp(const void *buffer, size_t size, void *app_key) {
FILE *stream = (FILE *)app_key;
if(fwrite(buffer, 1, size, stream) != size)
return -1;
return 0;
}
""" % (fileprefix.lower()),
)
f.write(
"""int %s_xer__print2sp(const void *buffer, size_t size, void *app_key) {
char *string = (char *)app_key;
/* Copy buffer to the formatted string */
memcpy(&string[%s_string_total_size], buffer, size);
%s_string_total_size += size;
return 0;
}
""" % (fileprefix.lower(), fileprefix.lower(), fileprefix.lower()),
)
f.write("""static asn_enc_rval_t
xer_encode_local(asn_TYPE_descriptor_t *td, void *sptr,
asn_app_consume_bytes_f *cb, void *app_key, int indent) {
asn_enc_rval_t er, tmper;
const char *mname;
size_t mlen;
int xcan = 2;
if(!td || !sptr) goto cb_failed;
mname = td->xml_tag;
mlen = strlen(mname);
_i_ASN_TEXT_INDENT(0, indent);
_ASN_CALLBACK3("<", 1, mname, mlen, ">", 1);
tmper = td->xer_encoder(td, sptr, indent + 1, XER_F_BASIC, cb, app_key);
if(tmper.encoded == -1) return tmper;
_ASN_CALLBACK3("</", 2, mname, mlen, ">\\n", xcan);
er.encoded = 4 + xcan + (2 * mlen) + tmper.encoded;
_ASN_ENCODED_OK(er);
cb_failed:
_ASN_ENCODE_FAILED;
}
""")
for (key, value) in iesDefs.items():
keyName = re.sub('-', '_', key)
keyupperunderscore = keyName.upper()
iesStructName = lowerFirstCamelWord(re.sub('-', '_', key))
ie = value["ies"][0]
ietypeunderscore = re.sub('-', '_', ie[2])
if key in ieofielist.values():
f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('ies', '', re.sub('item', 'list', re.sub('-', '_', key).lower()))))
else:
f.write("asn_enc_rval_t %s_xer_print_%s(\n" % (fileprefix, re.sub('ies', '', re.sub('-', '_', key).lower())))
#f.write(" FILE *file,\n")
f.write(" asn_app_consume_bytes_f *cb,\n")
f.write(" void *app_key,\n")
if key in ieofielist.values():
iesStructName = lowerFirstCamelWord(re.sub('Item', 'List', re.sub('-', '_', key)))
f.write(" %sIEs_t *%s) {\n\n" % (re.sub('IEs', '', re.sub('Item', 'List', re.sub('-', '_', key))), iesStructName))
f.write(" int i;\n")
f.write(" asn_enc_rval_t er;\n")
else:
f.write(" %s_message *message_p)\n{\n" % (fileprefix))
f.write(" %s_t *%s;\n" % (re.sub('-', '_', key), iesStructName))
f.write(" asn_enc_rval_t er;\n")
#f.write(" void *app_key = (void *)file;\n")
#f.write(" asn_app_consume_bytes_f *cb = %s_xer__print2fp;\n\n" % (fileprefix.lower()))
f.write(" %s = &message_p->msg.%s;\n\n" % (iesStructName, iesStructName))
if key in ieofielist.values():
# Increase indentation level
f.write(" for (i = 0; i < %s->%s.count; i++) {\n" % (iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))))
#f.write(" xer_fprint(file, &asn_DEF_%s, %s->%s.array[i]);\n" % (ietypeunderscore, iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))))
f.write(" er = xer_encode(&asn_DEF_%s, %s->%s.array[i], XER_F_BASIC, cb, app_key);\n" % (ietypeunderscore, iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key)))))
f.write(" }\n")
else:
f.write(" cb(\"<%s-PDU>\\n\", %d, app_key);\n" % (key, len("<%s-PDU>\n" % (key))))
f.write(" xer_encode_local(&asn_DEF_%s_Criticality, &message_p->criticality, cb, app_key, 1);\n" % fileprefix_first_upper)
f.write(" xer_encode_local(&asn_DEF_%s_ProcedureCode, &message_p->procedureCode, cb, app_key, 1);\n" % fileprefix_first_upper)
f.write(" cb(\" <%s>\\n\", %d, app_key);\n" % (key, len(" <%s>\n" % (key))))
for ie in iesDefs[key]["ies"]:
iename = re.sub('-', '_', re.sub('id-', '', ie[0]))
ienameunderscore = re.sub('-', '_', iename)
ienamefirstwordlower = lowerFirstCamelWord(iename)
ietypeunderscore = re.sub('-', '_', ie[2])
ieupperunderscore = re.sub('-', '_', re.sub('id-', '', ie[0])).upper()
if ie[3] != "mandatory":
if ie[3] == "optional":
f.write(" /* Optional field */\n")
elif ie[3] == "conditional":
f.write(" /* Conditional field */\n")
f.write(" if (%s->presenceMask & %s_%s_PRESENT)\n " % (iesStructName, keyupperunderscore, ieupperunderscore))
# Is it an encapsulated IE ?
if ie[2] in ieofielist.keys():
f.write(" %s_xer_print_%s(cb, app_key, &%s->%s);\n" % (fileprefix, re.sub('ies', '', re.sub('-', '_', ie[2]).lower()), iesStructName, ienamefirstwordlower))
else:
f.write(" xer_encode_local(&asn_DEF_%s, &%s->%s, cb, app_key, 2);\n" % (ietypeunderscore, iesStructName, ienamefirstwordlower))
f.write(" cb(\" </%s>\\n\", %d, app_key);\n" % (key, len(" </%s>\n" % (key))))
f.write(" cb(\"</%s-PDU>\\n\", %d, app_key);\n" % (key, len("</%s-PDU>\n" % (key))))
f.write(" _ASN_ENCODED_OK(er);\n")
# if key not in ieofielist.values():
# f.write("cb_failed:\n")
#f.write(" return er;\n")
f.write("}\n\n")
|
dizoo/box2d/lunarlander/envs/__init__.py | LuciusMos/DI-engine | 464 | 12676763 | <reponame>LuciusMos/DI-engine
from .lunarlander_env import LunarLanderEnv
|
python36/12_val/BCC97_simulation.py | aborodya/dawp | 484 | 12676781 | <reponame>aborodya/dawp
#
# Monte Carlo Simulation of BCC97 Model
# 12_val/BCC97_simulation.py
#
# (c) Dr. <NAME>
# Derivatives Analytics with Python
#
import sys
import math
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append('11_cal')
from H93_calibration import S0, kappa_r, theta_r, sigma_r, r0
mpl.rcParams['font.family'] = 'serif'
#
# Model Parameters
#
opt = np.load('11_cal/opt_full.npy')
kappa_v, theta_v, sigma_v, rho, v0, lamb, mu, delta = opt
#
# Simulation Parameters
#
T = 1.0 # time horizon
M = 25 # time steps
I = 10000 # number of replications per valuation
anti_paths = True # antithetic paths for variance reduction
moment_matching = True # moment matching for variance reduction
np.random.seed(100000) # seed value for random number generator
#
# Random Number Generation
#
def generate_cholesky(rho):
''' Function to generate Cholesky matrix.
Parameters
==========
rho: float
correlation between index level and variance
Returns
=======
matrix: NumPy array
Cholesky matrix
'''
rho_rs = 0 # correlation between index level and short rate
    covariance = np.zeros((4, 4), dtype=float)
covariance[0] = [1.0, rho_rs, 0.0, 0.0]
covariance[1] = [rho_rs, 1.0, rho, 0.0]
covariance[2] = [0.0, rho, 1.0, 0.0]
covariance[3] = [0.0, 0.0, 0.0, 1.0]
cho_matrix = np.linalg.cholesky(covariance)
return cho_matrix
def random_number_generator(M, I, anti_paths, moment_matching):
''' Function to generate pseudo-random numbers.
Parameters
==========
M: int
time steps
I: int
number of simulation paths
anti_paths: bool
flag for antithetic paths
moment_matching: bool
flag for moment matching
Returns
=======
rand: NumPy array
random number array
'''
if anti_paths:
rand = np.random.standard_normal((4, M + 1, int(I / 2)))
rand = np.concatenate((rand, -rand), 2)
else:
rand = np.random.standard_normal((4, M + 1, I))
if moment_matching:
for a in range(4):
rand[a] = rand[a] / np.std(rand[a])
rand[a] = rand[a] - np.mean(rand[a])
return rand
#
# Function for Short Rate and Volatility Processes
#
def SRD_generate_paths(x0, kappa, theta, sigma, T, M, I,
rand, row, cho_matrix):
    ''' Function to simulate Square-Root Diffusion (SRD/CIR) process.
Parameters
==========
x0: float
initial value
kappa: float
mean-reversion factor
theta: float
long-run mean
sigma: float
volatility factor
T: float
final date/time horizon
M: int
number of time steps
I: int
        number of paths
    rand: NumPy array
        random number array
    row: int
row number for random numbers
cho_matrix: NumPy array
cholesky matrix
Returns
=======
x: NumPy array
simulated variance paths
'''
dt = T / M
    x = np.zeros((M + 1, I), dtype=float)
x[0] = x0
xh = np.zeros_like(x)
xh[0] = x0
sdt = math.sqrt(dt)
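    # Full-truncation Euler discretization: the process value is floored at
    # zero in the drift and diffusion terms; the floored value is stored in x
    # while the unfloored xh is propagated.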
for t in range(1, M + 1):
ran = np.dot(cho_matrix, rand[:, t])
xh[t] = (xh[t - 1] + kappa * (theta -
np.maximum(0, xh[t - 1])) * dt +
np.sqrt(np.maximum(0, xh[t - 1])) * sigma * ran[row] * sdt)
x[t] = np.maximum(0, xh[t])
return x
#
# Function for B96 Index Process
#
def B96_generate_paths(S0, r, v, lamb, mu, delta, rand, row1, row2,
cho_matrix, T, M, I, moment_matching):
''' Simulation of Bates (1996) index process.
Parameters
==========
S0: float
initial value
r: NumPy array
simulated short rate paths
v: NumPy array
simulated variance paths
lamb: float
jump intensity
mu: float
expected jump size
delta: float
standard deviation of jump
rand: NumPy array
random number array
row1, row2: int
rows/matrices of random number array to use
cho_matrix: NumPy array
Cholesky matrix
T: float
time horizon, maturity
M: int
number of time intervals, steps
I: int
number of paths to simulate
moment_matching: bool
flag for moment matching
Returns
=======
S: NumPy array
simulated index level paths
'''
    S = np.zeros((M + 1, I), dtype=float)
S[0] = S0
dt = T / M
sdt = math.sqrt(dt)
ranp = np.random.poisson(lamb * dt, (M + 1, I))
bias = 0.0
for t in range(1, M + 1, 1):
ran = np.dot(cho_matrix, rand[:, t, :])
if moment_matching:
bias = np.mean(np.sqrt(v[t]) * ran[row1] * sdt)
S[t] = S[t - 1] * (np.exp(((r[t] + r[t - 1]) / 2 - 0.5 * v[t]) * dt +
np.sqrt(v[t]) * ran[row1] * sdt - bias) +
(np.exp(mu + delta * ran[row2]) - 1) * ranp[t])
return S
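# Hedged add-on (not part of the original chapter code): with the simulated
# paths above, a European call on the index could be valued by plain Monte
# Carlo roughly as follows; the function name and the strike K are assumptions.
def B96_call_value_mcs(K, S, r, T, M):
    ''' Illustrative Monte Carlo estimator of a European call value,
    discounting the terminal payoff with the simulated short-rate paths.
    '''
    dt = T / M
    # path-wise discount factors via trapezoidal integration of the short rate
    D = np.exp(-np.sum((r[1:] + r[:-1]) / 2, axis=0) * dt)
    payoff = np.maximum(S[-1] - K, 0)
    return np.mean(D * payoff)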
if __name__ == '__main__':
#
# Simulation
#
cho_matrix = generate_cholesky(rho)
rand = random_number_generator(M, I, anti_paths, moment_matching)
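    # rows of rand (see generate_cholesky): 0 = short rate, 1 = index level,
    # 2 = variance, 3 = jump component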
r = SRD_generate_paths(r0, kappa_r, theta_r, sigma_r, T, M, I,
rand, 0, cho_matrix)
v = SRD_generate_paths(v0, kappa_v, theta_v, sigma_v, T, M, I,
rand, 2, cho_matrix)
S = B96_generate_paths(S0, r, v, lamb, mu, delta, rand, 1, 3,
cho_matrix, T, M, I, moment_matching)
def plot_rate_paths(r):
plt.figure(figsize=(10, 6))
plt.plot(r[:, :10])
plt.xlabel('time step')
plt.ylabel('short rate level')
plt.title('Short Rate Simulated Paths')
def plot_volatility_paths(v):
plt.figure(figsize=(10, 6))
plt.plot(np.sqrt(v[:, :10]))
plt.xlabel('time step')
plt.ylabel('volatility level')
plt.title('Volatility Simulated Paths')
def plot_index_paths(S):
plt.figure(figsize=(10, 6))
plt.plot(S[:, :10])
plt.xlabel('time step')
plt.ylabel('index level')
plt.title('EURO STOXX 50 Simulated Paths')
def plot_index_histogram(S):
plt.figure(figsize=(10, 6))
plt.hist(S[-1], bins=30)
plt.xlabel('index level')
plt.ylabel('frequency')
plt.title('EURO STOXX 50 Values after 1 Year')
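    # The plotting helpers above are not called automatically; for a quick
    # visual check one could run, for example:
    #   plot_index_paths(S)
    #   plot_index_histogram(S)
    #   plt.show()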
|
onmt/decoders/cnn_decoder.py | TAEYOUNG-SYG/OpenNMT-py | 194 | 12676796 | <reponame>TAEYOUNG-SYG/OpenNMT-py
"""
Implementation of the CNN Decoder part of
"Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
import onmt.modules
from onmt.decoders.decoder import DecoderState
from onmt.utils.misc import aeq
from onmt.utils.cnn_factory import shape_transform, GatedConv
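# Residual scaling factor from the ConvS2S paper: multiplying the sum of two
# equal-variance terms by sqrt(0.5) keeps the overall variance roughly constant.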
SCALE_WEIGHT = 0.5 ** 0.5
class CNNDecoder(nn.Module):
"""
Decoder built on CNN, based on :cite:`DBLP:journals/corr/GehringAGYD17`.
Consists of residual convolutional layers, with ConvMultiStepAttention.
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, cnn_kernel_width, dropout, embeddings):
super(CNNDecoder, self).__init__()
# Basic attributes.
self.decoder_type = 'cnn'
self.num_layers = num_layers
self.hidden_size = hidden_size
self.cnn_kernel_width = cnn_kernel_width
self.embeddings = embeddings
self.dropout = dropout
# Build the CNN.
input_size = self.embeddings.embedding_size
self.linear = nn.Linear(input_size, self.hidden_size)
self.conv_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.conv_layers.append(
GatedConv(self.hidden_size, self.cnn_kernel_width,
self.dropout, True))
self.attn_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.attn_layers.append(
onmt.modules.ConvMultiStepAttention(self.hidden_size))
# CNNDecoder has its own attention mechanism.
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type)
self._copy = True
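    # Rough construction sketch (argument values are illustrative, not
    # defaults; tgt_embeddings is an assumed embeddings module):
    #   dec = CNNDecoder(num_layers=2, hidden_size=256, attn_type="general",
    #                    copy_attn=False, cnn_kernel_width=3, dropout=0.1,
    #                    embeddings=tgt_embeddings)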
def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):
""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
# NOTE: memory_lengths is only here for compatibility reasons
# with onmt.modules.RNNDecoderBase.forward()
# CHECKS
assert isinstance(state, CNNDecoderState)
_, tgt_batch, _ = tgt.size()
_, contxt_batch, _ = memory_bank.size()
aeq(tgt_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
tgt = torch.cat([state.previous_input, tgt], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1)
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_memory_bank_t, src_memory_bank_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(tgt)
return outputs, state, attns
def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):
"""
Init decoder state.
"""
return CNNDecoderState(memory_bank, enc_hidden)
class CNNDecoderState(DecoderState):
"""
Init CNN decoder state.
"""
def __init__(self, memory_bank, enc_hidden):
self.init_src = (memory_bank + enc_hidden) * SCALE_WEIGHT
self.previous_input = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,)
def detach(self):
self.previous_input = self.previous_input.detach()
def update_state(self, new_input):
""" Called for every decoder forward pass. """
self.previous_input = new_input
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = self.init_src.data.repeat(1, beam_size, 1)
|
koalixcrm/crm/factories/factory_task_link_type.py | Cataldir/koalixcrm | 290 | 12676817 | # -*- coding: utf-8 -*-
import factory
from koalixcrm.crm.models import TaskLinkType
class RelatedToTaskLinkTypeFactory(factory.django.DjangoModelFactory):
class Meta:
model = TaskLinkType
django_get_or_create = ('title',)
title = "Is related to"
description = "This task is related with ...."
class RequiresLinkTypeFactory(factory.django.DjangoModelFactory):
class Meta:
model = TaskLinkType
django_get_or_create = ('title',)
title = "This task requires"
description = "This task requires the completion or the existence of ...."
|
django_filters/fields.py | jvacek/django-filter | 2,512 | 12676836 | <reponame>jvacek/django-filter
from collections import namedtuple
from datetime import datetime, time
from django import forms
from django.utils.dateparse import parse_datetime
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from .conf import settings
from .constants import EMPTY_VALUES
from .utils import handle_timezone
from .widgets import (
BaseCSVWidget,
CSVWidget,
DateRangeWidget,
LookupChoiceWidget,
RangeWidget
)
class RangeField(forms.MultiValueField):
widget = RangeWidget
def __init__(self, fields=None, *args, **kwargs):
if fields is None:
fields = (
forms.DecimalField(),
forms.DecimalField())
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
return slice(*data_list)
return None
class DateRangeField(RangeField):
widget = DateRangeWidget
def __init__(self, *args, **kwargs):
fields = (
forms.DateField(),
forms.DateField())
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
start_date, stop_date = data_list
if start_date:
start_date = handle_timezone(
datetime.combine(start_date, time.min),
False
)
if stop_date:
stop_date = handle_timezone(
datetime.combine(stop_date, time.max),
False
)
return slice(start_date, stop_date)
return None
class DateTimeRangeField(RangeField):
widget = DateRangeWidget
def __init__(self, *args, **kwargs):
fields = (
forms.DateTimeField(),
forms.DateTimeField())
super().__init__(fields, *args, **kwargs)
class IsoDateTimeRangeField(RangeField):
widget = DateRangeWidget
def __init__(self, *args, **kwargs):
fields = (
IsoDateTimeField(),
IsoDateTimeField())
super().__init__(fields, *args, **kwargs)
class TimeRangeField(RangeField):
widget = DateRangeWidget
def __init__(self, *args, **kwargs):
fields = (
forms.TimeField(),
forms.TimeField())
super().__init__(fields, *args, **kwargs)
class Lookup(namedtuple('Lookup', ('value', 'lookup_expr'))):
def __new__(cls, value, lookup_expr):
if value in EMPTY_VALUES or lookup_expr in EMPTY_VALUES:
raise ValueError(
"Empty values ([], (), {}, '', None) are not "
"valid Lookup arguments. Return None instead."
)
return super().__new__(cls, value, lookup_expr)
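# A Lookup pairs a non-empty value with a lookup expression,
# e.g. Lookup(value='10', lookup_expr='gte').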
class LookupChoiceField(forms.MultiValueField):
default_error_messages = {
'lookup_required': _('Select a lookup.'),
}
def __init__(self, field, lookup_choices, *args, **kwargs):
empty_label = kwargs.pop('empty_label', settings.EMPTY_CHOICE_LABEL)
fields = (field, ChoiceField(choices=lookup_choices, empty_label=empty_label))
widget = LookupChoiceWidget(widgets=[f.widget for f in fields])
kwargs['widget'] = widget
kwargs['help_text'] = field.help_text
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
if len(data_list) == 2:
value, lookup_expr = data_list
if value not in EMPTY_VALUES:
if lookup_expr not in EMPTY_VALUES:
return Lookup(value=value, lookup_expr=lookup_expr)
else:
raise forms.ValidationError(
self.error_messages['lookup_required'],
code='lookup_required')
return None
class IsoDateTimeField(forms.DateTimeField):
"""
    Supports the 'iso-8601' date format too, which is outside the scope of
    the ``datetime.strptime`` standard library
# ISO 8601: ``http://www.w3.org/TR/NOTE-datetime``
Based on Gist example by <NAME> https://gist.github.com/copitux/5773821
"""
ISO_8601 = 'iso-8601'
input_formats = [ISO_8601]
def strptime(self, value, format):
value = force_str(value)
if format == self.ISO_8601:
parsed = parse_datetime(value)
if parsed is None: # Continue with other formats if doesn't match
raise ValueError
return handle_timezone(parsed)
return super().strptime(value, format)
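# Hedged usage sketch (not part of the shipped module): IsoDateTimeField accepts
# ISO 8601 strings that the stock DateTimeField input formats would reject, e.g.
#
#   f = IsoDateTimeField()
#   f.clean('2019-03-01T12:30:00+02:00')   # -> timezone-aware datetime
#                                          #    (tz handling follows handle_timezone / settings)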
class BaseCSVField(forms.Field):
"""
Base field for validating CSV types. Value validation is performed by
secondary base classes.
ex::
class IntegerCSVField(BaseCSVField, filters.IntegerField):
pass
"""
base_widget_class = BaseCSVWidget
def __init__(self, *args, **kwargs):
widget = kwargs.get('widget') or self.widget
kwargs['widget'] = self._get_widget_class(widget)
super().__init__(*args, **kwargs)
def _get_widget_class(self, widget):
# passthrough, allows for override
if isinstance(widget, BaseCSVWidget) or (
isinstance(widget, type) and
issubclass(widget, BaseCSVWidget)):
return widget
# complain since we are unable to reconstruct widget instances
assert isinstance(widget, type), \
"'%s.widget' must be a widget class, not %s." \
% (self.__class__.__name__, repr(widget))
bases = (self.base_widget_class, widget, )
return type(str('CSV%s' % widget.__name__), bases, {})
def clean(self, value):
if value in self.empty_values and self.required:
raise forms.ValidationError(self.error_messages['required'], code='required')
if value is None:
return None
return [super(BaseCSVField, self).clean(v) for v in value]
class BaseRangeField(BaseCSVField):
# Force use of text input, as range must always have two inputs. A date
# input would only allow a user to input one value and would always fail.
widget = CSVWidget
default_error_messages = {
'invalid_values': _('Range query expects two values.')
}
def clean(self, value):
value = super().clean(value)
assert value is None or isinstance(value, list)
if value and len(value) != 2:
raise forms.ValidationError(
self.error_messages['invalid_values'],
code='invalid_values')
return value
class ChoiceIterator:
# Emulates the behavior of ModelChoiceIterator, but instead wraps
# the field's _choices iterable.
def __init__(self, field, choices):
self.field = field
self.choices = choices
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
if self.field.null_label is not None:
yield (self.field.null_value, self.field.null_label)
yield from self.choices
def __len__(self):
add = 1 if self.field.empty_label is not None else 0
add += 1 if self.field.null_label is not None else 0
return len(self.choices) + add
class ModelChoiceIterator(forms.models.ModelChoiceIterator):
# Extends the base ModelChoiceIterator to add in 'null' choice handling.
# This is a bit verbose since we have to insert the null choice after the
# empty choice, but before the remainder of the choices.
def __iter__(self):
iterable = super().__iter__()
if self.field.empty_label is not None:
yield next(iterable)
if self.field.null_label is not None:
yield (self.field.null_value, self.field.null_label)
yield from iterable
def __len__(self):
add = 1 if self.field.null_label is not None else 0
return super().__len__() + add
class ChoiceIteratorMixin:
def __init__(self, *args, **kwargs):
self.null_label = kwargs.pop('null_label', settings.NULL_CHOICE_LABEL)
self.null_value = kwargs.pop('null_value', settings.NULL_CHOICE_VALUE)
super().__init__(*args, **kwargs)
def _get_choices(self):
return super()._get_choices()
def _set_choices(self, value):
super()._set_choices(value)
value = self.iterator(self, self._choices)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
# Unlike their Model* counterparts, forms.ChoiceField and forms.MultipleChoiceField do not set empty_label
class ChoiceField(ChoiceIteratorMixin, forms.ChoiceField):
iterator = ChoiceIterator
def __init__(self, *args, **kwargs):
self.empty_label = kwargs.pop('empty_label', settings.EMPTY_CHOICE_LABEL)
super().__init__(*args, **kwargs)
class MultipleChoiceField(ChoiceIteratorMixin, forms.MultipleChoiceField):
iterator = ChoiceIterator
def __init__(self, *args, **kwargs):
self.empty_label = None
super().__init__(*args, **kwargs)
class ModelChoiceField(ChoiceIteratorMixin, forms.ModelChoiceField):
iterator = ModelChoiceIterator
def to_python(self, value):
# bypass the queryset value check
if self.null_label is not None and value == self.null_value:
return value
return super().to_python(value)
class ModelMultipleChoiceField(ChoiceIteratorMixin, forms.ModelMultipleChoiceField):
iterator = ModelChoiceIterator
def _check_values(self, value):
null = self.null_label is not None and value and self.null_value in value
if null: # remove the null value and any potential duplicates
value = [v for v in value if v != self.null_value]
result = list(super()._check_values(value))
result += [self.null_value] if null else []
return result
|
jaqs/research/signaldigger/performance.py | WestXu/JAQS | 602 | 12676845 | <filename>jaqs/research/signaldigger/performance.py
# encoding: utf-8
import numpy as np
import pandas as pd
import scipy.stats as scst
import statsmodels.api as sm
from jaqs.trade.common import CALENDAR_CONST
def calc_signal_ic(signal_data):
"""
Computes the Spearman Rank Correlation based Information Coefficient (IC)
between signal values and N period forward returns for each period in
the signal index.
Parameters
----------
signal_data : pd.DataFrame - MultiIndex
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
Returns
-------
ic : pd.DataFrame
Spearman Rank correlation between signal and provided forward returns.
"""
def src_ic(df):
_ic = scst.spearmanr(df['signal'], df['return'])[0]
return _ic
signal_data = signal_data.copy()
grouper = ['trade_date']
ic = signal_data.groupby(grouper).apply(src_ic)
ic = pd.DataFrame(ic)
ic.columns = ['ic']
return ic
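# Illustrative sketch (not part of the original module): a minimal input frame
# for calc_signal_ic. The MultiIndex layout follows the docstring above; the
# symbols and numbers are made up purely for demonstration.
#
#   import pandas as pd
#   idx = pd.MultiIndex.from_product(
#       [[20180102, 20180103], ['000001.SZ', '600000.SH']],
#       names=['trade_date', 'symbol'])
#   demo = pd.DataFrame({'signal': [0.1, -0.2, 0.3, 0.0],
#                        'return': [0.01, -0.02, 0.02, 0.01],
#                        'quantile': [2, 1, 2, 1]}, index=idx)
#   calc_signal_ic(demo)   # -> one Spearman rank IC per trade_date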
def calc_ic_stats_table(ic_data):
ic_summary_table = pd.DataFrame()
ic_summary_table["IC Mean"] = ic_data.mean()
ic_summary_table["IC Std."] = ic_data.std()
t_stat, p_value = scst.ttest_1samp(ic_data, 0)
ic_summary_table["t-stat(IC)"] = t_stat
ic_summary_table["p-value(IC)"] = p_value
ic_summary_table["IC Skew"] = scst.skew(ic_data)
ic_summary_table["IC Kurtosis"] = scst.kurtosis(ic_data)
ic_summary_table["Ann. IR"] = ic_data.mean() / ic_data.std()
return ic_summary_table
def mean_information_coefficient(ic, by_time=None):
"""
Get the mean information coefficient of specified groups.
Answers questions like:
What is the mean IC for each month?
What is the mean IC for each group for our whole timerange?
    What is the mean IC for each group, each week?
Parameters
----------
by_time : str (pd time_rule), optional
Time window to use when taking mean IC.
See http://pandas.pydata.org/pandas-docs/stable/timeseries.html
for available options.
Returns
-------
ic : pd.DataFrame
Mean Spearman Rank correlation between signal and provided
forward price movement windows.
"""
grouper = []
if by_time is not None:
grouper.append(pd.TimeGrouper(by_time))
if len(grouper) == 0:
ic = ic.mean()
else:
ic.index = pd.to_datetime(ic.index, format="%Y%m%d")
ic = (ic.reset_index().set_index('trade_date').groupby(grouper).mean())
return ic
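# e.g. mean_information_coefficient(ic, by_time='M') averages the daily ICs
# within each calendar month (a sketch; 'M' is pandas' month-end grouping rule).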
def calc_period_wise_weighted_signal_return(signal_data, weight_method):
"""
    Computes period-wise returns for a portfolio weighted by signal
values. Weights are computed by demeaning signals and dividing
by the sum of their absolute value (achieving gross leverage of 1).
Parameters
----------
signal_data : pd.DataFrame - MultiIndex
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
weight_method : {'equal_weight', 'long_only', 'long_short'}
Returns
-------
res : pd.DataFrame
        Period-wise returns of a dollar-neutral portfolio weighted by signal value.
"""
def calc_norm_weights(ser, method):
if method == 'equal_weight':
ser.loc[:] = 1.0 / len(ser)
elif method == 'long_short':
# TODO: do we need to de-mean?
ser = ser - ser.mean()
elif method == 'long_only':
ser = (ser + ser.abs()) / 2.0
elif method == 'short_only':
ser = (ser - ser.abs()) / 2.0
else:
raise ValueError("method can only be equal_weight, long_only or long_short,"
"but [{}] is provided".format(method))
return ser / ser.abs().sum()
grouper = ['trade_date']
weights = signal_data.groupby(grouper)['signal'].apply(calc_norm_weights, weight_method)
# df_sig = signal_data['signal'].unstack(level='symbol')
# weights = df_sig.apply(calc_norm_weights, axis=1, args=(weight_method, ))
weighted_returns = signal_data['return'].multiply(weights, axis=0)
period_wise_returns = weighted_returns.groupby(level='trade_date').sum()
res = pd.DataFrame(period_wise_returns)
res.columns = ['return']
return res
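# Worked example of the weighting described in the docstring (illustrative
# numbers only): with signals [1, 2, 3] on one trade_date and
# weight_method='long_short', demeaning gives [-1, 0, 1]; dividing by the sum
# of absolute values (2) yields weights [-0.5, 0.0, 0.5], i.e. a dollar-neutral
# portfolio with gross leverage 1.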
def regress_period_wise_signal_return(signal_data, group=False):
"""
    Computes the period-wise signal return by regressing cross-sectional
    forward returns on signal values (i.e. the OLS slope for each period).
Parameters
----------
signal_data : pd.DataFrame - MultiIndex
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
Returns
-------
    regress_res : pd.DataFrame
        Period-wise OLS slope of returns on signal values (one value per
        period, or per period and group when ``group=True``).
"""
def regress(df):
x = df['signal'].values
y = df['return'].values
x = sm.add_constant(x)
mod = sm.OLS(y, x).fit()
        idiosyncratic, signal_return = mod.params
        # return pd.Series(index=['idio', 'signal_return'], data=[idiosyncratic, signal_return])
return signal_return
grouper = [signal_data.index.get_level_values('trade_date')]
if group:
grouper.append('group')
regress_res = signal_data.groupby(grouper).apply(regress)
return pd.DataFrame(regress_res)
'''
def calc_alpha_beta(active_return, period, benchmark_return=None):
if isinstance(active_return, pd.Series):
active_return = pd.DataFrame(active_return)
if isinstance(benchmark_return, pd.Series):
benchmark_return = pd.DataFrame(benchmark_return)
benchmark_return = benchmark_return.loc[active_return.index, :]
alpha_beta = pd.DataFrame()
x = benchmark_return.values
y = active_return.values
x = sm.add_constant(x)
reg_fit = sm.OLS(y, x).fit()
alpha, beta = reg_fit.params
alpha_beta.loc['Ann. alpha', period] = \
(1 + alpha) ** (252.0 / period) - 1
alpha_beta.loc['beta', period] = beta
return alpha_beta
'''
def calc_quantile_return_mean_std(signal_data, time_series=False):
"""
Computes mean returns for signal quantiles across
provided forward returns columns.
Parameters
----------
signal_data : pd.DataFrame - MultiIndex
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
Returns
-------
res : pd.DataFrame of dict
"""
signal_data = signal_data.copy()
grouper = ['quantile']
if time_series:
grouper.append('trade_date')
group_mean_std = signal_data.groupby(grouper)['return'].agg(['mean', 'std', 'count'])
# TODO: why?
'''
std_error_ret = group_mean_std.loc[:, 'std'].copy() / np.sqrt(group_mean_std.loc[:, 'count'].copy())
'''
if time_series:
quantile_daily_mean_std_dic = dict()
quantiles = np.unique(group_mean_std.index.get_level_values(level='quantile'))
for q in quantiles: # loop for different quantiles
df_q = group_mean_std.loc[pd.IndexSlice[q, :], :] # bug
df_q.index = df_q.index.droplevel(level='quantile')
quantile_daily_mean_std_dic[q] = df_q
return quantile_daily_mean_std_dic
else:
return group_mean_std
def calc_return_diff_mean_std(q1, q2):
"""
Computes the difference between the mean returns of
two quantiles. Optionally, computes the standard error
of this difference.
Parameters
----------
q1, q2 : pd.DataFrame
DataFrame of mean period wise returns by quantile.
        Index is date, columns = ['mean', 'std', 'count']
Returns
-------
res : pd.DataFrame
Difference of mean return and corresponding std.
"""
res_raw = pd.merge(q1, q2, how='outer', suffixes=('_1','_2'), left_index=True, right_index=True).fillna(0)
res_raw['mean_diff'] = res_raw['mean_1'] - res_raw['mean_2']
    res_raw['std'] = np.sqrt(res_raw['mean_1'] ** 2 + res_raw['mean_2'] ** 2)
res = res_raw[['mean_diff','std']]
return res
'''
def period2daily(ser, period, do_roll_mean=False):
if not period > 1:
return ser
if do_roll_mean:
ser = ser.rolling(window=period, min_periods=1, axis=1).mean()
ser_daily_pow = (ser + 1) ** (1. / period)
return ser_daily_pow - 1.0
'''
def calc_active_cum_return_way2(portfolio_ret, benchmark_ret):
benchmark_ret = benchmark_ret.loc[portfolio_ret.index]
portfolio_cum = portfolio_ret.add(1.0).cumprod(axis=0)
benchmark_cum = benchmark_ret.add(1.0).cumprod(axis=0)
active_cum = portfolio_cum.sub(benchmark_cum.values.flatten(), axis=0) + 1.0
return active_cum
def calc_active_cum_return(portfolio_ret, benchmark_ret):
benchmark_ret = benchmark_ret.loc[portfolio_ret.index]
active_ret = portfolio_ret.sub(benchmark_ret.values.flatten(), axis=0)
active_cum = active_ret.add(1.0).cumprod()
return active_cum
def price2ret(prices, period=5, axis=None):
"""
Parameters
----------
prices : pd.DataFrame or pd.Series
Index is datetime.
period : int
axis : {0, 1, None}
Returns
-------
ret : pd.DataFrame or pd.Series
"""
ret = prices.pct_change(periods=period, axis=axis)
return ret
def cum2ret(cum, period=1, axis=None, compound=False):
"""
Parameters
----------
cum : pd.Series
Starts from zero.
period : int
axis : {0, 1, None}
compound : bool
Returns
-------
ret : pd.Series
"""
if axis is None:
kwargs = dict()
else:
kwargs = {'axis': axis}
    if np.any(cum.min(**kwargs) < 0):
raise ValueError("Minimum value of cumulative return is less than zero.")
cum = cum.add(1.0)
if compound:
ret = cum.pct_change(periods=period, **kwargs)
else:
ret = cum.diff(periods=period, **kwargs)
return ret
def ret2cum(ret, compound=False, axis=None):
"""
Parameters
----------
ret : pd.Series
Starts from zero.
compound : bool
axis : {0, 1, None}
Returns
-------
cum : pd.Series
"""
if axis is None:
kwargs = dict()
else:
kwargs = {'axis': axis}
if compound:
# use log to avoid numerical problems
log_sum = np.log(ret.add(1.0)).cumsum(**kwargs)
cum = np.exp(log_sum).sub(1.0)
else:
cum = ret.cumsum(**kwargs)
return cum
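# Round-trip sketch for the two helpers above (illustrative only):
#
#   import pandas as pd
#   ret = pd.Series([0.01, 0.02, 0.015])
#   cum = ret2cum(ret)                    # simple cumsum when compound=False
#   back = cum2ret(cum, period=1)         # diff; back[1:] reproduces ret[1:]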
def calc_performance_metrics(ser, cum_return=False, compound=False):
"""
Calculate annualized return, volatility and sharpe.
    We assume the data frequency to be daily.
Parameters
----------
ser : pd.DataFrame or pd.Series
Index is int date, values are floats.
ser should start from 0.
cum_return : bool
Whether ser is cumulative or daily return.
compound
Whether calculation of return is compound.
Returns
-------
res : dict
"""
if isinstance(ser, pd.DataFrame):
ser = ser.iloc[:, 0]
idx = ser.index
if cum_return:
cum_ret = ser
ret = cum2ret(cum_ret, period=1, compound=compound)
else:
ret = ser
cum_ret = ret2cum(ret, compound=compound)
n_trade_days = len(idx)
n_years = n_trade_days * 1. / CALENDAR_CONST.TRADE_DAYS_PER_YEAR
total_ret = cum_ret.iat[-1]
if compound:
ann_ret = np.power(cum_ret.iat[-1] + 1.0, 1. / n_years) - 1
else:
ann_ret = total_ret / n_years
std = np.std(ret) # use std instead of np.sqrt( (ret**2).sum() / len(ret) )
ann_vol = std * np.sqrt(CALENDAR_CONST.TRADE_DAYS_PER_YEAR)
sharpe = ann_ret / ann_vol
# print "ann. ret = {:.1f}%; ann. vol = {:.1f}%, sharpe = {:.2f}".format(ann_ret * 100, ann_vol * 100, sharpe)
res = {'ann_ret': ann_ret,
'ann_vol': ann_vol,
'sharpe': sharpe}
return res
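# Illustrative call (a sketch): given a daily-return Series `ret` indexed by
# int dates, calc_performance_metrics(ret, cum_return=False, compound=True)
# returns {'ann_ret': ..., 'ann_vol': ..., 'sharpe': ...}, annualised with
# CALENDAR_CONST.TRADE_DAYS_PER_YEAR.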
def period_wise_ret_to_cum(ret, period, compound=False):
"""
Calculate cumulative returns from N-periods returns, no compounding.
When 'period' N is greater than 1 the cumulative returns plot is computed
building and averaging the cumulative returns of N interleaved portfolios
(started at subsequent periods 1,2,3,...,N) each one rebalancing every N
periods.
Parameters
----------
ret: pd.Series or pd.DataFrame
pd.Series containing N-periods returns
period: integer
Period for which the returns are computed
compound : bool
Whether calculate using compound return.
Returns
-------
pd.Series
Cumulative returns series starting from zero.
"""
if isinstance(ret, pd.DataFrame):
# deal with each column recursively
return ret.apply(period_wise_ret_to_cum, axis=0, args=(period,))
elif isinstance(ret, pd.Series):
if period == 1:
return ret.add(1).cumprod().sub(1.0)
# invest in each portfolio separately
periods_index = np.arange(len(ret.index)) // period
period_portfolios = ret.groupby(by=periods_index, axis=0).apply(lambda ser: pd.DataFrame(np.diag(ser)))
period_portfolios.index = ret.index
# cumulate returns separately
if compound:
cum_returns = period_portfolios.add(1).cumprod().sub(1.0)
else:
cum_returns = period_portfolios.cumsum()
# since capital of all portfolios are the same, return in all equals average return
res = cum_returns.mean(axis=1)
return res
else:
raise NotImplementedError("ret must be Series or DataFrame.")
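# Example of the interleaving described in the docstring (a sketch): with
# period=2 and returns r0..r3, two portfolios are built -- one rebalancing at
# t0 and t2, the other at t1 and t3 -- their cumulative returns are computed
# separately and then averaged across columns, which is what mean(axis=1) does.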
|
Src/StdLib/Lib/test/test_compare.py | cwensley/ironpython2 | 2,293 | 12676863 | <filename>Src/StdLib/Lib/test/test_compare.py
import unittest
from test import test_support
class Empty:
def __repr__(self):
return '<Empty>'
class Coerce:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return '<Coerce %s>' % self.arg
def __coerce__(self, other):
if isinstance(other, Coerce):
return self.arg, other.arg
else:
return self.arg, other
class Cmp:
def __init__(self,arg):
self.arg = arg
def __repr__(self):
return '<Cmp %s>' % self.arg
def __cmp__(self, other):
return cmp(self.arg, other)
class ComparisonTest(unittest.TestCase):
set1 = [2, 2.0, 2L, 2+0j, Coerce(2), Cmp(2.0)]
set2 = [[1], (3,), None, Empty()]
candidates = set1 + set2
def test_comparisons(self):
for a in self.candidates:
for b in self.candidates:
if ((a in self.set1) and (b in self.set1)) or a is b:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
def test_id_comparisons(self):
# Ensure default comparison compares id() of args
L = []
for i in range(10):
L.insert(len(L)//2, Empty())
for a in L:
for b in L:
self.assertEqual(cmp(a, b), cmp(id(a), id(b)),
'a=%r, b=%r' % (a, b))
def test_main():
test_support.run_unittest(ComparisonTest)
if __name__ == '__main__':
test_main()
|
sdfrenderer/deepsdf/workspace.py | TRI-ML/sdflabel | 105 | 12676869 | <filename>sdfrenderer/deepsdf/workspace.py
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# Source: https://github.com/facebookresearch/DeepSDF
import json
import os
import torch
from torch import nn
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
def load_experiment_specifications(experiment_directory):
filename = os.path.join(experiment_directory, specifications_filename)
if not os.path.isfile(filename):
        raise Exception(
            ('The experiment directory ({}) does not include specifications file '
             '"specs.json"').format(experiment_directory)
        )
return json.load(open(filename))
def load_model_parameters(experiment_directory, checkpoint, decoder):
filename = os.path.join(experiment_directory, model_params_subdir, checkpoint + ".pth")
if not os.path.isfile(filename):
raise Exception('model state dict "{}" does not exist'.format(filename))
data = torch.load(filename)
decoder.load_state_dict(data["model_state_dict"])
return data["epoch"]
def build_decoder(experiment_directory, experiment_specs):
arch = __import__("networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"])
latent_size = experiment_specs["CodeLength"]
decoder = arch.Decoder(latent_size, **experiment_specs["NetworkSpecs"]).cuda()
return decoder
def load_decoder(experiment_directory, experiment_specs, checkpoint, data_parallel=True):
decoder = build_decoder(experiment_directory, experiment_specs)
if data_parallel:
decoder = torch.nn.DataParallel(decoder)
epoch = load_model_parameters(experiment_directory, checkpoint, decoder)
return (decoder, epoch)
def load_latent_vectors(experiment_directory, checkpoint):
filename = os.path.join(experiment_directory, latent_codes_subdir, checkpoint + ".pth")
if not os.path.isfile(filename):
        raise Exception(
            ('The experiment directory ({}) does not include a latent code file '
             'for checkpoint "{}"').format(experiment_directory, checkpoint)
        )
data = torch.load(filename)
num_vecs = data["latent_codes"].size()[0]
latent_size = data["latent_codes"].size()[2]
lat_vecs = []
for i in range(num_vecs):
lat_vecs.append(data["latent_codes"][i].cuda())
return lat_vecs
def get_data_source_map_filename(data_dir):
return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(experiment_dir, epoch, dataset, class_name, instance_name):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_meshes_subdir,
dataset,
class_name,
instance_name + ".ply",
)
def get_reconstructed_code_filename(experiment_dir, epoch, dataset, class_name, instance_name):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_codes_subdir,
dataset,
class_name,
instance_name + ".pth",
)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, model_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, optimizer_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, latent_codes_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def setup_dsdf(dir, mode='eval', precision=torch.float16):
specs_filename = os.path.splitext(dir)[0] + '.json'
if not os.path.isfile(specs_filename):
raise Exception('The experiment directory does not include specifications file "specs.json"')
specs = json.load(open(specs_filename))
arch = __import__("deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"])
latent_size = specs["CodeLength"]
specs["NetworkSpecs"].pop('samples_per_scene', None) # remove samples_per_scene to get scale for a single model
decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"])
decoder = torch.nn.DataParallel(decoder)
saved_model_state = torch.load(dir)
saved_model_epoch = saved_model_state["epoch"]
decoder.load_state_dict(saved_model_state["model_state_dict"])
decoder = decoder.module
convert_to_precision(decoder, precision)
if mode == 'train':
decoder.train()
elif mode == 'eval':
decoder.eval()
return decoder, latent_size
def convert_to_precision(model, precision):
model.to(dtype=precision)
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.float()
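# Minimal usage sketch (not part of the original file). The checkpoint path is
# a placeholder (a matching weights/cars.json specs file is assumed), and the
# decoder call signature (latent code concatenated with xyz) is an assumption
# based on the reference DeepSDF decoder:
#
#   decoder, latent_size = setup_dsdf('weights/cars.pth', mode='eval',
#                                     precision=torch.float32)
#   latent = torch.zeros(1, latent_size)
#   xyz = torch.zeros(1, 3)
#   sdf_value = decoder(torch.cat([latent, xyz], dim=1))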
|
flask_oauthlib/contrib/client/signals.py | PCMan/flask-oauthlib | 1,292 | 12676873 | from flask.signals import Namespace
__all__ = ['request_token_fetched']
_signals = Namespace()
request_token_fetched = _signals.signal('request-token-fetched')
|
script/key_scaner.py | yutiansut/Personae | 1,046 | 12676887 | # coding=utf-8
import sys
import os
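# Scans an inclusive IPv4 range given on the command line (both addresses are
# assumed to share the same first three octets, e.g. 192.168.1.10 192.168.1.20),
# collects each host's RSA key with ssh-keyscan and overwrites ~/.ssh/known_hosts
# with the result.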
if __name__ == '__main__':
start_ip, end_ip = sys.argv[1], sys.argv[2]
split_start_ip = start_ip.split('.')
split_end_ip = end_ip.split('.')
ip_list_str = ""
ip_base = split_start_ip[0] + '.' + split_start_ip[1] + '.' + split_start_ip[2] + '.'
ip_count = int(split_end_ip[-1]) - int(split_start_ip[-1]) + 1
for ip_index in range(ip_count):
ip_list_str += ip_base + str(int(split_start_ip[3]) + ip_index) + " "
cmd_1 = "ssh-keyscan -t rsa %s" % ip_list_str
os.system("%s > ~/.ssh/known_hosts" % cmd_1)
|
conanfile.py | Naios/Continue.cpp | 745 | 12676892 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
def get_version():
git = tools.Git()
try:
return git.run("describe --tags --abbrev=0")
except:
return None
class ContinuableConan(ConanFile):
name = "continuable"
version = get_version()
license = "MIT"
url = "https://github.com/Naios/continuable"
author = "<NAME> (<EMAIL>)"
description = "C++14 asynchronous allocation aware futures"
homepage = "https://naios.github.io/continuable/"
no_copy_source = True
scm = {
"type": "git",
"url": "auto",
"revision": "auto"
}
def package(self):
self.copy("LICENSE.txt", "licenses")
self.copy("include/*.hpp")
self.copy("include/*.inl")
def package_id(self):
self.info.header_only()
def requirements(self):
self.requires("function2/4.0.0@naios/stable")
|
tools/generate_codemeta.py | stevemats/mne-python | 1,953 | 12676893 | <reponame>stevemats/mne-python<filename>tools/generate_codemeta.py
import os
import subprocess
from datetime import date
from mne import __version__ as release_version
# NOTE: ../codemeta.json should not be continuously updated. Run this script
# only at release time.
# add to these as necessary
compound_surnames = (
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
)
def parse_name(name):
"""Split name blobs from `git shortlog -nse` into first/last/email."""
# remove commit count
_, name_and_email = name.strip().split('\t')
name, email = name_and_email.split(' <')
email = email.strip('>')
email = '' if 'noreply' in email else email # ignore "noreply" emails
name = ' '.join(name.split('.')) # remove periods from initials
# handle compound surnames
for compound_surname in compound_surnames:
if name.endswith(compound_surname):
ix = name.index(compound_surname)
first = name[:ix].strip()
last = compound_surname
return (first, last, email)
# handle non-compound surnames
name_elements = name.split()
if len(name_elements) == 1: # mononyms / usernames
first = ''
last = name
else:
first = ' '.join(name_elements[:-1])
last = name_elements[-1]
return (first, last, email)
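# Illustrative behaviour of parse_name on `git shortlog -nse` style rows
# (made-up names, shown only to document the parsing rules above):
#
#   parse_name("    42\tJane Q. Doe <jane@example.com>")
#       -> ('Jane Q', 'Doe', 'jane@example.com')      # periods in initials dropped
#   parse_name("     7\tsomeuser <x@users.noreply.github.com>")
#       -> ('', 'someuser', '')                       # mononym; noreply email blanked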
# MAKE SURE THE RELEASE STRING IS PROPERLY FORMATTED
try:
split_version = list(map(int, release_version.split('.')))
except ValueError:
raise
msg = f'version string must be X.Y.Z (all integers), got {release_version}'
assert len(split_version) == 3, msg
# RUN GIT SHORTLOG TO GET ALL AUTHORS, SORTED BY NUMBER OF COMMITS
args = ['git', 'shortlog', '-nse']
result = subprocess.run(args, capture_output=True, text=True)
lines = result.stdout.strip().split('\n')
all_names = [parse_name(line) for line in lines]
# CONSTRUCT JSON AUTHORS LIST
authors = [f'''{{
"@type":"Person",
"email":"{email}",
"givenName":"{first}",
"familyName": "{last}"
}}''' for (first, last, email) in all_names]
# GET OUR DEPENDENCIES
with open(os.path.join('..', 'setup.py'), 'r') as fid:
for line in fid:
if line.strip().startswith('python_requires='):
version = line.strip().split('=', maxsplit=1)[1].strip("'\",")
dependencies = [f'python{version}']
break
hard_dependencies = ('numpy', 'scipy')
with open(os.path.join('..', 'requirements.txt'), 'r') as fid:
for line in fid:
req = line.strip()
for hard_dep in hard_dependencies:
if req.startswith(hard_dep):
dependencies.append(req)
# these must be done outside the boilerplate (no \n allowed in f-strings):
authors = ',\n '.join(authors)
dependencies = '",\n "'.join(dependencies)
# ASSEMBLE COMPLETE JSON
codemeta_boilerplate = f'''{{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"@type": "SoftwareSourceCode",
"license": "https://spdx.org/licenses/BSD-3-Clause",
"codeRepository": "git+https://github.com/mne-tools/mne-python.git",
"dateCreated": "2010-12-26",
"datePublished": "2014-08-04",
"dateModified": "{str(date.today())}",
"downloadUrl": "https://github.com/mne-tools/mne-python/archive/v{release_version}.zip",
"issueTracker": "https://github.com/mne-tools/mne-python/issues",
"name": "MNE-Python",
"version": "{release_version}",
"description": "MNE-Python is an open-source Python package for exploring, visualizing, and analyzing human neurophysiological data. It provides methods for data input/output, preprocessing, visualization, source estimation, time-frequency analysis, connectivity analysis, machine learning, and statistics.",
"applicationCategory": "Neuroscience",
"developmentStatus": "active",
"referencePublication": "https://doi.org/10.3389/fnins.2013.00267",
"keywords": [
"MEG",
"EEG",
"fNIRS",
"ECoG",
"sEEG",
"DBS"
],
"programmingLanguage": [
"Python"
],
"operatingSystem": [
"Linux",
"Windows",
"macOS"
],
"softwareRequirements": [
"{dependencies}"
],
"author": [
{authors}
]
}}
''' # noqa E501
# WRITE TO FILE
with open(os.path.join('..', 'codemeta.json'), 'w') as codemeta_file:
codemeta_file.write(codemeta_boilerplate)
|
pypy2.7/module/_multiprocess/test/test_semaphore.py | UniverseFly/multiprocess | 356 | 12676919 | <filename>pypy2.7/module/_multiprocess/test/test_semaphore.py
import sys
from _multiprocess.interp_semaphore import (
RECURSIVE_MUTEX, SEMAPHORE)
class AppTestSemaphore:
spaceconfig = dict(usemodules=('_multiprocess', 'thread',
'signal', 'select',
'binascii', 'struct'))
if sys.platform == 'win32':
spaceconfig['usemodules'] += ('_rawffi', '_cffi_backend')
else:
spaceconfig['usemodules'] += ('fcntl',)
def setup_class(cls):
cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE)
cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX)
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
# import here since importing _multiprocess imports multiprocess
# (in interp_connection) to get the BufferTooShort exception, which on
# win32 imports msvcrt which imports via cffi which allocates ccharp
# that are never released. This trips up the LeakChecker if done in a
# test function
cls.w_multiprocessing = cls.space.appexec([],
'(): import multiprocess as m; return m')
def test_semaphore_basic(self):
from _multiprocess import SemLock
import sys
assert SemLock.SEM_VALUE_MAX > 10
kind = self.SEMAPHORE
value = 1
maxvalue = 1
# the following line gets OSError: [Errno 38] Function not implemented
# if /dev/shm is not mounted on Linux
sem = SemLock(kind, value, maxvalue)
assert sem.kind == kind
assert sem.maxvalue == maxvalue
assert isinstance(sem.handle, (int, long))
assert sem._count() == 0
if sys.platform == 'darwin':
raises(NotImplementedError, 'sem._get_value()')
else:
assert sem._get_value() == 1
assert sem._is_zero() == False
sem.acquire()
assert sem._is_mine()
assert sem._count() == 1
if sys.platform == 'darwin':
raises(NotImplementedError, 'sem._get_value()')
else:
assert sem._get_value() == 0
assert sem._is_zero() == True
sem.release()
assert sem._count() == 0
sem.acquire()
sem._after_fork()
assert sem._count() == 0
def test_recursive(self):
from _multiprocess import SemLock
kind = self.RECURSIVE
value = 1
maxvalue = 1
# the following line gets OSError: [Errno 38] Function not implemented
# if /dev/shm is not mounted on Linux
sem = SemLock(kind, value, maxvalue)
sem.acquire()
sem.release()
assert sem._count() == 0
sem.acquire()
sem.release()
# now recursively
sem.acquire()
sem.acquire()
assert sem._count() == 2
sem.release()
sem.release()
def test_semaphore_maxvalue(self):
from _multiprocess import SemLock
import sys
kind = self.SEMAPHORE
value = SemLock.SEM_VALUE_MAX
maxvalue = SemLock.SEM_VALUE_MAX
sem = SemLock(kind, value, maxvalue)
for i in range(10):
res = sem.acquire()
assert res == True
assert sem._count() == i+1
if sys.platform != 'darwin':
assert sem._get_value() == maxvalue - (i+1)
value = 0
maxvalue = SemLock.SEM_VALUE_MAX
sem = SemLock(kind, value, maxvalue)
for i in range(10):
sem.release()
assert sem._count() == -(i+1)
if sys.platform != 'darwin':
assert sem._get_value() == i+1
def test_semaphore_wait(self):
from _multiprocess import SemLock
kind = self.SEMAPHORE
value = 1
maxvalue = 1
sem = SemLock(kind, value, maxvalue)
res = sem.acquire()
assert res == True
res = sem.acquire(timeout=0.1)
assert res == False
def test_semaphore_rebuild(self):
from _multiprocess import SemLock
kind = self.SEMAPHORE
value = 1
maxvalue = 1
sem = SemLock(kind, value, maxvalue)
sem2 = SemLock._rebuild(sem.handle, kind, value)
assert sem.handle == sem2.handle
def test_semaphore_contextmanager(self):
from _multiprocess import SemLock
kind = self.SEMAPHORE
value = 1
maxvalue = 1
sem = SemLock(kind, value, maxvalue)
with sem:
assert sem._count() == 1
assert sem._count() == 0
def test_in_threads(self):
from _multiprocess import SemLock
from threading import Thread
from time import sleep
l = SemLock(0, 1, 1)
if self.runappdirect:
def f(id):
for i in range(10000):
pass
else:
def f(id):
for i in range(1000):
# reduce the probability of thread switching
# at exactly the wrong time in semlock_acquire
for j in range(10):
pass
threads = [Thread(None, f, args=(i,)) for i in range(2)]
[t.start() for t in threads]
# if the RLock calls to sem_wait and sem_post do not match,
# one of the threads will block and the call to join will fail
[t.join() for t in threads]
|
baidu/part_params.py | LBinsky/spiders | 324 | 12676928 | <filename>baidu/part_params.py
import execjs
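# Each helper below compiles a local JavaScript file with PyExecJS (execjs) and
# calls a function defined in it; the .js files (params.js, dv.js, fs.js,
# traceid.js) are opened from the current working directory.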
def get_params(data=None):
with open('params.js', 'r')as f:
content = f.read()
ctx = execjs.compile(content)
if data:
result = ctx.call('result', data)
else:
result = ctx.call('result')
return result
def get_dv():
with open('dv.js', 'r')as f:
content = f.read()
ctx = execjs.compile(content)
result = ctx.call('f')
return result
def get_fs(nameL):
with open('fs.js', 'r')as f:
content = f.read()
ctx = execjs.compile(content)
result = ctx.call('result',nameL)
return result
def get_traceid():
with open('traceid.js', 'r')as f:
content = f.read()
ctx = execjs.compile(content)
result = ctx.call('getid')
return result
|
tensorflow_probability/python/distributions/johnson_su_test.py | jakee417/probability-1 | 3,670 | 12676934 | <reponame>jakee417/probability-1<filename>tensorflow_probability/python/distributions/johnson_su_test.py
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for initializers."""
import math
# Dependency imports
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class JohnsonSUTest(test_util.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
super(JohnsonSUTest, self).setUp()
def _testParamShapes(self, sample_shape, expected):
param_shapes = tfd.JohnsonSU.param_shapes(sample_shape)
skewness_shape, tailweight_shape, mu_shape, sigma_shape = \
param_shapes['skewness'], param_shapes['tailweight'], \
param_shapes['loc'], param_shapes['scale']
self.assertAllEqual(expected, self.evaluate(skewness_shape))
self.assertAllEqual(expected, self.evaluate(tailweight_shape))
self.assertAllEqual(expected, self.evaluate(mu_shape))
self.assertAllEqual(expected, self.evaluate(sigma_shape))
skewness = tf.zeros(skewness_shape)
tailweight = tf.ones(tailweight_shape)
mu = tf.zeros(mu_shape)
sigma = tf.ones(sigma_shape)
self.assertAllEqual(
expected,
self.evaluate(
tf.shape(tfd.JohnsonSU(skewness, tailweight, mu, sigma,
validate_args=True)
.sample(seed=test_util.test_seed()))))
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = tfd.JohnsonSU.param_static_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes['loc'], param_shapes['scale']
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
def testSampleLikeArgsGetDistDType(self):
dist = tfd.JohnsonSU(1., 2., 0., 1.)
self.assertEqual(tf.float32, dist.dtype)
for method in ('log_prob', 'prob', 'log_cdf', 'cdf',
'log_survival_function', 'survival_function', 'quantile'):
self.assertEqual(tf.float32, getattr(dist, method)(1).dtype, method)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(tf.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tf.TensorShape(sample_shape), sample_shape)
def testJohnsonSULogPDF(self):
batch_size = 6
skewness = tf.constant([1.0] * batch_size)
tailweight = tf.constant([2.0] * batch_size)
mu = tf.constant([3.0] * batch_size)
sigma = tf.constant([math.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
log_pdf = johnson_su.log_prob(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), log_pdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(johnson_su.batch_shape, log_pdf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(log_pdf).shape)
pdf = johnson_su.prob(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), pdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(pdf).shape)
self.assertAllEqual(johnson_su.batch_shape, pdf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(pdf).shape)
expected_log_pdf = sp_stats.johnsonsu(self.evaluate(skewness),
self.evaluate(tailweight),
self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
def testJohnsonSULogPDFMultidimensional(self):
batch_size = 6
skewness = tf.constant([[1.0, -1.0]] * batch_size)
tailweight = tf.constant([[1.0, 2.0]] * batch_size)
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant(
[[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
log_pdf = johnson_su.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), log_pdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(johnson_su.batch_shape, log_pdf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(log_pdf).shape)
pdf = johnson_su.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), pdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), pdf_values.shape)
self.assertAllEqual(johnson_su.batch_shape, pdf.shape)
self.assertAllEqual(johnson_su.batch_shape, pdf_values.shape)
expected_log_pdf = sp_stats.johnsonsu.logpdf(x,
self.evaluate(skewness),
self.evaluate(tailweight),
self.evaluate(mu),
self.evaluate(sigma))
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testJohnsonSUCDF(self):
batch_size = 50
skewness = self._rng.randn(batch_size)
tailweight = self._rng.rand(batch_size) + 1.0
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
cdf = johnson_su.cdf(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), cdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(johnson_su.batch_shape, cdf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(cdf).shape)
expected_cdf = sp_stats.johnsonsu.cdf(x, skewness, tailweight, mu, sigma)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)
def testJohnsonSUSurvivalFunction(self):
batch_size = 50
skewness = self._rng.randn(batch_size)
tailweight = self._rng.rand(batch_size) + 1.0
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
sf = johnson_su.survival_function(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), sf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(johnson_su.batch_shape, sf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(sf).shape)
expected_sf = sp_stats.johnsonsu.sf(x, skewness, tailweight, mu, sigma)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)
def testJohnsonSULogCDF(self):
batch_size = 50
skewness = self._rng.randn(batch_size)
tailweight = self._rng.rand(batch_size) + 1.0
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
cdf = johnson_su.log_cdf(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), cdf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(johnson_su.batch_shape, cdf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(cdf).shape)
expected_cdf = sp_stats.johnsonsu.logcdf(x, skewness, tailweight, mu, sigma)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)
def testFiniteGradientAtDifficultPoints(self):
def make_fn(dtype, attr):
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
return lambda g, d, m, s: getattr( # pylint: disable=g-long-lambda
tfd.JohnsonSU(skewness=g, tailweight=d, loc=m, scale=s,
validate_args=True),
attr)(x)
for dtype in np.float32, np.float64:
for attr in ['cdf', 'log_cdf', 'survival_function',
'log_survival_function', 'log_prob', 'prob']:
value, grads = self.evaluate(tfp.math.value_and_gradient(
make_fn(dtype, attr),
[tf.constant(0, dtype), tf.constant(1, dtype),
tf.constant(2, dtype), tf.constant(3, dtype)]))
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
self.assertAllFinite(grads[2])
self.assertAllFinite(grads[3])
def testJohnsonSULogSurvivalFunction(self):
batch_size = 50
skewness = self._rng.randn(batch_size)
tailweight = self._rng.rand(batch_size) + 1.0
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 10.0, batch_size).astype(np.float64)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
sf = johnson_su.log_survival_function(x)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), sf.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(johnson_su.batch_shape, sf.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(sf).shape)
expected_sf = sp_stats.johnsonsu.logsf(x, skewness, tailweight, mu, sigma)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
def testJohnsonSUMean(self):
skewness = [1.]
tailweight = [2.]
# Mu will be broadcast to [7, 7, 7].
mu = [7.]
sigma = [11., 12., 13.]
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
self.assertAllEqual((3,), johnson_su.mean().shape)
# sp_stats doesn't work with array tailweight
expected_mean = sp_stats.johnsonsu.mean(skewness, tailweight[0], mu, sigma)
self.assertAllClose(expected_mean, self.evaluate(johnson_su.mean()))
def testJohnsonSUQuantile(self):
batch_size = 52
skewness = self._rng.randn(batch_size)
tailweight = self._rng.rand(batch_size) + 1.0
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
# Quantile performs piecewise rational approximation so adding some
# special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
x = johnson_su.quantile(p)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()), x.shape)
self.assertAllEqual(
self.evaluate(johnson_su.batch_shape_tensor()),
self.evaluate(x).shape)
self.assertAllEqual(johnson_su.batch_shape, x.shape)
self.assertAllEqual(johnson_su.batch_shape, self.evaluate(x).shape)
expected_x = sp_stats.johnsonsu.ppf(p, skewness, tailweight, mu, sigma)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def _testQuantileFiniteGradientAtDifficultPoints(self, dtype):
skewness = tf.constant(dtype(0))
tailweight = tf.constant(dtype(1))
mu = tf.constant(dtype(0))
sigma = tf.constant(dtype(1))
p = tf.constant(dtype([np.exp(-32.), np.exp(-2.),
1. - np.exp(-2.), 1. - np.exp(-8.)]))
value, grads = tfp.math.value_and_gradient(
lambda m, p_: tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, # pylint:disable=g-long-lambda
loc=m, scale=sigma, validate_args=True).
quantile(p_), [mu, p])
value, grads = self.evaluate([value, grads])
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
self._testQuantileFiniteGradientAtDifficultPoints(np.float32)
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._testQuantileFiniteGradientAtDifficultPoints(np.float64)
def testJohnsonSUVariance(self):
skewness = [1.]
tailweight = [2.]
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
self.assertAllEqual((3,), johnson_su.variance().shape)
expected_v = sp_stats.johnsonsu.var(skewness[0], tailweight[0], mu[0],
sigma[0])
self.assertAllClose([expected_v] * 3, self.evaluate(johnson_su.variance()))
def testJohnsonSUStandardDeviation(self):
skewness = [1.]
tailweight = [2.]
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
self.assertAllEqual((3,), johnson_su.stddev().shape)
expected_d = sp_stats.johnsonsu.std(skewness[0], tailweight[0], mu[0],
sigma[0])
self.assertAllClose([expected_d] * 3, self.evaluate(johnson_su.stddev()))
def testJohnsonSUSample(self):
skewness = tf.constant(1.0)
tailweight = tf.constant(2.0)
mu = tf.constant(3.0)
sigma = tf.constant(math.sqrt(3.0))
mu_v = sp_stats.johnsonsu.mean(1, 2, 3, math.sqrt(3.0))
sigma_v = sp_stats.johnsonsu.std(1, 2, 3, math.sqrt(3.0))
n = tf.constant(100000)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
samples = johnson_su.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
expected_samples_shape = tf.TensorShape(
[self.evaluate(n)]).concatenate(
tf.TensorShape(
self.evaluate(johnson_su.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.shape)
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tf.TensorShape([self.evaluate(n)]).concatenate(
johnson_su.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.shape)
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testJohnsonSUFullyReparameterized(self):
skewness = tf.constant(1.0)
tailweight = tf.constant(2.0)
mu = tf.constant(4.0)
sigma = tf.constant(3.0)
_, [grad_skewness, grad_tailweight, grad_mu, grad_sigma] = (
tfp.math.value_and_gradient(
lambda g, d, m, s: tfd.JohnsonSU(skewness=g, tailweight=d, loc=m, # pylint:disable=g-long-lambda
scale=s, validate_args=True)
.sample(100, seed=test_util.test_seed()),
[skewness, tailweight, mu, sigma]))
grad_skewness, grad_tailweight, grad_mu, grad_sigma = self.evaluate(
[grad_skewness, grad_tailweight, grad_mu, grad_sigma])
self.assertIsNotNone(grad_skewness)
self.assertIsNotNone(grad_tailweight)
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
def testJohnsonSUSampleMultiDimensional(self):
batch_size = 2
skewness = tf.constant([[1.0, -1.0]] * batch_size)
tailweight = tf.constant([[2.0, 3.0]] * batch_size)
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant(
[[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
sp_stats_params = [
(1, 2, 3, math.sqrt(2.)),
(-1, 3, -3, math.sqrt(3.))
]
mu_v = [sp_stats.johnsonsu.mean(*params) for params in sp_stats_params]
sigma_v = [sp_stats.johnsonsu.std(*params) for params in sp_stats_params]
n = tf.constant(100000)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
samples = johnson_su.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
expected_samples_shape = tf.TensorShape(
[self.evaluate(n)]).concatenate(
tf.TensorShape(
self.evaluate(johnson_su.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.shape)
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tf.TensorShape([self.evaluate(n)]).concatenate(
johnson_su.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.shape)
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNegativetailweightFails(self):
with self.assertRaisesOpError('Argument `tailweight` must be positive.'):
johnson_su = tfd.JohnsonSU(skewness=[1.], tailweight=[-1.], loc=[1.],
scale=[5.], validate_args=True, name='D')
self.evaluate(johnson_su.mean())
def testNegativeScaleFails(self):
with self.assertRaisesOpError('Argument `scale` must be positive.'):
johnson_su = tfd.JohnsonSU(skewness=[1.], tailweight=[1.], loc=[1.],
scale=[-5.], validate_args=True, name='S')
self.evaluate(johnson_su.mean())
def testJohnsonSUShape(self):
skewness = tf.constant(1.0)
tailweight = tf.constant(2.0)
mu = tf.constant([-3.0] * 5)
sigma = tf.constant(11.0)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
self.assertEqual(self.evaluate(johnson_su.batch_shape_tensor()), [5])
self.assertEqual(johnson_su.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(johnson_su.event_shape_tensor()), [])
self.assertEqual(johnson_su.event_shape, tf.TensorShape([]))
def testJohnsonSUShapeWithPlaceholders(self):
skewness = tf1.placeholder_with_default(np.float32(5), shape=None)
tailweight = tf1.placeholder_with_default(np.float32(5), shape=None)
mu = tf1.placeholder_with_default(np.float32(5), shape=None)
sigma = tf1.placeholder_with_default(
np.float32([1.0, 2.0]), shape=None)
johnson_su = tfd.JohnsonSU(skewness=skewness, tailweight=tailweight, loc=mu,
scale=sigma, validate_args=True)
# get_batch_shape should return an '<unknown>' tensor (graph mode only).
self.assertEqual(johnson_su.event_shape, ())
self.assertEqual(johnson_su.batch_shape,
tf.TensorShape([2] if tf.executing_eagerly() else None))
self.assertAllEqual(self.evaluate(johnson_su.event_shape_tensor()), [])
self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()), [2])
def testVariableScale(self):
x = tf.Variable(1.)
d = tfd.JohnsonSU(skewness=0., tailweight=2., loc=0., scale=x,
validate_args=True)
self.evaluate([v.initializer for v in d.variables])
self.assertIs(x, d.scale)
self.assertEqual(0., self.evaluate(d.mean()))
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([x.assign(-1.)]):
self.evaluate(d.mean())
def testIncompatibleArgShapes(self):
scale = tf1.placeholder_with_default(tf.ones([4, 1]), shape=None)
with self.assertRaisesRegexp(Exception, r'Incompatible shapes'):
d = tfd.JohnsonSU(skewness=1., tailweight=2., loc=tf.zeros([2, 3]),
scale=scale, validate_args=True)
self.evaluate(d.batch_shape_tensor())
def testBatchSamplesAreIndependent(self):
num_samples = 1000
d = tfd.JohnsonSU(loc=[0., 0.], scale=1., skewness=0., tailweight=1.)
xs = d.sample(num_samples, seed=test_util.test_seed())
cov = 1. / num_samples * tf.matmul(xs, xs, transpose_a=True)
self.assertAllClose(cov / d.variance(), tf.eye(2), atol=0.4)
if __name__ == '__main__':
test_util.main()
|
packages/regression_model/regression_model/processing/errors.py | avkmal/deploying-machine-learning-models | 477 | 12676936 | class BaseError(Exception):
"""Base package error."""
class InvalidModelInputError(BaseError):
"""Model input contains an error."""
|
lib/dataset/argoverse_convertor.py | decisionforce/mmTransformer | 199 | 12676961 | import os
import pickle
import re
import sys
from typing import Any, Dict, List
import numpy as np
import pandas as pd
from argoverse.data_loading.argoverse_forecasting_loader import \
ArgoverseForecastingLoader
from argoverse.map_representation.map_api import ArgoverseMap
from tqdm import tqdm
from .preprocess_utils.feature_utils import compute_feature_for_one_seq, save_features
from .preprocess_utils.map_utils_vec import save_map
# vectorization
from .vectorization import VectorizedCase
class ArgoverseConvertor(object):
def __init__(self, cfg):
self.data_dir = cfg['DATA_DIR']
self.obs_len = cfg['OBS_LEN']
self.lane_radius = cfg['LANE_RADIUS']
self.object_radius = cfg['OBJ_RADIUS']
self.raw_dataformat = cfg['RAW_DATA_FORMAT']
self.am = ArgoverseMap()
self.Afl = ArgoverseForecastingLoader
self.out_dir = cfg['INTERMEDIATE_DATA_DIR']
self.save_dir_pretext = cfg['info_prefix']
self.specific_data_fold_list = cfg['specific_data_fold_list']
# vectorization
self.vec_processor = VectorizedCase(cfg['vectorization_cfg'])
def preprocess_map(self):
os.makedirs(self.out_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.out_dir, 'map.pkl')):
print("Processing maps ...")
save_map(self.out_dir)
            print('Map is saved at ' + os.path.join(self.out_dir, 'map.pkl'))
def process(self,):
# preprocess the map
self.preprocess_map()
# storage the case infomation
data_info = {}
for folder in os.listdir(self.data_dir):
if folder not in self.specific_data_fold_list:
continue
afl = self.Afl(os.path.join(self.data_dir, folder, 'data'))
info_dict = {}
data_info[folder] = {}
for path_name_ext in tqdm(afl.seq_list):
afl_ = afl.get(path_name_ext)
path, name_ext = os.path.split(path_name_ext)
name, ext = os.path.splitext(name_ext)
info_dict[name] = self.process_case(afl_.seq_df)
out_path = os.path.join(
self.out_dir, self.save_dir_pretext + f'{folder}.pkl')
with open(out_path, 'wb') as f:
pickle.dump(info_dict, f, pickle.HIGHEST_PROTOCOL)
data_info[folder]['sample_num'] = len(afl.seq_list)
            print('Data is saved at ' + out_path)
# print info
print("Finish Preprocessing.")
for k in data_info.keys():
print('dataset name: ' + k +
'\n sample num: {}'.format(data_info[k]['sample_num']))
def preprocess_case(self, seq_df):
'''
Args:
            seq_df: pandas.DataFrame for a single Argoverse forecasting sequence.
'''
# retrieve info from csv
agent_feature, obj_feature_ls, nearby_lane_ids, norm_center, city_name =\
compute_feature_for_one_seq(
seq_df,
self.am,
self.obs_len,
self.lane_radius,
self.object_radius,
self.raw_dataformat,
viz=False,
mode='nearby'
)
# pack as the output
dic = save_features(
agent_feature, obj_feature_ls, nearby_lane_ids, norm_center, city_name
)
return dic
def process_case(self, seq_df):
# tensorized
data = self.preprocess_case(seq_df)
# vectorized
vec_dic = self.vec_processor.process_case(data)
return vec_dic
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Preprocess argoverse dataset')
parser.add_argument('config', help='config file path')
args = parser.parse_args()
from config.Config import Config
cfg = Config.fromfile(args.config)
preprocess_cfg = cfg.get('preprocess_dataset')
processor = ArgoverseConvertor(preprocess_cfg)
processor.process()
|
picam.py | PiSimo/PiCamNN | 111 | 12676968 | #!/usr/bin/python3
import cv2
import time
import threading
import subprocess
import numpy as np
from sys import exit
from os import system
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_eval, yolo_head
'''
---# GLOBAL VARIABLES #---
'''
#USER SETTINGS :
maxDays = 7 #The recorded videos will be destroyed after "maxDays" days
baseFolder = "/var/www/html/" #Apache's base folder
scriptFolder = "/home/pi/PiCamNN/" #The folder which contains main script (picam.py)
num_cam = -1 #Number of the camera (if -1 it will be the first camera read by the system)
frame_check = 17 #Frames to check before quit
time_chunck = 15 #Time to consider for a new action
telegram_user = "" #Your telegram user name so all the images will be sent to your telegram char with yourself
#IMAGES VARIABLES:
w = 0 #width
h = 0 #height
#Image processing vars:
blur1 = 2
blur2 = 1
erodeval = 7
#Volatile arrays:
frames = []
times = []
#General Log file
flog = open(baseFolder+"logs","a")
'''
---# END #---
'''
#when an error occur
def printExit(out):
print(out,"\nClosing....!")
exit(-1)
#Updating index.html :
# 1 Opening index.html
# 2 Adding new link for the new video
# 3 If there are more then maxDays links removing oldest one
# 4 Removing also the oldest video
def handleFile(name):
print("[PiCamNN] Updating file...")
f = open(baseFolder+"index.html","r")
cont = f.read()
f.close()
if cont.find(name) != -1:
print("[PiCamNN] File has been update yet !")
return False
f = open(baseFolder+"index.html","w")
lines = cont.split("\n")
day = 0
go = False
for i in range(len(lines)-1):
if lines[i].find("<!-- S -->") != -1:
i += 1
f.write("<!-- S -->\n <a href=\"{}.avi\" class=\"file\" >{}.avi</a><br />\n".format(name,name))
day += 1
go = True
elif lines[i].find("<!-- E -->") != -1:
f.write("{}\n".format(lines[i]))
go = False
if day > maxDays:
rm = lines[i-1]
rm = rm[rm.find("\"")+1:len(rm)]
rm = rm[0:rm.find("\"")]
try:
system("rm {}".format(baseFolder+rm))
print("[PiCamNN] Old file removed.")
except:
print("[PiCamNN] An error occured while removing old file!")
elif go:
day +=1
if day <= maxDays:f.write("{}\n".format(lines[i]))
else:f.write("{}\n".format(lines[i]))
f.close()
print("[PiCamNN] index.html UPDATED")
return True
# Some morphological operations on two input frames to check for movements
def movement(mat_1,mat_2):
mat_1_gray = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
mat_1_gray = cv2.blur(mat_1_gray,(blur1,blur1))
_,mat_1_gray = cv2.threshold(mat_1_gray,100,255,0)
mat_2_gray = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
mat_2_gray = cv2.blur(mat_2_gray,(blur1,blur1))
_,mat_2_gray = cv2.threshold(mat_2_gray,100,255,0)
mat_2_gray = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
mat_2_gray = cv2.blur(mat_2_gray,(blur2,blur2))
_,mat_2_gray = cv2.threshold(mat_2_gray,70,255,0)
mat_2_gray = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
mat_2_gray = cv2.dilate(mat_2_gray,np.ones((4,4)))
_, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:return True #If there were any movements
return False #if not
#Pedestrian Recognition Thread
def yoloThread():
global frames,times
model_path = scriptFolder+"tiny.h5" #Model weights
sess = K.get_session()
print("[PiCam] Loading anchors file...")
anchors = [1.08,1.19,3.42,4.41,6.63,11.38,9.42,5.11,16.62,10.52] #Tiny Yolo anchors' values
anchors = np.array(anchors).reshape(-1, 2)
print("[PiCam] Loading yolo model ({})...".format(scriptFolder+"tiny.h5"))
yolo_model = load_model(model_path) #Loading Tiny YOLO
num_anchors = len(anchors)
    print('[PiCam] YOLO model loaded ({})!'.format(model_path))
model_image_size = yolo_model.layers[0].input_shape[1:3] #Get input shape
yolo_outputs = yolo_head(yolo_model.output, anchors, 20)
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(yolo_outputs,input_image_shape,score_threshold=0.3,iou_threshold=0.4)
num = 0 #Starting Photo's name
old_time = 0.0 #Latest time
print("[PiCam] YOLO Thread started!")
### Loop:
while True:
if len(frames) != 0:
try:
cv2.waitKey(17)
mat = frames[0] #Get First frame with movements
mat = cv2.resize(mat,(model_image_size[0],model_image_size[1]))
in_mat = np.array(mat,dtype='float32')
in_mat /= 255. #Removing mean
in_mat = np.expand_dims(in_mat, 0)
if (times[0] - old_time) > time_chunck:
#Searching for detection:
out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes],feed_dict={yolo_model.input: in_mat,input_image_shape: [mat.shape[1], mat.shape[0]],K.learning_phase(): 0})
if len(out_boxes) > 0:
writ = False
xs,ys = [],[] #X's and Y's coordinate
for i, c in reversed(list(enumerate(out_classes))):
if c == 14: #14 is the label for persons
writ = True
box = out_boxes[i]
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(mat.shape[1], np.floor(bottom + 0.5).astype('int32'))
right = min(mat.shape[0], np.floor(right + 0.5).astype('int32'))
xs.append(left+i)
xs.append(right-i)
ys.append(top+i)
ys.append(bottom-i)
if writ:
img_name = scriptFolder+"imgs/{}.png".format(num)
cv2.imwrite(img_name,mat[min(ys):max(ys),min(xs):max(xs)]) #Only saving the rectangle in which persons' got detected
out_s = "[{}] Detected person (taken {}s)!\n".format(time.strftime("%H:%M:%S"),round(time.time()-times[0])) #Log output
print(out_s)
flog.write(out_s)
flog.flush()
                            try: #Preventing problems like no connection; I've used subprocess to set a timeout
subprocess.call("telegram-cli -W -e \'send_photo {} {} \' ".format(telegram_user,img_name),timeout=30,shell=True)
except Exception as exc:
print("[PiCam] Some error occured in YOLO Thread ({}) :".format(time.strftime("%H:%M:%S")),exc)
num += 1
old_time = times[0] #Updating detection time
except Exception as ex:
print("[PiCam] Some error occured in YOLO Thread ({}) :".format(time.strftime("%H:%M:%S")),ex)
del times[0] #Deleting first Detection time
del frames[0] #Deleting first Frame
cv2.waitKey(50)
'''
Main code from here :
'''
if __name__ == "__main__":
print("Starting PiCam....")
name = time.strftime("%c").replace(" ","_")[0:10]
#Camera Input
cap = None
try:
cap = cv2.VideoCapture(num_cam) #Trying to open camera
_,dim = cap.read()
if not _ or dim.shape == (0,0,0) :
printExit("[PiCam] Error occured when opening the camera stream!")
h = dim.shape[0]
w = dim.shape[1]
print("[PiCam] Height:{} | Width:{}".format(h,w))
except:
printExit("[PiCam] Error occured when opening the camera stream!")
#Video Output
writer = None
err = "[PiCam] Error occured when opening the output video stream!"
load = handleFile(name) #Updating web_file
if not load:system("mv {}.avi {}_.avi".format(baseFolder+name,baseFolder+name))
writer = cv2.VideoWriter(baseFolder+name+".avi", cv2.VideoWriter_fourcc(*"MJPG"), 21,(w,h), True)
if not writer.isOpened():
printExit(err) #If output Stream is unavailable
#Loading video file of the same day:
if not load:
try:
print("[PiCam] Loading old video File...",end="")
read = cv2.VideoCapture(baseFolder+name+"_.avi")
_,mat = read.read()
while _ :
if mat.shape == (0,0,0) or mat.shape[0] != h or mat.shape[1] != w:
print("[PiCam] Couldn't load old file skipping(shape {})...!".format(mat.shape))
break
writer.write(mat)
_,mat = read.read()
del read,mat
print("loaded!")
except:
print("\n[PiCam] Couldn't load old file skipping...!")
system("rm {}_.avi".format(baseFolder+name)) #Removing old video file
#Starting Yolo thread
yoloThread = threading.Thread(target=yoloThread)
print("[PiCam] Starting Yolo Thread....")
yoloThread.start()
day = time.strftime("%d") #startin' day
frc = 0 #Frame count
#Loop's start
print("[PiCam] Starting main loop...")
while True:
try:
_,a = cap.read()
if a.shape == (0,0,0):
#If Frame was empty trying frame_check times and then breaking program
for i in range(frame_check):
_,a = cap.read()
if a.shape != (0,0,0):break
if i == frame_check-1:printExit("[PiCam] Error with camera stream!")
cv2.waitKey(33)
_,b = cap.read()
move = movement(a,b)
#if we have got a movement
if move:
print("[PiCam] Movement ({})".format(time.strftime("%H:%M:%S")))
            if frc % 2 == 0: #Only every second frame with movement is passed to the YOLO Thread
frames.append(b.copy()) #Frame with movement
times.append(time.time())#Detection Time
frc += 1 #FrameCount +1
cv2.putText(b,name.replace("_"," ")+" "+time.strftime("%H:%M:%S"),(50,h - 50),cv2.FONT_HERSHEY_SIMPLEX, 2,(255,255,255))
writer.write(cv2.resize(b,(w,h))) #Adding to File
if time.strftime("%d") != day:
writer.release() #Closing old video output
frc = 0
print("[PiCam] Cleaning imgs dir...")
system("rm {}".format(scriptFolder+"imgs/*"))
#Handling FLOG:
flog.close()
system("echo '### PiCam Live Logs###' > {}".format(baseFolder+"logs")) #Cleaning Logs file
flog = open(baseFolder+"logs","a")
#FLOG end
print("[PiCam] New day! Restarting video output....")
name = time.strftime("%c").replace(" ","_")[0:10]
writer = cv2.VideoWriter(baseFolder+name+".avi", cv2.VideoWriter_fourcc(*"MJPG"), 21,(w,h), True)
print("[PiCam] Updating index.html...")
handleFile(name)
day = time.strftime("%d")
print("[PiCam] Done! Resuming main loop...")
except Exception as ex:
print("Some error occured : ",ex)
sys.exit(-1)
|
examples/custom_code/client.py | koskotG/ebonite | 270 | 12676971 | import sys
import requests
def main():
try:
value = int(sys.argv[1])
except (IndexError, ValueError):
print(f'Usage: {sys.argv[0]} [integer]')
return
payload = {'vector': {'values': [{'value': value}]}}
r = requests.post('http://localhost:9000/predict', json=payload)
r.raise_for_status()
print(r.json()['data']['values'][0]['value'])
if __name__ == '__main__':
main()
|
src/oci/data_integration/models/create_connection_from_object_storage.py | Manny27nyc/oci-python-sdk | 249 | 12676990 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .create_connection_details import CreateConnectionDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateConnectionFromObjectStorage(CreateConnectionDetails):
"""
The details to create an Oracle Object Storage data asset connection.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateConnectionFromObjectStorage object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.CreateConnectionFromObjectStorage.model_type` attribute
of this class is ``ORACLE_OBJECT_STORAGE_CONNECTION`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this CreateConnectionFromObjectStorage.
Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION"
:type model_type: str
:param key:
The value to assign to the key property of this CreateConnectionFromObjectStorage.
:type key: str
:param model_version:
The value to assign to the model_version property of this CreateConnectionFromObjectStorage.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this CreateConnectionFromObjectStorage.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this CreateConnectionFromObjectStorage.
:type name: str
:param description:
The value to assign to the description property of this CreateConnectionFromObjectStorage.
:type description: str
:param object_status:
The value to assign to the object_status property of this CreateConnectionFromObjectStorage.
:type object_status: int
:param identifier:
The value to assign to the identifier property of this CreateConnectionFromObjectStorage.
:type identifier: str
:param connection_properties:
The value to assign to the connection_properties property of this CreateConnectionFromObjectStorage.
:type connection_properties: list[oci.data_integration.models.ConnectionProperty]
:param registry_metadata:
The value to assign to the registry_metadata property of this CreateConnectionFromObjectStorage.
:type registry_metadata: oci.data_integration.models.RegistryMetadata
:param credential_file_content:
The value to assign to the credential_file_content property of this CreateConnectionFromObjectStorage.
:type credential_file_content: str
:param user_id:
The value to assign to the user_id property of this CreateConnectionFromObjectStorage.
:type user_id: str
:param finger_print:
The value to assign to the finger_print property of this CreateConnectionFromObjectStorage.
:type finger_print: str
:param pass_phrase:
The value to assign to the pass_phrase property of this CreateConnectionFromObjectStorage.
:type pass_phrase: str
"""
self.swagger_types = {
'model_type': 'str',
'key': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_status': 'int',
'identifier': 'str',
'connection_properties': 'list[ConnectionProperty]',
'registry_metadata': 'RegistryMetadata',
'credential_file_content': 'str',
'user_id': 'str',
'finger_print': 'str',
            'pass_phrase': 'str'
}
self.attribute_map = {
'model_type': 'modelType',
'key': 'key',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'name': 'name',
'description': 'description',
'object_status': 'objectStatus',
'identifier': 'identifier',
'connection_properties': 'connectionProperties',
'registry_metadata': 'registryMetadata',
'credential_file_content': 'credentialFileContent',
'user_id': 'userId',
'finger_print': 'fingerPrint',
            'pass_phrase': 'passPhrase'
}
self._model_type = None
self._key = None
self._model_version = None
self._parent_ref = None
self._name = None
self._description = None
self._object_status = None
self._identifier = None
self._connection_properties = None
self._registry_metadata = None
self._credential_file_content = None
self._user_id = None
self._finger_print = None
self._pass_phrase = None
self._model_type = 'ORACLE_OBJECT_STORAGE_CONNECTION'
@property
def credential_file_content(self):
"""
Gets the credential_file_content of this CreateConnectionFromObjectStorage.
The credential file content from an Oracle Object Storage wallet.
:return: The credential_file_content of this CreateConnectionFromObjectStorage.
:rtype: str
"""
return self._credential_file_content
@credential_file_content.setter
def credential_file_content(self, credential_file_content):
"""
Sets the credential_file_content of this CreateConnectionFromObjectStorage.
The credential file content from an Oracle Object Storage wallet.
:param credential_file_content: The credential_file_content of this CreateConnectionFromObjectStorage.
:type: str
"""
self._credential_file_content = credential_file_content
@property
def user_id(self):
"""
Gets the user_id of this CreateConnectionFromObjectStorage.
The OCI user OCID for the user to connect to.
:return: The user_id of this CreateConnectionFromObjectStorage.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this CreateConnectionFromObjectStorage.
The OCI user OCID for the user to connect to.
:param user_id: The user_id of this CreateConnectionFromObjectStorage.
:type: str
"""
self._user_id = user_id
@property
def finger_print(self):
"""
Gets the finger_print of this CreateConnectionFromObjectStorage.
The fingerprint for the user.
:return: The finger_print of this CreateConnectionFromObjectStorage.
:rtype: str
"""
return self._finger_print
@finger_print.setter
def finger_print(self, finger_print):
"""
Sets the finger_print of this CreateConnectionFromObjectStorage.
The fingerprint for the user.
:param finger_print: The finger_print of this CreateConnectionFromObjectStorage.
:type: str
"""
self._finger_print = finger_print
@property
def pass_phrase(self):
"""
Gets the pass_phrase of this CreateConnectionFromObjectStorage.
The passphrase for the connection.
:return: The pass_phrase of this CreateConnectionFromObjectStorage.
:rtype: str
"""
return self._pass_phrase
@pass_phrase.setter
def pass_phrase(self, pass_phrase):
"""
Sets the pass_phrase of this CreateConnectionFromObjectStorage.
The passphrase for the connection.
:param pass_phrase: The pass_phrase of this CreateConnectionFromObjectStorage.
:type: str
"""
self._pass_phrase = pass_phrase
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
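# Illustrative usage sketch (not part of the generated SDK module): builds the
# model with hypothetical placeholder values (not real OCIDs) and prints its
# formatted representation. Assumes the `oci` package is installed so the
# imports at the top of this file resolve.
if __name__ == "__main__":
    example_connection = CreateConnectionFromObjectStorage(
        name="object_storage_conn",
        identifier="OBJECT_STORAGE_CONN",
        description="Example Object Storage connection",
        user_id="ocid1.user.oc1..exampleuniqueid",
        finger_print="aa:bb:cc:dd")
    print(example_connection)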
|
src/model/ResNet.py | alpayuz/DeepDeblur-PyTorch | 158 | 12676998 | <reponame>alpayuz/DeepDeblur-PyTorch<filename>src/model/ResNet.py
import torch.nn as nn
from . import common
def build_model(args):
return ResNet(args)
class ResNet(nn.Module):
def __init__(self, args, in_channels=3, out_channels=3, n_feats=None, kernel_size=None, n_resblocks=None, mean_shift=True):
super(ResNet, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.n_feats = args.n_feats if n_feats is None else n_feats
self.kernel_size = args.kernel_size if kernel_size is None else kernel_size
self.n_resblocks = args.n_resblocks if n_resblocks is None else n_resblocks
self.mean_shift = mean_shift
self.rgb_range = args.rgb_range
self.mean = self.rgb_range / 2
modules = []
modules.append(common.default_conv(self.in_channels, self.n_feats, self.kernel_size))
for _ in range(self.n_resblocks):
modules.append(common.ResBlock(self.n_feats, self.kernel_size))
modules.append(common.default_conv(self.n_feats, self.out_channels, self.kernel_size))
self.body = nn.Sequential(*modules)
def forward(self, input):
if self.mean_shift:
input = input - self.mean
output = self.body(input)
if self.mean_shift:
output = output + self.mean
return output
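if __name__ == "__main__":
    # Illustrative sketch only (not part of the original repository): runs a
    # forward pass with a hypothetical `args` namespace. Assumes PyTorch is
    # installed and that the file is executed as a module (e.g.
    # `python -m src.model.ResNet`) so the relative import of `common` resolves.
    from types import SimpleNamespace

    import torch

    args = SimpleNamespace(n_feats=64, kernel_size=5, n_resblocks=19, rgb_range=255)
    model = build_model(args)
    dummy = torch.rand(1, 3, 64, 64) * args.rgb_range
    with torch.no_grad():
        restored = model(dummy)
    print(restored.shape)  # expected: torch.Size([1, 3, 64, 64])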
|
PyFin/Analysis/DataProviders/__init__.py | rpatil524/Finance-Python | 325 | 12677002 | # -*- coding: utf-8 -*-
u"""
Created on 2015-8-19
@author: cheng.li
"""
from PyFin.Analysis.DataProviders.DataProviders import DataProvider
__all__ = ['DataProvider']
|
tests/test_sleeping.py | MarkusH/kopf | 1,038 | 12677025 | import asyncio
import pytest
from kopf.engines.sleeping import sleep_or_wait
async def test_the_only_delay_is_awaited(timer):
with timer:
unslept = await asyncio.wait_for(sleep_or_wait(0.10), timeout=1.0)
assert 0.10 <= timer.seconds < 0.11
assert unslept is None
async def test_the_shortest_delay_is_awaited(timer):
with timer:
unslept = await asyncio.wait_for(sleep_or_wait([0.10, 0.20]), timeout=1.0)
assert 0.10 <= timer.seconds < 0.11
assert unslept is None
async def test_specific_delays_only_are_awaited(timer):
with timer:
unslept = await asyncio.wait_for(sleep_or_wait([0.10, None]), timeout=1.0)
assert 0.10 <= timer.seconds < 0.11
assert unslept is None
async def test_passed_delays_skip_sleeping(timer):
with timer:
unslept = await asyncio.wait_for(sleep_or_wait([0.10, -10]), timeout=1.0)
assert timer.seconds < 0.01
assert unslept is None
@pytest.mark.parametrize('delays', [
pytest.param([], id='empty-list'),
pytest.param([None], id='list-of-none'),
])
async def test_no_delays_skip_sleeping(timer, delays):
with timer:
unslept = await asyncio.wait_for(sleep_or_wait(delays), timeout=1.0)
assert timer.seconds < 0.01
assert unslept is None
async def test_by_event_set_before_time_comes(timer):
event = asyncio.Event()
asyncio.get_running_loop().call_later(0.07, event.set)
with timer:
unslept = await asyncio.wait_for(sleep_or_wait(0.10, event), timeout=1.0)
assert unslept is not None
assert 0.02 <= unslept <= 0.04
assert 0.06 <= timer.seconds <= 0.08
async def test_with_zero_time_and_event_initially_cleared(timer):
event = asyncio.Event()
event.clear()
with timer:
unslept = await asyncio.wait_for(sleep_or_wait(0, event), timeout=1.0)
assert timer.seconds <= 0.01
assert unslept is None
async def test_with_zero_time_and_event_initially_set(timer):
event = asyncio.Event()
event.set()
with timer:
unslept = await asyncio.wait_for(sleep_or_wait(0, event), timeout=1.0)
assert timer.seconds <= 0.01
assert not unslept # 0/None; undefined for such case: both goals reached.
|
Chapter08/models.py | yoyboy/Software-Architecture-with-Python | 103 | 12677038 | # Code Listing #1
"""
Glossary models - Showing django admin view
"""
from django.db import models
class GlossaryTerm(models.Model):
""" Model for describing a glossary word (term) """
term = models.CharField(max_length=1024)
meaning = models.CharField(max_length=1024)
meaning_html = models.CharField('Meaning with HTML markup',
max_length=4096, null=True, blank=True)
example = models.CharField(max_length=4096, null=True, blank=True)
# can be a ManyToManyField?
domains = models.CharField(max_length=128, null=True, blank=True)
notes = models.CharField(max_length=2048, null=True, blank=True)
url = models.CharField('URL', max_length=2048, null=True, blank=True)
name = models.ForeignKey('GlossarySource', verbose_name='Source', blank=True)
def __unicode__(self):
return self.term
class Meta:
unique_together = ('term', 'meaning', 'url')
class GlossarySource(models.Model):
""" Model for describing a glossary source """
name = models.CharField(max_length=256, primary_key=True)
url = models.CharField(max_length=2048, blank=True)
description = models.CharField(max_length=512)
# can be a ManyToManyField?
tags = models.CharField(max_length=1024, blank=True)
mainlang = models.CharField(max_length=8, default='en_US')
singlepage = models.BooleanField(default=True)
translations = models.BooleanField(default=False)
def __unicode__(self):
return self.name
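# Illustrative usage sketch (comments only, since Django models need a
# configured settings module before they can be imported): a typical query
# against the models above might look like
#     GlossaryTerm.objects.filter(term__icontains="architecture").select_related("name")
# where "name" is the ForeignKey from GlossaryTerm to GlossarySource.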
|
zentral/contrib/munki/forms.py | janheise/zentral | 634 | 12677058 | <reponame>janheise/zentral<filename>zentral/contrib/munki/forms.py<gh_stars>100-1000
from django import forms
from zentral.core.probes.forms import BaseCreateProbeForm
from zentral.utils.forms import CommaSeparatedQuotedStringField
from .models import Configuration, Enrollment, PrincipalUserDetectionSource
from .probes import MunkiInstallProbe
class PrincipalUserDetectionSourceWidget(forms.CheckboxSelectMultiple):
def __init__(self, attrs=None, choices=()):
super().__init__(attrs, choices=PrincipalUserDetectionSource.choices())
def format_value(self, value):
if isinstance(value, str) and value:
value = [v.strip() for v in value.split(",")]
return super().format_value(value)
class ConfigurationForm(forms.ModelForm):
class Meta:
model = Configuration
fields = "__all__"
widgets = {"principal_user_detection_sources": PrincipalUserDetectionSourceWidget}
class EnrollmentForm(forms.ModelForm):
class Meta:
model = Enrollment
fields = "__all__"
def __init__(self, *args, **kwargs):
self.configuration = kwargs.pop("configuration", None)
kwargs.pop("enrollment_only", None)
kwargs.pop("standalone", None)
super().__init__(*args, **kwargs)
if self.configuration:
self.fields["configuration"].widget = forms.HiddenInput()
class UpdateInstallProbeForm(forms.Form):
installed_item_names = CommaSeparatedQuotedStringField(help_text="Comma separated names of the installed items",
required=False)
install_types = forms.ChoiceField(choices=(('install,removal', 'install & removal'),
('install', 'install'),
('removal', 'removal')),
initial='install',
widget=forms.RadioSelect)
unattended_installs = forms.ChoiceField(choices=(('', 'yes & no'),
('1', 'yes'),
('0', 'no')),
widget=forms.RadioSelect,
required=False)
def get_body(self):
cleaned_data = self.cleaned_data
# install types
body = {'install_types': sorted(cleaned_data['install_types'].split(','))}
# installed item names
installed_item_names = cleaned_data.get('installed_item_names')
if installed_item_names:
body['installed_item_names'] = installed_item_names
# unattended installs
try:
unattended_installs = bool(int(cleaned_data.get('unattended_installs')))
except ValueError:
pass
else:
body['unattended_installs'] = unattended_installs
return body
@staticmethod
def get_probe_initial(probe):
initial = {'installed_item_names': sorted(probe.installed_item_names),
'install_types': ','.join(sorted(probe.install_types))}
if probe.unattended_installs is None:
initial['unattended_installs'] = ''
else:
initial['unattended_installs'] = str(int(probe.unattended_installs))
return initial
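# Illustrative sketch (not part of the original module): for cleaned_data such
# as {'install_types': 'install,removal', 'installed_item_names': ['munkitools'],
# 'unattended_installs': '1'}, get_body() above is expected to return roughly
# {'install_types': ['install', 'removal'],
#  'installed_item_names': ['munkitools'],
#  'unattended_installs': True}.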
class CreateInstallProbeForm(BaseCreateProbeForm, UpdateInstallProbeForm):
model = MunkiInstallProbe
field_order = ("name", "installed_item_names", "unattended_yes", "unattended_no")
|
tinymongo/results.py | sumants-dev/tinymongo | 177 | 12677079 | <filename>tinymongo/results.py
"""Result class definitions."""
class _WriteResult(object):
"""Base class for write result classes."""
def __init__(self, acknowledged=True):
        self.acknowledged = acknowledged  # here only for PyMongo compat
class InsertOneResult(_WriteResult):
"""The return type for :meth:`~tinymongo.TinyMongoCollection.insert_one`.
"""
__slots__ = ("__inserted_id", "__acknowledged", "__eid")
def __init__(self, eid, inserted_id, acknowledged=True):
self.__eid = eid
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
"""The inserted document's _id."""
return self.__inserted_id
@property
def eid(self):
"""The inserted document's tinyDB eid."""
return self.__eid
class InsertManyResult(_WriteResult):
"""The return type for :meth:`~tinymongo.TinyMongoCollection.insert_many`.
"""
__slots__ = ("__inserted_ids", "__acknowledged", "__eids")
def __init__(self, eids, inserted_ids, acknowledged=True):
self.__eids = eids
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
"""A list of _ids of the inserted documents, in the order provided."""
return self.__inserted_ids
@property
def eids(self):
"""A list of _ids of the inserted documents, in the order provided."""
return self.__eids
class UpdateResult(_WriteResult):
"""The return type for :meth:`~tinymongo.TinyMongoCollection.update_one`,
:meth:`~tinymongo.TinyMongoCollection.update_many`, and
:meth:`~tinymongo.TinyMongoCollection.replace_one`.
"""
__slots__ = ("__raw_result", "__acknowledged")
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
"""The raw result document returned by the server."""
return self.__raw_result
@property
def matched_count(self):
"""The number of documents matched for this update."""
# TODO: Implement this
@property
def modified_count(self):
"""The number of documents modified.
"""
# TODO: Implement this
@property
def upserted_id(self):
"""The _id of the inserted document if an upsert took place. Otherwise
``None``.
"""
# TODO: Implement this
class DeleteResult(_WriteResult):
"""The return type for :meth:`~tinymongo.TinyMongoCollection.delete_one`
and :meth:`~tinymongo.TinyMongoCollection.delete_many`"""
__slots__ = ("__raw_result", "__acknowledged")
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
"""The raw result document returned by the server."""
return self.__raw_result
@property
def deleted_count(self):
"""The number of documents deleted."""
if isinstance(self.raw_result, list):
return len(self.raw_result)
else:
return self.raw_result
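if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): shows how
    # the result objects above are typically consumed; the ids are made up.
    insert_result = InsertOneResult(eid=1, inserted_id="doc-1")
    print(insert_result.inserted_id, insert_result.eid, insert_result.acknowledged)

    delete_result = DeleteResult(raw_result=[{"_id": "doc-1"}])
    print(delete_result.deleted_count)  # 1, because raw_result is a list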
|
code/source/inception/inception_score_tf.py | alexban94/msci_project | 1,086 | 12677085 | <gh_stars>1000+
# Code derived from https://github.com/openai/improved-gan/tree/master/inception_score
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import scipy.linalg
import math
import sys
import chainer
from chainer import functions as F
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
last_layer = None
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
def inception_forward(images, layer):
assert (type(images[0]) == np.ndarray)
assert (len(images[0].shape) == 3)
assert (np.max(images[0]) > 10)
assert (np.min(images[0]) >= 0.0)
bs = 100
images = images.transpose(0, 2, 3, 1)
with tf.Session(config=config) as sess:
preds = []
n_batches = int(math.ceil(float(len(images)) / float(bs)))
for i in range(n_batches):
sys.stdout.write(".")
sys.stdout.flush()
inp = images[(i * bs):min((i + 1) * bs, len(images))]
pred = sess.run(layer, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
return preds
def get_mean_and_cov(images):
before_preds = inception_forward(images, last_layer)
m = np.mean(before_preds, 0)
cov = np.cov(before_preds, rowvar=False)
return m, cov
def get_fid(images, ref_stats=None, images_ref=None, splits=10):
before_preds = inception_forward(images, last_layer)
if ref_stats is None:
if images_ref is None:
raise ValueError('images_ref should be provided if ref_stats is None')
m_ref, cov_ref = get_mean_and_cov(images_ref)
fids = []
for i in range(splits):
part = before_preds[(i * before_preds.shape[0] // splits):((i + 1) * before_preds.shape[0] // splits), :]
m_gen = np.mean(part, 0)
cov_gen = np.cov(part, rowvar=False)
fid = np.sum((m_ref - m_gen) ** 2) + np.trace(
cov_ref + cov_gen - 2 * scipy.linalg.sqrtm(np.dot(cov_ref, cov_gen)))
fids.append(fid)
return np.mean(fids), np.std(fids)
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
preds = inception_forward(images, softmax)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_accuracy(images, labels):
batch_size = 100
if isinstance(images, (list, tuple)):
ims_list = images
ys_list = []
for ims in ims_list:
n, _, _, _ = ims.shape
n_batches = int(math.ceil(float(n) / float(batch_size)))
print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, n, n_batches))
print('Calculating inception accuracy...')
ys = inception_forward(ims, softmax)[:, 1:1001]
ys_list.append(ys)
ys = sum(ys_list) / len(ys_list)
else:
n, _, _, _, = images.shape
n_batches = int(math.ceil(float(n) / float(batch_size)))
print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, n, n_batches))
print('Calculating inception accuracy...')
ys = inception_forward(images, softmax)[:, 1:1001]
return F.accuracy(ys, labels).data
# This function is called automatically.
def _init_inception():
global softmax
global last_layer
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session(config=config) as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o._shape = tf.TensorShape(new_shape)
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
last_layer = tf.squeeze(pool3)
logits = tf.matmul(last_layer, w)
softmax = tf.nn.softmax(logits)
if softmax is None:
_init_inception()
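# Illustrative usage sketch (comments only, since this module downloads the
# Inception graph and starts TensorFlow sessions on import): `images` is
# expected to be an array of shape (N, 3, H, W) with values in [0, 255], e.g.
#     images = np.random.randint(0, 256, size=(200, 3, 64, 64)).astype(np.float32)
#     mean_score, std_score = get_inception_score(images, splits=10)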
|
gooey/gui/components/options/options.py | Jacke/Gooey | 13,430 | 12677091 | from gooey.gui.components.filtering.prefix_filter import PrefixTokenizers
def _include_layout_docs(f):
"""
    Combines the layout_options docstring with the
wrapped function's doc string.
"""
f.__doc__ = (f.__doc__ or '') + LayoutOptions.__doc__
return f
def _include_global_option_docs(f):
"""
Combines docstrings for options available to
all widget types.
"""
_doc = """:param initial_value: Sets the initial value in the UI.
"""
f.__doc__ = (f.__doc__ or '') + _doc
return f
def _include_chooser_msg_wildcard_docs(f):
"""
    Combines the basic Chooser options (wildcard, message) docstring
with the wrapped function's doc string.
"""
_doc = """:param wildcard: Sets the wildcard, which can contain multiple file types, for
example: "BMP files (.bmp)|.bmp|GIF files (.gif)|.gif"
:param message: Sets the message that will be displayed on the dialog.
"""
f.__doc__ = (f.__doc__ or '') + _doc
return f
def _include_choose_dir_file_docs(f):
"""
    Combines the directory/file Chooser options (default_dir, default_file)
    docstring with the wrapped function's doc string.
"""
_doc = """:param default_dir: The default directory selected when the dialog spawns
:param default_file: The default filename used in the dialog
"""
f.__doc__ = (f.__doc__ or '') + _doc
return f
def LayoutOptions(label_color=None,
label_bg_color=None,
help_color=None,
help_bg_color=None,
error_color=None,
error_bg_color=None,
show_label=True,
show_help=True,
visible=True,
full_width=False):
"""
Layout Options:
---------------
Color options can be passed either as a hex string ('#ff0000') or as
a collection of RGB values (e.g. `[255, 0, 0]` or `(255, 0, 0)`)
:param label_color: The foreground color of the label text
:param label_bg_color: The background color of the label text.
:param help_color: The foreground color of the help text.
:param help_bg_color: The background color of the help text.
:param error_color: The foreground color of the error text (when visible).
:param error_bg_color: The background color of the error text (when visible).
:param show_label: Toggles whether or not to display the label text
:param show_help: Toggles whether or not to display the help text
:param visible: Hides the entire widget when False. Note: the widget
is still present in the UI and will still send along any
default values that have been provided in code. This option
is here for when you want to hide certain advanced / dangerous
inputs from your GUI users.
:param full_width: This is a layout hint for this widget. When True the widget
will fill the entire available space within a given row.
Otherwise, it will be sized based on the column rules
provided elsewhere.
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def TextField(initial_value=None, validator=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def PasswordField(initial_value=None, validator=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def IntegerField(initial_value=None, validator=None, min=0, max=100, increment=1, **layout_options):
"""
:param min: The minimum value allowed
:param max: The maximum value allowed
:param increment: The step size of the spinner
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def Slider(initial_value=None, validator=None, min=0, max=100, increment=1, **layout_options):
"""
:param min: The minimum value allowed
:param max: The maximum value allowed
:param increment: The step size of the slider
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def DecimalField(validator=None,
initial_value=None,
min=0.0,
max=1.0,
increment=0.01,
precision=2,
**layout_options):
"""
:param min: The minimum value allowed
:param max: The maximum value allowed
:param increment: The step size of the spinner
:param precision: The precision of the decimal (0-20)
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def TextArea(initial_value=None, height=None, readonly=False, validator=None, **layout_options):
"""
:param height: The height of the TextArea.
:param readonly: Controls whether or not user's may modify the contents
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def RichTextConsole(**layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def ListBox(initial_value=None, height=None, **layout_options):
"""
:param height: The height of the ListBox
"""
return _clean(locals())
# TODO: what are this guy's layout options..?
def MutexGroup(initial_selection=None, title=None, **layout_options):
"""
:param initial_selection: The index of the option which should be initially selected.
:param title: Adds the supplied title above the RadioGroup options (when present)
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def Dropdown(initial_value=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def Counter(initial_value=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def CheckBox(initial_value=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def BlockCheckBox(initial_value=None, checkbox_label=None, **layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
def FilterableDropdown(placeholder=None,
empty_message=None,
max_size=80,
search_strategy=None,
initial_value=None,
**layout_options):
"""
:param placeholder: Text to display when the user has provided no input
:param empty_message: Text to display if the user's query doesn't match anything
:param max_size: maximum height of the dropdown
:param search_strategy: see: PrefixSearchStrategy
"""
return _clean(locals())
def PrefixSearchStrategy(
choice_tokenizer=PrefixTokenizers.WORDS,
input_tokenizer=PrefixTokenizers.REGEX('\s'),
ignore_case=True,
operator='AND',
index_suffix=False):
"""
:param choice_tokenizer: See: PrefixTokenizers - sets the tokenization strategy
for the `choices`
:param input_tokenizer: See: PrefixTokenizers sets how the users's `input` get tokenized.
:param ignore_case: Controls whether or not to honor case while searching
    :param operator: see: `OperatorType` - controls whether or not individual
                     search tokens get `AND`ed or `OR`d together when
                     evaluating a match.
:param index_suffix: When enabled, generates a suffix-tree to enable efficient
partial-matching against any of the tokens.
"""
return {**_clean(locals()), 'type': 'PrefixFilter'}
@_include_layout_docs
@_include_global_option_docs
@_include_choose_dir_file_docs
@_include_chooser_msg_wildcard_docs
def FileChooser(wildcard=None,
default_dir=None,
default_file=None,
message=None,
initial_value=None,
**layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
@_include_chooser_msg_wildcard_docs
def DirectoryChooser(wildcard=None,
default_path=None,
message=None,
initial_value=None,
**layout_options):
"""
:param default_path: The default path selected when the dialog spawns
"""
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
@_include_choose_dir_file_docs
@_include_chooser_msg_wildcard_docs
def FileSaver(wildcard=None,
default_dir=None,
default_file=None,
message=None,
initial_value=None,
**layout_options):
return _clean(locals())
@_include_layout_docs
@_include_global_option_docs
@_include_choose_dir_file_docs
@_include_chooser_msg_wildcard_docs
def MultiFileSaver(wildcard=None,
default_dir=None,
default_file=None,
message=None,
initial_value=None,
**layout_options):
return _clean(locals())
def ExpressionValidator(test=None, message=None):
"""
Creates the data for a basic expression validator.
Your test function can be made up of any valid Python expression.
It receives the variable user_input as an argument against which to
perform its validation. Note that all values coming from Gooey
are in the form of a string, so you'll have to cast as needed
in order to perform your validation.
"""
return {**_clean(locals()), 'type': 'ExpressionValidator'}
def RegexValidator(test=None, message=None):
"""
Creates the data for a basic RegexValidator.
:param test: the regex expression. This should be the expression
directly (i.e. `test='\d+'`). Gooey will test
that the user's input satisfies this expression.
:param message: The message to display if the input doesn't match
the regex
"""
return {**_clean(locals()), 'type': 'RegexValidator'}
def ArgumentGroup(show_border=False,
show_underline=True,
label_color=None,
columns=None,
margin_top=None):
"""
:param show_border: When True a labeled border will surround all widgets added to this group.
:param show_underline: Controls whether or not to display the underline when using the default border style
:param label_color: The foreground color for the group name
:param columns: Controls the number of widgets on each row
:param margin_top: specifies the top margin in pixels for this group
"""
return _clean(locals())
def _clean(options):
cleaned = {k: v for k, v in options.items()
if v is not None and k != "layout_options"}
return {**options.get('layout_options', {}), **cleaned}
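if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): prints the
    # raw option dictionaries these helpers build. In a real Gooey program the
    # dictionaries are typically passed via the `gooey_options` keyword of
    # `GooeyParser.add_argument` (an assumption about usage; see the Gooey
    # docs). Running this directly assumes the `gooey` package is importable.
    print(TextField(initial_value="hello", show_label=False))
    # roughly: {'show_label': False, 'initial_value': 'hello'}
    print(RegexValidator(test=r'\d+', message='Digits only'))
    # roughly: {'test': '\\d+', 'message': 'Digits only', 'type': 'RegexValidator'}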
|
src/macad_gym/core/utils/map_explore.py | SHITIANYU-hue/macad-gym | 201 | 12677109 | #!/usr/bin/env python
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import itertools
try:
sys.path.append(
glob.glob('../../carla/PythonAPI/**/*%d.%d-%s.egg' %
(sys.version_info.major, sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import math
import random
import json
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-v",
"--viz-map",
action="store_true",
default=False,
help="Show map topology")
parser.add_argument(
"-e",
"--export-node-coord-map",
help="Export the map between spawn_points and node_ids"
" to the JSON file")
args = parser.parse_args()
def get_transform(vehicle_location, angle, d=6.4):
a = math.radians(angle)
location = carla.Location(d * math.cos(a), d * math.sin(a),
2.0) + vehicle_location
return carla.Transform(location, carla.Rotation(
yaw=180 + angle, pitch=-15))
def show_map_topology(world):
import matplotlib.pyplot as plt
topology = world.get_map().get_topology()
for segment in topology:
x1, y1 = segment[0].transform.location.x, segment[
0].transform.location.y
x2, y2 = segment[1].transform.location.x, segment[
1].transform.location.y
plt.plot([x1, x2], [y1, y2], marker='o')
plt.gca().invert_yaxis()
plt.show()
input()
def map_spawn_point_to_node(world) -> dict:
node_coord_map = dict()
node_id = itertools.count()
for location in world.get_map().get_spawn_points():
node_coord_map[next(node_id)] = [
location.location.x, location.location.y, location.location.z
]
return node_coord_map
def start_walker(ped1):
"""Set a walker in forward motion"""
ped_cmd = carla.WalkerControl()
ped_cmd.speed = 1.778
ped1.apply_control(ped_cmd)
def stop_walker(ped1):
"""Halt a walker"""
ped_cmd = carla.WalkerControl()
ped_cmd.speed = 0.0
ped1.apply_control(ped_cmd)
spawn_locs = {
"car1": {
"S": [19, -133, 0.3],
"E": [104, -132, 8]
},
"car2": {
"S": [84, -123, 8],
"E": [41, -137, 8]
},
"ped1": {
"S": [74, -126, 8],
"E": [92, -125, 8]
}
}
def start_scenario():
car_bp = random.choice(world.get_blueprint_library().filter('vehicle'))
car_bp.set_attribute("role_name", "hero")
car1_loc_s = carla.Location(*spawn_locs["car1"]["S"])
car2_loc_s = carla.Location(*spawn_locs["car2"]["S"])
ped1_loc_s = carla.Location(*spawn_locs["ped1"]["S"])
car1 = world.spawn_actor(
car_bp, carla.Transform(car1_loc_s, carla.Rotation(yaw=0)))
car1.set_autopilot(True)
car2 = world.spawn_actor(
car_bp, carla.Transform(car2_loc_s, carla.Rotation(yaw=-90)))
car2.set_autopilot(True)
ped_bp = random.choice(world.get_blueprint_library().filter('walker'))
ped1 = world.spawn_actor(
ped_bp, carla.Transform(ped1_loc_s, carla.Rotation(yaw=0)))
start_walker(ped1)
def get_traffic_lights(loc=carla.Location(0, 0, 0)):
tls = {}
for a in world.get_actors().filter("traffic.traffic_light"):
tls[a.id] = [
a.get_location().x,
a.get_location().y,
a.get_location().z
]
print("ID:", a.id, "loc:",
a.get_location().x,
a.get_location().y,
a.get_location().z)
# Sort traffic lights by their location.x
ans = sorted(tls.items(), key=lambda kv: kv[1][0])
return ans
# Main:
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
world = client.get_world()
if args.export_node_coord_map:
node_coord_map = map_spawn_point_to_node(world)
json.dump(
node_coord_map, open("TOWN04.json", 'w'), indent=2, sort_keys=True)
if args.viz_map:
    show_map_topology(world)
spectator = world.get_spectator()
spectator_loc = carla.Location(70, -123, 9)
spectator.set_transform(get_transform(spectator_loc, angle=160.0))
start_scenario()
"""
try:
angle = 0
while angle < 90:
timestamp = world.wait_for_tick()
angle += timestamp.delta_seconds * 90.0
spectator.set_transform(get_transform(vehicle.get_location(),
angle - 90))
# spectator.set_transform(get_transform(vehicle.get_location(), angle))
# input("Enter Key")
finally:
vehicle.destroy()
"""
|
imperative/python/test/unit/functional/test_functional_distributed_axis.py | Olalaye/MegEngine | 5,168 | 12677115 | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = x + 1
data = (x, y)
_x = np.split(x, 2, axis=axis)
_x = np.concatenate(_x, axis=0)
expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
all_to_all_output = all_to_all(
inp, split_axis=split_axis, concat_axis=concat_axis
)
gather_C = gather(inp, axis=concat_axis)
gather_B = gather(all_to_all_output, axis=split_axis)
if rank == 0:
return gather_B, gather_C
return all_to_all_output
func = trace(symbolic=symbolic)(func)
ret = func()
if rank == 0:
assert np.allclose(ret[0], ret[1])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
data = (x, y)
worker(data)
|
nova/virt/disk/vfs/api.py | zjzh/nova | 1,874 | 12677141 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
LOG = logging.getLogger(__name__)
class VFS(object):
"""Interface for manipulating disk image.
The VFS class defines an interface for manipulating files within
a virtual disk image filesystem. This allows file injection code
to avoid the assumption that the virtual disk image can be mounted
in the host filesystem.
All paths provided to the APIs in this class should be relative
to the root of the virtual disk image filesystem. Subclasses
will translate paths as required by their implementation.
"""
# Class level flag to indicate whether we can consider
# that guestfs is ready to be used.
guestfs_ready = False
@staticmethod
def instance_for_image(image, partition):
"""Get a VFS instance for the image
:param image: instance of nova.virt.image.model.Image
:param partition: the partition number to access
"""
LOG.debug("Instance for image image=%(image)s "
"partition=%(partition)s",
{'image': image, 'partition': partition})
LOG.debug("Using primary VFSGuestFS")
vfs = importutils.import_object(
"nova.virt.disk.vfs.guestfs.VFSGuestFS",
image, partition)
if not VFS.guestfs_ready:
# Inspect for capabilities and keep
# track of the result only if succeeded.
vfs.inspect_capabilities()
VFS.guestfs_ready = True
return vfs
def __init__(self, image, partition):
"""Create a new local VFS instance
:param image: instance of nova.virt.image.model.Image
:param partition: the partition number to access
"""
self.image = image
self.partition = partition
def setup(self, mount=True):
"""Performs any one-time setup.
Perform any one-time setup tasks to make the virtual filesystem
available to future API calls.
"""
pass
def teardown(self):
"""Releases all resources initialized in the setup method."""
pass
def make_path(self, path):
"""Creates a directory @path.
Create a directory @path, including all intermedia path components
if they do not already exist.
"""
pass
def append_file(self, path, content):
"""Appends @content to the end of the file.
Append @content to the end of the file identified by @path, creating
the file if it does not already exist.
"""
pass
def replace_file(self, path, content):
"""Replaces contents of the file.
Replace the entire contents of the file identified by @path, with
@content, creating the file if it does not already exist.
"""
pass
def read_file(self, path):
"""Returns the entire contents of the file identified by @path."""
pass
def has_file(self, path):
"""Returns a True if the file identified by @path exists."""
pass
def set_permissions(self, path, mode):
"""Sets the permissions on the file.
Set the permissions on the file identified by @path to @mode. The file
must exist prior to this call.
"""
pass
def set_ownership(self, path, user, group):
"""Sets the ownership on the file.
Set the ownership on the file identified by @path to the username
@user and groupname @group. Either of @user or @group may be None,
in which case the current ownership will be left unchanged.
The ownership must be passed in string form, allowing subclasses to
translate to uid/gid form as required. The file must exist prior to
this call.
"""
pass
def get_image_fs(self):
"""Returns the filesystem type or an empty string.
Determine the filesystem type whether the disk image is
partition less.
"""
pass
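# Illustrative usage sketch (comments only, because this interface needs a
# running Nova host and a real image object): typical use of the VFS API is
#     vfs = VFS.instance_for_image(image, partition=1)
#     vfs.setup()
#     try:
#         vfs.replace_file('/etc/hostname', 'guest01\n')
#     finally:
#         vfs.teardown()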
|
data_pipeline/tools/redshift_sql_to_avsc.py | poros/data_pipeline | 110 | 12677200 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import optparse
import re
import simplejson as json
import yelp_batch
from cached_property import cached_property
from yelp_batch.batch import batch_command_line_options
from yelp_batch.batch import os
from data_pipeline.tools._glob_util import get_file_paths_from_glob_patterns
# See https://regex101.com/r/kC0kZ1/2
CREATE_TABLE_REGEX = re.compile('^create(\s*table)?\s*((.+)\.)?(\w+)\s*\(?')
# See https://regex101.com/r/zG9kV1/2
PRIMARY_KEY_REGEX = re.compile('^primary\s*key\s*\((.+)?\)')
# See https://regex101.com/r/kD8iN5/17
FIELD_LINE_REGEX = re.compile(
'^(\w+)\s*(\w+)\s*(\(\s*(\d+|\d+\s*\,\s*\d+)\s*\))?\s*(?P<pk>primary\s+key)?\s*(not\s+null|null)?\s*((default)\s+(\"|\')?(null|false|true|\d+\.\d+|\d+|[\w\s]*)(\"|\')?)?(\"|\')?.*,' # noqa
)
# See https://regex101.com/r/bN3xL0
START_FIELDS_REGEX = re.compile('^.*\(')
# See https://regex101.com/r/bR7bH2
STOP_FIELDS_REGEX = re.compile('^\)')
REDSHIFT_SQL_TO_AVRO_TYPE_MAPPING = {
'bigint': 'long',
'bool': 'boolean',
'boolean': 'boolean',
'bpchar': 'string',
'char': 'string',
'character': 'string',
'date': 'string',
'decimal': 'double',
'numeric': 'double',
'double': 'double',
'float': 'double',
'float4': 'float',
'float8': 'double',
'int': 'int',
'int2': 'int',
'int4': 'int',
'int8': 'long',
'integer': 'int',
'nchar': 'string',
'nvarchar': 'string',
'real': 'float',
'smallint': 'int',
'text': 'string',
'timestamp': 'long',
'varchar': 'string'
}
def _sanitize_line(line):
return line.strip().lower()
class RedshiftFieldLineToAvroFieldConverter(object):
""" Converter for a single redshift column definition line in a
`CREATE TABLE` statement.
This should eventually be replaced by DATAPIPE-353.
"""
def __init__(self, field_line, pkeys):
"""
Args:
field_line(string): Content of a column definition line from a
redshift *.sql file
pkeys([string]): A list of the primary keys, used for determining
the meta attribute of "pkey"
"""
self.field_line = _sanitize_line(field_line)
self.pkeys = pkeys
@cached_property
def avro_field(self):
field = {
"name": self.name,
"type": self.avro_type,
"doc": ""
}
field.update(self.avro_meta_attributes)
return field
@cached_property
def name(self):
return self._regex_matcher.group(1)
@cached_property
def avro_core_type(self):
return REDSHIFT_SQL_TO_AVRO_TYPE_MAPPING[self.sql_type]
@cached_property
def avro_type(self):
avro_type = self.avro_core_type
if self.nullable:
if self.default_null:
return ['null', avro_type]
else:
return [avro_type, 'null']
else:
return avro_type
@cached_property
def sql_type(self):
return self._regex_matcher.group(2)
@cached_property
def sql_default(self):
""" Return the default value defined for the column, if any.
Note:
This will succeed only if the 'default' follows the 'NOT NULL'/
'NULL' on the column line. I've reached the limits of what
black magic I'm willing to deal with in this regex and
DATAPIPE-353 should be replacing this eventually anyway. :)
"""
return self._regex_matcher.group(10)
@cached_property
def nullable(self):
nullable_str = self._regex_matcher.group(6)
return not(nullable_str and re.search('^(not\s+null)', nullable_str))
@cached_property
def default_null(self):
return self.nullable and self.sql_default in ['null', None]
@cached_property
def avro_meta_attributes(self):
meta = {}
field_name = self.name
for index, pkey_name in enumerate(self.pkeys):
if pkey_name == field_name:
meta['pkey'] = index + 1
break
if self.sql_type in ['varchar', 'nvarchar', 'text']:
meta['maxlen'] = self.sql_type_width
if self.sql_type in ['char', 'character', 'nchar', 'bpchar']:
meta['fixlen'] = self.sql_type_width
if self.sql_type in ['date', 'timestamp']:
meta[self.sql_type] = True
if self.sql_type in ['decimal', 'numeric']:
meta['fixed_pt'] = True
meta['precision'] = self.sql_type_width[0]
meta['scale'] = self.sql_type_width[1]
if self.default_null:
meta['default'] = None
elif self.sql_default is not None:
if self.avro_core_type == 'boolean':
if self.sql_default == 'true':
meta['default'] = True
elif self.sql_default == 'false':
meta['default'] = False
else:
try:
meta['default'] = bool(int(self.sql_default))
except ValueError:
# suppress the exception
pass
elif self.avro_core_type in ['long', 'int']:
try:
meta['default'] = int(self.sql_default)
except ValueError:
# suppress the exception. This can be thrown when the
# default is something like 'getdate()'
pass
elif self.avro_core_type in ['double', 'float']:
try:
meta['default'] = float(self.sql_default)
except ValueError:
# suppress the exception.
pass
else:
meta['default'] = self.sql_default
return meta
@cached_property
def sql_type_width(self):
""" Return the sql type width, which is an int defining the the
maximum size for character types and a (presumably two element) list of
ints (the precision and scale) for the decimal type.
Note:
Some redshift sql types have default widths associated with them, see
http://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html
for more details
"""
width = self._regex_matcher.group(4)
if width:
if ',' in width:
return [
int(part.strip())
for part in width.split(',')
]
else:
return int(width)
else:
if self.sql_type in ['text', 'bpchar', 'varchar', 'nvarchar']:
return 256
if self.sql_type in ['char', 'character', 'nchar']:
return 1
return None
@cached_property
def _regex_matcher(self):
return FIELD_LINE_REGEX.search(self.field_line)
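# Usage sketch (illustrative; the column definition line below is made up):
#
# converter = RedshiftFieldLineToAvroFieldConverter(
#     field_line='user_id bigint not null,',
#     pkeys=['user_id'],
# )
# converter.avro_field
# # -> {'name': 'user_id', 'type': 'long', 'doc': '', 'pkey': 1}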
class RedshiftSQLToAVSCConverter(object):
""" Simple converter from redshift *.sql CREATE TABLE definitions (such
as those in yelp-main/schema/yelp_dw_redshift/tables) to data pipeline
format Avro *.avsc schemas.
This should eventually be replaced by DATAPIPE-353.
Notes:
This makes a number of assumptions about the input content, namely
that there is a column definition per line, which is the convention
followed in all yelp *.sql files - however this is NOT a general
purpose parser/converter.
"""
def __init__(self, sql_content, base_namespace, default_schema='public'):
"""
Args:
sql_content(string): Content of a redshift *.sql file
base_namespace(string): The base namespace (the namespace will be
a combination of "{base_namespace}.{schema}"
default_schema(string): The default schema, for any tables
encountered which do not specify a schema.
"""
self.sql_content = sql_content
self.base_namespace = base_namespace
self.default_schema = default_schema
@cached_property
def avro_record(self):
""" Get the data pipeline format Avro representation of
self.sql_content.
"""
return {
'type': 'record',
'namespace': self.namespace,
'name': self.table,
'doc': '',
'pkey': self.pkeys,
'fields': [
field_line_converter.avro_field
for field_line_converter in self.field_line_converters
]
}
@cached_property
def namespace(self):
return '{0}.{1}'.format(self.base_namespace, self.schema)
@cached_property
def schema(self):
m = CREATE_TABLE_REGEX.search(self.create_table_line)
return m.group(3) if m.group(3) else self.default_schema
@cached_property
def table(self):
m = CREATE_TABLE_REGEX.search(self.create_table_line)
if m.group(4):
return m.group(4)
else:
raise ValueError("Could not locate the table name")
@cached_property
def sql_lines(self):
return [_sanitize_line(line) for line in self.sql_content.split('\n')]
@cached_property
def create_table_line(self):
for line in self.sql_lines:
if CREATE_TABLE_REGEX.search(line):
return line
raise ValueError("Could not locate a 'CREATE TABLE' statement!")
@cached_property
def pkeys(self):
pkeys = []
# loop through field lines to extract primary keys
for line in self.sql_lines:
if self._get_primary_key_in_field_line(line):
pkeys.append(self._get_primary_key_in_field_line(line))
if self.primary_key_line:
pkeys.extend([
pkey.strip() for pkey in
PRIMARY_KEY_REGEX.search(
self.primary_key_line
).group(1).split(',')
])
return pkeys
@cached_property
def primary_key_line(self):
for line in self.sql_lines:
if self._is_primary_key_line(line):
return line
def _is_primary_key_line(self, line):
return bool(PRIMARY_KEY_REGEX.search(line))
def _get_primary_key_in_field_line(self, line):
field_line = FIELD_LINE_REGEX.search(line)
if field_line and field_line.group(5) is not None:
# if primary key present in sql field line return field name
return field_line.group(1)
@cached_property
def field_line_converters(self):
return [
RedshiftFieldLineToAvroFieldConverter(
field_line=line,
pkeys=self.pkeys
)
for line in self._raw_field_lines
]
@cached_property
def _raw_field_lines(self):
raw_field_lines = []
for line in self.sql_lines[self._find_field_lines_start_index():]:
line = _sanitize_line(line=line)
if self._is_stop_line(line=line):
break
elif FIELD_LINE_REGEX.search(line):
raw_field_lines.append(line)
return raw_field_lines
def _find_field_lines_start_index(self):
for index, line in enumerate(self.sql_lines):
line = _sanitize_line(line=line)
if self._is_start_line(line=line):
return index
def _is_start_line(self, line):
return bool(START_FIELDS_REGEX.search(line))
def _is_stop_line(self, line):
return STOP_FIELDS_REGEX.search(line) or self._is_primary_key_line(line)
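# Usage sketch (illustrative; the file path and namespace are assumptions):
#
# with open('schema/yelp_dw_redshift/tables/users.sql') as sql_file:
#     converter = RedshiftSQLToAVSCConverter(
#         sql_content=sql_file.read(),
#         base_namespace='yelp_dw_redshift',
#     )
# json.dumps(converter.avro_record, sort_keys=True)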
class RedshiftSQLToAVSCBatch(yelp_batch.batch.Batch):
notify_emails = ['<EMAIL>']
@batch_command_line_options
def parse_options(self, option_parser):
opt_group = optparse.OptionGroup(
option_parser,
"RedshiftSQLToAVSC Options"
)
opt_group.add_option(
'--glob',
action='append',
type='string',
default=[],
dest='globs',
help='[REQUIRED] Either a path to a specific CREATE TABLE redshift'
' *.sql file, or a glob pattern for a directory containing '
'such files. (For example: '
'"/nail/home/USER/some_dw_redshift_tables/*.sql") '
'Note --glob may be provided multiple times.'
)
opt_group.add_option(
'--base-namespace',
type='string',
default='yelp_dw_redshift',
help='[REQUIRED] Base of the namespace. The namespace will be a '
'combination of "{base-namespace}.{schema}" and it is best to '
'choose a base-namespace which reflects the data store '
'associated with the table (such as yelp_dw_redshift for the '
'yelp datawarehouse redshift tables). '
'Default is "%default"'
)
opt_group.add_option(
'--default-schema',
type='string',
default='public',
help='[REQUIRED] default schema for tables without any specified. '
'The namespace will be a combination of '
'"{base-namespace}.{schema}". '
'Default is "%default"'
)
opt_group.add_option(
'--overwrite',
action="store_true",
default=False,
help='Overwrite existing *.avsc files with new output from the '
'conversion run. '
'Default is "%default"'
)
return opt_group
def run(self):
""" Primary entry point for the batch
"""
sql_file_paths = get_file_paths_from_glob_patterns(
glob_patterns=self.options.globs
)
for sql_file_path in sql_file_paths:
avsc_file_path = sql_file_path.replace('.sql', '.avsc')
self.log.info(
'Converting "{0}" to "{1}"'.format(
sql_file_path,
avsc_file_path
)
)
if os.path.exists(avsc_file_path) and not self.options.overwrite:
self.log.info(
'Skipping "{0}", use "--overwrite" to overwrite existing '
'*.avsc files.'.format(
avsc_file_path
)
)
continue
self.convert_sql_to_avsc(
avsc_file_path=avsc_file_path,
sql_file_path=sql_file_path
)
def convert_sql_to_avsc(self, avsc_file_path, sql_file_path):
with open(sql_file_path) as sql_file:
sql_content = sql_file.read()
converter = RedshiftSQLToAVSCConverter(
sql_content=sql_content,
base_namespace=self.options.base_namespace,
default_schema=self.options.default_schema
)
avro = converter.avro_record
with open(avsc_file_path, 'w') as avsc_file:
self.log.info('Writing "{0}"'.format(avsc_file_path))
json.dump(
obj=avro,
fp=avsc_file,
indent=' ',
sort_keys=True
)
if __name__ == "__main__":
RedshiftSQLToAVSCBatch().start()
|
tests/functional/backward_compatibility/black_box.py | obilaniu/orion | 177 | 12677202 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple one dimensional example with noise level for a possible user's script."""
import argparse
import random
from orion.client import report_results
def function(x, noise):
"""Evaluate partial information of a quadratic."""
z = (x - 34.56789) * random.gauss(0, noise)
return 4 * z ** 2 + 23.4, 8 * z
def execute():
"""Execute a simple pipeline as an example."""
# 1. Receive inputs as you want
parser = argparse.ArgumentParser()
parser.add_argument("-x", type=float, required=True)
parser.add_argument("--fidelity", type=int, default=10)
inputs = parser.parse_args()
assert 0 <= inputs.fidelity <= 10
noise = (1 - inputs.fidelity / 10) + 0.0001
# 2. Perform computations
y, dy = function(inputs.x, noise)
# 3. Gather and report results
results = list()
results.append(dict(name="example_objective", type="objective", value=y))
results.append(dict(name="example_gradient", type="gradient", value=[dy]))
report_results(results)
if __name__ == "__main__":
execute()
|
core/tests/test_polypod/test_main/test_main_container.py | admariner/polyaxon | 3,200 | 12677204 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import (
V1BucketConnection,
V1ClaimConnection,
V1HostPathConnection,
V1K8sResourceSchema,
)
from polyaxon.env_vars.keys import POLYAXON_KEYS_LOG_LEVEL
from polyaxon.exceptions import PolypodException
from polyaxon.k8s import k8s_schemas
from polyaxon.polyflow import V1Init, V1Plugins
from polyaxon.polypod.common.env_vars import get_env_var
from polyaxon.polypod.common.mounts import (
get_artifacts_context_mount,
get_auth_context_mount,
get_mounts,
)
from polyaxon.polypod.main.container import get_main_container
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
from tests.utils import BaseTestCase
@pytest.mark.polypod_mark
class TestMainContainer(BaseTestCase):
def setUp(self):
super().setUp()
# Secrets and config maps
self.non_mount_resource1 = V1K8sResourceType(
name="non_mount_test1",
schema=V1K8sResourceSchema(name="ref", items=["item1", "item2"]),
is_requested=False,
)
self.request_non_mount_resource1 = V1K8sResourceType(
name="request_non_mount_resource1",
schema=V1K8sResourceSchema(name="ref", items=["item1", "item2"]),
is_requested=True,
)
self.non_mount_resource2 = V1K8sResourceType(
name="non_mount_test2",
schema=V1K8sResourceSchema(name="ref"),
is_requested=False,
)
self.mount_resource1 = V1K8sResourceType(
name="mount_test1",
schema=V1K8sResourceSchema(
name="ref", items=["item1", "item2"], mount_path="/tmp1"
),
is_requested=False,
)
self.request_mount_resource2 = V1K8sResourceType(
name="mount_test1",
schema=V1K8sResourceSchema(name="ref", mount_path="/tmp2"),
is_requested=True,
)
# Connections
self.gcs_store = V1ConnectionType(
name="test_gcs",
kind=V1ConnectionKind.GCS,
schema=V1BucketConnection(bucket="gs//:foo"),
secret=self.mount_resource1.schema,
)
self.s3_store = V1ConnectionType(
name="test_s3",
kind=V1ConnectionKind.S3,
schema=V1BucketConnection(bucket="s3//:foo"),
secret=self.non_mount_resource1.schema,
)
self.az_store = V1ConnectionType(
name="test_az",
kind=V1ConnectionKind.WASB,
schema=V1BucketConnection(bucket="wasb://[email protected]"),
secret=self.non_mount_resource1.schema,
)
self.claim_store = V1ConnectionType(
name="test_claim",
kind=V1ConnectionKind.VOLUME_CLAIM,
schema=V1ClaimConnection(
mount_path="/tmp", volume_claim="test", read_only=True
),
)
self.host_path_store = V1ConnectionType(
name="test_path",
kind=V1ConnectionKind.HOST_PATH,
schema=V1HostPathConnection(mount_path="/tmp", host_path="/tmp"),
)
def assert_artifacts_store_raises(self, store, run_path):
with self.assertRaises(PolypodException):
get_main_container(
container_id="test",
main_container=None,
contexts=PluginsContextsSpec.from_config(
V1Plugins(collect_artifacts=True, collect_logs=False)
),
volume_mounts=None,
log_level=None,
artifacts_store=store,
init=None,
connection_by_names=None,
connections=None,
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path=run_path,
)
def test_get_main_container_with_artifacts_store_with_wrong_paths_raises(self):
artifacts_store = V1ConnectionType(
name="test_s3",
kind=V1ConnectionKind.S3,
schema=V1BucketConnection(bucket="s3//:foo"),
)
self.assert_artifacts_store_raises(store=artifacts_store, run_path=None)
artifacts_store = V1ConnectionType(
name="test_s3",
kind=V1ConnectionKind.S3,
schema=V1BucketConnection(bucket="s3//:foo"),
)
self.assert_artifacts_store_raises(store=artifacts_store, run_path=[])
def test_get_main_container_with_none_values(self):
container = get_main_container(
container_id="test",
main_container=k8s_schemas.V1Container(name="main"),
contexts=None,
volume_mounts=None,
log_level=None,
artifacts_store=None,
init=None,
connection_by_names=None,
connections=None,
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path=None,
)
assert container.name == "test"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert container.env == []
assert container.env_from == []
assert container.resources is None
assert container.volume_mounts == []
def test_get_main_container_simple_params(self):
initial_mounts = [
k8s_schemas.V1VolumeMount(
name="test", mount_path="/mount_test", read_only=True
)
]
resources = k8s_schemas.V1ResourceRequirements(
requests={"cpu": "1", "memory": "256Mi"},
limits={"cpu": "1", "memory": "256Mi"},
)
container = get_main_container(
container_id="new-name",
main_container=k8s_schemas.V1Container(
name="main",
image="job_docker_image",
image_pull_policy="IfNotPresent",
command=["cmd", "-p", "-c"],
args=["arg1", "arg2"],
resources=resources,
),
contexts=None,
volume_mounts=initial_mounts,
log_level="info",
artifacts_store=None,
init=None,
connection_by_names=None,
connections=None,
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=23,
run_path=None,
)
assert container.name == "new-name"
assert container.image == "job_docker_image"
assert container.image_pull_policy == "IfNotPresent"
assert container.command == ["cmd", "-p", "-c"]
assert container.args == ["arg1", "arg2"]
assert container.ports == [k8s_schemas.V1ContainerPort(container_port=23)]
assert container.env == [
get_env_var(name=POLYAXON_KEYS_LOG_LEVEL, value="info")
]
assert container.env_from == []
assert container.resources == resources
assert container.volume_mounts == initial_mounts
def test_get_main_container_with_mounted_artifacts_store(self):
container = get_main_container(
container_id="test",
main_container=k8s_schemas.V1Container(name="main"),
contexts=None,
volume_mounts=None,
log_level=None,
artifacts_store=None,
init=[V1Init(connection=self.claim_store.name)],
connections=None,
connection_by_names={self.claim_store.name: self.claim_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "test"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1
# Mount store
container = get_main_container(
container_id="test",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
mount_artifacts_store=True,
collect_artifacts=True,
collect_logs=True,
collect_resources=True,
)
),
volume_mounts=None,
log_level=None,
artifacts_store=None,
init=[V1Init(connection=self.claim_store.name)],
connections=None,
connection_by_names={self.claim_store.name: self.claim_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "test"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1 # store not passed
container = get_main_container(
container_id="",
main_container=k8s_schemas.V1Container(name="main"),
contexts=None,
volume_mounts=None,
log_level=None,
artifacts_store=None,
init=[V1Init(connection=self.claim_store.name)],
connections=[self.claim_store.name],
connection_by_names={self.claim_store.name: self.claim_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 2
container = get_main_container(
container_id="main-job",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True, collect_logs=True, collect_resources=True
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.claim_store,
init=None,
connections=[],
connection_by_names={self.claim_store.name: self.claim_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main-job"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 2 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1
# Mount store
container = get_main_container(
container_id="main-job",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
mount_artifacts_store=True,
collect_artifacts=True,
collect_logs=True,
collect_resources=True,
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.claim_store,
init=None,
connections=[],
connection_by_names={self.claim_store.name: self.claim_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main-job"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
# One from the artifacts store name env var and the schema
assert len(container.env) == 2 + 1 + 1
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 2
def test_get_main_container_with_bucket_artifacts_store(self):
container = get_main_container(
container_id="main",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True, collect_logs=True, collect_resources=True
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.s3_store,
init=None,
connections=None,
connection_by_names={self.s3_store.name: self.s3_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 2 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1 # mount context
# Mount store
container = get_main_container(
container_id="main",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
mount_artifacts_store=True,
collect_artifacts=False,
collect_logs=False,
collect_resources=False,
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.gcs_store,
init=None,
connections=None,
connection_by_names={self.gcs_store.name: self.gcs_store},
secrets=None,
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 2 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1 # mount resource
container = get_main_container(
container_id="main1",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True,
collect_logs=True,
collect_resources=True,
sync_statuses=True,
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.s3_store,
init=None,
connections=None,
connection_by_names={self.s3_store.name: self.s3_store},
secrets=[self.mount_resource1],
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main1"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 2 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
# The mount resource1 is not requested
assert len(container.volume_mounts) == 1 # one mount resource
container = get_main_container(
container_id="main1",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True, collect_logs=True, collect_resources=True
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.s3_store,
init=None,
connections=None,
connection_by_names={self.s3_store.name: self.s3_store},
secrets=[self.request_mount_resource2],
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "main1"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 2 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
# The mount resource2 is requested
assert len(container.volume_mounts) == 2 # one mount resource
container = get_main_container(
container_id="tensorflow",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True, collect_logs=True, collect_resources=False
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.s3_store,
init=None,
connections=None,
connection_by_names={self.s3_store.name: self.s3_store},
secrets=[self.non_mount_resource1],
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "tensorflow"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
assert len(container.env) == 1 + 1 # One from the artifacts store name env var
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1 # outputs context
container = get_main_container(
container_id="pytorch",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(
collect_artifacts=True, collect_logs=True, collect_resources=True
)
),
volume_mounts=None,
log_level=None,
artifacts_store=self.s3_store,
init=None,
connections=None,
connection_by_names={self.s3_store.name: self.s3_store},
secrets=[self.request_non_mount_resource1],
config_maps=None,
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "pytorch"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
# 2 + 2 env vars from the secret mount + 1 from the artifacts store name env var
assert len(container.env) == 2 + 2 + 1
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 1
def test_get_main_container(self):
container = get_main_container(
container_id="test",
main_container=k8s_schemas.V1Container(name="main"),
contexts=None,
volume_mounts=None,
log_level=None,
artifacts_store=None,
init=[
V1Init(connection=self.claim_store.name),
V1Init(connection=self.s3_store.name),
],
connections=[self.host_path_store.name, self.gcs_store.name],
connection_by_names={
self.claim_store.name: self.claim_store,
self.s3_store.name: self.s3_store,
self.host_path_store.name: self.host_path_store,
self.gcs_store.name: self.gcs_store,
},
secrets=[self.mount_resource1, self.request_non_mount_resource1],
config_maps=[self.non_mount_resource1, self.request_mount_resource2],
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.name == "test"
assert container.image is None
assert container.image_pull_policy is None
assert container.command is None
assert container.args is None
assert container.ports == []
# 2 env vars from the secret mount
# + 2 for the connection (context_path + spec)
# + 1 for the connection spec (non mount)
assert len(container.env) == 5
assert container.env_from == []
assert container.resources is None
assert len(container.volume_mounts) == 4
def test_get_main_container_host_paths(self):
contexts = PluginsContextsSpec(
auth=True,
docker=False,
shm=False,
mount_artifacts_store=False,
collect_logs=True,
collect_artifacts=True,
collect_resources=True,
auto_resume=True,
sync_statuses=True,
external_host=False,
sidecar=None,
)
volume_mounts = get_mounts(
use_auth_context=contexts.auth,
use_artifacts_context=False,
use_docker_context=contexts.docker,
use_shm_context=contexts.shm,
)
artifacts_store = V1ConnectionType(
name="plx-outputs",
kind=V1ConnectionKind.HOST_PATH,
schema=V1HostPathConnection(
mount_path="/tmp/plx/outputs", host_path="/tmp/plx/outputs"
),
)
container = get_main_container(
container_id="test",
main_container=k8s_schemas.V1Container(name="main"),
contexts=PluginsContextsSpec.from_config(
V1Plugins(collect_artifacts=True, collect_logs=True)
),
volume_mounts=volume_mounts,
log_level=None,
artifacts_store=artifacts_store,
init=[],
connections=[],
connection_by_names={artifacts_store.name: artifacts_store},
secrets=[],
config_maps=[],
kv_env_vars=None,
env=None,
ports=None,
run_path="run_path",
)
assert container.volume_mounts == [
get_auth_context_mount(read_only=True),
get_artifacts_context_mount(read_only=False),
]
|
src/untp/dataparse.py | justbilt/TextureUnPacker | 161 | 12677206 | #!/usr/bin/env python
# coding=utf-8
# Python 2.7.3
import os
import json
from parse import parse
from plistlib import readPlist
from pprint import pprint
def parse_file(_filepath, _config=None, _extra_data_receiver=None):
path,name = os.path.split(_filepath)
pre,ext = os.path.splitext(name)
if ext == ".plist":
try:
data = readPlist(_filepath)
except Exception:
print("fail: read plist file failed >", _filepath)
return
return parse_plistdata(data)
elif ext == ".fnt":
with open(_filepath, "r") as f:
data = f.read().split("\n")
if len(data) < 5:
print("fail: read plist file failed >", _filepath)
return
return parse_fntdata(data, _config if _config else {"prefix": pre}, _extra_data_receiver)
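# Usage sketch (illustrative; the file names are assumptions):
#
# plist_data = parse_file("hero.plist")
# fnt_data = parse_file("attack_num.fnt")
# # Each result is a dict with "texture" (the atlas file name) and "frames",
# # a list of per-frame dicts: name, source_size, rotated, src_rect, offset.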
def parse_fntdata(_data, _config, _extra_data_receiver=None):
"""
info face="Haettenschweiler" size=60 bold=0 italic=0 charset="" unicode=0 stretchH=100 smooth=1 aa=1 padding=0,0,0,0 spacing=2,2
common lineHeight=64 base=53 scaleW=256 scaleH=128 pages=1 packed=0
page id=0 file="attack_num.png"
chars count=12
char id=52 x=2 y=2 width=33 height=51 xoffset=0 yoffset=5 xadvance=32 page=0 chnl=0 letter="4"
char id=48 x=37 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=29 page=0 chnl=0 letter="0"
char id=53 x=68 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="5"
char id=57 x=99 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="9"
char id=54 x=129 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="6"
char id=56 x=159 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="8"
char id=51 x=189 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="3"
char id=50 x=219 y=2 width=28 height=49 xoffset=1 yoffset=7 xadvance=28 page=0 chnl=0 letter="2"
char id=55 x=2 y=55 width=30 height=48 xoffset=1 yoffset=8 xadvance=28 page=0 chnl=0 letter="7"
char id=49 x=34 y=55 width=20 height=48 xoffset=1 yoffset=8 xadvance=20 page=0 chnl=0 letter="1"
char id=45 x=56 y=55 width=18 height=12 xoffset=1 yoffset=36 xadvance=19 page=0 chnl=0 letter="-"
char id=32 x=76 y=55 width=0 height=0 xoffset=11 yoffset=73 xadvance=16 page=0 chnl=0 letter="space"
"""
data = {}
frame_data_list = []
parse_common_info = parse("common lineHeight={line_height:d} base={base:d} scaleW={scale_w:d} scaleH={scale_h:d} pages={pages:d} packed={packed:d}", _data[1])
parse_page_info = parse("page id={id:d} file=\"{file}\"", _data[2])
parse_char_count = parse("chars count={count:d}", _data[3])
raw_frames_data = {}
for index in xrange(0, parse_char_count["count"]):
parse_frame = parse("char id={id:d} x={x:d} y={y:d} width={width:d} height={height:d} xoffset={xoffset:d} yoffset={yoffset:d} xadvance={xadvance:d} page={page:d} chnl={chnl:d} letter=\"{letter}\"", _data[index + 4])
frame_data = {}
frame_data["name"] = "{prefix}_{id}.png".format(prefix= _config["prefix"], id=parse_frame["id"], letter=parse_frame["letter"])
frame_data["source_size"] = (parse_frame["width"], parse_frame["height"])
frame_data["rotated"] = False
frame_data["src_rect"] = (parse_frame["x"], parse_frame["y"], parse_frame["x"] + parse_frame["width"], parse_frame["y"] + parse_frame["height"])
frame_data["offset"] = (0, 0)
if parse_frame["width"] <= 0 or parse_frame["height"] <= 0:
continue
frame_data_list.append(frame_data)
parse_frame_named_data = parse_frame.named.copy()
parse_frame_named_data["texture"] = frame_data["name"]
raw_frames_data[parse_frame["id"]] = parse_frame_named_data
data["texture"] = parse_page_info["file"]
data["frames"] = frame_data_list
if _extra_data_receiver != None:
_extra_data_receiver["common"] = parse_common_info.named
_extra_data_receiver["frames"] = raw_frames_data
return data
def _mapping_list(_result, _name, _data):
for i,v in enumerate(_name):
if isinstance(v, list):
_mapping_list(_result, v, _data[i])
else:
_result[v] = _data[i]
return _result
def _parse_str(_name, _str):
return _mapping_list({}, _name, json.loads(_str.replace("{", "[").replace("}", "]")))
def parse_plistdata(_data):
fmt = _data.metadata.format
# check file format
if fmt not in (0, 1, 2, 3):
print("fail: unsupport format " + str(fmt))
return None
data = {}
frame_data_list = []
for (name,config) in _data.frames.items():
frame_data = {}
if fmt == 0:
source_size = {
"w": config.get("originalWidth", 0),
"h": config.get("originalHeight", 0),
}
rotated = False
src_rect = (
config.get("x", 0),
config.get("y", 0),
config.get("x", 0) + config.get("originalWidth", 0),
config.get("y", 0) + config.get("originalHeight", 0),
)
offset = {
"x": config.get("offsetX", False),
"y": config.get("offsetY", False),
}
elif fmt == 1 or fmt == 2:
frame = _parse_str([["x","y"],["w","h"]], config.frame)
center_offset = _parse_str(["x","y"], config.offset)
source_size = _parse_str(["w","h"], config.sourceSize)
rotated = config.get("rotated", False)
src_rect = (
frame["x"],
frame["y"],
frame["x"]+(frame["h"] if rotated else frame["w"]),
frame["y"]+(frame["w"] if rotated else frame["h"])
)
offset = {
"x": source_size["w"]/2 + center_offset["x"] - frame["w"]/2,
"y": source_size["h"]/2 - center_offset["y"] - frame["h"]/2,
}
elif fmt == 3:
frame = _parse_str([["x","y"],["w","h"]], config.textureRect)
center_offset = _parse_str(["x","y"], config.spriteOffset)
source_size = _parse_str(["w","h"], config.spriteSourceSize)
rotated = config.textureRotated
src_rect = (
frame["x"],
frame["y"],
frame["x"]+(frame["h"] if rotated else frame["w"]),
frame["y"]+(frame["w"] if rotated else frame["h"])
)
offset = {
"x": source_size["w"]/2 + center_offset["x"] - frame["w"]/2,
"y": source_size["h"]/2 - center_offset["y"] - frame["h"]/2,
}
else:
continue
frame_data["name"] = name
frame_data["source_size"] = (int(source_size["w"]), int(source_size["h"]))
frame_data["rotated"] = rotated
frame_data["src_rect"] = [int(x) for x in src_rect ]
frame_data["offset"] = (int(offset["x"]), int(offset["y"]))
frame_data_list.append(frame_data)
data["frames"] = frame_data_list
data["texture"] = _data.metadata.textureFileName
return data |
auth/test/test_cookie.py | giuseppe/quay | 2,027 | 12677213 | import uuid
from flask_login import login_user
from app import LoginWrappedDBUser
from data import model
from auth.cookie import validate_session_cookie
from test.fixtures import *
def test_anonymous_cookie(app):
assert validate_session_cookie().missing
def test_invalidformatted_cookie(app):
# "Login" with a non-UUID reference.
someuser = model.user.get_user("devtable")
login_user(LoginWrappedDBUser("somenonuuid", someuser))
# Ensure we get an invalid session cookie format error.
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == "Invalid session cookie format"
def test_disabled_user(app):
# "Login" with a disabled user.
someuser = model.user.get_user("disabled")
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
# Ensure we get an invalid session cookie format error.
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == "User account is disabled"
def test_valid_user(app):
# Login with a valid user.
someuser = model.user.get_user("devtable")
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
result = validate_session_cookie()
assert result.authed_user == someuser
assert result.context.identity is not None
assert result.has_nonrobot_user
assert result.error_message is None
def test_valid_organization(app):
# "Login" with a valid organization.
someorg = model.user.get_namespace_user("buynlarge")
someorg.uuid = str(uuid.uuid4())
someorg.verified = True
someorg.save()
login_user(LoginWrappedDBUser(someorg.uuid, someorg))
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == "Cannot login to organization"
|
pinout/components/pinlabel.py | j0ono0/pinout-diagram | 304 | 12677228 | <reponame>j0ono0/pinout-diagram
import copy
from pinout.core import SvgShape, Group, Rect, Text, BoundingCoords, Coords
from pinout.components import leaderline as lline
from pinout import config
class Body(SvgShape):
"""Graphical shape that makes up the body of a pinlabel."""
def __init__(self, x, y, width, height, corner_radius=0, **kwargs):
self.corner_radius = corner_radius
super().__init__(x=x, y=y, width=width, height=height, **kwargs)
def bounding_coords(self):
# PinLabelBody origin is vertically centered
return BoundingCoords(
self.x,
self.y - (self.height / 2),
self.x + self.width,
self.y + (self.height / 2),
)
def render(self):
body = Rect(
x=self.x,
y=self.y - (self.height / 2),
width=self.width,
height=self.height,
corner_radius=self.corner_radius,
)
body.add_tag(config.pinlabel["body"]["tag"])
return body.render()
class Leaderline(lline.Curved):
"""Graphical line joining the label origin coordinates to the label body."""
pass
class Base(Group):
"""Label component designed specifically for labelling pins."""
def __init__(
self,
content="",
x=0,
y=0,
tag=None,
body=None,
leaderline=None,
**kwargs,
):
self.content = content
self._leaderline = None
self._body = None
super().__init__(x, y, tag=tag, **kwargs)
self.update_config(config.pinlabel)
self.body = body
self.leaderline = leaderline
# Add leaderline and body reference into children
self.add(self._body)
# Add leaderline at render as it is replaced by pinlabelGroup!!!
# Add SvgShape so pin label reports correct dimensions.
self.add(SvgShape(x=self.leaderline.x, y=self.leaderline.y))
self.add_tag(config.pinlabel["tag"])
@property
def body(self):
return self._body
@body.setter
def body(self, body):
# ensure instance data is unique
body = copy.deepcopy(body or self.config["body"])
# Convert dict into body object
if isinstance(body, dict):
body_config = self.config["body"]
body_config.update(body)
body = Body(**body_config)
# Add body config tag if not there
body.add_tag(self.config["body"]["tag"])
self._body = body
@property
def leaderline(self):
return self._leaderline
@leaderline.setter
def leaderline(self, leaderline):
# ensure instance data is unique
leaderline = copy.deepcopy(leaderline or self.config["leaderline"])
# Convert dict into leaderline object
if isinstance(leaderline, dict):
leaderline_config = self.config["leaderline"]
leaderline_config.update(leaderline)
leaderline = Leaderline(**leaderline_config)
# Add leaderline config tag if not there
leaderline.add_tag(self.config["leaderline"]["tag"])
self._leaderline = leaderline
def render(self):
# Add text content
x = self.body.width / 2 + self.body.x
y = self.body.y
self.add(
Text(
self.content,
x=x,
y=y,
tag=config.pinlabel["text"]["tag"],
scale=self.scale,
)
)
# Route leaderline
self.leaderline.route(Rect(), self._body)
self.add(self.leaderline)
return super().render()
class PinLabel(Base):
pass
class PinLabelGroup(Group):
"""Convenience class to place multiple rows of pin-labels on a pin-header."""
def __init__(
self,
x,
y,
pin_pitch,
label_start,
label_pitch,
labels,
leaderline=None,
body=None,
**kwargs,
):
scale = Coords(*kwargs.pop("scale", (1, 1)))
super().__init__(x=x, y=y, **kwargs)
# Setup generators for row locations
pin_coords = config.pitch_generator((0, 0), pin_pitch)
label_coords = config.pitch_generator(label_start, label_pitch)
for row in labels:
row_group = self.add(Group())
for label in row:
# If data is supplied convert to Label
if type(label) is tuple:
content, tag, *args = label
attrs = args[0] if len(args) > 0 else {}
# Set leaderline and body in attrs if supplied in either:
# 1. data
# 2. PinlabelGroup
attrs["leaderline"] = attrs.get("leaderline", None) or leaderline
attrs["body"] = attrs.get("body", None) or body
label = PinLabel(
content=content,
scale=scale,
**attrs,
)
# -- label now exists -- #
label.add_tag(tag)
# Label follows another label in the row
try:
prev_label = row_group.children[-1]
label.x = prev_label.x + prev_label.width * scale.x
label.y = prev_label.y + prev_label.body.y * scale.y
label.leaderline = lline.Straight(direction="hh")
# Start of a new row
except IndexError:
label.x, label.y = next(pin_coords)
x, y = next(label_coords)
label.body.x += x - label.x * scale.x
label.body.y += y - label.y * scale.y
row_group.add(label)
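# Usage sketch (illustrative; coordinates, pitches, and tag names are assumptions):
#
# labels = [
#     [("GPIO0", "gpio"), ("PWM0", "pwm")],
#     [("GPIO1", "gpio")],
# ]
# label_group = PinLabelGroup(
#     x=100,
#     y=50,
#     pin_pitch=(0, 30),
#     label_start=(60, 0),
#     label_pitch=(0, 30),
#     labels=labels,
# )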
|
var/spack/repos/builtin/packages/ck/package.py | kkauder/spack | 2,360 | 12677303 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ck(MavenPackage):
"""CK calculates class-level and metric-level code metrics in Java
projects by means of static analysis (i.e. no need for compiled code)."""
homepage = "https://github.com/mauricioaniche/ck"
url = "https://github.com/mauricioaniche/ck/archive/ck-0.6.2.tar.gz"
version('0.6.2', sha256='ee16d209f05852230504dea1af39cdb1cfc8e9b56f4708ed1afcd5ce44af76eb')
version('0.6.1', sha256='1db1fef7111bb485d5554d5927611761a102133a41b88e8fb20cd44494411ac4')
version('0.6.0', sha256='8a1affad047fbefda5d2dad1a795204ffd06c50e2fba830f87cf6c7518423137')
version('0.5.2', sha256='35f610f5d97ca31a62903ba368be7e0b74764daccd95afa3eb9ff04e0326a7ca')
version('0.5.1', sha256='732849ae7b26d01ee082283396a6fdd7823282c368ae6fd05966acb4598ccebe')
version('0.5.0', sha256='3923d25ff4941a6207d644fd1ba3115b5ad303ef953285610e836bc59a4cbcb7')
|
omega_miya/plugins/omega_sign_in/utils.py | rinrini001/omega-miya | 120 | 12677306 | <reponame>rinrini001/omega-miya
"""
@Author : Ailitonia
@Date : 2021/08/27 0:48
@FileName : utils.py
@Project : nonebot2_miya
@Description : Sign-in material composition utilities
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
import os
import random
import asyncio
import aiofiles.os
from typing import Optional
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from nonebot import get_driver, require, logger
from omega_miya.database import DBPixivillust, Result
from omega_miya.utils.pixiv_utils import PixivIllust
from omega_miya.utils.omega_plugin_utils import HttpFetcher, ProcessUtils, TextUtils
from .config import Config
from .fortune import get_fortune
global_config = get_driver().config
plugin_config = Config(**global_config.dict())
TMP_PATH = global_config.tmp_path_
RESOURCES_PATH = global_config.resources_path_
SIGN_IN_PIC_PATH = os.path.abspath(os.path.join(TMP_PATH, 'sign_in_pic'))
SIGN_IN_CARD_PATH = os.path.abspath(os.path.join(TMP_PATH, 'sign_in_card'))
ENABLE_PIC_PREPARING_SCHEDULER = plugin_config.enable_pic_preparing_scheduler
CACHE_PIC_LIMIT = plugin_config.cache_pic_limit
async def __pre_download_sign_in_pic(pid: int, *, pic_size: str = 'regular') -> Result.IntResult:
illust_info_result = await PixivIllust(pid=pid).get_illust_data()
if illust_info_result.error:
return Result.IntResult(error=True, info=illust_info_result.info, result=-1)
pic_url = illust_info_result.result.get('illust_pages', {}).get(0, {}).get(pic_size)
if not pic_url:
return Result.IntResult(error=True, info='Small illust pages url not found', result=-1)
fetcher = HttpFetcher(timeout=30, attempt_limit=2, flag='pre_download_sign_in_pic', headers=PixivIllust.HEADERS)
download_result = await fetcher.download_file(url=pic_url, path=SIGN_IN_PIC_PATH)
if download_result.error:
return Result.IntResult(error=True, info=download_result.info, result=-1)
else:
return Result.IntResult(error=False, info='Success', result=0)
async def __prepare_sign_in_pic() -> Result.TextResult:
# If the number of pictures in the cache directory exceeds the limit, delete the excess ones
if not os.path.exists(SIGN_IN_PIC_PATH):
os.makedirs(SIGN_IN_PIC_PATH)
pic_file_list = os.listdir(SIGN_IN_PIC_PATH)
if len(pic_file_list) > CACHE_PIC_LIMIT:
del_pic_file_list = random.sample(pic_file_list, k=(len(pic_file_list) - CACHE_PIC_LIMIT))
for pic_file in del_pic_file_list:
await aiofiles.os.remove(os.path.abspath(os.path.join(SIGN_IN_PIC_PATH, pic_file)))
logger.info(f'Preparing sign in pic processing, '
f'removed pic "{"/".join(del_pic_file_list)}" exceed the limit of cache')
# Fetch illust info and download the pictures
pic_list_result = await DBPixivillust.rand_illust(num=100, nsfw_tag=0, ratio=1)
if pic_list_result.error or not pic_list_result.result:
logger.error(f'Preparing sign in pic failed, DB Error or not result, result: {pic_list_result}')
return Result.TextResult(error=True, info=pic_list_result.info, result='DB Error or not result')
tasks = [__pre_download_sign_in_pic(pid=pid) for pid in pic_list_result.result]
pre_download_result = await ProcessUtils.fragment_process(
tasks=tasks, fragment_size=20, log_flag='pre_download_sign_in_pic')
success_count = 0
failed_count = 0
for result in pre_download_result:
if result.success():
success_count += 1
else:
failed_count += 1
result_text = f'Completed with {success_count} Success, {failed_count} Failed'
logger.info(f'Preparing sign in pic completed, {result_text}')
return Result.TextResult(error=False, info='Completed', result=result_text)
# Scheduled job that pre-downloads sign-in pictures
if ENABLE_PIC_PREPARING_SCHEDULER:
scheduler = require("nonebot_plugin_apscheduler").scheduler
scheduler.add_job(
__prepare_sign_in_pic,
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
hour='*/6',
# minute=None,
# second=None,
# start_date=None,
# end_date=None,
# timezone=None,
id='prepare_sign_in_pic',
coalesce=True,
misfire_grace_time=120
)
async def __get_rand_sign_in_pic() -> Result.TextResult:
try_count = 0
if not os.path.exists(SIGN_IN_PIC_PATH):
os.makedirs(SIGN_IN_PIC_PATH)
pic_file_list = os.listdir(SIGN_IN_PIC_PATH)
while not pic_file_list and try_count < 2:
await __prepare_sign_in_pic()
pic_file_list = os.listdir(SIGN_IN_PIC_PATH)
try_count += 1
if not pic_file_list:
return Result.TextResult(error=True, info='Can not pre-download sign in pic', result='')
# Reset the random seed
random.seed()
rand_file = random.choice(pic_file_list)
file_path = os.path.abspath(os.path.join(SIGN_IN_PIC_PATH, rand_file))
return Result.TextResult(error=False, info='Success', result=file_path)
def __get_level(favorability: float) -> tuple[int, int, int]:
"""
Get the level and the favorability within that level from total favorability
:param favorability: total favorability
:return: (level, favorability within the current level, favorability cap of the current level)
"""
if favorability <= 0:
return 0, 0, 1
elif favorability < 10000:
return 1, int(favorability), 10000
elif favorability < 36000:
return 2, int(favorability - 10000), 26000
elif favorability < 78000:
return 3, int(favorability - 36000), 42000
elif favorability < 136000:
return 4, int(favorability - 78000), 58000
elif favorability < 210000:
return 5, int(favorability - 136000), 74000
elif favorability < 300000:
return 6, int(favorability - 210000), 90000
elif favorability < 406000:
return 7, int(favorability - 300000), 106000
else:
return 8, int(favorability - 406000), 122000
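# For example, __get_level(15000) returns (2, 5000, 26000): level 2, with 5000
# of the 26000 favorability needed inside that level.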
def __get_level_color(level: int) -> tuple[int, int, int]:
"""
Get the color associated with a level
:param level: level
:return: (int, int, int): RGB color
"""
level_color: dict[int, tuple[int, int, int]] = {
0: (136, 136, 136),
1: (102, 102, 102),
2: (153, 204, 153),
3: (221, 204, 136),
4: (255, 204, 51),
5: (255, 204, 204),
6: (247, 119, 127),
7: (102, 204, 255),
8: (175, 136, 250),
}
return level_color.get(level, (136, 136, 136))
async def get_hitokoto(*, c: Optional[str] = None) -> Result.TextResult:
"""获取一言"""
url = 'https://v1.hitokoto.cn'
params = {
'encode': 'json',
'charset': 'utf-8'
}
if c is not None:
params.update({'c': c})
headers = {**HttpFetcher.DEFAULT_HEADERS, 'accept': 'application/json'}
hitokoto_result = await HttpFetcher(flag='sign_hitokoto', headers=headers).get_json(url=url, params=params)
if hitokoto_result.error:
return Result.TextResult(error=True, info=hitokoto_result.info, result='')
text = f'{hitokoto_result.result.get("hitokoto")}\n——《{hitokoto_result.result.get("from")}》'
if hitokoto_result.result.get("from_who"):
text += f' {hitokoto_result.result.get("from_who")}'
return Result.TextResult(error=False, info='Success', result=text)
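# Usage sketch (illustrative; must be awaited inside a running event loop, and
# the category code 'a' is an assumption):
#
# hitokoto = await get_hitokoto(c='a')
# if not hitokoto.error:
#     print(hitokoto.result)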
async def generate_sign_in_card(
user_id: int, user_text: str, fav: float, *, width: int = 1024, fortune_do: bool = True) -> Result.TextResult:
"""
Generate the sign-in card
:param user_id: user id
:param user_text: custom text for the header section
:param fav: user favorability, used to compute the level
:param width: width of the generated image; the layout adapts to it
:param fortune_do: whether to draw the almanac "do" and "don't" items of the day
:return: path of the generated image
"""
# Fetch the header image
sign_pic_path_result = await __get_rand_sign_in_pic()
if sign_pic_path_result.error:
return Result.TextResult(error=True, info=sign_pic_path_result.info, result='')
sign_pic_path = sign_pic_path_result.result
def __handle():
# Generate the user's almanac (fortune) for today
user_fortune = get_fortune(user_id=user_id)
fortune_star = user_fortune.get('fortune_star')
fortune_text = user_fortune.get('fortune_text')
fortune_do_1 = user_fortune.get('do_1')
fortune_do_2 = user_fortune.get('do_2')
fortune_not_do_1 = user_fortune.get('not_do_1')
fortune_not_do_2 = user_fortune.get('not_do_2')
# Load the header image
draw_top_img: Image.Image = Image.open(sign_pic_path)
# Resize the header image to match the card width
top_img_height = int(width * draw_top_img.height / draw_top_img.width)
draw_top_img = draw_top_img.resize((width, top_img_height))
# Fonts
bd_font_path = os.path.abspath(os.path.join(RESOURCES_PATH, 'fonts', 'SourceHanSans_Heavy.otf'))
bd_font = ImageFont.truetype(bd_font_path, width // 10)
bd_title_font = ImageFont.truetype(bd_font_path, width // 12)
bd_text_font = ImageFont.truetype(bd_font_path, width // 18)
main_font_path = os.path.abspath(os.path.join(RESOURCES_PATH, 'fonts', 'SourceHanSans_Regular.otf'))
text_font = ImageFont.truetype(main_font_path, width // 28)
level_font_path = os.path.abspath(os.path.join(RESOURCES_PATH, 'fonts', 'pixel.ttf'))
level_font = ImageFont.truetype(level_font_path, width // 20)
bottom_font_path = os.path.abspath(os.path.join(RESOURCES_PATH, 'fonts', 'fzzxhk.ttf'))
bottom_text_font = ImageFont.truetype(bottom_font_path, width // 40)
# Greeting
if 4 <= datetime.now().hour < 11:
top_text = '早上好'
elif 11 <= datetime.now().hour < 14:
top_text = '中午好'
elif 14 <= datetime.now().hour < 19:
top_text = '下午好'
elif 19 <= datetime.now().hour < 22:
top_text = '晚上好'
else:
top_text = '晚安'
top_text_width, top_text_height = bd_font.getsize(top_text)
# Compute the favorability level bar
level = __get_level(favorability=fav)
level_text = f'Level {level[0]}'
level_text_width, level_text_height = level_font.getsize(level_text)
fav_text = f'{level[1]}/{level[2]}'
fav_rat = level[1] / level[2] if level[1] < level[2] else 1
fav_text_width, fav_text_height = text_font.getsize(fav_text)
# Date
date_text = datetime.now().strftime('%m/%d')
# Nickname, favorability and credits
# The text has to be split into lines first
user_text_ = TextUtils(text=user_text).split_multiline(width=(width - int(width * 0.125)), font=text_font)
user_text_width, user_text_height = text_font.getsize_multiline(user_text_)
# Today's fortune
fortune_text_width, fortune_text_height = bd_text_font.getsize(fortune_text)
fortune_star_width, fortune_star_height = text_font.getsize(fortune_star)
# Footer text
bottom_text_width, bottom_text_height = bottom_text_font.getsize(f'{"@@##" * 4}\n' * 4)
# Total height
if fortune_do:
height = (top_img_height + top_text_height + user_text_height + level_text_height +
fortune_text_height * 3 + fortune_star_height * 6 + bottom_text_height * 4 +
int(0.25 * width))
else:
height = (top_img_height + top_text_height + user_text_height + level_text_height +
fortune_text_height * 1 + fortune_star_height * 2 + bottom_text_height * 4 +
int(0.1875 * width))
# Create the background
background = Image.new(
mode="RGB",
size=(width, height),
color=(255, 255, 255))
# Start drawing the elements onto the background
# Drawn top-to-bottom in the order below; do not reorder or the layout will break
background.paste(draw_top_img, box=(0, 0)) # Background
this_height = top_img_height + int(0.0625 * width)
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=top_text, font=bd_font, align='left', anchor='lt',
fill=(0, 0, 0)) # Greeting
ImageDraw.Draw(background).text(xy=(width - int(width * 0.0625), this_height),
text=date_text, font=bd_title_font, align='right', anchor='rt',
fill=__get_level_color(level=level[0])) # Date
this_height += top_text_height
ImageDraw.Draw(background).multiline_text(xy=(int(width * 0.0625), this_height),
text=user_text_, font=text_font, align='left',
fill=(128, 128, 128)) # Nickname, favorability and credits
this_height += user_text_height + int(0.046875 * width)
ImageDraw.Draw(background).text(xy=(int(width * 0.065), this_height),
text=level_text, font=level_font, align='left', anchor='lt',
fill=__get_level_color(level=level[0])) # Level
this_height += level_text_height + int(0.03125 * width)
ImageDraw.Draw(background).text(xy=(width - int(width * 0.0625), this_height),
text=fav_text, font=text_font, align='right', anchor='rm',
fill=(208, 208, 208)) # Experience bar value text
ImageDraw.Draw(background).line(xy=[(int(width * 0.0625), this_height),
(width - int(width * 0.09375 + fav_text_width), this_height)],
fill=(224, 224, 224), width=int(0.03125 * width)) # Experience bar background
ImageDraw.Draw(background).line(
xy=[(int(width * 0.0625), this_height),
(int(width * 0.0625 + (width * 0.84375 - fav_text_width) * fav_rat), this_height)],
fill=__get_level_color(level=level[0]), width=int(0.03125 * width)) # Experience bar fill
this_height += fortune_star_height + int(0.015625 * width)
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=f'今日运势: {fortune_text}', font=bd_text_font,
align='left', anchor='lt', fill=(0, 0, 0)) # Today's fortune
this_height += fortune_text_height
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=fortune_star, font=text_font, align='left', anchor='lt',
fill=(128, 128, 128)) # Fortune stars
if fortune_do:
this_height += fortune_star_height + int(0.03125 * width)
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=f'宜', font=bd_text_font, align='left', anchor='lt',
fill=(0, 0, 0)) # "Do" section title
this_height += fortune_text_height
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=fortune_do_1, font=text_font, align='left', anchor='lt',
fill=(128, 128, 128)) # Today's "do" item 1
this_height += fortune_star_height # both fonts are the same size anyway
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=fortune_do_2, font=text_font, align='left', anchor='lt',
fill=(128, 128, 128)) # Today's "do" item 2
this_height += fortune_star_height + int(0.03125 * width)
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=f'不宜', font=bd_text_font, align='left', anchor='lt',
fill=(0, 0, 0)) # "Don't" section title
this_height += fortune_text_height
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=fortune_not_do_1, font=text_font, align='left', anchor='lt',
fill=(128, 128, 128)) # Today's "don't" item 1
this_height += fortune_star_height
ImageDraw.Draw(background).text(xy=(int(width * 0.0625), this_height),
text=fortune_not_do_2, font=text_font, align='left', anchor='lt',
fill=(128, 128, 128)) # Today's "don't" item 2
this_height += fortune_star_height + bottom_text_height * 2
ImageDraw.Draw(background).text(xy=(width - int(width * 0.0625), this_height),
text='随机生成 请勿迷信', font=bottom_text_font, align='right', anchor='rt',
fill=(128, 128, 128))
this_height += bottom_text_height
ImageDraw.Draw(background).text(xy=(width - int(width * 0.0625), this_height),
text=f'Omega Miya @ {datetime.now().year}',
font=bottom_text_font, align='right', anchor='rt',
fill=(128, 128, 128))
if not os.path.exists(SIGN_IN_CARD_PATH):
os.makedirs(SIGN_IN_CARD_PATH)
if fortune_do:
name_prefix = 'fortune_sign_in'
else:
name_prefix = 'fortune'
save_path = os.path.abspath(os.path.join(
SIGN_IN_CARD_PATH, f"{name_prefix}_card_{user_id}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.jpg"))
background.save(save_path, 'JPEG')
return save_path
try:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(None, __handle)
return Result.TextResult(error=False, info='Success', result=result)
except Exception as e:
return Result.TextResult(error=True, info=repr(e), result='')
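# Minimal sketch of the pattern used above: the blocking Pillow rendering is pushed onto the
# default thread executor so the asyncio event loop stays responsive. The helper names below
# are illustrative only and are not part of this plugin; asyncio is assumed to be imported at
# the top of this module, as the code above already uses it.
def _blocking_render_example() -> str:
    # stands in for the heavy PIL drawing done inside __handle()
    return 'card.jpg'


async def _render_in_executor_example() -> str:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, _blocking_render_example)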
__all__ = [
'scheduler',
'get_hitokoto',
'generate_sign_in_card'
]
|
tools/coverage/coverage_diff.py | zmxdream/Paddle | 17,085 | 12677323 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
usage: coverage_diff.py info_file diff_file > coverage-diff.info
"""
import sys
def get_diff_file_lines(diff_file):
"""
Args:
diff_file (str): File to get modified lines.
Returns:
dict: The diff lines of files.
"""
diff_file_lines = {}
current_file = None
current_line = -1
with open(diff_file) as diff_file:
for line in diff_file:
line = line.strip()
if line.startswith('+++ '):
current_file = line.lstrip('+++ ')
diff_file_lines[current_file] = []
continue
elif line.startswith('@@ '):
current_line = line.split()[2]
current_line = current_line.lstrip('+').split(',')[0]
current_line = int(current_line)
continue
elif line.startswith('-'):
continue
elif line.startswith('+'):
diff_file_lines[current_file].append(current_line)
current_line += 1
return diff_file_lines
def get_info_file_lines(info_file, diff_file):
"""
Args:
info_file (str): File generated by lcov.
diff_file (str): File to get modified lines.
Returns:
None
"""
diff_file_lines = get_diff_file_lines(diff_file)
current_lines = []
current_lf = 0
current_lh = 0
with open(info_file) as info_file:
for line in info_file:
line = line.strip()
if line.startswith('SF:'):
current_file = line.lstrip('SF:')
if current_file.startswith('/paddle/'):
current_file = current_file[len('/paddle/'):]
current_lines = diff_file_lines.get(current_file, [])
elif line.startswith('DA:'):
da = line.lstrip('DA:').split(',')
if int(da[0]) in current_lines:
current_lf += 1
if not line.endswith(',0'):
current_lh += 1
print(line)
continue
elif line.startswith('LF:'):
print('LF:{}'.format(current_lf))
continue
elif line.startswith('LH:'):
print('LH:{}'.format(current_lh))
continue
print(line)
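def _demo_get_diff_file_lines():
    """Hedged, self-contained illustration of get_diff_file_lines(); the diff text and
    file name below are made up for this example and are not part of the tool."""
    import tempfile
    diff_text = ('--- a/foo.py\n'
                 '+++ b/foo.py\n'
                 '@@ -1,2 +1,3 @@\n'
                 ' unchanged\n'
                 '+added line\n'
                 ' unchanged\n')
    with tempfile.NamedTemporaryFile('w', suffix='.diff', delete=False) as tmp:
        tmp.write(diff_text)
        path = tmp.name
    # expected mapping of file -> added line numbers: {'b/foo.py': [2]}
    return get_diff_file_lines(path)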
if __name__ == '__main__':
if len(sys.argv) < 3:
exit()
info_file = sys.argv[1]
diff_file = sys.argv[2]
get_info_file_lines(info_file, diff_file)
|
models/model_3/model_3.py | tangmingsh/image_classifier | 175 | 12677341 | from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense

input_shape = (150, 150, 3) # assumed placeholder; the original project defines this elsewhere
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy']) |
paraphraser/inference.py | mahmoudeid789/paraphraser | 371 | 12677381 | import tensorflow as tf
from embeddings import load_sentence_embeddings
from preprocess_data import preprocess_batch
from six.moves import input
from lstm_model import lstm_model
import numpy as np
from pprint import pprint as pp
class Paraphraser(object):
'''Heart of the paraphraser model. This class loads the checkpoint
into the TensorFlow runtime environment and is responsible for inference.
Greedy and sampling-based decoding approaches are supported.
'''
def __init__(self, checkpoint):
"""Constructor. Load vocabulary index, start token, end token, unk id,
mask_id. Restore checkpoint.
Args:
checkpoint: A path to the checkpoint
"""
self.word_to_id, self.idx_to_word, self.embedding, self.start_id, self.end_id, self.unk_id, self.mask_id = load_sentence_embeddings()
self.checkpoint = checkpoint
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.model = lstm_model(self.sess, 'infer', 300, self.embedding, self.start_id, self.end_id, self.mask_id)
saver = tf.train.Saver()
saver.restore(self.sess, checkpoint)
def sample_paraphrase(self, sentence, sampling_temp=1.0, how_many=1):
"""Paraphrase by sampling a distribution
Args:
sentence (str): A sentence input that will be paraphrased by
sampling from distribution.
sampling_temp (float) : A number between 0 and 1 controlling sampling randomness
how_many (int) : Number of candidate paraphrases to generate
Returns:
list of str: candidate paraphrases of the `sentence`
"""
return self.infer(1, sentence, self.idx_to_word, sampling_temp, how_many)
def greedy_paraphrase(self, sentence):
"""Paraphrase using greedy sampler
Args:
sentence : The source sentence to be paraphrased.
Returns:
list of str : candidate paraphrases of the `sentence`
"""
return self.infer(0, sentence, self.idx_to_word, 0., 1)
def infer(self, decoder, source_sent, id_to_vocab, temp, how_many):
""" Perform inferencing. In other words, generate a paraphrase
for the source sentence.
Args:
decoder : 0 for greedy, 1 for sampling
source_sent : source sentence to generate a paraphrase for
id_to_vocab : dict of vocabulary index to word
temp : the sampling temperature to use when `decoder` is 1
how_many : number of candidate paraphrases to generate
Returns:
list of str : the generated paraphrases
"""
seq_source_words, seq_source_ids = preprocess_batch([ source_sent ] * how_many)
#print(seq_source_words)
#print(seq_source_ids)
seq_source_len = [ len(seq_source) for seq_source in seq_source_ids ]
#print(seq_source_len)
feed_dict = {
self.model['seq_source_ids']: seq_source_ids,
self.model['seq_source_lengths']: seq_source_len,
self.model['decoder_technique']: decoder,
self.model['sampling_temperature']: temp
}
feeds = [
self.model['predictions']
#model['final_sequence_lengths']
]
predictions = self.sess.run(feeds, feed_dict)[0]
#print(predictions)
return self.translate(predictions, decoder, id_to_vocab, seq_source_words[0])
def translate(self, predictions, decoder, id_to_vocab, seq_source_words):
""" Translate the vocabulary ids in `predictions` to actual words
that compose the paraphrase.
Args:
predictions : arrays of vocabulary ids
decoder : 0 for greedy, 1 for sample, 2 for beam
id_to_vocab : dict of vocabulary index to word
Returns:
list of str : the paraphrases
"""
translated_predictions = []
#np_end = np.where(translated_predictions == end_id)
for sent_pred in predictions:
translated = []
for pred in sent_pred:
word = 'UUNNKK'
if pred == self.end_id:
break
if pred == self.unk_id:
# Search for rare word
for seq_source_word in seq_source_words:
if seq_source_word not in self.word_to_id:
word = seq_source_word
else:
word = id_to_vocab[pred]
translated.append(word)
translated_predictions.append(' '.join(translated))
return translated_predictions
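def example_decoding(paraphraser, sentence):
    """Hedged sketch contrasting the two decoders exposed above; `paraphraser` is an
    already-constructed Paraphraser instance and `sentence` is any source string.
    This helper is illustrative only and is not called by the module."""
    greedy = paraphraser.greedy_paraphrase(sentence) # deterministic output
    sampled = paraphraser.sample_paraphrase(sentence, # higher temp -> more diverse candidates
                                            sampling_temp=0.75,
                                            how_many=3)
    return greedy, sampled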
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, help='Checkpoint path')
args = parser.parse_args()
paraphraser = Paraphraser(args.checkpoint)
while 1:
source_sentence = input("Source: ")
#p = paraphraser.greedy_paraphrase(source_sentence)
#print(p)
paraphrases = paraphraser.sample_paraphrase(source_sentence, sampling_temp=0.75, how_many=10)
for i, paraphrase in enumerate(paraphrases):
print("Paraph #{}: {}".format(i, paraphrase))
if __name__ == '__main__':
main()
|
install salabim from github.py | akharitonov/salabim | 151 | 12677396 | <gh_stars>100-1000
import sys
import site
import shutil
import hashlib
import base64
from pathlib import Path
import configparser
import urllib.request
import urllib.error
def _install(files, url=None):
"""
install one file package from GitHub or current directory
Parameters
----------
files : list
files to be installed
the first item (files[0]) will be used as the name of the package
optional files should be preceded with an exclamation mark (!)
url : str
url of the location of the GitHub repository
this will start usually with https://raw.githubusercontent.com/ and end with /master/
if omitted, the files will be copied from the current directory (not GitHub)
Returns
-------
info : Info instance
info.package : name of the package installed
info.path : name where the package is installed in the site-packages
info.version : version of the package (obtained from <package>.py)
info.files_copied : list of copied files
Notes
-----
The program automatically makes the required __init__.py file (unless given in files) and
<package><version>.dist-info folder with the usual files METADATA, INSTALLER and RECORD.
As the setup.py is not run, the METADATA is very limited, i.e. it contains just name and version.
If a __init__.py is in files that file will be used.
Otherwise, an __init__/py file will be generated. In thet case, if a __version__ = statement
is found in the source file, the __version__ will be included in that __init__.py file.
Version history
---------------
version 1.0.5 2020-06-24
Bug with removing the dist-info of packages starting with the same name fixed.
version 1.0.4 2020-03-29
Linux and ios versions now search in sys.path for site-packages,
whereas other platforms now use site.getsitepackages().
This is to avoid installation in a roaming directory on Windows.
version 1.0.2 2020-03-07
modified several open calls to be compatible with Python < 3.6
multiple installation for Pythonista removed. Now installs only in site-packages
version 1.0.1 2020-03-06
now uses urllib instead of requests to avoid non standard libraries
installation for Pythonista improved
version 1.0.0 2020-03-04
initial version
(c)2020 <NAME> - www.salabim.org
"""
class Info:
version = "?"
package = "?"
path = "?"
files_copied = []
info = Info()
Pythonista = sys.platform == "ios"
if not files:
raise ValueError("no files specified")
if files[0][0] == "!":
raise ValueError("first item in files (sourcefile) may not be optional")
package = Path(files[0]).stem
sourcefile = files[0]
file_contents = {}
for file in files:
optional = file[0] == "!"
if optional:
file = file[1:]
if url:
try:
with urllib.request.urlopen(url + file) as response:
page = response.read()
file_contents[file] = page
exists = True
except urllib.error.URLError:
exists = False
else:
exists = Path(file).is_file()
if exists:
with open(file, "rb") as f:
file_contents[file] = f.read()
if (not exists) and (not optional):
raise FileNotFoundError(file + " not found. Nothing installed.")
version = "unknown"
for line in file_contents[sourcefile].decode("utf-8").split("\n"):
line_split = line.split("__version__ =")
if len(line_split) > 1:
raw_version = line_split[-1].strip(" '\"")
version = ""
for c in raw_version:
if c in "0123456789-.":
version += c
else:
break
break
info.files_copied = list(file_contents.keys())
info.package = package
info.version = version
file = "__init__.py"
if file not in file_contents:
file_contents[file] = ("from ." + package + " import *\n").encode()
if version != "unknown":
file_contents[file] += ("from ." + package + " import __version__\n").encode()
if sys.platform.startswith("linux") or (sys.platform == "ios"):
search_in = sys.path
else:
search_in = site.getsitepackages()
for f in search_in:
sitepackages_path = Path(f)
if sitepackages_path.name == "site-packages" and sitepackages_path.is_dir():
break
else:
raise ModuleNotFoundError("can't find the site-packages folder")
path = sitepackages_path / package
info.path = str(path)
if path.is_file():
path.unlink()
if not path.is_dir():
path.mkdir()
for file, contents in file_contents.items():
with (path / file).open("wb") as f:
f.write(contents)
if Pythonista:
pypi_packages = sitepackages_path / ".pypi_packages"
config = configparser.ConfigParser()
config.read(pypi_packages)
config[package] = {}
config[package]["url"] = "github"
config[package]["version"] = version
config[package]["summary"] = ""
config[package]["files"] = path.as_posix()
config[package]["dependency"] = ""
with pypi_packages.open("w") as f:
config.write(f)
else:
for entry in sitepackages_path.glob("*"):
if entry.is_dir():
if entry.stem.startswith(package + "-") and entry.suffix == ".dist-info":
shutil.rmtree(entry)
path_distinfo = Path(str(path) + "-" + version + ".dist-info")
if not path_distinfo.is_dir():
path_distinfo.mkdir()
with (path_distinfo / "METADATA").open("w") as f: # make a dummy METADATA file
f.write("Name: " + package + "\n")
f.write("Version: " + version + "\n")
with (path_distinfo / "INSTALLER").open("w") as f: # make a dummy METADATA file
f.write("github\n")
with (path_distinfo / "RECORD").open("w") as f:
pass # just to create the file to be recorded
with (path_distinfo / "RECORD").open("w") as record_file:
for p in (path, path_distinfo):
for file in p.glob("**/*"):
if file.is_file():
name = file.relative_to(sitepackages_path).as_posix() # make sure we have slashes
record_file.write(name + ",")
if (file.stem == "RECORD" and p == path_distinfo) or ("__pycache__" in name.lower()):
record_file.write(",")
else:
with file.open("rb") as f:
file_contents = f.read()
hash = "sha256=" + base64.urlsafe_b64encode(
hashlib.sha256(file_contents).digest()
).decode("latin1").rstrip("=")
# hash calculation derived from wheel.py in pip
length = str(len(file_contents))
record_file.write(hash + "," + length)
record_file.write("\n")
return info
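# Hedged usage sketch: installing a hypothetical one-file package from the current
# directory by omitting `url`. The file names below are placeholders, not real files;
# the Info attributes used are the ones documented above.
def _example_local_install():
    info = _install(files=['mytool.py', '!README.md'])
    return info.package, info.version, info.path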
if __name__ == "__main__":
info = _install(
files="salabim.py !calibri.ttf !mplus-1m-regular.ttf !license.txt !DejaVuSansMono.ttf !changelog.txt".split(),
url="https://raw.githubusercontent.com/salabim/salabim/master/",
)
print(info.package + " " + info.version + " successfully installed in " + info.path)
print("files copied: ", ", ".join(info.files_copied))
|
service/workflow/workflow_permission_service.py | SuperLeilia/loonflow | 1,541 | 12677412 | <reponame>SuperLeilia/loonflow
from apps.workflow.models import WorkflowUserPermission
from service.account.account_base_service import account_base_service_ins
from service.base_service import BaseService
from service.common.common_service import common_service_ins
from service.common.constant_service import constant_service_ins
class WorkflowPermissionService(BaseService):
"""
Workflow permission service
"""
def __init__(self):
pass
def get_workflow_id_list_by_permission(self, permission, user_type, user):
"""
获取操作权限
:param permission:
:param user_type:
:param user:
:return:
"""
if user_type not in ['app', 'user', 'department']:
return False, 'user type is invalid'
if not user:
if user_type == 'app':
return False, 'app_name is not provided'
if user_type == 'user':
return False, 'user is not provided'
if user_type == 'department':
return False, 'department is not provided'
if user == 'loonflow':
from apps.workflow.models import Workflow
workflow_query_set = Workflow.objects.filter(is_deleted=0).all()
workflow_id_list = []
for workflow_obj in workflow_query_set:
workflow_id_list.append(workflow_obj.id)
return True, dict(workflow_id_list=workflow_id_list)
result_queryset = WorkflowUserPermission.objects.filter(permission=permission, user_type=user_type, user=user, is_deleted=0).all()
workflow_id_list = [result.workflow_id for result in result_queryset]
workflow_id_list = list(set(workflow_id_list))
return True, dict(workflow_id_list=workflow_id_list)
def workflow_id_permission_check(self, workflow_id, permission, user_type, user):
"""
Check whether the caller has the given permission on the specified workflow_id
:param workflow_id:
:param permission:
:param user_type:
:param user:
:return:
"""
if user_type == 'app' and user == 'loonflow':
return True, ''
workflow_query_set = WorkflowUserPermission.objects.filter(
is_deleted=0, workflow_id=workflow_id, permission=permission, user_type=user_type, user=user).first()
if workflow_query_set:
return True, ''
else:
if permission == 'api':
return False, 'app: {} has no api permission for workflow_id: {}'.format(user, workflow_id)
if permission == 'admin':
return False, 'user: {} has no admin permission for workflow_id:{}'.format(user, workflow_id)
if permission == 'intervene':
return False, 'user: {} has no intervene permission for workflow_id:{}'.format(user, workflow_id)
if permission == 'view':
if user_type == 'user':
return False, 'user: {} has no view permission for workflow_id:{}'.format(user, workflow_id)
if user_type == 'department':
return False, 'department: {} has no view permission for workflow_id:{}'.format(user, workflow_id)
return False, 'no permission'
def get_record_list_by_app_list(self, app_list):
"""
Batch-get workflow permission records for a list of apps
:param app_list:
:return:
"""
permission_query_set = WorkflowUserPermission.objects.filter(
is_deleted=0, permission='api', user_type='app', user__in=app_list).all()
return True, dict(permission_query_set=permission_query_set)
def update_app_permission(self, app_name, workflow_ids):
"""
Update an app's workflow permissions
:param app_name:
:param workflow_ids:
:return:
"""
if workflow_ids:
workflow_id_list = [int(workflow_id) for workflow_id in workflow_ids.split(',')]
else:
workflow_id_list = []
permission_query_set = WorkflowUserPermission.objects.filter(
is_deleted=0, permission='api', user_type='app', user=app_name).all()
exist_workflow_id_list = [permission_query.workflow_id for permission_query in permission_query_set]
flag, need_add_workflow_list = common_service_ins.list_difference(workflow_id_list, exist_workflow_id_list)
if flag is False:
return False, need_add_workflow_list
flag, need_del_workflow_list = common_service_ins.list_difference(exist_workflow_id_list, workflow_id_list)
if flag is False:
return False, need_del_workflow_list
add_permission_query_list = []
for workflow_id in need_add_workflow_list:
add_permission_query_list.append(WorkflowUserPermission(permission='api', user_type='app', user=app_name, workflow_id=workflow_id))
WorkflowUserPermission.objects.bulk_create(add_permission_query_list)
WorkflowUserPermission.objects.filter(
is_deleted=0, permission='api', user_type='app', user=app_name, workflow_id__in=need_del_workflow_list).update(is_deleted=1)
return True, ''
def del_app_permission(self, app_name, workflow_ids=None):
"""
Delete an app's workflow permissions
:param app_name:
:param workflow_ids:
:return:
"""
if workflow_ids is None:
WorkflowUserPermission.objects.filter(
is_deleted=0, permission='api', user_type='app', user=app_name).update(is_deleted=1)
else:
WorkflowUserPermission.objects.filter(
is_deleted=0, permission='api', user_type='app', user=app_name, workflow_id__in=workflow_ids.split(',')).update(is_deleted=1)
return True, ''
def manage_workflow_permission_check(self, workflow_id, username, app_name):
"""
Check whether the user has permission to manage the workflow
:param workflow_id:
:param username:
:param app_name:
:return:
"""
# Check whether the app has api permission on this workflow
flag, msg = self.workflow_id_permission_check(workflow_id, 'api', 'app', app_name)
if flag is False:
return flag, msg
# The workflow creator has manage permission
from service.workflow.workflow_base_service import workflow_base_service_ins
flag, workflow_obj = workflow_base_service_ins.get_by_id(workflow_id)
if workflow_obj.creator == username:
return True, "creator has workflow's manage permission"
# Super admins have manage permission on all workflows
flag, user_obj = account_base_service_ins.get_user_by_username(username)
if flag is False:
return flag, user_obj
if user_obj.type_id == constant_service_ins.ACCOUNT_TYPE_SUPER_ADMIN:
return True, "superuser has all workflow's manage permission"
flag, msg = self.workflow_id_permission_check(workflow_id, 'admin', 'user', username)
return flag, msg
workflow_permission_service_ins = WorkflowPermissionService()
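def _example_permission_lookup():
    """Hedged sketch of how the service instance above is typically queried; the app
    name 'demo_app' and workflow_id 1 are illustrative placeholders, not real records."""
    flag, result = workflow_permission_service_ins.get_workflow_id_list_by_permission(
        permission='api', user_type='app', user='demo_app')
    ok, msg = workflow_permission_service_ins.workflow_id_permission_check(
        workflow_id=1, permission='api', user_type='app', user='demo_app')
    return flag, result, ok, msg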
|
shopify/resources/report.py | butlertron/shopify_python_api | 828 | 12677416 | <filename>shopify/resources/report.py
from ..base import ShopifyResource
class Report(ShopifyResource):
pass
|
alg/time_series_deconfounder/rmsn/libs/model_rnn.py | loramf/mlforhealthlabpub | 171 | 12677437 | """
CODE ADAPTED FROM: https://github.com/sjblim/rmsn_nips_2018
Implementation of Recurrent Marginal Structural Networks (R-MSNs):
<NAME>, <NAME>, <NAME>, "Forecasting Treatment Responses Over Time Using Recurrent
Marginal Structural Networks", Advances in Neural Information Processing Systems, 2018.
"""
import tensorflow as tf
import numpy as np
import pandas as pd
import rmsn.libs.net_helpers as helpers
_ACTIVATION_MAP = {'sigmoid': tf.nn.sigmoid,
'elu': tf.nn.elu,
'tanh': tf.nn.tanh,
'linear': lambda x: x}
class StateDumpingRNN(tf.contrib.rnn.RNNCell):
""" This RNNCell dumps out internal states for lstms"""
def __init__(self, lstm):
super(StateDumpingRNN, self).__init__()
# Check that outputs
self.lstm_cell = lstm
@property
def state_size(self):
return self.lstm_cell.state_size
@property
def output_size(self):
return self.lstm_cell.state_size
def call(self, inputs, state):
output, state = self.lstm_cell(inputs, state)
return state, state
class Seq2SeqDecoderCell(tf.contrib.rnn.RNNCell):
""" Decoder cell which allows for feedback, and external inputs during training """
def __init__(self, lstm, W, b, b_training_mode=False):
super(Seq2SeqDecoderCell, self).__init__()
self.lstm_cell = lstm
self.W = W
self.b = b
self._output_size = self.W.get_shape().as_list()[-1]
self.b_training_mode = b_training_mode
@property
def state_size(self):
if self.b_training_mode: # use actual inputs
return self.lstm_cell.state_size
else:
return self.lstm_cell.state_size+self._output_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
# During training time, we assume that the previous input shape is [batch_size, action_vector + output_vector]
# Output vectors are assumed to be at the end of the input or state vector (depending on train/test mode respectively)
if self.b_training_mode:
actual_states = state
combined_inputs = inputs
else:
actual_states, prev_outputs = tf.split(state,
[self.lstm_cell.state_size, self._output_size],
axis=-1)
combined_inputs = tf.concat([inputs, prev_outputs], axis=-1)
# TODO: FIX HACK! This forces this lstm to be in a different scope
with tf.variable_scope("seq2seq"):
output, state = self.lstm_cell(combined_inputs, actual_states)
output = tf.matmul(output, self.W) + self.b
if not self.b_training_mode:
state = tf.concat([state, output], axis =-1)
return output, state
class RnnModel:
def __init__(self, params):
# Generic params
self.net_name = params['net_name']
self.experiment_name = params['experiment_name']
# Data params
self.training_data = params['training_dataset']
self.validation_data = params['validation_dataset']
self.test_data = params['test_dataset']
self.input_size = params['input_size']
self.output_size = params['output_size']
# Network params
self.softmax_size = params['softmax_size']
self.dropout_rate = params['dropout_rate']
self.hidden_layer_size = params['hidden_layer_size']
self.memory_activation_type = params['hidden_activation']
self.output_activation_type = params['output_activation']
self.b_use_seq2seq_feedback = params['use_seq2seq_feedback']
self.b_use_seq2seq_training_mode = params['use_seq2seq_training_mode']
# Memory Adapter params
self.b_use_memory_adapter = False if 'use_memory_adapter' not in params else params['use_memory_adapter']
self.memory_adapter_size = 0 if 'memory_adapter_size' not in params else params['memory_adapter_size']
self.encoder_state_size = None
# TODO: FIX THIS HACK FOR LOADING
# Change scope for seq2seq network - so weights can be loaded later...
variable_scope_name = "seq2seq" if "seq2seq" in self.net_name else "network"
with tf.variable_scope(variable_scope_name):
self.rnn_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_layer_size,
activation=_ACTIVATION_MAP[self.memory_activation_type],
state_is_tuple=False,
name=variable_scope_name
if variable_scope_name != "network" else None)
self.output_activation = _ACTIVATION_MAP[self.output_activation_type]
self.output_w = tf.get_variable("Output_W",
[self.hidden_layer_size, self.output_size],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
self.output_b = tf.get_variable("Output_b",
[self.output_size],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
# Training params
self.performance_metric = params['performance_metric']
self.epochs = params['num_epochs']
self.minibatch_size = params['minibatch_size']
self.learning_rate = params['learning_rate']
self.max_global_norm = params['max_norm']
self.backprop_length = params['backprop_length']
self.global_step = tf.get_variable('global_step_tfrnn',
initializer=0,
dtype=np.int32,
trainable=False)
# Test params
self.num_prediction_samples = 500
# Saving params
self.model_folder = params['model_folder']
relevant_name_parts = [self.experiment_name,
#self.net_name,
self.dropout_rate,
self.hidden_layer_size,
self.epochs,
self.minibatch_size,
self.learning_rate,
self.max_global_norm,
self.backprop_length]
# Check
if not (self.memory_activation_type == "elu" and self.output_activation_type == "linear"):
relevant_name_parts += [self.memory_activation_type, self.output_activation_type]
if self.memory_adapter_size > 0 :
relevant_name_parts += [self.memory_adapter_size]
self.serialisation_name = "_".join([str(s) for s in relevant_name_parts])
def _apply_memory_adapter(self, encoder_states):
b_single_layer = self.memory_adapter_size == 0 # since externally checked that memory adapter should be applied
if self.encoder_state_size is None:
encoder_size = encoder_states.get_shape().as_list()[-1]
self.encoder_state_size = encoder_size
if b_single_layer:
self.memory_adapter_layer = {'W1': tf.get_variable("Adapter_Layer1_W",
[self.encoder_state_size, self.hidden_layer_size*2],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer()),
'b1': tf.get_variable("Adapter_Layer1_b",
[self.hidden_layer_size*2],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer()),
}
else:
self.memory_adapter_layer = {'W1': tf.get_variable("Adapter_Layer1_W",
[self.encoder_state_size, self.memory_adapter_size],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer()),
'b1': tf.get_variable("Adapter_Layer1_b",
[self.memory_adapter_size],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer()),
'W2': tf.get_variable("Adapter_Layer2_W",
[self.memory_adapter_size, self.hidden_layer_size*2],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer()),
'b2': tf.get_variable("Adapter_Layer2_b",
[self.hidden_layer_size*2], # LSTM memory is double concated
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
}
# Use elu and linear to avoid placing any restrictions on the range of internal activations
memory_activation_fxn = _ACTIVATION_MAP[self.memory_activation_type]
decoder_states = memory_activation_fxn(tf.matmul(encoder_states, self.memory_adapter_layer['W1'])
+ self.memory_adapter_layer['b1'])
if not b_single_layer:
decoder_states = memory_activation_fxn(tf.matmul(decoder_states, self.memory_adapter_layer['W2'])
+ self.memory_adapter_layer['b2'])
return decoder_states
def get_prediction_graph(self, use_validation_set,
with_dropout=True,
placeholder_time_steps=None,
b_use_state_initialisation=False,
b_dump_all_states=False):
if placeholder_time_steps:
data_chunk = {}
data_chunk['inputs'] = tf.placeholder(tf.float32,[None, placeholder_time_steps, self.input_size])
data_chunk['sequence_lengths'] = tf.placeholder(tf.float32,[None]) # Length
else:
if use_validation_set is None:
dataset = self.training_data.batch(self.minibatch_size)
elif use_validation_set:
dataset = self.validation_data.batch(self.minibatch_size)
else:
dataset = self.test_data.batch(self.minibatch_size)
iterator = tf.data.Iterator.from_structure(dataset.output_types,
dataset.output_shapes)
initializer = iterator.make_initializer(dataset)
data_chunk = iterator.get_next()
if b_use_state_initialisation:
if 'initial_states' not in data_chunk:
raise ValueError("State initialisations not present!")
initial_states = tf.cast(data_chunk['initial_states'], tf.float32)
else:
initial_states = None
output = self._build_prediction_graph(data_chunk,
with_dropout=with_dropout,
initial_states=initial_states,
b_dump_all_states=b_dump_all_states)
if placeholder_time_steps:
output['input_holder'] = data_chunk['inputs']
output['sequence_length_holder'] = data_chunk['sequence_lengths']
else:
output['initializer'] = initializer
return output
def _build_prediction_graph(self, data_chunk, with_dropout=True, initial_states=None,
b_dump_all_states=False):
# output_minibatch = tf.cast(data_chunk['outputs'], tf.float32)
# active_entries = tf.cast(data_chunk['active_entries'], tf.float32)
input_minibatch = tf.cast(data_chunk['inputs'], tf.float32)
sequence_lengths = tf.cast(data_chunk['sequence_lengths'], tf.int32)
time_steps = input_minibatch.get_shape().as_list()[1]
# Setup graph now
outputs = []
states_list = []
if with_dropout:
num_samples = self.num_prediction_samples
keep_probs = (1 - self.dropout_rate)
else:
num_samples = 1
keep_probs = 1.0
lstm_additional_size = self.output_size \
if not self.b_use_seq2seq_training_mode and self.b_use_seq2seq_feedback \
else 0
cell = tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell,
input_keep_prob=keep_probs,
output_keep_prob=keep_probs,
state_keep_prob=keep_probs,
variational_recurrent=True,
input_size=input_minibatch.shape[2] + lstm_additional_size,
dtype=tf.float32)
# Extension for feedback loops in seq2seq architecture
if self.b_use_seq2seq_feedback:
cell = Seq2SeqDecoderCell(cell, self.output_w, self.output_b, b_training_mode=False)
# Extension for memory adapter
if self.b_use_memory_adapter:
if initial_states is None:
raise ValueError("Memory adapter requires initial states!")
initial_states = self._apply_memory_adapter(initial_states)
for i in range(num_samples):
val, states = tf.nn.dynamic_rnn(cell,
input_minibatch,
initial_state=initial_states, # None for default
dtype=tf.float32,
sequence_length=sequence_lengths)
if b_dump_all_states:
state_dumping_cell = StateDumpingRNN(cell)
all_states, dumped_states = tf.nn.dynamic_rnn(state_dumping_cell,
input_minibatch,
initial_state=initial_states, # None for default
dtype=tf.float32,
sequence_length=sequence_lengths)
else:
all_states = states # just dump one state - used to speed up training while enforcing function params
# Linear output layer
flattened_val = tf.reshape(val, [-1, self.hidden_layer_size])
if self.b_use_seq2seq_feedback:
logits = flattened_val
else:
logits = tf.matmul(flattened_val, self.output_w) + self.output_b
if self.softmax_size != 0:
logits = tf.reshape(logits, [-1, time_steps, self.output_size])
core_outputs, softmax_outputs = tf.split(logits,
[self.output_size - self.softmax_size, self.softmax_size],
axis=2)
output = tf.concat([self.output_activation(core_outputs), tf.nn.softmax(softmax_outputs, axis=2)],
axis=2)
else:
output = self.output_activation(logits)
output = tf.reshape(output, [-1, time_steps, self.output_size])
outputs.append(tf.expand_dims(output, 0))
states_list.append(tf.expand_dims(all_states, 0))
# Dumping output
samples = tf.concat(outputs, axis=0)
mean_estimate = tf.reduce_mean(samples, axis=0)
upper_bound = tf.contrib.distributions.percentile(samples, q=95.0, axis=0)
lower_bound = tf.contrib.distributions.percentile(samples, q=5.0, axis=0)
# Averages across all samples - no difference for single sample
ave_state = tf.reduce_mean(tf.concat(states_list, axis=0), axis=0)
return {'mean': mean_estimate, 'upper_bound': upper_bound, 'lower_bound': lower_bound, 'ave_states': ave_state}
def get_training_graph(self,
use_truncated_bptt=True,
b_stub_front=True,
b_use_state_initialisation=True):
training_dataset = self.training_data.shuffle(buffer_size=10000) \
.batch(self.minibatch_size) \
.repeat(self.epochs)
iterator = training_dataset.make_one_shot_iterator()
data_chunk = iterator.get_next()
input_minibatch = tf.cast(data_chunk['inputs'], tf.float32)
output_minibatch = tf.cast(data_chunk['outputs'], tf.float32)
active_entries = tf.cast(data_chunk['active_entries'], tf.float32)
sequence_lengths = tf.cast(data_chunk['sequence_lengths'], tf.int32)
if b_use_state_initialisation:
if 'initial_states' not in data_chunk:
raise ValueError("State initialisations not present!")
initial_states = tf.cast(data_chunk['initial_states'], tf.float32)
# Extension for memory adapter
if self.b_use_memory_adapter:
if initial_states is None:
raise ValueError("Memory adapter requires initial states!")
initial_states = self._apply_memory_adapter(initial_states)
else:
initial_states = None
if 'propensity_weights' in data_chunk:
weights = tf.cast(data_chunk['propensity_weights'], tf.float32)
else:
weights = 1
keep_probs = (1 - self.dropout_rate)
# Setup graph now
lstm_additional_size = self.output_size \
if not self.b_use_seq2seq_training_mode and self.b_use_seq2seq_feedback \
else 0
cell = tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell,
input_keep_prob=keep_probs,
output_keep_prob=keep_probs,
state_keep_prob=keep_probs,
variational_recurrent=True,
input_size=input_minibatch.shape[2] + lstm_additional_size,
dtype=tf.float32)
if self.b_use_seq2seq_feedback:
cell = Seq2SeqDecoderCell(cell, self.output_w, self.output_b,
b_training_mode=self.b_use_seq2seq_training_mode)
# Stack up the dynamic RNNs for T-BPTT.
# Splitting it up
total_timesteps = input_minibatch.get_shape().as_list()[1]
num_slices = int(total_timesteps/self.backprop_length)
chunk_sizes = [self.backprop_length for i in range(num_slices)]
odd_size = total_timesteps - self.backprop_length*num_slices
# get all the chunks
if odd_size > 0:
if b_stub_front:
chunk_sizes = [odd_size] + chunk_sizes
else:
chunk_sizes = chunk_sizes + [odd_size]
# Implement TF style Truncated-backprop through time
outputs = []
start = 0
states = initial_states
for chunk_size in chunk_sizes:
input_chunk = tf.slice(input_minibatch, [0, start, 0], [-1, chunk_size, self.input_size])
if states is not None and use_truncated_bptt:
val, states = tf.nn.dynamic_rnn(cell,
input_chunk,
sequence_length=sequence_lengths,
dtype=tf.float32,
initial_state=states)
else:
val, states = tf.nn.dynamic_rnn(cell,
input_chunk,
sequence_length=sequence_lengths,
dtype=tf.float32)
# Linear output layer
flattened_val = tf.reshape(val, [-1, self.hidden_layer_size])
if self.b_use_seq2seq_feedback:
logits = flattened_val
else:
logits = tf.matmul(flattened_val, self.output_w) + self.output_b
if self.softmax_size !=0:
logits = tf.reshape(logits, [-1, chunk_size, self.output_size])
core_outputs, softmax_outputs = tf.split(logits,
[self.output_size - self.softmax_size, self.softmax_size],
axis=2)
output = tf.concat([self.output_activation(core_outputs), tf.nn.softmax(softmax_outputs, axis=2)], axis=2)
else:
output = self.output_activation(logits)
output = tf.reshape(output, [-1, chunk_size, self.output_size])
outputs.append(output)
# break links between states for truncated bptt
states = tf.identity(states)
# Starting point
start += chunk_size
# Dumping output
predictions = tf.concat(outputs, axis=1)
# Split out the softmax components
if self.softmax_size > 0:
original_vs_softmax_size = [self.output_size -self.softmax_size, self.softmax_size]
predictions, softmax_predictions = tf.split(predictions, original_vs_softmax_size, axis=2)
output_minibatch, softmax_output_minibatch = tf.split(output_minibatch, original_vs_softmax_size, axis=2)
active_entries, softmax_active = tf.split(active_entries, original_vs_softmax_size, axis=2)
# Compute loss function
if self.performance_metric == "mse":
loss = tf.reduce_sum(tf.square(predictions - output_minibatch) * active_entries * weights) \
/ tf.reduce_sum(active_entries) # cos some zero entires
elif self.performance_metric == "xentropy":
loss = tf.reduce_sum((output_minibatch * -tf.log(predictions + 1e-8)
+ (1 - output_minibatch) * -tf.log(1 - predictions + 1e-8))
* active_entries * weights) \
/ tf.reduce_sum(active_entries)
else:
raise ValueError("Unknown performance metric {}".format(self.performance_metric))
if self.softmax_size > 0:
loss += tf.reduce_sum(softmax_output_minibatch * -tf.log(softmax_predictions + 1e-8)
* softmax_active * weights) \
/ tf.reduce_sum(softmax_active)
optimiser = helpers.get_optimization_graph(loss,
learning_rate=self.learning_rate,
max_global_norm=self.max_global_norm,
global_step=self.global_step)
# Parcel outputs
handles = {'loss': loss,
'optimiser': optimiser}
return handles
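def _example_tbptt_chunks(total_timesteps=25, backprop_length=10, b_stub_front=True):
    """Hedged illustration of the chunk-size computation used in
    RnnModel.get_training_graph() for truncated backprop through time; the
    numbers here are arbitrary examples, not model defaults."""
    num_slices = int(total_timesteps / backprop_length)
    chunk_sizes = [backprop_length for _ in range(num_slices)]
    odd_size = total_timesteps - backprop_length * num_slices
    if odd_size > 0:
        chunk_sizes = [odd_size] + chunk_sizes if b_stub_front else chunk_sizes + [odd_size]
    return chunk_sizes  # e.g. [5, 10, 10] for 25 steps with backprop_length=10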
|
WAF/WAF-Enhanced-Replicator/wafget.py | thmasgq/aws-support-tools | 1,038 | 12677441 | <reponame>thmasgq/aws-support-tools
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# Modules importing
from __future__ import print_function
import os, sys
import waffun as function
import boto3
import zipfile
# Global Constants
limitWebAcl = '10'
def stageFile(fileName):
# Trying to 'touch' (create) the provided file
dummyFile = ""
try:
dummyFile = open(fileName, 'w')
except:
print("*** Unable to create the file " + fileName + "! ***\n", file=sys.stderr)
sys.exit(-1)
else:
return (dummyFile)
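# Hedged usage sketch for getWaf() defined below; the region value is illustrative, and an
# empty Web ACL id makes the tool list the account's Web ACLs and prompt for a choice.
def _example_replicate_regional_web_acl():
    getWaf([2, 'us-east-1', '']) # 2 = regional WAF, region name, Web ACL id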
def getWaf(arguments):
'''
Prints customer account and calls the right WAF function to get customer's resources.
The arguments are a list with the following values: [wafType to be considered (1 = global, 2 = regional), region name, Web ACL ID]
'''
# Staging all files. The first one is the log file. The second one is the Terraform template file.
# The third one is the zip file containing the two previous ones.
listLogTemplate = function.getHomeConfig()
log = stageFile(listLogTemplate[0])
template = stageFile(listLogTemplate[1])
package = listLogTemplate[2]
print("Your WAFER log file is " + listLogTemplate[0])
print("Your Terraform template file is " + listLogTemplate[1])
# Populating first lines of the log file
log.write("*************************************************************************\n")
log.write("WAFER - AWS WAF Enhanced Repicator - Version " + function.getVersion() + "\n")
log.write("*************************************************************************\n")
webAclId = arguments[2]
isRegional = False
suffix = "_"
region = "us-east-1"
if arguments[0] == 2: # This indicates that it will be regional WAF
isRegional = True
suffix = "regional_"
region = arguments[1]
if isRegional:
print("Considering WAF regional resources on " + region + ".\n")
log.write(function.getFormattedDateTime() + "Region: " + region + "\n")
client = boto3.setup_default_session(region_name = region)
client = boto3.client('waf-regional')
else:
print("Considering WAF global resources.\n")
log.write(function.getFormattedDateTime() + "Global WAF\n")
client = boto3.client('waf')
if len(webAclId) == 0:
try:
response = client.list_web_acls()
except:
function.abortMission(log, template, "list_web_acls()")
else:
# In case query is ok, proceed with the code
if len(response['WebACLs']) == 0:
if isRegional:
print("You have no Web ACLs on region {}. Exiting...\n".format(region), file=sys.stderr)
else:
print("You have no global Web ACLs.\n", file=sys.stderr)
log.write(function.getFormattedDateTime() + "End of Log.")
function.abortMission(log, template)
else:
print("Choose which Web ACL you want to consider: ")
for i in range(len(response['WebACLs'])):
print("[{}] Id: {}, Name: {}".format(str(i+1), response['WebACLs'][i]['WebACLId'], response['WebACLs'][i]['Name']))
print("[0] Abort")
choice = -1
while (choice < 0 or choice > len(response['WebACLs'])):
choice = input("Your choice: ")
if not choice.isdigit():
choice = -1
else:
choice = int(choice)
if choice == 0:
print("Aborting execution.\n", file=sys.stderr)
log.write(function.getFormattedDateTime() + "End of Log.")
function.abortMission(log, template, "")
webAclId = response['WebACLs'][choice-1]['WebACLId']
webAclName = response['WebACLs'][choice-1]['Name']
else:
try:
response = client.get_web_acl(WebACLId = webAclId)
except:
if isRegional:
print("Unable to find the provided Web ACL ID {} on the provided region {}.".format(webAclId, region), file=sys.stderr)
log.write(function.getFormattedDateTime() + "Unable to find the provided Web ACL " + webAclId + " on the provided region " + region + ".\n")
else:
print("Unable to find the provided global Web ACL ID {}.".format(webAclId), file=sys.stderr)
log.write(function.getFormattedDateTime() + "Unable to find the provided global Web ACL " + webAclId + ".\n")
function.abortMission(log, template, "")
webAclName = response['WebACL']['Name']
log.write(function.getFormattedDateTime() + "Web ACL (ID): " + webAclName + " (" + webAclId + ")\n")
print("Grabbing resources for Web ACL {} (ID: {})...".format(webAclName, webAclId))
try:
response1 = client.get_web_acl(WebACLId = webAclId)
except:
function.abortMission(log, template, "get_web_acl()")
metricName = response1['WebACL']['MetricName']
defaultAction = response1['WebACL']['DefaultAction']['Type']
# Starting the template writing.
template.write('provider "aws" {\n')
if isRegional:
template.write(' region = "' + region + '"\n')
else:
template.write(' region = "us-east-1"\n')
template.write('}\n\n')
# Getting all conditions.
conditionsResult = crawlConditions(client, log, template, suffix)
template.write(conditionsResult[1])
template.write("\n\n")
rules = {}
for i in range(len(response1['WebACL']['Rules'])):
finalString = ""
ruleId = response1['WebACL']['Rules'][i]['RuleId']
ruleType = response1['WebACL']['Rules'][i]['Type']
if ruleType == 'GROUP':
try:
groupTemp = client.get_rule_group(RuleGroupId = ruleId)
except:
function.abortMission(log, template, "get_rule_group()")
groupName = groupTemp['RuleGroup']['Name']
print("Rule Group (Id): {} ({})".format(groupName, ruleId))
log.write(function.getFormattedDateTime() + "Group Name: " + groupName + " / Group Id: " + ruleId + "\n")
try:
loopGroup = client.list_activated_rules_in_rule_group(RuleGroupId = ruleId)
except:
function.abortMission(log, template, "list_activated_rules_in_rule_group()")
for j in range(len(loopGroup['ActivatedRules'])):
idTemp = loopGroup['ActivatedRules'][j]['RuleId']
try:
rTemp = client.get_rule(RuleId = idTemp)
except:
function.abortMission(log, template, "get_rule()")
# Checking if the rule was not already recorded
if not idTemp in rules:
index = 0
for key, value in rules.items():
if rules[key][:5] == "rule_":
index += 1
rules[idTemp] = "rule_" + str(index)
nameTemp = rTemp['Rule']['Name']
print(" Rule Name: {} / Rule ID: {}".format(nameTemp, idTemp))
log.write(function.getFormattedDateTime() + " Rule Name: " + nameTemp + " / Rule ID: " + ruleId + "\n")
finalString += "resource \"aws_waf" + suffix + "rule\" \"rule_" + str(index) +"\" {\n"
finalString += " name = \"" + rTemp['Rule']['Name'] + "\"\n"
finalString += " metric_name = \"" + rTemp['Rule']['MetricName'] + "\"\n\n"
for k in range(len(rTemp['Rule']['Predicates'])):
if isRegional:
finalString += " predicate {\n"
else:
finalString += " predicates {\n"
finalString += " type = \"" + rTemp['Rule']['Predicates'][k]['Type'] + "\"\n"
finalString += " negated = " + str(rTemp['Rule']['Predicates'][k]['Negated']).lower() + "\n"
conditionId = rTemp['Rule']['Predicates'][k]['DataId']
finalString += " data_id = \"${aws_waf" + suffix + conditionsResult[0][conditionId][:-2] + "." + conditionsResult[0][conditionId] + ".id}\"\n"
finalString += " }\n"
finalString += "}\n\n"
finalString += "resource \"aws_waf" + suffix + "rule_group\" \"rule_group_" + str(i) +"\" {\n"
rules[ruleId] = "rule_group_" + str(i)
finalString += " name = \"" + groupName + "\"\n"
finalString += " metric_name = \"" + groupTemp['RuleGroup']['MetricName'] + "\"\n\n"
for j in range(len(loopGroup['ActivatedRules'])):
finalString += " activated_rule {\n"
finalString += " action {\n"
finalString += " type = \"" + loopGroup['ActivatedRules'][j]['Action']['Type'] + "\"\n"
finalString += " }\n\n"
finalString += " priority = " + str(loopGroup['ActivatedRules'][j]['Priority']) + "\n"
finalString += " rule_id = \"${aws_waf" + suffix + "rule." + rules[loopGroup['ActivatedRules'][j]['RuleId']] + ".id}\"\n"
finalString += " }\n\n"
finalString += "}\n\n"
template.write(finalString)
elif ruleType == "RATE_BASED":
try:
rTemp = client.get_rate_based_rule(RuleId = ruleId)
except:
function.abortMission(log, template, "get_rate_based_rule()")
ruleName = rTemp['Rule']['Name']
ruleAction = response1['WebACL']['Rules'][i]['Action']['Type']
log.write(function.getFormattedDateTime() + "Rule Name: " + ruleName + " / Rule Id: " + ruleId + "\n")
print("Rule Name: {} / Rule Id: {}".format(ruleName, ruleId))
idTemp = rTemp['Rule']['RuleId']
if not idTemp in rules:
index = 0
for key, value in rules.items():
if rules[key][:5] == "rule_":
index += 1
rules[idTemp] = "rule_" + str(index)
finalString += "resource \"aws_waf" + suffix + "rate_based_rule\" \"rule_" + str(index) +"\" {\n"
finalString += " name = \"" + rTemp['Rule']['Name'] + "\"\n"
finalString += " metric_name = \"" + rTemp['Rule']['MetricName'] + "\"\n\n"
finalString += " rate_key = \"" + rTemp['Rule']['RateKey'] + "\"\n"
finalString += " rate_limit = " + str(rTemp['Rule']['RateLimit']) + "\n\n"
for j in range(len(rTemp['Rule']['MatchPredicates'])):
if isRegional:
finalString += " predicate {\n"
else:
finalString += " predicates {\n"
conditionId = rTemp['Rule']['MatchPredicates'][j]['DataId']
finalString += " data_id = \"${aws_waf" + suffix + conditionsResult[0][conditionId][:-2] + "." + conditionsResult[0][conditionId] + ".id}\"\n"
finalString += " negated = " + str(rTemp['Rule']['MatchPredicates'][j]['Negated']).lower() + "\n"
finalString += " type = \"" + rTemp['Rule']['MatchPredicates'][j]['Type'] + "\"\n"
finalString += " }\n\n"
finalString += "}\n\n"
template.write(finalString)
elif ruleType == "REGULAR":
try:
rTemp = client.get_rule(RuleId = ruleId)
except:
function.abortMission(log, template, "get_rule()")
ruleName = rTemp['Rule']['Name']
ruleAction = response1['WebACL']['Rules'][i]['Action']['Type']
log.write(function.getFormattedDateTime() + "Rule Name: " + ruleName + " / Rule Id: " + ruleId + "\n")
print("Rule Name: {} / Rule Id: {}".format(ruleName, ruleId))
idTemp = rTemp['Rule']['RuleId']
if not idTemp in rules:
index = 0
for key, value in rules.items():
if rules[key][:5] == "rule_":
index += 1
rules[idTemp] = "rule_" + str(index)
finalString += "resource \"aws_waf" + suffix + "rule\" \"rule_" + str(index) +"\" {\n"
finalString += " name = \"" + rTemp['Rule']['Name'] + "\"\n"
finalString += " metric_name = \"" + rTemp['Rule']['MetricName'] + "\"\n\n"
for j in range(len(rTemp['Rule']['Predicates'])):
if isRegional:
finalString += " predicate {\n"
else:
finalString += " predicates {\n"
conditionId = rTemp['Rule']['Predicates'][j]['DataId']
finalString += " data_id = \"${aws_waf" + suffix + conditionsResult[0][conditionId][:-2] + "." + conditionsResult[0][conditionId] + ".id}\"\n"
finalString += " negated = " + str(rTemp['Rule']['Predicates'][j]['Negated']).lower() + "\n"
finalString += " type = \"" + rTemp['Rule']['Predicates'][j]['Type'] + "\"\n"
finalString += " }\n\n"
finalString += "}\n\n"
template.write(finalString)
# Getting all associated resources for the Web ACL.
resourcesResult = getAssociatedResources(client, webAclId, region, log, template, isRegional)
template.write(resourcesResult[1])
finalString = ""
finalString += "resource \"aws_waf" + suffix + "web_acl\" \"web_acl\" {\n"
finalString += ' name = "'+ webAclName + '"\n'
finalString += ' metric_name = "' + metricName + '"\n\n'
finalString += ' default_action {\n'
finalString += ' type = "' + defaultAction + '"\n'
finalString += ' }\n\n'
for i in range(len(response1['WebACL']['Rules'])):
ruleType = response1['WebACL']['Rules'][i]['Type']
if isRegional:
finalString += " rule {\n"
else:
finalString += " rules {\n"
finalString += " priority = " + str(response1['WebACL']['Rules'][i]['Priority']) + "\n"
finalString += " type = \"" + ruleType + "\"\n"
if ruleType == "GROUP":
finalString += " rule_id = \"${aws_waf" + suffix + "rule_group." + rules[response1['WebACL']['Rules'][i]['RuleId']] + ".id}\"\n\n"
finalString += " override_action {\n"
finalString += " type = \"" + response1['WebACL']['Rules'][i]['OverrideAction']['Type'] + "\"\n"
elif ruleType == "REGULAR":
finalString += " rule_id = \"${aws_waf" + suffix + "rule." + rules[response1['WebACL']['Rules'][i]['RuleId']] + ".id}\"\n\n"
finalString += " action {\n"
finalString += " type = \"" + response1['WebACL']['Rules'][i]['Action']['Type'] + "\"\n"
elif ruleType == "RATE_BASED":
finalString += " rule_id = \"${aws_waf" + suffix + "rate_based_rule." + rules[response1['WebACL']['Rules'][i]['RuleId']] + ".id}\"\n\n"
finalString += " action {\n"
finalString += " type = \"" + response1['WebACL']['Rules'][i]['Action']['Type'] + "\"\n"
finalString += " }\n"
finalString += " }\n\n"
finalString += "}\n\n"
# This means there are regional resources associated with the Web ACL. In case it's a Global WAF Web ACL,
# and there is at least one CloudFront distribution associated with it, this was already covered in the
# the corresponding CloudFront block while running the getAssociatedResources() function.
if len(resourcesResult[0]) > 0 and isRegional:
for z in range(len(resourcesResult[0])):
finalString += "resource \"aws_wafregional_web_acl_association\" \"web_acl_association_" + str(z) + "\" {\n"
finalString += " web_acl_id = \"${aws_wafregional_web_acl.web_acl.id}\"\n"
if "alb_dns_name" in resourcesResult[0][z]:
finalString += " resource_arn = \"${aws_lb.waferALB.arn}\"\n" # This means an ALB needs to be associated with the Web ACL
else:
# This means an API Gateway needs to be associated with the Web ACL
finalString += " resource_arn = \"arn:aws:apigateway:" + region + "::/restapis/${aws_api_gateway_rest_api.waferAPI.id}/stages/waferStage\"\n"
finalString += "}\n\n"
# This is the real final part of the template file (the outputs).
finalString += "output \"Web_ACL_Name\" {\n"
finalString += " description = \"Please refer to this Web ACL\"\n"
finalString += " value = \"" + webAclName + "\"\n"
finalString += "}\n\n"
for z in range(len(resourcesResult[0])):
finalString += "output \"" + resourcesResult[0][z][0] + "\" {\n"
finalString += " description = \"" + resourcesResult[0][z][1] + "\"\n"
tail = ""
if "api_gateway_invoke_url" in resourcesResult[0][z]:
tail = "/WAFER" # Adding the stage nane to the final URL.
finalString += " value = " + resourcesResult[0][z][2] + tail + "\n"
finalString += "}\n\n"
template.write(finalString)
log.write(function.getFormattedDateTime() + "End of Log.")
print("All done.")
log.close()
template.close()
# Zipping files.
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except:
compression = zipfile.ZIP_STORED
zf = zipfile.ZipFile(package, mode = "w")
try:
zf.write(listLogTemplate[0], compress_type = compression)
except:
print("Unable to add {} to the zip file!".format(listLogTemplate[0]))
try:
zf.write(listLogTemplate[1], compress_type = compression)
except:
print("Unable to add {} to the zip file!".format(listLogTemplate[1]))
zf.close()
print("\nGenerated ZIP file: {}.".format(package))
def crawlConditions(botoClient, log, template, suffix):
'''
This function crawls all WAF conditions through the provided Boto3 client and returns a dict mapping condition IDs to their generated Terraform resource names, together with the corresponding Terraform template string.
'''
returnString = ""
conditionsDict = {}
# Getting the String Match Conditions
try:
test = botoClient.list_byte_match_sets()
except:
function.abortMission(log, template, "list_byte_match_sets()")
for k in range(len(test['ByteMatchSets'])):
try:
condition = botoClient.get_byte_match_set(ByteMatchSetId = test['ByteMatchSets'][k]['ByteMatchSetId'])
except:
function.abortMission(log, template, "get_byte_match_set()")
namePrefix = "byte_match_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "byte_match_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['ByteMatchSet']['Name'] + "\"\n\n"
for l in range(len(condition['ByteMatchSet']['ByteMatchTuples'])):
returnString += " byte_match_tuples {\n"
returnString += " text_transformation = \"" + condition['ByteMatchSet']['ByteMatchTuples'][l]['TextTransformation'] + "\"\n"
returnString += " target_string = \"" + str(condition['ByteMatchSet']['ByteMatchTuples'][l]['TargetString'])[2:-1] + "\"\n"
returnString += " positional_constraint = \"" + condition['ByteMatchSet']['ByteMatchTuples'][l]['PositionalConstraint'] + "\"\n\n"
returnString += " field_to_match {\n"
returnString += " type = \"" + condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch']['Type'] + "\"\n"
if len(condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch']) > 1:
returnString += " data = \"" + condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch']['Data'] + "\"\n"
returnString += " }\n"
returnString += " }"
if l != len(condition['ByteMatchSet']['ByteMatchTuples']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['ByteMatchSets'][k]['ByteMatchSetId']] = namePrefix
returnString += "}\n\n"
returnString += "\n\n"
# Getting the Regex Pattern Sets
try:
test = botoClient.list_regex_pattern_sets()
except:
function.abortMission(log, template, "list_regex_pattern_sets()")
for k in range(len(test['RegexPatternSets'])):
try:
condition = botoClient.get_regex_pattern_set(RegexPatternSetId = test['RegexPatternSets'][k]['RegexPatternSetId'])
except:
function.abortMission(log, template, "get_regex_pattern_set()")
namePrefix = "regex_pattern_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "regex_pattern_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['RegexPatternSet']['Name'] + "\"\n"
returnString += " regex_pattern_strings = [ "
for l in range(len(condition['RegexPatternSet']['RegexPatternStrings'])):
# The following loop is to insert another "\" for all Regex pattern sets that have "\", as Terraform may not originally understand them.
cadTemp = ""
for m in range(len(condition['RegexPatternSet']['RegexPatternStrings'][l])):
if condition['RegexPatternSet']['RegexPatternStrings'][l][m] == "\\":
cadTemp += "\\\\" + condition['RegexPatternSet']['RegexPatternStrings'][l][m+1:]
m += 1
if len(cadTemp) == 0:
cadTemp = condition['RegexPatternSet']['RegexPatternStrings'][l]
returnString += "\"" + cadTemp + "\""
if l != len(condition['RegexPatternSet']['RegexPatternStrings']) - 1:
returnString += ", "
returnString += " ]\n"
conditionsDict[test['RegexPatternSets'][k]['RegexPatternSetId']] = namePrefix
returnString += "}\n\n"
# Getting the Regex Match Conditions
try:
test = botoClient.list_regex_match_sets()
except:
function.abortMission(log, template, "list_regex_match_sets()")
for k in range(len(test['RegexMatchSets'])):
try:
condition = botoClient.get_regex_match_set(RegexMatchSetId = test['RegexMatchSets'][k]['RegexMatchSetId'])
except:
function.abortMission(log, template, "get_regex_match_set()")
namePrefix = "regex_match_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "regex_match_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['RegexMatchSet']['Name'] + "\"\n\n"
for l in range(len(condition['RegexMatchSet']['RegexMatchTuples'])):
returnString += " regex_match_tuple {\n"
returnString += " field_to_match {\n"
returnString += " type = \"" + condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch']['Type'] + "\"\n"
if len(condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch']) > 1:
returnString += " data = \"" + condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch']['Data'] + "\"\n"
returnString += " }\n\n"
returnString += " text_transformation = \"" + condition['RegexMatchSet']['RegexMatchTuples'][l]['TextTransformation'] + "\"\n"
returnString += " regex_pattern_set_id = \"${aws_waf" + suffix + "regex_pattern_set." + conditionsDict[condition['RegexMatchSet']['RegexMatchTuples'][l]['RegexPatternSetId']] + ".id}\"\n"
returnString += " }"
if l != len(condition['RegexMatchSet']['RegexMatchTuples']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['RegexMatchSets'][k]['RegexMatchSetId']] = namePrefix
returnString += "}\n\n"
# Getting the SQL Injection Conditions
try:
test = botoClient.list_sql_injection_match_sets()
except:
function.abortMission(log, template, "list_sql_injection_match_sets()")
for k in range(len(test['SqlInjectionMatchSets'])):
try:
condition = botoClient.get_sql_injection_match_set(SqlInjectionMatchSetId = test['SqlInjectionMatchSets'][k]['SqlInjectionMatchSetId'])
except:
function.abortMission(log, template, "get_sql_injection_match_set()")
namePrefix = "sql_injection_match_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "sql_injection_match_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['SqlInjectionMatchSet']['Name'] + "\"\n\n"
for l in range(len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'])):
            if len(suffix) == 1: # This means it's global WAF (suffix == '_'). Terraform expects 'tuples' (plural).
returnString += " sql_injection_match_tuples {\n"
else:
returnString += " sql_injection_match_tuple {\n"
returnString += " text_transformation = \"" + condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['TextTransformation'] + "\"\n"
returnString += " field_to_match {\n"
returnString += " type = \"" + condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch']['Type'] + "\"\n"
if len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch']) > 1:
returnString += " data = \"" + condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch']['Data'] + "\"\n"
returnString += " }\n"
returnString += " }"
if l != len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['SqlInjectionMatchSets'][k]['SqlInjectionMatchSetId']] = namePrefix
returnString += "}"
returnString += "\n\n"
# Getting the Size Constraint Set Conditions
try:
test = botoClient.list_size_constraint_sets()
except:
function.abortMission(log, template, "list_size_constraint_sets()")
for k in range(len(test['SizeConstraintSets'])):
try:
condition = botoClient.get_size_constraint_set(SizeConstraintSetId = test['SizeConstraintSets'][k]['SizeConstraintSetId'])
except:
            function.abortMission(log, template, "get_size_constraint_set()")
namePrefix = "size_constraint_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "size_constraint_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['SizeConstraintSet']['Name'] + "\"\n\n"
for l in range(len(condition['SizeConstraintSet']['SizeConstraints'])):
returnString += " size_constraints {\n"
returnString += " text_transformation = \"" + condition['SizeConstraintSet']['SizeConstraints'][l]['TextTransformation'] + "\"\n"
returnString += " comparison_operator = \"" + condition['SizeConstraintSet']['SizeConstraints'][l]['ComparisonOperator'] + "\"\n"
returnString += " size = \"" + str(condition['SizeConstraintSet']['SizeConstraints'][l]['Size']) + "\"\n\n"
returnString += " field_to_match {\n"
returnString += " type = \"" + condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch']['Type'] + "\"\n"
if len(condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch']) > 1:
returnString += " data = \"" + condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch']['Data'] + "\"\n"
returnString += " }\n"
returnString += " }"
if l != len(condition['SizeConstraintSet']['SizeConstraints']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['SizeConstraintSets'][k]['SizeConstraintSetId']] = namePrefix
returnString += "}"
returnString += "\n\n"
# Getting the IP Set Conditions
try:
test = botoClient.list_ip_sets()
except:
function.abortMission(log, template, "list_ip_sets()")
for k in range(len(test['IPSets'])):
try:
condition = botoClient.get_ip_set(IPSetId = test['IPSets'][k]['IPSetId'])
except:
function.abortMission(log, template, "get_ip_set()")
namePrefix = "ipset_" + str(k)
returnString += "resource \"aws_waf" + suffix + "ipset\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['IPSet']['Name'] + "\"\n\n"
for l in range(len(condition['IPSet']['IPSetDescriptors'])):
            if len(suffix) == 1: # This means it's global WAF (suffix == '_'). Terraform expects 'descriptors' (plural).
returnString += " ip_set_descriptors {\n"
else:
returnString += " ip_set_descriptor {\n"
returnString += " type = \"" + condition['IPSet']['IPSetDescriptors'][l]['Type'] + "\"\n"
returnString += " value = \"" + condition['IPSet']['IPSetDescriptors'][l]['Value'] + "\"\n"
returnString += " }"
if l != len(condition['IPSet']['IPSetDescriptors']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['IPSets'][k]['IPSetId']] = namePrefix
returnString += "}\n\n"
# Getting the Geo Conditions
try:
test = botoClient.list_geo_match_sets()
except:
function.abortMission(log, template, "list_geo_match_sets()")
for k in range(len(test['GeoMatchSets'])):
try:
condition = botoClient.get_geo_match_set(GeoMatchSetId = test['GeoMatchSets'][k]['GeoMatchSetId'])
except:
function.abortMission(log, template, "get_geo_match_set()")
namePrefix = "geo_match_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "geo_match_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['GeoMatchSet']['Name'] + "\"\n\n"
for l in range(len(condition['GeoMatchSet']['GeoMatchConstraints'])):
returnString += " geo_match_constraint {\n"
returnString += " type = \"" + condition['GeoMatchSet']['GeoMatchConstraints'][l]['Type'] + "\"\n"
returnString += " value = \"" + condition['GeoMatchSet']['GeoMatchConstraints'][l]['Value'] + "\"\n"
returnString += " }"
if l != len(condition['GeoMatchSet']['GeoMatchConstraints']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['GeoMatchSets'][k]['GeoMatchSetId']] = namePrefix
returnString += "}\n\n"
# Getting the XSS Conditions
try:
test = botoClient.list_xss_match_sets()
except:
function.abortMission(log, template, "list_xss_match_sets()")
for k in range(len(test['XssMatchSets'])):
try:
condition = botoClient.get_xss_match_set(XssMatchSetId = test['XssMatchSets'][k]['XssMatchSetId'])
except:
function.abortMission(log, template, "get_xss_match_set()")
namePrefix = "xss_match_set_" + str(k)
returnString += "resource \"aws_waf" + suffix + "xss_match_set\" \"" + namePrefix + "\" {\n"
returnString += " name = \"" + condition['XssMatchSet']['Name'] + "\"\n\n"
for l in range(len(condition['XssMatchSet']['XssMatchTuples'])):
if len(suffix) == 1: # This means it's global WAF (suffix == '_'). Terraform expects 'tuples' (plural).
returnString += " xss_match_tuples {\n"
else:
returnString += " xss_match_tuple {\n"
returnString += " text_transformation = \"" + condition['XssMatchSet']['XssMatchTuples'][l]['TextTransformation'] + "\"\n"
returnString += " field_to_match {\n"
returnString += " type = \"" + condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch']['Type'] + "\"\n"
if len(condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch']) > 1:
returnString += " data = \"" + condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch']['Data'] + "\"\n"
returnString += " }\n"
returnString += " }"
if l != len(condition['XssMatchSet']['XssMatchTuples']) - 1:
returnString += "\n\n"
else:
returnString += "\n"
conditionsDict[test['XssMatchSets'][k]['XssMatchSetId']] = namePrefix
returnString += "}"
return([conditionsDict, returnString])
def getAssociatedResources(wafClient, AclId, region, log, template, isRegional):
'''
    Inspects the customer's Web ACL for associated resources.
    Returns a list of resource names if any are found.
'''
resourceString = ""
resourcesList = []
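    # Each entry later appended to resourcesList is a [output_name, display_label, terraform_reference]
    # triple describing an endpoint of a resource created for the Web ACL association.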
    # Checking if the Web ACL is associated with any resource. If the resulting array has a length greater than zero,
# it means there is at least one resource of that type associated with the Web ACL.
# Looking for ALBs first. If at least one ALB is associated, we need to create all resources to support it:
# VPC, Subnet, Route Table, Internet Gateway, Target Group and Security Group.
if isRegional:
try:
rAlb = wafClient.list_resources_for_web_acl(WebACLId = AclId, ResourceType = "APPLICATION_LOAD_BALANCER")
except:
function.abort(log, template, "list_resources_for_web_acl(ALB)")
if len(rAlb['ResourceArns']) > 0:
log.write(function.getFormattedDateTime() + "Found at least one ALB associated with this Web ACL. Creating equivalent resource...\n")
print("Found at least one ALB associated with this Web ACL. Creating equivalent resource...")
resourceString += "resource \"aws_vpc\" \"waferVPC\" {\n"
resourceString += " cidr_block = \"10.10.0.0/16\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_subnet\" \"waferSubnet1\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " availability_zone = \"" + region + "a\"\n"
resourceString += " cidr_block = \"10.10.1.0/24\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_subnet\" \"waferSubnet2\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " availability_zone = \"" + region + "b\"\n"
resourceString += " cidr_block = \"10.10.2.0/24\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_internet_gateway\" \"waferIGW\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table\" \"waferRT\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"
resourceString += " route {\n"
resourceString += " cidr_block = \"0.0.0.0/0\"\n"
resourceString += " gateway_id = \"${aws_internet_gateway.waferIGW.id}\"\n"
resourceString += " }\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table_association\" \"waferRTAssociation1\" {\n"
resourceString += " subnet_id = \"${aws_subnet.waferSubnet1.id}\"\n"
resourceString += " route_table_id = \"${aws_route_table.waferRT.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table_association\" \"waferRTAssociation2\" {\n"
resourceString += " subnet_id = \"${aws_subnet.waferSubnet2.id}\"\n"
resourceString += " route_table_id = \"${aws_route_table.waferRT.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_security_group\" \"waferALBSG\" {\n"
resourceString += " name = \"waferALBSG\"\n"
resourceString += " description = \"Allow HTTP inbound traffic\"\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " ingress {\n"
resourceString += " from_port = 80\n"
resourceString += " to_port = 80\n"
resourceString += " protocol = \"tcp\"\n"
resourceString += " cidr_blocks = [ \"0.0.0.0/0\" ]\n"
resourceString += " }\n\n"
resourceString += " egress {\n"
resourceString += " from_port = 0\n"
resourceString += " to_port = 0\n"
resourceString += " protocol = \"-1\"\n"
resourceString += " cidr_blocks = [ \"0.0.0.0/0\" ]\n"
resourceString += " }\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb\" \"waferALB\" {\n"
resourceString += " name = \"waferALB\"\n"
resourceString += " internal = false\n"
resourceString += " load_balancer_type = \"application\"\n"
resourceString += " security_groups = [\"${aws_security_group.waferALBSG.id}\"]\n"
resourceString += " subnets = [\"${aws_subnet.waferSubnet1.id}\", \"${aws_subnet.waferSubnet2.id}\"]\n\n"
resourceString += " enable_cross_zone_load_balancing = true\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb_target_group\" \"waferALBTG\" {\n"
resourceString += " name = \"waferALBTG\"\n"
resourceString += " port = 80\n"
resourceString += " protocol = \"HTTP\"\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb_listener\" \"waferALBListener\" {\n"
resourceString += " load_balancer_arn = \"${aws_lb.waferALB.arn}\"\n"
resourceString += " port = \"80\"\n"
resourceString += " protocol = \"HTTP\"\n\n"
resourceString += " default_action {\n"
resourceString += " type = \"forward\"\n"
resourceString += " target_group_arn = \"${aws_lb_target_group.waferALBTG.arn}\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
listTemp = []
listTemp.append("ALB_DNS_Name")
listTemp.append("ALB DNS Name")
listTemp.append("aws_lb.waferALB.dns_name")
resourcesList.append(listTemp)
        # Let's also check whether an API Gateway endpoint is associated with the Web ACL.
try:
rApi = wafClient.list_resources_for_web_acl(WebACLId = AclId, ResourceType = "API_GATEWAY")
except:
function.abort(log, template, "list_resources_for_web_acl(API)")
if len(rApi['ResourceArns']) > 0:
log.write(function.getFormattedDateTime() + "Found at least one API Gateway endpoint associated with this Web ACL. Creating equivalent resource...\n")
log.write(function.getFormattedDateTime() + "Do not forget to change the API Gateway Integration method type to something different than 'MOCK'!\n")
print("Found at least one API Gateway endpoint associated with this Web ACL. Creating equivalent resource...")
resourceString += "resource \"aws_api_gateway_rest_api\" \"waferAPI\" {\n"
resourceString += " name = \"waferAPI\"\n"
resourceString += " description = \"WAFER API\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_api_gateway_resource\" \"waferAPIResource\" {\n"
resourceString += " rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"
resourceString += " parent_id = \"${aws_api_gateway_rest_api.waferAPI.root_resource_id}\"\n"
resourceString += " path_part = \"WAFER\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_api_gateway_method\" \"waferMethod\" {\n"
resourceString += " rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"
resourceString += " resource_id = \"${aws_api_gateway_resource.waferAPIResource.id}\"\n"
resourceString += " http_method = \"GET\"\n"
resourceString += " authorization = \"NONE\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_api_gateway_deployment\" \"waferDeployment\" {\n"
resourceString += " depends_on = [\"aws_api_gateway_integration.waferIntegration\"]\n"
resourceString += " rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"
resourceString += " stage_name = \"test\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_api_gateway_stage\" \"waferStage\" {\n"
resourceString += " stage_name = \"waferStage\"\n"
resourceString += " rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"
resourceString += " deployment_id = \"${aws_api_gateway_deployment.waferDeployment.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_api_gateway_integration\" \"waferIntegration\" {\n"
resourceString += " rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"
resourceString += " resource_id = \"${aws_api_gateway_resource.waferAPIResource.id}\"\n"
resourceString += " http_method = \"${aws_api_gateway_method.waferMethod.http_method}\"\n"
resourceString += " integration_http_method = \"GET\"\n"
resourceString += " type = \"MOCK\"\n"
resourceString += "}\n\n"
listTemp = []
listTemp.append("API_Gateway_Invoke_URL")
listTemp.append("API Gateway Invoke URL")
listTemp.append("aws_api_gateway_stage.waferStage.invoke_url")
resourcesList.append(listTemp)
else:
        # It's a global WAF, so we can check whether a CloudFront distribution is associated with the Web ACL.
try:
cloudFront = boto3.client('cloudfront')
rCfn = cloudFront.list_distributions_by_web_acl_id(WebACLId = AclId)
except:
function.abort(log, template, "list_distributions_by_web_acl_id(CloudFront)")
if rCfn['DistributionList']['Quantity'] > 0:
log.write(function.getFormattedDateTime() + "Found at least one CloudFront distribution associated with this Web ACL. Creating equivalent resource...\n")
print("Found at least one CloudFront distribution associated with this Web ACL. Creating equivalent resource...")
# We need to create an ALB first and then use it as the origin for the CloudFront distribution.
resourceString += "resource \"aws_vpc\" \"waferVPC\" {\n"
resourceString += " cidr_block = \"10.10.0.0/16\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_subnet\" \"waferSubnet1\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " availability_zone = \"us-east-1a\"\n"
resourceString += " cidr_block = \"10.10.1.0/24\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_subnet\" \"waferSubnet2\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " availability_zone = \"us-east-1b\"\n"
resourceString += " cidr_block = \"10.10.2.0/24\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_internet_gateway\" \"waferIGW\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table\" \"waferRT\" {\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"
resourceString += " route {\n"
resourceString += " cidr_block = \"0.0.0.0/0\"\n"
resourceString += " gateway_id = \"${aws_internet_gateway.waferIGW.id}\"\n"
resourceString += " }\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table_association\" \"waferRTAssociation1\" {\n"
resourceString += " subnet_id = \"${aws_subnet.waferSubnet1.id}\"\n"
resourceString += " route_table_id = \"${aws_route_table.waferRT.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_route_table_association\" \"waferRTAssociation2\" {\n"
resourceString += " subnet_id = \"${aws_subnet.waferSubnet2.id}\"\n"
resourceString += " route_table_id = \"${aws_route_table.waferRT.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_security_group\" \"waferALBSG\" {\n"
resourceString += " name = \"waferALBSG\"\n"
resourceString += " description = \"Allow HTTP inbound traffic\"\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += " ingress {\n"
resourceString += " from_port = 80\n"
resourceString += " to_port = 80\n"
resourceString += " protocol = \"tcp\"\n"
resourceString += " cidr_blocks = [ \"0.0.0.0/0\" ]\n"
resourceString += " }\n\n"
resourceString += " egress {\n"
resourceString += " from_port = 0\n"
resourceString += " to_port = 0\n"
resourceString += " protocol = \"-1\"\n"
resourceString += " cidr_blocks = [ \"0.0.0.0/0\" ]\n"
resourceString += " }\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb\" \"waferALB\" {\n"
resourceString += " name = \"waferALB\"\n"
resourceString += " internal = false\n"
resourceString += " load_balancer_type = \"application\"\n"
resourceString += " security_groups = [\"${aws_security_group.waferALBSG.id}\"]\n"
resourceString += " subnets = [\"${aws_subnet.waferSubnet1.id}\", \"${aws_subnet.waferSubnet2.id}\"]\n\n"
resourceString += " enable_cross_zone_load_balancing = true\n\n"
resourceString += " tags = {\n"
resourceString += " Name = \"WAFER\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb_target_group\" \"waferALBTG\" {\n"
resourceString += " name = \"waferALBTG\"\n"
resourceString += " port = 80\n"
resourceString += " protocol = \"HTTP\"\n"
resourceString += " vpc_id = \"${aws_vpc.waferVPC.id}\"\n"
resourceString += "}\n\n"
resourceString += "resource \"aws_lb_listener\" \"waferALBListener\" {\n"
resourceString += " load_balancer_arn = \"${aws_lb.waferALB.arn}\"\n"
resourceString += " port = \"80\"\n"
resourceString += " protocol = \"HTTP\"\n\n"
resourceString += " default_action {\n"
resourceString += " type = \"forward\"\n"
resourceString += " target_group_arn = \"${aws_lb_target_group.waferALBTG.arn}\"\n"
resourceString += " }\n"
resourceString += "}\n\n"
listTemp = []
listTemp.append("ALB_DNS_Name")
listTemp.append("ALB DNS Name")
listTemp.append("aws_lb.waferALB.dns_name")
resourcesList.append(listTemp)
# Time to create the CloudFront distribution.
resourceString += "resource \"aws_cloudfront_distribution\" \"waferCFN\" {\n"
resourceString += " comment = \"WAFER CloudFront Distribution\"\n"
resourceString += " enabled = true\n"
resourceString += " web_acl_id = \"${aws_waf_web_acl.web_acl.id}\"\n\n"
resourceString += " origin {\n"
resourceString += " domain_name = \"${aws_lb.waferALB.dns_name}\"\n"
resourceString += " origin_id = \"ELB-${aws_lb.waferALB.name}\"\n\n"
resourceString += " custom_origin_config {\n"
resourceString += " http_port = 80\n"
resourceString += " https_port = 443\n"
resourceString += " origin_protocol_policy = \"http-only\"\n"
resourceString += " origin_ssl_protocols = [\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\", \"SSLv3\"]\n"
resourceString += " }\n"
resourceString += " }\n\n"
resourceString += " default_cache_behavior {\n"
resourceString += " allowed_methods = [\"GET\", \"HEAD\", \"OPTIONS\", \"PUT\", \"POST\", \"PATCH\", \"DELETE\"]\n"
resourceString += " cached_methods = [\"GET\", \"HEAD\"]\n"
resourceString += " target_origin_id = \"ELB-${aws_lb.waferALB.name}\"\n\n"
resourceString += " forwarded_values {\n"
resourceString += " query_string = true\n"
resourceString += " headers = [\"*\"]\n"
resourceString += " cookies {\n"
resourceString += " forward = \"all\"\n"
resourceString += " }\n"
resourceString += " }\n\n"
resourceString += " viewer_protocol_policy = \"allow-all\"\n"
resourceString += " }\n\n"
resourceString += " viewer_certificate {\n"
resourceString += " cloudfront_default_certificate = true\n"
resourceString += " }\n\n"
resourceString += " restrictions {\n"
resourceString += " geo_restriction {\n"
resourceString += " restriction_type = \"none\"\n"
resourceString += " }\n"
resourceString += " }\n"
resourceString += "}\n\n"
listTemp = []
listTemp.append("CloudFront_Distribution_Domain_Name")
listTemp.append("CloudFront Distribution Name")
listTemp.append("aws_cloudfront_distribution.waferCFN.domain_name")
resourcesList.append(listTemp)
return([resourcesList, resourceString])
|
GCL/losses/bootstrap.py | lem0nle/PyGCL | 361 | 12677456 | import torch
import torch.nn.functional as F
from .losses import Loss
class BootstrapLatent(Loss):
def __init__(self):
super(BootstrapLatent, self).__init__()
def compute(self, anchor, sample, pos_mask, neg_mask=None, *args, **kwargs) -> torch.FloatTensor:
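        # L2-normalize both views so that the matrix product below is a cosine similarity;
        # only the pairs selected by pos_mask contribute to the loss.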
anchor = F.normalize(anchor, dim=-1, p=2)
sample = F.normalize(sample, dim=-1, p=2)
similarity = anchor @ sample.t()
loss = (similarity * pos_mask).sum(dim=-1)
return loss.mean()
|
yawast/__init__.py | Prodject/yawast | 200 | 12677463 | # Copyright (c) 2013 - 2020 <NAME> and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
try:
from ._version import __version__
__all__ = ["__version__"]
del _version # remove to avoid confusion with __version__
except Exception:
# if we get here, something is very wrong - running under python2?
pass
|
TFRecModel/src/com/sparrowrecsys/offline/tensorflow/EmbeddingMLP.py | sunbuhui/SparrowRecSys | 1,748 | 12677467 | import tensorflow as tf
# Training samples path, change to your local path
training_samples_file_path = tf.keras.utils.get_file("trainingSamples.csv",
"file:///Users/zhewang/Workspace/SparrowRecSys/src/main"
"/resources/webroot/sampledata/trainingSamples.csv")
# Test samples path, change to your local path
test_samples_file_path = tf.keras.utils.get_file("testSamples.csv",
"file:///Users/zhewang/Workspace/SparrowRecSys/src/main"
"/resources/webroot/sampledata/testSamples.csv")
# load sample as tf dataset
def get_dataset(file_path):
dataset = tf.data.experimental.make_csv_dataset(
file_path,
batch_size=12,
label_name='label',
na_value="0",
num_epochs=1,
ignore_errors=True)
return dataset
# split as test dataset and training dataset
train_dataset = get_dataset(training_samples_file_path)
test_dataset = get_dataset(test_samples_file_path)
# genre features vocabulary
genre_vocab = ['Film-Noir', 'Action', 'Adventure', 'Horror', 'Romance', 'War', 'Comedy', 'Western', 'Documentary',
'Sci-Fi', 'Drama', 'Thriller',
'Crime', 'Fantasy', 'Animation', 'IMAX', 'Mystery', 'Children', 'Musical']
GENRE_FEATURES = {
'userGenre1': genre_vocab,
'userGenre2': genre_vocab,
'userGenre3': genre_vocab,
'userGenre4': genre_vocab,
'userGenre5': genre_vocab,
'movieGenre1': genre_vocab,
'movieGenre2': genre_vocab,
'movieGenre3': genre_vocab
}
# all categorical features
categorical_columns = []
for feature, vocab in GENRE_FEATURES.items():
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=feature, vocabulary_list=vocab)
emb_col = tf.feature_column.embedding_column(cat_col, 10)
categorical_columns.append(emb_col)
# movie id embedding feature
movie_col = tf.feature_column.categorical_column_with_identity(key='movieId', num_buckets=1001)
movie_emb_col = tf.feature_column.embedding_column(movie_col, 10)
categorical_columns.append(movie_emb_col)
# user id embedding feature
user_col = tf.feature_column.categorical_column_with_identity(key='userId', num_buckets=30001)
user_emb_col = tf.feature_column.embedding_column(user_col, 10)
categorical_columns.append(user_emb_col)
# all numerical features
numerical_columns = [tf.feature_column.numeric_column('releaseYear'),
tf.feature_column.numeric_column('movieRatingCount'),
tf.feature_column.numeric_column('movieAvgRating'),
tf.feature_column.numeric_column('movieRatingStddev'),
tf.feature_column.numeric_column('userRatingCount'),
tf.feature_column.numeric_column('userAvgRating'),
tf.feature_column.numeric_column('userRatingStddev')]
# embedding + MLP model architecture
model = tf.keras.Sequential([
tf.keras.layers.DenseFeatures(numerical_columns + categorical_columns),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
# compile the model, set loss function, optimizer and evaluation metrics
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy', tf.keras.metrics.AUC(curve='ROC'), tf.keras.metrics.AUC(curve='PR')])
# train the model
model.fit(train_dataset, epochs=5)
# evaluate the model
test_loss, test_accuracy, test_roc_auc, test_pr_auc = model.evaluate(test_dataset)
print('\n\nTest Loss {}, Test Accuracy {}, Test ROC AUC {}, Test PR AUC {}'.format(test_loss, test_accuracy,
test_roc_auc, test_pr_auc))
# print some predict results
predictions = model.predict(test_dataset)
for prediction, goodRating in zip(predictions[:12], list(test_dataset)[0][1][:12]):
print("Predicted good rating: {:.2%}".format(prediction[0]),
" | Actual rating label: ",
("Good Rating" if bool(goodRating) else "Bad Rating"))
|
opendatatools/amac/amac_agent.py | jjcc/OpenData | 1,179 | 12677522 | # encoding: UTF-8
from urllib.parse import urlencode
from bs4 import BeautifulSoup
import datetime
import time
from opendatatools.common import RestAgent
import pandas as pd
import json
import math
import random
class AMACAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
        self.add_headers({  # request headers
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Length': '2',
'Content-Type': 'application/json',
'Host': 'gs.amac.org.cn',
'Origin': 'http://gs.amac.org.cn',
'Referer': 'http://gs.amac.org.cn/amac-infodisc/res/pof/fund/index.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
})
self.fund_list_url = 'http://gs.amac.org.cn/amac-infodisc/api/pof/fund?'
self.fund_base_url = 'http://gs.amac.org.cn/amac-infodisc/res/pof/fund/'
        self.manager_list_url = 'http://gs.amac.org.cn/amac-infodisc/api/pof/manager?'  # prefix of the list request URL; query parameters are appended to it
        self.manager_base_url = 'http://gs.amac.org.cn/amac-infodisc/res/pof/manager/'  # base part of the manager detail page URL
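        # Field labels (in Chinese) used to pick matching rows out of the scraped detail pages
        # for fund managers (company_title_list) and funds (fund_title_list).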
self.company_title_list = [
'私募基金管理人名称',
'管理人详细信息网址',
'管理人类型',
'成立时间',
'备案时间',
'机构诚信信息',
'基金管理人全称(中文)',
'基金管理人全称(英文)',
'登记编号',
'组织机构代码',
'登记时间',
'成立时间',
'注册地址',
'办公地址',
'注册资本(万元)(人民币)',
'实缴资本(万元)(人民币)',
'企业性质',
'注册资本实缴比例',
'机构类型',
'业务类型',
'全职员工人数',
'取得基金从业人数',
'机构网址',
]
self.fund_title_list = [
'基金名称',
'基金编号',
'成立时间',
'备案时间',
'基金备案阶段',
'基金类型',
'币种',
'基金管理人名称',
'管理类型',
'托管人名称',
'运作状态',
'基金信息最后更新时间',
'基金协会特别提示(针对基金)'
]
def get_total_record(self, list_url):
params = {
'rand': random.random(),
'page': 1,
'size': 10,
}
url = list_url + urlencode(params)
resp = self.do_request(url, json={}, method="POST")
result = json.loads(resp)
return result['totalElements']
def get_page(self, list_url, page):
        '''Fetch one page of the listing.'''
# url 携带参数,设置了每页显示 100 条信息
params = {
'rand': 0.3248183083707361,
'page': page,
'size': 1000,
}
url = list_url + urlencode(params)
resp = self.do_request(url, json={}, method="POST")
return json.loads(resp)
def parse_fund_page(self, r_json):
if r_json:
items = r_json.get('content')
for item in items:
info = {}
info['id'] = item.get('id')
info['基金名称'] = item.get('fundName')
info['基金详细信息网址'] = self.fund_base_url + item.get('id') + ".html"
info['基金状态'] = item.get('workingState')
info['私募基金管理人名称'] = item.get('managerName')
info['管理人类型'] = item.get('managerType')
establishDate = item.get('establishDate')
info['成立时间'] = str(datetime.datetime.fromtimestamp(
                    establishDate / 1000).date()) if establishDate else ''  # establishment date may be empty; guard against errors in that case
putOnRecordDate = item.get('putOnRecordDate')
info['备案时间'] = str(
datetime.datetime.fromtimestamp(putOnRecordDate / 1000).date()) if putOnRecordDate else ''
yield info
def parse_manager_page(self, r_json):
if r_json:
items = r_json.get('content')
for item in items:
info = {}
info['id'] = item.get('id')
info['私募基金管理人名称'] = item.get('managerName')
info['管理人详细信息网址'] = self.manager_base_url + item.get('id') + ".html"
info['管理人类型'] = item.get('primaryInvestType')
establishDate = item.get('establishDate')
info['成立时间'] = str(datetime.datetime.fromtimestamp(
                    establishDate / 1000).date()) if establishDate else ''  # establishment date may be empty; guard against errors in that case
registerDate = item.get('registerDate')
info['备案时间'] = str(
datetime.datetime.fromtimestamp(registerDate / 1000).date()) if registerDate else ''
yield info
def get_detail(self, url):
resp = self.do_request(url, method="GET", encoding="utf-8")
return resp
def parse_manager_detail(self, html):
soup = BeautifulSoup(html, "html5lib")
tables = soup.find_all('table')
info = {}
for table in tables:
if table.has_attr("class") and "table-info" in table['class']:
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) >= 2:
title = cols[0].text
content = cols[1].text
title = title.replace(":", "")
content = content.replace("\n", "")
content = content.strip()
if title in self.company_title_list:
info[title] = content
if len(cols) >= 4:
title = cols[2].text
content = cols[3].text
title = title.replace(":", "")
content = content.replace("\n", "")
content = content.strip()
if title in self.company_title_list:
info[title] = content
return info
def parse_fund_detail(self, html):
soup = BeautifulSoup(html, "html5lib")
tables = soup.find_all('table')
info = {}
for table in tables:
if table.has_attr("class") and "table-info" in table['class']:
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) >= 2:
title = cols[0].text
content = cols[1].text
title = title.replace(":", "")
content = content.replace("\n", "")
content = content.strip()
if title in self.fund_title_list:
info[title] = content
if len(cols) >= 4:
title = cols[2].text
content = cols[3].text
title = title.replace(":", "")
content = content.replace("\n", "")
content = content.strip()
if title in self.fund_title_list:
info[title] = content
return info
def get_company_list(self):
total_record = self.get_total_record(self.manager_list_url)
total_page = math.ceil(total_record / 1000)
print(total_record, total_page)
lis_json = []
for page in range(1, total_page):
print("page=", page)
r_json = self.get_page(self.manager_list_url, page)
results = self.parse_manager_page(r_json)
for result in results:
lis_json.append(result)
return pd.DataFrame(lis_json)
def get_company_detail(self, company_id):
url = self.manager_base_url + company_id + ".html"
html = self.get_detail(url)
info = self.parse_manager_detail(html)
return info
def get_fund_list(self):
total_record = self.get_total_record(self.fund_list_url)
total_page = math.ceil(total_record / 1000)
print(total_record, total_page)
lis_json = []
for page in range(1, total_page):
print("page=", page)
r_json = self.get_page(self.fund_list_url, page)
results = self.parse_fund_page(r_json)
for result in results:
lis_json.append(result)
return pd.DataFrame(lis_json)
def get_fund_detail(self, fund_id):
url = self.fund_base_url + fund_id + ".html"
html = self.get_detail(url)
info = self.parse_fund_detail(html)
return info
if __name__ == '__main__':
agent = AMACAgent()
#df = agent.get_company_list()
#print(df)
#result = agent.get_company_detail("101000004390")
#print(result)
#df = agent.get_fund_list()
#print(df)
result = agent.get_fund_detail('351000130305')
print(result)
|
igibson/utils/data_utils/sampling_task/batch_sampling_saver.py | mamadbiabon/iGibson | 360 | 12677525 | <filename>igibson/utils/data_utils/sampling_task/batch_sampling_saver.py
import argparse
import os
import subprocess
import bddl
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--max_trials", type=int, default=1, help="Maximum number of trials to try sampling.")
parser.add_argument(
"--num_initializations", type=int, default=1, help="Number of initialization per PDDL per scene."
)
parser.add_argument("--start_initialization", type=int, default=0, help="Starting idx for initialization")
args = parser.parse_args()
condition_dir = os.path.join(os.path.dirname(bddl.__file__), "activity_conditions")
for task in sorted(os.listdir(condition_dir)):
task_dir = os.path.join(condition_dir, task)
if os.path.isdir(task_dir):
for task_id_file in sorted(os.listdir(task_dir)):
task_id = task_id_file.replace("problem", "")[0]
if task_id != "0":
continue
subprocess.call(
"python sampling_saver.py --task {} --task_id {} --max_trials {} --num_initializations {} --start_initialization {}".format(
task,
task_id,
args.max_trials,
args.num_initializations,
args.start_initialization,
),
shell=True,
)
if __name__ == "__main__":
main()
|
saleor/graphql/core/__init__.py | elwoodxblues/saleor | 15,337 | 12677529 | from . import fields # noqa
|
mlflow/entities/metric.py | akarloff/mlflow | 10,351 | 12677540 | from mlflow.entities._mlflow_object import _MLflowObject
from mlflow.protos.service_pb2 import Metric as ProtoMetric
class Metric(_MLflowObject):
"""
Metric object.
"""
def __init__(self, key, value, timestamp, step):
self._key = key
self._value = value
self._timestamp = timestamp
self._step = step
@property
def key(self):
"""String key corresponding to the metric name."""
return self._key
@property
def value(self):
"""Float value of the metric."""
return self._value
@property
def timestamp(self):
"""Metric timestamp as an integer (milliseconds since the Unix epoch)."""
return self._timestamp
@property
def step(self):
"""Integer metric step (x-coordinate)."""
return self._step
def to_proto(self):
metric = ProtoMetric()
metric.key = self.key
metric.value = self.value
metric.timestamp = self.timestamp
metric.step = self.step
return metric
@classmethod
def from_proto(cls, proto):
return cls(proto.key, proto.value, proto.timestamp, proto.step)
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test56.py | YangHao666666/hawq | 450 | 12677572 | <reponame>YangHao666666/hawq
'doc'
from import56a import Foo
def x():
print Foo
|
ners/utils/geometry.py | dumpmemory/ners | 156 | 12677583 | """
Utilities for various geometric operations.
"""
import numpy as np
import pytorch3d
import torch
import torch.nn.functional as F
from pytorch3d.utils import ico_sphere
def random_rotation(device=None):
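    """Returns a random 3x3 rotation matrix built from a normalized random quaternion."""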
quat = torch.randn(4, device=device)
quat /= quat.norm()
return pytorch3d.transforms.quaternion_to_matrix(quat)
def rot6d_to_matrix(rot_6d):
"""
Convert 6D rotation representation to 3x3 rotation matrix.
Reference: <NAME> al., "On the Continuity of Rotation Representations in Neural
Networks", CVPR 2019
Args:
rot_6d (B x 6): Batch of 6D Rotation representation.
Returns:
Rotation matrices (B x 3 x 3).
"""
rot_6d = rot_6d.view(-1, 3, 2)
a1 = rot_6d[:, :, 0]
a2 = rot_6d[:, :, 1]
b1 = F.normalize(a1)
b2 = F.normalize(a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1)
b3 = torch.cross(b1, b2)
return torch.stack((b1, b2, b3), dim=-1)
def matrix_to_rot6d(rotmat):
"""
Convert rotation matrix to 6D rotation representation.
Args:
rotmat (B x 3 x 3): Batch of rotation matrices.
Returns:
6D Rotations (B x 3 x 2).
"""
return rotmat.view(-1, 3, 3)[:, :, :2]
def spherical_to_cartesian(theta, phi, radius=1.0):
"""
Converts from spherical coordinates to cartesian coordinates. Spherical coordinates
are defined according to the physics convention (theta elevation, phi azimuth).
https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates
Args:
theta (tensor): elevation.
phi (tensor): azimuth.
radius (tensor): radius. Defaults to 1.
Returns:
(x, y, z)
"""
x = radius * torch.sin(theta) * torch.cos(phi)
y = radius * torch.sin(theta) * torch.sin(phi)
z = radius * torch.cos(theta)
return x, y, z
def cartesian_to_spherical(x, y, z):
"""
    Converts cartesian coordinates to spherical coordinates.
Args:
x (tensor).
y (tensor).
z (tensor).
Returns:
(theta, phi)
"""
theta = torch.arccos(z)
phi = torch.atan2(y, x)
return theta, phi
def create_sphere(level=4, device=None):
"""
Creates a unit ico-sphere.
"""
mesh = ico_sphere(level=level, device=device)
return mesh.verts_padded()[0], mesh.faces_padded()[0]
def unwrap_uv_map(height=256, width=256):
"""
Samples spherical coordinates to unwrap a UV map.
Args:
height (int).
width (int).
Returns:
        Cartesian coordinates on the unit sphere (H, W, 3).
"""
theta_ = torch.linspace(0, np.pi, height)
phi_ = torch.linspace(-np.pi, np.pi, width)
theta, phi = torch.meshgrid(theta_, phi_)
x, y, z = spherical_to_cartesian(theta, phi)
return torch.dstack((x, y, z))
|
src/watchpoints/ast_monkey.py | KikeM/watchpoints | 357 | 12677597 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/watchpoints/blob/master/NOTICE.txt
import ast
import sys
def ast_parse_node(node):
"""
:param ast.Node node: an ast node representing an expression of variable
:return ast.Node: an ast node for:
_watchpoints_obj = var
if <var is a local variable>:
# watch(a)
_watchpoints_localvar = "a"
elif <var is a subscript>:
# watch(a[3])
_watchpoints_parent = a
_watchpoints_subscr = 3
elif <var is an attribute>:
# watch(a.b)
_watchpoints_parent = a
_watchpoints_attr = "b"
"""
root = ast.Module(
body=[
ast.Assign(
targets=[
ast.Name(id="_watchpoints_obj", ctx=ast.Store())
],
value=node
)
],
type_ignores=[]
)
if type(node) is ast.Name:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_localvar", ctx=ast.Store())
],
value=ast.Constant(value=node.id)
)
)
elif type(node) is ast.Subscript:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_parent", ctx=ast.Store())
],
value=node.value
)
)
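        # Before Python 3.9 a plain subscript is wrapped in ast.Index; from 3.9 on
        # the subscript value is stored directly in node.slice.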
if sys.version_info.minor <= 8 and type(node.slice) is ast.Index:
value_node = node.slice.value
elif sys.version_info.minor >= 9 and type(node.slice) is not ast.Slice:
value_node = node.slice
else:
raise ValueError("Slice is not supported!")
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_subscr", ctx=ast.Store())
],
value=value_node
)
)
elif type(node) is ast.Attribute:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_parent", ctx=ast.Store())
],
value=node.value
)
)
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_attr", ctx=ast.Store())
],
value=ast.Constant(value=node.attr)
)
)
ast.fix_missing_locations(root)
return root
|
cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py | lightsey/cinder | 571 | 12677604 | <reponame>lightsey/cinder
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class StoropsException(Exception):
message = 'Storops Error.'
class VNXException(StoropsException):
message = "VNX Error."
class VNXStorageGroupError(VNXException):
pass
class VNXAttachAluError(VNXException):
pass
class VNXAluAlreadyAttachedError(VNXAttachAluError):
message = (
'LUN already exists in the specified storage group',
'Requested LUN has already been added to this Storage Group')
class VNXDetachAluError(VNXStorageGroupError):
pass
class VNXDetachAluNotFoundError(VNXDetachAluError):
message = 'No such Host LUN in this Storage Group'
class VNXCreateStorageGroupError(VNXStorageGroupError):
pass
class VNXStorageGroupNameInUseError(VNXCreateStorageGroupError):
message = 'Storage Group name already in use'
class VNXNoHluAvailableError(VNXStorageGroupError):
pass
class VNXMigrationError(VNXException):
pass
class VNXLunNotMigratingError(VNXException):
pass
class VNXLunSyncCompletedError(VNXMigrationError):
error_code = 0x714a8021
class VNXTargetNotReadyError(VNXMigrationError):
message = 'The destination LUN is not available for migration'
class VNXSnapError(VNXException):
pass
class VNXDeleteAttachedSnapError(VNXSnapError):
error_code = 0x716d8003
class VNXCreateSnapError(VNXException):
message = 'Cannot create the snapshot.'
class VNXAttachSnapError(VNXSnapError):
message = 'Cannot attach the snapshot.'
class VNXDetachSnapError(VNXSnapError):
message = 'Cannot detach the snapshot.'
class VNXSnapAlreadyMountedError(VNXSnapError):
error_code = 0x716d8055
class VNXSnapNameInUseError(VNXSnapError):
error_code = 0x716d8005
class VNXSnapNotExistsError(VNXSnapError):
message = 'The specified snapshot does not exist.'
class VNXLunError(VNXException):
pass
class VNXCreateLunError(VNXLunError):
pass
class VNXLunNameInUseError(VNXCreateLunError):
error_code = 0x712d8d04
class VNXLunExtendError(VNXLunError):
pass
class VNXLunExpandSizeError(VNXLunExtendError):
error_code = 0x712d8e04
class VNXLunPreparingError(VNXLunError):
error_code = 0x712d8e0e
class VNXLunNotFoundError(VNXLunError):
message = 'Could not retrieve the specified (pool lun).'
class VNXDeleteLunError(VNXLunError):
pass
class VNXLunUsedByFeatureError(VNXLunError):
pass
class VNXCompressionError(VNXLunError):
pass
class VNXCompressionAlreadyEnabledError(VNXCompressionError):
message = 'Compression on the specified LUN is already turned on.'
class VNXConsistencyGroupError(VNXException):
pass
class VNXCreateConsistencyGroupError(VNXConsistencyGroupError):
pass
class VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError):
error_code = 0x716d8021
class VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError):
message = 'Cannot find the consistency group'
class VNXPingNodeError(VNXException):
pass
class VNXMirrorException(VNXException):
pass
class VNXMirrorNameInUseError(VNXMirrorException):
message = 'Mirror name already in use'
class VNXMirrorPromotePrimaryError(VNXMirrorException):
message = 'Cannot remove or promote a primary image.'
class VNXMirrorNotFoundError(VNXMirrorException):
message = 'Mirror not found'
class VNXMirrorGroupNameInUseError(VNXMirrorException):
message = 'Mirror Group name already in use'
class VNXMirrorGroupNotFoundError(VNXMirrorException):
message = 'Unable to locate the specified group'
class VNXMirrorGroupAlreadyMemberError(VNXMirrorException):
message = 'The mirror is already a member of a group'
class VNXMirrorGroupMirrorNotMemberError(VNXMirrorException):
message = 'The specified mirror is not a member of the group'
class VNXMirrorGroupAlreadyPromotedError(VNXMirrorException):
message = 'The Consistency Group has no secondary images to promote'
|
autoPyTorch/components/preprocessing/feature_preprocessing/fast_ica.py | mens-artis/Auto-PyTorch | 1,657 | 12677609 | import torch
import warnings
import ConfigSpace
import ConfigSpace.hyperparameters as CSH
import ConfigSpace.conditions as CSC
from autoPyTorch.utils.config_space_hyperparameter import get_hyperparameter
from autoPyTorch.components.preprocessing.preprocessor_base import PreprocessorBase
class FastICA(PreprocessorBase):
def __init__(self, hyperparameter_config):
self.algorithm = hyperparameter_config['algorithm']
self.whiten = hyperparameter_config['whiten']
self.fun = hyperparameter_config['fun']
self.n_components = None
if (self.whiten):
self.n_components = hyperparameter_config['n_components']
def fit(self, X, Y):
import sklearn.decomposition
self.preprocessor = sklearn.decomposition.FastICA(
n_components=self.n_components, algorithm=self.algorithm,
fun=self.fun, whiten=self.whiten
)
# Make the RuntimeWarning an Exception!
with warnings.catch_warnings():
warnings.filterwarnings("error", message='array must not contain infs or NaNs')
try:
return self.preprocessor.fit(X)
except ValueError as e:
if 'array must not contain infs or NaNs' in e.args[0]:
raise ValueError("Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738")
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_hyperparameter_search_space(
dataset_info=None,
n_components=(10,2000),
algorithm=('parallel', 'deflation'),
whiten=(True, False),
fun=('logcosh', 'exp', 'cube'),
):
cs = ConfigSpace.ConfigurationSpace()
n_components_hp = get_hyperparameter(CSH.UniformIntegerHyperparameter, "n_components", n_components)
algorithm_hp = get_hyperparameter(CSH.CategoricalHyperparameter, 'algorithm', algorithm)
whiten_hp = get_hyperparameter(CSH.CategoricalHyperparameter, 'whiten', whiten)
fun_hp = get_hyperparameter(CSH.CategoricalHyperparameter, 'fun', fun)
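        # n_components is only used when whitening is enabled, so it is registered as a
        # conditional hyperparameter that is active when whiten == True.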
if True in whiten:
cs.add_hyperparameters([n_components_hp, algorithm_hp, whiten_hp, fun_hp])
cs.add_condition(CSC.EqualsCondition(n_components_hp, whiten_hp, True))
return cs
|
rotkehlchen/tests/unit/test_etherscan.py | rotkehlchenio/rotkehlchen | 137 | 12677650 | from rotkehlchen.externalapis.etherscan import _hashes_tuple_to_list
def test_hashes_tuple_to_list():
hashes = {('0x1', 1), ('0x2', 2), ('0x3', 3), ('0x4', 4), ('0x5', 5)}
assert _hashes_tuple_to_list(hashes) == ['0x1', '0x2', '0x3', '0x4', '0x5']
|
python/replicate/exceptions.py | kennyworkman/replicate | 810 | 12677677 | from . import constants
class DoesNotExist(Exception):
pass
class ReadError(Exception):
pass
class WriteError(Exception):
pass
class RepositoryConfigurationError(Exception):
pass
class IncompatibleRepositoryVersion(Exception):
pass
class CorruptedRepositorySpec(Exception):
pass
class ConfigNotFound(Exception):
pass
|
kpm/console.py | ericchiang/kpm | 121 | 12677696 | <gh_stars>100-1000
import json
import subprocess
import random
import logging
logger = logging.getLogger(__name__)
class KubernetesExec(object):
def __init__(self, rcname, cmd='sh', namespace="default", container=None, kind="rc"):
self.rcname = rcname
self.namespace = namespace
self.command = cmd
self.kind = kind
self.container = container
def call(self, tty=True):
rc = self._getrc()
selector = self._getselector(rc)
logger.info("selector: %s", selector)
pods = self._getpods(selector)
podname = random.choice(pods)['metadata']['name']
cmd = ['exec', '--namespace', self.namespace, podname]
if tty:
cmd.append("-ti")
if self.container is not None:
cmd += ['-c', self.container]
command = ['kubectl'] + cmd + ["--"] + self.command.split(" ")
return subprocess.call(command)
def _getpods(self, selector):
cmd = ['get', "pods", "-l", selector, '-o', 'json']
podslist = json.loads(self._call(cmd))
pods = podslist['items']
return pods
def _getselector(self, rc):
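        # Build a "key=value,key=value" label selector string from the controller's spec selector.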
s = None
items = rc['spec']['selector']
if 'matchLabels' in items:
items = items['matchLabels']
for k, v in items.iteritems():
if s is None:
s = "%s=%s" % (k, v)
else:
s += ",%s=%s" % (k, v)
return s
def _getrc(self):
cmd = ['get', self.kind, self.rcname, '-o', 'json']
return json.loads(self._call(cmd))
def _call(self, cmd, dry=False):
command = ['kubectl'] + cmd + ["--namespace", self.namespace]
return subprocess.check_output(command, stderr=subprocess.STDOUT)
|
venv/Lib/site-packages/folium/plugins/boat_marker.py | star10919/drf | 5,451 | 12677707 | # -*- coding: utf-8 -*-
from folium.elements import JSCSSMixin
from folium.map import Marker
from folium.utilities import parse_options
from jinja2 import Template
class BoatMarker(JSCSSMixin, Marker):
"""Add a Marker in the shape of a boat.
Parameters
----------
location: tuple of length 2, default None
The latitude and longitude of the marker.
If None, then the middle of the map is used.
heading: int, default 0
Heading of the boat to an angle value between 0 and 360 degrees
wind_heading: int, default None
Heading of the wind to an angle value between 0 and 360 degrees
If None, then no wind is represented.
wind_speed: int, default 0
Speed of the wind in knots.
https://github.com/thomasbrueggemann/leaflet.boatmarker
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.boatMarker(
{{ this.location|tojson }},
{{ this.options|tojson }}
).addTo({{ this._parent.get_name() }});
{% if this.wind_heading is not none -%}
{{ this.get_name() }}.setHeadingWind(
{{ this.heading }},
{{ this.wind_speed }},
{{ this.wind_heading }}
);
{% else -%}
{{this.get_name()}}.setHeading({{this.heading}});
{% endif -%}
{% endmacro %}
""")
default_js = [
('markerclusterjs',
'https://unpkg.com/leaflet.boatmarker/leaflet.boatmarker.min.js'),
]
def __init__(self, location, popup=None, icon=None,
heading=0, wind_heading=None, wind_speed=0, **kwargs):
super(BoatMarker, self).__init__(
location,
popup=popup,
icon=icon
)
self._name = 'BoatMarker'
self.heading = heading
self.wind_heading = wind_heading
self.wind_speed = wind_speed
self.options = parse_options(**kwargs)
|
venv/Lib/site-packages/nbconvert/utils/tests/test_pandoc.py | ajayiagbebaku/NFL-Model | 1,367 | 12677717 | """Test Pandoc module"""
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import warnings
from ...tests.utils import onlyif_cmds_exist
from nbconvert.tests.base import TestsBase
from .. import pandoc
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestPandoc(TestsBase):
"""Collection of Pandoc tests"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.original_env = os.environ.copy()
def setUp(self):
super().setUp()
pandoc.check_pandoc_version._cached = None
@onlyif_cmds_exist('pandoc')
def test_pandoc_available(self):
""" Test behaviour that pandoc functions raise PandocMissing as documented """
pandoc.clean_cache()
os.environ["PATH"] = ""
with self.assertRaises(pandoc.PandocMissing):
pandoc.get_pandoc_version()
with self.assertRaises(pandoc.PandocMissing):
pandoc.check_pandoc_version()
with self.assertRaises(pandoc.PandocMissing):
pandoc.pandoc("", "markdown", "html")
# original_env["PATH"] should contain pandoc
os.environ["PATH"] = self.original_env["PATH"]
with warnings.catch_warnings(record=True) as w:
pandoc.get_pandoc_version()
pandoc.check_pandoc_version()
pandoc.pandoc("", "markdown", "html")
self.assertEqual(w, [])
@onlyif_cmds_exist('pandoc')
def test_minimal_version(self):
original_minversion = pandoc._minimal_version
pandoc._minimal_version = "120.0"
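        # deliberately higher than any released pandoc so the version check always fails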
with warnings.catch_warnings(record=True) as w:
# call it twice to verify the cached value is used
assert not pandoc.check_pandoc_version()
assert not pandoc.check_pandoc_version()
# only one warning after two calls, due to cache
self.assertEqual(len(w), 1)
# clear cache
pandoc.check_pandoc_version._cached = None
pandoc._minimal_version = pandoc.get_pandoc_version()
assert pandoc.check_pandoc_version()
def pandoc_function_raised_missing(f, *args, **kwargs):
try:
f(*args, **kwargs)
except pandoc.PandocMissing:
return True
else:
return False
|
vt/client.py | Dmenifee23-star/vt-py | 208 | 12677735 | <filename>vt/client.py<gh_stars>100-1000
# Copyright © 2019 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import asyncio
import base64
import json
import io
from .error import APIError
from .feed import Feed
from .iterator import Iterator
from .object import Object
from .utils import make_sync
from .version import __version__
__all__ = [
'Client',
'ClientResponse',
'url_id']
_API_HOST = 'https://www.virustotal.com'
# All API endpoints start with this prefix, you don't need to include the
# prefix in the paths you request as it's prepended automatically.
_ENDPOINT_PREFIX = '/api/v3'
# AppEngine server decides whether or not it should serve gzipped content
# based on Accept-Encoding and User-Agent. Non-standard UAs are not served
# with gzipped content unless it contains the string "gzip" somewhere.
# See: https://cloud.google.com/appengine/kb/#compression
_USER_AGENT_FMT = '{agent}; vtpy {version}; gzip'
def url_id(url):
"""Generates the object ID for an URL.
The ID generated by this function can be used in calls that expect a URL ID
like `client.get_object('/urls/<id>')`
"""
return base64.urlsafe_b64encode(url.encode()).decode().strip("=")
class ClientResponse:
"""Class representing the HTTP responses returned by the client.
  This class is just a thin wrapper around `aiohttp.ClientResponse
<https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse>`_
that allows using it in both asynchronous and synchronous mode. Instances of
this class have all the attributes that you can find in `aiohttp.ClientResponse`,
like `version`, `status`, `method`, `url`, and so on. Methods in
`aiohttp.ClientResponse` that return a coroutine have two flavors in this
class: synchronous and asynchronous. For example, `aiohttp.ClientResponse.read()`
becomes `vt.ClientResponse.read_async()`, and `vt.ClientResponse.read()` is
the synchronous version of `vt.ClientResponse.read_async()`. Find more
information about attributes and methods in `aiohttp.ClientResponse` in:
https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse
"""
def __init__(self, aiohttp_resp):
self._aiohttp_resp = aiohttp_resp
def __getattr__(self, attr):
return getattr(self._aiohttp_resp, attr)
@property
def content(self):
return StreamReader(self._aiohttp_resp.content)
async def _get_chunked_response(self):
buffer = b""
async for data, _ in self.content.iter_chunks():
buffer += data
return buffer
async def read_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
return await self._get_chunked_response()
else:
return await self._aiohttp_resp.read()
def read(self):
return make_sync(self.read_async())
async def json_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
response_content = await self._get_chunked_response()
return json.loads(response_content)
else:
return await self._aiohttp_resp.json()
def json(self):
return make_sync(self.json_async())
async def text_async(self):
if self.headers.get('Transfer-encoding') == 'chunked':
response_content = await self._get_chunked_response()
return response_content.decode(self._aiohttp_resp.get_encoding())
else:
return await self._aiohttp_resp.text()
def text(self):
return make_sync(self.text_async())
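# Illustrative note (assumption, not part of the original module): every coroutine
# method on ClientResponse has a blocking twin built with make_sync, e.g.
#
#   data = response.json()              # blocking, wraps json_async()
#   data = await response.json_async()  # inside a coroutine
#
# Both return the parsed JSON body; only the calling style differs.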
class StreamReader:
"""Class representing the HTTP responses returned by the client.
This class is just a thing wrapper around `aiohttp.StreamReader
<https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader>`_
that allows using it in both asynchronous and synchronous mode. Instances of
this class have all the methods that you can find in `aiohttp.StreamReader`,
  like `read()`, `readany()`, etc. Methods in `aiohttp.StreamReader`
come in two flavors in this class: synchronous and asynchronous. For example,
`read()` and `read_async`, where `read` is the synchronous one and `read_async`
is the asynchronous. Find more information about attributes and methods in
`aiohttp.StreamReader` in:
https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader
"""
def __init__(self, aiohttp_stream_reader):
self._aiohttp_stream_reader = aiohttp_stream_reader
def __getattr__(self, attr):
return getattr(self._aiohttp_stream_reader, attr)
async def read_async(self, n=-1):
return await self._aiohttp_stream_reader.read(n)
def read(self, n=-1):
return make_sync(self.read_async(n))
async def readany_async(self):
return await self._aiohttp_stream_reader.readany()
def readany(self):
return make_sync(self.readany_async())
async def readexactly_async(self, n):
return await self._aiohttp_stream_reader.readexactly(n)
def readexactly(self, n):
return make_sync(self.readexactly_async(n))
async def readline_async(self):
return await self._aiohttp_stream_reader.readline()
def readline(self):
return make_sync(self.readline_async())
async def readchunk_async(self):
return await self._aiohttp_stream_reader.readchunk()
def readchunk(self):
return make_sync(self.readchunk_async())
class Client:
"""Client for interacting with VirusTotal.
:param apikey: Your VirusTotal API key.
:param agent: A string that identifies your application.
:param host: By default https://www.virustotal.com, it can be changed for
testing purposes.
:param trust_env: Get proxies information from HTTP_PROXY/HTTPS_PROXY
environment variables if the parameter is True (False by default).
  :param timeout: An int that determines the number of seconds to wait before
    a request times out (300 by default).
:param proxy: A string indicating the proxy to use for requests
made by the client (None by default).
:type apikey: str
:type agent: str
:type host: str
:type trust_env: bool
:type timeout: int
:type proxy: str
"""
def __init__(self, apikey, agent="unknown", host=None, trust_env=False,
timeout=300, proxy=None):
"""Initialize the client with the provided API key."""
if not isinstance(apikey, str):
raise ValueError('API key must be a string')
if not apikey:
raise ValueError('API key can not be an empty string')
self._host = host or _API_HOST
self._apikey = apikey
self._agent = agent
self._session = None
self._trust_env = trust_env
self._timeout = timeout
self._proxy = proxy
def _full_url(self, path, *args):
try:
path = path.format(*args)
except IndexError:
raise ValueError('Not enough arguments to fill all placeholders in path')
if path.startswith('http'):
return path
return self._host + _ENDPOINT_PREFIX + path
def _get_session(self):
if not self._session:
self._session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False),
headers={
'X-Apikey': self._apikey,
'Accept-Encoding': 'gzip',
'User-Agent': _USER_AGENT_FMT.format_map({
'agent': self._agent, 'version': __version__})},
trust_env=self._trust_env,
timeout=aiohttp.ClientTimeout(total=self._timeout))
return self._session
async def __aenter__(self):
return self
async def __aexit__(self, type, value, traceback):
await self.close_async()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _extract_data_from_json(self, json_response):
    if 'data' not in json_response:
      raise ValueError('response does not return a data field')
return json_response['data']
async def _response_to_json(self, response):
error = await self.get_error_async(response)
if error:
raise error
return await response.json_async()
async def _response_to_object(self, response):
json_response = await self._response_to_json(response)
try:
return Object.from_dict(self._extract_data_from_json(json_response))
except ValueError as err:
raise ValueError(f'response is not an object: {err}')
async def close_async(self):
"""Like :func:`close` but returns a coroutine."""
if self._session:
await self._session.close()
self._session = None
def close(self):
"""Closes the client.
    When the client is no longer needed it should be closed to release
    resources such as TCP connections.
"""
return make_sync(self.close_async())
def delete(self, path, *path_args):
"""Sends a DELETE request to a given API endpoint.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:type path: str
:returns: An instance of :class:`ClientResponse`.
"""
return make_sync(self.delete_async(path, *path_args))
async def delete_async(self, path, *path_args):
"""Like :func:`delete` but returns a coroutine."""
return ClientResponse(
await self._get_session().delete(
self._full_url(path, *path_args), proxy=self._proxy))
def download_file(self, hash, file):
"""Downloads a file given its hash (SHA-256, SHA-1 or MD5).
The file identified by the hash will be written to the provided file
object. The file object must be opened in write binary mode ('wb').
:param hash: File hash.
:param file: A file object where the downloaded file will be written to.
:type hash: str
:type file: file-like object
"""
return make_sync(self.download_file_async(hash, file))
async def download_file_async(self, hash, file):
"""Like :func:`download_file` but returns a coroutine."""
response = await self.get_async(f'/files/{hash}/download')
error = await self.get_error_async(response)
if error:
raise error
while True:
chunk = await response.content.read_async(1024*1024)
if not chunk:
break
file.write(chunk)
def feed(self, feed_type, cursor=None):
"""Returns an iterator for a VirusTotal feed.
    This function returns an iterator that retrieves a continuous
    stream of files as they are scanned by VirusTotal. See the documentation
for the :class:`Feed` class for more details.
:param feed_type: One of the supported feed types enumerated in
:class:`FeedType`.
:param cursor: An optional cursor indicating where to start. This argument
      can be a string in the format 'YYYYMMDDhhmm' indicating the date and time
of the first package that will be retrieved.
    :type feed_type: :class:`vt.FeedType`
:type cursor: str
"""
return Feed(self, feed_type, cursor=cursor)
def get(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`get_json`,
:func:`get_data` and :func:`get_object` for higher-level functions.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns: An instance of :class:`ClientResponse`.
"""
return make_sync(self.get_async(path, *path_args, params=params))
async def get_async(self, path, *path_args, params=None):
"""Like :func:`get` but returns a coroutine."""
return ClientResponse(
await self._get_session().get(
self._full_url(path, *path_args),
params=params, proxy=self._proxy))
def get_data(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and returns response's data.
Most VirusTotal API responses are JSON-encoded with the following format::
{"data": <response data>}
    This function parses the server's response and returns only the data; if the
    response is not in the expected format an exception is raised. For endpoints
where the data is a VirusTotal object you can use :func:`get_object` instead.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
Whatever the server returned in the response's data field, it may be a
dict, list, string or some other Python type, depending on the endpoint
called.
"""
return make_sync(self.get_data_async(path, *path_args, params=params))
async def get_data_async(self, path, *path_args, params=None):
"""Like :func:`get_data` but returns a coroutine."""
json_response = await self.get_json_async(path, *path_args, params=params)
return self._extract_data_from_json(json_response)
async def get_error_async(self, response):
"""Given a :class:`ClientResponse` returns a :class:`APIError`
This function checks if the response from the VirusTotal backend was an
error and returns the appropriate :class:`APIError` or None if no error
occurred.
:param response: A :class:`ClientResponse` instance.
:returns: An instance of :class:`APIError` or None.
"""
if response.status == 200:
return None
if response.status >= 400 and response.status <= 499:
if response.content_type == 'application/json':
json_response = await response.json_async()
error = json_response.get('error')
if error:
return APIError.from_dict(error)
return APIError('ClientError', await response.text_async())
return APIError('ServerError', await response.text_async())
def get_json(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and parses the response.
Most VirusTotal API responses are JSON-encoded. This function parses the
    JSON, checks for errors, and returns the server's response as a dictionary.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
A dictionary with the backend's response.
"""
return make_sync(self.get_json_async(path, *path_args, params=params))
async def get_json_async(self, path, *path_args, params=None):
"""Like :func:`get_json` but returns a coroutine."""
response = await self.get_async(path, *path_args, params=params)
return await self._response_to_json(response)
def get_object(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and returns an object.
The endpoint specified must return an object, not a collection. This
means that get_object can be used with endpoints like /files/{file_id}
and /urls/{url_id}, which return an individual object but not with
/comments, which returns a collection of objects.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
An instance of :class:`Object`.
"""
return make_sync(self.get_object_async(path, *path_args, params=params))
async def get_object_async(self, path, *path_args, params=None):
"""Like :func:`get_object` but returns a coroutine."""
response = await self.get_async(path, *path_args, params=params)
return await self._response_to_object(response)
def patch(self, path, *path_args, data=None):
"""Sends a PATCH request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`patch_object` for
a higher-level function.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param data: Data sent in the request body.
:type path: str
:type data: A string or bytes
:returns: An instance of :class:`ClientResponse`.
"""
    return make_sync(self.patch_async(path, *path_args, data=data))
async def patch_async(self, path, *path_args, data=None):
"""Like :func:`patch` but returns a coroutine."""
return ClientResponse(
await self._get_session().patch(
self._full_url(path, *path_args),
data=data, proxy=self._proxy))
def patch_object(self, path, *path_args, obj):
"""Sends a PATCH request for modifying an object.
This function modifies an object. The endpoint must be one that identifies
an object, like /intelligence/hunting_rulesets/{id}.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param obj: Object that has been modified.
:type path: str
:type obj: :class:`Object`
:returns: An instance of :class:`Object` representing the same object after
      the changes have been applied.
"""
return make_sync(self.patch_object_async(path, *path_args, obj=obj))
async def patch_object_async(self, path, *path_args, obj):
"""Like :func:`patch_object` but returns a coroutine."""
data = json.dumps({'data': obj.to_dict(modified_attributes_only=True)})
response = await self.patch_async(path, *path_args, data=data)
return await self._response_to_object(response)
def post(self, path, *path_args, data=None):
"""Sends a POST request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`post_object` for
a higher-level function.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param data: Data sent in the request body.
:type path: str
:type data: A string or bytes
:returns: An instance of :class:`ClientResponse`.
"""
return make_sync(self.post_async(path, *path_args, data=data))
async def post_async(self, path, *path_args, data=None):
"""Like :func:`post` but returns a coroutine."""
return ClientResponse(
await self._get_session().post(
self._full_url(path, *path_args),
data=data, proxy=self._proxy))
def post_object(self, path, *path_args, obj):
"""Sends a POST request for creating an object.
    This function creates a new object. The endpoint must be one that identifies
a collection, like /intelligence/hunting_rulesets.
:param path: Path to API endpoint.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param obj: Instance :class:`Object` with the type expected by the API
endpoint.
:type path: str
:type obj: :class:`Object`
:returns: An instance of :class:`Object` representing the new object.
"""
return make_sync(self.post_object_async(path, *path_args, obj=obj))
async def post_object_async(self, path, *path_args, obj):
"""Like :func:`post_object` but returns a coroutine."""
data = json.dumps({'data': obj.to_dict()})
response = await self.post_async(path, *path_args, data=data)
return await self._response_to_object(response)
def iterator(self, path, *path_args, params=None, cursor=None,
limit=None, batch_size=0):
"""Returns an iterator for the collection specified by the given path.
    The endpoint specified by path must return a collection of objects. Examples
    of such endpoints are /comments and /intelligence/search.
:param path: Path to API endpoint returning a collection.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Additional parameters passed to the endpoint.
:param cursor: Cursor for resuming the iteration at the point it was left
previously. A cursor can be obtained with Iterator.cursor(). This
cursor is not the same one returned by the VirusTotal API.
:param limit: Maximum number of objects that will be returned by the iterator.
If a limit is not provided the iterator continues until it reaches the
last object in the collection.
:param batch_size: Maximum number of objects retrieved on each call to the
endpoint. If not provided the server will decide how many objects to
return.
:type path: str
:type params: dict
:type cursor: str
:type limit: int
:type batch_size: int
:returns: An instance of :class:`Iterator`.
"""
return Iterator(self, self._full_url(path, *path_args),
params=params, cursor=cursor, limit=limit, batch_size=batch_size)
def scan_file(self, file, wait_for_completion=False):
"""Scans a file.
:param file: File to be scanned.
:param wait_for_completion: If True the function doesn't return until the
analysis has been completed.
:type file: File-like object.
:type wait_for_completion: bool
:returns: An instance of :class:`Object` of analysis type.
"""
return make_sync(self.scan_file_async(
file, wait_for_completion=wait_for_completion))
async def scan_file_async(self, file, wait_for_completion=False):
"""Like :func:`scan_file` but returns a coroutine."""
if not isinstance(file, io.IOBase):
      raise TypeError(f'Expected file to be a file object, got {type(file)}')
# The snippet below could be replaced with this simpler code:
#
# form_data = aiohttp.FormData()
# form_data.add_field('file', file)
#
# However, aiohttp.FormData assumes that the server supports RFC 5987 and
    # sends a Content-Disposition header like:
#
# 'form-data; name="file"; filename="foobar"; filename*=UTF-8''foobar
#
# AppEngine's upload handler doesn't like the filename*=UTF-8''foobar field
# and fails with this Content-Disposition header.
part = aiohttp.get_payload(file)
filename = file.name if hasattr(file, 'name') else 'unknown'
disposition = f'form-data; name="file"; filename="{filename}"'
part.headers['Content-Disposition'] = disposition
form_data = aiohttp.MultipartWriter('form-data')
form_data.append_payload(part)
upload_url = await self.get_data_async('/files/upload_url')
response = ClientResponse(
await self._get_session().post(
upload_url, data=form_data, proxy=self._proxy))
analysis = await self._response_to_object(response)
if wait_for_completion:
analysis = await self._wait_for_analysis_completion(analysis)
return analysis
def scan_url(self, url, wait_for_completion=False):
"""Scans a URL.
:param url: The URL to be scanned.
:param wait_for_completion: If True the function doesn't return until the
analysis has been completed.
:type url: str
:type wait_for_completion: bool
:returns: An instance of :class:`Object` of analysis type.
"""
return make_sync(self.scan_url_async(
url, wait_for_completion=wait_for_completion))
async def scan_url_async(self, url, wait_for_completion=False):
"""Like :func:`scan_url` but returns a coroutine."""
form_data = aiohttp.FormData()
form_data.add_field('url', url)
response = ClientResponse(
await self._get_session().post(
self._full_url('/urls'), data=form_data, proxy=self._proxy))
analysis = await self._response_to_object(response)
if wait_for_completion:
analysis = await self._wait_for_analysis_completion(analysis)
return analysis
async def _wait_for_analysis_completion(self, analysis):
while True:
analysis = await self.get_object_async('/analyses/{}', analysis.id)
if analysis.status == 'completed':
break
await asyncio.sleep(20)
return analysis
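# Illustrative sketch of typical Client usage (assumption, not part of the original
# module; '<apikey>' and the attributes accessed on returned objects are placeholders):
#
#   with Client('<apikey>') as client:
#     file_obj = client.get_object('/files/{}', '44d88612fea8a8f36de82e1278abb02f')
#     analysis = client.scan_url('http://www.example.com', wait_for_completion=True)
#     for comment in client.iterator('/comments', limit=10):
#       print(comment.id)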
|
python/introduction-to-regex.py | gajubadge11/HackerRank-1 | 340 | 12677737 | <filename>python/introduction-to-regex.py<gh_stars>100-1000
#!/usr/bin/env python3
import re
if __name__ == "__main__":
t = int(input().strip())
    pattern = r'^[+-]?[0-9]*\.[0-9]+$'
for _ in range(t):
print(bool(re.match(pattern, input()))) |
pyteal/compiler/compiler.py | spapasoteriou/pyteal | 184 | 12677751 | <filename>pyteal/compiler/compiler.py
from typing import List, Tuple, Set, Dict, Optional, cast
from ..types import TealType
from ..ast import (
Expr,
Return,
Seq,
ScratchSlot,
SubroutineDefinition,
SubroutineDeclaration,
)
from ..ir import Mode, TealComponent, TealOp, TealBlock, TealSimpleBlock
from ..errors import TealInputError, TealInternalError
from .sort import sortBlocks
from .flatten import flattenBlocks, flattenSubroutines
from .scratchslots import assignScratchSlotsToSubroutines
from .subroutines import (
findRecursionPoints,
spillLocalSlotsDuringRecursion,
resolveSubroutines,
)
from .constants import createConstantBlocks
MAX_TEAL_VERSION = 5
MIN_TEAL_VERSION = 2
DEFAULT_TEAL_VERSION = MIN_TEAL_VERSION
class CompileOptions:
def __init__(
self,
*,
mode: Mode = Mode.Signature,
version: int = DEFAULT_TEAL_VERSION,
) -> None:
self.mode = mode
self.version = version
self.currentSubroutine: Optional[SubroutineDefinition] = None
self.breakBlocksStack: List[List[TealSimpleBlock]] = []
self.continueBlocksStack: List[List[TealSimpleBlock]] = []
def setSubroutine(self, subroutine: Optional[SubroutineDefinition]) -> None:
self.currentSubroutine = subroutine
def enterLoop(self) -> None:
self.breakBlocksStack.append([])
self.continueBlocksStack.append([])
def isInLoop(self) -> bool:
return len(self.breakBlocksStack) != 0
def addLoopBreakBlock(self, block: TealSimpleBlock) -> None:
if len(self.breakBlocksStack) == 0:
raise TealInternalError("Cannot add break block when no loop is active")
self.breakBlocksStack[-1].append(block)
def addLoopContinueBlock(self, block: TealSimpleBlock) -> None:
if len(self.continueBlocksStack) == 0:
raise TealInternalError("Cannot add continue block when no loop is active")
self.continueBlocksStack[-1].append(block)
def exitLoop(self) -> Tuple[List[TealSimpleBlock], List[TealSimpleBlock]]:
if len(self.breakBlocksStack) == 0 or len(self.continueBlocksStack) == 0:
raise TealInternalError("Cannot exit loop when no loop is active")
return (self.breakBlocksStack.pop(), self.continueBlocksStack.pop())
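# Illustrative sketch (assumption, not part of the original module) of how a loop
# expression is expected to use the break/continue bookkeeping above: blocks are
# registered while the loop body compiles and collected again on exitLoop().
def _example_loop_bookkeeping():
    options = CompileOptions(mode=Mode.Application, version=MAX_TEAL_VERSION)
    options.enterLoop()
    options.addLoopBreakBlock(TealSimpleBlock([]))
    options.addLoopContinueBlock(TealSimpleBlock([]))
    return options.exitLoop()  # -> ([break blocks], [continue blocks])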
def verifyOpsForVersion(teal: List[TealComponent], version: int):
"""Verify that all TEAL operations are allowed in the specified version.
Args:
teal: Code to check.
        version: The version to check against.
Raises:
TealInputError: if teal contains an operation not allowed in version.
"""
for stmt in teal:
if isinstance(stmt, TealOp):
op = stmt.getOp()
if op.min_version > version:
raise TealInputError(
"Op not supported in TEAL version {}: {}. Minimum required version is {}".format(
version, op, op.min_version
)
)
def verifyOpsForMode(teal: List[TealComponent], mode: Mode):
"""Verify that all TEAL operations are allowed in mode.
Args:
teal: Code to check.
mode: The mode to check against.
Raises:
TealInputError: if teal contains an operation not allowed in mode.
"""
for stmt in teal:
if isinstance(stmt, TealOp):
op = stmt.getOp()
if not op.mode & mode:
raise TealInputError(
"Op not supported in {} mode: {}".format(mode.name, op)
)
def compileSubroutine(
ast: Expr,
options: CompileOptions,
subroutineMapping: Dict[Optional[SubroutineDefinition], List[TealComponent]],
subroutineGraph: Dict[SubroutineDefinition, Set[SubroutineDefinition]],
subroutineBlocks: Dict[Optional[SubroutineDefinition], TealBlock],
) -> None:
currentSubroutine = (
cast(SubroutineDeclaration, ast).subroutine
if isinstance(ast, SubroutineDeclaration)
else None
)
if not ast.has_return():
if ast.type_of() == TealType.none:
ast = Seq([ast, Return()])
else:
ast = Return(ast)
options.setSubroutine(currentSubroutine)
start, end = ast.__teal__(options)
start.addIncoming()
start.validateTree()
start = TealBlock.NormalizeBlocks(start)
start.validateTree()
order = sortBlocks(start, end)
teal = flattenBlocks(order)
verifyOpsForVersion(teal, options.version)
verifyOpsForMode(teal, options.mode)
subroutineMapping[currentSubroutine] = teal
subroutineBlocks[currentSubroutine] = start
referencedSubroutines: Set[SubroutineDefinition] = set()
for stmt in teal:
for subroutine in stmt.getSubroutines():
referencedSubroutines.add(subroutine)
if currentSubroutine is not None:
subroutineGraph[currentSubroutine] = referencedSubroutines
newSubroutines = referencedSubroutines - subroutineMapping.keys()
for subroutine in sorted(newSubroutines, key=lambda subroutine: subroutine.id):
compileSubroutine(
subroutine.getDeclaration(),
options,
subroutineMapping,
subroutineGraph,
subroutineBlocks,
)
def compileTeal(
ast: Expr,
mode: Mode,
*,
version: int = DEFAULT_TEAL_VERSION,
assembleConstants: bool = False,
) -> str:
"""Compile a PyTeal expression into TEAL assembly.
Args:
ast: The PyTeal expression to assemble.
mode: The mode of the program to assemble. Must be Signature or Application.
version (optional): The TEAL version used to assemble the program. This will determine which
expressions and fields are able to be used in the program and how expressions compile to
TEAL opcodes. Defaults to 2 if not included.
assembleConstants (optional): When true, the compiler will produce a program with fully
assembled constants, rather than using the pseudo-ops `int`, `byte`, and `addr`. These
constants will be assembled in the most space-efficient way, so enabling this may reduce
the compiled program's size. Enabling this option requires a minimum TEAL version of 3.
Defaults to false.
Returns:
A TEAL assembly program compiled from the input expression.
Raises:
TealInputError: if an operation in ast is not supported by the supplied mode and version.
        TealInternalError: if an internal error is encountered during compilation.
"""
if (
not (MIN_TEAL_VERSION <= version <= MAX_TEAL_VERSION)
or type(version) is not int
):
raise TealInputError(
"Unsupported TEAL version: {}. Excepted an integer in the range [{}, {}]".format(
version, MIN_TEAL_VERSION, MAX_TEAL_VERSION
)
)
options = CompileOptions(mode=mode, version=version)
subroutineMapping: Dict[
Optional[SubroutineDefinition], List[TealComponent]
] = dict()
subroutineGraph: Dict[SubroutineDefinition, Set[SubroutineDefinition]] = dict()
subroutineBlocks: Dict[Optional[SubroutineDefinition], TealBlock] = dict()
compileSubroutine(
ast, options, subroutineMapping, subroutineGraph, subroutineBlocks
)
localSlotAssignments = assignScratchSlotsToSubroutines(
subroutineMapping, subroutineBlocks
)
spillLocalSlotsDuringRecursion(
version, subroutineMapping, subroutineGraph, localSlotAssignments
)
subroutineLabels = resolveSubroutines(subroutineMapping)
teal = flattenSubroutines(subroutineMapping, subroutineLabels)
if assembleConstants:
if version < 3:
raise TealInternalError(
"The minimum TEAL version required to enable assembleConstants is 3. The current version is {}".format(
version
)
)
teal = createConstantBlocks(teal)
lines = ["#pragma version {}".format(version)]
lines += [i.assemble() for i in teal]
return "\n".join(lines)
|
sdks/python/apache_beam/examples/sql_taxi.py | hengfengli/beam | 5,279 | 12677781 | <filename>sdks/python/apache_beam/examples/sql_taxi.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that processes streaming NYC Taxi data with SqlTransform.
This example reads from the PubSub NYC Taxi stream described in
https://github.com/googlecodelabs/cloud-dataflow-nyc-taxi-tycoon, aggregates
the data in 15s windows using SqlTransform, and writes the output to
a user-defined PubSub topic.
Java 8 must be available to run this pipeline, and the
--experiments=use_runner_v2 flag must be passed when running on Dataflow.
Docker must also be available to run this pipeline locally.
"""
# pytype: skip-file
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.sql import SqlTransform
def run(output_topic, pipeline_args):
pipeline_options = PipelineOptions(
pipeline_args, save_main_session=True, streaming=True)
with beam.Pipeline(options=pipeline_options) as pipeline:
_ = (
pipeline
| beam.io.ReadFromPubSub(
topic='projects/pubsub-public-data/topics/taxirides-realtime',
timestamp_attribute="ts").with_output_types(bytes)
| "Parse JSON payload" >> beam.Map(json.loads)
# Use beam.Row to create a schema-aware PCollection
| "Create beam Row" >> beam.Map(
lambda x: beam.Row(
ride_status=str(x['ride_status']),
passenger_count=int(x['passenger_count'])))
        # SqlTransform computes results within an existing window
| "15s fixed windows" >> beam.WindowInto(beam.window.FixedWindows(15))
# Aggregate drop offs and pick ups that occur within each 15s window
| SqlTransform(
"""
SELECT
ride_status,
COUNT(*) AS num_rides,
SUM(passenger_count) AS total_passengers
FROM PCOLLECTION
WHERE NOT ride_status = 'enroute'
GROUP BY ride_status""")
# SqlTransform yields python objects with attributes corresponding to
# the outputs of the query.
# Collect those attributes, as well as window information, into a dict
| "Assemble Dictionary" >> beam.Map(
lambda row,
window=beam.DoFn.WindowParam: {
"ride_status": row.ride_status,
"num_rides": row.num_rides,
"total_passengers": row.total_passengers,
"window_start": window.start.to_rfc3339(),
"window_end": window.end.to_rfc3339()
})
| "Convert to JSON" >> beam.Map(json.dumps)
| "UTF-8 encode" >> beam.Map(lambda s: s.encode("utf-8"))
| beam.io.WriteToPubSub(topic=output_topic))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--output_topic',
dest='output_topic',
required=True,
help=(
'Cloud PubSub topic to write to (e.g. '
'projects/my-project/topics/my-topic), must be created prior to '
'running the pipeline.'))
known_args, pipeline_args = parser.parse_known_args()
run(known_args.output_topic, pipeline_args)
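# Example invocation (illustrative; the project, region and bucket names are
# placeholders, and --experiments=use_runner_v2 is required on Dataflow as noted
# in the module docstring):
#
#   python sql_taxi.py \
#       --output_topic=projects/my-project/topics/taxi-aggregates \
#       --runner=DataflowRunner --project=my-project --region=us-central1 \
#       --temp_location=gs://my-bucket/tmp --experiments=use_runner_v2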
|
models/Lightweight/MobileNetV2.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12677806 | import torch
import torch.nn as nn
import torchvision
from functools import reduce
def Conv3x3BNReLU(in_channels,out_channels,stride,groups):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, groups=groups),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
def Conv1x1BNReLU(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
def Conv1x1BN(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
nn.BatchNorm2d(out_channels)
)
class InvertedResidual(nn.Module):
def __init__(self, in_channels, out_channels, stride, expansion_factor=6):
super(InvertedResidual, self).__init__()
self.stride = stride
mid_channels = (in_channels * expansion_factor)
self.bottleneck = nn.Sequential(
Conv1x1BNReLU(in_channels, mid_channels),
Conv3x3BNReLU(mid_channels, mid_channels, stride,groups=mid_channels),
Conv1x1BN(mid_channels, out_channels)
)
if self.stride == 1:
self.shortcut = Conv1x1BN(in_channels, out_channels)
def forward(self, x):
out = self.bottleneck(x)
out = (out+self.shortcut(x)) if self.stride==1 else out
return out
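# Illustrative sketch (assumption, not part of the original file): with stride 1 the
# block adds a learned 1x1 shortcut (rather than the identity shortcut of the paper),
# so spatial size is preserved while channels go from in_channels to out_channels.
def _example_inverted_residual_shapes():
    block = InvertedResidual(in_channels=16, out_channels=24, stride=1)
    y = block(torch.randn(1, 16, 56, 56))
    assert y.shape == (1, 24, 56, 56)
    return y.shape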
class MobileNetV2(nn.Module):
def __init__(self, num_classes=1000):
super(MobileNetV2,self).__init__()
self.first_conv = Conv3x3BNReLU(3,32,2,groups=1)
self.layer1 = self.make_layer(in_channels=32, out_channels=16, stride=1, block_num=1)
self.layer2 = self.make_layer(in_channels=16, out_channels=24, stride=2, block_num=2)
self.layer3 = self.make_layer(in_channels=24, out_channels=32, stride=2, block_num=3)
self.layer4 = self.make_layer(in_channels=32, out_channels=64, stride=2, block_num=4)
self.layer5 = self.make_layer(in_channels=64, out_channels=96, stride=1, block_num=3)
self.layer6 = self.make_layer(in_channels=96, out_channels=160, stride=2, block_num=3)
self.layer7 = self.make_layer(in_channels=160, out_channels=320, stride=1, block_num=1)
self.last_conv = Conv1x1BNReLU(320,1280)
self.avgpool = nn.AvgPool2d(kernel_size=7,stride=1)
self.dropout = nn.Dropout(p=0.2)
self.linear = nn.Linear(in_features=1280,out_features=num_classes)
def make_layer(self, in_channels, out_channels, stride, block_num):
layers = []
layers.append(InvertedResidual(in_channels, out_channels, stride))
for i in range(1, block_num):
layers.append(InvertedResidual(out_channels,out_channels,1))
return nn.Sequential(*layers)
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear) or isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.first_conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
x = self.layer7(x)
x = self.last_conv(x)
x = self.avgpool(x)
x = x.view(x.size(0),-1)
x = self.dropout(x)
out = self.linear(x)
return out
if __name__=='__main__':
model = MobileNetV2()
# model = torchvision.models.MobileNetV2()
print(model)
input = torch.randn(1, 3, 224, 224)
out = model(input)
print(out.shape)
|
airbyte-integrations/connectors/source-freshdesk/unit_tests/test_call_credit.py | OTRI-Unipd/OTRI-airbyte | 6,215 | 12677826 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import time
from source_freshdesk.utils import CallCredit
def test_consume_one():
"""Multiple consumptions of 1 cred will reach limit"""
credit = CallCredit(balance=3, reload_period=1)
ts_1 = time.time()
for i in range(4):
credit.consume(1)
ts_2 = time.time()
assert 1 <= ts_2 - ts_1 < 2
def test_consume_many():
"""Consumptions of N creds will reach limit and decrease balance"""
credit = CallCredit(balance=3, reload_period=1)
ts_1 = time.time()
credit.consume(1)
credit.consume(3)
ts_2 = time.time()
# the balance decreased already, so single cred will be enough to reach limit
credit.consume(1)
ts_3 = time.time()
assert 1 <= ts_2 - ts_1 < 2
assert 1 <= ts_3 - ts_2 < 2
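# Illustrative note (assumption): both tests rely on the same timing contract: once
# consumption exceeds the balance within one reload_period, CallCredit is expected
# to wait for the period to roll over, which is why the elapsed time is asserted to
# land in the [1, 2) second window.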
|
tools/accuracy_checker/tests/test_onnx_launcher.py | APrigarina/open_model_zoo | 1,031 | 12677832 | """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
pytest.importorskip('accuracy_checker.launcher.onnx_launcher')
import cv2
import numpy as np
from accuracy_checker.launcher.launcher import create_launcher
from accuracy_checker.config import ConfigError
def old_onnxruntime(models_dir):
import onnxruntime as rt
sess = rt.InferenceSession(str(models_dir / "samplenet.onnx"))
try:
sess.get_providers()
return False
except AttributeError:
return True
def get_onnx_test_model(models_dir, device=None, ep=None):
config = {
"framework": "onnx_runtime",
"model": str(models_dir / "samplenet.onnx"),
"adapter": "classification",
}
if device is not None:
config['device'] = device
if ep is not None:
config['execution_providers'] = ep
return create_launcher(config)
class TestONNXRuntimeLauncher:
def test_launcher_creates(self, models_dir):
launcher = get_onnx_test_model(models_dir)
assert launcher.inputs['data'] == [1, 3, 32, 32]
assert launcher.output_blob == 'fc3'
def test_infer(self, data_dir, models_dir):
onnx_test_model = get_onnx_test_model(models_dir)
_, _, h, w = onnx_test_model.inputs['data']
img_raw = cv2.imread(str(data_dir / '1.jpg'))
img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
img_resized = cv2.resize(img_rgb, (w, h))
input_blob = np.transpose([img_resized], (0, 3, 1, 2))
res = onnx_test_model.predict([{'data': input_blob.astype(np.float32)}], [{}])
assert np.argmax(res[0]['fc3']) == 7
def test_infer_with_execution_provider(self, data_dir, models_dir):
        if old_onnxruntime(models_dir):
pytest.skip(reason="onnxruntime does not support EP")
onnx_test_model = get_onnx_test_model(models_dir, ep=['CPUExecutionProvider'])
_, _, h, w = onnx_test_model.inputs['data']
img_raw = cv2.imread(str(data_dir / '1.jpg'))
img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
img_resized = cv2.resize(img_rgb, (w, h))
input_blob = np.transpose([img_resized], (0, 3, 1, 2))
res = onnx_test_model.predict([{'data': input_blob.astype(np.float32)}], [{}])
assert np.argmax(res[0]['fc3']) == 7
def test_auto_model_search(self, models_dir):
config = {
"framework": "onnx_runtime",
"model": models_dir,
}
launcher = create_launcher(config, 'samplenet')
assert launcher.model == models_dir / "samplenet.onnx"
@pytest.mark.usefixtures('mock_path_exists')
class TestONNXRuntimeLauncherConfig:
def test_missed_model_in_create_onnx_launcher_raises_config_error_exception(self):
config = {'framework': 'onnx_runtime'}
with pytest.raises(ConfigError):
create_launcher(config)
def test_unsupported_device_in_create_onnx_launcher_raises_config_error_exception(self):
config = {'framework': 'onnx_runtime', 'model': 'model.onnx', 'device': 'UNSUPPORTED'}
with pytest.raises(ConfigError):
create_launcher(config)
|
test/sanity/document-start-end/test.py | frank-dspeed/nw.js | 27,296 | 12677843 | import time
import os
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
try:
wait_switch_window_name(driver, 'index')
    print(driver.current_url)
result = wait_for_element_id(driver, 'result')
result2 = wait_for_element_id(driver, 'result2')
    print(result)
    print(result2)
assert(result == 'success from popup' and result2 == 'startiframe')
finally:
#time.sleep(50)
driver.quit()
|
f5/multi_device/test/unit/test_trust_domain.py | nghia-tran/f5-common-python | 272 | 12677856 | # Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.multi_device.cluster import TrustDomain
from f5.multi_device.exceptions import DeviceAlreadyInTrustDomain
from f5.multi_device.exceptions import DeviceNotTrusted
import mock
import pytest
class MockDeviceInfo(object):
def __init__(self, name):
self.name = name
self.selfDevice = 'true'
self.managementIp = '1.1.1.1'
@pytest.fixture
def BigIPs():
mock_bigips = []
for bigip in range(4):
mock_bigip = mock.MagicMock()
mock_bigip.__name = 'me'
mock_bigip.tm.cm.devices.get_collection.return_value = \
[MockDeviceInfo('test')]
mock_bigip.tm.cm.devices.get_collection.__name__ = 'test'
mock_bigips.append(mock_bigip)
return mock_bigips
@pytest.fixture
def TrustDomainCreateNew(BigIPs):
mock_bigips = BigIPs
td = TrustDomain()
return td, mock_bigips
def test_validate_device_not_trusted(TrustDomainCreateNew):
td, mock_bigips = TrustDomainCreateNew
with pytest.raises(DeviceNotTrusted) as ex:
td.devices = mock_bigips
td.validate()
assert "'test' is not trusted by 'test', which trusts: []" in str(ex.value)
@mock.patch('f5.multi_device.trust_domain.TrustDomain._set_attributes')
@mock.patch('f5.multi_device.trust_domain.TrustDomain.validate')
def test___init__(mock_set_attr, mock_validate, BigIPs):
mock_bigips = BigIPs
td = TrustDomain(devices=mock_bigips)
assert td._set_attributes.call_args == mock.call(devices=mock_bigips)
def test__set_attributes(BigIPs):
mock_bigips = BigIPs
td = TrustDomain()
td._set_attributes(devices=mock_bigips, partition='test')
assert td.devices == mock_bigips
assert td.partition == 'test'
assert td.device_group_name == 'device_trust_group'
assert td.device_group_type == 'sync-only'
@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')
@mock.patch('f5.multi_device.trust_domain.pollster')
def test_create(mock_add_trustee, mock_pollster, TrustDomainCreateNew):
td, mock_bigips = TrustDomainCreateNew
td.create(devices=mock_bigips, partition='test')
assert td.devices == mock_bigips
assert td.partition == 'test'
assert td._add_trustee.call_args_list == \
[
mock.call(mock_bigips[1]),
mock.call(mock_bigips[2]),
mock.call(mock_bigips[3])
]
@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')
@mock.patch('f5.multi_device.trust_domain.pollster')
@mock.patch('f5.multi_device.trust_domain.TrustDomain._remove_trustee')
def test_teardown(
mock_add_trustee, mock_pollster, mock_rem_trustee, TrustDomainCreateNew
):
td, mock_bigips = TrustDomainCreateNew
td.create(devices=mock_bigips, partition='test')
td.teardown()
assert td.domain == {}
assert td._remove_trustee.call_args_list == \
[
mock.call(mock_bigips[0]),
mock.call(mock_bigips[1]),
mock.call(mock_bigips[2]),
mock.call(mock_bigips[3])
]
@mock.patch('f5.multi_device.trust_domain.get_device_info')
@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')
def test__add_trustee(mock_dev_info, mock_mod_trust, TrustDomainCreateNew):
td, mock_bigips = TrustDomainCreateNew
td._set_attributes(devices=mock_bigips, partition='test')
td._add_trustee(mock_bigips[1])
assert td._modify_trust.call_args == \
mock.call(mock_bigips[0], td._get_add_trustee_cmd, mock_bigips[1])
@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')
def test__add_trustee_already_in_domain(
mock_mod_trust, TrustDomainCreateNew
):
td, mock_bigips = TrustDomainCreateNew
td._set_attributes(devices=mock_bigips, partition='test')
td.domain = {'test': 'device'}
with pytest.raises(DeviceAlreadyInTrustDomain) as ex:
td._add_trustee(mock_bigips[1])
assert "Device: 'test' is already in this trust domain" in str(ex.value)
|
reader/slqa/predictor.py | wsdm/RCZoo | 166 | 12677872 | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""DrQA Document Reader predictor"""
import logging
from multiprocessing import Pool as ProcessPool
from multiprocessing.util import Finalize
from .vector import vectorize, batchify
from .model import DocReader
from . import DEFAULTS, utils
from .. import tokenizers
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Tokenize + annotate
# ------------------------------------------------------------------------------
PROCESS_TOK = None
def init(tokenizer_class, annotators):
global PROCESS_TOK
PROCESS_TOK = tokenizer_class(annotators=annotators)
Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
def tokenize(text):
global PROCESS_TOK
return PROCESS_TOK.tokenize(text)
# ------------------------------------------------------------------------------
# Predictor class.
# ------------------------------------------------------------------------------
class Predictor(object):
"""Load a pretrained DocReader model and predict inputs on the fly."""
def __init__(self, model=None, tokenizer=None, normalize=True,
embedding_file=None, num_workers=None):
"""
Args:
model: path to saved model file.
tokenizer: option string to select tokenizer class.
normalize: squash output score to 0-1 probabilities with a softmax.
embedding_file: if provided, will expand dictionary to use all
available pretrained vectors in this file.
num_workers: number of CPU processes to use to preprocess batches.
"""
logger.info('Initializing model...')
self.model = DocReader.load(model or DEFAULTS['model'],
normalize=normalize)
if embedding_file:
logger.info('Expanding dictionary...')
words = utils.index_embedding_words(embedding_file)
added = self.model.expand_dictionary(words)
self.model.load_embeddings(added, embedding_file)
logger.info('Initializing tokenizer...')
annotators = tokenizers.get_annotators_for_model(self.model)
if not tokenizer:
tokenizer_class = DEFAULTS['tokenizer']
else:
tokenizer_class = tokenizers.get_class(tokenizer)
if num_workers is None or num_workers > 0:
self.workers = ProcessPool(
num_workers,
initializer=init,
initargs=(tokenizer_class, annotators),
)
else:
self.workers = None
self.tokenizer = tokenizer_class(annotators=annotators)
def predict(self, document, question, candidates=None, top_n=1):
"""Predict a single document - question pair."""
results = self.predict_batch([(document, question, candidates,)], top_n)
return results[0]
def predict_batch(self, batch, top_n=1):
"""Predict a batch of document - question pairs."""
documents, questions, candidates = [], [], []
for b in batch:
documents.append(b[0])
questions.append(b[1])
candidates.append(b[2] if len(b) == 3 else None)
candidates = candidates if any(candidates) else None
# Tokenize the inputs, perhaps multi-processed.
if self.workers:
q_tokens = self.workers.map_async(tokenize, questions)
d_tokens = self.workers.map_async(tokenize, documents)
q_tokens = list(q_tokens.get())
d_tokens = list(d_tokens.get())
else:
q_tokens = list(map(self.tokenizer.tokenize, questions))
d_tokens = list(map(self.tokenizer.tokenize, documents))
examples = []
for i in range(len(questions)):
examples.append({
'id': i,
'question': q_tokens[i].words(),
'qlemma': q_tokens[i].lemmas(),
'document': d_tokens[i].words(),
'lemma': d_tokens[i].lemmas(),
'pos': d_tokens[i].pos(),
'ner': d_tokens[i].entities(),
'answers': [(0,0)],
}) # use a fake answer for keeping vectorize function invariant
# Stick document tokens in candidates for decoding
if candidates:
candidates = [{'input': d_tokens[i], 'cands': candidates[i]}
for i in range(len(candidates))]
# Build the batch and run it through the model
batch_exs = batchify([vectorize(e, self.model) for e in examples])
(s, e, score), _ = self.model.predict(batch_exs, candidates, top_n)
# Retrieve the predicted spans
results = []
for i in range(len(s)):
predictions = []
for j in range(len(s[i])):
span = d_tokens[i].slice(s[i][j], e[i][j] + 1).untokenize()
predictions.append((span, score[i][j]))
results.append(predictions)
return results
def cuda(self):
self.model.cuda()
def cpu(self):
self.model.cpu()
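# Illustrative sketch of typical Predictor usage (assumption, not part of the
# original module; the model path and tokenizer name are placeholders):
#
#   predictor = Predictor(model='/path/to/reader.mdl', tokenizer='simple',
#                         num_workers=0)
#   spans = predictor.predict(document_text, question_text, top_n=3)
#   for span, score in spans:
#       print(span, score)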
|
tools/azure-sdk-tools/packaging_tools/conf.py | rsdoherty/azure-sdk-for-python | 2,728 | 12677877 | import logging
from pathlib import Path
from typing import Dict, Any
import pytoml as toml
_LOGGER = logging.getLogger(__name__)
CONF_NAME = "sdk_packaging.toml"
_SECTION = "packaging"
# Default conf
_CONFIG = {
"package_name": "packagename",
"package_nspkg": "packagenspkg",
"package_pprint_name": "MyService Management",
"package_doc_id": "",
"is_stable": False,
"is_arm": True,
"need_msrestazure": False, # track2 does not need it anymore in setup.py
"need_azuremgmtcore": True,
}
def read_conf(folder: Path) -> Dict[str, Any]:
conf_path = folder / CONF_NAME
if not conf_path.exists():
return {}
with open(conf_path, "rb") as fd:
return toml.load(fd)[_SECTION]
def build_default_conf(folder: Path, package_name: str) -> None:
conf_path = folder / CONF_NAME
if conf_path.exists():
_LOGGER.info("Skipping default conf since the file exists")
return
_LOGGER.info("Build default conf for %s", package_name)
conf = {_SECTION: _CONFIG.copy()}
conf[_SECTION]["package_name"] = package_name
conf[_SECTION]["package_nspkg"] = package_name[: package_name.rindex("-")] + "-nspkg"
with open(conf_path, "w") as fd:
toml.dump(conf, fd)
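# Illustrative sketch (assumption, not part of the original module): writing a
# default conf and reading it back. A temporary directory is used to avoid touching
# a real SDK folder.
def _example_conf_roundtrip():
    import tempfile
    folder = Path(tempfile.mkdtemp())
    build_default_conf(folder, "azure-mgmt-example")  # writes sdk_packaging.toml
    conf = read_conf(folder)                          # returns the [packaging] section
    assert conf["package_name"] == "azure-mgmt-example"
    return conf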
|
tests/pypokerengine/engine/round_manager_test.py | stupps/PyPokerEngine | 479 | 12677884 | from tests.base_unittest import BaseUnitTest
from mock import patch
from pypokerengine.engine.round_manager import RoundManager
from pypokerengine.engine.game_evaluator import GameEvaluator
from pypokerengine.engine.poker_constants import PokerConstants as Const
from pypokerengine.engine.player import Player
from pypokerengine.engine.pay_info import PayInfo
from pypokerengine.engine.card import Card
from pypokerengine.engine.deck import Deck
from pypokerengine.engine.table import Table
class RoundManagerTest(BaseUnitTest):
def setUp(self):
pass
def test_collect_blind(self):
state, _ = self.__start_round()
players = state["table"].seats.players
sb_amount = 5
self.eq(100-sb_amount, players[0].stack)
self.eq(100-sb_amount*2, players[1].stack)
self.eq("SMALLBLIND", players[0].action_histories[-1]["action"])
self.eq("BIGBLIND", players[1].action_histories[-1]["action"])
self.eq(sb_amount, players[0].pay_info.amount)
self.eq(sb_amount*2, players[1].pay_info.amount)
def test_collect_ante(self):
ante = 10
sb_amount = 5
table = self.__setup_table()
state, _ = RoundManager.start_new_round(1, sb_amount, ante, table)
players = state["table"].seats.players
self.eq(100-sb_amount-ante, players[0].stack)
self.eq(100-sb_amount*2-ante, players[1].stack)
self.eq(100-ante, players[2].stack)
self.eq("ANTE", players[0].action_histories[0]["action"])
self.eq("ANTE", players[1].action_histories[0]["action"])
self.eq("ANTE", players[2].action_histories[0]["action"])
self.eq(sb_amount+ante, players[0].pay_info.amount)
self.eq(sb_amount*2+ante, players[1].pay_info.amount)
self.eq(ante, players[2].pay_info.amount)
self.eq(sb_amount+sb_amount*2+ante*3, GameEvaluator.create_pot(players)[0]["amount"])
def test_collect_ante_skip_loser(self):
ante = 10
sb_amount = 5
table = self.__setup_table()
table.seats.players[2].stack = 0
table.seats.players[2].pay_info.status = PayInfo.FOLDED
state, _ = RoundManager.start_new_round(1, sb_amount, ante, table)
players = state["table"].seats.players
self.eq(sb_amount+sb_amount*2+ante*2, GameEvaluator.create_pot(players)[0]["amount"])
def test_deal_holecard(self):
state, _ = self.__start_round()
players = state["table"].seats.players
self.eq([Card.from_id(1), Card.from_id(2)], players[0].hole_card)
self.eq([Card.from_id(3), Card.from_id(4)], players[1].hole_card)
def test_message_after_start_round(self):
with patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_start_message', return_value="hoge"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message', return_value="fuga"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message', return_value="bar"):
_, msgs = self.__start_round()
self.eq(("uuid0", "hoge"), msgs[0])
self.eq(("uuid1", "hoge"), msgs[1])
self.eq(("uuid2", "hoge"), msgs[2])
self.eq((-1, "fuga"), msgs[3])
self.eq(("uuid2", "bar"), msgs[4])
def test_state_after_start_round(self):
state, msgs = self.__start_round()
self.eq(2, state["next_player"])
self.eq("SMALLBLIND", state["table"].seats.players[0].action_histories[0]["action"])
self.eq("BIGBLIND", state["table"].seats.players[1].action_histories[0]["action"])
def test_message_after_apply_action(self):
with patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_start_message', return_value="hoge"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message', return_value="fuga"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message', return_value="bar"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message', return_value="boo"):
state, _ = self.__start_round()
_, msgs = RoundManager.apply_action(state, "call", 10)
self.eq((-1, "boo"), msgs[0])
self.eq(("uuid0", "bar"), msgs[1])
def test_state_after_apply_call(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "call", 10)
self.eq(0, state["next_player"])
self.eq("CALL", state["table"].seats.players[2].action_histories[0]["action"])
def test_state_after_apply_raise(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "raise", 15)
self.eq(0, state["next_player"])
self.eq("RAISE", state["table"].seats.players[2].action_histories[0]["action"])
def test_message_after_forward_to_flop(self):
with patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message', return_value="fuga"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message', return_value="bar"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message', return_value="boo"):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
_, msgs = RoundManager.apply_action(state, "call", 10)
self.eq((-1, "boo"), msgs[0])
self.eq((-1, "fuga"), msgs[1])
self.eq(("uuid0", "bar"), msgs[2])
def test_state_after_forward_to_flop(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
self.eq(Const.Street.FLOP, state["street"])
self.eq(0, state["next_player"])
self.eq([Card.from_id(cid) for cid in range(7,10)], state["table"].get_community_card())
fetch_player = lambda uuid: [p for p in state["table"].seats.players if p.uuid==uuid][0]
self.true(all(map(lambda p: len(p.action_histories)==0, state["table"].seats.players)))
self.eq(2, len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))
self.eq(2, len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))
self.eq(1, len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))
self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.TURN])
def test_state_after_forward_to_turn(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, msgs = RoundManager.apply_action(state, "call", 0)
self.eq(Const.Street.TURN, state["street"])
self.eq([Card.from_id(cid) for cid in range(7,11)], state["table"].get_community_card())
self.eq(3, len(msgs))
fetch_player = lambda uuid: [p for p in state["table"].seats.players if p.uuid==uuid][0]
self.true(all(map(lambda p: len(p.action_histories)==0, state["table"].seats.players)))
self.eq(2, len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))
self.eq(2, len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))
self.eq(1, len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))
self.eq(1, len(fetch_player("uuid0").round_action_histories[Const.Street.FLOP]))
self.eq(1, len(fetch_player("uuid1").round_action_histories[Const.Street.FLOP]))
self.eq(0, len(fetch_player("uuid2").round_action_histories[Const.Street.FLOP]))
self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.TURN])
def test_state_after_forward_to_river(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, msgs = RoundManager.apply_action(state, "call", 0)
self.eq(Const.Street.RIVER, state["street"])
self.eq([Card.from_id(cid) for cid in range(7,12)], state["table"].get_community_card())
self.eq(3, len(msgs))
fetch_player = lambda uuid: [p for p in state["table"].seats.players if p.uuid==uuid][0]
self.true(all(map(lambda p: len(p.action_histories)==0, state["table"].seats.players)))
self.eq(2, len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))
self.eq(2, len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))
self.eq(1, len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))
self.eq(1, len(fetch_player("uuid0").round_action_histories[Const.Street.FLOP]))
self.eq(1, len(fetch_player("uuid1").round_action_histories[Const.Street.FLOP]))
self.eq(0, len(fetch_player("uuid2").round_action_histories[Const.Street.FLOP]))
self.eq(1, len(fetch_player("uuid0").round_action_histories[Const.Street.TURN]))
self.eq(1, len(fetch_player("uuid1").round_action_histories[Const.Street.TURN]))
self.eq(0, len(fetch_player("uuid2").round_action_histories[Const.Street.TURN]))
self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.RIVER])
def test_state_after_showdown(self):
mock_return = [1,0]*3
with patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand', side_effect=mock_return),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message', return_value="bogo"):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
self.eq(Const.Street.FINISHED, state["street"])
self.eq(110, state["table"].seats.players[0].stack)
self.eq( 90, state["table"].seats.players[1].stack)
self.eq(100, state["table"].seats.players[2].stack)
self.true(all(map(lambda p: len(p.action_histories)==0, state["table"].seats.players)))
self.true(all(map(lambda p: p.round_action_histories==[None]*4, state["table"].seats.players)))
def test_message_after_showdown(self):
mock_return = [1,0]*3
with patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand', side_effect=mock_return),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message', return_value="boo"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message', return_value="foo"):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
_, msgs = RoundManager.apply_action(state, "call", 0)
self.eq((-1, "boo"), msgs[0])
self.eq((-1, "foo"), msgs[1])
def test_table_reset_after_showdown(self):
mock_return = [1,0]*3
with patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand', side_effect=mock_return),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message', return_value="boo"),\
patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message', return_value="foo"):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "call", 0)
table = state["table"]
player = state["table"].seats.players[0]
self.eq(52, table.deck.size())
self.eq([], table.get_community_card())
self.eq([], player.hole_card)
self.eq([], player.action_histories)
self.eq(PayInfo.PAY_TILL_END, player.pay_info.status)
def test_message_skip_when_only_one_player_is_active(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "fold", 0)
state, msgs = RoundManager.apply_action(state, "fold", 0)
self.eq(Const.Street.FINISHED, state["street"])
self.false("street_start_message" in [msg["message"]["message_type"] for _, msg in msgs])
def test_ask_player_target_when_dealer_btn_player_is_folded(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "fold", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, msgs = RoundManager.apply_action(state, "call", 0)
self.eq("uuid1", msgs[-1][0])
def test_skip_asking_to_allin_player(self):
state, _ = self.__start_round()
# Round 1
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "raise", 50)
state, _ = RoundManager.apply_action(state, "call", 50)
state, _ = RoundManager.apply_action(state, "fold", 0)
self.eq([95, 40, 165], [p.stack for p in state["table"].seats.players])
# Round 2
state["table"].shift_dealer_btn()
state["table"].set_blind_pos(1, 2)
state, _ = RoundManager.start_new_round(2, 5, 0, state["table"])
state, _ = RoundManager.apply_action(state, "raise", 40)
state, _ = RoundManager.apply_action(state, "call", 40)
state, _ = RoundManager.apply_action(state, "raise", 70)
state, msgs = RoundManager.apply_action(state, "call", 70)
self.eq([25, 0, 95], [p.stack for p in state["table"].seats.players])
self.eq(1, state["street"])
self.eq("uuid2", msgs[-1][0])
def test_when_only_one_player_is_waiting_ask(self):
state, _ = self.__start_round()
# Round 1
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "raise", 50)
state, _ = RoundManager.apply_action(state, "call", 50)
state, _ = RoundManager.apply_action(state, "fold", 0)
self.eq([95, 40, 165], [p.stack for p in state["table"].seats.players])
# Round 2
state["table"].shift_dealer_btn()
state, _ = RoundManager.start_new_round(2, 5, 0, state["table"])
state, _ = RoundManager.apply_action(state, "raise", 40)
state, _ = RoundManager.apply_action(state, "call", 40)
state, _ = RoundManager.apply_action(state, "raise", 70)
state, _ = RoundManager.apply_action(state, "call", 70)
state, _ = RoundManager.apply_action(state, "call", 0)
state, _ = RoundManager.apply_action(state, "raise", 10)
state, _ = RoundManager.apply_action(state, "call", 10)
state, _ = RoundManager.apply_action(state, "raise", 85)
state, _ = RoundManager.apply_action(state, "call", 85)
def test_ask_big_blind_in_preflop(self):
state, _ = self.__start_round()
state, _ = RoundManager.apply_action(state, "call", 10)
state, msg = RoundManager.apply_action(state, "call", 10)
self.eq("uuid1", msg[-1][0])
self.eq(Const.Street.PREFLOP, state["street"])
def test_everyone_agree_logic_regression(self):
players = [Player("uuid%d" % i, 100) for i in range(4)]
players[0].stack = 150
players[1].stack = 150
players[2].stack = 50
players[3].stack = 50
deck = Deck(cheat=True, cheat_card_ids=range(1,53))
table = Table(cheat_deck=deck)
for player in players: table.seats.sitdown(player)
table.dealer_btn = 3
table.set_blind_pos(0, 1)
state, _ = RoundManager.start_new_round(1, 5, 0, table)
state, _ = RoundManager.apply_action(state, "raise", 15)
state, _ = RoundManager.apply_action(state, "raise", 20)
state, _ = RoundManager.apply_action(state, "raise", 25)
state, _ = RoundManager.apply_action(state, "raise", 30)
state, _ = RoundManager.apply_action(state, "raise", 50)
state, _ = RoundManager.apply_action(state, "call", 50)
state, _ = RoundManager.apply_action(state, "raise", 125)
state, _ = RoundManager.apply_action(state, "call", 125)
state, _ = RoundManager.apply_action(state, "fold", 0)
state, _ = RoundManager.apply_action(state, "fold", 0)
self.eq(Const.Street.FINISHED, state["street"])
def test_add_amount_calculation_when_raise_on_ante(self):
table = self.__setup_table()
pot_amount = lambda state: GameEvaluator.create_pot(state["table"].seats.players)[0]["amount"]
stack_check = lambda expected, state: self.eq(expected, [p.stack for p in state["table"].seats.players])
start_state, _ = RoundManager.start_new_round(1, 10, 5, table)
self.eq(45, pot_amount(start_state))
stack_check([85, 75, 95], start_state)
folded_state, _ = RoundManager.apply_action(start_state, "fold", 0)
called_state, _ = RoundManager.apply_action(folded_state, "call", 20)
self.eq(55, pot_amount(called_state))
stack_check([85, 75, 95], start_state)
called_state, _ = RoundManager.apply_action(start_state, "call", 20)
self.eq(20, called_state["table"].seats.players[2].action_histories[-1]["paid"])
self.eq(65, pot_amount(called_state))
raised_state, _ = RoundManager.apply_action(start_state, "raise", 30)
self.eq(30, raised_state["table"].seats.players[2].action_histories[-1]["paid"])
self.eq(75, pot_amount(raised_state))
def test_deepcopy_state(self):
table = self.__setup_table()
original = RoundManager._RoundManager__gen_initial_state(2, 5, table)
copied = RoundManager._RoundManager__deep_copy_state(original)
check = lambda key: self.eq(original[key], copied[key])
[check(key) for key in ["round_count", "small_blind_amount", "street", "next_player"]]
def __start_round(self):
table = self.__setup_table()
round_count = 1
small_blind_amount = 5
ante = 0
return RoundManager.start_new_round(round_count, small_blind_amount, ante, table)
def __setup_table(self):
players = [Player("uuid%d" % i, 100) for i in range(3)]
deck = Deck(cheat=True, cheat_card_ids=range(1,53))
table = Table(cheat_deck=deck)
for player in players: table.seats.sitdown(player)
table.dealer_btn = 2
table.set_blind_pos(0, 1)
return table
|
tests/observation/test_processing_of_namespaces.py | tavaresrodrigo/kopf | 855 | 12677888 | <reponame>tavaresrodrigo/kopf<gh_stars>100-1000
import asyncio
import async_timeout
import pytest
from kopf._cogs.structs.bodies import RawBody, RawEvent
from kopf._cogs.structs.references import Insights
from kopf._core.reactor.observation import process_discovered_namespace_event
async def test_initial_listing_is_ignored():
insights = Insights()
e1 = RawEvent(type=None, object=RawBody(metadata={'name': 'ns1'}))
async def delayed_injection(delay: float):
await asyncio.sleep(delay)
await process_discovered_namespace_event(
insights=insights, raw_event=e1, namespaces=['ns*'])
task = asyncio.create_task(delayed_injection(0))
with pytest.raises(asyncio.TimeoutError):
async with async_timeout.timeout(0.1) as timeout:
async with insights.revised:
await insights.revised.wait()
await task
assert timeout.expired
assert not insights.namespaces
@pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED'])
async def test_followups_for_addition(timer, etype):
insights = Insights()
e1 = RawEvent(type=etype, object=RawBody(metadata={'name': 'ns1'}))
async def delayed_injection(delay: float):
await asyncio.sleep(delay)
await process_discovered_namespace_event(
insights=insights, raw_event=e1, namespaces=['ns*'])
task = asyncio.create_task(delayed_injection(0.1))
async with timer, async_timeout.timeout(1):
async with insights.revised:
await insights.revised.wait()
await task
assert 0.1 < timer.seconds < 0.11
assert insights.namespaces == {'ns1'}
@pytest.mark.parametrize('etype', ['DELETED'])
async def test_followups_for_deletion(timer, etype):
insights = Insights()
insights.namespaces.add('ns1')
e1 = RawEvent(type=etype, object=RawBody(metadata={'name': 'ns1'}))
async def delayed_injection(delay: float):
await asyncio.sleep(delay)
await process_discovered_namespace_event(
insights=insights, raw_event=e1, namespaces=['ns*'])
task = asyncio.create_task(delayed_injection(0.1))
async with timer, async_timeout.timeout(1):
async with insights.revised:
await insights.revised.wait()
await task
assert 0.1 < timer.seconds < 0.11
assert not insights.namespaces
|
alibi_detect/cd/tensorflow/mmd.py | sugatoray/alibi-detect | 1,227 | 12677890 | <filename>alibi_detect/cd/tensorflow/mmd.py
import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
logger = logging.getLogger(__name__)
class MMDDriftTF(BaseMMDDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
preprocess_x_ref: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
preprocess_x_ref
Whether to already preprocess and store the reference data.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
preprocess_x_ref=preprocess_x_ref,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
configure_kernel_from_x_ref=configure_kernel_from_x_ref,
n_permutations=n_permutations,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': 'tensorflow'})
# initialize kernel
if isinstance(sigma, np.ndarray):
sigma = tf.convert_to_tensor(sigma)
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
if self.infer_sigma or isinstance(sigma, tf.Tensor):
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=self.infer_sigma)
self.infer_sigma = False
else:
self.k_xx, self.infer_sigma = None, True
def kernel_matrix(self, x: Union[np.ndarray, tf.Tensor], y: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
""" Compute and return full kernel matrix between arrays x and y. """
k_xy = self.kernel(x, y, self.infer_sigma)
k_xx = self.k_xx if self.k_xx is not None and self.update_x_ref is None else self.kernel(x, x)
k_yy = self.kernel(y, y)
kernel_mat = tf.concat([tf.concat([k_xx, k_xy], 1), tf.concat([tf.transpose(k_xy, (1, 0)), k_yy], 1)], 0)
return kernel_mat
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, np.ndarray]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set
and the MMD^2 values from the permutation test.
"""
x_ref, x = self.preprocess(x)
# compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix
n = x.shape[0]
kernel_mat = self.kernel_matrix(x_ref, x)
kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat)) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, n, permute=False, zero_diag=False).numpy()
mmd2_permuted = np.array(
[mmd2_from_kernel_matrix(kernel_mat, n, permute=True, zero_diag=False).numpy()
for _ in range(self.n_permutations)]
)
p_val = (mmd2 <= mmd2_permuted).mean()
return p_val, mmd2, mmd2_permuted
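
# The block below is an illustrative sketch only and not part of the original module:
# it shows roughly how MMDDriftTF could be exercised end to end. The random
# reference/test arrays and the printed summary are assumptions made for the demo.
if __name__ == "__main__":
    np.random.seed(0)
    x_ref_demo = np.random.randn(200, 10).astype(np.float32)            # reference sample
    x_test_demo = (np.random.randn(100, 10) + 1.).astype(np.float32)    # shifted test sample
    detector = MMDDriftTF(x_ref_demo, p_val=.05, n_permutations=100)
    p_value, mmd2_stat, _ = detector.score(x_test_demo)
    print("p-value: %.3f, MMD^2: %.5f, drift detected: %s"
          % (p_value, mmd2_stat, p_value < .05))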
|
Lib/test/test_email/test_pickleable.py | shawwn/cpython | 52,316 | 12677898 | <gh_stars>1000+
import unittest
import textwrap
import copy
import pickle
import email
import email.message
from email import policy
from email.headerregistry import HeaderRegistry
from test.test_email import TestEmailBase, parameterize
@parameterize
class TestPickleCopyHeader(TestEmailBase):
header_factory = HeaderRegistry()
unstructured = header_factory('subject', 'this is a test')
header_params = {
'subject': ('subject', 'this is a test'),
'from': ('from', '<EMAIL>'),
'to': ('to', 'a: <EMAIL>, <EMAIL>;, <EMAIL>'),
'date': ('date', 'Tue, 29 May 2012 09:24:26 +1000'),
}
def header_as_deepcopy(self, name, value):
header = self.header_factory(name, value)
h = copy.deepcopy(header)
self.assertEqual(str(h), str(header))
def header_as_pickle(self, name, value):
header = self.header_factory(name, value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(header, proto)
h = pickle.loads(p)
self.assertEqual(str(h), str(header))
@parameterize
class TestPickleCopyMessage(TestEmailBase):
# Message objects are a sequence, so we have to make them a one-tuple in
# msg_params so they get passed to the parameterized test method as a
# single argument instead of as a list of headers.
msg_params = {}
# Note: there will be no custom header objects in the parsed message.
msg_params['parsed'] = (email.message_from_string(textwrap.dedent("""\
Date: Tue, 29 May 2012 09:24:26 +1000
From: <EMAIL>
To: <EMAIL>
Subject: help
I think I forgot the ring.
"""), policy=policy.default),)
msg_params['created'] = (email.message.Message(policy=policy.default),)
msg_params['created'][0]['Date'] = 'Tue, 29 May 2012 09:24:26 +1000'
msg_params['created'][0]['From'] = '<EMAIL>'
msg_params['created'][0]['To'] = '<EMAIL>'
msg_params['created'][0]['Subject'] = 'help'
msg_params['created'][0].set_payload('I think I forgot the ring.')
def msg_as_deepcopy(self, msg):
msg2 = copy.deepcopy(msg)
self.assertEqual(msg2.as_string(), msg.as_string())
def msg_as_pickle(self, msg):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(msg, proto)
msg2 = pickle.loads(p)
self.assertEqual(msg2.as_string(), msg.as_string())
if __name__ == '__main__':
unittest.main()
|
tests/test_serial/test_arduinocore.py | AFTC-1/Arduino-rpi | 178 | 12677946 | import common
from nanpy.arduinotree import ArduinoTree
from nose.tools import eq_
from nose.tools import ok_
def setup():
common.setup()
def test():
a = ArduinoTree()
eq_(a.core.digitalPinToBitMask(2), 4)
eq_(a.core.digitalPinToPort(2), 4)
eq_(a.core.digitalPinToTimer(2), 0)
eq_(a.core.analogInPinToBit(2), 2)
def test_ports():
a = ArduinoTree()
eq_(a.core.portInputRegister(0), 0) # NOT_A_PORT
eq_(a.core.portInputRegister(1), 0) # NOT_A_PORT
eq_(a.core.portInputRegister(2), 35) # PINB
eq_(a.core.portInputRegister(3), 38) # PINC
eq_(a.core.portInputRegister(4), 41) # PIND
eq_(a.core.portModeRegister(0), 0) # NOT_A_PORT
eq_(a.core.portModeRegister(1), 0) # NOT_A_PORT
eq_(a.core.portModeRegister(2), 36) # DDRB
eq_(a.core.portModeRegister(3), 39) # DDRC
eq_(a.core.portModeRegister(4), 42) # DDRD
eq_(a.core.portOutputRegister(0), 0) # NOT_A_PORT
eq_(a.core.portOutputRegister(1), 0) # NOT_A_PORT
eq_(a.core.portOutputRegister(2), 37) # PORTB
eq_(a.core.portOutputRegister(3), 40) # PORTC
eq_(a.core.portOutputRegister(4), 43) # PORTD
|
examples/tabular/higgs/interpret.py | Locust2520/path_explain | 145 | 12677971 | import tensorflow as tf
import numpy as np
from path_explain.utils import set_up_environment
from path_explain.path_explainer_tf import PathExplainerTF
from preprocess import higgs_dataset
from train import build_model
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_examples', 10000, 'Number of inputs to run attributions on')
flags.DEFINE_integer('num_samples', 300, 'Number of samples to use when computing attributions')
def interpret(argv=None):
set_up_environment(visible_devices=FLAGS.visible_devices)
train_set, test_set, vald_set = higgs_dataset(batch_size=FLAGS.batch_size,
num_parallel_calls=8,
buffer_size=10000,
seed=0,
scale=True,
include_vald=True)
print('Loading model...')
model = build_model(weight_decay=FLAGS.weight_decay,
num_layers=FLAGS.num_layers,
hidden_units=FLAGS.hidden_units,
for_interpretation=True)
model.load_weights('model.h5', by_name=True)
print('Gathering inputs...')
training_iters = int(10000 / FLAGS.batch_size)
training_samples = []
for i, (x_batch, _) in enumerate(train_set):
training_samples.append(x_batch)
if i >= training_iters:
break
training_samples = tf.concat(training_samples, axis=0)
input_samples = []
true_labels = []
pred_output = []
num_accumulated = 0
for x_batch, label_batch in test_set:
pred_labels = model(x_batch)
correct_mask = (pred_labels[:, 0].numpy() > 0.5).astype(int) == label_batch
input_samples.append(x_batch.numpy()[correct_mask])
pred_output.append(pred_labels.numpy()[correct_mask, 0])
true_labels.append(label_batch.numpy()[correct_mask])
num_accumulated += np.sum(correct_mask)
if num_accumulated >= FLAGS.num_examples:
break
input_samples = np.concatenate(input_samples, axis=0).astype(np.float32)
true_labels = np.concatenate(true_labels, axis=0)
pred_output = np.concatenate(pred_output, axis=0)
np.save('input_samples.npy', input_samples)
np.save('pred_output.npy', pred_output)
np.save('true_labels.npy', true_labels)
explainer = PathExplainerTF(model)
print('Computing attributions...')
attributions = explainer.attributions(inputs=input_samples,
baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
batch_size=FLAGS.batch_size,
num_samples=FLAGS.num_samples,
use_expectation=False,
output_indices=0,
verbose=True)
np.save('attributions.npy', attributions)
print('Computing interactions...')
interactions = explainer.interactions(inputs=input_samples,
baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
batch_size=FLAGS.batch_size,
num_samples=FLAGS.num_samples,
use_expectation=False,
output_indices=0,
verbose=True)
np.save('interactions.npy', interactions)
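
# Illustrative helper (an assumption, not part of the original script): once
# interpret() has written attributions.npy, features can be ranked by mean
# absolute attribution roughly like this.
def summarize_attributions(path='attributions.npy', top_k=5):
    attributions = np.load(path)                    # shape: (num_examples, num_features)
    mean_abs = np.abs(attributions).mean(axis=0)    # average importance per feature
    return np.argsort(mean_abs)[::-1][:top_k]       # indices of the top-k features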
if __name__ == '__main__':
app.run(interpret) |
legacy/datasources/base_datasource.py | ParikhKadam/zenml | 1,275 | 12677979 | # Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Base Class for all ZenML datasources"""
import json
import os
from abc import abstractmethod
from typing import Text, Dict, Optional, Callable
from uuid import uuid4
from zenml.enums import GDPComponent
from zenml.exceptions import AlreadyExistsException
from zenml.exceptions import EmptyDatasourceException
from zenml.exceptions import InitializationException
from zenml.logger import get_logger
from zenml.metadata import ZenMLMetadataStore
from zenml.repo import Repository, ArtifactStore
from zenml.standards import standard_keys as keys
from zenml.utils import path_utils
from zenml.utils import source_utils
from zenml.utils.analytics_utils import CREATE_DATASOURCE
from zenml.utils.analytics_utils import track
# from zenml.utils.post_training.post_training_utils import \
# view_schema, get_feature_spec_from_schema, \
# convert_raw_dataset_to_pandas, view_statistics
from zenml.utils.print_utils import to_pretty_string, PrintStyles
logger = get_logger(__name__)
class BaseDatasource:
"""Base class for all ZenML datasources.
Every ZenML datasource should subclass this class.
"""
def __init__(
self,
name: Text,
_id: Text = None,
backend=None,
metadata_store: Optional[ZenMLMetadataStore] = None,
artifact_store: Optional[ArtifactStore] = None,
commits: Optional[Dict] = None,
*args,
**kwargs):
"""
Construct the datasource.
Args:
name (str): name of datasource
schema (dict): schema of datasource
_id: unique ID (for internal use)
"""
if _id:
# Its loaded from config
self._id = _id
logger.debug(f'Datasource {name} loaded.')
else:
# If none, then this is assumed to be 'new'. Check dupes.
all_names = Repository.get_instance().get_datasource_names()
if any(d == name for d in all_names):
raise AlreadyExistsException(
name=name,
resource_type='datasource')
self._id = str(uuid4())
track(event=CREATE_DATASOURCE)
logger.info(f'Datasource {name} created.')
# Metadata store
if metadata_store:
self.metadata_store: ZenMLMetadataStore = metadata_store
else:
# use default
try:
self.metadata_store: ZenMLMetadataStore = \
Repository.get_instance().get_default_metadata_store()
except InitializationException:
self.metadata_store = None
# Default to local
if backend is None:
from zenml.backends.orchestrator import OrchestratorBaseBackend
self.backend = OrchestratorBaseBackend()
else:
self.backend = backend
# Artifact store
if artifact_store:
self.artifact_store = artifact_store
else:
# use default
try:
self.artifact_store = \
Repository.get_instance().get_default_artifact_store()
except InitializationException:
self.artifact_store = None
if commits is None:
self.commits = {}
else:
self.commits = commits
self.name = name
self._immutable = False
self._source = source_utils.resolve_class(self.__class__)
self._source_args = json.dumps(kwargs)
def __str__(self):
return to_pretty_string(self.to_config())
def __repr__(self):
return to_pretty_string(self.to_config(), style=PrintStyles.PPRINT)
@property
def is_empty(self):
if self.commits:
return False
return True
@property
def n_datapoints(self):
# """Gets total number of datapoints in datasource"""
# pipeline = self._get_one_pipeline()
# data_files = self._get_data_file_paths(pipeline)
# return sum(1 for _ in tf.data.TFRecordDataset(data_files,
# compression_type='GZIP'))
raise NotImplementedError
@abstractmethod
def process(self, output_path: Text, make_beam_pipeline: Callable = None):
pass
def commit(self):
from zenml.pipelines.data_pipeline import DataPipeline
data_pipeline = DataPipeline(
enable_cache=False,
backend=self.backend,
metadata_store=self.metadata_store,
artifact_store=self.artifact_store,
datasource=self
)
data_pipeline.run()
commit_id = data_pipeline.pipeline_name.split('_')[2]
self.commits[commit_id] = data_pipeline.pipeline_name.split('_')[1]
return commit_id
def _assert_commit_id(self, commit_id: Text):
"""Asserts commit_id is in self.commits"""
if commit_id not in self.commits:
raise AssertionError(
f'There is no such commit_id as {commit_id} in the '
f'datasource {self.name}')
@classmethod
def from_config(cls, config: Dict):
"""
Convert from Data Step config to ZenML Datasource object.
The data step is also populated and its configuration is set to the
parameters given in the config file.
Args:
config: a DataStep config in dict-form (probably loaded from YAML).
"""
if keys.DatasourceKeys.SOURCE not in config[
keys.PipelineKeys.DATASOURCE]:
return None # can be empty
# this is the data step config block
source = config[keys.PipelineKeys.DATASOURCE][
keys.DatasourceKeys.SOURCE]
datasource_class = source_utils.load_source_path_class(source)
datasource_name = config[keys.PipelineKeys.DATASOURCE][
keys.DatasourceKeys.NAME]
_id = config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.ID]
args = json.loads(
config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.ARGS])
# start with artifact store
artifact_store = ArtifactStore(config[keys.PipelineKeys.DATASOURCE][
keys.DatasourceKeys.ARTIFACT_STORE])
# metadata store
metadata_store: ZenMLMetadataStore = ZenMLMetadataStore.from_config(
config=config[keys.PipelineKeys.DATASOURCE][
keys.DatasourceKeys.METADATA_STORE]
)
# backend
from zenml.backends.orchestrator import OrchestratorBaseBackend
backend = OrchestratorBaseBackend.from_config(
config=config[keys.PipelineKeys.DATASOURCE][
keys.DatasourceKeys.BACKEND]
)
# resolve commits
data_pipeline_names = \
metadata_store.get_data_pipeline_names_from_datasource_name(
datasource_name)
# ugly hack to recompile the commit times
commits = {}
if data_pipeline_names:
commits = {x.split('_')[2]: x.split('_')[1] for x in
data_pipeline_names}
obj = datasource_class(
name=datasource_name, _id=_id, commits=commits, backend=backend,
metadata_store=metadata_store, artifact_store=artifact_store,
**args)
obj._immutable = True
return obj
def to_config(self):
"""Converts datasource to ZenML config block."""
return {
keys.DatasourceKeys.NAME: self.name,
keys.DatasourceKeys.SOURCE: self._source,
keys.DatasourceKeys.ARGS: self._source_args,
keys.DatasourceKeys.ID: self._id,
keys.DatasourceKeys.METADATA_STORE:
self.metadata_store.to_config(),
keys.DatasourceKeys.ARTIFACT_STORE: self.artifact_store.path,
keys.DatasourceKeys.BACKEND: self.backend.to_config()
}
def get_latest_commit(self):
a = [k for k, v in
sorted(self.commits.items(), key=lambda item: item[1])]
if a:
return a[-1]
def get_first_commit(self):
a = [k for k, v in
sorted(self.commits.items(), key=lambda item: item[1])]
if a:
return a[0]
def get_data_pipeline_from_commit(self, commit_id: Text):
from zenml.pipelines.data_pipeline import DataPipeline
self._assert_commit_id(commit_id)
repo: Repository = Repository.get_instance()
name = DataPipeline.get_name_from_pipeline_name(
DataPipeline.PIPELINE_TYPE + '_' + self.commits[
commit_id] + '_' + commit_id)
return repo.get_pipeline_by_name(name)
def _get_one_pipeline(self):
"""Gets representative pipeline from all pipelines associated."""
if self.commits:
return self.get_data_pipeline_from_commit(
list(self.commits.keys())[0])
raise EmptyDatasourceException
def _get_data_file_paths(self, pipeline):
"""
Gets path where data is stored as list of file paths.
Args:
pipeline: a pipeline with this datasource embedded
"""
if pipeline.datasource._id != self._id:
raise AssertionError('This pipeline does not belong to this '
'datasource.')
# Take any pipeline and get the datagen
data_uri = os.path.join(pipeline.get_artifacts_uri_by_component(
GDPComponent.DataGen.name
)[0], 'Split-examples')
data_files = path_utils.list_dir(data_uri)
return data_files
def sample_data(self, sample_size: int = 100000):
"""
Samples data from the datasource as a pandas DataFrame.
Args:
sample_size: # of rows to sample.
"""
# pipeline = self._get_one_pipeline()
# data_files = self._get_data_file_paths(pipeline)
#
# schema_uri = pipeline.get_artifacts_uri_by_component(
# GDPComponent.DataSchema.name)[0]
# spec = get_feature_spec_from_schema(schema_uri)
#
# dataset = tf.data.TFRecordDataset(data_files, compression_type='GZIP')
# return convert_raw_dataset_to_pandas(dataset, spec, sample_size)
raise NotImplementedError
# TODO [High]: Completely hacked code to get this to work
def get_artifact_uri_by_component_and_commit_id(
self, commit_id: Text, component_name: Text):
"""
Gets the artifact URI by component and commit id.
Args:
commit_id:
component_name:
"""
from zenml.pipelines.data_pipeline import DataPipeline
store = self.metadata_store.store
run_contexts = store.get_contexts_by_type(
ZenMLMetadataStore.RUN_TYPE_NAME)
run_contexts = [x for x in run_contexts if
x.name.startswith(DataPipeline.PIPELINE_TYPE)]
# now filter to the datasource name through executions
commit_context = None
for c in run_contexts:
es = store.get_executions_by_context(c.id)
for e in es:
if 'name' in e.custom_properties and e.custom_properties[
'name'].string_value == self.name:
if commit_id in c.name:
commit_context = c
if commit_context is None:
raise AssertionError(
f'Commit {commit_id} not found in metadata store for '
f'datasource: {self.name}')
# First get the context of the component and its artifacts
component_context = [c for c in store.get_contexts_by_type(
ZenMLMetadataStore.NODE_TYPE_NAME) if
c.name.endswith(component_name)][0]
component_artifacts = store.get_artifacts_by_context(
component_context.id)
# Second, get the context of the particular pipeline and its artifacts
pipeline_artifacts = store.get_artifacts_by_context(
commit_context.id)
# Figure out the matching ids and get URIs
return [a.uri for a in component_artifacts
if a.id in [p.id for p in pipeline_artifacts]]
# def view_schema(self, commit_id: Text = None):
# """
# View schema of data flowing in pipeline.
#
# Args:
# commit_id: used to specify which commit's schema to use, if None
# uses latest
# """
# if commit_id is None:
# commit_id = self.get_latest_commit()
# self._assert_commit_id(commit_id)
#
# pipeline = self.get_data_pipeline_from_commit(commit_id)
# uri = pipeline.get_artifacts_uri_by_component(
# GDPComponent.DataSchema.name)[0]
# view_schema(uri)
#
# def view_statistics(self, commit_id: Text = None, port: int = None,
# magic: bool = False):
# """
# View statistics of data flowing in pipeline.
#
# Args:
# port (int): Port at which to launch the statistics facet.
# commit_id: used to specify which commit's schema to use, if None
# uses latest
# magic (bool): Whether to display within a jupyter notebook or not
# """
# if commit_id is None:
# commit_id = self.get_latest_commit()
# self._assert_commit_id(commit_id)
# pipeline = self.get_data_pipeline_from_commit(commit_id)
# uri = pipeline.get_artifacts_uri_by_component(
# GDPComponent.DataStatistics.name)[0]
# view_statistics(uri, port=port, magic=magic)
|
gtfspy/networks.py | Leo-Ryu/gtfspy | 118 | 12678000 | <reponame>Leo-Ryu/gtfspy<filename>gtfspy/networks.py
import networkx
import pandas as pd
from math import isnan
from gtfspy import route_types
from gtfspy.util import wgs84_distance, graph_node_attrs
from warnings import warn
ALL_STOP_TO_STOP_LINK_ATTRIBUTES = [
"capacity_estimate", "duration_min", "duration_max",
"duration_median", "duration_avg", "n_vehicles", "route_types",
"d", "distance_shape",
"route_I_counts"
]
DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES = [
"n_vehicles", "duration_avg",
"d", "route_I_counts"
]
def walk_transfer_stop_to_stop_network(gtfs, max_link_distance=None):
"""
Construct the walk network.
If OpenStreetMap-based walking distances have been computed, then those are used as the distance.
Otherwise, the great circle distance ("d") is used.
Parameters
----------
gtfs: gtfspy.GTFS
max_link_distance: int, optional
If given, all walking transfers with great circle distance longer
than this limit (expressed in meters) will be omitted.
Returns
-------
net: networkx.DiGraph
edges have attributes
d:
straight-line distance between stops
d_walk:
distance along the road/tracks/..
"""
if max_link_distance is None:
max_link_distance = 1000
net = networkx.Graph()
_add_stops_to_net(net, gtfs.get_table("stops"))
stop_distances = gtfs.get_table("stop_distances")
if stop_distances["d_walk"][0] is None:
osm_distances_available = False
warn("Warning: OpenStreetMap-based walking distances have not been computed, using euclidean distances instead."
"Ignore this warning if running unit tests.")
else:
osm_distances_available = True
for stop_distance_tuple in stop_distances.itertuples():
from_node = stop_distance_tuple.from_stop_I
to_node = stop_distance_tuple.to_stop_I
if osm_distances_available:
if stop_distance_tuple.d_walk > max_link_distance or isnan(stop_distance_tuple.d_walk):
continue
data = {'d': stop_distance_tuple.d, 'd_walk': stop_distance_tuple.d_walk}
else:
if stop_distance_tuple.d > max_link_distance:
continue
data = {'d': stop_distance_tuple.d}
net.add_edge(from_node, to_node, **data)
return net
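
# Illustrative sketch (not part of the original module): given a GTFS sqlite
# database, the walk network could be built roughly like this. The GTFS import
# path and the database filename are assumptions.
def _example_walk_network(gtfs_sqlite_path="some_city.sqlite"):
    from gtfspy.gtfs import GTFS   # assumed import location of the GTFS class
    g = GTFS(gtfs_sqlite_path)
    walk_net = walk_transfer_stop_to_stop_network(g, max_link_distance=500)
    return walk_net.number_of_nodes(), walk_net.number_of_edges()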
def stop_to_stop_network_for_route_type(gtfs,
route_type,
link_attributes=None,
start_time_ut=None,
end_time_ut=None):
"""
Get a stop-to-stop network describing a single mode of travel.
Parameters
----------
gtfs : gtfspy.GTFS
route_type : int
See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types.
link_attributes: list[str], optional
defaulting to use the following link attributes:
"n_vehicles" : Number of vehicles passed
"duration_min" : minimum travel time between stops
"duration_max" : maximum travel time between stops
"duration_median" : median travel time between stops
"duration_avg" : average travel time between stops
"d" : distance along straight line (wgs84_distance)
"distance_shape" : minimum distance along shape
"capacity_estimate" : approximate capacity passed through the stop
"route_I_counts" : dict from route_I to counts
start_time_ut: int
start time of the time span (in unix time)
end_time_ut: int
end time of the time span (in unix time)
Returns
-------
net: networkx.DiGraph
A directed graph of stop-to-stop links for the given route type
"""
if link_attributes is None:
link_attributes = DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES
assert(route_type in route_types.TRANSIT_ROUTE_TYPES)
stops_dataframe = gtfs.get_stops_for_route_type(route_type)
net = networkx.DiGraph()
_add_stops_to_net(net, stops_dataframe)
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
if len(net.nodes()) < 2:
assert events_df.shape[0] == 0
# group events by links, and loop over them (i.e. each link):
link_event_groups = events_df.groupby(['from_stop_I', 'to_stop_I'], sort=False)
for key, link_events in link_event_groups:
from_stop_I, to_stop_I = key
assert isinstance(link_events, pd.DataFrame)
# 'dep_time_ut' 'arr_time_ut' 'shape_id' 'route_type' 'trip_I' 'duration' 'from_seq' 'to_seq'
if link_attributes is None:
net.add_edge(from_stop_I, to_stop_I)
else:
link_data = {}
if "duration_min" in link_attributes:
link_data['duration_min'] = float(link_events['duration'].min())
if "duration_max" in link_attributes:
link_data['duration_max'] = float(link_events['duration'].max())
if "duration_median" in link_attributes:
link_data['duration_median'] = float(link_events['duration'].median())
if "duration_avg" in link_attributes:
link_data['duration_avg'] = float(link_events['duration'].mean())
# statistics on numbers of vehicles:
if "n_vehicles" in link_attributes:
link_data['n_vehicles'] = int(link_events.shape[0])
if "capacity_estimate" in link_attributes:
link_data['capacity_estimate'] = route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[route_type] \
* int(link_events.shape[0])
if "d" in link_attributes:
from_lat = graph_node_attrs(net, from_stop_I)['lat']
from_lon = graph_node_attrs(net, from_stop_I)['lon']
to_lat = graph_node_attrs(net, to_stop_I)['lat']
to_lon = graph_node_attrs(net, to_stop_I)['lon']
distance = wgs84_distance(from_lat, from_lon, to_lat, to_lon)
link_data['d'] = int(distance)
if "distance_shape" in link_attributes:
assert "shape_id" in link_events.columns.values
found = None
for i, shape_id in enumerate(link_events["shape_id"].values):
if shape_id is not None:
found = i
break
if found is None:
link_data["distance_shape"] = None
else:
link_event = link_events.iloc[found]
distance = gtfs.get_shape_distance_between_stops(
link_event["trip_I"],
int(link_event["from_seq"]),
int(link_event["to_seq"])
)
link_data['distance_shape'] = distance
if "route_I_counts" in link_attributes:
link_data["route_I_counts"] = link_events.groupby("route_I").size().to_dict()
net.add_edge(from_stop_I, to_stop_I, **link_data)
return net
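
# Illustrative sketch (not part of the original module): extracting the bus
# network with a reduced set of link attributes. route_types.BUS is assumed to
# be the GTFS bus code (3); `gtfs` is an already-constructed GTFS object.
def _example_bus_network(gtfs):
    return stop_to_stop_network_for_route_type(
        gtfs,
        route_type=route_types.BUS,
        link_attributes=["n_vehicles", "duration_avg", "d"],
    )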
def stop_to_stop_networks_by_type(gtfs):
"""
Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)
"""
route_type_to_network = dict()
for route_type in route_types.ALL_ROUTE_TYPES:
if route_type == route_types.WALK:
net = walk_transfer_stop_to_stop_network(gtfs)
else:
net = stop_to_stop_network_for_route_type(gtfs, route_type)
route_type_to_network[route_type] = net
assert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)
return route_type_to_network
def combined_stop_to_stop_transit_network(gtfs, start_time_ut=None, end_time_ut=None):
"""
Compute stop-to-stop networks for all travel modes and combine them into a single network.
Each travel mode contributes its own links, so a pair of stops can be connected
by several parallel links (one per mode).
Walk mode is not included.
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
net: networkx.MultiDiGraph
keys should be one of route_types.TRANSIT_ROUTE_TYPES (i.e. GTFS route_types)
"""
multi_di_graph = networkx.MultiDiGraph()
for route_type in route_types.TRANSIT_ROUTE_TYPES:
graph = stop_to_stop_network_for_route_type(gtfs, route_type,
start_time_ut=start_time_ut, end_time_ut=end_time_ut)
for from_node, to_node, data in graph.edges(data=True):
data['route_type'] = route_type
multi_di_graph.add_edges_from(graph.edges(data=True))
multi_di_graph.add_nodes_from(graph.nodes(data=True))
return multi_di_graph
def _add_stops_to_net(net, stops):
"""
Add nodes to the network from the pandas dataframe describing (a part of the) stops table in the GTFS database.
Parameters
----------
net: networkx.Graph
stops: pandas.DataFrame
"""
for stop in stops.itertuples():
data = {
"lat": stop.lat,
"lon": stop.lon,
"name": stop.name
}
net.add_node(stop.stop_I, **data)
def temporal_network(gtfs,
start_time_ut=None,
end_time_ut=None,
route_type=None):
"""
Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport are included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
"""
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
events_df.drop('to_seq', 1, inplace=True)
events_df.drop('shape_id', 1, inplace=True)
events_df.drop('duration', 1, inplace=True)
events_df.drop('route_id', 1, inplace=True)
events_df.rename(
columns={
'from_seq': "seq"
},
inplace=True
)
return events_df
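
# Illustrative sketch (not part of the original module): restricting the
# temporal network to a one-hour window. The unixtime bound is a placeholder
# assumption; `gtfs` is an already-constructed GTFS object.
def _example_temporal_network(gtfs, t0_ut=1500000000):
    return temporal_network(gtfs, start_time_ut=t0_ut, end_time_ut=t0_ut + 3600)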
def route_to_route_network(gtfs, walking_threshold, start_time, end_time):
"""
Creates a networkx graph where the nodes are routes and an edge indicates that a transfer
between two routes is possible (their stops are within the given walking distance during the time window).
:param gtfs: a GTFS object
:param walking_threshold: maximum walking distance (in meters) between stops for a transfer
:param start_time: start of the time window, in seconds after midnight
:param end_time: end of the time window, in seconds after midnight
:return: networkx.Graph with one node per route_id
"""
graph = networkx.Graph()
routes = gtfs.get_table("routes")
for i in routes.itertuples():
graph.add_node(i.route_id, type=i.type, color=route_types.ROUTE_TYPE_TO_COLOR[i.type])
query = """SELECT stop1.route_id AS route_id1, stop1.type, stop2.route_id AS route_id2, stop2.type FROM
(SELECT * FROM stop_distances WHERE d_walk < %s) sd,
(SELECT * FROM stop_times, trips, routes
WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I
AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop1,
(SELECT * FROM stop_times, trips, routes
WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I
AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop2
WHERE sd.from_stop_I = stop1.stop_I AND sd.to_stop_I = stop2.stop_I AND stop1.route_id != stop2.route_id
GROUP BY stop1.route_id, stop2.route_id""" % (walking_threshold, start_time, end_time, start_time,
end_time)
df = gtfs.execute_custom_query_pandas(query)
for items in df.itertuples():
graph.add_edge(items.route_id1, items.route_id2)
graph.remove_nodes_from(networkx.isolates(graph))
return graph
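
# Illustrative sketch (not part of the original module): routes that allow a
# transfer within 200 m of walking during a morning window. Times are given in
# seconds after midnight, matching the dep_time_ds filter in the query above.
def _example_route_graph(gtfs):
    return route_to_route_network(gtfs, walking_threshold=200,
                                  start_time=7 * 3600, end_time=9 * 3600)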
# def cluster_network_stops(stop_to_stop_net, distance):
# """
# Aggregate graph by grouping nodes that are within a specified distance.
# The ids of the nodes are tuples of the original stop_Is.
#
# Parameters
# ----------
# network: networkx.DiGraph
# distance: float
# group all nodes within this distance.
#
# Returns
# -------
# graph: networkx.Graph
# """
# pass
# def aggregate__network(self, graph, distance):
# """
# See to_aggregate_line_graph for documentation
# """
# raise NotImplementedError("this is not working fully yet")
# assert distance <= 1000, "only works with distances below 1000 meters"
# nodes = set(graph.nodes())
#
# node_distance_graph = networkx.Graph()
#
# stop_distances = self.get_table("stop_distances")
# stop_pairs = stop_distances[stop_distances['d'] <= distance]
# stop_pairs = zip(stop_pairs['from_stop_I'], stop_pairs['to_stop_I'])
# for node in nodes:
# node_distance_graph.add_node(node)
# for node, another_node in stop_pairs:
# if (node in nodes) and (another_node in nodes):
# node_distance_graph.add_edge(node, another_node)
#
# node_group_iter = networkx.connected_components(node_distance_graph)
#
# aggregate_graph = networkx.Graph()
# old_node_to_new_node = {}
# for node_group in node_group_iter:
# new_node_id = tuple(node for node in node_group)
# lats = []
# lons = []
# names = []
# for node in node_group:
# if node not in graph:
# # some stops may not part of the original node line graph
# # (e.g. if some lines are not considered, or there are extra stops in stops table)
# continue
# old_node_to_new_node[node] = new_node_id
# lats.append(graph.node[node]['lat'])
# lons.append(graph.node[node]['lon'])
# names.append(graph.node[node]['name'])
# new_lat = numpy.mean(lats)
# new_lon = numpy.mean(lons)
# attr_dict = {
# "lat": new_lat,
# "lon": new_lon,
# "names": names
# }
# aggregate_graph.add_node(new_node_id, attr_dict=attr_dict)
#
# for from_node, to_node, data in graph.edges(data=True):
# new_from_node = old_node_to_new_node[from_node]
# new_to_node = old_node_to_new_node[to_node]
# if aggregate_graph.has_edge(new_from_node, new_to_node):
# edge_data = aggregate_graph.get_edge_data(new_from_node, new_to_node)
# edge_data['route_ids'].append(data['route_ids'])
# else:
# aggregate_graph.add_edge(new_from_node, new_to_node, route_ids=data['route_ids'])
# return aggregate_graph
|
src/test/tests/simulation/domainbounds.py | visit-dav/vis | 226 | 12678016 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: domainbounds.py
#
# Tests: libsim - connecting to simulation and retrieving data from it.
# mesh - 3D rectilinear mesh
#
# Programmer: <NAME>
# Date: June 17, 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
# Create our simulation object.
sim = TestSimulation("domainbounds", "domainbounds.sim2")
# Test that we can start and connect to the simulation.
started, connected = TestSimStartAndConnect("domainbounds00", sim)
# Perform our tests.
if connected:
# Make sure the metadata is right.
TestSimMetaData("domainbounds01", sim.metadata())
AddPlot("Subset", "Domains")
DrawPlots()
v = GetView3D()
v.viewNormal = (0.672727, 0.569817, 0.471961)
v.viewUp = (-0.252634, 0.776445, -0.57733)
SetView3D(v)
Test("domainbounds02")
DeleteAllPlots()
AddPlot("Pseudocolor", "zonal")
DrawPlots()
Test("domainbounds03")
DeleteAllPlots()
# Close down the simulation.
if started:
sim.endsim()
Exit()
|