id (string, 3–8 chars) | content (string, 100–981k chars)
---|---
471320
|
import pandas as pd
# TODO: populate with classes to load
class_register = []
exclude_register = []
|
471333
|
import subprocess
def run_bash_command(bashCommand):
print(bashCommand)
try:
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
return output
except Exception:
print("couldn't run bash command, try running it manually")
|
471338
|
from indy import ledger
import json
import pytest
@pytest.mark.asyncio
async def test_build_pool_restart_request_work_for_start_action():
identifier = "Th7MpTaRZVRYnPiabds81Y"
expected_response = {
"identifier": identifier,
"operation": {
"type": "118",
"action": "start",
"datetime": "0",
}
}
request = json.loads(
await ledger.build_pool_restart_request(identifier, 'start', '0'))
assert expected_response.items() <= request.items()
@pytest.mark.asyncio
async def test_build_pool_restart_request_work_for_cancel_action():
identifier = "Th7MpTaRZVRYnPiabds81Y"
expected_response = {
"identifier": identifier,
"operation": {
"type": "118",
"action": "cancel",
}
}
request = json.loads(
await ledger.build_pool_restart_request(identifier, 'cancel', None))
assert expected_response.items() <= request.items()
|
471359
|
from peewee import *
database = MySQLDatabase('wefe_board', **{'charset': 'utf8', 'sql_mode': 'PIPES_AS_CONCAT', 'use_unicode': True, 'host': '10.**.**.**', 'port': 3306, 'user': 'wefe', 'password': '<PASSWORD>'})
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
class Meta:
database = database
|
471361
|
import sys
from torch.utils.data import DataLoader
from beta_rec.datasets.movielens import Movielens_100k
from beta_rec.datasets.seq_data_utils import (
SeqDataset,
collate_fn,
create_seq_db,
dataset_to_seq_target_format,
reindex_items,
)
sys.path.append("../")
if __name__ == "__main__":
ml = Movielens_100k()
ml.download()
ml.load_interaction()
tem_data = ml.make_temporal_split()
tem_train_data = tem_data[tem_data.col_flag == "train"]
tem_valid_data = tem_data[tem_data.col_flag == "validate"]
tem_test_data = tem_data[tem_data.col_flag == "test"]
# reindex items from 1
train_data, valid_data, test_data = reindex_items(
tem_train_data, tem_valid_data, tem_test_data
)
# convert interactions to sequences
seq_train_data = create_seq_db(train_data)
# convert sequences to (seq, target) format
load_train_data = dataset_to_seq_target_format(seq_train_data)
# define pytorch Dataset class for sequential datasets
load_train_data = SeqDataset(load_train_data)
# pad the sequences with 0
load_train_data = DataLoader(
load_train_data, batch_size=32, shuffle=False, collate_fn=collate_fn
)
|
471395
|
n = int(input())
login = []
logout = []
for i in range(n):
data = input().split()
login.append(data[0])
logout.append(data[1])
# print(login)
# print(logout)
user_num = 0
user_hot = 0
start_time = ''
end_time = ''
for hour in range(0, 24):
for minute in range(60):
time_now = f'{hour:02}:{minute:02}'
counter = user_num == user_hot
if time_now=='09:59': print(counter)
for i in range(n):
if login[i] == time_now:
user_num += 1
print(login[i] + ' : ' + str(i) + '번 유저 로그인')
print(user_num, user_hot)
# print(user_num)
if logout[i] == time_now:
user_num -= 1
print(logout[i] + ' : ' + str(i) + '번 유저 로그아웃')
print(user_num, user_hot)
# print(user_num)
if (user_num != user_hot) and counter:
end_time = time_now
print('!')
if user_num != 0: print(user_num, user_hot)
if user_num > user_hot:
user_hot = user_num
start_time = time_now
print(user_hot)
print(start_time, end_time)
|
471461
|
import thulac
import sys
import csv
import os
sys.path.append("..")
from Model.neo4j_models import Neo4j_Handle
thuFactory = thulac.thulac()
print('--init thulac()--')
# Preload the Neo4j graph database
neo4jconn = Neo4j_Handle()
neo4jconn.connectDB()
print('--Neo4j connecting--')
domain_ner_dict = {}
filePath = os.getcwd()
with open(filePath+'/toolkit/domainDict.csv','r',encoding="utf-8") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
# entity, type code
domain_ner_dict[str(row[0])] = int(row[1])
print('--Load Domain Dictionary...--!')
|
471479
|
import RPi.GPIO
import sys
import random
sys.path.append("../../")
from gfxlcd.driver.ili9325.gpio import GPIO as ILIGPIO
from gfxlcd.driver.ili9325.ili9325 import ILI9325
RPi.GPIO.setmode(RPi.GPIO.BCM)
def hole(o, x, y):
o.draw_pixel(x+1, y)
o.draw_pixel(x+2, y)
o.draw_pixel(x+3, y)
o.draw_pixel(x+1, y + 4)
o.draw_pixel(x+2, y + 4)
o.draw_pixel(x+3, y + 4)
o.draw_pixel(x, y + 1)
o.draw_pixel(x+4, y + 1)
o.draw_pixel(x, y + 2)
o.draw_pixel(x+4, y + 2)
o.draw_pixel(x, y + 3)
o.draw_pixel(x+4, y + 3)
def draw_points(o):
for _ in range(0, 50):
hole(o, random.randint(2, o.width-10), random.randint(2, o.height-10))
def draw_net(o):
s = 0
while s < o.width-1:
o.draw_line(s, 0, s, o.height-1)
s += 10
s = 0
while s < o.height-1:
o.draw_line(0, s, o.width-1, s)
s += 10
drv = ILIGPIO()
drv.pins['LED'] = 6
drv.pins['CS'] = 18
lcd_tft = ILI9325(240, 320, drv)
lcd_tft.init()
lcd_tft.background_color = (255, 255, 255)
lcd_tft.fill_rect(0, 0, 240, 320)
lcd_tft.color = (0, 255, 1)
lcd_tft.draw_circle(79, 99, 40)
lcd_tft.draw_circle(60, 80, 7)
lcd_tft.draw_circle(100, 80, 7)
lcd_tft.draw_line(79, 90, 70, 100)
lcd_tft.draw_line(79, 90, 88, 100)
lcd_tft.draw_arc(79, 91, 12, 45, 135)
lcd_tft.color = (255, 0, 0)
lcd_tft.draw_arc(79, 90, 40, 45, 135)
lcd_tft.draw_line(51, 117, 105, 117)
lcd_tft.background_color = (255, 127, 127)
lcd_tft.fill_rect(75, 140, 83, 220)
lcd_tft.draw_line(75, 220, 65, 280)
lcd_tft.draw_line(83, 220, 93, 280)
lcd_tft.draw_line(83, 150, 130, 150)
lcd_tft.background_color = (0, 255, 0)
lcd_tft.fill_rect(0, 0, 122, 32)
lcd_tft.color = (0, 0, 0)
lcd_tft.background_color = (0, 0, 0)
lcd_tft.draw_circle(60, 15, 15)
lcd_tft.draw_circle(53, 10, 3)
lcd_tft.draw_circle(67, 10, 3)
lcd_tft.draw_arc(60, 15, 10, 45, 135)
lcd_tft.draw_line(60, 12, 57, 17)
lcd_tft.draw_line(60, 12, 63, 17)
lcd_tft.draw_arc(60, 15, 3, 45, 135)
lcd_tft.fill_rect(2, 2, 42, 29)
lcd_tft.fill_rect(119, 2, 109, 12)
lcd_tft.fill_rect(119, 17, 109, 19)
lcd_tft.draw_rect(77, 6, 105, 16)
lcd_tft.fill_rect(77, 16, 105, 25)
lcd_tft.background_color = (0, 0, 0)
lcd_tft.fill_rect(100, 200, 222, 264)
lcd_tft.color = (43, 212, 255)
lcd_tft.background_color = (43, 212, 255)
lcd_tft.draw_circle(131, 232, 31)
lcd_tft.draw_circle(119, 222, 7)
lcd_tft.draw_circle(143, 222, 7)
lcd_tft.draw_arc(131, 232, 20, 45, 135)
lcd_tft.draw_line(131, 227, 127, 238)
lcd_tft.draw_line(131, 227, 135, 238)
lcd_tft.draw_arc(131, 235, 5, 45, 135)
lcd_tft.fill_rect(195, 204, 205, 210)
lcd_tft.draw_rect(180, 210, 220, 225)
lcd_tft.fill_rect(180, 226, 220, 259)
|
471530
|
import spacy
nlp = spacy.load("de_core_news_sm")
text = "Apple: Modell IPhone SE kommt im Sommer"
# Process the text
doc = ____
# Iterate over the entities
for ____ in ____.____:
# Print the text and label of each entity
print(____.____, ____.____)
# Create a Span for "IPhone SE"
iphone_se = ____
# Print the text of the span
print("Fehlende Entität:", iphone_se.text)
|
471546
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class TestQuantizedConv2dBigStrideSmallKernel(utils.TorchGlowTestCase):
# These tests should be run on NNPI card manually, or else
# buck test will only run them on emulator.
supported_backends = {"NNPI"}
fused_2d_expect = {
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
}
fused_3d_expect = {
"aten::quantize_per_tensor",
"quantized::conv3d",
"aten::dequantize",
}
@utils.deterministic_expand(
[
lambda: (
"2d_stride_bigger_in_one_dim",
torch.nn.Conv2d(8, 4, [1, 1], groups=1, stride=[2, 1]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_in_multi_dims",
torch.nn.Conv2d(8, 4, [1, 1], groups=1, stride=[2, 2]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_in_multi_groups",
torch.nn.Conv2d(8, 4, [1, 1], groups=4, stride=[2, 1]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_strong_test_1",
torch.nn.Conv2d(4, 8, [2, 3], groups=2, stride=[1, 4]),
torch.randn([1, 4, 29, 23]),
),
lambda: (
"2d_stride_bigger_strong_test_2",
torch.nn.Conv2d(6, 8, [7, 3], groups=2, stride=[8, 4]),
torch.randn([2, 6, 47, 35]),
),
]
)
def test_qconv_2d(self, name, conv, tensor):
"""Test of quantized conv2d whose stride is bigger than kernel."""
with torch.no_grad():
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(tensor)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
tensor,
fusible_ops=self.fused_2d_expect,
# We set the atol & rtol of this test to be very big,
# because we know there is going to be issues of off-by-1,
# and we don't want to trigger it.
# However, even with such great atol & rtol, this is still
# good enough to verify the functionality is enabled correctly.
atol=0.1,
rtol=0.1,
)
# Skipped 3d tests
@utils.deterministic_expand(
[
lambda: (
"3d_stride_bigger_in_one_dim",
torch.nn.Conv3d(8, 4, kernel_size=2, groups=1, stride=1),
torch.randn([1, 8, 16, 8, 8]),
),
]
)
@unittest.skip(reason="qconv3d channelwise is not yet supported on NNPI")
def test_qconv_3d(self, name, conv, tensor):
"""Test of quantized conv3d whose stride is bigger than kernel."""
with torch.no_grad():
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(tensor)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
tensor,
fusible_ops=self.fused_3d_expect,
atol=0.1,
rtol=0.1,
)
|
471560
|
import os
import sys
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
from keras.layers import Input
from keras.models import Model
sys.path.append(".") # To find local version of the library
from pix2pose_model import ae_model as ae
#find the last weight in each folder and convert it to the inference weights
if(len(sys.argv)!=2 and len(sys.argv)!=3):
print("python3 tools/4_convert_weights_inference.py <weight_dir> < 1-(optional)to overwrite>")
weight_dir = sys.argv[1]
pass_exists=True
if len(sys.argv)>2:
if(sys.argv[2]=='1' or int(sys.argv[2])==1 ):
pass_exists=False
for root,dir,files in os.walk(weight_dir):
if len(files)>0:
weight_fn=""
recent_epoch=0
for fn_temp in files:
if(fn_temp.startswith("pix2pose"+".") and fn_temp.endswith("hdf5")):
temp_split = fn_temp.split(".")
epoch_split = temp_split[1].split("-")
epoch_split2= epoch_split[0].split("_")
epoch_temp = int(epoch_split2[0])
if(epoch_temp>recent_epoch):
recent_epoch = epoch_temp
weight_fn = fn_temp
if os.path.exists(os.path.join(root,"inference.hdf5")) and pass_exists:
print("A converted file exists in ",os.path.join(root,"inference.hdf5"))
continue
if(weight_fn!=""):
generator_train = ae.aemodel_unet_prob(p=1.0)
discriminator = ae.DCGAN_discriminator()
imsize=128
dcgan_input = Input(shape=(imsize, imsize, 3))
dcgan_target = Input(shape=(imsize, imsize, 3))
prob_gt = Input(shape=(imsize, imsize, 1))
gen_img,prob = generator_train(dcgan_input)
recont_l =ae.transformer_loss([np.eye(3)])([gen_img,dcgan_target,prob_gt,prob_gt])
disc_out = discriminator(gen_img)
dcgan = Model(inputs=[dcgan_input,dcgan_target,prob_gt],outputs=[recont_l,disc_out,prob])
print("load recent weights from ", os.path.join(root,weight_fn))
dcgan.load_weights(os.path.join(root,weight_fn))
print("save recent weights to ", os.path.join(root,"inference.hdf5"))
generator_train.save_weights(os.path.join(root,"inference.hdf5"))
|
471565
|
from stix_shifter_utils.modules.base.stix_transmission.base_query_connector import BaseQueryConnector
import re
import json
from stix_shifter_utils.utils.error_response import ErrorResponder
from stix_shifter_utils.utils import logger
class UnexpectedResponseException(Exception):
pass
class QueryConnector(BaseQueryConnector):
PATTERN = '<ID>(.*)</ID>'
DEFAULT_ID = 'UNKNOWN'
def __init__(self, api_client):
self.api_client = api_client
self.logger = logger.set_logger(__name__)
self.connector = __name__.split('.')[1]
def create_query_connection(self, query):
response_txt = None
return_obj = dict()
try:
response = self.api_client.create_search(query)
response_code = response.code
response_txt = response.read().decode('utf-8')
search_id = self.DEFAULT_ID
search = re.search(self.PATTERN, response_txt, re.IGNORECASE)
if search:
search_id = search.group(1)
return_obj['search_id'] = search_id
if 199 < response_code < 300 and search_id != self.DEFAULT_ID:
return_obj['success'] = True
elif ErrorResponder.is_plain_string(response_txt):
ErrorResponder.fill_error(return_obj, message=response_txt, connector=self.connector)
elif ErrorResponder.is_json_string(response_txt):
response_json = json.loads(response_txt)
ErrorResponder.fill_error(return_obj, response_json, ['arguments'], connector=self.connector)
else:
raise UnexpectedResponseException
except Exception as e:
if response_txt is not None:
ErrorResponder.fill_error(return_obj, message='unexpected exception', connector=self.connector)
self.logger.error('can not parse response: ' + str(response_txt))
else:
raise e
return return_obj
|
471582
|
from collections import namedtuple
import math
from dydx3.constants import COLLATERAL_ASSET
from dydx3.constants import COLLATERAL_ASSET_ID_BY_NETWORK_ID
from dydx3.starkex.constants import CONDITIONAL_TRANSFER_FEE_ASSET_ID
from dydx3.starkex.constants import CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS
from dydx3.starkex.constants import CONDITIONAL_TRANSFER_MAX_AMOUNT_FEE
from dydx3.starkex.constants import CONDITIONAL_TRANSFER_PADDING_BITS
from dydx3.starkex.constants import CONDITIONAL_TRANSFER_PREFIX
from dydx3.starkex.constants import ONE_HOUR_IN_SECONDS
from dydx3.starkex.helpers import fact_to_condition
from dydx3.starkex.helpers import nonce_from_client_id
from dydx3.starkex.helpers import to_quantums_exact
from dydx3.starkex.signable import Signable
from dydx3.starkex.starkex_resources.proxy import get_hash
StarkwareConditionalTransfer = namedtuple(
'StarkwareConditionalTransfer',
[
'sender_position_id',
'receiver_position_id',
'receiver_public_key',
'condition',
'quantums_amount',
'nonce',
'expiration_epoch_hours',
],
)
class SignableConditionalTransfer(Signable):
def __init__(
self,
network_id,
sender_position_id,
receiver_position_id,
receiver_public_key,
fact_registry_address,
fact,
human_amount,
client_id,
expiration_epoch_seconds,
):
receiver_public_key = (
receiver_public_key
if isinstance(receiver_public_key, int)
else int(receiver_public_key, 16)
)
quantums_amount = to_quantums_exact(human_amount, COLLATERAL_ASSET)
expiration_epoch_hours = math.ceil(
float(expiration_epoch_seconds) / ONE_HOUR_IN_SECONDS,
)
message = StarkwareConditionalTransfer(
sender_position_id=int(sender_position_id),
receiver_position_id=int(receiver_position_id),
receiver_public_key=receiver_public_key,
condition=fact_to_condition(fact_registry_address, fact),
quantums_amount=quantums_amount,
nonce=nonce_from_client_id(client_id),
expiration_epoch_hours=expiration_epoch_hours,
)
super(SignableConditionalTransfer, self).__init__(
network_id,
message,
)
def to_starkware(self):
return self._message
def _calculate_hash(self):
"""Calculate the hash of the Starkware order."""
# TODO: Check values are in bounds
# The transfer asset and fee asset are always the collateral asset.
# Fees are not supported for conditional transfers.
asset_ids = get_hash(
COLLATERAL_ASSET_ID_BY_NETWORK_ID[self.network_id],
CONDITIONAL_TRANSFER_FEE_ASSET_ID,
)
part_1 = get_hash(
get_hash(
asset_ids,
self._message.receiver_public_key,
),
self._message.condition,
)
part_2 = self._message.sender_position_id
part_2 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS['position_id']
part_2 += self._message.receiver_position_id
part_2 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS['position_id']
part_2 += self._message.sender_position_id
part_2 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS['nonce']
part_2 += self._message.nonce
part_3 = CONDITIONAL_TRANSFER_PREFIX
part_3 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS['quantums_amount']
part_3 += self._message.quantums_amount
part_3 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS['quantums_amount']
part_3 += CONDITIONAL_TRANSFER_MAX_AMOUNT_FEE
part_3 <<= CONDITIONAL_TRANSFER_FIELD_BIT_LENGTHS[
'expiration_epoch_hours'
]
part_3 += self._message.expiration_epoch_hours
part_3 <<= CONDITIONAL_TRANSFER_PADDING_BITS
return get_hash(
get_hash(
part_1,
part_2,
),
part_3,
)
|
471674
|
def get_max_possible(coins, amount=0, turn=True):
if not coins:
return amount
if turn:
alt_1 = get_max_possible(coins[1:], amount + coins[0], False)
alt_2 = get_max_possible(coins[:-1], amount + coins[-1], False)
return max(alt_1, alt_2)
first, last = coins[0], coins[-1]
if first > last:
coins = coins[1:]
else:
coins = coins[:-1]
return get_max_possible(coins, amount, True)
# Test
assert get_max_possible([1, 2, 3, 4, 5]) == 9
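# Extra hand-checked case (added for illustration, not part of the original snippet).
# The turn=False branch models a greedy opponent who always grabs the larger end, so for
# [8, 15, 3, 7] the best we can do is take 7, let the opponent take 8, then take 15 -> 22.
assert get_max_possible([8, 15, 3, 7]) == 22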
|
471724
|
import aiohttp
# Working aiohttp get_url
# Now with closing sessions!
# <NAME>
async def get_url(url, headers: dict = None):
headers = headers or {"user-agent" : "GAF Bot"}
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as response:
status = response.status
if status != 200:
json = None
return response, json, status
try:
json = await response.json()
except Exception:
json = None
return response, json, status
async def post_url(url, data: dict = None, headers: dict = None):
headers = headers or {"user-agent": "GAF Bot"}
async with aiohttp.ClientSession() as session:
async with session.post(url, data=data, headers=headers) as response:
status = response.status
if status != 200:
json = None
return response, json, status
try:
json = await response.json()
except Exception:
json = None
return response, json, status
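# Hedged usage sketch (not part of the original module): both helpers are coroutines and
# must be awaited; https://httpbin.org/get is used purely as an illustrative endpoint.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        response, json_body, status = await get_url("https://httpbin.org/get")
        print(status, json_body is not None)

    asyncio.run(_demo())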
|
471729
|
SURVEY_LIST_GET = {
'tags': ['기숙사'],
'description': '설문조사 리스트 불러오기(학생 학년에 따라 필터링됨)',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '설문조사 리스트 불러오기 성공',
'examples': {
'application/json': [
{
'id': 's3qldmc13opeflds',
'title': '내일 저녁 치킨먹기 찬반설문',
'start_date': '2017-10-24',
'end_date': '2017-10-25'
},
{
'id': '1fnfdj3391idkflds',
'title': '등교 후 12시간 자습 찬반설문',
'start_date': '2017-10-24',
'end_date': '2017-10-30'
}
]
}
},
'403': {
'description': '권한 없음'
}
}
}
SURVEY_GET = {
'tags': ['기숙사'],
'description': '설문조사 질문 리스트 불러오기',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
'description': '설문조사 ID',
'in': 'path',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '질문 리스트 불러오기 성공',
'examples': {
'application/json': [
{
'id': '13211265df16ads',
'title': '저녁에 치킨을 먹고 싶습니까?',
'is_objective': True,
'choice_paper': ['예', '아니오']
},
{
'id': '11265cd65432r9',
'title': '어디 치킨이 좋습니까?',
'is_objective': False
}
]
}
},
'403': {
'description': '권한 없음'
}
}
}
SURVEY_POST = {
'tags': ['기숙사'],
'description': '답변 남기기',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
'description': '질문 ID',
'in': 'path',
'type': 'str',
'required': True
},
{
'name': 'answer',
'description': '답변',
'in': 'formData',
'type': 'str',
'required': True
}
],
'responses': {
'201': {
'description': '답변 남기기 성공'
},
'403': {
'description': '권한 없음'
}
}
}
|
471735
|
import abc
import os
import boto3
class AssumeRoleInterface(abc.ABC):
@abc.abstractmethod
def get_client(self, service):
pass
@abc.abstractmethod
def _new_assumed_role_session(self, session, role_arn):
pass
class AssumeRole(AssumeRoleInterface):
def __new__(cls, *args, **kwargs):
if 'SCOPE' in os.environ and os.environ["SCOPE"] != "single-account":
print("Assume Role Prod")
return AssumeRoleProd(*args)
else:
return AssumeRoleLocal(*args)
class AssumeRoleLocal(AssumeRoleInterface):
def __init__(self, account, role_name):
self.cross_account_role_arn = f"arn:aws:iam::{account}:role/{role_name}"
def get_client(self, service):
return boto3.client(service, region_name="us-east-1")
def _new_assumed_role_session(self, session, role_arn):
pass
class AssumeRoleProd(AssumeRoleInterface):
def __init__(self,account, role_name):
self.cross_account_role_arn = f"arn:aws:iam::{account}:role/{role_name}"
def get_client(self, service):
client = self._new_assumed_role_session(
boto3.Session(),
self.cross_account_role_arn
).client(
service,
region_name="us-east-1"
)
return client
def _new_assumed_role_session(self, session, role_arn):
credentials = session.client('sts').assume_role(
RoleArn=role_arn,
RoleSessionName='role_session',
DurationSeconds=3600)
return boto3.Session(
aws_access_key_id=credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=credentials['Credentials']['SecretAccessKey'],
aws_session_token=credentials['Credentials']['SessionToken'])
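# Hedged usage sketch (not from the original file): the account id and role name below are
# placeholders. With SCOPE unset or set to "single-account", AssumeRole() resolves to
# AssumeRoleLocal and returns a plain regional boto3 client; otherwise AssumeRoleProd
# assumes the cross-account role before building the client.
if __name__ == "__main__":
    s3_client = AssumeRole("123456789012", "ExampleCrossAccountRole").get_client("s3")
    print(type(s3_client))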
|
471768
|
import os
import re
import time
import json
import socket
import shutil
from .app import *
def check_hostname(hostname):
return bool(re.match(r'([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)+)', hostname))
def convert_hostnames(file_path):
with open(file_path, 'r+') as json_file:
data = json.loads(json_file.read())
data_accounts = data['accounts']
length, loop, timeout = 0, 0, 0
for name, value in data_accounts.items():
for i in range(len(value)):
if check_hostname(data_accounts[name][i]['hostname']) == True:
length += 1
for name, value in data_accounts.items():
for i in range(len(value)):
account = data_accounts[name][i]
if check_hostname(account['hostname']) == False:
continue
try:
if timeout == 3: break
log_replace('[{}/{}] Converting hostnames'.format(app_format(loop+1, align='>', width=len(str(length)), chars='0'), length), time=True, status='INFO', status_color='[Y1]')
host = ''
host = socket.gethostbyname(account['hostname'])
if not host:
raise socket.gaierror
elif host != account['host']:
log('{:.<19} [Y1]{:.<23} {}'.format((account['host'] if account['host'] else '(empty)')+' ', host+' [G1]', account['hostname']), status='INFO', status_color='[G1]')
data_accounts[name][i]['host'] = host
timeout = 0
except socket.gaierror:
log('[{}/{}] Converting hostnames error ({})'.format(app_format(timeout+1, align='>', width=len(str(length)), chars='0'), app_format('3', align='>', width=len(str(length)), chars='0'), account['hostname']), status='INFO', status_color='[R1]')
timeout = timeout + 1
finally:
loop = loop + 1
json_file.seek(0)
json.dump(data, json_file, indent=2)
json_file.truncate()
return data_accounts
def generate_accounts(data_accounts, data_authentications=None):
data_authentications = json.loads(open(real_path('/../database/authentications.json')).read())['authentications'] if data_authentications is None else data_authentications
accounts = []
for i in range(len(data_authentications)):
for name in data_accounts:
for x in range(len(data_accounts[name])):
account = data_accounts[name][x]
if not account: continue
account['hostname'] = account['hostname'].lstrip('#')
if check_hostname(account['hostname']) == False:
continue
accounts.append({
'name': name,
'host': account['host'],
'hostname': account['hostname'],
'username': account['username'].replace('{username}', data_authentications[i]['username']),
'password': account['password'].replace('{password}', data_authentications[i]['password'])
})
accounts = [dict(tuples) for tuples in {tuple(dictionaries.items()) for dictionaries in accounts}]
return accounts
def get_file_names(value = 'all'):
file_names = []
if value == 'all':
file_names = [
'config/config.json',
'config/proxies.txt',
'config/payload.txt',
'config/server-name-indication.txt',
'database/accounts.json',
'database/authentications.json',
'database/servers.json'
]
elif value == 'database':
file_names = [
'database/accounts.json',
'database/authentications.json',
'database/servers.json'
]
return file_names
def reset_to_default_settings():
for file_name in get_file_names('all'):
try:
os.remove(real_path('/../' + file_name))
except FileNotFoundError: pass
default_settings()
def reset_database():
for file_name in get_file_names('database'):
try:
os.remove(real_path('/../' + file_name))
except FileNotFoundError: pass
default_settings()
def default_settings():
for file_name in get_file_names('all'):
try:
open(real_path('/../' + file_name))
except FileNotFoundError:
shutil.copyfile(real_path('/default/' + file_name), real_path('/../' + file_name))
def json_error(file):
value = 'Exception: {} \n\n'.format(' ' * 24) + \
' File {} Error! \n'.format(file).replace('/app/../', '/') + \
' Run reset.py first or fixing by your-self. \n' + \
' Good-luck! \n'
log(value, status_color='[R1]')
def autoload():
default_settings()
autoload()
|
471778
|
from Proxy import validateData
from Proxy import fetchData
from Proxy import parseData
from Core.Logger import log
import time
import threading
import queue
import configparser
import logging
# Proxy info dict keys
PROXY_ID = 'id'
PROXY_IP = 'ip'
PROXY_PORT = 'port'
PROXY_ADDRESS = 'address'
PROXY_PROTOCOL = 'protocol'
PROXY_ALIVE_TIME = 'aliveTime'
PROXY_VALIDATE_TIME = 'validateTime'
# First page to fetch from the proxy listing site
FETCH_START_PAGE = 1
# Last page to fetch from the proxy listing site
FETCH_END_PAGE = 5
# Proxy pool size
PROXY_POOL_SIZE = 10
# Interval (seconds) between proxy pool scans
PROXY_POOL_SCAN_INTERVAL = 300
# Number of proxy validation threads
PROXY_VALIDATE_THREAD_NUM = 3
# Queue of proxies waiting to be validated
unchecked_proxy_list = queue.LifoQueue(300)
# Pool of validated, usable proxies
proxy_pool = queue.Queue(100)
# Flag: whether the proxy pool is currently being scanned
is_scanning = False
# Proxy service daemon thread
class ProxyDaemonThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
# Load configuration
self.init()
# Start the proxy validation threads
validate_thread_list = []
for i in range(PROXY_VALIDATE_THREAD_NUM):
validate_thread = ProxyValidateThread()
validate_thread_list.append(validate_thread)
validate_thread.start()
if log.isEnabledFor(logging.DEBUG):
log.debug("代理验证线程启动")
# Start the proxy pool scan thread
scan_thread = ProxyPoolScanThread()
scan_thread.start()
if log.isEnabledFor(logging.DEBUG):
log.debug("代理池扫描线程启动")
# Check whether any thread has failed and restart it
while True:
# Check the proxy validation threads
for thread in validate_thread_list:
if thread.status == 'error':
validate_thread_list.remove(thread)
thread = ProxyValidateThread()
validate_thread_list.append(thread)
thread.start()
if log.isEnabledFor(logging.ERROR):
log.error('代理验证线程重新启动')
# Check the proxy pool scan thread
if scan_thread.status == 'error':
scan_thread = ProxyPoolScanThread()
scan_thread.start()
if log.isEnabledFor(logging.ERROR):
log.error("代理池扫描线程重新启动")
time.sleep(180)
# Initialization: read the config file and apply its settings
@staticmethod
def init():
section = "proxy_core"
config = configparser.ConfigParser()
config.read('Proxy/Config/proxyConfiguration.conf', encoding='utf8')
# validateData settings
validateData.CONNECT_TIMEOUT = int(config.get(section, "proxyValidate_connectTimeout"))
validateData.NETWORK_RECONNECT_TIMES = int(config.get(section, "proxyValidate_networkReconnectTimes"))
# fetchData settings
fetchData.CONNECT_TIMEOUT = int(config.get(section, "dataFetch_connectTimeout"))
fetchData.NETWORK_RECONNECT_INTERVAL = int(config.get(section, "dataFetch_networkReconnectInterval"))
fetchData.NETWORK_RETRY_TIMES = int(config.get(section, "dataFetch_networkReconnectionTimes"))
# proxyCore settings
global FETCH_START_PAGE
global FETCH_END_PAGE
global PROXY_POOL_SIZE
global PROXY_POOL_SCAN_INTERVAL
global PROXY_VALIDATE_THREAD_NUM
FETCH_START_PAGE = int(config.get(section, "proxyCore_fetchStartPage"))
FETCH_END_PAGE = int(config.get(section, "proxyCore_fetchEndPage"))
PROXY_POOL_SIZE = int(config.get(section, "proxyCore_proxyPoolSize"))
PROXY_POOL_SCAN_INTERVAL = int(config.get(section, "proxyCore_proxyPoolScanInterval"))
PROXY_VALIDATE_THREAD_NUM = int(config.get(section, "proxyCore_proxyValidateThreadNum"))
# Proxy validation thread
class ProxyValidateThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.status = 'running'
# Create the proxy validation instance
self.dataValidateModule = validateData.DataValidateModule()
def run(self):
try:
while True:
# Pause while the proxy pool is being scanned
while is_scanning:
time.sleep(3)
if proxy_pool.qsize() < PROXY_POOL_SIZE and unchecked_proxy_list.qsize() > 0:
unchecked_proxy = unchecked_proxy_list.get()
is_available = self.dataValidateModule.validate_proxy_ip(unchecked_proxy)
if is_available is True:
proxy_pool.put(unchecked_proxy)
# print(unchecked_proxy)
time.sleep(1)
else:
time.sleep(5)
except Exception as e:
if log.isEnabledFor(logging.ERROR):
log.exception(e)
self.status = 'error'
# Proxy pool scan thread: removes unusable proxies from the pool
class ProxyPoolScanThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.status = 'running'
self.current_page = 1
# Create the data fetching module
self.dataFetchModule = fetchData.DataFetchModule()
# Create the data parsing module
self.dataParseModule = parseData.DataParseModule()
def run(self):
try:
while True:
if proxy_pool.qsize() < PROXY_POOL_SIZE and unchecked_proxy_list.qsize() < PROXY_POOL_SIZE:
self.fetch_and_parse_proxy()
elif proxy_pool.qsize() == PROXY_POOL_SIZE:
if log.isEnabledFor(logging.DEBUG):
log.debug('代理池更新')
self.scan_proxy_pool()
time.sleep(PROXY_POOL_SCAN_INTERVAL)
else:
time.sleep(60)
except Exception as e:
if log.isEnabledFor(logging.ERROR):
log.exception(e)
self.status = 'error'
# Scan the proxies currently in the pool
@staticmethod
def scan_proxy_pool():
# Since the unchecked list is a LIFO queue, scanning the pool only requires pushing its
# proxies back onto the unchecked list; the validation threads re-verify them and return them to the pool
global is_scanning
is_scanning = True
while proxy_pool.qsize() > 0:
unchecked_proxy_list.put(proxy_pool.get())
is_scanning = False
# Fetch and parse proxies
def fetch_and_parse_proxy(self):
if self.current_page > FETCH_END_PAGE:
self.current_page = FETCH_START_PAGE
response_data = self.dataFetchModule.fetch_proxy_data(self.current_page)
proxy_data = self.dataParseModule.parse_data(response_data)
self.current_page += 1
# Add the parsed proxies to the list awaiting validation
for proxy in proxy_data:
unchecked_proxy_list.put(proxy)
class ProxyService:
def __init__(self):
self.proxy_daemon_thread = ProxyDaemonThread()
@staticmethod
def get_proxy():
return proxy_pool.get()
def start_proxy_service(self):
self.proxy_daemon_thread.start()
# if __name__ == '__main__':
# proxyService = ProxyService()
# while True:
# print(proxyService.get_proxy())
|
471779
|
from __future__ import print_function
import os
import sys
os.environ['NPY_DISTUTILS_APPEND_FLAGS'] = '1'
# Check if we have numpy:
try:
from numpy.distutils.misc_util import Configuration
import numpy.distutils.core
from numpy.distutils.core import setup
except ImportError:
raise ImportError('pyOptSparse requires numpy version 1.0 or later')
# HACK to make bdist_wheel command usable when using numpy.distutils.core.setup
try:
from wheel import bdist_wheel
except ImportError:
if 'bdist_wheel' in sys.argv:
print("\nThe bdist_wheel option requires the 'wheel' package to be installed.\n"
"Install it using 'pip install wheel'.")
sys.exit(-1)
else:
numpy.distutils.core.numpy_cmdclass['bdist_wheel'] = bdist_wheel.bdist_wheel
if len(sys.argv) == 1:
print("\nTo install, run: python setup.py install --user\n\n"
"To build, run: python setup.py build_ext --inplace\n\n"
"For help on C-compiler options run: python setup.py build --help-compiler\n\n"
"For help on Fortran-compiler options run: python setup.py build --help-fcompiler\n\n"
"To specify a Fortran compiler to use run: python setup.py install --user --fcompiler=<fcompiler name>\n\n"
"For further help run: python setup.py build --help"
)
sys.exit(-1)
def configuration(parent_package='', top_path=None):
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('pyoptsparse')
return config
setup(
name = 'pyoptsparse',
version = '1.0.0',
author = 'Dr. <NAME>',
author_email = '<EMAIL>',
maintainer = 'Dr. <NAME>',
maintainer_email = '<EMAIL>',
description = 'Python package for formulating and solving nonlinear constrained optimization problems',
long_description = 'pyOptSparse is a Python package for formulating and solving nonlinear constrained optimization problems',
keywords = 'optimization',
license = 'GNU LGPL',
platforms = ['Windows','Linux','Solaris','Mac OS-X','Unix'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: LGPL',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
'Topic :: Education'],
configuration = configuration,
)
|
471803
|
from django import urls
from pghistory.tests import views
urlpatterns = [
urls.path('test-view', views.MyPostView.as_view(), name='test_view')
]
|
471804
|
import unittest
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.label import has_epic
from allure_commons_test.label import has_feature
from test.example_runner import run_docstring_example
class TestBDDLabel(unittest.TestCase):
def test_method_label(self):
"""
>>> import unittest
>>> import allure
>>> class TestBDDLabelExample(unittest.TestCase):
... @allure.epic("Label", "Bdd")
... @allure.feature("Method label")
... def test_method_label_example(self):
... pass
"""
allure_report = run_docstring_example()
assert_that(allure_report,
has_test_case("test_method_label_example",
has_epic("Label"),
has_epic("Bdd"),
has_feature("Method label")
)
)
def test_class_label(self):
"""
>>> import unittest
>>> import allure
>>> @allure.epic("Label", "Bdd")
... class TestBDDLabelExample(unittest.TestCase):
... def test_class_label_example(self):
... pass
"""
allure_report = run_docstring_example()
assert_that(allure_report,
has_test_case("test_class_label_example",
has_epic("Label"),
has_epic("Bdd"),
)
)
def test_class_method_label(self):
"""
>>> import unittest
>>> import allure
>>> @allure.epic("Label", "Bdd")
... class TestBDDLabelExample(unittest.TestCase):
... @allure.feature("Method label")
... def test_class_and_method_label_example(self):
... pass
"""
allure_report = run_docstring_example()
assert_that(allure_report,
has_test_case("test_class_and_method_label_example",
has_epic("Label"),
has_epic("Bdd"),
has_feature("Method label")
)
)
def test_func_label():
"""
>>> import allure
>>> @allure.epic("Label", "Bdd")
... @allure.feature("Function label")
... def test_func_label_example():
... pass
"""
allure_report = run_docstring_example()
assert_that(allure_report,
has_test_case("test_func_label_example",
has_epic("Label"),
has_epic("Bdd"),
has_feature("Function label")
)
)
|
471814
|
import importlib
import os
import json
import numpy as np
import math
import sys
import pandas as pd
import datetime
import logging
import time
import torch
# ===================================== Common Functions =====================================
def check_input(imgs, targets):
import cv2 as cv
import numpy as np
for idx, target in enumerate(targets):
img = imgs[idx]
img = img.permute(1, 2, 0) # C x H x W --> H x W x C
image = np.array(img.cpu()).copy()
image = np.array(image[..., ::-1]) # RGB --> BGR
org = tuple([int(_ / 2) for _ in image.shape])
cv.putText(image, str(target), tuple(org[:2][::-1]), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
cv.imshow("img", image)
cv.waitKey(0)
def get_et_time(t):
t = int(t)
ets = []
# s
ets.append(t % 60)
# m
t = t // 60
ets.append(t % 60)
# h
t = t // 60
ets.append(t % 24)
# d
t = t // 24
if t != 0:
ets.append(t)
ets.reverse()
ets = ['{:02d}'.format(_) for _ in ets]
return ':'.join(ets)
def save_model(d, epoch, save_dir):
save_name = os.path.join(save_dir, 'epoch_{:06d}.pth'.format(epoch))
torch.save(d, save_name)
dst = os.path.join(save_dir, 'latest.pth')
if os.path.islink(dst):
os.remove(dst)
os.symlink(save_name, dst)
def import_module(path):
py_idx = path.rfind('.py')
if py_idx != -1:
path = path[:py_idx]
_module_path = path.replace('\\', '/')
_module_path = _module_path.replace('/', '.')
return importlib.import_module(_module_path)
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path)
def get_date_str():
time_str = time.strftime("%Y-%m-%d_%H%M%S", time.localtime())
return time_str
def prase_gpus(gpus):
try:
_gpus = gpus.split('-')
if len(_gpus) == 2:
gpus = [i for i in range(int(_gpus[0]), int(_gpus[1]))]
else:
_gpus = gpus.split(',')
gpus = [int(x) for x in _gpus]
return gpus
except Exception:
print('invalid GPU index; please specify valid GPU indices, otherwise the CPU will be used')
gpus = []
return gpus
# ===================================== Json Objects =====================================
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.int32):
return int(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def save_dict(fname, d, mode='w'):
# Persist the dict to a JSON file
with open(fname, mode) as fp:
# json.dump(d, fp, cls=NpEncoder, indent=1, separators=(',', ': '))
json.dump(d, fp, cls=NpEncoder)
def load_dict(fname):
with open(fname, "r") as fp:
o = json.load(fp, )
return o
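# Hedged usage sketch (not part of the original utilities): NpEncoder lets json.dump handle
# numpy scalars and arrays, so a metrics dict like the one below round-trips through
# save_dict/load_dict. The file name is just an example.
if __name__ == "__main__":
    metrics = {"epoch": np.int64(3), "loss": np.float32(0.25), "scores": np.arange(3)}
    save_dict("metrics_example.json", metrics)
    print(load_dict("metrics_example.json"))  # {'epoch': 3, 'loss': 0.25, 'scores': [0, 1, 2]}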
# ===================================== Logger Objects =====================================
def get_logger(name='root'):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
logger = get_logger('root')
class Metrics:
def __init__(self, watch, **names):
self.watch = watch
self.metric = dict(**names)
self.metric[watch] = dict(val=0, sum=0, cnt=0, avg=0)
self.metric['time'] = dict(last=time.time(), val=0, sum=0, cnt=0, avg=0)
self.metrics = []
def update(self, **kwargs):
d = dict(**kwargs)
for k, v in d.items():
if k == self.watch:
self.metric[k]['val'] = float(v)
self.metric[k]['sum'] += self.metric[k]['val']
self.metric[k]['cnt'] += 1
self.metric[k]['avg'] = self.metric[k]['sum'] / self.metric[k]['cnt']
last = self.metric['time']['last']
self.metric['time']['last'] = time.time()
self.metric['time']['val'] = self.metric['time']['last'] - last
self.metric['time']['sum'] += self.metric['time']['val']
self.metric['time']['cnt'] += 1
self.metric['time']['avg'] = self.metric['time']['sum'] / self.metric['time']['cnt']
with open(self.metric['log_path'], 'a+') as fp:
line = json.dumps(self.metric)
fp.write(line + '\n')
else:
self.metric[k] = v
def str(self):
ets = ((self.metric['total_epoch'] - self.metric['epoch']) * self.metric['total_iter'] \
- self.metric['iter']) * self.metric['time']['avg']
ets = get_et_time(ets)
msg = 'Epoch [{}/{}], iter [{}/{}], eta {}, lr {:.6f}, {} {:.4f}({:.4f})'.format(
self.metric['epoch'], self.metric['total_epoch'],
self.metric['iter'], self.metric['total_iter'],
ets, self.metric['lr'], self.watch,
self.metric[self.watch]['val'], self.metric[self.watch]['avg']
)
return msg
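# Hedged usage sketch (not in the original file): Metrics expects log_path plus
# epoch/total_epoch, iter/total_iter and lr to be set through update() before str() is
# called; the file name and values below are placeholders.
if __name__ == "__main__":
    meter = Metrics('loss', log_path='train_metrics.log')
    meter.update(epoch=1, total_epoch=10, iter=1, total_iter=100, lr=0.001)
    meter.update(loss=0.73)  # watched key: updates running stats and appends a JSON log line
    logger.info(meter.str())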
|
471816
|
import torch
from torchvision.models import resnet18
import numpy as np
import pandas as pd
import os
def logic_learning(i2c_net, dataloader, device, optimizer, criterion, epoch, n_epochs, train=False):
running_loss = 0.0
c_correct = 0
c_total = 0
preds = []
total_step = len(dataloader)
for batch_idx, (data_, concepts_) in enumerate(dataloader):
data_, concepts_ = data_.to(device), concepts_.to(device)
if train:
optimizer.zero_grad()
c_preds = i2c_net(data_)
loss = criterion(c_preds, concepts_)
if train:
loss.backward()
optimizer.step()
running_loss += loss.item()
c_correct += torch.sum((c_preds > 0.5).eq(concepts_).sum(dim=1) == concepts_.size(1)).item()
c_total += concepts_.size(0)
preds.append(c_preds.cpu().detach().numpy())
if (batch_idx) % 20 == 0 and train:
print(f'Epoch [{epoch}/{n_epochs}], Step [{batch_idx}/{total_step}], Loss: {loss.item():.4f}')
c_accuracy = c_correct / c_total
loss = running_loss / total_step
return i2c_net, c_accuracy, loss, preds
def load_and_predict(i2c_net, train_dataloader, val_dataloader, test_dataloader,
optimizer, criterion, device, args):
i2c_net.load_state_dict(torch.load(os.path.join(args['models_dir'], f'resnet_{args["model_style"]}_i2c_{args["seed"]}.pt')))
i2c_net.eval()
_, _, _, c_predictions_train = logic_learning(i2c_net, train_dataloader,
device, optimizer, criterion,
1, 1, train=False)
c_predictions_train = (np.vstack(c_predictions_train)>0.5).astype(int)
_, _, _, c_predictions_val = logic_learning(i2c_net, val_dataloader,
device, optimizer, criterion,
1, 1, train=False)
c_predictions_val = (np.vstack(c_predictions_val)>0.5).astype(int)
_, _, _, c_predictions_test = logic_learning(i2c_net, test_dataloader,
device, optimizer, criterion,
1, 1, train=False)
c_predictions_test = (np.vstack(c_predictions_test)>0.5).astype(int)
return c_predictions_train, c_predictions_val, c_predictions_test
def i2c_style(train_dataloader, val_dataloader, test_dataloader, device, args):
data_, concepts_ = next(iter(train_dataloader))
n_classes_c = concepts_.size(1)
i2c_net = resnet18(pretrained=False)
num_ftrs = i2c_net.fc.in_features
i2c_net.fc = torch.nn.Linear(num_ftrs, n_classes_c)
i2c_net = torch.nn.Sequential(*[
i2c_net,
torch.nn.Sigmoid(),
])
i2c_net.to(device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.AdamW(i2c_net.parameters(), lr=0.0001)
if os.path.isfile(os.path.join(args['models_dir'], f'resnet_{args["model_style"]}_i2c_{args["seed"]}.pt')):
i2c_net.load_state_dict(
torch.load(os.path.join(args['models_dir'], f'resnet_{args["model_style"]}_i2c_{args["seed"]}.pt')))
c_predictions_train, c_predictions_val, c_predictions_test = load_and_predict(i2c_net, train_dataloader,
val_dataloader, test_dataloader,
optimizer, criterion, device,
args)
return c_predictions_train, c_predictions_val, c_predictions_test
n_epochs = 200
valid_loss_min = np.Inf
val_loss = []
val_c_acc = []
test_loss = []
test_c_acc = []
train_loss = []
train_c_acc = []
for epoch in range(1, n_epochs + 1):
print(f'Epoch {epoch}\n')
i2c_net, c_accuracy, loss, _ = logic_learning(i2c_net, train_dataloader,
device, optimizer, criterion,
epoch, n_epochs, train=True)
train_c_acc.append(c_accuracy)
train_loss.append(loss)
print(f'\ntrain-loss: {loss:.4f}, train-c-acc: {c_accuracy:.4f}')
with torch.no_grad():
i2c_net.eval()
i2c_net, c_accuracy, loss, _ = logic_learning(i2c_net, val_dataloader,
device, optimizer, criterion,
epoch, n_epochs, train=False)
val_c_acc.append(c_accuracy)
val_loss.append(loss)
print(f'validation loss: {loss:.4f}, validation-c-acc: {c_accuracy:.4f}')
if loss < valid_loss_min:
valid_loss_min = loss
torch.save(i2c_net.state_dict(),
os.path.join(args['models_dir'], f'resnet_{args["model_style"]}_i2c_{args["seed"]}.pt'))
print('Improvement-Detected, save-model')
i2c_net, c_accuracy, loss, _ = logic_learning(i2c_net, test_dataloader,
device, optimizer, criterion,
epoch, n_epochs, train=False)
test_c_acc.append(c_accuracy)
test_loss.append(loss)
print(f'test loss: {loss:.4f}, test-c-acc: {c_accuracy:.4f}\n')
i2c_net.train()
results = pd.DataFrame({
'test_c_acc': test_c_acc,
'test_loss': test_loss,
'val_c_acc': val_c_acc,
'val_loss': val_loss,
'train_c_acc': train_c_acc,
'train_loss': train_loss,
})
results_dir = './results'
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
results.to_csv(os.path.join(results_dir, f'results_{args["model_style"]}_i2c_{args["seed"]}.csv'))
# i2c_net.load_state_dict(torch.load(os.path.join(args['models_dir'], f'resnet_{args["model_style"]}_i2c_{args["seed"]}.pt')))
c_predictions_train, c_predictions_val, c_predictions_test = load_and_predict(i2c_net, train_dataloader,
val_dataloader, test_dataloader,
optimizer, criterion, device, args)
return c_predictions_train, c_predictions_val, c_predictions_test
|
471863
|
import sys
path = sys.argv[1]
key = int(sys.argv[2])
buffer = bytearray(open(path, "rb").read())
for i in range(len(buffer)):
buffer[i] = buffer[i] ^ key
print(buffer.decode())
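# Hedged usage note (not part of the original script): run as
#   python <this_script>.py <path-to-file> <integer key 0-255>
# Single-byte XOR is its own inverse, so applying the same key again restores the original
# bytes; keys outside 0-255 can push byte values out of range and raise a ValueError.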
|
471893
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.CSCFakeDBGains = cms.ESSource("CSCFakeDBGains")
process.prefer("CSCFakeDBGains")
process.CSCFakeDBPedestals = cms.ESSource("CSCFakeDBPedestals")
process.prefer("CSCFakeDBPedestals")
process.CSCFakeDBNoiseMatrix = cms.ESSource("CSCFakeDBNoiseMatrix")
process.prefer("CSCFakeDBNoiseMatrix")
process.CSCFakeDBCrosstalk = cms.ESSource("CSCFakeDBCrosstalk")
process.prefer("CSCFakeDBCrosstalk")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.prod1 = cms.EDAnalyzer("CSCGainsDBReadAnalyzer")
process.prod2 = cms.EDAnalyzer("CSCPedestalDBReadAnalyzer")
process.prod3 = cms.EDAnalyzer("CSCCrossTalkDBReadAnalyzer")
process.prod4 = cms.EDAnalyzer("CSCNoiseMatrixDBReadAnalyzer")
process.output = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.prod1*process.prod2*process.prod3*process.prod4)
process.ep = cms.EndPath(process.output)
|
471900
|
import pymysql
import time
HOST = "database-1.cluster-cti5mc0i9dvk.ap-northeast-2.rds.amazonaws.com"
USER = "the_red"
PASSWORD = "<PASSWORD>"
DB = "the_red"
connection = pymysql.connect(host=HOST,
user=USER,
password=PASSWORD,
database=DB,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
while True:
with connection.cursor() as cursor:
# Create a new record
sql = "INSERT INTO `test` VALUES (1)"
cursor.execute(sql)
connection.commit()
with connection.cursor() as cursor:
sql = "SELECT MAX(uid) from test"
cursor.execute(sql)
result = cursor.fetchone()
print(result)
time.sleep(3)
|
471920
|
import inspect
import re
import collectd_haproxy.plugin
import collectd_haproxy.connection
import collectd_haproxy.metrics
import collectd_haproxy.compat
modules_to_test = (
collectd_haproxy.plugin,
collectd_haproxy.connection,
collectd_haproxy.metrics,
collectd_haproxy.compat,
)
def test_docstrings():
for module in modules_to_test:
for path, thing in get_module_things(module):
yield create_docstring_assert(path, thing)
def get_module_things(module):
module_name = module.__name__
for func_name, func in get_module_functions(module):
if inspect.getmodule(func) != module:
continue
yield (module_name + "." + func_name, func)
for class_name, klass in get_module_classes(module):
if inspect.getmodule(klass) != module:
continue
yield (module_name + "." + class_name, klass)
for method_name, method in get_class_methods(klass):
if method_name not in klass.__dict__:
continue
yield (module_name + "." + class_name + ":" + method_name, method)
def get_module_classes(module):
for name, klass in inspect.getmembers(module, predicate=inspect.isclass):
yield (name, klass)
def get_module_functions(module):
for name, func in inspect.getmembers(module, predicate=inspect.isfunction):
yield (name, func)
def get_class_methods(klass):
for name, method in inspect.getmembers(klass, predicate=inspect.ismethod):
yield (name, method)
def create_docstring_assert(path, thing):
def test_function():
assert_docstring_present(thing, path)
assert_docstring_includes_param_metadata(thing, path)
test_name = "test_docstring__%s" % de_camelcase(path)
test_function.__name__ = test_name
test_function.description = test_name
return test_function
def assert_docstring_present(thing, path):
docstring = inspect.getdoc(thing)
if not docstring or not docstring.strip():
raise AssertionError("No docstring present for %s" % path)
def assert_docstring_includes_param_metadata(thing, path):
if inspect.isclass(thing):
return
docstring = inspect.getdoc(thing)
if not docstring:
return
for arg_name in inspect.getfullargspec(thing).args:
if arg_name in ("self", "cls"):
continue
if ":param %s:" % arg_name not in docstring:
raise AssertionError(
"Missing :param: for arg %s of %s" % (arg_name, path)
)
if ":type %s:" % arg_name not in docstring:
raise AssertionError(
"Missing :type: for arg %s of %s" % (arg_name, path)
)
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def de_camelcase(name):
return all_cap_re.sub(
r'\1_\2',
first_cap_re.sub(r'\1_\2', name)
).lower()
|
472031
|
try:
from .GnuProlog import GNUProlog
except Exception:
pass
try:
from .SWIProlog import SWIProlog
except Exception:
pass
try:
from .XSBProlog import XSBProlog
except Exception:
pass
from .prologsolver import Prolog
|
472032
|
import pyomo.environ as pe
import logging
import warnings
import math
logger = logging.getLogger(__name__)
pyo = pe
def _copy_v_pts_without_inf(v_pts):
new_pts = list()
for pt in v_pts:
if pt > -math.inf and pt < math.inf:
new_pts.append(pt)
return new_pts
def _get_bnds_list(v):
lb = pe.value(v.lb)
ub = pe.value(v.ub)
if lb is None:
lb = -math.inf
if ub is None:
ub = math.inf
return [lb, ub]
def var_info_str(v):
s = '\tVar: {0}\n'.format(v)
return s
def bnds_info_str(vlb, vub):
s = '\tLB: {0}\n'.format(vlb)
s += '\tUB: {0}\n'.format(vub)
return s
def x_pts_info_str(_x_pts):
s = '\tx_pts: {0}\n'.format(_x_pts)
return s
def check_var_pts(x, x_pts=None):
xlb = pe.value(x.lb)
xub = pe.value(x.ub)
if xlb is None:
xlb = -math.inf
if xub is None:
xub = math.inf
raise_error = False
raise_warning = False
msg = None
if xub < xlb:
msg = 'Lower bound is larger than upper bound:\n' + var_info_str(x) + bnds_info_str(xlb, xub)
raise_error = True
if x_pts is not None:
ordered = all(x_pts[i] <= x_pts[i+1] for i in range(len(x_pts)-1))
if not ordered:
msg = 'x_pts must be ordered:\n' + var_info_str(x) + bnds_info_str(xlb, xub) + x_pts_info_str(x_pts)
raise_error = True
if xlb != x_pts[0] or xub != x_pts[-1]:
msg = ('end points of the x_pts list must be equal to the bounds on the x variable:\n' + var_info_str(x) +
bnds_info_str(xlb, xub) + x_pts_info_str(x_pts))
raise_error = True
if raise_error:
logger.error(msg)
raise ValueError(msg)
if raise_warning:
logger.warning(msg)
warnings.warn(msg)
|
472047
|
from microscopes.kernels.slice import theta
from microscopes.irm.model import initialize, bind
from microscopes.common.relation.dataview import numpy_dataview
from microscopes.common.rng import rng
from microscopes.models import bbnc
from microscopes.irm.definition import model_definition
from microscopes.common.testutil import assert_1d_cont_dist_approx_sps
from scipy.stats import beta
import numpy as np
#from nose.plugins.attrib import attr
def test_slice_theta_irm():
N = 10
defn = model_definition([N], [((0, 0), bbnc)])
data = np.random.random(size=(N, N)) < 0.8
view = numpy_dataview(data)
r = rng()
prior = {'alpha': 1.0, 'beta': 9.0}
s = initialize(
defn,
[view],
r=r,
cluster_hps=[{'alpha': 2.0}],
relation_hps=[prior],
domain_assignments=[[0] * N])
bs = bind(s, 0, [view])
params = {0: {'p': 0.05}}
heads = len([1 for y in data.flatten() if y])
tails = len([1 for y in data.flatten() if not y])
alpha1 = prior['alpha'] + heads
beta1 = prior['beta'] + tails
def sample_fn():
theta(bs, r, tparams=params)
return s.get_suffstats(0, [0, 0])['p']
rv = beta(alpha1, beta1)
assert_1d_cont_dist_approx_sps(sample_fn, rv, nsamples=50000)
|
472055
|
from pathlib import Path
import re
import yaml
repo_dir = Path(__file__).parent.parent.parent.parent
resources_dir = repo_dir / 'resources'
# excel_path = resources_dir / 'Semio2Brain Database.xlsx'
semiology_dict_path = resources_dir / 'semiology_dictionary.yaml'
with open(semiology_dict_path) as f:
SemioDict = yaml.load(f, Loader=yaml.FullLoader)
def custom_semiology_lookup(custom_semiology, nested_dict=SemioDict,
found=None) -> list:
"""
User enters custom semiology. This checks if we already have a catch-all in taxonomy replacement SemioDict.
Top level function will use this to find a match within SemioDict:
semiology_exists_already = custom_semiology_lookup(custom_semiology, found=[])
if not semiology_exists_already:
pass
elif len(semiology_exists_already) == 1:
pop-up window("Note this custom semiology may already exist within the category {}".format(semiology_exists_already[0]))
elif len(semiology_exists_already) > 1:
pop-up window("Note this custom semiology term occurs in various ways within the following categories: {}".format(str(semiology_exists_already)))
Alim-Marvasti 2020
"""
found = [] if found is None else found
for k, v in nested_dict.items():
# look for matching keys only in top level
if re.search(r'(?i)' + custom_semiology, k):
found.append(k)
elif re.search(r'(?i)' + k, custom_semiology):
found.append(k)
elif isinstance(v, list):
for regex_item in v:
if re.search(r'(?i)' + regex_item, custom_semiology):
found.append(k)
if re.search(r'(?i)' + custom_semiology, regex_item):
found.append(k)
elif isinstance(v, dict):
# run it again to open nested dict values:
custom_semiology_lookup(
custom_semiology, nested_dict=v, found=found)
else: # single regex term in the value of the key
if re.search(r'(?i)' + v, custom_semiology):
found.append(k)
if re.search(r'(?i)' + custom_semiology, v):
found.append(k)
return list(set(found))
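# Hedged usage sketch (not in the original file): this mirrors the calling pattern described
# in the docstring above, assuming semiology_dictionary.yaml loaded successfully; the query
# string "head version" is only an illustrative example.
if __name__ == "__main__":
    matches = custom_semiology_lookup("head version", found=[])
    if not matches:
        print("No existing category matches this custom semiology.")
    elif len(matches) == 1:
        print("Note: this custom semiology may already exist within the category:", matches[0])
    else:
        print("Note: this term occurs within several categories:", matches)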
|
472058
|
import os
import socket
if True:
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sd.bind(('127.0.0.1', 1024))
sd.listen(10)
for i in range(3):
if os.fork () == 0:
while True:
cd, _ = sd.accept()
cd.close()
print(i)
os.wait()
|
472069
|
from __future__ import print_function
import sys
import os
os.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=C0413
import unittest
import logging
import torch
import copy # pylint: disable=C0411
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0413,C0411
logging.basicConfig(level=logging.DEBUG)
class TestOps(TestCase):
# @unittest.skip("not test")
@testinfo()
def test_select(self):
in_shape = (2, 3, 24, 30)
in_shape1 = (2, 3, 33)
in_shape2 = (2, 24)
input_dtypes = [torch.float, torch.half]
for data_type in input_dtypes:
input_t = torch.rand(in_shape, dtype=torch.float)
input1 = torch.rand(in_shape1, dtype=torch.float)
input2 = torch.rand(in_shape2, dtype=torch.float)
output_cpu = input_t[:, 1]
input_mlu = self.to_mlu_dtype(input_t, data_type)
output_mlu = input_mlu[:, 1]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
output_cpu1 = input1[:, 2]
input1_mlu = self.to_mlu_dtype(input1, data_type)
output_mlu1 = input1_mlu[:, 2]
self.assertTensorsEqual(
output_cpu1, output_mlu1.cpu().float(), 0.003, use_MSE=True)
output_cpu1 = input2[1:, -1]
input2_mlu = self.to_mlu_dtype(input2, data_type)
output_mlu1 = input2_mlu[1:, -1]
self.assertTensorsEqual(
output_cpu1, output_mlu1.cpu().float(), 0.003, use_MSE=True)
output_cpu = input_t[:, :, :, -2]
output_mlu = input_mlu[:, :, :, -2]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
# @unittest.skip("not test")
@testinfo()
def test_select_channel_last(self):
in_shape = (2, 3, 24, 30)
input_dtypes = [torch.float, torch.half]
for data_type in input_dtypes:
input_t = torch.rand(in_shape).to(memory_format=torch.channels_last)
output_cpu = input_t[:, 1]
input_mlu = self.to_mlu_dtype(input_t, data_type)
output_mlu = input_mlu[:, 1]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
output_cpu = input_t[:, :, :, -2]
output_mlu = input_mlu[:, :, :, -2]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
# @unittest.skip("not test")
@testinfo()
def test_select_not_dense(self):
in_shape = (2, 3, 24, 30)
input_dtypes = [torch.float, torch.half]
for data_type in input_dtypes:
input_t = torch.rand(in_shape)
output_cpu = input_t[:,:,:,:19][:, :, :, 1]
input_mlu = self.to_mlu_dtype(input_t, data_type)
output_mlu = input_mlu[:,:,:,:19][:, :, :, 1]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
output_cpu = input_t[:,:,:,:19][:, :, :, -2]
output_mlu = input_mlu[:,:,:,:19][:, :, :, -2]
self.assertTensorsEqual(
output_cpu, output_mlu.cpu().float(), 0.003, use_MSE=True)
@unittest.skip("not test")
@testinfo()
def test_slice_backward(self):
x = torch.randn((30, 2), requires_grad=True)
x_mlu = self.to_device(x)
z = x[12]
z_mlu = x_mlu[12]
grad = torch.randn(2)
grad_mlu = self.to_device(grad)
z.backward(grad)
out_grad = copy.deepcopy(x.grad)
x.grad.zero_()
z_mlu.backward(grad_mlu)
out_grad_mlu = x.grad
self.assertTensorsEqual(
z, z_mlu.cpu(), 0.0, use_MSE=True)
self.assertTensorsEqual(
out_grad, out_grad_mlu.cpu(), 0.0, use_MSE=True)
@unittest.skip("not test")
@testinfo()
def test_select_exception(self):
a = torch.tensor(4, dtype=torch.float).to('mlu')
ref_msg = r"^select\(\) cannot be applied to a 0-dim tensor\.$"
with self.assertRaisesRegex(IndexError, ref_msg):
a.select(dim=1, index=2)
a = torch.randn((3,4), dtype=torch.float).to('mlu')
ref_msg = r"^select\(\): index 5 out of range for tensor of"
ref_msg = ref_msg + r" size \[3, 4\] at dimension 1$"
with self.assertRaisesRegex(IndexError, ref_msg):
a.select(dim=1, index=5)
if __name__ == "__main__":
unittest.main()
|
472070
|
import cv2
import numpy as np
from transform import four_point_transform
import argparse
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
# What if the ROI doesn't contain any lane?
# We can assume that the lane is at the rightmost.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image file")
args = vars(ap.parse_args())
image = mpimg.imread(args["image"])
im = plt.imread(args["image"])
#taking input from user
ax = plt.gca()
fig = plt.gcf()
implot = ax.imshow(im)
# coord = []
# coord = [(241, 316), (438, 312), (602, 447), (54, 447)] DSCN0632
coord = [(251, 314), (443, 306), (616, 435), (85, 445)]
# def onclick(event):
# if event.xdata != None and event.ydata != None :
# coord.append((int(event.xdata), int(event.ydata)))
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
# plt.show()
# print(coord)
warped = four_point_transform(image, np.array(coord))
plt.imshow(warped),plt.show()
# cv2.waitKey(0)
cv2.destroyAllWindows()
cap = cv2.VideoCapture('testvideos/calibration/DSCN0622.MOV')
while (cap.isOpened()):
ret, frame = cap.read()
if frame is None:
break
# roi = frame[250:480, 0:640]
cv2.circle(frame,(320,435), 5, (0,0,255), -1)
cv2.imshow("Input",frame);
warped = four_point_transform(frame, np.array(coord))
gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
#houghline transform
high_thresh, thresh_im = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
lowThresh = 0.5*high_thresh
edges = cv2.Canny(gray, 50, 150)
kernel = np.ones((3,3),np.uint8)
dilation = cv2.dilate(edges,kernel,iterations = 1)
im = cv2.bilateralFilter(dilation, 5, 17,17)
#cv2.imshow('im',im)
lines = cv2.HoughLinesP(im, 1, np.pi/180.0, 50, np.array([]), 100, 10)
min_x = 640
if lines is not None:
for [[x1,y1,x2,y2]] in lines:
dx, dy = x2 - x1, y2 - y1
angle = np.arctan2(dy, dx)*180/(np.pi)
# print(angle)
if angle >0:
# print(x2, y2)
if abs(y2-210) <= 10 and x2 < min_x:
# print("yes")
min_x = x2
cv2.line(warped,(x1,y1),(x2,y2),(0,255,0),2)
print((min_x - 247.0)*(60.0/523.0))
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(frame,'Hello World!',(10,500), font, 1,(255,255,255),2)
cv2.imshow('frame', warped)
#cv2.waitKey(0)
# cv2.waitKey(0)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Stitching of closer lines can be done after the Hough transform.
cap.release()
cv2.destroyAllWindows()
|
472073
|
import sys, os.path
nodo_dir = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '\\AST\\')
sys.path.append(nodo_dir)
c3d_dir = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '\\C3D\\')
sys.path.append(c3d_dir)
entorno_dir = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '\\ENTORNO\\')
sys.path.append(entorno_dir)
from Nodo import Nodo
from Entorno import *
from Tipo_Expresion import *
from Tipo import Data_Type
from Tipo import Type
from Label import *
from Temporal import *
from Simbolo import *
class Declaracion_Variable(Nodo):
def __init__(self, nombreNodo, fila = -1, columna = -1, valor = None):
Nodo.__init__(self,nombreNodo, fila, columna, valor)
def execute(self, enviroment):
cantidadHijos = len(self.hijos)
if cantidadHijos == 2 :
identificador = self.hijos[0]
tipoDeclaracion = self.hijos[1].hijos[0]
nombreVariable = identificador.valor.lower()
if tipoDeclaracion.nombreNodo == 'TEXT':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.character)
tipoVariable.data_specific = 'text'
simboloVariable = Symbol(nombreVariable,tipoVariable,'')
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'SMALLINT':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'smallint'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'INTEGER':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'integer'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'BIGINT':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'bigint'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'DECIMAL':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'decimal'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'NUMERIC':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'numeric'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'REAL':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'real'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'MONEY':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'money'
simboloVariable = Symbol(nombreVariable,tipoVariable,0)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'DATE':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.data_time)
tipoVariable.data_specific = 'data'
simboloVariable = Symbol(nombreVariable,tipoVariable,'2000-10-01')
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
elif tipoDeclaracion.nombreNodo == 'BOOLEAN':
if enviroment.existeSimboloEntornoActual(nombreVariable) :
print('error')
return False
else:
                    # Create the symbol
tipoVariable = Type_Expresion(Data_Type.boolean)
tipoVariable.data_specific = 'boolean'
simboloVariable = Symbol(nombreVariable,tipoVariable,True)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
pass
elif cantidadHijos == 3 :
identificador = self.hijos[0]
nombreVariable = identificador.valor.lower()
tipoDeclaracion = self.hijos[1].hijos[0]
expresionExecute = self.hijos[2].hijos[0]
value = expresionExecute.execute(enviroment)
            print('three child nodes received')
if tipoDeclaracion.nombreNodo == 'TEXT':
if expresionExecute.tipo.data_type == Data_Type.character :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.character)
tipoVariable.data_specific = 'text'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'SMALLINT':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'smallint'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'INTEGER':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'integer'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'BIGINT':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'bigint'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'DECIMAL':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'decimal'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'NUMERIC':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'numeric'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'REAL':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'real'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'MONEY':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.numeric)
tipoVariable.data_specific = 'money'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'DATE':
if expresionExecute.tipo.data_type == Data_Type.data_time :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.data_time)
tipoVariable.data_specific = 'data'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
elif tipoDeclaracion.nombreNodo == 'BOOLEAN':
if expresionExecute.tipo.data_type == Data_Type.numeric :
if enviroment.existeSimboloEntornoActual(nombreVariable) :
return False
else:
                        # Create the symbol
tipoVariable = Type_Expresion(Data_Type.boolean)
tipoVariable.data_specific = 'boolean'
simboloVariable = Symbol(nombreVariable,tipoVariable,value)
enviroment.ingresar_simbolo(nombreVariable,simboloVariable)
pass
pass
else :
return False
pass
def compile(self, enviroment):
codigoCompile = ''
cantidadHijos = len(self.hijos)
if cantidadHijos == 2 :
identificador = self.hijos[0]
tipoDeclaracion = self.hijos[1].hijos[0]
nombreVariable = identificador.valor.lower()
if tipoDeclaracion.nombreNodo == 'TEXT':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'SMALLINT':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'INTEGER':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'BIGINT':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'DECIMAL':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'NUMERIC':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'REAL':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'MONEY':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'DATE':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
elif tipoDeclaracion.nombreNodo == 'BOOLEAN':
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
pass
elif cantidadHijos == 3 :
identificador = self.hijos[0]
nombreVariable = identificador.valor.lower()
tipoDeclaracion = self.hijos[1].hijos[0]
expresionExecute = self.hijos[2].hijos[0]
value = None
auxiliar = False
if expresionExecute.nombreNodo == 'SENTENCIA_SELECT':
value = expresionExecute.execute(enviroment)
                print('The select statement was executed')
                print('Result type: ', expresionExecute.tipo.data_type)
else:
value = expresionExecute.compile(enviroment)
auxiliar = True
if auxiliar == True :
codigoCompile += value
codigoCompile += nombreVariable + ' = ' + expresionExecute.dir + '\n'
return codigoCompile
else:
if expresionExecute.tipo.data_type == Data_Type.listaDatos :
codigoCompile += nombreVariable + ' = None\n'
return codigoCompile
else :
codigoCompile += nombreVariable + ' = ' + str(value) + '\n'
return codigoCompile
pass
def getText(self):
pass
|
472074
|
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from rlo import experiment_result
from rlo import plotting
from rlo import utils
def plot_empirical_predicted_values(
outfile, title_suffix, events, probabilities=[10, 50, 90]
):
# import json # determine the size of logs - uncomment if needed
# print("Size", len(json.dumps([e for e in events if e['event']=='plot_value_comparison'])))
train_logs = [r for r in events if r["event"] == "plot_value_comparison"]
by_expr = utils.group_by(train_logs, lambda r: r["expr"])
fig, axs = plt.subplots(
len(by_expr), 1, figsize=[15, 4 * len(by_expr)], squeeze=False,
)
x_axis_func = lambda r: r["generation"]
N_GENERATIONS = (
max([x_axis_func(rec) for rec in train_logs]) + 1
) # The first generation is numbered 0
x_vals = range(N_GENERATIONS)
N_REPETITIONS = max([int(r["repetition"]) for r in train_logs]) + 1
for ax, (expr, logs) in zip(axs.ravel(), by_expr.items()):
expr_cost = logs[0][
"expr_cost"
] # we just need an initial cost for the starting expression expr
# compute percentiles separately for each repetition for each generation
by_generation = utils.group_by(logs, x_axis_func)
all_percentiles = np.full(
(N_GENERATIONS, N_REPETITIONS, len(probabilities)), float("nan")
)
for generation, generation_logs in by_generation.items():
for repetition, rep_logs in utils.group_by(
generation_logs, lambda r: r["repetition"]
).items():
# find percentiles of (predicted - empirical) for repetition
all_percentiles[int(generation), int(repetition), :] = np.percentile(
[
p - e
for r in rep_logs
for p, e in zip(r["predicted_value"], r["empirical_value"])
],
probabilities,
axis=0,
)
# then average across repetitions (ignoring absent values=NaN)
av_percentiles = np.nanmean(all_percentiles, axis=1)
# and plot a line against generation for each percentile
for i in range(len(probabilities)):
ax.plot(
x_vals,
av_percentiles[:, i],
label=str(probabilities[i]) + "th percentile",
)
ax.set_title(
"Value evaluation for {} with cost {}, {}".format(
expr, expr_cost, title_suffix
),
fontsize=9,
)
ax.axhline(0, color="black", linewidth=1)
ax.set_ylabel("(predicted - empirical)", fontsize=9)
ax.set_xlabel("Generations", fontsize=9)
plt.figlegend(*ax.get_legend_handles_labels(), loc="upper left")
fig.tight_layout()
plt.savefig(outfile)
def plot_empirical_predicted_values_from_config(config, events):
plot_empirical_predicted_values(
plotting.format_figure_filename(config, "empirical_predicted_values.png"),
plotting.config_suffix(config),
events,
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"run_id",
type=str,
help="a run ID (e.g., 2019_01_06_13_15_48_13172) or path to a config.json file",
)
args = parser.parse_args()
config = experiment_result.load_config(args.run_id)
if "result_save_path" in config:
logs = experiment_result.load_events_from_config(config, verbosity=1)
plot_empirical_predicted_values_from_config(config, logs)
else:
plot_empirical_predicted_values(
"empirical_predicted_values.png", "", events=config
)
if __name__ == "__main__":
main()
|
472091
|
import gin
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras.layers import Input, Concatenate, Dense
from reaver.models.base.layers import Squeeze, Variable, RunningStatsNorm
from reaver.envs.base import Spec
@gin.configurable
def build_mlp(
obs_spec: Spec,
act_spec: Spec,
layer_sizes=(64, 64),
activation='relu',
initializer='glorot_uniform',
value_separate=False,
obs_shift=False,
obs_scale=False) -> tf.keras.Model:
"""
Factory method for a simple fully connected neural network model used in e.g. MuJuCo environment
If value separate is set to true then a separate path is added for value fn, otherwise branches out of last layer
If obs shift is set to true then observations are normalized to mean zero with running mean estimate
If obs scale is set to true then observations are standardized to std.dev one with running std.dev estimate
"""
inputs = inputs_ = [Input(s.shape, name="input_" + s.name) for s in obs_spec]
if obs_shift or obs_scale:
inputs_ = [RunningStatsNorm(obs_shift, obs_scale, name="norm_" + s.name)(x) for s, x in zip(obs_spec, inputs_)]
inputs_concat = Concatenate()(inputs_) if len(inputs_) > 1 else inputs_[0]
x = build_fc(inputs_concat, layer_sizes, activation, initializer)
outputs = [build_logits(space, x, initializer) for space in act_spec]
if value_separate:
x = build_fc(inputs_concat, layer_sizes, activation, initializer, 'value_')
value = Dense(1, name="value_out", kernel_initializer=initializer)(x)
value = Squeeze(axis=-1)(value)
outputs.append(value)
return tf.keras.Model(inputs=inputs, outputs=outputs)
def build_logits(space, prev_layer, initializer):
logits = Dense(space.size(), kernel_initializer=initializer, name="logits_" + space.name)(prev_layer)
if space.is_continuous():
logits = Variable(name="logstd")(logits)
return logits
def build_fc(input_layer, layer_sizes, activation, initializer, prefix=''):
x = input_layer
for i, size in enumerate(layer_sizes):
x = Dense(size, activation=activation, kernel_initializer=initializer, name='%sfc%02d' % (prefix, i+1))(x)
return x
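# Hedged usage sketch (added for illustration; not part of the original module).
# It wires up the shared-trunk layout that build_mlp's docstring describes, but
# with a plain Keras Input instead of reaver Spec objects, since the exact Spec
# constructor is not shown here. The observation size (8) and the number of
# discrete actions (4) are arbitrary assumptions.
if __name__ == '__main__':
    obs_in = Input((8,), name='input_obs')
    trunk = build_fc(obs_in, (64, 64), 'relu', 'glorot_uniform')
    policy_logits = Dense(4, name='logits_action', kernel_initializer='glorot_uniform')(trunk)
    # value_separate=True style: a second trunk feeding a scalar value head
    value_trunk = build_fc(obs_in, (64, 64), 'relu', 'glorot_uniform', 'value_')
    value = Squeeze(axis=-1)(Dense(1, name='value_out', kernel_initializer='glorot_uniform')(value_trunk))
    sketch_model = tf.keras.Model(inputs=[obs_in], outputs=[policy_logits, value])
    sketch_model.summary()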
|
472094
|
from .sankey_definition import ProcessGroup, Waypoint, Bundle, Elsewhere
from .ordering import new_node_indices, Ordering
def elsewhere_bundles(sankey_definition, add_elsewhere_waypoints=True):
"""Find new bundles and waypoints needed, so that every process group has a
bundle to Elsewhere and a bundle from Elsewhere.
If `add_elsewhere_waypoints` is True (the default), then new Waypoints are
created for these Bundles to flow through. Otherwise, the Bundles are
created without Waypoints, which will result in them being rendered as short
"stubs" on the nodes.
"""
# Build set of existing bundles to/from elsewhere.
has_to_elsewhere = set()
has_from_elsewhere = set()
for bundle in sankey_definition.bundles.values():
assert not (bundle.source is Elsewhere and bundle.target is Elsewhere)
if bundle.target is Elsewhere:
# XXX they might have different flow_selections?
# if bundle.source in has_to_elsewhere:
# raise ValueError('duplicate bundles to elsewhere from {}'.format(bundle.source))
has_to_elsewhere.add(bundle.source)
if bundle.source is Elsewhere:
# XXX they might have different flow_selections?
# if bundle.target in has_from_elsewhere:
# raise ValueError('duplicate bundles from elsewhere to {}'.format(bundle.target))
has_from_elsewhere.add(bundle.target)
# For each process group, add new bundles to/from elsewhere if not already
# existing. Each one should have a waypoint of rank +/- 1.
R = len(sankey_definition.ordering.layers)
new_waypoints = {}
new_bundles = {}
# Add elsewhere bundles to all process groups if there are no bundles to start with
no_bundles = (len(sankey_definition.bundles) == 0)
for u, process_group in sankey_definition.nodes.items():
# Skip waypoints
if not isinstance(process_group, ProcessGroup):
continue
waypoint_title = '→' if process_group.direction == 'R' else '←'
d_rank = +1 if process_group.direction == 'R' else -1
r, _, _ = sankey_definition.ordering.indices(u)
if no_bundles or (0 <= r + d_rank < R and u not in has_to_elsewhere):
dummy_id = '__{}>'.format(u)
assert dummy_id not in sankey_definition.nodes
if add_elsewhere_waypoints:
new_waypoints[dummy_id] = Waypoint(
direction=process_group.direction,
title=waypoint_title)
new_bundles[dummy_id] = Bundle(u, Elsewhere, waypoints=[dummy_id])
else:
new_bundles[dummy_id] = Bundle(u, Elsewhere)
if no_bundles or (0 <= r - d_rank < R and u not in has_from_elsewhere):
dummy_id = '__>{}'.format(u)
assert dummy_id not in sankey_definition.nodes
if add_elsewhere_waypoints:
new_waypoints[dummy_id] = Waypoint(
direction=process_group.direction,
title=waypoint_title)
new_bundles[dummy_id] = Bundle(Elsewhere, u, waypoints=[dummy_id])
else:
new_bundles[dummy_id] = Bundle(Elsewhere, u)
return new_waypoints, new_bundles
def augment(G, new_waypoints, new_bundles):
"""Add waypoints for new_bundles to layered graph G.
"""
for v in new_waypoints.values():
assert isinstance(v, Waypoint)
# copy G and order
G = G.copy()
R = len(G.ordering.layers)
# XXX sorting makes order deterministic, which can affect final placement
# of waypoints
for k, bundle in sorted(new_bundles.items(), reverse=True):
if not bundle.waypoints:
continue # show Elsewhere flows as short "stubs" on nodes without a Waypoint
assert len(bundle.waypoints) == 1
w = bundle.waypoints[0]
if bundle.to_elsewhere:
u = G.nodes[bundle.source]['node']
r, _, _ = G.ordering.indices(bundle.source)
d_rank = +1 if u.direction == 'R' else -1
G.add_node(w, node=new_waypoints[w])
r, G.ordering = check_order_edges(G.ordering, r, d_rank)
this_rank = G.ordering.layers[r + d_rank]
prev_rank = G.ordering.layers[r]
G.add_edge(bundle.source, w, bundles=[k])
i, j = new_node_indices(G, this_rank, prev_rank, w, side='below')
G.ordering = G.ordering.insert(r + d_rank, i, j, w)
elif bundle.from_elsewhere:
u = G.nodes[bundle.target]['node']
r, _, _ = G.ordering.indices(bundle.target)
d_rank = +1 if u.direction == 'R' else -1
G.add_node(w, node=new_waypoints[w])
r, G.ordering = check_order_edges(G.ordering, r, -d_rank)
this_rank = G.ordering.layers[r - d_rank]
prev_rank = G.ordering.layers[r]
G.add_edge(w, bundle.target, bundles=[k])
i, j = new_node_indices(G, this_rank, prev_rank, w, side='below')
G.ordering = G.ordering.insert(r - d_rank, i, j, w)
else:
assert False, "Should not call augment() with non-elsewhere bundle"
return G
def check_order_edges(ordering, r, dr):
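    # Extend the ordering with empty layers when the requested rank r + dr falls
    # outside the existing layers; r is shifted when layers are prepended so that
    # callers can keep indexing relative to the returned value.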
layers = ordering.layers
nb = len(layers[0]) if layers else 1
if r + dr >= len(layers):
layers = layers + tuple(() for i in range(nb))
elif r + dr < 0:
layers = tuple(() for i in range(nb)) + layers
r += 1
return r, Ordering(layers)
|
472109
|
from textwrap import dedent
import attr
import xsimlab as xs
from xsimlab.formatting import (
add_attribute_section,
maybe_truncate,
pretty_print,
repr_process,
repr_model,
var_details,
wrap_indent,
)
from xsimlab.process import get_process_obj
def test_maybe_truncate():
assert maybe_truncate("test", 10) == "test"
assert maybe_truncate("longteststring", 10) == "longtes..."
def test_pretty_print():
assert pretty_print("test", 10) == "test" + " " * 6
def test_wrap_indent():
text = "line1\nline2"
expected = "-line1\n line2"
assert wrap_indent(text, start="-") == expected
expected = "line1\n line2"
assert wrap_indent(text, length=1) == expected
def test_var_details():
@xs.process
class P:
var = xs.variable(
dims=[(), "x"],
description="a variable",
global_name="global_var",
default=0,
groups=["g1", "g2"],
static=True,
attrs={"units": "m"},
encoding={"fill_value": -1},
)
var2 = xs.variable()
var_details_str = var_details(attr.fields(P).var)
expected = dedent(
"""\
A variable
Variable properties:
- type : ``variable``
- intent : ``in``
- global name : global_var
- dimensions : () or ('x',)
- groups : g1, g2
- default value : 0
- static : ``True``
Other attributes:
- units : m
Encoding options:
- fill_value : -1
"""
)
assert var_details_str == expected
@xs.process
class PP:
var = xs.foreign(P, "var2")
var_details_str = var_details(attr.fields(PP).var)
expected = dedent(
"""\
No description given
Variable properties:
- type : ``foreign``
- reference variable : :attr:`test_var_details.<locals>.P.var2`
- intent : ``in``
- dimensions : ()
"""
)
assert var_details_str == expected
@xs.process(autodoc=False)
class WithoutPlaceHolder:
"""My process"""
var1 = xs.variable(dims="x", description="a variable")
var2 = xs.variable()
@xs.process(autodoc=False)
class WithPlaceholder:
"""My process
{{attributes}}
"""
var1 = xs.variable(dims="x", description="a variable")
var2 = xs.variable()
def test_add_attribute_section():
# For testing, autodoc is set to False to avoid redundancy
expected = """My process
Attributes
----------
var1 : :class:`attr.Attribute`
A variable
Variable properties:
- type : ``variable``
- intent : ``in``
- dimensions : ('x',)
var2 : :class:`attr.Attribute`
No description given
Variable properties:
- type : ``variable``
- intent : ``in``
- dimensions : ()
"""
assert add_attribute_section(WithoutPlaceHolder).strip() == expected.strip()
assert add_attribute_section(WithPlaceholder).strip() == expected.strip()
def test_process_repr(
example_process_obj,
processes_with_state,
example_process_repr,
example_process_in_model_repr,
):
assert repr_process(example_process_obj) == example_process_repr
_, _, process_in_model = processes_with_state
assert repr_process(process_in_model) == example_process_in_model_repr
@xs.process
class Dummy:
def initialize(self):
pass
def run_step(self):
pass
expected = dedent(
"""\
<Dummy (xsimlab process)>
Variables:
*empty*
Simulation stages:
initialize
run_step
"""
)
assert repr_process(get_process_obj(Dummy)) == expected
def test_model_repr(simple_model, simple_model_repr):
assert repr_model(simple_model) == simple_model_repr
expected = "<xsimlab.Model (0 processes, 0 inputs)>\n"
assert repr(xs.Model({})) == expected
|
472114
|
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
plt.rcParams.update({
"font.family": "CMU Serif"
})
def compute():
group = pd.read_csv('../data/2021-02-12to2021-02-26_deduped.csv').groupby("ENTITY_NAME")
data = group.size().reset_index().values.tolist()
x = list(map(lambda x: x[0], data))
y = list(map(lambda y: y[1], data))
sort_idx = np.argsort(y)[::-1]
x = np.take(x, sort_idx).tolist()
y = np.take(y, sort_idx).tolist()
with open("../output/company_data.json", 'w') as f:
json.dump([x, y], f)
def plot():
with open("../output/company_data.json", 'r') as f:
data = json.load(f)
x = data[0]
y = data[1]
print(f"Count {len(y)}, Mean {float(np.mean(y)):0.3f}, Std {float(np.std(y)):0.3f}, Min {np.min(y)}, Max {np.max(y)}")
print("Top 20")
print(x[:20])
fig = plt.figure(figsize=(9, 4))
fig.suptitle("Company count distribution (test)")
plt.xlabel("Company")
plt.ylabel("Count (log)")
plt.bar(range(len(x)), height=y, width=0.05, color='#30B6C2', log=True)
fig.savefig('../output/company_data_test.pdf')
if __name__ == '__main__':
compute()
plot()
|
472136
|
import openmdao.api as om
from pycycle.maps.ncp01 import NCP01
import numpy as np
class StallCalcs(om.ExplicitComponent):
"""Component to compute the stall margins at constant speed (SMN) and constant flow (SMW)"""
def setup(self):
self.add_input('PR_SMN', val=1.0, units=None, desc='SMN pressure ratio')
self.add_input('PR_SMW', val=1.0, units=None, desc='SMW pressure ratio')
self.add_input('PR_actual', val=1.0, units=None, desc='Actual pressure ratio')
self.add_input('Wc_SMN', val=1.0, units='lbm/s', desc='SMN corrected flow')
self.add_input('Wc_actual', val=1.0, units='lbm/s', desc='Actual corrected flow')
self.add_output('SMN', val=0.0, units=None, desc='Stall margin at constant speed')
self.add_output('SMW', val=0.0, units=None, desc='Stall margin at constant flow')
self.declare_partials('SMN', ['PR_SMN','PR_actual','Wc_SMN','Wc_actual'])
self.declare_partials('SMW', ['PR_SMW','PR_actual'])
def compute(self, inputs, outputs):
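        # SMN: stall margin at constant corrected speed,
        #   ((Wc/Wc_stall) / (PR/PR_stall) - 1) * 100,
        # with the stall-line values read from the map at the current Nc.
        # SMW: stall margin at constant corrected flow, (PR_stall - PR) / PR * 100.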
outputs['SMN'] = ((inputs['Wc_actual']/inputs['Wc_SMN'])/(inputs['PR_actual']/inputs['PR_SMN'])-1)*100.
outputs['SMW'] = (inputs['PR_SMW']-inputs['PR_actual'])/inputs['PR_actual'] * 100
def compute_partials(self, inputs, J):
PR_actual = inputs['PR_actual']
PR_SMN = inputs['PR_SMN']
PR_SMW = inputs['PR_SMW']
Wc_actual = inputs['Wc_actual']
Wc_SMN = inputs['Wc_SMN']
wc_ratio = 100*Wc_actual/Wc_SMN
J['SMN', 'PR_SMN'] = wc_ratio / PR_actual
J['SMN', 'PR_actual'] = -wc_ratio * PR_SMN/PR_actual**2
PR_ratio = 100*PR_SMN/PR_actual
J['SMN', 'Wc_SMN'] = -Wc_actual/Wc_SMN**2 * PR_ratio
J['SMN', 'Wc_actual'] = PR_ratio /Wc_SMN
J['SMW', 'PR_SMW'] = 100 / PR_actual
J['SMW', 'PR_actual'] = - 100 * PR_SMW/PR_actual**2
class MapScalars(om.ExplicitComponent):
"""Compute map scalars in design mode"""
def setup(self):
self.add_input('Nc', val=2.0, units='rpm',
desc='Computed design corrected shaft speed')
self.add_input('NcMap', val=2.0, units='rpm',
desc='Design corrected shaft speed of map')
self.add_input('PR', val=2.0,
desc='User input design pressure ratio')
self.add_input('PRmap', val=2.0,
desc='Design pressure ratio of map')
self.add_input('eff', val=1.0,
desc='User input design adiabatic efficiency')
self.add_input('effMap', val=1.0,
desc='Design adiabatic efficiency of map')
self.add_input('Wc', val=2.0, units='lbm/s',
desc='Computed design corrected mass flow rate')
self.add_input('WcMap', val=2.0, units='lbm/s',
desc='Design corrected mass flow rate of map')
self.add_output('s_Nc', shape=1,
desc='Scalar for design corrected shaft speed')
self.add_output('s_PR', shape=1,
desc='Scalar for design pressure ratio')
self.add_output('s_eff', shape=1,
desc='Scalar for design adiabatic efficiency')
self.add_output('s_Wc', shape=1,
desc='Scalar for design corrected mass flow rate')
self.declare_partials('s_Nc', ['Nc', 'NcMap'])
self.declare_partials('s_PR', ['PR', 'PRmap'])
self.declare_partials('s_eff', ['eff', 'effMap'])
self.declare_partials('s_Wc', ['Wc', 'WcMap'])
def compute(self, inputs, outputs):
outputs['s_Nc'] = inputs['Nc'] / inputs['NcMap']
outputs['s_PR'] = (inputs['PR'] - 1) / (inputs['PRmap'] - 1)
outputs['s_eff'] = inputs['eff'] / inputs['effMap']
outputs['s_Wc'] = inputs['Wc'] / inputs['WcMap']
def compute_partials(self, inputs, J):
J['s_Nc', 'Nc'] = 1. / inputs['NcMap']
J['s_Nc', 'NcMap'] = -inputs['Nc'] / inputs['NcMap']**2
J['s_PR', 'PR'] = 1. / (inputs['PRmap'] - 1)
J['s_PR', 'PRmap'] = -(inputs['PR'] - 1.) / (inputs['PRmap'] - 1.)**2
J['s_eff', 'eff'] = 1. / inputs['effMap']
J['s_eff', 'effMap'] = -inputs['eff'] * inputs['effMap']**(-2)
J['s_Wc', 'Wc'] = 1. / inputs['WcMap']
J['s_Wc', 'WcMap'] = -inputs['Wc'] * inputs['WcMap']**(-2)
class ScaledMapValues(om.ExplicitComponent):
"""Computes scaled map values for off-design mode"""
def setup(self):
self.add_input('effMap', val=2.0, desc='Efficiency from unscaled map')
self.add_input('PRmap', val=2.0,
desc='Pressure ratio from unscaled map')
self.add_input('WcMap', val=2.0, units='lbm/s',
desc='Corrected mass flow rate from unscaled map')
self.add_input('NcMap', val=2.0, units='rpm',
desc='Corrected shaft speed from unscaled map')
self.add_input('s_PR', val=2.0,
desc='Scalar for design corrected pressure ratio')
self.add_input('s_eff', val=2.0,
desc='Scalar for design corrected adiabatic efficiency')
self.add_input('s_Wc', val=2.0,
desc='Scalar for design corrected mass flow rate')
self.add_input('s_Nc', val=2.0,
desc='Scalar for design corrected speed')
self.add_output('PR', shape=1, desc='Pressure ratio', lower=1.00001)
self.add_output('eff', shape=1, desc='Adiabatic efficiency')
self.add_output('Wc', shape=1,
desc='Corrected mass flow rate', units='lbm/s')
self.add_output('Nc', shape=1,
desc='Corrected shaft speed', units='rpm')
self.declare_partials('PR', ['PRmap', 's_PR'])
self.declare_partials('eff', ['effMap', 's_eff'])
self.declare_partials('Wc', ['WcMap', 's_Wc'])
self.declare_partials('Nc', ['NcMap', 's_Nc'])
def compute(self, inputs, outputs):
outputs['PR'] = (inputs['PRmap'] - 1.) * inputs['s_PR'] + 1.
outputs['eff'] = inputs['effMap'] * inputs['s_eff']
outputs['Wc'] = inputs['WcMap'] * inputs['s_Wc']
outputs['Nc'] = inputs['NcMap'] * inputs['s_Nc']
def compute_partials(self, inputs, J):
J['PR', 'PRmap'] = inputs['s_PR']
J['PR', 's_PR'] = inputs['PRmap'] - 1.
J['eff', 'effMap'] = inputs['s_eff']
J['eff', 's_eff'] = inputs['effMap']
J['Wc', 'WcMap'] = inputs['s_Wc']
J['Wc', 's_Wc'] = inputs['WcMap']
J['Nc', 'NcMap'] = inputs['s_Nc']
J['Nc', 's_Nc'] = inputs['NcMap']
class CompressorMap(om.Group):
"""Runs design and off-design mode compressor map calculations"""
def initialize(self):
self.options.declare('map_data', default=NCP01)
self.options.declare('design', default=True)
self.options.declare('interp_method', default='slinear')
self.options.declare('extrap', default=False)
def setup(self):
map_data = self.options['map_data']
design = self.options['design']
method = self.options['interp_method']
extrap = self.options['extrap']
params = map_data.param_data
outputs = map_data.output_data
# Define map which will be used
readmap = om.MetaModelStructuredComp(method=method, extrapolate=extrap)
for p in params:
readmap.add_input(p['name'], val=p['default'], units=p['units'], training_data=p['values'])
for o in outputs:
readmap.add_output(o['name'], val=o['default'], units=o['units'], training_data=o['values'])
# Create instance of map for evaluating actual operating point
if design:
# In design mode, operating point specified by default values for RlineMap, NcMap and alphaMap
self.set_input_defaults('RlineMap', val=map_data.defaults['RlineMap'], units=None)
self.set_input_defaults('NcMap', val=map_data.defaults['NcMap'], units='rpm')
# Evaluate map using design point values
self.add_subsystem('map', readmap, promotes_inputs=['RlineMap', 'NcMap', 'alphaMap'],
promotes_outputs=['effMap', 'PRmap', 'WcMap'])
# Compute map scalars based on input PR, eff, Nc and Wc as well as unscaled map values
self.add_subsystem('scalars', MapScalars(),
promotes_inputs=['PR', 'eff', 'Nc', 'Wc', 'NcMap', 'effMap', 'PRmap', 'WcMap'],
promotes_outputs=['s_Nc', 's_PR', 's_eff', 's_Wc'])
else:
# In off-design mode, RlineMap, NcMap and alphaMap are input to map
self.add_subsystem('map', readmap, promotes_inputs=['RlineMap', 'NcMap', 'alphaMap'],
promotes_outputs=['effMap', 'PRmap', 'WcMap'])
            # Compute scaled map outputs based on input scalars and unscaled map values
self.add_subsystem('scaledOutput', ScaledMapValues(),
promotes_inputs=['s_PR', 's_eff', 's_Wc', 's_Nc', 'NcMap', 'effMap', 'PRmap', 'WcMap'],
promotes_outputs=['PR', 'eff'])
# Use balance component to vary NcMap and RlineMap to match incoming corrected flow and speed
map_bal = om.BalanceComp()
map_bal.add_balance('NcMap', val=map_data.defaults['NcMap'], units='rpm', eq_units='rpm')
map_bal.add_balance('RlineMap', val=map_data.defaults['RlineMap'], units=None,
eq_units='lbm/s', lower=map_data.RlineStall)
self.add_subsystem(name='map_bal', subsys=map_bal,
promotes_inputs=[('lhs:NcMap','Nc'),('lhs:RlineMap','Wc')],
promotes_outputs=['NcMap', 'RlineMap'])
self.connect('scaledOutput.Nc','map_bal.rhs:NcMap')
self.connect('scaledOutput.Wc','map_bal.rhs:RlineMap')
# Define the Rline corresponding to stall
RlineStall = om.IndepVarComp()
RlineStall.add_output('RlineStall', val=map_data.RlineStall, units=None)
self.add_subsystem('stall_R', subsys=RlineStall)
# Evaluate map for the constant speed stall margin (SMN)
SMN_map = om.MetaModelStructuredComp(method=method, extrapolate=extrap)
for p in params:
SMN_map.add_input(p['name'], val=p['default'], units=p['units'], training_data=p['values'])
for o in outputs:
SMN_map.add_output(o['name'], val=o['default'], units=o['units'], training_data=o['values'])
self.add_subsystem('SMN_map', SMN_map, promotes_inputs=['NcMap', 'alphaMap'])
self.connect('stall_R.RlineStall', 'SMN_map.RlineMap')
            # Evaluate map for the constant flow stall margin (SMW)
SMW_map = om.MetaModelStructuredComp(method=method, extrapolate=extrap)
for p in params:
SMW_map.add_input(p['name'], val=p['default'], units=p['units'], training_data=p['values'])
for o in outputs:
SMW_map.add_output(o['name'], val=o['default'], units=o['units'], training_data=o['values'])
self.add_subsystem('SMW_map', SMW_map, promotes_inputs=['alphaMap'])
self.connect('stall_R.RlineStall', 'SMW_map.RlineMap')
# Use balance to vary NcMap on SMW map to hold corrected flow constant
SMW_bal = om.BalanceComp()
SMW_bal.add_balance('NcMap', val=map_data.defaults['NcMap'], units='rpm', eq_units='lbm/s')
self.add_subsystem(name='SMW_bal', subsys=SMW_bal)
self.connect('SMW_bal.NcMap', 'SMW_map.NcMap')
self.connect('WcMap','SMW_bal.lhs:NcMap')
self.connect('SMW_map.WcMap','SMW_bal.rhs:NcMap')
# Compute the stall margins
self.add_subsystem('stall_margins', StallCalcs(),
promotes_inputs=[('PR_actual','PRmap'),('Wc_actual','WcMap')],
promotes_outputs=['SMN','SMW'])
self.connect('SMN_map.PRmap', 'stall_margins.PR_SMN')
self.connect('SMW_map.PRmap', 'stall_margins.PR_SMW')
self.connect('SMN_map.WcMap', 'stall_margins.Wc_SMN')
if __name__ == "__main__":
from pycycle.maps.ncp01 import NCP01
p1 = om.Problem()
ivc = p1.model.add_subsystem(
'ivc', om.IndepVarComp(), promotes=['*'])
# Design variables
ivc.add_output('alphaMap', 0.0)
ivc.add_output('PR', 2.0)
ivc.add_output('Nc', 1000.0, units='rpm')
ivc.add_output('eff', .9)
ivc.add_output('Wc', 3000., units='lbm/s')
# Off-design variables
# ivc.add_output('alphaMap', 0.0)
# ivc.add_output('Nc', 1000.0, units='rpm')
# ivc.add_output('Wc', 3000., units='lbm/s')
# ivc.add_output('s_Nc', 1.0)
# ivc.add_output('s_Wc', 1.0)
# ivc.add_output('s_PR', 1.0)
# ivc.add_output('s_eff', 1.0)
p1.model.add_subsystem('map', CompressorMap(
map_data=NCP01, design=True), promotes=['*'])
p1.setup()
# exit()
p1.run_model()
p1.check_partials()
# print('s_PRdes: ', p1['s_PRdes'])
# print('s_effDes: ', p1['s_effDes'])
# print('s_NcDes: ', p1['s_NcDes'])
# print('s_WcDes: ', p1['s_WcDes'])
# print(p1['shaftNc.NcMap'])
# print(p1['readMap.PRmap'])
# print(p1['readMap.effMap'])
# print(p1['readMap.WcMap'])
# print('Eff: ', p1['eff'])
# print('PR: ', p1['PR'])
# print('Wc: ', p1['Wc'])
# p2 = Problem()
# p2.model = CompressorMap(map_data=NCP01, design=False)
# p2.setup()
# p2['s_PRdes'] = 2.0
# p2['s_NcDes'] = 1000.0
# p2['s_effDes'] = 0.983606557377
# p2['s_WcDes'] = 0.937500146484
# p2['Nc'] = 900.0
# p2['RlineMap'] = 2.0
# p2['alphaMap'] = 0.0
# p2.run_model()
# print('s_PRdes: ', p2['s_PRdes'])
# print('s_effDes: ', p2['s_effDes'])
# print('s_NcDes: ', p2['s_NcDes'])
# print('s_WcDes: ', p2['s_WcDes'])
# print(p2['shaftNc.NcMap'])
# print(p2['readMap.PRmap'])
# print(p2['readMap.effMap'])
# print(p2['readMap.WcMap'])
# print('Eff: ', p2['eff'])
# print('PR: ', p2['PR'])
# print('Wc: ', p2['Wc'])
|
472215
|
import matplotlib.pyplot as plt
import numpy as np
from math import sin
t = np.arange(-6.0, 6.0, 0.1)
sinData = np.array([sin(xi) for xi in t])
linearData = np.array([xi for xi in t])
deltaData = np.array([xi - sin(xi) for xi in t])
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.plot(t, sinData, label=r"$y=\sin(x)$")
plt.plot(t, linearData, label="$y=x$")
plt.plot(t, deltaData, label=r"Difference between $\sin(x)$ and $x$")
plt.legend()
plt.show()
|
472220
|
import pytest
import sys
import os
present_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.abspath(os.path.join(present_dir, '..', '..'))
sys.path.insert(0, root_dir)
from behave_webdriver.transformers import FormatTransformer
|
472231
|
import numpy
import afnumpy
import numbers
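# Each scalar type below wraps a numpy scalar type and overrides __new__ so that
# afnumpy arrays are converted via astype(cls), plain numbers fall through to the
# corresponding numpy constructor, and anything else is first wrapped in an
# afnumpy array and then converted with astype(cls).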
class int64(numpy.int64):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.int64(x)
else:
return afnumpy.array(x).astype(cls)
class int32(numpy.int32):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.int32(x)
else:
return afnumpy.array(x).astype(cls)
class int16(numpy.intc):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.int16(x)
else:
return afnumpy.array(x).astype(cls)
class int8(numpy.int8):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.int8(x)
else:
return afnumpy.array(x).astype(cls)
class uint64(numpy.uint64):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.uint64(x)
else:
return afnumpy.array(x).astype(cls)
class uint32(numpy.uint32):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.uint32(x)
else:
return afnumpy.array(x).astype(cls)
class uint16(numpy.uint16):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.uint16(x)
else:
return afnumpy.array(x).astype(cls)
class uint8(numpy.uint8):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.uint8(x)
else:
return afnumpy.array(x).astype(cls)
class intc(numpy.intc):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.intc(x)
else:
return afnumpy.array(x).astype(cls)
class intp(numpy.intp):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.intp(x)
else:
return afnumpy.array(x).astype(cls)
class int_(numpy.int_):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.int_(x)
else:
return afnumpy.array(x).astype(cls)
class bool_(numpy.bool_):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.bool_(x)
else:
return afnumpy.array(x).astype(cls)
class float_(numpy.float_):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.float_(x)
else:
return afnumpy.array(x).astype(cls)
class float16(numpy.float16):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.float16(x)
else:
return afnumpy.array(x).astype(cls)
class float32(numpy.float32):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.float32(x)
else:
return afnumpy.array(x).astype(cls)
class float64(numpy.float64):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.float64(x)
else:
return afnumpy.array(x).astype(cls)
# removed for now
#class float128(numpy.float64):
# def __new__(cls, x=0):
# if isinstance(x, afnumpy.ndarray):
# raise NotImplementedError('Arrayfire does not support 128 bit floats')
# elif isinstance(x, numbers.Number):
# return numpy.float64(x)
# else:
# raise NotImplementedError('Arrayfire does not support 128 bit floats')
class complex_(numpy.complex_):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.complex_(x)
else:
return afnumpy.array(x).astype(cls)
class complex64(numpy.complex64):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.complex64(x)
else:
return afnumpy.array(x).astype(cls)
class complex128(numpy.complex128):
def __new__(cls, x=0):
if isinstance(x, afnumpy.ndarray):
return x.astype(cls)
elif isinstance(x, numbers.Number):
return numpy.complex128(x)
else:
return afnumpy.array(x).astype(cls)
float = float
complex = complex
bool = bool
int = int
long = int
bool8 = bool_
promote_types = numpy.promote_types
|
472280
|
import os
import sys
import argparse
import binascii
parser = argparse.ArgumentParser()
parser.add_argument("echo")
args = parser.parse_args()
f = open(args.echo, 'rb')
ft = f.read()
ft_head = ft[:64]
str_num = ord(ft_head[44])
#ft_body = ft[64 + 52 * str_num:]
print "FILE LEN = ", len(ft)
for i in range(str_num):
ft_info = ft[64 + 52 * i: 64 + 52 * ( i + 1 )]
ft_info_type = ft[64 + 52 * i: 65 + 52 * i ].encode('hex')
ft_info_type_ver_1 = int(binascii.b2a_hex(ft[71 + 52 * i: 72 + 52 * i ]),16)
ft_info_type_ver_2 = int(binascii.b2a_hex(ft[70 + 52 * i: 71 + 52 * i ]),16)
ft_info_type_ver_3 = int(binascii.b2a_hex(ft[69 + 52 * i: 70 + 52 * i ]),16)
ft_info_type_ver_4 = int(binascii.b2a_hex(ft[68 + 52 * i: 69 + 52 * i ]),16)
start_addr = 0
for j in range(8,12):
start_addr += ord(ft_info[j]) * ( 256 ** ( j - 8 ) )
ft_ilen = 0
for j in range(12,16):
ft_ilen += ord(ft_info[j]) * ( 256 ** ( j - 12 ) )
ft_ibody = ft[start_addr:start_addr + ft_ilen]
print "---", i, " ---"
print "WRITE TO ", ft_info_type + '.hex'
print "START: ", start_addr
print "FINISH: ", start_addr + ft_ilen - 1
print "LEN: ", ft_ilen
print "TYPE: ", ft_info_type
print "VERSION: ", ft_info_type_ver_1,".",ft_info_type_ver_2,".",ft_info_type_ver_3,".",ft_info_type_ver_4
fi = open( ft_info_type + '_(' + str(ft_info_type_ver_1) + '.' + str(ft_info_type_ver_2)+'.'+ str(ft_info_type_ver_3) + '.' + str(ft_info_type_ver_4)+').hex', 'wb')
fi.write(ft_ibody)
fi.close()
|
472294
|
from django.conf import settings
from posthog.redis import get_client
def reload_plugins_on_workers():
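    # Publish a reload message on the configured pub/sub channel so that
    # subscribed plugin workers pick up plugin changes.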
get_client().publish(settings.PLUGINS_RELOAD_PUBSUB_CHANNEL, "reload!")
|
472299
|
from keras.utils import to_categorical
from keras.models import model_from_json
from music21 import corpus, chord, note, pitch, interval
from config import maxChorales
import os.path
import numpy as np
def getChoralesIterator():
iterator = corpus.chorales.Iterator()
if maxChorales > iterator.highestNumber:
raise Exception('Not that many chorales')
return iterator
# loads chorales into flat array (see parseToFlatArray for definition)
def loadChorales():
notes = []
iterator = getChoralesIterator()
# load notes of chorales
for chorale in iterator[1:maxChorales]: # iterator is 1-based
transpose_to_C_A(chorale.parts[0])
notes = notes + parseToFlatArray(chorale.parts[0])
notes.append((['end'], 0.0)) # mark the end of the piece
return notes
# loads chorales as above along with the key signature of each one
def loadChoralesWithKeys():
notes = []
iterator = getChoralesIterator()
orig_keys = []
# load notes of chorales
for chorale in iterator[1:maxChorales]: # iterator is 1-based
k = chorale.parts[0].analyze('key')
orig_keys.append(k.tonicPitchNameWithCase)
transpose_to_C_A(chorale.parts[0])
notes = notes + parseToFlatArray(chorale.parts[0])
notes.append((['end'], 0.0)) # mark the end of the piece
return notes, orig_keys
# if the given chord is a note (singleton chord) then returns a list of itself, otherwise concatenates all notes in a list
def chordToNotes(notes):
    if isinstance(notes, chord.Chord):
        return [str(x.midi) for x in notes.pitches]
elif isinstance(notes, note.Note):
return [str(notes.pitch.midi)] # use midi naming to avoid problems with accidentals (same note different name due to accidental)
else: # this is a rest
return ['rest']
# transform a midi score to flat array (=> array of notes and their durations)
def parseToFlatArray(score):
notes = []
for _note in score.flat.notesAndRests:
# when the note is tied, add the duration to the last note instead of creating a new one
if (_note.tie is not None) and (_note.tie.type == 'continue' or _note.tie.type == 'stop'):
(n, dur) = notes[-1]
notes[-1] = (n, dur + _note.duration.quarterLength)
else:
notes.append((chordToNotes(_note), _note.duration.quarterLength))
return notes
# transposes a stream object (part) from its given basePitch to the targetPitch (transform happens in place)
def transpose_to(stream, basePitch, targetPitch):
i = interval.Interval(pitch.Pitch(basePitch), pitch.Pitch(targetPitch))
return stream.transpose(i, inPlace=True)
# create notes vocabulary from A2 up to (but not including) B6, with midi numbers and pitch names
def createPitchVocabulary():
n = note.Note('A2')
note_vocab = []
note_names_vocab = []
while n.pitch.nameWithOctave != "B6":
note_vocab.append(str(n.pitch.midi))
note_names_vocab.append(n.pitch.nameWithOctave)
n.transpose(1, inPlace=True)
# append the special marks for rest and end of piece
note_vocab.append('rest')
note_vocab.append('end')
return note_vocab, note_names_vocab
# transforms a score to C major when the score key is major and to A minor when the score key is minor
def transpose_to_C_A(score):
k = score.analyze('key')
if k.mode == 'major':
transpose_to(score, k.tonic.name, 'C')
elif k.mode == 'minor':
transpose_to(score, k.tonic.name, 'A')
# creates note vocabularies and categorical vocabularies
def createPitchVocabularies():
note_vocab, note_names_vocab = createPitchVocabulary()
note_vocab_categorical = to_categorical(range(len(note_vocab)))
return note_vocab, note_names_vocab, note_vocab_categorical
def createPitchSpecificVocabularies(pitches):
note_vocab, note_names_vocab = createPitchVocabularySpecific(pitches)
note_vocab_categorical = to_categorical(range(len(note_vocab)))
return note_vocab, note_names_vocab, note_vocab_categorical
def createPitchVocabularySpecific(pitches):
distinct = np.unique(pitches)
note_vocab = []
note_names_vocab = []
for n in distinct:
if n != 'rest' and n != '_' and n != 'end':
note_vocab.append(n)
else:
note_vocab.append(n)
note_names_vocab.append(n)
return note_vocab, note_names_vocab
# create a vocabulary from the given durations
def createDurationVocabularySpecific(durations):
duration_vocab = np.unique(durations)
return duration_vocab
# load a saved model and its weights
def loadModelAndWeights(model_file, weights_file):
    if not os.path.exists(model_file):
        raise Exception("model file not found")
    if not os.path.exists(weights_file):
        raise Exception("weights file not found")
_file = open(model_file, 'r')
json = _file.read()
_file.close()
model = model_from_json(json)
model.load_weights(weights_file)
return model
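# Hedged usage sketch (added for illustration; not part of the original module).
# It runs the transpose/flatten helpers above on a single chorale from the
# music21 corpus; 'bach/bwv66.6' is only an example corpus path and any chorale
# with a soprano part would work the same way.
if __name__ == '__main__':
    chorale = corpus.parse('bach/bwv66.6')
    soprano = chorale.parts[0]
    print('Original key:', soprano.analyze('key'))
    transpose_to_C_A(soprano)  # transposes in place to C major or A minor
    print('Transposed key:', soprano.analyze('key'))
    print('First five (notes, duration) entries:', parseToFlatArray(soprano)[:5])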
|
472307
|
from __future__ import print_function
import os
import re
import sys
import site
import shutil
from six import add_metaclass
from distutils.sysconfig import get_python_lib
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install as install_default
def get_version():
v = ''
with open('VERSION', 'r') as lines:
v = list(lines)[0]
return v
def prebuild():
suffix = ''
if sys.platform == 'linux' or sys.platform == 'linux2':
suffix = '.so'
elif sys.platform == 'darwin':
suffix = '.so'
elif sys.platform == 'win32':
suffix = '.pyd'
else:
print('Sorry, unsupported OS: {}'.format(sys.platform))
return
out_dir = 'build/lib'
    out_format = re.compile(r'pylime\.?([a-zA-Z0-9]+)?(-[a-zA-Z0-9_]+)?(-[a-zA-Z0-9\-_]+)?' + suffix)
out_file = None
for f in os.listdir(out_dir):
m = out_format.search(f)
if m is not None:
out_file = m.group(0)
break
if out_file is None:
print('Please build the library with CMake first.')
print('If you did it, please make sure the path of the shared library.')
raise Exception('Installation failed!!')
return (get_python_lib(), [os.path.join(out_dir, out_file)])
# Install
class install(install_default):
def run(self):
install_default.run(self)
# Setup
setup(
cmdclass={ 'install' : install },
name='pylime',
version=get_version(),
author='tatsy',
author_email='<EMAIL>',
url='https://github.com/tatsy/lime.git',
description='Library for IMage Editing.',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: C++',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
data_files=[prebuild()]
)
|
472342
|
import logging
import os
import time
DEBUG = False
API_URL_PREFIX = "/anuvaad-etl/document-processor"
HOST = '0.0.0.0'
PORT = 5001
#BASE_DIR = '/opt/share/nginx/upload'
#download_folder = '/opt/share/nginx/upload'
BASE_DIR = 'upload'
download_folder = 'upload'
TASK_STAT = 'BLOCK-SEGMENTER'
ENABLE_CORS = False
# kafka dev
# input_topic_default = 'anuvaad-dp-tools-block-segmenter-input-v1'
# input_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_BLOCK_SEGMENTER_INPUT'
# input_topic = os.environ.get(input_topic_identifier, input_topic_default)
# output_topic_default = 'anuvaad-dp-tools-block-segmenter-output-v1'
# output_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_BLOCK_SEGMENTER_OUTPUT'
# output_topic = os.environ.get(output_topic_identifier, output_topic_default)
# kf_local_server = 'localhost:9092'
# kafka_ip_host = 'KAFKA_BOOTSTRAP_SERVER_HOST'
# bootstrap_server = os.environ.get(kafka_ip_host, kf_local_server)
# CONSUMER_GROUP_default = 'anuvaad-etl-bs-consumer-group'
# CONSUMER_GROUP_identifier = 'ANUVAAD_ETL_BS_CONSUMER_GROUP_V1'
# CONSUMER_GROUP = os.environ.get(CONSUMER_GROUP_identifier,CONSUMER_GROUP_default)
# kafka stage
input_topic_default = 'anuvaad-dp-tools-block-segmenter-input-satge'
input_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_BLOCK_SEGMENTER_INPUT_STAGE'
input_topic = os.environ.get(input_topic_identifier, input_topic_default)
output_topic_default = 'anuvaad-dp-tools-block-segmenter-output-stage'
output_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_BLOCK_SEGMENTER_OUTPUT_STAGE'
output_topic = os.environ.get(output_topic_identifier, output_topic_default)
kf_local_server = 'localhost:9092'
kafka_ip_host = 'KAFKA_BOOTSTRAP_SERVER_HOST'
bootstrap_server = os.environ.get(kafka_ip_host, kf_local_server)
CONSUMER_GROUP_default = 'anuvaad-etl-bs-consumer-group-stage'
CONSUMER_GROUP_identifier = 'ANUVAAD_ETL_BS_CONSUMER_GROUP_STAGE'
CONSUMER_GROUP = os.environ.get(CONSUMER_GROUP_identifier,CONSUMER_GROUP_default)
#folders and file path
BREAK_BLOCKS = True
DEVICE='cpu'
IMAGE_SIZE=1984
WEIGHT_PATH ="./src/utilities/yolov5/weights/exp14.pt"
CONF_THRESHOLD = 0.1
IOU_THRESHOLD = 0.45
logging.basicConfig(
filename=os.getenv("SERVICE_LOG", "server.log"),
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s \
pid:%(process)s module:%(module)s %(message)s",
datefmt="%d/%m/%y %H:%M:%S",
)
DOCUMENT_CONFIGS = {
'LANGUAGE_TYPE': 'eng',
'HORI_BLOCK_WDTH_DIFF_PERC': 0.85,
'SUPERSCRIPT_HEIGHT_DIFFERENCE': 7.0,
'HORI_SPACE_TOO_CLOSE': 10.0,
'VERTICAL_SPACE_TOO_CLOSE': 5.0,
'AVERAGE_VERTICAL_SPACE': 12.0,
'LEFT_OR_RIGHT_ALIGNMENT_MARGIN': 20.0
}
BLOCK_CONFIGS = {
"right_margin_threshold": 0.10, "left_margin_threshold": 0.10,
"right_break_threshold": 0.06, "left_break_threshold": 0.05,
"header_left_threshold": 0.70, "header_right_threshold": 0.85,
"space_multiply_factor": 2.1
}
BLOCK_BREAK_CONFIG = {'margin_support': 2, 'width_threshold': 0.65}
TABLE_CONFIGS = {
"remove_background" : True ,
"background_threshold" : 50,
"extract_by" : 'starting_point'
}
PREPROCESS_CONFIGS = {'header_cut':0.15 , 'footer_cut' :0.85 ,'repeat_threshold' :0.95 ,'underline_threshold':0.25, 'margin':10 }
DROP_TEXT = ['SUPERSCRIPT']
|
472359
|
def logging_finish(self):
from .results import result_todf
# save the results
self = result_todf(self)
return self
|
472389
|
from django.core.management.base import BaseCommand, CommandError
import logging
from django.core.mail import send_mail
from vital.models import VLAB_User
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Command to email common notifications to all relevant users"
def add_arguments(self, parser):
parser.add_argument(
'-s', '--subject',
action='store',
dest='subject',
help='specify email subject',
required=True
)
parser.add_argument(
'-b', '--body',
action='store',
dest='body',
help='specify email body',
)
def handle(self, *args, **options):
to_email = []
subject = options['subject']
body = options['body']
for email in VLAB_User.objects.all():
to_email.append(email)
try:
logger.debug('Generating notification mails for users')
send_mail(subject, 'Hi, ' + body, '<EMAIL>', to_email, fail_silently=False)
except Exception as e:
logger.error(str(e))
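# Usage sketch (assumed command file name "email_notifications.py"; adjust to whatever
# this management command is actually saved as):
#   python manage.py email_notifications -s "Maintenance window" -b "the lab will be offline tonight."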
|
472393
|
from datetime import datetime
from functools import wraps
def dynamic(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
for key, value in fn.__annotations__.items():
try:
kwargs[key] = value()
except TypeError:
pass
return fn(*args, **kwargs)
return wrapper
# Example
@dynamic
def printNow(l:list, now:datetime.now):
l.append(len(l))
print('List:', l, ' id:', id(l))
print('Now:', now)
# Test
for i in range(3):
printNow()
print()
|
472426
|
import enum
import functools
import json
from electrum_gui.common.basic import exceptions
from electrum_gui.common.basic.functional import json_encoders
@enum.unique
class Version(enum.IntEnum):
V1 = 1 # Legacy
V2 = 2 # Unify return values and exceptions
V3 = 3 # Single kwarg params as a json str
SUPPORTED_VERSIONS = list(Version)
@enum.unique
class ResultStatus(enum.IntEnum):
SUCCESS = 0
FAILED = 1
def api_entry(force_version: int = None):
def middle(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
def _filter_params(err=None):
error_msg = {"fun_name": func.__name__}
error_msg.update(
{k: v for k, v in kwargs.items() if k not in ["seed", "password", "mnemonic", "mnemonics"]}
)
if err is not None:
error_msg.update({"err_msg_detail": "%s:%s" % (err.__class__.__name__, err.args)})
return error_msg
pre_run_failure_msg = None
api_version = kwargs.pop("api_version", Version.V1)
api_version = force_version or api_version
if api_version == Version.V1: # Legacy
return func(*args, **kwargs)
# Version.V2 onward
if api_version not in SUPPORTED_VERSIONS:
pre_run_failure_msg = "Unsupported API version."
elif api_version >= Version.V3: # Load (and check) params
params = kwargs.pop("params", None)
# TODO:
# - check len(args) <= 1, only for the main resource id
# - check kwargs is now empty, all the parameters should be put in params
if params is not None:
try:
params = json.loads(params)
if not isinstance(params, dict):
raise ValueError
# TODO: add param validation
except ValueError:
pre_run_failure_msg = "Failed to load params."
else:
kwargs["params"] = params
else:
kwargs["params"] = {}
out = {
"status": ResultStatus.FAILED,
"api_version": api_version,
"other_info": "",
}
if pre_run_failure_msg is None:
try:
result = func(*args, **kwargs)
except exceptions.OneKeyException as e:
out.update({"err_msg_key": e.key, "other_info": e.other_info})
except Exception as e:
out.update(
{
"err_msg_key": exceptions.OneKeyException.key,
"low_level_error": _filter_params(e),
}
)
else:
out.update({"status": ResultStatus.SUCCESS, "info": result})
else:
out.update({"err_msg_key": exceptions.OneKeyException.key, "low_level_error": pre_run_failure_msg})
return json.dumps(out, cls=json_encoders.DecimalEncoder)
return wrapper
return middle
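# Minimal usage sketch (hypothetical function, not part of this module). With
# force_version=Version.V3 the caller passes its arguments as a JSON string under
# "params" and always receives a JSON envelope carrying "status"/"info":
#
#   @api_entry(force_version=Version.V3)
#   def get_balance(params):
#       return {"address": params["address"], "balance": 0}
#
#   get_balance(params='{"address": "0xabc"}')
#   # -> '{"status": 0, "api_version": 3, "other_info": "", "info": {"address": "0xabc", "balance": 0}}'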
|
472428
|
import wx
APP_ID = u'TestJumpListApp'
def set_app_id():
import ctypes
try:
SetAppID = ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID
except AttributeError:
return
SetAppID(APP_ID)
def main():
set_app_id()
app = wx.App()
import cgui
assert cgui.SetUpJumpList(APP_ID, [(u'test', u'bc', u're', 4)])
if __name__ == '__main__':
main()
|
472434
|
import os
import json
import urllib.request
from .rpsls import RPSLS
from .rpsls_dto import get_rpsls_dto_json
def get_pick_predicted(user_name):
queried_url = _get_queried_url(user_name)
response = _get_response_from_predictor(queried_url)
predicted_pick = RPSLS[response['prediction'].lower()]
return get_rpsls_dto_json(predicted_pick)
def _get_queried_url(user_name):
predictor_url = os.getenv('PREDICTOR_URL')
return f'{predictor_url}&humanPlayerName={user_name}'
def _get_response_from_predictor(queried_url):
req = urllib.request.urlopen(queried_url)
encoding = req.info().get_content_charset('utf-8')
data = req.read()
return json.loads(data.decode(encoding))
|
472464
|
import time
from tqdm import tqdm
import torch
import numpy as np
# assignments: V x D
# D: num_clusterings, V: num_total_set
# W: num_candidates, P: num_clustering_pairs, C: num_centroids
class EfficientMI:
""" this implementation requires the users to use the same ncentroids for all clusterings """
# N: (P x C X C), a: (P x C), b: (P x C), n: (P)
# WN: (W x P x C X C), Wa: (W x P x C), Wb: (W x P x C), wn: (W x P)
def __init__(self, assignments, measure_type='mutual_info',
average_method='arithmetic', ncentroids=20, **kwargs):
self.average_method = average_method.lower()
self.ncentroids = ncentroids
self.assignments = torch.from_numpy(assignments).to(torch.long) # V x D
self.eps = np.finfo('float64').eps
def init(self, clustering_combinations, candidates):
self.combinations = clustering_combinations
self.init_cache()
self.init_candidates(candidates)
def init_cache(self):
P = len(self.combinations)
C = self.ncentroids
N = torch.full((P, C, C), self.eps)
a = N.sum(dim=1)
b = N.sum(dim=2)
n = a.sum(dim=-1)
self.cache = {'N': N, 'a': a, 'b': b, 'n': n}
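        # Cache layout: for every clustering pair p, N[p] is the C x C contingency table of
        # co-assignments over the currently selected set, a[p] and b[p] are its marginals
        # (sums over dim 1 and dim 2 respectively) and n[p] is the total count. Seeding with
        # eps keeps the logarithms in calc_MI finite before any sample has been added.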
def get_assignments(self, candidates):
candidates = torch.LongTensor(candidates) # W
assignments = self.assignments # V x D
assignments = assignments.index_select(dim=0, index=candidates)
return assignments
def init_candidates(self, candidates):
self.candidate_ids = torch.LongTensor(candidates)
assignments = self.get_assignments(candidates)
C = self.ncentroids
assignments = self.one_hot(assignments, C) # W x D x C
pair_ids = torch.LongTensor(self.combinations) # P x 2
p1 = self.gather_pairs(assignments, pair_ids[:, 0])
p2 = self.gather_pairs(assignments, pair_ids[:, 1])
N = torch.einsum('wpa,wpb->wpab', p1, p2) # W x P x C x C
a = N.sum(2)
b = N.sum(3)
n = b.sum(-1)
self.candidates = {'N': N, 'a': a, 'b': b, 'n': n}
@staticmethod
def gather_pairs(assignments, idx):
W, _, C = assignments.shape
idx = idx.unsqueeze(0).unsqueeze(-1)
idx = idx.repeat(W, 1, C)
return assignments.gather(dim=1, index=idx) # W x P x C
@staticmethod
def one_hot(x, N, default=0, value=1):
dtype = torch.float
x_onehot = torch.full((*x.shape, N), default, dtype=dtype)
value = torch.full(x_onehot.shape, value, dtype=dtype)
x_onehot.scatter_(dim=-1, index=x.unsqueeze(-1), src=value)
return x_onehot
def calc_score(self, *args, **kwargs):
scores = self._calc_score(*args, **kwargs)
scores = scores.mean(dim=-1) # W
score, idx = scores.max(dim=0)
return score.item(), idx.item()
def _calc_score(self, *args, **kwargs):
return self.calc_MI(*args, **kwargs)
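    # calc_MI evaluates, for each candidate w and clustering pair p, the plug-in mutual
    # information of the updated contingency table:
    #   I = sum_{i,j} (N_ij / n) * log( (N_ij * n) / (marginal_i * marginal_j) )
    # calc_score then averages it over pairs and keeps the best-scoring candidate.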
def calc_MI(self, last):
N = last['N'] # W x P x C x C
a = last['a'].unsqueeze(2) # W x P x 1 x C
b = last['b'].unsqueeze(3)
n = last['n'].unsqueeze(-1).unsqueeze(-1)
scores = (N / n * (N.log() + n.log() - (a.log() + b.log()))).sum([2, 3]) # W x P
return scores
def get_last(self, candidates=None):
if candidates is None:
candidates = self.candidates
last = {key: self.cache[key].unsqueeze(0) + candidates[key]
for key in candidates.keys()}
return last
def update_cache(self, last, idx):
for key in last.keys():
self.cache[key] = last[key][idx]
def remove_idx_all(self, idx):
self.remove_idx('candidate_ids', idx)
self.remove_idx('candidates', idx)
def calc_measure(self):
last = self.get_last()
score, idx = self.calc_score(last)
candidate_idx = self.candidate_ids[idx].item()
self.update_cache(last, idx)
self.remove_idx_all(idx)
return score, candidate_idx
def remove_idx(self, name, idx):
data = getattr(self, name)
if isinstance(data, dict):
data = {key: self._remove_idx(val, idx) for key, val in data.items()}
else:
data = self._remove_idx(data, idx)
setattr(self, name, data)
def _remove_idx(self, data, idx):
return torch.cat((data[:idx], data[idx + 1:]), dim=0)
def _add_samples(self, ids):
'''
assignments = torch.LongTensor([[c.get_assignment(x) for c in self.clusterings]
for x in ids]) # W x D
'''
assignments = self.get_assignments(ids)
C = self.ncentroids
assignments = self.one_hot(assignments, C) # W x D x C
pair_ids = torch.LongTensor(self.combinations) # P x 2
p1 = self.gather_pairs(assignments, pair_ids[:, 0])
p2 = self.gather_pairs(assignments, pair_ids[:, 1])
N_whole = torch.einsum('wpa,wpb->wpab', p1, p2)
N = N_whole.sum(0) # P x C x C
a = N.sum(1) # P x C
b = N.sum(2)
n = b.sum(-1) # P
return {'N': N, 'a': a, 'b': b, 'n': n}
def add_samples(self, ids):
to_add = self._add_samples(ids)
for key in to_add.keys():
self.cache[key] += to_add[key]
def run_greedy(self, subset_size, start_indices, intermediate_target=None,
verbose=False, log_every=1, log_times=None,
node_rank=None, pid=None):
S = start_indices
GAIN = []
LOOKUPS = []
timelapse = []
greedy_start_time = time.time()
start_time = time.time()
# start from empty index
pbar = range(len(start_indices), subset_size - 1)
niters = subset_size - 1 - len(start_indices)
if log_times is not None:
log_every = niters // log_times
if verbose:
pbar = tqdm(pbar, desc='greedy iter')
for j in pbar:
start_time = time.time()
score, idx = self.calc_measure()
timelapse.append(time.time() - start_time)
S.append(idx)
GAIN.append(score)
LOOKUPS.append(0) # greedy search renders num_lookups meaningless
if verbose:
if (j + 1) % log_every == 0:
if intermediate_target is not None:
precision = len(set(intermediate_target) & set(S)) / len(set(S))
msg = "(LEN: {}, MEASURE: {}, PRECISION: {})".format(
len(S), score, precision)
else:
msg = "(LEN: {}, MEASURE: {})".format(len(S), score)
if node_rank is not None:
msg = 'Node: {}, '.format(node_rank) + msg
if pid is not None:
                        msg = 'PID: {}, '.format(pid) + msg
pbar.set_description(msg)
if verbose:
tqdm.write("Time Consumed: {} seconds".format(time.time() - greedy_start_time))
return (S, GAIN, timelapse, LOOKUPS)
def ensure_nonzero(self, x):
if torch.is_tensor(x):
x = torch.max(x, torch.full(x.shape, self.eps, dtype=x.dtype))
else:
x = max(x, self.eps)
return x
def generalized_mean(self, ha, hb):
if self.average_method == 'max':
normalizer = torch.max(ha, hb) # max avg
elif self.average_method == 'min':
normalizer = torch.min(ha, hb)
else:
# default is arithmetic
normalizer = (ha + hb) / 2 # arithmetic mean
return normalizer
class EfficientAMI(EfficientMI):
""" adjusted MI """
def _calc_score(self, *args, **kwargs):
return self.calc_AMI(*args, **kwargs)
def calc_EMI(self, last):
# maybe sklearn.metrics.cluster.expected_mutual_information?
# we need a way to 'DP' the factorials for faster computation
N = last['N'] # W x P x C x C
a = last['a'].unsqueeze(2) # W x P x 1 x C
b = last['b'].unsqueeze(3)
n = last['n'].unsqueeze(-1).unsqueeze(-1)
term1 = (N / n * (N.log() + n.log() - (a.log() + b.log())))
log_term2 = (a + 1).lgamma() + (b + 1).lgamma() + (n - a + 1).lgamma() + (n - b + 1).lgamma() \
- ((n + 1).lgamma() + (N + 1).lgamma() + (a - N + 1).lgamma() + (b - N + 1).lgamma()
+ (n - a - b + N + 1).lgamma())
scores = (term1 * log_term2.exp()).sum([2, 3])
return scores
@staticmethod
def calc_entropy(x, n):
p = x / n # W x P x C
return -(p * p.log()).sum(dim=-1)
def calc_entropies(self, last):
a = last['a'] # W x P x C
b = last['b'] # W x P x C
n = last['n'].unsqueeze(-1) # W x P x 1
ha = self.calc_entropy(a, n)
hb = self.calc_entropy(b, n)
return ha, hb
def calc_AMI(self, last):
mi = self.calc_MI(last)
emi = self.calc_EMI(last)
ha, hb = self.calc_entropies(last)
normalizer = self.generalized_mean(ha, hb)
denominator = normalizer - emi
'''
if denominator < 0:
denominator = min(denominator, -np.finfo('float64').eps)
else:
denominator = max(denominator, np.finfo('float64').eps)
'''
denominator = self.ensure_nonzero(denominator)
ami = (mi - emi) / denominator
return ami
class EfficientNMI(EfficientAMI):
def _calc_score(self, *args, **kwargs):
return self.calc_NMI(*args, **kwargs)
def calc_NMI(self, last):
mi = self.calc_MI(last)
ha, hb = self.calc_entropies(last)
normalizer = self.generalized_mean(ha, hb)
normalizer = self.ensure_nonzero(normalizer)
return (2 * mi) / normalizer
class ConstantMeasure(EfficientMI):
def _calc_score(self, *args, **kwargs):
return self.calc_constant(*args, **kwargs)
def calc_constant(self, last):
n = last['n'] # W x P
return torch.full_like(n, 1)
class EfficientMemMI(EfficientMI):
def calc_N(self, candidates, save_ids=True):
if save_ids:
self.candidate_ids = torch.LongTensor(candidates)
assignments = self.get_assignments(candidates) # W x D
pair_ids = torch.LongTensor(self.combinations) # P x 2
N = self.gather_pairs(assignments, pair_ids) # W x P x 2
return N
def init_candidates(self, candidates):
N = self.calc_N(candidates)
self.candidates = {'N': N}
def init_cache(self):
super().init_cache()
# P = len(self.combinations)
N = self.cache['N']
NlogN = (N * N.log()).sum([-1, -2])
a = self.cache['a']
aloga = (a * a.log()).sum(-1)
b = self.cache['b']
blogb = (b * b.log()).sum(-1)
self.cache = {**self.cache,
'NlogN': NlogN, 'aloga': aloga, 'blogb': blogb}
@staticmethod
def gather_pairs(assignments, idx):
# W x D, P x 2 -> W x P x 2
W = assignments.shape[0]
P, N = idx.shape
idx = idx.unsqueeze(0)
idx = idx.repeat(W, 1, 1) # W x P x 2
assignments = assignments.unsqueeze(-1)
assignments = assignments.repeat(1, 1, N)
return assignments.gather(dim=1, index=idx) # W x P x 2
def get_last(self, candidates=None):
if candidates is None:
candidates = self.candidates['N']
N = self.get_last_N(self.cache['N'], candidates) # W x P
a = self.get_last_ab(self.cache['a'], candidates, dim=1) # W x P
b = self.get_last_ab(self.cache['b'], candidates, dim=0) # W x P
last = {}
last['NlogN'] = self.update_nlogn(self.cache['NlogN'].unsqueeze(0), N)
last['aloga'] = self.update_nlogn(self.cache['aloga'].unsqueeze(0), a)
last['blogb'] = self.update_nlogn(self.cache['blogb'].unsqueeze(0), b)
last['n'] = (self.cache['n'] + 1).unsqueeze(0)
return last
@staticmethod
def nlogn(x):
return x * x.log()
def update_nlogn(self, prev, num):
return prev - self.nlogn(num) + self.nlogn(num + 1)
def get_last_N(self, cache, candidates):
# P x C x C, W x P x 2 -> W x P
c1 = candidates[:, :, 0]
cache = self.small_gather(cache, c1.t(), batch_dim=0).transpose(0, 1) # W x P x C
c2 = candidates[:, :, 1]
cache = cache.gather(dim=-1, index=c2.unsqueeze(-1)).squeeze(-1) # W x P
return cache
def get_last_ab(self, cache, candidates, dim=0):
# P x C, W x P -> W x P
c = candidates[:, :, dim]
W, P = c.shape
cache = cache.unsqueeze(0).repeat(W, 1, 1)
cache = cache.gather(dim=-1, index=c.unsqueeze(-1)).squeeze(-1)
return cache
@staticmethod
def small_gather(x, ids, batch_dim=0):
# use for loops...
res = []
B = x.shape[batch_dim]
for b in range(B):
res.append(x[b][ids[b]])
res = torch.stack(res, dim=0) # P x W x C
return res
def calc_MI(self, last):
# nlogn
NlogN = last['NlogN'] # W x P
aloga = last['aloga'] # W x P
blogb = last['blogb'] # W x P
n = last['n']
tN = (NlogN / n)
ta = (-aloga / n)
tb = (-blogb / n)
term1 = (tN + ta + tb)
term2 = n.log()
scores = (term1 + term2)
return scores
def update_cache(self, last, idx, last_idx=None):
# update nlogns
for key in last.keys():
if 'log' in key:
last_idx = last_idx if last_idx is not None else idx
self.cache[key] = last[key][last_idx]
self.update_mats(idx)
def calc_N_i(self, idx):
# update N, a, b, n
N_i = self.candidates['N'][idx] # P x 2
C = self.cache['N'].shape[-1]
N_i = (self.one_hot(N_i[:, 0], C),
self.one_hot(N_i[:, 1], C))
N_i = torch.einsum('pa,pb->pab', N_i[0], N_i[1]) # P x C x C
assert (N_i.sum([1, 2]) == 1).all(), 'error in getting one_hot representation of candidate'
return N_i
def update_mats(self, idx):
N_i = self.calc_N_i(idx)
self.cache['N'] += N_i
self.cache['a'] += N_i.sum(1)
self.cache['b'] += N_i.sum(2)
self.cache['n'] += 1
def add_samples(self, ids):
for idx in ids:
candidate = self.calc_N([idx], save_ids=False)
last = self.get_last(candidate)
self.update_cache(last, idx, last_idx=0)
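# Minimal end-to-end sketch (illustrative only; shapes and sizes are assumptions, not
# part of the original module): 500 items clustered 4 ways with 20 centroids each,
# greedily growing a subset that maximises the average pairwise MI between clusterings.
if __name__ == '__main__':
    from itertools import combinations
    rng = np.random.RandomState(0)
    assignments = rng.randint(0, 20, size=(500, 4))      # V x D cluster assignments
    pairs = list(combinations(range(4), 2))               # P clustering pairs
    selector = EfficientMI(assignments, ncentroids=20)
    selector.init(pairs, candidates=list(range(500)))
    subset, gains, _, _ = selector.run_greedy(subset_size=20, start_indices=[])
    print('selected', len(subset), 'items, final gain', gains[-1])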
|
472468
|
from django.conf import settings
from django.contrib.auth.models import Permission
from django.urls import reverse, resolve
from django.utils.encoding import force_text as force_unicode
from garb.tests.models import *
from garb.tests.urls import *
from garb.templatetags.garb_menu import get_menu, Menu, ItemLink, ItemLinkModel
from garb.tests.mixins import UserTestCaseMixin
class GarbMenuTestCase(UserTestCaseMixin):
app_label = 'tests'
route_blog = ("admin:{0}_{1}_changelist".format(app_label,'blog'))
route_blog_link = "/" + resolve(reverse('admin:%s_%s_changelist' % (app_label,'blog'))).route
route_content = ("admin:{0}_{1}_changelist".format(app_label,'blogcomment'))
def setUp(self):
self.setUpConfig()
self.login_superuser()
def setUpConfig(self):
settings.GARB_CONFIG = getattr(settings, 'GARB_CONFIG', {})
settings.GARB_CONFIG.update({
'MENU': [
{ 'label': 'menu1', 'icon': 'fa-user-plus', 'route': 'blog1', 'auth':'all' },
{ 'label': 'menu2', 'icon': 'fa-user-plus',
'sub_itens':[
{ 'model':'tests.blog'},
{ 'model':'tests.blogcomment'},
]
},
{ 'label': 'menu3', 'icon': 'fa-user-plus', 'auth':'all',
'sub_itens':[
{ 'label': 'sub1', 'link': 'www.uol.com.br', 'target':'_blank' },
{ 'label': 'sub2', 'route': 'blog1', 'permission': 'tests.can_hire', },
{ 'label': 'sub3', 'route': 'blog2', 'auth':'yes' },
{ 'label': 'sub4', 'route': 'blog3', 'auth':'no' },
{ 'label': 'sub5', 'link': 'www.uol.com.br'},
]
},
{ 'label': 'menu4', 'icon': 'fa-user-plus',
'sub_itens':[
{ 'label': 'sub6', 'route': 'blog1', 'permission': 'tests.can_hire', },
{ 'label': 'sub7', 'route': 'blog2' , 'auth':'yes'},
{ 'label': 'sub8', 'route': 'blog3' },
]
}
],
})
def make_menu_from_response(self):
return get_menu(self.response.context, self.response._request)
def test_index(self):
self.client.logout()
self.get_response(url="/")
def test_menu_admin(self):
self.client.logout()
self.login_superuser()
self.get_response()
mc = settings.GARB_CONFIG['MENU']
menu = self.make_menu_from_response()
self.assertEqual(len(menu), len(mc))
# as string
i = 0
self.assertEqual(menu[i].label, mc[i]['label'])
self.assertEqual(menu[i].route, mc[i]['route'])
self.assertEqual(menu[i].icon, 'fa-user-plus')
self.assertEqual(menu[i].auth, 'all')
i += 1 # as dict
self.assertEqual(type(menu[i].childrens[0]), ItemLinkModel)
self.assertEqual(len(menu[i].childrens), len(mc[i]['sub_itens']))
self.assertEqual(menu[i].icon, mc[i]['icon'])
self.assertEqual(menu[i].childrens[0].route, self.route_blog )
self.assertEqual(menu[i].childrens[0].get_url(), self.route_blog_link )
self.assertEqual(menu[i].childrens[1].route, self.route_content)
i += 1 # as dict
self.assertEqual(menu[i].auth, 'all')
self.assertEqual(menu[i].childrens[0].auth, 'all')
self.assertEqual(type(menu[i].childrens[0]), ItemLink)
self.assertEqual(menu[i].childrens[0].target, '_blank')
self.assertEqual(menu[i].childrens[1].auth, 'all')
self.assertEqual(menu[i].childrens[2].auth, 'yes')
self.assertEqual(menu[i].label, mc[i]['label'])
self.assertEqual(len(menu[i].childrens), 4)
self.assertEqual(len(menu[i].childrens), len(mc[i]['sub_itens'])-1)
self.assertEqual(menu[i].childrens[3].get_url(), 'http://www.uol.com.br')
self.assertEqual(menu[i].childrens[3].target, '_blank')
i += 1 # as dict
self.assertEqual(len(menu[i].childrens), 3)
self.assertEqual(menu[i].childrens[0].auth, 'yes')
self.assertEqual(menu[i].childrens[2].auth, 'yes')
def test_menu_user(self):
self.client.logout()
self.login_user()
self.get_response()
mc = settings.GARB_CONFIG['MENU']
menu = self.make_menu_from_response()
self.assertEqual(len(menu), len(mc)-1)
i = 0
self.assertEqual(menu[i].label, mc[i]['label'])
self.assertEqual(menu[i].route, mc[i]['route'])
self.assertEqual(menu[i].icon, 'fa-user-plus')
self.assertEqual(menu[i].auth, 'all')
i += 1 # as dict
self.assertEqual(menu[i].auth, 'all')
self.assertEqual(menu[i].childrens[0].auth, 'all')
self.assertEqual(type(menu[i].childrens[0]), ItemLink)
self.assertEqual(menu[i].childrens[0].target, '_blank')
self.assertEqual(menu[i].childrens[1].auth, 'yes')
self.assertEqual(menu[i].label, mc[i+1]['label'])
self.assertEqual(len(menu[i].childrens), 3)
self.assertEqual(len(menu[i].childrens), len(mc[i+1]['sub_itens'])-2)
self.assertEqual(menu[i].childrens[0].label, 'sub1')
self.assertEqual(menu[i].childrens[1].label, 'sub3')
def test_menu_only_authenticated(self):
self.client.logout()
self.get_response(url="/")
mc = settings.GARB_CONFIG['MENU']
menu = self.make_menu_from_response()
self.assertEqual(len(menu), len(mc)-2)
i = 0
self.assertEqual(menu[i].label, mc[i]['label'])
self.assertEqual(menu[i].route, mc[i]['route'])
self.assertEqual(menu[i].icon, 'fa-user-plus')
self.assertEqual(menu[i].auth, 'all')
i += 1 # as dict
self.assertEqual(menu[i].auth, 'all')
self.assertEqual(menu[i].childrens[0].auth, 'all')
self.assertEqual(type(menu[i].childrens[0]), ItemLink)
self.assertEqual(menu[i].childrens[0].target, '_blank')
self.assertEqual(menu[i].childrens[1].auth, 'no')
self.assertEqual(menu[i].label, mc[i+1]['label'])
self.assertEqual(len(menu[i].childrens), 3)
self.assertEqual(len(menu[i].childrens), len(mc[i+1]['sub_itens'])-2)
self.assertEqual(menu[i].childrens[0].label, 'sub1')
self.assertEqual(menu[i].childrens[1].label, 'sub4')
def test_menu_user_permission(self):
self.client.logout()
self.login_user_permission()
self.get_response()
mc = settings.GARB_CONFIG['MENU']
menu = self.make_menu_from_response()
self.assertEqual(len(menu), len(mc)-1)
i = 0
self.assertEqual(menu[i].label, mc[i]['label'])
self.assertEqual(menu[i].route, mc[i]['route'])
self.assertEqual(menu[i].icon, 'fa-user-plus')
self.assertEqual(menu[i].auth, 'all')
i += 1 # as dict
self.assertEqual(menu[i].auth, 'all')
self.assertEqual(menu[i].childrens[0].auth, 'all')
self.assertEqual(type(menu[i].childrens[0]), ItemLink)
self.assertEqual(menu[i].childrens[0].target, '_blank')
self.assertEqual(menu[i].childrens[1].permission, 'tests.can_hire')
self.assertEqual(menu[i].childrens[2].auth, 'yes')
self.assertEqual(menu[i].label, mc[i+1]['label'])
self.assertEqual(len(menu[i].childrens), 4)
self.assertEqual(len(menu[i].childrens), len(mc[i+1]['sub_itens'])-1)
self.assertEqual(menu[i].childrens[0].label, 'sub1')
self.assertEqual(menu[i].childrens[1].label, 'sub2')
self.assertEqual(menu[i].childrens[2].label, 'sub3')
def test_menu_active(self):
self.client.logout()
self.login_superuser()
self.get_response()
self.get_response(url=self.route_blog_link)
menu = self.make_menu_from_response()
self.assertEqual(menu[1].childrens[0].get_active(), True)
self.assertEqual(menu[1].childrens[1].get_active(), False)
self.assertEqual(menu[1].collapsed, True)
self.assertEqual(menu[2].collapsed, False)
|
472472
|
import abc
import gzip
import pickle
from os import path
import numpy as np
class Data(object):
data = None
heavy_hitters = None
heavy_hitter_freq = None
def __init__(self, n_bits):
self.n_bits = n_bits
self.heavy_size = 128
self.heavy_hitters = [0] * self.heavy_size
self.heavy_hitter_freq = [0] * self.heavy_size
self.initial_generate()
def initial_generate(self):
# each user report one query
num_bytes = int(self.n_bits / 8)
f = gzip.open('query_%dbytes_sorted.txt.gz' % num_bytes, 'rb')
time_total = 657427
self.data = [0] * time_total
random_map = np.arange(time_total)
np.random.shuffle(random_map)
local_count = 0
overall_count = 0
for line in f:
l = line.split(b'\t')
query, count = l[0], int(l[1])
# convert to int
query_number = 0
if not len(query) == num_bytes:
print('wrong', query, count, l, len(query))
continue
for i in range(num_bytes):
query_number |= (query[i] << (8 * i))
if query[i] > 128:
print('wrong', query, query[i])
continue
for i in range(count):
self.data[random_map[overall_count]] = query_number
overall_count += 1
if local_count < self.heavy_size:
self.heavy_hitters[local_count] = query_number
self.heavy_hitter_freq[local_count] = count
local_count += 1
print('overall count:', overall_count)
def suffix_tally(self, low, high, cand_dict, shift_num):
mask = (1 << shift_num) - 1
        buckets = np.zeros(len(cand_dict), dtype=np.int64)
for i in range(int(low), int(high)):
cur_value = self.data[i] & mask
if cur_value in cand_dict:
buckets[cand_dict[cur_value]] += 1
return buckets
|
472487
|
import codecs
import imageio
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import plotly.offline as py
import plotly.graph_objs as go
import pandas as pd
import tweepy
import locale
import emoji
import sys
import re
import string
import os
def get_user_tweets(api, username, count=200):
tweets = api.user_timeline(username, count=count)
texts = [tweet.text for tweet in tweets]
return texts
def get_mentions_names(tweets2):
users=[]
usernamesForTwitter = re.findall( r'(^|[^@\w])@(\w{1,15})\b',tweets2)
for user in usernamesForTwitter:
users.append(user[1])
return users
def show_html_table(words):
data = [go.Bar(
x = words.index.values[:30],
y = words.values[:30],
marker= dict(colorscale='Jet',
color = words.values[:30]),
text='ranking'
)]
layout = go.Layout(
title='Word Frequency Ranking'
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename=username)
def show_cloud(listData,typeFormat):
a = ' '.join(str(v) for v in listData)
wc = WordCloud(background_color="black", max_words=1000, mask=sherlock_mask, stopwords=stopwords)
wc.generate(a)
filename=typeFormat+'.png'
wc.recolor(colormap='PuBu' , random_state=42).to_file(filename)
#plt.figure(figsize=(80,80))
#plt.imshow(wc.recolor(colormap='PuBu' , random_state=42),interpolation='bilinear')
#plt.axis("off")
#plt.show(block=False)
def get_analysis(retweets,tweets,mentions):
df_retweets = pd.DataFrame({'retweets': retweets})
df_tweets = pd.DataFrame({'tweets': tweets})
df_mentions = pd.DataFrame({'mentions': mentions})
df_all=pd.concat([df_retweets,df_tweets,df_mentions],ignore_index=True, axis=1)
df_all.columns = [ 'Retweets','Tweets', 'Mentions']
df_all= df_all.applymap(lambda s:s.lower() if type(s) == str else s)
#print(df_all.head())
#for m in df_all['Tweets']:
#print(m)
disa_donuk=['!',"konser","arkadaş","oley",'hadi',"hey",'tatlım','canım','kuzum','bebek','bebeğim','mükemmel','şaka',
'selam','kutlarım','sosyal']
ice_donuk=['yalnız','keşke','pişman','ağla','gözyaşı','utanç','hayır','peki','belki','bilgilendirici','ciddi']
gercekci=['mümkün','net','olamaz','olur','oldu','olacak','tamam']
sezgisel=['belki','muhtemelen','acaba','ihtimal','his','düş','rüya','sevgi','sevmek','sezgi','seviyorum','hayranım',
'gerçeklik']
dusunen=['düşünce','düşünüyorum','aslında','mantıklı','doğru','yanlış','tespit','olmalı','tahmin','anlamlı','manalı','şüpheli',
'şüpheci','çünkü']
hassas=['kırık','buruk','hüzün','kırgın','ağla','yeterince','teşekkür','hassas','kırılgan']
    sorgulayan=['neden','ne','nerede','niçin','ara','zaman','saat','ilk','son','net']
algılari_acik=['öğrendim','öğretici','bence',]
    # Extroverted / Realistic / Thinking / Questioning
    Kisilik_1=[]
    # Introverted / Realistic / Thinking / Questioning
    Kisilik_2=[]
    # Extroverted / Realistic / Sensitive / Questioning
    Kisilik_3=[]
    # Introverted / Realistic / Sensitive / Questioning
    Kisilik_4=[]
total_disa_donuk = df_all['Tweets'].str.contains('|'.join(disa_donuk))
total_ice_donuk = df_all['Tweets'].str.contains('|'.join(ice_donuk))
total_gercekci = df_all['Tweets'].str.contains('|'.join(gercekci))
total_sezgisel = df_all['Tweets'].str.contains('|'.join(sezgisel))
total_dusunen = df_all['Tweets'].str.contains('|'.join(dusunen))
total_hassas = df_all['Tweets'].str.contains('|'.join(hassas))
total_sorgulayan = df_all['Tweets'].str.contains('|'.join(sorgulayan))
total_algılari_acik = df_all['Tweets'].str.contains('|'.join(algılari_acik))
df_total=pd.concat([total_disa_donuk,total_ice_donuk,total_gercekci,total_sezgisel,total_dusunen,total_hassas,total_sorgulayan,total_algılari_acik],ignore_index=True, axis=1)
df_total.columns = [ 'disa_donuk','ice_donuk','gercekci','sezgisel','dusunen','hassas','sorgulayan','algılari_acik']
#print(df_total.head(10))
Dıs=df_total['disa_donuk'][df_total['disa_donuk']==True].count().sum()
Ic=df_total['ice_donuk'][df_total['ice_donuk']==True].count().sum()
if(Dıs>Ic):
print("Dışa Dönük ! ")
elif(Dıs==Ic):
print("Dışa ve İçe Dönüklük Dengeli.")
else:
print("İçe Dönük...")
G=df_total['gercekci'][df_total['gercekci']==True].count().sum()
S=df_total['sezgisel'][df_total['sezgisel']==True].count().sum()
if(G>S):
print("Gerçekçi ! ")
elif(G == S):
print("Gerçekçi ve Sezgisel Duyumlar Dengeli.")
else:
print("Sezgisel...")
D=df_total['dusunen'][df_total['dusunen']==True].count().sum()
H=df_total['hassas'][df_total['hassas']==True].count().sum()
if(D>H):
print("Düşünen..")
elif(D==H):
print("Düşünen ve Hassas Dengeli.")
else:
print("Hassas...")
Sor=df_total['sorgulayan'][df_total['sorgulayan']==True].count().sum()
Alg=df_total['algılari_acik'][df_total['algılari_acik']==True].count().sum()
if(Sor>Alg):
print("Sorgulayan..")
elif(Sor==Alg):
print("Sorgulayan ve Algıları Açık Dengeli.")
else:
print("Algıları Açık...")
def get_tweets():
#twitter authentication
CONSUMER_KEY = os.getenv('api-key')
CONSUMER_SECRET = os.getenv('api-secret-key')
ACCESS_TOKEN = os.getenv('access-token')
ACCESS_TOKEN_SECRET = os.getenv('access-secret-token')
AUTH = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
AUTH.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(AUTH)
return(get_user_tweets(api, username),api.get_user(username).name)
def get_stop_words(listData):
tempStopWord.append("https")
tempStopWord.append("RT")
tempStopWord.append("co")
tempStopWord.append("rt")
tempStopWord.append("rt'")
tempStopWord.append("rt '")
tempStopWord.append("bir")
tempStopWord.append(",")
tempStopWord.append(":)")
tempStopWord.append(":d")
if not listData:
return(set(tempStopWord))
else:
tempStopWord.extend(listData)
return(set(tempStopWord))
def get_clear_data(oldData):
cevap=0
rt=0
tw=0
lower_map = {
ord(u'I'): u'ı',
ord(u'İ'): u'i',
ord(u'Ç'): u'ç',
ord(u'Ğ'): u'ğ',
ord(u'Ö'): u'ö',
ord(u'O'): u'o',
ord(u'U'): u'u',
ord(u'Ü'): u'ü',
ord(u'Ş'): u'ş',
ord(u'S'): u's',
}
analaysisTweets=[]
tweets=[]
mentions=[]
retweets=[]
for data in oldData:
if data[0] == "@":
cevap = cevap + 1
mentions.append(data)
elif data[0:2] == "RT":
rt = rt + 1
retweets.append(data)
else:
tw = tw + 1
analaysisTweets.append(data)
if data not in stopwords:
for dataSplit in data.split(" "):
if dataSplit not in stopwords:
if any(ext in dataSplit for ext in stopwords):
tweets.append(dataSplit)
datas=[]
for tweet in tweets:
for word in tweet.split(" "):
datas.append(emoji.demojize(word.translate(lower_map).lower()))
if any(ext in "muh" for ext in datas):
print("url_string")
return (datas,analaysisTweets,mentions,retweets)
pm = __import__("stop_words")
tempStopWord=list(pm.STOP_WORDS)
image = imageio.imread("sherlock.png")
sherlock_mask = image
args = sys.argv
username = args[1]
locale.setlocale(locale.LC_ALL, 'tr_TR.utf8')
stopwords = get_stop_words(None)
name = get_tweets()[1]
data_new= get_clear_data(get_tweets()[0])
train = pd.DataFrame(data_new[0])
words=train.unstack().value_counts()
get_analysis(data_new[3],data_new[1],data_new[2])
a = ' '.join(str(v) for v in train.values.tolist())
print(len(a))
show_cloud(train.values.tolist(),"all")
show_cloud(get_mentions_names(a),"users")
userListstopword = ["@" + name for name in get_mentions_names(a)]
get_stop_words(userListstopword)
get_stop_words(get_mentions_names(a))
get_stop_words([ name+" '" for name in get_mentions_names(a)])
stopwords=get_stop_words([ name+"'" for name in get_mentions_names(a)])
train=None
words=None
#with open("out.txt", "w", encoding="utf-8") as f:
#f.write("$".join(stopwords))
data_new = get_clear_data(get_tweets()[0])
train = pd.DataFrame( data_new[0])
words=train.unstack().value_counts()
show_cloud(train.values.tolist(),"topic")
show_html_table(words)
#with open("outword.txt", "w", encoding="utf-8") as f:
# f.write('$'.join(str(v) for v in train.values.tolist()))
|
472565
|
import pygame as pg
class Platform(object):
def __init__(self, x, y, image, type_id):
self.image = image
self.rect = pg.Rect(x, y, 32, 32)
# 22 - question block
# 23 - brick block
self.typeID = type_id
self.type = 'Platform'
self.shaking = False
self.shakingUp = True
self.shakeOffset = 0
if self.typeID == 22:
self.currentImage = 0
self.imageTick = 0
self.isActivated = False
self.bonus = 'coin'
def update(self):
if self.typeID == 22:
self.imageTick += 1
if self.imageTick == 50:
self.currentImage = 1
elif self.imageTick == 60:
self.currentImage = 2
elif self.imageTick == 70:
self.currentImage = 1
elif self.imageTick == 80:
self.currentImage = 0
self.imageTick = 0
def shake(self):
if self.shakingUp:
self.shakeOffset -= 2
self.rect.y -= 2
else:
self.shakeOffset += 2
self.rect.y += 2
if self.shakeOffset == -20:
self.shakingUp = False
if self.shakeOffset == 0:
self.shaking = False
self.shakingUp = True
def spawn_bonus(self, core):
self.isActivated = True
self.shaking = True
self.imageTick = 0
self.currentImage = 3
if self.bonus == 'mushroom':
core.get_sound().play('mushroom_appear', 0, 0.5)
if core.get_map().get_player().powerLVL == 0:
core.get_map().spawn_mushroom(self.rect.x, self.rect.y)
else:
core.get_map().spawn_flower(self.rect.x, self.rect.y)
elif self.bonus == 'coin':
core.get_sound().play('coin', 0, 0.5)
core.get_map().spawn_debris(self.rect.x + 8, self.rect.y - 32, 1)
core.get_map().get_player().add_coins(1)
core.get_map().get_player().add_score(200)
def destroy(self, core):
core.get_map().spawn_debris(self.rect.x, self.rect.y, 0)
core.get_map().remove_object(self)
def render(self, core):
# Question block
if self.typeID == 22:
if not self.isActivated:
self.update()
elif self.shaking:
self.shake()
core.screen.blit(self.image[self.currentImage], core.get_map().get_camera().apply(self))
# Brick block
elif self.typeID == 23 and self.shaking:
self.shake()
core.screen.blit(self.image, core.get_map().get_camera().apply(self))
else:
core.screen.blit(self.image, core.get_map().get_camera().apply(self))
|
472575
|
import torch.nn as nn
import torch.nn.functional as F
from .binary_functions import IRNetSign, RANetActSign, RANetWSign
import torch
import math
class BaseBinaryConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True,
binary_type=(True, True), **kwargs):
super(BaseBinaryConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.mode = binary_type
def binary_weight(self, x):
pass
def binary_input(self, w):
pass
def forward(self, input):
x = self.binary_input(input) if self.mode[0] else input
w = self.binary_weight(self.weight) if self.mode[1] else self.weight
output = F.conv2d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
class IRConv2d(BaseBinaryConv2d):
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True,
binary_type=(True, True), **kwargs):
super(IRConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, binary_type, **kwargs)
self.k = torch.tensor([10]).float().cuda()
self.t = torch.tensor([0.1]).float().cuda()
def binary_input(self, x):
return IRNetSign().apply(x, self.k, self.t)
def binary_weight(self, w):
bw = w - w.view(w.size(0), -1).mean(-1).view(w.size(0), 1, 1, 1)
bw = bw / bw.view(bw.size(0), -1).std(-1).view(bw.size(0), 1, 1, 1)
sw = torch.pow(torch.tensor([2] * bw.size(0)).cuda().float(),
(torch.log(bw.abs().view(bw.size(0), -1).mean(-1)) / math.log(2)).round().float()).view(
bw.size(0), 1, 1, 1).detach()
bw = IRNetSign().apply(bw, self.k, self.t)
return bw * sw
def ede(self, k, t):
self.k = k
self.t = t
class RAConv2d(BaseBinaryConv2d):
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True,
binary_type=(True, True), **kwargs):
super(RAConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, binary_type, **kwargs)
self.sign_a = RANetActSign()
self.sign_w = RANetWSign()
def binary_input(self, x):
return self.sign_a(x)
def binary_weight(self, w):
bw = self.sign_w(w)
sw = torch.mean(torch.mean(torch.mean(abs(w),dim=3,keepdim=True),dim=2,keepdim=True),dim=1,keepdim=True).detach()
return bw * sw
class STEConv2d(BaseBinaryConv2d):
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True,
binary_type=(True, True), **kwargs):
super(STEConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, binary_type, **kwargs)
self.sign_a = RANetWSign(clip=1.25)
self.sign_w = RANetWSign(clip=1.25)
def binary_input(self, x):
return self.sign_a(x)
def binary_weight(self, w):
return self.sign_w(w)
class ANDConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(ANDConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.k = torch.tensor([10]).float().cuda()
self.t = torch.tensor([0.1]).float().cuda()
def forward(self, input):
w = self.weight
a = input
bw = w - w.view(w.size(0), -1).mean(-1).view(w.size(0), 1, 1, 1)
bw = bw / bw.view(bw.size(0), -1).std(-1).view(bw.size(0), 1, 1, 1)
sw = torch.pow(torch.tensor([2]*bw.size(0)).cuda().float(), (torch.log(bw.abs().view(bw.size(0), -1).mean(-1)) / math.log(2)).round().float()).view(bw.size(0), 1, 1, 1).detach()
bw = (IRNetSign().apply(bw, self.k, self.t) + 1.0) / 2.0
ba = (IRNetSign().apply(a, self.k, self.t) + 1.0) / 2.0
bw = bw * sw
output = F.conv2d(ba, bw, self.bias,
self.stride, self.padding,
self.dilation, self.groups)
return output
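# Usage sketch (illustrative; requires the accompanying binary_functions module and, for
# IRConv2d, a CUDA device, since k and t are created with .cuda() in __init__):
#
#   conv = IRConv2d(64, 128, kernel_size=3, padding=1, binary_type=(True, True)).cuda()
#   x = torch.randn(8, 64, 32, 32, device='cuda')
#   y = conv(x)                      # activations and weights are binarized in forward()
#   conv.ede(torch.tensor([100.]).float().cuda(), torch.tensor([0.5]).float().cuda())  # anneal k, t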
|
472592
|
from datetime import date
from decimal import Decimal
from controls.models import FinancialYear, ModuleSettings, Period
from django.conf import settings
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from django.test import TestCase
from nominals.models import Nominal, NominalTransaction
PURCHASES_CONTROL_ACCOUNT = "Purchase Ledger Control"
SALES_CONTROL_ACCOUNT = "Sales Ledger Control"
class TrialBalanceTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.url = reverse("nominals:trial_balance")
cls.user = get_user_model().objects.create_superuser(
username="dummy", password="<PASSWORD>")
# nominals
# Profit and Loss
revenues = Nominal.objects.create(name="Revenues")
revenue = Nominal.objects.create(name="Revenue", parent=revenues)
cls.sales = sales = Nominal.objects.create(name="Sales", parent=revenue)
expenses = Nominal.objects.create(name="Expenses")
expense = Nominal.objects.create(name="Expense", parent=expenses)
cls.sundry = sundry = Nominal.objects.create(name="Sundry", parent=expense)
# Balance Sheet
assets = Nominal.objects.create(name="Assets")
current_assets = Nominal.objects.create(parent=assets, name="Current Assets")
cls.sales_ledger_control = sales_ledger_control = Nominal.objects.create(parent=current_assets, name=SALES_CONTROL_ACCOUNT)
liabilities = Nominal.objects.create(name="Liabilities")
current_liabilities = Nominal.objects.create(parent=liabilities, name="Current Liabilities")
cls.purchase_ledger_control = purchase_ledger_control = Nominal.objects.create(parent=current_liabilities, name=PURCHASES_CONTROL_ACCOUNT)
cls.vat_control_account = vat_control_account = Nominal.objects.create(parent=current_liabilities, name=settings.DEFAULT_VAT_NOMINAL)
today = date.today()
# 2019
cls.fy_2019 = fy_2019 = FinancialYear.objects.create(financial_year=2019)
cls.p_201912 = p_201912 = Period.objects.create(
fy=fy_2019,
period="01",
fy_and_period="201912",
month_start=date(2019, 12, 31)
)
# 2020
cls.fy_2020 = fy_2020 = FinancialYear.objects.create(financial_year=2020)
cls.p_202001 = p_202001 = Period.objects.create(
fy=fy_2020,
period="01",
fy_and_period="202001",
month_start=date(2020, 1, 31)
)
cls.p_202002 = p_202002 = Period.objects.create(
fy=fy_2020,
period="02",
fy_and_period="202002",
month_start=date(2020, 2, 29)
)
ModuleSettings.objects.create(nominals_period=cls.p_202001)
# create a SL set of NL trans
NominalTransaction.objects.create(
header=1,
line=1,
module="SL",
ref="1",
period=p_202001,
type="si",
field="g",
nominal=sales,
value=-100,
date=today
)
NominalTransaction.objects.create(
header=1,
line=2,
module="SL",
ref="1",
period=p_202001,
type="si",
field="v",
nominal=vat_control_account,
value=-20,
date=today
)
NominalTransaction.objects.create(
header=1,
line=3,
module="SL",
ref="1",
period=p_202001,
type="si",
field="t",
nominal=sales_ledger_control,
value=120,
date=today
)
# create a PL set of NL trans
NominalTransaction.objects.create(
header=1,
line=1,
module="PL",
ref="1",
period=p_202001,
type="pi",
field="g",
nominal=sundry,
value=100,
date=today
)
NominalTransaction.objects.create(
header=1,
line=2,
module="PL",
ref="1",
period=p_202001,
type="pi",
field="v",
nominal=vat_control_account,
value=20,
date=today
)
NominalTransaction.objects.create(
header=1,
line=3,
module="PL",
ref="1",
period=p_202001,
type="pi",
field="t",
nominal=purchase_ledger_control,
value=-120,
date=today
)
def test(self):
self.client.force_login(self.user)
response = self.client.get(self.url)
self.assertEqual(
response.status_code,
200
)
context_data = response.context_data
debit_total = context_data["debit_total"]
credit_total = context_data["credit_total"]
"""
The totals are the total of the visible sums. In this case the total of the visible debit balances
is 220.
However really the total of debits is 240.
The user should have two nominals for vat really - vat input and vat output. This would be a software change.
"""
self.assertEqual(
debit_total,
220
)
self.assertEqual(
credit_total,
-220
)
ytd_debit_total = context_data["ytd_debit_total"]
ytd_credit_total = context_data["ytd_credit_total"]
self.assertEqual(
ytd_debit_total,
220
)
self.assertEqual(
ytd_credit_total,
-220
)
report = context_data["report"]
nominals_map = {}
for nominal_report in report:
nominals_map[nominal_report["nominal"]] = nominal_report
sales = nominals_map["Sales"]
sundry = nominals_map["Sundry"]
sales_ledger_control = nominals_map["Sales Ledger Control"]
purchase_ledger_control = nominals_map["Purchase Ledger Control"]
vat_control = nominals_map["Vat"]
self.assertEqual(
sales,
{
"nominal": "Sales",
"total": Decimal('-100.00'),
"parents": ["Revenues", "Revenue"],
"ytd": Decimal("-100.00")
}
)
self.assertEqual(
sundry,
{
"nominal": "Sundry",
"total": Decimal('100.00'),
"parents": ["Expenses", "Expense"],
"ytd": Decimal("100.00")
}
)
self.assertEqual(
sales_ledger_control,
{
"nominal": "Sales Ledger Control",
"total": Decimal('120.00'),
"parents": ["Assets", "Current Assets"],
"ytd": Decimal("120.00")
}
)
self.assertEqual(
purchase_ledger_control,
{
"nominal": "Purchase Ledger Control",
"total": Decimal('-120.00'),
"parents": ["Liabilities", "Current Liabilities"],
"ytd": Decimal("-120.00")
}
)
self.assertEqual(
vat_control,
{
"nominal": "Vat",
"total": Decimal('0.00'),
"parents": ["Liabilities", "Current Liabilities"],
"ytd": Decimal("0.00")
}
)
def test_different_fy(self):
self.client.force_login(self.user)
response = self.client.get(
self.url,
data={
"from_period": self.p_201912.pk,
"to_period": self.p_202001.pk
}
)
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"Period range must be within the same FY"
)
def test_same_fy_but_invalid_period_range(self):
self.client.force_login(self.user)
response = self.client.get(
self.url,
data={
"from_period": self.p_202002.pk,
"to_period": self.p_202001.pk
}
)
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"<li>Invalid period range. Period From cannot be after Period To</li>"
)
|
472608
|
import time
from base64 import b64encode
from .. import exceptions, const, config
from ..utils import misc
from ..models import deploy as models
from . import base
class Deploy(base.SoapService):
def _get_zip_content(self, input_file) -> str:
if isinstance(input_file, str):
input_file = open(input_file, 'rb')
return b64encode(
input_file.read()
).decode('utf-8')
def deploy(self, input_zip, options: models.Options = models.Options()) -> 'Deployment':
result = self._post(action='deploy', message_path='deploy/deploy.msg', message_attributes={
'zip_file': self._get_zip_content(input_zip),
'options': options.as_xml()
})
if result.has('soapenv:Envelope/soapenv:Body/soapenv:Fault/faultcode'):
raise exceptions.DeployCreateError(result.get_value('soapenv:Envelope/soapenv:Body/soapenv:Fault/faultstring'))
return Deployment(self, result.get_value('soapenv:Envelope/soapenv:Body/deployResponse/result/id'))
def check_deploy_status(self, async_process_id: str) -> models.Status:
result = self._post(action='checkDeployStatus', message_path='deploy/status.msg', message_attributes={
'async_process_id': async_process_id
})
result = result.get('soapenv:Envelope/soapenv:Body/checkDeployStatusResponse/result')
status = models.Status(result.get_value('status'), result.get_value('stateDetail', None), models.DeployDetails(
int(result.get_value('numberComponentsTotal')),
int(result.get_value('numberComponentErrors')),
int(result.get_value('numberComponentsDeployed'))
), models.DeployDetails(
int(result.get_value('numberTestsTotal')),
int(result.get_value('numberTestErrors')),
int(result.get_value('numberTestsCompleted'))
))
if status.status.lower().strip() == 'failed':
for failure in result.get_list('details/componentFailures'):
status.components.append_failure(models.ComponentFailure(
failure.get('componentType'),
failure.get('fileName'),
failure.get('problemType'),
failure.get('problem')
))
for failure in result.get_list('details/runTestResult/failures'):
status.tests.append_failure(models.UnitTestFailure(
failure.get('name'),
failure.get('methodName'),
failure.get('message'),
failure.get('stackTrace')
))
return status
def cancel(self, async_process_id: str) -> bool:
result = self._post(action='cancelDeploy', message_path='deploy/cancel.msg', message_attributes={
'async_process_id': async_process_id
})
return misc.parse_bool(result.get_value('soapenv:Envelope/soapenv:Body/cancelDeployResponse/result/done'))
class Deployment:
def __init__(self, deploy_service: Deploy, async_process_id: str):
self.deploy_service = deploy_service
self.async_process_id = async_process_id
self.start_time = time.time()
def get_elapsed_seconds(self):
return time.time() - self.start_time
def get_elapsed_time(self):
return time.strftime("%H:%M:%S", time.gmtime(self.get_elapsed_seconds()))
def cancel(self) -> bool:
return self.deploy_service.cancel(self.async_process_id)
def get_status(self) -> models.Status:
return self.deploy_service.check_deploy_status(self.async_process_id)
def is_done(self):
return self.get_status().status in const.STATUSES_DONE
def has_failed(self):
return self.get_status().status == const.STATUS_FAILED
def has_succeeded(self):
return self.get_status().status == const.STATUS_SUCCEEDED
def wait(self, tick: callable = None):
while True:
status = self.get_status()
if tick is not None and callable(tick):
tick(status)
if status.status in const.STATUSES_DONE:
break
time.sleep(config.DEPLOY_SLEEP_SECONDS)
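# Usage sketch (illustrative; `deploy_service` is a hypothetical, already-configured
# instance of Deploy built via the package's SoapService plumbing):
#
#   deployment = deploy_service.deploy('package.zip', models.Options())
#   deployment.wait(tick=lambda status: print(status.status, deployment.get_elapsed_time()))
#   print('succeeded' if deployment.has_succeeded() else 'failed')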
|
472631
|
import tensorflow as tf
from .bbox import *
from .Label import LabelEncoder
from .compute_IoU import visualize_detections
"""Preprocessing data
Preprocessing the images involves two steps:
Resizing the image: images are resized such that the shortest side equals 224 px (for ResNet),
Applying augmentation: Random scale jittering and random horizontal flipping are the only augmentations applied to the images.
Along with the images, bounding boxes are rescaled and flipped if required."""
def random_flip_horizontal(image, boxes):
"""Flips image and boxes horizontally with 50% chance
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,
having normalized coordinates.
Returns:
Randomly flipped image and boxes
"""
if tf.random.uniform(()) > 0.5:
image = tf.image.flip_left_right(image)
boxes = tf.stack(
[1 - boxes[:, 2], boxes[:, 1], 1 - boxes[:, 0], boxes[:, 3]], axis=-1
)
return image, boxes
def resize_and_pad_image(image, reshape_size = 224.0):
"""Resizes and pads image while preserving aspect ratio.
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
Returns:
image: Resized image.
image.shape: Resized shape
ratio_short: The scaling factor used to resize the short sides of image
ratio_long: The scaling factor used to resize the long sides of image
"""
image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
ratio_short = reshape_size / tf.reduce_min(image_shape)
ratio_long = reshape_size / tf.reduce_max(image_shape)
image = tf.image.resize(image, tf.cast(tf.constant([224, 224]), dtype=tf.int32))
return image, image.shape, ratio_short, ratio_long
def preprocess_data(sample):
"""Applies preprocessing step to a single sample
Arguments:
sample: A dict representing a single training sample.
Returns:
image: Resized and padded image with random horizontal flipping applied.
bbox: Bounding boxes with the shape `(num_objects, 4)` where each box is
of the format `[x, y, width, height]`.
        class_id: A tensor representing the class id of the objects, having
shape `(num_objects,)`.
"""
image = sample["image"]
bbox = swap_xy(sample["objects"]["bbox"])
class_id = tf.cast(sample["objects"]["label"], dtype=tf.int32)
image, bbox = random_flip_horizontal(image, bbox)
image, image_shape, _, __ = resize_and_pad_image(image)
bbox = tf.stack(
[
bbox[:, 0] * image_shape[1],
bbox[:, 1] * image_shape[0],
bbox[:, 2] * image_shape[1],
bbox[:, 3] * image_shape[0],
],
axis=-1,
)
bbox = convert_to_xywh(bbox)
return image, bbox, class_id
def prepare_image(image):
image, _, ratio_short, ratio_long = resize_and_pad_image(image)
image = tf.keras.applications.resnet.preprocess_input(image)
return tf.expand_dims(image, axis=0), ratio_short, ratio_long
def pipeline(dataset, batch_size):
"""To ensure that the model is fed with data efficiently we will be using tf.data API to create our input pipeline. The input pipeline consists for the following major processing steps:
Apply the preprocessing function to the samples
Create batches with fixed batch size. Since images in the batch can have different dimensions, and can also have different number of objects, we use padded_batch to the add the necessary padding to create rectangular tensors
Create targets for each sample in the batch using LabelEncoder"""
autotune = tf.data.AUTOTUNE # make sure that number of files readed is bigger or equal than batch size
dataset = dataset.map(preprocess_data, num_parallel_calls=autotune)
dataset = dataset.shuffle(4 * batch_size) # randomly samples elements from this buffer
dataset = dataset.padded_batch(batch_size=batch_size, padding_values=(0.0, 1e-8, -1), drop_remainder=True) # padded_batch(bsz, pad_shape, pad_val)
dataset = dataset.map(LabelEncoder().encode_batch, num_parallel_calls=autotune)
dataset = dataset.apply(tf.data.experimental.ignore_errors())
dataset = dataset.prefetch(autotune)
return dataset
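# Usage sketch (assumes a TFDS object-detection split exposing "image" and "objects"
# features, e.g. "coco/2017"; not part of the original module):
#
#   import tensorflow_datasets as tfds
#   (train_ds, val_ds), info = tfds.load("coco/2017", split=["train", "validation"],
#                                        with_info=True)
#   train_pipeline = pipeline(train_ds, batch_size=2)
#   images, targets = next(iter(train_pipeline))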
|
472661
|
import unittest
from programy.config.brain.brain import BrainDynamicsConfiguration
from programy.dynamic.maps.map import DynamicMap
class MockDynamicMap(DynamicMap):
def __init__(self, config):
DynamicMap.__init__(self, config)
def map_value(self, client_context, input_value):
raise NotImplementedError()
class DynamicMapTests(unittest.TestCase):
def test_init(self):
config = BrainDynamicsConfiguration()
map = MockDynamicMap(config)
self.assertIsNotNone(map)
self.assertIsNotNone(map.config)
self.assertEqual(config, map.config)
with self.assertRaises(Exception):
map.map_value(None, None, None)
|
472696
|
from trapper.data.data_adapters import DataAdapter, DataAdapterForQuestionAnswering
from trapper.data.data_collator import DataCollator, InputBatch, InputBatchTensor
from trapper.data.data_processors import DataProcessor, SquadDataProcessor
from trapper.data.data_processors.data_processor import IndexedInstance
from trapper.data.dataset_loader import DatasetLoader
from trapper.data.dataset_reader import DatasetReader
from trapper.data.label_mapper import LabelMapper
from trapper.data.tokenizers import TokenizerWrapper
|
472754
|
from textwrap import dedent
from nuitka.plugins.PluginBase import NuitkaPluginBase
class NuitkaPluginFixBuild(NuitkaPluginBase):
plugin_name = "fix-build"
@staticmethod
def onModuleSourceCode(module_name, source_code):
if module_name == "torch.utils.data._typing":
source_code = source_code.replace(
"'__init_subclass__': _dp_init_subclass",
"'__init_subclass__': classmethod(_dp_init_subclass)",
)
elif module_name == "numba.core.decorators":
source_code = dedent(
"""\
from numba.stencils.stencil import stencil
def _wrapper(f):
return f
def jit(*args, **kwargs):
return _wrapper
def generated_jit(*args, **kwargs):
return _wrapper
def njit(*args, **kwargs):
return _wrapper
def cfunc(*args, **kwargs):
return _wrapper
def jit_module(*args, **kwargs):
pass
"""
)
elif module_name == "torch._jit_internal":
source_code = source_code.replace(
'warnings.warn(f"Unable to retrieve source',
"#",
)
elif module_name == "librosa.decompose":
source_code = source_code.replace("import sklearn.decomposition", "#")
elif module_name == "librosa.segment":
source_code = source_code.replace("import sklearn", "#")
return source_code
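# Usage sketch (assumed file name fix_build.py): register this as a user plugin when
# compiling, e.g.
#   python -m nuitka --standalone --user-plugin=fix_build.py your_script.py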
|
472775
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .rnn import LSTM
class Embeddings(nn.Module):
def __init__(self, num_embeddings, embedding_dim, embed_weight=None, pad_idx=0, unk_idx=None, dropout=0.0, word_dropout=0.0):
super(Embeddings, self).__init__()
self.pad_idx = pad_idx
self.unk_idx = unk_idx
self.word_dropout = word_dropout
self.embeddings = nn.Embedding(num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
padding_idx=pad_idx)
self.dropout = dropout
if embed_weight is None:
self.reset_params()
else:
# self.embeddings = nn.Embedding.from_pretrained(torch.from_numpy(embed_weight), freeze=True)
self.embeddings.weight.data.copy_(torch.from_numpy(embed_weight))
if word_dropout > 0:
assert unk_idx is not None
def reset_params(self):
nn.init.xavier_uniform_(self.embeddings.weight)
with torch.no_grad():
self.embeddings.weight[self.pad_idx].fill_(0)
@property
def requires_grad(self):
return self.embeddings.weight.requires_grad
@requires_grad.setter
def requires_grad(self, value: bool):
self.embeddings.weight.requires_grad = value
@property
def weight(self):
return self.embeddings.weight
def _drop_word(self, words):
r"""
        With probability `word_dropout`, randomly replace tokens with `unk_idx` so that the
        unk token gets enough training signal; this also has a regularizing effect on the
        network. `unk_idx` must be set when this is enabled.
"""
drop_probs = torch.ones_like(words).float() * self.word_dropout
# drop_probs = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float, device=words.device)
        drop_mask = torch.bernoulli(drop_probs).eq(1)  # the larger word_dropout is, the more positions are set to 1
pad_mask = words.ne(self.pad_idx)
mask = drop_mask & pad_mask
words = words.masked_fill(mask, self.unk_idx)
return words
def forward(self, x):
if self.word_dropout > 0 and self.training:
x = self._drop_word(x)
embed = self.embeddings(x)
embed = F.dropout(embed, p=self.dropout, training=self.training)
return embed
class ScaleDotProductAttention(nn.Module):
def __init__(self, k_dim, dropout=0.1):
super(ScaleDotProductAttention, self).__init__()
self.scale = 1. / k_dim ** 0.5
self.dropout = dropout
def forward(self, q, k, v, mask=None):
'''
:param q: (bz, q_len, q_dim)
:param k: (bz, k_len, k_dim)
:param v: (bz, v_len, v_dim)
        k_len == v_len and v_dim == q_dim
        :param mask: (bz, k_len), padded positions are 0
:return: (bz, q_len, v_dim)
'''
# (bz, q_len, k_len)
att_score = torch.matmul(q, k.transpose(1, 2)).mul(self.scale)
if mask is not None:
att_mask = ~mask[:, None, :]
att_score = att_score.masked_fill(att_mask, -1e9)
att_weights = F.softmax(att_score, dim=-1)
att_out = torch.matmul(att_weights, v)
return att_out
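    # Shape sketch (illustrative): with q of shape (bz=2, q_len=5, 64), k and v of shape
    # (2, k_len=7, 64) and a boolean mask of shape (2, 7) that is False at padded
    # positions, the returned attention output has shape (2, 5, 64).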
class DotProductAttention(nn.Module):
def __init__(self, k_dim):
super(DotProductAttention, self).__init__()
self.scale = 1. / k_dim ** 0.5
def forward(self, hn, enc_out, mask=None):
'''
        :param hn: query - final hidden state of the RNN [batch_size, hidden_size]
        :param enc_out: key - RNN outputs [batch_size, seq_len, hidden_size]
        :param mask: [batch_size, seq_len], 0 marks padding
:return: att_out [batch_size, hidden_size]
'''
# type 1
# (bz, 1, hidden_size) * (bz, hidden_size, n_step) -> (bz, 1, n_step)
att_score = torch.matmul(hn.unsqueeze(1), enc_out.transpose(1, 2)).squeeze(1)
att_score.mul_(self.scale)
if mask is not None:
att_score = att_score.masked_fill(~mask, -1e9)
att_weights = F.softmax(att_score, dim=1) # (bz, n_step)
# (bz, 1, n_step) * (bz, n_step, hidden_size) -> (bz, 1, hidden_size)
att_out = torch.matmul(att_weights.unsqueeze(1), enc_out).squeeze(1)
'''
# type 2
# (bz, hidden_size, 1)
hidden = hn.reshape(hn.size(0), -1, 1)
# (bz, n_step, hidden_size) * (bz, hidden_size, 1) -> (bz, n_step)
att_score = torch.matmul(enc_out, hidden).squeeze(2)
att_score.mul_(self.scale)
if mask is not None:
att_score = att_score.masked_fill(~mask, -1e9)
attn_weights = F.softmax(att_score, dim=1)
# (bz, hidden_sze, n_step) * (bz, n_step, 1) -> (bz, hidden_size, 1)
att_out = torch.matmul(enc_out.transpose(1, 2), attn_weights.unsqueeze(2)).squeeze(2)
'''
return att_out
class AdditiveAttention(nn.Module):
def __init__(self, k_size, v_size, hidden_size=None, bias=True):
super(AdditiveAttention, self).__init__()
if hidden_size is None:
hidden_size = v_size
self.W1 = nn.Linear(k_size, hidden_size, bias=False)
self.W2 = nn.Linear(v_size, hidden_size, bias=bias)
self.V = nn.Linear(hidden_size, 1, bias=False)
self.reset_params()
def reset_params(self):
nn.init.xavier_normal_(self.W1.weight)
nn.init.xavier_normal_(self.W2.weight)
nn.init.xavier_uniform_(self.V.weight)
def forward(self, q, v, mask=None):
'''
:param q: (bz, hidden_size)
:param v: (bz, n_step, hidden_size)
:param mask: (bz, n_step)
:return:
'''
# (bz, 1, hidden_size)
expand_q = q.unsqueeze(1)
att_score = self.V(torch.tanh(self.W1(expand_q) + self.W2(v)))
if mask is not None:
att_score = att_score.masked_fill(~mask.unsqueeze(-1), -1e9)
# (bz, n_step, 1)
att_weights = F.softmax(att_score, dim=1)
# (bz, n_step)
attn_dist = att_weights.squeeze(dim=-1)
# (bz, hidden_size)
att_out = (att_weights * v).sum(dim=1)
return att_out, attn_dist
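# Shape sanity check for AdditiveAttention (illustrative sketch, not original
# code; all dimensions below are made up).
def _demo_additive_attention():
    import torch
    attn = AdditiveAttention(k_size=16, v_size=16, hidden_size=8)
    q = torch.randn(2, 16)          # query, e.g. a final RNN hidden state
    v = torch.randn(2, 6, 16)       # values, e.g. RNN outputs over 6 steps
    mask = torch.ones(2, 6, dtype=torch.bool)
    att_out, att_dist = attn(q, v, mask=mask)
    print(att_out.shape, att_dist.shape)  # torch.Size([2, 16]) torch.Size([2, 6])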
class NonlinearMLP(nn.Module):
def __init__(self, in_feature, out_feature, activation=None, bias=True):
super(NonlinearMLP, self).__init__()
if activation is None:
self.activation = lambda x: x
else:
assert callable(activation)
self.activation = activation
self.bias = bias
self.linear = nn.Linear(in_features=in_feature,
out_features=out_feature,
bias=bias)
self.reset_params()
def reset_params(self):
nn.init.xavier_uniform_(self.linear.weight)
# nn.init.orthogonal_(self.linear.weight)
if self.bias:
nn.init.zeros_(self.linear.bias)
def forward(self, inputs):
linear_out = self.linear(inputs)
return self.activation(linear_out)
# class Bilinear(nn.Module):
# def __init__(self, hidden_size, bias=True):
# """
# :param hidden_size: dimensionality of the input features
# """
# super(Bilinear, self).__init__()
# self.U = nn.Parameter(torch.zeros(hidden_size, hidden_size))
# self.has_bias = bias
# if self.has_bias:
# self.bias = nn.Parameter(torch.zeros(1))
# else:
# self.register_parameter("bias", None)
#
# self.reset_parameters()
#
# def reset_parameters(self):
# nn.init.xavier_uniform_(self.U)
# if self.has_bias:
# nn.init.zeros_(self.bias)
#
# def forward(self, dep, head):
# """
# :param head: arc-head tensor [batch, length, hidden]
# :param dep: arc-dependent tensor [batch, length, hidden]
# :return output: tensor [batch, length, length]
# """
# output = dep.matmul(self.U)
# output = output.bmm(head.transpose(-1, -2))
# if self.has_bias:
# output = output + self.bias
# return output
class Bilinear(nn.Module):
def __init__(self, in_dim1, in_dim2, label_dim=1, use_input_bias=False):
super(Bilinear, self).__init__()
self.label_dim = label_dim
self.use_input_bias = use_input_bias
if self.use_input_bias:
in_dim1 += 1
in_dim2 += 1
self.U = nn.Parameter(torch.randn(label_dim, in_dim1, in_dim2))
self.bias = nn.Parameter(torch.zeros(1))
self.reset_params()
def reset_params(self):
nn.init.xavier_uniform_(self.U)
nn.init.zeros_(self.bias)
def forward(self, x1, x2):
'''
:param x1: (bs, len1, in_dim1)
:param x2: (bs, len2, in_dim2)
:return: (bs, len1, len2, label_dim)
'''
if self.use_input_bias: # Biaffine
# (bs, len1, 1)
bias1 = x1.new_ones(x1.size()[:-1] + (1, ))
# (bs, len2, 1)
bias2 = x2.new_ones(x2.size()[:-1] + (1, ))
# (bs, len1, in_dim1 + 1)
x1 = torch.cat((x1, bias1), dim=-1)
# (bs, len2, in_dim2 + 1)
x2 = torch.cat((x2, bias2), dim=-1)
# (bs, 1, len1, in_dim1) * (1/label_dim, in_dim1, in_dim2) -> (bs, 1/label_dim, len1, in_dim2)
tmp = torch.matmul(x1.unsqueeze(1), self.U)
# (bs, 1/label_dim, len1, in_dim2) * (bs, 1, in_dim2, len2) -> (bs, 1/label_dim, len1, len2)
out = torch.matmul(tmp, x2.unsqueeze(1).transpose(2, 3).contiguous())
final = out.squeeze(1) + self.bias
if self.label_dim > 1: # (bs, len1, len2, label_dim)
final = final.permute(0, 2, 3, 1)
return final
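# Quick shape check for Bilinear with the input bias enabled (i.e. a biaffine
# scorer). Illustrative sketch only; the dimensions below are made up.
def _demo_bilinear_scorer():
    import torch
    scorer = Bilinear(in_dim1=8, in_dim2=8, label_dim=3, use_input_bias=True)
    x1 = torch.randn(2, 5, 8)   # e.g. dependent representations
    x2 = torch.randn(2, 5, 8)   # e.g. head representations
    scores = scorer(x1, x2)
    print(scores.shape)  # expected: torch.Size([2, 5, 5, 3])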
# class Biaffine(nn.Module):
# def __init__(self, in1_features, in2_features, num_label=1, bias=True):
# """
# :param in1_features: dimensionality of input feature 1
# :param in2_features: dimensionality of input feature 2
# :param num_label: number of classes
# :param bias: whether to use a bias term. Default: ``True``
# """
# super(Biaffine, self).__init__()
# self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias)
# self.fc = nn.Linear(in1_features + in2_features, num_label, bias=False)
# self.reset_parameters()
#
# def reset_parameters(self):
# nn.init.xavier_uniform_(self.bilinear.weight)
# nn.init.xavier_uniform_(self.fc.weight)
#
# def forward(self, dep, head):
# """
# :param dep: [batch, seq_len, hidden] input feature 1, i.e. the dependent
# :param head: [batch, seq_len, hidden] input feature 2, i.e. the head
# :return output: [batch, seq_len, num_cls] probability map over the classes for each element
# """
# output = self.bilinear(head, dep) + self.fc(torch.cat(tuple([head, dep]), dim=-1).contiguous())
# return output
# self-adaptive attention
class AdaptiveBilinear(nn.Module):
def __init__(self):
super(AdaptiveBilinear, self).__init__()
def forward(self, x1, x2):
'''
:param x1: (b, l1, dim1)
:param x2: (b, l2, dim2)
:return:
'''
assert x1.size(-1) == x2.size(-1)
# (b, l1, l1)
x_1 = F.softmax(x1 @ x1.transpose(1, 2), dim=-1)
# (b, l2, l2)
x_2 = F.softmax(x2 @ x2.transpose(1, 2), dim=-1)
# (b, l1, l2)
x_12 = x1 @ x2.transpose(1, 2)
# (b, l1, l2)
x_12 = x_1 @ x_12 @ x_2.transpose(1, 2)
return x_12
class Biaffine(nn.Module):
def __init__(self, in_features,
out_features=1,
bias=(True, True)):
super(Biaffine, self).__init__()
self.in_features = in_features # mlp_arc_size / mlp_label_size
self.out_features = out_features # 1 / rel_size
self.bias = bias
# arc: mlp_size
# label: mlp_size + 1
self.linear_input_size = in_features + bias[0]
# arc: mlp_size * 1
# label: (mlp_size + 1) * rel_size
self.linear_output_size = out_features * (in_features + bias[1])
self.linear = nn.Linear(in_features=self.linear_input_size,
out_features=self.linear_output_size,
bias=False)
self.reset_params()
def reset_params(self):
nn.init.xavier_uniform_(self.linear.weight)
def forward(self, input1, input2):
batch_size, len1, dim1 = input1.size()
batch_size, len2, dim2 = input2.size()
if self.bias[0]:
ones = input1.data.new_ones(batch_size, len1, 1)
input1 = torch.cat((input1, ones), dim=-1)
# dim1 += 1
if self.bias[1]:
ones = input2.data.new_ones(batch_size, len2, 1)
input2 = torch.cat((input2, ones), dim=-1)
# dim2 += 1
# (bz, len1, dim1+1) -> (bz, len1, linear_output_size)
affine = self.linear(input1)
# (bz, len1 * self.out_features, dim2)
affine = affine.reshape(batch_size, len1 * self.out_features, -1)
# (bz, len1 * out_features, dim2) * (bz, dim2, len2)
# -> (bz, len1 * out_features, len2) -> (bz, len2, len1 * out_features)
biaffine = torch.bmm(affine, input2.transpose(1, 2)).transpose(1, 2).contiguous()
# (bz, len2, len1, out_features) # out_features: 1 or rel_size
biaffine = biaffine.reshape((batch_size, len2, len1, -1)).squeeze(-1)
return biaffine
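# Illustrative arc/label scoring shapes for the Biaffine module above (a sketch
# added for clarity; batch size, sentence length and label count are made up).
def _demo_biaffine_parser_scores():
    import torch
    arc_scorer = Biaffine(in_features=16, out_features=1)
    lbl_scorer = Biaffine(in_features=16, out_features=4)
    dep = torch.randn(2, 7, 16)
    head = torch.randn(2, 7, 16)
    print(arc_scorer(dep, head).shape)  # expected: torch.Size([2, 7, 7])
    print(lbl_scorer(dep, head).shape)  # expected: torch.Size([2, 7, 7, 4])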
class CharCNNEmbedding(nn.Module):
def __init__(self, nb_embeddings,
embed_size=100, # final embedding dim
embed_weight=None,
kernel_sizes=(3, 5, 7),
filter_nums=(50, 100, 150),
dropout=0.2):
super(CharCNNEmbedding, self).__init__()
self.dropout = dropout
for ks in kernel_sizes:
            assert ks % 2 == 1, "Only odd kernel sizes are supported!"
self.char_embedding = Embeddings(num_embeddings=nb_embeddings,
embedding_dim=embed_size,
embed_weight=embed_weight,
pad_idx=0,
dropout=dropout)
self.convs = nn.ModuleList([nn.Sequential(
nn.Conv1d(in_channels=embed_size,
out_channels=filter_nums[i],
padding=ks // 2,
kernel_size=ks),
nn.ReLU(),
nn.AdaptiveMaxPool1d(output_size=1)
) for i, ks in enumerate(kernel_sizes)])
self.linear = nn.Linear(in_features=sum(filter_nums),
out_features=embed_size)
def forward(self, char_idxs):
'''
:param char_idxs: (bz, seq_len, max_wd_len)
:return: (bz, seq_len, embed_size)
'''
bz, seq_len, max_wd_len = char_idxs.size()
reshape_char_idxs = char_idxs.reshape(bz*seq_len, -1)
char_embed = self.char_embedding(reshape_char_idxs)
# (bz*seq_len, char_embed_size, ch_seq_len)
char_embed_ = char_embed.transpose(1, 2).contiguous()
char_convs = [conv(char_embed_).squeeze(-1) for conv in self.convs]
conv_outs = torch.cat(tuple(char_convs), dim=-1).contiguous()
# (bz, seq_len, embed_size)
embed_out = self.linear(conv_outs).reshape(bz, seq_len, -1)
embed_out = F.dropout(embed_out, p=self.dropout, training=self.training)
return embed_out
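# Illustrative sketch for CharCNNEmbedding (not part of the original file); the
# character vocabulary size and tensor shapes below are made up.
def _demo_char_cnn_embedding():
    import torch
    char_embed = CharCNNEmbedding(nb_embeddings=60, embed_size=32,
                                  kernel_sizes=(3, 5), filter_nums=(16, 32))
    # (batch=2, seq_len=4, max_word_len=7); character index 0 is padding
    char_idxs = torch.randint(1, 60, (2, 4, 7))
    out = char_embed(char_idxs)
    print(out.shape)  # expected: torch.Size([2, 4, 32])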
class CharLSTMEmbedding(nn.Module):
def __init__(self, nb_embeddings,
embed_size=50,
hidden_size=50,
bidirectional=True,
dropout=0.0):
super(CharLSTMEmbedding, self).__init__()
self.char_embedding = Embeddings(num_embeddings=nb_embeddings,
embedding_dim=embed_size,
pad_idx=0,
dropout=dropout)
self.lstm = LSTM(input_size=embed_size,
hidden_size=hidden_size,
num_layers=1,
batch_first=True,
bidirectional=bidirectional)
lstm_hidden_size = hidden_size * 2 if bidirectional else hidden_size
self.linear = nn.Linear(in_features=lstm_hidden_size,
out_features=embed_size)
self.dropout = dropout
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear.weight)
def forward(self, char_idxs):
'''
:param char_idxs: (bz, seq_len, max_wd_len)
:return: (bz, seq_len, embed_size)
'''
bz, seq_len, max_wd_len = char_idxs.size()
reshape_char_idxs = char_idxs.reshape(bz * seq_len, -1)
char_mask = reshape_char_idxs.gt(0) # (bz*seq_len, max_wd_len)
char_embed = self.char_embedding(reshape_char_idxs)
lstm_out = self.lstm(char_embed, non_pad_mask=char_mask)[0] # (bz*seq_len, max_wd_len, hidden_size)
# lstm_out = F.relu(lstm_out)
        # set padded positions to -inf so they do not interfere with max pooling
        # (for average pooling, padded positions should be set to 0 instead)
        mask_lstm_out = lstm_out.masked_fill(~char_mask.unsqueeze(-1), -1e9)
# mask_lstm_out = mask_lstm_out.transpose(1, 2) # (bz*seq_len, hidden_size, max_wd_len)
# out = F.max_pool1d(mask_lstm_out, kernel_size=mask_lstm_out.size(-1)).squeeze(dim=-1)
out, _ = torch.max(mask_lstm_out, dim=1)
# (bz, seq_len, embed_size)
embed_out = self.linear(out).reshape(bz, seq_len, -1)
return embed_out
|
472779
|
from .model import Model
from .vae import VAE
from .vi import VI
from .ml import ML
from .gan import GAN
__all__ = [
'Model',
'ML',
'VAE',
'VI',
'GAN',
]
|
472781
|
import signal
import time
from leek_demo.app import app
@app.task(autoretry_for=(Exception,), retry_kwargs={'max_retries': 3})
def critical_task():
"""Will always fail/retry until max_retries"""
raise Exception("I'm a looser")
@app.task(autoretry_for=(Exception,), retry_kwargs={'max_retries': 3}, expires=120)
def revoked_expired_task():
"""Will fail/retry/.../expire"""
raise Exception("I will certainly expire")
@app.task()
def revoked_terminated_task():
"""Will be terminated"""
app.control.revoke(revoked_terminated_task.request.id, terminate=True, signal=signal.SIGTERM)
time.sleep(10)
@app.task(autoretry_for=(Exception,), retry_kwargs={'max_retries': 3})
def recovered_task():
"""Will fail/retry/.../succeed"""
if recovered_task.request.retries > 1:
return "I'm a survival"
raise Exception("I'm retrying my self")
|
472851
|
import urllib
import os
import time
import errno
import weakref
import base64
import json
import socket
#ASYNC
import asyncio
import aiohttp
from aiohttp import web
from urllib.parse import unquote
"""
HTTP Server interface
"""
def _get_viewer(ref):
#Get from weak reference, if deleted raise exception
lv = ref()
if not lv:
raise(Exception("Viewer not found"))
return lv
def _execute(lv, cmds):
if len(cmds) and cmds[0] == '_':
#base64 encoded commands or JSON state
cmds = str(base64.b64decode(cmds).decode('utf-8'))
#cmds = str(base64.b64decode(cmds), 'utf-8')
#Object to select can be provided in preceding angle brackets
selobj = None
if cmds[0] == '<':
pos = cmds.find('>')
selobj = lv.objects[cmds[1:pos]]
cmds = cmds[pos+1:]
#Execute commands via python API by preceding with '.'
done = False
if cmds[0] == '.':
attr = cmds.split()[0][1:]
pos = cmds.find(' ')
params = cmds[pos+1:]
if selobj:
#Call on Object
func = getattr(selobj, attr)
if func and callable(func):
func(params)
done = True
else:
#Call on Viewer
func = getattr(lv, attr)
if func and callable(func):
func(params)
done = True
elif cmds[0] == '$':
#Requests prefixed by '$' are sent
#from property collection controls
#format is $ID KEY VALUE
# - ID is the python id() of the properties object
# All properties collections are stored on their parent
# object using this id in the _collections dict
# - KEY is the property name key to set
# - VALUE is a json string containing the value to set
S = cmds.split()
target = S[0][1:]
if target in lv._collections:
#Get from _collections by id (weakref)
props = lv._collections[target]()
props[S[1]] = json.loads(S[2])
#Check for callback - if provided, call with updated props
func = getattr(props, 'callback')
if func and callable(func):
func(props)
#Default, call via lv.commands() scripting API
if not done:
if selobj:
selobj.select()
lv.commands(cmds)
headers = {'Access-Control-Allow-Origin' : '*',
'x-colab-notebook-cache-control' : 'no-cache'} #Colab: disable offline access cache
def img_response(lv, query={}):
global headers
resp = None
if 'width' in query and 'height' in query:
resp = lv.jpeg(resolution=(int(query['width']), int(query['height'])))
elif 'width' in query:
resp = lv.jpeg(resolution=(int(query['width']), 0))
else:
resp = lv.jpeg()
#Ensure the response is valid before serving
if resp is not None:
return web.Response(body=resp, content_type='image/jpeg', headers=headers)
else:
return web.Response(text='', headers=headers)
async def index(request):
global headers
#Index request returns full screen interactive view
lv = _get_viewer(request.app['viewer'])
w = lv.control.Window(align=None, wrapper=None, fullscreen=True)
code = lv.control.show(True, filename="")
return web.Response(text=code, headers=headers, content_type='text/html')
async def handle_get(request):
global headers
lv = _get_viewer(request.app['viewer'])
#Default to empty OK 200 response
response = web.Response(text='', headers=headers)
#print(request.path)
#for q in request.query:
# print(q, request.query[q])
if request.path.startswith('/image'):
response = img_response(lv, request.query)
elif request.path.startswith('/command=') or request.path.startswith('/icommand='):
pos1 = request.path.find('=')
pos2 = request.path.find('?')
if pos2 < 0: pos2 = len(request.path)
cmds = unquote(request.path[pos1+1:pos2])
#Run viewer commands
_execute(lv, cmds)
#Serve image or just respond 200
if request.path.startswith('/icommand='):
response = img_response(lv, request.query)
elif request.path.startswith('/getstate'):
state = lv.app.getState()
response = web.Response(text=state, headers=headers, content_type='application/json')
elif request.path.startswith('/connect'):
if 'url' in request.query:
#Save first valid connection URL on the viewer
url = request.query['url']
if len(lv._url) == 0:
lv._url = url
uid = id(lv)
response = web.Response(text=str(uid), headers=headers)
elif request.path.startswith('/key='):
pos2 = request.path.find('&')
cmds = unquote(request.path[1:pos2])
lv.commands('key ' + cmds, True)
elif request.path.startswith('/mouse='):
pos2 = request.path.find('&')
cmds = unquote(request.path[1:pos2])
lv.commands('mouse ' + cmds, True)
elif request.path.startswith('/db'):
#Send the database
db = bytes(lv.app.serialize())
response = web.Response(body=db, headers=headers, content_type='application/octet-stream')
else:
#Serve other urls as files if available
#print("UNKNOWN - ", request.path)
path = request.path
if os.path.exists(path):
#OK to always serve files in cwd?
response = web.FileResponse(path)
else:
#Serve files from lavavu html dir
#print(' - not found in cwd')
if path[0] == '/': path = path[1:]
path2 = os.path.join(lv.htmlpath, path)
if os.path.exists(path2) and os.path.isfile(path2):
#print(' - found in htmlpath')
response = web.FileResponse(path2)
return response
async def index_post(request):
lv = _get_viewer(request.app['viewer'])
#print("POST", request.path)
#text = await request.text()
text = ''
#Always interpret post data as commands
    #(can perform other actions too based on path later if we want)
if request.body_exists:
body = await request.read()
#body = await request.text()
cmds = str(body, 'utf-8') #python3 only
#from urllib.parse import unquote
#data_string = unquote(body)
#cmds = str(data_string.decode('utf-8'))
#Run viewer commands
_execute(lv, cmds)
return web.Response(text='', headers=headers)
"""
HTTP Server manager class
"""
class Server(object):
def __init__(self, viewer, port=8080, ipv6=False, retries=100):
#Allows viewer to be garbage collected
self.viewer = weakref.ref(viewer)
self.ipv6 = ipv6
self.retries = retries
self._closing = False
self._lock = asyncio.Lock()
#Get port/socket before running server in synchronous code
self.socket, self.port = _listen(port, self.ipv6, self.retries)
async def run(self):
#Server run!
#Lock until the port is retrieved
async with self._lock:
await _serve(self.viewer, self.socket)
#ONLY NEED THIS IF NO LOOP EXISTS AND WE ARE MANAGING OUR OWN
#while not self._closing:
# await asyncio.sleep(3600) # sleep forever
# #await asyncio.sleep(1) # sleep forever
# print('_', end='')
#To stop, call cleanup (TODO: put this somewhere in closedown code)
#await runner.cleanup()
def _listen(port, ipv6, retries):
#Get port/socket before running server in synchronous code
#avoids race conditions over port number with subsequent code that
#tries to use server.port before it is confirmed/opened
hostidx = 0
for i in range(retries):
try:
hosts = []
socktype = socket.AF_INET
if ipv6:
hosts = ['::', 'localhost', '::1']
host = hosts[hostidx]
socktype = socket.AF_INET6
else:
hosts = ['0.0.0.0', 'localhost', '127.0.0.1']
host = hosts[hostidx]
#https://github.com/aio-libs/aiohttp/issues/1987#issuecomment-309401600
sock = socket.socket(socktype)
sock.bind((host, port))
# Aiohttp will call 'listen' inside.
# But it must be called before we actually use the port,
# any attempts to connect before the 'listen' call will
# be rejected.
sock.listen(128)
params = sock.getsockname()
port = params[1]
#print("Socket ready on host %s port %s" % (host, port))
return sock, port
except (Exception) as e:
#Try another port
if e.errno == errno.EADDRINUSE: #98
port += 1
#Try again...
elif e.errno == errno.EAFNOSUPPORT: #97 : Address family not supported by protocol
#Try next host name/address
hostidx += 1
if hostidx > 2:
#Try again without ipv6?
if ipv6:
ipv6 = False
else:
ipv6 = True
hostidx = 0
#Try again...
else:
print("Socket open failed: ", e, e.errno, host, port)
print("Failed to open socket, max retries reached")
return None, 0
async def _serve(viewer, sock):
try:
#Create web application manager
app = web.Application()
#Store viewer
app["viewer"] = viewer
#Add routes
app.router.add_get('/', index)
        app.router.add_post('/', index_post)
app.router.add_get('/{tail:.*}', handle_get)
#Static routes? https://docs.aiohttp.org/en/stable/web_advanced.html
#app.add_routes([web.static('/', path_to_static_folder)])
#routes.static('/prefix', path_to_static_folder)
#app.add_routes([web.get('/', handler),
# web.post('/post', post_handler),
# web.put('/put', put_handler)])
#Can't use this, is blocking
#web.run_app(app, sock=sock)
# set up aiohttp - like run_app, but non-blocking - socket provided version
runner = aiohttp.web.AppRunner(app, access_log=None)
await runner.setup()
site = aiohttp.web_runner.SockSite(runner, sock=sock)
await site.start()
"""
#This works but have to wait for the port to be allocated before using
# passing socket as above gets port in synchronous code
# set up aiohttp - like run_app, but non-blocking
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, host=host, port=port, reuse_address=False)
await site.start()
#Get port from first entry in list of active connections
for s in runner.sites:
#print(s.name, s._port)
return s._port #Get actual port allocated
return 0
"""
except (Exception) as e:
print("Server start failed: ", e)
#Ignore SIGPIPE altogether (does not apply on windows)
#import sys
#if sys.platform != 'win32':
# from signal import signal, SIGPIPE, SIG_IGN
# signal(SIGPIPE, SIG_IGN)
def serve(viewer, port=None, ipv6=False, retries=100):
s = Server(viewer, port, ipv6, retries)
#Attach to event loop
loop = asyncio.get_event_loop()
loop.create_task(s.run())
return s
"""
Main entry point - run server and open browser interface
"""
if __name__ == '__main__':
import lavavu
import asyncio
lv = lavavu.Viewer()
print(lv.server.port)
lv.browser()
lv.app.loop()
#lv.interactive()
|
472853
|
from core import *
from cameras import *
from geometry import *
from material import *
from helpers import *
import pygame
import random
class TestUpdatingTexture(Base):
def initialize(self):
self.setWindowTitle('Updating Textures')
self.setWindowSize(800,800)
self.renderer = Renderer()
self.renderer.setViewportSize(800,800)
self.renderer.setClearColor(0.25, 0.25, 0.25)
self.scene = Scene()
self.camera = PerspectiveCamera()
self.camera.transform.setPosition(0, 0, 2)
self.camera.transform.lookAt(0, 0, 0)
self.cameraControls = FirstPersonController(self.input, self.camera)
self.canvas = pygame.Surface( (128,128), pygame.SRCALPHA )
self.canvas.fill( [255,255,255] )
self.pixels = pygame.PixelArray(self.canvas)
self.canvasID = OpenGLUtils.initializeSurface(self.canvas)
geometry = QuadGeometry(width=1, height=1, widthResolution=1, heightResolution=1)
material = SurfaceBasicMaterial(texture=self.canvasID)
# disable filtering to see individual pixels more clearly
material.linearFiltering = False
mesh = Mesh(geometry, material)
self.scene.add(mesh)
def update(self):
self.cameraControls.update()
if self.input.resize():
size = self.input.getWindowSize()
self.camera.setAspectRatio( size["width"]/size["height"] )
self.renderer.setViewportSize(size["width"], size["height"])
# changing the color of 1000 pixels per frame
for i in range(1000):
x = random.randint(1,126)
y = random.randint(1,126)
r = random.randint(0,255)
g = random.randint(0,255)
b = random.randint(0,255)
self.pixels[x,y] = (r,g,b)
OpenGLUtils.updateSurface(self.canvas, self.canvasID)
self.renderer.render(self.scene, self.camera)
# instantiate and run the program
TestUpdatingTexture().run()
|
472859
|
import numpy as np
import PIL.Image
import cv2
import math
import skimage.draw
import matplotlib.pyplot as plt
import pickle
import torch.nn.functional as F
import torch
# lighting functions
import lighting
# show image in Jupyter Notebook (work inside loop)
from io import BytesIO
from IPython.display import display, Image
def show_img_arr(arr):
im = PIL.Image.fromarray(arr)
bio = BytesIO()
im.save(bio, format='png')
display(Image(bio.getvalue(), format='png'))
# write log in training phase
def take_notes(content, target_file, create_file = False):
if create_file == True:
f = open(target_file,"w")
else:
f = open(target_file,"a")
f.write(content)
f.close()
return len(content)
# convenient for saving tensor to file as snapshot
def save_to_img(src, output_path_name, src_type = "tensor", channel_order="cwd", scale = 255):
if src_type == "tensor":
src_arr = np.asarray(src) * scale
elif src_type == "array":
src_arr = src*scale
else:
print("save tensor error, cannot parse src type.")
return False
if channel_order == "cwd":
src_arr = (np.moveaxis(src_arr,0,2)).astype(np.uint8)
elif channel_order == "wdc":
src_arr = src_arr.astype(np.uint8)
else:
print("save tensor error, cannot parse channel order.")
return False
src_img = PIL.Image.fromarray(src_arr)
src_img.save(output_path_name)
return True
def save_batch_tensors(src_tensor, tgt_tensor, pred_tensor, output_name):
src_arr = np.asarray(src_tensor)
tgt_arr = np.asarray(tgt_tensor)
pred_arr = np.asarray(pred_tensor)
batch_size = src_arr.shape[0]
chn = src_arr.shape[1]
height = src_arr.shape[2]
width = src_arr.shape[3]
board_arr = np.zeros((chn, height*batch_size, width*3))
for j in range(batch_size):
board_arr[:,j*height:(j+1)*height,0:width] = src_arr[j]
board_arr[:,j*height:(j+1)*height,width:2*width] = tgt_arr[j]
board_arr[:,j*height:(j+1)*height,2*width:3*width] = pred_arr[j]
save_to_img(board_arr, output_name, src_type = "array")
# camera project and inv-project
class CamPara():
def __init__(self, K=None, Rt=None):
img_size = [200,200]
if K is None:
K = np.array([[500, 0, 112],
[0, 500, 112],
[0, 0, 1]])
else:
K = np.array(K)
if Rt is None:
Rt = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
else:
Rt = np.array(Rt)
R = Rt[:,:3]
t = Rt[:,3]
self.cam_center = -np.dot(R.transpose(),t)
# compute projection and inv-projection matrix
self.proj_mat = np.dot(K, Rt)
self.inv_proj_mat = np.linalg.pinv(self.proj_mat)
# compute ray directions of camera center pixel
c_uv = np.array([float(img_size[0])/2+0.5, float(img_size[1])/2+0.5])
self.center_dir = self.inv_project(c_uv)
def get_camcenter(self):
return self.cam_center
def get_center_dir(self):
return self.center_dir
def project(self, p_xyz):
p_xyz = np.double(p_xyz)
p_uv_1 = np.dot(self.proj_mat, np.append(p_xyz, 1))
if p_uv_1[2] == 0:
return 0
p_uv = (p_uv_1/p_uv_1[2])[:2]
return p_uv
# inverse projection, if depth is None, return a normalized direction
def inv_project(self, p_uv, depth=None):
p_uv = np.double(p_uv)
p_xyz_1 = np.dot(self.inv_proj_mat, np.append(p_uv, 1))
if p_xyz_1[3] == 0:
return 0
p_xyz = (p_xyz_1/p_xyz_1[3])[:3]
p_dir = p_xyz - self.cam_center
p_dir = p_dir / np.linalg.norm(p_dir)
if depth is None:
return p_dir
else:
real_xyz = self.cam_center + p_dir * depth
return real_xyz
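# Small worked example for CamPara.project (illustrative, not original code).
# With the default intrinsics (f=500, principal point (112, 112)) and identity
# extrinsics, a point on the optical axis lands on the principal point, and a
# 0.5 m lateral offset at depth 2 m moves it by 500*0.5/2 = 125 pixels.
def _demo_campara_project():
    cam = CamPara()
    print(cam.project(np.array([0.0, 0.0, 2.0])))  # -> approx [112. 112.]
    print(cam.project(np.array([0.5, 0.0, 2.0])))  # -> approx [237. 112.]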
# for photometric loss
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray,
mask, lighting_est, device, K, thres):
N,C,H,W = colorImg_gray.size()
# color loss
normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
SHs = lighting.normalToSHBatch(normals,device)
SHs = torch.reshape(SHs, (N, H*W, 9))
lighting_est = torch.reshape(lighting_est, (N, 9, 1))
#SHs to [B, H*W,9] lighting [B, 9, 1] --[N, H*W] --[B,H,W,1]
color_shading = torch.bmm(SHs, lighting_est) # N H*W 1
color_shading = torch.reshape(color_shading, (N, H, W))
mask1 = torch.reshape(mask[:,0,:,:], (N,H,W)) # one layer mask
color_pre = mask1 * (color_shading * albedoImg_gray) # N*H*W
colorImg_gray_mask = mask1 * colorImg_gray # mask
colorloss = F.l1_loss(color_pre, colorImg_gray_mask) # NHW size directly
return colorloss, color_pre
# come from hmr-src/util/image.py
def scale_and_crop(image, scale, center, img_size):
image_scaled, scale_factors = resize_img(image, scale)
# Swap so it's [x, y]
scale_factors = [scale_factors[1], scale_factors[0]]
center_scaled = np.round(center * scale_factors).astype(np.int)
margin = int(img_size / 2)
image_pad = np.pad(
image_scaled, ((margin, ), (margin, ), (0, )), mode='edge')
center_pad = center_scaled + margin
# figure out starting point
start_pt = center_pad - margin
end_pt = center_pad + margin
# crop:
crop = image_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
proc_param = {
'scale': scale,
'start_pt': start_pt,
'end_pt': end_pt,
'img_size': img_size
}
return crop, proc_param
def get_line_length(pixel_list):
if len(pixel_list) <= 1:
return 0
max_x, min_x = pixel_list[0][0], pixel_list[0][0]
max_y, min_y = pixel_list[0][1], pixel_list[0][1]
for i in range(len(pixel_list)):
if pixel_list[i][0]>max_x:
max_x = pixel_list[i][0]
elif pixel_list[i][0]<min_x:
min_x = pixel_list[i][0]
if pixel_list[i][1]>max_y:
max_y = pixel_list[i][1]
elif pixel_list[i][1]<min_y:
min_y = pixel_list[i][1]
l_x = max_x - min_x
l_y = max_y - min_y
length = len(pixel_list) * np.linalg.norm([l_x, l_y]) / max(l_x, l_y)
return length
# compute the distance between anchor points and silhouette boundary,
# (the model occluded parts are filtered out)
def measure_achr_dist(achr_verts,
achr_normals,
gt_sil,
proj_sil,
hd_size = (1000, 1000),
max_dist = 0.05,
anchor_num = 200,
):
# parse projected image to get distance for each anchor
    if gt_sil.shape != hd_size:
        gt_sil = cv2.resize(gt_sil, dsize=(1000, 1000), interpolation=cv2.INTER_LINEAR)
    if proj_sil.shape != hd_size:
        proj_sil = cv2.resize(proj_sil, dsize=(1000, 1000), interpolation=cv2.INTER_LINEAR)
proj_img_out = np.zeros((hd_size))
proj_img_in = np.zeros((hd_size))
cam_para = CamPara(K = np.array([[2232.142857, 0, 500],
[0, 2232.142857, 500],
[0, 0, 1]]))
# project vectors to image as index-valued lines
# make start_point list and end_out_point list, end_in_point list
start_point_list = []
end_out_point_list = []
end_in_point_list = []
for i in range(len(achr_verts)):
xy = cam_para.project(achr_verts[i])
x = int(xy[0]+0.5)
y = int(xy[1]+0.5)
if x<0 or y<0 or x>=hd_size[1] or y>=hd_size[0]:
continue
uv = cam_para.project(achr_verts[i] + achr_normals[i]*max_dist)
u = int(uv[0]+0.5)
v = int(uv[1]+0.5)
if u<0 or v<0 or u>=hd_size[1] or v>=hd_size[0]:
continue
ab = cam_para.project(achr_verts[i] - achr_normals[i]*max_dist)
a = int(ab[0]+0.5)
b = int(ab[1]+0.5)
if a<0 or b<0 or a>=hd_size[1] or b>=hd_size[0]:
continue
r_out, c_out = skimage.draw.line(y,x,v,u)
r_in, c_in = skimage.draw.line(y,x,b,a)
# draw img out and in
proj_img_out[r_out, c_out] = i+1
proj_img_in[r_in, c_in] = i+1
proj_img_out[proj_sil>=128] = 0
proj_img_out[gt_sil<128] = 0
proj_img_in[proj_sil<128] = 0
proj_img_in[gt_sil>=128] = 0
# build pixel map for efficiently using get_line_length()
pixel_map_in = [[] for i in range(anchor_num)]
pixel_map_out = [[] for i in range(anchor_num)]
for x in range(hd_size[1]):
for y in range(hd_size[0]):
if proj_img_in[x, y] > 0:
pixel_map_in[int(proj_img_in[x, y])-1].append([x,y])
if proj_img_out[x, y] > 0:
pixel_map_out[int(proj_img_out[x, y])-1].append([x,y])
# compute index list
index_list = [0] * len(achr_verts)
for i in range(anchor_num):
length_in = get_line_length(pixel_map_in[i]) #len(pixel_map_in[i])
length_out = get_line_length(pixel_map_out[i]) #len(pixel_map_out[i])
if length_in>length_out:
index_list[i] = -length_in
elif length_out>length_in:
index_list[i] = length_out
else:
index_list[i] = 0
#if length_in<2 and length_out>2:
# index_list[i] = length_out
#elif length_in>2 and length_out<2:
# index_list[i] = -length_in
#elif length_in>2 and length_out >2:
# if start_exist_out_list[i] == True:
# index_list[i] = length_out
# elif start_exist_in_list[i] == True:
# index_list[i] = -length_in
# else:
# index_list[i] = 0
#else:
# index_list[i] = 0
#if length_out >= length_in:
# index_list[i] = length_out
#else:
# index_list[i] = -length_in
return index_list
# draw vert moving vector in an image
def draw_vert_move(ori_achrs, new_achrs, bg_img = None):
if len(ori_achrs) != len(new_achrs):
print("ERROR: length not matched in draw_vert_move()")
return False
if bg_img is None:
bg_img = np.zeros((224,224,3))
else:
bg_img = bg_img.copy()
cam_para = CamPara()
img_size = bg_img.shape
for i in range(len(ori_achrs)):
xy = cam_para.project(ori_achrs[i])
x = int(xy[0]+0.5)
y = int(xy[1]+0.5)
if x<0 or y<0 or x>=img_size[1] or y>=img_size[0]:
continue
uv = cam_para.project(new_achrs[i])
u = int(uv[0]+0.5)
v = int(uv[1]+0.5)
if u<0 or v<0 or u>=img_size[1] or v>=img_size[0]:
continue
r, c = skimage.draw.line(y,x,v,u)
if(len(r)<3):
continue
bg_img[r, c, :] = np.array([0, 0, 255])
bg_img[y, x, :] = np.array([0, 255, 0])
return bg_img
# display loss, support multiple draw
class loss_board():
def __init__(self):
self.color_list = ("b","g","r","c","m","y","k","w")
self.color_id = 0
self.draw_num = 0
self.fig, self.ax = plt.subplots()
self.data = []
def draw(self, loss_file, kn_smth = 0):
# read file
f = open(loss_file, "r")
# skip file header
f.readline()
# make data list
ctt = f.read().split()
        num = len(ctt) // 3
data_list = []
for i in range(num):
data_list.append(float(ctt[i*3]))
# smooth if neccessary
if kn_smth != 0:
data_list_smth = []
for i in range(num):
d_sum = 0
count = 0
for j in range(i - kn_smth, i + kn_smth + 1):
if j<0 or j>= num:
continue
else:
d_sum += data_list[j]
count += 1
data_list_smth.append(d_sum/count)
data_list = data_list_smth
self.data.append(data_list)
self.ax.plot(data_list, color = self.color_list[self.color_id])
self.draw_num += 1
self.color_id = (self.draw_num) % len(self.color_list)
def show(self):
txt_ctt = ""
for i in range(self.draw_num):
if i == 0:
txt_ctt += "%d -- %s" % \
(i,self.color_list[i%len(self.color_list)])
else:
txt_ctt += "\n%d -- %s" % \
(i, self.color_list[i%len(self.color_list)])
plt.text(0.9, 0.85,
txt_ctt,
transform = self.ax.transAxes,
size=10,
ha="center",
va="center",
bbox=dict(boxstyle="round",color="silver")
)
plt.show()
def get_list(self):
return self.data
def get_joint_move(verts, lsp_joint, proc_para, mesh_joint, unseen_mode=False):
scale = proc_para["scale"]
img_size = proc_para["img_size"]
bias = np.array([img_size/2, img_size/2]) - proc_para["start_pt"]
point_list = mesh_joint["point_list"]
index_map = mesh_joint["index_map"]
flat_point_list = [item for sublist in point_list for item in sublist]
num_mj = len(point_list)
j_list = []
for i in range(num_mj):
j_p_list = []
for j in range(len(point_list[i])):
j_p_list.append(verts[point_list[i][j]])
j_list.append(sum(j_p_list)/len(j_p_list))
new_joint_verts = []
ori_joint_verts = []
cam_para = CamPara()
joint_move = []
joint_posi = []
for i in range(len(j_list)):
src_yx = cam_para.project(j_list[i])
src_y = src_yx[0]
src_x = src_yx[1]
joint_posi.append(src_yx.tolist())
if len(index_map[i]) == 1:
tgt_x = lsp_joint[1,index_map[i][0]]
tgt_y = lsp_joint[0,index_map[i][0]]
unseen_label = lsp_joint[2,index_map[i][0]]
elif len(index_map[i]) == 2:
tgt_x = (lsp_joint[1,index_map[i][0]] +
lsp_joint[1,index_map[i][1]]) / 2
tgt_y = (lsp_joint[0,index_map[i][0]] +
lsp_joint[0,index_map[i][1]]) / 2
unseen_label = lsp_joint[2,index_map[i][0]] * \
lsp_joint[2,index_map[i][1]]
tgt_y = tgt_y*scale + bias[0]
tgt_x = tgt_x*scale + bias[1]
#perspect_scale = j_list[i][2]/5. # proved to be unnecessary
joint_move_t = np.array([tgt_y - src_y, tgt_x - src_x, 0])
        # many joints in LSPET/COCO are invalid (unseen); filter them out using this label
if unseen_mode is True and unseen_label <= 0:
joint_move_t = joint_move_t*0
joint_move.append(joint_move_t[:2])
# make new joint verts
for j in point_list[i]:
new_joint_verts.append(verts[j] + joint_move_t*0.007)
ori_joint_verts.append(verts[j])
joint_move = np.array(joint_move)
joint_posi = np.array(joint_posi)
joint_posi[joint_posi<0] = 0
joint_posi[joint_posi>(img_size-1)] = img_size-1
new_joint_verts = np.array(new_joint_verts)
ori_joint_verts = np.array(ori_joint_verts)
return new_joint_verts, ori_joint_verts, joint_move, joint_posi
def get_joint_posi(verts,
point_list = [],
j2or3 = 2,
img_size = 224,
K = None,
Rt = None):
if point_list == []:
# read joint indexes
with open ('../predef/mesh_joint_list.pkl', 'rb') as fp:
item_dic = pickle.load(fp)
point_list = item_dic["point_list"]
num_mj = len(point_list)
joint3d_list = []
for i in range(num_mj):
j_p_list = []
for j in range(len(point_list[i])):
j_p_list.append(verts[point_list[i][j]])
joint3d_list.append(sum(j_p_list)/len(j_p_list))
if j2or3 == 3:
return joint3d_list
elif j2or3 == 2:
cam_para = CamPara(K = K, Rt = Rt)
joint2d_list = []
for i in range(len(joint3d_list)):
src_yx = cam_para.project(joint3d_list[i])
joint2d_list.append(src_yx.tolist())
joint2d_list = np.array(joint2d_list)
joint2d_list[joint2d_list<0] = 0
joint2d_list[joint2d_list>(img_size-1)] = img_size-1
return joint2d_list
else:
print("WARN: wrong j2or3 variable in get_joint_posi()")
return []
# get anchor movement
def get_achr_move(gt_sil, verts, vert_norms, proj_sil):
with open ('../predef/dsa_achr.pkl', 'rb') as fp:
dic_achr = pickle.load(fp)
achr_id = dic_achr['achr_id']
achr_num = len(achr_id)
ori_achr_verts = []
achr_norms = []
for j in range(achr_num):
ori_achr_verts.append(verts[achr_id[j]])
achr_norms.append(vert_norms[achr_id[j]])
ori_achr_verts = np.array(ori_achr_verts)
achr_norms = np.array(achr_norms)
# predict anchor_move of anchor point
achr_move = measure_achr_dist(ori_achr_verts,
achr_norms,
gt_sil,
proj_sil)
achr_move = np.array(achr_move)
diff = achr_move * 0.003
# make new_achr_verts
new_achr_verts = []
for j in range(achr_num):
new_achr_verts.append(ori_achr_verts[j] + achr_norms[j] * diff[j])
new_achr_verts = np.array(new_achr_verts)
return new_achr_verts, ori_achr_verts, achr_move
# compute Intersection over Union
def sil_iou(src_sil, tgt_sil):
# transfer to int array
src_sil = np.array(src_sil).astype(np.int)
tgt_sil = np.array(tgt_sil).astype(np.int)
# check channel numbers
if len(src_sil.shape)>2 or len(tgt_sil.shape)>2:
print("ERROR: input channel of sil_iou is more than two.")
return False
# threshold
src_sil[src_sil!=0] = 1
tgt_sil[tgt_sil!=0] = 1
# compute IoU
sil_I = src_sil - tgt_sil
sil_I[sil_I!=0] = 1
sil_U = src_sil + tgt_sil
sil_U[sil_U!=0] = 1
iou = 1. - float(np.sum(sil_I))/float(np.sum(sil_U))
return iou
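# Tiny worked example for sil_iou (illustrative): the symmetric difference of
# the two masks below covers 2 pixels and their union covers 3, so the function
# returns 1 - 2/3 = 1/3, i.e. intersection (1) over union (3).
def _demo_sil_iou():
    a = np.array([[1, 1],
                  [0, 0]])
    b = np.array([[0, 1],
                  [0, 1]])
    print(sil_iou(a, b))  # -> 0.333...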
# for smpl model random joint deform
import random
from mesh_edit import fast_deform_dja
class random_joint_deform():
def __init__(self,
predef_vert = True,
verts = [],
max_dist = 0.1):
self.predef_vert = predef_vert
self.max_dist = max_dist
self.fd_j = fast_deform_dja(weight = 10.0)
# read joint index list
with open ('../predef/mesh_joint_list.pkl', 'rb') as fp:
item_dic = pickle.load(fp)
self.point_list = item_dic["point_list"]
if self.predef_vert == True:
if verts == []:
print("ERROR: no predefine verts found when initialize RJD")
else:
self.verts = verts
def __call__(self,
verts = []):
if self.predef_vert == False:
if verts == []:
print("ERROR: no verts found when run RJD")
return False
self.verts = verts
new_joint_verts = []
ori_joint_verts = []
for i in range(len(self.point_list)):
joint_move = np.array([random.random() - 0.5,
random.random() - 0.5,
0])
if i == 5:
# joint weight decrease for hip
j_scale = self.max_dist * 0.1
else:
j_scale = self.max_dist
for j in self.point_list[i]:
new_joint_verts.append(self.verts[j] + joint_move*j_scale)
ori_joint_verts.append(self.verts[j])
ori_joint_verts = np.array(ori_joint_verts)
new_joint_verts = np.array(new_joint_verts)
new_verts = self.fd_j.deform(np.asarray(self.verts),
new_joint_verts)
return new_verts, ori_joint_verts, new_joint_verts
# get silhouette boundingbox
def get_sil_bbox(sil, margin = 0):
if len(sil.shape)>2:
sil = sil[:,:,0]
sil_col = np.sum(sil,1)
sil_row = np.sum(sil,0)
y_min = np.argmax(sil_col>0)
y_max = len(sil_col) - np.argmax(np.flip(sil_col, 0)>0)
x_min = np.argmax(sil_row>0)
x_max = len(sil_row) - np.argmax(np.flip(sil_row, 0)>0)
if margin != 0:
y_min -= margin
x_min -= margin
y_max += margin
x_max += margin
return y_min, y_max, x_min, x_max
# come from hmr-src/util/image.py
def resize_img(img, scale_factor):
new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
new_img = cv2.resize(img, (new_size[1], new_size[0]))
# This is scale factor of [height, width] i.e. [y, x]
actual_factor = [
new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
]
return new_img, actual_factor
import openmesh
# Compose verts and faces to openmesh TriMesh
def make_trimesh(verts, faces, compute_vn = True):
# if vertex index starts with 1, make it start with 0
if np.min(faces) == 1:
faces = np.array(faces)
faces = faces - 1
# make a mesh
mesh = openmesh.TriMesh()
# transfer verts and faces
for i in range(len(verts)):
mesh.add_vertex(verts[i])
for i in range(len(faces)):
a = mesh.vertex_handle(faces[i][0])
b = mesh.vertex_handle(faces[i][1])
c = mesh.vertex_handle(faces[i][2])
mesh.add_face(a,b,c)
# compute vert_norms
if compute_vn is True:
mesh.request_vertex_normals()
mesh.update_normals()
return mesh
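# Illustrative sketch: build a one-triangle mesh with make_trimesh and read it
# back through the accessors already used in this file (not original code).
def _demo_make_trimesh():
    verts = np.array([[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
    faces = np.array([[0, 1, 2]])
    mesh = make_trimesh(verts, faces)
    print(mesh.points().shape)         # -> (3, 3)
    print(mesh.face_vertex_indices())  # -> [[0 1 2]]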
# get shifted verts
def shift_verts(proc_param, verts, cam):
img_size = proc_param['img_size']
cam_s = cam[0]
cam_pos = cam[1:]
flength = 500.
tz = flength / (0.5 * img_size * cam_s)
trans = np.hstack([cam_pos, tz])
vert_shifted = verts + trans
return vert_shifted
# transform mpii joints to standard lsp definition
# for ALL at one time
def transform_mpii_joints(joints):
num = joints.shape[2]
joints_t = np.zeros((3, 14, num))
joints_t[:,0:6,:] = joints[:,0:6,:] # lower limbs
joints_t[:,6:12,:] = joints[:,10:16,:] # upper limbs
    joints_t[:,12,:] = joints[:,8,:] # neck
    joints_t[:,13,:] = joints[:,9,:] # head top
# head compensation
joints_t[:2,13,:] = joints_t[:2,13,:]*0.8 + joints_t[:2,12,:]*0.2
    # ankle compensation
joints_t[:2,5,:] = joints_t[:2,5,:]*0.95 + joints_t[:2,4,:]*0.05
joints_t[:2,0,:] = joints_t[:2,0,:]*0.95 + joints_t[:2,1,:]*0.05
return joints_t
# transform coco joints to standard lsp definition
# for ONLY one tuple
def transform_coco_joints(joints):
joints = np.transpose(joints)
joints_t = np.zeros((3, 14))
joints_t[:,0] = joints[:,16] # Right ankle
joints_t[:,1] = joints[:,14] # Right knee
joints_t[:,2] = joints[:,12] # Right hip
joints_t[:,3] = joints[:,11] # Left hip
joints_t[:,4] = joints[:,13] # Left knee
joints_t[:,5] = joints[:,15] # Left ankle
joints_t[:,6] = joints[:,10] # Right wrist
joints_t[:,7] = joints[:,8] # Right elbow
joints_t[:,8] = joints[:,6] # Right shoulder
joints_t[:,9] = joints[:,5] # Left shoulder
joints_t[:,10] = joints[:,7] # Left elbow
joints_t[:,11] = joints[:,9] # Left wrist
joints_t[:,12] = np.array([-1, -1, 0]) # Neck
joints_t[:,13] = np.array([-1, -1, 0]) # Head top
return joints_t
# transform h36m joints to standard lsp definition
# for ONLY one tuple
def transform_h36m_joints(joints):
joints = np.resize(joints, (32, 2)).transpose()
joints_t = np.ones((3, 14))
joints_t[:2,0] = joints[:,3] # Right ankle
joints_t[:2,1] = joints[:,2] # Right knee
joints_t[:2,2] = joints[:,1] # Right hip
joints_t[:2,3] = joints[:,6] # Left hip
joints_t[:2,4] = joints[:,7] # Left knee
joints_t[:2,5] = joints[:,8] # Left ankle
joints_t[:2,6] = joints[:,27] # Right wrist
joints_t[:2,7] = joints[:,26] # Right elbow
joints_t[:2,8] = joints[:,25] # Right shoulder
joints_t[:2,9] = joints[:,17] # Left shoulder
joints_t[:2,10] = joints[:,18] # Left elbow
joints_t[:2,11] = joints[:,19] # Left wrist
joints_t[:2,12] = joints[:,13] # Neck
joints_t[:2,13] = joints[:,15] # Head top
    # ankle compensation
joints_t[:2,5] = joints_t[:2,5]*0.85 + joints_t[:2,4]*0.15
joints_t[:2,0] = joints_t[:2,0]*0.85 + joints_t[:2,1]*0.15
return joints_t
# draw sil from seg_points
def points2sil(seg_points, sil_shape):
seg_points = np.array(seg_points).astype(np.int32)
if len(seg_points.shape) == 1:
p_num = len(seg_points)/2
seg_points = np.resize(seg_points, (p_num, 2))
sil = np.zeros(sil_shape)
cv2.fillPoly(sil, [seg_points], (255), lineType=8)
return sil
def pad_arr(arr, pad):
if len(arr.shape) == 3:
pad_img = np.pad(arr.tolist(), ((pad,pad),(pad,pad),(0,0)), "edge")
elif len(arr.shape) == 2:
pad_img = np.pad(arr.tolist(), ((pad,pad),(pad,pad)), "edge")
else:
print("ERROR: cannot understand arr structure in func: pad_arr")
pad_img = False
pad_img = pad_img.astype(arr.dtype)
return pad_img
def center_crop(arr, center, size = 64):
center = np.asarray(center)
img_size = arr.shape[0]
center[center<0] = 0
center[center>(img_size-1)] = img_size-1
half_size = int(size/2.0)
arr_pad = pad_arr(arr, half_size)
center = np.round(center)
start_pt = (np.round(center)).astype(np.int)
end_pt = (np.round(center) + half_size*2).astype(np.int)
#print(start_pt[0],end_pt[0])
if len(arr.shape) == 3:
return arr_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
else:
return arr_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0]]
def center_crop_2dsize(arr, center, size):
center = np.array(center)
size = np.array(size)
img_size = arr.shape[0]
center[center<0] = 0
center[center>(img_size-1)] = img_size-1
half_size = (size/2.0).astype(np.int)
max_hs = np.max(half_size)
arr_pad = pad_arr(arr, max_hs)
center = np.round(center)
start_pt = (np.round(center) - half_size + max_hs).astype(np.int)
end_pt = (np.round(center) + half_size + max_hs).astype(np.int)
#print(start_pt[0],end_pt[0])
if len(arr.shape) == 3:
return arr_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
else:
return arr_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0]]
# for visualizing predict window in images
def draw_rect(img_arr, center, size=64, color=[255,0,0]):
ori_dtype = img_arr.dtype
img_arr = img_arr.astype(np.float)
half_size = int(size/2.0)
center = np.round(center)
start_pt = (np.round(center) - half_size).astype(np.int)
end_pt = (np.round(center) + half_size).astype(np.int)
cv2.rectangle(img_arr, tuple(start_pt), tuple(end_pt), color)
return img_arr.astype(ori_dtype)
# for visualizing predict window in images
def draw_joints_rect(img_arr, joint_posi, ratio = 1):
ori_dtype = img_arr.dtype
joint_num = len(joint_posi)
    seed_arr = np.array([range(1, 255, 255 // joint_num)]).astype(np.uint8)
color_list = cv2.applyColorMap(seed_arr, cv2.COLORMAP_RAINBOW)[0]
draw_arr = img_arr.astype(np.float)
for i in range(joint_num):
draw_arr = draw_rect(draw_arr, joint_posi[i],
color = color_list[i].tolist())
if ratio < 1:
draw_arr = draw_arr*ratio + img_arr.astype(np.float)*(1-ratio)
return draw_arr.astype(ori_dtype)
# for visualizing predict window in images
def draw_anchors_rect(img_arr, anchor_posi, sample = 1, ratio = 1):
ori_dtype = img_arr.dtype
joint_num = len(anchor_posi)
    seed_arr = np.array([range(1, 255, 255 // joint_num)]).astype(np.uint8)
color_list = cv2.applyColorMap(seed_arr, cv2.COLORMAP_RAINBOW)[0]
draw_arr = img_arr.astype(np.float)
for i in range(joint_num):
if (i%sample)!=0:
continue
draw_arr = draw_rect(draw_arr, anchor_posi[i],
size = 32,
color = color_list[i].tolist())
if ratio < 1:
draw_arr = draw_arr*ratio + img_arr.astype(np.float)*(1-ratio)
return draw_arr.astype(ori_dtype)
# write OBJ from vertex
# not tested yet
def verts2obj(out_verts, filename):
vert_num = len(out_verts)
faces = np.load("../predef/smpl_faces.npy")
face_num = len(faces)
with open(filename, 'w') as fp:
for j in range(vert_num):
fp.write( 'v %f %f %f\n' % ( out_verts[j,0], out_verts[j,1], out_verts[j,2]) )
for j in range(face_num):
fp.write( 'f %d %d %d\n' % (faces[j,0]+1, faces[j,1]+1, faces[j,2]+1) )
    # PIL.Image.fromarray(src_img.astype(np.uint8)).save("./output/src_img_%d.png" % test_num)
return True
# compute anchor_posi from achr_verts
def get_anchor_posi(achr_verts):
cam_para = CamPara()
achr_num = len(achr_verts)
achr_posi = np.zeros((achr_num, 2))
for i in range(achr_num):
achr_posi[i] = cam_para.project(achr_verts[i])
return achr_posi
# here supplement a post-processing for seg, to filter out
# the some objects containing less than min_pixel pixels
def refine_sil(sil, min_pixel):
if len(sil.shape)==3:
sil = sil[:,:,0]
c3 = True
else:
c3 = False
sil[sil>0] = 255
nb_components, output, stats, centroids = \
cv2.connectedComponentsWithStats(sil, connectivity = 8)
sizes = stats[1:, -1]; nb_components = nb_components - 1
refined_sil = np.zeros((output.shape))
#for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
if sizes[i] >= min_pixel:
refined_sil[output == i + 1] = 255
if c3 is True:
refined_sil = np.stack((refined_sil,)*3, -1)
return refined_sil
# subdivide mesh to 4 times faces
import openmesh as om
def subdiv_mesh_x4(mesh):
# get original vertex list
verts = mesh.points()
verts_list = verts.tolist()
verts_num = len(verts_list)
# make matrix to represent the id of each two verts
new_vert_dic = np.zeros((verts_num, verts_num), dtype = np.int)
# add vertexes
crt_id = verts_num
for e in mesh.edge_vertex_indices():
new_vert_dic[e[0], e[1]] = crt_id
new_vert_dic[e[1], e[0]] = crt_id
verts_list.append((verts[e[0]] + verts[e[1]])/2.)
crt_id += 1
faces_list = []
# add faces
for f in mesh.face_vertex_indices():
v1 = f[0]
v2 = f[1]
v3 = f[2]
v4 = new_vert_dic[v1, v2]
v5 = new_vert_dic[v2, v3]
v6 = new_vert_dic[v3, v1]
faces_list.append([v1, v4, v6])
faces_list.append([v4, v2, v5])
faces_list.append([v6, v5, v3])
faces_list.append([v4, v5, v6])
# make new mesh
subdiv_mesh = make_trimesh(verts_list, faces_list, compute_vn = False)
return subdiv_mesh
# remove toes from smpl mesh model
def smpl_detoe(mesh):
d_inds = [5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516,
5517, 5518, 5519, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527,
5528, 5529, 5530, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538,
5539, 5540, 5541, 5542, 5543, 5544, 5545, 5546, 5547, 5548, 5549,
5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5560,
5561, 5562, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5570, 5571,
5572, 5573, 5574, 5575, 5576, 5577, 5578, 5579, 5580, 5581, 5582,
5583, 5584, 5585, 5586, 5587, 5588, 5589, 5590, 5591, 5592, 5593,
5594, 5595, 5596, 5597, 5598, 5599, 5600, 5601, 5602, 5603, 5604,
5605, 5606, 5607, 5608, 5609, 5610, 5611, 5612, 5613, 5614, 5615,
5616, 5617, 5618, 5619, 5620, 5621, 5622, 5623, 5624, 5625, 5626,
5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5637,
5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5647, 5648,
5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659,
5660, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670,
5671, 5672, 5673, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681,
5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5691, 5692,
5693, 5694, 5695, 5696, 5697, 5698, 5699, 5700, 5701, 5702, 5703,
5704, 5705, 12394, 12395, 12396, 12397, 12398, 12399, 12400, 12401,
12402, 12403, 12404, 12405, 12406, 12407, 12408, 12409, 12410,
12411, 12412, 12413, 12414, 12415, 12416, 12417, 12418, 12419,
12420, 12421, 12422, 12423, 12424, 12425, 12426, 12427, 12428,
12429, 12430, 12431, 12432, 12433, 12434, 12435, 12436, 12437,
12438, 12439, 12440, 12441, 12442, 12443, 12444, 12445, 12446,
12447, 12448, 12449, 12450, 12451, 12452, 12453, 12454, 12455,
12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12464,
12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473,
12474, 12475, 12476, 12477, 12478, 12479, 12480, 12481, 12482,
12483, 12484, 12485, 12486, 12487, 12488, 12489, 12490, 12491,
12492, 12493, 12494, 12495, 12496, 12497, 12498, 12499, 12500,
12501, 12502, 12503, 12504, 12505, 12506, 12507, 12508, 12509,
12510, 12511, 12512, 12513, 12514, 12515, 12516, 12517, 12518,
12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527,
12528, 12529, 12530, 12531, 12532, 12533, 12534, 12535, 12536,
12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545,
12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554,
12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563,
12564, 12565, 12566, 12567, 12568, 12569, 12570, 12571, 12572,
12573, 12574, 12575, 12576, 12577, 12578, 12579, 12580, 12581,
12582, 12583, 12584, 12585, 12586, 12587, 12588, 12589, 12590,
12591, 12592, 12593, ]
add_fv_list = [[3316, 3318, 3315], [3318, 3313, 3315],
[3313, 3310, 3315], [3313, 3304, 3310],
[3304, 3307, 3310], [3303, 3307, 3304],
[3303, 3300, 3307], [3291, 3300, 3303],
[3291, 3296, 3300], [3292, 3297, 3296],
[3292, 3296, 3291], [3292, 3294, 3297],
[6718, 6715, 6716], [6713, 6718, 6716],
[6713, 6716, 6711], [6704, 6713, 6711],
[6704, 6711, 6707], [6703, 6704, 6707],
[6703, 6707, 6701], [6692, 6703, 6701],
[6692, 6701, 6696], [6691, 6692, 6696],
[6691, 6696, 6697], [6694, 6691, 6697]]
face_list = mesh.face_vertex_indices().tolist()
new_face_list = []
for i in range(len(face_list)):
if not i in d_inds:
new_face_list.append(face_list[i])
new_face_list = new_face_list + add_fv_list
new_mesh = make_trimesh(mesh.points(), np.array(new_face_list))
return new_mesh
# flatten naval in smpl mesh
def flatten_naval(mesh):
verts = mesh.points()
verts[5234] = (verts[4402]+verts[3504])*0.5
verts[1767] = (verts[3504]+verts[917])*0.5
verts[1337] = (verts[917]+verts[1769])*0.5
verts[4813] = (verts[1769]+verts[4402])*0.5
verts[4812] = (verts[5234]+verts[4813])*0.5
verts[3501] = (verts[5234]+verts[1767])*0.5
verts[1336] = (verts[1767]+verts[1337])*0.5
verts[1768] = (verts[1337]+verts[4813])*0.5
verts[3500] = (verts[3504]+verts[4402]+verts[917]+verts[1769])*0.25
return mesh
# rotate verts along y axis
def rotate_verts_y(verts, y):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = y*math.pi/180
R = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
for i in range(len(verts)):
verts[i] = np.dot(R, verts[i])
verts = verts + verts_mean
return verts
# rotate verts along x axis
def rotate_verts_x(verts, x):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = x*math.pi/180
R = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
for i in range(len(verts)):
verts[i] = np.dot(R, verts[i])
verts = verts + verts_mean
return verts
# rotate verts along z axis
def rotate_verts_z(verts, z):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
angle = z*math.pi/180
R = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
for i in range(len(verts)):
verts[i] = np.dot(R, verts[i])
verts = verts + verts_mean
return verts
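# Hand-checkable example for the rotation helpers (illustrative, made-up data):
# rotation happens about the centroid of the vertex set, so rotating the pair
# below by 90 degrees around the y axis swings the +x offsets onto the z axis.
def _demo_rotate_verts_y():
    verts = np.array([[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0]])
    out = rotate_verts_y(verts, 90)
    print(np.round(out, 3))  # -> [[0.5 0. 0.5] [0.5 0. -0.5]]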
# used in argument parser
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
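# Typical argparse wiring for str2bool (an illustrative sketch; the --use_gpu
# flag is hypothetical and not used elsewhere in this file).
def _demo_str2bool():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_gpu", type=str2bool, default=False)
    args = parser.parse_args(["--use_gpu", "yes"])
    print(args.use_gpu)  # -> True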
|
472863
|
from datetime import datetime
import pytz
class Entity(dict):
"""Common functionality for racing entities"""
def __init__(self, provider, property_cache, *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.provider = provider
self.property_cache = dict(**property_cache) if property_cache is not None else dict()
if 'created_at' not in self:
self['created_at'] = self['updated_at'] = datetime.now(pytz.utc)
for key in self:
if isinstance(self[key], datetime):
try:
self[key] = pytz.utc.localize(self[key])
except ValueError:
pass
@property
def has_expired(self):
"""Expire entities sourced from an incompatible scraper version"""
return not self.provider.scraper.is_compatible_with(self['scraper_version'])
def get_cached_property(self, key, source_method, *source_args, **source_kwargs):
"""Get a cached property value, or source, cache and return it if necessary"""
if key not in self.property_cache:
self.property_cache[key] = source_method(*source_args, **source_kwargs)
return self.property_cache[key]
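# Illustrative sketch (not part of the original module): the property cache
# calls the source method once per key and afterwards serves the cached value.
def _demo_property_cache():
    entity = Entity(provider=None, property_cache=None, name='demo')
    first = entity.get_cached_property('square', lambda x: x * x, 4)
    second = entity.get_cached_property('square', lambda x: x + 100, 4)
    print(first, second)  # -> 16 16 (the second call never runs its method)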
|
472873
|
r"""Command-line tool to start a debug proxy.
usage: python -m naoth.utils.DebugProxy host [port] [--target TARGET] [--print]
"""
import argparse
from . import DebugProxy
if __name__ == '__main__':
prog = 'python -m naoth.utils.DebugProxy'
description = 'A simple command line tool to start a naoth debug proxy.'
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('host',
help='The host of the naoth agent (eg. "localhost")')
parser.add_argument('port', nargs='?', type=int, default=5401,
help='The debug port of the naoth agent (default: 5401)')
parser.add_argument('--target', type=int, default=7777,
help='The proxy port where other applications can connect to (default: 7777)')
parser.add_argument('--print', action='store_true', default=False,
help='Print all commands and their responses going through the proxy.')
args = parser.parse_args()
p = DebugProxy(args.host, args.port, args.target, args.print)
print('DebugProxy active at {}:{} <-> localhost:{}'.format(args.host, args.port, args.target))
print('Ctrl+C to stop the proxy ...')
try:
p.join()
except KeyboardInterrupt:
pass
finally:
print('\nBye')
p.stop()
|
472874
|
import torch
from basic_nn import BasicNet
from spock.args import *
from spock.builder import ConfigArgBuilder
from spock.config import spock_config
@spock_config
class ModelConfig:
save_path: SavePathOptArg
n_features: IntArg
dropout: ListArg[float]
hidden_sizes: TupleArg[int]
activation: ChoiceArg(choice_set=["relu", "gelu", "tanh"])
def main():
# A simple description
description = "spock Tutorial"
# Build out the parser by passing in Spock config objects as *args after description
config = (
ConfigArgBuilder(ModelConfig, desc=description, create_save_path=True)
.save(file_extension=".toml")
.generate()
)
    # Instantiate our neural net using the generated config
basic_nn = BasicNet(model_config=config.ModelConfig)
# Make some random data (BxH): H has dim of features in
test_data = torch.rand(10, config.ModelConfig.n_features)
result = basic_nn(test_data)
print(result)
if __name__ == "__main__":
main()
|
472883
|
from unittest.mock import patch
import datasets
from datasets import Dataset
def test_enable_disable_progress_bar():
dset = Dataset.from_dict({"col_1": [3, 2, 0, 1]})
with patch("tqdm.auto.tqdm") as mock_tqdm:
datasets.disable_progress_bar()
dset.map(lambda x: {"col_2": x["col_1"] + 1})
mock_tqdm.assert_not_called()
mock_tqdm.reset_mock()
datasets.enable_progress_bar()
dset.map(lambda x: {"col_2": x["col_1"] + 1})
mock_tqdm.assert_called()
|
472924
|
from keras_secure_image.secure_image import encrypt_directory, decrypt_img, transform_img, transform, rot, \
perform_rotation
|
472928
|
from sock import *
from data import *
from shellcode import *
from pattern import *
try:
# Try to import the ipython interactive shell
from IPython import embed as ipython # drop to interactive shell
except ImportError as e:
import sys
    sys.stderr.write('Warning: IPython embed could not be imported\n')
|
472930
|
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from interface import Interface
class Link:
def __init__(self, interfaces: List["Interface"]) -> None:
self.interfaces = sorted(interfaces)
def __eq__(self, other) -> bool:
return all(
int1 == int2 for int1, int2 in zip(self.interfaces, other.interfaces)
)
def __hash__(self) -> int:
return hash(tuple(self.interfaces))
def __str__(self) -> str:
return " <-> ".join(str(interface) for interface in self.interfaces)
def __repr__(self) -> str:
return f"{self.__class__.__qualname__}(" f"interfaces={self.interfaces})"
@property
def is_point_to_point(self) -> bool:
return len(self.interfaces) == 2
@property
def first_interface(self) -> "Interface":
if not self.is_point_to_point:
            raise ValueError(
                "Can't return the first interface because "
                "there are not exactly two interfaces forming this link"
)
return self.interfaces[0]
@property
def second_interface(self) -> "Interface":
if not self.is_point_to_point:
            raise ValueError(
                "Can't return the second interface because "
                "there are not exactly two interfaces forming this link"
)
return self.interfaces[1]
|
472988
|
import os
from click import UsageError
from click.testing import CliRunner
import numpy as np
import pytest
import rasterio
from rasterio.enums import Compression
from rio_color.scripts.cli import color, atmos, check_jobs
def equal(r1, r2):
with rasterio.open(r1) as src1:
with rasterio.open(r2) as src2:
return np.array_equal(src1.read(), src2.read())
def test_atmos_cli(tmpdir):
output = str(tmpdir.join("atmosj1.tif"))
runner = CliRunner()
result = runner.invoke(
atmos,
["-a", "0.03", "-b", "0.5", "-c", "15", "-j", "1", "tests/rgb8.tif", output],
)
assert result.exit_code == 0
assert os.path.exists(output)
output2 = str(tmpdir.join("atmosj2.tif"))
runner = CliRunner()
result = runner.invoke(
atmos,
["-a", "0.03", "-b", "0.5", "-c", "15", "-j", "2", "tests/rgb8.tif", output2],
)
assert result.exit_code == 0
assert os.path.exists(output2)
assert equal(output, output2)
def test_color_cli(tmpdir):
output = str(tmpdir.join("colorj1.tif"))
runner = CliRunner()
result = runner.invoke(
color,
[
"-d",
"uint8",
"-j",
"1",
"tests/rgb8.tif",
output,
"gamma 3 1.85",
"gamma 1,2 1.95",
"sigmoidal 1,2,3 35 0.13",
"saturation 1.15",
],
)
assert result.exit_code == 0
assert os.path.exists(output)
output2 = str(tmpdir.join("colorj2.tif"))
result = runner.invoke(
color,
[
"-d",
"uint8",
"-j",
"2",
"tests/rgb8.tif",
output2,
"gamma 3 1.85",
"gamma 1,2 1.95",
"sigmoidal 1,2,3 35 0.13",
"saturation 1.15",
],
)
assert result.exit_code == 0
assert os.path.exists(output2)
assert equal(output, output2)
def test_bad_op(tmpdir):
output = str(tmpdir.join("noop.tif"))
runner = CliRunner()
result = runner.invoke(
color, ["-d", "uint8", "-j", "1", "tests/rgb8.tif", output, "foob 115"]
)
assert result.exit_code == 2
assert "foob is not a valid operation" in result.output
assert not os.path.exists(output)
def test_color_jobsn1(tmpdir):
output = str(tmpdir.join("colorj1.tif"))
runner = CliRunner()
result = runner.invoke(
color,
[
"-d",
"uint8",
"-j",
"-1",
"tests/rgb8.tif",
output,
"gamma 1,2,3 1.85 sigmoidal rgb 35 0.13",
],
)
assert result.exit_code == 0
assert os.path.exists(output)
def test_check_jobs():
assert 1 == check_jobs(1)
assert check_jobs(-1) > 0
with pytest.raises(UsageError):
check_jobs(0)
def test_creation_opts(tmpdir):
output = str(tmpdir.join("color_opts.tif"))
runner = CliRunner()
result = runner.invoke(
color,
[
"--co",
"compress=jpeg",
"tests/rgb8.tif",
output,
"gamma 1,2,3 1.85 sigmoidal rgb 35 0.13",
],
)
assert result.exit_code == 0
with rasterio.open(output, "r") as src:
assert src.compression == Compression.jpeg
output = str(tmpdir.join("color_opts.tif"))
runner = CliRunner()
result = runner.invoke(
color, ["--co", "compress=jpeg", "tests/rgb8.tif", output, "gamma 1,2,3 1.85"]
)
assert result.exit_code == 0
with rasterio.open(output, "r") as src:
assert src.compression == Compression.jpeg
output = str(tmpdir.join("atmos_opts.tif"))
runner = CliRunner()
result = runner.invoke(
atmos,
[
"--co",
"compress=jpeg",
"-a",
"0.03",
"-b",
"0.5",
"-c",
"15",
"-j",
"1",
"tests/rgb8.tif",
output,
],
)
assert result.exit_code == 0
with rasterio.open(output, "r") as src:
assert src.compression == Compression.jpeg
def test_color_cli_rgba(tmpdir):
output = str(tmpdir.join("colorj1.tif"))
runner = CliRunner()
result = runner.invoke(
color,
[
"-d",
"uint8",
"-j",
"1",
"tests/rgba8.tif",
output,
"gamma 3 1.85",
"gamma 1,2 1.95",
"sigmoidal 1,2,3 35 0.13",
"saturation 1.15",
],
)
assert result.exit_code == 0
with rasterio.open("tests/rgba8.tif") as src:
with rasterio.open(output) as out:
assert out.profile["count"] == 4
# Alpha band is unaltered
assert np.array_equal(src.read(4), out.read(4))
def test_color_cli_16bit_photointerp(tmpdir):
output = str(tmpdir.join("color16color.tif"))
runner = CliRunner()
result = runner.invoke(
color,
[
"-d",
"uint16",
"-j",
"1",
"tests/rgb16.tif",
output,
"gamma 3 1.85",
"gamma 1,2 1.95",
],
)
assert result.exit_code == 0
with rasterio.open("tests/rgb16.tif") as src:
with rasterio.open(output) as out:
assert out.colorinterp == src.colorinterp
def test_color_empty_operations(tmpdir):
output = str(tmpdir.join("color.tif"))
runner = CliRunner()
result = runner.invoke(color, ["tests/rgb8.tif", output])
assert result.exit_code == 2
assert not os.path.exists(output)
result = runner.invoke(color, ["tests/rgb8.tif", output, ", , ,"])
assert result.exit_code == 2
def test_as_color(tmpdir):
runner = CliRunner()
result = runner.invoke(atmos, ["-a", "0.03", "--as-color", "foo.tif", "bar.tif"])
assert result.exit_code == 0
assert not os.path.exists("bar.tif")
assert (
result.output.strip()
== "rio color foo.tif bar.tif gamma g 0.99, gamma b 0.97, sigmoidal rgb 10.0 0.15"
)
|
473008
|
import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from network import *
from dataset import *
from util import *
import time
import scipy.io
import os
import matplotlib.pyplot as plt
import pdb
def get_to_cuda(cuda):
def to_cuda(tensor):
return tensor.cuda() if cuda else tensor
return to_cuda
def convert_to_1hot(label, n_class):
# Convert a label map (N x 1 x H x W) into a one-hot representation (N x C x H x W)
label_swap = label.swapaxes(1, 3)
label_flat = label_swap.flatten()
n_data = len(label_flat)
label_1hot = np.zeros((n_data, n_class), dtype='int16')
label_1hot[range(n_data), label_flat] = 1
label_1hot = label_1hot.reshape((label_swap.shape[0], label_swap.shape[1], label_swap.shape[2], n_class))
label_1hot = label_1hot.swapaxes(1, 3)
return label_1hot
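# Illustrative shape check: a label map of shape (N, 1, H, W) with n_class=3
# comes back as (N, 3, H, W), one binary channel per class, with a 1 wherever
# that class occurs in the input map.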
def categorical_dice(prediction, truth, k):
# Dice overlap metric for label value k
A = (np.argmax(prediction, axis=1) == k)
B = (np.argmax(truth, axis=1) == k)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B)+0.001)
def huber_loss(x):
bsize, csize, height, width = x.size()
d_x = torch.index_select(x, 3, torch.arange(1, width).cuda()) - torch.index_select(x, 3, torch.arange(width-1).cuda())
d_y = torch.index_select(x, 2, torch.arange(1, height).cuda()) - torch.index_select(x, 2, torch.arange(height-1).cuda())
err = torch.sum(torch.mul(d_x, d_x))/height + torch.sum(torch.mul(d_y, d_y))/width
err /= bsize
tv_err = torch.sqrt(0.01+err)
return tv_err
def freeze_layer(layer):
for param in layer.parameters():
param.requires_grad = False
def plot_grid(gridx, gridy, **kwargs):
""" plot deformation grid """
for i in range(gridx.shape[0]):
plt.plot(gridx[i,:], gridy[i,:], **kwargs)
for i in range(gridx.shape[1]):
plt.plot(gridx[:,i], gridy[:,i], **kwargs)
def save_flow(x, pred, x_pred, flow):
#print(flow.shape)
x = x.data.cpu().numpy()
pred = pred.data.cpu().numpy()
x_pred = x_pred.data.cpu().numpy()
flow = flow.data.cpu().numpy() * 96
flow = flow[:,:, 60:140, 40:120]
X, Y = np.meshgrid(np.arange(0, 80, 2), np.arange(0, 80, 2))
plt.subplots(figsize=(6, 6))
plt.subplot(221)
plt.imshow(x[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(222)
plt.imshow(pred[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(223)
plt.imshow(x_pred[5, 0, 60:140, 40:120], cmap='gray')
plt.axis('off')
plt.subplot(224)
plt.imshow(x_pred[5, 0, 60:140, 40:120], cmap='gray')
    plt.quiver(X, Y, flow[5, 0, ::2, ::2], -flow[5, 1, ::2, ::2], scale_units='xy', scale=1, color='r')
    # plot_grid(X - flow[5, 0, ::6, ::6],
    #           Y - flow[5, 1, ::6, ::6],
    #           color='r', linewidth=0.5)
plt.axis('off')
plt.savefig('./models/flow_map.png')
plt.close()
lr = 1e-4
n_worker = 4
bs = 10
n_epoch = 100
model_save_path = './models/model_flow_tmp.pth'
model = Registration_Net()
print(model)
# model.load_state_dict(torch.load(model_save_path))
model = model.cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=lr)
flow_criterion = nn.MSELoss()
Tensor = torch.cuda.FloatTensor
def train(epoch):
model.train()
epoch_loss = []
for batch_idx, batch in tqdm(enumerate(training_data_loader, 1),
total=len(training_data_loader)):
x, x_pred, x_gnd = batch
x_c = Variable(x.type(Tensor))
x_predc = Variable(x_pred.type(Tensor))
optimizer.zero_grad()
net = model(x_c, x_predc, x_c)
flow_loss = flow_criterion(net['fr_st'], x_predc) + 0.01 * huber_loss(net['out'])
flow_loss.backward()
optimizer.step()
epoch_loss.append(flow_loss.item())
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(training_data_loader.dataset),
100. * batch_idx / len(training_data_loader), np.mean(epoch_loss)))
save_flow(x_c, x_predc, net['fr_st'], net['out'])
# scipy.io.savemat(os.path.join('./models/flow_test.mat'),
# mdict={'flow': net['out'].data.cpu().numpy()})
torch.save(model.state_dict(), model_save_path)
print("Checkpoint saved to {}".format(model_save_path))
data_path = '../test'
train_set = TrainDataset(data_path, transform=data_augment)
# loading the data
training_data_loader = DataLoader(dataset=train_set, num_workers=n_worker,
batch_size=bs, shuffle=True)
for epoch in range(0, n_epoch + 1):
print('Epoch {}'.format(epoch))
start = time.time()
train(epoch)
end = time.time()
print("training took {:.8f}".format(end-start))
|
473071
|
import logging
import json
from json.decoder import JSONDecodeError
from shutil import copyfile
import sqlite3
from .util import get_data_path, show_error
from .enums import ItemType
DATABASE_VERSION = 1
# decode item types stored in json.
# this is still required for migrating json data to sqlite
def decode_item_type(dict_):
if '__enum__' in dict_:
return getattr(ItemType, dict_['__enum__'])
return dict_
# convert a bytestring stored in sqlite back to our enum
def convert_reward_type(b):
return ItemType(int(b))
# register converter for the item type enum
sqlite3.register_converter('ITEM_TYPE', convert_reward_type)
class DatabaseConnectionContextManager:
def __init__(self, database_path):
self._database_path = database_path
self._connection = sqlite3.connect(self._database_path, detect_types=sqlite3.PARSE_DECLTYPES)
self.cursor = self._connection.cursor()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
self.cursor.close()
self._connection.close()
# to not ignore any exceptions that happened inside a with block
return False
def commit(self):
self._connection.commit()
class Database:
def __init__(self):
data_path = get_data_path()
self._database_path = data_path / 'database.sqlite3'
# create the database if it does not exist yet
if not self._database_path.exists():
self._create_database()
elif self._database_path.is_dir():
show_error(f'{self._database_path} exists, but is not a file.')
# the database exists, let's check the version of our data
logging.info('Checking database version')
with self._get_database_connection() as db:
try:
version = db.cursor.execute('SELECT data FROM meta WHERE name = "version"').fetchall()[0][0]
except (IndexError, sqlite3.DatabaseError, sqlite3.OperationalError):
show_error('Could not find version information in the database. The database might be corrupt.')
# exit if the version does not match. in the future, migrate if version is lower
if version != DATABASE_VERSION:
show_error('Unknown database version. Shutting down to not mess with any data.')
# create a backup
logging.info('Creating database backup')
copyfile(self._database_path, data_path / 'database.sqlite3.bak')
def _get_database_connection(self):
return DatabaseConnectionContextManager(self._database_path)
def _create_database(self):
logging.info('No existing database found, creating new one')
with self._get_database_connection() as db:
# create tables
db.cursor.execute('CREATE TABLE meta (name TEXT PRIMARY KEY, data BLOB)')
db.cursor.execute('CREATE TABLE banner_types (id INTEGER PRIMARY KEY, name TEXT)')
db.cursor.execute('''
CREATE TABLE wish_history (
id INTEGER,
uid INTEGER,
banner_type INTEGER,
type ITEM_TYPE,
rarity INTEGER,
time TEXT,
name TEXT,
UNIQUE (id, uid)
)
''')
# insert version
db.cursor.execute('INSERT INTO meta VALUES ("version", ?)', (DATABASE_VERSION,))
db.commit()
# check if there's an old .json database we can convert
data_path = get_data_path()
json_database_path = data_path / 'database.json'
if json_database_path.exists():
logging.info('Found old database.json, converting to new format')
old_data = None
with json_database_path.open('r', encoding='utf-8') as fp:
try:
old_data = json.load(fp, object_hook=decode_item_type)
except JSONDecodeError:
                        show_error(f'Found old data at {json_database_path}, but could not read it. Aborting.')
# we have old data, let's insert it into our new sqlite db
if old_data is not None:
if 'banner_types' in old_data:
for [ id_, name ] in old_data['banner_types'].items():
db.cursor.execute('INSERT INTO banner_types VALUES (?, ?)', (int(id_), name))
if 'wish_history' in old_data:
for uid in old_data['wish_history']:
for banner_type_id in old_data['wish_history'][uid]:
for wish in old_data['wish_history'][uid][banner_type_id]:
db.cursor.execute('INSERT INTO wish_history VALUES (?, ?, ?, ?, ?, ?, ?)', (
wish['id'],
int(uid),
int(banner_type_id),
wish['type'],
wish['rarity'],
wish['time'],
wish['name']
))
db.commit()
logging.info('Finished conversion, deleting old database.json')
json_database_path.unlink()
def get_banner_types(self):
banner_types = {}
with self._get_database_connection() as db:
banner_types_tuples = db.cursor.execute('SELECT id, name FROM banner_types').fetchall()
for banner_type in banner_types_tuples:
banner_types[banner_type[0]] = banner_type[1]
return banner_types
def store_banner_types(self, banner_types):
logging.info('Storing banner types')
with self._get_database_connection() as db:
db.cursor.executemany('''
INSERT OR IGNORE INTO banner_types (id, name) VALUES (:key, :name)
''', banner_types)
db.commit()
def get_uids(self):
with self._get_database_connection() as db:
uids = db.cursor.execute('SELECT DISTINCT uid FROM wish_history').fetchall()
for uid in uids:
yield uid[0]
def get_wish_history(self, uid):
with self._get_database_connection() as db:
wish_history = db.cursor.execute('''
SELECT
id,
banner_type,
type,
rarity,
time,
name
FROM wish_history WHERE uid = ? ORDER BY time ASC
''', (uid,)).fetchall()
for wish in wish_history:
yield {
'id': wish[0],
'banner_type': wish[1],
'type': wish[2],
'rarity': wish[3],
'time': wish[4],
'name': wish[5]
}
def get_latest_wish_id(self, uid, banner_type):
with self._get_database_connection() as db:
try:
id_ = db.cursor.execute('SELECT MAX(id) FROM wish_history WHERE uid = ? AND banner_type = ?', (uid, banner_type)).fetchone()[0]
except IndexError:
id_ = None
return id_
def store_wish_history(self, wishes):
if len(wishes) == 0:
return
logging.info('Storing wish history')
with self._get_database_connection() as db:
db.cursor.executemany('''
INSERT OR IGNORE INTO wish_history
( id, uid, banner_type, type, rarity, time, name )
VALUES
( :id, :uid, :banner_type, :type, :rarity, :time, :name )
''', wishes)
db.commit()
|
473076
|
from django.forms import ModelForm, Select
from .models import CheckinSchedule
class CheckinScheduleForm(ModelForm):
class Meta:
model = CheckinSchedule
fields = ["timezone", "time_string"]
|
473096
|
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np
from test_labels_loader import load_test_labels
def minmax_rescale(probability):
scaler = MinMaxScaler(feature_range=(0.000000001, 0.999999999))
return scaler.fit_transform(probability)
def softmax_rescale(probability):
norm_x = StandardScaler().fit_transform(probability)
return 1.0 / (1.0 + np.exp(-norm_x))
def plot(clips, probs, labels, scale=None):
x, y = [], []
for i, subject in enumerate(subjects):
subject_idx = []
for j, s in enumerate(clips):
if subject in s:
subject_idx.append(j)
subject_idx = np.array(subject_idx)
subj_prob = probs[subject_idx]
if scale == 'softmax':
y.extend(softmax_rescale(np.expand_dims(subj_prob, axis=1)))
elif scale == 'minmax':
y.extend(minmax_rescale(np.expand_dims(subj_prob, axis=1)))
else:
y.extend(subj_prob)
x.extend([i] * len(subject_idx))
x = np.array(x, dtype='float32')
y = np.array(y)
# add jitter
rng = np.random.RandomState(42)
x += rng.normal(0.0, 0.08, size=len(x))
color = ['b'] * len(y)
markers = ['.'] * len(y)
if labels:
color = []
for subject in subjects:
            subj_labels = labels[subject]['preictal']
            color.extend(map(str, subj_labels))
color = np.array(color)
markers = np.copy(color)
markers[np.where(markers == '0')[0]] = '+'
markers[np.where(markers == '1')[0]] = '^'
color[np.where(color == '0')[0]] = 'b'
color[np.where(color == '1')[0]] = 'r'
zip_all = sorted(zip(x, y, color, markers), key=lambda tup: tup[2])
for a_, b_, c_, d_ in zip_all:
plt.scatter(a_, b_, c=c_, s=60, marker=d_)
if labels:
x1 = zip_all[0]
plt.scatter(x1[0], x1[1], c=x1[2], s=60, marker=x1[3], label='interictal')
x1 = zip_all[-1]
plt.scatter(x1[0], x1[1], c=x1[2], s=60, marker='^', label='preictal')
plt.ylabel('Preictal probability', fontsize=20)
plt.xticks(range(0, 7), subjects)
plt.subplots_adjust(bottom=0.15)
plt.legend()
ax = plt.gca()
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(20)
plt.xlim([-0.5, 6.5])
plt.ylim([-0.1, 1.1])
ax = plt.subplot(111)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
ncol=2, fancybox=True, shadow=True)
plt.show()
if __name__ == '__main__':
test_labels_path = '/mnt/sda4/CODING/python/kaggle_data/test_labels.csv'
s1 = '/mnt/sda4/CODING/python/kaggle_data/submission_0.78612.csv'
s2 = '/mnt/sda4/CODING/python/kaggle_data/submission_lda8.csv'
subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
submission_df = read_csv(s1)
probs = submission_df['preictal']
clips = submission_df['clip']
labels = load_test_labels(test_labels_path)
plot(clips, probs, labels)
|
473119
|
from .utils import efficientnet
from .EfficientNet import EfficientNet
from .pruning import *
from .early_exit import *
|
473131
|
import array_utils
import numpy
import pytest
def test_0_compile_pymod_test_mod(pmgen_py_compile):
pmgen_py_compile(__name__)
ndims_to_test = [1, 2, 3, 4]
# for loop, values
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopValues",
"int32FindMaxForLoopValues_m",
])
def test_int32FindMaxForLoopValues(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nrandom number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg)
print ("res = %s" % str(res))
assert res == expectedRes
# while loop, Forward Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
def test_int32FindMaxWhileLoopForwardIter(pymod_test_mod, seeded_random_number_generator, ndim):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nrandom number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = pymod_test_mod.int32FindMaxWhileLoopForwardIter(arg)
print ("res = %s" % str(res))
assert res == expectedRes
# for loop, Forward Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopForwardIter",
"int32FindMaxForLoopForwardIter_m",
"int32FindMaxForLoopForwardIter_i",
])
def test_int32FindMaxForLoopForwardIter(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s" % nim_test_proc_name)
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg)
print ("res = %s" % str(res))
assert res == expectedRes
# while loop, Rand Acc Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxWhileLoopRandaccIterDeref",
"int32FindMaxWhileLoopRandaccIterIndex0",
"int32FindMaxWhileLoopRandaccIterDerefPlusZeroOffset",
"int32FindMaxWhileLoopRandaccIterDerefMinusZeroOffset",
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_1",
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_2",
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_3",
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_4",
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffset_5",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_1",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_2",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_3",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_4",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffset_5",
])
def test_int32FindMaxWhileLoopRandaccIterDerefAlternatives(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s" % nim_test_proc_name)
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxWhileLoopRandaccIterIndexVsPlusOffsetK",
"int32FindMaxWhileLoopRandaccIterIndexVsMinusOffsetK",
])
@pytest.mark.parametrize("k", [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
def test_int32FindMaxWhileLoopRandaccIterDerefKParamAlternatives(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, k):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s, k = %d" % (nim_test_proc_name, k))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, k)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxWhileLoopRandaccIterDeltaN_1",
"int32FindMaxWhileLoopRandaccIterDeltaN_2",
])
@pytest.mark.parametrize("n", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterDeltaN_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, n):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s, n = %d" % (nim_test_proc_name, n))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argDeltaN = arg.flat[::n]
print ("arg.flat[::n] =\n%s" % argDeltaN)
expectedRes = argDeltaN.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, n)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxWhileLoopRandaccIterExcludeFirstM_1",
"int32FindMaxWhileLoopRandaccIterExcludeFirstM_2",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterExcludeFirstM_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, m):
dtype = numpy.int32
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, dtype)
print ("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argAfterM = arg.flat[m:]
print ("arg.flat[m:] =\n%s" % argAfterM)
if argAfterM.size > 0:
expectedRes = argAfterM.max()
print ("expectedRes = %s" % str(expectedRes))
else:
expectedRes = numpy.iinfo(dtype).min
print ("expectedRes = %s (int32.min)" % str(expectedRes))
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, m)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxWhileLoopRandaccIterExcludeLastM_1",
"int32FindMaxWhileLoopRandaccIterExcludeLastM_2",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxWhileLoopRandaccIterExcludeLastM_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, m):
dtype = numpy.int32
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, dtype)
print ("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argBeforeLastM = arg.flat[:-m]
print ("arg.flat[:-m] =\n%s" % argBeforeLastM)
if argBeforeLastM.size > 0:
expectedRes = argBeforeLastM.max()
print ("expectedRes = %s" % str(expectedRes))
else:
expectedRes = numpy.iinfo(dtype).min
print ("expectedRes = %s (int32.min)" % str(expectedRes))
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, m)
print ("res = %s" % str(res))
assert res == expectedRes
# for loop, Rand Acc Iter
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopRandaccIterDeref",
"int32FindMaxForLoopRandaccIterDeref_m",
"int32FindMaxForLoopRandaccIterDeref_i",
"int32FindMaxForLoopRandaccIterIndex0_i",
])
def test_int32FindMaxForLoopRandaccIterDerefAlternatives(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s" % nim_test_proc_name)
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
expectedRes = arg.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopRandaccIterDeltaN",
"int32FindMaxForLoopRandaccIterDeltaN_m",
"int32FindMaxForLoopRandaccIterDeltaN_i",
])
@pytest.mark.parametrize("n", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterDeltaN_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, n):
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, numpy.int32)
print ("\nnim_test_proc_name = %s, n = %d" % (nim_test_proc_name, n))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argDeltaN = arg.flat[::n]
print ("arg.flat[::n] =\n%s" % argDeltaN)
expectedRes = argDeltaN.max()
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, n)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopRandaccIterExcludeFirstM",
"int32FindMaxForLoopRandaccIterExcludeFirstM_m",
"int32FindMaxForLoopRandaccIterExcludeFirstM_i",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterExcludeFirstM_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, m):
dtype = numpy.int32
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, dtype)
print ("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argAfterM = arg.flat[m:]
print ("arg.flat[m:] =\n%s" % argAfterM)
if argAfterM.size > 0:
expectedRes = argAfterM.max()
print ("expectedRes = %s" % str(expectedRes))
else:
expectedRes = numpy.iinfo(dtype).min
print ("expectedRes = %s (int32.min)" % str(expectedRes))
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, m)
print ("res = %s" % str(res))
assert res == expectedRes
@pytest.mark.parametrize("ndim", ndims_to_test)
@pytest.mark.parametrize("nim_test_proc_name", [
"int32FindMaxForLoopRandaccIterExcludeLastM_i",
])
@pytest.mark.parametrize("m", [1, 2, 3, 4, 5, 10, 100, 1000])
def test_int32FindMaxForLoopRandaccIterExcludeLastM_1(pymod_test_mod, seeded_random_number_generator,
ndim, nim_test_proc_name, m):
dtype = numpy.int32
arg = array_utils.get_random_Nd_array_of_ndim_and_type(ndim, dtype)
print ("\nnim_test_proc_name = %s, m = %d" % (nim_test_proc_name, m))
print ("random number seed = %d\nndim = %d, shape = %s\narg =\n%s" % \
(seeded_random_number_generator, ndim, arg.shape, arg))
argBeforeLastM = arg.flat[:-m]
print ("arg.flat[:-m] =\n%s" % argBeforeLastM)
if argBeforeLastM.size > 0:
expectedRes = argBeforeLastM.max()
print ("expectedRes = %s" % str(expectedRes))
else:
expectedRes = numpy.iinfo(dtype).min
print ("expectedRes = %s (int32.min)" % str(expectedRes))
res = getattr(pymod_test_mod, nim_test_proc_name)(arg, m)
print ("res = %s" % str(res))
assert res == expectedRes
|
473155
|
from soap.common.formatting import underline
from soap.expression.operators import FIXPOINT_OP
from soap.expression.arithmetic import QuaternaryArithExpr
class FixExprIsNotForLoopException(Exception):
"""FixExpr object is not a for loop. """
class FixExpr(QuaternaryArithExpr):
"""Fixpoint expression."""
def __init__(self, a1, a2, a3, a4):
super().__init__(FIXPOINT_OP, a1, a2, a3, a4)
@property
def bool_expr(self):
return self.a1
@property
def loop_state(self):
return self.a2
@property
def loop_var(self):
return self.a3
@property
def init_state(self):
return self.a4
def format(self):
fixpoint_var = underline('e')
s = ('{op}(λ{fvar}.({bool_expr} ? {fvar} % {loop_state} : {var}))'
' % {init_state}')
return s.format(
fvar=fixpoint_var, op=self.op, bool_expr=self.bool_expr,
loop_state=self.loop_state.format(), var=self.loop_var,
init_state=self.init_state.format())
|
473159
|
from models.notification import UserActivityNotification
from config.app import celery_app, app as flask_app
from tasks.notification import user_activity_digest
from models.user_activity_log import UserActivityLog
from models.user import User, Role
from utils.custom_exception import InvalidUsage
import json
import arrow
# create_user_activity logs the user activity
@celery_app.task
def create_user_activity(current_user_id, action, event, resources=None, patch=None, success=False):
with flask_app.app_context():
user = User.query.filter_by(id=current_user_id).one_or_none()
if not user:
raise InvalidUsage("User Invalid.")
role = Role.query.filter_by(id=user.role_id).one_or_none()
if not role:
raise InvalidUsage("Role Invalid.")
# converting None to empty string
if resources is None:
resources = ''
else:
resources = json.dumps(resources)
if patch is None:
patch = ''
else:
patch = json.dumps(patch)
ual = UserActivityLog(
action=action,
event=event,
user=user,
success=success,
role=role,
resources=str(resources),
patch=str(patch)
)
try:
ual.save()
# send user activity for immediate notification integrations
active_user_ids = [user.id for user in User.query.filter_by(isActive=True).all()]
time = arrow.now().datetime
for notification in UserActivityNotification.query.filter(
UserActivityNotification.user_id.in_(active_user_ids)).all():
if notification.duration_in_mins == -1:
user_activity_digest(time, notification.id)
except:
pass
# create_user_activity_login logs the user activity during login
@celery_app.task
def create_user_activity_login(current_user_id, action, event, resources=None, patch=None, success=False):
with flask_app.app_context():
user = User.query.filter_by(id=current_user_id).one_or_none()
if not user:
raise InvalidUsage("User Invalid.")
role = Role.query.filter_by(id=user.role_id).one_or_none()
if not role:
raise InvalidUsage("Role Invalid.")
# converting None to empty string
if resources is None:
resources = ''
else:
resources = json.dumps(resources)
if patch is None:
patch = ''
else:
patch = json.dumps(patch)
ual = UserActivityLog(
action=action,
event=event,
user=user,
success=success,
role=role,
resources=str(resources),
patch=str(patch)
)
try:
ual.save()
# send user activity for immediate notification integrations
active_user_ids = [user.id for user in User.query.filter_by(isActive=True).all()]
time = arrow.now().datetime
for notification in UserActivityNotification.query.filter(
UserActivityNotification.user_id.in_(active_user_ids)).all():
if notification.duration_in_mins == -1:
user_activity_digest(time, notification.id)
except:
pass
|
473160
|
from unittest import TestCase, skip
from pytezos import ContractInterface
code = """
parameter unit;
storage address;
code { DROP ;
SENDER ;
NIL operation ;
PAIR }
"""
initial = '<KEY>'
source = 'KT1WhouvVKZFH94VXj9pa8v4szvfrBwXoBUj'
sender = '<KEY>'
@skip
class SenderContractTest(TestCase):
@classmethod
def setUpClass(cls):
cls.ci = ContractInterface.from_michelson(code).using('mainnet')
def test_sender(self):
res = self.ci.default().run_code(storage=initial, source=source, sender=sender)
self.assertEqual(sender, res.storage)
def test_no_source(self):
res = self.ci.default().run_code(storage=initial, sender=sender)
self.assertEqual(sender, res.storage)
def test_no_sender(self):
res = self.ci.default().run_code(storage=initial, source=source)
self.assertEqual(source, res.storage)
|
473167
|
from .test import BaseTest
try:
# python3
from urllib.request import Request, urlopen, HTTPError
except ImportError:
# fall back to python2
from urllib2 import Request, urlopen, HTTPError
class Test_Format_Conneg(BaseTest):
label = 'Negotiated format'
level = 1
category = 7
versions = [u'1.0', u'1.1']
validationInfo = None
def run(self, result):
url = result.make_url(params={})
hdrs = {'Accept': 'image/png;q=1.0'}
try:
r = Request(url, headers=hdrs)
wh = urlopen(r)
img = wh.read()
wh.close()
except HTTPError as e:
wh = e
ct = wh.headers['content-type']
result.last_url = url
try: # py2
result.last_headers = wh.headers.dict
except:
result.last_headers = wh.info()
result.last_status = wh.code
result.urls.append(url)
self.validationInfo.check('format', ct, 'image/png', result)
return result
|
473221
|
from pyps import workspace
with workspace("duplicate.c","duplicate/duplicate.c", preprocessor_file_name_conflict_handling=True) as w:
print ":".join([f.name for f in w.fun])
|
473223
|
from pycocotools.coco import COCO
import sys, os
from dataset.image_base import *
class MuPoTS(Image_base):
def __init__(self,train_flag=True, split='val', **kwargs):
super(MuPoTS,self).__init__(train_flag)
self.data_folder = os.path.join(self.data_folder,'MultiPersonTestSet/')
self.split = split
self.test2val_sample_ratio = 10
self.annot_path = os.path.join(self.data_folder, 'MuPoTS-3D.json')
self.image_folder = self.data_folder
self.load_data()
self.root_idx = constants.SMPL_ALL_54['Pelvis']
self.file_paths = list(self.annots.keys())
self.kp2d_mapper = constants.joint_mapping(constants.MuPoTS_17, constants.SMPL_ALL_54)
self.kp3d_mapper = constants.joint_mapping(constants.MuPoTS_17, constants.SMPL_ALL_54)
logging.info('MuPoTS dataset total {} samples, loading {} split'.format(self.__len__(), self.split))
def load_data(self):
annots = {}
db = COCO(self.annot_path)
logging.info("Get bounding box from groundtruth")
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = img['file_name']
if img_path not in annots:
annots[img_path] = [[],[],[]]
fx, fy, cx, cy = img['intrinsic']
intrinsic_params = np.array([fx, fy, cx, cy])
kp3d = np.array(ann['keypoints_cam']) # [X, Y, Z] in camera coordinate
kp2d = np.array(ann['keypoints_img'])
bbox = np.array(ann['bbox'])
img_width, img_height = img['width'], img['height']
bbox = process_bbox(bbox, img_width, img_height)
annots[img_path][0].append(kp2d)
annots[img_path][1].append(kp3d)
annots[img_path][2].append(intrinsic_params)
if self.split == 'val':
self.file_paths = list(annots.keys())[::self.test2val_sample_ratio]
self.annots = {}
for key in self.file_paths:
self.annots[key] = annots[key]
del annots
elif self.split == 'test':
self.file_paths = list(annots.keys())
self.annots = annots
else:
print('split', self.split, 'is not recognized!')
raise NotImplementedError
def get_image_info(self, index):
img_name = self.file_paths[index]
imgpath = os.path.join(self.image_folder,img_name)
image = cv2.imread(imgpath)[:,:,::-1]
kp2ds, kp3ds = [], []
for kp2d, kp3d in zip(self.annots[img_name][0], self.annots[img_name][1]):
kp2ds.append(self.map_kps(kp2d,maps=self.kp2d_mapper))
kp3d = self.map_kps(kp3d/1000.,maps=self.kp3d_mapper)
kp3ds.append(kp3d)
kp2ds, kp3ds = np.array(kp2ds), np.array(kp3ds)
root_trans = kp3ds[:,self.root_inds].mean(1)
valid_masks = np.array([self._check_kp3d_visible_parts_(kp3d) for kp3d in kp3ds])
kp3ds -= root_trans[:,None]
kp3ds[~valid_masks] = -2.
fx, fy, cx, cy = self.annots[img_name][2][0]
camMats = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
person_num = len(kp2ds)
vis_masks = []
for kp2d in kp2ds:
vis_masks.append(_check_visible(kp2d,get_mask=True))
kp2ds = np.concatenate([kp2ds, np.array(vis_masks)[:,:,None]],2)
img_info = {'imgpath': imgpath, 'image': image, 'kp2ds': kp2ds, 'track_ids': None,\
'vmask_2d': np.array([[True,False,True] for _ in range(person_num)]), 'vmask_3d': np.array([[True,False,False,False] for _ in range(person_num)]),\
'kp3ds': kp3ds, 'params': None, 'camMats': camMats, 'img_size': image.shape[:2],'ds': 'mupots'}
return img_info
def cam2pixel(cam_coord, f, c):
x = cam_coord[:, 0] / (cam_coord[:, 2] + 1e-8) * f[0] + c[0]
y = cam_coord[:, 1] / (cam_coord[:, 2] + 1e-8) * f[1] + c[1]
z = cam_coord[:, 2]
img_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)
return img_coord
def pixel2cam(pixel_coord, f, c):
x = (pixel_coord[:, 0] - c[0]) / f[0] * pixel_coord[:, 2]
y = (pixel_coord[:, 1] - c[1]) / f[1] * pixel_coord[:, 2]
z = pixel_coord[:, 2]
cam_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)
return cam_coord
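# Quick sanity check (values are illustrative, not from MuPoTS): projecting with
# cam2pixel and back with pixel2cam using the same intrinsics recovers the
# camera-space points up to floating point error.
#   f, c = np.array([1500., 1500.]), np.array([1024., 1024.])
#   pts = np.array([[0.1, -0.2, 3.0], [0.5, 0.4, 2.0]])
#   assert np.allclose(pixel2cam(cam2pixel(pts, f, c), f, c), pts, atol=1e-5)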
def world2cam(world_coord, R, t):
cam_coord = np.dot(R, world_coord.transpose(1,0)).transpose(1,0) + t.reshape(1,3)
return cam_coord
def get_bbox(joint_img):
# bbox extract from keypoint coordinates
bbox = np.zeros((4))
xmin = np.min(joint_img[:,0])
ymin = np.min(joint_img[:,1])
xmax = np.max(joint_img[:,0])
ymax = np.max(joint_img[:,1])
width = xmax - xmin - 1
height = ymax - ymin - 1
bbox[0] = (xmin + xmax)/2. - width/2*1.2
bbox[1] = (ymin + ymax)/2. - height/2*1.2
bbox[2] = width*1.2
bbox[3] = height*1.2
return bbox
def process_bbox(bbox, width, height):
# sanitize bboxes
x, y, w, h = bbox
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if w*h > 0 and x2 >= x1 and y2 >= y1:
bbox = np.array([x1, y1, x2-x1, y2-y1])
else:
return None
# aspect ratio preserving bbox
w = bbox[2]
h = bbox[3]
c_x = bbox[0] + w/2.
c_y = bbox[1] + h/2.
aspect_ratio = 512/512
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
bbox[2] = w*1.25
bbox[3] = h*1.25
bbox[0] = c_x - bbox[2]/2.
bbox[1] = c_y - bbox[3]/2.
return bbox
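# Worked example (numbers are illustrative): for a 2048x2048 image,
# process_bbox([100, 200, 50, 100], 2048, 2048) first clips the box to
# [100, 200, 49, 99], then pads it to the 1:1 aspect ratio and scales it by
# 1.25, returning approximately [62.625, 187.625, 123.75, 123.75].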
def multi_meshgrid(*args):
"""
Creates a meshgrid from possibly many
elements (instead of only 2).
    Returns a tuple of nd tensors, each with as many dimensions
    as there are arguments
"""
args = list(args)
template = [1 for _ in args]
for i in range(len(args)):
n = args[i].shape[0]
template_copy = template.copy()
template_copy[i] = n
args[i] = args[i].view(*template_copy)
# there will be some broadcast magic going on
return tuple(args)
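# For example (illustrative): multi_meshgrid(torch.arange(2), torch.arange(3))
# returns views of shape (2, 1) and (1, 3) that broadcast against each other;
# flip() below relies on this to index several dimensions at once.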
def flip(tensor, dims):
if not isinstance(dims, (tuple, list)):
dims = [dims]
indices = [torch.arange(tensor.shape[dim] - 1, -1, -1,
dtype=torch.int64) for dim in dims]
multi_indices = multi_meshgrid(*indices)
final_indices = [slice(i) for i in tensor.shape]
for i, dim in enumerate(dims):
final_indices[dim] = multi_indices[i]
flipped = tensor[final_indices]
assert flipped.device == tensor.device
assert flipped.requires_grad == tensor.requires_grad
return flipped
def _check_visible(joints, w=2048, h=2048, get_mask=False):
visibility = True
# check that all joints are visible
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < 16:
visibility=False
if get_mask:
return ok_pts
return visibility
if __name__ == '__main__':
dataset=MuPoTS(train_flag=False)
test_dataset(dataset)
print('Done')
|
473252
|
from typing import Sequence, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.systems.base_system import BaseSystem
class EMixSystem(BaseSystem):
'''Implements the i-Mix algorithm on embedded inputs defined in https://arxiv.org/abs/2010.08887.
Because there aren't predefined augmentations, i-Mix is applied to the original embeddings. The
algorithm under default parameters can be summarized as
Algorithm 1:
lambda ~ Beta(1, 1)
lambda = max(lambda, 1 - lambda) # assures mixing coefficient >= 0.5
embs = embed(*x)
permuted_idxs = permute(arange(embs))
permuted_embs = stop_gradient[embs][permuted_idx]
mixed_embs = lambda * embs + (1 - lambda) * permuted_embs
logits = mixed_embs @ embs.T
contrastive_loss = cross_entropy(logits, arange(embs))
mixed_virtual_loss = cross_entropy(logits, permuted_idxs)
loss = contrastive_loss + mixed_virtual_loss
'''
ALPHA = 1.0
TEMPERATURE = 0.2
def __init__(self, config):
super().__init__(config)
self.beta_distribution = torch.distributions.beta.Beta(self.ALPHA, self.ALPHA)
self.ce = nn.CrossEntropyLoss(reduction='none')
def objective(
self,
embs_mix: torch.Tensor,
embs: torch.Tensor,
mix_coeff: Union[torch.Tensor, float],
randidx: torch.Tensor,
):
embs_mix = F.normalize(embs_mix, dim=1)
embs = F.normalize(embs, dim=1)
logits = embs_mix @ embs.T / self.TEMPERATURE
labels = torch.arange(logits.shape[0], dtype=torch.long, device=logits.device)
loss = mix_coeff * self.ce(logits, labels) + (1 - mix_coeff) * self.ce(logits, randidx)
loss = loss.mean()
with torch.no_grad():
acc = (logits.argmax(1) == labels).float().mean()
return loss, acc
def ssl_forward(self, batch, prehead=False):
batch = batch[1:-1] # could be multiple tensors here
# Embed first.
embs = self.model.embed(batch)
batch_size = embs.shape[0]
# Sample mixing coefficient from beta distribution.
mix_coeff = self.beta_distribution.sample([batch_size]).to(embs.device)
mix_coeff = mix_coeff.view(-1, *[1] * (embs.dim() - 1))
mix_coeff = torch.max(mix_coeff, 1 - mix_coeff)
# Generate augmentations.
randidx = torch.randperm(batch_size, device=embs.device)
embs_mix = embs[randidx].detach()
embs_mix = mix_coeff * embs + (1 - mix_coeff) * embs_mix
embs_mix = self.model.encode(embs_mix, prehead=prehead)
embs = self.model.encode(embs, prehead=prehead)
return embs_mix, embs, mix_coeff, randidx
def forward(self, inputs: Sequence[torch.Tensor], prehead=False):
return self.model.forward(inputs, prehead=prehead)
def training_step(self, batch, batch_idx):
embs_i, embs_j, mix_coeff, randidx = self.ssl_forward(batch, prehead=False)
loss, acc = self.objective(embs_i, embs_j, mix_coeff, randidx)
self.log('train_loss', loss.item(), on_step=True, on_epoch=False, prog_bar=False, sync_dist=True)
self.log('train_acc', acc.item(), on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
return loss
def validation_step(self, batch, batch_idx):
embs_i, embs_j, mix_coeff, randidx = self.ssl_forward(batch, prehead=False)
loss, acc = self.objective(embs_i, embs_j, mix_coeff, randidx)
return {'loss': loss.item(), 'acc': acc.item()}
def validation_epoch_end(self, validation_step_outputs):
loss = [val_dict['loss'] for val_dict in validation_step_outputs]
acc = [val_dict['acc'] for val_dict in validation_step_outputs]
self.log('val_loss', sum(loss) / len(loss), on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)
self.log('val_acc', sum(acc) / len(acc), on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
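# A minimal standalone sketch of the mixing step from Algorithm 1 above, written
# on plain tensors; the batch size, embedding width and the 0.2 temperature are
# illustrative, not taken from a real training run:
#   embs = torch.randn(8, 16)
#   lam = torch.distributions.Beta(1.0, 1.0).sample([8]).view(-1, 1)
#   lam = torch.max(lam, 1 - lam)                     # mixing coefficient >= 0.5
#   randidx = torch.randperm(8)
#   embs_mix = lam * embs + (1 - lam) * embs[randidx].detach()
#   logits = F.normalize(embs_mix, dim=1) @ F.normalize(embs, dim=1).T / 0.2
#   labels = torch.arange(8)
#   loss = (lam.squeeze() * F.cross_entropy(logits, labels, reduction='none')
#           + (1 - lam.squeeze()) * F.cross_entropy(logits, randidx, reduction='none')).mean()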
|
473291
|
import re
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.priority_stack import PriorityStack
from regparser.tree.xml_parser import tree_utils
class HeaderStack(PriorityStack):
"""Used to determine Table Headers -- indeed, they are complicated
enough to warrant their own stack"""
def unwind(self):
children = [pair[1] for pair in self.pop()]
self.peek_last()[1].children = children
class TableHeaderNode(object):
"""Represents a cell in a table's header"""
def __init__(self, text, level):
self.text = text
self.level = level
self.children = []
def height(self):
child_heights = [0] + [c.height() for c in self.children]
return 1 + max(child_heights)
def width(self):
if not self.children:
return 1
return sum(c.width() for c in self.children)
def build_header(xml_nodes):
"""Builds a TableHeaderNode tree, with an empty root. Each node in the tree
includes its colspan/rowspan"""
stack = HeaderStack()
stack.add(0, TableHeaderNode(None, 0)) # Root
for xml_node in xml_nodes:
level = int(xml_node.attrib['H'])
text = tree_utils.get_node_text(xml_node, add_spaces=True).strip()
stack.add(level, TableHeaderNode(text, level))
while stack.size() > 1:
stack.unwind()
root = stack.m_stack[0][0][1]
max_height = root.height()
def set_rowspan(n):
n.rowspan = max_height - n.height() - n.level + 1
struct.walk(root, set_rowspan)
def set_colspan(n):
n.colspan = n.width()
struct.walk(root, set_colspan)
return root
def table_xml_to_plaintext(xml_node):
"""Markdown representation of a table. Note that this doesn't account
for all the options needed to display the table properly, but works fine
for simple tables. This gets included in the reg plain text"""
header = [tree_utils.get_node_text(hd, add_spaces=True).strip()
for hd in xml_node.xpath('./BOXHD/CHED')]
divider = ['---']*len(header)
rows = []
for tr in xml_node.xpath('./ROW'):
rows.append([tree_utils.get_node_text(td, add_spaces=True).strip()
for td in tr.xpath('./ENT')])
table = []
for row in [header] + [divider] + rows:
table.append('|' + '|'.join(row) + '|')
return '\n'.join(table)
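# For a header of ["Name", "Value"] and a single row ["a", "1"], the function
# above yields (derived from the code itself, not from regulation data):
# |Name|Value|
# |---|---|
# |a|1|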
def table_xml_to_data(xml_node):
"""Construct a data structure of the table data. We provide a different
structure than the native XML as the XML encodes too much logic. This
structure can be used to generate semi-complex tables which could not be
generated from the markdown above"""
header_root = build_header(xml_node.xpath('./BOXHD/CHED'))
header = [[] for _ in range(header_root.height())]
def per_node(node):
header[node.level].append({'text': node.text,
'colspan': node.colspan,
'rowspan': node.rowspan})
struct.walk(header_root, per_node)
header = header[1:] # skip the root
rows = []
for row in xml_node.xpath('./ROW'):
rows.append([tree_utils.get_node_text(td, add_spaces=True).strip()
for td in row.xpath('./ENT')])
return {'header': header, 'rows': rows}
class Formatting(Layer):
fenced_re = re.compile(r"```(?P<type>[a-zA-Z0-9 ]+)\w*\n"
+ r"(?P<lines>([^\n]*\n)+)"
+ r"```")
subscript_re = re.compile(r"([a-zA-Z0-9]+)_\{(\w+)\}")
dashes_re = re.compile(r"_{5,}$")
def process(self, node):
layer_el = []
if node.source_xml is not None:
if node.source_xml.tag == 'GPOTABLE':
tables = [node.source_xml]
else:
tables = []
tables.extend(node.source_xml.xpath('.//GPOTABLE'))
for table in tables:
layer_el.append({'text': table_xml_to_plaintext(table),
'locations': [0],
'table_data': table_xml_to_data(table)})
for match in Formatting.fenced_re.finditer(node.text):
layer_el.append({
'text': node.text[match.start():match.end()],
'locations': [0],
'fence_data': {
'type': match.group('type'),
'lines': filter(bool, match.group('lines').split("\n"))}})
subscripts = {}
for match in Formatting.subscript_re.finditer(node.text):
key = (match.group(1), match.group(2))
subscripts[key] = subscripts.get(key, 0) + 1
for key, count in subscripts.iteritems():
variable, subscript = key
layer_el.append({
'text': variable + '_{' + subscript + '}',
'locations': list(range(count)),
'subscript_data': {'variable': variable,
'subscript': subscript}})
for match in Formatting.dashes_re.finditer(node.text):
layer_el.append({
'text': node.text,
'locations': [0],
'dash_data': {
'text': node.text[:match.start()],
},
})
if layer_el:
return layer_el
|
473311
|
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
sizes = []
visited = [[False for value in row] for row in grid]
for i in range(len(grid)):
for j in range(len(grid[i])):
if visited[i][j]:
continue
else:
self.traverseNode(i, j, grid, visited, sizes)
sizes.sort(reverse= False)
if not sizes:
return 0
else:
return sizes[-1]
def traverseNode(self, i, j, grid, visited, sizes):
currentRiverSize = 0
nodesToExplore = [[i, j]]
while len(nodesToExplore):
currentNode = nodesToExplore.pop()
i = currentNode[0]
j = currentNode[1]
if visited[i][j]:
continue
visited[i][j] = True
if grid[i][j] == 0:
continue
currentRiverSize += 1
unvisitedNeighbours = self.getUnvisitedNeighbour(i, j, grid, visited)
for neighbour in unvisitedNeighbours:
nodesToExplore.append(neighbour)
if currentRiverSize > 0:
sizes.append(currentRiverSize)
def getUnvisitedNeighbour(self, i, j, grid, visited):
unvisitedNeighbours = []
if i > 0 and not visited[i - 1][j]:
unvisitedNeighbours.append([i - 1, j])
if i < len(grid) - 1 and not visited[i + 1][j]:
unvisitedNeighbours.append([i + 1, j])
if j > 0 and not visited[i][j - 1]:
unvisitedNeighbours.append([i, j - 1])
if j < len(grid[0]) - 1 and not visited[i][j + 1]:
unvisitedNeighbours.append([i, j + 1])
return unvisitedNeighbours
# My Own solution during MOCK
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
sizes = [0]
visited = [[False for _ in row] for row in grid]
for i in range(len(grid)):
for j in range(len(grid[0])):
if not visited[i][j] and grid[i][j] == 1:
self.traverseNode(i, j, grid, visited, sizes)
sizes.sort()
return sizes[-1]
def traverseNode(self, row, col, grid, visited, sizes):
nodesStack = [[row, col]]
currentSize = 0
while nodesStack:
i, j = nodesStack.pop()
if visited[i][j] == False:
visited[i][j] = True
currentSize += 1
unvisitedValidNeighbours = self.getUnvisitedValidNeighbours(i, j, grid, visited)
nodesStack.extend(unvisitedValidNeighbours)
sizes.append(currentSize)
def getUnvisitedValidNeighbours(self, row, col, grid, visited):
neighbours = []
if row > 0 and grid[row - 1][col] == 1 and not visited[row - 1][col]:
neighbours.append([row - 1, col])
if row < len(grid) - 1 and grid[row + 1][col] and not visited[row + 1][col]:
neighbours.append([row + 1, col])
if col > 0 and grid[row][col - 1] and not visited[row][col - 1]:
neighbours.append([row, col - 1])
if col < len(grid[0]) - 1 and grid[row][col + 1] and not visited[row][col + 1]:
neighbours.append([row, col + 1])
return neighbours
# Driver code
sol = Solution()
# grid = [[1,1,0,0,0],
# [1,1,0,0,0],
# [0,0,0,1,1],
# [0,0,0,1,1]]
grid = [[0]]
maxArea = sol.maxAreaOfIsland(grid)
print("Max Area of Island: ", maxArea)
|
473321
|
import random
from collections import defaultdict
import numpy as np
from amplification.tasks.core import idk, uniform, Task, sequences
def matches(patterns, xs):
return np.all((patterns[:,np.newaxis,:] == SumTask.wild) |
(patterns[:,np.newaxis,:] == xs[np.newaxis, :, :]), axis=2)
class SumTask(Task):
wild = 1
answer_length = 1
fixed_vocab = 2
def repr_symbol(self, x):
if x == self.wild: return '*'
if x in self.differences:
return str(x - self.zero)
if x in self.alphabet:
return 'abcdef'[x - self.alphabet[0]]
if x == idk: return '?'
raise ValueError(x)
def __init__(self, length=6, size=float('inf'), nchars=2, modulus=None):
self.nvocab = self.fixed_vocab
self.size = min(size, nchars**length)
self.alphabet = self.allocate(nchars)
self.interaction_length = nchars
self.alphabet_plus = np.concatenate([[self.wild], self.alphabet])
self.modulus = modulus
if modulus is None:
self.max_d = (self.size + nchars - 1) // nchars
self.differences = self.allocate(2 * self.max_d + 1)
self.zero = self.differences[self.max_d]
else:
self.differences = self.allocate(self.modulus)
self.zero = self.differences[0]
self.all_strings = [np.array(x) for x in sequences(self.alphabet, length)]
self.length = length
self.question_length = length
self.fact_length = length + 1
def make_dbs(self, difficulty=float('inf')):
used_strings = min(self.size, difficulty+8)
strings = np.stack(random.sample(self.all_strings, used_strings))
values = np.random.choice([-1, 1], used_strings)
fast_db = {"strings": strings, "values": values}
facts = np.concatenate([strings, self.encode_n(values[:,np.newaxis])], axis=1)
return facts, fast_db
def answers(self, Qs, fast_db):
all_matches = matches(Qs, fast_db["strings"])
raw_As = np.dot(all_matches, fast_db["values"])
As = self.encode_n(raw_As)
return As[:, np.newaxis]
def make_q(self, fast_db):
Q = np.random.choice(self.alphabet, self.length, replace=True)
num_wilds = np.random.randint(1, self.length + 1)
indices = np.random.choice(self.length, num_wilds, replace=False)
Q[indices] = self.wild
return Q
def encode_n(self, x):
if self.modulus is None:
return self.zero + np.maximum(-self.max_d, np.minimum(self.max_d, x))
else:
return self.zero + np.mod(x, self.modulus)
def are_simple(self, Qs):
return np.all(Qs != self.wild, axis=-1)
def recursive_answer(self, Q):
Q = np.asarray(Q)
if not np.all(np.isin(Q, self.alphabet_plus)):
yield self.pad(self.zero), None
return
if not np.any(Q == self.wild):
yield (yield None, Q), None
return
wild_index = np.argmax(Q == self.wild)
result = 0
for c in self.alphabet:
new_Q = np.copy(Q)
new_Q[wild_index] = c
d = (yield None, new_Q)
if d not in self.differences:
yield self.pad(idk), None
return
result += d - self.zero
result = self.encode_n(result)
yield self.pad(result), None
def all_questions(self, fast_db):
yield from sequences(self.alphabet_plus, self.length)
def classify_question(self, Q, fast_db):
n = len([x for x in Q if x == self.wild])
return "wilds{}".format(n)
|
473328
|
import numpy as np
from mspasspy.ccore.utility import MsPASSError, AtomicType, ErrorSeverity, ProcessingStatus
from mspasspy.ccore.seismic import Seismogram, TimeSeries, TimeSeriesEnsemble, SeismogramEnsemble
def info(data, alg_id, alg_name, target=None):
"""
    This helper function is used to log operations in the processing history of a mspass object.
    Per best practice, every operation performed on a mspass object should be logged.
    :param data: the mspass data object
    :param alg_id: an id designator that uniquely identifies an instance of the algorithm.
    :param alg_name: the name of the algorithm applied to the mspass object.
    :param target: if the mspass data object is an ensemble type, you may use target as an index to
    log one specific object in the ensemble. If target is not specified, all the objects in the ensemble
    are logged using the same information.
:return: None
"""
empty_err_message = "cannot preserve history because container was empty\n" + \
"Must at least contain an origin record"
if isinstance(data, (TimeSeries, Seismogram)):
if data.live:
if data.is_empty():
data.elog.log_error(alg_name, empty_err_message, ErrorSeverity.Complaint)
else:
data.new_map(alg_name, alg_id,
AtomicType.TIMESERIES if isinstance(data, TimeSeries) else AtomicType.SEISMOGRAM,
ProcessingStatus.VOLATILE)
elif isinstance(data, (TimeSeriesEnsemble, SeismogramEnsemble)):
if (target is not None) and (len(data.member) <= target):
raise IndexError(
"logging_helper.info: target index is out of bound")
for i in range(len(data.member)) if target is None else [target]:
if data.member[i].live: # guarantee group member is not dead
if data.member[i].is_empty():
data.member[i].elog.log_error(alg_name, empty_err_message, ErrorSeverity.Complaint)
else:
data.member[i].new_map(alg_name, alg_id,
AtomicType.TIMESERIES
if isinstance(data.member[i], TimeSeries) else AtomicType.SEISMOGRAM,
ProcessingStatus.VOLATILE)
else:
print(
'Coding error - logging.info was passed an unexpected data type of', type(data))
print('Not treated as fatal but a bug fix is needed')
def ensemble_error(d, alg, message, err_severity=ErrorSeverity.Invalid):
"""
    This is a small helper function useful for error handlers in except
    blocks for ensemble objects. If a function called on an ensemble
    object throws an exception, this function posts the message
    to all ensemble members. It silently does nothing if the
    ensemble is empty.
    :param err_severity: severity of the error, defaults to ErrorSeverity.Invalid.
    :param d: the ensemble data to be handled. This function prints an error
    message and returns, doing nothing, if d is not one of the known ensemble
    objects.
    :param alg: the algorithm name posted to elog on each member
    :param message: the string posted to all members
    (Note: due to a current flaw in the api we don't have access to the
    severity attribute, so for now it is always set to Invalid.)
"""
if isinstance(d, (TimeSeriesEnsemble, SeismogramEnsemble)):
n = len(d.member)
if n <= 0:
return
for i in range(n):
d.member[i].elog.log_error(alg, str(message), err_severity)
else:
print('Coding error - ensemble_error was passed an unexpected data type of',
type(d))
print('Not treated as fatal but a bug fix is needed')
def reduce(data1, data2, alg_id, alg_name):
"""
    This function replicates the processing history of data2 onto data1, which is a common use case
    in a reduce stage. If data1 is dead it stays silent, i.e. no history is replicated. If data2 is dead,
    the processing history is still replicated.
    :param data1: Mspass object
    :param data2: Mspass object
    :param alg_id: The unique id that the user gives to the algorithm.
:param alg_name: The name of the reduce algorithm that uses this helper function.
:return: None
"""
if isinstance(data1, (TimeSeries, Seismogram)):
if type(data1) != type(data2):
            raise TypeError(
                "logging_helper.reduce: data2 has a different type than data1")
if data1.live:
data1.accumulate(alg_name,
alg_id,
AtomicType.TIMESERIES if isinstance(data1, TimeSeries)
else AtomicType.SEISMOGRAM,
data2)
elif isinstance(data1, (TimeSeriesEnsemble, SeismogramEnsemble)):
if type(data1) != type(data2):
            raise TypeError(
                "logging_helper.reduce: data2 has a different type than data1")
if len(data1.member) != len(data2.member):
raise IndexError(
"logging_helper.reduce: data1 and data2 have different sizes of member")
for i in range(len(data1.member)):
if data1.member[i].live: # guarantee group member is not dead
data1.member[i].accumulate(alg_name,
alg_id,
AtomicType.TIMESERIES if isinstance(data1.member[i], TimeSeries)
else AtomicType.SEISMOGRAM,
data2.member[i])
else:
print('Coding error - logging.reduce was passed an unexpected data type of', type(data1))
print('Not treated as fatal but a bug fix is needed')
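# Usage sketch (the algorithm names and ids below are illustrative): a map-style
# processing function would typically record itself on its output with
#   info(d, alg_id='0', alg_name='detrend')
# so every live datum carries the operation in its history, while a reduce-style
# operator would call
#   reduce(d1, d2, alg_id='0', alg_name='stack')
# to fold the processing history of d2 into d1.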
|
473365
|
from setuptools import setup
setup(
name='GaMMA',
version='1.0.0',
packages=['gamma'],
install_requires=[
'scikit-learn',
],
)
|