ext (9 classes) | sha (40 chars) | content (3 chars to 1.04M chars)
---|---|---
py | 7dfde841732b11837b81dda865f24511c3ab48a1 | #!/usr/bin/env python3.6
from password import Password
def create_password(first_name, last_name, user_name, password):
new_password = Password(first_name, last_name, user_name, password)
return new_password
def save_passwords(password):
password.save_Password()
def del_password(password):
password.delete_password()
def find_password(user_name):
return Password.find_by_user_name(user_name)
def check_existing_passwords(user_name):
return Password.password_exist(user_name)
def display_passwords():
return Password.display_passwords()
def main():
print("Hello,What is your name?")
user_name = input()
print(f"Hello {user_name}. What would u like to do?")
print ('\n')
while True:
print("Use these short codes : cc - create a credentials, del - delete credential dc - display password, fc -find a password, ex -exit the password list ")
short_code = input().lower()
if short_code == 'cc':
print("Credential")
print("-"*10)
print("first_name")
f_name = input()
print("last_name")
last_name = input()
print("user_name")
u_user_name = input()
print("password")
p_password = input()
save_passwords(create_password(f_name,last_name,u_user_name,p_password))
print ('\n')
print(f"New credential {f_name} {last_name} created")
print ('\n')
elif short_code == 'dc':
if display_passwords():
print("Here is a list of all your passwords")
print('\n')
for password in display_passwords():
print(f"{password.first_name} {password.last_name} {password.user_name} {password.password}")
print('\n')
else:
print('\n')
print("You dont seem to have any passwords saved yet")
print('\n')
elif short_code == 'del':
print("Enter the username you want to delete")
search_user_name = input()
if check_existing_passwords(search_user_name):
search_password = find_password(search_user_name)
del_password(search_password)
print("account successfully deleted!")
else:
print("That account does not exist")
elif short_code == 'fc':
print("Enter the username you want to search for")
search_user_name = input()
if check_existing_passwords(search_user_name):
search_password = find_password(search_user_name)
print(f"{search_password.first_name} {search_password.last_name}")
print('-' * 20)
print(f"user_name.......{search_password.user_name}")
print(f"password.......{search_password.password}")
else:
print("That password does not exist")
elif short_code == "ex":
print("Bye")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main() |
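The script above imports a `Password` class from a local `password` module that is not part of this row. The following is only a minimal sketch of the interface the script exercises; the method names mirror the calls made above, and the in-memory list storage is an illustrative assumption, not the real implementation.

```python
# Hypothetical sketch of the `password` module the script above assumes.
class Password:
    password_list = []  # assumed class-level store; real module may differ

    def __init__(self, first_name, last_name, user_name, password):
        self.first_name = first_name
        self.last_name = last_name
        self.user_name = user_name
        self.password = password

    def save_Password(self):
        Password.password_list.append(self)

    def delete_password(self):
        Password.password_list.remove(self)

    @classmethod
    def find_by_user_name(cls, user_name):
        for credential in cls.password_list:
            if credential.user_name == user_name:
                return credential

    @classmethod
    def password_exist(cls, user_name):
        return any(c.user_name == user_name for c in cls.password_list)

    @classmethod
    def display_passwords(cls):
        return cls.password_list
```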
py | 7dfde8d665334dbf67fdba47a46e9f4fb5095e68 | import logging
from schematics import Model
from schematics.types import IntType, ModelType, StringType, serializable
_LOGGER = logging.getLogger(__name__)
class RedrivePolicy(Model):
dead_letter_target_arn = StringType(deserialize_from="deadLetterTargetArn")
max_receive_count = StringType(deserialize_from="maxReceiveCount")
class QueData(Model):
class Options:
serialize_when_none = False
region_name = StringType(default='')
url = StringType()
arn = StringType(deserialize_from="QueueArn")
approximate_number_of_messages = IntType(deserialize_from="ApproximateNumberOfMessages")
approximate_number_of_messages_delayed = IntType(deserialize_from="ApproximateNumberOfMessagesDelayed")
approximate_number_of_messages_not_visible = IntType(deserialize_from="ApproximateNumberOfMessagesNotVisible")
created_timestamp = StringType(deserialize_from='CreatedTimestamp')
delay_seconds = IntType(deserialize_from="DelaySeconds")
last_modified_timestamp = StringType(deserialize_from='LastModifiedTimestamp')
maximum_message_size = IntType(deserialize_from="MaximumMessageSize")
message_retention_period = IntType(deserialize_from="MessageRetentionPeriod")
receive_message_wait_time_seconds = IntType(deserialize_from="ReceiveMessageWaitTimeSeconds")
visibility_timeout = IntType(deserialize_from="VisibilityTimeout")
redrive_policy = ModelType(RedrivePolicy, deserialize_from="RedrivePolicy")
fifo_queue = StringType(deserialize_from="FifoQueue")
content_based_deduplication = StringType(deserialize_from="ContentBasedDeduplication")
kms_master_key_id = StringType(deserialize_from="KmsMasterKeyId")
kms_data_key_reuse_period_seconds = StringType(deserialize_from="KmsDataKeyReusePeriodSeconds")
account_id = StringType()
policy = StringType(deserialize_from="Policy")
@serializable
def name(self):
return self.arn.split(':')[-1]
@serializable
def reference(self):
return {
"resource_id": self.arn,
"external_link": f"https://{self.region_name}.console.aws.amazon.com/sqs/home?{self.region_name}#queue-browser:selected={self.url};prefix={self.name}"
}
@serializable
def cloudwatch(self):
return {
"namespace": "AWS/SQS",
"dimensions": [
{
"Name": "QueueName",
"Value": self.name
}
],
"region_name": self.region_name
}
|
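A hedged usage sketch for the model above: the queue ARN, URL, and attribute values below are invented, and the wiring into the surrounding collector is an assumption. It only shows how a raw SQS `GetQueueAttributes`-style dict is mapped through the `deserialize_from` declarations.

```python
# Illustrative only; assumes the QueData model defined above is in scope.
raw_attributes = {
    "QueueArn": "arn:aws:sqs:us-east-1:123456789012:example-queue",
    "ApproximateNumberOfMessages": "3",
    "DelaySeconds": "0",
    "CreatedTimestamp": "1577836800",
    "VisibilityTimeout": "30",
}

queue = QueData(raw_attributes, strict=False)   # unknown keys are ignored
queue.region_name = "us-east-1"
queue.url = "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue"
queue.validate()

# Serialized output includes the computed 'name', 'reference' and 'cloudwatch' fields.
print(queue.to_primitive())
```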
py | 7dfde8e26524ab2b00857b0ef91b55ce29735872 | from random import randint, choice
from elizabeth.core import Code
from elizabeth.exceptions import JSONKeyError
from elizabeth.utils import pull
__all__ = [
'USASpecProvider',
'BrazilSpecProvider',
'RussiaSpecProvider',
'JapanSpecProvider'
]
# Internal
_custom_code = Code.custom_code
class BrazilSpecProvider(object):
"""Class that provides special data for pt-br"""
class Meta:
name = 'brazil_provider'
@staticmethod
def cpf(with_mask=True):
"""Get a random CPF (brazilian social security code)
:param with_mask: use CPF mask (###.###.###-##) in the return
:returns: Random CPF
:Example:
001.137.297-40
"""
def get_verifying_digit_cpf(cpf, peso):
"""Calculates the verifying digit for the CPF
:param cpf: list of integers with the CPF
:param peso: Integer with the weight for the modulo 11 calculate
:returns: the verifying digit for the CPF
"""
soma = 0
for index, digit in enumerate(cpf):
soma += digit * (peso - index)
resto = soma % 11
if resto == 0 or resto == 1 or resto >= 11:
return 0
return 11 - resto
cpf_without_dv = [randint(0, 9) for _ in range(9)]
first_dv = get_verifying_digit_cpf(cpf_without_dv, 10)
cpf_without_dv.append(first_dv)
second_dv = get_verifying_digit_cpf(cpf_without_dv, 11)
cpf_without_dv.append(second_dv)
cpf = ''.join([str(i) for i in cpf_without_dv])
if with_mask:
return cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
return cpf
@staticmethod
def cnpj(with_mask=True):
"""Get a random cnpj (brazilian social security code)
:param with_mask: use cnpj mask (###.###.###-##) in the return
:returns: Random cnpj
:Example:
77.732.230/0001-70
"""
def get_verifying_digit_cnpj(cnpj, peso):
"""Calculates the verifying digit for the cnpj
:param cnpj: list of integers with the cnpj
:param peso: integer with the weight for the modulo 11 calculation
:returns: the verifying digit for the cnpj
"""
soma = 0
if peso == 5:
peso_list = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
elif peso == 6:
peso_list = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
for i, _ in enumerate(cnpj):
soma += peso_list[i] * cnpj[i]
resto = soma % 11
if resto < 2:
return 0
return 11 - resto
cnpj_without_dv = [randint(0, 9) for _ in range(12)]
first_dv = get_verifying_digit_cnpj(cnpj_without_dv, 5)
cnpj_without_dv.append(first_dv)
second_dv = get_verifying_digit_cnpj(cnpj_without_dv, 6)
cnpj_without_dv.append(second_dv)
cnpj = ''.join([str(i) for i in cnpj_without_dv])
if with_mask:
return cnpj[:2] + '.' + cnpj[2:5] + '.' + cnpj[5:8] + '/' + \
cnpj[8:12] + '-' + cnpj[12:]
return cnpj
class USASpecProvider(object):
"""Class that provides special data for en"""
class Meta:
name = 'usa_provider'
@staticmethod
def tracking_number(service='usps'):
"""Generate random tracking number for USPS, FedEx and UPS.
:param service:
:return:
"""
service = service.lower()
if service not in ('usps', 'fedex', 'ups'):
raise ValueError('Unsupported post service')
services = {
'usps': (
'#### #### #### #### ####',
'@@ ### ### ### US'
),
'fedex': (
"#### #### ####",
"#### #### #### ###"
),
'ups': ("1Z@####@##########",)
}
mask = choice(services[service])
return _custom_code(mask=mask)
@staticmethod
def ssn():
"""Generate a random, but valid Social Security Number.
:returns: Random SSN
:Example:
569-66-5801
"""
# Valid SSNs exclude 000, 666, and 900-999 in the area group
area = randint(1, 899)
if area == 666:
area = 665
return '{:03}-{:02}-{:04}'.format(
area, randint(1, 99), randint(1, 9999))
@staticmethod
def personality(category='mbti'):
"""Generate a type of personality.
:param category: Category.
:return: Personality type.
:Example:
ISFJ.
"""
mbtis = ("ISFJ", "ISTJ", "INFJ", "INTJ",
"ISTP", "ISFP", "INFP", "INTP",
"ESTP", "ESFP", "ENFP", "ENTP",
"ESTJ", "ESFJ", "ENFJ", "ENTJ")
if category.lower() == 'rheti':
return randint(1, 10)
return choice(mbtis)
class RussiaSpecProvider(object):
"""Specific data for russian language (ru)"""
class Meta:
name = 'russia_provider'
@staticmethod
def generate_sentence():
"""Generate sentence from the parts.
:return: Sentence.
:rtype: str
"""
data = pull('text.json', 'ru')['sentence']
sentence = [choice(data[k]) for k in ('head', 'p1', 'p2', 'tail')]
return '{0} {1} {2} {3}'.format(*sentence)
@staticmethod
def patronymic(gender='female'):
"""Generate random patronymic name.
:param gender: Gender of person.
:return: Patronymic name.
:Example:
Алексеевна.
"""
gender = gender.lower()
try:
patronymic = pull('personal.json', 'ru')['patronymic']
return choice(patronymic[gender])
except KeyError:
raise JSONKeyError(
'No such key. Please use one of ["female", "male"]')
@staticmethod
def passport_series(year=None):
"""Generate random series of passport.
:param year: Year of manufacture.
:return: Series.
:Example:
02 15.
"""
year = randint(10, 16) if not \
year else year
region = randint(1, 99)
return '{:02d} {}'.format(region, year)
@staticmethod
def passport_number():
"""Generate random passport number.
:return: Number.
:Example:
560430
"""
return _custom_code(mask='######')
def series_and_number(self):
"""Generate a random passport number and series.
:return: Series and number.
:Example:
57 16 805199.
"""
return '%s %s' % (
self.passport_series(),
self.passport_number()
)
class JapanSpecProvider(object):
"""Class that provides special data for jp"""
class Meta:
name = 'japan_provider'
@staticmethod
def full_to_half(text, alnum=True):
"""Convert all full width katakana, alphanumeric and few special
characters like (, ), ・ to equivalent half width character.
:param text: The text to be converted.
:param alnum: Convert alphanumeric, default True.
:return: Text with full width characters converted to half width.
:Example:
QVCジャパン(0123)
"""
fh_kana_special = {
"ア": "ア", "イ": "イ", "ウ": "ウ", "エ": "エ", "オ": "オ", "カ": "カ",
"キ": "キ", "ク": "ク", "ケ": "ケ", "コ": "コ", "ガ": "ガ", "ギ": "ギ",
"グ": "グ", "ゲ": "ゲ", "ゴ": "ゴ", "サ": "サ", "シ": "シ", "ス": "ス",
"セ": "セ", "ソ": "ソ", "ザ": "ザ", "ジ": "ジ", "ズ": "ズ", "ゼ": "ゼ",
"ゾ": "ゾ", "タ": "タ", "チ": "チ", "ツ": "ツ", "テ": "テ", "ト": "ト",
"ダ": "ダ", "ヂ": "ヂ", "ヅ": "ヅ", "デ": "デ", "ド": "ド", "ナ": "ナ",
"ニ": "ニ", "ヌ": "ヌ", "ネ": "ネ", "ノ": "ノ", "ハ": "ハ", "ヒ": "ヒ",
"フ": "フ", "ヘ": "ヘ", "ホ": "ホ", "バ": "バ", "ビ": "ビ", "ブ": "ブ",
"ベ": "ベ", "ボ": "ボ", "パ": "パ", "ピ": "ピ", "プ": "プ", "ペ": "ペ",
"ポ": "ポ", "マ": "マ", "ミ": "ミ", "ム": "ム", "メ": "メ", "モ": "モ",
"ヤ": "ヤ", "ユ": "ユ", "ヨ": "ヨ", "ラ": "ラ", "リ": "リ", "ル": "ル",
"レ": "レ", "ロ": "ロ", "ワ": "ワ", "ヲ": "ヲ", "ン": "ン", "ァ": "ァ",
"ィ": "ィ", "ゥ": "ゥ", "ェ": "ェ", "ォ": "ォ", "ッ": "ッ", "ャ": "ャ",
"ュ": "ュ", "ョ": "ョ", "!": "!", """: "\"", "#": "#", "$": "$",
"%": "%", "&": "&", "'": "'", "(": "(", ")": ")", "*": "*",
"+": "+", "ー": "ー", "/": "/", ":": ":", ";": ";",
"<": "<", "=": "=", ">": ">", "?": "?", "@": "@", "[": "[",
"\": "\\", "]": "]", "^": "^", "_": "_", "`": "`", "{": "{",
"|": "|", "}": "}", "~": "~", "・": "・", "「": "「", "」": "」"
}
# leaving "。": "。", "、": "," out for now
_fh_alnum_offset = 65248 # 0xFEE0
result = ""
for char in text:
if char in fh_kana_special:
result += fh_kana_special[char]
elif alnum and ord("0") <= ord(char) <= ord("z"):
result += chr(ord(char) - _fh_alnum_offset)
else:
result += char
return result
@staticmethod
def half_to_full(text, alnum=True):
"""Convert all half width katakana, alphanumeric, and special characters
((, ), ) to equivalent full width character.
:param text: The text to be converted.
:param alnum: Convert alphanumeric, default True.
:return: Text with half width characters converted to full width.
:Example:
QVCジャパン(0123)
"""
_hf_alnum_offset = 65248
result = ""
hf_voiced_kana = {
"ガ": "ガ", "ギ": "ギ", "グ": "グ", "ゲ": "ゲ", "ゴ": "ゴ", "ザ": "ザ",
"ジ": "ジ", "ズ": "ズ", "ゼ": "ゼ", "ゾ": "ゾ", "ダ": "ダ", "ヂ": "ヂ",
"ヅ": "ヅ", "デ": "デ", "ド": "ド", "バ": "バ", "ビ": "ビ", "ブ": "ブ",
"ベ": "ベ", "ボ": "ボ", "パ": "パ", "ピ": "ピ", "プ": "プ", "ペ": "ペ",
"ポ": "ポ"
}
hf_kana_special = {
"ア": "ア", "イ": "イ", "ウ": "ウ", "エ": "エ", "オ": "オ", "カ": "カ",
"キ": "キ", "ク": "ク", "ケ": "ケ", "コ": "コ", "サ": "サ", "シ": "シ",
"ス": "ス", "セ": "セ", "ソ": "ソ", "タ": "タ", "チ": "チ", "ツ": "ツ",
"テ": "テ", "ト": "ト", "ナ": "ナ", "ニ": "ニ", "ヌ": "ヌ", "ネ": "ネ",
"ノ": "ノ", "ハ": "ハ", "ヒ": "ヒ", "フ": "フ", "ヘ": "ヘ", "ホ": "ホ",
"マ": "マ", "ミ": "ミ", "ム": "ム", "メ": "メ", "モ": "モ", "ヤ": "ヤ",
"ユ": "ユ", "ヨ": "ヨ", "ラ": "ラ", "リ": "リ", "ル": "ル", "レ": "レ",
"ロ": "ロ", "ワ": "ワ", "ヲ": "ヲ", "ン": "ン", "ァ": "ァ", "ィ": "ィ",
"ゥ": "ゥ", "ェ": "ェ", "ォ": "ォ", "ッ": "ッ", "ャ": "ャ", "ュ": "ュ",
"ョ": "ョ", "!": "!", "\"": """, "#": "#", "$": "$", "%": "%",
"&": "&", "'": "'", "(": "(", ")": ")", "*": "*", "+": "+",
"ー": "ー", "/": "/", ":": ":", ";": ";", "<": "<", "=": "=",
">": ">", "?": "?", "@": "@", "[": "[", "\\": "\", "]": "]",
"^": "^", "_": "_", "`": "`", "{": "{", "|": "|", "}": "}",
"~": "~", "・": "・", "「": "「", "」": "」"
}
def hf_parse(char, result):
"""Parse the char from half-width to full-width, append to result,
and return result.
:param char: Character to be parsed from half-width to full-width.
:param result: Previous result string.
:return: Result appended with parsed char.
:Example:
ラーメン
"""
if char in hf_kana_special:
result += hf_kana_special[char]
elif alnum and ord("0") <= ord(char) <= ord("z"):
result += chr(ord(char) + _hf_alnum_offset)
else:
result += char
return result
# leave "。": "。", ",": "、", for now
i = 0
while i < len(text) - 1:
# need to lookahead for separate voice mark
pair = text[i] + text[i + 1]
if (text[i + 1] == "゙" or text[i + 1] == "゚") and \
pair in hf_voiced_kana:
result += hf_voiced_kana[pair]
i += 2
continue
else:
result = hf_parse(text[i], result)
i += 1
if i == len(text) - 1:
result = hf_parse(text[i], result)
return result
|
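The providers above are mostly static-method based, so they can be exercised directly. Below is a hedged smoke-test sketch; the output values are random, the shown formats come from the docstrings above, and `tracking_number`/`passport_number` additionally rely on `Code.custom_code` from the surrounding elizabeth package being importable.

```python
# Illustrative smoke test; assumes the provider classes defined above are in scope.
if __name__ == '__main__':
    print(BrazilSpecProvider.cpf())                      # e.g. 001.137.297-40
    print(BrazilSpecProvider.cnpj(with_mask=False))      # 14 raw digits
    print(USASpecProvider.ssn())                         # e.g. 569-66-5801
    print(USASpecProvider.personality(category='mbti'))  # one of the 16 MBTI codes
    print(RussiaSpecProvider().series_and_number())      # e.g. 57 16 805199
```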
py | 7dfde9b0bea1f1d18e98ea8474f5af2681986b3c | #!/usr/bin/env python3
""" Bouncing Balls Program
Displays one or more bouncing balls within a turtle screen.
This program utilizes the following programming features:
+ turtle module
+ time module
+ random module
"""
import turtle
import random
import time
def at_left_edge(ball, screen_width):
""" (int, int) -> bool
Handle ball when it hits the left edge of the screen
"""
if ball.xcor() < -screen_width / 2:
return True
else:
return False
def at_right_edge(ball, screen_width):
""" (int, int) -> bool
Handle ball when it hits the right edge of the screen
"""
if ball.xcor() > screen_width / 2:
return True
else:
return False
def at_top_edge(ball, screen_height):
""" (int, int) -> bool
Handle ball when it hits the top edge of the screen
"""
if ball.ycor() > screen_height / 2:
return True
else:
return False
def at_bottom_edge(ball, screen_height):
""" (int, int) -> bool
Handle ball when it hits the bottom edge of the screen
"""
if ball.ycor() < -screen_height / 2:
return True
else:
return False
def bounce_ball(ball, new_direction):
""" (int, str) -> int
Handle ball's change in direction when it bounces
Returns the new heading
"""
if new_direction == 'left' or new_direction == 'right':
new_heading = 180 - ball.heading()
elif new_direction == 'up' or new_direction == 'down':
new_heading = 360 - ball.heading()
return new_heading
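# Worked example of the reflection rule above (illustrative): a ball heading
# 30 degrees that reaches the right edge bounces to 180 - 30 = 150 degrees
# (horizontal reflection); a ball heading 30 degrees that reaches the top
# edge bounces to 360 - 30 = 330 degrees (vertical reflection).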
def create_balls(num_balls):
""" (int) -> list
Initialize <num_balls>
Returns a list populated with <num_balls>
"""
balls = []
for each_ball in range(0, num_balls):
new_ball = turtle.Turtle()
new_ball.shape('circle')
new_ball.fillcolor('black')
new_ball.speed(0)
new_ball.penup()
new_ball.setheading(random.randint(1, 359))
balls.append(new_ball)
return balls
def main():
""" Bouncing Balls
Simulates bouncing balls in a turtle screen fo
Simulates bouncing balls in a turtle screen for a specified number
of seconds
"""
print('Simulates bouncing balls in a turtle screen for a specified number \
of seconds') # Program Greeting
screen_width = 800 # Init Screen width
screen_height = 600 # Init Screen height
turtle.setup(screen_width, screen_height) # Set window size
window = turtle.Screen() # Create turtle window
window.title('Bouncing Balls') # Set window title
# TODO: Input-Error-checking
num_secs = int(input('Enter number of seconds to run: ')) # Execution time
num_balls = int(input('Enter number of balls in simulation: ')) # Number of balls
balls = create_balls(num_balls) # create balls
start_time = time.time() # Set start time
# Begin Simulation
terminate = False # Init Loop Condition
while not terminate: # Enter loop
for each_ball in range(0, len(balls)):
balls[each_ball].forward(15)
if at_left_edge(balls[each_ball], screen_width):
balls[each_ball].setheading(bounce_ball(balls[each_ball], 'right'))
elif at_right_edge(balls[each_ball], screen_width):
balls[each_ball].setheading(bounce_ball(balls[each_ball], 'left'))
elif at_top_edge(balls[each_ball], screen_height):
balls[each_ball].setheading(bounce_ball(balls[each_ball], 'down'))
elif at_bottom_edge(balls[each_ball], screen_height):
balls[each_ball].setheading(bounce_ball(balls[each_ball], 'up'))
if time.time() - start_time > num_secs:
terminate = True
turtle.exitonclick() # Exit on Close window
if __name__ == '__main__':
main()
|
py | 7dfde9c9b96d97cd5b473eba85b1fd3055cb4dd8 | from .halostats import HaloStats
__red_end_user_data_statement__ = (
"This cog stores Discord IDs and Gamertags and uses Selenium to web-scrape the halotracker.com site for Halo Infinite stats\n**WARNING**\nRequires Google Chrome to be installed on the server hosting the bot!"
)
async def setup(bot):
cog = HaloStats(bot)
bot.add_cog(cog)
|
py | 7dfdea425c1bae26f6997e1b9470028c0614e9b1 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import random
import unittest
from math import pi
import gpytorch
import torch
from gpytorch.kernels import MultitaskKernel, RBFKernel
from gpytorch.likelihoods import MultitaskGaussianLikelihood
from gpytorch.means import ConstantMean, MultitaskMean
from gpytorch.distributions import MultitaskMultivariateNormal
# Simple training data: let's try to learn a sine function
train_x = torch.linspace(0, 1, 100)
latent_error = torch.randn(train_x.size()) * 0.5
# y1 function is sin(2*pi*x) with noise N(0, 0.04)
train_y1 = torch.sin(train_x * (2 * pi)) + latent_error + torch.randn(train_x.size()) * 0.1
# y2 function is cos(2*pi*x) with noise N(0, 0.04)
train_y2 = torch.cos(train_x * (2 * pi)) + latent_error + torch.randn(train_x.size()) * 0.1
# Create a train_y which interleaves the two
train_y = torch.stack([train_y1, train_y2], -1)
class MultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
self_covar_module = RBFKernel()
self.covar_module = MultitaskKernel(self_covar_module, num_tasks=2, rank=1)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultitaskMultivariateNormal(mean_x, covar_x)
class TestMultiTaskGPRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_multitask_low_rank_noise_covar(self):
likelihood = MultitaskGaussianLikelihood(num_tasks=2, rank=1)
model = MultitaskGPModel(train_x, train_y, likelihood)
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
n_iter = 50
for _ in range(n_iter):
# Zero prev backpropped gradients
optimizer.zero_grad()
# Make predictions from training data
# Again, note feeding duplicated x_data and indices indicating which task
output = model(train_x)
# TODO: Fix this view call!!
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
# Test the model
model.eval()
likelihood.eval()
num_tasks = 2
task_noise_covar_factor = likelihood.task_noise_covar_factor
log_noise = likelihood.log_noise
task_noise_covar = task_noise_covar_factor.matmul(
task_noise_covar_factor.transpose(-1, -2)
) + log_noise.exp() * torch.eye(num_tasks)
self.assertGreater(task_noise_covar[0, 0, 1].item(), 0.05)
if __name__ == "__main__":
unittest.main()
|
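The test above only checks the learned task-noise covariance. The following is a hedged sketch of how the trained multitask model would typically be used for posterior prediction; it reuses the `model` and `likelihood` objects created in the test, and `test_x` is invented for illustration.

```python
# Illustrative follow-up, not part of the test suite above; assumes `model`,
# `likelihood`, `torch` and `gpytorch` from the test module are in scope.
test_x = torch.linspace(0, 1, 51)
model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    predictions = likelihood(model(test_x))
    mean = predictions.mean                       # shape [51, 2]: one column per task
    lower, upper = predictions.confidence_region()
```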
py | 7dfdea5b8e59c361cb0d9c5cdba87089f0f84d8b | import datetime
from django.db import models
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
import uuid
import json
from string import Template
import smtplib
import requests
from paying_for_college.csvkit.csvkit import Writer as cdw
from django.core.mail import send_mail
REGION_MAP = {'MW': ['IL', 'IN', 'IA', 'KS', 'MI', 'MN',
'MO', 'NE', 'ND', 'OH', 'SD', 'WI'],
'NE': ['CT', 'ME', 'MA', 'NH', 'NJ',
'NY', 'PA', 'RI', 'VT'],
'SO': ['AL', 'AR', 'DE', 'DC', 'FL', 'GA', 'KY', 'LA', 'MD',
'MS', 'NC', 'OK', 'SC', 'TN', 'TX', 'VA', 'WV'],
'WE': ['AK', 'AZ', 'CA', 'CO', 'HI', 'ID', 'MT', 'NV', 'NM',
'OR', 'UT', 'WA', 'WY']
}
CONTROL_MAP = {'1': 'Public',
'2': 'Private',
'3': 'For-profit'}
REGION_NAMES = {'MW': 'Midwest',
'NE': "Northeast",
'SO': 'South',
'WE': 'West'}
HIGHEST_DEGREES = { # highest-awarded values from Ed API and our CSV spec
'0': "Non-degree-granting",
'1': 'Certificate',
'2': "Associate degree",
'3': "Bachelor's degree",
'4': "Graduate degree"
}
LEVELS = { # Dept. of Ed classification of post-secondary degree levels
'1': "Program of less than 1 academic year",
'2': "Program of at least 1 but less than 2 academic years",
'3': "Associate's degree",
'4': "Program of at least 2 but less than 4 academic years",
'5': "Bachelor's degree",
'6': "Post-baccalaureate certificate",
'7': "Master's degree",
'8': "Post-master's certificate",
'17': "Doctor's degree-research/scholarship",
'18': "Doctor's degree-professional practice",
'19': "Doctor's degree-other"
}
NOTIFICATION_TEMPLATE = Template("""Disclosure notification for offer ID $oid\n\
timestamp: $time\n\
errors: $errors\n\
If errors are "none," the disclosure is confirmed.\
""")
def get_region(school):
"""return a school's region based on state"""
for region in REGION_MAP:
if school.state in REGION_MAP[region]:
return region
return ''
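# Example of the lookup above (illustrative): a school whose state is 'OH'
# appears in REGION_MAP['MW'], so get_region returns 'MW' ('Midwest').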
def make_divisible_by_6(value):
"""Makes sure a value, such as program_length, is divisible by 6"""
if not value or value % 6 == 0:
return value
else:
return value + (6 - (value % 6))
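# Illustrative values for the rule above: 12 stays 12, 14 becomes 14 + (6 - 2) = 18,
# and falsy inputs such as None or 0 are returned unchanged.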
class ConstantRate(models.Model):
"""Rate values that generally only change annually"""
name = models.CharField(max_length=255)
slug = models.CharField(max_length=255,
blank=True,
help_text="VARIABLE NAME FOR JS")
value = models.DecimalField(max_digits=6, decimal_places=5)
note = models.TextField(blank=True)
updated = models.DateField(auto_now=True)
def __unicode__(self):
return u"%s (%s), updated %s" % (self.name, self.slug, self.updated)
class Meta:
ordering = ['slug']
class ConstantCap(models.Model):
"""Cap values that generally only change annually"""
name = models.CharField(max_length=255)
slug = models.CharField(max_length=255,
blank=True,
help_text="VARIABLE NAME FOR JS")
value = models.IntegerField()
note = models.TextField(blank=True)
updated = models.DateField(auto_now=True)
def __unicode__(self):
return u"%s (%s), updated %s" % (self.name, self.slug, self.updated)
class Meta:
ordering = ['slug']
# original data_json fields:
# ALIAS -- not needed, DELETE
# AVGMONTHLYPAY
# AVGSTULOANDEBT
# AVGSTULOANDEBTRANK -- not needed, DELETE
# BADALIAS -- not needed, DELETE
# BAH 1356 -- not needed, DELETE
# BOOKS
# CITY (now school.city)
# CONTROL (now school.control)
# DEFAULTRATE
# GRADRATE -- now school.grad_rate
# GRADRATERANK -- not needed, DELETE
# INDICATORGROUP
# KBYOSS (now school.KBYOSS) -- not needed, DELETE
# MEDIANDEBTCOMPLETER # new in 2015
# NETPRICE110K -- not needed, DELETE
# NETPRICE3OK -- not needed, DELETE
# NETPRICE48K -- not needed, DELETE
# NETPRICE75K -- not needed, DELETE
# NETPRICEGENERAL -- not needed, DELETE
# NETPRICEOK -- not needed, DELETE
# OFFERAA
# OFFERBA
# OFFERGRAD
# ONCAMPUSAVAIL
# ONLINE (now school.online)
# OTHEROFFCAMPUS
# OTHERONCAMPUS
# OTHERWFAMILY
# RETENTRATE -- not needed, DELETE
# RETENTRATELT4 # new in 2015 -- not needed, DELETE
# REPAY3YR # new in 2015
# ROOMBRDOFFCAMPUS
# ROOMBRDONCAMPUS
# SCHOOL (now school.primary_alias)
# SCHOOL_ID (now school.pk)
# STATE (now school.state)
# TUITIONGRADINDIS
# TUITIONGRADINS
# TUITIONGRADOSS
# TUITIONUNDERINDIS
# TUITIONUNDERINS
# TUITIONUNDEROSS
# ZIP (now school.zip5)
class Contact(models.Model):
"""school endpoint or email to which we send confirmations"""
contacts = models.TextField(help_text="COMMA-SEPARATED LIST OF EMAILS", blank=True)
endpoint = models.CharField(max_length=255, blank=True)
name = models.CharField(max_length=255, blank=True)
internal_note = models.TextField(blank=True)
def __unicode__(self):
return u", ".join([bit for bit in [self.contacts,
self.endpoint] if bit])
class School(models.Model):
"""
Represents a school
"""
SETTLEMENT_CHOICES = (
('edmc', 'Education Management Corporation'),
('', 'Non-settlement')
)
school_id = models.IntegerField(primary_key=True)
ope6_id = models.IntegerField(blank=True, null=True)
ope8_id = models.IntegerField(blank=True, null=True)
settlement_school = models.CharField(max_length=100,
blank=True,
choices=SETTLEMENT_CHOICES,
default='')
contact = models.ForeignKey(Contact, blank=True, null=True)
data_json = models.TextField(blank=True)
city = models.CharField(max_length=50, blank=True)
state = models.CharField(max_length=2, blank=True)
zip5 = models.CharField(max_length=5, blank=True)
enrollment = models.IntegerField(blank=True, null=True)
accreditor = models.CharField(max_length=255, blank=True)
ownership = models.CharField(max_length=255, blank=True)
control = models.CharField(max_length=50,
blank=True,
help_text="'Public', 'Private' or 'For-profit'")
url = models.TextField(blank=True)
degrees_predominant = models.TextField(blank=True)
degrees_highest = models.TextField(blank=True)
main_campus = models.NullBooleanField()
online_only = models.NullBooleanField()
operating = models.BooleanField(default=True)
under_investigation = models.BooleanField(default=False,
help_text=("Heightened Cash "
"Monitoring 2"))
KBYOSS = models.BooleanField(default=False) # shopping-sheet participant
grad_rate_4yr = models.DecimalField(max_digits=5,
decimal_places=3,
blank=True, null=True)
grad_rate_lt4 = models.DecimalField(max_digits=5,
decimal_places=3,
blank=True, null=True)
grad_rate = models.DecimalField(max_digits=5,
decimal_places=3,
blank=True, null=True,
help_text="A 2-YEAR POOLED VALUE")
repay_3yr = models.DecimalField(max_digits=13,
decimal_places=10,
blank=True, null=True,
help_text=("GRADS WITH A DECLINING BALANCE"
" AFTER 3 YRS"))
default_rate = models.DecimalField(max_digits=5,
decimal_places=3,
blank=True, null=True,
help_text="LOAN DEFAULT RATE AT 3 YRS")
median_total_debt = models.DecimalField(max_digits=7,
decimal_places=1,
blank=True, null=True,
help_text="MEDIAN STUDENT DEBT")
median_monthly_debt = models.DecimalField(max_digits=14,
decimal_places=9,
blank=True, null=True,
help_text=("MEDIAN STUDENT "
"MONTHLY DEBT"))
median_annual_pay = models.IntegerField(blank=True,
null=True,
help_text=("MEDIAN PAY "
"10 YRS AFTER ENTRY"))
avg_net_price = models.IntegerField(blank=True,
null=True,
help_text="OVERALL AVERAGE")
tuition_out_of_state = models.IntegerField(blank=True,
null=True)
tuition_in_state = models.IntegerField(blank=True,
null=True)
offers_perkins = models.BooleanField(default=False)
def as_json(self):
"""delivers pertinent data points as json"""
region = get_region(self)
ordered_out = OrderedDict()
jdata = json.loads(self.data_json)
dict_out = {
'books': jdata['BOOKS'],
'city': self.city,
'control': self.control,
'defaultRate': "{0}".format(self.default_rate),
'gradRate': "{0}".format(self.grad_rate),
'highestDegree': self.get_highest_degree(),
'medianMonthlyDebt': "{0}".format(self.median_monthly_debt),
'medianTotalDebt': "{0}".format(self.median_total_debt),
'nicknames': ", ".join([nick.nickname for nick
in self.nickname_set.all()]),
'offersPerkins': self.offers_perkins,
'onCampusAvail': jdata['ONCAMPUSAVAIL'],
'online': self.online_only,
'otherOffCampus': jdata['OTHEROFFCAMPUS'],
'otherOnCampus': jdata['OTHERONCAMPUS'],
'otherWFamily': jdata['OTHERWFAMILY'],
'predominantDegree': self.get_predominant_degree(),
'region': region,
'repay3yr': "{0}".format(self.repay_3yr),
'roomBrdOffCampus': jdata['ROOMBRDOFFCAMPUS'],
'roomBrdOnCampus': jdata['ROOMBRDONCAMPUS'],
'school': self.primary_alias,
'schoolID': self.pk,
'schoolSalary': self.median_annual_pay,
'settlementSchool': self.settlement_school,
'state': self.state,
'tuitionGradInDis': jdata['TUITIONGRADINDIS'],
'tuitionGradInS': jdata['TUITIONGRADINS'],
'tuitionGradOss': jdata['TUITIONGRADOSS'],
'tuitionUnderInDis': jdata['TUITIONUNDERINDIS'],
'tuitionUnderInS': self.tuition_in_state,
'tuitionUnderOoss': self.tuition_out_of_state,
'url': self.url,
'zip5': self.zip5,
}
for key in sorted(dict_out.keys()):
ordered_out[key] = dict_out[key]
return json.dumps(ordered_out)
def __unicode__(self):
return self.primary_alias + u" (%s)" % self.school_id
def get_predominant_degree(self):
predominant = ''
if (self.degrees_predominant and
self.degrees_predominant in HIGHEST_DEGREES):
predominant = HIGHEST_DEGREES[self.degrees_predominant]
return predominant
def get_highest_degree(self):
highest = ''
if (self.degrees_highest and
self.degrees_highest in HIGHEST_DEGREES):
highest = HIGHEST_DEGREES[self.degrees_highest]
return highest
def convert_ope6(self):
if self.ope6_id:
digits = len(str(self.ope6_id))
if digits < 6:
return ('0' * (6-digits)) + str(self.ope6_id)
else:
return str(self.ope6_id)
else:
return ''
def convert_ope8(self):
if self.ope8_id:
digits = len(str(self.ope8_id))
if digits < 8:
return ('0' * (8-digits)) + str(self.ope8_id)
else:
return str(self.ope8_id)
else:
return ''
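# Example of the zero-padding above (illustrative IDs): an ope6_id of 2077
# becomes '002077' and an ope8_id of 207700 becomes '00207700'.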
@property
def primary_alias(self):
if len(self.alias_set.values()) != 0:
return self.alias_set.get(is_primary=True).alias
else:
return 'Not Available'
@property
def nicknames(self):
return ", ".join([nick.nickname for nick in self.nickname_set.all()])
class Notification(models.Model):
"""record of a disclosure verification"""
institution = models.ForeignKey(School)
oid = models.CharField(max_length=40)
timestamp = models.DateTimeField()
errors = models.CharField(max_length=255)
emails = models.TextField(blank=True,
help_text="COMMA-SEPARATED STRING OF EMAILS")
sent = models.BooleanField(default=False)
log = models.TextField(blank=True)
def __unicode__(self):
return "{0} {1} ({2})".format(self.oid,
self.institution.primary_alias,
self.institution.pk)
def notify_school(self):
school = self.institution
if not school.settlement_school:
nonmsg = "No notification required; {} is not a settlement school"
return nonmsg.format(school.primary_alias)
payload = {
'oid': self.oid,
'time': self.timestamp.isoformat(),
'errors': self.errors
}
now = datetime.datetime.now()
no_contact_msg = ("School notification failed: "
"No endpoint or email info {}".format(now))
# we prefer endpoint notification, so use it first if it exists
if school.contact:
if school.contact.endpoint:
endpoint = school.contact.endpoint
if type(endpoint) == unicode:
endpoint = endpoint.encode('utf-8')
try:
resp = requests.post(endpoint, data=payload, timeout=10)
except requests.exceptions.ConnectionError as e:
exmsg = ("Error: connection error at school "
"{} {}\n".format(now, e))
self.log = self.log + exmsg
self.save()
return exmsg
except requests.exceptions.Timeout:
exmsg = ("Error: connection with school "
"timed out {}\n".format(now))
self.log = self.log + exmsg
self.save()
return exmsg
except requests.exceptions.RequestException as e:
exmsg = ("Error: request error at school: "
"{} {}\n".format(now, e))
self.log = self.log + exmsg
self.save()
return exmsg
else:
if resp.ok:
self.sent = True
self.log = ("School notified "
"via endpoint {}".format(now))
self.save()
return self.log
else:
msg = ("Send attempted: {}\nURL: {}\n"
"response reason: {}\nstatus_code: {}\n"
"content: {}\n\n".format(now,
endpoint,
resp.reason,
resp.status_code,
resp.content))
self.log = self.log + msg
self.save()
return "Notification failed: {}".format(msg)
elif school.contact.contacts:
try:
send_mail("CFPB disclosure notification",
NOTIFICATION_TEMPLATE.substitute(payload),
"[email protected]",
[email for email in school.contact.contacts.split(',')],
fail_silently=False)
self.sent = True
self.emails = school.contact.contacts
self.log = ("School notified via email "
"at {}".format(self.emails))
self.save()
return self.log
except smtplib.SMTPException as e:
email_fail_msg = ("School email notification "
"failed on {}\n"
"Error: {}".format(now, e))
self.log = self.log + email_fail_msg
self.save()
return email_fail_msg
else:
self.log = self.log + no_contact_msg
self.save()
return no_contact_msg
else:
self.log = self.log + no_contact_msg
self.save()
return no_contact_msg
class Disclosure(models.Model):
"""Legally required wording for aspects of a school's aid disclosure"""
name = models.CharField(max_length=255)
institution = models.ForeignKey(School, blank=True, null=True)
text = models.TextField(blank=True)
def __unicode__(self):
return self.name + u" (%s)" % unicode(self.institution)
class Program(models.Model):
"""
Cost and outcome info for an individual course of study at a school
"""
DEBT_NOTE = "TITLEIV_DEBT + PRIVATE_DEBT + INSTITUTIONAL_DEBT"
institution = models.ForeignKey(School)
program_name = models.CharField(max_length=255)
accreditor = models.CharField(max_length=255, blank=True)
level = models.CharField(max_length=255, blank=True)
program_code = models.CharField(max_length=255, blank=True)
campus = models.CharField(max_length=255, blank=True)
cip_code = models.CharField(max_length=255, blank=True)
soc_codes = models.CharField(max_length=255, blank=True)
total_cost = models.IntegerField(blank=True, null=True,
help_text="COMPUTED")
time_to_complete = models.IntegerField(blank=True,
null=True,
help_text="IN MONTHS")
completion_rate = models.DecimalField(blank=True,
null=True,
max_digits=5,
decimal_places=2)
completion_cohort = models.IntegerField(blank=True,
null=True,
help_text="COMPLETION COHORT")
completers = models.IntegerField(blank=True,
null=True,
help_text="COMPLETERS OF THE PROGRAM")
titleiv_debt = models.IntegerField(blank=True, null=True)
private_debt = models.IntegerField(blank=True, null=True)
institutional_debt = models.IntegerField(blank=True, null=True)
mean_student_loan_completers = models.IntegerField(blank=True,
null=True,
help_text=DEBT_NOTE)
median_student_loan_completers = models.IntegerField(blank=True,
null=True,
help_text=DEBT_NOTE)
default_rate = models.DecimalField(blank=True,
null=True,
max_digits=5,
decimal_places=2)
salary = models.IntegerField(blank=True, null=True,
help_text='MEDIAN SALARY')
program_length = models.IntegerField(blank=True,
null=True,
help_text="IN MONTHS")
tuition = models.IntegerField(blank=True,
null=True)
fees = models.IntegerField(blank=True,
null=True)
housing = models.IntegerField(blank=True,
null=True,
help_text="HOUSING & MEALS")
books = models.IntegerField(blank=True,
null=True,
help_text="BOOKS & SUPPLIES")
transportation = models.IntegerField(blank=True, null=True)
other_costs = models.IntegerField(blank=True,
null=True)
job_rate = models.DecimalField(blank=True,
null=True,
max_digits=5,
decimal_places=2,
help_text="COMPLETERS WHO GET RELATED JOB")
job_note = models.TextField(blank=True,
help_text="EXPLANATION FROM SCHOOL")
test = models.BooleanField(default=False)
def __unicode__(self):
return u"%s (%s)" % (self.program_name, unicode(self.institution))
def get_level(self):
level = ''
if self.level and str(self.level) in HIGHEST_DEGREES:
level = HIGHEST_DEGREES[str(self.level)]
return level
def as_json(self):
ordered_out = OrderedDict()
dict_out = {
'accreditor': self.accreditor,
'books': self.books,
'campus': self.campus,
'cipCode': self.cip_code,
'completionRate': "{0}".format(self.completion_rate),
'completionCohort': self.completion_cohort,
'completers': self.completers,
'defaultRate': "{0}".format(self.default_rate),
'fees': self.fees,
'housing': self.housing,
'institution': self.institution.primary_alias,
'institutionalDebt': self.institutional_debt,
'jobNote': self.job_note,
'jobRate': "{0}".format(self.job_rate),
'level': self.get_level(),
'levelCode': self.level,
'meanStudentLoanCompleters': self.mean_student_loan_completers,
'medianStudentLoanCompleters': self.median_student_loan_completers,
'privateDebt': self.private_debt,
'programCode': self.program_code,
'programLength': make_divisible_by_6(self.program_length),
'programName': self.program_name,
'programSalary': self.salary,
'schoolID': self.institution.school_id,
'socCodes': self.soc_codes,
'timeToComplete': self.time_to_complete,
'titleIVDebt': self.titleiv_debt,
'totalCost': self.total_cost,
'transportation': self.transportation,
'tuition': self.tuition,
}
for key in sorted(dict_out.keys()):
ordered_out[key] = dict_out[key]
return json.dumps(ordered_out)
def as_csv(self, csvpath):
"""Output a CSV representation of a program"""
headings = [
'ipeds_unit_id',
'ope_id',
'program_code',
'program_name',
'program_length',
'program_level',
'accreditor',
'median_salary',
'average_time_to_complete',
'books_supplies',
'campus_name',
'cip_code',
'completion_rate',
'completion_cohort',
'completers',
'default_rate',
'job_placement_rate',
'job_placement_note',
'mean_student_loan_completers',
'median_student_loan_completers',
'soc_codes',
'total_cost',
'tuition_fees',
'test'
]
with open(csvpath, 'w') as f:
writer = cdw(f)
writer.writerow(headings)
writer.writerow([
self.institution.school_id,
'',
self.program_code,
self.program_name,
self.program_length,
self.level,
self.accreditor,
self.salary,
self.time_to_complete,
self.books,
self.campus,
self.cip_code,
"{}".format(self.completion_rate),
self.completion_cohort,
self.completers,
"{0}".format(self.default_rate),
"{0}".format(self.job_rate),
self.job_note,
self.mean_student_loan_completers,
self.median_student_loan_completers,
self.soc_codes,
self.total_cost,
self.tuition,
self.test
])
# class Offer(models.Model):
# """
# Financial aid package offered to a prospective student
# """
# school = models.ForeignKey(School)
# program = models.ForeignKey(Program)
# student_id = models.CharField(max_length=255)
# uuid = models.CharField(max_length=100, blank=True)
# # COST OF ATTENDANCE
# tuition = models.PositiveIntegerField(default=0,
# help_text="TUITION & FEES") # tui
# housing = models.PositiveIntegerField(default=0,
# help_text="HOUSING & MEALS") # hou
# books = models.PositiveIntegerField(default=0,
# help_text="BOOKS & SUPPLIES") # bks
# other = models.PositiveIntegerField(default=0,
# help_text="OTHER EXPENSES") # oth
# # MONEY FOR SCHOOL
# scholarships = models.IntegerField(default=0,
# help_text="SCHOLARSHIPS & GRANTS")
# pell_grant = models.PositiveIntegerField(default=0)
# tuition_assist = models.PositiveIntegerField(default=0,
# help_text='SCHOLARSHIPS')
# mil_assist = models.PositiveIntegerField(default=0,
# help_text='MILITARY ASSISTANCE')
# gi_bill = models.PositiveIntegerField(default=0)
# you_pay = models.PositiveIntegerField(default=0)
# family_pay = models.PositiveIntegerField(default=0)
# work_study = models.PositiveIntegerField(default=0)
# parent_loans = models.PositiveIntegerField(default=0)
# perkins_loans = models.PositiveIntegerField(default=0)
# subsidized_loans = models.PositiveIntegerField(default=0)
# unsubsidized_loans = models.PositiveIntegerField(default=0)
# plus_loans = models.PositiveIntegerField(default=0)
# private_loans = models.PositiveIntegerField(default=0)
# private_loan_interest = models.DecimalField(default=0.0,
# max_digits=5,
# decimal_places=2)
# school_loans = models.PositiveIntegerField(default=0)
# school_loan_interest = models.DecimalField(default=0.0,
# max_digits=5,
# decimal_places=2)
# timestamp = models.DateTimeField(blank=True, null=True)
# in_state = models.NullBooleanField(help_text="ONLY FOR PUBLIC SCHOOLS")
# def save(self, *args, **kwargs):
# if not self.uuid:
# self.uuid = str(uuid.uuid4())
# super(Offer, self).save(*args, **kwargs)
class Alias(models.Model):
"""
One of potentially several names for a school
"""
institution = models.ForeignKey(School)
alias = models.TextField()
is_primary = models.BooleanField(default=False)
def __unicode__(self):
return u"%s (alias for %s)" % (self.alias, unicode(self.institution))
class Meta:
verbose_name_plural = "Aliases"
class Nickname(models.Model):
"""
One of potentially several nicknames for a school
"""
institution = models.ForeignKey(School)
nickname = models.TextField()
is_female = models.BooleanField(default=False)
def __unicode__(self):
return u"%s (nickname for %s)" % (self.nickname,
unicode(self.institution))
class Meta:
ordering = ['nickname']
class BAHRate(models.Model):
"""
Basic Allowance for Housing (BAH) rates are zipcode-specific.
Used in GI Bill data and may go away.
"""
zip5 = models.CharField(max_length=5)
value = models.IntegerField()
class Worksheet(models.Model):
"""
The saved state of a student's comparison worksheet.
This is likely to go away.
"""
guid = models.CharField(max_length=64, primary_key=True)
saved_data = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Feedback(models.Model):
"""
User-submitted feedback
"""
created = models.DateTimeField(auto_now_add=True)
message = models.TextField()
def print_vals(obj, val_list=False, val_dict=False, noprint=False):
"""inspect a Django db object"""
keylist = sorted([key for key in obj._meta.get_all_field_names()],
key=lambda s: s.lower())
if val_list:
newlist = []
for key in keylist:
try:
print "%s: %s" % (key, obj.__getattribute__(key))
except:
pass
else:
newlist.append(key)
return [obj.__getattribute__(key) for key in newlist]
elif val_dict:
return obj.__dict__
else:
msg = ""
try:
msg += "%s values for %s:\n" % (obj._meta.object_name, obj)
except: # pragma: no cover
pass
for key in keylist:
try:
msg += "%s: %s\n" % (key, obj.__getattribute__(key))
except: # pragma: no cover
pass
if noprint is False:
print msg
else:
return msg
|
py | 7dfdea720d14b71ad6244fb9d63cf96803ea4088 | import pandas as pd
import numpy as np
from os.path import join as opj
import sys
#import matplotlib.pyplot as plt
#from matplotlib import cm
#import matplotlib as mpl
from scipy.spatial import distance
import scipy.cluster.hierarchy as sch
import json
"""jinja2 import triggers DeprecationWarning about imp module"""
from jinja2 import Environment, PackageLoader#, FileSystemLoader
from .modified_scipy_dendrogram import modified_dendrogram
__all__ = ['plot_hclust',
'plot_hclust_props']
"""TODO:
- Control x and y zoom independently
https://stackoverflow.com/questions/61071276/d3-synchronizing-2-separate-zoom-behaviors/61164185#61164185"""
set1_colors = ["#e41a1c", "#377eb8", "#4daf4a",
"#984ea3", "#ff7f00", "#ffff33",
"#a65628", "#f781bf", "#999999"]
set3_colors = ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
"#80b1d3", "#fdb462", "#b3de69", "#fccde5",
"#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"]
def plot_hclust(Z, title='', leaf_counts=None):
"""Plot tree of linkage-based hierarchical clustering. Nodes
annotated with cluster ID.
Parameters
----------
Z : linkage matrix
Result of calling sch.linkage on a compressed pair-wise distance matrix
leaf_counts : np.ndarray [Z.shape[0] - 1, ]
Weights/counts for each leaf of the tree, using the same order as pwdist matrix
used to generate linkage matrix Z.
Returns
-------
html : str
String that can be saved as HTML for viewing"""
html = plot_hclust_props(Z, title=title, leaf_counts=leaf_counts)
return html
def plot_hclust_props(Z, title='', res=None, alpha_col='pvalue', alpha=0.05, tooltip_cols=[], colors=None, prune_col=None, leaf_counts=None):
"""Plot tree of linkage-based hierarchical clustering, with nodes colored using stacked bars
representing proportion of cluster members associated with specific conditions. Nodes also optionally
annotated with pvalue, number of members or cluster ID.
Parameters
----------
Z : linkage matrix
Result of calling sch.linkage on a compressed pair-wise distance matrix
res : pd.DataFrame
Result from calling hcluster_diff, with observed/frequencies and p-values for each node
alpha_col : str
Column in res to use for 'alpha' annotation
alpha : float
Threshold for plotting the stacked bars and annotation
colors : tuple of valid colors
Used for stacked bars of conditions at each node
prune_col : str/column in res
Column of res that indicates whether a result/node can be pruned from the tree.
Tree will not print any pruned nodes that only contain other pruned nodes.
leaf_counts : np.ndarray [Z.shape[0] - 1, ]
Weights/counts for each leaf of the tree, using the same order as pwdist matrix
used to generate Z linkage matrix.
Returns
-------
html : str
String that can be saved as HTML for viewing"""
height=600
width=900
paths, lines, annotations, legend_data = _hclust_paths(Z, height=height, width=width,
res=res,
alpha_col=alpha_col,
alpha=alpha,
tooltip_cols=tooltip_cols,
colors=colors,
prune_col=prune_col,
leaf_counts=leaf_counts)
#lines_df = 100 * pd.DataFrame({'x1':np.random.rand(10), 'y1':np.random.rand(10), 'x2':np.random.rand(10), 'y2':np.random.rand(10)})
#lines_df = lines_df.assign(stroke='red', stroke_width=1.5)
#lines_json = lines_df.to_json(orient='records')
#circle_data = pd.DataFrame({'x':np.random.rand(10)*50 + width/2, 'y':np.random.rand(10)*50 + height/2}).to_json(orient='records')
jinja_env = Environment(loader=PackageLoader('hierdiff', 'templates'))
#jinja_env = Environment(loader=FileSystemLoader(opj(_git, 'hierdiff', 'hierdiff')))
tree_tmp = jinja_env.get_template('tree_template.html')
html = tree_tmp.render(mytitle=title,
line_data=json.dumps(lines),
annotation_data=json.dumps(annotations),
legend_data=json.dumps(legend_data),
path_data=json.dumps(paths),
height=height,
width=width)
return html
def _encode(s):
"""Creates valid JSON strings that can be decoded by JS in th browser"""
return str(s).replace('"', '@DBLQ').replace('<', '@LT').replace('>', '@GT').replace('/', '@SL')
def _hclust_paths(Z, height, width, margin=10, res=None, alpha_col='pvalue', alpha=0.05, tooltip_cols=[], colors=None, min_count=0, prune_col=None, leaf_counts=None):
if colors is None:
colors = set1_colors
lines = []
annotations = []
paths = []
if not res is None:
"""Use cts from every other element since cluster membership is
always the last feature of the counts array and is therefore the innermost loop"""
x_val_cols = [c for c in res.columns if 'val_' in c][::2]
x_ct_cols = [c.replace('val', 'ct') for c in x_val_cols]
x_vals = ['|'.join(res[c].iloc[0][:-1]) for c in x_val_cols]
legend_data = [{'label':v, 'color':c} for v,c in zip(x_vals, colors)]
else:
legend_data = []
dend = modified_dendrogram(Z, counts=leaf_counts)
xscale = _linear_scale_factory((np.min(np.array(dend['icoord'])), np.max(np.array(dend['icoord']))),
(0+margin, width-margin))
yscale = _linear_scale_factory((np.min(np.array(dend['dcoord'])), np.max(np.array(dend['dcoord']))),
(height-margin, 0+margin))
for xx, yy, cid in zip(dend['icoord'], dend['dcoord'], dend['cid_list']):
#axh.plot(xx, yy, zorder=1, lw=0.5, color='k', alpha=1)
if not res is None:
"""Assumes there is only one matching cid (not true if multiple results are passed)"""
cid_res = res.loc[res['cid'] == cid]
if len(cid_res) > 1:
print('Multiple matching results for each cluster id (i.e. cid)')
return
else:
cid_res = cid_res.iloc[0]
if not prune_col is None:
"""Prune (don't print) if all member results are also prune=True"""
prune = res.loc[res['cid'].isin(cid_res['children']), prune_col].all()
if prune:
continue
paths.append(dict(coords=[[xscale(x), yscale(y)] for x,y in zip(xx, yy)], stroke='black', stroke_width=1))
N = np.sum(cid_res['K_neighbors'])
ann = [f'cid: {cid:d}',
f'n_uniq: {N:1.0f}',
f'n_ct: {np.sum(cid_res[x_ct_cols]):1.0f}']
ann.extend(['%s: %s' % (tt, _encode(cid_res[tt])) for tt in tooltip_cols])
annotations.append(dict(annotation=ann, x1=xscale(xx[1]), x2=xscale(xx[2]), y1=yscale(yy[1]), y2=yscale(yy[2])))
if alpha is None or cid_res[alpha_col] <= alpha and N > min_count:
cts = np.asarray(cid_res[x_ct_cols])
obs = cts / np.sum(cts)
L = (xx[2] - xx[1])
xvec = L * np.concatenate(([0.], obs, [1.]))
curX = xx[1]
for i in range(len(obs)):
c = colors[i]
lines.append(dict(x1=xscale(curX),
x2=xscale(curX + L*obs[i]),
y1=yscale(yy[1]),
y2=yscale(yy[2]),
stroke=c,
stroke_width=10))
"""axh.plot([curX, curX + L*obs[i]],
yy[1:3],
color=c,
lw=10,
solid_capstyle='butt')"""
curX += L*obs[i]
else:
paths.append(dict(coords=[[xscale(x), yscale(y)] for x,y in zip(xx, yy)], stroke='black', stroke_width=1))
s = ['cid: %d' % cid]
annotations.append(dict(annotation=s, x1=xscale(xx[1]), x2=xscale(xx[2]), y1=yscale(yy[1]), y2=yscale(yy[2])))
paths = _translate_paths(paths)
return paths, lines, annotations, legend_data
def _linear_scale_factory(domain, rng):
scalar = (rng[1] - rng[0]) / (domain[1] - domain[0])
offset = rng[0] - scalar*domain[0]
def _scaler(x):
return x * scalar + offset
return _scaler
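# Worked example of the scale factory above (illustrative): with domain (0, 10)
# and range (100, 200), scalar = 10 and offset = 100, so 0 -> 100, 5 -> 150, 10 -> 200.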
def _translate_paths(paths):
"""Simple translation of path coordinates to SVG path string"""
svg = []
for j,p in enumerate(paths):
tmp = ''
for i,c in enumerate(p['coords']):
if i == 0:
tmp += 'M%f,%f' % tuple(c)
else:
tmp += 'L%f,%f' % tuple(c)
paths[j]['str_coords'] = tmp
return paths
|
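A hedged end-to-end sketch of how `plot_hclust` is meant to be called, following its docstring: the points below are random, the import path is assumed, and rendering requires the `hierdiff` package with its bundled `tree_template.html` to be installed.

```python
# Illustrative usage; data is random and the output path is arbitrary.
import numpy as np
import scipy.cluster.hierarchy as sch
from scipy.spatial.distance import pdist

from hierdiff import plot_hclust  # assumed import path for the function above

points = np.random.rand(20, 2)
Z = sch.linkage(pdist(points), method='complete')

html = plot_hclust(Z, title='Demo tree')
with open('tree.html', 'w') as fh:
    fh.write(html)
```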
py | 7dfdea775c644de6769a62fc05202654ba3bcf3a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class TrafficManagerEndpoint(pulumi.CustomResource):
custom_headers: pulumi.Output[list]
"""
One or more `custom_header` blocks as defined below
* `name` (`str`) - The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
* `value` (`str`)
"""
endpoint_location: pulumi.Output[str]
"""
Specifies the Azure location of the Endpoint,
this must be specified for Profiles using the `Performance` routing method
if the Endpoint is of either type `nestedEndpoints` or `externalEndpoints`.
For Endpoints of type `azureEndpoints` the value will be taken from the
location of the Azure target resource.
"""
endpoint_monitor_status: pulumi.Output[str]
endpoint_status: pulumi.Output[str]
"""
The status of the Endpoint, can be set to
either `Enabled` or `Disabled`. Defaults to `Enabled`.
"""
geo_mappings: pulumi.Output[list]
"""
A list of Geographic Regions used to distribute traffic, such as `WORLD`, `UK` or `DE`. The same location can't be specified in two endpoints. [See the Geographic Hierarchies documentation for more information](https://docs.microsoft.com/en-us/rest/api/trafficmanager/geographichierarchies/getdefault).
"""
min_child_endpoints: pulumi.Output[float]
"""
This argument specifies the minimum number
of endpoints that must be ‘online’ in the child profile in order for the
parent profile to direct traffic to any of the endpoints in that child
profile. This argument only applies to Endpoints of type `nestedEndpoints`
and defaults to `1`.
"""
name: pulumi.Output[str]
"""
The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
"""
priority: pulumi.Output[float]
"""
Specifies the priority of this Endpoint, this must be
specified for Profiles using the `Priority` traffic routing method. Supports
values between 1 and 1000, with no Endpoints sharing the same value. If
omitted the value will be computed in order of creation.
"""
profile_name: pulumi.Output[str]
"""
The name of the Traffic Manager Profile to attach
create the Traffic Manager endpoint.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to
create the Traffic Manager endpoint.
"""
subnets: pulumi.Output[list]
"""
One or more `subnet` blocks as defined below
* `first` (`str`)
* `last` (`str`)
* `scope` (`float`)
"""
target: pulumi.Output[str]
"""
The FQDN DNS name of the target. This argument must be
provided for an endpoint of type `externalEndpoints`, for other types it
will be computed.
"""
target_resource_id: pulumi.Output[str]
"""
The resource id of an Azure resource to
target. This argument must be provided for an endpoint of type
`azureEndpoints` or `nestedEndpoints`.
"""
type: pulumi.Output[str]
"""
The Endpoint type, must be one of:
- `azureEndpoints`
- `externalEndpoints`
- `nestedEndpoints`
"""
weight: pulumi.Output[float]
"""
Specifies how much traffic should be distributed to this
endpoint, this must be specified for Profiles using the `Weighted` traffic
routing method. Supports values between 1 and 1000.
"""
def __init__(__self__, resource_name, opts=None, custom_headers=None, endpoint_location=None, endpoint_status=None, geo_mappings=None, min_child_endpoints=None, name=None, priority=None, profile_name=None, resource_group_name=None, subnets=None, target=None, target_resource_id=None, type=None, weight=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Traffic Manager Endpoint.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] custom_headers: One or more `custom_header` blocks as defined below
:param pulumi.Input[str] endpoint_location: Specifies the Azure location of the Endpoint,
this must be specified for Profiles using the `Performance` routing method
if the Endpoint is of either type `nestedEndpoints` or `externalEndpoints`.
For Endpoints of type `azureEndpoints` the value will be taken from the
location of the Azure target resource.
:param pulumi.Input[str] endpoint_status: The status of the Endpoint, can be set to
either `Enabled` or `Disabled`. Defaults to `Enabled`.
:param pulumi.Input[list] geo_mappings: A list of Geographic Regions used to distribute traffic, such as `WORLD`, `UK` or `DE`. The same location can't be specified in two endpoints. [See the Geographic Hierarchies documentation for more information](https://docs.microsoft.com/en-us/rest/api/trafficmanager/geographichierarchies/getdefault).
:param pulumi.Input[float] min_child_endpoints: This argument specifies the minimum number
of endpoints that must be ‘online’ in the child profile in order for the
parent profile to direct traffic to any of the endpoints in that child
profile. This argument only applies to Endpoints of type `nestedEndpoints`
and defaults to `1`.
:param pulumi.Input[str] name: The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
:param pulumi.Input[float] priority: Specifies the priority of this Endpoint, this must be
specified for Profiles using the `Priority` traffic routing method. Supports
values between 1 and 1000, with no Endpoints sharing the same value. If
omitted the value will be computed in order of creation.
:param pulumi.Input[str] profile_name: The name of the Traffic Manager Profile to attach
create the Traffic Manager endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Traffic Manager endpoint.
:param pulumi.Input[list] subnets: One or more `subnet` blocks as defined below
:param pulumi.Input[str] target: The FQDN DNS name of the target. This argument must be
provided for an endpoint of type `externalEndpoints`, for other types it
will be computed.
:param pulumi.Input[str] target_resource_id: The resource id of an Azure resource to
target. This argument must be provided for an endpoint of type
`azureEndpoints` or `nestedEndpoints`.
:param pulumi.Input[str] type: The Endpoint type, must be one of:
- `azureEndpoints`
- `externalEndpoints`
- `nestedEndpoints`
:param pulumi.Input[float] weight: Specifies how much traffic should be distributed to this
endpoint, this must be specified for Profiles using the `Weighted` traffic
routing method. Supports values between 1 and 1000.
The **custom_headers** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
* `value` (`pulumi.Input[str]`)
The **subnets** object supports the following:
* `first` (`pulumi.Input[str]`)
* `last` (`pulumi.Input[str]`)
* `scope` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/traffic_manager_endpoint.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['custom_headers'] = custom_headers
__props__['endpoint_location'] = endpoint_location
__props__['endpoint_status'] = endpoint_status
__props__['geo_mappings'] = geo_mappings
__props__['min_child_endpoints'] = min_child_endpoints
__props__['name'] = name
__props__['priority'] = priority
if profile_name is None:
raise TypeError("Missing required property 'profile_name'")
__props__['profile_name'] = profile_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['subnets'] = subnets
__props__['target'] = target
__props__['target_resource_id'] = target_resource_id
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['weight'] = weight
__props__['endpoint_monitor_status'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure:trafficmanager/endpoint:Endpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TrafficManagerEndpoint, __self__).__init__(
'azure:network/trafficManagerEndpoint:TrafficManagerEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, custom_headers=None, endpoint_location=None, endpoint_monitor_status=None, endpoint_status=None, geo_mappings=None, min_child_endpoints=None, name=None, priority=None, profile_name=None, resource_group_name=None, subnets=None, target=None, target_resource_id=None, type=None, weight=None):
"""
Get an existing TrafficManagerEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] custom_headers: One or more `custom_header` blocks as defined below
:param pulumi.Input[str] endpoint_location: Specifies the Azure location of the Endpoint,
this must be specified for Profiles using the `Performance` routing method
if the Endpoint is of either type `nestedEndpoints` or `externalEndpoints`.
For Endpoints of type `azureEndpoints` the value will be taken from the
location of the Azure target resource.
:param pulumi.Input[str] endpoint_status: The status of the Endpoint, can be set to
either `Enabled` or `Disabled`. Defaults to `Enabled`.
:param pulumi.Input[list] geo_mappings: A list of Geographic Regions used to distribute traffic, such as `WORLD`, `UK` or `DE`. The same location can't be specified in two endpoints. [See the Geographic Hierarchies documentation for more information](https://docs.microsoft.com/en-us/rest/api/trafficmanager/geographichierarchies/getdefault).
:param pulumi.Input[float] min_child_endpoints: This argument specifies the minimum number
of endpoints that must be ‘online’ in the child profile in order for the
parent profile to direct traffic to any of the endpoints in that child
profile. This argument only applies to Endpoints of type `nestedEndpoints`
and defaults to `1`.
:param pulumi.Input[str] name: The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
:param pulumi.Input[float] priority: Specifies the priority of this Endpoint, this must be
specified for Profiles using the `Priority` traffic routing method. Supports
values between 1 and 1000, with no Endpoints sharing the same value. If
omitted the value will be computed in order of creation.
        :param pulumi.Input[str] profile_name: The name of the Traffic Manager Profile to which
               this Traffic Manager endpoint should be attached.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Traffic Manager endpoint.
:param pulumi.Input[list] subnets: One or more `subnet` blocks as defined below
:param pulumi.Input[str] target: The FQDN DNS name of the target. This argument must be
provided for an endpoint of type `externalEndpoints`, for other types it
will be computed.
:param pulumi.Input[str] target_resource_id: The resource id of an Azure resource to
target. This argument must be provided for an endpoint of type
`azureEndpoints` or `nestedEndpoints`.
:param pulumi.Input[str] type: The Endpoint type, must be one of:
- `azureEndpoints`
- `externalEndpoints`
- `nestedEndpoints`
:param pulumi.Input[float] weight: Specifies how much traffic should be distributed to this
endpoint, this must be specified for Profiles using the `Weighted` traffic
routing method. Supports values between 1 and 1000.
The **custom_headers** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the Traffic Manager endpoint. Changing this forces a
new resource to be created.
* `value` (`pulumi.Input[str]`)
The **subnets** object supports the following:
* `first` (`pulumi.Input[str]`)
* `last` (`pulumi.Input[str]`)
* `scope` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/traffic_manager_endpoint.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["custom_headers"] = custom_headers
__props__["endpoint_location"] = endpoint_location
__props__["endpoint_monitor_status"] = endpoint_monitor_status
__props__["endpoint_status"] = endpoint_status
__props__["geo_mappings"] = geo_mappings
__props__["min_child_endpoints"] = min_child_endpoints
__props__["name"] = name
__props__["priority"] = priority
__props__["profile_name"] = profile_name
__props__["resource_group_name"] = resource_group_name
__props__["subnets"] = subnets
__props__["target"] = target
__props__["target_resource_id"] = target_resource_id
__props__["type"] = type
__props__["weight"] = weight
return TrafficManagerEndpoint(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
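# Hedged usage sketch (illustrative only; the resource group, profile and DNS
# names below are assumptions, not values defined in this module):
#
#   endpoint = TrafficManagerEndpoint("example",
#       resource_group_name="example-resources",
#       profile_name="example-profile",
#       type="externalEndpoints",
#       target="www.example.com",
#       weight=100)
#
# For `azureEndpoints` or `nestedEndpoints` endpoints, `target_resource_id`
# would be supplied instead of `target`.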
|
py | 7dfdebaa1e091eb8231a2ef9b239f464cd5c54da | import os
from unittest.mock import Mock
import pytest
from doit.exceptions import InvalidCommand
from doit.cmd_run import Run
from doit.cmd_list import List
from doit import doit_cmd
def cmd_main(args):
main = doit_cmd.DoitMain()
main.BIN_NAME = 'doit'
return main.run(args)
class TestRun(object):
def test_version(self, capsys):
cmd_main(["--version"])
out, err = capsys.readouterr()
assert "lib" in out
def test_usage(self, capsys):
cmd_main(["--help"])
out, err = capsys.readouterr()
assert "doit list" in out
def test_run_is_default(self, monkeypatch):
mock_run = Mock()
monkeypatch.setattr(Run, "execute", mock_run)
cmd_main([])
assert 1 == mock_run.call_count
def test_run_other_subcommand(self, monkeypatch):
mock_list = Mock()
monkeypatch.setattr(List, "execute", mock_list)
cmd_main(["list"])
assert 1 == mock_list.call_count
def test_cmdline_vars(self, monkeypatch):
mock_run = Mock()
monkeypatch.setattr(Run, "execute", mock_run)
cmd_main(['x=1', 'y=abc'])
assert '1' == doit_cmd.get_var('x')
assert 'abc' == doit_cmd.get_var('y')
assert None is doit_cmd.get_var('z')
def test_cmdline_novars(self, monkeypatch):
mock_run = Mock()
monkeypatch.setattr(Run, "execute", mock_run)
cmd_main(['x=1'])
# Simulate the variable below not being initialized by a subprocess on
# Windows. See https://github.com/pydoit/doit/issues/164.
doit_cmd._CMDLINE_VARS = None
assert None is doit_cmd.get_var('x')
def test_cmdline_vars_not_opts(self, monkeypatch):
mock_run = Mock()
monkeypatch.setattr(Run, "execute", mock_run)
cmd_main(['--z=5'])
assert None == doit_cmd.get_var('--z')
def test_cmdline_loader_option_before_cmd_name(self, monkeypatch):
mock_list = Mock()
monkeypatch.setattr(List, "execute", mock_list)
cmd_main(['-k', 'list', '--all'])
assert mock_list.called
params, args = mock_list.call_args[0]
assert params['subtasks'] == True
assert params['seek_file'] == True
assert args == []
def test_cmdline_loader_option_mixed(self, monkeypatch):
mock_run = Mock()
monkeypatch.setattr(Run, "execute", mock_run)
cmd_main(['-c', '-k', 'lala'])
assert mock_run.called
params, args = mock_run.call_args[0]
assert params['continue'] == True
assert params['seek_file'] == True
assert args == ['lala']
def test_task_loader_has_cmd_list(self, monkeypatch):
cmd_names = []
def save_cmd_names(self, params, args):
cmd_names.extend(self.loader.cmd_names)
monkeypatch.setattr(Run, "execute", save_cmd_names)
cmd_main([])
assert 'list' in cmd_names
def test_extra_config(self, monkeypatch, depfile_name):
outfile_val = []
def monkey_run(self, opt_values, pos_args):
outfile_val.append(opt_values['outfile'])
monkeypatch.setattr(Run, "execute", monkey_run)
extra_config = {
'outfile': 'foo.txt',
'dep_file': depfile_name,
}
doit_cmd.DoitMain(extra_config={'GLOBAL': extra_config}).run([])
assert outfile_val[0] == 'foo.txt'
class TestErrors(object):
def test_interrupt(self, monkeypatch):
def my_raise(*args):
raise KeyboardInterrupt()
mock_cmd = Mock(side_effect=my_raise)
monkeypatch.setattr(Run, "execute", mock_cmd)
pytest.raises(KeyboardInterrupt, cmd_main, [])
def test_user_error(self, capsys, monkeypatch):
mock_cmd = Mock(side_effect=InvalidCommand)
monkeypatch.setattr(Run, "execute", mock_cmd)
got = cmd_main([])
assert 3 == got
out, err = capsys.readouterr()
assert "ERROR" in err
def test_internal_error(self, capsys, monkeypatch):
mock_cmd = Mock(side_effect=Exception)
monkeypatch.setattr(Run, "execute", mock_cmd)
got = cmd_main([])
assert 3 == got
out, err = capsys.readouterr()
# traceback from Exception (this case code from mock lib)
assert "mock.py" in err
class TestConfig(object):
def test_no_ini_config_file(self):
main = doit_cmd.DoitMain(config_filenames=())
main.run(['--version'])
def test_load_plugins_command(self):
config_filename = os.path.join(os.path.dirname(__file__), 'sample.cfg')
main = doit_cmd.DoitMain(config_filenames=config_filename)
assert 1 == len(main.config['COMMAND'])
# test loaded plugin command is actually used with plugin name
assert 'foo' in main.get_cmds()
def test_merge_api_ini_config(self):
config_filename = os.path.join(os.path.dirname(__file__), 'sample.cfg')
api_config = {'GLOBAL': {'opty':'10', 'optz':'10'}}
main = doit_cmd.DoitMain(config_filenames=config_filename,
extra_config=api_config)
assert 1 == len(main.config['COMMAND'])
# test loaded plugin command is actually used with plugin name
assert 'foo' in main.get_cmds()
# INI has higher preference the api_config
assert main.config['GLOBAL'] == {'optx':'6', 'opty':'7', 'optz':'10'}
def test_execute_command_plugin(self, capsys):
config_filename = os.path.join(os.path.dirname(__file__), 'sample.cfg')
main = doit_cmd.DoitMain(config_filenames=config_filename)
main.run(['foo'])
got = capsys.readouterr()[0]
assert got == 'this command does nothing!\n'
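# Hedged sketch of the command-line variable feature exercised in
# TestRun.test_cmdline_vars (the dodo.py contents below are illustrative):
#
#   # dodo.py
#   from doit import get_var
#   config = {"target": get_var("target", "default-target")}
#
#   $ doit target=prod    # get_var("target") returns 'prod' while tasks are loaded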
|
py | 7dfdeca80a5358ba07c9d43642bf7cf6b8882c0d | from __future__ import print_function
import httplib2
import sys
from apiclient import discovery
from datetime import datetime
from friday_5pm_helper import (
EXPECTED_DATETIME_FORMATS,
TimeEntryData,
worklog_time_spent
)
from friday_5pm_helper.credentials import get_credentials
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gsuite_utilities_gcalendar.json
LOCAL_CREDENTIAL_FILE = 'gsuite_utilities_gcalendar.json'
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret_gcalendar.json'
APPLICATION_NAME = 'G Suite Utilities'
class GCalendarClient():
def __init__(self):
"""
        Builds a Google Calendar API service object and stores it on ``self.service``.
"""
credentials = get_credentials(
LOCAL_CREDENTIAL_FILE,
CLIENT_SECRET_FILE,
SCOPES,
APPLICATION_NAME
)
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('calendar', 'v3', http=http)
def gcalendar_events(self, start_date, end_date):
"""
        Returns the events on the user's primary calendar between start_date and end_date.
        :param start_date: start of the time window (datetime, UTC)
        :param end_date: end of the time window (datetime, UTC)
        :return: a list of event resources
"""
min_time = start_date.isoformat() + 'Z' # 'Z' indicates UTC time
max_time = end_date.isoformat() + 'Z'
eventsResult = self.service.events().list(
calendarId='primary',
timeMin=min_time,
timeMax=max_time,
singleEvents=True,
orderBy='startTime'
).execute()
return eventsResult.get('items', [])
def calc_interval(start_time, end_time):
"""
    :param start_time: start time string in one of EXPECTED_DATETIME_FORMATS
    :param end_time: end time string in one of EXPECTED_DATETIME_FORMATS
    :return: tuple of (time spent as hh:mm string, start datetime, end datetime),
             or None if neither timestamp matches a known format
"""
for f in EXPECTED_DATETIME_FORMATS:
try:
end_dt = datetime.strptime(end_time, f)
start_dt = datetime.strptime(start_time, f)
t_delta_secs = (end_dt - start_dt).seconds
return (worklog_time_spent(t_delta_secs), start_dt, end_dt)
except Exception as e:
pass
print('Error: unable to parse {} and {}'.format(start_time, end_time))
return None
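# Illustrative sketch (not part of the original module); the exact interval string
# depends on worklog_time_spent and on the formats in EXPECTED_DATETIME_FORMATS:
#
#   calc_interval('2019-01-07T09:00:00Z', '2019-01-07T10:30:00Z')
#   # -> (<90 minutes rendered by worklog_time_spent>, datetime(..., 9, 0), datetime(..., 10, 30))
#   # -> None if neither timestamp matches a known format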
def retrieve_gcalendar_event_data(start_date, end_date, tasks_info):
"""
Retrieve calendar events' data in TimeEntryData format
:return: list of TimeEntryData
"""
time_entry_data_list = []
# see https://developers.google.com/google-apps/calendar/v3/reference/events/list
for event in GCalendarClient().gcalendar_events(start_date, end_date):
start_time_str = event['start'].get('dateTime', event['start'].get('date'))
end_time_str = event['end'].get('dateTime', event['end'].get('date'))
(interval, start, end) = calc_interval(start_time_str, end_time_str)
time_entry_data_list.append(TimeEntryData(
year=start.year,
month=start.month,
day=start.day,
interval=interval,
comment=event['summary'],
taskid=tasks_info['InternalMeeting']
))
return time_entry_data_list
if __name__ == '__main__':
from friday_5pm_helper import start_and_end_of_week_of_a_day
today = datetime.utcnow()
(start_date, end_date) = start_and_end_of_week_of_a_day(today)
ret = retrieve_gcalendar_event_data(start_date, end_date, tasks_info={'InternalMeeting':'123'})
for i in ret:
print(i)
sys.exit(0)
|
py | 7dfdece15445f99796dae5e74a902e1ddcbd0364 | import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
MyModel,
Base,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = MyModel(name='one', value=1)
DBSession.add(model)
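# Hedged CLI sketch (the console-script name and ini file are illustrative; the
# real entry point is whatever setup.py registers for this main()):
#
#   initialize_db development.ini sqlalchemy.url=sqlite:///app.sqlite
#
# which binds DBSession to the configured engine, creates the tables and inserts
# the sample MyModel row inside a transaction.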
|
py | 7dfded18cd7790e8f9c04984c3876f830f7bc76f | # -*- coding: utf-8 -*-
from .base_command import BaseCommand
class RefreshCommand(BaseCommand):
"""
Reset and re-run all migrations.
migrate:refresh
{--d|database= : The database connection to use.}
{--p|path= : The path of migrations files to be executed.}
{--s|seed : Indicates if the seed task should be re-run.}
{--seed-path= : The path of seeds files to be executed.
Defaults to <comment>./seeds</comment>.}
{--seeder=database_seeder : The name of the root seeder.}
{--f|force : Force the operation to run.}
"""
def handle(self):
"""
Executes the command.
"""
if not self.confirm_to_proceed(
"<question>Are you sure you want to refresh the database?:</question> "
):
return
database = self.option("database")
options = [("--force", True)]
if self.option("path"):
options.append(("--path", self.option("path")))
if database:
options.append(("--database", database))
if self.get_definition().has_option("config"):
options.append(("--config", self.option("config")))
self.call("migrate:reset", options)
self.call("migrate", options)
if self._needs_seeding():
self._run_seeder(database)
def _needs_seeding(self):
return self.option("seed")
def _run_seeder(self, database):
options = [("--seeder", self.option("seeder")), ("--force", True)]
if database:
options.append(("--database", database))
if self.get_definition().has_option("config"):
options.append(("--config", self.option("config")))
if self.option("seed-path"):
options.append(("--path", self.option("seed-path")))
self.call("db:seed", options)
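# Hedged CLI sketch (the executable name depends on how the application wires up
# its console commands; options below are illustrative):
#
#   orator migrate:refresh --database mysql --seed
#
# resets all migrations, re-runs them, and then runs the root seeder.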
|
py | 7dfdeda8ad26faf7c23023f8158af5f711c812ee | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Iterable
print isinstance([], Iterable)
ls = [1, 2, 3]
for index, value in enumerate(ls):
print index, value
ls = [x for x in range(1, 11)]
for x in ls:
print x
ls = (x for x in range(0, 11))
print ls.next()
for x in ls:
print x
def gen():
yield 99
for x in gen():
print x
|
py | 7dfdee6b5f8a713d132ed74c36a3fde00d75b1d5 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, slow
from transformers.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from .test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLMProphetNetTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_full_tokenizer(self):
tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
],
)
@cached_property
def big_tokenizer(self):
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [35389, 6672, 49, 2]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
|
py | 7dfdeea90baf4abfe8f985cc91451397c44eb865 | # coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1.1+01d50e5
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import graylog
from graylog.rest import ApiException
from graylog.apis.systemservice_manager_api import SystemserviceManagerApi
class TestSystemserviceManagerApi(unittest.TestCase):
""" SystemserviceManagerApi unit test stubs """
def setUp(self):
self.api = graylog.apis.systemservice_manager_api.SystemserviceManagerApi()
def tearDown(self):
pass
def test_list(self):
"""
Test case for list
List current status of ServiceManager
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dfdef0e67d0ef18672ac85f67e888b52d3f13f1 | from django.urls import path
from .views import ShareListView, ShareDetailView
urlpatterns = [
path('', ShareListView.as_view(), name='home'),
path(
'share/<int:share_id>/delete_article/<int:article_id>/',
ShareDetailView.delete_article,
name='delete_article'
),
path('share/<int:pk>/', ShareDetailView.as_view(), name='share_detail'),
]
|
py | 7dfdf0dc6d57e327a1630a6373c355dd334a2c6a | from pydantic import BaseModel, Field
class FeedbackModel(BaseModel):
title: str
message: str
class TransactionModel(BaseModel):
instrumentId: str
quantity: int
price: float
    purchase_date: str = Field(..., regex=r"^\d{4}-\d{2}-\d{2}$")
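# Hedged example of how these models might be validated (values are illustrative;
# assumes pydantic v1, where Field(..., regex=...) is supported):
if __name__ == "__main__":
    txn = TransactionModel(instrumentId="XYZ", quantity=3, price=150.25,
                           purchase_date="2021-06-01")
    print(txn.json())
    # a purchase_date that does not match YYYY-MM-DD raises pydantic.ValidationError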
|
py | 7dfdf102b88fd7350e626a20d0ce9e4cb67645eb | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._availability_sets_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_available_sizes_request, build_list_by_subscription_request, build_list_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailabilitySetsOperations:
"""AvailabilitySetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySet",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySet')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySetUpdate",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySetUpdate')
request = build_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> None:
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation. Allowed values are
'instanceView'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'} # type: ignore
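# Hedged usage sketch (illustrative only; credential handling and resource names
# are assumptions, not part of this generated module):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.compute.aio import ComputeManagementClient
#
#   async with ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       async for availability_set in client.availability_sets.list("my-resource-group"):
#           print(availability_set.name)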
|
py | 7dfdf1bd735b31abe7300b757cd9dbcfe64710a7 | # Copyright 2022 Touca, Inc. Subject to Apache-2.0 License.
from conans import ConanFile, CMake
class ToucaConan(ConanFile):
name = "touca"
homepage = "https://github.com/trytouca/trytouca/tree/main/sdk/cpp"
description = "Touca SDK for C++"
topics = ("regression-testing", "test-framework", "test-automation")
license = "Apache-2.0"
version = "1.5.2"
author = "Touca, Inc. <[email protected]>"
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
"shared": [True, False],
"with_tests": [True, False],
"with_cli": [True, False],
"with_examples": [True, False],
"with_framework": [True, False],
"with_openssl": [True, False],
}
default_options = {
"fPIC": True,
"shared": False,
"with_tests": False,
"with_cli": False,
"with_examples": False,
"with_framework": True,
"with_openssl": True,
}
generators = "cmake_find_package"
exports_sources = [
"CMakeLists.txt",
"LICENSE",
"README.md",
"cmake/**",
"include/**",
"src/**",
"tests/**",
"cli/**",
]
def requirements(self):
self.requires("cpp-httplib/0.9.5")
self.requires("flatbuffers/2.0.0")
self.requires("fmt/8.0.1")
self.requires("ghc-filesystem/1.5.8")
self.requires("mpark-variant/1.4.0")
self.requires("rapidjson/1.1.0")
if (
self.options.with_examples
or self.options.with_framework
or self.options.with_cli
):
self.requires("cxxopts/2.2.1")
def build_requirements(self):
if self.options.with_tests:
self.build_requires("catch2/2.13.7")
def configure(self):
self.options["fmt"].header_only = True
self.options["flatbuffers"].header_only = True
self.options["cpp-httplib"].with_openssl = self.options.with_openssl
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["TOUCA_BUILD_TESTS"] = self.options.with_tests
cmake.definitions["TOUCA_BUILD_CLI"] = self.options.with_cli
cmake.definitions["TOUCA_BUILD_EXAMPLES"] = self.options.with_examples
cmake.definitions["TOUCA_BUILD_FRAMEWORK"] = self.options.with_framework
cmake.configure()
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def test(self):
cmake = self._configure_cmake()
cmake.test()
def package(self):
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
client_requirements = [
"cpp-httplib::cpp-httplib",
"fmt::fmt",
"flatbuffers::flatbuffers",
"ghc-filesystem::ghc-filesystem",
"mpark-variant::mpark-variant",
"rapidjson::rapidjson",
]
if (
self.options.with_examples
or self.options.with_framework
or self.options.with_cli
):
client_requirements.append("cxxopts::cxxopts")
self.cpp_info.name = "touca"
self.cpp_info.components["client"].names["cmake_find_package"] = "client"
self.cpp_info.components["client"].libs = ["touca"]
self.cpp_info.components["client"].requires = client_requirements
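# Hedged packaging sketch (Conan 1.x invocation; option values are illustrative):
#
#   conan create . -o touca:with_tests=True -o touca:with_cli=True
#
# builds the recipe with unit tests and the CLI enabled, mapping to the
# TOUCA_BUILD_TESTS and TOUCA_BUILD_CLI CMake definitions configured above.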
|
py | 7dfdf21ec76551ea009bea6733acaade0f2fe156 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modeling classes for LayoutLMv2 model."""
import copy
import math
import paddle
import paddle.nn as nn
import paddle.tensor as tensor
import paddle.nn.functional as F
from paddle.nn import Layer
from paddle.nn import CrossEntropyLoss
from .. import PretrainedModel, register_base_model
from ..layoutxlm.visual_backbone import build_resnet_fpn_backbone
from ..layoutxlm.visual_backbone import read_config
__all__ = [
'LayoutLMv2Model', "LayoutLMv2PretrainedModel",
"LayoutLMv2ForTokenClassification", "LayoutLMv2ForPretraining",
"LayoutLMv2ForRelationExtraction"
]
def relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).astype(paddle.int64) * num_buckets
n = paddle.abs(relative_position)
else:
        # element-wise maximum clamps negative offsets at zero
        n = paddle.maximum(-relative_position,
                           paddle.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (paddle.log(
n.astype(paddle.float32) / max_exact) / math.log(max_distance /
max_exact) *
(num_buckets - max_exact)).astype(paddle.int64)
val_if_large = paddle.minimum(
val_if_large, paddle.full_like(val_if_large, num_buckets - 1))
ret += paddle.where(is_small, n, val_if_large)
return ret
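# Illustrative sketch (not part of the original module): with the defaults
# (bidirectional=True, num_buckets=32, max_distance=128), positive and negative
# offsets map to disjoint halves of the bucket range, offsets with magnitude
# below 8 each get their own bucket, and larger offsets are binned
# logarithmically up to max_distance, e.g.
#
#   rel = paddle.to_tensor([[0, 1, -1, 50]])
#   relative_position_bucket(rel)   # int64 tensor of bucket ids in [0, 31]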
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMPooler with XLM->LMv2
class LayoutLMv2Pooler(Layer):
def __init__(self, hidden_size, with_pool):
super(LayoutLMv2Pooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.with_pool = with_pool
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
if self.with_pool == 'tanh':
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMEmbeddings with XLM->LMv2
class LayoutLMv2Embeddings(Layer):
"""
Include embeddings from word, position and token_type embeddings
"""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config["vocab_size"], config["hidden_size"], padding_idx=0)
self.position_embeddings = nn.Embedding(
config["max_position_embeddings"], config["hidden_size"])
self.x_position_embeddings = nn.Embedding(
config["max_2d_position_embeddings"], config["coordinate_size"])
self.y_position_embeddings = nn.Embedding(
config["max_2d_position_embeddings"], config["coordinate_size"])
self.h_position_embeddings = nn.Embedding(
config["max_2d_position_embeddings"], config["coordinate_size"])
self.w_position_embeddings = nn.Embedding(
config["max_2d_position_embeddings"], config["coordinate_size"])
self.token_type_embeddings = nn.Embedding(config["type_vocab_size"],
config["hidden_size"])
self.LayerNorm = nn.LayerNorm(
config["hidden_size"], epsilon=config["layer_norm_eps"])
self.dropout = nn.Dropout(config["hidden_dropout_prob"])
self.register_buffer(
"position_ids",
paddle.arange(config["max_position_embeddings"]).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :,
0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :,
1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :,
2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :,
3])
except IndexError as e:
            raise IndexError(
                "The :obj:`bbox` coordinate values should be within the 0-1000 range."
            ) from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] -
bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] -
bbox[:, :, 0])
spatial_position_embeddings = paddle.concat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
axis=-1, )
return spatial_position_embeddings
def forward(self,
input_ids,
bbox=None,
token_type_ids=None,
position_ids=None):
if position_ids is None:
ones = paddle.ones_like(input_ids, dtype="int64")
seq_length = paddle.cumsum(ones, axis=-1)
position_ids = seq_length - ones
position_ids.stop_gradient = True
if token_type_ids is None:
token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
input_embedings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :,
0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :,
1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :,
2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :,
3])
except IndexError as e:
            raise IndexError(
                "The :obj:`bbox` coordinate values should be within the 0-1000 range."
            ) from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] -
bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] -
bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
input_embedings + position_embeddings + left_position_embeddings +
upper_position_embeddings + right_position_embeddings +
lower_position_embeddings + h_position_embeddings +
w_position_embeddings + token_type_embeddings)
        embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LayoutLMv2PretrainedModel(PretrainedModel):
model_config_file = "model_config.json"
pretrained_init_configuration = {
"layoutlmv2-base-uncased": {
"attention_probs_dropout_prob": 0.1,
"coordinate_size": 128,
"fast_qkv": True,
"gradient_checkpointing": False,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"image_feature_pool_shape": [7, 7, 256],
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_2d_position_embeddings": 1024,
"max_position_embeddings": 512,
"max_rel_2d_pos": 256,
"max_rel_pos": 128,
"model_type": "layoutlmv2",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"output_past": True,
"pad_token_id": 0,
"shape_size": 128,
"rel_2d_pos_bins": 64,
"rel_pos_bins": 32,
"type_vocab_size": 2,
"vocab_size": 30522,
"has_relative_attention_bias": True,
"has_spatial_attention_bias": True,
"has_visual_segment_embedding": False,
},
"layoutlmv2-large-uncased": {
"attention_probs_dropout_prob": 0.1,
"coordinate_size": 171,
"fast_qkv": False,
"gradient_checkpointing": False,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"image_feature_pool_shape": [7, 7, 256],
"initializer_range": 0.02,
"intermediate_size": 4096,
"layer_norm_eps": 1e-12,
"max_2d_position_embeddings": 1024,
"max_position_embeddings": 512,
"max_rel_2d_pos": 256,
"max_rel_pos": 128,
"model_type": "layoutlmv2",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"output_past": True,
"pad_token_id": 0,
"shape_size": 170,
"rel_2d_pos_bins": 64,
"rel_pos_bins": 32,
"type_vocab_size": 2,
"vocab_size": 30522,
"has_relative_attention_bias": True,
"has_spatial_attention_bias": True,
"has_visual_segment_embedding": False,
}
}
resource_files_names = {"model_state": "model_state.pdparams"}
pretrained_resource_files_map = {
"model_state": {
"layoutlmv2-base-uncased":
"https://bj.bcebos.com/paddlenlp/models/transformers/layoutlmv2/layoutlmv2-base-uncased/model_state.pdparams",
"layoutlmv2-large-uncased":
"https://bj.bcebos.com/paddlenlp/models/transformers/layoutlmv2/layoutlmv2-large-uncased/model_state.pdparams",
}
}
base_model_prefix = "layoutlmv2"
def init_weights(self, layer):
""" Initialization hook """
if isinstance(layer, (nn.Linear, nn.Embedding)):
if isinstance(layer.weight, paddle.Tensor):
layer.weight.set_value(
paddle.tensor.normal(
mean=0.0,
std=self.pretrained_init_configuration[
"initializer_range"] if "initializer_range" in
self.pretrained_init_configuration else 0.02,
shape=layer.weight.shape))
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMSelfOutput with XLM->LMv2
class LayoutLMv2SelfOutput(nn.Layer):
def __init__(self, config):
super(LayoutLMv2SelfOutput, self).__init__()
self.dense = nn.Linear(config["hidden_size"], config["hidden_size"])
self.LayerNorm = nn.LayerNorm(
config["hidden_size"], epsilon=config["layer_norm_eps"])
self.dropout = nn.Dropout(config["hidden_dropout_prob"])
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMSelfAttention with XLM->LMv2
class LayoutLMv2SelfAttention(nn.Layer):
def __init__(self, config):
super(LayoutLMv2SelfAttention, self).__init__()
if config["hidden_size"] % config[
"num_attention_heads"] != 0 and not hasattr(config,
"embedding_size"):
raise ValueError(
"The hidden size {} is not a multiple of the number of attention "
"heads {}".format(config["hidden_size"], config[
"num_attention_heads"]))
self.fast_qkv = config["fast_qkv"]
self.num_attention_heads = config["num_attention_heads"]
self.attention_head_size = int(config["hidden_size"] /
config["num_attention_heads"])
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config[
"has_relative_attention_bias"]
self.has_spatial_attention_bias = config["has_spatial_attention_bias"]
if config["fast_qkv"]:
self.qkv_linear = nn.Linear(
config["hidden_size"], 3 * self.all_head_size, bias_attr=False)
self.q_bias = self.create_parameter(
shape=[1, 1, self.all_head_size],
default_initializer=nn.initializer.Constant(0.0))
self.v_bias = self.create_parameter(
shape=[1, 1, self.all_head_size],
default_initializer=nn.initializer.Constant(0.0))
else:
self.query = nn.Linear(config["hidden_size"], self.all_head_size)
self.key = nn.Linear(config["hidden_size"], self.all_head_size)
self.value = nn.Linear(config["hidden_size"], self.all_head_size)
self.dropout = nn.Dropout(config["attention_probs_dropout_prob"])
def transpose_for_scores(self, x):
new_x_shape = x.shape[:-1] + [
self.num_attention_heads, self.attention_head_size
]
x = x.reshape(new_x_shape)
return x.transpose([0, 2, 1, 3])
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = paddle.chunk(qkv, 3, axis=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1, ) * (q.ndimension() - 1) + (-1, )
q = q + self.q_bias.reshape(_sz)
                v = v + self.v_bias.reshape(_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None, ):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = paddle.matmul(query_layer,
key_layer.transpose([0, 1, 3, 2]))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = paddle.where(
attention_mask.astype(paddle.bool).expand_as(attention_scores),
paddle.ones_like(attention_scores) * float("-inf"),
attention_scores)
attention_probs = F.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = paddle.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose([0, 2, 1, 3])
new_context_layer_shape = context_layer.shape[:-2] + [
self.all_head_size
]
context_layer = context_layer.reshape(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer, )
return outputs
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMAttention with XLM->LMv2
class LayoutLMv2Attention(nn.Layer):
def __init__(self, config):
super(LayoutLMv2Attention, self).__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None, ):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos, )
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,
) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMEncoder with XLM->LMv2
class LayoutLMv2Encoder(nn.Layer):
def __init__(self, config):
super(LayoutLMv2Encoder, self).__init__()
self.config = config
self.layer = nn.LayerList([
LayoutLMv2Layer(config) for _ in range(config["num_hidden_layers"])
])
self.has_relative_attention_bias = config[
"has_relative_attention_bias"]
self.has_spatial_attention_bias = config["has_spatial_attention_bias"]
if self.has_relative_attention_bias:
self.rel_pos_bins = config["rel_pos_bins"]
self.max_rel_pos = config["max_rel_pos"]
self.rel_pos_onehot_size = config["rel_pos_bins"]
self.rel_pos_bias = nn.Linear(
self.rel_pos_onehot_size,
config["num_attention_heads"],
bias_attr=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config["max_rel_2d_pos"]
self.rel_2d_pos_bins = config["rel_2d_pos_bins"]
self.rel_2d_pos_onehot_size = config["rel_2d_pos_bins"]
self.rel_pos_x_bias = nn.Linear(
self.rel_2d_pos_onehot_size,
config["num_attention_heads"],
bias_attr=False)
self.rel_pos_y_bias = nn.Linear(
self.rel_2d_pos_onehot_size,
config["num_attention_heads"],
bias_attr=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos, )
rel_pos = paddle.nn.functional.one_hot(
rel_pos,
num_classes=self.rel_pos_onehot_size).astype(hidden_states.dtype)
rel_pos = self.rel_pos_bias(rel_pos).transpose([0, 3, 1, 2])
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(
-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(
-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos, )
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos, )
rel_pos_x = F.one_hot(
rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).astype(
hidden_states.dtype)
rel_pos_y = F.one_hot(
rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).astype(
hidden_states.dtype)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).transpose([0, 3, 1, 2])
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).transpose([0, 3, 1, 2])
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=False,
output_hidden_states=False,
bbox=None,
position_ids=None, ):
all_hidden_states = () if output_hidden_states else None
rel_pos = self._cal_1d_pos_emb(
hidden_states,
position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(
hidden_states, bbox) if self.has_spatial_attention_bias else None
hidden_save = dict()
hidden_save["input_hidden_states"] = hidden_states
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states, )
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[
i] if past_key_values is not None else None
# gradient_checkpointing is set as False here so we remove some codes here
hidden_save["input_attention_mask"] = attention_mask
hidden_save["input_layer_head_mask"] = layer_head_mask
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos, )
hidden_states = layer_outputs[0]
hidden_save["{}_data".format(i)] = hidden_states
return hidden_states,
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMIntermediate with XLM->LMv2
class LayoutLMv2Intermediate(nn.Layer):
def __init__(self, config):
super(LayoutLMv2Intermediate, self).__init__()
self.dense = nn.Linear(config["hidden_size"],
config["intermediate_size"])
if config["hidden_act"] == "gelu":
self.intermediate_act_fn = nn.GELU()
else:
assert False, "hidden_act is set as: {}, please check it..".format(
config["hidden_act"])
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMOutput with XLM->LMv2
class LayoutLMv2Output(nn.Layer):
def __init__(self, config):
super(LayoutLMv2Output, self).__init__()
self.dense = nn.Linear(config["intermediate_size"],
config["hidden_size"])
self.LayerNorm = nn.LayerNorm(
config["hidden_size"], epsilon=config["layer_norm_eps"])
self.dropout = nn.Dropout(config["hidden_dropout_prob"])
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMLayer with XLM->LMv2
class LayoutLMv2Layer(nn.Layer):
def __init__(self, config):
super(LayoutLMv2Layer, self).__init__()
# since chunk_size_feed_forward is 0 as default, no chunk is needed here.
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.add_cross_attention = False # default as false
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None, ):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:
2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos, )
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[
1:] # add self attentions if we output attention weights
layer_output = self.feed_forward_chunk(attention_output)
outputs = (layer_output, ) + outputs
return outputs
# Copied from paddlenlp.transformers.layoutxlm.modeling.VisualBackbone
class VisualBackbone(nn.Layer):
def __init__(self, config):
super(VisualBackbone, self).__init__()
self.cfg = read_config()
self.backbone = build_resnet_fpn_backbone(self.cfg)
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
paddle.to_tensor(self.cfg.MODEL.PIXEL_MEAN).reshape(
[num_channels, 1, 1]))
self.register_buffer(
"pixel_std",
paddle.to_tensor(self.cfg.MODEL.PIXEL_STD).reshape(
[num_channels, 1, 1]))
self.out_feature_key = "p2"
# is_deterministic is disabled here.
self.pool = nn.AdaptiveAvgPool2D(config["image_feature_pool_shape"][:
2])
if len(config["image_feature_pool_shape"]) == 2:
config["image_feature_pool_shape"].append(
self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape(
)[self.out_feature_key].channels == config["image_feature_pool_shape"][
2]
def forward(self, images):
images_input = (
paddle.to_tensor(images) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_axis=2).transpose(
[0, 2, 1])
return features
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMModel with XLM->LMv2
@register_base_model
class LayoutLMv2Model(LayoutLMv2PretrainedModel):
"""
The bare LayoutLMv2 Model outputting raw hidden-states.
This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
Refer to the superclass documentation for the generic methods.
This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation
/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
and refer to the Paddle documentation for all matter related to general usage and behavior.
Args:
vocab_size (`int`):
            Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can
            be represented by the `inputs_ids` passed when calling LayoutLMv2Model.
hidden_size (`int`, optional):
Dimensionality of the encoder layers and the pooler layer. Defaults to ``768``.
num_hidden_layers (`int`, optional):
Number of hidden layers in the Transformer encoder. Defaults to ``12``.
num_attention_heads (`int`, optional):
Number of attention heads for each attention layer in the Transformer encoder.
Defaults to ``12``.
intermediate_size (`int`, optional):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
Defaults to ``3072``.
hidden_act (`str`, optional):
The non-linear activation function in the feed-forward layer.
``"gelu"``, ``"relu"`` and any other paddle supported activation functions
are supported. Defaults to ``"gelu"``.
hidden_dropout_prob (`float`, optional):
The dropout probability for all fully connected layers in the embeddings and encoder.
Defaults to ``0.1``.
attention_probs_dropout_prob (`float`, optional):
            The dropout probability used in the attention layers to drop some attention targets.
Defaults to ``0.1``.
initializer_range (`float`, optional):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Defaults to ``0.02``.
"""
def __init__(
self,
with_pool='tanh',
**kwargs, ):
super(LayoutLMv2Model, self).__init__()
config = kwargs
self.config = kwargs
self.has_visual_segment_embedding = config[
"has_visual_segment_embedding"]
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = VisualBackbone(config)
self.visual.stop_gradient = True
self.visual_proj = nn.Linear(config["image_feature_pool_shape"][-1],
config["hidden_size"])
if self.has_visual_segment_embedding:
self.visual_segment_embedding = self.create_parameter(
shape=[config["hidden_size"], ], dtype=paddle.float32)
self.visual_LayerNorm = nn.LayerNorm(
config["hidden_size"], epsilon=config["layer_norm_eps"])
self.visual_dropout = nn.Dropout(config["hidden_dropout_prob"])
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config["hidden_size"], with_pool)
def _calc_text_embeddings(self, input_ids, bbox, position_ids,
token_type_ids):
words_embeddings = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(
bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(
token_type_ids)
embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(
self.visual(image.astype(paddle.float32)))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(
bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def forward(self,
input_ids=None,
bbox=None,
image=None,
token_type_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
output_hidden_states=None,
output_attentions=None):
input_shape = input_ids.shape
visual_shape = list(input_shape)
visual_shape[1] = self.config["image_feature_pool_shape"][
0] * self.config["image_feature_pool_shape"][1]
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
visual_bbox_x = (paddle.arange(
0,
1000 * (self.config["image_feature_pool_shape"][1] + 1),
1000,
dtype=bbox.dtype, ) // self.config["image_feature_pool_shape"][1])
visual_bbox_y = (paddle.arange(
0,
1000 * (self.config["image_feature_pool_shape"][0] + 1),
1000,
dtype=bbox.dtype, ) // self.config["image_feature_pool_shape"][0])
expand_shape = self.config["image_feature_pool_shape"][0:2]
visual_bbox = paddle.stack(
[
visual_bbox_x[:-1].expand(expand_shape),
visual_bbox_y[:-1].expand(expand_shape[::-1]).transpose(
[1, 0]),
visual_bbox_x[1:].expand(expand_shape),
visual_bbox_y[1:].expand(expand_shape[::-1]).transpose([1, 0]),
],
axis=-1, ).reshape([-1, bbox.shape[-1]])
visual_bbox = visual_bbox.expand([final_shape[0], -1, -1])
final_bbox = paddle.concat([bbox, visual_bbox], axis=1)
if attention_mask is None:
attention_mask = paddle.ones(input_shape)
visual_attention_mask = paddle.ones(visual_shape)
attention_mask = attention_mask.astype(visual_attention_mask.dtype)
final_attention_mask = paddle.concat(
[attention_mask, visual_attention_mask], axis=1)
if token_type_ids is None:
token_type_ids = paddle.zeros(input_shape, dtype=paddle.int64)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand_as(input_ids)
visual_position_ids = paddle.arange(0, visual_shape[1]).expand(
[input_shape[0], -1])
final_position_ids = paddle.concat(
[position_ids, visual_position_ids], axis=1)
if bbox is None:
bbox = paddle.zeros(input_shape + [4])
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids, )
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids, )
final_emb = paddle.concat([text_layout_emb, visual_emb], axis=1)
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(
2)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(
-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config["num_hidden_layers"],
-1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config["num_hidden_layers"]
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states, )
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
return sequence_output, pooled_output
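# A minimal usage sketch for LayoutLMv2Model, assuming a batch dict of paddle
# tensors with hypothetical names; shapes are inferred from forward() above:
# the text stream contributes seq_len positions and the visual stream adds
# image_feature_pool_shape[0] * image_feature_pool_shape[1] patch positions,
# so sequence_output is [batch, seq_len + num_patches, hidden_size].
def _example_layoutlmv2_forward(model, batch):
    # batch (hypothetical): input_ids [B, L], bbox [B, L, 4] with 0-1000 coords,
    # image [B, 3, H, W], token_type_ids [B, L], attention_mask [B, L]
    sequence_output, pooled_output = model(
        input_ids=batch["input_ids"],
        bbox=batch["bbox"],
        image=batch["image"],
        token_type_ids=batch["token_type_ids"],
        attention_mask=batch["attention_mask"])
    return sequence_output, pooled_output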
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMForTokenClassification with XLM->LMv2
class LayoutLMv2ForTokenClassification(LayoutLMv2PretrainedModel):
def __init__(self, layoutlmv2, num_classes=2, dropout=None):
super(LayoutLMv2ForTokenClassification, self).__init__()
self.num_classes = num_classes
if isinstance(layoutlmv2, dict):
self.layoutlmv2 = LayoutLMv2Model(**layoutlmv2)
else:
self.layoutlmv2 = layoutlmv2
self.dropout = nn.Dropout(dropout if dropout is not None else self.
layoutlmv2.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.layoutlmv2.config["hidden_size"],
num_classes)
self.classifier.apply(self.init_weights)
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
labels=None, ):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask, )
seq_length = input_ids.shape[1]
# sequence out and image out
sequence_output, image_output = outputs[0][:, :seq_length], outputs[
0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = logits,
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.reshape([-1, ]) == 1
active_logits = logits.reshape(
[-1, self.num_classes])[active_loss]
active_labels = labels.reshape([-1, ])[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(
logits.reshape([-1, self.num_classes]),
labels.reshape([-1, ]))
outputs = (loss, ) + outputs
return outputs
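# A standalone sketch of the masked loss used above (assumed shapes noted in
# the comments): only positions where attention_mask == 1 contribute to it.
def _example_active_loss(logits, labels, attention_mask, num_classes):
    # logits: [B, L, num_classes]; labels and attention_mask: [B, L]
    active = attention_mask.reshape([-1]) == 1
    active_logits = logits.reshape([-1, num_classes])[active]
    active_labels = labels.reshape([-1])[active]
    return nn.CrossEntropyLoss()(active_logits, active_labels)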
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMPredictionHead with XLM->LMv2
class LayoutLMv2PredictionHead(Layer):
"""
    Prediction head for LayoutLMv2 that maps hidden states to vocabulary logits for masked language modeling.
"""
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super(LayoutLMv2PredictionHead, self).__init__()
self.transform = nn.Linear(hidden_size, hidden_size)
self.activation = getattr(nn.functional, activation)
self.layer_norm = nn.LayerNorm(hidden_size)
self.decoder_weight = self.create_parameter(
shape=[vocab_size, hidden_size],
dtype=self.transform.weight.dtype,
is_bias=False) if embedding_weights is None else embedding_weights
self.decoder_bias = self.create_parameter(
shape=[vocab_size], dtype=self.decoder_weight.dtype, is_bias=True)
def forward(self, hidden_states, masked_positions=None):
if masked_positions is not None:
hidden_states = paddle.reshape(hidden_states,
[-1, hidden_states.shape[-1]])
hidden_states = paddle.tensor.gather(hidden_states,
masked_positions)
            # gathering only the masked tokens first may be faster
hidden_states = self.transform(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = paddle.tensor.matmul(
hidden_states, self.decoder_weight,
transpose_y=True) + self.decoder_bias
return hidden_states
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMPretrainingHeads with XLM->LMv2
class LayoutLMv2PretrainingHeads(Layer):
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super(LayoutLMv2PretrainingHeads, self).__init__()
self.predictions = LayoutLMv2PredictionHead(
hidden_size, vocab_size, activation, embedding_weights)
def forward(self, sequence_output, masked_positions=None):
prediction_scores = self.predictions(sequence_output, masked_positions)
return prediction_scores
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMForPretraining with XLM->LMv2
class LayoutLMv2ForPretraining(LayoutLMv2PretrainedModel):
def __init__(self, layoutlmv2):
super(LayoutLMv2ForPretraining, self).__init__()
self.layoutlmv2 = layoutlmv2
self.cls = LayoutLMv2PretrainingHeads(
self.layoutlmv2.config["hidden_size"],
self.layoutlmv2.config["vocab_size"],
self.layoutlmv2.config["hidden_act"],
embedding_weights=self.layoutlmv2.embeddings.word_embeddings.
weight)
def forward(self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
masked_positions=None):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask, )
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output, masked_positions)
return prediction_scores
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMOutput with XLM->LMv2
class BiaffineAttention(nn.Layer):
"""Implements a biaffine attention operator for binary relation classification."""
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = nn.Bilinear(
in_features, in_features, out_features, bias_attr=False)
self.linear = nn.Linear(2 * in_features, out_features)
def forward(self, x_1, x_2):
return self.bilinear(
x_1, x_2) + self.linear(paddle.concat(
(x_1, x_2), axis=-1))
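# The biaffine score computed above is score(x1, x2) = x1^T U x2 + W[x1; x2]:
# a bilinear term (nn.Bilinear, no bias) plus a linear term over the
# concatenated pair. With in_features=d and out_features=2 it maps two d-dim
# entity vectors to two logits (no-relation vs. relation).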
# Copied from paddlenlp.transformers.layoutxlm.modeling.REDecoder
class REDecoder(nn.Layer):
def __init__(self, hidden_size=768, hidden_dropout_prob=0.1):
super(REDecoder, self).__init__()
self.entity_emb = nn.Embedding(3, hidden_size)
projection = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size),
nn.ReLU(),
nn.Dropout(hidden_dropout_prob),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Dropout(hidden_dropout_prob), )
self.ffnn_head = copy.deepcopy(projection)
self.ffnn_tail = copy.deepcopy(projection)
self.rel_classifier = BiaffineAttention(hidden_size // 2, 2)
self.loss_fct = CrossEntropyLoss()
def build_relation(self, relations, entities):
batch_size = len(relations)
new_relations = []
for b in range(batch_size):
if len(entities[b]["start"]) <= 2:
entities[b] = {"end": [1, 1], "label": [0, 0], "start": [0, 0]}
all_possible_relations = set(
[(i, j)
for i in range(len(entities[b]["label"]))
for j in range(len(entities[b]["label"]))
if entities[b]["label"][i] == 1 and entities[b]["label"][j] ==
2])
if len(all_possible_relations) == 0:
all_possible_relations = {(0, 1)}
positive_relations = set(
list(zip(relations[b]["head"], relations[b]["tail"])))
negative_relations = all_possible_relations - positive_relations
positive_relations = set([
i for i in positive_relations if i in all_possible_relations
])
reordered_relations = list(positive_relations) + list(
negative_relations)
relation_per_doc = {
"head": [i[0] for i in reordered_relations],
"tail": [i[1] for i in reordered_relations],
"label": [1] * len(positive_relations) + [0] *
(len(reordered_relations) - len(positive_relations))
}
assert len(relation_per_doc["head"]) != 0
new_relations.append(relation_per_doc)
return new_relations, entities
def get_predicted_relations(self, logits, relations, entities):
pred_relations = []
for i, pred_label in enumerate(logits.argmax(-1)):
if pred_label != 1:
continue
rel = {}
rel["head_id"] = relations["head"][i]
rel["head"] = (entities["start"][rel["head_id"]],
entities["end"][rel["head_id"]])
rel["head_type"] = entities["label"][rel["head_id"]]
rel["tail_id"] = relations["tail"][i]
rel["tail"] = (entities["start"][rel["tail_id"]],
entities["end"][rel["tail_id"]])
rel["tail_type"] = entities["label"][rel["tail_id"]]
rel["type"] = 1
pred_relations.append(rel)
return pred_relations
def forward(self, hidden_states, entities, relations):
batch_size, max_n_words, context_dim = hidden_states.shape
relations, entities = self.build_relation(relations, entities)
loss = 0
all_pred_relations = []
for b in range(batch_size):
head_entities = paddle.to_tensor(relations[b]["head"])
tail_entities = paddle.to_tensor(relations[b]["tail"])
relation_labels = paddle.to_tensor(relations[b]["label"])
entities_start_index = paddle.to_tensor(entities[b]["start"])
entities_labels = paddle.to_tensor(entities[b]["label"])
head_index = entities_start_index[head_entities]
head_label = entities_labels[head_entities]
head_label_repr = self.entity_emb(head_label)
tail_index = entities_start_index[tail_entities]
tail_label = entities_labels[tail_entities]
tail_label_repr = self.entity_emb(tail_label)
tmp_hidden_states = hidden_states[b][head_index]
if len(tmp_hidden_states.shape) == 1:
tmp_hidden_states = paddle.unsqueeze(tmp_hidden_states, axis=0)
head_repr = paddle.concat(
(tmp_hidden_states, head_label_repr), axis=-1)
tmp_hidden_states = hidden_states[b][tail_index]
if len(tmp_hidden_states.shape) == 1:
tmp_hidden_states = paddle.unsqueeze(tmp_hidden_states, axis=0)
tail_repr = paddle.concat(
(tmp_hidden_states, tail_label_repr), axis=-1)
heads = self.ffnn_head(head_repr)
tails = self.ffnn_tail(tail_repr)
logits = self.rel_classifier(heads, tails)
loss += self.loss_fct(logits, relation_labels)
pred_relations = self.get_predicted_relations(logits, relations[b],
entities[b])
all_pred_relations.append(pred_relations)
return loss, all_pred_relations
# Copied from paddlenlp.transformers.layoutxlm.modeling.LayoutXLMForRelationExtraction with XLM->LMv2
class LayoutLMv2ForRelationExtraction(LayoutLMv2PretrainedModel):
def __init__(self,
layoutlmv2,
hidden_size=768,
hidden_dropout_prob=0.1,
dropout=None):
super(LayoutLMv2ForRelationExtraction, self).__init__()
if isinstance(layoutlmv2, dict):
self.layoutlmv2 = LayoutLMv2Model(**layoutlmv2)
else:
self.layoutlmv2 = layoutlmv2
self.extractor = REDecoder(hidden_size, hidden_dropout_prob)
self.dropout = nn.Dropout(dropout if dropout is not None else self.
layoutlmv2.config["hidden_dropout_prob"])
def init_weights(self, layer):
"""Initialize the weights"""
if isinstance(layer, nn.Linear):
layer.weight.set_value(
paddle.tensor.normal(
mean=0.0, std=0.02, shape=layer.weight.shape))
if layer.bias is not None:
layer.bias.set_value(
paddle.tensor.zeros(shape=layer.bias.shape))
elif isinstance(layer, nn.Embedding):
layer.weight.set_value(
paddle.tensor.normal(
mean=0.0, std=0.02, shape=layer.weight.shape))
if layer._padding_idx is not None:
layer.weight[layer._padding_idx].set_value(
paddle.tensor.normal(
mean=0.0,
std=0.02,
shape=layer.weight[layer._padding_idx].shape))
elif isinstance(layer, nn.LayerNorm):
layer.weight.set_value(paddle.tensor.ones(shape=layer.bias.shape))
layer.bias.set_value(paddle.tensor.zeros(shape=layer.bias.shape))
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None, ):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask, )
seq_length = input_ids.shape[1]
sequence_output, image_output = outputs[0][:, :seq_length], outputs[
0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities,
relations)
return dict(
loss=loss,
entities=entities,
relations=relations,
pred_relations=pred_relations,
hidden_states=outputs[0], )
|
py | 7dfdf36d28604de16e220fbdc41361b45d57ecbe | from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name = 'crib',
version = '0.2.3',
description = 'Minimal command line encryption for tiny notes',
long_description = readme(),
keywords = 'notes encryption minimal',
url = 'http://github.com/lepisma/crib',
author = 'lepisma',
author_email = '[email protected]',
license = 'MIT',
packages = ['crib'],
install_requires = ['pycrypto'],
entry_points={
'console_scripts': ['crib = crib.command:main'],
},
include_package_data = True,
zip_safe = False)
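# Usage note (assumed from the entry_points above): after `pip install .`,
# a `crib` console command is exposed that dispatches to crib.command:main.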
|
py | 7dfdf3bd912e7406ad6b408f0a86f6923e814d41 | import os
import errno
import shutil
import markdown
ROOT = "website"
SRC_DIR = "src"
BUILD_DIR = "build"
MD_DIR = f"{SRC_DIR}/md"
# list of folders in SRC_DIR to be copied to BUILD_DIR
ASSET_DIRS = ["css", "fonts", "images"]
# preload base html
with open("src/html/base.html", "r") as f:
base_html = "".join(line for line in f.readlines())
def mkdir_if_not_exists(directory):
if not os.path.isdir(directory):
os.mkdir(directory)
def copy(src, dest):
try:
shutil.copytree(src, dest)
except OSError as e:
if e.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
print("Directory not copied: Error {}".format(e))
def get_md_files(directory):
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
filepath = os.path.abspath(os.path.join(dirpath, f))
if filepath.endswith(".md"):
yield filepath
def page_source_from_md(markdown_file):
with open(markdown_file, "r") as md:
        page_title = md.readline().strip()  # first line of markdown file is reserved for the page title
text = "".join(line for line in md.readlines())
content = markdown.markdown(text, extensions=["attr_list", "fenced_code", "codehilite"])
return base_html[:].format(title=page_title, body=content)
if __name__ == "__main__":
# check if script was launched from root folder
assert os.path.basename(os.getcwd()) == ROOT
mkdir_if_not_exists(BUILD_DIR)
# copy assets to build
for dirname in ASSET_DIRS:
copy(f"{SRC_DIR}/{dirname}", f"{BUILD_DIR}/{dirname}")
assert os.path.isdir(MD_DIR)
# convert markdown to html and write to html files in BUILD_DIR
for filepath in get_md_files(MD_DIR):
basedir, filename = os.path.split(filepath)
webpage_name = filename.split(".")[0] + ".html"
dest = f"{BUILD_DIR}/{basedir.split(MD_DIR)[-1]}"
        mkdir_if_not_exists(dest)
with open(os.path.join(dest, webpage_name), "w") as webpage:
webpage.writelines(page_source_from_md(filepath))
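# Usage note (assumed): run this script from the repository's `website`
# directory (the ROOT assertion above enforces that), e.g. `python <script>.py`;
# converted pages land in `build/`, mirroring the folder layout under `src/md`.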
|
py | 7dfdf40cd705eb57dfc6333bf5d5488590e97c12 | #python
import networkx as nx
import itertools
import copy
import pandas as pd
import matplotlib.pyplot as plt
G=nx.Graph()
G.add_edges_from([(1,2),(1,3),(1,4),(3,4)])
G.nodes(data=True)
G.nodes[1]['attribute'] = 'value'  # `G.node` was removed in NetworkX 2.4; use the `G.nodes` view instead
G.nodes(data=True)
plt.figure(figsize=(8, 6))
nx.draw(G )
plt.title('Graph Representation of Sleeping Giant Trail Map', size=15)
plt.show()
|
py | 7dfdf45859b5281007a8fa9d72491e1218cfd055 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import io
import os
import sys
import textwrap
import weakref
from typing import List, Callable, Type
from ..typing import OperandType, TileableType, SessionType
from .context import Context
class _LogWrapper:
def __init__(self,
ctx: Context,
op: OperandType,
log_path: str):
self.ctx = ctx
self.op = op
self.log_path = log_path
self.file = open(log_path, 'w')
self.stdout = sys.stdout
self.raw_stdout = self.stdout
while isinstance(self.raw_stdout, _LogWrapper):
self.raw_stdout = self.raw_stdout.stdout
# flag about registering log path
self.is_log_path_registered = False
def __enter__(self):
self.file.__enter__()
# set stdout
sys.stdout = self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.__exit__(exc_type, exc_val, exc_tb)
# set back stdout
sys.stdout = self.stdout
def _register_log_path(self):
if self.is_log_path_registered:
return
# register log path
session_id = self.ctx.session_id
tileable_op_key = self.op.tileable_op_key
chunk_op_key = self.op.key
worker_addr = self.ctx.current_address
log_path = self.log_path
self.ctx.register_custom_log_path(
session_id, tileable_op_key, chunk_op_key,
worker_addr, log_path)
self.is_log_path_registered = True
def write(self, data):
self._register_log_path()
# write into file
self.file.write(data)
# force flush to make sure `fetch_log` can get stdout in time
self.file.flush()
# write into previous stdout
self.raw_stdout.write(data)
def flush(self):
self.raw_stdout.flush()
def redirect_custom_log(func: Callable[[Type, Context, OperandType], None]):
"""
Redirect stdout to a file by wrapping ``Operand.execute(ctx, op)``
"""
@functools.wraps(func)
def wrap(cls,
ctx: Context,
op: OperandType):
custom_log_dir = ctx.new_custom_log_dir()
if custom_log_dir is None:
return func(cls, ctx, op)
log_path = os.path.join(custom_log_dir, op.key)
with _LogWrapper(ctx, op, log_path):
return func(cls, ctx, op)
return wrap
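# A usage sketch, assuming a hypothetical operand class; the decorator wraps the
# classmethod-style `execute(cls, ctx, op)` so anything printed inside it is
# tee'd to `<custom_log_dir>/<op.key>` and registered for later fetching:
#
#     class MyOperand(SomeOperandBase):                # hypothetical base class
#         @classmethod
#         @redirect_custom_log
#         def execute(cls, ctx: Context, op: "MyOperand"):
#             print("captured into the per-chunk custom log")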
_tileable_to_log_fetcher = weakref.WeakKeyDictionary()
class LogFetcher:
def __init__(self,
tileable_op_key: str,
session: SessionType):
self._tileable_op_key = tileable_op_key
self._session = session
self._chunk_op_key_to_result = dict()
self._chunk_op_key_to_offsets = dict()
def __len__(self):
return len(self._chunk_op_key_to_result)
@property
def chunk_op_keys(self) -> List[str]:
return list(self._chunk_op_key_to_result.keys())
@property
def results(self) -> list:
return list(self._chunk_op_key_to_result.values())
@property
def offsets(self) -> List[List[int]]:
return list(self._chunk_op_key_to_offsets.values())
def fetch(self,
offsets: List[int] = None,
sizes: List[int] = None):
if offsets is None:
offsets = self._chunk_op_key_to_offsets
if sizes is None:
sizes = 1 * 1024 ** 2 # 1M each time
result: dict = self._session.fetch_tileable_op_logs(
self._tileable_op_key, offsets=offsets, sizes=sizes)
if result is None:
return
for chunk_key, chunk_result in result.items():
self._chunk_op_key_to_result[chunk_key] = chunk_result['log']
self._chunk_op_key_to_offsets[chunk_key] = chunk_result['offset']
def _display(self, representation: bool = True):
if len(self) == 1:
content = next(iter(self._chunk_op_key_to_result.values()))
return repr(content) if representation else str(content)
sio = io.StringIO()
for chunk_op_key, content in self._chunk_op_key_to_result.items():
sio.write(textwrap.dedent(
f"""
Chunk op key: {chunk_op_key}
Out:
{content}"""))
result = sio.getvalue()
return repr(result) if representation else str(result)
def __repr__(self):
return self._display(True)
def __str__(self):
return self._display(False)
def fetch(
tileables: List[TileableType],
session: SessionType,
offsets: List[int] = None,
sizes: List[int] = None):
log_fetchers = []
for tileable in tileables:
tileable = tileable.data if hasattr(tileable, 'data') else tileable
if tileable not in _tileable_to_log_fetcher:
_tileable_to_log_fetcher[tileable] = LogFetcher(tileable.op.key, session)
log_fetcher = _tileable_to_log_fetcher[tileable]
log_fetcher.fetch(offsets=offsets, sizes=sizes)
log_fetchers.append(log_fetcher)
return log_fetchers
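# A usage sketch for the module-level fetch helper (the tileable and session are
# assumed to come from a running Mars session):
#
#     fetchers = fetch([tileable], session)   # one LogFetcher per tileable
#     print(fetchers[0])                      # repr of the captured stdout
#
# Repeated calls reuse the cached LogFetcher via the WeakKeyDictionary above and
# continue reading from the per-chunk offsets recorded so far.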
|
py | 7dfdf5200f1152c57a24969c0cd24eab50212bcb | """
This module contains methods that model the linear matter power spectrum.
"""
|
py | 7dfdf542c6607b0d790380d4dc8c4e47683f1aae | """
handle generalizing dcg rules, to form fewer, simpler rules
"""
import my_dcg as dcg
def merge_categories(rules, r1, r2):
if r1.category == r2.category:
return None
if r1.out != r2.out:
return None
# print('merging ', r1.category, 'and', r2.category)
# print('len(rules)', len(rules))
# print(rules)
fold_from = r2.category
fold_to = r1.category
rules = [r for r in rules if r != r2]
for rule in rules:
if rule.category == fold_from:
rule.category = fold_to
for item in rule.out:
if isinstance(item, dcg.NonTerminal):
if item.category == fold_from:
item.category = fold_to
# print('len(rules)', len(rules))
# print(rules)
return rules
def merge_chunked_bothrules(rules, r1, r2):
if r1.category != r2.category:
return None
cat = r1.category
assert cat == r2.category
if r1.arg.__class__ != dcg.ab or r2.arg.__class__ != dcg.ab:
return None
num_diffs = 0
if r1.arg.ai is None or r1.arg.bi is None or r2.arg.ai is None or r2.arg.bi is None:
return None
# print('r1', r1)
# print('r2', r2)
if r1.arg.ai != r2.arg.ai:
# assert r1.arg.bi == r2.arg.bi
if r1.arg.ai is None or r2.arg.ai is None:
return None
num_diffs += 1
diffarg1 = dcg.a(r1.arg.ai)
diffarg2 = dcg.a(r2.arg.ai)
vararg = dcg.a(None)
inarg = dcg.ab(None, r1.arg.bi)
if r1.arg.bi != r2.arg.bi:
if r1.arg.bi is None or r2.arg.bi is None:
return None
# assert r1.arg.ai == r2.arg.ai
num_diffs += 1
diffarg1 = dcg.b(r1.arg.bi)
diffarg2 = dcg.b(r2.arg.bi)
vararg = dcg.b(None)
inarg = dcg.ab(r1.arg.ai, None)
if num_diffs != 1:
return None
oldres_1 = dcg.translate(rules, dcg.NonTerminal(category=cat, arg=r1.arg))
oldres_2 = dcg.translate(rules, dcg.NonTerminal(category=cat, arg=r2.arg))
# first check that the non terminals match, and that the order of strings/nonterminals matches
if len(r1.out) != len(r2.out):
return None
num_diff_strings = 0
diff_pos = None
for i in range(len(r1.out)):
item1 = r1.out[i]
item2 = r2.out[i]
if item1.__class__ != item2.__class__:
return None
if isinstance(item1, str):
if item1 != item2:
num_diff_strings += 1
diff_pos = i
if num_diff_strings != 1:
return None
# common_left = []
# common_right = []
# if diff_pos > 0:
common_left = r1.out[:diff_pos]
common_right = r1.out[diff_pos + 1:]
# print('common_left', common_left)
# print('common_right', common_right)
s1 = r1.out[diff_pos]
s2 = r2.out[diff_pos]
# print('diff', s1, s2)
# go through, find first difference, from each end
first_diff = 0
last_diff = 1
for i in range(max(len(s1), len(s2))):
if len(s1) - 1 < i or len(s2) - 1 < i:
first_diff = i
break
if s1[i] != s2[i]:
first_diff = i
break
for i in range(1, max(len(s1), len(s2))):
# print('i', i, s1[-i], s2[-i])
if len(s1) < i or len(s2) < i:
# print('length break', i)
last_diff = i
break
if s1[-i] != s2[-i]:
last_diff = i
break
if first_diff == 0 and last_diff == 1:
return None
shortest_s_len = min(len(s1), len(s2))
if first_diff + last_diff - 1 > shortest_s_len:
# ignore one of them, because we're overlapping...
if last_diff - 1 > first_diff:
first_diff = 0
else:
last_diff = 1
s_start = ''
if first_diff > 0:
s_start = s1[:first_diff]
assert s_start == s2[:first_diff]
s_end = ''
# print('s1', s1)
# print('s2', s2)
if last_diff > 1:
s_end = s1[1 - last_diff:]
# print('last_diff', last_diff, 's_end', s_end)
assert s_end == s2[1 - last_diff:]
# print(s_start, s_end)
if last_diff > 1:
s1_mid = s1[first_diff:1 - last_diff]
s2_mid = s2[first_diff:1 - last_diff]
else:
s1_mid = s1[first_diff:]
s2_mid = s2[first_diff:]
# print(s_start, s_end)
# print('s1_mid', s1_mid, 's2_mid', s2_mid)
# asdf
# print('old rules', rules)
rules = [r for r in rules if r != r1 and r != r2]
cat = r1.category
new_cat = 'C' + str(len(rules))
# cat1 = 'C' + str(len(rules)) + '_1'
# cat2 = 'C' + str(len(rules)) + '_2'
# cat_common = 'C' + str(len(rules)) + '_c'
# print('new cat', new_cat)
# if s1_mid != '':
new_rule1 = dcg.Rule(
category=new_cat, arg=diffarg1, out=[s1_mid])
# print(new_rule1)
rules.append(new_rule1)
# if s2_mid != '':
new_rule2 = dcg.Rule(
category=new_cat, arg=diffarg2, out=[s2_mid])
# print(new_rule2)
rules.append(new_rule2)
# print('len(rules)', len(rules))
# print('len(rules)', len(rules))
# common_left = []
# common_right = []
# if diff_pos > 0:
# common_left =
if s_start != '':
common_left.append(s_start)
if s_end != '':
common_right.append(s_end)
new_rule_common = dcg.Rule(
category=cat,
out=common_left +
[dcg.NonTerminal(category=new_cat, arg=vararg)] +
common_right,
arg=inarg
)
# print('new_rule_common', new_rule_common)
rules.append(new_rule_common)
newres_1 = dcg.translate(rules, dcg.NonTerminal(category=cat, arg=r1.arg))
newres_2 = dcg.translate(rules, dcg.NonTerminal(category=cat, arg=r2.arg))
if False and (newres_1 != oldres_1 or newres_2 != oldres_2):
print('')
for rule in rules:
print(rule)
print('')
print('r1', r1)
print('r2', r2)
print('r1.arg', r1.arg)
print('r2.arg', r2.arg)
print('s_start [%s]' % s_start, 's_end [%s]' % s_end)
print('s1_mid [%s]' % s1_mid, 's2_mid [%s]' % s2_mid)
# print(rules)
print('oldres 1 [%s]' % oldres_1)
print('oldres 2 [%s]' % oldres_2)
print('newres_1 [%s]' % newres_1)
print('newres_2 [%s]' % newres_2)
# assert newres_1 == oldres_1
# assert newres_2 == oldres_2
# print('new rules', rules)
# import kirby2001
# kirby2001.print_table(rules)
return rules
def generalize(rules):
ever_modified = False
modified = True
while modified:
rules, modified = _generalize(rules)
if modified:
ever_modified = True
return rules, ever_modified
def _generalize(rules):
"""
input: rules
output: simpler_rules, was_simplified
"""
# was_simplified = False
"""
Do we really have to do O(n^2) comparisons? :(
I guess so :(
I mean, we could choose not to, but I think kirby 2001 probably
does them exhaustively
to simplify the code, we're just going to do a single merge per pass...
# note that if any rules are merged, those rules are both excluded from the rest of the pass
# I'm not sure if this is a theoretical requirement, but certainly simplifies :)
"""
# new_rules = []
for i, r1 in enumerate(rules):
# print('r1', r1)
# merge_done = False
for r2 in rules[i + 1:]:
# print('r2', r2)
new_rules = merge_categories(rules, r1, r2)
if new_rules is not None:
return new_rules, True
new_rules = merge_chunked_bothrules(rules, r1, r2)
if new_rules is not None:
return new_rules, True
# res_new = None
# res_new = merge_categories(r1, r2)
# # if res_new is None:
# #
# if res_new is not None:
# new_rules.append(res_new)
# merge_done = True
# break
# if merge_done:
# continue
# else:
# new_rules.append(r1)
# new_rules.append(r2)
return rules, False
# return simpler_rules, was_simplified
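# A usage sketch, assuming `rules` is a list of my_dcg.Rule objects: generalize
# keeps applying single merges (category folding or chunking) until no further
# merge is possible, e.g.
#
#     rules, changed = generalize(rules)
#
# where `changed` reports whether any merge happened during the passes.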
|
py | 7dfdf54bb57fef8f98ef1c80a4e08e35c87a2471 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SVGPath <=> skia-pathops constructs to enable ops on paths."""
import functools
import pathops # pytype: disable=import-error
from typing import Sequence, Tuple
from picosvg.svg_meta import SVGCommand, SVGCommandGen, SVGCommandSeq
from picosvg.svg_transform import Affine2D
# Absolutes coords assumed
# A should never occur because we convert arcs to cubics
# S,T should never occur because we eliminate shorthand
_SVG_CMD_TO_SKIA_FN = {
"M": pathops.Path.moveTo,
"L": pathops.Path.lineTo,
"Q": pathops.Path.quadTo,
"Z": pathops.Path.close,
"C": pathops.Path.cubicTo,
}
_SVG_TO_SKIA_LINE_CAP = {
"butt": pathops.LineCap.BUTT_CAP,
"round": pathops.LineCap.ROUND_CAP,
"square": pathops.LineCap.SQUARE_CAP,
}
_SVG_TO_SKIA_LINE_JOIN = {
"miter": pathops.LineJoin.MITER_JOIN,
"round": pathops.LineJoin.ROUND_JOIN,
"bevel": pathops.LineJoin.BEVEL_JOIN,
# No arcs or miter-clip
}
def _simple_skia_to_svg(svg_cmd, points) -> SVGCommandGen:
# pathops.Path gives us sequences of points, flatten 'em
yield (svg_cmd, tuple(c for pt in points for c in pt))
def _qcurveto_to_svg(points) -> SVGCommandGen:
for (control_pt, end_pt) in pathops.decompose_quadratic_segment(points):
yield ("Q", control_pt + end_pt)
def _end_path(points) -> SVGCommandGen:
if points:
raise ValueError("endPath should have no points")
return # pytype: disable=bad-return-type
yield
_SKIA_CMD_TO_SVG_CMD = {
# simple conversions
"moveTo": functools.partial(_simple_skia_to_svg, "M"),
"lineTo": functools.partial(_simple_skia_to_svg, "L"),
"quadTo": functools.partial(_simple_skia_to_svg, "Q"),
"curveTo": functools.partial(_simple_skia_to_svg, "C"),
"closePath": functools.partial(_simple_skia_to_svg, "Z"),
# more interesting conversions
"qCurveTo": _qcurveto_to_svg,
# nop
"endPath": _end_path,
}
_SVG_FILL_RULE_TO_SKIA_FILL_TYPE = {
"nonzero": pathops.FillType.WINDING,
"evenodd": pathops.FillType.EVEN_ODD,
}
def skia_path(svg_cmds: SVGCommandSeq, fill_rule: str) -> pathops.Path:
try:
fill_type = _SVG_FILL_RULE_TO_SKIA_FILL_TYPE[fill_rule]
except KeyError:
raise ValueError(f"Invalid fill rule: {fill_rule!r}")
sk_path = pathops.Path(fillType=fill_type)
for cmd, args in svg_cmds:
if cmd not in _SVG_CMD_TO_SKIA_FN:
raise ValueError(f'No mapping to Skia for "{cmd} {args}"')
_SVG_CMD_TO_SKIA_FN[cmd](sk_path, *args)
return sk_path
def svg_commands(skia_path: pathops.Path) -> SVGCommandGen:
for cmd, points in skia_path.segments:
if cmd not in _SKIA_CMD_TO_SVG_CMD:
raise ValueError(f'No mapping to svg for "{cmd} {points}"')
for svg_cmd, svg_args in _SKIA_CMD_TO_SVG_CMD[cmd](points):
yield (svg_cmd, svg_args)
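# A small round-trip sketch under assumed inputs: picosvg command tuples are
# converted to a pathops.Path and back (the helper name is illustrative).
def _example_round_trip():
    cmds = [("M", (0.0, 0.0)), ("L", (10.0, 0.0)), ("L", (10.0, 10.0)), ("Z", ())]
    sk = skia_path(cmds, fill_rule="nonzero")
    # yields tuples such as ('M', (0.0, 0.0)), ('L', ...), ('Z', ())
    return list(svg_commands(sk))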
def _do_pathop(
op: str, svg_cmd_seqs: Sequence[SVGCommandSeq], fill_rules: Sequence[str]
) -> SVGCommandGen:
if not svg_cmd_seqs:
return # pytype: disable=bad-return-type
assert len(svg_cmd_seqs) == len(fill_rules)
sk_path = skia_path(svg_cmd_seqs[0], fill_rules[0])
for svg_cmds, fill_rule in zip(svg_cmd_seqs[1:], fill_rules[1:]):
sk_path2 = skia_path(svg_cmds, fill_rule)
sk_path = pathops.op(sk_path, sk_path2, op, fix_winding=True)
else:
sk_path.simplify(fix_winding=True)
assert sk_path.fillType == pathops.FillType.WINDING
return svg_commands(sk_path)
def union(
svg_cmd_seqs: Sequence[SVGCommandSeq], fill_rules: Sequence[str]
) -> SVGCommandGen:
return _do_pathop(pathops.PathOp.UNION, svg_cmd_seqs, fill_rules)
def intersection(
svg_cmd_seqs: Sequence[SVGCommandSeq], fill_rules: Sequence[str]
) -> SVGCommandGen:
return _do_pathop(pathops.PathOp.INTERSECTION, svg_cmd_seqs, fill_rules)
def remove_overlaps(svg_cmds: SVGCommandSeq, fill_rule: str) -> SVGCommandGen:
"""Create a simplified path filled using the "nonzero" winding rule.
This uses skia-pathops simplify method to create a new path containing
non-overlapping contours that describe the same area as the original path.
The fill_rule ("evenodd" or "nonzero") of the original path determines
what is inside or outside the path.
After simplification, the winding order of the sub-paths is such that the
path looks the same no matter what fill rule is used to render it.
References:
https://oreillymedia.github.io/Using_SVG/extras/ch06-fill-rule.html
"""
sk_path = skia_path(svg_cmds, fill_rule=fill_rule)
sk_path.simplify(fix_winding=True)
assert sk_path.fillType == pathops.FillType.WINDING
return svg_commands(sk_path)
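# A sketch of remove_overlaps on a self-intersecting "bowtie" contour (shape is
# illustrative): the result describes the same filled area with non-overlapping
# sub-paths, safe to render with either fill rule.
def _example_remove_overlaps():
    bowtie = [("M", (0.0, 0.0)), ("L", (10.0, 10.0)), ("L", (10.0, 0.0)),
              ("L", (0.0, 10.0)), ("Z", ())]
    return list(remove_overlaps(bowtie, fill_rule="nonzero"))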
def transform(svg_cmds: SVGCommandSeq, affine: Affine2D) -> SVGCommandGen:
sk_path = skia_path(svg_cmds, fill_rule="nonzero").transform(*affine)
return svg_commands(sk_path)
def stroke(
svg_cmds: SVGCommandSeq,
svg_linecap: str,
svg_linejoin: str,
stroke_width: float,
stroke_miterlimit: float,
tolerance: float,
dash_array: Sequence[float] = (),
dash_offset: float = 0.0,
) -> SVGCommandGen:
"""Create a path that is a shape with its stroke applied.
The result may contain self-intersecting paths, thus it should be filled
using "nonzero" winding rule (otherwise with "evenodd" one may see gaps
where the sub-paths overlap).
"""
cap = _SVG_TO_SKIA_LINE_CAP.get(svg_linecap, None)
if cap is None:
raise ValueError(f"Unsupported cap {svg_linecap}")
join = _SVG_TO_SKIA_LINE_JOIN.get(svg_linejoin, None)
if join is None:
raise ValueError(f"Unsupported join {svg_linejoin}")
# the input path's fill_rule doesn't affect the stroked result so for
# simplicity here we assume 'nonzero'
sk_path = skia_path(svg_cmds, fill_rule="nonzero")
sk_path.stroke(stroke_width, cap, join, stroke_miterlimit, dash_array, dash_offset)
# nuke any conics that snuck in (e.g. with stroke-linecap="round")
sk_path.convertConicsToQuads(tolerance)
assert sk_path.fillType == pathops.FillType.WINDING
return svg_commands(sk_path)
def bounding_box(svg_cmds: SVGCommandSeq):
return skia_path(svg_cmds, fill_rule="nonzero").bounds
def path_area(svg_cmds: SVGCommandSeq, fill_rule: str) -> float:
"""Return the path's absolute area."""
sk_path = skia_path(svg_cmds, fill_rule=fill_rule)
sk_path.simplify(fix_winding=True)
return sk_path.area
|
py | 7dfdf594b0fdb514504b298a5f9c0e04e8817855 | from fontTools.misc.py23 import *
from fontTools.misc.loggingTools import CapturingLogHandler
import unittest
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen, \
SegmentToPointPen, GuessSmoothPointPen, ReverseContourPointPen
class _TestSegmentPen(AbstractPen):
def __init__(self):
self._commands = []
def __repr__(self):
return " ".join(self._commands)
def moveTo(self, pt):
self._commands.append("%s %s moveto" % (pt[0], pt[1]))
def lineTo(self, pt):
self._commands.append("%s %s lineto" % (pt[0], pt[1]))
def curveTo(self, *pts):
pts = ["%s %s" % pt for pt in pts]
self._commands.append("%s curveto" % " ".join(pts))
def qCurveTo(self, *pts):
pts = ["%s %s" % pt if pt is not None else "None" for pt in pts]
self._commands.append("%s qcurveto" % " ".join(pts))
def closePath(self):
self._commands.append("closepath")
def endPath(self):
self._commands.append("endpath")
def addComponent(self, glyphName, transformation):
self._commands.append("'%s' %s addcomponent" % (glyphName, transformation))
def _reprKwargs(kwargs):
items = []
for key in sorted(kwargs):
value = kwargs[key]
if isinstance(value, basestring):
items.append("%s='%s'" % (key, value))
else:
items.append("%s=%s" % (key, value))
return items
class _TestPointPen(AbstractPointPen):
def __init__(self):
self._commands = []
def __repr__(self):
return " ".join(self._commands)
def beginPath(self, identifier=None, **kwargs):
items = []
if identifier is not None:
items.append("identifier='%s'" % identifier)
items.extend(_reprKwargs(kwargs))
self._commands.append("beginPath(%s)" % ", ".join(items))
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
identifier=None, **kwargs):
items = ["%s" % (pt,)]
if segmentType is not None:
items.append("segmentType='%s'" % segmentType)
if smooth:
items.append("smooth=True")
if name is not None:
items.append("name='%s'" % name)
if identifier is not None:
items.append("identifier='%s'" % identifier)
items.extend(_reprKwargs(kwargs))
self._commands.append("addPoint(%s)" % ", ".join(items))
def endPath(self):
self._commands.append("endPath()")
def addComponent(self, glyphName, transform, identifier=None, **kwargs):
items = ["'%s'" % glyphName, "%s" % transform]
if identifier is not None:
items.append("identifier='%s'" % identifier)
items.extend(_reprKwargs(kwargs))
self._commands.append("addComponent(%s)" % ", ".join(items))
class PointToSegmentPenTest(unittest.TestCase):
def test_open(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
ppen.beginPath()
ppen.addPoint((10, 10), "move")
ppen.addPoint((10, 20), "line")
ppen.endPath()
self.assertEqual("10 10 moveto 10 20 lineto endpath", repr(pen))
def test_closed(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
ppen.beginPath()
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 20), "line")
ppen.addPoint((20, 20), "line")
ppen.endPath()
self.assertEqual("10 10 moveto 10 20 lineto 20 20 lineto closepath", repr(pen))
def test_cubic(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
ppen.beginPath()
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 20))
ppen.addPoint((20, 20))
ppen.addPoint((20, 40), "curve")
ppen.endPath()
self.assertEqual("10 10 moveto 10 20 20 20 20 40 curveto closepath", repr(pen))
def test_quad(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
ppen.beginPath(identifier='foo')
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 40))
ppen.addPoint((40, 40))
ppen.addPoint((10, 40), "qcurve")
ppen.endPath()
self.assertEqual("10 10 moveto 10 40 40 40 10 40 qcurveto closepath", repr(pen))
def test_quad_onlyOffCurvePoints(self):
pen = _TestSegmentPen()
ppen = PointToSegmentPen(pen)
ppen.beginPath()
ppen.addPoint((10, 10))
ppen.addPoint((10, 40))
ppen.addPoint((40, 40))
ppen.endPath()
self.assertEqual("10 10 10 40 40 40 None qcurveto closepath", repr(pen))
def test_roundTrip1(self):
tpen = _TestPointPen()
ppen = PointToSegmentPen(SegmentToPointPen(tpen))
ppen.beginPath()
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 20))
ppen.addPoint((20, 20))
ppen.addPoint((20, 40), "curve")
ppen.endPath()
self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') addPoint((10, 20)) "
"addPoint((20, 20)) addPoint((20, 40), segmentType='curve') endPath()",
repr(tpen))
def test_closed_outputImpliedClosingLine(self):
tpen = _TestSegmentPen()
ppen = PointToSegmentPen(tpen, outputImpliedClosingLine=True)
ppen.beginPath()
ppen.addPoint((10, 10), "line")
ppen.addPoint((10, 20), "line")
ppen.addPoint((20, 20), "line")
ppen.endPath()
self.assertEqual(
"10 10 moveto "
"10 20 lineto "
"20 20 lineto "
"10 10 lineto " # explicit closing line
"closepath",
repr(tpen)
)
def test_closed_line_overlapping_start_end_points(self):
# Test case from https://github.com/googlefonts/fontmake/issues/572.
tpen = _TestSegmentPen()
ppen = PointToSegmentPen(tpen, outputImpliedClosingLine=False)
# The last oncurve point on this closed contour is a "line" segment and has
# same coordinates as the starting point.
ppen.beginPath()
ppen.addPoint((0, 651), segmentType="line")
ppen.addPoint((0, 101), segmentType="line")
ppen.addPoint((0, 101), segmentType="line")
ppen.addPoint((0, 651), segmentType="line")
ppen.endPath()
# Check that we always output an explicit 'lineTo' segment at the end,
# regardless of the value of 'outputImpliedClosingLine', to disambiguate
# the duplicate point from the implied closing line.
self.assertEqual(
"0 651 moveto "
"0 101 lineto "
"0 101 lineto "
"0 651 lineto "
"0 651 lineto "
"closepath",
repr(tpen)
)
def test_roundTrip2(self):
tpen = _TestPointPen()
ppen = PointToSegmentPen(SegmentToPointPen(tpen))
ppen.beginPath()
ppen.addPoint((0, 651), segmentType="line")
ppen.addPoint((0, 101), segmentType="line")
ppen.addPoint((0, 101), segmentType="line")
ppen.addPoint((0, 651), segmentType="line")
ppen.endPath()
self.assertEqual(
"beginPath() "
"addPoint((0, 651), segmentType='line') "
"addPoint((0, 101), segmentType='line') "
"addPoint((0, 101), segmentType='line') "
"addPoint((0, 651), segmentType='line') "
"endPath()",
repr(tpen)
)
class TestSegmentToPointPen(unittest.TestCase):
def test_move(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.moveTo((10, 10))
pen.endPath()
self.assertEqual("beginPath() addPoint((10, 10), segmentType='move') endPath()",
repr(tpen))
def test_poly(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.moveTo((10, 10))
pen.lineTo((10, 20))
pen.lineTo((20, 20))
pen.closePath()
self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
"addPoint((10, 20), segmentType='line') "
"addPoint((20, 20), segmentType='line') endPath()",
repr(tpen))
def test_cubic(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.moveTo((10, 10))
pen.curveTo((10, 20), (20, 20), (20, 10))
pen.closePath()
self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
"addPoint((10, 20)) addPoint((20, 20)) addPoint((20, 10), "
"segmentType='curve') endPath()", repr(tpen))
def test_quad(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.moveTo((10, 10))
pen.qCurveTo((10, 20), (20, 20), (20, 10))
pen.closePath()
self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') "
"addPoint((10, 20)) addPoint((20, 20)) "
"addPoint((20, 10), segmentType=qcurve) endPath()",
repr(tpen))
    def test_quad_onlyOffCurvePoints(self):
tpen = _TestPointPen()
pen = SegmentToPointPen(tpen)
pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None)
pen.closePath()
self.assertEqual("beginPath() addPoint((10, 20)) addPoint((20, 20)) "
"addPoint((20, 10)) addPoint((10, 10)) endPath()",
repr(tpen))
def test_roundTrip1(self):
spen = _TestSegmentPen()
pen = SegmentToPointPen(PointToSegmentPen(spen))
pen.moveTo((10, 10))
pen.lineTo((10, 20))
pen.lineTo((20, 20))
pen.closePath()
self.assertEqual("10 10 moveto 10 20 lineto 20 20 lineto closepath", repr(spen))
def test_roundTrip2(self):
spen = _TestSegmentPen()
pen = SegmentToPointPen(PointToSegmentPen(spen))
pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None)
pen.closePath()
pen.addComponent('base', [1, 0, 0, 1, 0, 0])
self.assertEqual("10 20 20 20 20 10 10 10 None qcurveto closepath "
"'base' [1, 0, 0, 1, 0, 0] addcomponent",
repr(spen))
class TestGuessSmoothPointPen(unittest.TestCase):
def test_guessSmooth_exact(self):
tpen = _TestPointPen()
pen = GuessSmoothPointPen(tpen)
pen.beginPath(identifier="foo")
pen.addPoint((0, 100), segmentType="curve")
pen.addPoint((0, 200))
pen.addPoint((400, 200), identifier='bar')
pen.addPoint((400, 100), segmentType="curve")
pen.addPoint((400, 0))
pen.addPoint((0, 0))
pen.endPath()
self.assertEqual("beginPath(identifier='foo') "
"addPoint((0, 100), segmentType='curve', smooth=True) "
"addPoint((0, 200)) addPoint((400, 200), identifier='bar') "
"addPoint((400, 100), segmentType='curve', smooth=True) "
"addPoint((400, 0)) addPoint((0, 0)) endPath()",
repr(tpen))
def test_guessSmooth_almost(self):
tpen = _TestPointPen()
pen = GuessSmoothPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 100), segmentType="curve")
pen.addPoint((1, 200))
pen.addPoint((395, 200))
pen.addPoint((400, 100), segmentType="curve")
pen.addPoint((400, 0))
pen.addPoint((0, 0))
pen.endPath()
self.assertEqual("beginPath() addPoint((0, 100), segmentType='curve', smooth=True) "
"addPoint((1, 200)) addPoint((395, 200)) "
"addPoint((400, 100), segmentType='curve', smooth=True) "
"addPoint((400, 0)) addPoint((0, 0)) endPath()",
repr(tpen))
def test_guessSmooth_tangent(self):
tpen = _TestPointPen()
pen = GuessSmoothPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.addPoint((0, 100), segmentType="line")
pen.addPoint((3, 200))
pen.addPoint((300, 200))
pen.addPoint((400, 200), segmentType="curve")
pen.endPath()
self.assertEqual("beginPath() addPoint((0, 0), segmentType='move') "
"addPoint((0, 100), segmentType='line', smooth=True) "
"addPoint((3, 200)) addPoint((300, 200)) "
"addPoint((400, 200), segmentType='curve') endPath()",
repr(tpen))
class TestReverseContourPointPen(unittest.TestCase):
def test_singlePoint(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((0, 0), segmentType='move') "
"endPath()",
repr(tpen))
def test_line(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.addPoint((0, 100), segmentType="line")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((0, 100), segmentType='move') "
"addPoint((0, 0), segmentType='line') "
"endPath()",
repr(tpen))
def test_triangle(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="line")
pen.addPoint((0, 100), segmentType="line")
pen.addPoint((100, 100), segmentType="line")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((0, 0), segmentType='line') "
"addPoint((100, 100), segmentType='line') "
"addPoint((0, 100), segmentType='line') "
"endPath()",
repr(tpen))
def test_cubicOpen(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.addPoint((0, 100))
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="curve")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((200, 200), segmentType='move') "
"addPoint((100, 200)) "
"addPoint((0, 100)) "
"addPoint((0, 0), segmentType='curve') "
"endPath()",
repr(tpen))
def test_quadOpen(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="move")
pen.addPoint((0, 100))
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="qcurve")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((200, 200), segmentType='move') "
"addPoint((100, 200)) "
"addPoint((0, 100)) "
"addPoint((0, 0), segmentType='qcurve') "
"endPath()",
repr(tpen))
def test_cubicClosed(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 0), segmentType="line")
pen.addPoint((0, 100))
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="curve")
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((0, 0), segmentType='curve') "
"addPoint((200, 200), segmentType='line') "
"addPoint((100, 200)) "
"addPoint((0, 100)) "
"endPath()",
repr(tpen))
def test_quadClosedOffCurveStart(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((100, 200))
pen.addPoint((200, 200), segmentType="qcurve")
pen.addPoint((0, 0), segmentType="line")
pen.addPoint((0, 100))
pen.endPath()
self.assertEqual("beginPath() "
"addPoint((100, 200)) "
"addPoint((0, 100)) "
"addPoint((0, 0), segmentType='qcurve') "
"addPoint((200, 200), segmentType='line') "
"endPath()",
repr(tpen))
def test_quadNoOnCurve(self):
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath(identifier='bar')
pen.addPoint((0, 0))
pen.addPoint((0, 100), identifier='foo', arbitrary='foo')
pen.addPoint((100, 200), arbitrary=123)
pen.addPoint((200, 200))
pen.endPath()
pen.addComponent("base", [1, 0, 0, 1, 0, 0], identifier='foo')
self.assertEqual("beginPath(identifier='bar') "
"addPoint((0, 0)) "
"addPoint((200, 200)) "
"addPoint((100, 200), arbitrary=123) "
"addPoint((0, 100), identifier='foo', arbitrary='foo') "
"endPath() "
"addComponent('base', [1, 0, 0, 1, 0, 0], identifier='foo')",
repr(tpen))
def test_closed_line_overlapping_start_end_points(self):
# Test case from https://github.com/googlefonts/fontmake/issues/572
tpen = _TestPointPen()
pen = ReverseContourPointPen(tpen)
pen.beginPath()
pen.addPoint((0, 651), segmentType="line")
pen.addPoint((0, 101), segmentType="line")
pen.addPoint((0, 101), segmentType="line")
pen.addPoint((0, 651), segmentType="line")
pen.endPath()
self.assertEqual(
"beginPath() "
"addPoint((0, 651), segmentType='line') "
"addPoint((0, 651), segmentType='line') "
"addPoint((0, 101), segmentType='line') "
"addPoint((0, 101), segmentType='line') "
"endPath()",
repr(tpen)
)
|
py | 7dfdf5a76efbcf4d2a27ddefbe836fc1cc9a076c | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .initial_state import InitialState
__all__ = ['InitialState']
|
py | 7dfdf6e9baa96472bf9caa35398c661c1d13d3c3 | from __future__ import print_function
from builtins import input
import os
import shutil
import logging
import re
import git
import socket
import time
from io import open
from knowledge_repo._version import __git_uri__
from ..post import KnowledgePost
from ..repository import KnowledgeRepository
from ..utils.exec_code import get_module_for_source
from ..utils.types import str_types
from ..utils.encoding import encode
logger = logging.getLogger(__name__)
class FolderKnowledgeRepository(KnowledgeRepository):
_registry_keys = ['', 'file']
TEMPLATES = {
'README.md': os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates', 'repository_readme.md')),
'.knowledge_repo_config.yml': os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates', 'repository_config.yml'))
}
@classmethod
def create(cls, uri, embed_tooling=False):
if uri.startswith('file://'):
uri = uri[len('file://'):]
path = os.path.abspath(uri)
if not os.path.exists(path):
os.makedirs(path)
# Add README and configuration templates
for filename, template in cls.TEMPLATES.items():
target = os.path.join(path, filename)
if not os.path.exists(target):
shutil.copy(template, target)
else:
logger.warning("Not overriding existing file '{}'.".format(filename))
return FolderKnowledgeRepository(path)
@classmethod
def from_uri(cls, uri, *args, **kwargs):
"""
If this folder is actually a git repository, a `GitKnowledgeRepository`
is returned instead, unless the folder knowledge repository is explicitly
requested via the 'file://' protocol.
"""
check_for_git = True
if uri.startswith('file://'):
check_for_git = False
uri = uri[len('file://'):]
if check_for_git and os.path.exists(os.path.join(uri, '.git')):
from .gitrepository import GitKnowledgeRepository
return GitKnowledgeRepository(uri, *args, **kwargs)
return cls(uri, *args, **kwargs)
def init(self, config='.knowledge_repo_config.yml', auto_create=False):
self.auto_create = auto_create
self.path = self.uri
self.config.update(os.path.join(self.path, config))
@property
def path(self):
return self._path
@path.setter
def path(self, path):
assert isinstance(path, str), "The path specified must be a string."
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
path = os.path.abspath(path)
if self.auto_create:
self.create(path)
else:
raise ValueError("Provided path '{}' does not exist.".format(path))
self._path = path
# ----------- Repository actions / state ------------------------------------
@property
def revision(self):
return time.time()
@property
def status(self):
return 'OK'
@property
def status_message(self):
return 'OK'
# ---------------- Post retrieval methods --------------------------------
def _dir(self, prefix, statuses):
posts = set()
if self.PostStatus.PUBLISHED in statuses:
for path, folders, files in os.walk(os.path.join(self.path, prefix or '')):
# Do not visit hidden folders
for folder in folders:
if folder.startswith('.'):
folders.remove(folder)
posts.update(
os.path.join(os.path.relpath(path, start=self.path), folder)
for folder in folders if folder.endswith('.kp')
)
posts.update(
os.path.join(os.path.relpath(path, start=self.path), file)
for file in files if file.endswith('.kp')
)
for post in sorted([post[2:] if post.startswith('./') else post for post in posts]):
yield post
# ------------- Post submission / addition user flow ----------------------
def _add_prepare(self, kp, path, update=False, **kwargs):
pass
def _add_cleanup(self, kp, path, update=False, **kwargs):
pass
def _submit(self, path=None, branch=None, force=False):
pass # Added posts are already submitted
def _publish(self, path): # Publish a post for general perusal
pass # Added posts are already published
def _unpublish(self, path): # unpublish a post for general perusal
raise NotImplementedError
def _accept(self, path): # Approve to publish a post for general perusal
pass
def _remove(self, path, all=False):
shutil.rmtree(os.path.join(self.path, path))
# ------------ Knowledge Post Data Retrieval Methods -------------------------
def _kp_uuid(self, path):
try:
return self._kp_read_ref(path, 'UUID')
except:
return None
def _kp_path(self, path, rel=None):
return KnowledgeRepository._kp_path(self, os.path.expanduser(path), rel=rel or self.path)
def _kp_exists(self, path, revision=None):
return os.path.exists(os.path.join(self.path, path))
def _kp_status(self, path, revision=None, detailed=False, branch=None):
return self.PostStatus.PUBLISHED
def _kp_get_revision(self, path):
# We use a 'REVISION' file in the knowledge post folder rather than using git
# revisions because using git rev-parse is slow.
try:
return int(self._kp_read_ref(path, 'REVISION'))
except:
return 0
def _kp_get_revisions(self, path):
raise NotImplementedError
def _kp_write_ref(self, path, reference, data, uuid=None, revision=None):
path = os.path.join(self.path, path)
if os.path.isfile(path):
kp = KnowledgePost.from_file(path, format='kp')
kp._write_ref(reference, data)
kp.to_file(path, format='kp')
else:
ref_path = os.path.join(path, reference)
ref_dir = os.path.dirname(ref_path)
if not os.path.exists(ref_dir):
os.makedirs(ref_dir)
with open(ref_path, 'wb') as f:
return f.write(data)
def _kp_dir(self, path, parent=None, revision=None): # TODO: Account for revision
path = os.path.join(self.path, path)
if os.path.isdir(path):
if parent:
path = os.path.join(path, parent)
for dirpath, dirnames, filenames in os.walk(os.path.join(self.path, path)):
for filename in filenames:
if dirpath == "" and filename == "REVISION":
continue
yield os.path.relpath(os.path.join(dirpath, filename), os.path.join(self.path, path))
else:
kp = KnowledgePost.from_file(path, format='kp')
for reference in kp._dir(parent=parent):
yield reference
def _kp_has_ref(self, path, reference, revision=None): # TODO: Account for revision
path = os.path.join(self.path, path)
if os.path.isdir(path):
return os.path.isfile(os.path.join(path, reference))
else:
kp = KnowledgePost.from_file(path, format='kp')
return kp._has_ref(reference)
def _kp_diff(self, path, head, base):
raise NotImplementedError
def _kp_new_revision(self, path, uuid=None):
self._kp_write_ref(path, "REVISION", encode(self._kp_get_revision(path) + 1))
if uuid:
self._kp_write_ref(path, "UUID", encode(uuid))
def _kp_read_ref(self, path, reference, revision=None):
path = os.path.join(self.path, path)
if os.path.isdir(path):
with open(os.path.join(self.path, path, reference), 'rb') as f:
return f.read()
else:
kp = KnowledgePost.from_file(path, format='kp')
return kp._read_ref(reference)
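# Illustrative usage sketch (not part of the original module): creating and
# listing a folder-backed repository with the class defined above. The /tmp
# path is a made-up example, and _dir() is the internal listing helper shown
# earlier; nothing here is called at import time.
def _example_folder_repo_usage():
    repo = FolderKnowledgeRepository.create('/tmp/example_knowledge_repo')
    for post_path in repo._dir(None, {repo.PostStatus.PUBLISHED}):
        print(post_path)
    return repo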
|
py | 7dfdf76830ab71996d70cb5c873ea75d4e041b87 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys, os, time
import argparse
try: import queue
except ImportError: import Queue as queue
curdir = os.path.dirname(os.path.abspath(sys.argv[0]))
try:
import ust_misc as um
except ImportError:
libdirs = [d for d in (os.path.join(curdir, '..', 'lib'), \
os.path.join(curdir, 'python', 'lib')) \
if os.path.isdir(d)]
if libdirs: sys.path.append(libdirs[0])
import ust_misc as um
import ust_target as ut
def create_session(usb_clk_ghz = 1.0 / 26): # {{{
um.cl_args = sys.argv[1:]
um.check_udagent_env(curdir)
session = ut.UstSession()
session.start()
session.set_udb_clk_freq(usb_clk_ghz)
session.apply_initial_configuration()
return session
# }}} def create_session
s = None  # create_session()
class tinn(): # {{{
'''Magic constants and addresses copied from tinn.h
'''
PATH_ACPU_ELF = "riscv/temp/tinn.acpu.x"
PATH_SCPU_ELF = "riscv/temp/tinn.scpu.x"
PATH_ACPU_BIN = "riscv/bin/tinn.acpu.bin"
PATH_SCPU_BIN = "riscv/bin/tinn.scpu.bin"
PATH_ASCIIDATASET = "resources/apps/tinn/semeion.data"
PATH_SI_LOG = "log.si.txt"
PATH_SI_VCD = "tinn1.vcd"
# Initial values for PC set by session.get_jpam(initialise=True) using
# values from session._cpu_start_addr()
_START_ACPU = 0x60000000
_START_SCPU = 0x70000000
# Magic location for each core to signal the host via load/stores.
HOSTFLAG_ACPU = 0x50000000
HOSTFLAG_SCPU = 0x50000008
# Address of Inter-Core Communication flags controlling run state.
TOSLV_CMD = 0x50001000
TOMST_REQ = 0x50001008
# Address of pointers to message Inter-Core Communication buffers.
TOSLV_BUFFADDR = 0x50001010
TOMST_BUFFADDR = 0x50001018
TOMST_BINBUFFADDR = 0x50001020
# ASCII data loaded into memory separately at known address.
ASCIIDATASET_ADDR = 0x51000000
TAYGETE_AXI_COMMUNICATOR= 0xC0000000
TAYGETE_STATIC_INSTR = 0xC0010000
TAYGETE_VIRTUAL_CONSOLE = 0xC0020000
# }}} class tinn
def toline_sinst_data(msg, axi_id_w=7, human=True): # {{{
'''Return string for a single STIN message.
'''
# Time reported as a hex string.
msgtime = int(msg.meta["time"], 16)
if human:
metastr = "%s@%d:" % (msg.module, msgtime)
else:
metastr = "%d" % msgtime
fields = {str(k):v.value for (k,v) in msg.fields.items()}
size = fields["size"] + 1
channel = fields["channel"]
event = fields["event"]
flag = fields["flag"]
chstr = (" ch%d" % channel) if human else (" %d" % channel)
flagstr = " FLAG" if flag else ""
if fields["id_present"]:
infrastructure_id = int(fields["data"] & (2**axi_id_w-1))
data = fields["data"] >> axi_id_w
mststr = " mst=%d" % infrastructure_id
else:
data = fields["data"]
mststr = ""
if event or flag:
datastr = ""
else:
data = data & (2**(size*8)-1)
datastr = (" data=0x%x" % data) if human else (" 0x%x" % data)
return "".join([metastr, chstr, flagstr, datastr, mststr])
# }}} def toline_sinst_data
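# Illustrative sketch (not part of the original script): what toline_sinst_data()
# produces for a hand-built message. The _FakeField/_FakeMsg shapes below are
# assumptions that only mimic the attributes the function actually touches
# (meta["time"], module, and fields[...].value); real messages come from the
# UltraSoC message stream.
def _example_toline_sinst_data():
    class _FakeField(object):
        def __init__(self, value):
            self.value = value
    class _FakeMsg(object):
        pass
    msg = _FakeMsg()
    msg.meta = {"time": "0x10"}   # time is carried as a hex string
    msg.module = "si1"
    msg.fields = {
        "size": _FakeField(3),    # size field holds bytes-1, so 4 data bytes
        "channel": _FakeField(1),
        "event": _FakeField(0),
        "flag": _FakeField(0),
        "id_present": _FakeField(0),
        "data": _FakeField(0xDEADBEEF),
    }
    # Returns "si1@16: ch1 data=0xdeadbeef"
    return toline_sinst_data(msg, human=True)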
def proc_sinst(fd=sys.stdout, maxnum=10000, keepother=True, session=s): # {{{
'''Print and discard SI data messages like si_readlines, and classify any
other type of message.
'''
seen_msgs = 0
while seen_msgs < maxnum:
try:
(meta, msg) = session.mstream.msgs.get(False)
msg.meta = meta
seen_msgs += 1
except queue.Empty:
break
if session._check_msg_type(msg, "sinst_data"):
print(toline_sinst_data(msg, human=False), file=fd)
elif keepother:
session._classify_msg(msg)
return seen_msgs
# }}} def proc_sinst
def si_readlines(fd=sys.stdout, session=s, axi_id_w=7): # {{{
'''Print lines from all SI (Static Instrumentation) modules to an open file,
default append to STDOUT.
Returns a dict keyed by SI modules, each with a list of lines.
Line format is "<SI module>: <data line>".
E.g. To extract all lines from si1 the client should read all of the SI
lines with something like:
log = open("log.si.txt", 'w')
si_readlines(log, session)
... maybe more calls to si_readlines ...
log.close()
And the host may do something like this:
grep 'si1: ' log.si.txt
'''
cs = session.MSG_CLASS_SI_DATA
cmsgs = session.cmsgs
si_modules = [md for md in sorted(cmsgs.keys()) if cs in cmsgs[md]]
ret = {}
for md in si_modules:
#ret[md] = cmsgs[md][cs]
ret[md] = cmsgs[md].pop(cs)
if fd is not None:
for md,msgs in ret.items():
for msg in msgs:
print(toline_sinst_data(msg, axi_id_w=axi_id_w), file=fd)
fd.flush()
os.fsync(fd)
return ret
# }}} def si_readlines
def vc_readlines(fd=sys.stdout, session=s): # {{{
'''Print lines from all VC (Virtual Console) modules to an open file,
default append to STDOUT.
Returns a dict keyed by VC channels, each with a list of lines.
Line format is "<VC module>.<channel number>: <printf'ed line>".
This is similar to ust_target.py:UstSession.handle_vc() except the output
can be split by grepping on the first chararcters.
E.g. To extract all lines from vc1 channel 0 (ACPU on Taygete) the client
should read all of the VC lines with something like:
log = open("log.vc.txt", 'w')
_ = vc_readlines(log, session)
... maybe more calls to vc_readlines ...
log.close()
And the host may do something like this:
grep 'vc1.0: ' log.vc.txt
'''
ret = {}
for (m, ch) in sorted(session.vci.keys()):
vci = session.vci[(m, ch)]
s = (vci.vc_string + session.vcread(m, ch)).replace("\r", "")
lines = s.split("\n")
vci.vc_string = lines.pop()
chnm = "{}.{}".format(m, ch)
ret[chnm] = lines
if fd is not None:
for chnm,lines in ret.items():
for line in lines:
print(chnm + ": " + line, file=fd)
fd.flush()
os.fsync(fd)
return ret
# }}} def vc_readlines
def pprint_rsp(rsp): # {{{
if type(rsp) is list:
msgs = rsp
elif type(rsp) is ut.msg_codec.MsgObj:
msgs = [rsp]
else:
assert False, rsp
import pprint
for m in msgs:
assert type(m) is ut.msg_codec.MsgObj
pprint.pprint(m.fields.items())
# }}} def pprint_rsp
def get_msg(md="foo", msg="get_bar", fields={}, session=s): # {{{
rsp = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, msg))
return rsp
# }}} def get_msg
def write_si_mailbox(session=s, # {{{
md="si1",
channel=0,
mailbox=0,
wrdata=0xDEADBEEF,
module_addr=tinn.TAYGETE_STATIC_INSTR,
verbose=True):
rsp_discovery = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "discovery_request"))[0].fields.items()
base_addr = [d.value for s,d in rsp_discovery if s == "base_addr"][0]
# UL-001174-TR-3B-Static Instrumentation User Guide.pdf
channel_address = base_addr + (0x200 * channel)
addrN = channel_address + (0x10 * mailbox)
final_addr = module_addr + addrN
is_timestamp = (mailbox & (1 << 0)) != 0
is_flag = (mailbox & (1 << 1)) != 0
is_marked = (mailbox & (1 << 2)) != 0
is_blocking = (mailbox & (1 << 3)) != 0
is_event = (mailbox & (1 << 4)) != 0
if verbose:
print("write_si_mailbox() WRITE")
print(" md=%s" % md)
print(" channel=%d" % channel)
print(" mailbox=%d" % mailbox)
print(" wrdata=%s -> final_addr=%s" % (hex(wrdata), hex(final_addr)))
print(" module_addr=%s" % hex(module_addr))
print(" base_addr=%s" % hex(base_addr))
print(" channel_address=%s" % hex(channel_address))
print(" addrN=%s" % hex(addrN))
print(" TIMESTAMP=%s" % is_timestamp)
print(" FLAG=%s" % is_flag)
print(" MARKED=%s" % is_marked)
print(" BLOCKING=%s" % is_blocking)
print(" EVENT=%s" % is_event)
session.dma_write32(final_addr, wrdata)
if verbose:
print(" SI MODULE CONFIG")
channels_m1 = [d.value for s,d in rsp_discovery if s == "channels"][0]
if channel <= channels_m1:
print(" channel=%d <= channels-1=%d" % (channel, channels_m1))
else:
print(" WARNING channel=%d > channels-1=%d" % (channel, channels_m1))
#
# enabled
#
rsp_enabled = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "get_enabled"))[0].fields.items()
module_enabled = [d.value for s,d in rsp_enabled if s == "module_enable"][0] != 0
if module_enabled:
print(" module enabled")
else:
print(" WARNING module disabled")
#
# event
#
if is_event:
eventnum = wrdata & 0xFF
event_select = 1 if eventnum > 128 else 0
maskindex = eventnum - (128 if event_select else 0)
rsp_event = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "get_event", {"select": event_select}))[0].fields.items()
event_mask = [d.value for s,d in rsp_event if s == "mask"][0]
event_enabled = (event_mask & (1 << maskindex)) != 0
if event_enabled:
print(" event %d (from LSByte of wrdata) enabled" % eventnum)
else:
print(" WARNING event %d (from LSByte of wrdata) disabled" % eventnum)
#
# power
#
rsp_power = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "get_power"))[0].fields.items()
clk_disable = [d.value for s,d in rsp_power if s == "clk_disable"][0] != 0
if clk_disable:
print(" internal clock gating enabled")
else:
print(" WARNING internal clock gating disabled")
#
# sinst
#
rsp_sinst = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "get_sinst"))[0].fields.items()
sys_flow = [d.value for s,d in rsp_sinst if s == "sys_flow"][0]
inst_flow = [d.value for s,d in rsp_sinst if s == "inst_flow"][0]
non_blocking_throttle_level = [d.value for s,d in rsp_sinst if s == "non_blocking_throttle_level"][0]
blocking_throttle_level = [d.value for s,d in rsp_sinst if s == "blocking_throttle_level"][0]
sys_timestamp = [d.value for s,d in rsp_sinst if s == "sys_timestamp"][0]
inst_timestamp_override = [d.value for s,d in rsp_sinst if s == "inst_timestamp_override"][0]
enable_event = [d.value for s,d in rsp_sinst if s == "enable_event"][0]
disable_event = [d.value for s,d in rsp_sinst if s == "disable_event"][0]
enable_event_control = [d.value for s,d in rsp_sinst if s == "enable_event_control"][0] != 0
disable_event_control = [d.value for s,d in rsp_sinst if s == "disable_event_control"][0] != 0
if enable_event_control:
print(" WARNING module_enable enabled by event %d" % enable_event)
if disable_event_control:
print(" WARNING module_enable disabled by event %d" % disable_event)
mst_id_capture_en = [d.value for s,d in rsp_sinst if s == "mst_id_capture_en"][0]
mst_id_filter_lo = [d.value for s,d in rsp_sinst if s == "mst_id_filter_lo"][0]
mst_id_filter_hi = [d.value for s,d in rsp_sinst if s == "mst_id_filter_hi"][0]
axis_id_width_p = [d.value for s,d in rsp_discovery if s == "axi_id"][0] + 1
max_id = 2**axis_id_width_p - 1
if mst_id_filter_lo != 0 or mst_id_filter_hi != max_id:
print(" WARNING (mst_id_filter_lo, mst_id_filter_hi)=(%d, %d)"
" but max range is (0, %d)" % (mst_id_filter_lo, mst_id_filter_hi, max_id))
tx_timeout = [d.value for s,d in rsp_sinst if s == "tx_timeout"][0]
if tx_timeout != 0:
print(" WARNING tx_timeout=%d so blocking messages may time out" % tx_timeout)
#
# sinst_enables
#
group, chnumber = divmod(channel, 32)
rsp_sinst_enables = session.send_get_msg_with_response(md,
session.blank_us_ctrl_fields(md, "get_sinst_enables", {"group_index": group}))[0].fields.items()
enable_map = [d.value for s,d in rsp_sinst_enables if s == "enable_map"][0]
channel_enabled = (enable_map & (1 << chnumber)) != 0
if channel_enabled:
print(" channel %d in group %d at chnumber %d enabled" % (channel, group, chnumber))
else:
print(" WARNING channel %d in group %d at chnumber %d disabled" % (channel, group, chnumber))
# }}} def write_si_mailbox
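# Worked example of the mailbox address arithmetic used above (illustrative
# values only, not taken from real hardware): with base_addr=0x1000, channel=2
# and mailbox=0x13 (0b10011 -> TIMESTAMP + FLAG + EVENT bits set),
# channel_address = 0x1000 + 0x200*2 = 0x1400, addrN = 0x1400 + 0x10*0x13 = 0x1530,
# so the 32-bit write lands at module_addr + 0x1530.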
def si_set_sinst(session=s, # {{{
md="si1",
sys_flow=0,
inst_flow=0,
non_blocking_throttle_level="never",
blocking_throttle_level="never",
sys_timestamp=1,
inst_timestamp_override=0,
enable_event=0,
disable_event=0,
enable_event_control=0,
disable_event_control=0,
mst_id_capture_en=1,
mst_id_filter_lo=0,
mst_id_filter_hi=-1,
tx_timeout=0):
fields = {"control_code": "set_sinst",
"sys_flow": sys_flow,
"inst_flow": inst_flow,
"non_blocking_throttle_level": non_blocking_throttle_level,
"blocking_throttle_level": blocking_throttle_level,
"sys_timestamp": sys_timestamp,
"inst_timestamp_override": inst_timestamp_override,
"enable_event": enable_event,
"disable_event": disable_event,
"enable_event_control": enable_event_control,
"disable_event_control": disable_event_control,
"mst_id_capture_en": mst_id_capture_en,
"mst_id_filter_lo": mst_id_filter_lo,
"mst_id_filter_hi": mst_id_filter_hi,
"tx_timeout": tx_timeout}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_sinst"))
return rsp
# }}} def si_set_sinst
def si_set_enabled(session=s, # {{{
md="si1",
operation="apply",
module_enable=1):
fields = {"control_code": "set_enabled",
"operation": operation,
"module_enable": module_enable}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_enabled"))
return rsp
# }}} def si_set_enabled
def si_set_sinst_enables(session=s, # {{{
md="si1",
operation="apply",
group_index=0,
enable_map=(2**32-1)):
fields = {"control_code": "set_sinst_enables",
"operation": operation,
"group_index": group_index,
"enable_map": enable_map}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_sinst_enables"))
return rsp
# }}} def si_set_sinst_enables
def si_set_event(session=s, # {{{
md="si1",
operation="apply",
select=0,
mask=(2**128-1)):
fields = {"control_code": "set_event",
"operation": operation,
"select": select,
"mask": mask}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_event", {"select": select}))
return rsp
# }}} def si_set_event
def me_set_enabled(session=s, # {{{
md="rme1",
operation="apply",
upper_enable=(2**16-1),
upper_ingress_enable=(2**16-1)):
fields = {"control_code": "set_enabled",
"operation": operation,
"upper_enable": upper_enable,
"upper_ingress_enable": upper_ingress_enable}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_enabled"))
return rsp
# }}} def me_set_enabled
def me_set_event(session=s, # {{{
md="rme1",
operation="apply",
pathgroup=0,
pathway=0,
overflow=0,
select=0,
mask=(2**128-1)):
fields = {"control_code": "set_event",
"operation": operation,
"pathgroup": pathgroup,
"pathway": pathway,
"overflow": overflow,
"select": select,
"mask": mask}
session.mstream.send_msg(md, "system_control", fields)
rsp = session.send_get_msg_with_response(md, s.blank_us_ctrl_fields(md, "get_event", {"select": select}))
return rsp
# }}} def me_set_event
def run_tinn(): # {{{
print("Setting up UltraSoC infrastructure...", end='')
# Enable Static Instrumentation module.
si1_rsp_enabled = si_set_enabled(module_enable=0)
si1_rsp_sinst = si_set_sinst()
si1_rsp_event0 = si_set_event(select=0)
si1_rsp_event1 = si_set_event(select=1)
si1_rsp_sinst_enables = si_set_sinst_enables()
si1_rsp_msg_params = get_msg("si1", "get_msg_params")
# Enable Message Engine module.
rme1_rsp_discovery = get_msg("rme1", "discovery_request")
rme1_rsp_msg_params = get_msg("rme1", "get_msg_params")
rme1_rsp_route = get_msg("rme1", "get_route")
rme1_rsp_enabled = me_set_enabled(s)
rme1_rsp_event0 = me_set_event(s, select=0)
rme1_rsp_event1 = me_set_event(s, select=1)
print("DONE")
print("Copying semeion.data... ", end='')
print("%d bytes DONE" %
s.dma_write(tinn.ASCIIDATASET_ADDR, tinn.PATH_ASCIIDATASET))
print("Copying ACPU binary... ", end='')
print("%d bytes DONE" %
s.dma_write(tinn._START_ACPU, tinn.PATH_ACPU_BIN))
print("Copying SCPU binary... ", end='')
print("%d bytes DONE" %
s.dma_write(tinn._START_SCPU, tinn.PATH_SCPU_BIN))
# Initialise magic IPC locations, as specified in tinn_taygete.h
print("Initialising IPC locations... ", end='')
s.dma_write32(tinn.HOSTFLAG_ACPU, 0)
s.dma_write32(tinn.HOSTFLAG_SCPU, 0)
s.dma_write32(tinn.TOSLV_CMD, 0)
s.dma_write32(tinn.TOMST_REQ, 0)
s.dma_write32(tinn.TOSLV_BUFFADDR, 0xdeadbeef)
s.dma_write32(tinn.TOMST_BUFFADDR, 0xdeadbeef)
print("DONE")
print("Getting CPU handles...")
acpu = s.get_jpam("ACPU", xlen=64, initialise=True)
scpu = s.get_jpam("SCPU", xlen=64, initialise=True)
print("DONE")
print("Running CPUs...")
acpu.hart_run()
scpu.hart_run()
print("DONE")
print("Starting tinn... ", end='')
try:
vc_logfile = open("log.vc.txt", 'w')
except:
vc_logfile = sys.stdout
try:
si_logfile = open(tinn.PATH_SI_LOG, 'w')
except:
si_logfile = sys.stdout
# Arbitrary time delay before starting to record, to reduce the amount of quite
# boring data collected during the training phase.
time.sleep(120) # 2m0s
# Enable SI now that sinst_data sink is available.
si1_rsp_enabled = si_set_enabled(module_enable=1)
print("DONE")
# Print VC lines until some infers have been done, checking magic address.
ts = time.time() # Time start
tn = ts # Time now (this loop iteration, initialization)
while (s.dma_read32(tinn.HOSTFLAG_ACPU) < 200):
tp = tn # Time previous (previous loop iteration)
tn = time.time() # Time now (this loop iteration)
if (tn - tp) < 1: # Wait at least a second between iterations.
time.sleep(1)
tn = time.time()
assert (tn - tp) >= 1
tds = tn - ts # Time difference since start
proc_sinst(si_logfile)
vc_readlines(vc_logfile)
print("Running tinn... %fs" % tds, end='\r')
sys.stdout.flush()
print("Ran tinn for %fs" % tds)
print("Stopping CPUs...")
# Disable module to stop more messages flooding out.
si_set_enabled(module_enable=0)
try:
acpu.hart_halt()
scpu.hart_halt()
except:
pass
print("DONE")
print("Closing logfiles... ", end='')
vc_readlines(vc_logfile)
proc_sinst(si_logfile)
vc_logfile.close()
si_logfile.close()
print("DONE")
# }}} def run_tinn
def si_logfile_to_vcd(): # {{{
def reduce_time(t, factor=130):
assert isinstance(t, int)
ret = int(t / factor) * factor
assert isinstance(ret, int)
return ret
print("Sorting SI logfile... ", end='')
try:
os.system("sort -n -o %s %s" % (tinn.PATH_SI_LOG, tinn.PATH_SI_LOG))
except:
pass
print("DONE")
print("Writing VCD header... ", end='')
# Look at every line and find all unique data values keeping count of how
# often each appears.
# Also find list of all signals which change at the first time.
dv_cnts = {}
t0_dvs = []
t0 = None
with open(tinn.PATH_SI_LOG, 'r') as fd:
for line in fd:
t, c, d = line.split()[:3]
dv = int(d, 16)
dv_cnts[dv] = dv_cnts.setdefault(dv, 0) + 1
tv = reduce_time(int(t))
if t0 == None:
t0 = tv
if tv <= t0:
t0_dvs.append(dv)
assert 0 < len(dv_cnts)
assert 0 < len(t0_dvs)
assert isinstance(t0, int)
# List of unique data values sorted by descending number of appearances.
dvs = [vc[0] for vc in \
sorted(dv_cnts.items(), key=lambda vc: vc[1], reverse=True)]
def int2varid(x): # {{{
assert type(x) is int
assert x >= 0
# Each variable is assigned an arbitrary, compact ASCII identifier for
# use in the value change section. The identifier is composed of
# printable ASCII characters from ! to ~ (decimal 33 to 126).
numerals = ''.join(chr(i) for i in range(33, 127))
base = len(numerals)
if x == 0:
return numerals[0]
r = []
while x:
r.append(numerals[x % base])
x //= base
r.reverse()
return ''.join(r)
# }}}
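# Worked examples of the identifier encoding above (base 94, digits '!'..'~'):
#   int2varid(0)  -> '!'
#   int2varid(93) -> '~'
#   int2varid(94) -> '"!'   (first two-character identifier)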
# Most frequently used data values have shorter varids.
dv_varids = {dv: int2varid(i) for i,dv in enumerate(dvs)}
# Use nm to generate {<dv>: <symbolname>}
# NOTE: If using polymorphic functions --demangle will make them all look
# like the same thing which may or may not be desirable.
tool = "$RISCV/bin/riscv64-unknown-elf-nm --demangle --no-sort"
symboltable = "tinn.symbols.txt"
cmdfmt = tool + " %s >> " + symboltable
# readelf could be used instead of nm
# c++filt could be used instead of --demangle
try:
os.remove(symboltable)
except:
pass
try:
os.system(cmdfmt % tinn.PATH_ACPU_ELF)
os.system(cmdfmt % tinn.PATH_SCPU_ELF)
except:
assert False, "Cannot create symboltable %s" % symboltable
# For each address find the corresponding function name and use that as
# signal name.
dv_nms = {}
with open(symboltable, 'r') as fd:
for line in fd:
# Line format for nm symbol table.
# 000000006000c4d4 T abort
# 00000000700014c8 t fprop(Tinn*, float const*)
# 0000000070014f88 r initDataSet(DataItem*)::__PRETTY_FUNCTION__
# 0000000070003096 T std::type_info::~type_info()
d, _t, n = line.split()[:3]
dv = int(d, 16)
# Only extract names for recorded data values.
if dv not in dvs: continue
if 0x60000000 <= dv <= 0x6fffffff:
sec = "acpu."
elif 0x70000000 <= dv <= 0x7fffffff:
sec = "scpu."
else:
sec = "misc."
# Remove function arg types from demangled name.
# Replace non-alphanum characters with underscores.
nm = ''.join(c if c.isalnum() else '_' for c in n.split('(')[0])
dv_nms[dv] = sec + nm
# Assign names to any remaining data values.
# NOTE: If everything is working as expected then this loop will do nothing.
# If you're getting lots of unknown names then the ELF may be out of date or
# something else has gone wrong with the riscv tool.
for dv in dvs:
if dv not in dv_nms.keys():
dv_nms[dv] = "unknown." + hex(dv)
with open(tinn.PATH_SI_VCD, 'w') as fd:
put = fd.write
put("$comment UltraSoC Taygete ZC706 platform running Tinn $end\n")
put("$date %s $end\n" % str(time.ctime()))
put("$timescale 1s $end\n")
put("$scope module tinn $end\n")
for dv in dvs:
nm = dv_nms[dv]
varid = dv_varids[dv]
put("$var wire 1 %s %s $end\n" % (dv_varids[dv], dv_nms[dv]))
put("$upscope $end\n")
put("$enddefinitions $end\n")
print("DONE")
print("Writing VCD body... ", end='')
# For each line use dict of varids keyed by addresses to append time and
# change to VCD body. ch0->0 (exit), ch1->1 (enter)
with open(tinn.PATH_SI_LOG, 'r') as fd_log, \
open(tinn.PATH_SI_VCD, 'a') as fd_vcd:
put = fd_vcd.write
# Zero-initialize all signals.
# NOTE: This will cause some misrepresentation for any signals which
# were actually non-zero at the beginning of recording.
put("#%d\n" % t0)
for dv in dvs:
if dv not in t0_dvs:
varid = dv_varids[dv]
put("b0 %s\n" % varid)
for line in fd_log:
t, c, d = line.split()[:3]
tv = reduce_time(int(t))
put("#%d\n" % tv)
dv = int(d, 16)
varid = dv_varids[dv]
if c == '0': cv = '0'
elif c == '1': cv = '1'
else: cv = 'x'
put("b%s %s\n" % (cv, varid))
print("DONE")
# }}} def si_logfile_to_vcd
#run_tinn()
si_logfile_to_vcd()
#print("""
#=== Interactive target session ===
# Use created session object. Example use:
# s.dma_read32(<address>) <- Read 32-bit word/register (via DMA)
# s.dma_write32(<address>,<value>) <- Write 32-bit word/register (via DMA)
# s.dma_write(<address>,<filename>) <- Write file to memory (via DMA)
# s.load_and_run_riscv(<binfile>,<CPU>)
# <- Load binary file to memory and run RISC-V hooked up to CPU
# s.run_riscv_app(<app_name>) <- Load and run binary files on all CPUs, <app_name> can be one of:
# {0}
# s.handle_vc() <- Read data from console and dump it to screen
# scpu.read_gpc("sp") <- Read stack pointer (GPR)
# scpu.read_dpc() <- Read PC
# scpu.read_csr("mcause") <- Read mcause (CSR)
#""".format(("\n" + " "*50).join(['"'+app+'"' for app in .find_all_binapps()])))
|
py | 7dfdf76bb13c44cfff3324e16598645a3de87ba9 | import json
import logging
import math
from ckan.common import c, request
from ckan.lib.base import render
from ckan.logic import get_action
from flask import Blueprint
from .helpers import advancedsearch_schema, field_options, query_helper
log = logging.getLogger(__name__)
bp_search = Blueprint('advanced_search_blueprint', __name__)
def get_blueprints():
return [
bp_search,
]
@bp_search.route('/advanced_search', methods=['GET', 'POST'])
def search():
from ckan import model
schema = advancedsearch_schema()
context = {
'model': model,
'session': model.Session,
'user': c.user
}
# On initial page load there is no page parameter so display the first page
# On possible page navigations use the page parameter to move to the next page
# NOTE: this works also with a GET request but the POST filters will not be submitted so all datasets will be returned
page = int(request.params.get('page', 1))
# Limit amount of results returned
limit = 20
search_query_filters = []
q = ''
main_query_field = schema['main_query_field']
options = {}
for key, val in schema['input_fields'].items():
# Skip field used for main query
if key == main_query_field:
continue
# Make a list of field options
options[key] = field_options(val)
if request.method == 'POST':
# Use the field labelled as the main_query to build the value for q
# TODO: Handle no main_query_field provided
main_query_helper = query_helper(schema['input_fields'].get(main_query_field))
q = main_query_helper(main_query_field, request.form, schema['input_fields'], context)
# Iterate through all fields in schema except the main_query_field
# and process every field with the provided query_helper
for key, val in schema['input_fields'].items():
# Skip field used for main query
if key == main_query_field:
continue
# Get query helper function from schema
query_helper_function = query_helper(val)
# TODO: handle no query_helper
if query_helper_function:
res = query_helper_function(key, request.form, schema['input_fields'], context)
if res:
search_query_filters.append(res)
sort_string = request.form.get('sort', 'metadata_created desc')
data_dict = {
'q': q,
'rows': limit,
'start': (page - 1) * limit,
'extras': {},
'sort': sort_string,
'defType': 'edismax',
'mm': 0
}
if search_query_filters:
# Outputs: (filter:value) AND (another_filter:another_value)
data_dict['fq'] = '(' + ') AND ('.join(search_query_filters) + ')'
query = get_action('package_search')(context, data_dict)
json_query = json.dumps(
{k: v for k, v in list(params_to_dict(request.form).items()) if k != 'page' and type(v) is list and len(v[0]) > 0}
)
filters = {
k: v for k, v in list(params_to_dict(request.form).items()) if k != 'search_target' and k != 'search_query'
and k != 'page' and k != 'released-before' and k != 'released-after' and k != 'updated-before'
and k != 'updated-after' and k != 'sort' and type(v) is list and len(v[0]) > 0
}
for key, value in filters.items():
if 'all' in value:
filters[key] = [{'value': 'all', 'label': 'All'}]
continue
if options and options[key]:
options_list = []
for option in value:
x = next((x for x in options[key] if x.get('value') == option), None)
if x:
options_list.append(x)
filters[key] = options_list
c.advanced_search = {
"item_count": query['count'],
# Round values up to get total amount of pages
"total_pages": int(math.ceil(float(query['count']) / float(limit))),
"collection": query['results'],
# Return query parameters to the UI so that it can populate the fields with the previous query values
# NOTE: Can this cause security issues? Returning POST request params back to the client
"last_query": params_to_dict(request.form),
"json_query": json_query,
"filters": filters,
"sort_string": sort_string,
"field_options": options
}
c.advanced_search['last_query']['page'] = page
return render('advanced_search/index.html')
def params_to_dict(params):
new_dict = {}
for i in params:
key = i
if not hasattr(new_dict, key):
value = params.getlist(i)
new_dict.setdefault(key, value)
return new_dict
|
py | 7dfdf76c75bab4942b6fee77d49831bbd923431c | # python library to interface with panda
import datetime
import binascii
import struct
import hashlib
import socket
import usb1
import os
import time
import traceback
import subprocess
from .dfu import PandaDFU
from .esptool import ESPROM, CesantaFlasher # noqa: F401
from .flash_release import flash_release # noqa: F401
from .update import ensure_st_up_to_date # noqa: F401
from .serial import PandaSerial # noqa: F401
from .isotp import isotp_send, isotp_recv
__version__ = '0.0.9'
BASEDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
DEBUG = os.getenv("PANDADEBUG") is not None
# *** wifi mode ***
def build_st(target, mkfile="Makefile", clean=True):
from panda import BASEDIR
clean_cmd = "make -f %s clean" % mkfile if clean else ":"
cmd = 'cd %s && %s && make -f %s %s' % (os.path.join(BASEDIR, "board"), clean_cmd, mkfile, target)
try:
_ = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError:
raise
def parse_can_buffer(dat):
ret = []
for j in range(0, len(dat), 0x10):
ddat = dat[j:j+0x10]
f1, f2 = struct.unpack("II", ddat[0:8])
extended = 4
if f1 & extended:
address = f1 >> 3
else:
address = f1 >> 21
dddat = ddat[8:8+(f2&0xF)]
if DEBUG:
print(" R %x: %s" % (address, binascii.hexlify(dddat)))
ret.append((address, f2>>16, dddat, (f2>>4)&0xFF))
return ret
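# Illustrative sketch (not part of the original library): building one 16-byte
# CAN record by hand and decoding it with parse_can_buffer. The address, bus and
# payload are made-up values; the record layout (rir word, length|bus word, data)
# mirrors the packing used later in Panda.can_send_many.
def _example_parse_can_buffer():
    addr, bus, dat = 0x123, 0, b"\x01\x02\x03\x04\x05\x06\x07\x08"
    rir = addr << 21   # standard 11-bit address, extended/transmit bits clear
    rec = struct.pack("II", rir, len(dat) | (bus << 4)) + dat
    # Returns [(0x123, 0, b'\x01\x02\x03\x04\x05\x06\x07\x08', 0)]
    return parse_can_buffer(rec)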
class PandaWifiStreaming(object):
def __init__(self, ip="192.168.0.10", port=1338):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setblocking(0)
self.ip = ip
self.port = port
self.kick()
def kick(self):
# must be called at least every 5 seconds
self.sock.sendto("hello", (self.ip, self.port))
def can_recv(self):
ret = []
while True:
try:
dat, addr = self.sock.recvfrom(0x200*0x10)
if addr == (self.ip, self.port):
ret += parse_can_buffer(dat)
except socket.error as e:
if e.errno != 35 and e.errno != 11:
traceback.print_exc()
break
return ret
# stupid tunneling of USB over wifi and SPI
class WifiHandle(object):
def __init__(self, ip="192.168.0.10", port=1337):
self.sock = socket.create_connection((ip, port))
def __recv(self):
ret = self.sock.recv(0x44)
length = struct.unpack("I", ret[0:4])[0]
return ret[4:4+length]
def controlWrite(self, request_type, request, value, index, data, timeout=0):
# ignore data in reply, panda doesn't use it
return self.controlRead(request_type, request, value, index, 0, timeout)
def controlRead(self, request_type, request, value, index, length, timeout=0):
self.sock.send(struct.pack("HHBBHHH", 0, 0, request_type, request, value, index, length))
return self.__recv()
def bulkWrite(self, endpoint, data, timeout=0):
if len(data) > 0x10:
raise ValueError("Data must not be longer than 0x10")
self.sock.send(struct.pack("HH", endpoint, len(data))+data)
self.__recv() # to /dev/null
def bulkRead(self, endpoint, length, timeout=0):
self.sock.send(struct.pack("HH", endpoint, 0))
return self.__recv()
def close(self):
self.sock.close()
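# Worked example of the framing above (illustrative values only): a controlRead
# of 0x40 bytes for request 0xd6 goes out as the 12-byte header
# struct.pack("HHBBHHH", 0, 0, request_type, 0xd6, 0, 0, 0x40), and __recv()
# reads up to 0x44 bytes back, taking the first 4 bytes as a little-endian
# length and returning only that many payload bytes.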
# *** normal mode ***
class Panda(object):
# matches cereal.car.CarParams.SafetyModel
SAFETY_SILENT = 0
SAFETY_HONDA_NIDEC = 1
SAFETY_TOYOTA = 2
SAFETY_ELM327 = 3
SAFETY_GM = 4
SAFETY_HONDA_BOSCH_GIRAFFE = 5
SAFETY_FORD = 6
SAFETY_CADILLAC = 7
SAFETY_HYUNDAI = 8
SAFETY_CHRYSLER = 9
SAFETY_TESLA = 10
SAFETY_SUBARU = 11
SAFETY_BMW = 12
SAFETY_MAZDA = 13
SAFETY_NISSAN = 14
SAFETY_VOLKSWAGEN_MQB = 15
SAFETY_ALLOUTPUT = 17
SAFETY_GM_ASCM = 18
SAFETY_NOOUTPUT = 19
SAFETY_HONDA_BOSCH_HARNESS = 20
SAFETY_SUBARU_LEGACY = 22
SERIAL_DEBUG = 0
SERIAL_ESP = 1
SERIAL_LIN1 = 2
SERIAL_LIN2 = 3
GMLAN_CAN2 = 1
GMLAN_CAN3 = 2
REQUEST_IN = usb1.ENDPOINT_IN | usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE
REQUEST_OUT = usb1.ENDPOINT_OUT | usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE
HW_TYPE_UNKNOWN = b'\x00'
HW_TYPE_WHITE_PANDA = b'\x01'
HW_TYPE_GREY_PANDA = b'\x02'
HW_TYPE_BLACK_PANDA = b'\x03'
HW_TYPE_PEDAL = b'\x04'
HW_TYPE_UNO = b'\x05'
def __init__(self, serial=None, claim=True):
self._serial = serial
self._handle = None
self.connect(claim)
def close(self):
self._handle.close()
self._handle = None
def connect(self, claim=True, wait=False):
if self._handle != None:
self.close()
if self._serial == "WIFI":
self._handle = WifiHandle()
print("opening WIFI device")
self.wifi = True
else:
context = usb1.USBContext()
self._handle = None
self.wifi = False
while 1:
try:
for device in context.getDeviceList(skip_on_error=True):
#print(device)
if device.getVendorID() == 0xbbaa and device.getProductID() in [0xddcc, 0xddee]:
try:
this_serial = device.getSerialNumber()
except Exception:
continue
if self._serial is None or this_serial == self._serial:
self._serial = this_serial
print("opening device", self._serial, hex(device.getProductID()))
time.sleep(1)
self.bootstub = device.getProductID() == 0xddee
self.legacy = (device.getbcdDevice() != 0x2300)
self._handle = device.open()
if claim:
self._handle.claimInterface(0)
#self._handle.setInterfaceAltSetting(0, 0) #Issue in USB stack
break
except Exception as e:
print("exception", e)
traceback.print_exc()
if wait == False or self._handle != None:
break
context = usb1.USBContext() #New context needed so new devices show up
assert(self._handle != None)
print("connected")
def reset(self, enter_bootstub=False, enter_bootloader=False):
# reset
try:
if enter_bootloader:
self._handle.controlWrite(Panda.REQUEST_IN, 0xd1, 0, 0, b'')
else:
if enter_bootstub:
self._handle.controlWrite(Panda.REQUEST_IN, 0xd1, 1, 0, b'')
else:
self._handle.controlWrite(Panda.REQUEST_IN, 0xd8, 0, 0, b'')
except Exception:
pass
if not enter_bootloader:
self.reconnect()
def reconnect(self):
self.close()
time.sleep(1.0)
success = False
# wait up to 15 seconds
for i in range(0, 15):
try:
self.connect()
success = True
break
except Exception:
print("reconnecting is taking %d seconds..." % (i+1))
try:
dfu = PandaDFU(PandaDFU.st_serial_to_dfu_serial(self._serial))
dfu.recover()
except Exception:
pass
time.sleep(1.0)
if not success:
raise Exception("reconnect failed")
@staticmethod
def flash_static(handle, code):
# confirm flasher is present
fr = handle.controlRead(Panda.REQUEST_IN, 0xb0, 0, 0, 0xc)
assert fr[4:8] == b"\xde\xad\xd0\x0d"
# unlock flash
print("flash: unlocking")
handle.controlWrite(Panda.REQUEST_IN, 0xb1, 0, 0, b'')
# erase sectors 1 through 3
print("flash: erasing")
for i in range(1, 4):
handle.controlWrite(Panda.REQUEST_IN, 0xb2, i, 0, b'')
# flash over EP2
STEP = 0x10
print("flash: flashing")
for i in range(0, len(code), STEP):
handle.bulkWrite(2, code[i:i+STEP])
# reset
print("flash: resetting")
try:
handle.controlWrite(Panda.REQUEST_IN, 0xd8, 0, 0, b'')
except Exception:
pass
def flash(self, fn=None, code=None, reconnect=True):
print("flash: main version is " + self.get_version())
if not self.bootstub:
self.reset(enter_bootstub=True)
assert(self.bootstub)
if fn is None and code is None:
if self.legacy:
fn = "obj/comma.bin"
print("building legacy st code")
build_st(fn, "Makefile.legacy")
else:
fn = "obj/panda.bin"
print("building panda st code")
build_st(fn)
fn = os.path.join(BASEDIR, "board", fn)
if code is None:
with open(fn, "rb") as f:
code = f.read()
# get version
print("flash: bootstub version is " + self.get_version())
# do flash
Panda.flash_static(self._handle, code)
# reconnect
if reconnect:
self.reconnect()
def recover(self, timeout=None):
self.reset(enter_bootloader=True)
t_start = time.time()
while len(PandaDFU.list()) == 0:
print("waiting for DFU...")
time.sleep(0.1)
if timeout is not None and (time.time() - t_start) > timeout:
return False
dfu = PandaDFU(PandaDFU.st_serial_to_dfu_serial(self._serial))
dfu.recover()
# reflash after recover
self.connect(True, True)
self.flash()
return True
@staticmethod
def flash_ota_st():
ret = os.system("cd %s && make clean && make ota" % (os.path.join(BASEDIR, "board")))
time.sleep(1)
return ret==0
@staticmethod
def flash_ota_wifi(release=False):
release_str = "RELEASE=1" if release else ""
ret = os.system("cd {} && make clean && {} make ota".format(os.path.join(BASEDIR, "boardesp"),release_str))
time.sleep(1)
return ret==0
@staticmethod
def list():
context = usb1.USBContext()
ret = []
try:
for device in context.getDeviceList(skip_on_error=True):
if device.getVendorID() == 0xbbaa and device.getProductID() in [0xddcc, 0xddee]:
try:
ret.append(device.getSerialNumber())
except Exception:
continue
except Exception:
pass
# TODO: detect if this is real
#ret += ["WIFI"]
return ret
def call_control_api(self, msg):
self._handle.controlWrite(Panda.REQUEST_OUT, msg, 0, 0, b'')
# ******************* health *******************
def health(self):
dat = self._handle.controlRead(Panda.REQUEST_IN, 0xd2, 0, 0, 41)
a = struct.unpack("IIIIIIIIBBBBBBBBB", dat)
return {
"uptime": a[0],
"voltage": a[1],
"current": a[2],
"can_rx_errs": a[3],
"can_send_errs": a[4],
"can_fwd_errs": a[5],
"gmlan_send_errs": a[6],
"faults": a[7],
"ignition_line": a[8],
"ignition_can": a[9],
"controls_allowed": a[10],
"gas_interceptor_detected": a[11],
"car_harness_status": a[12],
"usb_power_mode": a[13],
"safety_mode": a[14],
"fault_status": a[15],
"power_save_enabled": a[16]
}
# ******************* control *******************
def enter_bootloader(self):
try:
self._handle.controlWrite(Panda.REQUEST_OUT, 0xd1, 0, 0, b'')
except Exception as e:
print(e)
pass
def get_version(self):
return self._handle.controlRead(Panda.REQUEST_IN, 0xd6, 0, 0, 0x40).decode('utf8')
@staticmethod
def get_signature_from_firmware(fn):
f = open(fn, 'rb')
f.seek(-128, 2) # Seek from end of file
return f.read(128)
def get_signature(self):
part_1 = self._handle.controlRead(Panda.REQUEST_IN, 0xd3, 0, 0, 0x40)
part_2 = self._handle.controlRead(Panda.REQUEST_IN, 0xd4, 0, 0, 0x40)
return bytes(part_1 + part_2)
def get_type(self):
return self._handle.controlRead(Panda.REQUEST_IN, 0xc1, 0, 0, 0x40)
def is_white(self):
return self.get_type() == Panda.HW_TYPE_WHITE_PANDA
def is_grey(self):
return self.get_type() == Panda.HW_TYPE_GREY_PANDA
def is_black(self):
return self.get_type() == Panda.HW_TYPE_BLACK_PANDA
def is_uno(self):
return self.get_type() == Panda.HW_TYPE_UNO
def has_obd(self):
return (self.is_uno() or self.is_black())
def get_serial(self):
dat = self._handle.controlRead(Panda.REQUEST_IN, 0xd0, 0, 0, 0x20)
hashsig, calc_hash = dat[0x1c:], hashlib.sha1(dat[0:0x1c]).digest()[0:4]
assert(hashsig == calc_hash)
return [dat[0:0x10].decode("utf8"), dat[0x10:0x10+10].decode("utf8")]
def get_secret(self):
return self._handle.controlRead(Panda.REQUEST_IN, 0xd0, 1, 0, 0x10)
# ******************* configuration *******************
def set_usb_power(self, on):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe6, int(on), 0, b'')
def set_power_save(self, power_save_enabled=0):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe7, int(power_save_enabled), 0, b'')
def set_esp_power(self, on):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xd9, int(on), 0, b'')
def esp_reset(self, bootmode=0):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xda, int(bootmode), 0, b'')
time.sleep(0.2)
def set_safety_mode(self, mode=SAFETY_SILENT):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xdc, mode, 0, b'')
def set_can_forwarding(self, from_bus, to_bus):
# TODO: This feature may not work correctly with saturated buses
self._handle.controlWrite(Panda.REQUEST_OUT, 0xdd, from_bus, to_bus, b'')
def set_gmlan(self, bus=2):
# TODO: check panda type
if bus is None:
self._handle.controlWrite(Panda.REQUEST_OUT, 0xdb, 0, 0, b'')
elif bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
self._handle.controlWrite(Panda.REQUEST_OUT, 0xdb, 1, bus, b'')
def set_obd(self, obd):
# TODO: check panda type
self._handle.controlWrite(Panda.REQUEST_OUT, 0xdb, int(obd), 0, b'')
def set_can_loopback(self, enable):
# set can loopback mode for all buses
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe5, int(enable), 0, b'')
def set_can_enable(self, bus_num, enable):
# sets the can transceiver enable pin
self._handle.controlWrite(Panda.REQUEST_OUT, 0xf4, int(bus_num), int(enable), b'')
def set_can_speed_kbps(self, bus, speed):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xde, bus, int(speed*10), b'')
def set_uart_baud(self, uart, rate):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe4, uart, int(rate/300), b'')
def set_uart_parity(self, uart, parity):
# parity, 0=off, 1=even, 2=odd
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe2, uart, parity, b'')
def set_uart_callback(self, uart, install):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xe3, uart, int(install), b'')
# ******************* can *******************
def can_send_many(self, arr):
snds = []
transmit = 1
extended = 4
for addr, _, dat, bus in arr:
assert len(dat) <= 8
if DEBUG:
print(" W %x: %s" % (addr, binascii.hexlify(dat)))
if addr >= 0x800:
rir = (addr << 3) | transmit | extended
else:
rir = (addr << 21) | transmit
snd = struct.pack("II", rir, len(dat) | (bus << 4)) + dat
snd = snd.ljust(0x10, b'\x00')
snds.append(snd)
while True:
try:
#print("DAT: %s"%b''.join(snds).__repr__())
if self.wifi:
for s in snds:
self._handle.bulkWrite(3, s)
else:
self._handle.bulkWrite(3, b''.join(snds))
break
except (usb1.USBErrorIO, usb1.USBErrorOverflow):
print("CAN: BAD SEND MANY, RETRYING")
def can_send(self, addr, dat, bus):
self.can_send_many([[addr, None, dat, bus]])
def can_recv(self):
dat = bytearray()
while True:
try:
dat = self._handle.bulkRead(1, 0x10*256)
break
except (usb1.USBErrorIO, usb1.USBErrorOverflow):
print("CAN: BAD RECV, RETRYING")
time.sleep(0.1)
return parse_can_buffer(dat)
def can_clear(self, bus):
"""Clears all messages from the specified internal CAN ringbuffer as
though it were drained.
Args:
bus (int): can bus number to clear a tx queue, or 0xFFFF to clear the
global can rx queue.
"""
self._handle.controlWrite(Panda.REQUEST_OUT, 0xf1, bus, 0, b'')
# ******************* isotp *******************
def isotp_send(self, addr, dat, bus, recvaddr=None, subaddr=None):
return isotp_send(self, dat, addr, bus, recvaddr, subaddr)
def isotp_recv(self, addr, bus=0, sendaddr=None, subaddr=None):
return isotp_recv(self, addr, bus, sendaddr, subaddr)
# ******************* serial *******************
def serial_read(self, port_number):
ret = []
while 1:
lret = bytes(self._handle.controlRead(Panda.REQUEST_IN, 0xe0, port_number, 0, 0x40))
if len(lret) == 0:
break
ret.append(lret)
return b''.join(ret)
def serial_write(self, port_number, ln):
ret = 0
for i in range(0, len(ln), 0x20):
ret += self._handle.bulkWrite(2, struct.pack("B", port_number) + ln[i:i+0x20])
return ret
def serial_clear(self, port_number):
"""Clears all messages (tx and rx) from the specified internal uart
ringbuffer as though it were drained.
Args:
port_number (int): port number of the uart to clear.
"""
self._handle.controlWrite(Panda.REQUEST_OUT, 0xf2, port_number, 0, b'')
# ******************* kline *******************
# pulse low for wakeup
def kline_wakeup(self):
if DEBUG:
print("kline wakeup...")
self._handle.controlWrite(Panda.REQUEST_OUT, 0xf0, 0, 0, b'')
if DEBUG:
print("kline wakeup done")
def kline_drain(self, bus=2):
# drain buffer
bret = bytearray()
while True:
ret = self._handle.controlRead(Panda.REQUEST_IN, 0xe0, bus, 0, 0x40)
if len(ret) == 0:
break
elif DEBUG:
print("kline drain: " + binascii.hexlify(ret))
bret += ret
return bytes(bret)
def kline_ll_recv(self, cnt, bus=2):
echo = bytearray()
while len(echo) != cnt:
ret = self._handle.controlRead(Panda.REQUEST_OUT, 0xe0, bus, 0, cnt-len(echo))
if DEBUG and len(ret) > 0:
print("kline recv: " + binascii.hexlify(ret))
echo += ret
return str(echo)
def kline_send(self, x, bus=2, checksum=True):
def get_checksum(dat):
result = 0
result += sum(map(ord, dat)) if isinstance(dat, str) else sum(dat)
result = -result
return struct.pack("B", result % 0x100)
self.kline_drain(bus=bus)
if checksum:
x += get_checksum(x)
for i in range(0, len(x), 0xf):
ts = x[i:i+0xf]
if DEBUG:
print("kline send: " + binascii.hexlify(ts))
self._handle.bulkWrite(2, bytes([bus]) + ts)
echo = self.kline_ll_recv(len(ts), bus=bus)
if echo != ts:
print("**** ECHO ERROR %d ****" % i)
print(binascii.hexlify(echo))
print(binascii.hexlify(ts))
assert echo == ts
def kline_recv(self, bus=2):
msg = self.kline_ll_recv(2, bus=bus)
msg += self.kline_ll_recv(ord(msg[1])-2, bus=bus)
return msg
def send_heartbeat(self):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xf3, 0, 0, b'')
# ******************* RTC *******************
def set_datetime(self, dt):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa1, int(dt.year), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa2, int(dt.month), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa3, int(dt.day), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa4, int(dt.isoweekday()), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa5, int(dt.hour), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa6, int(dt.minute), 0, b'')
self._handle.controlWrite(Panda.REQUEST_OUT, 0xa7, int(dt.second), 0, b'')
def get_datetime(self):
dat = self._handle.controlRead(Panda.REQUEST_IN, 0xa0, 0, 0, 8)
a = struct.unpack("HBBBBBB", dat)
return datetime.datetime(a[0], a[1], a[2], a[4], a[5], a[6])
# ******************* IR *******************
def set_ir_power(self, percentage):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xb0, int(percentage), 0, b'')
# ******************* Fan ******************
def set_fan_power(self, percentage):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xb1, int(percentage), 0, b'')
def get_fan_rpm(self):
dat = self._handle.controlRead(Panda.REQUEST_IN, 0xb2, 0, 0, 2)
a = struct.unpack("H", dat)
return a[0]
# ****************** Phone *****************
def set_phone_power(self, enabled):
self._handle.controlWrite(Panda.REQUEST_OUT, 0xb3, int(enabled), 0, b'')
|
py | 7dfdf7bc03eae9f5241d58e1bc0a734b59be3bb2 | import filecmp
import logging
import logging.config
import os
import shutil
import subprocess
import sys
from .colors import MessageColors
logger = logging.getLogger(__name__)
logging_levels = {
'0': 'ERROR',
'1': 'WARNING',
'2': 'INFO',
'3': 'DEBUG',
}
class ColorFilter(logging.Filter):
color_map = {
'ERROR': MessageColors.FAIL,
'WARNING': MessageColors.WARNING,
'INFO': MessageColors.HEADER,
'DEBUG': MessageColors.OK
}
def filter(self, record):
if sys.stdout.isatty():
record.msg = self.color_map[record.levelname] + record.msg + MessageColors.ENDC
return record
LOGGING = {
'version': 1,
'filters': {
'colorize': {
'()': ColorFilter
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'filters': ['colorize'],
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'ansible_builder': {
'handlers': ['console'],
}
}
}
def configure_logger(verbosity):
LOGGING['loggers']['ansible_builder']['level'] = logging_levels[str(verbosity)]
logging.config.dictConfig(LOGGING)
def run_command(command, capture_output=False, allow_error=False):
logger.info('Running command:')
logger.info(' {0}'.format(' '.join(command)))
try:
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except FileNotFoundError:
logger.error(f"You do not have {command[0]} installed, please specify a different container runtime for this command.")
sys.exit(1)
output = []
for line in iter(process.stdout.readline, b''):
line = line.decode(sys.stdout.encoding)
if capture_output:
output.append(line.rstrip())
logger.debug(line)
rc = process.poll()
if rc is not None and rc != 0 and (not allow_error):
for line in output:
logger.error(line)
logger.error(f"An error occured (rc={rc}), see output line(s) above for details.")
sys.exit(1)
return (rc, output)
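# Illustrative sketch of how run_command is typically used (the command shown is
# an assumption, not taken from this module): capture a runtime's version string.
#
#   rc, output = run_command(['podman', '--version'], capture_output=True)
#   if rc == 0 and output:
#       logger.info('Detected runtime: %s', output[0])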
def write_file(filename: str, lines: list) -> bool:
new_text = '\n'.join(lines)
if os.path.exists(filename):
with open(filename, 'r') as f:
if f.read() == new_text:
logger.debug("File {0} is already up-to-date.".format(filename))
return False
else:
logger.warning('File {0} had modifications and will be rewritten'.format(filename))
with open(filename, 'w') as f:
f.write(new_text)
return True
def copy_file(source: str, dest: str) -> bool:
should_copy = False
if not os.path.exists(dest):
logger.debug("File {0} will be created.".format(dest))
should_copy = True
elif not filecmp.cmp(source, dest, shallow=False):
logger.warning('File {0} had modifications and will be rewritten'.format(dest))
should_copy = True
elif os.path.getmtime(source) > os.path.getmtime(dest):
logger.warning('File {0} updated time increased and will be rewritten'.format(dest))
should_copy = True
if should_copy:
shutil.copy(source, dest)
else:
logger.debug("File {0} is already up-to-date.".format(dest))
return should_copy
|
py | 7dfdf85170d7cddca2ba5522fa642516e74fe6aa | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile, Employee, Review
class RegistrationForm(UserCreationForm):
class Meta:
model = User
fields = ['first_name','last_name','email','username','password1','password2']
class ProfileForm(forms.ModelForm):
phone = forms.CharField()
address = forms.CharField()
gender = forms.ChoiceField(choices = (("Male","Male"),("Female","Female")))
height = forms.DecimalField()
weight = forms.DecimalField()
goals = forms.ChoiceField(choices = (('Weight Loss','Weight Loss'),('Increase Muscle Mass','Increase Muscle Mass'),('Maintain Fitness','Maintain Fitness')))
time = forms.ChoiceField(choices = (("10am","10am"), ("11am","11am"), ("12pm","12pm"),("1pm","1pm"),("2pm","2pm"),("3pm","3pm"),("4pm","4pm"),("5pm","5pm"),("6pm","6pm"),("7pm","7pm"),("8pm","8pm")))
class Meta:
model = Profile
fields = ['phone','address','gender','height','weight','goals','time']
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ['first_name', 'last_name', 'email']
class EmployeeForm(forms.ModelForm):
phone = forms.CharField()
gender = forms.ChoiceField(choices = (("Male","Male"),("Female","Female")))
class Meta:
model = Employee
fields = ['phone','gender']
class ReviewForm(forms.ModelForm):
name = forms.CharField()
email = forms.EmailField()
thoughts = forms.CharField()
class Meta:
model = Review
fields = ['name','email','thoughts'] |
py | 7dfdf870f8f95acd2fd5e410763fdf822eb0904d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""CLI to generate metadata cube for acceptance tests."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
mandatory_attributes_json: cli.inputjson,
*,
name="air_pressure_at_sea_level",
units=None,
spatial_grid="latlon",
time_period: int = None,
json_input: cli.inputjson = None,
ensemble_members: int = 8,
grid_spacing: float = None,
domain_corner: cli.comma_separated_list_of_float = None,
npoints: int = 71,
):
""" Generate a cube with metadata only.
Args:
mandatory_attributes_json (Dict):
Specifies the values of the mandatory attributes, title, institution and
source.
name (Optional[str]):
Output variable name, or if creating a probability cube the name of the
underlying variable to which the probability field applies.
units (Optional[str]):
Output variable units, or if creating a probability cube the units of the
underlying variable / threshold.
spatial_grid (Optional[str]):
What type of x/y coordinate values to use. Permitted values are
"latlon" or "equalarea".
time_period (Optional[int]):
The period in minutes between the time bounds. This is used to calculate
the lower time bound. If unset the diagnostic will be instantaneous, i.e.
without time bounds.
json_input (Optional[Dict]):
Dictionary containing values for one or more of: "name", "units", "time",
"time_bounds", "frt", "spp__relative_to_threshold", "attributes"
(dictionary of additional metadata attributes) and "coords" (dictionary).
"coords" can contain "height_levels" (list of height/pressure level values),
and one of "realizations", "percentiles" or "thresholds" (list of dimension
values).
ensemble_members (Optional[int]):
Number of ensemble members. Default 8. Will not be used if "realizations",
"percentiles" or "thresholds" provided in json_input.
grid_spacing (Optional[float]):
Resolution of grid (metres or degrees).
domain_corner (Optional[Tuple[float, float]]):
Bottom left corner of grid domain (y,x) (degrees for latlon or metres for
equalarea).
npoints (Optional[int]):
Number of points along each of the y and x spatial axes.
Returns:
iris.cube.Cube:
Output of generate_metadata()
"""
# Set arguments to pass to generate_metadata function and remove json_input for
# processing contents before adding
generate_metadata_args = locals()
for key in ["mandatory_attributes_json", "json_input"]:
generate_metadata_args.pop(key, None)
from improver.synthetic_data.generate_metadata import generate_metadata
from improver.synthetic_data.utilities import (
get_height_levels,
get_leading_dimension,
)
from improver.utilities.temporal import cycletime_to_datetime
if json_input is not None:
# Get leading dimension and height/pressure data from json_input
if "coords" in json_input:
coord_data = json_input["coords"]
(
json_input["leading_dimension"],
json_input["cube_type"],
) = get_leading_dimension(coord_data)
json_input["height_levels"], json_input["pressure"] = get_height_levels(
coord_data
)
json_input.pop("coords", None)
# Convert str time, frt and time_bounds to datetime
if "time" in json_input:
json_input["time"] = cycletime_to_datetime(json_input["time"])
if "frt" in json_input:
json_input["frt"] = cycletime_to_datetime(json_input["frt"])
if "time_bounds" in json_input:
time_bounds = []
for tb in json_input["time_bounds"]:
time_bounds.append(cycletime_to_datetime(tb))
json_input["time_bounds"] = time_bounds
# Update generate_metadata_args with the json_input data
generate_metadata_args.update(json_input)
return generate_metadata(mandatory_attributes_json, **generate_metadata_args)
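# Illustrative json_input payload (hypothetical values; the keys follow the
# docstring above -- "coords" supplies the leading dimension and height levels):
#
#   {
#       "name": "air_temperature",
#       "units": "K",
#       "time": "20201218T0900Z",
#       "coords": {"realizations": [0, 1, 2], "height_levels": [1.5, 10.0]}
#   }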
|
py | 7dfdf8ea459673602b0468db3ae07300f1195067 | # coding=utf-8
import serial
import logging
from signals import DbusSignal, ModbusSignal
SERVICE_NAME = 'com.victronenergy.meteo'
DRIVER_NAME = 'dbus-imt-si-rs485tc.py'
LOG_LEVEL = logging.INFO
UPDATE_INTERVAL = 2 # seconds
# modbus configuration
BASE_ADDRESS = 0
NO_OF_REGISTERS = 9
SLAVE_ADDRESS = 1
# serial port configuration
PARITY = serial.PARITY_NONE
SERIAL_TIMEOUT = 0.5 # seconds
BAUD_RATE = 9600
BYTE_SIZE = 8
STOP_BITS = 1
MODE = 'rtu'
# signals configuration
SIGNALS = [
DbusSignal('/Mgmt/ProcessName', DRIVER_NAME),
DbusSignal('/Mgmt/ProcessVersion', '1.1.6'),
DbusSignal('/Mgmt/Connection', 'Modbus RTU'),
DbusSignal('/DeviceInstance', 1),
DbusSignal('/ProductId', 0xB030),
DbusSignal('/ProductName', 'IMT Si-RS485 Irradiance Sensor'),
DbusSignal('/Connected', True),
ModbusSignal('/Irradiance', register=0, gain=0.1, unit=u'W/m2'),
ModbusSignal('/WindSpeed', register=3, gain=0.1, unit=u'm/s'),
ModbusSignal('/CellTemperature', register=7, gain=0.1, unit=u'°C', signed=True),
ModbusSignal('/ExternalTemperature', register=8, gain=0.1, unit=u'°C', signed=True)
]
# subsensor settings
# keys must match with ModbusSignal's name above
# TTY will be replaced with the tty of the device
SETTINGS = {
'/WindSpeed': ['/Settings/Service/meteo/1/WindSpeedSensor', 'auto-detect', 0, 0],
'/ExternalTemperature': ['/Settings/Service/meteo/1/ExternalTemperatureSensor', 'auto-detect', 0, 0]
}
|
py | 7dfdf94d4c3c2f61a9cab5deb1335336c16b8315 | import unittest
try:
from unittest import mock
except ImportError: # Python 2 fallback.
import mock
from pydocusign import DocuSignClient, Signer
class DocuSignTestClient(DocuSignClient):
def login_information(self):
self.account_id = 'test'
self.account_url = '{root}/accounts/test'.format(root=self.root_url)
class ClientRequestTest(unittest.TestCase):
def test_create_envelope_recipients(self):
client = DocuSignTestClient()
with mock.patch.object(client, 'post') as post_mock:
signers = [
Signer(clientUserId='userid_2', email='[email protected]',
name='Signer 2'),
Signer(clientUserId='userid_2', email='[email protected]',
name='Signer 2'),
]
client.add_envelope_recipients('ABC123', signers)
url = '/accounts/{account_id}/envelopes/ABC123/recipients'.format(
account_id=client.account_id)
post_mock.assert_called_once_with(
url,
data={'signers': [signers[0].to_dict(), signers[1].to_dict()]},
expected_status_code=201
)
with mock.patch.object(client, 'post') as post_mock:
client.add_envelope_recipients('ABC123', [], resend_envelope=True)
post_mock.assert_called_once_with(
'/accounts/{}/envelopes/ABC123/recipients'
'?resend_envelope=true'.format(client.account_id),
data={'signers': []},
expected_status_code=201
)
def test_update_envelope_recipients(self):
client = DocuSignTestClient()
with mock.patch.object(client, 'put') as put_mock:
signers = [
Signer(clientUserId='userid_2', email='[email protected]',
name='Signer 2'),
Signer(clientUserId='userid_2', email='[email protected]',
name='Signer 2'),
]
client.update_envelope_recipients('ABC123', signers)
url = '/accounts/{account_id}/envelopes/ABC123/recipients'.format(
account_id=client.account_id)
put_mock.assert_called_once_with(
url, data={'signers': [signers[0].to_dict(), signers[1].to_dict()]}
)
with mock.patch.object(client, 'put') as put_mock:
client.update_envelope_recipients('ABC123', [], resend_envelope=True)
put_mock.assert_called_once_with(
'/accounts/{}/envelopes/ABC123/recipients'
'?resend_envelope=true'.format(client.account_id),
data={'signers': []}
)
def test_delete_envelope_recipient(self):
client = DocuSignTestClient()
with mock.patch.object(client, 'delete') as delete_mock:
client.delete_envelope_recipient('ABC123', '1')
url = '/accounts/{account_id}/envelopes/ABC123/recipients/1'.format(
account_id=client.account_id)
delete_mock.assert_called_once_with(url)
def test_delete_envelope_recipients(self):
client = DocuSignTestClient()
with mock.patch.object(client, 'delete') as delete_mock:
client.delete_envelope_recipients('ABC123', ['1', '2'])
url = '/accounts/{account_id}/envelopes/ABC123/recipients'.format(
account_id=client.account_id)
delete_mock.assert_called_once_with(
url, data={'signers': [{'recipientId': '1'}, {'recipientId': '2'}]}
)
def test_get_page_image(self):
client = DocuSignTestClient()
with mock.patch.object(client, 'get') as get_mock:
client.get_page_image('ABC123', 1, 1, 72, max_height=300)
url = '/accounts/{accountId}/envelopes/ABC123/documents/1/pages/1/' \
'page_image?dpi=72&max_height=300'\
.format(accountId=client.account_id)
get_mock.assert_called_once_with(url)
|
py | 7dfdfa706a5642684ce386632621c59482d5c61b | """Package where needed crypto material is stored."""
from sympc.store.crypto_primitive_provider import CryptoPrimitiveProvider
from sympc.store.crypto_store import CryptoStore
def register_primitive_generator(name: str):
"""Decorator to register a crypto primitive provider.
Args:
name (str): Name of the primitive.
# noqa: DAR201
"""
def register_generator(func_generator):
if name in CryptoPrimitiveProvider._func_providers:
raise ValueError(f"Provider {name} already in _func_providers")
CryptoPrimitiveProvider._func_providers[name] = func_generator
return func_generator
return register_generator
def register_primitive_store_add(name):
"""Decorator to add primitives to the store.
Args:
name (str): Name of the primitive.
# noqa: DAR201
"""
def register_add(func_add):
if name in CryptoStore._func_add_store:
raise ValueError(f"Crypto Store 'adder' {name} already in _func_add_store")
CryptoStore._func_add_store[name] = func_add
return func_add
return register_add
def register_primitive_store_get(name):
"""Decorator to retrieve primitives from the store.
Args:
name (str): Name of the primitive.
# noqa: DAR201
"""
def register_get(func_get):
if name in CryptoStore._func_get_store:
raise ValueError(f"Crypto Store 'getter' {name} already in _func_get_store")
CryptoStore._func_get_store[name] = func_get
return func_get
return register_get
__all__ = ["CryptoStore", "CryptoPrimitiveProvider"]
|
py | 7dfdfb0782cbd3ffa811193fe7db06555dcaa923 | import logging
from typing import Any, Dict, List, Optional, TypedDict, Union
from utility import Utility
log: logging.Logger = logging.getLogger(__name__)
class ElderChallenges(TypedDict):
"""Structure of elder_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: int
loot: int
xp: int
class GunUnlockChallenges(TypedDict):
"""Structure of gun_unlock_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: int
xp: int
loot: int
class BRWeeklyChallenges(TypedDict):
"""Structure of br_weekly_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: int
xp: int
loot: int
start: int
length: int
season: int
unknown1: str # Not defined in luashared/csvutils.lua
class WeeklyChallenges(TypedDict):
"""Structure of weekly_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: int
xp: int
loot: int
start: int
length: int
season: int
class StickerBookChallenges(TypedDict):
"""Structure of sticker_book_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: str # Array of ints
loot: str # Array of ints
XPReward: str # Array of ints
categoryType: str
icon: str
detailDesc: str
class PetWatchTurboTable(TypedDict):
"""Structure of mp/petwatchturbotable.csv"""
ref: str
phaseNum: int
phaseTime: int
bonusTimeMax: int
gameType: str
charmID: int
challengeDesc: str
class MiscChallenges(TypedDict):
"""Structure of misc_challenges.csv"""
id: int
ref: str
name: str
desc: str
amount: int
xp: int
loot: int
categoryType: str
icon: str
detailDesc: str
conversionType: str
hideSplash: int # bool
hideAARLoot: int # bool
showAARPopup: int # bool
sound: str
class T9SeasonalChallenges(TypedDict):
"""Structure of mp/t9_seasonal_challenges.csv"""
challengeID: int
challengeRef: str
seasonNum: int
seasonChallengeIndex: int
title: str
description: str
levelGate: int
isMastery: int # bool
isT9Exclusive: int # bool
tier1Quantity: int
tier1XP: int
tier2Quantity: int
tier2XP: int
tier3Quantity: int
tier3XP: int
tier4Quantity: int
tier4XP: int
tier5Quantity: int
tier5XP: int
callingCard: str
class T9SeasonalProgressionBlueprintRewards(TypedDict):
"""Structure of mp/progression/t9_seasonal_progression_blueprint_rewards.csv"""
seasonDispNum: int
seasonRank: int
blueprintID: int
class OfficerChallenges:
"""Officer Challenge XAssets."""
def Compile(self: Any) -> None:
"""Compile the Officer Challenge XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = OfficerChallenges.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/officerChallenges.json", challenges)
log.info(f"Compiled {len(challenges):,} Officer Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the elder_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/elder_challenges.csv", ElderChallenges
)
if table is None:
return challenges
for entry in table:
if (ref := entry.get("ref")).startswith("ch_elder_s"):
season: Optional[int] = int(ref.split("ch_elder_s")[1].split("_")[0])
else:
season: Optional[int] = None
if (amount := entry.get("amount")) is not None:
amount: Optional[Union[str, int]] = f"{amount:,}"
challenges.append(
{
"altId": ref,
"name": self.localize.get(entry.get("name")),
"description": self.localize.get(entry.get("desc")).replace(
"&&1", amount
),
"season": season,
"xp": entry.get("xp"),
}
)
return challenges
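# Note: the "&&1" token replaced in the localized descriptions above is the
# game's placeholder for the challenge amount, e.g. (illustrative) a template
# such as "Get &&1 Eliminations" becomes "Get 2,500 Eliminations".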
class WeaponUnlockChallenges:
"""Weapon Unlock Challenge XAssets."""
def Compile(self: Any) -> None:
"""Compile the Weapon Unlock Challenge XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = WeaponUnlockChallenges.Table(self, challenges)
Utility.WriteFile(
self, f"{self.eXAssets}/weaponUnlockChallenges.json", challenges
)
log.info(f"Compiled {len(challenges):,} Weapon Unlock Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the gun_unlock_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/gun_unlock_challenges.csv", GunUnlockChallenges
)
if table is None:
return challenges
for entry in table:
if (amount := entry.get("amount")) is not None:
amount: Optional[Union[str, int]] = f"{amount:,}"
challenges.append(
{
"id": entry.get("id"),
"altId": entry.get("ref"),
"name": self.localize.get(entry.get("name")),
"description": self.localize.get(entry.get("desc")).replace(
"&&1", amount
),
"rewards": [
{
"id": entry.get("loot"),
"type": self.ModernWarfare.GetLootType(entry.get("loot")),
},
{
"xp": entry.get("xp"),
},
],
}
)
return challenges
class WeeklyChallengesBR:
"""Weekly Battle Royale Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Weekly Battle Royale Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = WeeklyChallengesBR.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/weeklyChallengesBR.json", challenges)
log.info(f"Compiled {len(challenges):,} Weekly Battle Royale Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the br_weekly_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/br_weekly_challenges.csv", BRWeeklyChallenges
)
if table is None:
return challenges
for entry in table:
altId: str = entry.get("ref")
season: int = int(altId.split("season_")[1].split("_")[0])
week: int = int(altId.split("week_")[1].split("_")[0])
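            # e.g. (illustrative) an altId containing "season_3_week_2" parses to season=3, week=2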
if (amount := entry.get("amount")) is not None:
amount: Optional[Union[str, int]] = f"{amount:,}"
challenges.append(
{
"id": entry.get("id"),
"altId": altId,
"name": self.localize.get(entry.get("name")),
"description": self.localize.get(entry.get("desc")).replace(
"&&1", amount
),
"start": Utility.PrettyTime(self, entry.get("start")),
"season": season,
"week": week,
"xp": entry.get("xp"),
"rewards": [],
}
)
if (l := entry.get("loot")) is not None:
challenges[-1]["rewards"].append(
{"id": l, "type": self.ModernWarfare.GetLootType(l)}
)
return challenges
class WeeklyChallengesMP:
"""Weekly Multiplayer Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Weekly Multiplayer Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = WeeklyChallengesMP.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/weeklyChallengesMP.json", challenges)
log.info(f"Compiled {len(challenges):,} Weekly Multiplayer Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the weekly_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/weekly_challenges.csv", WeeklyChallenges
)
if table is None:
return challenges
for entry in table:
altId: str = entry.get("ref")
season: int = int(altId.split("season_")[1].split("_")[0])
week: int = int(altId.split("week_")[1].split("_")[0])
if (amount := entry.get("amount")) is not None:
amount: Optional[Union[str, int]] = f"{amount:,}"
challenges.append(
{
"id": entry.get("id"),
"altId": altId,
"name": self.localize.get(entry.get("name")),
"description": self.localize.get(entry.get("desc")).replace(
"&&1", amount
),
"start": Utility.PrettyTime(self, entry.get("start")),
"season": season,
"week": week,
"xp": entry.get("xp"),
"rewards": [],
}
)
if (l := entry.get("loot")) is not None:
challenges[-1]["rewards"].append(
{"id": l, "type": self.ModernWarfare.GetLootType(l)}
)
return challenges
class MasteryChallenges:
"""Mastery Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Mastery Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = MasteryChallenges.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/masteryChallenges.json", challenges)
log.info(f"Compiled {len(challenges):,} Mastery Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the sticker_book_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/sticker_book_challenges.csv", StickerBookChallenges
)
if table is None:
return challenges
for entry in table:
challenges.append(
{
"altId": entry.get("ref"),
"name": self.localize.get(entry.get("name")),
"description": self.localize.get(entry.get("desc")),
"category": entry.get("categoryType"),
"rewards": [],
}
)
amounts: List[int] = Utility.GetCSVArray(self, entry.get("amount"), int)
loot: List[int] = Utility.GetCSVArray(self, entry.get("loot"), int)
xp: List[int] = Utility.GetCSVArray(self, entry.get("XPReward"), int)
for a, l, x in zip(amounts, loot, xp):
challenges[-1]["rewards"].append(
{
"amount": a,
"xp": x,
"id": l,
"type": self.ModernWarfare.GetLootType(l),
}
)
if (desc := challenges[-1].get("description")) is not None:
challenges[-1]["description"] = desc.replace("&&1", str(amounts[-1]))
return challenges
class TurboChallenges:
"""Tomogunchi Turbo Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Tomogunchi Turbo Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = TurboChallenges.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/turboChallenges.json", challenges)
log.info(f"Compiled {len(challenges):,} Tomogunchi Turbo Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the mp/petwatchturbotable.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/petwatchturbotable.csv", PetWatchTurboTable
)
if table is None:
return challenges
for entry in table:
challenges.append(
{
"altId": entry.get("ref"),
"phase": entry.get("phaseNum"),
"description": self.localize.get(entry.get("challengeDesc")),
"phaseTime": entry.get("phaseTime"),
"maxBonusTime": entry.get("bonusTimeMax"),
"charmAltId": None
if (cid := entry.get("charmID")) is None
else f"cos_{cid}",
}
)
return challenges
class MiscellaneousChallenges:
"""Miscellaneous Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Miscellaneous Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = MiscellaneousChallenges.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/miscChallenges.json", challenges)
log.info(f"Compiled {len(challenges):,} Miscellaneous Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the misc_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/misc_challenges.csv", MiscChallenges
)
if table is None:
return challenges
for entry in table:
if (d := self.localize.get(entry.get("desc"))) is not None:
desc: Optional[str] = d
elif (d := self.localize.get(entry.get("detailDesc"))) is not None:
desc = d
else:
desc = None
challenges.append(
{
"altId": entry.get("ref"),
"name": self.localize.get(entry.get("name")),
"description": desc,
"rewards": [
{
"id": entry.get("loot"),
"type": self.ModernWarfare.GetLootType(entry.get("loot")),
}
],
}
)
if desc is None:
continue
if (amount := entry.get("amount")) is not None:
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
return challenges
class SeasonalChallenges:
"""Seasonal Challenges XAssets."""
def Compile(self: Any) -> None:
"""Compile the Seasonal Challenges XAssets."""
challenges: List[Dict[str, Any]] = []
challenges = SeasonalChallenges.Table(self, challenges)
Utility.WriteFile(self, f"{self.eXAssets}/seasonalChallenges.json", challenges)
log.info(f"Compiled {len(challenges):,} Seasonal Challenges")
def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the mp/t9_seasonal_challenges.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/t9_seasonal_challenges.csv", T9SeasonalChallenges
)
if table is None:
return challenges
for entry in table:
desc = self.localize.get(entry.get("description"))
challenges.append(
{
"id": entry.get("challengeID"),
"altId": entry.get("challengeRef"),
"name": self.localize.get(entry.get("title")),
"description": desc,
"season": entry.get("seasonNum"),
"image": entry.get("callingCard"),
"levelGate": entry.get("levelGate"),
"t9Exclusive": bool(entry.get("isT9Exclusive")),
"mastery": bool(entry.get("isMastery")),
"tiers": [],
}
)
if (amount := entry.get("tier1Quantity")) is not None:
challenges[-1]["tiers"].append(
{"amount": amount, "xp": entry.get("tier1XP")}
)
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
if (amount := entry.get("tier2Quantity")) is not None:
challenges[-1]["tiers"].append(
{"amount": amount, "xp": entry.get("tier2XP")}
)
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
if (amount := entry.get("tier3Quantity")) is not None:
challenges[-1]["tiers"].append(
{"amount": amount, "xp": entry.get("tier3XP")}
)
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
if (amount := entry.get("tier4Quantity")) is not None:
challenges[-1]["tiers"].append(
{"amount": amount, "xp": entry.get("tier4XP")}
)
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
if (amount := entry.get("tier5Quantity")) is not None:
challenges[-1]["tiers"].append(
{"amount": amount, "xp": entry.get("tier5XP")}
)
challenges[-1]["description"] = desc.replace("&&1", f"{amount:,}")
return challenges
class ProgressionRewards:
"""Progression Rewards XAssets."""
def Compile(self: Any) -> None:
"""Compile the Progression Rewards XAssets."""
rewards: List[Dict[str, Any]] = []
rewards = ProgressionRewards.Table(self, rewards)
Utility.WriteFile(self, f"{self.eXAssets}/progressionRewards.json", rewards)
log.info(f"Compiled {len(rewards):,} Progression Rewards")
def Table(self: Any, rewards: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the mp/progression/t9_seasonal_progression_blueprint_rewards.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self,
f"{self.iXAssets}/mp/progression/t9_seasonal_progression_blueprint_rewards.csv",
T9SeasonalProgressionBlueprintRewards,
)
if table is None:
return rewards
for entry in table:
rewards.append(
{
"season": entry.get("seasonDispNum"),
"rank": entry.get("seasonRank"),
"rewards": [
{
"id": (bId := entry.get("blueprintID")),
"type": self.ModernWarfare.GetLootType(bId),
}
],
}
)
return rewards
|
py | 7dfdfb460d35266939e12c8942eec8a15836b464 | # Generated by Django 3.0.2 on 2020-01-18 02:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hoodie', '0007_auto_20200117_1759'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='hoody',
),
migrations.AddField(
model_name='post',
name='hood',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hood_posts', to='hoodie.Hood'),
),
]
|
py | 7dfdfceaad14aea28338f6b1c30ad27737f7b853 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Turns arbitrary objects into tf.CompositeTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import threading
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import tf_inspect
# pylint: enable=g-direct-tensorflow-import
__all__ = [
'auto_composite_tensor',
'AutoCompositeTensor',
'is_deferred_assertion_context',
]
_DEFERRED_ASSERTION_CONTEXT = threading.local()
_DEFERRED_ASSERTION_CONTEXT.is_deferred = False
def is_deferred_assertion_context():
return getattr(_DEFERRED_ASSERTION_CONTEXT, 'is_deferred', False)
@contextlib.contextmanager
def _deferred_assertion_context(is_deferred=True):
was_deferred = getattr(_DEFERRED_ASSERTION_CONTEXT, 'is_deferred', False)
_DEFERRED_ASSERTION_CONTEXT.is_deferred = is_deferred
try:
yield
finally:
_DEFERRED_ASSERTION_CONTEXT.is_deferred = was_deferred
_registry = {} # Mapping from (python pkg, class name) -> class.
_SENTINEL = object()
_AUTO_COMPOSITE_TENSOR_VERSION = 3
# Cache maps __init__ method to signature
_sig_cache = {}
def _cached_signature(f):
if f not in _sig_cache:
_sig_cache[f] = tf_inspect.signature(f)
return _sig_cache[f]
def _extract_init_kwargs(obj, omit_kwargs=(), limit_to=None,
prefer_static_value=()):
"""Extract constructor kwargs to reconstruct `obj`."""
# If `obj` inherits its constructor from `AutoCompositeTensor` (which inherits
# its constructor from `object`) return an empty dictionary to avoid
# triggering the error below due to *args and **kwargs in the constructor.
if type(obj).__init__ is AutoCompositeTensor.__init__:
return {}
sig = _cached_signature(type(obj).__init__)
if any(v.kind in (tf_inspect.Parameter.VAR_KEYWORD,
tf_inspect.Parameter.VAR_POSITIONAL)
for v in sig.parameters.values()):
raise ValueError(
'*args and **kwargs are not supported. Found `{}`'.format(sig))
keys = [p for p in sig.parameters if p != 'self' and p not in omit_kwargs]
if limit_to is not None:
keys = [k for k in keys if k in limit_to]
kwargs = {}
not_found = object()
for k in keys:
src1 = getattr(obj, k, not_found)
if src1 is not not_found:
kwargs[k] = src1
else:
src2 = getattr(obj, '_' + k, not_found)
if src2 is not not_found:
kwargs[k] = src2
else:
src3 = getattr(obj, 'parameters', {}).get(k, not_found)
if src3 is not not_found:
kwargs[k] = src3
else:
raise ValueError(
f'Could not determine an appropriate value for field `{k}` in'
f' object `{obj}`. Looked for \n'
f' 1. an attr called `{k}`,\n'
f' 2. an attr called `_{k}`,\n'
f' 3. an entry in `obj.parameters` with key "{k}".')
if k in prefer_static_value and kwargs[k] is not None:
if tf.is_tensor(kwargs[k]):
static_val = tf.get_static_value(kwargs[k])
if static_val is not None:
kwargs[k] = static_val
if isinstance(kwargs[k], (np.ndarray, np.generic)):
# Generally, these are shapes or int, but may be other parameters such as
# `power` for `tfb.PowerTransform`.
kwargs[k] = kwargs[k].tolist()
return kwargs
def _extract_type_spec_recursively(value):
"""Return (collection of) TypeSpec(s) for `value` if it includes `Tensor`s.
If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If
`value` is a collection containing `Tensor` values, recursively supplant them
  with their respective `TypeSpec`s in a collection of parallel structure.
  If `value` is none of the above, return it unchanged.
Args:
value: a Python `object` to (possibly) turn into a (collection of)
`tf.TypeSpec`(s).
Returns:
spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`
or `value`, if no `Tensor`s are found.
"""
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
if isinstance(value, tf.Variable):
return resource_variable_ops.VariableSpec(
value.shape, dtype=value.dtype, trainable=value.trainable)
if tf.is_tensor(value):
return tf.TensorSpec(value.shape, value.dtype)
if isinstance(value, (list, tuple)):
specs = [_extract_type_spec_recursively(v) for v in value]
was_tensor = list([a is not b for a, b in zip(value, specs)])
has_tensors = any(was_tensor)
has_only_tensors = all(was_tensor)
if has_tensors:
if has_tensors != has_only_tensors:
raise NotImplementedError(
'Found `{}` with both Tensor and non-Tensor parts: {}'
.format(type(value), value))
return type(value)(specs)
return value
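# For example, [tf.constant(1.), [tf.constant(2.)]] becomes a parallel structure
# of TensorSpecs, while a mixed list such as [tf.constant(1.), 'metadata'] raises
# NotImplementedError per the check above.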
class _AutoCompositeTensorTypeSpec(tf.TypeSpec):
"""A tf.TypeSpec for `AutoCompositeTensor` objects."""
__slots__ = ('_param_specs', '_non_tensor_params', '_omit_kwargs',
'_prefer_static_value', '_callable_params', '_serializable',
'_comparable')
def __init__(self, param_specs, non_tensor_params, omit_kwargs,
prefer_static_value, non_identifying_kwargs,
callable_params=None):
"""Initializes a new `_AutoCompositeTensorTypeSpec`.
Args:
param_specs: Python `dict` of `tf.TypeSpec` instances that describe
kwargs to the `AutoCompositeTensor`'s constructor that are `Tensor`-like
or `CompositeTensor` subclasses.
non_tensor_params: Python `dict` containing non-`Tensor` and non-
`CompositeTensor` kwargs to the `AutoCompositeTensor`'s constructor.
omit_kwargs: Python `tuple` of strings corresponding to the names of
kwargs to the `AutoCompositeTensor`'s constructor that should be omitted
from the `_AutoCompositeTensorTypeSpec`'s serialization, equality/
compatibility checks, and rebuilding of the `AutoCompositeTensor` from
`Tensor` components.
prefer_static_value: Python `tuple` of strings corresponding to the names
of `Tensor`-like kwargs to the `AutoCompositeTensor`s constructor that
may be stored as static values, if known. These are typically shapes or
axis values.
non_identifying_kwargs: Python `tuple` of strings corresponding to the
names of kwargs to the `AutoCompositeTensor`s constructor whose values
are not relevant to the unique identification of the
`_AutoCompositeTensorTypeSpec` instance. Equality/comparison checks and
`__hash__` do not depend on these kwargs.
callable_params: Python `dict` of callable kwargs to the
`AutoCompositeTensor`'s constructor that do not subclass
`CompositeTensor`, or `None`. If `callable_params` is a non-empty
`dict`, then serialization of the `_AutoCompositeTensorTypeSpec` is not
supported. Defaults to `None`, which is converted to an empty `dict`.
"""
self._param_specs = param_specs
self._non_tensor_params = non_tensor_params
self._omit_kwargs = omit_kwargs
self._prefer_static_value = prefer_static_value
self._non_identifying_kwargs = non_identifying_kwargs
self._callable_params = {} if callable_params is None else callable_params
self._serializable = (
_AUTO_COMPOSITE_TENSOR_VERSION,
self._param_specs,
self._non_tensor_params,
self._omit_kwargs,
self._prefer_static_value,
self._non_identifying_kwargs)
def remove_kwargs(d):
return {k: v for k, v in d.items()
if k not in self._non_identifying_kwargs}
self._comparable = (
_AUTO_COMPOSITE_TENSOR_VERSION,
remove_kwargs(self._param_specs),
remove_kwargs(self._non_tensor_params),
self._omit_kwargs,
self._prefer_static_value,
self._non_identifying_kwargs,
tf.nest.map_structure(id, remove_kwargs(self._callable_params)))
@classmethod
def from_instance(cls, instance, omit_kwargs=(), non_identifying_kwargs=()):
cls_value_type = cls.value_type.fget(None)
if type(instance) is not cls_value_type: # pylint: disable=unidiomatic-typecheck
raise ValueError(f'`{type(instance).__name__}` has inherited the '
f'`_type_spec` of `{cls_value_type.__name__}`. It '
f'should define its own, either directly, or by '
f'applying `auto_composite_tensor` to '
f'`{type(instance).__name__}.`')
prefer_static_value = tuple(
getattr(instance, '_composite_tensor_shape_params', ()))
kwargs = _extract_init_kwargs(instance, omit_kwargs=omit_kwargs,
prefer_static_value=prefer_static_value)
non_tensor_params = {}
param_specs = {}
callable_params = {}
for k, v in list(kwargs.items()):
# If v contains no Tensors, this will just be v
type_spec_or_v = _extract_type_spec_recursively(v)
if type_spec_or_v is not v:
param_specs[k] = type_spec_or_v
elif callable(v):
callable_params[k] = v
else:
non_tensor_params[k] = v
# Construct the spec.
return cls(param_specs=param_specs,
non_tensor_params=non_tensor_params,
omit_kwargs=omit_kwargs,
prefer_static_value=prefer_static_value,
non_identifying_kwargs=non_identifying_kwargs,
callable_params=callable_params)
def _to_components(self, obj):
return _extract_init_kwargs(obj, limit_to=list(self._param_specs))
def _from_components(self, components):
kwargs = dict(
self._non_tensor_params, **self._callable_params, **components)
with _deferred_assertion_context():
return self.value_type(**kwargs)
@property
def _component_specs(self):
return self._param_specs
def _serialize(self):
if self._callable_params:
raise ValueError(
f'Cannot serialize object with callable parameters that are not '
f'`CompositeTensor`s: {self._callable_params.keys()}.')
return self._serializable
@classmethod
def _deserialize(cls, encoded):
version = encoded[0]
if version == 1:
encoded = encoded + ((),)
version = 2
if version == 2:
encoded = encoded + ((),)
version = 3
if version != _AUTO_COMPOSITE_TENSOR_VERSION:
raise ValueError(f'Expected version {_AUTO_COMPOSITE_TENSOR_VERSION},'
f' but got {version}.')
return cls(*encoded[1:])
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
ValueError: If the `_callable_params` attributes of `self` and `other` are
not equal.
"""
if type(self) is not type(other):
raise ValueError(
f'No TypeSpec is compatible with both {self} and {other}.')
# pylint: disable=protected-access
if self._callable_params != other._callable_params:
raise ValueError(f'Callable parameters must be identical. Saw '
f'{self._callable_params} and {other._callable_params}.')
merged = self._TypeSpec__most_specific_compatible_type_serialization(
self._comparable[:-1], other._comparable[:-1])
# pylint: enable=protected-access
return type(self)(*merged[1:], self._callable_params)
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
if not isinstance(spec_or_value, tf.TypeSpec):
spec_or_value = type_spec.type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self._TypeSpec__is_compatible(
self._comparable, spec_or_value._comparable) # pylint: disable=protected-access
def _with_tensor_ranks_only(self):
"""Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.
Returns:
A `TypeSpec` that is compatible with `self`, where any `TensorShape`
information has been relaxed to include only tensor rank (and not
the dimension sizes for individual axes).
"""
def relax(value):
if isinstance(value, tf.TypeSpec):
return value._with_tensor_ranks_only() # pylint: disable=protected-access
elif (isinstance(value, tf.TensorShape) and
value.rank is not None):
return tf.TensorShape([None] * value.rank)
else:
return value
return type(self)(
tf.nest.map_structure(relax, self._param_specs),
self._non_tensor_params,
self._omit_kwargs,
        self._prefer_static_value,
        self._non_identifying_kwargs,
        self._callable_params)
def __get_cmp_key(self):
return (type(self), self._TypeSpec__make_cmp_key(self._comparable))
def __repr__(self):
return '%s%r' % (
type(self).__name__, self._serializable + (self._callable_params,))
def __reduce__(self):
if self._callable_params:
raise ValueError(
f'Cannot serialize object with callable parameters that are not '
f'`CompositeTensor`s: {self._callable_params.keys()}.')
    return super(_AutoCompositeTensorTypeSpec, self).__reduce__()
def __eq__(self, other):
return (type(other) is type(self) and
self.__get_cmp_key() == other.__get_cmp_key()) # pylint: disable=protected-access
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.__get_cmp_key())
class AutoCompositeTensor(composite_tensor.CompositeTensor):
"""Recommended base class for `@auto_composite_tensor`-ified classes.
See details in `tfp.experimental.auto_composite_tensor` description.
"""
@property
def _type_spec(self):
# This property will be overwritten by the `@auto_composite_tensor`
# decorator. However, we need it so that a valid subclass of the `ABCMeta`
# class `CompositeTensor` can be constructed and passed to the
# `@auto_composite_tensor` decorator
pass
def auto_composite_tensor(
cls=None, omit_kwargs=(), non_identifying_kwargs=(), module_name=None):
"""Automagically generate `CompositeTensor` behavior for `cls`.
`CompositeTensor` objects are able to pass in and out of `tf.function` and
`tf.while_loop`, or serve as part of the signature of a TF saved model.
The contract of `auto_composite_tensor` is that all __init__ args and kwargs
must have corresponding public or private attributes (or properties). Each of
these attributes is inspected (recursively) to determine whether it is (or
contains) `Tensor`s or non-`Tensor` metadata. `list` and `tuple` attributes
are supported, but must either contain *only* `Tensor`s (or lists, etc,
thereof), or *no* `Tensor`s. E.g.,
- object.attribute = [1., 2., 'abc'] # valid
- object.attribute = [tf.constant(1.), [tf.constant(2.)]] # valid
- object.attribute = ['abc', tf.constant(1.)] # invalid
If the attribute is a callable, serialization of the `TypeSpec`, and therefore
interoperability with `tf.saved_model`, is not currently supported. As a
workaround, callables that do not contain or close over `Tensor`s may be
expressed as functors that subclass `AutoCompositeTensor` and used in place of
the original callable arg:
```python
@auto_composite_tensor(module_name='my.module')
class F(AutoCompositeTensor):
def __call__(self, *args, **kwargs):
return original_callable(*args, **kwargs)
```
Callable objects that do contain or close over `Tensor`s should either
(1) subclass `AutoCompositeTensor`, with the `Tensor`s passed to the
constructor, (2) subclass `CompositeTensor` and implement their own
`TypeSpec`, or (3) have a conversion function registered with
`type_spec.register_type_spec_from_value_converter`.
If the object has a `_composite_tensor_shape_parameters` field (presumed to
have `tuple` of `str` value), the flattening code will use
`tf.get_static_value` to attempt to preserve shapes as static metadata, for
fields whose name matches a name specified in that field. Preserving static
values can be important to correctly propagating shapes through a loop.
Note that the Distribution and Bijector base classes provide a
default implementation of `_composite_tensor_shape_parameters`, populated by
`parameter_properties` annotations.
If the decorated class `A` does not subclass `CompositeTensor`, a *new class*
will be generated, which mixes in `A` and `CompositeTensor`.
To avoid this extra class in the class hierarchy, we suggest inheriting from
`auto_composite_tensor.AutoCompositeTensor`, which inherits from
`CompositeTensor` and implants a trivial `_type_spec` @property. The
`@auto_composite_tensor` decorator will then overwrite this trivial
`_type_spec` @property. The trivial one is necessary because `_type_spec` is
an abstract property of `CompositeTensor`, and a valid class instance must be
created before the decorator can execute -- without the trivial `_type_spec`
property present, `ABCMeta` will throw an error! The user may thus do any of
the following:
#### `AutoCompositeTensor` base class (recommended)
```python
@tfp.experimental.auto_composite_tensor
class MyClass(tfp.experimental.AutoCompositeTensor):
...
mc = MyClass()
type(mc)
# ==> MyClass
```
#### No `CompositeTensor` base class (ok, but changes expected types)
```python
@tfp.experimental.auto_composite_tensor
class MyClass(object):
...
mc = MyClass()
type(mc)
# ==> MyClass_AutoCompositeTensor
```
#### `CompositeTensor` base class, requiring trivial `_type_spec`
```python
from tensorflow.python.framework import composite_tensor
@tfp.experimental.auto_composite_tensor
class MyClass(composite_tensor.CompositeTensor):
@property
def _type_spec(self): # will be overwritten by @auto_composite_tensor
pass
...
mc = MyClass()
type(mc)
# ==> MyClass
```
## Full usage example
```python
@tfp.experimental.auto_composite_tensor(omit_kwargs=('name',))
class Adder(tfp.experimental.AutoCompositeTensor):
def __init__(self, x, y, name=None):
with tf.name_scope(name or 'Adder') as name:
self._x = tf.convert_to_tensor(x)
self._y = tf.convert_to_tensor(y)
self._name = name
def xpy(self):
return self._x + self._y
def body(obj):
return Adder(obj.xpy(), 1.),
result, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(Adder(1., 1.),),
maximum_iterations=3)
result.xpy() # => 5.
```
Args:
cls: The class for which to create a CompositeTensor subclass.
omit_kwargs: Optional sequence of kwarg names to be omitted from the spec.
non_identifying_kwargs: Optional sequence of kwarg names to be omitted from
equality/comparison checks and the `__hash__` method of the spec.
module_name: The module name with which to register the `TypeSpec`. If
`None`, defaults to `cls.__module__`.
Returns:
composite_tensor_subclass: A subclass of `cls` and TF CompositeTensor.
"""
if cls is None:
return functools.partial(auto_composite_tensor,
omit_kwargs=omit_kwargs,
non_identifying_kwargs=non_identifying_kwargs,
module_name=module_name)
if module_name is None:
module_name = cls.__module__
type_spec_class_name = f'{cls.__name__}_ACTTypeSpec'
type_spec_name = f'{module_name}.{type_spec_class_name}'
try:
ts = type_spec.lookup(type_spec_name)
return ts.value_type.fget(None)
except ValueError:
pass
# If the declared class is already a CompositeTensor subclass, we can avoid
# affecting the actual type of the returned class. Otherwise, we need to
# explicitly mix in the CT type, and hence create and return a newly
# synthesized type.
if issubclass(cls, composite_tensor.CompositeTensor):
@type_spec.register(type_spec_name)
class _AlreadyCTTypeSpec(_AutoCompositeTensorTypeSpec):
@property
def value_type(self):
return cls
_AlreadyCTTypeSpec.__name__ = type_spec_class_name
def _type_spec(obj):
return _AlreadyCTTypeSpec.from_instance(
obj, omit_kwargs, non_identifying_kwargs)
cls._type_spec = property(_type_spec) # pylint: disable=protected-access
return cls
clsid = (cls.__module__, cls.__name__, omit_kwargs,
non_identifying_kwargs)
# Check for subclass if retrieving from the _registry, in case the user
# has redefined the class (e.g. in a REPL/notebook).
if clsid in _registry and issubclass(_registry[clsid], cls):
return _registry[clsid]
@type_spec.register(type_spec_name)
class _GeneratedCTTypeSpec(_AutoCompositeTensorTypeSpec):
@property
def value_type(self):
return _registry[clsid]
_GeneratedCTTypeSpec.__name__ = type_spec_class_name
class _AutoCompositeTensor(cls, composite_tensor.CompositeTensor):
"""A per-`cls` subclass of `CompositeTensor`."""
@property
def _type_spec(self):
return _GeneratedCTTypeSpec.from_instance(
self, omit_kwargs, non_identifying_kwargs)
_AutoCompositeTensor.__name__ = cls.__name__
_registry[clsid] = _AutoCompositeTensor
return _AutoCompositeTensor
|
py | 7dfdff9a0045427ad5f76517838e6ba80ba219af | #!/usr/bin/env python3
"""
Semantic class weights calculation for OmniDet
# usage: ./compute_class_weights.py --config data/params.yaml
# author: Varun Ravi Kumar <[email protected]>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import json
import os
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from data_loader.woodscape_loader import WoodScapeRawDataset
from main import collect_tupperware
printj = lambda dic: print(json.dumps(dic, indent=4))
def main():
args = collect_tupperware()
printj(args)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_visible_devices or -1
print(f"=> Loading {args.dataset.upper()} training dataset")
# --- Load Data ---
train_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
path_file=args.train_file,
is_train=False,
config=args)
train_loader = DataLoader(train_dataset,
args.batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False)
# Get class weights from the selected weighing technique
print(f"=> Weighing technique: {args.weighing} \n"
f"Computing class weights... \n"
f"This can take a while depending on the dataset size")
if args.weighing.lower() == 'enet':
class_weights = enet_weighing(train_loader, args.num_classes)
elif args.weighing.lower() == 'mfb':
class_weights = median_freq_balancing(train_loader, args.num_classes)
else:
class_weights = None
with np.printoptions(precision=2, suppress=True):
print(f"Class weights: {class_weights}")
def enet_weighing(dataloader, num_classes, c=1.02):
"""Computes class weights as described in the ENet paper: w_class = 1 / (ln(c + p_class)),
where c is usually 1.02 and p_class is the propensity score of that
class: propensity_score = freq_class / total_pixels.
References: https://arxiv.org/abs/1606.02147
Keyword arguments:
:param dataloader: A data loader to iterate over the dataset.
:param num_classes: The number of classes.
:param c: An additional hyper-parameter which restricts the interval of values for the weights. Default: 1.02.
"""
class_count = 0
total = 0
for inputs in tqdm(dataloader):
flat_label = inputs["motion_labels", 0, 0].cpu().numpy().flatten()
# Sum up the number of pixels of each class and the total pixel counts for each label
class_count += np.bincount(flat_label, minlength=num_classes)
total += flat_label.size
# Compute propensity score and then the weights for each class
propensity_score = class_count / total
class_weights = 1 / (np.log(c + propensity_score))
return class_weights
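# Illustrative check of the ENet formula on a toy label array (not part of the
# original script; numbers are made up):
#
#   import numpy as np
#   labels = np.array([0, 0, 0, 1, 2, 2])                         # toy flat label map
#   propensity = np.bincount(labels, minlength=3) / labels.size   # [0.5, 0.167, 0.333]
#   weights = 1 / np.log(1.02 + propensity)                       # rarer classes get larger weights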
def median_freq_balancing(dataloader, num_classes):
"""Computes class weights using median frequency balancing as described in https://arxiv.org/abs/1411.4734:
w_class = median_freq / freq_class,
where freq_class is the number of pixels of a given class divided by the total number of pixels in images where
that class is present, and median_freq is the median of freq_class.
:param dataloader: A data loader to iterate over the dataset whose weights are going to be
computed.
:param num_classes: The number of classes
"""
class_count = 0
total = 0
for inputs in tqdm(dataloader):
flat_label = inputs["motion_labels", 0, 0].cpu().numpy().flatten()
# Sum up the class frequencies
bincount = np.bincount(flat_label, minlength=num_classes)
# Create of mask of classes that exist in the label
mask = bincount > 0
# Multiply the mask by the pixel count. The resulting array has one element for each class.
# The value is either 0 (if the class does not exist in the label)
# or equal to the pixel count (if the class exists in the label)
total += mask * flat_label.size
# Sum up the number of pixels found for each class
class_count += bincount
# Compute the frequency and its median
freq = class_count / total
med = np.median(freq)
return med / freq
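# Illustrative check of median frequency balancing (hypothetical numbers): with
# class frequencies freq = [0.6, 0.3, 0.1], the median is 0.3, so the weights are
# median / freq = [0.5, 1.0, 3.0] -- the rarest class is weighted up the most.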
if __name__ == "__main__":
main()
|
py | 7dfe0048402830bb8f112d662e39dbe50a985955 | from PIL import Image
import os, glob
import numpy as np
import random, math
# Select the categories to classify
root_dir = "./image/"
categories = ["normal", "beni", "negi", "cheese"]
nb_classes = len(categories)
image_size = 50
# Load the image data --- (*1)
X = [] # image data
Y = [] # label data
def add_sample(cat, fname, is_train):
img = Image.open(fname)
img = img.convert("RGB") # カラーモードの変更
img = img.resize((image_size, image_size)) # 画像サイズの変更
data = np.asarray(img)
X.append(data)
Y.append(cat)
if not is_train: return
    # Add copies of the data at different angles
    # rotate a little at a time
for ang in range(-20, 20, 5):
img2 = img.rotate(ang)
data = np.asarray(img2)
X.append(data)
Y.append(cat)
# img2.save("gyudon-"+str(ang)+".png")
    # flip horizontally
img2 = img2.transpose(Image.FLIP_LEFT_RIGHT)
data = np.asarray(img2)
X.append(data)
Y.append(cat)
def make_sample(files, is_train):
global X, Y
X = []; Y = []
for cat, fname in files:
add_sample(cat, fname, is_train)
return np.array(X), np.array(Y)
# Collect the files separated by directory --- (*2)
allfiles = []
for idx, cat in enumerate(categories):
image_dir = root_dir + "/" + cat
files = glob.glob(image_dir + "/*.jpg")
for f in files:
allfiles.append((idx, f))
# Shuffle and split into training and test data --- (*3)
random.shuffle(allfiles)
th = math.floor(len(allfiles) * 0.6)
train = allfiles[0:th]
test = allfiles[th:]
X_train, y_train = make_sample(train, True)
X_test, y_test = make_sample(test, False)
xy = (X_train, X_test, y_train, y_test)
np.save("./image/gyudon2.npy", xy)
print("ok,", len(y_train))
|
py | 7dfe009fa19b94245acc4b59e189453929adfc9d | import os
import time
# -*- coding:utf-8 -*-
'''
-----------------------------Cpu-----------------------------
'''
# Cpu use
last_worktime=0
last_idletime=0
def get_cpu_use():
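    # CPU load is estimated from /proc/stat deltas between calls: work time is
    # user + nice + system (fields 2-4 after the split), idle time is field 5,
    # and usage is the share of work time in the elapsed total since the last call.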
global last_worktime, last_idletime
f=open("/proc/stat","r")
line=""
while not "cpu " in line:
line=f.readline()
f.close()
spl=line.split(" ")
worktime=int(spl[2])+int(spl[3])+int(spl[4])
idletime=int(spl[5])
dworktime=(worktime-last_worktime)
didletime=(idletime-last_idletime)
rate=float(dworktime)/(didletime+dworktime)
cpu_t = rate*100
last_worktime=worktime
last_idletime=idletime
if(last_worktime==0):
return 0
return round(cpu_t,2)
# Cpu freq
def get_cpu_freq():
freq_str = os.popen('cat /proc/cpuinfo | grep MHz | uniq').read()
freqs =[]
while freq_str.find('\n') != -1:
freqs.append(float(freq_str[freq_str.find(':')+2:freq_str.find('\n')]))
freq_str=freq_str[freq_str.find('\n')+1:]
return max(freqs)
# Cpu temp
def get_cpu_temp():
if os.path.isfile('/sys/class/thermal/thermal_zone0/temp'):
temp_str = os.popen('cat /sys/class/thermal/thermal_zone0/temp').read()
elif os.path.isfile('/sys/class/hwmon/hwmon0/device/hwmon/hwmon0/temp1_input'):
temp_str = os.popen('cat /sys/class/hwmon/hwmon0/device/hwmon/hwmon0/temp1_input').read()
elif os.path.isfile('/sys/class/hwmon/hwmon0/device/hwmon0/temp1_input'):
temp_str = os.popen('cat /sys/class/hwmon/hwmon0/device/hwmon0/temp1_input').read()
else:
return -1
return (float(temp_str)/1000)
'''
-----------------------------Men-----------------------------
'''
def get_mem_use():
mem_str = os.popen('free').read()
    if '内存' in mem_str:
mem_str = mem_str[mem_str.find('内存:')+3:mem_str.find('交换')]
else:
mem_str = mem_str[mem_str.find('Mem:')+4:mem_str.find('Swap')]
mem_str = mem_str.split()
total = int(float(mem_str[0])/1024)
used = int(float(mem_str[1])/1024)
percent = int(used/total*100)
return total,used,percent
def get_swap_use():
mem_str = os.popen('free').read()
if '内存' in mem_str:
mem_str = mem_str[mem_str.find('交换:')+3:]
else:
mem_str = mem_str[mem_str.find('Swap:')+5:]
mem_str = mem_str.split()
total = int(float(mem_str[0])/1024)
used = int(float(mem_str[1])/1024)
percent = int(used/total*100) if total > 0 else 0  # avoid ZeroDivisionError on hosts without swap
return total,used,percent
'''
-----------------------------Gpu-----------------------------
'''
gpus_str = os.popen('nvidia-smi -L').read()
gpus =[]
if 'communicate with the NVIDIA driver' not in gpus_str:
while gpus_str.find('\n') != -1:
gpus.append(gpus_str[gpus_str.find(':')+2:gpus_str.find('(')-1])
gpus_str=gpus_str[gpus_str.find('\n')+1:]
def get_gpu_use():
gpu_infos = []
out_string = os.popen('nvidia-smi').read()
# gpu infos
for i in range(len(gpus)):
infos_str = out_string
infos_str = infos_str[infos_str.find(str(i)+' '+gpus[i][:10]):]
infos_str = infos_str[infos_str.find('\n')+1:]
infos_str = infos_str[:infos_str.find('\n')+1]
infos_str = infos_str.split()
#['|', '50%', '42C', 'P0', '19W', '/', '75W', '|', '929MiB', '/', '5050MiB', '|', '14%', 'Default', '|']
if infos_str[1].replace('%','') == 'N/A':
fan = -1
else:
fan = int(infos_str[1].replace('%','')) # %
temp = int(infos_str[2].replace('C','')) # C
if infos_str[4] == 'N/A':
power_used = -1
power_max = -1
else:
power_used = int(infos_str[4].replace('W','')) if infos_str[4] !='N/A' else 0 # W
power_max = int(infos_str[6].replace('W','')) if infos_str[6] !='N/A' else -1 # W
# power_max = int(infos_str[6].replace('W',''))
mem_used = int(infos_str[8].replace('MiB','')) # MB
mem_max = int(infos_str[10].replace('MiB','')) # MB
util_used = int(infos_str[12].replace('%','')) if infos_str[12] !='N/A' else 0 # %
gpu_infos.append([fan,temp,power_used,power_max,mem_used,mem_max,util_used])
# cuda infos
infos_str = out_string
infos_str = infos_str.split('\n')
for line in infos_str:
if 'NVIDIA-SMI' in line:
cuda_infos = '\033[1;37m'+line.replace('|','')[1:]+'\033[0m'
# task_infos
infos_str = out_string
infos_str = infos_str[infos_str.find('Processes'):]
infos_str = infos_str[infos_str.find('\n')+1:]
task_infos = '\033[1;37m'+infos_str[:infos_str.find('\n')+1].replace('|', '')+'\033[0m'
infos_str = infos_str[infos_str.find('======'):]
infos_str = infos_str[infos_str.find('\n')+1:]
infos_str = infos_str[:infos_str.find('+-------------------')-1]
infos_str = infos_str.replace('|','')
infos_str = infos_str.split('\n')
print_flag = False
for line in infos_str:
line_split = line.split()
if 'No running' not in line and '+-------' not in line and line_split and float(line_split[-1].replace('MiB','')) > 500:
task_infos = task_infos+line+'\n'
print_flag = True
if not print_flag:
task_infos=''
return gpu_infos,cuda_infos,task_infos
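# Each gpu_infos entry is [fan %, temp C, power used W, power max W, mem used MiB, mem total MiB, util %],
# matching the columns parsed from the nvidia-smi table above.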
'''
-----------------------------Task-----------------------------
'''
def get_task_info():
stream = os.popen('top -n 1')._stream
out_string = stream.buffer.read().decode('utf-8',"ignore")
out_string = out_string.split('\n')
infos = ''
if len(out_string) > 10:
infos = '\033[1;37m PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND \033[0m\n'
for i in range(3):
infos += (out_string[7+i]+'\n')
return infos
'''
-----------------------------Network-----------------------------
'''
net_infos_history = [0,0]
def get_net_use(t_cost,network_adapter = 'all'):
net_str = os.popen('cat /proc/net/dev').read()
infos = []
net_infos = [0,0,0,0]
while net_str.find('\n') != -1:
if net_str[:50].find(':') != -1:
infos.append((net_str[net_str.find(':')+2:net_str.find('\n')-1]).split())
net_str=net_str[net_str.find('\n')+1:]
net_rxs=[];net_txs=[]
for info in infos:
net_rxs.append(int(info[0]))
net_txs.append(int(info[8]))
if network_adapter == 'all':
net_infos[0] = (sum(net_rxs)/1024)
net_infos[1] = (sum(net_txs)/1024)
elif network_adapter == 'auto':
net_infos[0] = (max(net_rxs)/1024)
net_infos[1] = (max(net_txs)/1024)
if net_infos_history[0] == 0:
net_infos[2] = 0
net_infos[3] = 0
else:
net_infos[2] = (net_infos[0]-net_infos_history[0])/t_cost
net_infos[3] = (net_infos[1]-net_infos_history[1])/t_cost
net_infos_history[0] = net_infos[0]
net_infos_history[1] = net_infos[1]
return net_infos
'''
-----------------------------Disk-----------------------------
'''
def get_disk_use():
disk_str = os.popen('df -h').read()
disk_str = disk_str.split('\n')
disk_infos = []
# print(disk_str)
allows = ['/home','/media','/sd'] # and '/'
for line in disk_str:
info = line.split()
if info != []:
for allow in allows:
if allow in line[38:] or info[5]=='/' or 'T' in info[1]:
info[4] = auto_color(info[4], int(info[4].replace('%','')))
disk_infos.append(info)
break
return disk_infos
'''
-----------------------------other tools-----------------------------
'''
def get_bar(percent,num = 25):
bar = '['
for i in range(num):
if i < round(percent/(100/num)):
bar += '#'
else:
bar += '-'
bar += ']'
bar = '{0:>5.1f}% '.format(percent)+bar
bar = auto_color(bar, percent)
return bar
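# Example (sketch): get_bar(42.0, num=10) builds " 42.0% [####------]" and auto_color wraps it
# in green ANSI codes because the value is below the 70% threshold.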
def fill_str(string,num):
if len(string)>num:
return string[:num-3]+'...'
for i in range(num-len(string)):
string +=' '
return string
def auto_color(string,percent):
if 0<=percent<=70:
string = change_color(string, 'green')
elif 70<percent<=90:
string = change_color(string, 'yellow')
else:
string = change_color(string, 'red')
return string
def change_color(string,color):
if color =='red':
string = '\033[1;31m'+string+'\033[0m'
elif color == 'green':
string = '\033[1;32m'+string+'\033[0m'
elif color == 'yellow':
string = '\033[1;33m'+string+'\033[0m'
if color =='white':
string = '\033[1;37m'+string+'\033[0m'
return string
'''
-----------------------------main-----------------------------
'''
def main():
t_cost = 0.5
sleep_time = 0.5
smooth = 10
smooth_gpu_infosss = []
while(1):
t_start = time.time()
#cpu
cpu_used = get_cpu_use()
cpu_freq = get_cpu_freq()
cpu_temp = get_cpu_temp()
cpu_used_bar = get_bar(cpu_used,num=65)
task_infos = get_task_info()
#memory
mem_total,mem_used,mem_percent = get_mem_use()
mem_used_bar = get_bar(mem_percent)
swap_total,swap_used,swap_percent = get_swap_use()
swap_used_bar = get_bar(swap_percent)
#gpu
util_used_bars=[];gpu_mem_bars=[]
gpu_infoss,cuda_infos,gpu_task_infos = get_gpu_use()
if len(smooth_gpu_infosss) < smooth:
smooth_gpu_infosss.append(gpu_infoss)
else:
smooth_gpu_infosss[:smooth-1] = smooth_gpu_infosss[1:smooth]
smooth_gpu_infosss[smooth-1] = gpu_infoss
smooth_gpu_utils = [];smooth_gpu_powers = []
for i in range(len(gpus)):
utils = []; powers = []
for j in range(len(smooth_gpu_infosss)):
utils.append(smooth_gpu_infosss[j][i][6])
powers.append(smooth_gpu_infosss[j][i][2])
smooth_gpu_utils.append(sum(utils)/len(utils))
smooth_gpu_powers.append(int(sum(powers)/len(powers)))
for i in range(len(gpus)):
gpu_infos = gpu_infoss[i]
util_used_bars.append(get_bar(smooth_gpu_utils[i]))
gpu_mem_bars.append(get_bar(100*gpu_infoss[i][4]/gpu_infoss[i][5]))
#net
net_infos = get_net_use(t_cost)
#disk
disk_infos = get_disk_use()
#-----------------------------print-----------------------------
print_str = ''
#cpu memory
print_str += ('\033[1;37mCpu-T: {0:.1f}C | Freq: {1:.1f}MHz | Mem: {2:d}MB/{3:d}MB | Swap: {4:d}MB/{5:d}MB\033[0m\n').format(
cpu_temp,cpu_freq,mem_used,mem_total,swap_used,swap_total)
print_str += ('Cpu: '+cpu_used_bar+'\n')
print_str += ('Mem: '+mem_used_bar+' Swap:'+swap_used_bar+'\n')
# Task
print_str += (task_infos+'\n')
#gpu
print_str += (cuda_infos+'\n')
for i in range(len(gpus)):
print_str +=(('\033[1;37mGpu'+'{0:d}'+': '+gpus[i].replace('GeForce','').replace(' RTX','').replace(' ','').replace('GPU','')+' Temp: {1:.1f}C | Power: {2:>3d}w/{3:d}w | Mem: {4:>5d}MB/{5:d}MB | Fan: {6:d}%\033[0m').format(
i,gpu_infoss[i][1],smooth_gpu_powers[i],gpu_infoss[i][3],
gpu_infoss[i][4],gpu_infoss[i][5],gpu_infoss[i][0])+'\n')
print_str += ('Util:'+util_used_bars[i]+' Mem:'+gpu_mem_bars[i]+'\n')
print_str += (gpu_task_infos+'\n')
#net
print_str += (('\033[1;37mNetwork ↑ all:{0:.1f}GB ↓ all:{1:.1f}GB ↑ :{2:.1f}Kb/s ↓ :{3:.1f}Kb/s\033[0m').format(
net_infos[1]/1024/1024,net_infos[0]/1024/1024,net_infos[3],net_infos[2])+'\n')
#disk
print_str += ('\n\033[1;37mFilesystem Mounted on Used/Total Used%\033[0m'+'\n')
for disk_info in disk_infos:
print_str += (fill_str(disk_info[0], 23)+fill_str(disk_info[5], 23)+ \
fill_str(disk_info[2]+'/'+disk_info[1], 23)+fill_str(disk_info[4], 15)+'\n')
print("\033c", end="")
print(print_str,end="")
time.sleep(sleep_time)
t_end = time.time()
t_cost = t_end-t_start
if __name__ == '__main__':
main()
|
py | 7dfe012450cf7679377c58c795273804ee503fe2 | # ------------------------------- IMPORT MODULES & SETUP ------------------------------------------------
# Standard Libraries
import os
# os.environ['OMP_NUM_THREADS'] = '48'
import math
import json
import datetime
import numpy as np
import time
import nvtx
import argparse
# ------------------------------- CUSTOM FUNCTIONS ------------------------------------------------
# Custom Library
import sys
sys.path.append('../')
from proxy_apps.apps.timeseries_prediction import deepDMD, proxyDeepDMD, proxyDeepDMDMGPU, proxyDeepDMDPyTorch, proxyDeepDMDPyTorchJIT, hyperparameters
from proxy_apps.utils.tf import TimingCallback
from proxy_apps.utils.data.main import NpEncoder
from proxy_apps.utils import file_reader, path_handler
from proxy_apps.utils.data.grid import GridNetworkDataHandler, GridNetworkTFDataHandler, GridNetworkNewGen, GridDataGenPyTorch
# ------------------------------- PATH & LOGGER SETUP ------------------------------------------------
# Parse Arguments
parser = argparse.ArgumentParser(description='Run Time Series Prediction')
parser.add_argument("--model_name", choices=["Baseline", "TFDataGen", "TFDataOptMGPU", "TFDataOptMGPUAcc", "PyTorch", "PyTorchOpt"], type=str,
help="which implementation to run", required=True)
parser.add_argument("--platform", choices=["gpu", "cpu"], type=str, help="name of the platform (cpu/gpu)", required=True)
parser.add_argument("--machine_name", type=str, help="name of the machine", required=True)
parser.add_argument("--n_gpus", type=int, help="number of GPUs", default=0)
parser.add_argument("--n_cpus", type=int, help="number of CPUs", default=1)
parser.add_argument("--n_epochs", type=int, help="number of epochs", default=10)
parser.add_argument("--batch_size", type=int, help="batch size", default=1024)
parser.add_argument("--tensorboard", type=int, choices=[0, 1], help="whether to store tensorboard output")
parser.add_argument("--mixed_precision", type=int, choices=[0, 1], help="whether to turn on mixed precision or not")
args = parser.parse_args()
# System Setup
config = file_reader.read_config('config_baseline.json')
_N_EPOCHS = args.n_epochs
_BATCH_SIZE = args.batch_size
_APP_NAME = config["info"]["app_name"]
_NROWS = int(config["data"]["n_rows"])
_NCOLS = int(config["data"]["n_cols"])
_REPEAT_COLS = int(config["data"]["repeat_cols"])
_WINDOW_SIZE = int(config["data"]["window_size"])
_SHIFT_SIZE = int(config["data"]["shift_size"])
_STRIDE = int(config["data"]["stride"])
_N_SIGNALS = int(config["data"]["n_signals"])
_DTYPE = config["model"]["dtype"]
_MIXED_PRECISION = bool(args.mixed_precision)
_N_GPUS = args.n_gpus
_N_CPUS = args.n_cpus
_LABEL = args.model_name
_SUFFIX = args.platform + '_' + \
args.machine_name + '_' + \
'ng' + str(_N_GPUS) + '_' + \
'nc' + str(_N_CPUS) + '_' + \
'e' + str(_N_EPOCHS) + '_' + \
'b' + str(_BATCH_SIZE) + '_' + \
'r' + str(_REPEAT_COLS) + '_' + \
'mp' + str(args.mixed_precision) + '_' + _LABEL
performance_dict = dict()
# current directory
curr_dir = os.path.dirname(os.path.realpath(__file__))
# output directory
data_dir = path_handler.get_absolute_path(curr_dir, config["info"]["data_dir"] + config["info"]["name"] + "/" + config["info"]["app_name"] + "/" + _DTYPE + "/R" + str(_REPEAT_COLS) + "/")
if not os.path.exists(data_dir): os.makedirs(data_dir)
model_dir = path_handler.get_absolute_path(curr_dir, config["model"]["model_dir"] + config["info"]["name"] + "/" + config["info"]["app_name"] + "/" + _DTYPE + "/R" + str(_REPEAT_COLS) + "/")
if not os.path.exists(model_dir): os.makedirs(model_dir)
# ------------------------------- TensorFlow/PyTorch SETUP ------------------------------------------------
if _LABEL in ["PyTorch", "PyTorchOpt"]:
import torch
print("[INFO] PyTorch version: ", torch.__version__)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
if _DTYPE == "float64": torch.set_default_dtype(torch.float64)
else: torch.set_default_dtype(torch.float32)
if _MIXED_PRECISION:
# set floatx
_DTYPE = "float32"
torch.set_default_dtype(torch.float32)
def get_indexer(n_rows, window_size, shift_size, start_point, leave_last):
return np.arange(window_size)[None, :] + start_point + shift_size*np.arange(((n_rows - window_size - leave_last - start_point) // shift_size) + 1)[:, None]
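# Example (sketch): get_indexer(n_rows=10, window_size=3, shift_size=2, start_point=0, leave_last=0)
# returns [[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]] -- overlapping row-index windows used to slice the series.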
else:
import tensorflow as tf
# TensorFlow Setup
print("[INFO] Tensorflow version: ", tf.__version__)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
print("Name:", gpu.name, " Type:", gpu.device_type)
# tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
for gpu in logical_gpus:
print("Name:", gpu.name, " Type:", gpu.device_type)
# tf.compat.v1.disable_eager_execution()
print("[INFO] Eager mode: ", tf.executing_eagerly()) # For easy reset of notebook state.
# Setup TensorFlow
tf.keras.backend.clear_session()
if _LABEL not in ["Baseline"]: tf.config.optimizer.set_jit(True) # Enable XLA.
# Setup Precision
if _LABEL in ["Baseline"]:
tf.keras.backend.set_floatx('float64')
def get_indexer(n_rows, window_size, shift_size, start_point, leave_last):
return np.arange(window_size)[None, :] + start_point + shift_size*np.arange(((n_rows - window_size - leave_last - start_point) // shift_size) + 1)[:, None]
elif _LABEL in ["TFDataGen", "TFDataOptMGPU", "TFDataOptMGPUAcc"]:
tf.keras.backend.set_floatx(_DTYPE)
if _MIXED_PRECISION:
# set floatx
_DTYPE = 'float32'
tf.keras.backend.set_floatx(_DTYPE)
# set policy
policy = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(policy)
# check dtypes
print('Compute dtype: %s' % policy.compute_dtype)
print('Variable dtype: %s' % policy.variable_dtype)
@tf.function(experimental_compile=True)
def get_indexer(n_rows, window_size, shift_size, start_point, leave_last):
return np.arange(window_size)[None, :] + start_point + shift_size*np.arange(((n_rows - window_size - leave_last - start_point) // shift_size) + 1)[:, None]
# Mirror Strategy for MGPUs
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]:
mirrored_strategy = tf.distribute.MirroredStrategy()
# To avoid GPU Congestion
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
# os.environ['TF_CUDNN_DETERMINISTIC']='1'
# ------------------------------- CREATE DATA HANDLER ------------------------------------------------
data_handler_nvtx = nvtx.start_range("Create Data Handler")
dh_start = time.time()
if _LABEL in ["Baseline", "TFDataOpt"]:
data_handler = GridNetworkDataHandler(scenario_dir=path_handler.get_absolute_path(curr_dir, config["info"]["input_dir"]),
n_rows=_NROWS,
n_cols=_NCOLS,
repeat_cols=_REPEAT_COLS,
dtype=_DTYPE
)
scenario_data = data_handler.load_grid_data()
# ------------------------------- DATA PREPROCESSING ------------------------------------------------
X_data, Y_data = data_handler.create_windows(scenario_data)
# ------------------------------- DATA NORMALIZATION ------------------------------------------------
X_array, Y_array = data_handler.scale_data(X_data, Y_data)
elif _LABEL in ["TFDataGen", "TFDataOptMGPU", "TFDataOptMGPUAcc"]:
data_handler = GridNetworkNewGen(scenario_dir=path_handler.get_absolute_path(curr_dir, config["info"]["input_dir"]),
n_rows=_NROWS,
n_cols=_NCOLS,
repeat_cols=_REPEAT_COLS,
d_type=_DTYPE
)
x_indexer = get_indexer(_NROWS, _WINDOW_SIZE, _SHIFT_SIZE, 0, _N_SIGNALS)
y_indexer = get_indexer(_NROWS, _WINDOW_SIZE, _SHIFT_SIZE, 1, 0)
scenario_data = data_handler.get_training_data(x_indexer, y_indexer)
elif _LABEL in ["PyTorch", "PyTorchOpt"]:
scenario_dir=path_handler.get_absolute_path(curr_dir, config["info"]["input_dir"])
dir_list = [scenario_dir + "/" + f + "/" for f in os.listdir(scenario_dir)]
x_indexer = get_indexer(_NROWS, _WINDOW_SIZE, _SHIFT_SIZE, 0, _N_SIGNALS)
y_indexer = get_indexer(_NROWS, _WINDOW_SIZE, _SHIFT_SIZE, 1, 0)
dataset = GridDataGenPyTorch(dir_list, _NROWS, _NCOLS, _REPEAT_COLS, x_indexer, y_indexer, d_type=_DTYPE)
if _LABEL == "PyTorch":
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=_BATCH_SIZE)
elif _LABEL == "PyTorchOpt":
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=_BATCH_SIZE, pin_memory=True, num_workers=int(_N_CPUS))
dh_stop = time.time()
performance_dict['data_handler_time'] = dh_stop-dh_start
nvtx.end_range(data_handler_nvtx)
# ------------------------------- CALLBACKS FOR TensorFlow ------------------------------------------------
if _LABEL not in ["PyTorch", "PyTorchOpt"]:
# timing callback
timing_cb = TimingCallback()
# Stopping criteria if the training loss doesn't go down by 1e-3
early_stop_cb = tf.keras.callbacks.EarlyStopping(
monitor='loss', min_delta = 1e-3, verbose = 1, mode='min', patience = 3,
baseline=None, restore_best_weights=True)
# all callbacks
callbacks=[]# [early_stop_cb, timing_cb]
if _LABEL in ["Baseline", "TFDataGen"]:
callbacks.append(timing_cb)
if args.tensorboard==1:
# Create a TensorBoard Profiler
logs = path_handler.get_absolute_path(curr_dir, config["model"]["tb_log_dir"] + _APP_NAME + "/" + _DTYPE + "/R" + str(_REPEAT_COLS) + "/tensorboard/" + _SUFFIX)
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=logs, histogram_freq=1, embeddings_freq=1, profile_batch=(1,20))
callbacks.append(tb_callback)
# Initialize Hyperparameters - we can keep it as a dict instead of creating a separate class
hyper_param_dict = config["model"]["hyperparameters"]
hyper_param_dict['original_dim'] = _REPEAT_COLS * _NCOLS # input data dimension
hyper_param_dict['num_epochs'] = _N_EPOCHS # Number of epochs
hyper_param_dict['batch_size'] = _BATCH_SIZE
hyper_param_dict['dtype'] = _DTYPE
hp = hyperparameters.HyperParameters(hyper_param_dict)
hp.model_name = _LABEL
performance_dict["n_epochs"] = hp.ep
performance_dict["batch_size"] = hp.bs
if _LABEL == "Baseline":
# ------------------------------- MODEL TRAINING ------------------------------------------------
# Initialize, build, and fit the model
K_model = deepDMD.NeuralNetworkModel(hp)
K_model.compile(optimizer=tf.optimizers.Adagrad(hp.lr))
# training
model_training_nvtx = nvtx.start_range("Training")
m_start = time.time()
history = K_model.fit([X_array, Y_array], batch_size=hp.bs,
epochs=hp.ep,
callbacks=callbacks,
shuffle=True)
m_stop = time.time()
nvtx.end_range(model_training_nvtx)
# print info
print('[INFO]: Time taken for model training (time module):', m_stop - m_start, 'seconds')
print('[INFO]: Time taken for model training (Keras):', sum(timing_cb.logs), 'seconds')
performance_dict['training_time_module'] = (m_stop - m_start)
performance_dict['training_time_epoch_wise'] = timing_cb.logs
performance_dict['training_loss'] = history.history['loss']
K_model.save(path_handler.get_absolute_path(model_dir, _SUFFIX))
elif _LABEL in ["TFDataGen", "TFDataOptMGPU", "TFDataOptMGPUAcc"]:
# ------------------------------- MODEL TRAINING ------------------------------------------------
# Initialize and build the model
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]:
with mirrored_strategy.scope():
K_model = proxyDeepDMDMGPU.Encoder(hp)
optimizer = tf.optimizers.Adagrad(hp.lr)
if _MIXED_PRECISION: optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
K_model.compile(optimizer=optimizer)
else:
K_model = proxyDeepDMD.NeuralNetworkModel(hp, mixed_precision=_MIXED_PRECISION)
optimizer = tf.optimizers.Adagrad(hp.lr)
if _MIXED_PRECISION: optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
K_model.compile(optimizer=optimizer)
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]:
hp.bs = hp.bs * mirrored_strategy.num_replicas_in_sync
print(mirrored_strategy.num_replicas_in_sync)
# generate dataset
data_options = tf.data.Options()
data_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
zip_data = scenario_data.with_options(data_options).batch(hp.bs)
training_dataset = zip_data.cache()
shuffle_buffer_size = x_indexer.shape[0] * x_indexer.shape[1] * len(data_handler.dir_list) // hp.bs
if _LABEL in ["TFDataOptMGPUAcc"]:
shuffle_buffer_size = mirrored_strategy.num_replicas_in_sync * shuffle_buffer_size
training_dataset = training_dataset.repeat(mirrored_strategy.num_replicas_in_sync)
training_dataset = training_dataset.shuffle(buffer_size=shuffle_buffer_size)
training_dataset = training_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# for multi-gpu, split the data
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]:
training_dataset = mirrored_strategy.experimental_distribute_dataset(training_dataset)
# compile and fit model
model_training_nvtx = nvtx.start_range("Training")
m_start = time.time()
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]:
trainer = proxyDeepDMDMGPU.NeuralNetworkModel(hp, K_model, mixed_precision=_MIXED_PRECISION)
all_loss, epoch_time, avg_batch_time = trainer.fit(training_dataset, n_epochs=hp.ep, batch_size=hp.bs, steps_per_epoch=shuffle_buffer_size)
else:
K_model.encoder.build(input_shape=(None, _REPEAT_COLS * _NCOLS))
history = K_model.fit(training_dataset, epochs=hp.ep, callbacks=callbacks, workers=_N_CPUS, use_multiprocessing=True)
all_loss = history.history['loss']
m_stop = time.time()
nvtx.end_range(model_training_nvtx)
# print loss
print("Loss Values:", all_loss)
# print info
print('[INFO]: Time taken for model training (time module):', m_stop - m_start, 'seconds')
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]: print('[INFO]: Time taken for model training (Keras):', sum(epoch_time), 'seconds')
else: print('[INFO]: Time taken for model training (Keras):', sum(timing_cb.logs), 'seconds')
performance_dict['training_time_module'] = (m_stop - m_start)
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]: performance_dict['training_time_epoch_wise'] = epoch_time
else: performance_dict['training_time_epoch_wise'] = timing_cb.logs
performance_dict['training_loss'] = all_loss
if _LABEL in ["TFDataOptMGPU", "TFDataOptMGPUAcc"]: trainer.encoder.save(path_handler.get_absolute_path(model_dir, _SUFFIX))
else: K_model.encoder.save(path_handler.get_absolute_path(model_dir, _SUFFIX))
elif _LABEL in ["PyTorch", "PyTorchOpt"]:
# ------------------------------- MODEL TRAINING ------------------------------------------------
use_amp = False
if _MIXED_PRECISION: use_amp = True
# if _LABEL in ["PyTorch"]:
K_model = proxyDeepDMDPyTorch.Encoder(hp, device, use_amp)
# elif _LABEL in ["PyTorchOpt"]:
# K_model = torch.jit.script(proxyDeepDMDPyTorchJIT.Encoder(hp))
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
K_model = torch.nn.DataParallel(K_model)
K_model.to(device)
optimizer = torch.optim.Adagrad(K_model.parameters(), lr=hp.lr)
# fit model
model_training_nvtx = nvtx.start_range("Training")
m_start = time.time()
# if _LABEL in ["PyTorch"]:
trainer = proxyDeepDMDPyTorch.NeuralNetworkModel(hp, K_model, optimizer, device, use_amp)
# elif _LABEL in ["PyTorchOpt"]:
# trainer = proxyDeepDMDPyTorchJIT.NeuralNetworkModel(hp, K_model, optimizer, device, use_amp)
all_loss, epoch_time, avg_batch_time = trainer.fit(train_dataloader)
m_stop = time.time()
nvtx.end_range(model_training_nvtx)
# print loss
all_loss = [x.cpu().data.numpy() for x in all_loss]
print("Loss Values:", all_loss)
# print info
print('[INFO]: Time taken for model training (time module):', m_stop - m_start, 'seconds')
print('[INFO]: Time taken for model training (Keras):', sum(epoch_time), 'seconds')
performance_dict['training_time_module'] = (m_stop - m_start)
performance_dict['training_time_epoch_wise'] = epoch_time
performance_dict['training_loss'] = all_loss
# if _LABEL in ["PyTorch"]:
torch.save(trainer.encoder, path_handler.get_absolute_path(model_dir, _SUFFIX))
# elif _LABEL in ["PyTorchOpt"]:
# torch.jit.save(trainer.encoder, path_handler.get_absolute_path(model_dir, _SUFFIX))
# ------------------------------- SAVE PERFORMANCE DICT ------------------------------------------------
with open(path_handler.get_absolute_path(data_dir, "training_performance_" + _SUFFIX + ".json"), 'w') as fp:
json.dump(performance_dict, fp, cls=NpEncoder)
|
py | 7dfe01c34819dc03b711b4e70868f1e4f8b27ea8 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteCircuit(Resource):
"""ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState state of
the resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState state of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2018_02_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, sku=None, allow_classic_operations: bool=None, circuit_provisioning_state: str=None, service_provider_provisioning_state=None, authorizations=None, peerings=None, service_key: str=None, service_provider_notes: str=None, service_provider_properties=None, provisioning_state: str=None, gateway_manager_etag: str=None, **kwargs) -> None:
super(ExpressRouteCircuit, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.sku = sku
self.allow_classic_operations = allow_classic_operations
self.circuit_provisioning_state = circuit_provisioning_state
self.service_provider_provisioning_state = service_provider_provisioning_state
self.authorizations = authorizations
self.peerings = peerings
self.service_key = service_key
self.service_provider_notes = service_provider_notes
self.service_provider_properties = service_provider_properties
self.provisioning_state = provisioning_state
self.gateway_manager_etag = gateway_manager_etag
self.etag = None
|
py | 7dfe02c3e0edf68c6ab82b0a41965a33d9ebdf3e | '''
File: get_landsat8_aws.py
Author: Min Feng
Version: 0.1
Create: 2017-12-29 15:53:59
Description: download Landsat 8 images from AWS
'''
import logging
def _download(url, f_out):
logging.info('downloading URL %s' % url)
# return _get_s3_from_url(url, f_out)
from gio import run_commands
import os
_cmd = 'wget %s' % url
_d_out = os.path.dirname(f_out)
run_commands.run(_cmd, cwd=_d_out)
_f_out = os.path.join(_d_out, url.split('/')[-1])
if _f_out != f_out:
import shutil
shutil.move(_f_out, f_out)
return
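# NOTE: everything below in this function is unreachable legacy fallback (the function returns above);
# it is kept only as a reference to the earlier requests-based download path.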
import requests
import shutil
_num = 0
try:
with open(f_out, 'wb') as _fo:
_r = requests.get(url, stream=True)
print(_r.headers['content-length'])
shutil.copyfileobj(_r.raw, _fo)
except Exception as _err:
_num += 1
if _num > 3:
raise _err
# import urllib2
# with open(f_out, 'wb') as _fo:
# _num = 0
# try:
# _fo.write(urllib2.urlopen(url).read())
# except Exception, _err:
# _num += 1
# if _num > 3:
# raise _err
def _get_s3_from_url(url, f_out):
import urllib.parse
_p = urllib.parse.urlparse(url).path
if len(_p) > 1:
if _p.startswith('/'):
_p = _p[1:]
return _get_s3(_p, f_out)
def _get_s3(key, f_out):
import boto
_s3 = boto.connect_s3()
_bk = _s3.get_bucket('landsat-pds')
_kk = _bk.get_key(key)
if _kk is None:
raise Exception('failed found key %s' % key)
_t = f_out + '.bak'
with open(_t, 'wb') as _fo:
_kk.get_contents_to_file(_fo)
import shutil
shutil.move(_t, f_out)
def get_b8(sid, d_out=None):
import os
from gio import landsat
_fs = {}
_d_tmp = d_out
os.path.exists(_d_tmp) or os.makedirs(_d_tmp)
_id = landsat.parse(sid)
_c1 = 'c1/' if 'LC08_' in str(sid) else ''
for _b in ([10] + list(range(1, 8)) + [9, 11]):
_fid = '%s_B%s.TIF' % (sid, _b)
_fot = os.path.join(_d_tmp, _fid)
_fs[_b] = _fot
if os.path.exists(_fot):
continue
_url = 'http://landsat-pds.s3.amazonaws.com/%sL8/%03d/%03d/%s/%s' % \
(_c1, _id.path, _id.row, sid, _fid)
_download(_url, _fot)
for _b in ['MTL.txt']:
_fid = '%s_%s' % (sid, _b)
_fot = os.path.join(_d_tmp, _fid)
_fs[_b] = _fot
if os.path.exists(_fot):
continue
_url = 'http://landsat-pds.s3.amazonaws.com/%sL8/%03d/%03d/%s/%s' % \
(_c1, _id.path, _id.row, sid, _fid)
_download(_url, _fot)
return _fs
def download_scene_id(sid, d_out):
import os
from gio import file_unzip
with file_unzip.file_unzip() as _zip:
_d_tmp = _zip.generate_file()
os.path.exists(_d_tmp) or os.makedirs(_d_tmp)
if get_b8(sid, _d_tmp):
file_unzip.compress_folder(_d_tmp, d_out, [])
def main(opts):
download_scene_id(opts.scene_id, opts.output)
def usage():
_p = environ_mag.usage(True)
_p.add_argument('-i', '--scene-id', dest='scene_id', required=True)
_p.add_argument('-o', '--output', dest='output', required=True)
return _p
if __name__ == '__main__':
from gio import environ_mag
environ_mag.init_path()
environ_mag.run(main, [environ_mag.config(usage())])
|
py | 7dfe02e888c577c01691cc022fa3572ac5ac9c25 | """Common model."""
import matplotlib.pyplot as pyplot
import numpy
import seaborn
class BasePlotableModel:
"""
Base model to abstract common plotting features.
"""
numE = None
numF = None
numI = None
numQ_E = None
numQ_I = None
numR = None
numS = None
tseries = None
plotting_number_property = None
"""Property to access the number to base plotting on."""
def plot(
self,
ax=None,
plot_S="line",
plot_E="line",
plot_I="line",
plot_R="line",
plot_F="line",
plot_Q_E="line",
plot_Q_I="line",
combine_Q=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_Q_E="mediumorchid",
color_Q_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if not ax:
fig, ax = pyplot.subplots()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = (
self.numF / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numF
)
Eseries = (
self.numE / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numE
)
Dseries = (
(self.numQ_E + self.numQ_I) / getattr(self, self.plotting_number_property)
if plot_percentages
else (self.numQ_E + self.numQ_I)
)
Q_Eseries = (
self.numQ_E / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numQ_E
)
Q_Iseries = (
self.numQ_I / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numQ_I
)
Iseries = (
self.numI / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numI
)
Rseries = (
self.numR / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numR
)
Sseries = (
self.numS / getattr(self, self.plotting_number_property)
if plot_percentages
else self.numS
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if dashed_reference_results:
dashedReference_tseries = dashed_reference_results.tseries[
:: int(getattr(self, self.plotting_number_property) / 100)
]
dashedReference_IDEstack = (
dashed_reference_results.numI
+ dashed_reference_results.numQ_I
+ dashed_reference_results.numQ_E
+ dashed_reference_results.numE
)[:: int(getattr(self, self.plotting_number_property) / 100)] / (
getattr(self, self.plotting_number_property) if plot_percentages else 1
)
ax.plot(
dashedReference_tseries,
dashedReference_IDEstack,
color="#E0E0E0",
linestyle="--",
label="$I+D+E$ (" + dashed_reference_label + ")",
zorder=0,
)
if shaded_reference_results:
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (
shaded_reference_results.numI
+ shaded_reference_results.numQ_I
+ shaded_reference_results.numQ_E
+ shaded_reference_results.numE
) / (
getattr(self, self.plotting_number_property) if plot_percentages else 1
)
ax.fill_between(
shaded_reference_results.tseries,
shadedReference_IDEstack,
0,
color="#EFEFEF",
label="$I+D+E$ (" + shaded_reference_label + ")",
zorder=0,
)
ax.plot(
shaded_reference_results.tseries,
shadedReference_IDEstack,
color="#E0E0E0",
zorder=1,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if any(Fseries) and plot_F == "stacked":
ax.fill_between(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries),
topstack,
color=color_F,
alpha=0.5,
label="$F$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries),
color=color_F,
zorder=3,
)
topstack = topstack + Fseries
if any(Eseries) and plot_E == "stacked":
ax.fill_between(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries),
topstack,
color=color_E,
alpha=0.5,
label="$E$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries),
color=color_E,
zorder=3,
)
topstack = topstack + Eseries
if combine_Q and plot_Q_E == "stacked" and plot_Q_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries),
topstack,
color=color_Q_E,
alpha=0.5,
label="$Q_{all}$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries),
color=color_Q_E,
zorder=3,
)
topstack = topstack + Dseries
else:
if any(Q_Eseries) and plot_Q_E == "stacked":
ax.fill_between(
numpy.ma.masked_where(Q_Eseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Eseries <= 0, topstack + Q_Eseries),
topstack,
color=color_Q_E,
alpha=0.5,
label="$Q_E$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Q_Eseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Eseries <= 0, topstack + Q_Eseries),
color=color_Q_E,
zorder=3,
)
topstack = topstack + Q_Eseries
if any(Q_Iseries) and plot_Q_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(Q_Iseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Iseries <= 0, topstack + Q_Iseries),
topstack,
color=color_Q_I,
alpha=0.5,
label="$Q_I$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Q_Iseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Iseries <= 0, topstack + Q_Iseries),
color=color_Q_I,
zorder=3,
)
topstack = topstack + Q_Iseries
if any(Iseries) and plot_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries),
topstack,
color=color_I,
alpha=0.5,
label="$I$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries),
color=color_I,
zorder=3,
)
topstack = topstack + Iseries
if any(Rseries) and plot_R == "stacked":
ax.fill_between(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries),
topstack,
color=color_R,
alpha=0.5,
label="$R$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries),
color=color_R,
zorder=3,
)
topstack = topstack + Rseries
if any(Sseries) and plot_S == "stacked":
ax.fill_between(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries),
topstack,
color=color_S,
alpha=0.5,
label="$S$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries),
color=color_S,
zorder=3,
)
topstack = topstack + Sseries
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if any(Fseries) and plot_F == "shaded":
ax.fill_between(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
0,
color=color_F,
alpha=0.5,
label="$F$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F,
zorder=5,
)
if any(Eseries) and plot_E == "shaded":
ax.fill_between(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
0,
color=color_E,
alpha=0.5,
label="$E$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E,
zorder=5,
)
if combine_Q and (
any(Dseries) and plot_Q_E == "shaded" and plot_Q_I == "shaded"
):
ax.fill_between(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
0,
color=color_Q_E,
alpha=0.5,
label="$Q_{all}$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_Q_E,
zorder=5,
)
else:
if any(Q_Eseries) and plot_Q_E == "shaded":
ax.fill_between(
numpy.ma.masked_where(Q_Eseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Eseries <= 0, Q_Eseries),
0,
color=color_Q_E,
alpha=0.5,
label="$Q_E$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Q_Eseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Eseries <= 0, Q_Eseries),
color=color_Q_E,
zorder=5,
)
if any(Q_Iseries) and plot_Q_I == "shaded":
ax.fill_between(
numpy.ma.masked_where(Q_Iseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Iseries <= 0, Q_Iseries),
0,
color=color_Q_I,
alpha=0.5,
label="$Q_I$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Q_Iseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Iseries <= 0, Q_Iseries),
color=color_Q_I,
zorder=5,
)
if any(Iseries) and plot_I == "shaded":
ax.fill_between(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
0,
color=color_I,
alpha=0.5,
label="$I$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I,
zorder=5,
)
if any(Sseries) and plot_S == "shaded":
ax.fill_between(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
0,
color=color_S,
alpha=0.5,
label="$S$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S,
zorder=5,
)
if any(Rseries) and plot_R == "shaded":
ax.fill_between(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
0,
color=color_R,
alpha=0.5,
label="$R$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R,
zorder=5,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if any(Fseries) and plot_F == "line":
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F,
label="$F$",
zorder=6,
)
if any(Eseries) and plot_E == "line":
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E,
label="$E$",
zorder=6,
)
if combine_Q and (any(Dseries) and plot_Q_E == "line" and plot_Q_I == "line"):
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_Q_E,
label="$Q_{all}$",
zorder=6,
)
else:
if any(Q_Eseries) and plot_Q_E == "line":
ax.plot(
numpy.ma.masked_where(Q_Eseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Eseries <= 0, Q_Eseries),
color=color_Q_E,
label="$Q_E$",
zorder=6,
)
if any(Q_Iseries) and plot_Q_I == "line":
ax.plot(
numpy.ma.masked_where(Q_Iseries <= 0, self.tseries),
numpy.ma.masked_where(Q_Iseries <= 0, Q_Iseries),
color=color_Q_I,
label="$Q_I$",
zorder=6,
)
if any(Iseries) and plot_I == "line":
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I,
label="$I$",
zorder=6,
)
if any(Sseries) and plot_S == "line":
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S,
label="$S$",
zorder=6,
)
if any(Rseries) and plot_R == "line":
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R,
label="$R$",
zorder=6,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if len(vlines) > 0 and len(vline_colors) == 0:
vline_colors = ["gray"] * len(vlines)
if len(vlines) > 0 and len(vline_labels) == 0:
vline_labels = [None] * len(vlines)
if len(vlines) > 0 and len(vline_styles) == 0:
vline_styles = [":"] * len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(
vlines, vline_colors, vline_styles, vline_labels
):
if vline_x is not None:
ax.axvline(
x=vline_x,
color=vline_color,
linestyle=vline_style,
alpha=1,
label=vline_label,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel("days")
ax.set_ylabel(
"percent of population" if plot_percentages else "number of individuals"
)
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if plot_percentages:
ax.set_yticklabels(["{:,.0%}".format(y) for y in ax.get_yticks()])
if legend:
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(
legend_handles[::-1],
legend_labels[::-1],
loc="upper right",
facecolor="white",
edgecolor="none",
framealpha=0.9,
prop={"size": 8},
)
if title:
ax.set_title(title, size=12)
if side_title:
ax.annotate(
side_title,
(0, 0.5),
xytext=(-45, 0),
ha="right",
va="center",
size=12,
rotation=90,
xycoords="axes fraction",
textcoords="offset points",
)
return ax
def figure_basic(
self,
plot_S="line",
plot_E="line",
plot_I="line",
plot_R="line",
plot_F="line",
plot_Q_E="line",
plot_Q_I="line",
combine_Q=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_Q_E="mediumorchid",
color_Q_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
figsize=(12, 8),
use_seaborn=True,
show=True,
):
fig, ax = pyplot.subplots(figsize=figsize)
if use_seaborn:
seaborn.set_style("ticks")
seaborn.despine()
self.plot(
ax=ax,
plot_S=plot_S,
plot_E=plot_E,
plot_I=plot_I,
plot_R=plot_R,
plot_F=plot_F,
plot_Q_E=plot_Q_E,
plot_Q_I=plot_Q_I,
combine_Q=combine_Q,
color_S=color_S,
color_E=color_E,
color_I=color_I,
color_R=color_R,
color_F=color_F,
color_Q_E=color_Q_E,
color_Q_I=color_Q_I,
color_reference=color_reference,
dashed_reference_results=dashed_reference_results,
dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results,
shaded_reference_label=shaded_reference_label,
vlines=vlines,
vline_colors=vline_colors,
vline_styles=vline_styles,
vline_labels=vline_labels,
ylim=ylim,
xlim=xlim,
legend=legend,
title=title,
side_title=side_title,
plot_percentages=plot_percentages,
)
if show:
pyplot.show()
return fig, ax
def figure_infections(
self,
plot_S=False,
plot_E="stacked",
plot_I="stacked",
plot_R=False,
plot_F=False,
plot_Q_E="stacked",
plot_Q_I="stacked",
combine_Q=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_Q_E="mediumorchid",
color_Q_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
figsize=(12, 8),
use_seaborn=True,
show=True,
):
fig, ax = pyplot.subplots(figsize=figsize)
if use_seaborn:
seaborn.set_style("ticks")
seaborn.despine()
self.plot(
ax=ax,
plot_S=plot_S,
plot_E=plot_E,
plot_I=plot_I,
plot_R=plot_R,
plot_F=plot_F,
plot_Q_E=plot_Q_E,
plot_Q_I=plot_Q_I,
combine_Q=combine_Q,
color_S=color_S,
color_E=color_E,
color_I=color_I,
color_R=color_R,
color_F=color_F,
color_Q_E=color_Q_E,
color_Q_I=color_Q_I,
color_reference=color_reference,
dashed_reference_results=dashed_reference_results,
dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results,
shaded_reference_label=shaded_reference_label,
vlines=vlines,
vline_colors=vline_colors,
vline_styles=vline_styles,
vline_labels=vline_labels,
ylim=ylim,
xlim=xlim,
legend=legend,
title=title,
side_title=side_title,
plot_percentages=plot_percentages,
)
if show:
pyplot.show()
return fig, ax
|
py | 7dfe039e143d758de0467096bcb27891941e2d7d | """
Models that are used by multiple_seat_ranking_methods.py
You can create and use your own Candidate and Ballot models as long as they implement the same properties and methods.
"""
from typing import List, Tuple
class Candidate:
"""A candidate in the election."""
def __init__(self, name: str):
self.name = name
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return "<Candidate('%s')>" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other) -> bool:
if other is None:
return False
return self.name == other.name
class DuplicateCandidatesError(RuntimeError):
pass
class Ballot:
"""
A ballot (vote) where the voter has ranked all, or just some, of the candidates.
If a voter lists one candidate multiple times, a DuplicateCandidatesError is thrown.
"""
def __init__(self, ranked_candidates: List[Candidate]):
self.ranked_candidates: Tuple[Candidate, ...] = tuple(ranked_candidates)
if Ballot._is_duplicates(ranked_candidates):
raise DuplicateCandidatesError
if not Ballot._is_all_candidate_objects(ranked_candidates):
raise TypeError("Not all objects in ranked candidate list are of class Candidate or "
"implement the same properties and methods")
def __repr__(self) -> str:
candidate_name = ", ".join([candidate.name for candidate in self.ranked_candidates])
return "<Ballot(%s)>" % candidate_name
@staticmethod
def _is_duplicates(ranked_candidates) -> bool:
return len(set(ranked_candidates)) != len(ranked_candidates)
@staticmethod
def _is_all_candidate_objects(objects) -> bool:
for obj in objects:
if not Ballot._is_candidate_object(obj):
return False
# If all objects are Candidate-objects
return True
@staticmethod
def _is_candidate_object(obj) -> bool:
if obj.__class__ is Candidate:
return True
is_candidate_like = all([
hasattr(obj, 'name'),
hasattr(obj, '__hash__'),
hasattr(obj, '__eq__')
])
return is_candidate_like
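# Usage sketch (illustrative only; the actual ranking is performed by the methods in
# multiple_seat_ranking_methods.py mentioned in the module docstring):
# alice, bob = Candidate("Alice"), Candidate("Bob")
# ballot = Ballot(ranked_candidates=[alice, bob])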
|
py | 7dfe05856b5e995845a54f23464ab4fbaf9d8b62 | #! /usr/bin/python3.9
#
# MIT License
#
# Copyright (c) 2022 Adrian F. Hoefflin [srccircumflex]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from sys import path as sys_path
from sys import platform as _sys_platform
from re import sub as _re_sub
if _sys_platform == "win32":
sys_path.append(_re_sub("\\\[^\\\]+$", "", __file__))
else:
sys_path.append(_re_sub("/[^/]+$", "", __file__))
from _rc import configurations as CNF
import _main
hasattr(_main, "__file__")
CNF.LATERAL_ = True
try:
from ini.LogStream_ini import main
except Exception as e:
print(f"|[ {type(e)} ]|[ {e} ]|[ {e.__traceback__.tb_frame}")
CNF.EXP_EXIT(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit('')
except Exception as e:
print(f"|[ {type(e)} ]|[ {e} ]|[ {e.__traceback__.tb_frame}")
CNF.EXP_EXIT(1)
|
py | 7dfe05b11d68d3ef23a0d0cd5ccf93abfa3650d6 | from k5test import *
realm = K5Realm(create_kdb=False)
keyctl = which('keyctl')
out = realm.run([klist, '-c', 'KEYRING:process:abcd'], expected_code=1)
test_keyring = (keyctl is not None and
'Unknown credential cache type' not in out)
if not test_keyring:
skipped('keyring collection tests', 'keyring support not built')
# Run the collection test program against each collection-enabled type.
realm.run(['./t_cccol', 'DIR:' + os.path.join(realm.testdir, 'cc')])
if test_keyring:
def cleanup_keyring(anchor, name):
out = realm.run(['keyctl', 'list', anchor])
if ('keyring: ' + name + '\n') in out:
keyid = realm.run(['keyctl', 'search', anchor, 'keyring', name])
realm.run(['keyctl', 'unlink', keyid.strip(), anchor])
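# Note: '@s' and '@u' are keyctl shorthands for the session keyring and the user keyring, respectively.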
# Use the test directory as the collection name to avoid colliding
# with other build trees.
cname = realm.testdir
col_ringname = '_krb_' + cname
# Remove any keys left behind by previous failed test runs.
cleanup_keyring('@s', cname)
cleanup_keyring('@s', col_ringname)
cleanup_keyring('@u', col_ringname)
# Run test program over each subtype, cleaning up as we go. Don't
# test the persistent subtype, since it supports only one
# collection and might be in actual use.
realm.run(['./t_cccol', 'KEYRING:' + cname])
cleanup_keyring('@s', col_ringname)
realm.run(['./t_cccol', 'KEYRING:legacy:' + cname])
cleanup_keyring('@s', col_ringname)
realm.run(['./t_cccol', 'KEYRING:session:' + cname])
cleanup_keyring('@s', col_ringname)
realm.run(['./t_cccol', 'KEYRING:user:' + cname])
cleanup_keyring('@u', col_ringname)
realm.run(['./t_cccol', 'KEYRING:process:abcd'])
realm.run(['./t_cccol', 'KEYRING:thread:abcd'])
realm.stop()
# Test cursor semantics using real ccaches.
realm = K5Realm(create_host=False)
realm.addprinc('alice', password('alice'))
realm.addprinc('bob', password('bob'))
ccdir = os.path.join(realm.testdir, 'cc')
dccname = 'DIR:%s' % ccdir
duser = 'DIR::%s/tkt1' % ccdir
dalice = 'DIR::%s/tkt2' % ccdir
dbob = 'DIR::%s/tkt3' % ccdir
dnoent = 'DIR::%s/noent' % ccdir
realm.kinit('user', password('user'), flags=['-c', duser])
realm.kinit('alice', password('alice'), flags=['-c', dalice])
realm.kinit('bob', password('bob'), flags=['-c', dbob])
if test_keyring:
cleanup_keyring('@s', col_ringname)
krccname = 'KEYRING:session:' + cname
kruser = '%s:tkt1' % krccname
kralice = '%s:tkt2' % krccname
krbob = '%s:tkt3' % krccname
krnoent = '%s:noent' % krccname
realm.kinit('user', password('user'), flags=['-c', kruser])
realm.kinit('alice', password('alice'), flags=['-c', kralice])
realm.kinit('bob', password('bob'), flags=['-c', krbob])
def cursor_test(testname, args, expected):
outlines = realm.run(['./t_cccursor'] + args).splitlines()
outlines.sort()
expected.sort()
if outlines != expected:
fail('Output not expected for %s\n' % testname +
'Expected output:\n\n' + '\n'.join(expected) + '\n\n' +
'Actual output:\n\n' + '\n'.join(outlines))
mark('FILE cursor')
fccname = 'FILE:%s' % realm.ccache
cursor_test('file-default', [], [fccname])
cursor_test('file-default2', [realm.ccache], [fccname])
cursor_test('file-default3', [fccname], [fccname])
mark('DIR cursor')
cursor_test('dir', [dccname], [duser, dalice, dbob])
cursor_test('dir-subsidiary', [duser], [duser])
cursor_test('dir-nofile', [dnoent], [])
if test_keyring:
mark('KEYRING cursor')
cursor_test('keyring', [krccname], [kruser, kralice, krbob])
cursor_test('keyring-subsidiary', [kruser], [kruser])
cursor_test('keyring-noent', [krnoent], [])
mark('MEMORY cursor')
mfoo = 'MEMORY:foo'
mbar = 'MEMORY:bar'
cursor_test('filemem', [fccname, mfoo], [fccname])
cursor_test('dirmem', [dccname, mfoo], [duser, dalice, dbob])
cursor_test('mem', [mfoo, mbar], [mfoo])
if test_keyring:
cursor_test('keyringmem', [krccname, mfoo], [kruser, kralice, krbob])
# Test krb5_cccol_have_content.
mark('krb5_cccol_have_content')
realm.run(['./t_cccursor', dccname, 'CONTENT'])
realm.run(['./t_cccursor', fccname, 'CONTENT'])
realm.run(['./t_cccursor', realm.ccache, 'CONTENT'])
realm.run(['./t_cccursor', mfoo, 'CONTENT'], expected_code=1)
if test_keyring:
realm.run(['./t_cccursor', krccname, 'CONTENT'])
cleanup_keyring('@s', col_ringname)
# Make sure FILE doesn't yield a nonexistent default cache.
mark('FILE nonexistent')
realm.run([kdestroy])
cursor_test('noexist', [], [])
realm.run(['./t_cccursor', fccname, 'CONTENT'], expected_code=1)
success('Credential cache collection tests')
|
py | 7dfe073b1f3f5dc33a4b44de2003b1e6fecfe0d3 | """Functions used to preprocess the timeseries
Lucas Draichi
2019
"""
import datetime
import talib
import colorama
import requests
import pandas as pd
import numpy as np
# import plotly.graph_objs as go
from termcolor import colored
from configs.vars import WALLET_FIRST_SYMBOL, WALLET_SECOND_SYMBOL
colorama.init()
def get_dataset(mode, symbol, to_symbol, histo, limit):
df = pd.read_csv('datasets/bot_{}_{}_{}_{}.csv'.format(mode, symbol + to_symbol, limit, histo))
return df
def get_datasets(symbol, to_symbol, histo, limit):
"""Fetch the API and precess the desired pair
Arguments:
symbol {str} -- First pair
to_symbol {str} -- Second pair
histo {str ['day', 'hour']} -- Granularity
        limit {int [100 - 2000]} -- Number of candles to fetch
Returns:
        (pandas.DataFrame, pandas.DataFrame) -- Train and rollout OHLCV + indicators dataframes
"""
headers = {'User-Agent': 'Mozilla/5.0', 'authorization': 'Apikey 3d7d3e9e6006669ac00584978342451c95c3c78421268ff7aeef69995f9a09ce'}
# OHLC
url = 'https://min-api.cryptocompare.com/data/histo{}?fsym={}&tsym={}&e=Binance&limit={}'.format(histo, symbol, to_symbol, limit)
print(colored('> downloading ' + symbol + ' OHLCV', 'green'))
response = requests.get(url, headers=headers)
json_response = response.json()
status = json_response['Response']
if status == "Error":
print(colored('=== {} ==='.format(json_response['Message']), 'red'))
raise AssertionError()
result = json_response['Data']
df = pd.DataFrame(result)
df['Date'] = df['time']
# df['Date'] = pd.to_datetime(df['time'], utc=True, unit='s')
df.drop('time', axis=1, inplace=True)
# indicators
# https://github.com/mrjbq7/ta-lib/blob/master/docs/func.md
open_price, high, low, close = np.array(df['open']), np.array(df['high']), np.array(df['low']), np.array(df['close'])
volume = np.array(df['volumefrom'])
# cycle indicators
df.loc[:, 'HT_DCPERIOD'] = talib.HT_DCPERIOD(close)
df.loc[:, 'HT_DCPHASE'] = talib.HT_DCPHASE(close)
df.loc[:, 'HT_PHASOR_inphase'], df.loc[:, 'HT_PHASOR_quadrature'] = talib.HT_PHASOR(close)
df.loc[:, 'HT_SINE_sine'], df.loc[:, 'HT_SINE_leadsine'] = talib.HT_SINE(close)
df.loc[:, 'HT_TRENDMODE'] = talib.HT_TRENDMODE(close)
# momemtum indicators
df.loc[:, 'ADX'] = talib.ADX(high, low, close, timeperiod=14)
df.loc[:, 'ADXR'] = talib.ADXR(high, low, close, timeperiod=14)
df.loc[:, 'APO'] = talib.APO(close, fastperiod=12, slowperiod=26, matype=0)
df.loc[:, 'AROON_down'], df.loc[:, 'AROON_up'] = talib.AROON(high, low, timeperiod=14)
df.loc[:, 'AROONOSC'] = talib.AROONOSC(high, low, timeperiod=14)
df.loc[:, 'BOP'] = talib.BOP(open_price, high, low, close)
df.loc[:, 'CCI'] = talib.CCI(high, low, close, timeperiod=14)
df.loc[:, 'CMO'] = talib.CMO(close, timeperiod=14)
df.loc[:, 'DX'] = talib.DX(high, low, close, timeperiod=14)
df['MACD'], df['MACD_signal'], df['MACD_hist'] = talib.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
df.loc[:, 'MFI'] = talib.MFI(high, low, close, volume, timeperiod=14)
df.loc[:, 'MINUS_DI'] = talib.MINUS_DI(high, low, close, timeperiod=14)
df.loc[:, 'MINUS_DM'] = talib.MINUS_DM(high, low, timeperiod=14)
df.loc[:, 'MOM'] = talib.MOM(close, timeperiod=10)
df.loc[:, 'PPO'] = talib.PPO(close, fastperiod=12, slowperiod=26, matype=0)
df.loc[:, 'ROC'] = talib.ROC(close, timeperiod=10)
df.loc[:, 'RSI'] = talib.RSI(close, timeperiod=14)
df.loc[:, 'STOCH_k'], df.loc[:, 'STOCH_d'] = talib.STOCH(high, low, close, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)
df.loc[:, 'STOCHF_k'], df.loc[:, 'STOCHF_d'] = talib.STOCHF(high, low, close, fastk_period=5, fastd_period=3, fastd_matype=0)
df.loc[:, 'STOCHRSI_K'], df.loc[:, 'STOCHRSI_D'] = talib.STOCHRSI(close, timeperiod=30, fastk_period=14, fastd_period=10, fastd_matype=1)
df.loc[:, 'TRIX'] = talib.TRIX(close, timeperiod=30)
df.loc[:, 'ULTOSC'] = talib.ULTOSC(high, low, close, timeperiod1=7, timeperiod2=14, timeperiod3=28)
df.loc[:, 'WILLR'] = talib.WILLR(high, low, close, timeperiod=14)
# overlap studies
df.loc[:, 'BBANDS_upper'], df.loc[:, 'BBANDS_middle'], df.loc[:, 'BBANDS_lower'] = talib.BBANDS(close, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
df.loc[:, 'DEMA'] = talib.DEMA(close, timeperiod=30)
df.loc[:, 'EMA'] = talib.EMA(close, timeperiod=30)
df.loc[:, 'HT_TRENDLINE'] = talib.HT_TRENDLINE(close)
df.loc[:, 'KAMA'] = talib.KAMA(close, timeperiod=30)
df.loc[:, 'MA'] = talib.MA(close, timeperiod=30, matype=0)
df.loc[:, 'MIDPOINT'] = talib.MIDPOINT(close, timeperiod=14)
df.loc[:, 'WMA'] = talib.WMA(close, timeperiod=30)
df.loc[:, 'SMA'] = talib.SMA(close)
# pattern recoginition
df.loc[:, 'CDL2CROWS'] = talib.CDL2CROWS(open_price, high, low, close)
df.loc[:, 'CDL3BLACKCROWS'] = talib.CDL3BLACKCROWS(open_price, high, low, close)
df.loc[:, 'CDL3INSIDE'] = talib.CDL3INSIDE(open_price, high, low, close)
df.loc[:, 'CDL3LINESTRIKE'] = talib.CDL3LINESTRIKE(open_price, high, low, close)
# price transform
df.loc[:, 'WCLPRICE'] = talib.WCLPRICE(high, low, close)
# statistic funcitons
df.loc[:, 'BETA'] = talib.BETA(high, low, timeperiod=5)
df.loc[:, 'CORREL'] = talib.CORREL(high, low, timeperiod=30)
df.loc[:, 'STDDEV'] = talib.STDDEV(close, timeperiod=5, nbdev=1)
df.loc[:, 'TSF'] = talib.TSF(close, timeperiod=14)
df.loc[:, 'VAR'] = talib.VAR(close, timeperiod=5, nbdev=1)
# volatility indicators
df.loc[:, 'ATR'] = talib.ATR(high, low, close, timeperiod=14)
df.loc[:, 'NATR'] = talib.NATR(high, low, close, timeperiod=14)
df.loc[:, 'TRANGE'] = talib.TRANGE(high, low, close)
# volume indicators
df.loc[:, 'AD'] = talib.AD(high, low, close, volume)
df.loc[:, 'ADOSC'] = talib.ADOSC(high, low, close, volume, fastperiod=3, slowperiod=10)
df.loc[:, 'OBV'] = talib.OBV(close, volume)
# wallet indicator to trading bot
# df.loc[:, 'wallet_{}'.format(symbol)] = 1.0
# df.loc[:, 'wallet_first_symbol'] = WALLET_FIRST_SYMBOL
# df.loc[:, 'wallet_second_symbol'] = WALLET_SECOND_SYMBOL
# df.loc[:, 'wallet_{}'.format(to_symbol)] = 0.0
# df.fillna(df.mean(), inplace=True)
df.dropna(inplace=True)
df.set_index('Date', inplace=True)
train_size = round(len(df) * 0.5) # 50% to train -> test with different value
df_train = df[:train_size]
df_train.name = symbol + to_symbol
df_rollout = df[train_size:]
df_rollout.name = symbol + to_symbol
df_train.to_csv('datasets/bot_train_{}_{}_{}.csv'.format(symbol + to_symbol, limit, histo))
df_rollout.to_csv('datasets/bot_rollout_{}_{}_{}.csv'.format(symbol + to_symbol, limit, histo))
return df_train, df_rollout
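# Hedged usage sketch (not part of the original module): download, split and cache one
# pair, then reload the cached training set. The pair, granularity and candle limit
# below are illustrative assumptions, not values taken from this project.
def _example_build_datasets():
    df_train, df_rollout = get_datasets(symbol='BTC', to_symbol='USDT', histo='hour', limit=500)
    cached_train = get_dataset(mode='train', symbol='BTC', to_symbol='USDT', histo='hour', limit=500)
    return df_train, df_rollout, cached_train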
#------------------------------------------------------------->
def init_data(pair, mode, limit, histo):
"""Tranform the data from pandas.DataFrame to list to improve Ray's performance
Arguments:
pair {str} -- Pair
mode {str ['train', 'rollout]} -- Select the correct dataset,
train or rollout one.
Returns:
list, list -- The dataframe divided in two lists
"""
df = pd.read_csv('datasets/bot_{}_{}_{}_{}.csv'.format(mode, pair, limit, histo))
df.drop('Date', axis=1, inplace=True)
df_array = df.values.tolist()
keys = df.keys()
return keys, df_array
def build_layout(title, x_axis_title, y_axis_title):
"""Create the plotly's layout with custom configuration
Arguments:
title {str} -- Layout's central title
x_axis_title {str} -- Axis x title
y_axis_title {str} -- Axis y title
Returns:
Object -- Plotly object from plotly.graph_objs
"""
layout = go.Layout(plot_bgcolor='#2d2929',
paper_bgcolor='#2d2929',
title=title,
font=dict(color='rgb(255, 255, 255)', size=17),
legend=dict(orientation="h"),
yaxis=dict(title=y_axis_title),
xaxis=dict(title=x_axis_title))
return layout
def var_cov_matrix(df, weigths):
"""Compute covariance matrix with respect of given weigths
Arguments:
df {pandas.DataFrame} -- The timeseries object
weigths {list} -- List of weights to be used
Returns:
        float -- The portfolio variance
"""
sigma = np.cov(np.array(df).T, ddof=0)
var = (np.array(weigths) * sigma * np.array(weigths).T).sum()
return var
def calc_exp_returns(avg_return, weigths):
"""Compute the expected returns
Arguments:
avg_return {pandas.DataFrame} -- The average of returns
        weigths {list} -- A list of weights
Returns:
        numpy.ndarray or scalar -- The expected returns (dot product of average returns and weights)
"""
exp_returns = avg_return.dot(weigths.T)
return exp_returns
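# Hedged example (not part of the original module): evaluate an equally weighted
# two-asset portfolio with the two helpers above. The return series is made up.
def _example_portfolio_metrics():
    returns = pd.DataFrame({'asset_a': [0.01, -0.02, 0.03],
                            'asset_b': [0.00, 0.01, -0.01]})
    weights = np.array([0.5, 0.5])
    variance = var_cov_matrix(returns, weights)           # portfolio variance
    expected = calc_exp_returns(returns.mean(), weights)  # expected portfolio return
    return variance, expected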
def print_dollar():
print(chr(27) + "[2J")
print(colored("""
||====================================================================||
||//$\\\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//$\\\||
||(100)==================| FEDERAL RESERVE NOTE |================(100)||
||\\\$// ~ '------========--------' \\\$//||
||<< / /$\ // ____ \\\ \ >>||
||>>| 12 //L\\\ // ///..) \\\ L38036133B 12 |<<||
||<<| \\\ // || <|| >\ || |>>||
||>>| \$/ || $$ --/ || One Hundred |<<||
||====================================================================||>||
||//$\\\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//$\\\||<||
||(100)==================| FEDERAL RESERVE NOTE |================(100)||>||
||\\\$// ~ '------========--------' \\\$//||\||
||<< / /$\ // ____ \\\ \ >>||)||
||>>| 12 //L\\\ // ///..) \\\ L38036133B 12 |<<||/||
||<<| \\\ // || <|| >\ || |>>||=||
||>>| \$/ || $$ --/ || One Hundred |<<||
||<<| L38036133B *\\\ |\_/ //* series |>>||
||>>| 12 *\\\/___\_//* 1989 |<<||
||<<\ Treasurer ______/Franklin\________ Secretary 12 />>||
||//$\ ~|UNITED STATES OF AMERICA|~ /$\\\||
||(100)=================== ONE HUNDRED DOLLARS =================(100)||
||\\\$//\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\\\$//||
||====================================================================||
""", 'green', attrs=['bold'])) |
py | 7dfe07d1c5255b0e5819f6f17e96b6fed3f79e6b | # coding:utf-8
"""
QUANTAXIS math module
"""
def QA_math_diff(data):
y=[]
for i in range(0,len(data)-1,1):
y.append(float(data[i+1][0])-float(data[i][0]))
return y
def QA_math_max_min(data):
return max(data)-min(data)
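# Hedged usage sketch (not part of the original module): QA_math_diff expects a sequence
# of rows whose first column is numeric (e.g. [[price], ...]), while QA_math_max_min
# works on a flat numeric sequence. The sample values are made up.
if __name__ == '__main__':
    rows = [[10.0], [10.5], [10.2]]
    print(QA_math_diff(rows))          # first-column differences, roughly [0.5, -0.3]
    print(QA_math_max_min([1, 4, 2]))  # range of the sequence -> 3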
|
py | 7dfe080ab0f663279ab8c6a2c551a689f918d72f | # # Exploratory Data Analysis of Engineered Features
# Observations:
# * Moderate to weak visual correlations between engineered features and targets
# * Score of first assessment looks to have a significant correlation with estimated final score, final outcome
# * Individual distributions show a lot of 0 values, unsurprising otherwise
import numpy as np
import pandas as pd
import seaborn as sns
sns.reset_defaults()
sns.set_style(style='darkgrid')
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
font = {'size' : 16}
plt.rc('font', **font)
plt.ion()
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams["patch.force_edgecolor"] = True
plt.rcParams['figure.figsize'] = (20.0, 10.0)
pd.set_option('display.max_columns', 2000)
pd.set_option('display.max_rows', 2000)
df = pd.read_csv('../data/processed/transformed_data_with_features.csv')
df.fillna(value=0, inplace=True)
# ## Engineered Feature Analysis
# ### Bivariate Plots Against Targets: final_result_num, estimated_final_score
sns.jointplot(x='clicks_per_day', y='final_result_num', data=df)
sns.jointplot(x='clicks_per_day', y='estimated_final_score', data=df)
sns.jointplot(x='pct_days_vle_accessed', y='final_result_num', data=df)
sns.jointplot(x='pct_days_vle_accessed', y='estimated_final_score', data=df)
sns.jointplot(x='studied_credits', y='final_result_num', data=df)
sns.jointplot(x='studied_credits', y='estimated_final_score', data=df)
sns.jointplot(x='max_clicks_one_day', y='final_result_num', data=df)
sns.jointplot(x='max_clicks_one_day', y='estimated_final_score', data=df)
sns.jointplot(x='first_date_vle_accessed', y='final_result_num', data=df)
sns.jointplot(x='first_date_vle_accessed', y='estimated_final_score', data=df)
sns.jointplot(x='avg_days_sub_early', y='final_result_num', data=df)
sns.jointplot(x='avg_days_sub_early', y='estimated_final_score', data=df)
sns.jointplot(x='days_early_first_assessment', y='final_result_num', data=df)
sns.jointplot(x='days_early_first_assessment', y='estimated_final_score', data=df)
sns.jointplot(x='score_first_assessment', y='final_result_num', data=df)
sns.jointplot(x='score_first_assessment', y='estimated_final_score', data=df)
sns.jointplot(x='avg_score', y='final_result_num', data=df)
sns.jointplot(x='avg_score', y='estimated_final_score', data=df)
# ### Univariate Plots of Engineered Features
f = list(df.columns[[6,7,8,9,10,11,12,13,14]])
for feat in f:
plt.figure()
sns.distplot(df[feat])
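# Hedged addition (not in the original notebook): back the visual impressions above
# with Pearson correlations between each engineered feature and the two targets.
# Column names are the ones already plotted; treat this as a quick sanity check.
corr_check = df[f + ['final_result_num', 'estimated_final_score']].corr()
print(corr_check.loc[f, ['final_result_num', 'estimated_final_score']])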
|
py | 7dfe08941d114a756589f5d939dd0addd093c13f |
import os
from utils.DatasetFilter import DatasetFilter
from utils.Dataset import Dataset
from utils.DatasetOptions import DatasetOptions
import helpers.constants as constants
import helpers.constantsNZ as constantsNZ
dirProject = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/';
dirData = dirProject + 'data/';
dirPlotsBase = dirProject + 'plots/feature_comparison_wiederkehrer_normal/'
dict_options_analyzing = {
'dir_data': dirData,
'data_prefix': 'patrec',
'dataset': '20122015',
'grouping': 'verylightgrouping',
'encoding': 'categorical',
'newfeatures': {'names': constants.NEW_FEATURES},
'featurereduction': None,
'filter_options': 'chronic_lung'
}
options = DatasetOptions(dict_options_analyzing);
dataset = Dataset(options);
datafilter = DatasetFilter(options);
datafilter.filterDataDisease()
|
py | 7dfe08cd4ef92164b6dc27099f94060b0cc5a1e0 | from collections import namedtuple
from srsly.msgpack import packb
class MyList(list):
pass
class MyDict(dict):
pass
class MyTuple(tuple):
pass
MyNamedTuple = namedtuple("MyNamedTuple", "x y")
def test_types():
assert packb(MyDict()) == packb(dict())
assert packb(MyList()) == packb(list())
assert packb(MyNamedTuple(1, 2)) == packb((1, 2))
|
py | 7dfe08f4c9e30f1a94b1d201f5406142f05e5030 | import json
import logging
import sys
from pathlib import Path
import pytest
import nestedtext as nt
sys.path.append(str(Path(__file__).parent / "official_tests" / "api"))
import nestedtext_official_tests as nt_test_api
logger = logging.getLogger(__name__)
skip_testcases = {
"inline_dict_01": "Unsupported cases: empty values, trailing commas",
"inline_list_01": "Unsupported cases: empty values, trailing commas",
}
skip_load_testcases = {}
skip_dump_testcases = {
"dict_16": "Colon in object key",
"dict_17": "Undiagnosed",
"dict_20": "Very weird object keys",
"dict_26": "Colon in object key",
"string_multiline_12": "Very weird characters",
}
@pytest.mark.parametrize("case", nt_test_api.load_test_cases(), ids=lambda c: c.id)
def test_all(case: nt_test_api.TestCase):
if case.id in skip_testcases:
pytest.skip(skip_testcases[case.id])
if "load" in case.case:
load_in_path = case.case["load"]["in"]["path"]
if case.id in skip_load_testcases:
logger.warning(
"Skipping load check for %s: %s", case.id, skip_load_testcases[case.id]
)
elif "out" in case.case["load"]:
logger.info("Checking successful load")
expected = case.case["load"]["out"]["data"]
with open(load_in_path, "r", encoding="utf-8") as f:
actual = nt.load(f)
assert actual == expected
# Debug info.
logger.debug("Loaded %s", load_in_path)
with open(load_in_path, "r", encoding="utf-8") as f:
logger.debug("\n%s", f.read())
logger.debug("%s", json.dumps(actual))
# Check loads() function too.
with open(load_in_path, "r", encoding="utf-8") as f:
actual2 = nt.loads(f.read())
assert actual2 == expected
elif "err" in case.case["load"]:
logger.info("Checking load error")
with pytest.raises(nt.NestedtextError):
with open(load_in_path, "r", encoding="utf-8") as f:
nt.load(f)
# TODO: Proper error checking
if "dump" in case.case:
if case.id in skip_dump_testcases:
logger.warning(
"Skipping dump check for %s: %s", case.id, skip_dump_testcases[case.id]
)
elif "out" in case.case["dump"]:
logger.info("Checking successful dump")
actual = nt.dumps(case.case["dump"]["in"]["data"])
with open(case.case["dump"]["out"]["path"], "r") as f:
expected = f.read()
assert actual.strip() == expected.strip()
elif "err" in case.case["dump"]:
logger.info("Checking dump error")
with pytest.raises(nt.NestedtextError):
nt.dumps(case.case["dump"]["in"]["data"])
# TODO: Proper error checking
|
py | 7dfe090f8131bb57a4ef7599324bec822c1b1ddc | import os
from config import Config
from .fonts import Fonts
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
@Client.on_message(filters.command('start'))
async def start(c, m):
owner = await c.get_users(int(Config.OWNER_ID))
owner_username = owner.username if owner.username else 'Ns_bot_updates'
# start text
text = f"""👋Hey! {m.from_user.mention(style='md')},
💡 **I am a Stylish Font Bot, an SD Project** 🔥
`⚡⚡I can help you get stylish fonts. Just send me some text and watch the magic📤.`
**👲 Maintained By:** {owner.mention(style='md')}
"""
# Buttons
buttons = [
[
InlineKeyboardButton('👨💻My Father👨💻', url=f"https://t.me/{owner_username}")
]
]
await m.reply_text(
text=text,
reply_markup=InlineKeyboardMarkup(buttons)
)
@Client.on_message(filters.private & filters.incoming & filters.text)
async def style_buttons(c, m, cb=False):
buttons = [[
InlineKeyboardButton('𝚃𝚢𝚙𝚎𝚠𝚛𝚒𝚝𝚎𝚛', callback_data='style+typewriter'),
InlineKeyboardButton('𝕆𝕦𝕥𝕝𝕚𝕟𝕖', callback_data='style+outline'),
InlineKeyboardButton('𝐒𝐞𝐫𝐢𝐟', callback_data='style+serif'),
],[
InlineKeyboardButton('𝑺𝒆𝒓𝒊𝒇', callback_data='style+bold_cool'),
InlineKeyboardButton('𝑆𝑒𝑟𝑖𝑓', callback_data='style+cool'),
InlineKeyboardButton('Sᴍᴀʟʟ Cᴀᴘs', callback_data='style+small_cap'),
],[
InlineKeyboardButton('𝓈𝒸𝓇𝒾𝓅𝓉', callback_data='style+script'),
InlineKeyboardButton('𝓼𝓬𝓻𝓲𝓹𝓽', callback_data='style+script_bolt'),
InlineKeyboardButton('ᵗⁱⁿʸ', callback_data='style+tiny'),
],[
InlineKeyboardButton('ᑕOᗰIᑕ', callback_data='style+comic'),
InlineKeyboardButton('𝗦𝗮𝗻𝘀', callback_data='style+sans'),
InlineKeyboardButton('𝙎𝙖𝙣𝙨', callback_data='style+slant_sans'),
],[
InlineKeyboardButton('𝘚𝘢𝘯𝘴', callback_data='style+slant'),
InlineKeyboardButton('𝖲𝖺𝗇𝗌', callback_data='style+sim'),
InlineKeyboardButton('Ⓒ︎Ⓘ︎Ⓡ︎Ⓒ︎Ⓛ︎Ⓔ︎Ⓢ︎', callback_data='style+circles'),
],[
InlineKeyboardButton('🅒︎🅘︎🅡︎🅒︎🅛︎🅔︎🅢︎', callback_data='style+circle_dark'),
InlineKeyboardButton('𝔊𝔬𝔱𝔥𝔦𝔠', callback_data='style+gothic'),
InlineKeyboardButton('𝕲𝖔𝖙𝖍𝖎𝖈', callback_data='style+gothic_bolt'),
],[
InlineKeyboardButton('C͜͡l͜͡o͜͡u͜͡d͜͡s͜͡', callback_data='style+cloud'),
InlineKeyboardButton('H̆̈ă̈p̆̈p̆̈y̆̈', callback_data='style+happy'),
InlineKeyboardButton('S̑̈ȃ̈d̑̈', callback_data='style+sad'),
],[
InlineKeyboardButton('Next ➡️', callback_data="nxt")
]]
if not cb:
await m.reply_text(m.text, reply_markup=InlineKeyboardMarkup(buttons), quote=True)
else:
await m.answer()
await m.message.edit_reply_markup(InlineKeyboardMarkup(buttons))
@Client.on_callback_query(filters.regex('^nxt'))
async def nxt(c, m):
if m.data == "nxt":
buttons = [[
InlineKeyboardButton('🇸 🇵 🇪 🇨 🇮 🇦 🇱 ', callback_data='style+special'),
InlineKeyboardButton('🅂🅀🅄🄰🅁🄴🅂', callback_data='style+squares'),
InlineKeyboardButton('🆂︎🆀︎🆄︎🅰︎🆁︎🅴︎🆂︎', callback_data='style+squares_bold'),
],[
InlineKeyboardButton('ꪖꪀᦔꪖꪶꪊᥴ𝓲ꪖ', callback_data='style+andalucia'),
InlineKeyboardButton('爪卂几ᘜ卂', callback_data='style+manga'),
InlineKeyboardButton('S̾t̾i̾n̾k̾y̾', callback_data='style+stinky'),
],[
InlineKeyboardButton('B̥ͦu̥ͦb̥ͦb̥ͦl̥ͦe̥ͦs̥ͦ', callback_data='style+bubbles'),
InlineKeyboardButton('U͟n͟d͟e͟r͟l͟i͟n͟e͟', callback_data='style+underline'),
InlineKeyboardButton('꒒ꍏꀷꌩꌃꀎꁅ', callback_data='style+ladybug'),
],[
InlineKeyboardButton('R҉a҉y҉s҉', callback_data='style+rays'),
InlineKeyboardButton('B҈i҈r҈d҈s҈', callback_data='style+birds'),
InlineKeyboardButton('S̸l̸a̸s̸h̸', callback_data='style+slash'),
],[
InlineKeyboardButton('s⃠t⃠o⃠p⃠', callback_data='style+stop'),
InlineKeyboardButton('S̺͆k̺͆y̺͆l̺͆i̺͆n̺͆e̺͆', callback_data='style+skyline'),
InlineKeyboardButton('A͎r͎r͎o͎w͎s͎', callback_data='style+arrows'),
],[
InlineKeyboardButton('ዪሀክቿነ', callback_data='style+qvnes'),
InlineKeyboardButton('S̶t̶r̶i̶k̶e̶', callback_data='style+strike'),
InlineKeyboardButton('F༙r༙o༙z༙e༙n༙', callback_data='style+frozen')
],[
InlineKeyboardButton('⬅️ Back', callback_data='nxt+0')
]]
await m.answer()
await m.message.edit_reply_markup(InlineKeyboardMarkup(buttons))
else:
await style_buttons(c, m, cb=True)
@Client.on_callback_query(filters.regex('^style'))
async def style(c, m):
await m.answer()
cmd, style = m.data.split('+')
if style == 'typewriter':
cls = Fonts.typewriter
if style == 'outline':
cls = Fonts.outline
if style == 'serif':
cls = Fonts.serief
if style == 'bold_cool':
cls = Fonts.bold_cool
if style == 'cool':
cls = Fonts.cool
if style == 'small_cap':
cls = Fonts.smallcap
if style == 'script':
cls = Fonts.script
if style == 'script_bolt':
cls = Fonts.bold_script
if style == 'tiny':
cls = Fonts.tiny
if style == 'comic':
cls = Fonts.comic
if style == 'sans':
cls = Fonts.san
if style == 'slant_sans':
cls = Fonts.slant_san
if style == 'slant':
cls = Fonts.slant
if style == 'sim':
cls = Fonts.sim
if style == 'circles':
cls = Fonts.circles
if style == 'circle_dark':
cls = Fonts.dark_circle
if style == 'gothic':
cls = Fonts.gothic
if style == 'gothic_bolt':
cls = Fonts.bold_gothic
if style == 'cloud':
cls = Fonts.cloud
if style == 'happy':
cls = Fonts.happy
if style == 'sad':
cls = Fonts.sad
if style == 'special':
cls = Fonts.special
if style == 'squares':
cls = Fonts.square
if style == 'squares_bold':
cls = Fonts.dark_square
if style == 'andalucia':
cls = Fonts.andalucia
if style == 'manga':
cls = Fonts.manga
if style == 'stinky':
cls = Fonts.stinky
if style == 'bubbles':
cls = Fonts.bubbles
if style == 'underline':
cls = Fonts.underline
if style == 'ladybug':
cls = Fonts.ladybug
if style == 'rays':
cls = Fonts.rays
if style == 'birds':
cls = Fonts.birds
if style == 'slash':
cls = Fonts.slash
if style == 'stop':
cls = Fonts.stop
if style == 'skyline':
cls = Fonts.skyline
if style == 'arrows':
cls = Fonts.arrows
if style == 'qvnes':
cls = Fonts.rvnes
if style == 'strike':
cls = Fonts.strike
if style == 'frozen':
cls = Fonts.frozen
new_text = cls(m.message.reply_to_message.text)
try:
await m.message.edit_text(new_text, reply_markup=m.message.reply_markup)
except:
pass
|
py | 7dfe0a207af47dab978b105f3f4fa35d9d89cae8 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ConsumerGroup']
class ConsumerGroup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
consumer_group_name: Optional[pulumi.Input[str]] = None,
event_hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user_metadata: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Single item in List or Get Consumer group operation
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] consumer_group_name: The consumer group name
:param pulumi.Input[str] event_hub_name: The Event Hub name
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[str] name: Name of the consumer group.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[str] type: ARM type of the Namespace.
:param pulumi.Input[str] user_metadata: The user metadata.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if consumer_group_name is None:
raise TypeError("Missing required property 'consumer_group_name'")
__props__['consumer_group_name'] = consumer_group_name
if event_hub_name is None:
raise TypeError("Missing required property 'event_hub_name'")
__props__['event_hub_name'] = event_hub_name
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['name'] = name
if namespace_name is None:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['type'] = type
__props__['user_metadata'] = user_metadata
__props__['created_at'] = None
__props__['event_hub_path'] = None
__props__['updated_at'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/latest:ConsumerGroup"), pulumi.Alias(type_="azure-nextgen:eventhub/v20150801:ConsumerGroup"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:ConsumerGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ConsumerGroup, __self__).__init__(
'azure-nextgen:eventhub/v20140901:ConsumerGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ConsumerGroup':
"""
Get an existing ConsumerGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ConsumerGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Exact time the message was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="eventHubPath")
def event_hub_path(self) -> pulumi.Output[str]:
"""
The path of the Event Hub.
"""
return pulumi.get(self, "event_hub_path")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The exact time the message was updated.
"""
return pulumi.get(self, "updated_at")
@property
@pulumi.getter(name="userMetadata")
def user_metadata(self) -> pulumi.Output[Optional[str]]:
"""
The user metadata.
"""
return pulumi.get(self, "user_metadata")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7dfe0a3b798f248ae89d618ea92ced547d91ebfc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/16 21:21
# @Author : bxf
# @File : PUB_RULE_OPT.py
# @Software: PyCharm
from model.util.TMP_PAGINATOR import *
from model.util.PUB_RESP import *
from model.util.newID import *
class PUB_RULE_OPT:
def get_lists(self, data):
'''
        Get the rule list
:return:
'''
try:
page = data.get('_page')
records = data.get('_limit')
sql = 'SELECT * FROM t_rule_info '
rule_lists = GET_RECORDS_SQL(sql, page, records)
tb_data = []
rule_list = rule_lists[2]#getJsonFromDatabase(rule_lists[1])
if rule_list:
for i in rule_list:
i['rule_import_desc'] = json.loads(i['rule_import_desc'])
i['rule_import_project'] = json.loads(i['rule_import_project'])
tb_data.append(i)
else:
tb_data=[]
result = rule_lists[0]
result['tb_data'] = tb_data
return_data = respdata().sucessResp(result)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def get_lists_by_check(self, data):
'''
        Get the rule list (checked rules only)
:return:
'''
try:
page = data.get('_page')
records = data.get('_limit')
sql = 'SELECT * FROM t_rule_info WHERE rule_checked=1 '
rule_lists = GET_RECORDS_SQL(sql, page, records)
tb_data = []
rule_list =rule_lists[2] #getJsonFromDatabase(rule_lists[1])
if rule_list:
for i in rule_list:
i['rule_import_desc'] = json.loads(i['rule_import_desc'])
i['rule_import_project'] = json.loads(i['rule_import_project'])
tb_data.append(i)
else:
tb_data=[]
result = rule_lists[0]
result['tb_data'] = tb_data
return_data = respdata().sucessResp(result)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def insert(self, data):
'''
        Insert a new rule
        :param data: data for the new record
:return:
'''
try:
get_data = json.loads(data)
rule_id = newID().RULE_ID()
rule_import_desc = json.dumps(get_data['rule_import_desc'])
rule_import_project=get_data['rule_import_project']
# rule_import_project_a = dict()
# for i in get_data['rule_import_desc']:
# rule_import_project_a[i] = ''
# rule_import_project.append(rule_import_project_a)
rule_import_project=json.dumps(rule_import_project)
rule_checked=get_data['rule_checked']
del get_data['rule_checked']
del get_data['rule_import_desc']
insert_result = insertToDatabase('t_rule_info', get_data, rule_id=rule_id,group_id=0,
rule_import_desc=rule_import_desc, rule_import_project=rule_import_project,rule_checked=rule_checked)
return_data = respdata().sucessMessage('', '新增成功,新增记录数为: ' + str(insert_result))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '新增失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def update(self, data):
'''
        Update a rule
:param data:
:return:
'''
try:
get_data=json.loads(data)
rule_id = get_data['rule_id']
update_result = updateToDatabase('t_rule_info', get_data, rule_id=rule_id)
return_data = respdata().sucessMessage('', '更新成功,更新条数为:' + str(update_result))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '更新失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def delete(self, data):
'''
        Delete a rule
:param data:
:return:
'''
rule_id = data
sql = 'DELETE FROM t_rule_info WHERE rule_id ="' + rule_id + '"'
DB_CONN().db_Update(sql)
return_data = respdata().sucessMessage('', '删除成功!')
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
    # Case template configuration for rules
def case_rule_list(self, id, data):
'''
        Get the case templates of a public rule
        :param data: rule ID
        :return: list of case templates
'''
try:
page = data.get('_page')
records = data.get('_limit')
sql = 'SELECT * FROM rule_case_info WHERE rule_id="' + id + '"' + ' AND '
rule_lists = GET_RECORDS(sql, page, records)
return_data = respdata().sucessResp(rule_lists)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().exceptionResp(e)
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def case_rule_insert(self, data):
'''
        Insert a case template
        :param data: data for the new record
:return:
'''
try:
get_data = json.loads(data)
rule_case_id = newID().RULE_CASE_ID()
insert_result = insertToDatabase('rule_case_info', get_data, rule_case_id=rule_case_id)
return_data = respdata().sucessMessage('', '新增成功,新增记录数为: ' + str(insert_result))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '新增失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def case_rule_update(self, data):
'''
        Update a case template
:param data:
:return:
'''
try:
get_data=json.loads(data)
rule_case_id = get_data['rule_case_id']
del get_data['adddate']
del get_data['c_id']
update_result = updateToDatabase('rule_case_info', get_data,rule_case_id=rule_case_id)
return_data = respdata().sucessMessage('', '更新成功')
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
except Exception as e:
return_data = respdata().failMessage('', '更新失败,请检查!错误信息为:' + str(e))
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
def case_rule_delete(self, data):
'''
        Delete a case template
        :param data: case template ID
:return:
'''
rule_case_id = data
sql = 'DELETE FROM rule_case_info WHERE rule_case_id ="' + rule_case_id + '"'
DB_CONN().db_Update(sql)
return_data = respdata().sucessMessage('', '删除成功!')
return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
|
py | 7dfe0a3dc7430c40ecfa138435e0cb0f054c9d39 | import sys
import os
import math
if __name__ == '__main__':
def eprint(message):
print(message, file=sys.stderr)
if len(sys.argv) != 3:
eprint("Syntax: {} <colorfile> <output>".format(sys.argv[0]))
sys.exit(1)
codes = []
with open(sys.argv[1]) as file:
line_number = 0
while True:
line_number = line_number + 1
line = file.readline()
if not line:
break
if len(line) == 0:
continue
if line.startswith("##") or line[0].isspace():
continue
# Get the key code
code = line[0]
# Make sure we have more than just that
if len(line) == 1:
eprint("Invalid line {} on line {}".format(line, line_number))
sys.exit(1)
# Ignore spaces until we get something else
line = line[1:]
ignore_count = 0
for c in line:
if c.isspace():
ignore_count = ignore_count + 1
continue
break
line = line[ignore_count:]
# Ignore spaces after the code
ignore_count = 0
for c in reversed(line):
if c.isspace():
ignore_count = ignore_count + 1
continue
break
line = line[:-ignore_count]
if len(line) < 6:
eprint("Invalid code {} on line {}".format(line, line_number))
sys.exit(1)
# Convert to lowercase
line = line.lower()
# Get a color
color = None
# Check if it's xxxxxx (reset)
if line == "xxxxxx":
continue
else:
color_int = 0
try:
color_int = int(line, 16)
except ValueError:
eprint("Invalid code {} on line {}".format(line, line_number))
sys.exit(1)
color = color_int
# Escape the code if needed
if code == "\\" or code == "'":
code = "\\" + code
# Add to the list
codes.append([code, color])
# Write the codes
with open(sys.argv[2], "w") as file:
file.write("// SPDX-License-Identifier: GPL-3.0-only\n// DON'T EDIT THIS FILE\n// Edit color_codes and then regenerate this file with the color_codes_generator.py script\n\n")
file.write("#include <cstdint>\n")
file.write("#include <optional>\n")
file.write("namespace Invader {\n")
file.write(" std::optional<std::uint32_t> color_from_color_code(std::int16_t code) {\n")
file.write(" switch(code) {\n")
for code in codes:
file.write(" case \'{}\':\n".format(code[0]))
file.write(" return 0xFF{:06X};\n".format(code[1]))
file.write(" default: return std::nullopt;\n")
file.write(" }\n")
file.write(" }\n")
file.write("}\n")
|
py | 7dfe0aad08879bd8d150b833a319ee5bc0e0f02e | #!/opt/libreoffice5.2/program/python
# -*- coding: utf-8 -*-
import uno # needed only when run via automation.
from com.sun.star.awt.PosSize import POSSIZE
from com.sun.star.style.VerticalAlignment import BOTTOM
import unohelper
from com.sun.star.awt import XActionListener
from com.sun.star.util import XCloseListener
from com.sun.star.view import XSelectionChangeListener
from com.sun.star.awt.MessageBoxType import INFOBOX
from com.sun.star.awt.MessageBoxButtons import BUTTONS_OK
def macro():
    ctx = XSCRIPTCONTEXT.getComponentContext() # Get the component context.
    smgr = ctx.getServiceManager() # Get the service manager.
    doc = XSCRIPTCONTEXT.getDocument() # Get the model of the document from which the macro was launched.
    docframe = doc.getCurrentController().getFrame() # Get the document frame via model -> controller -> frame.
    docwindow = docframe.getContainerWindow() # Get the document window.
    toolkit = docwindow.getToolkit() # Get the toolkit.
dialog, addControl = dialogCreator(ctx, smgr, {"PositionX": 150, "PositionY": 150, "Width": 200, "Height": 200, "Title": "Selection Change", "Name": "dialog", "Step": 0, "TabIndex": 0, "Moveable": True})
    dialog.createPeer(toolkit, docwindow) # Render the dialog.
    dialogwindow = dialog.getPeer() # Get the dialog window.
addControl("FixedText", {"PositionX": 10, "PositionY": 0, "Width": 180, "Height": 30, "Label": "~Selection", "VerticalAlign": BOTTOM})
addControl("Edit", {"PositionX": 10, "PositionY": 40, "Width": 180, "Height": 30}, {"setFocus": None})
addControl("Button", {"PositionX": 80, "PositionY": 130, "Width": 110, "Height": 35, "DefaultButton": True, "Label": "~Show Selection"}, {"setActionCommand": "Button1", "addActionListener": ButtonListener(dialog, docwindow)})
    createFrame = frameCreator(ctx, smgr, docframe) # Pass the parent frame.
selectionchangelistener = SelectionChangeListener(dialog)
docframe.getController().addSelectionChangeListener(selectionchangelistener)
    frame = createFrame(dialog.Model.Name, dialogwindow) # New frame name and its container window.
removeListeners = listenersRemover(docframe, frame, selectionchangelistener)
closelistener = CloseListener(removeListeners)
docframe.addCloseListener(closelistener)
frame.addCloseListener(closelistener)
    dialog.setVisible(True) # Make the dialog visible.
def listenersRemover(docframe, frame, selectionchangelistener):
def removeListeners(closelistener):
frame.removeCloseListener(closelistener)
docframe.removeCloseListener(closelistener)
docframe.getController().removeSelectionChangeListener(selectionchangelistener)
return removeListeners
class SelectionChangeListener(unohelper.Base, XSelectionChangeListener):
def __init__(self, dialog):
self.dialog = dialog
self.flag = True
def selectionChanged(self, eventobject):
if self.flag:
self.flag = False
selection = eventobject.Source.getSelection()
if selection.supportsService("com.sun.star.text.TextRanges"):
if len(selection)>0:
rng = selection[0]
txt = rng.getString()
self.dialog.getControl("Edit1").setText(txt)
self.flag = True
def disposing(self, eventobject):
pass
class CloseListener(unohelper.Base, XCloseListener):
def __init__(self, removeListeners):
self.removeListeners = removeListeners
def queryClosing(self, eventobject, getownership):
pass
def notifyClosing(self, eventobject):
self.removeListeners(self)
def disposing(self, eventobject):
pass
class ButtonListener(unohelper.Base, XActionListener): # Button listener.
    def __init__(self, dialog, parentwindow): # The window is needed to display the message box.
        self.dialog = dialog # Keep the dialog.
        self.parentwindow = parentwindow # Keep the parent window (used as the message box parent).
def actionPerformed(self, actionevent):
        cmd = actionevent.ActionCommand # Get the action command.
        edit = self.dialog.getControl("Edit1") # Get the control named Edit1.
        if cmd == "Button1": # When the action command is Button1.
            toolkit = self.parentwindow.getToolkit() # Get the toolkit.
            msgbox = toolkit.createMessageBox(self.parentwindow, INFOBOX, BUTTONS_OK, "Text Field", "{}".format(edit.getText())) # Create a message box with the parent window as its parent.
            msgbox.execute() # Show the message box.
            msgbox.dispose() # Dispose of the message box.
def disposing(self, eventobject):
pass
def frameCreator(ctx, smgr, parentframe): # Returns a function that adds a new frame. Pass the parent frame.
    def createFrame(framename, containerwindow): # Pass the new frame name and the window to use as its container window.
        frame = smgr.createInstanceWithContext("com.sun.star.frame.Frame", ctx) # Create a frame to hold the newly created window.
        frame.initialize(containerwindow) # Put the window into the frame.
        frame.setName(framename) # Set the frame name.
        parentframe.getFrames().append(frame) # Add the new frame to the existing frame hierarchy.
return frame
return createFrame
def dialogCreator(ctx, smgr, dialogprops): # Returns the dialog and a function that adds controls to it. First take the dialog model properties.
    dialog = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialog", ctx) # Create the dialog.
    dialog.setPosSize(dialogprops.pop("PositionX"), dialogprops.pop("PositionY"), dialogprops.pop("Width"), dialogprops.pop("Height"), POSSIZE) # Setting these on the dialog model would use MapAppFont units, so set them on the control instead.
    dialogmodel = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialogModel", ctx) # Create the dialog model.
    dialogmodel.setPropertyValues(tuple(dialogprops.keys()), tuple(dialogprops.values())) # Set the dialog model properties.
    dialog.setModel(dialogmodel) # Set the dialog model on the dialog.
    dialog.setVisible(False) # Do not show the dialog while it is being built.
    def addControl(controltype, props, attrs=None): # props: control model properties, attrs: control attributes.
        control = smgr.createInstanceWithContext("com.sun.star.awt.UnoControl{}".format(controltype), ctx) # Create the control.
        control.setPosSize(props.pop("PositionX"), props.pop("PositionY"), props.pop("Width"), props.pop("Height"), POSSIZE) # Set only position and size on the control so they are specified in pixels.
        if not "Name" in props:
            props["Name"] = _generateSequentialName(controltype) # Generate a sequential name when Name is not in props.
        controlmodel = dialogmodel.createInstance("com.sun.star.awt.UnoControl{}Model".format(controltype)) # Create the control model. It must be created from UnoControlDialogModel for the UnoControlDialogElement service.
        values = props.values() # setPropertyValues() raises an error when a property value is a tuple, so handle that case.
        if any(map(isinstance, values, [tuple]*len(values))):
            [setattr(controlmodel, key, val) for key, val in props.items()] # val may be a list or a tuple. XMultiPropertySet's setPropertyValues() cannot take tuples here.
        else:
            controlmodel.setPropertyValues(tuple(props.keys()), tuple(values))
        control.setModel(controlmodel) # Set the control model on the control.
        dialog.addControl(props["Name"], control) # Add the control to the dialog.
        if attrs is not None: # Attributes can only be set on a control after it has been added to the dialog.
            control = dialog.getControl(props["Name"]) # Get the control after it has been added to the dialog.
            for key, val in attrs.items(): # val is None when the method takes no argument.
if val is None:
getattr(control, key)()
else:
getattr(control, key)(val)
        def _generateSequentialName(controltype): # Generate a sequential name.
i = 1
flg = True
while flg:
name = "{}{}".format(controltype, i)
                flg = dialog.getControl(name) # Check whether a control with the same name already exists.
i += 1
return name
    return dialog, addControl # Return the dialog and a function that adds controls to it.
g_exportedScripts = macro, # Tuple of functions to show in the macro selector.
if __name__ == "__main__": # When run via automation.
import officehelper
import traceback
from functools import wraps
import sys
from com.sun.star.beans import PropertyValue
from com.sun.star.script.provider import XScriptContext
    def connectOffice(func): # Handle the Office connection around func.
        @wraps(func)
        def wrapper(): # Start LibreOffice in the background and get the component context and service manager.
            try:
                ctx = officehelper.bootstrap() # Get the component context.
            except:
                print("Could not establish a connection with a running office.")
                sys.exit()
            print("Connected to a running office ...")
            smgr = ctx.getServiceManager() # Get the service manager.
            print("Using {} {}".format(*_getLOVersion(ctx, smgr))) # Print the LibreOffice version.
try:
                return func(ctx, smgr) # Run the wrapped function.
except:
traceback.print_exc()
        def _getLOVersion(ctx, smgr): # Return the LibreOffice name and version.
            cp = smgr.createInstanceWithContext('com.sun.star.configuration.ConfigurationProvider', ctx)
            node = PropertyValue(Name = 'nodepath', Value = 'org.openoffice.Setup/Product' ) # Node path inside share/registry/main.xcd.
            ca = cp.createInstanceWithArguments('com.sun.star.configuration.ConfigurationAccess', (node,))
            return ca.getPropertyValues(('ooName', 'ooSetupVersion')) # Return the LibreOffice name and version as a tuple.
return wrapper
    @connectOffice # Decorator that passes ctx and smgr as arguments to main.
    def main(ctx, smgr): # Build an XSCRIPTCONTEXT.
class ScriptContext(unohelper.Base, XScriptContext):
def __init__(self, ctx):
self.ctx = ctx
def getComponentContext(self):
return self.ctx
def getDesktop(self):
return self.ctx.getServiceManager().createInstanceWithContext("com.sun.star.frame.Desktop", self.ctx)
def getDocument(self):
return self.getDesktop().getCurrentComponent()
return ScriptContext(ctx)
    XSCRIPTCONTEXT = main() # Get the XSCRIPTCONTEXT.
    doc = XSCRIPTCONTEXT.getDocument() # Get the document.
    if not hasattr(doc, "getCurrentController"): # When this is not a document; also excludes the Start Center.
        XSCRIPTCONTEXT.getDesktop().loadComponentFromURL("private:factory/swriter", "_blank", 0, ()) # Open a Writer document.
    while doc is None: # Wait for the document to load.
doc = XSCRIPTCONTEXT.getDocument()
macro()
|
py | 7dfe0b20a7478c78e5f865b831c0bf0208cf7327 | import pytest
from pandas import DataFrame, IndexSlice, MultiIndex, date_range
import pandas._testing as tm
@pytest.fixture
def df():
# c1
# 2016-01-01 00:00:00 a 0
# b 1
# c 2
# 2016-01-01 12:00:00 a 3
# b 4
# c 5
# 2016-01-02 00:00:00 a 6
# b 7
# c 8
# 2016-01-02 12:00:00 a 9
# b 10
# c 11
# 2016-01-03 00:00:00 a 12
# b 13
# c 14
dr = date_range("2016-01-01", "2016-01-03", freq="12H")
abc = ["a", "b", "c"]
mi = MultiIndex.from_product([dr, abc])
frame = DataFrame({"c1": range(0, 15)}, index=mi)
return frame
def test_partial_string_matching_single_index(df):
# partial string matching on a single index
for df_swap in [df.swaplevel(), df.swaplevel(0), df.swaplevel(0, 1)]:
df_swap = df_swap.sort_index()
just_a = df_swap.loc["a"]
result = just_a.loc["2016-01-01"]
expected = df.loc[IndexSlice[:, "a"], :].iloc[0:2]
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
def test_partial_string_timestamp_multiindex(df):
# GH10331
df_swap = df.swaplevel(0, 1).sort_index()
SLC = IndexSlice
# indexing with IndexSlice
result = df.loc[SLC["2016-01-01":"2016-02-01", :], :]
expected = df
tm.assert_frame_equal(result, expected)
# match on secondary index
result = df_swap.loc[SLC[:, "2016-01-01":"2016-01-01"], :]
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# partial string match on year only
result = df.loc["2016"]
expected = df
tm.assert_frame_equal(result, expected)
# partial string match on date
result = df.loc["2016-01-01"]
expected = df.iloc[0:6]
tm.assert_frame_equal(result, expected)
# partial string match on date and hour, from middle
result = df.loc["2016-01-02 12"]
expected = df.iloc[9:12]
tm.assert_frame_equal(result, expected)
# partial string match on secondary index
result = df_swap.loc[SLC[:, "2016-01-02"], :]
expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
tm.assert_frame_equal(result, expected)
# tuple selector with partial string match on date
result = df.loc[("2016-01-01", "a"), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# Slicing date on first level should break (of course)
with pytest.raises(KeyError, match="'2016-01-01'"):
df_swap.loc["2016-01-01"]
def test_partial_string_timestamp_multiindex_str_key_raises(df):
# Even though this syntax works on a single index, this is somewhat
# ambiguous and we don't want to extend this behavior forward to work
# in multi-indexes. This would amount to selecting a scalar from a
# column.
with pytest.raises(KeyError, match="'2016-01-01'"):
df["2016-01-01"]
def test_partial_string_timestamp_multiindex_daily_resolution(df):
# GH12685 (partial string with daily resolution or below)
result = df.loc[IndexSlice["2013-03":"2013-03", :], :]
expected = df.iloc[118:180]
tm.assert_frame_equal(result, expected)
|
py | 7dfe0d3d0769cf1c96e8d9971e0ca11590ce7531 | """
Utility toolkit for iterable objects
"""
class IterableHelper:
"""
    Helper class for iterable objects
"""
    # Static methods: they do not need to access instance or class members.
    # Semantically: utility functions (commonly used and independent).
@staticmethod
def find_all(iterable, condition):
"""
        Find all elements of an iterable that satisfy an arbitrary condition.
        :param iterable: iterable object
        :param condition: function type, the search condition
        :return: generator yielding the elements that satisfy the condition
"""
for item in iterable:
if condition(item):
yield item
@staticmethod
def find_single(iterable, condition):
"""
        Find a single element of an iterable that satisfies an arbitrary condition.
        :param iterable: iterable object
        :param condition: function type, the search condition
        :return: the first element that satisfies the condition (None when nothing matches)
"""
for item in iterable:
if condition(item):
return item
@staticmethod
def select(iterable, condition):
"""
        Process each element of an iterable with the given logic.
        :param iterable: iterable object
        :param condition: function type, the processing logic
        :return: generator yielding the processed results
"""
for item in iterable:
yield condition(item)
@staticmethod
def get_count(iterable, condition):
"""
        Count the elements of an iterable that satisfy the condition.
        :param iterable: iterable object
        :param condition: function type, the condition
        :return: the count
"""
count = 0
for item in iterable:
if condition(item):
count += 1
return count
@staticmethod
def sum(iterable, condition):
"""
        Accumulate values over an iterable according to the condition.
        :param iterable: iterable object
        :param condition: function type, the condition
        :return: the accumulated result
"""
sum_value = 0
for item in iterable:
sum_value += condition(item)
return sum_value
@staticmethod
def delete_all(iterable, condition):
"""
        Delete the elements of an iterable that satisfy the condition.
        :param iterable: iterable object
        :param condition: function type, the condition
        :return: the number of deleted elements
"""
count = 0
for i in range(len(iterable) - 1, -1, -1):
if condition(iterable[i]):
del iterable[i]
count += 1
return count
@staticmethod
def get_max(iterable, condition):
"""
        Get the largest element of an iterable according to the condition.
        :param iterable: iterable object
        :param condition: function type, the condition
        :return: the largest element
"""
max_value = iterable[0]
for i in range(1, len(iterable)):
if condition(max_value) < condition(iterable[i]):
max_value = iterable[i]
return max_value
@staticmethod
def sort(iterable, condition, reverse=False):
"""
        Sort an iterable in place according to the condition (ascending by default).
        :param iterable: iterable object
        :param condition: function type, the sort key
        :param reverse: whether to sort in descending order
"""
for r in range(len(iterable) - 1):
for c in range(r + 1, len(iterable)):
                if reverse:  # reversed: descending order (largest --> smallest)
if condition(iterable[r]) < condition(iterable[c]):
iterable[r], iterable[c] = iterable[c], iterable[r]
else:
if condition(iterable[r]) > condition(iterable[c]):
iterable[r], iterable[c] = iterable[c], iterable[r]
|
py | 7dfe0e3d5b83419051be162aad496b4056e0329a | """Asynchronous client for the PVOutput API."""
from .models import Status, System
from .pvoutput import (
PVOutput,
PVOutputAuthenticationError,
PVOutputConnectionError,
PVOutputError,
)
__all__ = [
"PVOutput",
"PVOutputAuthenticationError",
"PVOutputConnectionError",
"PVOutputError",
"Status",
"System",
]
|
py | 7dfe0eca4881a985cc30cfb08131ea8e40d649c9 | from gym.utils import EzPickle
import numpy as np
from magical import geom
from magical.base_env import BaseEnv, ez_init
import magical.entities as en
SMALL_POS_BOUND = 0.05
DEFAULT_ROBOT_POSE = ((0.058, 0.53), -2.13)
DEFAULT_GOAL_COLOUR = en.ShapeColour.BLUE
DEFAULT_GOAL_XYHW = (-0.62, -0.17, 0.76, 0.75)
class MoveToRegionEnv(BaseEnv, EzPickle):
"""Simple task where the robot merely has to move to a single coloured goal
region."""
@ez_init()
def __init__(self,
rand_poses_minor=False,
rand_poses_full=False,
rand_goal_colour=False,
**kwargs):
super().__init__(**kwargs)
assert not (rand_poses_minor and rand_poses_full), \
"cannot specify both 'rand_poses_minor' and 'rand_poses_full'"
self.rand_poses_minor = rand_poses_minor
self.rand_poses_full = rand_poses_full
self.rand_goal_colour = rand_goal_colour
def on_reset(self):
goal_xyhw = DEFAULT_GOAL_XYHW
if self.rand_poses_minor or self.rand_poses_full:
# randomise width and height of goal region
# (unfortunately this has to be done before pose randomisation b/c
# I don't have an easy way of changing size later)
if self.rand_poses_minor:
hw_bound = self.JITTER_TARGET_BOUND
else:
hw_bound = None
sampled_hw = geom.randomise_hw(self.RAND_GOAL_MIN_SIZE,
self.RAND_GOAL_MAX_SIZE,
self.rng,
current_hw=goal_xyhw[2:],
linf_bound=hw_bound)
goal_xyhw = (*goal_xyhw[:2], *sampled_hw)
# colour the goal region
if self.rand_goal_colour:
goal_colour = self.rng.choice(
np.asarray(en.SHAPE_COLOURS, dtype='object'))
else:
goal_colour = DEFAULT_GOAL_COLOUR
# place the goal region
assert len(goal_xyhw) == 4, goal_xyhw
goal = en.GoalRegion(*goal_xyhw, goal_colour)
self.add_entities([goal])
self.__goal_ref = goal
# now place the robot
default_robot_pos, default_robot_angle = DEFAULT_ROBOT_POSE
robot = self._make_robot(default_robot_pos, default_robot_angle)
self.add_entities([robot])
self.__robot_ent_index = en.EntityIndex([robot])
if self.rand_poses_minor or self.rand_poses_full:
if self.rand_poses_minor:
# limit amount by which position and rotation can be randomised
pos_limits = self.JITTER_POS_BOUND
rot_limits = [None, self.JITTER_ROT_BOUND]
else:
# no limits, can randomise as much as needed
assert self.rand_poses_full
pos_limits = rot_limits = None
geom.pm_randomise_all_poses(self._space,
(self.__goal_ref, self._robot),
self.ARENA_BOUNDS_LRBT,
rng=self.rng,
rand_pos=True,
rand_rot=(False, True),
rel_pos_linf_limits=pos_limits,
rel_rot_limits=rot_limits)
def score_on_end_of_traj(self):
# this one just has a lazy binary reward
dist, _ = self.__goal_ref.goal_shape.point_query(
self._robot.robot_body.position)
if dist <= 0:
reward = 1.0
else:
reward = 0.0
assert 0 <= reward <= 1
return reward
|
py | 7dfe0ef71264f77bcb9cf269ad9ae85ebe4feddf | #!/usr/bin/python
# GoogleMapDownloader.py
# Created by Hayden Eskriett [http://eskriett.com]
#
# A script which when given a longitude, latitude and zoom level downloads a
# high resolution google map
# Find the associated blog post at: http://blog.eskriett.com/2013/07/19/downloading-google-maps/
# https://gis.stackexchange.com/a/42423
# 20 - (40075017/4294967296.0) * (2 ** (24-20)) * np.cos(np.deg2rad(44.439457))
"""
decimal places decimal degrees N/S or E/W at equator
2 0.01 1.1132 km
3 0.001 111.32 m
4 0.0001 11.132 m
5 0.00001 1.1132 m
"""
import urllib.request  # urlretrieve lives in urllib.request on Python 3
from PIL import Image
import os
import math
import numpy as np
import cv2
import pandas as pd
import utm
import matplotlib.pyplot as plt
class GoogleMapDownloader:
"""
A class which generates high resolution google maps images given
a longitude, latitude and zoom level
"""
def __init__(self, lat, lng, zoom=12):
"""
GoogleMapDownloader Constructor
Args:
lat: The latitude of the location required
lng: The longitude of the location required
zoom: The zoom level of the location required, ranges from 0 - 23
defaults to 12
"""
self._lat = lat
self._lng = lng
self._zoom = zoom
def getXY(self):
"""
Generates an X,Y tile coordinate based on the latitude, longitude
and zoom level
Returns: An X,Y tile coordinate
"""
tile_size = 256
# Use a left shift to get the power of 2
# i.e. a zoom level of 2 will have 2^2 = 4 tiles
numTiles = 1 << self._zoom
# Find the x_point given the longitude
point_x = (tile_size / 2 + self._lng * tile_size / 360.0) * numTiles // tile_size
# Convert the latitude to radians and take the sine
sin_y = math.sin(self._lat * (math.pi / 180.0))
# Calulate the y coorindate
point_y = ((tile_size / 2) + 0.5 * math.log((1 + sin_y) / (1 - sin_y)) * -(
tile_size / (2 * math.pi))) * numTiles // tile_size
return int(point_x), int(point_y)
def generateImage(self, **kwargs):
"""
Generates an image by stitching a number of google map tiles together.
Args:
start_x: The top-left x-tile coordinate
start_y: The top-left y-tile coordinate
tile_width: The number of tiles wide the image should be -
defaults to 5
tile_height: The number of tiles high the image should be -
defaults to 5
type: type of map "hybrid" or "standard"
Returns:
A high-resolution Goole Map image.
"""
start_x = kwargs.get('start_x', None)
start_y = kwargs.get('start_y', None)
tile_width = kwargs.get('tile_width', 5)
tile_height = kwargs.get('tile_height', 5)
type = kwargs.get('type', "standard")
# Check that we have x and y tile coordinates
if start_x == None or start_y == None:
start_x, start_y = self.getXY()
# Determine the size of the image
width, height = 256 * tile_width, 256 * tile_height
# Create a new image of the size require
map_img = Image.new('RGB', (width, height))
print (tile_width, tile_height)
for x in range(0, tile_width):
for y in range(0, tile_height):
if type == "hybrid":
url = 'https://mt0.google.com/vt/lyrs=y&hl=en&x=' + str(start_x + x) + '&y=' \
+ str(start_y + y) + '&z=' + str(self._zoom)
else:
url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+\
'&z='+str(self._zoom)
print (x, y, url)
current_tile = str(x) + '-' + str(y)
urllib.request.urlretrieve(url, current_tile)
im = Image.open(current_tile)
map_img.paste(im, (x * 256, y * 256))
os.remove(current_tile)
return map_img
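# Hedged usage sketch (not from the original script): stitch a small hybrid map around an
# assumed coordinate. Latitude/longitude, zoom and tile counts are illustrative only, and
# Google may throttle or block unauthenticated tile requests.
def _example_download_map(out_path="example_map.png"):
    gmd = GoogleMapDownloader(44.439457, 26.096306, 18)  # assumed coordinates near the area referenced above
    start_x, start_y = gmd.getXY()
    img = gmd.generateImage(start_x=start_x, start_y=start_y,
                            tile_width=2, tile_height=2, type="hybrid")
    img.save(out_path)
    return img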
def add_wgs84(df, latitude="latitude", longitude="longitude"):
df = df.assign(**{'easting': -1., 'northing': -1., "zone_no": -1., "zone_letter": ""})
for idx, row in df.iterrows():
easting, northing, zone_no, zone_letter = utm.from_latlon(row[latitude],
row[longitude])
df.at[idx, "easting"] = easting
df.at[idx, "northing"] = northing
df.at[idx, "zone_no"] = zone_no
df.at[idx, "zone_letter"] = zone_letter
return df
class ImageWgsHandler:
def __init__(self, map_path):
self.map_image = map_image = cv2.imread(map_path)
self.img_rows, self.img_cols, _ = map_image.shape
# Load reference points
base = os.path.splitext(map_path)[0]
self.reference_points = reference_points = pd.read_csv(f"{base}.csv")
self.density = None
if os.path.isfile(f"{base}.density"):
with open(f"{base}.density", "r") as f:
self.density = float(f.read().strip())
if self.density:
print(f"Map density: {self.density} m /pixel")
reference_points = add_wgs84(reference_points)
# # # -- Check conversion
# img = plt.imread(map_path)
# eastings, northings = reference_points.easting.values, reference_points.northing.values
# rows = row_f.predict(np.column_stack([eastings, northings]))
# cols = col_f.predict(np.column_stack([eastings, northings]))
#
# # rows = reference_points.pixel_row
# # cols = reference_points.pixel_column
#
# fig = plt.figure()
# right, top = img_cols, img_rows
# plt.imshow(img, extent=[0, right, 0, top])
# plt.scatter(cols, top-rows, s=1.5, c="r")
# plt.axes().set_aspect('equal')
(row_f, col_f), (easting_f, northing_f) = self.get_conversion_functions(reference_points)
self.row_f, self.col_f = row_f, col_f
self.easting_f, self.northing_f = easting_f, northing_f
self.reference_points = reference_points
@staticmethod
def get_conversion_functions(reference_points):
# -- Function conversion from WGS to pixel
x = reference_points.easting.values
y = reference_points.northing.values
from sklearn import linear_model
# from sklearn import svm
#
# classifiers = [
# svm.SVR(),
# linear_model.SGDRegressor(),
# linear_model.BayesianRidge(),
# linear_model.LassoLars(),
# linear_model.ARDRegression(),
# linear_model.PassiveAggressiveRegressor(),
# linear_model.TheilSenRegressor(max_iter=1000, tol=1.e-7),
# linear_model.LinearRegression(normalize=True)]
#
# z = reference_points.pixel_row.values
# for item in classifiers:
# print(item)
# clf = item
# clf.fit(np.column_stack([x, y]), z)
# print(np.abs(clf.predict(np.column_stack([x, y])) - z).sum(), '\n')
#
# z = reference_points.pixel_column.values
# for item in classifiers:
# print(item)
# clf = item
# clf.fit(np.column_stack([x, y]), z)
# print(np.abs(clf.predict(np.column_stack([x, y])) - z).sum(), '\n')
z = reference_points.pixel_row.values
row_f = linear_model.LinearRegression()
row_f.fit(np.column_stack([x, y]), z)
z = reference_points.pixel_column.values
col_f = linear_model.LinearRegression()
col_f.fit(np.column_stack([x, y]), z)
# -- Function conversion from Pixels to wgs
x = reference_points.pixel_row.values
y = reference_points.pixel_column.values
z = reference_points.easting.values
easting_f = linear_model.LinearRegression()
easting_f.fit(np.column_stack([x, y]), z)
z = reference_points.northing.values
northing_f = linear_model.LinearRegression()
northing_f.fit(np.column_stack([x, y]), z)
return (row_f, col_f), (easting_f, northing_f)
def plot_wgs_coord(self, eastings, northings, padding=100, ax=None, show_image=True, c="r", convert_method=0):
import time
if len(eastings) <= 0:
return
st = time.time()
max_cols, max_rows = self.img_cols, self.img_rows
img = self.map_image
rows, cols = self.get_image_coord(eastings, northings, convert_method=convert_method)
min_rows, max_rows = int(np.clip(rows.min() - padding, 0, max_rows)), \
int(np.clip(rows.max() + padding, 0, max_rows))
min_cols, max_cols = int(np.clip(cols.min() - padding, 0, max_cols)), \
int(np.clip(cols.max() + padding, 0, max_cols))
img_show = cv2.cvtColor(img[min_rows: max_rows, min_cols: max_cols], cv2.COLOR_BGR2RGB)
fig = None
if ax is None:
fig, ax = plt.subplots()
if show_image:
ax.imshow(img_show, extent=[min_cols, max_cols, max_rows, min_rows], aspect="equal")
ax.scatter(cols, rows, s=1.5, c=c)
return fig, ax
def get_image_coord(self, eastings, northings, convert_method=1):
if self.density is not None and convert_method == 0:
density = self.density
ref_points = self.reference_points
a = np.column_stack([eastings, northings])
b = ref_points[["easting", "northing"]].values
dist = np.linalg.norm(a[:, np.newaxis] - b, axis=2)
ref = ref_points.iloc[dist.argmin(axis=1)]
cols = (ref.pixel_column + (eastings - ref.easting)/density).values
rows = (ref.pixel_row - (northings - ref.northing)/density).values
else:
row_f, col_f = self.row_f, self.col_f
rows = row_f.predict(np.column_stack([eastings, northings]))
cols = col_f.predict(np.column_stack([eastings, northings]))
return rows, cols
def get_wgs_coord(self, rows, cols):
easting_f, northing_f = self.easting_f, self.northing_f
easting = easting_f.predict(np.column_stack([rows, cols]))
northing = northing_f.predict(np.column_stack([rows, cols]))
return easting, northing
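# Illustrative sketch (added; not in the original file): the LinearRegression pairs fitted
# above are affine maps between (easting, northing) and (pixel_row, pixel_column), so a
# round trip through get_image_coord / get_wgs_coord should approximately recover the
# input. The map path and the sample UTM coordinates below are placeholders.
def _example_wgs_roundtrip(map_path="util_data/high_res_full_UPB_hybrid.jpg"):
    handler = ImageWgsHandler(map_path)
    eastings = np.array([423000.0])
    northings = np.array([4920000.0])
    rows, cols = handler.get_image_coord(eastings, northings, convert_method=1)
    back_e, back_n = handler.get_wgs_coord(rows, cols)
    print(back_e - eastings, back_n - northings)  # residuals should be close to zero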
def validate_reference_points():
base = "util_data/high_res_full_UPB_hybrid"
map_image = cv2.imread(f"{base}.jpg")
with open(f"{base}.density", "r") as f:
density = float(f.read().strip())
start_lat, start_long = 44.444122, 26.042366
start_easting, start_northing, zone_no, zone_letter = utm.from_latlon(start_lat, start_long)
reference_points = pd.read_csv(f"{base}.csv")
reference_points = add_wgs84(reference_points)
img = plt.imread(f"{base}.jpg")
img_rows, img_cols, _ = map_image.shape
for idx, row in reference_points.iterrows():
if idx < 25:
continue
print(f"IDX: {idx}")
px_row = row.pixel_row
px_col = row.pixel_column
# PLOT pixel
fig = plt.figure()
right, top = img_cols, img_rows
plt.imshow(img, extent=[0, right, 0, top])
plt.scatter(px_col, top-px_row, s=4.5, c="r")
plt.axes().set_aspect('equal')
# PLOT gps
x = (row.easting - start_easting)/density
y = (start_northing - row.northing)/density
# fig = plt.figure()
# right, top = img_cols, img_rows
# plt.imshow(img, extent=[0, right, 0, top])
plt.scatter(x, top-y, s=3.5, c="b")
plt.axes().set_aspect('equal')
plt.waitforbuttonpress()
plt.close("all")
def main():
import utm
# Create a new instance of GoogleMap Downloader
lat = 44.444122
long = 26.042366
scale = 20
gmd = GoogleMapDownloader(lat, long, scale)
print("The tile coorindates are {}".format(gmd.getXY()))
exit(0)
try:
# Get the high resolution image
img = gmd.generateImage(tile_width=38, tile_height=41, type="hybrid")
except IOError:
print(
"Could not generate the image - try adjusting the zoom level and checking your coordinates")
else:
# Save the image to disk
img.save("/media/andrei/CE04D7C504D7AF291/nemodrive/data_collect/high_resolution_image_full"
"_full3.png")
print("The map has successfully been created")
exit(0)
# calculate pixel size
equator_zoom_24 = 0.009330692
scale_size = equator_zoom_24 * (2 ** (24-scale))
    pixel_size = np.cos(np.radians(lat)) * scale_size  # latitude must be in radians for np.cos
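    # Added explanatory note: 0.009330692 m/px is the ground resolution at the equator
    # at zoom 24; each zoom level halves/doubles it, so at scale=20 it is
    # 0.009330692 * 2**4 ~= 0.149 m/px, shrunk by cos(latitude) ~= 0.714 at 44.44 deg,
    # i.e. roughly 0.107 m per pixel.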
# WGS84 conversion from lat_lon GPS
easting, northing, zone_no, zone_letter = utm.from_latlon(lat, long)
easting += 2125 * pixel_size
northing += 8853 * pixel_size
new_lat, new_long = utm.to_latlon(easting, northing, zone_no, zone_letter)
if __name__ == "__main__":
main()
|
py | 7dfe101e1cfcdb3e95ad121ad22b542b0772f2ca | import os
import subprocess
import time
from leapp import reporting
from leapp.libraries.stdlib import api
ONE_MONTH = 2592000 # Number of seconds in one month
def is_executable(path):
"""
    Check whether path exists, is a regular file and is executable.
"""
return os.path.exists(path) and os.path.isfile(path) and os.access(path, os.X_OK)
def get_xsession(path):
"""
    Get the current XSession.
    If there is more than one xsession definition for some reason, the last
    definition wins. Returns an empty string if no XSession is found in the file
    given by path (for any reason, including a bad path).
"""
default_xsession = ""
if not isinstance(path, str):
return default_xsession
if not (os.path.exists(path) and os.path.isfile(path)): # Bad path - in container for example
return default_xsession
with open(path, "r") as f:
for line in f.readlines():
if "xsession" in line.lower():
default_xsession = line.split("=")[1].lower()
return default_xsession
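# Illustrative sketch (added; not part of the original leapp library): get_xsession parses
# the AccountsService user file format, e.g. a line such as "XSession=plasma". The helper
# name and the temporary file below are purely for demonstration.
def _example_get_xsession():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".conf", delete=False) as tmp:
        tmp.write("[User]\nXSession=plasma\nSystemAccount=false\n")
        path = tmp.name
    # Returns "plasma\n" (the lower-cased value after '='), which is why callers use a
    # substring test such as `"plasma" in default_xsession`.
    result = get_xsession(path)
    os.unlink(path)
    return result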
def check_app_in_use(app):
"""
Method return True if application was used in last month, False in other cases.
"""
path = "{0}/.kde/share/config/{1}rc".format(os.environ.get("HOME"), app)
if os.path.isfile(path):
last_modified = os.stat(path).st_mtime
# Application is considered actively used, if it has been used in last month.
return last_modified >= int(time.time() - ONE_MONTH)
return False
def is_installed(app):
"""
Wrapper for "rpm -q <app>" command
    Return value: True if the application is found,
    False otherwise.
    Output of the rpm command is not suppressed.
"""
    return subprocess.call(["rpm", "-q", app]) == 0
def check_kde_gnome():
apps_in_use = 0
api.current_logger().info(" Detecting desktop environments ")
api.current_logger().info("==================================")
# Detect installed desktops by their startup files
kde_desktop_installed = is_executable("/usr/bin/startkde")
gnome_desktop_installed = is_executable("/usr/bin/gnome-session")
api.current_logger().info("* KDE installed: {0}".format(kde_desktop_installed))
api.current_logger().info("* Gnome installed: {0}".format(gnome_desktop_installed))
api.current_logger().info("----------------------------------")
# No desktop installed, we don't even care about apps as they are most likely not used or even installed
if not kde_desktop_installed and not gnome_desktop_installed:
api.current_logger().info("No desktop installed. Continuing with the upgrade.")
return
if kde_desktop_installed:
api.current_logger().info("KDE desktop is installed. Checking what we can do about it.")
if not gnome_desktop_installed:
api.current_logger().error("Cannot perform the upgrade because there is"
" no other desktop than KDE installed.")
# We cannot continue with the upgrade process
reporting.create_report([
reporting.Title("Cannot upgrade because there is no other desktop than KDE installed."),
reporting.Summary("With only KDE installed, there would be no other desktop after upgrade."),
reporting.Severity(reporting.Severity.HIGH),
reporting.Tags([
reporting.Tags.UPGRADE_PROCESS
]),
reporting.Flags([
reporting.Flags.INHIBITOR
]),
reporting.Remediation(
hint="Install GNOME desktop to be able to upgrade.")
])
return
# Assume GNOME is installed in this state
user = os.environ.get("USER")
default_xsession = get_xsession("/var/lib/AccountsService/users/{0}".format(user))
if not default_xsession:
api.current_logger().warn("Unable to detect default session.")
else:
if "plasma" in default_xsession: # using in because there can be some white spaces
api.current_logger().info("KDE used as default session.")
api.current_logger().info("Upgrade can be performed, but KDE desktop will"
" be removed in favor of GNOME")
reporting.create_report([
reporting.Title("Upgrade can be performed, but KDE will be uninstalled."),
reporting.Summary("KDE has to be uninstalled in favor of GNOME to perform the upgrade."),
reporting.Severity(reporting.Severity.MEDIUM),
reporting.Tags([
reporting.Tags.UPGRADE_PROCESS
                ])
            ])
else:
api.current_logger().info("GNOME used as default session. Continuing with the upgrade.")
api.current_logger().info("----------------------------------")
    # At this point we can assume that the KDE desktop as such is not installed or used, and we just need to
    # detect whether any KDE/Qt app is actively used, to inform the user that the application will be removed
    # during the upgrade process
base_kde_apps = ("kde-baseapps",
"okular",
"ark",
"kdepim",
"konsole",
"gwenview",
"kdenetwork",
"kate", "kwrite")
api.current_logger().info(" Detecting installed KDE apps ")
api.current_logger().info("================================")
for app in base_kde_apps:
if is_installed(app):
if check_app_in_use(app):
api.current_logger().info("Application {0} is actively used".format(app))
apps_in_use += 1
api.current_logger().info("* {0} {1} installed.".format(app, "is" if is_installed(app) else "is not"))
api.current_logger().info("----------------------------------")
if apps_in_use > 0:
api.current_logger().info("KDE apps in use detected.")
reporting.create_report([
reporting.Title("Upgrade can be performed, but KDE apps will be uninstalled."),
reporting.Summary("KDE apps will be removed to perform the upgrade."),
reporting.Severity(reporting.Severity.MEDIUM),
reporting.Tags([
reporting.Tags.UPGRADE_PROCESS
]),
reporting.Remediation(
hint="KDE apps has to be removed, no other solution is possible.")
])
        # upgrade can be performed, but the user will lose the KDE desktop in favor of the GNOME desktop
else:
api.current_logger().info("No KDE app in use detected.")
# upgrade can be performed
|
py | 7dfe1043a50406cfc7d9b0f4e8c17878d5067bc6 | # module pyparsing.py
#
# Copyright (c) 2003-2013 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString( hello ))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.0.2"
__versionTime__ = "13 April 2014 12:10"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr',
]
PY_3 = sys.version.startswith('3')
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{L{ParseFatalException}}, but thrown internally when an
C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by C{validate()} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
"""
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def iterkeys( self ):
"""Returns all named result keys."""
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def itervalues( self ):
"""Returns all named result values."""
return (self[k] for k in self.iterkeys())
def iteritems( self ):
return ((k, self[k]) for k in self.iterkeys())
if PY_3:
keys = iterkeys
values = itervalues
items = iteritems
else:
def keys( self ):
"""Returns all named result keys."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values."""
return list(self.itervalues())
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""Removes and returns item at specified index (default=last).
Supports both list and dict semantics for pop(). If passed no
argument or an integer argument, it will use list semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use dict
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in dict.pop()."""
if not args:
args = [-1]
if 'default' in kwargs:
args.append(kwargs['default'])
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
            index = args[0]
            ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""Inserts new element at location index in the list of parsed tokens."""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""Add single element to end of ParseResults list of elements."""
self.__toklist.append(item)
def extend( self, itemseq ):
"""Add sequence of elements to end of ParseResults list of elements."""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""Clear all elements and results names."""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
return self.copy()
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = []
for i in self.__toklist:
if isinstance(i, ParseResults):
out.append(_ustr(i))
else:
out.append(repr(i))
return '[' + ', '.join(out) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
if PY_3:
return dict( self.items() )
else:
return dict( self.iteritems() )
def copy( self ):
"""Returns a new copy of a C{ParseResults} object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
items = sorted(self.items())
for k,v in items:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.haskeys():
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
return "".join(out)
def pprint(self, *args, **kwargs):
"""Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __dir__(self):
return dir(super(ParseResults,self)) + list(self.keys())
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
if limit[0] <= maxargs and not foundArity[0]:
limit[0] += 1
continue
raise
return wrapper
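# Illustrative sketch (added; not original pyparsing code): _trim_arity lets parse actions
# be written with 0, 1, 2 or 3 parameters; the wrapper drops leading arguments until the
# call succeeds, so a one-argument action still works with the full (s, loc, toks) call.
def _example_trim_arity():
    def takes_only_tokens(toks):
        return toks
    wrapped = _trim_arity(takes_only_tokens)
    # Invoked with the full signature, but the action receives just the token list.
    return wrapped("some input", 0, ["tok1", "tok2"])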
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
"""
ParserElement.literalStringClass = cls
inlineLiteralsUsing = staticmethod(inlineLiteralsUsing)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value, Exception):
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException as pe:
pe.__traceback__ = None
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
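    # Illustrative usage (added note): enable packrat once, early, before building or
    # running grammars that backtrack heavily, e.g.
    #   import pyparsing
    #   pyparsing.ParserElement.enablePackrat()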
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
            - explicitly expand the tabs in your input string before calling
C{parseString}
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
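    # Illustrative usage (added note): a typical transformString call attaches a parse
    # action that rewrites the matched tokens, e.g.
    #   name = Word(alphas).setParseAction(lambda t: t[0].upper())
    #   name.transformString("hello, world")  ->  "HELLO, WORLD"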
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns C{L{And}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{L{And}} with error stop"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
           C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{L{MatchFirst}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{L{Or}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{L{Each}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{L{NotAny}}"""
return NotAny( self )
def __call__(self, name=None):
"""Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "r")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
"""Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}::
Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
matching, default is C{False}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
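# Illustrative usage sketch (not part of the original source): how Keyword
# differs from Literal, following the docstring above. Expected results are
# assumptions based on that description, not captured output.
#
#   if_kw = Keyword("if")
#   if_kw.parseString("if(y==2)")      # matches -> ['if']
#   if_kw.parseString("ifAndOnlyIf")   # raises ParseException
#   Literal("if").parseString("ifAndOnlyIf")   # matches the leading 'if'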
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
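# Illustrative usage sketch (not part of the original source): typical Word
# definitions; alphas/nums/alphanums are the character-set constants defined
# earlier in this module.
#
#   identifier = Word(alphas + "_", alphanums + "_")
#   integer    = Word(nums)
#   identifier.parseString("max_value")            # -> ['max_value']
#   Word(nums, exact=4).parseString("2014-01-01")  # -> ['2014']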
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
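# Illustrative usage sketch (not part of the original source): wrapping a raw
# regular expression; named groups become results names on the returned
# ParseResults, as implemented in parseImpl above.
#
#   realnum = Regex(r"[+-]?\d+\.\d*")
#   date    = Regex(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")
#   date.parseString("2014-06-30")["year"]   # -> '2014'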
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
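# Illustrative usage sketch (not part of the original source): quoted strings
# with escape handling; unquoteResults (the default) strips the surrounding
# quote characters from the returned token.
#
#   qs = QuotedString('"', escChar="\\")
#   qs.parseString('"say \\"hi\\""')   # -> ['say "hi"']
#   sql_qs = QuotedString("'", escQuote="''")
#   sql_qs.parseString("'it''s'")      # -> ["it's"]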
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
elif isinstance( exprs, collections.Sequence ):
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(Literal, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException(pe)
except IndexError:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
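# Illustrative usage sketch (not part of the original source): the difference
# between MatchFirst ('|') and Or ('^') on overlapping alternatives.
#
#   first_wins   = Literal("<") | Literal("<=")   # on "<=", matches "<"
#   longest_wins = Literal("<") ^ Literal("<=")   # on "<=", matches "<="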
class Each(ParseExpression):
"""Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults:
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
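# Illustrative usage sketch (not part of the original source): Each ('&')
# requires all sub-expressions to appear, in any order.
#
#   color = Keyword("red") | Keyword("blue")
#   size  = Word(nums)
#   shape = Keyword("circle") | Keyword("square")
#   spec  = color & size & shape
#   spec.parseString("10 circle red")   # -> tokens in input order:
#                                       #    ['10', 'circle', 'red']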
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
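# Illustrative usage sketch (not part of the original source): Optional with a
# default value, so a missing field still produces a token.
#
#   zip_code = Word(nums, exact=5)
#   address  = Word(alphas) + Optional(zip_code, default="00000")
#   address.parseString("Springfield")         # -> ['Springfield', '00000']
#   address.parseString("Springfield 12345")   # -> ['Springfield', '12345']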
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If C{include} is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The C{ignore}
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
# print("found ignoreExpr, advance to", loc)
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
raise ParseException(instring, loc, self.errmsg, self)
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
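# Illustrative usage sketch (not part of the original source): a recursive
# grammar for nested parenthesized integer lists, built with Forward and the
# '<<=' operator recommended in the docstring above.
#
#   LPAR, RPAR = map(Suppress, "()")
#   nested = Forward()
#   nested <<= Group(LPAR + ZeroOrMore(Word(nums) | nested) + RPAR)
#   nested.parseString("(1 (2 3) 4)")   # -> [['1', ['2', '3'], '4']]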
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of C{ParseExpression}, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( str.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
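# Illustrative usage sketch (not part of the original source): Combine joins
# adjacent tokens into a single string, useful for numbers with embedded
# punctuation.
#
#   real = Combine(Word(nums) + "." + Word(nums))
#   real.parseString("3.14159")   # -> ['3.14159']
#   # without Combine, the same grammar would return ['3', '.', '14159']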
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as a item key.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
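# Illustrative usage sketch (not part of the original source), following the
# docstring above:
#
#   delimitedList(Word(alphas)).parseString("aa,bb,cc")
#       # -> ['aa', 'bb', 'cc']   (delimiters suppressed)
#   delimitedList(Word(alphas), combine=True).parseString("aa,bb,cc")
#       # -> ['aa,bb,cc']         (single token, delimiters included)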
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr )
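# Illustrative usage sketch (not part of the original source): the leading
# integer controls how many following tokens are consumed and is itself
# suppressed from the results.
#
#   countedArray(Word(alphas)).parseString("3 ab cd ef")
#       # -> [['ab', 'cd', 'ef']]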
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Sequence):
symbols = list(strs[:])
elif isinstance(strs, _generatorType):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) )
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
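# Illustrative usage sketch (not part of the original source): oneOf reorders
# overlapping literals so that longest-first matching happens regardless of
# the order given.
#
#   comparison_op = oneOf("< > <= >= == !=")
#   comparison_op.parseString("<=")   # -> ['<='], not ['<']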
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
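# Illustrative usage sketch (not part of the original source): building a
# Dict of key/value pairs where the '=' delimiter is suppressed, so lookups
# can be done by key name.
#
#   entry = dictOf(Word(alphas) + Suppress("="), Word(nums))
#   result = entry.parseString("width=200 height=100")
#   result["width"]    # -> '200'
#   result["height"]   # -> '100'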
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
require the inspect module to chase up the call stack. By default, returns a
string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
del t[:]
t.insert(0, s[t._original_start:t._original_end])
del t["_original_start"]
del t["_original_end"]
matchExpr.setParseAction(extractText)
return matchExpr
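# A short usage sketch of originalTextFor (hypothetical helper, never invoked
# here): it recovers the raw matched text of a tag pair, using the makeHTMLTags
# and SkipTo helpers defined elsewhere in this module.
def _originalTextFor_demo():
    b_start, b_end = makeHTMLTags("b")
    bold_text = originalTextFor(b_start + SkipTo(b_end) + b_end)
    print(bold_text.parseString("<b>bold text</b>"))  # -> ['<b>bold text</b>']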
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty."""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
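# A brief usage sketch of locatedExpr (hypothetical helper, never invoked
# here), printing the locn_start/value/locn_end results names added above.
def _locatedExpr_demo():
    word = Word(alphas)
    for match in locatedExpr(word).searchString("abc 123 defg"):
        print(match[0].locn_start, match[0].value, match[0].locn_end)
    # -> 0 abc 3
    #    8 defg 12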
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\x' (\x21, which is a '!' character)
(\0x## is also supported for backwards compatibility)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""DEPRECATED - use new helper method C{L{originalTextFor}}.
Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
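# A compact usage sketch of withAttribute (hypothetical helper, never invoked
# here): only start tags whose type attribute equals "grid" survive the
# validating parse action, so the "graph" div is skipped.
def _withAttribute_demo():
    html = '<div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3</div>'
    div_start, div_end = makeHTMLTags("div")
    grid_div = div_start.copy().setParseAction(withAttribute(type="grid"))
    for tag in grid_div.searchString(html):
        print(tag.tag, tag.type)  # -> div grid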
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=Suppress('('))
- rpar - expression for matching right-parentheses (default=Suppress(')'))
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
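# A minimal usage sketch of infixNotation (hypothetical helper, never invoked
# here): a two-level precedence table groups "*"/"/" more tightly than "+"/"-".
def _infixNotation_demo():
    integer = Word(nums)
    arith_expr = infixNotation(integer,
        [
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
        ])
    print(arith_expr.parseString("9 + 2 * 3"))  # -> [['9', '+', ['2', '*', '3']]]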
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
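# A tiny usage sketch of nestedExpr (hypothetical helper, never invoked here),
# using the default "(" and ")" delimiters and whitespace-delimited content.
def _nestedExpr_demo():
    parens = nestedExpr()
    print(parens.parseString("(a (b c) d)"))  # -> [['a', ['b', 'c'], 'd']]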
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
        - indent - boolean indicating whether block must be indented beyond
           the current level; set to False for block of left-most statements
(default=True)
A valid block must contain at least one C{blockStatement}.
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = Empty() + Empty().setParseAction(checkSubIndent)
PEER = Empty().setParseAction(checkPeerIndent)
UNDENT = Empty().setParseAction(checkUnindent)
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (teststring + "->" + str(tokenlist))
print ("tokens = " + str(tokens))
print ("tokens.columns = " + str(tokens.columns))
print ("tokens.tables = " + str(tokens.tables))
print (tokens.asXML("SQL",True))
except ParseBaseException as err:
print (teststring + "->")
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
|
py | 7dfe10ad6a8f3273e4cc0f01e412de26588edf4f | from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = '[email protected]'
host2 = '[email protected]'
host3 = '[email protected]'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = '[email protected]'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3],
'cfgm': [host1],
'openstack': [host2],
'control': [host1],
'compute': [host3],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
'storage-master': [host1],
'storage-compute': [host1],
}
#Openstack admin password
env.openstack_admin_password = 'c0ntrail123'
#Hostnames
env.hostnames = {
'all': ['a3s40', 'a3s39', 'a3s43']
}
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host_build: 'contrail123'
}
#For reimage purpose
env.ostypes = {
host1: 'redhat',
host2: 'redhat',
host3: 'redhat',
}
#OPTIONAL ANALYTICS CONFIGURATION
#================================
# database_dir is the directory where cassandra data is stored
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#database_dir = '<separate-partition>/cassandra'
#
# analytics_data_dir is the directory where cassandra data for analytics
# is stored. This is used to separate cassandra's main data storage [internal
# use and config data] from analytics data. That way cassandra's critical
# system data and config data are not overrun by analytics data
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#analytics_data_dir = '<separate-partition>/analytics_data'
#
# ssd_data_dir is the directory where cassandra can store fast retrievable
# temporary files (commit_logs). Giving cassandra an ssd disk for this
# purpose improves cassandra performance
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/commit_logs
#
#ssd_data_dir = '<separate-partition>/commit_logs_data'
#OPTIONAL BONDING CONFIGURATION
#==============================
#Inferface Bonding
#bond= {
# host1 : { 'name': 'bond0', 'member': ['p2p0p0','p2p0p1','p2p0p2','p2p0p3'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#}
#OPTIONAL SEPARATION OF MANAGEMENT AND CONTROL + DATA and OPTIONAL VLAN INFORMATION
#==================================================================================
#control_data = {
# host1 : { 'ip': '192.168.10.1/24', 'gw' : '192.168.10.254', 'device': 'bond0', 'vlan': '224' },
#}
#OPTIONAL STATIC ROUTE CONFIGURATION
#===================================
#static_route = {
# host1 : [{ 'ip': '10.1.1.0', 'netmask' : '255.255.255.0', 'gw':'192.168.10.254', 'intf': 'bond0' },
# { 'ip': '10.1.2.0', 'netmask' : '255.255.255.0', 'gw':'192.168.10.254', 'intf': 'bond0' }],
#}
#storage compute disk config
#storage_node_config = {
# host1 : { 'disks' : ['sdc', 'sdd'] },
#}
#live migration config
#live_migration = True
#To disable installing contrail interface rename package
env.interface_rename = False
#In environments where keystone is deployed outside of Contrail provisioning
#scripts , you can use the below options
#
# Note :
# "insecure" is applicable only when protocol is https
# The entries in env.keystone overrides the below options which used
# to be supported earlier :
# service_token
# keystone_ip
# keystone_admin_user
# keystone_admin_password
# region_name
#
env.keystone = {
'keystone_ip' : '10.84.12.15',
'auth_protocol' : 'http', #Default is http
'auth_port' : '35357', #Default is 35357
'admin_token' : '1232323223',
'admin_user' : 'admin', #Default is admin
'admin_password': 'c0ntrail123', #Default is contrail123
'service_tenant': 'service', #Default is service
'admin_tenant' : 'admin', #Default is admin
'region_name' : 'RegionOne', #Default is RegionOne
'insecure' : 'True', #Default = False
}
#
# In environments where openstack services are deployed independently
# from contrail, you can use the below options
# service_token : Common service token for all services like nova,
# neutron, glance, cinder etc
# amqp_host : IP of AMQP Server to be used in openstack
#
# old
# 'service_token' : '15ee68dbae3b4416a7fda3400e0a6683',
env.openstack = {
'service_token' : 'a55e1eb7680d4d4eb092698480ab31f7',
'amqp_host' : '10.84.12.15',
}
# Neutron specific configuration
#env.neutron = {
# 'protocol': 'http', # Default is http
#}
#To enable multi-tenancy feature
#multi_tenancy = True
#To enable haproxy feature
#haproxy = True
#To enable parallel execution of tasks in multiple nodes
#do_parallel = True
# To configure the encapsulation priority. Default: MPLSoGRE
#env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
# Optional proxy settings.
# env.http_proxy = os.environ.get('http_proxy')
env.test_repo_dir="/home/stack/redhat_sanity/multi_node/contrail-test"
env.mail_from='[email protected]'
env.mail_to='[email protected]'
multi_tenancy=True
env.encap_priority="'MPLSoUDP','MPLSoGRE','VXLAN'"
env.mail_server='10.84.24.64'
env.mail_port='4000'
env.log_scenario='Redhat70_Two_Node_Sanity_[CONTRAIL_ALL_ROLES_PLUS_RDO]'
|
py | 7dfe11c756560a615406f7669294b717958a5a58 | """
More fully featured dataset readers and iterators for multitask training
than allennlp.
Differences:
- randomly sample batches from each dataset according to dataset size,
so gradient steps are spread out over each dataset throughout the course
of training.
- allows use of any generic iterator for each dataset
- allows excluding some datasets from vocab creation
Implementation in allennlp:
Interface for dataset and iterator in allennlp trainer:
train_generator = self._iterator(self._train_data,
num_epochs=1,
shuffle=shuffle)
num_training_batches = self._iterator.get_num_batches(self._train_data)
Interface for dataset and iterator in train command:
instances_for_vocab = []
for instance in dataset:
instances_for_vocab.append(instance)
--> then pass into Vocabulary.from_params(...)
So the dataset needs to implement __iter__, but it is only called
to construct the Vocabulary, provided we also pair this dataset with a
special iterator that doesn't call __iter__.
"""
from typing import Dict, List, Iterable
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data import Instance
from allennlp.data.iterators import DataIterator
from allennlp.data import Vocabulary
import numpy as np
import torch
class MultitaskDataset:
def __init__(self, datasets: Dict[str, Iterable[Instance]],
datasets_for_vocab_creation: List[str]):
self.datasets = datasets
self.datasets_for_vocab_creation = datasets_for_vocab_creation
def __iter__(self):
# with our iterator, this is only called for vocab creation
for key in self.datasets_for_vocab_creation:
for instance in self.datasets[key]:
yield instance
@DatasetReader.register("multitask_reader")
class MultitaskDatasetReader(DatasetReader):
def __init__(self,
dataset_readers: Dict[str, DatasetReader],
datasets_for_vocab_creation: List[str]) -> None:
super().__init__(False)
self.dataset_readers = dataset_readers
self.datasets_for_vocab_creation = datasets_for_vocab_creation
def read(self, file_path: Dict[str, str]):
"""
read returns an iterable of instances that is directly
iterated over when constructing vocab, and in the iterators.
Since we will also pair this reader with a special iterator,
we only have to worry about the case where the return value from
this call is used to iterate for vocab creation.
In addition, it is the return value from this that is passed
into Trainer as the dataset (and then into the iterator)
"""
datasets = {key: self.dataset_readers[key].read(fpath)
for key, fpath in file_path.items()}
return MultitaskDataset(datasets, self.datasets_for_vocab_creation)
@DataIterator.register("multitask_iterator")
class MultiTaskDataIterator(DataIterator):
def __init__(self,
iterators: Dict[str, DataIterator],
names_to_index: List[str],
iterate_forever: bool = False,
sampling_rates: List[float] = None) -> None:
self.iterators = iterators
self.names_to_index = names_to_index
self.sampling_rates = sampling_rates
self.iterate_forever = iterate_forever
def __call__(self,
multitask_dataset: MultitaskDataset,
num_epochs: int = None,
shuffle: bool = True):
# get the number of batches in each of the sub-iterators for
# the sampling rate
num_batches_per_iterator = []
for name in self.names_to_index:
dataset = multitask_dataset.datasets[name]
num_batches_per_iterator.append(
self.iterators[name].get_num_batches(dataset)
)
total_batches_per_epoch = sum(num_batches_per_iterator)
# make the sampling rates --
p = np.array(num_batches_per_iterator, dtype=np.float) \
/ total_batches_per_epoch
if self.iterate_forever:
total_batches_per_epoch = 1000000000
if self.sampling_rates is not None:
p = np.array(self.sampling_rates, dtype=np.float)
for epoch in range(num_epochs):
generators = []
for name in self.names_to_index:
dataset = multitask_dataset.datasets[name]
generators.append(
self.iterators[name](
dataset,
num_epochs=1,
shuffle=shuffle,
)
)
n_batches_this_epoch = 0
all_indices = np.arange(len(generators)).tolist()
while n_batches_this_epoch < total_batches_per_epoch:
index = np.random.choice(len(generators), p=p)
try:
batch = next(generators[index])
except StopIteration:
# remove this generator from the pile!
del generators[index]
if len(generators) == 0:
                        # all sub-iterators are exhausted before reaching
                        # the expected number of batches
raise ValueError
del all_indices[index]
newp = np.concatenate([p[:index], p[index+1:]])
newp /= newp.sum()
p = newp
continue
# add the iterator id
batch['dataset_index'] = torch.tensor(all_indices[index])
yield batch
n_batches_this_epoch += 1
def _take_instances(self, *args, **kwargs):
raise NotImplementedError
def _memory_sized_lists(self, *args, **kwargs):
raise NotImplementedError
def _ensure_batch_is_sufficiently_small(self, *args, **kwargs):
raise NotImplementedError
def get_num_batches(self, multitask_dataset: MultitaskDataset) -> int:
num_batches = 0
for name, dataset in multitask_dataset.datasets.items():
num_batches += self.iterators[name].get_num_batches(dataset)
return num_batches
def index_with(self, vocab: Vocabulary):
for iterator in self.iterators.values():
iterator.index_with(vocab)
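# A hypothetical wiring sketch (not part of the original module): the argument
# names mirror the constructors above, while the sub-readers, sub-iterators and
# file paths are placeholders that a real setup would supply.
def _example_wiring(reader_a, reader_b, iterator_a, iterator_b):
    reader = MultitaskDatasetReader(
        dataset_readers={"a": reader_a, "b": reader_b},
        datasets_for_vocab_creation=["a"])
    dataset = reader.read({"a": "/path/to/a.txt", "b": "/path/to/b.txt"})
    iterator = MultiTaskDataIterator(
        iterators={"a": iterator_a, "b": iterator_b},
        names_to_index=["a", "b"],
        iterate_forever=False)
    return dataset, iterator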
|
py | 7dfe12629e155de84e41002084e19eab07976f75 | # coding: utf-8
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import json
from oslo_utils import strutils
from oslo_utils import uuidutils
import six
import wsme
from wsme import types as wtypes
from ironic.api.controllers.v1 import utils as v1_utils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils
class MacAddressType(wtypes.UserType):
"""A simple MAC address type."""
basetype = wtypes.text
name = 'macaddress'
@staticmethod
def validate(value):
return utils.validate_and_normalize_mac(value)
@staticmethod
def frombasetype(value):
if value is None:
return None
return MacAddressType.validate(value)
class UuidOrNameType(wtypes.UserType):
"""A simple UUID or logical name type."""
basetype = wtypes.text
name = 'uuid_or_name'
@staticmethod
def validate(value):
if not (uuidutils.is_uuid_like(value)
or v1_utils.is_valid_logical_name(value)):
raise exception.InvalidUuidOrName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidOrNameType.validate(value)
class NameType(wtypes.UserType):
"""A simple logical name type."""
basetype = wtypes.text
name = 'name'
@staticmethod
def validate(value):
if not v1_utils.is_valid_logical_name(value):
raise exception.InvalidName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return NameType.validate(value)
class UuidType(wtypes.UserType):
"""A simple UUID type."""
basetype = wtypes.text
name = 'uuid'
@staticmethod
def validate(value):
if not uuidutils.is_uuid_like(value):
raise exception.InvalidUUID(uuid=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidType.validate(value)
class BooleanType(wtypes.UserType):
"""A simple boolean type."""
basetype = wtypes.text
name = 'boolean'
@staticmethod
def validate(value):
try:
return strutils.bool_from_string(value, strict=True)
except ValueError as e:
# raise Invalid to return 400 (BadRequest) in the API
raise exception.Invalid(six.text_type(e))
@staticmethod
def frombasetype(value):
if value is None:
return None
return BooleanType.validate(value)
class JsonType(wtypes.UserType):
"""A simple JSON type."""
basetype = wtypes.text
name = 'json'
def __str__(self):
# These are the json serializable native types
return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
BooleanType, list, dict, None)))
@staticmethod
def validate(value):
try:
json.dumps(value)
except TypeError:
raise exception.Invalid(_('%s is not JSON serializable') % value)
else:
return value
@staticmethod
def frombasetype(value):
return JsonType.validate(value)
class ListType(wtypes.UserType):
"""A simple list type."""
basetype = wtypes.text
name = 'list'
@staticmethod
def validate(value):
"""Validate and convert the input to a ListType.
:param value: A comma separated string of values
:returns: A list of unique values, whose order is not guaranteed.
"""
items = [v.strip().lower() for v in six.text_type(value).split(',')]
# filter() to remove empty items
# set() to remove duplicated items
return list(set(filter(None, items)))
@staticmethod
def frombasetype(value):
if value is None:
return None
return ListType.validate(value)
macaddress = MacAddressType()
uuid_or_name = UuidOrNameType()
name = NameType()
uuid = UuidType()
boolean = BooleanType()
listtype = ListType()
# Can't call it 'json' because that's the name of the stdlib module
jsontype = JsonType()
class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation."""
path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'),
mandatory=True)
op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
mandatory=True)
value = wsme.wsattr(jsontype, default=wtypes.Unset)
# The class of the objects being patched. Override this in subclasses.
# Should probably be a subclass of ironic.api.controllers.base.APIBase.
_api_base = None
# Attributes that are not required for construction, but which may not be
# removed if set. Override in subclasses if needed.
_extra_non_removable_attrs = set()
# Set of non-removable attributes, calculated lazily.
_non_removable_attrs = None
@staticmethod
def internal_attrs():
"""Returns a list of internal attributes.
Internal attributes can't be added, replaced or removed. This
        method may be overridden by a derived class.
"""
return ['/created_at', '/id', '/links', '/updated_at', '/uuid']
@classmethod
def non_removable_attrs(cls):
"""Returns a set of names of attributes that may not be removed.
Attributes whose 'mandatory' property is True are automatically added
to this set. To add additional attributes to the set, override the
field _extra_non_removable_attrs in subclasses, with a set of the form
{'/foo', '/bar'}.
"""
if cls._non_removable_attrs is None:
cls._non_removable_attrs = cls._extra_non_removable_attrs.copy()
if cls._api_base:
fields = inspect.getmembers(cls._api_base,
lambda a: not inspect.isroutine(a))
for name, field in fields:
if getattr(field, 'mandatory', False):
cls._non_removable_attrs.add('/%s' % name)
return cls._non_removable_attrs
@staticmethod
def validate(patch):
_path = '/' + patch.path.split('/')[1]
if _path in patch.internal_attrs():
msg = _("'%s' is an internal attribute and can not be updated")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.path in patch.non_removable_attrs() and patch.op == 'remove':
msg = _("'%s' is a mandatory attribute and can not be removed")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.op != 'remove':
if patch.value is wsme.Unset:
msg = _("'add' and 'replace' operations need a value")
raise wsme.exc.ClientSideError(msg)
ret = {'path': patch.path, 'op': patch.op}
if patch.value is not wsme.Unset:
ret['value'] = patch.value
return ret
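# A hypothetical subclass sketch (not part of the original module) showing how
# a resource-specific patch type would extend JsonPatchType, as described in
# the comments above; the '/driver' and '/last_error' paths are made-up
# examples.
class _ExamplePatchType(JsonPatchType):
    _extra_non_removable_attrs = {'/driver'}

    @staticmethod
    def internal_attrs():
        return JsonPatchType.internal_attrs() + ['/last_error']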
class LocalLinkConnectionType(wtypes.UserType):
"""A type describing local link connection."""
basetype = wtypes.DictType
name = 'locallinkconnection'
mandatory_fields = {'switch_id',
'port_id'}
valid_fields = mandatory_fields.union({'switch_info'})
@staticmethod
def validate(value):
"""Validate and convert the input to a LocalLinkConnectionType.
:param value: A dictionary of values to validate, switch_id is a MAC
address or an OpenFlow based datapath_id, switch_info is an
optional field.
For example::
{
'switch_id': mac_or_datapath_id(),
'port_id': 'Ethernet3/1',
'switch_info': 'switch1'
}
:returns: A dictionary.
:raises: Invalid if some of the keys in the dictionary being validated
are unknown, invalid, or some required ones are missing.
"""
wtypes.DictType(wtypes.text, wtypes.text).validate(value)
keys = set(value)
        # This is to work around an issue where an API object initialized from
        # an RPC object can have dictionary fields that were set to None turned
        # into empty dictionaries
if not keys:
return value
invalid = keys - LocalLinkConnectionType.valid_fields
if invalid:
raise exception.Invalid(_('%s are invalid keys') % (invalid))
# Check all mandatory fields are present
missing = LocalLinkConnectionType.mandatory_fields - keys
if missing:
msg = _('Missing mandatory keys: %s') % missing
raise exception.Invalid(msg)
# Check switch_id is either a valid mac address or
# OpenFlow datapath_id and normalize it.
try:
value['switch_id'] = utils.validate_and_normalize_mac(
value['switch_id'])
except exception.InvalidMAC:
try:
value['switch_id'] = utils.validate_and_normalize_datapath_id(
value['switch_id'])
except exception.InvalidDatapathID:
raise exception.InvalidSwitchID(switch_id=value['switch_id'])
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return LocalLinkConnectionType.validate(value)
locallinkconnectiontype = LocalLinkConnectionType()
class VifType(JsonType):
basetype = wtypes.text
name = 'viftype'
mandatory_fields = {'id'}
@staticmethod
def validate(value):
super(VifType, VifType).validate(value)
keys = set(value)
# Check all mandatory fields are present
missing = VifType.mandatory_fields - keys
if missing:
msg = _('Missing mandatory keys: %s') % ', '.join(list(missing))
raise exception.Invalid(msg)
UuidOrNameType.validate(value['id'])
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return VifType.validate(value)
viftype = VifType()
|
py | 7dfe12cafe8bff6a6eb7a7e24bff9a4cd0e02118 | #
# Tests for FOQS lead-acid model
#
import pybamm
import unittest
class TestLeadAcidFOQS(unittest.TestCase):
def test_well_posed(self):
# debug mode slows down the FOQS model a fair bit, so turn off
pybamm.settings.debug_mode = False
model = pybamm.lead_acid.FOQS()
pybamm.settings.debug_mode = True
model.check_well_posedness()
class TestLeadAcidFOQSWithSideReactions(unittest.TestCase):
def test_well_posed_differential(self):
options = {"surface form": "differential", "hydrolysis": "true"}
# debug mode slows down the FOQS model a fair bit, so turn off
pybamm.settings.debug_mode = False
model = pybamm.lead_acid.FOQS(options)
pybamm.settings.debug_mode = True
model.check_well_posedness()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
py | 7dfe1331af5462fe0486254c82a8d392dcc2c50f | import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
# Learner model
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
# Course model
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# Lesson model
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
# Enrollment model
# <HINT> Once a user enrolls in a class, an enrollment entry should be created between the user and course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
class Question(models.Model):
question_text = models.CharField(max_length = 200)
grade = models.IntegerField()
lessson_id = models.ForeignKey(Course, on_delete=models.CASCADE)
# <HINT> A sample model method to calculate if learner get the score of the question
def is_get_score(self, selected_ids):
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
class Choice(models.Model):
choice_text = models.CharField(max_length = 200)
is_correct = models.IntegerField()
question_id = models.ForeignKey(Question, on_delete=models.CASCADE)
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
# Other fields and methods you would like to design |
py | 7dfe15f0699beb05ec7f979ac40511252e9a5ef1 | import pytest
from pytest import approx
import pymap3d as pm
@pytest.mark.parametrize("lat,dist", [(0, 0), (90, 10001965.729)])
def test_meridian_dist(lat, dist):
assert pm.meridian_dist(lat) == approx(dist)
@pytest.mark.parametrize(
"lat1,lat2,arclen",
[(0, 0, 0), (0, 90, 10001965.729), (0, -90, 10001965.729), (0, 40, 4429529.03035058), (40, 80, 4455610.84159)],
)
def test_meridian_arc(lat1, lat2, arclen):
"""
meridianarc(deg2rad(40), deg2rad(80), wgs84Ellipsoid)
"""
assert pm.meridian_arc(lat1, lat2) == approx(arclen)
@pytest.mark.parametrize(
"lon1,lon2,lat,dist",
[(0, 0, 0, 0), (0, 90, 0, 10018754.1714), (0, -90, 0, 10018754.1714), (90, 0, 0, 10018754.1714), (-90, 0, 0, 10018754.1714)],
)
def test_departure(lon1, lon2, lat, dist):
assert pm.departure(lon1, lon2, lat) == approx(dist)
@pytest.mark.parametrize(
"lat1,lon1,lat2,lon2,arclen,az",
[
(40, -80, 65, -148, 5248666.20853187, 302.0056736),
(0, 0, 0, 90, 10018754.17, 90),
(0, 0, 0, -90, 10018754.17, 270),
(0, 90, 0, 0, 10018754.17, 270),
(0, -90, 0, 0, 10018754.17, 90),
(1, 0, 0, 0, 110574.4, 180),
(-1, 0, 0, 0, 110574.4, 0),
],
)
def test_loxodrome_inverse(lat1, lon1, lat2, lon2, arclen, az):
"""
distance('rh', 40, -80, 65, -148, wgs84Ellipsoid)
azimuth('rh', 40, -80, 65, -148, wgs84Ellipsoid)
"""
rhdist, rhaz = pm.loxodrome_inverse(lat1, lon1, lat2, lon2)
assert rhdist == approx(arclen)
assert rhaz == approx(az)
assert isinstance(rhdist, float)
assert isinstance(rhaz, float)
def test_numpy_loxodrome_inverse():
pytest.importorskip("numpy")
d, a = pm.loxodrome_inverse([40, 40], [-80, -80], 65, -148)
assert d == approx(5248666.209)
assert a == approx(302.00567)
@pytest.mark.parametrize(
"lat0,lon0,rng,az,lat1,lon1",
[
(40, -80, 10000, 30, 40.077995, -79.9414144),
(0, 0, 0, 0, 0, 0),
(0, 0, 10018754.17, 90, 0, 90),
(0, 0, 10018754.17, -90, 0, -90),
(0, 0, 110574.4, 180, -1, 0),
(-1, 0, 110574.4, 0, 0, 0),
],
)
def test_loxodrome_direct(lat0, lon0, rng, az, lat1, lon1):
lat2, lon2 = pm.loxodrome_direct(lat0, lon0, rng, az)
assert lat2 == approx(lat1, abs=1e-6)
assert lon2 == approx(lon1)
assert isinstance(lat2, float)
assert isinstance(lon2, float)
def test_numpy_loxodrome_direct():
pytest.importorskip("numpy")
lat, lon = pm.loxodrome_direct([40, 40], [-80, -80], [10000, 10000], [30, 30])
assert lat == approx(40.077995)
assert lon == approx(-79.941414)
@pytest.mark.parametrize("lat,lon", [([0, 45, 90], [0, 45, 90])])
def test_meanm(lat, lon):
pytest.importorskip("numpy")
assert pm.meanm(lat, lon) == approx([47.26967, 18.460557])
|
py | 7dfe15f7f9913386ff59cdad7af6082f2c67aa57 | from django.contrib import admin
# Register your models here.
from .models import Direct
admin.site.register(Direct) |
py | 7dfe161cdda2e4efdb8c5eef45355ec5a5b7440a | import tensorflow as tf
import os, sys
import requests as http
import json
import datetime
from random import random
from time import sleep
from tqdm import tqdm
# import horovod.tensorflow as hvd
# hvd.init()
os.environ['CUDA_VISIBLE_DEVICES'] = "3" #str(hvd.local_rank())
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Create 3 virtual GPUs with 3GB memory each
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3000),
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3000),
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3000)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
pass
# Virtual devices must be set before GPUs have been initialized
from object_detection import DetectObject
from scipy.spatial.distance import euclidean
from tools import generate_detections as gdet
from deep_sort.tracker import Tracker
from deep_sort.detection import Detection
from deep_sort import preprocessing, nn_matching
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import ConfigProto
import matplotlib.pyplot as plt
import numpy as np
import cv2
from PIL import Image
from core.functions import *
from core.config import cfg
from tensorflow.python.saved_model import tag_constants
from core.yolov4 import filter_boxes
import core.utils as utils
from absl.flags import FLAGS
from absl import app, flags, logging
import time
import os
# define constants
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov3', 'yolov3 or yolov4')
flags.DEFINE_string('video', './data/video/test.mp4',
'path to input video or set to 0 for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID',
'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.70, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', True, 'show detailed info of tracked objects')
flags.DEFINE_boolean('count', True, 'count objects being tracked on screen')
flags.DEFINE_boolean('crop', True, 'crop detections from images')
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
#check if the ROI folder exists in the root folder
if os.path.exists('./ROI'):
print('Clearing out files in ROI......')
cleardata = [os.remove(os.path.join('./ROI', f)) for f in os.listdir('./ROI')]
print('Cleared out files!!!')
else:
print("Creating exporting folder: ROI....")
os.mkdir('./ROI')
print("Created ROI Folder for image exportation......")
minimapArray = []
def main(_argv):
# Definition of the parameters
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
interpreter = None
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
video_path = FLAGS.video
# get video name by using split method
video_name = video_path.split('/')[-1]
video_name = video_name.split('.')[0]
# load tflite model if flag is set
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# otherwise load standard tensorflow saved model
else:
saved_model_loaded = tf.saved_model.load(
FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
# begin video capture
try:
vid = cv2.VideoCapture(int(video_path))
except:
vid = cv2.VideoCapture(video_path)
# Try to read video if valid
return_value, frame = vid.read()
if return_value:
pass
else:
print('Invalid video Directory!!!')
filename = video_path.split('.')[-2]
# VideoOut = None
MinimapOut = None
# Get total number of frames in a video
TotalFrames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
# get video ready to save locally if flag is set
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
time_milli = vid.get(cv2.CAP_PROP_POS_MSEC)
time_milli = time_milli/1000
# set frame per seconds
vid.set(cv2.CAP_PROP_FPS, 1000)
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
frame_num = 0
count = 10
ObjectDetector = DetectObject()
for _, i in enumerate(tqdm(range(TotalFrames))):
return_value, frame = vid.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
print('Video has ended or failed, try a different video format!')
break
# pass in the object detector
ObjectDetector.interpreter = interpreter
bboxes, frame, result = ObjectDetector.analyzeDetection(return_value, frame, frame_num, FLAGS,
infer, encoder, nms_max_overlap, tracker)
# loop through the bounding box and export into the ROI folder.
for i, j in bboxes.items():
xmin, ymin, w, h = int(j[0]), int(j[1]), int(j[2]), int(j[3])
if w <= 0 or h <= 0:
pass
else:
# ROI Extraction
maskedImage = frame[ymin:ymin+h, xmin:xmin+w]
roi_name= "./ROI/ROI_frame_%s.jpg" %(str(frame_num))
cv2.imwrite(roi_name, maskedImage) # save transformed image to path
# cv2.imshow('frame',result)
frame_num += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
py | 7dfe16424a2d3a67152024fcfa03142d8f8d6fe8 | """ Generate various steady state data sets."""
from bayescmd.steady_state import RunSteadyState
import os
import distutils
import json
import copy
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from matplotlib import rcParams
import numpy as np
MODEL_NAME = "bp_hypothermia_4"
# inputs = {"P_a": (30, 70), "Pa_CO2": (8, 160), "SaO2sup": (0.2, 1.0)}
# temperatures = [37, 35, 33.5]
# cbar = sns.color_palette("muted", n_colors=4)
# for direction in ["up", "down"]:
# for i, r in inputs.items():
# data = {}
# workdir = os.path.join(
# os.pardir, 'data', 'steady_state', MODEL_NAME, 'autoregulation')
# distutils.dir_util.mkpath(workdir)
# distutils.dir_util.mkpath(os.path.join(workdir, "Figures"))
# print("Running steady state - {}".format(i))
# for t in temperatures:
# print("Running temperature {}C".format(t))
# config = {
# "model_name": MODEL_NAME,
# "inputs": i,
# "parameters": {
# "temp": t
# },
# "targets": ["CBF"],
# "max_val": r[1],
# "min_val": r[0],
# "debug": True,
# "direction": direction
# }
# model = RunSteadyState(conf=config, workdir=workdir)
# output = model.run_steady_state()
# data[t] = output
# with open(os.path.join(workdir,
# "{}_{}.json".format(i, direction)), 'w') as f:
# json.dump(data, f)
# fig, ax = plt.subplots()
# for idx, t in enumerate(temperatures):
# ax.plot(data[t][i], data[t]['CBF'], label=t, color=cbar[idx])
# ax.set_title("Steady state for varying {} - {}".format(i, direction))
# ax.set_ylabel("CBF")
# ax.set_xlabel(i)
# legend = ax.legend(loc='upper center')
# fig.savefig(os.path.join(workdir, "Figures", "{}_{}.png".format(i, direction)),
# bbox_inches="tight")
outputs = ["CMRO2", "CCO", "HbT", "CBF", "Hbdiff",
"TOI", "Vmca", "HbO2", "HHb"]
# For debugging
# outputs.extend(["k_MAshut", "k_nMAshut", "Q_temp", "_ADP", "_ATP"])
q10_range = np.arange(0.1, 5.1, 0.1)
qdiff_range = np.arange(0.1, 1.0, 0.02)
q_range = list(itertools.product(q10_range, qdiff_range))
pa_range = [30, 40, 50, 60, 70]
sao2_range = [0.8, 0.9, 1.0]
cbar = sns.color_palette("Set1", n_colors=len(q10_range))
data = {}
direction = "down"
workdir = os.path.join(os.pardir, 'data', 'steady_state', MODEL_NAME,
"model_output")
distutils.dir_util.mkpath(workdir)
for q in q_range:
print("Running Q10 {}, q_diff {}".format(q[0], q[1]))
config = {
"model_name": MODEL_NAME,
"inputs": "temp",
"parameters": {
"Q_10": q[0],
"q_diff": q[1]
},
"targets": copy.copy(outputs),
"max_val": 37,
"min_val": 33.5,
"debug": False,
"direction": direction
}
model = RunSteadyState(conf=config, workdir=workdir)
output = model.run_steady_state()
data["{}_{}".format(q[0], q[1])] = output
with open(os.path.join(workdir, "q_range_runs_{}.json".format(direction)), 'w') as f:
json.dump(data, f)
# rcParams['axes.titlepad'] = 12
# for o in outputs:
# print("Plotting {}".format(o))
# if direction == "both":
# fig, ax = plt.subplots(nrows=len(q10_range),
# ncols=len(qdiff_range), figsize=(10, 10), sharey=True)
# for idx, q in enumerate(q_range):
# q_key = "{}_{}".format(q[0], q[1])
# ax[idx//len(q10_range)][idx % len(q10_range)].plot(data[q_key]["temp"][:len(data[q_key][o]) // 2 + 1],
# data[q_key][o][:len(
# data[q_key][o]) // 2 + 1],
# label="Up")
# ax[idx//len(q10_range)][idx % len(qdiff_range)].plot(data[q_key]["temp"][len(data[q_key][o]) // 2:],
# data[q_key][o][len(
# data[q_key][o]) // 2:],
# label="Down")
# ax[idx//len(q10_range)][idx %
# len(qdiff_range)].set_title("Q10: {} q_diff: {}".format(q[0], q[1]))
# ax[idx//len(q10_range)][idx %
# len(qdiff_range)].set_ylabel(o)
# ax[idx//len(q10_range)][idx %
# len(qdiff_range)].set_xlabel("Temp (C)")
# # ax[idx//5][idx % 5].legend()
# plt.tight_layout()
# plt.subplots_adjust(hspace=0.4)
# path = os.path.join(workdir, "Figures")
# distutils.dir_util.mkpath(path)
# fig.savefig(os.path.join(path, "{}_both.png".format(o)),
# bbox_inches="tight")
# plt.close()
# else:
# fig, ax = plt.subplots(nrows=len(q10_range),
# ncols=len(qdiff_range), figsize=(10, 10), sharey="col")
# for idx, q in enumerate(q_range):
# q_key = "{}_{}".format(q[0], q[1])
# ax.flatten()[idx].plot(data[q_key]["temp"],
# data[q_key][o], label="$Q_{10}$: %.2f\n$q_{diff}$: %.2f" % (q[0], q[1]))
# ax.flatten()[idx].set_title(
# "$Q_{10}$: %.2f $q_{diff}$: %.2f" % (q[0], q[1]))
# ax.flatten()[idx].set_ylabel(o)
# ax.flatten()[idx].set_xlabel("Temp (C)")
# # ax.flatten()[idx].legend()
# fig.suptitle("Effect of %s temperature on %s" %
# ("decreasing" if direction == "down" else "increasing", o))
# fig.subplots_adjust(top=0.9, hspace=0.5, wspace=0.5)
# path = os.path.join(workdir, "Figures")
# distutils.dir_util.mkpath(path)
# fig.savefig(os.path.join(path, "{}_{}.png".format(o, direction)),
# bbox_inches="tight")
# plt.close()
|
py | 7dfe16fce486c88484a754b232b589be1c6cee86 | import base64
import os
import time
from flask import Flask, jsonify, request, redirect, url_for, send_file, make_response, render_template
from flask_cors import CORS, cross_origin
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
# Model loader
opt = TestOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
model = create_model(opt)
# Config
app = Flask(__name__, template_folder='templates', static_url_path='/static/')
CORS(app)
app.config['UPLOAD_DIR'] = os.path.join(os.getcwd(), 'upload')
app.config['RESULT_DIR'] = os.path.join(os.getcwd(), 'results')
app.config['ALLOWED_EXTENSIONS'] = {'jpg', 'jpeg', 'png'}
# Setup
if not os.path.exists(app.config['UPLOAD_DIR']):
os.mkdir(app.config['UPLOAD_DIR'])
if not os.path.exists(app.config['RESULT_DIR']):
os.mkdir(app.config['RESULT_DIR'])
# Helpers
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def error(msg):
return jsonify({'error': msg})
# Routers
@app.route('/')
def pong():
    return 'Hello', {'Content-Type': 'text/plain; charset=utf-8'}
@app.route('/home')
def home_page():
return render_template('main_page.html')
@app.route('/gen', methods=['POST'])
def gen():
    # if 'file' not in request.files:
# return error('file form-data not existed'), 412
# image = request.files['file']
# if not allowed_file(image.filename):
# return error('Only supported %s' % app.config['ALLOWED_EXTENSIONS']), 415
# Submit taylor.jpg ---> save image to upload/12345678/taylor.jpg (upload/timestamp/imagename.ext)
t = int(time.time())
image_dir = os.path.join(app.config['UPLOAD_DIR'], str(t))
image_path = os.path.join(image_dir, '1.png')
os.mkdir(image_dir)
image_data = base64.b64decode(request.data)
with open(image_path, 'wb') as f:
f.write(image_data)
# image.save(image_path)
# Prepare data loader
opt.dataroot = image_dir
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
for i, data in enumerate(dataset):
if i >= opt.how_many:
break
model.set_input(data)
# Forward
model.test()
# Convert image to numpy array
fake = util.tensor2im(model.fake_B.data)
# Save image
result_dir = os.path.join(app.config['RESULT_DIR'], str(t))
result_path = os.path.join(result_dir, '1.png')
os.mkdir(result_dir)
util.save_image(fake, result_path)
# with open(result_path, 'rb') as img_f:
# img_stream = img_f.read()
# img_stream = base64.b64encode(img_stream)
# return img_stream
return send_file(result_path)
if __name__ == '__main__':
app.run()
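# Minimal client sketch (an assumption, not part of the original app: it presumes the
# server runs on localhost:5000 and that `requests` is installed; the /gen handler
# above expects the raw base64-encoded image as the request body):
#
#   import base64, requests
#   with open('face.jpg', 'rb') as f:
#       payload = base64.b64encode(f.read())
#   resp = requests.post('http://localhost:5000/gen', data=payload)
#   with open('result.png', 'wb') as f:
#       f.write(resp.content)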
|
py | 7dfe17702ba0e2a96a68338248602d0d1371ef8a | # SPDX-FileCopyrightText: 2018 Mikey Sklar for Adafruit Industries
# SPDX-FileCopyrightText: 2020 Erin St. Blaine for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
import neopixel
pixel_count = 6 # Number of NeoPixels
pixel_pin = board.D1 # Pin where NeoPixels are connected
speed = .1 # Animation speed (in seconds).
# This is how long to spend in a single animation frame.
# Higher values are slower.
# Good values to try are 0.4, 0.2, 0.1, 0.05, 0.025, etc.
animation = 0 # Type of animation, can be one of these values:
# 0 - Solid color pulse
# 1 - Moving color pulse
color_steps = 8 # Number of steps in the animation.
brightness = 1.0 # 0-1, higher number is brighter
# Adjacent colors (on color wheel).
# red yellow
color_animation = ([255, 0, 0], [255, 36, 0], [255, 72, 0], [255, 109, 0],
[255, 145, 0], [255, 182, 0], [255, 218, 0], [255, 255, 0])
# Adjacent colors
#([255, 0, 0], [255, 36, 0], [255, 72, 0], [255, 109, 0],
# [255, 145, 0], [255, 182, 0], [255, 218, 0], [255, 255, 0]) # red yellow
#([255, 255, 0], [218, 255, 0], [182, 255, 0], [145, 255, 0],
# [109, 255, 0], [72, 255, 0], [36, 255, 0], [0, 255, 0]) # yello green
#([0, 255, 0], [0, 255, 36], [0, 255, 72], [0, 255, 109],
# [0, 255, 145], [0, 255, 182], [0, 255, 218], [0, 255, 255]) # green cyan
#([0, 255, 255], [0, 218, 255], [0, 182, 255], [0, 145, 255],
# [0, 109, 255], [0, 72, 255], [0, 36, 255], [0, 0, 255]) # cyan blue
#([0, 0, 255], [36, 0, 255], [72, 0, 255], [109, 0, 255],
# [145, 0, 255], [182, 0, 255], [218, 0, 255], [255, 0, 255]) # blue magenta
#([255, 0, 255], [255, 0, 218], [255, 0, 182], [255, 0, 145],
# [255, 0, 109], [255, 0, 72], [255, 0, 36], [255, 0, 0]) # magenta red
# Complimentary colors
#([255, 0, 0], [218, 36, 36], [182, 72, 72], [145, 109, 109],
# [109, 145, 145], [72, 182, 182], [36, 218, 218], [0, 255, 255]) # red cyan
#([255, 255, 0], [218, 218, 36], [182, 182, 72], [145, 145, 109],
# [109, 109, 145], [72, 72, 182], [36, 36, 218], [0, 0, 255]) # yellow blue
#([0, 255, 0], [36, 218, 36], [72, 182, 72], [109, 145, 109],
# [145, 109, 145], [182, 72, 182], [218, 36, 218], [255, 0, 255]) # green magenta
# Other combos
#([255, 0, 0], [218, 36, 0], [182, 72, 0], [145, 109, 0],
# [109, 145, 0], [72, 182, 0], [36, 218, 0], [0, 255, 0]) # red green
#([255, 255, 0], [218, 255, 36], [182, 255, 72], [145, 255, 109],
# [109, 255, 145], [72, 255, 182], [36, 255, 218], [0, 255, 255]) # yellow cyan
#([0, 255, 0], [0, 218, 36], [0, 182, 72], [0, 145, 109],
# [0, 109, 145], [0, 72, 182], [0, 36, 218], [0, 0, 255]) # green blue
#([0, 255, 255], [36, 218, 255], [72, 182, 255], [109, 145, 255],
# [145, 109, 255], [182, 72, 255], [218, 36, 255], [255, 0, 255]) # cyan magenta
#([0, 0, 255], [36, 0, 218], [72, 0, 182], [109, 0, 145],
# [145, 0, 109], [182, 0, 72], [218, 0, 36], [255, 0, 0]) # blue red
#([255, 0, 255], [255, 36, 218], [255, 72, 182], [255, 109, 145],
# [255, 145, 109], [255, 182, 72], [255, 218, 36], [255, 255, 0]) # magenta yellow
# Solid colors fading to dark
#([255, 0, 0], [223, 0, 0], [191, 0, 0], [159, 0, 0],
# [127, 0, 0], [95, 0, 0], [63, 0, 0], [31, 0, 0]) # red
#([255, 153, 0], [223, 133, 0], [191, 114, 0], [159, 95, 0],
# [127, 76, 0], [95, 57, 0], [63, 38, 0], [31, 19, 0]) # orange
#([255, 255, 0], [223, 223, 0], [191, 191, 0], [159, 159, 0],
# [127, 127, 0], [95, 95, 0], [63, 63, 0], [31, 31, 0]) # yellow
#([0, 255, 0], [0, 223, 0], [0, 191, 0], [0, 159, 0],
# [0, 127, 0], [0, 95, 0], [0, 63, 0], [0, 31, 0]) # green
#([0, 0, 255], [0, 0, 223], [0, 0, 191], [0, 0, 159],
# [0, 0, 127], [0, 0, 95], [0, 0, 63], [0, 0, 31]) # blue
#([75, 0, 130], [65, 0, 113], [56, 0, 97], [46, 0, 81],
# [37, 0, 65], [28, 0, 48], [18, 0, 32], [9, 0, 16]) # indigo
#([139, 0, 255], [121, 0, 223], [104, 0, 191], [86, 0, 159],
# [69, 0, 127], [52, 0, 95], [34, 0, 63], [17, 0, 31]) # violet
#([255, 255, 255], [223, 223, 223], [191, 191, 191], [159, 159, 159],
# [127, 127, 127], [95, 95, 95], [63, 63, 63], [31, 31, 31]) # white
#([255, 0, 0], [255, 153, 0], [255, 255, 0], [0, 255, 0],
# [0, 0, 255], [75, 0, 130], [139, 0, 255], [255, 255, 255]) # rainbow colors
# Global state used by the sketch
strip = neopixel.NeoPixel(pixel_pin, pixel_count, brightness=brightness, auto_write=False)
while True: # Loop forever...
# Main loop will update all the pixels based on the animation.
for i in range(pixel_count):
# Animation 0, solid color pulse of all pixels.
if animation == 0:
current_step = (time.monotonic() / speed) % (color_steps * 2 - 2)
if current_step >= color_steps:
current_step = color_steps - (current_step - (color_steps - 2))
# Animation 1, moving color pulse. Use position to change brightness.
elif animation == 1:
current_step = (time.monotonic() / speed + i) % (color_steps * 2 - 2)
if current_step >= color_steps:
current_step = color_steps - (current_step - (color_steps - 2))
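        # Both branches fold the step counter into a triangle wave: with color_steps = 8
        # the palette index runs 0,1,...,7 and then back down 6,5,...,1 before repeating,
        # so the colors pulse forward and backward through color_animation.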
strip[i] = color_animation[int(current_step)]
# Show the updated pixels.
strip.show()
|
py | 7dfe1848d58b1c69954d8f6600a2e80a5d5b9076 | from django.test import TestCase
from courses import models
from courses.tests.factories import (
SemesterFactory, SemesterDepartmentFactory,
OfferedForFactory, CourseFactory, SectionFactory, DepartmentFactory,
PeriodFactory, SectionPeriodFactory
)
class SemesterBasedQuerySetTest(TestCase):
def setUp(self):
self.sem = SemesterFactory.create(year=2011, month=1)
self.sd1 = SemesterDepartmentFactory.create(semester=self.sem)
self.sd2 = SemesterDepartmentFactory.create(semester=self.sem)
def test_semester_based_queryset_for_a_semester(self):
departments = models.Department.objects.by_semester(2011, 1)
self.assertEqual([self.sd1.department, self.sd2.department], list(departments))
def test_semester_based_queryset_is_empty_for_another_semester(self):
departments = models.Department.objects.by_semester(2010, 1)
self.assertEqual([], list(departments))
def test_no_semester_filtering_if_none(self):
departments = models.Department.objects.by_semester(None, None)
self.assertEqual([self.sd1.department, self.sd2.department], list(departments))
class SerializableQuerySetTest(TestCase):
def setUp(self):
self.sem = SemesterFactory.create(year=2011, month=1)
dept1 = DepartmentFactory.create(name='depart1', code='dept1')
self.sd1 = SemesterDepartmentFactory.create(semester=self.sem, department=dept1)
dept2 = DepartmentFactory.create(name='depart2', code='dept2')
self.sd2 = SemesterDepartmentFactory.create(semester=self.sem, department=dept2)
def test_serializable_queryset(self):
departments = models.Department.objects.all().toJSON()
self.assertEqual([{
'name': 'depart1',
'code': 'dept1',
}, {
'name': 'depart2',
'code': 'dept2',
}], departments)
class SectionPeriodQuerySetTest(TestCase):
def setUp(self):
self.sem = SemesterFactory.create(year=2011, month=1)
self.dept = DepartmentFactory.create(code='CSCI')
SemesterDepartmentFactory.create(department=self.dept, semester=self.sem)
self.course = CourseFactory.create(number=2222, department=self.dept)
OfferedForFactory.create(course=self.course, semester=self.sem)
self.section = SectionFactory.create(course=self.course, semester=self.sem)
SectionPeriodFactory.create(section=self.section)
def test_filter_by_course_code(self):
sps = models.SectionPeriod.objects.by_course_code('CSCI', 2222)
self.assertEqual([self.section], [sp.section for sp in sps])
def test_filter_by_course(self):
c = models.Course.objects.all()[0]
sps = models.SectionPeriod.objects.by_course(c)
self.assertEqual([self.section], [sp.section for sp in sps])
def test_filter_by_sections(self):
sps = models.SectionPeriod.objects.by_sections([self.section])
self.assertEqual([self.section], [sp.section for sp in sps])
def test_filter_by_courses(self):
sps = models.SectionPeriod.objects.by_courses([self.course])
self.assertEqual([self.section], [sp.section for sp in sps])
class CourseQuerySetTest(TestCase):
def setUp(self):
self.sem = SemesterFactory.create()
self.dept1 = DepartmentFactory.create(code='CSCI')
self.dept2 = DepartmentFactory.create()
self.course1 = CourseFactory.create(department=self.dept1, name='the course')
OfferedForFactory.create(course=self.course1, semester=self.sem)
self.course2 = CourseFactory.create(department=self.dept2)
OfferedForFactory.create(course=self.course2, semester=self.sem)
self.course3 = CourseFactory.create(department=self.dept1, name='another course')
OfferedForFactory.create(course=self.course3, semester=self.sem)
def test_filter_by_department(self):
courses = models.Course.objects.by_department(self.dept1)
self.assertEqual([self.course1, self.course3], list(courses))
def test_search_by_department(self):
courses = models.Course.objects.search(dept='CSCI')
self.assertEqual([self.course1, self.course3], list(courses))
def test_search_by_query(self):
courses = models.Course.objects.search('another').get()
self.assertEqual(self.course3, courses)
|
py | 7dfe19487762242297c0d9583165c7e300ef33c5 | from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from bot.DataBase.models.base import Base
class Admin(Base):
__tablename__ = "admins"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("User", backref=backref("admins", cascade="all, delete-orphan"))
type = Column(String, default="support")
def __init__(self, user):
self.user = user
|
py | 7dfe194968328344d7da6b727cb5e57fd47333eb | '''
Arduino interface
'''
import glob
import os
import serial
import subprocess
import time
PRODUCTION = os.getenv("PRODUCTION")
class FakeSerial(object):
# FIXME: implement and connect to Arduino class
def __init__(self, log, *args, **kwargs):
self.Log = log
self.Value = ''
self.Map = {
'I': "I",
'W': "30.0",
'V': "",
'1': '1',
'2': '2',
'3': '3'
}
self.Numbers = "123456789"
def close(self):
return
def write(self, value):
self.Value = value.decode().strip()
self.Log.debug("SERIAL: sent: '%s'"%self.Value)
if self.Value in self.Numbers:
if self.Value in self.Map['V']:
self.Map['V'] = self.Map['V'].replace(self.Value, "")
else:
self.Map['V'] = self.Map['V'] + self.Value
return
def readline(self):
resp = self.Map.get(self.Value, 'E')
self.Log.debug("SERIAL: response: '%s'"%resp)
return resp.encode()
class Arduino(object):
def __init__(self, log):
self.Log = log
self.Stream = None
self._newSerial()
self.Running = False
def _newSerial(self):
'''
Reset the serial device using the DTR lines
'''
try:
self.Stream.close()
except:
pass
if PRODUCTION:
serial_devices = glob.glob("/dev/ttyUSB*")
if len(serial_devices) < 1:
self.Log.error("No Serial devices detected. Restarting ...")
subprocess.call("sudo reboot", shell=True)
self.SerialDevice = sorted(serial_devices)[-1]
self.Stream = serial.Serial(self.SerialDevice, 57600, timeout=1)
else:
self.Stream = FakeSerial(self.Log)
if self._sendData('I') == 'I':
return
# still not reset
self.Log.error("Failed to reset Serial!!!")
def resetSerial(self):
try:
self.Stream.close()
except:
pass
if PRODUCTION:
# FIXME: match device to the actual
subprocess.call("sudo ./usbreset /dev/bus/usb/001/002", shell=True, cwd=os.path.expanduser("~/"))
time.sleep(2)
self._newSerial()
def _readResponse(self):
try:
response = self.Stream.readline().decode('utf-8').strip()
while len(response) > 0 and response.startswith('D'):
self.Log.debug(response)
response = self.Stream.readline().decode('utf-8').strip()
except Exception as e:
self.Log.error("Serial exception: %s" % (e), exc_info=1)
self.resetSerial()
self.Log.debug("SERIAL - Response: '%s'"%(response))
return response
def _sendData(self, value):
self._readResponse()
v = bytes(value, 'utf-8')
self.Log.debug("SERIAL - Sending: %s"%(v))
self.Stream.write(v)
return self._readResponse()
def handleDebugMessages(self):
self._readResponse()
def getOpenValves(self):
valves = self._sendData("V")
return [int(c) for c in valves]
def getWaterCounter(self):
# This should only be called via the WaterMeter class
ret = self._sendData("W")
try:
# int wont handle decimals in the string, but float handles with or without
return int(float(ret))
except Exception:
self.Log.error("Int(%s) conversion failed for Arduino.getWaterCounter()"%ret)
return 0
def toggleValve(self, valve):
if self._sendData(str(valve)) == str(valve):
return True
return False
def openValve(self, valve):
open_valves = self.getOpenValves()
if not valve in open_valves:
self.toggleValve(valve)
return True
return False
def closeValve(self, valve):
open_valves = self.getOpenValves()
if valve in open_valves:
self.toggleValve(valve)
return True
return False
def checkStartButton(self):
return self._sendData('S') == 'S'
def enterProgramMode(self):
self.Running = True
return self._sendData('P') == 'P'
def leaveProgramMode(self):
self.Running = False
return self._sendData('p') == 'p'
def isProgramRunning(self):
return self.Running
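# Usage sketch (an illustration only; assumes a standard logging.Logger. With the
# PRODUCTION environment variable unset, the class falls back to FakeSerial, so this
# can be exercised without hardware attached):
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   board = Arduino(logging.getLogger("arduino"))
#   board.openValve(1)            # opens valve 1 if it is not already open
#   print(board.getOpenValves())  # -> [1]
#   board.closeValve(1)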
|
py | 7dfe1a6f2e3d8e1ddea3bb94b0ee898471077e0b | _base_ = [
'../../_base_/meta_test/tiered-imagenet_meta-test_5way-1shot.py',
'../../_base_/runtime/iter_based_runtime.py',
'../../_base_/schedules/adam_100k_iter.py'
]
img_size = 84
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromBytes'),
dict(type='RandomResizedCrop', size=img_size),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label'])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=8,
train=dict(
type='EpisodicDataset',
num_episodes=100000,
num_ways=5,
num_shots=5,
num_queries=16,
dataset=dict(
type='TieredImageNetDataset',
data_prefix='data/tiered_imagenet',
subset='train',
pipeline=train_pipeline)))
model = dict(
type='ProtoNet',
backbone=dict(type='ResNet12'),
head=dict(type='PrototypeHead'))
|
py | 7dfe1aa0eb5f70709d7fd4319b80819a6d8d897a | from django.test import TestCase
from django.urls import reverse
from .utils import add_default_data
from petition.models import Petition, Signature
class ConfirmViewTest(TestCase):
"""Test confirm view"""
@classmethod
def setUpTestData(cls):
add_default_data()
def test_ConfirmOk(self):
data = {
'first_name': 'Alan',
'last_name': 'John',
'email': '[email protected]',
'subscribed_to_mailinglist': False,
}
petition = Petition.objects.filter(published=True).first()
response = self.client.post(reverse('create_signature', args=[petition.id]), data, follow=True)
self.assertRedirects(response, petition.url)
signature = Signature.objects.filter(petition=petition).first()
self.assertEqual(signature.confirmed, False)
confirm_hash = signature.confirmation_hash
response = self.client.get(reverse('confirm', args=[petition.id, confirm_hash]), follow=True)
self.assertRedirects(response, petition.url)
signature = Signature.objects.filter(petition=petition).first() # Reload the object
self.assertEqual(signature.confirmed, True) |
py | 7dfe1ab72272fb73e5716974903c3c1814653c17 | from collections import OrderedDict
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
OSCAR_SHOP_NAME = 'Oscar'
OSCAR_SHOP_TAGLINE = ''
OSCAR_HOMEPAGE = reverse_lazy('promotions:home')
# Dynamic class loading
OSCAR_DYNAMIC_CLASS_LOADER = 'oscar.core.loading.default_class_loader'
# Basket settings
OSCAR_BASKET_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_BASKET_COOKIE_OPEN = 'oscar_open_basket'
OSCAR_BASKET_COOKIE_SECURE = False
OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 10000
# Recently-viewed products
OSCAR_RECENTLY_VIEWED_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_RECENTLY_VIEWED_COOKIE_NAME = 'oscar_history'
OSCAR_RECENTLY_VIEWED_COOKIE_SECURE = False
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
# Currency
OSCAR_DEFAULT_CURRENCY = 'USD'
# Paths
OSCAR_IMAGE_FOLDER = 'images/products/%Y/%m/'
OSCAR_PROMOTION_FOLDER = 'images/promotions/'
OSCAR_DELETE_IMAGE_FILES = True
# Copy this image from oscar/static/img to your MEDIA_ROOT folder.
# It needs to be there so Sorl can resize it.
OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg'
OSCAR_UPLOAD_ROOT = '/tmp'
# Address settings
OSCAR_REQUIRED_ADDRESS_FIELDS = ('first_name', 'last_name', 'line1',
'line4', 'postcode', 'country')
# Pagination settings
OSCAR_OFFERS_PER_PAGE = 20
OSCAR_PRODUCTS_PER_PAGE = 20
OSCAR_REVIEWS_PER_PAGE = 20
OSCAR_NOTIFICATIONS_PER_PAGE = 20
OSCAR_EMAILS_PER_PAGE = 20
OSCAR_ORDERS_PER_PAGE = 20
OSCAR_ADDRESSES_PER_PAGE = 20
OSCAR_STOCK_ALERTS_PER_PAGE = 20
OSCAR_DASHBOARD_ITEMS_PER_PAGE = 20
# Checkout
OSCAR_ALLOW_ANON_CHECKOUT = False
# Promotions
OSCAR_PROMOTION_POSITIONS = (('page', 'Page'),
('right', 'Right-hand sidebar'),
('left', 'Left-hand sidebar'))
# Reviews
OSCAR_ALLOW_ANON_REVIEWS = True
OSCAR_MODERATE_REVIEWS = False
# Accounts
OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:profile-view'
# This enables sending alert notifications/emails instantly when products get
# back in stock by listening to stock record update signals.
# This might impact performance for large numbers of stock record updates.
# Alternatively, the management command ``oscar_send_alerts`` can be used to
# run periodically, e.g. as a cron job. In this case eager alerts should be
# disabled.
OSCAR_EAGER_ALERTS = True
# Registration
OSCAR_SEND_REGISTRATION_EMAIL = True
OSCAR_FROM_EMAIL = '[email protected]'
# Slug handling
OSCAR_SLUG_FUNCTION = 'oscar.core.utils.default_slugifier'
OSCAR_SLUG_MAP = {}
OSCAR_SLUG_BLACKLIST = []
OSCAR_SLUG_ALLOW_UNICODE = False
# Cookies
OSCAR_COOKIES_DELETE_ON_LOGOUT = ['oscar_recently_viewed_products', ]
# Hidden Oscar features, e.g. wishlists or reviews
OSCAR_HIDDEN_FEATURES = []
# Menu structure of the dashboard navigation
OSCAR_DASHBOARD_NAVIGATION = [
{
'label': _('Dashboard'),
'icon': 'icon-th-list',
'url_name': 'dashboard:index',
},
{
'label': _('Catalogue'),
'icon': 'icon-sitemap',
'children': [
{
'label': _('Products'),
'url_name': 'dashboard:catalogue-product-list',
},
{
'label': _('Product Types'),
'url_name': 'dashboard:catalogue-class-list',
},
{
'label': _('Categories'),
'url_name': 'dashboard:catalogue-category-list',
},
{
'label': _('Ranges'),
'url_name': 'dashboard:range-list',
},
{
'label': _('Low stock alerts'),
'url_name': 'dashboard:stock-alert-list',
},
]
},
{
'label': _('Fulfilment'),
'icon': 'icon-shopping-cart',
'children': [
{
'label': _('Orders'),
'url_name': 'dashboard:order-list',
},
{
'label': _('Statistics'),
'url_name': 'dashboard:order-stats',
},
{
'label': _('Partners'),
'url_name': 'dashboard:partner-list',
},
# The shipping method dashboard is disabled by default as it might
# be confusing. Weight-based shipping methods aren't hooked into
# the shipping repository by default (as it would make
# customising the repository slightly more difficult).
# {
# 'label': _('Shipping charges'),
# 'url_name': 'dashboard:shipping-method-list',
# },
]
},
{
'label': _('Customers'),
'icon': 'icon-group',
'children': [
{
'label': _('Customers'),
'url_name': 'dashboard:users-index',
},
{
'label': _('Stock alert requests'),
'url_name': 'dashboard:user-alert-list',
},
]
},
{
'label': _('Offers'),
'icon': 'icon-bullhorn',
'children': [
{
'label': _('Offers'),
'url_name': 'dashboard:offer-list',
},
{
'label': _('Vouchers'),
'url_name': 'dashboard:voucher-list',
},
{
'label': _('Voucher Sets'),
'url_name': 'dashboard:voucher-set-list',
},
],
},
{
'label': _('Content'),
'icon': 'icon-folder-close',
'children': [
{
'label': _('Content blocks'),
'url_name': 'dashboard:promotion-list',
},
{
'label': _('Content blocks by page'),
'url_name': 'dashboard:promotion-list-by-page',
},
{
'label': _('Pages'),
'url_name': 'dashboard:page-list',
},
{
'label': _('Email templates'),
'url_name': 'dashboard:comms-list',
},
{
'label': _('Reviews'),
'url_name': 'dashboard:reviews-list',
},
]
},
{
'label': _('Reports'),
'icon': 'icon-bar-chart',
'url_name': 'dashboard:reports-index',
},
]
OSCAR_DASHBOARD_DEFAULT_ACCESS_FUNCTION = 'oscar.apps.dashboard.nav.default_access_fn' # noqa
# Search facets
OSCAR_SEARCH_FACETS = {
'fields': OrderedDict([
# The key for these dicts will be used when passing facet data
# to the template. Same for the 'queries' dict below.
('product_class', {'name': _('Type'), 'field': 'product_class'}),
('rating', {'name': _('Rating'), 'field': 'rating'}),
# You can specify an 'options' element that will be passed to the
# SearchQuerySet.facet() call.
# For instance, with Elasticsearch backend, 'options': {'order': 'term'}
# will sort items in a facet by title instead of number of items.
# It's hard to get 'missing' to work
# correctly though as of Solr's hilarious syntax for selecting
# items without a specific facet:
# http://wiki.apache.org/solr/SimpleFacetParameters#facet.method
# 'options': {'missing': 'true'}
]),
'queries': OrderedDict([
('price_range',
{
'name': _('Price range'),
'field': 'price',
'queries': [
# This is a list of (name, query) tuples where the name will
# be displayed on the front-end.
(_('0 to 20'), '[0 TO 20]'),
(_('20 to 40'), '[20 TO 40]'),
(_('40 to 60'), '[40 TO 60]'),
(_('60+'), '[60 TO *]'),
]
}),
]),
}
OSCAR_PROMOTIONS_ENABLED = True
OSCAR_PRODUCT_SEARCH_HANDLER = None
|
py | 7dfe1b01895f9a4abb64cee8e5019944bbad5a59 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# seems to work fine
# should probably check on chapter divisions
def getBooks(soup):
siteURL = 'http://www.thelatinlibrary.com'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
textsURL.remove("http://www.thelatinlibrary.com/christian.html")
logger.info("\n".join(textsURL))
return textsURL
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/bernardcluny.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.strip()
colltitle = collSOUP.title.string.strip()
date = collSOUP.span.string.strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
textsURL = getBooks(collSOUP)
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Bernard of Cluny'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
title = textsoup.title.string.split(':')[1].strip()
            getp = textsoup.find_all('p')
            # Guard against paragraphs appearing before the first chapter heading
            chapter = ''
            verse = 0
            for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                 'internal_navigation']:  # these are not part of the main text
continue
except:
pass
verses = []
chapter_f = p.find('b')
if chapter_f is not None:
chapter = p.get_text().strip()
verse = 0
continue
elif p.find('br') is not None:
brtags = p.findAll('br')
try:
try:
firstline = brtags[0].previous_sibling.strip()
except:
firstline = brtags[0].previous_sibling.previous_sibling.strip()
verses.append(firstline)
except:
pass
for br in brtags:
try:
text = br.next_sibling.next_sibling.strip()
except:
text = br.next_sibling.strip()
if text is None or text == '' or text.isspace():
continue
                    # Strip a trailing verse number, if present, before recording the line
                    if re.search(r'[0-9]+$', text):
                        try:
                            text = re.split(r'[0-9]+', text)[0].strip()
                        except:
                            pass
                    verses.append(text)
else:
text = p.get_text()
verses.append(text.strip())
for v in verses:
if v.startswith('Christian'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'poetry'))
if __name__ == '__main__':
main()
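# Inspection sketch (assumes texts.db was populated by the run above and that the page
# title stored as author is 'Bernard of Cluny', as the DELETE statement implies):
#
#   with sqlite3.connect('texts.db') as db:
#       for row in db.execute("SELECT chapter, verse, passage FROM texts "
#                             "WHERE author = 'Bernard of Cluny' LIMIT 5"):
#           print(row)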
|
py | 7dfe1bc0e46dd6e6790669e74cdd25eb586d6a87 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 09:12:35 2020
@author: Ajit Johnson Nirmal
Convert counts table to ANN object
Added capability to take in multiple images and create a single AnnData Object
"""
# Import library
import numpy as np
import anndata as ad
import pandas as pd
import random
def mcmicro_to_object (image_path,remove_dna=True,remove_string_from_name=None,
islog=True,drop_markers=None,random_sample=None,
CellId='CellId',split='Area',custom_imageid=None,
min_cells=None):
"""
Parameters
----------
image_path : list
List of path to the image or images. Each Image should have a unique path supplied.
remove_dna : bool, optional
Remove the DNA channels from the final output. Looks for channels with the string 'dna' in it. The default is True.
remove_string_from_name : string, optional
        Used to clean up channel names. If a string is given, that particular string will be removed from all marker names.
If multiple images are passed, just use the string that appears in the first image. The default is None.
islog : bool, optional
If the data is in log scale, passing true will convert it to natural scale. The default is True.
drop_markers : list, optional
List of markers to drop from the analysis. e.g. ["CD3D", "CD20"]. The default is None.
random_sample : int, optional
Randomly sub-sample the data with the desired number of cells. The default is None.
CellId : string, optional
Name of the column that contains the cell ID. The default is CellId.
split : string, optional
To split the CSV into counts table and meta data, pass in the name of the column
that immediately follows the marker quantification. The default is Area.
Returns
-------
AnnData Object
Example
-------
image_path = ['/Users/aj/whole_sections/PTCL1_450.csv',
'/Users/aj/whole_sections/PTCL2_552.csv']
    adata = mcmicro_to_object (image_path, islog=True,
                               drop_markers= ['CD21', 'ACTIN'], remove_string_from_name='Cell_PTCL1_450', random_sample=5000)
"""
# Import data based on the location provided
def load_process_data (image):
# Print the data that is being processed
print("Loading " + str(image.rsplit('/', 1)[-1]))
d = pd.read_csv(image)
# If the data does not have a unique image ID column, add one.
if 'ImageId' not in d.columns:
if custom_imageid is not None:
imid = custom_imageid
else:
imid = random.randint(1000000,9999999)
d['ImageId'] = imid
# Unique name for the data
d.index = d['ImageId'].astype(str)+'_'+d[CellId].astype(str)
# Drop ImageId and cell ID column
d.drop([CellId], axis=1, inplace=True)
# Move Image ID to the last column
cols = d.columns.tolist()
cols.insert(len(cols), cols.pop(cols.index('ImageId')))
d = d.reindex(columns= cols)
# If there is INF replace with zero
d = d.replace([np.inf, -np.inf], 0)
# Return data
return d
# Apply function to all images and create a master dataframe
r_load_process_data = lambda x: load_process_data(image=x) # Create lamda function
all_data = list(map(r_load_process_data, list(image_path))) # Apply function
# Merge all the data into a single large dataframe
for i in range(len(all_data)):
all_data[i].columns = all_data[0].columns
entire_data = pd.concat(all_data, axis=0, sort=False)
#Remove the images that contain less than a defined threshold of cells (min_cells)
if min_cells is not None:
to_drop = entire_data['ImageId'].value_counts()[entire_data['ImageId'].value_counts() < min_cells].index
entire_data = entire_data[~entire_data['ImageId'].isin(to_drop)]
print('Removed Images that contained less than '+str(min_cells)+' cells: '+ str(to_drop.values))
# Split the data into expression data and meta data
# Step-1 (Find the index of the column with name Area)
split_idx = entire_data.columns.get_loc(split)
meta = entire_data.iloc [:,split_idx:]
# Step-2 (select only the expression values)
entire_data = entire_data.iloc [:,:split_idx]
# Save a copy of the column names in the uns space of ANNDATA
if remove_string_from_name != None:
markers = list(entire_data.columns.str.replace(remove_string_from_name, ''))
else:
markers = list(entire_data.columns)
# Remove DNA channels
if remove_dna == True:
entire_data = entire_data.loc[:,~entire_data.columns.str.contains('dna', case=False)]
# Drop unnecessary markers
if drop_markers != None:
for i in drop_markers:
entire_data = entire_data.loc[:,~entire_data.columns.str.contains(i, case=False)]
# Rename the columns of the data
if remove_string_from_name != None:
entire_data.columns = entire_data.columns.str.replace(remove_string_from_name, '')
# Convert the data to natural scale
if islog==True:
entire_data= np.exp(entire_data)
# Randomly sample the data
if random_sample != None:
entire_data = entire_data.sample(n=random_sample,replace=False)
# Create an anndata object
adata = ad.AnnData(entire_data)
adata.obs = meta
adata.uns['all_markers'] = markers
#
# Return data
return adata |
py | 7dfe1bcc94542529486aa6bc785275dfdc046c8c | import logging
import pytest
import random
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from ocs_ci.framework.testlib import (
tier4,
tier4a,
tier4b,
ManageTest,
ignore_leftovers,
ipi_deployment_required,
)
from ocs_ci.ocs import constants, node
from ocs_ci.ocs.cluster import CephCluster, is_lso_cluster
from ocs_ci.ocs.resources.storage_cluster import osd_encryption_verification
from ocs_ci.framework.pytest_customization.marks import (
skipif_openshift_dedicated,
skipif_bmpsi,
bugzilla,
skipif_external_mode,
)
from ocs_ci.helpers.sanity_helpers import Sanity
log = logging.getLogger(__name__)
def select_osd_node_name():
"""
select randomly one of the osd nodes
Returns:
str: the selected osd node name
"""
osd_node_names = node.get_osd_running_nodes()
osd_node_name = random.choice(osd_node_names)
log.info(f"Selected OSD is {osd_node_name}")
return osd_node_name
def check_node_replacement_verification_steps(
old_node_name, new_node_name, old_osd_node_names, old_osd_ids
):
"""
Check if the node replacement verification steps finished successfully.
Args:
old_node_name (str): The name of the old node that has been deleted
new_node_name (str): The name of the new node that has been created
        old_osd_node_names (list): The OSD node names before the replacement
old_osd_ids (list): List of the old osd ids
Raises:
AssertionError: If the node replacement verification steps failed.
"""
new_osd_node_name = node.wait_for_new_osd_node(old_osd_node_names, timeout=1500)
assert new_osd_node_name, "New osd node not found"
assert node.node_replacement_verification_steps_ceph_side(
old_node_name, new_node_name, new_osd_node_name
)
assert node.node_replacement_verification_steps_user_side(
old_node_name, new_node_name, new_osd_node_name, old_osd_ids
)
def delete_and_create_osd_node(osd_node_name):
"""
Delete an osd node, and create a new one to replace it
Args:
osd_node_name (str): The osd node name to delete
"""
new_node_name = None
old_osd_ids = node.get_node_osd_ids(osd_node_name)
old_osd_node_names = node.get_osd_running_nodes()
# error message for invalid deployment configuration
msg_invalid = (
"ocs-ci config 'deployment_type' value "
f"'{config.ENV_DATA['deployment_type']}' is not valid, "
f"results of this test run are all invalid."
)
if config.ENV_DATA["deployment_type"] == "ipi":
if is_lso_cluster():
# TODO: Implement functionality for Internal-Attached devices mode
# once ocs-ci issue #4545 is resolved
# https://github.com/red-hat-storage/ocs-ci/issues/4545
pytest.skip("Functionality not implemented for this deployment mode")
else:
new_node_name = node.delete_and_create_osd_node_ipi(osd_node_name)
elif config.ENV_DATA["deployment_type"] == "upi":
if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
new_node_name = node.delete_and_create_osd_node_aws_upi(osd_node_name)
elif config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
if is_lso_cluster():
new_node_name = node.delete_and_create_osd_node_vsphere_upi_lso(
osd_node_name, use_existing_node=False
)
else:
new_node_name = node.delete_and_create_osd_node_vsphere_upi(
osd_node_name, use_existing_node=False
)
else:
log.error(msg_invalid)
pytest.fail(msg_invalid)
log.info("Start node replacement verification steps...")
check_node_replacement_verification_steps(
osd_node_name, new_node_name, old_osd_node_names, old_osd_ids
)
@tier4
@tier4a
@ignore_leftovers
@ipi_deployment_required
@skipif_openshift_dedicated
@skipif_bmpsi
@skipif_external_mode
class TestNodeReplacementWithIO(ManageTest):
"""
Knip-894 Node replacement proactive with IO
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
Initialize Sanity instance
"""
self.sanity_helpers = Sanity()
def test_nodereplacement_proactive_with_io_running(
self,
pvc_factory,
pod_factory,
dc_pod_factory,
bucket_factory,
rgw_bucket_factory,
):
"""
Knip-894 Node Replacement proactive when IO running in the background
"""
# Get worker nodes
worker_node_list = node.get_worker_nodes()
log.info(f"Current available worker nodes are {worker_node_list}")
osd_node_name = select_osd_node_name()
log.info("Creating dc pod backed with rbd pvc and running io in bg")
for worker_node in worker_node_list:
if worker_node != osd_node_name:
rbd_dc_pod = dc_pod_factory(
interface=constants.CEPHBLOCKPOOL, node_name=worker_node, size=20
)
pod.run_io_in_bg(rbd_dc_pod, expect_to_fail=False, fedora_dc=True)
log.info("Creating dc pod backed with cephfs pvc and running io in bg")
for worker_node in worker_node_list:
if worker_node != osd_node_name:
cephfs_dc_pod = dc_pod_factory(
interface=constants.CEPHFILESYSTEM, node_name=worker_node, size=20
)
pod.run_io_in_bg(cephfs_dc_pod, expect_to_fail=False, fedora_dc=True)
delete_and_create_osd_node(osd_node_name)
# Creating Resources
log.info("Creating Resources using sanity helpers")
self.sanity_helpers.create_resources(
pvc_factory, pod_factory, bucket_factory, rgw_bucket_factory
)
# Deleting Resources
self.sanity_helpers.delete_resources()
# Verify everything running fine
log.info("Verifying All resources are Running and matches expected result")
self.sanity_helpers.health_check(tries=120)
# Verify OSD is encrypted
if config.ENV_DATA.get("encryption_at_rest"):
osd_encryption_verification()
@tier4
@tier4a
@ignore_leftovers
@skipif_openshift_dedicated
@skipif_bmpsi
@skipif_external_mode
class TestNodeReplacement(ManageTest):
"""
Knip-894 Node replacement proactive
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
Initialize Sanity instance
"""
self.sanity_helpers = Sanity()
def test_nodereplacement_proactive(self):
"""
Knip-894 Node Replacement proactive(without IO running)
"""
osd_node_name = select_osd_node_name()
delete_and_create_osd_node(osd_node_name)
# Verify everything running fine
log.info("Verifying All resources are Running and matches expected result")
self.sanity_helpers.health_check(tries=120)
# Verify OSD encrypted
if config.ENV_DATA.get("encryption_at_rest"):
osd_encryption_verification()
ceph_cluster_obj = CephCluster()
assert ceph_cluster_obj.wait_for_rebalance(
timeout=1800
), "Data re-balance failed to complete"
@tier4b
@ignore_leftovers
@bugzilla("1840539")
@pytest.mark.polarion_id("OCS-2535")
@skipif_external_mode
class TestNodeReplacementTwice(ManageTest):
"""
Node replacement twice:
node_x -> node_y
node_z -> node_x
After node_replacement, the deleted node (node_x) suppose to be removed from the ceph-osd-tree.
The BZ deals with the SECOND node_replacement.
The existence of the deleted node (node_x from previous replacement) in the crash-map ends with:
1. node is labeled for rack correctly
2. ceph side host still on the old rack
"""
def test_nodereplacement_twice(self):
for i in range(2):
# Get random node name for replacement
node_name_to_delete = select_osd_node_name()
log.info(f"Selected node for replacement: {node_name_to_delete}")
delete_and_create_osd_node(node_name_to_delete)
ct_pod = pod.get_ceph_tools_pod()
tree_output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
log.info("ceph osd tree output:")
log.info(tree_output)
assert not (
node_name_to_delete in str(tree_output)
), f"Deleted host {node_name_to_delete} still exist in ceph osd tree after node replacement"
|
py | 7dfe1cea000358a4e2f2f40af695960ae5dbe459 | '''
==========================================================================
GrantHoldArbiter_test.py
==========================================================================
Author : Yanghui Ou
Date : Jan 22, 2020
'''
from pymtl3 import *
from .GrantHoldArbiter import GrantHoldArbiter
def test_simple():
arb = GrantHoldArbiter( nreqs=4 )
arb.elaborate()
arb.apply( DefaultPassGroup() )
arb.sim_reset()
arb.reqs @= 0b0011
arb.hold @= 0
arb.en @= 1
arb.sim_tick()
g0 = arb.grants.clone()
arb.sim_tick()
arb.hold @= 1
arb.sim_eval_combinational()
assert arb.grants == g0
arb.sim_tick()
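    # Hold semantics: while `hold` stays high the arbiter keeps returning the grant
    # captured in g0; deasserting `hold` on a later cycle lets arbitration move on to
    # the next pending requester.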
|
py | 7dfe1d58c75d0f02573560f51c06df1adfe09e09 | #!/usr/bin/env python
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__))+'/lib')
from cloudagents import CloudAgent
from time import sleep
ca = CloudAgent()
ca.required_config = {
"name": "Wringer",
"version": "0.1.0",
"author": "Jeff Kramer",
"url": "http://www.hpcloud.com/",
"help": """This script does integration testing.""",
"config":
[{
"name": "count",
"regexp": "^\d+$",
"title": "Count",
"description": "Number to count down from.",
"type": "string",
"required": True
}
]
}
def agent():
count = int(ca.conf['count'])
for num in range(count,-1,-1):
sleep(1)
percent = 100-int((float(num)/float(count))*100.0)
ca.log(str(num),"Counted down to "+str(num)+".",percent)
ca.run(agent)
|
py | 7dfe1dae1b3b1c76850b2f81d093956ff049d1af | #!/usr/bin/env python3
from telegram.ext import Dispatcher,CommandHandler,CallbackContext
from telegram import BotCommand,Update
from json import dumps
msg_type = {
"video":["file_id","file_unique_id","width","height","duration"],
"photo":["file_id","file_unique_id","width","height","file_size"],
"audio":["file_id","file_unique_id","mime_type","file_size"],
"animation":["file_id","file_unique_id","width","height","duration"],
"sticker":["file_id","file_unique_id","width","height","is_animated"],
"video_note":["file_id","file_unique_id","length","duration"],
"voice":["file_id","file_unique_id","duration","mime_type","file_size"]
}
def getobjinfo (msgtype,msgobj):
if msgtype == "photo":
msg = f"PhotoSize("
else:
msg = f"{msgtype.capitalize()}("
for i in msg_type[msgtype]:
msg += str(f'{i}="{msgobj.__dict__[i]}",')
return f"{msg[:-1]})\n"
def getmsgtype(update,context):
if update.message.reply_to_message:
if update.message.reply_to_message.video:
video = update.message.reply_to_message.video
update.message.reply_video(video,caption=f'{getobjinfo("video",video)}\nMade By Parker&hdcola')
elif update.message.reply_to_message.photo:
msg = ''
photo = update.message.reply_to_message.photo
lastindex = -1
for i in photo:
lastindex += 1
msg += f"\nPhoto {lastindex+1}:\n{getobjinfo('photo',i)}\n"
msg += '\nMade By Parker&hdcola'
update.message.reply_photo(photo[lastindex],caption=msg)
elif update.message.reply_to_message.audio:
audio = update.message.reply_to_message.audio
update.message.reply_audio(audio,caption=f'{getobjinfo("audio",audio)}\nMade By Parker&hdcola')
elif update.message.reply_to_message.animation:
animation = update.message.reply_to_message.animation
update.message.reply_animation(animation,caption=f'{getobjinfo("animation",animation)}\nMade By Parker&hdcola')
elif update.message.reply_to_message.sticker:
sticker = update.message.reply_to_message.sticker
update.message.reply_text(f'{getobjinfo("sticker",sticker)}\nMade By Parker&hdcola')
elif update.message.reply_to_message.video_note:
video_note = update.message.reply_to_message.sticker
update.message.reply_text(f'{getobjinfo("video_note",video_note)}\nMade By Parker&hdcola')
elif update.message.reply_to_message.voice:
voice = update.message.reply_to_message.voice
update.message.reply_voice(voice,caption=f'{getobjinfo("voice",voice)}\nMade By Parker&hdcola')
else:
info(update,context)
else:
info(update,context)
def info(update : Update, context : CallbackContext):
u = str(update)
u = dumps(eval(u),indent=2)
update.message.reply_text(text=u)
# context.bot.send_message(update.effective_user.id,text=u)
def add_dispatcher(dp:Dispatcher):
dp.add_handler(CommandHandler('ainfo', getmsgtype))
dp.add_handler(CommandHandler("info", info))
return [BotCommand('ainfo','得到消息中的对象声明(Made by Parker&hdcola)'),BotCommand('info','查看消息的信息数据')]
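# Wiring sketch (hypothetical surrounding bot code, python-telegram-bot v13-style
# Updater with a placeholder token):
#
#   from telegram.ext import Updater
#   updater = Updater("YOUR_BOT_TOKEN")
#   commands = add_dispatcher(updater.dispatcher)
#   updater.bot.set_my_commands(commands)
#   updater.start_polling()
#   updater.idle()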
|
py | 7dfe1e39ce74497e45a8baf832cb5b0c61e46e16 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from qiita_core.exceptions import QiitaError
class QiitaDBError(QiitaError):
"""Base class for all qiita_db exceptions"""
pass
class QiitaDBNotImplementedError(QiitaDBError):
""""""
pass
class QiitaDBExecutionError(QiitaDBError):
"""Exception for error when executing SQL queries"""
pass
class QiitaDBConnectionError(QiitaDBError):
"""Exception for error when connecting to the db"""
pass
class QiitaDBColumnError(QiitaDBError):
"""Exception when missing table information or excess information passed"""
pass
class QiitaDBDuplicateError(QiitaDBError):
"""Exception when duplicating something in the database"""
def __init__(self, obj_name, attributes):
super(QiitaDBDuplicateError, self).__init__()
self.args = ("The '%s' object with attributes (%s) already exists."
% (obj_name, attributes),)
class QiitaDBStatusError(QiitaDBError):
"""Exception when editing is done with an unallowed status"""
pass
class QiitaDBUnknownIDError(QiitaDBError):
"""Exception for error when an object does not exists in the DB"""
def __init__(self, missing_id, table):
super(QiitaDBUnknownIDError, self).__init__()
self.args = ("The object with ID '%s' does not exists in table '%s'"
% (missing_id, table),)
class QiitaDBDuplicateHeaderError(QiitaDBError):
"""Exception for error when a MetadataTemplate has duplicate columns"""
def __init__(self):
super(QiitaDBDuplicateHeaderError, self).__init__()
self.args = ("Duplicate headers found in MetadataTemplate. Note "
"that the headers are not case-sensitive",)
class QiitaDBIncompatibleDatatypeError(QiitaDBError):
"""When arguments are used with incompatible operators in a query"""
def __init__(self, operator, argument_type):
super(QiitaDBIncompatibleDatatypeError, self).__init__()
self.args = ("The %s operator is not for use with data of type %s" %
(operator, str(argument_type)))
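# Usage sketch (hypothetical IDs, shown only to illustrate the message format):
#
#   raise QiitaDBUnknownIDError(42, "qiita.study")
#   # -> "The object with ID '42' does not exist in table 'qiita.study'"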
|
py | 7dfe2004d5e90bfd72dcfb3a6555fc4932848357 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image_anchor05.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
'D7', self.image_dir + 'yellow.png',
{'x_offset': 1, 'y_offset': 2, 'positioning': 2})
workbook.close()
self.assertExcelEqual()
|
py | 7dfe200a0c942972e4d9a2fd8c7fb8369856d106 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ChemicalTestonConcreteSample(Document):
pass
|
py | 7dfe204c084eddcc43cc4a7d035eaca051832d1d | #https://stackoverflow.com/a/70664652/15181929
import contextlib as _contextlib
try:
import msvcrt as _msvcrt
# Length 0 sequences, length 1 sequences...
_ESCAPE_SEQUENCES = [frozenset(("\x00", "\xe0"))]
_next_input = _msvcrt.getwch
_set_terminal_raw = _contextlib.nullcontext
_input_ready = _msvcrt.kbhit
except ImportError: # Unix
import sys as _sys, tty as _tty, termios as _termios, \
select as _select, functools as _functools
# Length 0 sequences, length 1 sequences...
_ESCAPE_SEQUENCES = [
frozenset(("\x1b",)),
frozenset(("\x1b\x5b", "\x1b\x4f"))]
@_contextlib.contextmanager
def _set_terminal_raw():
fd = _sys.stdin.fileno()
old_settings = _termios.tcgetattr(fd)
try:
_tty.setraw(_sys.stdin.fileno())
yield
finally:
_termios.tcsetattr(fd, _termios.TCSADRAIN, old_settings)
_next_input = _functools.partial(_sys.stdin.read, 1)
def _input_ready():
return _select.select([_sys.stdin], [], [], 0) == ([_sys.stdin], [], [])
_MAX_ESCAPE_SEQUENCE_LENGTH = len(_ESCAPE_SEQUENCES)
def _get_keystroke():
key = _next_input()
while (len(key) <= _MAX_ESCAPE_SEQUENCE_LENGTH and
key in _ESCAPE_SEQUENCES[len(key)-1]):
key += _next_input()
return key
def _flush():
while _input_ready():
_next_input()
def key_pressed(key: str = None, *, flush: bool = True) -> bool:
"""Return True if the specified key has been pressed
Args:
key: The key to check for. If None, any key will do.
flush: If True (default), flush the input buffer after the key was found.
Return:
boolean stating whether a key was pressed.
"""
with _set_terminal_raw():
if key is None:
if not _input_ready():
return False
if flush:
_flush()
return True
while _input_ready():
keystroke = _get_keystroke()
if keystroke == key:
if flush:
_flush()
return True
return False
def print_key() -> None:
"""Print the key that was pressed
Useful for debugging and figuring out keys.
"""
with _set_terminal_raw():
_flush()
print("\\x" + "\\x".join(map("{:02x}".format, map(ord, _get_keystroke()))))
def wait_key(key=None, *, pre_flush=False, post_flush=True) -> str:
"""Wait for a specific key to be pressed.
Args:
key: The key to check for. If None, any key will do.
pre_flush: If True, flush the input buffer before waiting for input.
Useful in case you wish to ignore previously pressed keys.
post_flush: If True (default), flush the input buffer after the key was
found. Useful for ignoring multiple key-presses.
Returns:
The key that was pressed.
"""
with _set_terminal_raw():
if pre_flush:
_flush()
if key is None:
key = _get_keystroke()
if post_flush:
_flush()
return key
while _get_keystroke() != key:
pass
if post_flush:
_flush()
return key
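# Usage sketch (assumes an interactive terminal attached to stdin):
#
#   print("Press any key to continue...")
#   wait_key()
#   if key_pressed("q"):
#       print("q was pressed since the last check")
#   print_key()  # echoes the escape codes of the next keystroke,
#                # e.g. \x1b\x5b\x41 for the Up arrow on Unix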
|
py | 7dfe2105c8785683d46ae44239392071d01a9c70 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.fruitohms import fruitohms
def test_fruitohms():
"""Test module fruitohms.py by downloading
fruitohms.csv and testing shape of
extracted data has 128 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = fruitohms(test_path)
try:
assert x_train.shape == (128, 2)
except:
shutil.rmtree(test_path)
raise()
|
py | 7dfe2238ba320293069f087c5760015499624e47 | """Console script for opencdms."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for opencdms."""
# See click documentation at https://click.palletsprojects.com/
click.echo("Replace this message by putting your code into "
"opencdms.cli.main")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|