ext | sha | content
---|---|---|
py | 1a3e8bb7f3c5ea921ee145bb9ebc29964ed45496 | #!/usr/bin/env python
import csv
import dataset
import json
import locale
import logging
import re
import sys
from collections import OrderedDict
from itertools import groupby
SECTION_BREAK = 'CLEARANCE RATE DATA FOR INDEX OFFENSES'
END_BREAK = ' READ'
FIELDNAMES = ['year', 'state', 'ori7', 'lea_name', 'population', 'mos', 'agg_assault_cleared', 'agg_assault_cleared_pct', 'agg_assault_count', 'arson_cleared', 'arson_cleared_pct', 'arson_count', 'burglary_cleared', 'burglary_cleared_pct', 'burglary_count', 'forcible_rape_cleared', 'forcible_rape_cleared_pct', 'forcible_rape_count', 'larceny_theft_cleared', 'larceny_theft_cleared_pct', 'larceny_theft_count', 'murder_cleared', 'murder_cleared_pct', 'murder_count', 'mvt_cleared', 'mvt_cleared_pct', 'mvt_count', 'property_cleared', 'property_cleared_pct', 'property_count', 'robbery_cleared', 'robbery_cleared_pct', 'robbery_count', 'violent_cleared', 'violent_cleared_pct', 'violent_count']
CRIME_TYPES = [
'violent',
'property',
'murder',
#'forcible_rape',
'robbery',
'agg_assault',
'burglary',
'larceny_theft',
'mvt',
'arson',
]
IMPORT_FILES = [
('2011', '2011-clearance-rates.txt'),
('2012', '2012-clearance-rates.txt'),
('2013', '2013-clearance-rates.txt'),
('2014', '2014-clearance-rates.txt'),
]
POPULATION_BUCKETS = [
{
'name': '1,000,000 and above',
'low': 1000000,
'high': None,
},
{
'name': '500,000 to 999,999',
'low': 500000,
'high': 999999,
},
{
'name': '250,000 to 499,999',
'low': 250000,
'high': 499999,
},
{
'name': '100,000 to 249,999',
'low': 100000,
'high': 249999,
},
{
'name': '50,000 to 99,999',
'low': 50000,
'high': 99999,
},
{
'name': '25,000 to 49,999',
'low': 25000,
'high': 49999,
},
{
'name': '10,000 to 24,999',
'low': 10000,
'high': 24999,
},
{
'name': 'Under 10,000',
'low': 1, # Population should never be 0
'high': 9999,
},
]
locale.setlocale(locale.LC_ALL, 'en_US')
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('ucr-parser')
db = dataset.connect('postgresql:///ucr_clearance')
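# Each agency record in the raw UCR text spans four physical lines (as implied by the
# field assignments in parse() below): line 0 holds the ORI7 code and agency name,
# line 1 the months of submission plus offense counts, line 2 the population plus
# cleared counts, and line 3 the cleared percentages.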
def parse(file_path, year):
output = []
f = open(file_path)
line = skip_to_start(f)
while True:
row = {
'year': year
}
for i in range(0, 4):
if SECTION_BREAK in line:
line = skip_section_break(f)
# We're done!
if END_BREAK in line or line == '':
return output
line_parts = split_line(line)
if i == 0:
row['ori7'] = line_parts[0]
if row['ori7'].startswith('0'):
row['ori7'] = row['ori7'][1:]
row['lea_name'] = ' '.join(line_parts[1:])
row['state'] = parse_state(row['ori7'])
if i == 1:
row['mos'] = parse_int(line_parts[0])
row['violent_count'] = parse_int(line_parts[3])
row['property_count'] = parse_int(line_parts[4])
row['murder_count'] = parse_int(line_parts[5])
row['forcible_rape_count'] = parse_int(line_parts[6])
row['robbery_count'] = parse_int(line_parts[7])
row['agg_assault_count'] = parse_int(line_parts[8])
row['burglary_count'] = parse_int(line_parts[9])
row['larceny_theft_count'] = parse_int(line_parts[10])
row['mvt_count'] = parse_int(line_parts[11])
row['arson_count'] = parse_int(line_parts[12])
if i == 2:
row['population'] = parse_int(line_parts[0])
row['violent_cleared'] = parse_int(line_parts[3])
row['property_cleared'] = parse_int(line_parts[4])
row['murder_cleared'] = parse_int(line_parts[5])
row['forcible_rape_cleared'] = parse_int(line_parts[6])
row['robbery_cleared'] = parse_int(line_parts[7])
row['agg_assault_cleared'] = parse_int(line_parts[8])
row['burglary_cleared'] = parse_int(line_parts[9])
row['larceny_theft_cleared'] = parse_int(line_parts[10])
row['mvt_cleared'] = parse_int(line_parts[11])
row['arson_cleared'] = parse_int(line_parts[12])
if i == 3:
row['violent_cleared_pct'] = parse_pct(line_parts[1])
row['property_cleared_pct'] = parse_pct(line_parts[2])
row['murder_cleared_pct'] = parse_pct(line_parts[3])
row['forcible_rape_cleared_pct'] = parse_pct(line_parts[4])
row['robbery_cleared_pct'] = parse_pct(line_parts[5])
row['agg_assault_cleared_pct'] = parse_pct(line_parts[6])
row['burglary_cleared_pct'] = parse_pct(line_parts[7])
row['larceny_theft_cleared_pct'] = parse_pct(line_parts[8])
row['mvt_cleared_pct'] = parse_pct(line_parts[9])
row['arson_cleared_pct'] = parse_pct(line_parts[10])
line = f.readline()
logger.debug('Writing row for %s (%s), %s' % (row['ori7'], row['lea_name'], year))
output.append(row)
def skip_to_start(f):
"""
Skip to start of data
"""
while True:
line = f.readline()
if SECTION_BREAK in line:
break
return line
def skip_section_break(f):
"""
Read four lines after section break
"""
f.readline()
f.readline()
f.readline()
return f.readline()
def split_line(line):
return re.sub(' +', ' ', line).strip().split(' ')
def parse_pct(value):
"""
Parse percentage
"""
return float(value)/100
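# Example (sketch): parse_pct('45.3') returns 0.453. parse_int below relies on the
# en_US locale set above, so comma-grouped values such as '1,234' parse to 1234.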
def parse_int(value):
"""
Parse integer
"""
return locale.atoi(value)
def parse_state(value):
"""
Parse state from LEA code.
"""
return value[0:2]
def get_data():
"""
Get and parse raw data
"""
all_data = []
for year, file in IMPORT_FILES:
data_file = 'data/%s' % file
data = parse(data_file, year)
all_data = all_data + data
return all_data
def get_agencies():
"""
Get agency data
"""
agencies = {}
with open('data/agency-crosswalk.csv') as f:
reader = csv.DictReader(f)
for row in reader:
agencies[row['ORI7']] = row
return agencies
def write_agency_lookup():
"""
Write agency lookup
"""
result = db.query("""
select
a.ori7, a.agency, a.agentype, a.state
from agencies as a join clearance_rates as c on a.ori7 = c.ori7
group by a.ori7, a.agency, a.agentype, a.state
order by a.ori7
""")
dataset.freeze(result, format='csv', filename='output/agency_names.csv')
def write_clearance_json():
"""
Write json data
"""
result = db.query("""
select
a.ori7, a.agency, a.state, a.agentype,
c.year, c.population, c.mos,
c.violent_count, c.violent_cleared, c.violent_cleared_pct,
c.property_count, c.property_cleared, c.property_cleared_pct,
c.murder_count, c.murder_cleared, c.murder_cleared_pct,
c.forcible_rape_count, c.forcible_rape_cleared, c.forcible_rape_cleared_pct,
c.robbery_count, c.robbery_cleared, c.robbery_cleared_pct,
c.agg_assault_count, c.agg_assault_cleared, c.agg_assault_cleared_pct,
c.burglary_count, c.burglary_cleared, c.burglary_cleared_pct,
c.mvt_count, c.mvt_cleared, c.mvt_cleared_pct,
c.larceny_theft_count, c.larceny_theft_cleared, c.larceny_theft_cleared_pct,
c.arson_count, c.arson_cleared, c.arson_cleared_pct
from clearance_rates as c join agencies as a on a.ori7 = c.ori7
order by c.ori7, c.year
""")
medians = analyze_medians()
data = []
for row in result:
data.append(row)
for ori7, yearly_data in groupby(data, lambda x: x['ori7']):
output = {
'ori7': ori7,
'crimes': OrderedDict(),
}
for row in yearly_data:
year = row['year']
has_median = False
if row['agentype'] == 'Municipal police':
has_median = True
bucket = get_population_bucket(row['population'])
if bucket and not output.get('medians'):
output['medians'] = OrderedDict()
if not output.get('agency'):
output['agency'] = row['agency']
output['state'] = row['state']
output['agency_type'] = row['agentype']
if year == '2013' and has_median and bucket:
output['population_bucket'] = bucket
output['population'] = row['population']
for field in CRIME_TYPES:
if not output['crimes'].get(field):
output['crimes'][field] = {}
if has_median and bucket:
if not output['medians'].get(field):
output['medians'][field] = {}
output['medians'][field][year] = {}
output['crimes'][field][year] = {}
output['crimes'][field][year]['mos'] = row['mos']
for measure in ['count', 'cleared', 'cleared_pct']:
if row['mos'] < 12:
output['crimes'][field][year][measure] = None
else:
row_value = row['%s_%s' % (field, measure)]
if measure == 'cleared' and row_value == 0 and row['%s_%s' % (field, 'count')] > 0:
output['data_warning'] = True
output['crimes'][field][year][measure] = row_value
if output.get('medians') and bucket:
median_key = 'median_%s_%s' % (field, measure)
median_value = medians[year][bucket][median_key]
if row['mos'] < 12:
output['medians'][field][year][measure] = None
else:
output['medians'][field][year][measure] = median_value
with open('output/%s.json' % ori7, 'w') as outfile:
logger.debug('Writing output/%s.json' % ori7)
json.dump(output, outfile)
def get_population_bucket(population):
"""
Get population bucket
"""
for bucket in POPULATION_BUCKETS:
if bucket['high']:
if population >= bucket['low'] and population <= bucket['high']:
return bucket['name']
else:
if population >= bucket['low']:
return bucket['name']
return None
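# Example (sketch): get_population_bucket(75000) returns '50,000 to 99,999';
# populations of 1,000,000 or more fall into the open-ended top bucket.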
def analyze_medians():
"""
Analyze medians
"""
# Output is per-year, per-bucket, per-crime-type
output = {}
# Loop over years
for year, filename in IMPORT_FILES:
output[year] = {}
for bucket in POPULATION_BUCKETS:
where = 'population >= %d' % bucket['low']
if bucket['high']:
where = '%s and population <= %d' % (where, bucket['high'])
result = db.query("""
select
median(violent_count) as median_violent_count,
median(violent_cleared) as median_violent_cleared,
median(violent_cleared_pct) as median_violent_cleared_pct,
median(property_count) as median_property_count,
median(property_cleared) as median_property_cleared,
median(property_cleared_pct) as median_property_cleared_pct,
median(murder_count) as median_murder_count,
median(murder_cleared) as median_murder_cleared,
median(murder_cleared_pct) as median_murder_cleared_pct,
median(robbery_count) as median_robbery_count,
median(robbery_cleared) as median_robbery_cleared,
median(robbery_cleared_pct) as median_robbery_cleared_pct,
median(agg_assault_count) as median_agg_assault_count,
median(agg_assault_cleared) as median_agg_assault_cleared,
median(agg_assault_cleared_pct) as median_agg_assault_cleared_pct,
median(burglary_count) as median_burglary_count,
median(burglary_cleared) as median_burglary_cleared,
median(burglary_cleared_pct) as median_burglary_cleared_pct,
median(mvt_count) as median_mvt_count,
median(mvt_cleared) as median_mvt_cleared,
median(mvt_cleared_pct) as median_mvt_cleared_pct,
median(larceny_theft_count) as median_larceny_theft_count,
median(larceny_theft_cleared) as median_larceny_theft_cleared,
median(larceny_theft_cleared_pct) as median_larceny_theft_cleared_pct,
median(arson_count) as median_arson_count,
median(arson_cleared) as median_arson_cleared,
median(arson_cleared_pct) as median_arson_cleared_pct
from clearance_rates as c join agencies as a on a.ori7 = c.ori7
where mos=12 and year='%s'
and a.agentype='Municipal police'
and %s
""" % (year, where))
data = []
for row in result:
data.append(row)
output[year][bucket['name']] = data[0]
return output
def write_rates_to_db(data):
"""
Write clearance rate data to db
"""
logger.info('writing rates')
table = db['clearance_rates']
table.insert_many(data)
def write_agencies_to_db(agencies):
"""
Write agency data to db
"""
logger.info('writing agencies')
table = db['agencies']
process_agencies = []
for agency in agencies.values():
if not agency.get('ORI7'):
continue
processed_agency = {}
for key, value in agency.items():
# Skip the empty column whose meaning is not known
if key != '':
processed_agency[key.lower()] = value
process_agencies.append(processed_agency)
table.insert_many(process_agencies)
if __name__ == '__main__':
logger.info('Parsing agency data')
agencies = get_agencies()
logger.info('Writing agency data to db')
write_agencies_to_db(agencies)
logger.info('Parsing clearance data')
data = get_data()
logger.info('Writing clearance data to db')
write_rates_to_db(data)
logger.info('Writing agency lookup')
write_agency_lookup()
logger.info('Writing individual JSON files')
write_clearance_json()
|
py | 1a3e8ce5735154ce8ff25725a4e2bd25ac3d79f4 | import torch.nn as nn
import torch.nn.functional as F
# from ws.wsRLInterfaces.PARAM_KEY_NAMES import STATE_DIMENSIONS, ACTION_DIMENSIONS
class ActorCritic(nn.Module):
def __init__(self, app_info):
super(ActorCritic, self).__init__()
env = app_info.ENV
action_size = env.fn_get_action_size()
state_size = env.fn_get_state_size()
hidden_layer_size = 256
self._app_info = app_info
self.state_to_hidden = nn.Linear(state_size, hidden_layer_size)
self.action_layer = nn.Linear(hidden_layer_size, action_size)
self.value_layer = nn.Linear(hidden_layer_size, 1)
self.state_value = None
# self.state_values = []
def forward(self, state):
action_info = None
hidden = self.state_to_hidden(state)
state = F.relu(hidden)
self.state_value = self.value_layer(state)
action_info = self.action_layer(state)
policy = F.softmax(action_info, dim=-1)  # explicit dim avoids the PyTorch deprecation warning
return policy
def get_state_value(self):
return self.state_value
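# Minimal usage sketch (assumes app_info.ENV exposes fn_get_action_size/fn_get_state_size
# and that `state` is a float tensor of shape [state_size]):
#   model = ActorCritic(app_info)
#   policy = model(state)             # action probabilities from the softmax head
#   value = model.get_state_value()   # critic estimate cached during forward()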
|
py | 1a3e8cf6f862493cbe6472526fc9b7dc594b59b3 | from django.contrib import admin
from .models import Contact
from .models import UserDetail
# Register your models here.
admin.site.register(Contact)
admin.site.register(UserDetail)
|
py | 1a3e8d5097ce35285e767a952dfb764fbe98b6bd | #!/usr/bin/env python
# -.- coding: UTF-8 -.-
# Created by Jordan Newman 10th June 2017
import os, sys, socket, struct
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = '\033[30m', '\033[31m', '\033[32m', '\033[33m', '\033[34m', '\033[1;35m', '\033[36m', '\033[37m'
if not sys.platform.startswith('linux'):
raise SystemExit("{0}This program only works on {1}linux{2} machines{3}".format(RED, YELLOW, RED, WHITE))
try:
import netifaces
from netifaces import AF_INET
netifacesInstalled = True
except:
print("{0}Please install the {1}\'netifaces\'{2} python library to enable all features of this command{3}".format(RED, GREEN, RED, WHITE))
netifacesInstalled = False
def displayInterfaces(interfaces):
print("""{0}.__ ______________________________________________ _______ __________ ________
| |\ \__ ___/_ ____/_ _ \_ ____/ _ \ \ __ \\\\_ _____// _____/
| |/ | \| | | __)_ | _/| __)/ /_\ \/ / \/ | __)_ \____ \
| / | \ | | \| | \| \/ | \ \____| \/ \\
|__\___|_ /___| /______ /|___|_ /\__ /\____|__ /\____ /______ /______ /
\/ \/ \/ \/ \/ \/ \/ \/{1}""".format(GREEN, WHITE))
for i in interfaces:
print(u'{0}\n\u250c[{1}{2}{3}]\n\u251c\u2500\u2500[{4}MAC{5}]\u257a\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2578[{6}{7}{8}]'.format(RED, BLACK, i['name'], RED, YELLOW, RED, GREEN, i['mac'], RED))
print(u'\u251c\u2500\u2500[{0}IP{1}]\u257a\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2578[{2}{3}{4}]'.format(YELLOW, RED, GREEN, i['ip'], RED))
print(u'\u251c\u2500\u2500[{0}Gateway{1}]\u257a\u2500\u2500\u2500\u2500\u2500\u2578[{2}{3}{4}]'.format(YELLOW, RED, GREEN, i['gateway'], RED))
print(u'\u2514\u2500\u2500[{0}Gateway MAC{1}]\u257a\u2500\u2578[{2}{3}{4}]{5}'.format(YELLOW, RED, GREEN, i['gatewayMac'], RED, WHITE))
def getInterfaces():
interfaces = os.listdir("/sys/class/net")
interfacesList = []
for interface in interfaces:
mac = getMAC(interface)
ip = getIP(interface)
gw = getGateway()
gwMac = getGatewayMAC(interface)
interfacesList.append({"name": interface, "ip": ip, "mac": mac, "gateway": gw, "gatewayMac": gwMac})
return interfacesList
def getGateway():
with open('/proc/net/route') as r:
for line in r:
fields = line.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
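# /proc/net/route columns are Iface, Destination, Gateway, Flags, ...; the loop above
# keeps only the default route (destination 00000000) whose RTF_GATEWAY flag (0x2) is
# set, then unpacks the little-endian hex gateway address into dotted-quad form.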
def getMAC(iFace = None):
if iFace != None:
try:
conn = open('/sys/class/net/'+iFace+'/address')
mac = conn.read().strip()
conn.close()
return mac
except:
pass # /sys/class/net/<iFace>/address probably didn't exist
else:
return 'unknown'
def getGatewayMAC(iFace = None):
entries = {}
with open('/proc/net/arp') as arpFile:
for line in arpFile:
fields = line.strip().split()
if iFace == None:
return fields[3]
entries[fields[5]] = fields[3]
if iFace == None or iFace not in entries:
entriesKeys = list(entries.keys())  # list() so the keys can be indexed on Python 3
if len(entriesKeys) >= 2:
return entries[entriesKeys[1]]
else:
return "unknown"
else:
return entries[iFace]
def getIP(iFace = None):
if netifacesInstalled == True and iFace != None:
internetBroadcastInfo = netifaces.ifaddresses(iFace)[AF_INET]
return internetBroadcastInfo[0]['addr']
iFaceIP = socket.gethostbyname(socket.gethostname())
if iFaceIP[0:6] == "127.0." and iFace != "lo":
return "unknown"
return iFaceIP
def resizeTerminal():
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=26, cols=96))
if __name__ == "__main__":
#columns = os.popen('stty size', 'r').read().split()[1]
#if int(columns) < 95:
# resizeTerminal()
# I made the banner thinner, so there is no longer any need to resize terminal window :)
iFaces = getInterfaces()
displayInterfaces(iFaces)
|
py | 1a3e8d5702244055faf850d848dfb06f0632436b | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 1
num_epochs_train = 175 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(64,64)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sliced:meta:PatientAge": (batch_size, 1),
"sliced:meta:PatientSex": (batch_size, 1),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 200 # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
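# The test-time-augmentation average above converts each predicted CDF to a PDF, takes a
# normalised geometric mean across augmentations, and cumsums back to a CDF (the utils.*
# helpers are assumed to behave as their names suggest).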
# Architecture
def build_model():
#import here, such that our global variables are not overridden!
from . import j6_2ch_128mm, j6_4ch
meta_2ch = j6_2ch_128mm.build_model()
meta_4ch = j6_4ch.build_model()
l_age = nn.layers.InputLayer(data_sizes["sliced:meta:PatientAge"])
l_sex = nn.layers.InputLayer(data_sizes["sliced:meta:PatientSex"])
l_meta_2ch_systole = meta_2ch["meta_outputs"]["systole"]
l_meta_2ch_diastole = meta_2ch["meta_outputs"]["diastole"]
l_meta_4ch_systole = meta_4ch["meta_outputs"]["systole"]
l_meta_4ch_diastole = meta_4ch["meta_outputs"]["diastole"]
l_meta_systole = nn.layers.ConcatLayer([l_age, l_sex, l_meta_2ch_systole, l_meta_4ch_systole])
l_meta_diastole = nn.layers.ConcatLayer([l_age, l_sex, l_meta_2ch_diastole, l_meta_4ch_diastole])
ldsys1 = nn.layers.DenseLayer(l_meta_systole, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
lddia1 = nn.layers.DenseLayer(l_meta_diastole, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
submodels = [meta_2ch, meta_4ch]
return {
"inputs": dict({
"sliced:meta:PatientAge": l_age,
"sliced:meta:PatientSex": l_sex,
}, **{ k: v for d in [model["inputs"] for model in submodels]
for k, v in list(d.items()) }
),
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": dict({
}, **{ k: v for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in list(d.items()) }
),
"pretrained":{
j6_2ch_128mm.__name__: meta_2ch["outputs"],
j6_4ch.__name__: meta_4ch["outputs"],
},
"cutoff_gradients": [
] + [ v for d in [model["meta_outputs"] for model in submodels if "meta_outputs" in model]
for v in list(d.values()) ]
}
|
py | 1a3e8df0ef39a6a18ad84a5ea6e316322176ac49 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, EqualTo, Email, ValidationError
import email_validator
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
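# WTForms automatically runs any validate_<fieldname> method as an extra inline
# validator for that field, which is how the two uniqueness checks above get invoked.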
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
|
py | 1a3e8e60f1059501d3f025f83fc6ca915ed94c90 | import re
from queries import *
from expresiones import *
# -----------------------------------------------------------------------------
# Grupo 6
#
# Universidad de San Carlos de Guatemala
# Facultad de Ingenieria
# Escuela de Ciencias y Sistemas
# Organizacion de Lenguajes y Compiladores 2
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# BEGIN LEXICAL ANALYZER
# -----------------------------------------------------------------------------
# reserved words of the language
reservadas = {
# SQL RESERVED WORDS
'show' : 'SHOW',
'databases' : 'DATABASES',
'database' : 'DATABASE',
'tables' : 'TABLES',
'columns' : 'COLUMNS',
'from' : 'FROM',
'select' : 'SELECT',
'distinct' : 'DISTINCT',
'limit' : 'LIMIT',
'offset' : 'OFFSET',
'of':'OF',
'order' : 'ORDER',
'by' : 'BY',
'where' : 'WHERE',
'and' : 'AND',
'or' : 'OR',
'not' : 'NOT',
'in' : 'IN',
'concat' : 'CONCAT',
'only':'ONLY',
'as' : 'AS',
'sqrt' : 'SQRT',
'avg' : 'AVG',
'sum' : 'SUM',
'cont' :'CONT',
'desc' : 'DESC',
'asc' : 'ASC',
'like' : 'LIKE',
'min' : 'MIN',
'max' : 'MAX',
'abs' : 'ABS',
'on' : 'ON',
'union' : 'UNION',
'all' : 'ALL',
'insert' : 'INSERT',
'into' : 'INTO',
'values' : 'VALUES',
'update' : 'UPDATE',
'set' : 'SET',
'delete' : 'DELETE',
'create' : 'CREATE',
'primary' : 'PRIMARY',
'key' : 'KEY',
'null' : 'NULL',
'nulls':'NULLS',
'unique' : 'UNIQUE',
'check' : 'CHECK',
'cbrt' : 'CBRT',
'ceil' : 'CEIL',
'ceiling' : 'CEILING',
'degrees' : 'DEGREES',
'div':'DIV',
'exp':'EXP',
'factorial':'FACTORIAL',
'floor':'FLOOR',
'gcd':'GCD',
'lcm':'LCM',
'ln':'LN',
'log':'LOG',
'log10':'LOG10',
#'current':'CURRENT',
'default' : 'DEFAULT',
'auto_increment' : 'AUTO_INCREMENT',
'alter' : 'ALTER',
'table' : 'TABLE',
'add' : 'ADD',
'drop' : 'DROP',
'column' : 'COLUMN',
'rename' : 'RENAME',
'to' : 'TO',
'replace' : 'REPLACE',
'type' : 'TYPE',
'enum' : 'ENUM',
'if' : 'IF',
'exists' : 'EXISTS',
'min_scale':'MIN_SCALE',
'mod':'MOD',
'pi':'PI',
'power':'POWER',
'radians':'RADIANS',
'round':'ROUND',
'scale':'SCALE',
'sign':'SIGN',
'mode' : 'MODE',
'owner' : 'OWNER',
'constraint' : 'CONSTRAINT',
'foreign' : 'FOREIGN',
'references' : 'REFERENCES',
'inherits' : 'INHERITS',
'group' : 'GROUP',
'having' : 'HAVING',
'inner' : 'INNER',
'outer' : 'OUTER',
'trim_scale':'TRIM_SCALE',
'trunc':'TRUNC',
'width_bucket':'WIDTH_BUCKET',
'random':'RANDOM',
'setseed':'SETSEED',
'acos':'ACOS',
'acosd':'ACOSD',
'asin':'ASIN',
'asind':'ASIND',
'atan':'ATAN',
'atan2':'ATAN2',
'cos':'COS',
'cosd':'COSD',
'cot':'COT',
'cotd':'COTD',
'sin':'SIN',
'sind':'SIND',
'tan':'TAN',
'tand':'TAND',
'atand':'ATAND',
'atan2d':'ATAN2D',
'sinh':'SINH',
'cosh':'COSH',
'tanh':'TANH',
'asinh':'ASINH',
'acosh':'ACOSH',
'atanh':'ATANH',
'length':'LENGTH',
'substring':'SUBSTRING',
'trim':'TRIM',
'get_byte':'GET_BYTE',
'md5':'MD5',
'set_byte':'SET_BYTE',
'sha256':'SHA256',
'substr':'SUBSTR',
'convert':'CONVERT',
'encode':'ENCODE',
'decode':'DECODE',
'escape':'ESCAPE',
'any':'ANY',
'some':'SOME',
'using':'USING',
'first':'FIRST',
'last':'LAST',
'current_user':'CURRENT_USER',
'session_user':'SESSION_USER',
'symmetric':'SYMMETRIC',
'left' : 'LEFT',
'right' : 'RIGHT',
'full' : 'FULL',
'join' : 'JOIN',
'natural' : 'NATURAL',
'case' : 'CASE',
'then' : 'THEN',
'begin' : 'BEGIN',
'end' : 'END',
'else' : 'ELSE',
'greatest' : 'GREATEST',
'least' : 'LEAST',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
# allowed data types
'smallint' : 'SMALLINT',
'integer' : 'INTEGER',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'precision' : 'PRECISION',
'money' : 'MONEY',
'varying' : 'VARYING',
'varchar' : 'VARCHAR',
'character' : 'CHARACTER',
'char' : 'CHAR',
'text' : 'TEXT',
'boolean' : 'BOOLEAN',
'timestamp':'TIMESTAMP',
'time':'TIME',
'date':'DATE',
'interval':'INTERVAL',
'year':'YEAR',
'month':'MONTH',
'day':'DAY',
'hour':'HOUR',
'minute':'MINUTE',
'second':'SECOND',
'to':'TO',
'true':'TRUE',
'false':'FALSE',
'declare' : 'DECLARE',
'function' : 'FUNCTION',
'returns' : 'RETURNS',
'returning':'RETURNING',
'exec':'EXEC',
'execute':'EXECUTE',
'between' : 'BETWEEN',
'ilike' : 'ILIKE',
'is':'IS',
'isnull':'ISNULL',
'notnull':'NOTNULL',
#enums
'type':'TYPE',
'ENUM':'ENUM',
# for trim
'leading':'LEADING',
'trailing':'TRAILING',
'both':'BOTH',
'for':'FOR',
'symmetric':'SYMMETRIC',
'use' : 'USE',
'now' : 'NOW',
'extract' : 'EXTRACT',
'date_part' : 'DATE_PART',
'current_date' : 'CURRENT_DATE',
'current_time' : 'CURRENT_TIME',
# INDEX
'index':'INDEX',
'hash':'HASH',
'perform' : 'PERFORM',
'procedure' : 'PROCEDURE',
'out' : 'OUT',
'language' : 'LANGUAGE',
'plpgsql' : 'PLPGSQL',
'rowtype' : 'ROWTYPE',
'alias' : 'ALIAS'
# TODO: review time and date functions
}
# list of tokens handled by the language (only the names used in the productions)
tokens = [
'PUNTOYCOMA',
'MAS',
'MENOS',
'POR',
'DIV',
'DOSPUNTOS',
'PUNTO',
'TYPECAST',
'CORCHETEIZQ',
'CORCHETEDER',
'POTENCIA',
'RESIDUO',
'MAYOR',
'MENOR',
'IGUAL',
'MAYORIGUAL',
'MENORIGUAL',
'DIFERENTE',
'IGUALIGUAL',
'PARENTESISIZQUIERDA',
'PARENTESISDERECHA',
'COMA',
'NOTEQUAL',
'SIMBOLOOR',
'SIMBOLOAND',
'SIMBOLOAND2',
'SIMBOLOOR2',
'NUMERAL',
'COLOCHO',
'DESPLAZAMIENTODERECHA',
'DESPLAZAMIENTOIZQUIERDA',
'DOLAR',
# tokens that carry a value
'DECIMALTOKEN',
'ENTERO',
'CADENA',
'ETIQUETA',
'ID'
] + list(reservadas.values())
# Tokens and the form in which they are used in the language
t_PUNTOYCOMA = r';'
t_MAS = r'\+'
t_MENOS = r'-'
t_POR = r'\*'
t_DIV = r'/'
t_DOSPUNTOS = r':'
t_PUNTO = r'\.'
t_TYPECAST = r'::'
t_CORCHETEDER = r']'
t_CORCHETEIZQ = r'\['
t_POTENCIA = r'\^'
t_RESIDUO = r'%'
t_MAYOR = r'>'
t_MENOR = r'<'
t_IGUAL = r'='
t_MAYORIGUAL = r'>='
t_MENORIGUAL = r'<='
t_DIFERENTE = r'<>'
t_IGUALIGUAL = r'=='
t_PARENTESISIZQUIERDA = r'\('
t_PARENTESISDERECHA = r'\)'
t_COMA = r','
t_NOTEQUAL = r'!='
t_SIMBOLOOR = r'\|\|' # this will concatenate strings
t_SIMBOLOAND = r'&&'
t_SIMBOLOAND2 = r'\&'
t_SIMBOLOOR2 = r'\|'
t_NUMERAL = r'\#' # TODO: review
t_COLOCHO = r'~' # TODO: review
t_DESPLAZAMIENTODERECHA = r'>>'
t_DESPLAZAMIENTOIZQUIERDA = r'<<'
t_DOLAR = r'\$'
# defines the structure of decimal literals
def t_DECIMALTOKEN(t):
r'\d+\.\d+'
try:
t.value = float(t.value)
except ValueError:
print("El valor decimal es muy largo %d", t.value)
t.value = 0
return t
# defines the structure of integer literals
def t_ENTERO(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("El valor del entero es muy grande %d", t.value)
t.value = 0
return t
# defines the structure of string literals
def t_CADENA(t):
r'[\'|\"].*?[\'|\"]'
t.value = t.value[1:-1] # strip the quotes at the start and end of the string
return t
# defines the structure of identifiers; for the moment they are taken only as letters and digits
def t_ETIQUETA(t):
r'[a-zA-Z_]+[a-zA-Z0-9_]*'
t.type = reservadas.get(t.value.lower(),'ID') # Check for reserved words
print("ALV:",t)
print("ALV:",t.type)
return t
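# Standard PLY idiom: identifiers are matched first and then re-tagged as keyword
# tokens via the reservadas lookup, so keywords never need their own regexes.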
# Single-line comment: -- ...
def t_COMENTARIO_SIMPLE(t):
r'--.*\n'
t.lexer.lineno += 1
def t_COMENTARIO_MULTILINEA(t):
r'/\*(.|\n|)*?\*/'
t.lexer.lineno += t.value.count("\n")
# ----------------------- Ignored characters -----------------------
# character equivalent to a tab
t_ignore = " \t"
# character equivalent to a line break
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
x=caden.splitlines()
filas=len(x)-1
print("filas que no cambian: ",filas)
if h.filapivote>0:
fila=(t.lineno-1)-h.filapivote*filas
else:
fila=(t.lineno-1)
h.filapivote+=1
print("Caracter lexico no permitido ==> '%s'" % t.value)
h.errores+= "<tr><td>"+str(t.value[0])+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>LEXICO</td><td>token no pertenece al lenguaje</td></tr>\n"
t.lexer.skip(1)
# Build the lexical analyzer
import ply.lex as lex
lexer = lex.lex()
# -----------------------------------------------------------------------------
# BEGIN SYNTAX ANALYZER
# -----------------------------------------------------------------------------
# Operator associativity and precedence
precedence = (
('left','TYPECAST'),
('right','UMINUS'),
('right','UNOT'),
('left','MAS','MENOS'),
('left','POTENCIA'),
('left','POR','DIV','RESIDUO'),
('left','AND','OR','SIMBOLOOR2','SIMBOLOOR','SIMBOLOAND2'),
('left','DESPLAZAMIENTOIZQUIERDA','DESPLAZAMIENTODERECHA'),
)
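# PLY orders this table from lowest to highest precedence: the first line binds
# loosest and the last line binds tightest.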
# IMPORT AUXILIARY CLASSES
import reportes as h
# structure of my grammar
#-----------------------------------------------------START--------------------------------------------------------------------
def p_inicio_1(t) :
'inicio : queries'
h.reporteGramatical1 +="inicio ::= queries \n"
t[0]=t[1]
p=t[0]
h.insertarSimbolos(p)
def p_queries_1(t) :
'queries : queries query'
h.reporteGramatical1 +="queries ::= queries query\n"
t[1].append(t[2])
t[0]=t[1]
def p_queries_2(t) :
'queries : query'
h.reporteGramatical1 +="queries ::= query\n"
t[0]=[t[1]]
#-----------------------------------------------------LIST OF FUNCTIONS--------------------------------------------------------------------
def p_query(t):
'''query : mostrarBD
| crearBD
| alterBD
| dropBD
| useBD
| operacion
| insertinBD
| updateinBD
| deleteinBD
| createTable
| inheritsBD
| dropTable
| alterTable
| variantesAt
| contAdd
| contDrop
| contAlter
| selectData PUNTOYCOMA
| tipos
| createIndex
| combinacionSelects PUNTOYCOMA
| execFunction
'''
h.reporteGramatical1 +="query ::= opcion\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
t[0]=t[1]
# each production derives into statements such as create, insert, select; functions such as avg, sum, substring would go as another production
# inside the select (query)
# here the productions for the final operations begin
# the grouping of the operations
#-----------------------------------------------------CREATE INDEX--------------------------------------------------------------------
def p_createIndex(t):
'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndex(t[3],t[5],t[7]) \n"
t[0] = CreateIndex("INDEX",t[3],t[5],t[7])
def p_createIndex_1_1(t):
'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParams(t[3],t[5],t[7],t[8])\n"
t[0] = CreateIndexParams("INDEX",t[3],t[5],t[7],t[8])
def p_createIndex_1_2(t):
'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[3],t[5],t[7],t[10])\n"
t[0] = CreateIndexWhere("INDEX",t[3],t[5],t[7],t[10])
def p_createIndex_1_1_2(t):
'createIndex : CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[3],t[5],t[7],t[8],t[11]) \n"
t[0] = CreateIndexParamsWhere("INDEX",t[3],t[5],t[7],t[8],t[11])
def p_createIndex_2(t):
'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = t[0] = CreateIndex(t[3],t[5],t[9]) \n"
t[0] = CreateIndex("INDEX USING HASH",t[3],t[5],t[9])
def p_createIndex_2_1(t):
'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParams(t[3],t[5],t[9],t[10])\n"
t[0] = CreateIndexParams("INDEX USING HASH",t[3],t[5],t[9],t[10])
def p_createIndex_2_2(t):
'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[3],t[5],t[9],t[12])\n"
t[0] = CreateIndexWhere("INDEX USING HASH",t[3],t[5],t[9],t[12])
def p_createIndex_2_1_2(t):
'createIndex : CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE INDEX ID ON ID USING HASH PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[3],t[5],t[9],t[10],t[13])\n"
t[0] = CreateIndexParamsWhere("INDEX USING HASH",t[3],t[5],t[9],t[10],t[13])
def p_createIndex_3(t):
'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = t[0] = CreateIndex(t[4],t[6],t[8]\n"
t[0] = CreateIndex("UNIQUE",t[4],t[6],t[8])
def p_createIndex_3_1(t):
'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParams(t[4],t[6],t[8],t[9])\n"
t[0] = CreateIndexParams("UNIQUE",t[4],t[6],t[8],t[9])
def p_createIndex_3_2(t):
'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexWhere(t[4],t[6],t[8],t[11])\n"
t[0] = CreateIndexWhere("UNIQUE",t[4],t[6],t[8],t[11])
def p_createIndex_3_1_2(t):
'createIndex : CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA'
h.reporteGramatical1 +="createIndex ::= CREATE UNIQUE INDEX ID ON ID PARENTESISIZQUIERDA ID indexParams PARENTESISDERECHA WHERE whereOptions PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateIndexParamsWhere(t[4],t[6],t[8],t[9],t[12])\n"
t[0] = CreateIndexParamsWhere("UNIQUE",t[4],t[6],t[8],t[9],t[12])
def p_indexParams(t):
'indexParams : sort'
h.reporteGramatical1 +="indexParams ::= sort\n"
h.reporteGramatical2 +="t[0] = t[1]\n"
t[0] = t[1]
def p_whereOptions_1(t):
'whereOptions : asignaciones'
h.reporteGramatical1 +="whereOptions ::= asignaciones\n"
h.reporteGramatical2 +="t[0] = t[1]\n"
t[0] = t[1]
def p_whereOptions_2(t):
'whereOptions : operacion'
h.reporteGramatical1 +="whereOptions ::= operacion\n"
h.reporteGramatical2 +="t[0] = t[1]\n"
t[0] = t[1]
def p_whereOptions_3(t):
'whereOptions : search_condition'
h.reporteGramatical1 +="whereOptions ::= search_condition\n"
h.reporteGramatical2 +="t[0] = t[1]\n"
t[0] = t[1]
def p_sortOptions_1(t):
'sort : NULLS FIRST'
h.reporteGramatical1 +="sort ::= NULLS FIRST\n"
h.reporteGramatical2 +="t[0] = t[2]\n"
t[0] = t[2]
def p_sortOptions_1_1(t):
'sort : DESC NULLS FIRST'
h.reporteGramatical1 +="sort ::= DESC NULLS FIRST\n"
h.reporteGramatical2 +="t[0] = t[3]\n"
t[0] = SortOptions(t[1],t[3])
def p_sortOptions_1_2(t):
'sort : ASC NULLS FIRST'
h.reporteGramatical1 +="sort ::= ASC NULLS FIRST\n"
h.reporteGramatical2 +="t[0] = t[3]\n"
t[0] = SortOptions(t[1],t[3])
def p_sortOptions_2(t):
'sort : NULLS LAST'
h.reporteGramatical1 +="sort ::= NULLS LAST\n"
h.reporteGramatical2 +="t[0] = t[2]\n"
t[0] = t[2]
def p_sortOptions_2_1(t):
'sort : DESC NULLS LAST'
h.reporteGramatical1 +="sort ::= DESC NULLS LAST\n"
h.reporteGramatical2 +="t[0] = t[3]\n"
t[0] = SortOptions(t[1],t[3])
def p_sortOptions_2_2(t):
'sort : ASC NULLS LAST'
h.reporteGramatical1 +="sort ::= ASC NULLS LAST\n"
h.reporteGramatical2 +="t[0] = t[3]\n"
t[0] = SortOptions(t[1],t[3])
#-----------------------------------------------------CREATE DB--------------------------------------------------------------------
def p_crearBaseDatos_1(t):
'crearBD : CREATE DATABASE ID PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabases(t[3])\n"
t[0] = CreateDatabases(t[3])
def p_crearBaseDatos_2(t):
'crearBD : CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = Create_IF_Databases(t[3],t[6])\n"
t[0] = Create_IF_Databases(t[3],t[6])
def p_crear_replace_BaseDatos_1(t):
'crearBD : CREATE OR REPLACE DATABASE ID PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabases(t[5])\n"
t[0] = Create_Replace_Databases(t[3],t[5])
def p_crear_replace_BaseDatos_2(t):
'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabases(t[5])\n"
t[0] = Create_Replace_IF_Databases(t[3],t[5],t[8])
def p_crear_param_BaseDatos_1(t):
'crearBD : CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[3],t[4])\n"
t[0] = CreateDatabaseswithParameters(t[3],t[4])
def p_crear_param_BaseDatos_2(t):
'crearBD : CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[3],t[4])\n"
t[0] = Create_Databases_IFwithParameters(t[3],t[6],t[7])
def p_crear_replace_param_BaseDatos_1(t):
'crearBD : CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[5],t[6])\n"
t[0] = Create_Replace_DatabaseswithParameters(t[3],t[5],t[6])
def p_crear_replace_param_BaseDatos_2(t):
'crearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA'
h.reporteGramatical1 +="crearBD ::= CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = CreateDatabaseswithParameters(t[5],t[6])\n"
t[0] = Create_Replace_Databases_IFwithParameters(t[3],t[5],t[8],t[9])
def p_parametrosCrearBD_1(t):
'parametrosCrearBD : parametrosCrearBD parametroCrearBD'
h.reporteGramatical1 +="parametrosCrearBD ::= parametrosCrearBD parametroCrearBD\n"
h.reporteGramatical2 +="t[1].append(t[2])\n t[0]=t[1]\n"
t[1].append(t[2])
t[0]=t[1]
def p_parametrosCrearBD_2(t):
'parametrosCrearBD : parametroCrearBD'
h.reporteGramatical1 +="parametrosCrearBD ::= parametroCrearBD\n"
h.reporteGramatical2 +="t[0]=[t[1]]\n"
t[0]=[t[1]]
def p_parametroCrearBD(t):
'''parametroCrearBD : OWNER IGUAL final
| MODE IGUAL final
'''
h.reporteGramatical1 +="parametroCrearBD ::= "+str(t[1])+" IGUAL "+str(t[3])+"\n"
if t[1] == "OWNER":
h.reporteGramatical2 +="t[0]=ExpresionOwner(t[1],t[3])\n"
t[0]=ExpresionOwner(t[1],t[3])
elif t[1] == "MODE":
h.reporteGramatical2 +="t[0]=ExpresionMode(t[1],t[3])\n"
t[0]=ExpresionMode(t[1],t[3])
#-----------------------------------------------------SHOW DB--------------------------------------------------------------------
def p_mostrarBD(t):
'mostrarBD : SHOW DATABASES PUNTOYCOMA'
h.reporteGramatical1 +="mostrarBD ::= SHOW DATABASES PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=ShowDatabases(1)\n"
t[0]=ShowDatabases(1)
def p_usarBaseDatos(t):
'useBD : USE ID PUNTOYCOMA'
h.reporteGramatical1 +="useBD ::= USE ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=UseDatabases(t[2])\n"
t[0]=UseDatabases(t[2])
print("t[0]:",t[0])
#-----------------------------------------------------ALTER BD--------------------------------------------------------------------
def p_alterBD_1(t):
'alterBD : ALTER DATABASE ID RENAME TO ID PUNTOYCOMA'
h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" RENAME TO "+str(t[6])+" PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = AlterDB(t[3],t[6])\n"
t[0] = AlterDB(t[3],t[6])
def p_alterBD_2(t):
'alterBD : ALTER DATABASE ID OWNER TO parametroAlterUser PUNTOYCOMA'
h.reporteGramatical1 +="alterBD ::= ALTER DATABASE "+str(t[3])+" OWNER TO "+str(t[6])+" PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = AlterOwner(t[3],t[4],t[6])\n"
t[0] = AlterOwner(t[3],t[4],t[6])
def p_parametroAlterUser(t):
'''parametroAlterUser : CURRENT_USER
| SESSION_USER
| final
'''
h.reporteGramatical1 +="parametroAlterUser ::= "+str(t[1])+" \n"
h.reporteGramatical2 +="t[0] = t[1]\n"
t[0] = t[1]
#-----------------------------------------------------DROP TABLE-----------------------------------------------------------------
def p_dropTable(t) :
'dropTable : DROP TABLE ID PUNTOYCOMA'
h.reporteGramatical1 +="dropTable ::= DROP TABLE ID PUNTOYCOMA\n"
t[0]=DropTable(t[3])
#-----------------------------------------------------ALTER TABLE-----------------------------------------------------------------
def p_alterTable(t):
'''
alterTable : ALTER TABLE ID variantesAt PUNTOYCOMA
'''
h.reporteGramatical1 +="alterTable ::= ALTER TABLE ID variantesAt PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0] = AlterTable(t[3],t[4])"
t[0] = AlterTable(t[3],t[4])
#---------------------------------------------------TYPES------------------------------------------------------------------------
def p_variantesAt(t):
'''
variantesAt : ADD contAdd
| ALTER contAlter
| DROP contDrop
'''
if t[1].upper()=="ADD":
h.reporteGramatical1 +="variantesAt ::= ADD contAdd\n"
h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
t[0]=VariantesAt(t[1],t[2])
elif t[1].upper()=="ALTER":
h.reporteGramatical1 +="variantesAt ::= ALTER listaContAlter\n"
h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
t[0]=VariantesAt(t[1],t[2])
elif t[1].upper()=="DROP":
h.reporteGramatical1 +="variantesAt ::= DROP contDrop\n"
h.reporteGramatical2 +="t[0]=VariantesAt(t[1],t[2])"
t[0]=VariantesAt(t[1],t[2])
# THE LIST WAS SPLIT OUT SO THE DATA CAN BE MANIPULATED
def p_listaContAlter(t):
'''
listaContAlter : listaContAlter COMA contAlter
'''
h.reporteGramatical1 +="listaContAlter ::= listaContAlter COMA contAlter\n"
def p_listaContAlter_2(t):
'''
listaContAlter : contAlter
'''
h.reporteGramatical1 +="listaContAlter ::= contAlter\n"
def p_contAlter(t):
'''
contAlter : COLUMN ID SET NOT NULL
| COLUMN ID TYPE tipo
'''
if t[3].upper()=="SET":
h.reporteGramatical1 +="contAlter ::= COLUMN ID SET NOT NULL\n"
h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])"
t[0]=contAlter(t[2],t[3],t[4])
elif t[3].upper()=="TYPE":
h.reporteGramatical1 +="contAlter ::= COLUMN ID TYPE tipo\n"
h.reporteGramatical2 +="t[0]=contAlter(t[2],t[3],t[4])"
t[0]=contAlter(t[2],t[3],t[4])
def p_contAdd(t):
'''
contAdd : COLUMN ID tipo
| CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID
| PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA
| CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA
| CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA
| CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA
'''
if t[1].upper()=="COLUMN":
h.reporteGramatical1 +="contAdd ::= COLUMN ID tipo\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],None,None,None,None)"
t[0]=contAdd(t[1],t[3],t[2],None,None,None,None)
elif t[1].upper()=="CHECK":
h.reporteGramatical1 +="contAdd ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],None,None,None,None,None,t[3])"
t[0]=contAdd(t[1],None,None,None,None,None,t[3])
elif t[1].upper()=="FOREIGN":
h.reporteGramatical1 +="contAdd ::= FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[4],t[7],None,None,None)"
t[0]=contAdd(t[1],None,t[4],t[7],None,None,None)
elif t[1].upper()=="PRIMARY":
h.reporteGramatical1 +="contAdd ::= PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[4],None,None,None,None)"
t[0]=contAdd(t[1],None,t[4],None,None,None,None)
elif t[1].upper()=="CONSTRAINT":
if t[3].upper()=="PRIMARY":
h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None)"
t[0]=contAdd(t[1],t[3],t[2],t[6],None,None,None)
elif t[3].upper()=="FOREIGN":
h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None)"
t[0]=contAdd(t[1],t[3],t[2],t[6],t[9],t[11],None)
else:
h.reporteGramatical1 +="contAdd ::= CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=contAdd(t[1],None,t[2],None,None,None,t[5])"
t[0]=contAdd(t[1],t[3],t[2],None,None,None,t[5])
def p_contDrop(t):
'''
contDrop : COLUMN ID
| CONSTRAINT ID
| PRIMARY KEY
'''
if t[1].upper()=="COLUMN":
h.reporteGramatical1 +="contDrop ::= COLUMN ID \n"
h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])"
t[0]=contDrop(t[1],t[2])
elif t[1].upper()=="CONSTRAINT":
h.reporteGramatical1 +="contDrop ::= CONSTRAINT ID\n"
h.reporteGramatical2 +="t[0]=contDrop(t[1],t[2])"
t[0]=contDrop(t[1],t[2])
elif t[1].upper()=="PRIMARY":
h.reporteGramatical1 +="contDrop ::= PRIMARY KEY\n"
h.reporteGramatical2 +="t[0]=contDrop(t[1],None)"
t[0]=contDrop(t[1],None)
# THE LIST WAS SPLIT OUT SO THE DATA CAN BE MANIPULATED
def p_listaID(t):
'''
listaid : listaid COMA final
'''
h.reporteGramatical1 +="listaid ::= listaid COMA ID\n"
h.reporteGramatical2 +="t[1].append(t[3])\nt[0]=t[1]\n"
t[1].append(t[3])
t[0]=t[1]
def p_listaID_2(t):
'''
listaid : final
'''
h.reporteGramatical1 +="listaid ::= ID\n"
h.reporteGramatical2 +="t[0]=[t[1]]"
t[0]=[t[1]]
#-----------------------------------------------------DROP BD--------------------------------------------------------------------
def p_dropBD_1(t):
'dropBD : DROP DATABASE ID PUNTOYCOMA'
h.reporteGramatical1 +="dropBD ::= DROP DATABASE "+str(t[3])+" PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]= DropDB(t[3])\n"
t[0]= DropDB(t[3])
def p_dropBD_2(t):
'dropBD : DROP DATABASE IF EXISTS ID PUNTOYCOMA'
h.reporteGramatical1 +="dropBD ::= DROP DATABASE IF EXISTS "+str(t[5])+" PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]= DropDBIF(t[3],t[5])\n"
t[0]= DropDBIF(t[3],t[5])
#-----------------------------------------------------OPERATIONS AND EXPRESSIONS--------------------------------------------------------------------
def p_operacion(t):
'''operacion : operacion MAS operacion
| operacion MENOS operacion
| operacion POR operacion
| operacion DIV operacion
| operacion RESIDUO operacion
| operacion POTENCIA operacion
| operacion AND operacion
| operacion OR operacion
| operacion SIMBOLOOR2 operacion
| operacion SIMBOLOOR operacion
| operacion SIMBOLOAND2 operacion
| operacion DESPLAZAMIENTOIZQUIERDA operacion
| operacion DESPLAZAMIENTODERECHA operacion
| operacion IGUAL operacion
| operacion IGUALIGUAL operacion
| operacion NOTEQUAL operacion
| operacion MAYORIGUAL operacion
| operacion MENORIGUAL operacion
| operacion MAYOR operacion
| operacion MENOR operacion
| operacion DIFERENTE operacion
| PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| PARENTESISIZQUIERDA listaid PARENTESISDERECHA
'''
# --------------------------------------------------------------------------------------------------------------
if t[2]=='+':
h.reporteGramatical1 +="operacion ::= operacion MAS operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MAS)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='-':
h.reporteGramatical1 +="operacion ::= operacion MENOS operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MENOS)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='*':
h.reporteGramatical1 +="operacion ::= operacion POR operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='/':
h.reporteGramatical1 +="operacion ::= operacion DIV operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='%':
h.reporteGramatical1 +="operacion ::= operacion RESIDUO operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.MODULO)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='^':
print("entra a la potencia")
h.reporteGramatical1 +="operacion ::= operacion POTENCIA operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA)\n"
t[0]=ExpresionAritmetica(t[1],t[3],OPERACION_ARITMETICA.POTENCIA)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=="AND":
h.reporteGramatical1 +="operacion ::= operacion AND operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n"
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=="OR":
h.reporteGramatical1 +="operacion ::= operacion OR operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='|':
h.reporteGramatical1 +="operacion ::= operacion | operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='||':
h.reporteGramatical1 +="operacion ::= operacion || operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)\n"
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='&':
h.reporteGramatical1 +="operacion ::= operacion & operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)\n"
t[0]=ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='<<':
print(t[2])
h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTOIZQUIERDA operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA)\n"
t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_IZQUIERDA)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='>>':
h.reporteGramatical1 +="operacion ::= operacion DESPLAZAMIENTODERECHA operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA)\n"
t[0]=ExpresionBIT(t[1],t[3],OPERACION_BIT.DESPLAZAMIENTO_DERECHA)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='=':
t[0]=ExpresionIgualdad(t[1],t[3])
#t[0]=operacionDelete(t[1],t[3],t[2])
h.reporteGramatical1 +="operacion ::= operacion IGUAL operacion\n"
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='==':
h.reporteGramatical1 +="operacion ::= operacion IGUALIGUAL operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL_IGUAL)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='!=':
h.reporteGramatical1 +="operacion ::= operacion NOTEQUAL operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NO_IGUAL)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='>=':
h.reporteGramatical1 +="operacion ::= operacion MAYORIGUAL operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR_IGUAL)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='<=':
h.reporteGramatical1 +="operacion ::= operacion MENORIGUAL operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR_IGUAL)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='>':
h.reporteGramatical1 +="operacion ::= operacion MAYOR operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYOR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='<':
h.reporteGramatical1 +="operacion ::= operacion MENOR operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENOR)
# --------------------------------------------------------------------------------------------------------------
elif t[2]=='<>':
h.reporteGramatical1 +="operacion ::= operacion DIFERENTE operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)\n"
t[0]=ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)
# --------------------------------------------------------------------------------------------------------------
else:
h.reporteGramatical1 +="operacion ::= PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
t[0]=t[2]
# --------------------------------------------------------------------------------------------------------------
def p_operacion_menos_unario(t):
'''operacion : MENOS ENTERO %prec UMINUS
| MENOS DECIMAL %prec UMINUS
'''
h.reporteGramatical1 +="operacion ::= MENOS operacion %prec UMINUS\n"
h.reporteGramatical2 +="t[0]=ExpresionNegativo(t[2])\n"
t[0]=ExpresionNegativo(t[2])
# --------------------------------------------------------------------------------------------------------------
def p_operacion_not_unario(t):
'operacion : NOT operacion %prec UNOT'
h.reporteGramatical1 +="operacion ::= NOT operacion %prec UNOT\n"
h.reporteGramatical2 +="t[0]=ExpresionNOT(t[2])\n"
t[0]=ExpresionNOT(t[2])
# --------------------------------------------------------------------------------------------------------------
def p_operacion_funcion(t):
'operacion : funcionBasica'
h.reporteGramatical1 +="operacion ::= funcionBasica\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
t[0]=t[1]
# --------------------------------------------------------------------------------------------------------------
def p_operacion_final(t):
'operacion : final'
h.reporteGramatical1 +="operacion ::= final\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
t[0]=t[1]
#-----------------------------------------------------MATH FUNCTIONS--------------------------------------------------------------------
def p_funcion_basica(t):
'''funcionBasica : ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| LCM PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| PI PARENTESISIZQUIERDA PARENTESISDERECHA
| POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TRIM_SCALE PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA
| RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA
| ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| ATAN2D PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA
| LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA
| NOW PARENTESISIZQUIERDA PARENTESISDERECHA
| COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA
| GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
| SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
| CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA
| ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA
| AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA
| EXTRACT PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA
| ID PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA
| CURRENT_TIME
| CURRENT_DATE
'''
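# Dispatch on the leading keyword of the reduced production: each branch appends the
# matched grammar rule to the report strings and builds the corresponding Expresion*
# AST node from the operands between the parentheses (t[3], t[5], ...).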
if t[1].upper()=="ABS":
h.reporteGramatical1 +="funcionBasica ::= ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionABS(t[3])\n"
t[0]=ExpresionABS(t[3])
elif t[1].upper()=="CBRT":
h.reporteGramatical1 +="funcionBasica ::= CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCBRT(t[3])\n"
t[0]=ExpresionCBRT(t[3])
elif t[1].upper()=="CEIL":
h.reporteGramatical1 +="funcionBasica ::= CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCEIL(t[3])\n"
t[0]=ExpresionCEIL(t[3])
elif t[1].upper()=="CEILING":
h.reporteGramatical1 +="funcionBasica ::= CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCEILING(t[3])\n"
t[0]=ExpresionCEILING(t[3])
elif t[1].upper()=="DEGREES":
t[0]=ExpresionDEGREES(t[3])
h.reporteGramatical1 +="funcionBasica ::= DEGREES PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionDEGREES(t[3])\n"
elif t[1].upper()=="DIV":
print("entra a DIV++++++++++++")
t[0]=ExpresionDIV(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= DIV PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionDIV(t[3],t[5])\n"
elif t[1].upper()=="EXP":
t[0]=ExpresionEXP(t[3])
h.reporteGramatical1 +="funcionBasica ::= EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionEXP(t[3])\n"
elif t[1].upper()=="FACTORIAL":
t[0]=ExpresionFACTORIAL(t[3])
h.reporteGramatical1 +="funcionBasica ::= FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionFACTORIAL(t[3])\n"
elif t[1].upper()=="FLOOR":
t[0]=ExpresionFLOOR(t[3])
h.reporteGramatical1 +="funcionBasica ::= FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionFLOOR(t[3])\n"
elif t[1].upper()=="GCD":
t[0]=ExpresionGCD(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionGCD(t[3],t[5])\n"
elif t[1].upper()=="LN":
t[0]=ExpresionLN(t[3])
h.reporteGramatical1 +="funcionBasica ::= LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionLN(t[3])\n"
elif t[1].upper()=="LOG":
t[0]=ExpresionLOG(t[3])
h.reporteGramatical1 +="funcionBasica ::= LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionLOG(t[3])\n"
elif t[1].upper()=="MOD":
t[0]=ExpresionMOD(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionMOD(t[3],t[5])\n"
elif t[1].upper()=="PI":
t[0]=ExpresionPI(1)
h.reporteGramatical1 +="funcionBasica ::= PI PARENTESISIZQUIERDA PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionPI(1)\n"
elif t[1].upper()=="POWER":
t[0]=ExpresionPOWER(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionPOWER(t[3],t[5])\n"
elif t[1].upper()=="RADIANS":
t[0]=ExpresionRADIANS(t[3])
h.reporteGramatical1 +="funcionBasica ::= RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionRADIANS(t[3])\n"
elif t[1].upper()=="ROUND":
t[0]=ExpresionROUND(t[3])
h.reporteGramatical1 +="funcionBasica ::= ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionROUND(t[3])\n"
elif t[1].upper()=="SIGN":
t[0]=ExpresionSIGN(t[3])
h.reporteGramatical1 +="funcionBasica ::= SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSIGN(t[3])\n"
elif t[1].upper()=="SQRT":
t[0]=ExpresionSQRT(t[3])
h.reporteGramatical1 +="funcionBasica ::= SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSQRT(t[3])\n"
elif t[1].upper()=="TRUNC":
t[0]=ExpresionTRUNC(t[3])
h.reporteGramatical1 +="funcionBasica ::= TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="[0]=ExpresionTRUNC(t[3])\n"
elif t[1].upper()=="WIDTH_BUCKET":
t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9])
h.reporteGramatical1 +="funcionBasica ::= WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionWIDTHBUCKET(t[3],t[5],t[7],t[9])\n"
elif t[1].upper()=="RANDOM":
t[0]=ExpresionRANDOM(1)
h.reporteGramatical1 +="funcionBasica ::= RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionRANDOM(1)\n"
elif t[1].upper()=="ACOS":
t[0]=ExpresionACOS(t[3])
h.reporteGramatical1 +="funcionBasica ::= ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionACOS(t[3])\n"
elif t[1].upper()=="ACOSD":
t[0]=ExpresionACOSD(t[3])
h.reporteGramatical1 +="funcionBasica ::= ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionACOSD(t[3])\n"
elif t[1].upper()=="ASIN":
t[0]=ExpresionASIN(t[3])
h.reporteGramatical1 +="funcionBasica ::= ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="tt[0]=ExpresionASIN(t[3])\n"
elif t[1].upper()=="ASIND":
t[0]=ExpresionASIND(t[3])
h.reporteGramatical1 +="funcionBasica ::= ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionASIND(t[3])\n"
elif t[1].upper()=="ATAN":
t[0]=ExpresionATAN(t[3])
h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionATAN(t[3])\n"
elif t[1].upper()=="ATAND":
t[0]=ExpresionATAND(t[3])
h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionATAND(t[3])\n"
elif t[1].upper()=="ATAN2":
t[0]=ExpresionATAN2(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= ATAN2 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionATAN2(t[3],t[5])\n"
elif t[1].upper()=="ATAN2D":
t[0]=ExpresionATAN2D(t[3],t[5])
h.reporteGramatical1 +="funcionBasica ::= ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionATAN2D(t[3],t[5])\n"
elif t[1].upper()=="COS":
t[0]=ExpresionCOS(t[3])
h.reporteGramatical1 +="funcionBasica ::= COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCOS(t[3])\n"
elif t[1].upper()=="COSD":
t[0]=ExpresionCOSD(t[3])
h.reporteGramatical1 +="funcionBasica ::= COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCOSD(t[3])\n"
elif t[1].upper()=="COT":
t[0]=ExpresionCOT(t[3])
h.reporteGramatical1 +="funcionBasica ::= COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCOT(t[3])\n"
elif t[1].upper()=="COTD":
t[0]=ExpresionCOTD(t[3])
h.reporteGramatical1 +="funcionBasica ::= COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCOTD(t[3])\n"
elif t[1].upper()=="SIN":
t[0]=ExpresionSIN(t[3])
h.reporteGramatical1 +="funcionBasica ::= SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSIN(t[3])\n"
elif t[1].upper()=="SIND":
t[0]=ExpresionSIND(t[3])
h.reporteGramatical1 +="funcionBasica ::= SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSIND(t[3])\n"
elif t[1].upper()=="TAN":
t[0]=ExpresionTAN(t[3])
h.reporteGramatical1 +="funcionBasica ::= TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionTAN(t[3])\n"
elif t[1].upper()=="TAND":
t[0]=ExpresionTAND(t[3])
h.reporteGramatical1 +="funcionBasica ::= TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionTAND(t[3])\n"
elif t[1].upper()=="SINH":
t[0]=ExpresionSINH(t[3])
h.reporteGramatical1 +="funcionBasica ::= SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSINH(t[3])\n"
elif t[1].upper()=="COSH":
t[0]=ExpresionCOSH(t[3])
h.reporteGramatical1 +="funcionBasica ::= COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionCOSH(t[3])\n"
elif t[1].upper()=="TANH":
t[0]=ExpresionTANH(t[3])
h.reporteGramatical1 +="funcionBasica ::= TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionTANH(t[3])\n"
elif t[1].upper()=="ASINH":
t[0]=ExpresionASINH(t[3])
h.reporteGramatical1 +="funcionBasica ::= ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionASINH(t[3])\n"
elif t[1].upper()=="ACOSH":
t[0]=ExpresionACOSH(t[3])
h.reporteGramatical1 +="funcionBasica ::= ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionACOSH(t[3])\n"
elif t[1].upper()=="ATANH":
t[0]=ExpresionATANH(t[3])
h.reporteGramatical1 +="funcionBasica ::= ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionATANH(t[3])\n"
elif t[1].upper()=="GREATEST":
t[0]=ExpresionGREATEST(t[3])
h.reporteGramatical1 +="funcionBasica ::= GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionGREATEST(t[3])\n"
elif t[1].upper()=="LEAST":
t[0]=ExpresionLEAST(t[3])
h.reporteGramatical1 +="funcionBasica ::= LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionLEAST(t[3])\n"
elif t[1].upper()=="NOW":
t[0]=ExpresionNOW(1)
h.reporteGramatical1 +="funcionBasica ::= NOW PARENTESISIZQUIERDA PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionNOW(1)\n"
elif t[1].upper()=="LENGTH":
h.reporteGramatical1 +="funcionBasica ::= LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionLENGTH(t[3])\n"
t[0]=ExpresionLENGTH(t[3])
elif t[1].upper()=="TRIM":
h.reporteGramatical1 +="funcionBasica ::= TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionTRIM(t[3],t[4],t[6])\n"
t[0]=ExpresionTRIM(t[3],t[4],t[6])
elif t[1].upper()=="GET_BYTE":
h.reporteGramatical1 +="funcionBasica ::= GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="MD5":
h.reporteGramatical1 +="funcionBasica ::= MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionMD5(t[3])\n"
t[0]=ExpresionMD5(t[3])
elif t[1].upper()=="SET_BYTE":
h.reporteGramatical1 +="funcionBasica ::= SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="SHA256":
h.reporteGramatical1 +="funcionBasica ::= SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSHA256(t[3])\n"
t[0]=ExpresionSHA256(t[3])
elif t[1].upper()=="SUBSTR":
h.reporteGramatical1 +="funcionBasica ::= SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSUBSTR(t[3],t[5],t[7])\n"
t[0]=ExpresionSUBSTR(t[3],t[5],t[7])
elif t[1].upper()=="CONVERT":
h.reporteGramatical1 +="funcionBasica ::= CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="ENCODE":
h.reporteGramatical1 +="funcionBasica ::= ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="DECODE":
h.reporteGramatical1 +="funcionBasica ::= DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="AVG":
h.reporteGramatical1 +="funcionBasica ::= AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="SUM":
h.reporteGramatical1 +="funcionBasica ::= SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
elif t[1].upper()=="EXTRACT":
h.reporteGramatical1 +="funcionBasica ::= EXTRACT PARENTESISIZQUIERDA opcionTiempo FROM TIMESTAMP operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionEXTRACT(t[3],t[6])\n"
t[0]=ExpresionEXTRACT(t[3],t[6])
elif t[1].upper()=="DATE_PART":
h.reporteGramatical1 +="funcionBasica ::= DATE_PART PARENTESISIZQUIERDA operacion COMA INTERVAL operacion PARENTESISDERECHA\n"
elif t[1].upper()=="CURRENT_DATE":
h.reporteGramatical1 +="funcionBasica ::= CURRENT_DATE \n"
h.reporteGramatical2 +="t[0]=ExpresionCurrentDate(1)\n"
t[0]=ExpresionCurrentDate(1)
elif t[1].upper()=="CURRENT_TIME":
h.reporteGramatical1 +="funcionBasica ::= CURRENT_TIME\n"
h.reporteGramatical2 +="t[0]=ExpresionCurrentTime(1)\n"
t[0]=ExpresionCurrentTime(1)
else:
print("no entra a ninguna en funcionBasica")
def p_funcion_basica_1(t):
'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA'
h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGA(t[3],t[5],t[7])\n"
t[0]=ExpresionSUBSTRINGA(t[3],t[5],t[7])
def p_funcion_basica_2(t):
'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA'
h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGB(t[3],t[5])\n"
t[0]=ExpresionSUBSTRINGB(t[3],t[5])
def p_funcion_basica_3(t):
'funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA'
h.reporteGramatical1 +="funcionBasica ::= SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=ExpresionSUBSTRINGC(t[3],t[5])\n"
t[0]=ExpresionSUBSTRINGC(t[3],t[5])
def p_opcionTrim(t):
''' opcionTrim : LEADING
| TRAILING
| BOTH
'''
h.reporteGramatical1 +="opcionTrim ::= "+str(t[1])+"\n"
# TODO: still missing the hand-off to the date/time functions and related handling
if t[1].upper()=="LEADING":
h.reporteGramatical1 +="opcionTrim ::= LEADING\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(1)\n"
t[0]=ExpresionCadenas("1")
elif t[1].upper()=="TRAILING":
h.reporteGramatical1 +="opcionTrim ::= TRAILING\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(2)\n"
t[0]=ExpresionCadenas("2")
elif t[1].upper()=="BOTH":
h.reporteGramatical1 +="opcionTrim ::= BOTH\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(3)\n"
t[0]=ExpresionCadenas("3")
def p_opcionTiempo(t):
'''opcionTiempo : YEAR
| MONTH
| DAY
| HOUR
| MINUTE
| SECOND
'''
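# Each time-unit keyword is reduced to a numeric string code ("1"=YEAR ... "6"=SECOND)
# wrapped in ExpresionCadenas; presumably the EXTRACT handler switches on that code later.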
if t[1].upper()=="YEAR":
h.reporteGramatical1 +="opcionTiempo ::= YEAR\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(1)\n"
t[0]=ExpresionCadenas("1")
elif t[1].upper()=="MONTH":
h.reporteGramatical1 +="opcionTiempo ::= MONTH\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(2)\n"
t[0]=ExpresionCadenas("2")
elif t[1].upper()=="DAY":
h.reporteGramatical1 +="opcionTiempo ::= DAY\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(3)\n"
t[0]=ExpresionCadenas("3")
elif t[1].upper()=="HOUR":
h.reporteGramatical1 +="opcionTiempo ::= HOUR\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(4)\n"
t[0]=ExpresionCadenas("4")
elif t[1].upper()=="MINUTE":
h.reporteGramatical1 +="opcionTiempo ::= MINUTE\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(5)\n"
t[0]=ExpresionCadenas("5")
elif t[1].upper()=="SECOND":
h.reporteGramatical1 +="opcionTiempo ::= SECOND\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(6)\n"
t[0]=ExpresionCadenas("6")
#-----------------------------------------------------TERMINAL PRODUCTIONS--------------------------------------------------------------------
def p_final(t):
'''final : DECIMAL
| ENTERO'''
h.reporteGramatical1 +="final ::= Numero("+str(t[1])+")\n"
h.reporteGramatical2 +="t[0]=ExpresionNumero(t[1])\n"
t[0]=ExpresionNumero(t[1])
def p_final_id(t):
'final : ID'
h.reporteGramatical1 +="final ::= ID("+str(t[1])+")\n"
h.reporteGramatical2 +="t[0]=ExpresionIdentificador(t[1])\n"
t[0]=ExpresionIdentificador(t[1])
def p_final_invocacion(t):
'final : ID PUNTO ID'
h.reporteGramatical1 +="final ::= ID("+str(t[1])+") . ID("+str(t[3])+")\n"
h.reporteGramatical2 +="t[0] = ExpresionInvocacion(t[1],t[3])\n"
t[0] = ExpresionLlamame(t[1],t[3])
def p_final_invocacion_2(t):
'final : ID PUNTO POR'
h.reporteGramatical1 +="final ::= ID("+str(t[1])+") . ID("+str(t[3])+")\n"
h.reporteGramatical2 +="t[0] = ExpresionInvocacion(t[1],t[3])\n"
t[0] = ExpresionLlamame(t[1],t[3])
def p_final_cadena(t):
'final : CADENA'
h.reporteGramatical1 +="final ::= CADENA ("+t[1]+")\n"
h.reporteGramatical2 +="t[0]=ExpresionCadenas(t[1])\n"
t[0]=ExpresionCadenas(t[1])
#-----------------------------------------------------INSERT INTO DB--------------------------------------------------------------------
def p_insertBD_1(t):
'insertinBD : INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA'
#print(t[3],t[6])
t[0] = InsertinDataBases(t[3],None,t[6])
h.reporteGramatical1 +="insertinBD ::= INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 += "InsertinDabaBases(t[3],t[6])\n"
def p_insertBD_2(t):
'insertinBD : INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA'
print(t[9])
t[0] = InsertinDataBases(t[3],t[5],t[9])
h.reporteGramatical1 +="insertinBD ::= INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 += "t[0] = InsertinDataBases(t[3],t[5],t[9])\n"
# THE LIST WAS SPLIT INTO 2 METHODS TO HANDLE THE DATA
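# listaParam is left-recursive: the recursive rule appends each new listaP value to the
# accumulated Python list, while the base rule wraps the first value in a one-element list.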
def p_listaParam(t):
'''listaParam : listaParam COMA listaP
'''
t[1].append(t[3])
t[0] = t[1]
h.reporteGramatical1 +="insertinBD ::= listaParam COMA operacion\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
def p_listaParam_2(t):
'''listaParam : listaP
'''
t[0] = [t[1]]
h.reporteGramatical1 +="listaParam ::= operacion\n"
h.reporteGramatical2 +="t[0]=[t[1]]\n"
def p_listaP_1(t):
'listaP : operacion'
print("---------------",t[1])
t[0] = t[1]
def p_listaP_2(t):
'listaP : ID operacion'
t[0] = t[1]
print(t[0])
def p_listaP_3(t):
'listaP : ID PARENTESISIZQUIERDA PARENTESISDERECHA'
t[0] = t[1]+"()"
print(t[0])
#-----------------------------------------------------UPDATE IN DB--------------------------------------------------------------------
def p_updateBD(t):
'updateinBD : UPDATE ID SET asignaciones WHERE operacion PUNTOYCOMA'
t[0]= UpdateinDataBase(t[2],t[4],t[6])
h.reporteGramatical1 +="updateinBD ::= UPDATE ID SET asignacion WHERE operacion PUNTOYCOMA\n"
h.reporteGramatical1 +="t[0]=UpdateinDabaBase(t[2].t[4],t[6])\n"
# THE LIST WAS SPLIT INTO 2 METHODS TO HANDLE THE DATA
def p_asignaciones(t):
'''asignaciones : asignaciones COMA operacion
'''
t[1].append(t[3])
t[0] = t[1]
h.reporteGramatical1 +="asignaciones ::= asignaciones COMA operacion\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
def p_asignaciones_2(t):
'''asignaciones : operacion
'''
t[0] = [t[1]]
h.reporteGramatical1 +="asignaciones ::= asigna\n"
h.reporteGramatical2 +="t[0]=[t[1]]\n"
#-----------------------------------------------------DELETE IN DB--------------------------------------------------------------------
def p_deleteinBD_1(t):
'deleteinBD : DELETE FROM ID PUNTOYCOMA'
t[0] = t[3]
h.reporteGramatical1 +="deleteinBD ::= DELETE FROM ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=t[3]\n"
def p_deleteinBD_2(t):
'deleteinBD : DELETE FROM ID WHERE operacion PUNTOYCOMA'
t[0] = DeleteinDataBases(t[3],t[5])
h.reporteGramatical1 +="deleteinBD ::= DELETE FROM ID WHERE operacion PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=DeleteinDataBases(t[3],t[5])\n"
#-----------------------------------------------------CREATE TABLE WITH INHERITS-------------------------------------------------------
def p_inheritsBD(t):
'inheritsBD : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA'
t[0]=InheritsBD(t[3],t[9],t[5])
h.reporteGramatical1 +="inheritsBD ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=InheritsBD(t[3],t[9],t[5])\n"
#-----------------------------------------------------CREATE TABLE--------------------------------------------------------------------
def p_createTable(t):
'createTable : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA'
t[0]= CreateTable(t[3],t[5])
h.reporteGramatical1 +="createTable ::= CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 += "t[0]= CreateTable(t[3],t[5])\n"
# --------------------------------------------------------------------------------------------------------------
# THE LIST WAS SPLIT INTO 2 METHODS TO HANDLE THE DATA
def p_creaColumna(t):
'''creaColumnas : creaColumnas COMA Columna
'''
t[1].append(t[3])
t[0] = t[1]
#print(t[0])
h.reporteGramatical1 +="creaColumnas ::= creaColumnas COMA Columna\n"
h.reporteGramatical2 +="t[1]\n"
def p_creaColumna_2(t):
'''creaColumnas : Columna
'''
t[0]=[t[1]]
h.reporteGramatical1 +="createTable ::= Columna\n"
h.reporteGramatical2 +="[t[1]]\n"
# --------------------------------------------------------------------------------------------------------------
#COLUMN PRODUCTIONS START HERE
def p_columna_1(t):
'Columna : ID tipo'
t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],None),OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION)
h.reporteGramatical1 +="Columna ::= ID tipo\n"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],None),OPERACION_RESTRICCION_COLUMNA.COLUMNASINRESTRICCION)"
def p_columna_2(t):
'Columna : ID tipo paramOpcional'
t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],t[3]),OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION)
h.reporteGramatical1 +="Columna ::= ID tipo paramOpcional"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(ColumnasTable(t[1],t[2],t[3]),OPERACION_RESTRICCION_COLUMNA.COLUMNACONRESTRICCION)\n"
def p_columna_3(t):
'Columna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
t[0]=TipoAtributoTable(RestriccionUnique(t[3]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO)
h.reporteGramatical1 +="Columna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(RestriccionUnique(t[3]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_ATRIBUTO)\n"
def p_columna_4(t):
'''Columna : constraintcheck
'''
t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)
h.reporteGramatical1 +="Columna ::= constraintcheck\n"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n"
def p_columna_5(t):
'Columna : checkinColumn'
t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)
h.reporteGramatical1 +="Columna ::= checkinColumn\n"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n"
def p_columna_6(t):
'Columna : primaryKey'
t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)
h.reporteGramatical1 +="Columna ::= primaryKey\n"
h.reporteGramatical2 +="t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n"
def p_columna_7(t):
'Columna : foreignKey'
t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY)
h.reporteGramatical1 +="Columna ::= foreingKey\n"
h.reporteGramatical2 += "t[0]=TipoAtributoTable(t[1],OPERACION_RESTRICCION_COLUMNA.FOREIGN_KEY)\n"
# --------------------------------------------------------------------------------------------------------------
#OPTIONAL COLUMN CONSTRAINT LIST STARTS HERE
def p_paramOpcional(t):
'''paramOpcional : paramOpcional paramopc
'''
t[1].append(t[2])
t[0] = t[1]
h.reporteGramatical1 +="paramOpcional ::= paramOpcional paramopc\n"
h.reporteGramatical2 +="t[0]=t[1]\n"
def p_paramOpcional_1(t):
'''paramOpcional : paramopc
'''
t[0] = [t[1]]
h.reporteGramatical1 +="paramOpcional ::= paramopc\n"
h.reporteGramatical2 +="t[0]=[t[1]]\n"
# --------------------------------------------------------------------------------------------------------------
#COLUMN CONSTRAINTS START HERE
def p_paramopc_1(t):
'''paramopc : DEFAULT final
| NULL
| NOT NULL
| UNIQUE
| PRIMARY KEY
'''
if t[1].upper() == "DEFAULT":
t[0] = TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT)
h.reporteGramatical1 +="paramopc ::= DEFAULT final\n"
h.reporteGramatical2 +="TipoRestriccion(RestriccionDefaul(t[2]),OPERACION_RESTRICCION_COLUMNA.DEFAULT)\n"
elif t[1].upper() == "NULL":
t[0] = TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL)
h.reporteGramatical1 +="paramopc ::= NULL\n"
h.reporteGramatical2 +="TipoRestriccion(RestriccionNull(1),OPERACION_RESTRICCION_COLUMNA.NULL)\n"
elif t[1].upper() == "NOT":
t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL)
h.reporteGramatical1 +="paramopc ::= NOT NULL\n"
h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionNotNull(1),OPERACION_RESTRICCION_COLUMNA.NOT_NULL)\n"
elif t[1].upper() == "UNIQUE":
t[0] = TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA)
h.reporteGramatical1 +="paramopc ::= UNIQUE\n"
h.reporteGramatical2 +="TipoRestriccion(RestriccionUniqueSimple(1),OPERACION_RESTRICCION_COLUMNA.UNIQUE_COLUMNA)\n"
elif t[1].upper() == "PRIMARY" and t[2].upper()=="KEY":
t[0] = TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)
h.reporteGramatical1 +="paramopc ::= PRIMARY KEY\n"
h.reporteGramatical2 +="TipoRestriccion(RestriccionPrimaryKeyColumn(1),OPERACION_RESTRICCION_COLUMNA.PRIMARY_KEY)\n"
else:
print("FFFFF")
# --------------------------------------------------------------------------------------------------------------
#CALL TO THE CHECK CONSTRAINTS
def p_paramopc_2(t):
'paramopc : constraintcheck'
t[0] = TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)
h.reporteGramatical1 +="paramopc ::= constraintcheck\n"
h.reporteGramatical2 +="t[0] = TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_CONSTRAINT)\n"
def p_paramopc_3(t):
'paramopc : checkinColumn'
t[0]=TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)
h.reporteGramatical1 +="paramopc ::= checkinColumn\n"
h.reporteGramatical2 +="t[0]=TipoRestriccion(t[1],OPERACION_RESTRICCION_COLUMNA.CHECK_SIMPLE)\n"
# --------------------------------------------------------------------------------------------------------------
#UNIQUE CONSTRAINT
def p_paramopc_4(t):
'paramopc : CONSTRAINT ID UNIQUE'
t[0] = TipoRestriccion(RestriccionConstraintUnique(t[2]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT)
h.reporteGramatical1 +="paramopc ::= CONSTRAINT ID UNIQUE\n"
h.reporteGramatical2 +="t[0] = TipoRestriccion(RestriccionConstraintUnique(t[2]),OPERACION_RESTRICCION_COLUMNA.UNIQUE_CONSTAINT)\n"
# --------------------------------------------------------------------------------------------------------------
#CHECK CONSTRAINT
def p_checkcolumna(t):
'checkinColumn : CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA'
t[0]=RestriccionCheck(t[3])
h.reporteGramatical1 +="checkinColumn ::= CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=RestriccionCheck(t[3])\n"
def p_constraintcheck(t):
'constraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA'
t[0]=RestriccionConstraintCheck(t[2],t[5])
h.reporteGramatical1 +="constraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=RestriccionConstraintCheck(t[2],t[5])\n"
def p_primaryKey(t):
'primaryKey : PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
t[0]=RestriccionPrimaryKey(t[4])
h.reporteGramatical1 +="primaryKey ::= PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=RestriccionPrimaryKey(t[4])\n"
def p_foreingkey(t):
'foreignKey : FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA'
t[0]=RestriccionForeingkey(t[7],t[4],t[9])
h.reporteGramatical1 +="foreignKey ::= FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=RestriccionForeingkey(t[7],t[4],t[9])\n"
#-----------------------------------------------------DATA TYPES--------------------------------------------------------------------
def p_tipo(t):
'''tipo : SMALLINT
| INTEGER
| BIGINT
| NUMERIC
| REAL
| DOUBLE PRECISION
| MONEY
| VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
| CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
| CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
| CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA
| TEXT
| BOOLEAN
| TIMESTAMP
| TIME
| INTERVAL
| DATE
| YEAR
| MONTH
| DAY
| HOUR
| MINUTE
| SECOND
'''
# --------------------------------------------------------------------------------------------------------------
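# Every branch below wraps the matched type keyword in TipoDatoColumna(name, size);
# within this rule only the character types (VARCHAR, CHAR, CHARACTER [VARYING]) carry
# an explicit size argument, the rest pass None.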
if t[1].upper()=="SMALLINT":
t[0] = TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="INTEGER":
t[0] = TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="BIGINT":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="NUMERIC":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="REAL":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="DOUBLE":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="MONEY":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="CHARACTER" and t[2].upper()=="VARYING":
t[0]=TipoDatoColumna(t[2],t[4])
h.reporteGramatical1 +="tipo ::= CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[4])\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="VARCHAR":
t[0]=TipoDatoColumna(t[1],t[3])
h.reporteGramatical1 +="tipo ::= VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="CHARACTER":
t[0]=TipoDatoColumna(t[1],t[3])
h.reporteGramatical1 +="tipo ::= CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="CHAR":
t[0]=TipoDatoColumna(t[1],t[3])
h.reporteGramatical1 +="tipo ::= CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],t[3])\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="TEXT":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="BOOLEAN":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="TIMESTAMP":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="TIME":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="INTERVAL":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="DATE":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="YEAR":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="MONT":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="HOUR":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="MINUT":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
elif t[1].upper()=="SECOND":
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
def p_tipo_2(t):
'tipo : DECIMAL'
t[0]=TipoDatoColumna(t[1],None)
h.reporteGramatical1 +="tipo ::= "+str(t[1])+"\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],None)\n"
# --------------------------------------------------------------------------------------------------------------
def p_tipo_3(t):
'tipo : DECIMAL PARENTESISIZQUIERDA ENTERO COMA ENTERO PARENTESISDERECHA '
val = str(t[3])+","+str(t[5])
t[0]=TipoDatoColumna(t[1],val)
h.reporteGramatical1 +"tipo ::= "+str(t[1])+"("+str(t[3])+","+str(t[5])+")\n"
h.reporteGramatical2 +="t[0]=TipoDatoColumna(t[1],val)"
#--------------------------------------------------- SELECT STATEMENT --------------------------------------------------------------
def p_select(t):
'''selectData : SELECT select_list FROM select_list WHERE search_condition opcionesSelect
| SELECT POR FROM select_list WHERE search_condition opcionesSelect
'''
if t[2]=='*':
h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition opcionesSelect \n"
print("/////////////////// SELECT CON ASTERISCO ////////////////////////")
print("Columnas: ",t[2])
print("Tablas: ",t[4])
print("Where: ",QueryWhere(t[6]))
print("Extras: ",t[7])
t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7])
else:
h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list WHERE search_condition opcionesSelect \n"
print("/////////////////// SELECT SIN ASTERISCO ////////////////////////")
print("Columnas: ",t[2])
print("Tablas: ",t[4])
print("Where: ",QueryWhere(t[6]))
print("Extras: ",t[7])
t[0]=Select5(t[2],t[4],QueryWhere(t[6]),t[7])
def p_select_1(t):
'''selectData : SELECT select_list FROM select_list WHERE search_condition
| SELECT POR FROM select_list WHERE search_condition
'''
if t[2]=='*':
h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list WHERE search_condition \n"
h.reporteGramatical2 +="t[0]=Select3(t[4],QueryWhere(t[6]))\n"
print("entra al select con where y asterisco/////////////////")
t[0]=Select3(t[4],QueryWhere(t[6]))
print("el objeto que sube")
print(t[0])
else:
h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list WHERE search_condition \n"
h.reporteGramatical2 +=" t[0]=Select4(t[2],t[4],QueryWhere(t[6]))\n"
print("entra al select con where y campos /////////////////")
print(t[2])
print(t[4])
print(t[6])
t[0]=Select4(t[2],t[4],QueryWhere(t[6]))
print(t[0])
# fully handled
def p_select_2(t):
'''selectData : SELECT select_list FROM select_list
| SELECT POR FROM select_list
'''
if t[2]=='*':
h.reporteGramatical1 +="selectData ::= SELECT POR FROM select_list \n"
h.reporteGramatical2 +=" t[0]=Select(1,t[4])\n"
print("entra a select_2 A")
#the 2 added as the second parameter means an asterisk (all columns) was supplied
t[0]=Select(1,2,t[4])
else:
# select type 4
h.reporteGramatical1 +="selectData ::= SELECT select_list FROM select_list \n"
h.reporteGramatical2 +=" t[0]=Select2(2,t[2],t[4])\n"
print("entra a select_2 B")
print(t[2])
print(t[4])
t[0]=Select2(2,t[2],t[4])
# fully handled
def p_select_3(t):
'''selectData : SELECT select_list
'''
h.reporteGramatical1 +="selectData ::= SELECT select_list \n"
h.reporteGramatical2 +=" t[0]=Select(1,t[2])\n"
#the second 1 is added when only values come in and no tables
t[0]=Select(1,1,t[2])
def p_opcionesSelect_1(t):
'''opcionesSelect : opcionesSelect opcionSelect
'''
h.reporteGramatical1 +="opcionesSelect ::= opcionesSelect opcionSelect\n"
print(t[1])
t[1].append(t[2])
t[0]=t[1]
def p_opcionesSelect_2(t):
'''opcionesSelect : opcionSelect
'''
h.reporteGramatical1 +="opcionesSelect ::= opcionSelect\n"
print(t[1])
t[0]=[t[1]]
def p_opcionesSelect_3(t):
'''opcionSelect : LIMIT operacion
| GROUP BY select_list
| HAVING select_list
| ORDER BY select_list
'''
if t[1].upper()=="LIMIT":
h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLimit(t[2])\n"
t[0]=ExpresionLimit(t[2])
elif t[1].upper()=="GROUP":
h.reporteGramatical1 +="opcionSelect ::= GROUP BY select_list\n"
h.reporteGramatical2 +="t[0]=ExpresionGroup(t[3])\n"
t[0]=ExpresionGroup(t[3])
elif t[1].upper()=="HAVING":
h.reporteGramatical1 +="opcionSelect ::= HAVING select_list\n"
h.reporteGramatical2 +="t[0]=ExpresionHaving(t[2])\n"
t[0]=ExpresionHaving(t[2])
elif t[1].upper()=="ORDER":
h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list\n"
h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],'ASC')\n"
t[0]=ExpresionOrder(t[3],'ASC')
def p_opcionesSelect_4(t):
'''opcionSelect : LIMIT operacion OFFSET operacion
| ORDER BY select_list ordenamiento
'''
if t[1].upper()=="LIMIT":
h.reporteGramatical1 +="opcionSelect ::= LIMIT operacion OFFSET operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionLimitOffset(t[2],t[4])\n"
t[0]=ExpresionLimitOffset(t[2],t[4])
elif t[1].upper()=="ORDER":
h.reporteGramatical1 +="opcionSelect ::= ORDER BY select_list ordenamiento\n"
h.reporteGramatical2 +="t[0]=ExpresionOrder(t[3],t[4])\n"
t[0]=ExpresionOrder(t[3],t[4])
def p_ordenamiento(t):
'''ordenamiento : ASC
| DESC '''
h.reporteGramatical1 +="ordenamiento ::= "+str(t[1])+"\n"
h.reporteGramatical2 +=" t[0]=str(t[1])\n"
t[0]=str(t[1])
def p_search_condition_2(t):
'search_condition : final NOT IN PARENTESISIZQUIERDA selectData PARENTESISDERECHA'
h.reporteGramatical1 +="search_condition ::= NOT search_condition\n"
print("esta condicion es del not con operacion******************")
print(t[1])
print(t[5])
t[0]=ExpresionNotIn(t[1],t[5])
#TODO: add this to the AST and the 3D report
def p_search_condition_5(t):
'search_condition : NOT EXISTS PARENTESISIZQUIERDA selectData PARENTESISDERECHA'
h.reporteGramatical1 +="search_condition ::= NOT search_condition\n"
print("esta condicion es del not con operacion******************")
print(t[4])
t[0]=ExpresionNotExists(t[4])
#TODO: add this to the AST and the 3D report
def p_search_condition_6(t):
'search_condition : EXISTS PARENTESISIZQUIERDA selectData PARENTESISDERECHA'
h.reporteGramatical1 +="search_condition ::= NOT search_condition\n"
print("esta condicion es del not con operacion******************")
print(t[3])
t[0]=ExpresionExists(t[3])
#TODO: add this to the AST and the 3D report
def p_search_condition_7(t):
'search_condition : final IN PARENTESISIZQUIERDA selectData PARENTESISDERECHA'
h.reporteGramatical1 +="search_condition ::= NOT search_condition\n"
print("esta condicion es del not con operacion******************")
print(t[1])
print(t[4])
t[0]=ExpresionIn(t[1],t[4])
# EVERYTHING BELOW IS ALREADY DONE
def p_search_condition_3(t):
'search_condition : operacion'
h.reporteGramatical1 +="search_condition ::= operacion\n"
h.reporteGramatical2 +=" t[0]=t[1]\n"
print("entra a la operacion del seach_condition++++++++++++++++++++++++++++++++++++++++")
print(t[1])
t[0]=t[1]
def p_search_condition_4(t):
'search_condition : PARENTESISIZQUIERDA search_condition PARENTESISDERECHA'
h.reporteGramatical1 +="search_condition ::= PARENTESISIZQUIERDA search_condition PARENTESISDERECHA\n"
h.reporteGramatical2 +=" t[0]=t[2]\n"
print("entra a la condicion con el parentesis")
print(t[2])
t[0]=t[2]
def p_select_list_1(t):
' select_list : select_list COMA operacion'
h.reporteGramatical1 +="select_list ::= select_list COMA operacion\n"
h.reporteGramatical2 +=" t[1].append(t[3])\nt[0]=t[1]\n"
print("Entra a select list COMA operacion****************************************")
t[1].append(t[3])
print(t[1])
t[0]=t[1]
def p_select_list_6(t):
' select_list : select_list COMA asignacion'
h.reporteGramatical1 +="select_list ::= select_list COMA asignacion\n"
h.reporteGramatical2 +=" t[0]=Asignacion(t[1],t[3])\n"
print(" entra al select_list COMA operacion-------------")
t[1].append(t[3])
t[0]=t[1]
print(t[0])
def p_select_list_7(t):
' select_list : asignacion'
h.reporteGramatical1 +="select_list ::= asignacion\n"
h.reporteGramatical2 +=" t[0]=t[1]\n"
print(" entra al select_list: asignacion-------------")
print(t[1])
t[0]=[t[1]]
def p_select_list_2(t):
'select_list : operacion'
h.reporteGramatical1 +="select_list ::= operacion\n"
h.reporteGramatical2 +=" t[0]=[ExpresionFuncionBasica(t[1])]\n"
print("select_list+++++++++++++++++++++++++")
print(t[1])
t[0]=[ExpresionFuncionBasica(t[1])]
def p_asignacion_1(t):
' asignacion : operacion AS operacion'
h.reporteGramatical1 +="select_list ::= select_list AS operacion\n"
h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[3])]\n"
print("entra a asignacion: operacion AS operacion")
t[0]=Asignacion(t[1],t[3])
def p_asignacion_2(t):
' asignacion : final final'
h.reporteGramatical1 +="select_list ::= final final\n"
h.reporteGramatical2 +=" t[0]=[Asignacion(t[1],t[2])]\n"
print(" entra al select_list de 2 finales-------------")
t[0]=Asignacion(t[1],t[2])
print(t[0])
def p_funcion_basica_4(t):
'funcionBasica : operacion BETWEEN operacion '
h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN operacion AND operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionBetween(t[1],t[3])\n"
print("entra al between con sus operaciones")
print(t[1])
print(t[3])
t[0]=ExpresionBetween(t[1],t[3])
def p_funcion_basica_7(t):
'funcionBasica : operacion NOT BETWEEN operacion'
h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN operacion AND operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionNotBetween(t[1],t[4])\n"
print("entra al NOT between con sus operaciones")
print(t[1])
print(t[3])
t[0]=ExpresionNotBetween(t[1],t[4])
def p_funcion_basica_8(t):
'funcionBasica : operacion BETWEEN SYMMETRIC operacion '
h.reporteGramatical1 +="funcionBasica ::= operacion BETWEEN SYMMETRIC operacion AND operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionBetweenSymmetric(t[1],t[4])\n"
t[0]=ExpresionBetweenSymmetric(t[1],t[4])
def p_funcion_basica_9(t):
'funcionBasica : operacion NOT BETWEEN SYMMETRIC operacion '
h.reporteGramatical1 +="funcionBasica ::= operacion NOT BETWEEN SYMMETRIC operacion AND operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionNotBetweenSymmetric(t[1],t[5])\n"
t[0]=ExpresionNotBetweenSymmetric(t[1],t[5])
def p_funcion_basica_10(t):
'''funcionBasica : operacion IS DISTINCT FROM operacion
'''
h.reporteGramatical1 +="funcionBasica ::= operacion IS DISTINCT FROM operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionIsDistinct(t[1],t[5])\n"
print("entra al IS DISTINCT ++++++++++++++++++")
t[0]=ExpresionIsDistinct(t[1],t[5])
def p_funcion_basica_11(t):
'''funcionBasica : operacion IS NOT DISTINCT FROM operacion'''
h.reporteGramatical1 +="funcionBasica ::= operacion IS NOT DISTINCT FROM operacion\n"
h.reporteGramatical2 +="t[0]=ExpresionIsNotDistinct(t[1],t[6])\n"
print("entra al IS NOT DISTINCT ++++++++++++++++++")
t[0]=ExpresionIsNotDistinct(t[1],t[6])
def p_tipos(t):
'''tipos : CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA'''
print("entra al enum++++++++++++++++++++++++++++++++")
h.reporteGramatical1 +="tipos ::= CREATE TYPE final AS ENUM PARENTESISIZQUIERDA select_list PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=Tipo(t[3],t[7])\n"
print(t[3])
print(t[7])
t[0]=Tipo(t[3],t[7])
#TODO: add these to the AST and the 3D report
#--------------------------------------------------------------------------------------------------------------------
# UNION / INTERSECT / EXCEPT SUPPORT
def p_combinacionSelects(t):
'''combinacionSelects : selectData UNION selectData
| selectData INTERSECT selectData
| selectData EXCEPT selectData
'''
print("*************************Entra a procesar el UNION********************")
if t[2].upper()=="UNION":
t[0]=QueryUnion(t[1],t[3])
elif t[2].upper()=="INTERSECT":
t[0]=QueryIntersect(t[1],t[3])
elif t[2].upper()=="EXCEPT":
t[0]=QueryExcept(t[1],t[3])
def p_select_4(t):
'''selectData : SELECT select_list FROM tipoJoin
| SELECT POR FROM tipoJoin
'''
if t[2]=='*':
print("entro al select * tipo join ++++++++++++++++++++++++++++++")
print(t[2])
t[0]=Select6(t[2],t[4])
else:
print("entro al select lista tipo join ++++++++++++++++++++++++++++++")
print(t[2])
t[0]=Select6(t[2],t[4])
def p_tipoJoin_1(t):
'''tipoJoin : select_list INNER JOIN select_list ON operacion
| select_list NATURAL INNER JOIN select_list
'''
if t[2].upper()=="INNER":
print("entro al tipoJoin1 INNER----------------------------------------------------")
print(t[1])
print(t[2])
print(t[4])
print(t[6])
t[0]=ExpresionJoinA(t[1],t[2],t[4],t[6])
elif t[2].upper()=="NATURAL":
print("entro al NATURAL ----------------------------------------------------")
print(t[1])
print(t[2])
print(t[3])
print(t[5])
t[0]=ExpresionJoinB(t[1],t[2],t[3],t[5])
def p_tipoJoin_2(t):
'''tipoJoin : select_list otroTipoJoin OUTER JOIN select_list ON operacion
| select_list NATURAL otroTipoJoin OUTER JOIN select_list
'''
if t[2].upper()=="NATURAL":
print("entro al tipoJoin2 NATURAL ----------------------------------------------------")
print(t[1])
print(t[2])
print(t[3])
print(t[4])
print(t[6])
t[0]=ExpresionJoinC(t[1],t[2],t[3],t[4],t[6])
else:
print("entro al tipoJoin2 ELSE ----------------------------------------------------")
print(t[1])
print(t[2])
print(t[3])
print(t[5])
print(t[7])
t[0]=ExpresionJoinD(t[1],t[2],t[3],t[5],t[7])
def p_otroTipoJoin(t):
''' otroTipoJoin : LEFT
| RIGHT
| FULL
'''
print("entra al otro tipo de join para su condicion")
t[0]=t[1]
def p_execFunction(t):
'execFunction : execOption ID PUNTOYCOMA'
h.reporteGramatical1 +="execFunction ::= execOption ID PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=execFunction(t[2])\n"
t[0]=execFunction(t[2])
def p_execFunction_1(t):
'execFunction : execOption ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="execFunction ::= execOption ID PARENTESISIZQUIERDA listaid PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=execFunctionParams(t[2],t[4])\n"
t[0]=execFunctionParams(t[2],t[4])
def p_execFunction_2(t):
'execFunction : execOption ID PARENTESISIZQUIERDA PARENTESISDERECHA PUNTOYCOMA'
h.reporteGramatical1 +="execFunction ::= execOption ID PARENTESISIZQUIERDA PARENTESISDERECHA PUNTOYCOMA\n"
h.reporteGramatical2 +="t[0]=execFunction(t[2])\n"
t[0]=execFunction(t[2])
def p_execOption_1(t):
'execOption : EXEC'
t[0] = t[1]
def p_execOption_2(t):
'execOption : EXECUTE'
t[0] = t[1]
#to handle syntax errors
#def p_error(t): #panic mode :v
# print("token error: ",t)
# print("Error sintáctico en '%s'" % t.value[0])
# print("Error sintáctico en '%s'" % t.value[1])
#def p_error(t): #panic mode :v
# while True:
# tok=parser.token()
# if not tok or tok.type==';':break
# parser.errok()
# return tok
def find_column(input, token):
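    # Return the 0-based column of a token by locating the last newline before
    # its lexer position (also prints the 1-based column for debugging).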
line_start = input.rfind('\n', 0, token.lexpos) + 1
print((token.lexpos - line_start) +1 )
return (token.lexpos - line_start)
def p_error(t):
print("token: '%s'" %t)
print("Error sintáctico en '%s' " % t.value)
#h.filapivote+=1
x=caden.splitlines()
filas=len(x)-1
print("filas que no cambian: ",filas)
if h.filapivote>0:
fila=(t.lineno-1)-h.filapivote*filas
else:
fila=(t.lineno-1)
h.filapivote+=1
h.errores+= "<tr><td>"+str(t.value)+"</td><td>"+str(fila)+"</td><td>"+str(find_column(caden,t))+"</td><td>SINTACTICO</td><td>el token no va aqui</td></tr>\n"
print("Error sintáctico fila '%s'" % fila)
print("Error sintáctico col '%s'" % find_column(caden,t))
if not t:
print("End of File!")
return
    # Read ahead looking for a closing ';' (PUNTOYCOMA) to resynchronise the parser
while True:
tok = parser.token() # Get the next token
if not tok or tok.type == 'PUNTOYCOMA':
break
parser.restart()
import ply.yacc as yacc
parser = yacc.yacc()
def parse(input) :
global caden
caden=""
caden=input
return parser.parse(input) |
py | 1a3e8ee574a50c7eabe890e9b54a83e16bae036c | import sys
from ethereum import transactions as t
from ethereum.abi import ContractTranslator
from ethereum._solidity import get_solidity
import rlp
solidity = get_solidity()
key = '7942db5f27595d040231a44b95de331d45eaa78cfa3f21663c95d4bbc97afbe4'
addr = 'ce7fb4c38949d7c09bd95197c3981ec8bb0638e5'
args, kwargs = [], {}
i = 0
while i < len(sys.argv):
if sys.argv[i][:2] == '--':
kwargs[sys.argv[i][2:]] = sys.argv[i+1]
i += 2
else:
args.append(sys.argv[i])
i += 1
adStorer_abi = solidity.mk_full_signature(open('one_phase_auction.sol').read() + open('two_phase_auction.sol').read() + open('adStorer.sol').read(), contract_name='adStorer')
ct = ContractTranslator(adStorer_abi)
nonce = int(kwargs['nonce'])
data = ct.encode('initialize', [240, 240, 240, 120, 50, 10])
o = '['
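# Build a JSON array of eight signed, RLP-encoded raw transactions (hex strings),
# one per consecutive nonce, each calling initialize() on the target contract.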
for i in range(8):
tx = t.Transaction(nonce, 60 * 10**9, 2500000, kwargs['address'], 0, data)
o += '"0x' + rlp.encode(tx.sign(key)).encode('hex') + '",'
nonce += 1
print o[:-1] + ']'
|
py | 1a3e8fe8a72130b465072141760cecf79f7e48eb | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteImagePipelineRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteImagePipeline')
self.set_method('POST')
def get_ImagePipelineId(self):
return self.get_query_params().get('ImagePipelineId')
def set_ImagePipelineId(self,ImagePipelineId):
self.add_query_param('ImagePipelineId',ImagePipelineId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TemplateTags(self):
return self.get_query_params().get('TemplateTag')
def set_TemplateTags(self, TemplateTags):
for depth1 in range(len(TemplateTags)):
if TemplateTags[depth1].get('Key') is not None:
self.add_query_param('TemplateTag.' + str(depth1 + 1) + '.Key', TemplateTags[depth1].get('Key'))
if TemplateTags[depth1].get('Value') is not None:
self.add_query_param('TemplateTag.' + str(depth1 + 1) + '.Value', TemplateTags[depth1].get('Value'))
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) |
py | 1a3e9057af6dce4d40704828169673a9dc64edd1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
from .rf_transforms import (
RandomHorizontalFlip3D,
RandomVerticalFlip3D,
Pad3D,
CalibrateMWPose,
GenerateHMS,
)
from .pc_transforms import (
SplitSourceRef,
Resampler,
FixedResampler,
RandomJitter,
RandomCrop,
RandomTransformSE3,
RandomTransformSE3_euler,
RandomRotatorZ,
ShufflePoints,
SetDeterministic,
)
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image, target=None):
size = self.get_size(image.size)
image = F.resize(image, size)
if target is None:
return image
target = target.resize(image.size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class RandomVerticalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.vflip(image)
target = target.transpose(1)
return image, target
class ColorJitter(object):
def __init__(self,
brightness=None,
contrast=None,
saturation=None,
hue=None,
):
self.color_jitter = torchvision.transforms.ColorJitter(
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,)
def __call__(self, image, target):
image = self.color_jitter(image)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image
return image, target
|
py | 1a3e90c801a8412583286bac91ab2acfcf48cc97 | # Forked from https://github.com/psobot/keynote-parser/blob/master/keynote_parser/codec.py
import struct
import snappy
from functools import partial
from numbers_parser.mapping import ID_NAME_MAP
from numbers_parser.exceptions import NotImplementedError
from google.protobuf.internal.decoder import _DecodeVarint32
from google.protobuf.json_format import MessageToDict
from numbers_parser.generated.TSPArchiveMessages_pb2 import ArchiveInfo
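# An .iwa file is a sequence of Snappy-compressed chunks; each chunk decompresses
# to one or more archive segments, i.e. a varint-length-prefixed ArchiveInfo
# protobuf followed by its payload messages.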
class IWAFile(object):
def __init__(self, chunks, filename=None):
self.chunks = chunks
self.filename = filename
@classmethod
def from_buffer(cls, data, filename=None):
try:
chunks = []
while data:
chunk, data = IWACompressedChunk.from_buffer(data, filename)
chunks.append(chunk)
return cls(chunks, filename)
except Exception as e: # pragma: no cover
if filename:
raise ValueError("Failed to deserialize " + filename) from e
else:
raise
def to_dict(self):
try:
return {"chunks": [chunk.to_dict() for chunk in self.chunks]}
except Exception as e: # pragma: no cover
if self.filename:
raise ValueError("Failed to serialize " + self.filename) from e
else:
raise
class IWACompressedChunk(object):
def __init__(self, archives):
self.archives = archives
def __eq__(self, other):
return self.archives == other.archives # pragma: no cover
@classmethod
def _decompress_all(cls, data):
while data:
header = data[:4]
first_byte = header[0]
if not isinstance(first_byte, int):
first_byte = ord(first_byte)
if first_byte != 0x00:
raise ValueError( # pragma: no cover
"IWA chunk does not start with 0x00! (found %x)" % first_byte
)
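            # The three bytes after the 0x00 marker are a little-endian chunk
            # length; pad with a zero byte so it can be unpacked as a uint32.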
unpacked = struct.unpack_from("<I", bytes(header[1:]) + b"\x00")
length = unpacked[0]
chunk = data[4 : 4 + length]
data = data[4 + length :]
try:
yield snappy.uncompress(chunk)
except Exception: # pragma: no cover
# Try to see if this data isn't compressed in the first place.
# If this data is still compressed, parsing it as Protobuf
# will almost definitely fail anyways.
yield chunk
@classmethod
def from_buffer(cls, data, filename=None):
data = b"".join(cls._decompress_all(data))
archives = []
while data:
archive, data = IWAArchiveSegment.from_buffer(data, filename)
archives.append(archive)
return cls(archives), None
def to_dict(self):
return {"archives": [archive.to_dict() for archive in self.archives]}
class ProtobufPatch(object):
def __init__(self, data):
self.data = data
def __eq__(self, other):
return self.data == other.data # pragma: no cover
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.data) # pragma: no cover
def to_dict(self):
return message_to_dict(self.data)
@classmethod
def FromString(cls, message_info, proto_klass, data):
        # Recent Numbers versions do not appear to store data this way
assert len(message_info.diff_field_path.path) != 1
return cls(proto_klass.FromString(data))
class IWAArchiveSegment(object):
def __init__(self, header, objects):
self.header = header
self.objects = objects
def __eq__(self, other):
return (
self.header == other.header and self.objects == other.objects
) # pragma: no cover
def __repr__(self):
return "<%s identifier=%s objects=%s>" % ( # pragma: no cover
self.__class__.__name__,
self.header.identifier,
repr(self.objects).replace("\n", " ").replace(" ", " "),
)
@classmethod
def from_buffer(cls, buf, filename=None):
archive_info, payload = get_archive_info_and_remainder(buf)
if not repr(archive_info):
raise ValueError(
"Segment doesn't seem to start with an ArchiveInfo!"
) # pragma: no cover
payloads = []
n = 0
for message_info in archive_info.message_infos:
try:
if message_info.type == 0 and archive_info.should_merge and payloads:
base_message = archive_info.message_infos[
message_info.base_message_index
]
klass = partial(
ProtobufPatch.FromString,
message_info,
ID_NAME_MAP[base_message.type],
)
else:
klass = ID_NAME_MAP[message_info.type]
except KeyError: # pragma: no cover
raise NotImplementedError(
"Don't know how to parse Protobuf message type "
+ str(message_info.type)
)
try:
message_payload = payload[n : n + message_info.length]
if hasattr(klass, "FromString"):
output = klass.FromString(message_payload)
else:
output = klass(message_payload)
except Exception as e: # pragma: no cover
raise ValueError(
"Failed to deserialize %s payload of length %d: %s"
% (klass, message_info.length, e)
)
payloads.append(output)
n += message_info.length
return cls(archive_info, payloads), payload[n:]
def to_dict(self):
return {
"header": header_to_dict(self.header),
"objects": [message_to_dict(message) for message in self.objects],
}
def message_to_dict(message):
if hasattr(message, "to_dict"):
return message.to_dict()
output = MessageToDict(message)
output["_pbtype"] = type(message).DESCRIPTOR.full_name
return output
def header_to_dict(message):
output = message_to_dict(message)
for message_info in output["messageInfos"]:
del message_info["length"]
return output
def get_archive_info_and_remainder(buf):
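    # A segment starts with a varint-encoded length followed by an ArchiveInfo
    # protobuf; return the parsed header and the remaining payload bytes.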
msg_len, new_pos = _DecodeVarint32(buf, 0)
n = new_pos
msg_buf = buf[n : n + msg_len]
n += msg_len
return ArchiveInfo.FromString(msg_buf), buf[n:]
|
py | 1a3e90e3a5e100dd0fbb85397642768dc6712cdf | from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
# Argument parser: the options accepted when running tools/train.py from the command line; argparse makes it easy to build a user-friendly CLI
def parse_args():
    parser = argparse.ArgumentParser(description='Train a detector')  # step 1: create the parser
    parser.add_argument('config', help='train config file path')  # step 2: add arguments
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
    args = parser.parse_args()  # step 3: parse the arguments
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
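# Example invocation (hypothetical config path):
#   python tools/train.py configs/faster_rcnn_r50_fpn_1x.py --gpus 1 --validate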
def main():
args = parse_args()
    cfg = Config.fromfile(args.config)  # see mmcv/utils/config.py: loads the config file passed on the command line into cfg
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
    if args.work_dir is not None:  # use the CLI work dir if given, otherwise keep the config default
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([('{}: {}'.format(k, v))
for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)  # build the model from the parameters in the config file
    datasets = [build_dataset(cfg.data.train)]  # build the training dataset
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
    # run training with the model, datasets and config; the implementation continues in mmdet/apis/train.py
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
py | 1a3e91f22912130acd76cecd9fda8121b2bd7d9a | import numpy as np
from numpy.testing import assert_array_almost_equal as assert_close
import scipy.ndimage
from skimage.feature import peak
def test_trivial_case():
trivial = np.zeros((25, 25))
peak_indices = peak.peak_local_max(trivial, min_distance=1, indices=True)
assert not peak_indices # inherent boolean-ness of empty list
peaks = peak.peak_local_max(trivial, min_distance=1, indices=False)
assert (peaks.astype(np.bool) == trivial).all()
def test_noisy_peaks():
peak_locations = [(7, 7), (7, 13), (13, 7), (13, 13)]
# image with noise of amplitude 0.8 and peaks of amplitude 1
image = 0.8 * np.random.random((20, 20))
for r, c in peak_locations:
image[r, c] = 1
peaks_detected = peak.peak_local_max(image, min_distance=5)
assert len(peaks_detected) == len(peak_locations)
for loc in peaks_detected:
assert tuple(loc) in peak_locations
def test_relative_threshold():
image = np.zeros((5, 5), dtype=np.uint8)
image[1, 1] = 10
image[3, 3] = 20
peaks = peak.peak_local_max(image, min_distance=1, threshold_rel=0.5)
assert len(peaks) == 1
assert_close(peaks, [(3, 3)])
def test_absolute_threshold():
image = np.zeros((5, 5), dtype=np.uint8)
image[1, 1] = 10
image[3, 3] = 20
peaks = peak.peak_local_max(image, min_distance=1, threshold_abs=10)
assert len(peaks) == 1
assert_close(peaks, [(3, 3)])
def test_constant_image():
image = 128 * np.ones((20, 20), dtype=np.uint8)
peaks = peak.peak_local_max(image, min_distance=1)
assert len(peaks) == 0
def test_flat_peak():
image = np.zeros((5, 5), dtype=np.uint8)
image[1:3, 1:3] = 10
peaks = peak.peak_local_max(image, min_distance=1)
assert len(peaks) == 4
def test_num_peaks():
image = np.zeros((7, 7), dtype=np.uint8)
image[1, 1] = 10
image[1, 3] = 11
image[1, 5] = 12
image[3, 5] = 8
image[5, 3] = 7
assert len(peak.peak_local_max(image, min_distance=1)) == 5
peaks_limited = peak.peak_local_max(image, min_distance=1, num_peaks=2)
assert len(peaks_limited) == 2
assert (1, 3) in peaks_limited
assert (1, 5) in peaks_limited
peaks_limited = peak.peak_local_max(image, min_distance=1, num_peaks=4)
assert len(peaks_limited) == 4
assert (1, 3) in peaks_limited
assert (1, 5) in peaks_limited
assert (1, 1) in peaks_limited
assert (3, 5) in peaks_limited
def test_reorder_labels():
np.random.seed(21)
image = np.random.uniform(size=(40, 60))
i, j = np.mgrid[0:40, 0:60]
labels = 1 + (i >= 20) + (j >= 30) * 2
labels[labels == 4] = 5
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 20), (20, 40)):
for jmin, jmax in ((0, 30), (30, 60)):
expected[imin:imax, jmin:jmax] = scipy.ndimage.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = (expected == image)
result = peak.peak_local_max(image, labels=labels, min_distance=1,
threshold_rel=0, footprint=footprint,
indices=False, exclude_border=False)
assert (result == expected).all()
def test_indices_with_labels():
np.random.seed(21)
image = np.random.uniform(size=(40, 60))
i, j = np.mgrid[0:40, 0:60]
labels = 1 + (i >= 20) + (j >= 30) * 2
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 20), (20, 40)):
for jmin, jmax in ((0, 30), (30, 60)):
expected[imin:imax, jmin:jmax] = scipy.ndimage.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = (expected == image)
result = peak.peak_local_max(image, labels=labels, min_distance=1,
threshold_rel=0, footprint=footprint,
indices=True, exclude_border=False)
assert (result == np.transpose(expected.nonzero())).all()
def test_ndarray_indices_false():
nd_image = np.zeros((5,5,5))
nd_image[2,2,2] = 1
peaks = peak.peak_local_max(nd_image, min_distance=1, indices=False)
assert (peaks == nd_image.astype(np.bool)).all()
def test_ndarray_exclude_border():
nd_image = np.zeros((5,5,5))
nd_image[[1,0,0],[0,1,0],[0,0,1]] = 1
nd_image[3,0,0] = 1
nd_image[2,2,2] = 1
expected = np.zeros_like(nd_image, dtype=np.bool)
expected[2,2,2] = True
result = peak.peak_local_max(nd_image, min_distance=2, indices=False)
assert (result == expected).all()
def test_empty():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(~ result)
def test_one_point():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
labels[5, 5] = 1
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == (labels == 1))
def test_adjacent_and_same():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5:6] = 1
labels[5, 5:6] = 1
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == (labels == 1))
def test_adjacent_and_different():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 6] = .5
labels[5, 5:6] = 1
expected = (image == 1)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
result = peak.peak_local_max(image, labels=labels,
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_not_adjacent_and_different():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 8] = .5
labels[image > 0] = 1
expected = (labels == 1)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_two_objects():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 15] = .5
labels[5, 5] = 1
labels[5, 15] = 2
expected = (labels > 0)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_adjacent_different_objects():
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 6] = .5
labels[5, 5] = 1
labels[5, 6] = 2
expected = (labels > 0)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_four_quadrants():
np.random.seed(21)
image = np.random.uniform(size=(40, 60))
i, j = np.mgrid[0:40, 0:60]
labels = 1 + (i >= 20) + (j >= 30) * 2
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 20), (20, 40)):
for jmin, jmax in ((0, 30), (30, 60)):
expected[imin:imax, jmin:jmax] = scipy.ndimage.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = (expected == image)
result = peak.peak_local_max(image, labels=labels, footprint=footprint,
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_disk():
'''regression test of img-1194, footprint = [1]
Test peak.peak_local_max when every point is a local maximum
'''
np.random.seed(31)
image = np.random.uniform(size=(10, 20))
footprint = np.array([[1]])
result = peak.peak_local_max(image, labels=np.ones((10, 20)),
footprint=footprint,
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result)
result = peak.peak_local_max(image, footprint=footprint)
assert np.all(result)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
py | 1a3e921f8df63b36e23b045b36525602c816d1d0 | from __future__ import absolute_import, print_function
"""
Command for starting up an authenticating reverse proxy for use in development.
Please, don't use me in production!
"""
import six.moves.BaseHTTPServer
from django.conf import settings
import getpass
import socket
from nsot.util.commands import NsotCommand, CommandError
class Command(NsotCommand):
help = "Start an authenticating reverse proxy for use in development."
def add_arguments(self, parser):
parser.add_argument(
"username",
nargs="?",
default=getpass.getuser(),
help="Username used for authentication.",
)
parser.add_argument(
"-a",
"--address",
type=str,
default=settings.NSOT_HOST,
help="Address to listen on.",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default="localhost",
help="Domain for user account.",
)
parser.add_argument(
"-H",
"--auth-header",
type=str,
default=settings.USER_AUTH_HEADER,
help="HTTP user auth header name.",
)
parser.add_argument(
"-P",
"--backend-port",
type=int,
default=settings.NSOT_PORT,
help="Port to proxy to.",
)
parser.add_argument(
"-p",
"--listen-port",
type=int,
default=settings.NSOT_PORT + 1,
help="Port to listen on.",
)
def handle(self, **options):
username = options.get("username")
try:
from mrproxy import UserProxyHandler
except ImportError:
raise SystemExit(
"mrproxy is required for the user proxy. Please see "
"README.rst."
)
class ServerArgs(object):
"""Argument container for http service."""
def __init__(self, backend_port, username, auth_header):
self.backend_port = backend_port
self.header = ["%s: %s" % (auth_header, username)]
username = "%s@%s" % (username, options.get("domain"))
address = options.get("address")
auth_header = options.get("auth_header")
backend_port = options.get("backend_port")
listen_port = options.get("listen_port")
# Try to start the server
try:
server = six.moves.BaseHTTPServer.HTTPServer(
(address, listen_port), UserProxyHandler
)
except socket.error as err:
raise CommandError(err)
else:
server.args = ServerArgs(backend_port, username, auth_header)
# Run until we hit ctrl-C
try:
print(
"Starting proxy on %s %s => %s, auth '%s: %s'"
% (address, backend_port, listen_port, auth_header, username)
)
server.serve_forever()
except KeyboardInterrupt:
print("Bye!")
|
py | 1a3e922af7abb6cde76e6b163ad0fb67b30c4720 | """Builder for websites."""
import os
import shutil
from regolith.builders.basebuilder import BuilderBase
from regolith.dates import get_dates
from regolith.fsclient import _id_key
from regolith.sorters import ene_date_key, position_key
from regolith.tools import (
all_docs_from_collection,
filter_publications,
filter_projects,
make_bibtex_file,
document_by_value,
dereference_institution,
)
class HtmlBuilder(BuilderBase):
"""Build HTML files for website"""
btype = "html"
def __init__(self, rc):
super().__init__(rc)
# TODO: get this from the RC
self.cmds = [
"root_index",
"people",
"projects",
"blog",
"jobs",
"abstracts",
"nojekyll",
"cname",
"finish",
]
def construct_global_ctx(self):
"""Constructs the global context"""
super().construct_global_ctx()
gtx = self.gtx
rc = self.rc
gtx["jobs"] = list(all_docs_from_collection(rc.client, "jobs"))
gtx["people"] = sorted(
all_docs_from_collection(rc.client, "people"),
key=position_key,
reverse=True,
)
gtx["abstracts"] = list(
all_docs_from_collection(rc.client, "abstracts")
)
gtx["group"] = document_by_value(
all_docs_from_collection(rc.client, "groups"), "name", rc.groupname
)
gtx["all_docs_from_collection"] = all_docs_from_collection
gtx["institutions"] = sorted(
all_docs_from_collection(rc.client, "institutions"), key=_id_key
)
def finish(self):
"""Move files over to their destination and remove them from the
source"""
# static
stsrc = os.path.join(
getattr(self.rc, "static_source", "templates"), "static"
)
stdst = os.path.join(self.bldir, "static")
if os.path.isdir(stdst):
shutil.rmtree(stdst)
if os.path.isdir(stsrc):
shutil.copytree(stsrc, stdst)
def root_index(self):
"""Render root index"""
self.render("root_index.html", "index.html", title="Home")
make_bibtex_file(list(all_docs_from_collection(self.rc.client,
"citations")),
pid='group',
person_dir=self.bldir,
)
def people(self):
"""Render people, former members, and each person"""
rc = self.rc
peeps_dir = os.path.join(self.bldir, "people")
former_peeps_dir = os.path.join(self.bldir, "former")
os.makedirs(peeps_dir, exist_ok=True)
os.makedirs(former_peeps_dir, exist_ok=True)
peeps = self.gtx["people"]
for p in peeps:
names = frozenset(p.get("aka", []) + [p["name"]])
pubs = filter_publications(
all_docs_from_collection(rc.client, "citations"),
names,
reverse=True,
bold=False,
)
bibfile = make_bibtex_file(
pubs, pid=p["_id"], person_dir=peeps_dir
)
ene = p.get("employment", []) + p.get("education", [])
ene.sort(key=ene_date_key, reverse=True)
for e in ene:
dereference_institution(e,
all_docs_from_collection(
rc.client, "institutions"))
projs = filter_projects(
all_docs_from_collection(rc.client, "projects"), names
)
for serve in p.get("service", []):
serve_dates = get_dates(serve)
date = serve_dates.get("date")
if not date:
date = serve_dates.get("end_date")
if not date:
date = serve_dates.get("begin_date")
serve["year"] = date.year
serve["month"] = date.month
sns = p.get("service", [])
sns.sort(key=ene_date_key, reverse=True)
p["service"] = sns
self.render(
"person.html",
os.path.join("people", p["_id"] + ".html"),
p=p,
title=p.get("name", ""),
pubs=pubs,
names=names,
bibfile=bibfile,
education_and_employment=ene,
projects=projs,
)
self.render(
"people.html", os.path.join("people", "index.html"), title="People"
)
self.render(
"former.html",
os.path.join("former", "index.html"),
title="Former Members",
)
def projects(self):
"""Render projects"""
rc = self.rc
projs = all_docs_from_collection(rc.client, "projects")
self.render(
"projects.html", "projects.html", title="Projects", projects=projs
)
def blog(self):
"""Render the blog and rss"""
rc = self.rc
blog_dir = os.path.join(self.bldir, "blog")
os.makedirs(blog_dir, exist_ok=True)
posts = list(all_docs_from_collection(rc.client, "blog"))
posts.sort(key=ene_date_key, reverse=True)
for post in posts:
self.render(
"blog_post.html",
os.path.join("blog", post["_id"] + ".html"),
post=post,
title=post["title"],
)
self.render(
"blog_index.html",
os.path.join("blog", "index.html"),
title="Blog",
posts=posts,
)
self.render("rss.xml", os.path.join("blog", "rss.xml"), items=posts)
def jobs(self):
"""Render the jobs and each job"""
jobs_dir = os.path.join(self.bldir, "jobs")
os.makedirs(jobs_dir, exist_ok=True)
for job in self.gtx["jobs"]:
self.render(
"job.html",
os.path.join("jobs", job["_id"] + ".html"),
job=job,
title="{0} ({1})".format(job["title"], job["_id"]),
)
self.render(
"jobs.html", os.path.join("jobs", "index.html"), title="Jobs"
)
def abstracts(self):
"""Render each abstract"""
abs_dir = os.path.join(self.bldir, "abstracts")
os.makedirs(abs_dir, exist_ok=True)
for ab in self.gtx["abstracts"]:
self.render(
"abstract.html",
os.path.join("abstracts", ab["_id"] + ".html"),
abstract=ab,
title="{0} {1} - {2}".format(
ab["firstname"], ab["lastname"], ab["title"]
),
)
def nojekyll(self):
"""Touches a nojekyll file in the build dir"""
with open(os.path.join(self.bldir, ".nojekyll"), "a+"):
pass
def cname(self):
"""Add CNAME"""
rc = self.rc
if not hasattr(rc, "cname"):
return
with open(
os.path.join(self.bldir, "CNAME"), "w", encoding="utf-8"
) as f:
f.write(rc.cname)
|
py | 1a3e92b6b63b0bd6c54d3d302724903505eba208 | """
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
def maxDepth(self, root):
"""
:type root: Node
:rtype: int
"""
if not root:
return 0
res = 1
for child in root.children:
            if not child:
                continue  # skip missing children instead of aborting the whole search
res = max(res, 1 + self.maxDepth(child))
return res |
py | 1a3e93779d08b0326671667e8512eca571b21fc2 | # Copyright (c) 2009 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from multiprocessing import Value
import libqtile.log_utils
import libqtile.core
import libqtile.utils
import libqtile.hook
import logging
from .conftest import BareConfig
# TODO: more tests required.
# 1. Check all hooks that can be fired
class Call:
def __init__(self, val):
self.val = val
def __call__(self, val):
self.val = val
@pytest.yield_fixture
def hook_fixture():
class Dummy:
pass
dummy = Dummy()
libqtile.log_utils.init_log(logging.CRITICAL, log_path=None, log_color=False)
libqtile.hook.init(dummy)
yield
libqtile.hook.clear()
def test_cannot_fire_unknown_event():
with pytest.raises(libqtile.utils.QtileError):
libqtile.hook.fire("unknown")
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber():
test = Call(0)
libqtile.core.manager.hook.subscribe.group_window_add(test)
libqtile.core.manager.hook.fire("group_window_add", 8)
assert test.val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_subscribers_can_be_added_removed():
test = Call(0)
libqtile.core.manager.hook.subscribe.group_window_add(test)
assert libqtile.core.manager.hook.subscriptions
libqtile.core.manager.hook.clear()
assert not libqtile.core.manager.hook.subscriptions
@pytest.mark.usefixtures("hook_fixture")
def test_can_unsubscribe_from_hook():
test = Call(0)
libqtile.core.manager.hook.subscribe.group_window_add(test)
libqtile.core.manager.hook.fire("group_window_add", 3)
assert test.val == 3
libqtile.core.manager.hook.unsubscribe.group_window_add(test)
libqtile.core.manager.hook.fire("group_window_add", 4)
assert test.val == 3
def test_can_subscribe_to_startup_hooks(qtile_nospawn):
config = BareConfig
self = qtile_nospawn
self.startup_once_calls = Value('i', 0)
self.startup_calls = Value('i', 0)
self.startup_complete_calls = Value('i', 0)
def inc_startup_once_calls():
self.startup_once_calls.value += 1
def inc_startup_calls():
self.startup_calls.value += 1
def inc_startup_complete_calls():
self.startup_complete_calls.value += 1
libqtile.core.manager.hook.subscribe.startup_once(inc_startup_once_calls)
libqtile.core.manager.hook.subscribe.startup(inc_startup_calls)
libqtile.core.manager.hook.subscribe.startup_complete(inc_startup_complete_calls)
self.start(config)
self.start_qtile = True
assert self.startup_once_calls.value == 1
assert self.startup_calls.value == 1
assert self.startup_complete_calls.value == 1
# TODO Restart and check that startup_once doesn't fire again
@pytest.mark.usefixtures('hook_fixture')
def test_can_update_by_selection_change(qtile):
test = Call(0)
libqtile.core.manager.hook.subscribe.selection_change(test)
libqtile.core.manager.hook.fire('selection_change', 'hello')
assert test.val == 'hello'
@pytest.mark.usefixtures('hook_fixture')
def test_can_call_by_selection_notify(qtile):
test = Call(0)
libqtile.core.manager.hook.subscribe.selection_notify(test)
libqtile.core.manager.hook.fire('selection_notify', 'hello')
assert test.val == 'hello'
|
py | 1a3e937a4330b1dc35a1d7382d08696b0947c522 | # -*- coding: utf-8 -*-
import json
import os.path
import sys
import yaml
from lemoncheesecake.project import Project
class MyProject(Project):
def build_report_title(self):
with open(os.path.join(os.path.dirname(__file__), "docker-compose.yml")) as compose_file:
compose = yaml.load(compose_file, Loader=yaml.FullLoader)
echo_image_name = compose["services"]["echo"]["image"]
echo_image_version = echo_image_name.replace("echoprotocol/echo:", "")
return "ECHO tests (ECHO v. {})".format(echo_image_version)
project_dir = os.path.dirname(__file__)
sys.path.append(project_dir)
project = MyProject(project_dir)
project.metadata_policy.add_property_rule("main", "type", on_suite=True, required=False)
project.metadata_policy.add_property_rule("positive", "type", on_suite=True, required=False)
project.metadata_policy.add_property_rule("negative", "type", on_suite=True, required=False)
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), "resources")
genesis_path = "genesis.json" if "GENESIS_FILE" not in os.environ else os.environ["GENESIS_FILE"]
GENESIS = json.load(open(os.path.join(os.path.dirname(__file__), genesis_path)))
if "ROPSTEN" in os.environ and os.environ["ROPSTEN"].lower() != "false":
ROPSTEN = True
else:
ROPSTEN = False
if "DEBUG" in os.environ and os.environ["DEBUG"].lower() != "false":
DEBUG = True
else:
DEBUG = False
if "BASE_URL" not in os.environ:
BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"]
else:
BASE_URL = os.environ["BASE_URL"]
if "WALLET_URL" not in os.environ:
WALLET_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["WALLET_URL"]
else:
WALLET_URL = os.environ["WALLET_URL"]
if "ETHEREUM_URL" not in os.environ:
ETHEREUM_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["ETHEREUM_URL"]
else:
ETHEREUM_URL = os.environ["ETHEREUM_URL"]
if "BITCOIN_URL" not in os.environ:
BITCOIN_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BITCOIN_URL"]
else:
BITCOIN_URL = os.environ["BITCOIN_URL"]
if "ETHRPC_URL" not in os.environ:
ETHRPC_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["ETHRPC_URL"]
else:
ETHRPC_URL = os.environ["ETHRPC_URL"]
if "TESTRPC_URL" not in os.environ:
TESTRPC_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["TESTRPC_URL"]
else:
TESTRPC_URL = os.environ["TESTRPC_URL"]
if "NATHAN_PK" not in os.environ:
NATHAN_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["NATHAN_PK"]
else:
NATHAN_PK = os.environ["NATHAN_PK"]
if "INIT0_PK" not in os.environ:
INIT0_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT0_PK"]
else:
INIT0_PK = os.environ["INIT0_PK"]
if "INIT1_PK" not in os.environ:
INIT1_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT1_PK"]
else:
INIT1_PK = os.environ["INIT1_PK"]
if "INIT2_PK" not in os.environ:
INIT2_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT2_PK"]
else:
INIT2_PK = os.environ["INIT2_PK"]
if "INIT3_PK" not in os.environ:
INIT3_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT3_PK"]
else:
INIT3_PK = os.environ["INIT3_PK"]
if "INIT4_PK" not in os.environ:
INIT4_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT4_PK"]
else:
INIT4_PK = os.environ["INIT4_PK"]
ECHO_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "echo_operations.json")))
ECHO_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "echo_contracts.json")))
WALLETS = os.path.join(RESOURCES_DIR, "wallets.json")
UTILS = os.path.join(RESOURCES_DIR, "utils.json")
ECHO_INITIAL_BALANCE = int(GENESIS["initial_balances"][0]["amount"])
ECHO_ASSET_SYMBOL = GENESIS["initial_balances"][0]["asset_symbol"]
INITIAL_ACCOUNTS = GENESIS["initial_accounts"]
INITIAL_COMMITTEE_CANDIDATES = GENESIS["initial_committee_candidates"]
INITIAL_ACCOUNTS_COUNT = len(INITIAL_ACCOUNTS)
INITIAL_ACCOUNTS_NAMES = []
for i in range(INITIAL_ACCOUNTS_COUNT):
INITIAL_ACCOUNTS_NAMES.append(INITIAL_ACCOUNTS[i]["name"])
INITIAL_COMMITTEE_ETH_ADDRESSES = []
for i, initial_committee_candidate in enumerate(INITIAL_COMMITTEE_CANDIDATES):
if initial_committee_candidate["owner_name"] == INITIAL_ACCOUNTS_NAMES[i]:
INITIAL_COMMITTEE_ETH_ADDRESSES.append(initial_committee_candidate["eth_address"])
ACCOUNT_PREFIX = "account"
DEFAULT_ACCOUNTS_COUNT = 1000
MAIN_TEST_ACCOUNT_COUNT = 1
BLOCK_RELEASE_INTERVAL = 5
BLOCKS_NUM_TO_WAIT = 10
REQUIRED_DEPOSIT_AMOUNT = GENESIS["initial_parameters"]["committee_frozen_balance_to_activate"]
UNFREEZE_DURATION_SECONDS = GENESIS["initial_parameters"]["committee_balance_unfreeze_duration_seconds"]
BASE_ASSET_SYMBOL, ETH_ASSET_SYMBOL = "ECHO", "EETH"
ETH_ASSET_ID = GENESIS["initial_parameters"]["sidechain_config"]["ETH_asset_id"]
BTC_ASSET_ID = GENESIS["initial_parameters"]["sidechain_config"]["BTC_asset_id"]
ETH_CONTRACT_ADDRESS = "0x" + GENESIS["initial_parameters"]["sidechain_config"]["eth_contract_address"]
UNPAID_FEE_METHOD = "0x19c4518a"
COMMITTEE = "0x130f679d"
SATOSHI_PRECISION = 100000000
GAS_PRICE = GENESIS["initial_parameters"]["sidechain_config"]["gas_price"]
MIN_ETH_WITHDRAW_FEE = GENESIS["initial_parameters"]["sidechain_config"]["eth_withdrawal_fee"]
MIN_ETH_WITHDRAW = GENESIS["initial_parameters"]["sidechain_config"]["eth_withdrawal_min"]
SATOSHI_PER_BYTE = GENESIS["initial_parameters"]["sidechain_config"]["satoshis_per_byte"]
BTC_FEE = GENESIS["initial_parameters"]["sidechain_config"]["btc_deposit_withdrawal_fee"]
BTC_WITHDRAWAL_MIN = GENESIS["initial_parameters"]["sidechain_config"]["btc_deposit_withdrawal_min"]
ETHEREUM_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_transactions.json")))
ETHEREUM_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_contracts.json")))
with open(".env") as env_file:
GANACHE_PK = (env_file.readline().split('RPC_ACCOUNT=')[1]).split(",")[0]
with open(".env") as env_file:
ROPSTEN_PK = env_file.readlines()[-1].split('ROPSTEN_PRIVATE_KEY=')[1]
|
py | 1a3e938db0cb38c32a9971aef634f536cc0c2ccb | "Script to add SimPizza to Haldis"
from app import db
from models import Location, Product
pizzas = [
"Bolognese de luxe",
"Hawaï",
"Popeye",
"Pepperoni",
"Seafood",
"Hot pizzaaah!!!",
"Salmon delight",
"Full option",
"Pitza kebab",
"Multi cheese",
"4 Seasons",
"Mega fish",
"Creamy multi cheese",
"Green fiësta",
"Chicken bbq",
"Funky chicken",
"Veggie",
"Meat lovers",
"Scampi mampi",
"Tabasco",
"Chicken time",
"Meatballs",
"Tuna",
"Anchovy",
"Calzone",
"Bbq meatballs",
"Creamy chicken",
"Hot bolognese",
]
def add() -> None:
"Add Simpizza to the database"
simpizza = Location()
simpizza.configure(
"Sim-pizza",
"De Pintelaan 252 9000 Gent",
"tel: 09/321.02.00",
"http://simpizza.be",
)
db.session.add(simpizza)
for pizza in pizzas:
entry = Product()
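        # 1195 is presumably the price in euro cents (EUR 11.95)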
entry.configure(simpizza, pizza, 1195)
db.session.add(entry)
|
py | 1a3e949bfac42649ee5c57412337fe78144b7486 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import time
import ctypes
from builtins import range
import s1ap_types
import s1ap_wrapper
class TestMultipleEnbPartialReset(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_multiple_enb_partial_reset(self):
""" Multi eNB + attach 1 UE + s1ap partial reset + detach """
""" Note: Before execution of this test case,
make sure that following steps are correct
1. Configure same plmn and tac in both MME and s1ap tester
2. How to configure plmn and tac in MME:
a. Set mcc and mnc in gateway.mconfig for mme service
b. Set tac in gateway.mconfig for mme service
c. Restart MME service
3. How to configure plmn and tac in s1ap tester,
a. For multi-eNB test case, configure plmn and tac from test case.
In each multi-eNB test case, set plmn, plmn length and tac
in enb_list
b. For single eNB test case, configure plmn and tac in nbAppCfg.txt
"""
# column is an enb parameter, row is number of enbs
""" Cell Id, Tac, EnbType, PLMN Id, PLMN length """
enb_list = [[1, 1, 1, "00101", 5],
[2, 1, 1, "00101", 5],
[3, 1, 1, "00101", 5],
[4, 1, 1, "00101", 5],
[5, 1, 1, "00101", 5]]
self._s1ap_wrapper.multiEnbConfig(len(enb_list), enb_list)
time.sleep(2)
ue_ids = []
num_ues = 1
self._s1ap_wrapper.configUEDevice(num_ues)
for _ in range(num_ues):
req = self._s1ap_wrapper.ue_req
print("************************* Calling attach for UE id ",
req.ue_id)
self._s1ap_wrapper.s1_util.attach(
req.ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
ue_ids.append(req.ue_id)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# Trigger eNB Reset
# Add delay to ensure S1APTester sends attach partial before sending
# eNB Reset Request
time.sleep(0.5)
print("************************* Sending eNB Partial Reset Request")
reset_req = s1ap_types.ResetReq()
reset_req.rstType = s1ap_types.resetType.PARTIAL_RESET.value
reset_req.cause = s1ap_types.ResetCause()
reset_req.cause.causeType = \
s1ap_types.NasNonDelCauseType.TFW_CAUSE_MISC.value
# Set the cause to MISC.hardware-failure
reset_req.cause.causeVal = 3
reset_req.r = s1ap_types.R()
reset_req.r.partialRst = s1ap_types.PartialReset()
reset_req.r.partialRst.numOfConn = num_ues
reset_req.r.partialRst.ueIdLst = (
ctypes.c_ubyte * reset_req.r.partialRst.numOfConn
)()
for indx in range(reset_req.r.partialRst.numOfConn):
reset_req.r.partialRst.ueIdLst[indx] = ue_ids[indx]
print(
"Reset_req.r.partialRst.ueIdLst[indx]",
reset_req.r.partialRst.ueIdLst[indx],
indx,
)
print("ue_ids", ue_ids)
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.RESET_REQ, reset_req)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(response.msg_type, s1ap_types.tfwCmd.RESET_ACK.value)
# Trigger detach request
for ue in ue_ids:
print("************************* Calling detach for UE id ", ue)
# self._s1ap_wrapper.s1_util.detach(
# ue, detach_type, wait_for_s1)
self._s1ap_wrapper.s1_util.detach(
ue, s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
|
py | 1a3e95083572b92f6750b2142a090755f7ec1e15 | from dataclasses import dataclass
from numbers import Number
from typing import Union
from gretel_client.transformers.masked_restore import (
MaskedRestoreTransformerConfig,
MaskedRestoreTransformer,
)
from gretel_client.transformers.transformers.fpe_base import FpeBase, FpeBaseConfig
FPE_XFORM_CHAR = "0"
@dataclass(frozen=True)
class FpeStringConfig(MaskedRestoreTransformerConfig, FpeBaseConfig):
"""
FpeString transformer applies a format preserving encryption as defined by https://www.nist.gov/ to the data value.
The encryption works on strings. The result is stateless and given the correct key, the original
value can be restored.
Args:
radix: Base from 2 to 62, determines base of incoming data types. Base2 = binary, Base62 = alphanumeric
including upper and lower case characters.
secret: 256bit AES encryption string specified as 64 hexadecimal characters.
mask: An optional list of ``StringMask`` objects. If provided only the parts of the string defined by the masks
will be encrypted.
"""
class FpeString(MaskedRestoreTransformer, FpeBase):
config_class = FpeStringConfig
def __init__(self, config: FpeStringConfig):
super().__init__(config)
def _transform(self, value: Union[Number, str]) -> Union[Number, str]:
return FpeBase._transform(self, value)
def _restore(self, value: Union[Number, str]) -> Union[Number, str]:
return FpeBase._restore(self, value)
|
py | 1a3e96471d46eff867e2c185461d34519a4750dd | from toontown.toonbase.ToonBaseGlobal import *
from panda3d.core import *
from panda3d.toontown import *
from toontown.toonbase.ToontownGlobals import *
import random
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.actor import Actor
import ToonInteriorColors
from toontown.hood import ZoneUtil
class DistributedPetshopInterior(DistributedObject.DistributedObject):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.dnaStore = cr.playGame.dnaStore
def generate(self):
DistributedObject.DistributedObject.generate(self)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.setup()
def randomDNAItem(self, category, findFunc):
codeCount = self.dnaStore.getNumCatalogCodes(category)
index = self.randomGenerator.randint(0, codeCount - 1)
code = self.dnaStore.getCatalogCode(category, index)
return findFunc(code)
def replaceRandomInModel(self, model):
baseTag = 'random_'
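        # Placeholder nodes are named random_<k1><k2><n>_<category>: k1 'm' swaps
        # in a random model, 't' a random texture; k2 'r' recurses into the new
        # node, 'c' applies a random colour from the hood palette.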
npc = model.findAllMatches('**/' + baseTag + '???_*')
for i in xrange(npc.getNumPaths()):
np = npc.getPath(i)
name = np.getName()
b = len(baseTag)
category = name[b + 4:]
key1 = name[b]
key2 = name[b + 1]
if key1 == 'm':
model = self.randomDNAItem(category, self.dnaStore.findNode)
newNP = model.copyTo(np)
if key2 == 'r':
self.replaceRandomInModel(newNP)
elif key1 == 't':
texture = self.randomDNAItem(category, self.dnaStore.findTexture)
np.setTexture(texture, 100)
newNP = np
if key2 == 'c':
if category == 'TI_wallpaper' or category == 'TI_wallpaper_border':
self.randomGenerator.seed(self.zoneId)
newNP.setColorScale(self.randomGenerator.choice(self.colors[category]))
else:
newNP.setColorScale(self.randomGenerator.choice(self.colors[category]))
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def chooseDoor(self):
doorModelName = 'door_double_round_ul'
if doorModelName[-1:] == 'r':
doorModelName = doorModelName[:-1] + 'l'
else:
doorModelName = doorModelName[:-1] + 'r'
door = self.dnaStore.findNode(doorModelName)
return door
def setup(self):
self.dnaStore = base.cr.playGame.dnaStore
self.randomGenerator = random.Random()
self.randomGenerator.seed(self.zoneId)
self.interior = loader.loadModel('phase_4/models/modules/PetShopInterior')
self.interior.reparentTo(render)
self.fish = Actor.Actor('phase_4/models/props/interiorfish-zero', {'swim': 'phase_4/models/props/interiorfish-swim'})
self.fish.reparentTo(self.interior)
self.fish.setColorScale(0.8, 0.9, 1, 0.8)
self.fish.setScale(0.8)
self.fish.setPos(0, 6, -4)
self.fish.setPlayRate(0.7, 'swim')
self.fish.loop('swim')
hoodId = ZoneUtil.getCanonicalHoodId(self.zoneId)
self.colors = ToonInteriorColors.colors[hoodId]
self.replaceRandomInModel(self.interior)
door = self.chooseDoor()
doorOrigin = render.find('**/door_origin;+s')
doorNP = door.copyTo(doorOrigin)
doorOrigin.setScale(0.8, 0.8, 0.8)
doorOrigin.setPos(doorOrigin, 0, -0.25, 0)
doorColor = self.randomGenerator.choice(self.colors['TI_door'])
DNADoor.setupDoor(doorNP, self.interior, doorOrigin, self.dnaStore, str(self.block), doorColor)
doorFrame = doorNP.find('door_*_flat')
doorFrame.wrtReparentTo(self.interior)
doorFrame.setColor(doorColor)
del self.colors
del self.dnaStore
del self.randomGenerator
self.interior.flattenMedium()
def disable(self):
self.fish.stop()
self.fish.cleanup()
del self.fish
self.interior.removeNode()
del self.interior
DistributedObject.DistributedObject.disable(self)
|
py | 1a3e976c56b6ca3f8d08750266a886bac2ffe466 | import cv2
import cPickle as pickle
import scipy.io
import numpy as np
import os
import sys
import random
from utils import slice_list
SHOW_IMAGES = False
FOLDS = 3
DATA_FRAGMENT = -1
BOARD_FILL_COLOR = 1e-5
def get_image_pack_fn(key):
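  # key encodes dataset and fold: 'g<fold>' for Gehler, 'c<camera><fold>' for Cheng.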
ds = key[0]
if ds == 'g':
fold = int(key[1])
return GehlerDataSet().get_image_pack_fn(fold)
elif ds == 'c':
camera = int(key[1])
fold = int(key[2])
return ChengDataSet(camera).get_image_pack_fn(fold)
elif ds == 'm':
assert False
class ImageRecord:
def __init__(self, dataset, fn, illum, mcc_coord, img, extras=None):
self.dataset = dataset
self.fn = fn
self.illum = illum
self.mcc_coord = mcc_coord
# BRG images
self.img = img
self.extras = extras
def __repr__(self):
return '[%s, %s, (%f, %f, %f)]' % (self.dataset, self.fn, self.illum[0],
self.illum[1], self.illum[2])
class DataSet:
def get_subset_name(self):
return ''
def get_directory(self):
return 'data/' + self.get_name() + '/'
def get_img_directory(self):
return 'data/' + self.get_name() + '/'
def get_meta_data_fn(self):
return self.get_directory() + self.get_subset_name() + 'meta.pkl'
def dump_meta_data(self, meta_data):
print 'Dumping data =>', self.get_meta_data_fn()
print ' Total records:', sum(map(len, meta_data))
print ' Slices:', map(len, meta_data)
with open(self.get_meta_data_fn(), 'wb') as f:
pickle.dump(meta_data, f, protocol=-1)
print 'Dumped.'
def load_meta_data(self):
with open(self.get_meta_data_fn()) as f:
return pickle.load(f)
def get_image_pack_fn(self, fold):
return self.get_directory() + self.get_subset_name(
) + 'image_pack.%d.pkl' % fold
def dump_image_pack(self, image_pack, fold):
with open(self.get_image_pack_fn(fold), 'wb') as f:
pickle.dump(image_pack, f, protocol=-1)
def load_image_pack(self, fold):
with open(self.get_meta_data_fn()) as f:
return pickle.load(f)
def regenerate_image_pack(self, meta_data, fold):
image_pack = []
for i, r in enumerate(meta_data):
print 'Processing %d/%d\r' % (i + 1, len(meta_data)),
sys.stdout.flush()
r.img = self.load_image_without_mcc(r)
if SHOW_IMAGES:
cv2.imshow('img',
cv2.resize(
np.power(r.img / 65535., 1.0 / 3.2), (0, 0),
fx=0.25,
fy=0.25))
il = r.illum
if len(il.shape) >= 3:
cv2.imshow('Illum', il)
cv2.waitKey(0)
image_pack.append(r)
print
self.dump_image_pack(image_pack, fold)
def regenerate_image_packs(self):
meta_data = self.load_meta_data()
print 'Dumping image packs...'
print '%s folds found' % len(meta_data)
for f, m in enumerate(meta_data):
self.regenerate_image_pack(m, f)
def get_folds(self):
return FOLDS
class GehlerDataSet(DataSet):
def get_name(self):
return 'gehler'
def regenerate_meta_data(self):
meta_data = []
print "Loading and shuffle fn_and_illum[]"
ground_truth = scipy.io.loadmat(self.get_directory() + 'ground_truth.mat')[
'real_rgb']
ground_truth /= np.linalg.norm(ground_truth, axis=1)[..., np.newaxis]
filenames = sorted(os.listdir(self.get_directory() + 'images'))
folds = scipy.io.loadmat(self.get_directory() + 'folds.mat')
filenames2 = map(lambda x: str(x[0][0][0]), folds['Xfiles'])
#print filenames
#print filenames2
for i in range(len(filenames)):
assert filenames[i][:-4] == filenames2[i][:-4]
for i in range(len(filenames)):
fn = filenames[i]
mcc_coord = self.get_mcc_coord(fn)
meta_data.append(
ImageRecord(
dataset=self.get_name(),
fn=fn,
illum=ground_truth[i],
mcc_coord=mcc_coord,
img=None))
if DATA_FRAGMENT != -1:
meta_data = meta_data[:DATA_FRAGMENT]
print 'Warning: using only first %d images...' % len(meta_data)
meta_data_folds = [[], [], []]
for i in range(FOLDS):
fold = list(folds['te_split'][0][i][0])
print len(fold)
for j in fold:
meta_data_folds[i].append(meta_data[j - 1])
for i in range(3):
print 'Fold', i
print map(lambda m: m.fn, meta_data_folds[i])
print sum(map(len, meta_data_folds))
assert sum(map(len, meta_data_folds)) == len(filenames)
for i in range(3):
assert set(meta_data_folds[i]) & set(meta_data_folds[(i + 1) % 3]) == set(
)
self.dump_meta_data(meta_data_folds)
def get_mcc_coord(self, fn):
# Note: relative coord
with open(self.get_directory() + 'coordinates/' + fn.split('.')[0] +
'_macbeth.txt', 'r') as f:
lines = f.readlines()
width, height = map(float, lines[0].split())
scale_x = 1 / width
scale_y = 1 / height
lines = [lines[1], lines[2], lines[4], lines[3]]
polygon = []
for line in lines:
line = line.strip().split()
x, y = (scale_x * float(line[0])), (scale_y * float(line[1]))
polygon.append((x, y))
return np.array(polygon, dtype='float32')
def load_image(self, fn):
file_path = self.get_img_directory() + '/images/' + fn
raw = np.array(cv2.imread(file_path, -1), dtype='float32')
if fn.startswith('IMG'):
# 5D3 images
black_point = 129
else:
black_point = 1
raw = np.maximum(raw - black_point, [0, 0, 0])
return raw
def load_image_without_mcc(self, r):
raw = self.load_image(r.fn)
img = (np.clip(raw / raw.max(), 0, 1) * 65535.0).astype(np.uint16)
polygon = r.mcc_coord * np.array([img.shape[1], img.shape[0]])
polygon = polygon.astype(np.int32)
cv2.fillPoly(img, [polygon], (BOARD_FILL_COLOR,) * 3)
return img
class ChengDataSet(DataSet):
def __init__(self, camera_id):
camera_names = [
'Canon1DsMkIII', 'Canon600D', 'FujifilmXM1', 'NikonD5200',
'OlympusEPL6', 'PanasonicGX1', 'SamsungNX2000', 'SonyA57'
]
self.camera_name = camera_names[camera_id]
def get_subset_name(self):
return self.camera_name + '-'
def get_name(self):
return 'cheng'
def regenerate_meta_data(self):
meta_data = []
ground_truth = scipy.io.loadmat(self.get_directory() + 'ground_truth/' +
self.camera_name + '_gt.mat')
illums = ground_truth['groundtruth_illuminants']
darkness_level = ground_truth['darkness_level']
saturation_level = ground_truth['saturation_level']
cc_coords = ground_truth['CC_coords']
illums /= np.linalg.norm(illums, axis=1)[..., np.newaxis]
filenames = sorted(os.listdir(self.get_directory() + 'images'))
filenames = filter(lambda f: f.startswith(self.camera_name), filenames)
extras = {
'darkness_level': darkness_level,
'saturation_level': saturation_level
}
for i in range(len(filenames)):
fn = filenames[i]
y1, y2, x1, x2 = cc_coords[i]
mcc_coord = np.array([(x1, y1), (x1, y2), (x2, y2), (x2, y1)])
meta_data.append(
ImageRecord(
dataset=self.get_name(),
fn=fn,
illum=illums[i],
mcc_coord=mcc_coord,
img=None,
extras=extras))
random.shuffle(meta_data)
if DATA_FRAGMENT != -1:
meta_data = meta_data[:DATA_FRAGMENT]
print 'Warning: using only first %d images...' % len(meta_data)
meta_data = slice_list(meta_data, [1] * self.get_folds())
self.dump_meta_data(meta_data)
def load_image(self, fn, darkness_level, saturation_level):
file_path = self.get_directory() + '/images/' + fn
raw = np.array(cv2.imread(file_path, -1), dtype='float32')
raw = np.maximum(raw - darkness_level, [0, 0, 0])
raw *= 1.0 / saturation_level
return raw
def load_image_without_mcc(self, r):
img = (np.clip(
self.load_image(r.fn, r.extras['darkness_level'], r.extras[
'saturation_level']), 0, 1) * 65535.0).astype(np.uint16)
#polygon = r.mcc_coord * np.array([img.shape[1], img.shape[0]])
polygon = r.mcc_coord
polygon = polygon.astype(np.int32)
cv2.fillPoly(img, [polygon], (BOARD_FILL_COLOR,) * 3)
return img
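# Illustrative alternative to the Gehler entry point below (an assumption;
# camera index 1 maps to 'Canon600D' in ChengDataSet.__init__):
#   ds = ChengDataSet(1)
#   ds.regenerate_meta_data()
#   ds.regenerate_image_packs()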
if __name__ == '__main__':
ds = GehlerDataSet()
ds.regenerate_meta_data()
ds.regenerate_image_packs()
|
py | 1a3e97ad88f485d9dffd5eeb530aa209e4aafdac | # Generated by Django 2.1.5 on 2019-03-01 17:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('openbook_notifications', '0003_postreactionnotification'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL),
),
]
|
py | 1a3e97e969e02de468d9a265c05c7c3f89dee061 | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell <[email protected]>
Copyright (C) 2020 Yoel Rene Cortes-Pena <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains lookup functions for enthalpies and standard entropies of
formation. Lookup functions are available for the liquid, solid, and gas states.
A compound may be in more than one lookup function.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
Solid Heat of Formation
-----------------------
.. autofunction:: chemicals.reaction.Hfs
.. autofunction:: chemicals.reaction.Hfs_methods
.. autodata:: chemicals.reaction.Hfs_all_methods
Liquid Heat of Formation
------------------------
.. autofunction:: chemicals.reaction.Hfl
.. autofunction:: chemicals.reaction.Hfl_methods
.. autodata:: chemicals.reaction.Hfl_all_methods
Gas Heat of Formation
---------------------
.. autofunction:: chemicals.reaction.Hfg
.. autofunction:: chemicals.reaction.Hfg_methods
.. autodata:: chemicals.reaction.Hfg_all_methods
Solid Absolute Entropy
----------------------
.. autofunction:: chemicals.reaction.S0s
.. autofunction:: chemicals.reaction.S0s_methods
.. autodata:: chemicals.reaction.S0s_all_methods
Liquid Absolute Entropy
-----------------------
.. autofunction:: chemicals.reaction.S0l
.. autofunction:: chemicals.reaction.S0l_methods
.. autodata:: chemicals.reaction.S0l_all_methods
Gas Absolute Entropy
--------------------
.. autofunction:: chemicals.reaction.S0g
.. autofunction:: chemicals.reaction.S0g_methods
.. autodata:: chemicals.reaction.S0g_all_methods
Utility Functions
-----------------
.. autofunction:: chemicals.reaction.Gibbs_formation
.. autofunction:: chemicals.reaction.entropy_formation
.. autofunction:: chemicals.reaction.Hf_basis_converter
Chemical Reactions
------------------
.. autofunction:: chemicals.reaction.balance_stoichiometry
.. autofunction:: chemicals.reaction.stoichiometric_matrix
"""
__all__ = ['Hfg', 'Hfl', 'Hfs', 'S0g', 'S0l', 'S0s',
'Hfl_methods', 'Hfg_methods', 'Hfs_methods',
'S0l_methods', 'S0g_methods', 'S0s_methods',
'Hfl_all_methods', 'Hfg_all_methods', 'Hfs_all_methods',
'S0l_all_methods', 'S0g_all_methods', 'S0s_all_methods',
'Gibbs_formation', 'entropy_formation', 'Hf_basis_converter',
'balance_stoichiometry', 'stoichiometric_matrix']
from chemicals.utils import ceil, log10, PY37, source_path, os_path_join, can_load_data
from chemicals import heat_capacity
from chemicals.data_reader import (register_df_source,
data_source,
retrieve_from_df_dict,
retrieve_any_from_df_dict,
list_available_methods_from_df_dict)
# %% Register data sources and lazy load them
CRC = 'CRC'
YAWS = 'YAWS'
API_TDB_G = 'API_TDB_G'
ATCT_L = 'ATCT_L'
ATCT_G = 'ATCT_G'
TRC = 'TRC'
folder = os_path_join(source_path, 'Reactions')
register_df_source(folder, 'API TDB Albahri Hf (g).tsv')
register_df_source(folder, 'ATcT 1.112 (g).tsv')
register_df_source(folder, 'ATcT 1.112 (l).tsv')
register_df_source(folder, 'Yaws Hf S0 (g).tsv')
_reaction_data_loaded = False
def _load_reaction_data():
global Hfg_API_TDB_data, Hfg_ATcT_data, Hfl_ATcT_data, Hfg_S0g_YAWS_data
global Hfg_sources, Hfl_sources, Hfs_sources
global S0g_sources, S0l_sources, S0s_sources
global _reaction_data_loaded
Hfg_API_TDB_data = data_source('API TDB Albahri Hf (g).tsv')
Hfg_ATcT_data = data_source('ATcT 1.112 (g).tsv')
Hfl_ATcT_data = data_source('ATcT 1.112 (l).tsv')
Hfg_S0g_YAWS_data = data_source('Yaws Hf S0 (g).tsv')
_reaction_data_loaded = True
S0g_sources = {
CRC: heat_capacity.CRC_standard_data,
YAWS: Hfg_S0g_YAWS_data,
}
S0l_sources = {
CRC: heat_capacity.CRC_standard_data,
}
S0s_sources = {
CRC: heat_capacity.CRC_standard_data,
}
Hfg_sources = {
ATCT_G: Hfg_ATcT_data,
CRC: heat_capacity.CRC_standard_data,
API_TDB_G: Hfg_API_TDB_data,
TRC: heat_capacity.TRC_gas_data,
YAWS: Hfg_S0g_YAWS_data,
}
Hfl_sources = {
ATCT_L: Hfl_ATcT_data,
CRC: heat_capacity.CRC_standard_data,
}
Hfs_sources = {
CRC: heat_capacity.CRC_standard_data,
}
if PY37:
def __getattr__(name):
if name in ('Hfg_API_TDB_data', 'Hfg_ATcT_data',
'Hfl_ATcT_data', 'Hfg_S0g_YAWS_data',
'Hfg_sources', 'Hfl_sources', 'Hfs_sources',
'S0g_sources', 'S0l_sources', 'S0s_sources'):
_load_reaction_data()
return globals()[name]
raise AttributeError("module %s has no attribute %s" %(__name__, name))
else:
if can_load_data:
_load_reaction_data()
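# Note: the module-level __getattr__ above (PEP 562, Python 3.7+) loads the
# data frames lazily; e.g. (illustrative) the first access of
# chemicals.reaction.Hfg_sources triggers _load_reaction_data().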
# %% Lookup functions
# TODO: more data from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3692305/
# has dippr standard heats of formation, about 55% of the database
Hfs_all_methods = (CRC,)
'''Tuple of method name keys. See the `Hfs` for the actual references'''
def Hfs_methods(CASRN):
"""Return all methods available to obtain the Hfs for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfs with the given
inputs.
See Also
--------
Hfs
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfs_sources, CASRN, 'Hfs')
def Hfs(CASRN, method=None):
    r'''This function handles the retrieval of a chemical's solid/crystalline
standard phase heat of formation. The lookup is based on CASRNs. Will
automatically select a data source to use if no method is provided; returns
None if the data is not available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfs : float
Solid standard-state heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
Hfs_methods
Notes
-----
Sources are:
* 'CRC', from the CRC handbook (1360 values)
Examples
--------
>>> Hfs('101-81-5') # Diphenylmethane
71500.0
See Also
--------
Hfs_methods
References
----------
.. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
Century." Journal of Physics: Conference Series 16, no. 1
(January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfs_sources, CASRN, 'Hfs', method)
else:
return retrieve_any_from_df_dict(Hfs_sources, CASRN, 'Hfs')
Hfl_all_methods = (ATCT_L, CRC)
'''Tuple of method name keys. See the `Hfl` for the actual references'''
def Hfl_methods(CASRN):
"""Return all methods available to obtain the Hfl for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfl with the given
inputs.
See Also
--------
Hfl
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfl_sources, CASRN, 'Hfl')
def Hfl(CASRN, method=None):
r'''This function handles the retrieval of a chemical's liquid standard
phase heat of formation. The lookup is based on CASRNs. Will automatically
select a data source to use if no method is provided; returns None if
the data is not available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfl : float
Liquid standard-state heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`Hfl_all_methods`.
Notes
-----
Sources are:
* 'ATCT_L', the Active Thermochemical Tables version 1.112.
* 'CRC', from the CRC handbook (1360 values)
Examples
--------
>>> Hfl('67-56-1')
-238400.0
See Also
--------
Hfl_methods
References
----------
.. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
Century." Journal of Physics: Conference Series 16, no. 1
(January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
.. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfl_sources, CASRN, 'Hfl', method)
else:
return retrieve_any_from_df_dict(Hfl_sources, CASRN, 'Hfl')
Hfg_all_methods = (ATCT_G, TRC, CRC, YAWS)
'''Tuple of method name keys. See the `Hfg` for the actual references'''
def Hfg_methods(CASRN):
"""Return all methods available to obtain the Hfg for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfg with the given
inputs.
See Also
--------
Hfg
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfg_sources, CASRN, 'Hfg')
def Hfg(CASRN, method=None):
r'''This function handles the retrieval of a chemical's gas heat of
formation. Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfg : float
Ideal gas phase heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
Hfg_methods
Notes
-----
Function has data for approximately 8700 chemicals. Sources are:
* 'ATCT_G', the Active Thermochemical Tables version 1.112 (600 values)
* 'TRC', from a 1994 compilation (1750 values)
* 'CRC', from the CRC handbook (1360 values)
    * 'YAWS', a large compilation of values, mostly estimated (5000 values)
    'TRC' data may have come from computational procedures, for example pentane
is off by 30%.
Examples
--------
>>> Hfg('67-56-1')
-200700.0
>>> Hfg('67-56-1', method='YAWS')
-200900.0
>>> Hfg('67-56-1', method='CRC')
-201000.0
>>> Hfg('67-56-1', method='TRC')
-190100.0
See Also
--------
Hfg_methods
References
----------
.. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
Century." Journal of Physics: Conference Series 16, no. 1
(January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
.. [2] Frenkelʹ, M. L, Texas Engineering Experiment Station, and
Thermodynamics Research Center. Thermodynamics of Organic Compounds in
the Gas State. College Station, Tex.: Thermodynamics Research Center,
1994.
.. [3] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [4] Yaws, Carl L. Thermophysical Properties of Chemicals and
Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
Publishing, 2014.
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfg_sources, CASRN, 'Hfg', method)
else:
return retrieve_any_from_df_dict(Hfg_sources, CASRN, 'Hfg')
S0s_all_methods = (CRC,)
'''Tuple of method name keys. See the `S0s` for the actual references'''
def S0s_methods(CASRN):
"""Return all methods available to obtain the S0s for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0s with the given
inputs.
See Also
--------
S0s
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0s_sources, CASRN, 'S0s')
def S0s(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the solid state. Lookup is based on CASRNs. Will automatically select a
data source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0s : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
`S0s_all_methods`.
Notes
-----
Sources are:
* 'CRC', from the CRC handbook (1360 values)
Examples
--------
>>> S0s('7439-93-2') # Lithium
29.1
See Also
--------
S0s_methods
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0s_sources, CASRN, 'S0s', method)
else:
return retrieve_any_from_df_dict(S0s_sources, CASRN, 'S0s')
S0l_all_methods = (CRC,)
'''Tuple of method name keys. See the `S0l` for the actual references'''
def S0l_methods(CASRN):
"""Return all methods available to obtain the S0l for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0l with the given
inputs.
See Also
--------
S0l
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0l_sources, CASRN, 'S0l')
def S0l(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the liquid state.
Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0l : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`S0l_all_methods`.
Notes
-----
Sources are:
* 'CRC', from the CRC handbook
Examples
--------
>>> S0l('7439-97-6') # Mercury
75.9
See Also
--------
S0l_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0l_sources, CASRN, 'S0l', method)
else:
return retrieve_any_from_df_dict(S0l_sources, CASRN, 'S0l')
S0g_all_methods = (CRC, YAWS)
'''Tuple of method name keys. See the `S0g` for the actual references'''
def S0g_methods(CASRN):
"""Return all methods available to obtain the S0g for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0g with the given
inputs.
See Also
--------
S0g
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0g_sources, CASRN, 'S0g')
def S0g(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the ideal gas state.
Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0g : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`S0g_all_methods`
Notes
-----
Function has data for approximately 5400 chemicals. Sources are:
* 'CRC', from the CRC handbook (520 values)
    * 'YAWS', a large compilation of values, mostly estimated (4890 values)
Examples
--------
>>> S0g('67-56-1')
239.9
>>> S0g('67-56-1', method='YAWS')
239.88
See Also
--------
S0g_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [2] Yaws, Carl L. Thermophysical Properties of Chemicals and
Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
Publishing, 2014.
'''
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0g_sources, CASRN, 'S0g', method)
else:
return retrieve_any_from_df_dict(S0g_sources, CASRN, 'S0g')
# %% Converter functions
def Hf_basis_converter(Hvapm, Hf_liq=None, Hf_gas=None):
r'''This function converts a liquid or gas enthalpy of formation to the
other. This is useful, as thermodynamic packages often work with ideal-
gas as the reference state and require ideal-gas enthalpies of formation.
Parameters
----------
Hvapm : float
Molar enthalpy of vaporization of compound at 298.15 K or (unlikely)
the reference temperature, [J/mol]
Hf_liq : float, optional
Enthalpy of formation of the compound in its liquid state, [J/mol]
Hf_gas : float, optional
Enthalpy of formation of the compound in its ideal-gas state, [J/mol]
Returns
-------
Hf_calc : float, optional
Enthalpy of formation of the compound in the other state to the one
provided, [J/mol]
Notes
-----
Examples
--------
Calculate the ideal-gas enthalpy of formation for water, from its standard-
state (liquid) value:
>>> Hf_basis_converter(44018, Hf_liq=-285830)
-241812
Calculate the standard-state (liquid) enthalpy of formation for water, from
its ideal-gas value:
>>> Hf_basis_converter(44018, Hf_gas=-241812)
-285830
'''
if Hf_liq is None and Hf_gas is None:
raise ValueError("Provide either a liquid or a gas enthalpy of formation")
if Hvapm is None or Hvapm < 0.0:
raise ValueError("Enthalpy of formation unknown or zero")
if Hf_liq is None:
return Hf_gas - Hvapm
else:
return Hf_liq + Hvapm
def Gibbs_formation(dHf, S0_abs, dHfs_std, S0_abs_elements, coeffs_elements,
T_ref=298.15):
r'''This function calculates the Gibbs free energy of formation of a
compound, from its constituent elements.
The calculated value will be for a "standard-state" value if `dHf` and
`S0_abs` are provided in the standard state; or it will be in an
"ideal gas" basis if they are both for an ideal gas. For compounds which
are gases at STP, the two values are the same.
Parameters
----------
dHf : float
Molar enthalpy of formation of the created compound, [J/mol]
S0_abs : float
Absolute molar entropy of the created compound at the reference
temperature, [J/mol/K]
dHfs_std : list[float]
List of standard molar enthalpies of formation of all elements used in
the formation of the created compound, [J/mol]
S0_abs_elements : list[float]
List of standard absolute molar entropies at the reference temperature
of all elements used in the formation of the created compound,
[J/mol/K]
coeffs_elements : list[float]
List of coefficients for each compound (i.e. 1 for C, 2 for H2 if the
target is methane), in the same order as `dHfs_std` and
`S0_abs_elements`, [-]
T_ref : float, optional
The standard state temperature, default 298.15 K; few values are
        tabulated at other temperatures, [K]
Returns
-------
dGf : float
Gibbs free energy of formation for the created compound, [J/mol]
Notes
-----
Be careful for elements like Bromine - is the tabulated value for Br2 or
Br?
Examples
--------
Calculate the standard-state Gibbs free energy of formation for water,
using water's standard state heat of formation and absolute entropy
at 298.15 K:
>>> Gibbs_formation(-285830, 69.91, [0, 0], [130.571, 205.147], [1, .5])
-237161.633825
Calculate the ideal-gas state Gibbs free energy of formation for water,
using water's ideal-gas state heat of formation and absolute entropy
at 298.15 K as a gas:
>>> Gibbs_formation(-241818, 188.825, [0, 0], [130.571, 205.147], [1, .5])
-228604.141075
Calculate the Gibbs free energy of formation for CBrF3 (it is a gas at STP,
so its standard-state and ideal-gas state values are the same) at 298.15 K:
>>> Gibbs_formation(-648980, 297.713, [0, 0, 0], [5.74, 152.206, 202.789], [1, .5, 1.5])
-622649.329975
Note in the above calculation that the Bromine's `S0` and `Hf` are for Br2;
and that the value for Bromine as a liquid, which is its standard state,
is used.
References
----------
.. [1] "Standard Gibbs Free Energy of Formation Calculations Chemistry
Tutorial." Accessed March, 2019. https://www.ausetute.com.au/gibbsform.html.
'''
N = len(coeffs_elements)
dH = dHf
dS = S0_abs
for i in range(N):
dH -= dHfs_std[i]*coeffs_elements[i]
dS -= S0_abs_elements[i]*coeffs_elements[i]
return dH - T_ref*dS
def entropy_formation(Hf, Gf, T_ref=298.15):
r'''This function calculates the entropy of formation of a
compound, from its constituent elements.
The calculated value will be for a "standard-state" value if `Hf` and
`Gf` are provided in the standard state; or it will be in an
"ideal gas" basis if they are both for an ideal gas. For compounds which
are gases at STP, the two values are the same.
Parameters
----------
Hf : float
Molar enthalpy of formation of the compound, [J/mol]
Gf : float
Molar Gibbs free energy of formation of the compound, [J/mol]
T_ref : float, optional
The standard state temperature, default 298.15 K; few values are
        tabulated at other temperatures, [K]
Returns
-------
S0 : float
Entropy of formation of the compound, [J/mol/K]
Notes
-----
Examples
--------
Entropy of formation of methane:
>>> entropy_formation(Hf=-74520, Gf=-50490)
-80.59701492537314
Entropy of formation of water in ideal gas state:
>>> entropy_formation(Hf=-241818, Gf=-228572)
-44.427301693778304
'''
return (Hf - Gf)/T_ref
# %% Stoichiometry functions
def stoichiometric_matrix(atomss, reactants):
    r'''This function calculates a stoichiometric matrix of reactants and
    products, as required by a solver to compute the reaction
    coefficients.
Parameters
----------
atomss : list[dict[(str, float)]]
        A list of dictionaries of (element, element_count) pairs for each
chemical, [-]
reactants : list[bool]
List of booleans indicating whether each chemical is a reactant (True)
or a product (False), [-]
Returns
-------
matrix : list[list[float]]
Chemical reaction matrix for further processing; rows contain element
counts of each compound, and the columns represent each chemical, [-]
Notes
-----
The rows of the matrix contain the element counts of each compound,
and the columns represent each chemical.
Examples
--------
MgO2 -> Mg + 1/2 O2
(k=1)
>>> stoichiometric_matrix([{'Mg': 1, 'O': 1}, {'Mg': 1}, {'O': 2}], [True, False, False])
[[1, -1, 0], [1, 0, -2]]
Cl2 + propylene -> allyl chloride + HCl
>>> stoichiometric_matrix([{'Cl': 2}, {'C': 3, 'H': 6}, {'C': 3, 'Cl': 1, 'H': 5}, {'Cl': 1, 'H': 1}], [True, True, False, False, False])
[[0, 3, -3, 0], [2, 0, -1, -1], [0, 6, -5, -1]]
Al + 4HNO3 -> Al(NO3)3 + NO + 2H2O
(k=1)
>>> stoichiometric_matrix([{'Al': 1}, {'H': 1, 'N': 1, 'O': 3}, {'Al': 1, 'N': 3, 'O': 9}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False, False])
[[1, 0, -1, 0, 0], [0, 1, 0, 0, -2], [0, 1, -3, -1, 0], [0, 3, -9, -1, -1]]
4Fe + 3O2 -> 2(Fe2O3)
(k=2)
>>> stoichiometric_matrix([{'Fe': 1}, {'O': 2}, {'Fe':2, 'O': 3}], [True, True, False])
[[1, 0, -2], [0, 2, -3]]
4NH3 + 5O2 -> 4NO + 6(H2O)
(k=4)
>>> stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False])
[[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]]
No unique solution:
C2H5NO2 + C3H7NO3 + 2C6H14N4O2 + 3C5H9NO2 + 2C9H11NO2 -> 8H2O + C50H73N15O11
>>> stoichiometric_matrix([{'C': 2, 'H': 5, 'N': 1, 'O': 2}, {'C': 3, 'H': 7, 'N': 1, 'O': 3}, {'C': 6, 'H': 14, 'N': 4, 'O': 2}, {'C': 5, 'H': 9, 'N': 1, 'O': 2}, {'C': 9, 'H': 11, 'N': 1, 'O': 2}, {'H': 2, 'O': 1}, {'C': 50, 'H': 73, 'N': 15, 'O': 11}], [True, True, True, True, True, False, False])
[[2, 3, 6, 5, 9, 0, -50], [5, 7, 14, 9, 11, -2, -73], [1, 1, 4, 1, 1, 0, -15], [2, 3, 2, 2, 2, -1, -11]]
References
----------
.. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. "Chemical Equation
Balancing: An Integer Programming Approach." Mathematical and Computer
Modelling 44, no. 7 (October 1, 2006): 678-91.
https://doi.org/10.1016/j.mcm.2006.02.004.
.. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH
REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical
Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22.
'''
n_compounds = len(atomss)
elements = set()
for atoms in atomss:
elements.update(atoms.keys())
elements = sorted(list(elements)) # Ensure reproducibility
n_elements = len(elements)
matrix = [[0]*n_compounds for _ in range(n_elements)]
for i, atoms in enumerate(atomss):
for k, v in atoms.items():
if not reactants[i]:
v = -v
matrix[elements.index(k)][i] = v
return matrix
def balance_stoichiometry(matrix, rounding=9, allow_fractional=False):
r'''This function balances a chemical reaction.
Parameters
----------
matrix : list[list[float]]
Chemical reaction matrix for further processing; rows contain element
counts of each compound, and the columns represent each chemical, [-]
Returns
-------
coefficients : list[float]
Balanced coefficients; all numbers are positive, [-]
Notes
-----
Balance the reaction 4 NH3 + 5 O2 = 4 NO + 6 H2O, without knowing the
coefficients:
>>> matrix = stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False])
>>> matrix
[[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]]
>>> balance_stoichiometry(matrix)
[4.0, 5.0, 4.0, 6.0]
>>> balance_stoichiometry(matrix, allow_fractional=True)
[1.0, 1.25, 1.0, 1.5]
This algorithm relies on `scipy`.
The behavior of this function for inputs which do not have a unique
solution is undefined.
This algorithm may suffer from floating point issues. If you believe there
is an error in the result, please report your reaction to the developers.
References
----------
.. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. "Chemical Equation
Balancing: An Integer Programming Approach." Mathematical and Computer
Modelling 44, no. 7 (October 1, 2006): 678-91.
https://doi.org/10.1016/j.mcm.2006.02.004.
.. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH
REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical
Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22.
'''
import scipy.linalg
done = scipy.linalg.null_space(matrix)
if len(done[0]) > 1:
raise ValueError("No solution")
d = done[:, 0].tolist()
min_value_inv = 1.0/min(d)
d = [i*min_value_inv for i in d]
if not allow_fractional:
from fractions import Fraction
max_denominator = 10**rounding
fs = [Fraction(x).limit_denominator(max_denominator=max_denominator) for x in d]
all_denominators = set([i.denominator for i in fs])
if 1 in all_denominators:
all_denominators.remove(1)
for den in sorted(list(all_denominators), reverse=True):
fs = [num*den for num in fs]
if all(i.denominator == 1 for i in fs):
break
# May have gone too far
return [float(i) for i in fs]
# done = False
# for i in range(100):
# for c in d:
# ratio = c.as_integer_ratio()[1]
# if ratio != 1:
# d = [di*ratio for di in d]
# break
# done = True
# if done:
# break
#
# d_as_int = [int(i) for i in d]
# for i, j in zip(d, d_as_int):
# if i != j:
# raise ValueError("Could not find integer coefficients (%s, %s)" %(i, j))
# return d_as_int
else:
d = [round(i, rounding + int(ceil(log10(abs(i))))) for i in d]
return d
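# Illustrative end-to-end use of the lookups and helpers above (a sketch; the
# exact values returned depend on the data source selected, and the element
# entropies are the H2/O2 values quoted in the Gibbs_formation docstring):
#   Hf = Hfg('7732-18-5')     # ideal-gas heat of formation of water, J/mol
#   S0 = S0g('7732-18-5')     # ideal-gas absolute entropy of water, J/mol/K
#   dGf = Gibbs_formation(Hf, S0, [0.0, 0.0], [130.571, 205.147], [1, 0.5])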
|
py | 1a3e97f942d90364f45c1f6b4333edc204a48724 | import unittest
import numpy as np
from cwepr import utils
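# Background for the order-of-magnitude checks below (an assumption about
# cwepr.utils): the conversions follow the EPR resonance condition
# h*nu = g*mu_B*B, i.e. g = h*nu/(mu_B*B). For nu ~ 9.5 GHz and B ~ 340-350 mT
# this gives g ~ 2 (floor(log10) == 0), while the reverse conversion gives
# fields of a few hundred mT (floor(log10) == 2).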
class TestConvertmT2g(unittest.TestCase):
def test_values_are_positive(self):
values = np.linspace(340, 350, 100)
mw_freq = 9.5
self.assertTrue(all(utils.convert_mT2g(values, mw_freq) > 0))
def test_values_have_correct_range(self):
values = np.linspace(340, 350, 100)
mw_freq = 9.5
condition = \
(np.floor(np.log10(utils.convert_mT2g(values, mw_freq))) == 0)
self.assertTrue(all(condition))
class TestConvertg2mT(unittest.TestCase):
def test_values_are_positive(self):
values = np.linspace(1.8, 4, 100)
mw_freq = 9.5
        self.assertTrue(all(utils.convert_g2mT(values, mw_freq) > 0))
def test_values_have_correct_range(self):
values = np.linspace(1.8, 4, 100)
mw_freq = 9.5
condition = \
(np.floor(np.log10(utils.convert_g2mT(values, mw_freq))) == 2)
self.assertTrue(all(condition))
class TestNotZero(unittest.TestCase):
def test_not_zero_of_zero_returns_nonzero_value(self):
self.assertGreater(utils.not_zero(0), 0)
def test_not_zero_of_zero_returns_np_float_resolution(self):
self.assertEqual(np.finfo(np.float64).resolution,
utils.not_zero(0))
def test_not_zero_of_positive_value_preserves_sign(self):
self.assertGreater(utils.not_zero(1e-20), 0)
def test_not_zero_of_negative_value_preserves_sign(self):
self.assertLess(utils.not_zero(-1e-20), 0)
def test_not_zero_of_negative_value_closer_than_limit_returns_limit(self):
self.assertEqual(-np.finfo(np.float64).resolution,
utils.not_zero(-1e-20))
|
py | 1a3e98f9a109533495f4cc5cd61fe29a74c69551 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Ai the coins developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because bitcoind uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
py | 1a3e9b0c5d7c0b2747aafedc890d409d46a6493f | """SciUnit tests live in this module."""
import inspect
import traceback
from sciunit import settings
from sciunit.base import SciUnit
from .capabilities import ProducesNumber
from .models import Model
from .scores import Score, BooleanScore, NoneScore, ErrorScore, TBDScore,\
NAScore
from .validators import ObservationValidator, ParametersValidator
from .errors import Error, CapabilityError, ObservationError,\
InvalidScoreError, ParametersError
class Test(SciUnit):
"""Abstract base class for tests."""
def __init__(self, observation, name=None, **params):
"""
Args:
observation (dict): A dictionary of observed values to parameterize
the test.
name (str, optional): Name of the test instance.
"""
self.name = name if name else self.__class__.__name__
assert isinstance(self.name, str), "Test name must be a string"
if self.description is None:
self.description = self.__class__.__doc__
self.params = params if params else {}
self.verbose = self.params.pop('verbose', 1)
#self.params.update(params)
self.validate_params(self.params)
self.observation = observation
if settings['PREVALIDATE']:
self.validate_observation(self.observation)
if self.score_type is None or not issubclass(self.score_type, Score):
raise Error(("The score type '%s' specified for Test '%s' "
"is not valid.") % (self.score_type, self.name))
super(Test, self).__init__()
name = None
"""The name of the test. Defaults to the test class name."""
description = None
"""A description of the test. Defaults to the docstring for the class."""
observation = None
"""The empirical observation that the test is using."""
params = None
"""A dictionary containing the parameters to the test."""
score_type = BooleanScore
"""A score type for this test's `judge` method to return."""
converter = None
"""A conversion to be done on the score after it is computed."""
observation_schema = None
"""A schema that the observation must adhere to (validated by cerberus).
Can also be a list of schemas, one of which the observation must match.
If it is a list, each schema in the list can optionally be named by putting
(name, schema) tuples in that list."""
params_schema = None
"""A schema that the params must adhere to (validated by cerberus).
Can also be a list of schemas, one of which the params must match."""
def validate_observation(self, observation):
"""Validate the observation provided to the constructor.
Raises an ObservationError if invalid.
"""
if not observation:
raise ObservationError("Observation is missing.")
if not isinstance(observation, dict):
raise ObservationError("Observation is not a dictionary.")
if "mean" in observation and observation["mean"] is None:
raise ObservationError("Observation mean cannot be 'None'.")
if self.observation_schema:
if isinstance(self.observation_schema, list):
schemas = [x[1] if isinstance(x, tuple) else x
for x in self.observation_schema]
schema = {'oneof_schema': schemas,
'type': 'dict'}
else:
schema = {'schema': self.observation_schema,
'type': 'dict'}
schema = {'observation': schema}
v = ObservationValidator(schema, test=self)
if not v.validate({'observation': observation}):
raise ObservationError(v.errors)
return observation
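    # Illustrative schema (an assumption, not shipped with sciunit): setting
    #     observation_schema = {'mean': {'type': 'float', 'required': True},
    #                           'std': {'type': 'float', 'min': 0}}
    # on a Test subclass makes the cerberus-based validator above reject
    # observations that lack a numeric 'mean'.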
@classmethod
def observation_schema_names(cls):
"""Return a list of names of observation schema, if they are set."""
names = []
if cls.observation_schema:
if isinstance(cls.observation_schema, list):
names = [x[0] if isinstance(x, tuple) else 'Schema %d' % (i+1)
for i, x in enumerate(cls.observation_schema)]
return names
def validate_params(self, params):
"""Validate the params provided to the constructor.
Raises an ParametersError if invalid.
"""
if params is None:
raise ParametersError("Parameters cannot be `None`.")
if not isinstance(params, dict):
raise ParametersError("Parameters are not a dictionary.")
if self.params_schema:
if isinstance(self.params_schema, list):
schema = {'oneof_schema': self.params_schema,
'type': 'dict'}
else:
schema = {'schema': self.params_schema,
'type': 'dict'}
schema = {'params': schema}
v = ParametersValidator(schema, test=self)
if not v.validate({'params': params}):
raise ParametersError(v.errors)
return params
required_capabilities = ()
"""A sequence of capabilities that a model must have in order for the
test to be run. Defaults to empty."""
def check_capabilities(self, model, skip_incapable=False,
require_extra=False):
"""Check that test's required capabilities are implemented by `model`.
Raises an Error if model is not a Model.
Raises a CapabilityError if model does not have a capability.
"""
if not isinstance(model, Model):
raise Error("Model %s is not a sciunit.Model." % str(model))
capable = all([self.check_capability(model, c, skip_incapable,
require_extra)
for c in self.required_capabilities])
return capable
def check_capability(self, model, c, skip_incapable=False,
require_extra=False):
"""Check if `model` has capability `c`.
Optionally (default:True) raise a `CapabilityError` if it does not.
"""
capable = c.check(model, require_extra=require_extra)
if not capable and not skip_incapable:
raise CapabilityError(model, c)
return capable
def condition_model(self, model):
"""Update the model in any way needed before generating the prediction.
This could include updating parameters such as simulation durations
that do not define the model but do define experiments performed on
the model.
No default implementation.
"""
pass
def generate_prediction(self, model):
"""Generate a prediction from a model using the required capabilities.
No default implementation.
"""
raise NotImplementedError(("Test %s does not implement "
"generate_prediction.") % str())
def check_prediction(self, prediction):
"""Check the prediction for acceptable values.
No default implementation.
"""
pass
def compute_score(self, observation, prediction):
"""Generates a score given the observations provided in the constructor
and the prediction generated by generate_prediction.
Must generate a score of score_type.
No default implementation.
"""
if not hasattr(self, 'score_type') or \
not hasattr(self.score_type, 'compute'):
raise NotImplementedError(("Test %s either implements no "
"compute_score method or provides no "
"score_type with a compute method.")
% self.name)
# After some processing of the observation and the prediction.
score = self.score_type.compute(observation, prediction)
return score
def _bind_score(self, score, model, observation, prediction):
"""Bind some useful attributes to the score."""
score.model = model
score.test = self
score.prediction = prediction
score.observation = observation
# Don't let scores share related_data.
score.related_data = score.related_data.copy()
self.bind_score(score, model, observation, prediction)
def bind_score(self, score, model, observation, prediction):
"""For the user to bind additional features to the score."""
pass
def check_score_type(self, score):
"""Check that the score is the correct type for this test."""
if not isinstance(score, (self.score_type, NoneScore, ErrorScore)):
msg = (("Score for test '%s' is not of correct type. "
"The test requires type %s but %s was provided.")
% (self.name, self.score_type.__name__,
score.__class__.__name__))
raise InvalidScoreError(msg)
def _judge(self, model, skip_incapable=True):
"""Generate a score for the model (internal API use only)."""
# 1.
self.check_capabilities(model, skip_incapable=skip_incapable)
# 2.
prediction = self.generate_prediction(model)
self.check_prediction(prediction)
self.last_model = model
# 3. Validate observation and compute score
validated = self.validate_observation(self.observation)
if validated is not None:
self.observation = validated
score = self.compute_score(self.observation, prediction)
if self.converter:
score = self.converter.convert(score)
# 4.
self.check_score_type(score)
# 5.
self._bind_score(score, model, self.observation, prediction)
return score
def judge(self, model, skip_incapable=False, stop_on_error=True,
deep_error=False):
"""Generate a score for the provided model (public method).
Operates as follows:
1. Checks if the model has all the required capabilities. If it does
not, and skip_incapable=False, then a `CapabilityError` is raised.
2. Calls generate_prediction to generate a prediction.
3. Calls score_prediction to generate a score.
4. Checks that the score is of score_type, raising an
InvalidScoreError.
5. Equips the score with metadata:
a) A reference to the model, in attribute model.
b) A reference to the test, in attribute test.
c) A reference to the prediction, in attribute prediction.
d) A reference to the observation, in attribute observation.
6. Returns the score.
If stop_on_error is true (default), exceptions propagate upward. If
false, an ErrorScore is generated containing the exception.
If deep_error is true (not default), the traceback will contain the
actual code execution error, instead of the content of an ErrorScore.
"""
if isinstance(model, (list, tuple, set)):
# If a collection of models is provided
from .suites import TestSuite
suite = TestSuite([self], name=self.name)
# then test them using a one-test suite.
return suite.judge(model, skip_incapable=skip_incapable,
stop_on_error=stop_on_error,
deep_error=deep_error)
if deep_error:
score = self._judge(model, skip_incapable=skip_incapable)
else:
try:
score = self._judge(model, skip_incapable=skip_incapable)
except CapabilityError as e:
score = NAScore(str(e))
score.model = model
score.test = self
except Exception as e:
e.stack = traceback.format_exc()
score = ErrorScore(e)
score.model = model
score.test = self
if isinstance(score, ErrorScore) and stop_on_error:
raise score.score # An exception.
return score
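    # Illustrative usage with the RangeTest defined at the end of this module
    # (assumes `model` implements the ProducesNumber capability):
    #     test = RangeTest(observation=(1, 10))
    #     score = test.judge(model)  # BooleanScore with .model/.test/.prediction bound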
def check(self, model, skip_incapable=True, stop_on_error=True,
require_extra=False):
"""Check to see if the test can run this model.
Like judge, but without actually running the test. Just returns a Score
indicating whether the model can take the test or not.
"""
try:
if self.check_capabilities(model, skip_incapable=skip_incapable,
require_extra=require_extra):
score = TBDScore(None)
else:
score = NAScore(None)
except Exception as e:
score = ErrorScore(e)
if stop_on_error:
raise e
return score
def optimize(self, model):
"""Optimize the parameters of the model to get the best score."""
raise NotImplementedError(("Optimization not implemented "
"for Test '%s'" % self))
def describe(self):
"""Describe the test in words."""
result = "No description available"
if self.description:
result = "%s" % self.description
else:
if self.__doc__:
s = []
s += [self.__doc__.strip().replace('\n', '').
replace(' ', '')]
if self.converter:
s += [self.converter.description]
result = '\n'.join(s)
return result
@property
def state(self):
"""Get the frozen (pickled) model state."""
return self._state(exclude=['last_model'])
@classmethod
def is_test_class(cls, other_cls):
"""Return whether `other_cls` is a subclass of this test class."""
return inspect.isclass(other_cls) and issubclass(other_cls, cls)
def __str__(self):
"""Return the string representation of the test's name."""
return '%s' % self.name
class TestM2M(Test):
"""Abstract class for handling tests involving multiple models.
Enables comparison of model to model predictions, and also against
experimental reference data (optional).
    Note: 'TestM2M' would typically be used when handling multiple (>2)
models, with/without experimental reference data. For single model
tests, you can use the 'Test' class.
"""
def __init__(self, observation=None, name=None, **params):
super(TestM2M, self).__init__(observation, name=name, **params)
def validate_observation(self, observation):
"""Validate the observation provided to the constructor.
Note: TestM2M does not compulsorily require an observation
(i.e. None allowed).
"""
pass
def compute_score(self, prediction1, prediction2):
"""Generate a score given the observations provided in the constructor
and/or the prediction(s) generated by generate_prediction.
Must generate a score of score_type.
No default implementation.
"""
try:
# After some processing of the observation and/or the prediction(s)
score = self.score_type.compute(prediction1, prediction2)
return score
except Exception:
raise NotImplementedError(("Test %s either implements no "
"compute_score method or provides no "
"score_type with a compute method.")
% self.name)
def _bind_score(self, score, prediction1, prediction2, model1, model2):
"""Bind some useful attributes to the score."""
score.model1 = model1
score.model2 = model2
score.test = self
score.prediction1 = prediction1
score.prediction2 = prediction2
# Don't let scores share related_data.
score.related_data = score.related_data.copy()
self.bind_score(score, prediction1, prediction2, model1, model2)
def bind_score(self, score, prediction1, prediction2, model1, model2):
"""For the user to bind additional features to the score."""
pass
def _judge(self, prediction1, prediction2, model1, model2=None):
# TODO: Not sure if below statement is required
# self.last_model = model
# 6.
score = self.compute_score(prediction1, prediction2)
if self.converter:
score = self.converter.convert(score)
# 7.
if not isinstance(score, (self.score_type, NoneScore, ErrorScore)):
raise InvalidScoreError(("Score for test '%s' is not of correct "
"type. The test requires type %s but %s "
"was provided.")
% (self.name, self.score_type.__name__,
score.__class__.__name__))
# 8.
self._bind_score(score, prediction1, prediction2, model1, model2)
return score
def judge(self, models, skip_incapable=False, stop_on_error=True,
deep_error=False):
"""Generate a score matrix for the provided model(s).
Operates as follows:
1. Check if models have been specified as a list/tuple/set.
If not, raise exception.
2. Create a list of predictions. If a test observation is provided,
add it to predictions.
3. Checks if all models have all the required capabilities. If a model
does not, then a CapabilityError is raised.
4. Calls generate_prediction to generate predictions for each model,
    and these are appended to the predictions list.
5. Generate a 2D list as a placeholder for all the scores.
6. Calls score_prediction to generate scores for each comparison.
7. Checks that the score is of score_type, raising an
InvalidScoreError.
8. Equips the score with metadata:
a) Reference(s) to the model(s), in attribute model1 (and model2).
b) A reference to the test, in attribute test.
c) A reference to the predictions, in attributes prediction1 and
prediction2.
9. Returns the score as a Pandas DataFrame.
If stop_on_error is true (default), exceptions propagate upward. If
false, an ErrorScore is generated containing the exception.
If deep_error is true (not default), the traceback will contain the
actual code execution error, instead of the content of an ErrorScore.
"""
# 1.
if not isinstance(models, (list, tuple, set)):
raise TypeError(("Models must be specified as a list, tuple or "
"set. For single model tests, use 'Test' class."))
else:
models = list(models)
# 2.
predictions = []
# If observation exists, store it as first element in predictions[]
if self.observation:
predictions.append(self.observation)
for model in models:
if not isinstance(model, Model):
raise TypeError(("TestM2M's judge method received a non-Model."
"Invalid model name: '%s'" % model))
else:
try:
# 3.
self.check_capabilities(model,
skip_incapable=skip_incapable)
# 4.
prediction = self.generate_prediction(model)
self.check_prediction(prediction)
predictions.append(prediction)
except CapabilityError as e:
raise CapabilityError(model, e.capability,
("TestM2M's judge method resulted in"
" error for '%s'. Error: '%s'" %
(model, str(e))))
except Exception as e:
raise Exception(("TestM2M's judge method resulted in error"
"for '%s'. Error: '%s'" %
(model, str(e))))
# 5. 2D list for scores; num(rows) = num(cols) = num(predictions)
scores = [[NoneScore for x in range(len(predictions))]
for y in range(len(predictions))]
for i in range(len(predictions)):
for j in range(len(predictions)):
if not self.observation:
model1 = models[i]
model2 = models[j]
elif i == 0 and j == 0:
model1 = None
model2 = None
elif i == 0:
model1 = models[j-1]
model2 = None
elif j == 0:
model1 = models[i-1]
model2 = None
else:
model1 = models[i-1]
model2 = models[j-1]
scores[i][j] = self._judge(predictions[i], predictions[j],
model1, model2)
if isinstance(scores[i][j], ErrorScore) and stop_on_error:
raise scores[i][j].score # An exception.
# 9.
from sciunit.scores.collections_m2m import ScoreMatrixM2M
sm = ScoreMatrixM2M(self, models, scores=scores)
return sm
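    # Illustrative usage (assumes every model implements the required
    # capabilities): test.judge([model_a, model_b]) returns a pandas-backed
    # ScoreMatrixM2M with one row/column per prediction (plus the observation,
    # if one was supplied).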
"""
# TODO: see if this needs to be updated and provided:
def optimize(self, model):
raise NotImplementedError(("Optimization not implemented "
"for Test '%s'" % self))
"""
class RangeTest(Test):
"""Test if the model generates a number with a certain sign"""
def __init__(self, observation, name=None):
super(RangeTest, self).__init__(observation, name=name)
required_capabilities = (ProducesNumber,)
score_type = BooleanScore
def validate_observation(self, observation):
assert type(observation) in (tuple, list, set)
assert len(observation) == 2
assert observation[1] > observation[0]
def generate_prediction(self, model):
return model.produce_number()
def compute_score(self, observation, prediction):
low = observation[0]
high = observation[1]
return self.score_type(low < prediction < high)
|
py | 1a3e9c02b21aa68f4aef831721a2b9b81d409d43 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Libermatic and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestAhongeCommerceSettings(unittest.TestCase):
pass
|
py | 1a3e9d0ec7cd2b36f10752088f0d5f5cb4375ac2 | # -*- coding: utf-8 -*-
import hashlib
from PyQt5 import QtWidgets as W
import requests
from . import CONFIG, qmessage_critical_with_detail
from .AdminWindow import AdminWindow
from .SellerWindow import SellerWindow
from ui import Ui_LoginWindow
URL = CONFIG['remote']['url']
class LoginWindow(W.QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_LoginWindow()
self.ui.setupUi(self)
# register slot functions
self.ui.login_button.clicked.connect(self.handle_login_button_clicked)
self.ui.cancel_button.clicked.connect(self.handle_cancel_button_clicked)
# NOTE DO NOT use names on_[object_name]_[signal_name] for slots!!!
# You may override the original slot functions!
def handle_login_button_clicked(self):
user, passwd = (
self.ui.user_input.text(),
hashlib.md5(self.ui.passwd_input.text().encode('utf-8')).hexdigest()
)
try:
ret = requests.get(f'{URL}/api/auth/login/{user}/{passwd}')
except Exception as e:
qmessage_critical_with_detail('连接错误', '无法连接至服务端!', str(e), self)
return
if ret.status_code != 200:
if ret.status_code == 406:
msg = ret.json()
if msg == 'User not found':
msg = '用户名不存在!'
elif msg == 'Wrong password':
msg = '密码错误!'
elif msg == 'User already logged-in elsewhere':
msg = '用户已在其他地方登陆!'
ret = W.QMessageBox.warning(self, '警告', msg)
return
ret = W.QMessageBox.critical(self, '错误', '发生了未知的服务端错误!')
return
ret = ret.json()
if ('employee_id' not in ret) or ('role' not in ret):
ret = W.QMessageBox.critical(self, '错误', '服务端通信协议升级,请更新您的客户端!')
return
employee_id, role = ret['employee_id'], ret['role']
# print(f'Successfully logged-in as {user} ({employee_id}, {role})!')
if role == 'admin':
self.next_window = AdminWindow()
elif role == 'common':
self.next_window = SellerWindow()
else:
raise ValueError(f'Window for role {role} not implemented!')
self.next_window.user_data['login'] = user
self.next_window.user_data['employee_id'] = employee_id
self.next_window.user_data['role'] = role
self.next_window.user_data['auth'] = (user, passwd)
self.next_window.show()
self.close()
def handle_cancel_button_clicked(self):
self.close()
def closeEvent(self, evt):
return super().closeEvent(evt)
|
py | 1a3e9d713d8d312ad682cee9b4c6fc31735f9eac | #FLM: Adjust Anchors
__copyright__ = __license__ = """
Copyright (c) 2010-2012 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Adjust Anchors v1.2 - Jul 12 2012
This script provides a UI for adjusting the position of anchors interactively.
FontLab's own UI for adjusting anchors is too limited.
Opening FontLab's Preview window and selecting the Anchors pane before running
this script will allow you to preview the adjustments even better.
==================================================
Versions:
v1.0 - Apr 29 2010 - Initial version.
v1.1 - Jun 15 2012 - UI improvements.
v1.2 - Jul 12 2012 - Fixed issue that affected single master fonts.
"""
listGlyphsSelected = []
def getgselectedglyphs(font, glyph, gindex):
listGlyphsSelected.append(gindex)
fl.ForSelected(getgselectedglyphs)
def getMasterNames(masters, axes):
global matrix
masterNames = []
if masters > 1:
for m in range(masters):
mtx = matrix[m]
masterName = ''
for i in range(len(axes)):
masterName += ' ' + axes[i][1] + str(mtx[i])
masterNames.append(masterName)
return masterNames
matrix = [
(0,0,0,0),(1,0,0,0),(0,1,0,0),(1,1,0,0),(0,0,1,0),(1,0,1,0),(0,1,1,0),(1,1,1,0),
(0,0,0,1),(1,0,0,1),(0,1,0,1),(1,1,0,1),(0,0,1,1),(1,0,1,1),(0,1,1,1),(1,1,1,1)
]
STYLE_RADIO = STYLE_CHECKBOX + cTO_CENTER
def run(gIndex):
masters = f[0].layers_number
axes = f.axis
masterNames = getMasterNames(masters, axes)
increment = 0
if len(axes) == 3:
increment = 90
elif len(axes) > 3:
fl.Message("This macro does not support 4-axis fonts")
return
fl.EditGlyph(gIndex) # opens Glyph Window in case it's not open yet
glyphBkupDict = {} # this will store a copy of the edited glyphs and will be used in case 'Cancel' is pressed
class DialogClass:
def __init__(self):
self.d = Dialog(self)
self.d.size = Point(660, 110 + 48*4 + increment)
self.d.Center()
self.d.title = 'Adjust Anchors'
self.anchorList = []
self.anchorList_index = 0
self.anchorList_selected = 0
self.selectedAnchor = None
self.glyph = f[gIndex]
self.gIndex = gIndex
self.gName = self.glyph.name
self.gHasAnchors = 0
self.glyphList = []
self.glyphList_index = 0
self.glyphList_selected = 0
self.selectedglyph = None
self.k_BIG_SHIFT = 20
self.k_MEDIUM_SHIFT = 5
self.k_SMALL_SHIFT = 1
self.Xshift = 0
self.Yshift = 0
self.Xorig = 0
self.Yorig = 0
self.Xfinal = 0
self.Yfinal = 0
self.RBmasterIndex = 0
if fl.layer == 0: self.RBmaster0 = 1
else: self.RBmaster0 = 0
if fl.layer == 1: self.RBmaster1 = 1
else: self.RBmaster1 = 0
if fl.layer == 2: self.RBmaster2 = 1
else: self.RBmaster2 = 0
if fl.layer == 3: self.RBmaster3 = 1
else: self.RBmaster3 = 0
if fl.layer == 4: self.RBmaster4 = 1
else: self.RBmaster4 = 0
if fl.layer == 5: self.RBmaster5 = 1
else: self.RBmaster5 = 0
if fl.layer == 6: self.RBmaster6 = 1
else: self.RBmaster6 = 0
if fl.layer == 7: self.RBmaster7 = 1
else: self.RBmaster7 = 0
# Fill in the Anchor list
for anchor in self.glyph.anchors:
self.anchorList.append(anchor.name)
# Fill in the Glyph list
for g in f.glyphs:
if len(g.anchors) > 0:
self.glyphList.append(g.name)
# Checks if the initially selected glyph has anchors
if self.gName in self.glyphList:
self.gHasAnchors = 1
posy = 10 + 48*0 # (xTop , yTop , xBot , yBot)
self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT))
posy = 10 + 24*1
self.d.AddControl(LISTCONTROL, Rect( 10, posy, 150, posy+110), 'glyphList', STYLE_LIST, 'Glyphs')
self.d.AddControl(LISTCONTROL, Rect(510, posy, 650, posy+110), 'anchorList', STYLE_LIST, 'Anchors')
posy = 10 + 48*1
self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT))
posy = 10 + 48*2
self.d.AddControl(BUTTONCONTROL, Rect(160, posy, 200, posy+40), 'Xminus20', STYLE_BUTTON, '-'+ str(self.k_BIG_SHIFT))
self.d.AddControl(BUTTONCONTROL, Rect(210, posy, 250, posy+40), 'Xminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT))
self.d.AddControl(BUTTONCONTROL, Rect(260, posy, 300, posy+40), 'Xminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT))
self.d.AddControl(STATICCONTROL, Rect(310, posy, 323, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
self.d.AddControl(STATICCONTROL, Rect(323, posy, 360, posy+20), 'Xshift', STYLE_LABEL+cTO_CENTER)
self.d.AddControl(STATICCONTROL, Rect(310, posy+20, 323, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
self.d.AddControl(STATICCONTROL, Rect(323, posy+20, 360, posy+40), 'Yshift', STYLE_LABEL+cTO_CENTER)
self.d.AddControl(BUTTONCONTROL, Rect(360, posy, 400, posy+40), 'Xplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT))
self.d.AddControl(BUTTONCONTROL, Rect(410, posy, 450, posy+40), 'Xplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT))
self.d.AddControl(BUTTONCONTROL, Rect(460, posy, 500, posy+40), 'Xplus20', STYLE_BUTTON, '+'+ str(self.k_BIG_SHIFT))
for i in range(len(masterNames)):
posy = 154 + 22*i
self.d.AddControl(CHECKBOXCONTROL, Rect( 25, posy, 200, posy+20), 'RBmaster'+ str(i), STYLE_RADIO, masterNames[i])
posy = 10 + 48*3
self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT))
self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Original position')
self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xorig', STYLE_LABEL+cTO_CENTER)
self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yorig', STYLE_LABEL+cTO_CENTER)
posy = 10 + 48*4
self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT))
self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Final position')
self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xfinal', STYLE_LABEL+cTO_CENTER)
self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yfinal', STYLE_LABEL+cTO_CENTER)
#====== DIALOG FUNCTIONS =========
def on_Xminus20(self, code):
if self.anchorList_selected:
self.Xshift -= self.k_BIG_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Xminus5(self, code):
if self.anchorList_selected:
self.Xshift -= self.k_MEDIUM_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Xminus1(self, code):
if self.anchorList_selected:
self.Xshift -= self.k_SMALL_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Xplus1(self, code):
if self.anchorList_selected:
self.Xshift += self.k_SMALL_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Xplus5(self, code):
if self.anchorList_selected:
self.Xshift += self.k_MEDIUM_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Xplus20(self, code):
if self.anchorList_selected:
self.Xshift += self.k_BIG_SHIFT
self.d.PutValue('Xshift')
self.updateXfinal()
self.update_glyph()
def on_Yminus5(self, code):
if self.anchorList_selected:
self.Yshift -= self.k_MEDIUM_SHIFT
self.d.PutValue('Yshift')
self.updateYfinal()
self.update_glyph()
def on_Yminus1(self, code):
if self.anchorList_selected:
self.Yshift -= self.k_SMALL_SHIFT
self.d.PutValue('Yshift')
self.updateYfinal()
self.update_glyph()
def on_Yplus1(self, code):
if self.anchorList_selected:
self.Yshift += self.k_SMALL_SHIFT
self.d.PutValue('Yshift')
self.updateYfinal()
self.update_glyph()
def on_Yplus5(self, code):
if self.anchorList_selected:
self.Yshift += self.k_MEDIUM_SHIFT
self.d.PutValue('Yshift')
self.updateYfinal()
self.update_glyph()
def on_glyphList(self, code):
self.glyphList_selected = 1
self.gHasAnchors = 1
self.d.GetValue('glyphList')
self.gName = self.glyphList[self.glyphList_index] # Name of the glyph selected on the glyph list
self.gIndex = f.FindGlyph(self.gName)
fl.iglyph = self.gIndex # Switch the glyph on the Glyph Window
self.glyph = f[self.gIndex]
self.updateAnchorsList()
self.resetDialogValues()
def on_anchorList(self, code):
self.anchorList_selected = 1
self.d.GetValue('anchorList')
self.updateDialogValues()
def on_RBmaster0(self, code): self.updateRBmaster(0)
def on_RBmaster1(self, code): self.updateRBmaster(1)
def on_RBmaster2(self, code): self.updateRBmaster(2)
def on_RBmaster3(self, code): self.updateRBmaster(3)
def on_RBmaster4(self, code): self.updateRBmaster(4)
def on_RBmaster5(self, code): self.updateRBmaster(5)
def on_RBmaster6(self, code): self.updateRBmaster(6)
def on_RBmaster7(self, code): self.updateRBmaster(7)
def on_ok(self, code):
return 1
#====== RESET FUNCTIONS =========
def resetDialogValues(self):
self.resetXorig()
self.resetYorig()
self.resetXshift()
self.resetYshift()
self.resetXfinal()
self.resetYfinal()
def resetXorig(self):
self.Xorig = 0
self.d.PutValue('Xorig')
def resetYorig(self):
self.Yorig = 0
self.d.PutValue('Yorig')
def resetXshift(self):
self.Xshift = 0
self.d.PutValue('Xshift')
def resetYshift(self):
self.Yshift = 0
self.d.PutValue('Yshift')
def resetXfinal(self):
self.Xfinal = 0
self.d.PutValue('Xfinal')
def resetYfinal(self):
self.Yfinal = 0
self.d.PutValue('Yfinal')
#====== UPDATE FUNCTIONS =========
def updateRBmaster(self, newIndex):
self.RBmasterIndex = newIndex
if self.RBmasterIndex == 0: self.RBmaster0 = 1
else: self.RBmaster0 = 0
if self.RBmasterIndex == 1: self.RBmaster1 = 1
else: self.RBmaster1 = 0
if self.RBmasterIndex == 2: self.RBmaster2 = 1
else: self.RBmaster2 = 0
if self.RBmasterIndex == 3: self.RBmaster3 = 1
else: self.RBmaster3 = 0
if self.RBmasterIndex == 4: self.RBmaster4 = 1
else: self.RBmaster4 = 0
if self.RBmasterIndex == 5: self.RBmaster5 = 1
else: self.RBmaster5 = 0
if self.RBmasterIndex == 6: self.RBmaster6 = 1
else: self.RBmaster6 = 0
if self.RBmasterIndex == 7: self.RBmaster7 = 1
else: self.RBmaster7 = 0
for v in ['RBmaster0','RBmaster1','RBmaster2','RBmaster3','RBmaster4','RBmaster5','RBmaster6','RBmaster7']:
self.d.PutValue(v)
fl.layer = self.RBmasterIndex
if self.gHasAnchors and self.anchorList_selected:
self.updateDialogValues()
def updateAnchorsList(self):
self.anchorList = []
for anchor in self.glyph.anchors:
self.anchorList.append(anchor.name)
self.d.PutValue('anchorList')
self.anchorList_selected = 0
self.selectedAnchor = None
def updateDialogValues(self):
self.selectedAnchor = self.glyph.anchors[self.anchorList_index].Layer(fl.layer)
self.updateXorig(self.selectedAnchor.x)
self.updateYorig(self.selectedAnchor.y)
self.resetXshift()
self.resetYshift()
self.updateXfinal()
self.updateYfinal()
def updateXorig(self, pos):
self.Xorig = pos
self.d.PutValue('Xorig')
def updateYorig(self, pos):
self.Yorig = pos
self.d.PutValue('Yorig')
def updateXfinal(self):
if self.anchorList_selected:
self.Xfinal = self.Xorig + self.Xshift
self.d.PutValue('Xfinal')
def updateYfinal(self):
if self.anchorList_selected:
self.Yfinal = self.Yorig + self.Yshift
self.d.PutValue('Yfinal')
def update_glyph(self):
if self.anchorList_selected:
if self.gIndex not in glyphBkupDict:
# print "Made backup copy of '%s'" % self.glyph.name
glyphBkupDict[self.gIndex] = Glyph(f[self.gIndex])
fl.SetUndo(self.gIndex)
x = self.Xfinal
y = self.Yfinal
anchorPosition = Point(x, y)
anchorIndex = self.anchorList_index
anchor = self.glyph.anchors[anchorIndex]
# In single master fonts the adjustment of the anchors cannot be handled by the codepath used for multiple
# master fonts, because the UI gets updated but the changes are not stored in the VFB file upon saving.
if masters == 1:
anchor.x = x
anchor.y = y
else:
anchor.SetLayer(fl.layer, anchorPosition)
fl.UpdateGlyph(self.gIndex)
def Run(self):
return self.d.Run()
d = DialogClass()
if d.Run() == 1:
f.modified = 1
else:
for gID in glyphBkupDict:
f[gID] = glyphBkupDict[gID]
fl.UpdateGlyph(gID)
f.modified = 0
if __name__ == "__main__":
f = fl.font
gIndex = fl.iglyph
if f is None:
fl.Message('No font opened')
elif gIndex < 0:
if len(listGlyphsSelected) == 0:
fl.Message('Glyph selection is not valid')
else:
gIndex = listGlyphsSelected[0]
run(gIndex)
else:
run(gIndex)
|
py | 1a3e9dfe60ad2e553f3ea373cd010391d389c8f5 | from django.contrib import admin
# Register your models here.
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin)
|
py | 1a3e9e5a1c00a5c4ed887091b59abdd8620cf10b | from manga_py.provider import Provider
from .helpers.std import Std
class HocVienTruyenTranhCom(Provider, Std):
def get_chapter_index(self) -> str:
idx = self.re.search(r'/chapter/(\d+)', self.chapter)
return '{}-{}'.format(self.chapter_id, idx.group(1))
def _test_main_url(self, url):
if self.re.search('/chapter/', url):
url = self.html_fromstring(url, '#subNavi a', 0).get('href')
return url
def get_content(self):
url = self._test_main_url(self.get_url())
return self.http_get(self.http().normalize_uri(url))
def get_manga_name(self) -> str:
url = self._test_main_url(self.get_url())
return self.re.search('/manga/[^/]+/([^/]+)', url).group(1)
def get_chapters(self):
return self._elements('.table-scroll table.table td > a')
def get_files(self):
selector = '.manga-container img.page'
items = self.html_fromstring(self.chapter, selector)
return [i.get('src') for i in items]
def get_cover(self):
return self._cover_from_content('.__info-container .__image img')
def book_meta(self) -> dict:
# todo meta
pass
main = HocVienTruyenTranhCom
|
py | 1a3e9f59b747f3b80495a42f08fc656bee4bdab3 | # Escreva um programa que converta uma temperatura digitada em
# °C em °F. A fórmula para essa conversão está no livro
celsius = float(input('Enter the temperature in degrees Celsius: '))
fahrenheit = 9 * celsius / 5 + 32
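# Quick sanity check (editor's note): 25 °C -> 9 * 25 / 5 + 32 = 77.0 °F.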
print('The temperature in Fahrenheit is %2.2f' % fahrenheit) |
py | 1a3ea156382444492343d72618f5ba84584e81ed | import numpy as np
import torch
def to_tensor(blob):
if isinstance(blob, np.ndarray):
return torch.from_numpy(blob)
    if isinstance(blob, (int, float)):
        # torch.tensor() wraps a scalar in a 0-dim tensor; torch.Tensor(n) would
        # instead allocate an uninitialized tensor of length n, which is a bug here.
        return torch.tensor(blob)
if isinstance(blob, dict):
ts = {}
for k, v in blob.items():
ts[k] = to_tensor(v)
return ts
if isinstance(blob, list):
ts = list([to_tensor(e) for e in blob])
return ts
if isinstance(blob, tuple):
# namedtuple
if hasattr(blob, '_fields'):
ts = {k: to_tensor(getattr(blob, k)) for k in blob._fields}
ts = type(blob)(**ts)
else:
ts = tuple([to_tensor(e) for e in blob])
return ts
def to_device(blob, device, *args, **kwargs):
if hasattr(blob, 'to'):
return blob.to(device, *args, **kwargs)
if isinstance(blob, torch.Tensor):
return blob.to(device, *args, **kwargs)
if isinstance(blob, dict):
ts = {}
for k, v in blob.items():
ts[k] = to_device(v, device)
return ts
if isinstance(blob, list):
ts = list([to_device(e, device) for e in blob])
return ts
if isinstance(blob, tuple):
# namedtuple
if hasattr(blob, '_fields'):
ts = {k: to_device(getattr(blob, k), device) for k in blob._fields}
ts = type(blob)(**ts)
else:
ts = tuple([to_device(e, device) for e in blob])
return ts
return blob
# raise ValueError('type of {} is not support for to_device'.format(type(blob)))
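# A short usage sketch (editor's addition): converting a nested structure of
# numpy arrays to tensors and moving it to a device. The field names below are
# illustrative only; torch and numpy are assumed to be installed.
if __name__ == '__main__':
    batch = {'image': np.zeros((2, 3, 4), dtype=np.float32),
             'labels': [np.array([0, 1]), np.array([1, 0])]}
    tensors = to_tensor(batch)            # dict of torch.Tensors / lists of Tensors
    moved = to_device(tensors, 'cpu')     # works the same way for 'cuda:0'
    print(type(moved['image']), moved['labels'][0].dtype)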
|
py | 1a3ea1e3e69ad7674a93023c635759167bc6e0a6 | """
odm2rest
--------
A Python RESTful web service interface for accessing data in an
ODM2 database via Django rest swagger APIs.
"""
from __future__ import (absolute_import, division, print_function)
import os
from setuptools import find_packages, setup
import versioneer
here = os.path.abspath(os.path.dirname(__file__))
# Dependencies.
with open('requirements.txt') as f:
requirements = f.readlines()
install_requires = [t.strip() for t in requirements]
with open(os.path.join(here, 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='odm2rest',
version=versioneer.get_version(),
packages=find_packages(),
include_package_data=True,
license='BSD License',
    description='A Python RESTful web service interface for accessing data in an '
'ODM2 database via Django rest swagger APIs',
long_description=README,
url='https://github.com/ODM2/ODM2RESTfulWebServices',
author='Landung Setiawan',
author_email='[email protected]',
install_requires=install_requires,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
py | 1a3ea1f9f715fbbd398462590932a2089376c243 | # Generated by Django 3.0.8 on 2020-07-29 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0004_auto_20200703_1702'),
]
operations = [
migrations.AlterField(
model_name='post',
name='alt_google_photo_url',
field=models.URLField(blank=True, max_length=400, null=True, verbose_name='Alternate Google Photo URL'),
),
]
|
py | 1a3ea2637ad63f0afac94c2e4a42e0787be1ca79 | from uuid import UUID
from datetime import time
from enum import Enum
from typing import Dict, List, Optional
from pydantic import BaseModel, AnyHttpUrl, validator, root_validator
class ContactTypeEnum(Enum):
JIRA = "jira"
MAIL = "mail"
PUSHOVER = "pushover"
SEND_SMS = "send-sms"
SLACK = "slack"
TELEGRAM = "telegram"
TWILIO_SMS = "twilio sms"
TWILIO_VOICE = "twilio voice"
class DaysEnum(Enum):
MON = "Mon"
TUE = "Tue"
WED = "Wed"
THU = "Thu"
FRI = "Fri"
SAT = "Sat"
SUN = "Sun"
class TtlStateEnum(Enum):
DEL = "DEL"
ERROR = "ERROR"
NODATA = "NODATA"
OK = "OK"
WARN = "WARN"
class ParentTriggerRef(BaseModel):
tags: List[str]
name: str
def __hash__(self):
return hash((
            frozenset(self.tags),
            self.name,
))
def __eq__(self, other):
return (
set(self.tags) == set(other.tags)
and self.name == other.name
)
class Saturation(BaseModel):
type: str
fallback: Optional[str] = None
parameters: Optional[dict] = None
def to_custom_dict(self) -> Dict:
result = {
"type": self.type,
}
if self.fallback is not None:
result["fallback"] = self.fallback
if self.parameters is not None:
result["extra_parameters"] = self.parameters
return result
@classmethod
def from_moira_client_model(cls, moira_saturation: "moira_client.models.trigger.Saturation"):
d = moira_saturation.to_dict()
d["parameters"] = d.pop("extra_parameters", None)
return cls(**d)
def __hash__(self):
dct = self.to_custom_dict()
return hash(_freeze_dict(dct))
def __eq__(self, other):
if isinstance(other, Saturation):
return self.to_custom_dict() == other.to_custom_dict()
else:
raise ValueError("Incomparable types")
def _freeze_dict(dct):
"""Tries to freeze a dict to make it hashable."""
result = []
for key, value in dct.items():
if isinstance(value, dict):
value = _freeze_dict(value)
result.append((key, value))
result.sort()
return tuple(result)
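# Illustrative behaviour of _freeze_dict (editor's note; the values are examples):
#
#     _freeze_dict({'b': 1, 'a': {'x': 2}})
#     # -> (('a', (('x', 2),)), ('b', 1))
#     # nested dicts become sorted tuples, so the result is hashable and
#     # insensitive to key order.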
class Trigger(BaseModel):
id: Optional[str] = None
name: str
tags: List[str]
targets: List[str]
warn_value: Optional[int] = None
error_value: Optional[int] = None
desc: str = ""
ttl: int = 600
ttl_state: TtlStateEnum = TtlStateEnum.NODATA
expression: Optional[str] = ""
is_pull_type: bool = False
dashboard: Optional[AnyHttpUrl] = None
pending_interval: Optional[int] = 0
day_disable: List[DaysEnum] = []
time_start: Optional[time] = time(hour=0, minute=0)
time_end: Optional[time] = time(hour=23, minute=59)
parents: Optional[List[str]]
saturation: Optional[List[Saturation]] = list()
@validator("id")
def id_uuid(cls, v):
try:
UUID(v)
except ValueError:
raise
return v
@root_validator
def check_thresholds_values(cls, values):
warn_value, error_value = (
values.get('warn_value') is not None,
values.get('error_value') is not None,
)
if warn_value ^ error_value:
raise ValueError('must provide warn_value and error_value')
if (
warn_value & error_value
and len(values.get('targets')) > 1
and values.get('expression') is None
):
raise ValueError('must use single target with warn_value and error_value')
return values
def to_custom_dict(self) -> Dict:
return {
'name': self.name,
'tags': self.tags,
'targets': self.targets,
'warn_value': self.warn_value,
'error_value': self.error_value,
'desc': self.desc,
'ttl': self.ttl,
'ttl_state': self.ttl_state.value,
'expression': self.expression,
'is_pull_type': self.is_pull_type,
'dashboard': self.dashboard,
'pending_interval': self.pending_interval,
'sched': {
'startOffset': self.time_start.hour * 60 + self.time_start.minute,
'endOffset': self.time_end.hour * 60 + self.time_end.minute,
'tzOffset': 0,
'days': [
{'name': day.value, 'enabled': day not in self.day_disable}
for day in DaysEnum
],
},
'parents': self.parents,
'saturation': [
s.to_custom_dict()
for s in self.saturation
],
}
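# Illustrative construction (editor's sketch; the trigger name, tag and target
# below are made-up examples, not values required by Moira):
#
#     t = Trigger(name="cpu usage", tags=["prod"], targets=["avg(app.cpu)"],
#                 warn_value=80, error_value=95)
#     payload = t.to_custom_dict()   # plain dict, e.g. for sending to the Moira API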
class TriggerFile(Trigger):
parents: Optional[List[ParentTriggerRef]]
class Contact(BaseModel):
id: Optional[str] = None
type: ContactTypeEnum
value: str
fallback_value: Optional[str] = None
def __hash__(self):
return f"{self.type}:{self.value}:{self.fallback_value}".__hash__()
class Escalation(BaseModel):
contacts: List[Contact]
offset_in_minutes: int = 0
class Subscription(BaseModel):
tags: List[str]
contacts: Optional[List[Contact]] = []
escalations: Optional[List[Escalation]] = []
day_disable: List[DaysEnum] = []
time_start: Optional[time] = time(hour=0, minute=0)
time_end: Optional[time] = time(hour=23, minute=59)
def to_custom_dict(self) -> Dict:
return {
'tags': self.tags,
'contacts': [c.id for c in self.contacts],
'escalations': [
{
'contacts': [c.id for c in e.contacts],
'offset_in_minutes': e.offset_in_minutes,
}
for e in self.escalations
],
'sched': {
'startOffset': self.time_start.hour * 60 + self.time_start.minute,
'endOffset': self.time_end.hour * 60 + self.time_end.minute,
'tzOffset': 0,
'days': [
{'name': day.value, 'enabled': day not in self.day_disable}
for day in DaysEnum
],
},
}
class Alerts(BaseModel):
version: float = 1
prefix: str = ""
triggers: List[TriggerFile] = []
alerting: List[Subscription] = []
|
py | 1a3ea2b685bea20d18142a51b15c68c068a3f52f | """Window Covering devices."""
from ..extended_property import (
DURATION_HIGH,
DURATION_LOW,
ON_LEVEL,
RAMP_RATE,
X10_HOUSE,
X10_UNIT,
)
from ..groups import COVER
from ..operating_flag import (
DUAL_LINE_ON,
FORWARD_ON,
KEY_BEEP_ON,
LED_BLINK_ON_ERROR_OFF,
LED_BLINK_ON_TX_ON,
LED_ON,
MOMENTARY_LINE_ON,
NOT_3_WAY,
PROGRAM_LOCK_ON,
)
from .open_close_responder_base import OpenCloseResponderBase
class WindowCovering(OpenCloseResponderBase):
"""Window Covering device."""
def __init__(self, address, cat, subcat, firmware=0x00, description="", model=""):
"""Init the WindowCovering class."""
super().__init__(
address, cat, subcat, firmware, description, model, state_name=COVER
)
def _register_operating_flags(self):
"""Register the operating and properties."""
super()._register_operating_flags()
self._add_operating_flag(PROGRAM_LOCK_ON, 0, 0, 0, 1)
self._add_operating_flag(LED_BLINK_ON_TX_ON, 0, 1, 2, 3)
self._add_operating_flag(LED_ON, 0, 4, 0x0A, 0x0B, is_reversed=True)
self._add_operating_flag(KEY_BEEP_ON, 0, 5, 0x0C, 0x0D)
self._add_operating_flag(LED_BLINK_ON_ERROR_OFF, 2, 3, 0x15, 0x16)
self._add_operating_flag(DUAL_LINE_ON, 3, 0, 0x1E, 0x1F)
self._add_operating_flag(MOMENTARY_LINE_ON, 3, 1, 0x20, 0x21)
self._add_operating_flag(NOT_3_WAY, 3, 3, 0x22, 0x23)
self._add_operating_flag(FORWARD_ON, 3, 4, 0x24, 0x25)
self._add_property(X10_HOUSE, 5, None) # 4
self._add_property(X10_UNIT, 6, None) # 4
self._add_property(RAMP_RATE, 7, 5)
# Need to verify use_data position
self._add_property(ON_LEVEL, 8, 6)
self._add_property(DURATION_HIGH, 9, None) # 0x10
self._add_property(DURATION_LOW, 10, None) # 0x10
|
bzl | 1a3ea38b4ecc9a268314243ffde26734015fc49a | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Repository rules/macros for rules_proto.
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def rules_proto_repo():
if "rules_proto" not in native.existing_rules():
http_archive(
name = "rules_proto",
sha256 = "e0cab008a9cdc2400a1d6572167bf9c5afc72e19ee2b862d18581051efab42c9",
strip_prefix = "rules_proto-c0b62f2f46c85c16cb3b5e9e921f0d00e3101934",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/c0b62f2f46c85c16cb3b5e9e921f0d00e3101934.tar.gz",
"https://github.com/bazelbuild/rules_proto/archive/c0b62f2f46c85c16cb3b5e9e921f0d00e3101934.tar.gz",
],
)
|
py | 1a3ea510fa58e3d698a33f5c7c720b70ecace1c5 | import random
from unittest.mock import ANY
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from password_generator.input_sanitise import sanitise
from .test_password_generator_data import big_input
from .test_password_generator_data import big_output
class PasswordGeneratorTest(TestCase):
def setUp(self):
self.maxDiff = None
self.client = APIClient()
def test_post_words(self):
first_phrase = 'The quick brown fox jumps over lazy dog'
response = self.client.post(
'/words/bulk_add_words/',
data={
'wordset': 'english',
'text': first_phrase
}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.json(),
{
'added': {sanitise(word): 1 for word in first_phrase.split()}
}
)
response = self.client.get('/words/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{
'count': first_phrase.count(' ') + 1,
'next': None,
'previous': None,
'results': [
{
'count': 1, 'id': ANY, 'word': sanitise(word),
'wordset': 'english', 'word_length': len(sanitise(word))
}
for word in first_phrase.split()
],
},
)
second_phrase = 'the slow red dog jumps over the energetic cow'
response = self.client.post(
'/words/bulk_add_words/',
data={
'wordset': 'english',
'text': second_phrase
}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.json(),
{
'added': {sanitise(word): second_phrase.count(word) for word in second_phrase.split()}
}
)
# Change these if you change the phrases above
response = self.client.get('/words/?word=the')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json()['results'][0],
{'id': ANY, 'wordset': 'english', 'word': 'the', 'count': 3, 'word_length': 3}
)
response = self.client.get('/words/?word=brown')
self.assertEqual(
response.json()['results'][0],
{'id': ANY, 'wordset': 'english', 'word': 'brown', 'count': 1, 'word_length': 5}
)
response = self.client.get('/words/?word=fantastic')
self.assertFalse(response.json()['results'])
def test_generate_password(self):
words = 'word word another word this word can be used in a password exceptionally long word as well'
response = self.client.post(
'/words/bulk_add_words/',
data={
'wordset': 'english',
'text': words
}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
random.seed(1)
response = self.client.get('/languages/english/generate_password/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{'passphrase': 'another password well well', 'permutations_chosen_from': 2401},
)
# Only allow short words
response = self.client.get(
'/languages/english/generate_password/?min_word_length=2&max_word_length=4&length=3'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{'passphrase': 'this used this', 'permutations_chosen_from': 729},
)
# Only allow long words
response = self.client.get(
'/languages/english/generate_password/?min_word_length=8&max_word_length=15&length=2'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{'passphrase': 'exceptionally exceptionally', 'permutations_chosen_from': 4},
)
# Only allow very common words
response = self.client.get(
'/languages/english/generate_password/?words_allowed=2'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{'passphrase': 'another another word word', 'permutations_chosen_from': 16},
)
def test_big_dirty_input(self):
response = self.client.post(
'/words/bulk_add_words/',
data={
'wordset': 'english',
'text': big_input
}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json(), {'added': big_output})
|
py | 1a3ea54cbfa2cdf035d6249485ed4f1b2af36b77 | """
File: diatonic_tone_cache.py
Purpose: To provide a cache for DiatonicTone instances.
"""
from tonalmodel.diatonic_tone import DiatonicTone
class DiatonicToneCache(object):
""""
Cache for all possible DiatonicTone's. This cache is very small, but also provides an object level identity
to each tone in the system, if used.
The cache is implemented as a singleton. The constructor is meant to be 'private', and not called externally.
All access should be through either get_cache() or get_tone().
"""
DIATONIC_CACHE = None
def __init__(self):
"""
Constructor.
Args: None
"""
# map tone name to tone.
self.diatonic_map = {}
self.__build_diatonics()
@staticmethod
def get_cache():
if DiatonicToneCache.DIATONIC_CACHE is None:
DiatonicToneCache.DIATONIC_CACHE = DiatonicToneCache()
return DiatonicToneCache.DIATONIC_CACHE
@staticmethod
def get_tone(tone_text):
cache = DiatonicToneCache.get_cache()
return cache.get_cache_tone(tone_text)
@staticmethod
def get_tones():
cache = DiatonicToneCache.get_cache()
tones = []
for ltr in DiatonicTone.DIATONIC_LETTERS:
for aug in DiatonicTone.AUGMENTATIONS:
tones.append(cache.get_cache_tone(ltr + aug))
return tones
def get_cache_tone(self, tone_text):
return self.diatonic_map[tone_text.lower()]
def __build_diatonics(self):
"""
Builds all diatonic tones for the cache.
"""
for ltr in DiatonicTone.DIATONIC_LETTERS:
for aug in DiatonicTone.AUGMENTATIONS:
self.diatonic_map[(ltr + aug).lower()] = DiatonicTone(ltr + aug) |
py | 1a3ea6fb1865db954b8e8b496e23dd0da1df4936 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import click
from calendar import month_abbr
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
FILLED = u'\u25CF'
EMPTY = u'\u25CB'
row_label_formats = {
'year': '{year:<{max_year_width}}',
'age': 'Age {age:<{max_age_width}}'
}
class Date(click.ParamType):
name = 'date'
def __init__(self, format="%d-%m-%Y"):
self.format = format
def convert(self, value, param, ctx):
try:
return datetime.strptime(value, self.format).date()
except ValueError:
self.fail('%s is not a valid date' % value, param, ctx)
def header(fill=' ', default_width=9, widths={'Feb': 8}):
return ''.join('{month:{fill}<{width}}'
.format(month=abbr, fill=fill,
width=widths.get(abbr, default_width))
for abbr in month_abbr[1:])
# Week of the year
yweek = lambda d: timedelta(days=d.timetuple().tm_yday) // timedelta(weeks=1)
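# Worked example (editor's note): for 15 February, tm_yday is 46, so
# yweek(...) == timedelta(days=46) // timedelta(weeks=1) == 6.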
@click.command()
@click.option('--birth-date',
'-d',
type=Date(),
help='Date of birth (dd-mm-YYYY)',
prompt='Date of birth (dd-mm-YYYY)')
@click.option('--life-expectancy',
'-l',
'expected_years',
type=int,
default=85,
help='Number of years you expect to live')
@click.option('--row-label',
'-r',
type=click.Choice(['year', 'age']),
default='year',
help='Label for rows')
@click.option('--row-label-period',
type=int,
default=5,
help='Show label after every duration')
@click.option('--highlight-date',
'-h',
multiple=True,
type=Date(),
help='Dates to highlight')
def main(birth_date, expected_years, row_label, row_label_period, highlight_date):
expected_death_date = birth_date + relativedelta(years=expected_years)
expected_death_year = expected_death_date.year
birth_year = birth_date.year
curr_date = date.today()
with click.progressbar(range(1, 53), label='{}/52 weeks of year'.format(yweek(curr_date))) as bar:
for i in bar:
if i == yweek(curr_date):
break
# ensures that the formatting won't break for those who are alive
# between 9999 and 10000 A.D. and still using this for some reason
max_year_width = len(str(expected_death_year)) + 1
max_age_width = len(str(expected_years)) + 1
fmt_dct = dict(age=expected_years,
year=expected_death_year,
max_year_width=max_year_width,
max_age_width=max_age_width)
row_label_len = len(row_label_formats[row_label].format(**fmt_dct))
# Normalize set of dates to highlight (using set for constant time lookup)
highlight_set = set(date(d.year, 1, 1) + timedelta(weeks=yweek(d))
for d in highlight_date)
for year in range(birth_year, expected_death_year + 1):
if year == birth_year: # Print header on first iteration in loop
click.echo(' ' * row_label_len, nl=False)
click.echo(header())
age = year - birth_year
if age % row_label_period:
click.echo(' ' * row_label_len, nl=False)
else:
fmt_dct = dict(age=age,
year=year,
max_year_width=max_year_width,
max_age_width=max_age_width)
click.echo(row_label_formats[row_label].format(**fmt_dct), nl=False)
date_iter = date(year, 1, 1)
while date_iter.year == year:
if birth_date < date_iter < curr_date:
if date_iter in highlight_set:
click.secho(FILLED, nl=False, fg='red')
else:
click.secho(FILLED, nl=False, fg='green')
else:
click.echo(EMPTY, nl=False)
click.echo(' ', nl=False)
date_iter += timedelta(weeks=1)
click.echo('')
if __name__ == '__main__':
main()
|
py | 1a3ea7019444a27b4a23e782c7c4f67df8d54dc0 | divider = """//IEEE Floating Point Divider (Single Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
//
module divider(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
input [31:0] input_b;
input input_b_stb;
output input_b_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
normalise_a = 4'd4,
normalise_b = 4'd5,
divide_0 = 4'd6,
divide_1 = 4'd7,
divide_2 = 4'd8,
divide_3 = 4'd9,
normalise_1 = 4'd10,
normalise_2 = 4'd11,
round = 4'd12,
pack = 4'd13,
put_z = 4'd14;
reg [31:0] a, b, z;
reg [23:0] a_m, b_m, z_m;
reg [9:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [50:0] quotient, divisor, dividend, remainder;
reg [5:0] count;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= a[22 : 0];
b_m <= b[22 : 0];
a_e <= a[30 : 23] - 127;
b_e <= b[30 : 23] - 127;
a_s <= a[31];
b_s <= b[31];
state <= special_cases;
end
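      // Editor's note (added for clarity, not part of the original generator output):
      // the unpack slices above follow the IEEE-754 single-precision layout --
      // bit 31 is the sign, bits 30..23 the biased exponent (bias 127) and
      // bits 22..0 the mantissa. Worked examples: 32'h3F800000 (1.0f) unpacks to
      // sign 0, exponent 0, mantissa 0; 32'hC0200000 (-2.5f) unpacks to sign 1,
      // exponent 1, mantissa 23'h200000.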
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 128 && a_m != 0) || (b_e == 128 && b_m != 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
//if a is inf and b is inf return NaN
end else if ((a_e == 128) && (b_e == 128)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 128) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 255;
z[22:0] <= 0;
state <= put_z;
//if b is zero return NaN
if ($signed(b_e == -127) && (b_m == 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
end
//if b is inf return zero
end else if (b_e == 128) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 0;
z[22:0] <= 0;
state <= put_z;
//if a is zero return zero
end else if (($signed(a_e) == -127) && (a_m == 0)) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 0;
z[22:0] <= 0;
state <= put_z;
//if b is zero return NaN
if (($signed(b_e) == -127) && (b_m == 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
end
//if b is zero return inf
end else if (($signed(b_e) == -127) && (b_m == 0)) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 255;
z[22:0] <= 0;
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -127) begin
a_e <= -126;
end else begin
a_m[23] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -127) begin
b_e <= -126;
end else begin
b_m[23] <= 1;
end
state <= normalise_a;
end
end
normalise_a:
begin
if (a_m[23]) begin
state <= normalise_b;
end else begin
a_m <= a_m << 1;
a_e <= a_e - 1;
end
end
normalise_b:
begin
if (b_m[23]) begin
state <= divide_0;
end else begin
b_m <= b_m << 1;
b_e <= b_e - 1;
end
end
divide_0:
begin
z_s <= a_s ^ b_s;
z_e <= a_e - b_e;
quotient <= 0;
remainder <= 0;
count <= 0;
dividend <= a_m << 27;
divisor <= b_m;
state <= divide_1;
end
divide_1:
begin
quotient <= quotient << 1;
remainder <= remainder << 1;
remainder[0] <= dividend[50];
dividend <= dividend << 1;
state <= divide_2;
end
divide_2:
begin
if (remainder >= divisor) begin
quotient[0] <= 1;
remainder <= remainder - divisor;
end
if (count == 49) begin
state <= divide_3;
end else begin
count <= count + 1;
state <= divide_1;
end
end
divide_3:
begin
z_m <= quotient[26:3];
guard <= quotient[2];
round_bit <= quotient[1];
sticky <= quotient[0] | (remainder != 0);
state <= normalise_1;
end
normalise_1:
begin
if (z_m[23] == 0 && $signed(z_e) > -126) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -126) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 24'hffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[22 : 0] <= z_m[22:0];
z[30 : 23] <= z_e[7:0] + 127;
z[31] <= z_s;
if ($signed(z_e) == -126 && z_m[23] == 0) begin
z[30 : 23] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 127) begin
z[22 : 0] <= 0;
z[30 : 23] <= 255;
z[31] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
multiplier = """//IEEE Floating Point Multiplier (Single Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module multiplier(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
input [31:0] input_b;
input input_b_stb;
output input_b_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
normalise_a = 4'd4,
normalise_b = 4'd5,
multiply_0 = 4'd6,
multiply_1 = 4'd7,
normalise_1 = 4'd8,
normalise_2 = 4'd9,
round = 4'd10,
pack = 4'd11,
put_z = 4'd12;
reg [31:0] a, b, z;
reg [23:0] a_m, b_m, z_m;
reg [9:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [49:0] product;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= a[22 : 0];
b_m <= b[22 : 0];
a_e <= a[30 : 23] - 127;
b_e <= b[30 : 23] - 127;
a_s <= a[31];
b_s <= b[31];
state <= special_cases;
end
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 128 && a_m != 0) || (b_e == 128 && b_m != 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 128) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 255;
z[22:0] <= 0;
//if b is zero return NaN
if (($signed(b_e) == -127) && (b_m == 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
end
state <= put_z;
//if b is inf return inf
end else if (b_e == 128) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 255;
z[22:0] <= 0;
//if a is zero return NaN
if (($signed(a_e) == -127) && (a_m == 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
end
state <= put_z;
//if a is zero return zero
end else if (($signed(a_e) == -127) && (a_m == 0)) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 0;
z[22:0] <= 0;
state <= put_z;
//if b is zero return zero
end else if (($signed(b_e) == -127) && (b_m == 0)) begin
z[31] <= a_s ^ b_s;
z[30:23] <= 0;
z[22:0] <= 0;
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -127) begin
a_e <= -126;
end else begin
a_m[23] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -127) begin
b_e <= -126;
end else begin
b_m[23] <= 1;
end
state <= normalise_a;
end
end
normalise_a:
begin
if (a_m[23]) begin
state <= normalise_b;
end else begin
a_m <= a_m << 1;
a_e <= a_e - 1;
end
end
normalise_b:
begin
if (b_m[23]) begin
state <= multiply_0;
end else begin
b_m <= b_m << 1;
b_e <= b_e - 1;
end
end
multiply_0:
begin
z_s <= a_s ^ b_s;
z_e <= a_e + b_e + 1;
product <= a_m * b_m * 4;
state <= multiply_1;
end
multiply_1:
begin
z_m <= product[49:26];
guard <= product[25];
round_bit <= product[24];
sticky <= (product[23:0] != 0);
state <= normalise_1;
end
normalise_1:
begin
if (z_m[23] == 0) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -126) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 24'hffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[22 : 0] <= z_m[22:0];
z[30 : 23] <= z_e[7:0] + 127;
z[31] <= z_s;
if ($signed(z_e) == -126 && z_m[23] == 0) begin
z[30 : 23] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 127) begin
z[22 : 0] <= 0;
z[30 : 23] <= 255;
z[31] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
adder = """//IEEE Floating Point Adder (Single Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module adder(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
input [31:0] input_b;
input input_b_stb;
output input_b_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
align = 4'd4,
add_0 = 4'd5,
add_1 = 4'd6,
normalise_1 = 4'd7,
normalise_2 = 4'd8,
round = 4'd9,
pack = 4'd10,
put_z = 4'd11;
reg [31:0] a, b, z;
reg [26:0] a_m, b_m;
reg [23:0] z_m;
reg [9:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [27:0] sum;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= {a[22 : 0], 3'd0};
b_m <= {b[22 : 0], 3'd0};
a_e <= a[30 : 23] - 127;
b_e <= b[30 : 23] - 127;
a_s <= a[31];
b_s <= b[31];
state <= special_cases;
end
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 128 && a_m != 0) || (b_e == 128 && b_m != 0)) begin
z[31] <= 1;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 128) begin
z[31] <= a_s;
z[30:23] <= 255;
z[22:0] <= 0;
//if a is inf and signs don't match return nan
if ((b_e == 128) && (a_s != b_s)) begin
z[31] <= b_s;
z[30:23] <= 255;
z[22] <= 1;
z[21:0] <= 0;
end
state <= put_z;
//if b is inf return inf
end else if (b_e == 128) begin
z[31] <= b_s;
z[30:23] <= 255;
z[22:0] <= 0;
state <= put_z;
        //if a and b are both zero, return a signed zero
end else if ((($signed(a_e) == -127) && (a_m == 0)) && (($signed(b_e) == -127) && (b_m == 0))) begin
z[31] <= a_s & b_s;
z[30:23] <= b_e[7:0] + 127;
z[22:0] <= b_m[26:3];
state <= put_z;
//if a is zero return b
end else if (($signed(a_e) == -127) && (a_m == 0)) begin
z[31] <= b_s;
z[30:23] <= b_e[7:0] + 127;
z[22:0] <= b_m[26:3];
state <= put_z;
//if b is zero return a
end else if (($signed(b_e) == -127) && (b_m == 0)) begin
z[31] <= a_s;
z[30:23] <= a_e[7:0] + 127;
z[22:0] <= a_m[26:3];
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -127) begin
a_e <= -126;
end else begin
a_m[26] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -127) begin
b_e <= -126;
end else begin
b_m[26] <= 1;
end
state <= align;
end
end
align:
begin
if ($signed(a_e) > $signed(b_e)) begin
b_e <= b_e + 1;
b_m <= b_m >> 1;
b_m[0] <= b_m[0] | b_m[1];
end else if ($signed(a_e) < $signed(b_e)) begin
a_e <= a_e + 1;
a_m <= a_m >> 1;
a_m[0] <= a_m[0] | a_m[1];
end else begin
state <= add_0;
end
end
add_0:
begin
z_e <= a_e;
if (a_s == b_s) begin
sum <= a_m + b_m;
z_s <= a_s;
end else begin
if (a_m >= b_m) begin
sum <= a_m - b_m;
z_s <= a_s;
end else begin
sum <= b_m - a_m;
z_s <= b_s;
end
end
state <= add_1;
end
add_1:
begin
if (sum[27]) begin
z_m <= sum[27:4];
guard <= sum[3];
round_bit <= sum[2];
sticky <= sum[1] | sum[0];
z_e <= z_e + 1;
end else begin
z_m <= sum[26:3];
guard <= sum[2];
round_bit <= sum[1];
sticky <= sum[0];
end
state <= normalise_1;
end
normalise_1:
begin
if (z_m[23] == 0 && $signed(z_e) > -126) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -126) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 24'hffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[22 : 0] <= z_m[22:0];
z[30 : 23] <= z_e[7:0] + 127;
z[31] <= z_s;
if ($signed(z_e) == -126 && z_m[23] == 0) begin
z[30 : 23] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 127) begin
z[22 : 0] <= 0;
z[30 : 23] <= 255;
z[31] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
int_to_float = """//Integer to IEEE Floating Point Converter (Single Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module int_to_float(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [2:0] state;
parameter get_a = 3'd0,
convert_0 = 3'd1,
convert_1 = 3'd2,
convert_2 = 3'd3,
round = 3'd4,
pack = 3'd5,
put_z = 3'd6;
reg [31:0] a, z, value;
reg [23:0] z_m;
reg [7:0] z_r;
reg [7:0] z_e;
reg z_s;
reg guard, round_bit, sticky;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= convert_0;
end
end
convert_0:
begin
if ( a == 0 ) begin
z_s <= 0;
z_m <= 0;
z_e <= -127;
state <= pack;
end else begin
value <= a[31] ? -a : a;
z_s <= a[31];
state <= convert_1;
end
end
convert_1:
begin
z_e <= 31;
z_m <= value[31:8];
z_r <= value[7:0];
state <= convert_2;
end
convert_2:
begin
if (!z_m[23]) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= z_r[7];
z_r <= z_r << 1;
end else begin
guard <= z_r[7];
round_bit <= z_r[6];
sticky <= z_r[5:0] != 0;
state <= round;
end
end
round:
begin
if (guard && (round_bit || sticky || z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 24'hffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[22 : 0] <= z_m[22:0];
z[30 : 23] <= z_e + 127;
z[31] <= z_s;
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
float_to_int = """//IEEE Floating Point to Integer Converter (Single Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module float_to_int(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg [2:0] state;
parameter get_a = 3'd0,
special_cases = 3'd1,
unpack = 3'd2,
convert = 3'd3,
put_z = 3'd4;
reg [31:0] a_m, a, z;
reg [8:0] a_e;
reg a_s;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m[31:8] <= {1'b1, a[22 : 0]};
a_m[7:0] <= 0;
a_e <= a[30 : 23] - 127;
a_s <= a[31];
state <= special_cases;
end
special_cases:
begin
if ($signed(a_e) == -127) begin
z <= 0;
state <= put_z;
end else if ($signed(a_e) > 31) begin
z <= 32'h80000000;
state <= put_z;
end else begin
state <= convert;
end
end
convert:
begin
if ($signed(a_e) < 31 && a_m) begin
a_e <= a_e + 1;
a_m <= a_m >> 1;
end else begin
if (a_m[31]) begin
z <= 32'h80000000;
end else begin
z <= a_s ? -a_m : a_m;
end
state <= put_z;
end
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
double_divider = """//IEEE Floating Point Divider (Double Precision)
//Copyright (C) Jonathan P Dawson 2014
//2014-01-11
//
module double_divider(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
input [63:0] input_b;
input input_b_stb;
output input_b_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
normalise_a = 4'd4,
normalise_b = 4'd5,
divide_0 = 4'd6,
divide_1 = 4'd7,
divide_2 = 4'd8,
divide_3 = 4'd9,
normalise_1 = 4'd10,
normalise_2 = 4'd11,
round = 4'd12,
pack = 4'd13,
put_z = 4'd14;
reg [63:0] a, b, z;
reg [52:0] a_m, b_m, z_m;
reg [12:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [108:0] quotient, divisor, dividend, remainder;
reg [6:0] count;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= a[51 : 0];
b_m <= b[51 : 0];
a_e <= a[62 : 52] - 1023;
b_e <= b[62 : 52] - 1023;
a_s <= a[63];
b_s <= b[63];
state <= special_cases;
end
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 1024 && a_m != 0) || (b_e == 1024 && b_m != 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
//if a is inf and b is inf return NaN
end else if ((a_e == 1024) && (b_e == 1024)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 1024) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 2047;
z[51:0] <= 0;
state <= put_z;
//if b is zero return NaN
if ($signed(b_e == -1023) && (b_m == 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
end
//if b is inf return zero
end else if (b_e == 1024) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 0;
z[51:0] <= 0;
state <= put_z;
//if a is zero return zero
end else if (($signed(a_e) == -1023) && (a_m == 0)) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 0;
z[51:0] <= 0;
state <= put_z;
//if b is zero return NaN
if (($signed(b_e) == -1023) && (b_m == 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
end
//if b is zero return inf
end else if (($signed(b_e) == -1023) && (b_m == 0)) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 2047;
z[51:0] <= 0;
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -1023) begin
a_e <= -1022;
end else begin
a_m[52] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -1023) begin
b_e <= -1022;
end else begin
b_m[52] <= 1;
end
state <= normalise_a;
end
end
normalise_a:
begin
if (a_m[52]) begin
state <= normalise_b;
end else begin
a_m <= a_m << 1;
a_e <= a_e - 1;
end
end
normalise_b:
begin
if (b_m[52]) begin
state <= divide_0;
end else begin
b_m <= b_m << 1;
b_e <= b_e - 1;
end
end
divide_0:
begin
z_s <= a_s ^ b_s;
z_e <= a_e - b_e;
quotient <= 0;
remainder <= 0;
count <= 0;
dividend <= a_m << 56;
divisor <= b_m;
state <= divide_1;
end
divide_1:
begin
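      // divide_1/divide_2 perform one bit of restoring long division per pass
      // over the wide quotient/remainder/dividend registers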
quotient <= quotient << 1;
remainder <= remainder << 1;
remainder[0] <= dividend[108];
dividend <= dividend << 1;
state <= divide_2;
end
divide_2:
begin
if (remainder >= divisor) begin
quotient[0] <= 1;
remainder <= remainder - divisor;
end
if (count == 107) begin
state <= divide_3;
end else begin
count <= count + 1;
state <= divide_1;
end
end
divide_3:
begin
z_m <= quotient[55:3];
guard <= quotient[2];
round_bit <= quotient[1];
sticky <= quotient[0] | (remainder != 0);
state <= normalise_1;
end
normalise_1:
begin
if (z_m[52] == 0 && $signed(z_e) > -1022) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -1022) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
        if (z_m == 53'h1fffffffffffff) begin
          z_e <= z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[51 : 0] <= z_m[51:0];
z[62 : 52] <= z_e[10:0] + 1023;
z[63] <= z_s;
if ($signed(z_e) == -1022 && z_m[52] == 0) begin
z[62 : 52] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 1023) begin
z[51 : 0] <= 0;
z[62 : 52] <= 2047;
z[63] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
double_multiplier = """//IEEE Floating Point Multiplier (Double Precision)
//Copyright (C) Jonathan P Dawson 2014
//2014-01-10
module double_multiplier(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
input [63:0] input_b;
input input_b_stb;
output input_b_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
normalise_a = 4'd4,
normalise_b = 4'd5,
multiply_0 = 4'd6,
multiply_1 = 4'd7,
normalise_1 = 4'd8,
normalise_2 = 4'd9,
round = 4'd10,
pack = 4'd11,
put_z = 4'd12;
reg [63:0] a, b, z;
reg [52:0] a_m, b_m, z_m;
reg [12:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [107:0] product;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= a[51 : 0];
b_m <= b[51 : 0];
a_e <= a[62 : 52] - 1023;
b_e <= b[62 : 52] - 1023;
a_s <= a[63];
b_s <= b[63];
state <= special_cases;
end
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 1024 && a_m != 0) || (b_e == 1024 && b_m != 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 1024) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 2047;
z[51:0] <= 0;
state <= put_z;
//if b is zero return NaN
if (($signed(b_e) == -1023) && (b_m == 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
end
//if b is inf return inf
end else if (b_e == 1024) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 2047;
z[51:0] <= 0;
        //if a is zero return NaN
if (($signed(a_e) == -1023) && (a_m == 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
end
state <= put_z;
//if a is zero return zero
end else if (($signed(a_e) == -1023) && (a_m == 0)) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 0;
z[51:0] <= 0;
state <= put_z;
//if b is zero return zero
end else if (($signed(b_e) == -1023) && (b_m == 0)) begin
z[63] <= a_s ^ b_s;
z[62:52] <= 0;
z[51:0] <= 0;
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -1023) begin
a_e <= -1022;
end else begin
a_m[52] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -1023) begin
b_e <= -1022;
end else begin
b_m[52] <= 1;
end
state <= normalise_a;
end
end
normalise_a:
begin
if (a_m[52]) begin
state <= normalise_b;
end else begin
a_m <= a_m << 1;
a_e <= a_e - 1;
end
end
normalise_b:
begin
if (b_m[52]) begin
state <= multiply_0;
end else begin
b_m <= b_m << 1;
b_e <= b_e - 1;
end
end
multiply_0:
begin
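      // a_m * b_m is a 106-bit product; the *4 shifts it two places up into the
      // 108-bit register so multiply_1 can slice the mantissa, guard and round
      // bits from fixed positions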
z_s <= a_s ^ b_s;
z_e <= a_e + b_e + 1;
product <= a_m * b_m * 4;
state <= multiply_1;
end
multiply_1:
begin
z_m <= product[107:55];
guard <= product[54];
round_bit <= product[53];
sticky <= (product[52:0] != 0);
state <= normalise_1;
end
normalise_1:
begin
if (z_m[52] == 0) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -1022) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
        if (z_m == 53'h1fffffffffffff) begin
          z_e <= z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[51 : 0] <= z_m[51:0];
z[62 : 52] <= z_e[11:0] + 1023;
z[63] <= z_s;
if ($signed(z_e) == -1022 && z_m[52] == 0) begin
z[62 : 52] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 1023) begin
z[51 : 0] <= 0;
z[62 : 52] <= 2047;
z[63] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
double_adder = """//IEEE Floating Point Adder (Double Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module double_adder(
input_a,
input_b,
input_a_stb,
input_b_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack,
input_b_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
input [63:0] input_b;
input input_b_stb;
output input_b_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [3:0] state;
parameter get_a = 4'd0,
get_b = 4'd1,
unpack = 4'd2,
special_cases = 4'd3,
align = 4'd4,
add_0 = 4'd5,
add_1 = 4'd6,
normalise_1 = 4'd7,
normalise_2 = 4'd8,
round = 4'd9,
pack = 4'd10,
put_z = 4'd11;
reg [63:0] a, b, z;
reg [55:0] a_m, b_m;
reg [52:0] z_m;
reg [12:0] a_e, b_e, z_e;
reg a_s, b_s, z_s;
reg guard, round_bit, sticky;
reg [56:0] sum;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= get_b;
end
end
get_b:
begin
s_input_b_ack <= 1;
if (s_input_b_ack && input_b_stb) begin
b <= input_b;
s_input_b_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m <= {a[51 : 0], 3'd0};
b_m <= {b[51 : 0], 3'd0};
a_e <= a[62 : 52] - 1023;
b_e <= b[62 : 52] - 1023;
a_s <= a[63];
b_s <= b[63];
state <= special_cases;
end
special_cases:
begin
//if a is NaN or b is NaN return NaN
if ((a_e == 1024 && a_m != 0) || (b_e == 1024 && b_m != 0)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
state <= put_z;
//if a is inf return inf
end else if (a_e == 1024) begin
z[63] <= a_s;
z[62:52] <= 2047;
z[51:0] <= 0;
//if a is inf and signs don't match return nan
if ((b_e == 1024) && (a_s != b_s)) begin
z[63] <= 1;
z[62:52] <= 2047;
z[51] <= 1;
z[50:0] <= 0;
end
state <= put_z;
//if b is inf return inf
end else if (b_e == 1024) begin
z[63] <= b_s;
z[62:52] <= 2047;
z[51:0] <= 0;
state <= put_z;
      //if a and b are both zero return a signed zero
end else if ((($signed(a_e) == -1023) && (a_m == 0)) && (($signed(b_e) == -1023) && (b_m == 0))) begin
z[63] <= a_s & b_s;
z[62:52] <= b_e[10:0] + 1023;
z[51:0] <= b_m[55:3];
state <= put_z;
//if a is zero return b
end else if (($signed(a_e) == -1023) && (a_m == 0)) begin
z[63] <= b_s;
z[62:52] <= b_e[10:0] + 1023;
z[51:0] <= b_m[55:3];
state <= put_z;
//if b is zero return a
end else if (($signed(b_e) == -1023) && (b_m == 0)) begin
z[63] <= a_s;
z[62:52] <= a_e[10:0] + 1023;
z[51:0] <= a_m[55:3];
state <= put_z;
end else begin
//Denormalised Number
if ($signed(a_e) == -1023) begin
a_e <= -1022;
end else begin
a_m[55] <= 1;
end
//Denormalised Number
if ($signed(b_e) == -1023) begin
b_e <= -1022;
end else begin
b_m[55] <= 1;
end
state <= align;
end
end
align:
begin
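        // shift the mantissa of the operand with the smaller exponent right
        // until the exponents match, OR-ing the shifted-out bit into the new
        // LSB so it is not lost for rounding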
if ($signed(a_e) > $signed(b_e)) begin
b_e <= b_e + 1;
b_m <= b_m >> 1;
b_m[0] <= b_m[0] | b_m[1];
end else if ($signed(a_e) < $signed(b_e)) begin
a_e <= a_e + 1;
a_m <= a_m >> 1;
a_m[0] <= a_m[0] | a_m[1];
end else begin
state <= add_0;
end
end
add_0:
begin
z_e <= a_e;
if (a_s == b_s) begin
sum <= {1'd0, a_m} + b_m;
z_s <= a_s;
end else begin
if (a_m > b_m) begin
sum <= {1'd0, a_m} - b_m;
z_s <= a_s;
end else begin
sum <= {1'd0, b_m} - a_m;
z_s <= b_s;
end
end
state <= add_1;
end
add_1:
begin
if (sum[56]) begin
z_m <= sum[56:4];
guard <= sum[3];
round_bit <= sum[2];
sticky <= sum[1] | sum[0];
z_e <= z_e + 1;
end else begin
z_m <= sum[55:3];
guard <= sum[2];
round_bit <= sum[1];
sticky <= sum[0];
end
state <= normalise_1;
end
normalise_1:
begin
if (z_m[52] == 0 && $signed(z_e) > -1022) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= guard;
guard <= round_bit;
round_bit <= 0;
end else begin
state <= normalise_2;
end
end
normalise_2:
begin
if ($signed(z_e) < -1022) begin
z_e <= z_e + 1;
z_m <= z_m >> 1;
guard <= z_m[0];
round_bit <= guard;
sticky <= sticky | round_bit;
end else begin
state <= round;
end
end
round:
begin
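      // round to nearest, ties to even: increment the mantissa when the guard
      // bit is set and any of round, sticky or the current LSB is set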
if (guard && (round_bit | sticky | z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 53'h1fffffffffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[51 : 0] <= z_m[51:0];
z[62 : 52] <= z_e[10:0] + 1023;
z[63] <= z_s;
if ($signed(z_e) == -1022 && z_m[52] == 0) begin
z[62 : 52] <= 0;
end
//if overflow occurs, return inf
if ($signed(z_e) > 1023) begin
z[51 : 0] <= 0;
z[62 : 52] <= 2047;
z[63] <= z_s;
end
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_input_b_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign input_b_ack = s_input_b_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
long_to_double = """//Integer to IEEE Floating Point Converter (Double Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module long_to_double(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [2:0] state;
parameter get_a = 3'd0,
convert_0 = 3'd1,
convert_1 = 3'd2,
convert_2 = 3'd3,
round = 3'd4,
pack = 3'd5,
put_z = 3'd6;
reg [63:0] a, z, value;
reg [52:0] z_m;
reg [10:0] z_r;
reg [10:0] z_e;
reg z_s;
reg guard, round_bit, sticky;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= convert_0;
end
end
convert_0:
begin
if ( a == 0 ) begin
z_s <= 0;
z_m <= 0;
z_e <= -1023;
state <= pack;
end else begin
value <= a[63] ? -a : a;
z_s <= a[63];
state <= convert_1;
end
end
convert_1:
begin
z_e <= 63;
z_m <= value[63:11];
z_r <= value[10:0];
state <= convert_2;
end
convert_2:
begin
if (!z_m[52]) begin
z_e <= z_e - 1;
z_m <= z_m << 1;
z_m[0] <= z_r[10];
z_r <= z_r << 1;
end else begin
guard <= z_r[10];
round_bit <= z_r[9];
sticky <= z_r[8:0] != 0;
state <= round;
end
end
round:
begin
if (guard && (round_bit || sticky || z_m[0])) begin
z_m <= z_m + 1;
if (z_m == 53'h1fffffffffffff) begin
z_e <=z_e + 1;
end
end
state <= pack;
end
pack:
begin
z[51 : 0] <= z_m[51:0];
z[62 : 52] <= z_e + 1023;
z[63] <= z_s;
state <= put_z;
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
double_to_long = """//IEEE Floating Point to Integer Converter (Double Precision)
//Copyright (C) Jonathan P Dawson 2014
//2014-01-11
module double_to_long(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg [2:0] state;
parameter get_a = 3'd0,
special_cases = 3'd1,
unpack = 3'd2,
convert = 3'd3,
put_z = 3'd4;
reg [63:0] a_m, a, z;
reg [11:0] a_e;
reg a_s;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= unpack;
end
end
unpack:
begin
a_m[63:11] <= {1'b1, a[51 : 0]};
a_m[10:0] <= 0;
a_e <= a[62 : 52] - 1023;
a_s <= a[63];
state <= special_cases;
end
special_cases:
begin
if ($signed(a_e) == -1023) begin
//zero
z <= 0;
state <= put_z;
end else if ($signed(a_e) == 1024 && a[51:0] != 0) begin
//nan
z <= 64'h8000000000000000;
state <= put_z;
end else if ($signed(a_e) > 63) begin
//too big
if (a_s) begin
z <= 64'h8000000000000000;
end else begin
z <= 64'h0000000000000000;
end
state <= put_z;
end else begin
state <= convert;
end
end
convert:
begin
if ($signed(a_e) < 63 && a_m) begin
a_e <= a_e + 1;
a_m <= a_m >> 1;
end else begin
if (a_m[63] && a_s) begin
z <= 64'h8000000000000000;
end else begin
z <= a_s ? -a_m : a_m;
end
state <= put_z;
end
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
float_to_double = """//Integer to IEEE Floating Point Converter (Double Precision)
//Copyright (C) Jonathan P Dawson 2013
//2013-12-12
module float_to_double(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [31:0] input_a;
input input_a_stb;
output input_a_ack;
output [63:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [63:0] s_output_z;
reg s_input_a_ack;
reg s_input_b_ack;
reg [1:0] state;
parameter get_a = 3'd0,
convert_0 = 3'd1,
normalise_0 = 3'd2,
put_z = 3'd3;
reg [63:0] z;
reg [10:0] z_e;
reg [52:0] z_m;
reg [31:0] a;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= convert_0;
end
end
convert_0:
begin
z[63] <= a[31];
z[62:52] <= (a[30:23] - 127) + 1023;
z[51:0] <= {a[22:0], 29'd0};
if (a[30:23] == 255) begin
z[62:52] <= 2047;
end
state <= put_z;
if (a[30:23] == 0) begin
if (a[23:0]) begin
state <= normalise_0;
z_e <= 897;
z_m <= {1'd0, a[22:0], 29'd0};
end
z[62:52] <= 0;
end
end
normalise_0:
begin
if (z_m[52]) begin
z[62:52] <= z_e;
z[51:0] <= z_m[51:0];
state <= put_z;
end else begin
z_m <= {z_m[51:0], 1'd0};
z_e <= z_e - 1;
end
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
double_to_float = """//IEEE Floating Point to Integer Converter (Double Precision)
//Copyright (C) Jonathan P Dawson 2014
//2014-01-11
module double_to_float(
input_a,
input_a_stb,
output_z_ack,
clk,
rst,
output_z,
output_z_stb,
input_a_ack);
input clk;
input rst;
input [63:0] input_a;
input input_a_stb;
output input_a_ack;
output [31:0] output_z;
output output_z_stb;
input output_z_ack;
reg s_output_z_stb;
reg [31:0] s_output_z;
reg s_input_a_ack;
reg [1:0] state;
parameter get_a = 3'd0,
unpack = 3'd1,
denormalise = 3'd2,
put_z = 3'd3;
reg [63:0] a;
reg [31:0] z;
reg [10:0] z_e;
reg [23:0] z_m;
reg guard;
reg round;
reg sticky;
always @(posedge clk)
begin
case(state)
get_a:
begin
s_input_a_ack <= 1;
if (s_input_a_ack && input_a_stb) begin
a <= input_a;
s_input_a_ack <= 0;
state <= unpack;
end
end
unpack:
begin
z[31] <= a[63];
state <= put_z;
if (a[62:52] == 0) begin
z[30:23] <= 0;
z[22:0] <= 0;
end else if (a[62:52] < 897) begin
z[30:23] <= 0;
z_m <= {1'd1, a[51:29]};
z_e <= a[62:52];
guard <= a[28];
round <= a[27];
sticky <= a[26:0] != 0;
state <= denormalise;
end else if (a[62:52] == 2047) begin
z[30:23] <= 255;
z[22:0] <= 0;
if (a[51:0]) begin
z[22] <= 1;
end
end else if (a[62:52] > 1150) begin
z[30:23] <= 255;
z[22:0] <= 0;
end else begin
z[30:23] <= (a[62:52] - 1023) + 127;
if (a[28] && (a[27] || a[26:0])) begin
z[22:0] <= a[51:29] + 1;
end else begin
z[22:0] <= a[51:29];
end
end
end
denormalise:
begin
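      // shift right until the biased double exponent reaches 897 (the
      // single-precision subnormal threshold), collecting guard/round/sticky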
if (z_e == 897 || (z_m == 0 && guard == 0)) begin
state <= put_z;
z[22:0] <= z_m;
if (guard && (round || sticky)) begin
z[22:0] <= z_m + 1;
end
end else begin
z_e <= z_e + 1;
z_m <= {1'd0, z_m[23:1]};
guard <= z_m[0];
round <= guard;
sticky <= sticky | round;
end
end
put_z:
begin
s_output_z_stb <= 1;
s_output_z <= z;
if (s_output_z_stb && output_z_ack) begin
s_output_z_stb <= 0;
state <= get_a;
end
end
endcase
if (rst == 1) begin
state <= get_a;
s_input_a_ack <= 0;
s_output_z_stb <= 0;
end
end
assign input_a_ack = s_input_a_ack;
assign output_z_stb = s_output_z_stb;
assign output_z = s_output_z;
endmodule
"""
|
py | 1a3ea71872ca184a797ec630177c53c4833eb22e | import run_squad as rs
import tokenization
import collections
import json
import os
import modeling
import requests
import math
def read_squad_data(json_input, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
# input_data = json_input["data"]
input_data = json_input
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
    # build the list of SquadExample objects from the input data
examples = []
    # In the original SQuAD format, input_data is split into paragraphs and a title;
    # each paragraph consists of qas (question-answer sets) and context (the passage text).
for entry in input_data:
        # load each entry from input_data one at a time
        # and process its context first
paragraph_text = entry["context"]
        doc_tokens = [] # split the context into word tokens on whitespace
        char_to_word_offset = [] # index (0,1,2,...,n) of the word each character belongs to
prev_is_whitespace = True
        for c in paragraph_text: # walk the context character by character
if is_whitespace(c):
prev_is_whitespace = True
else:
                if prev_is_whitespace: # non-whitespace character preceded by whitespace
                    doc_tokens.append(c) # start a new token
else:
                    doc_tokens[-1] += c # append the character to the current (last) token
                prev_is_whitespace = False # this character is not whitespace
            char_to_word_offset.append(len(doc_tokens) - 1) # word indices start at 0, hence len - 1
# qas_id = qa["id"] # 질의의 id
question_text = entry["question"] #질문 데이터
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
start_position = -1
end_position = -1
orig_answer_text = ""
example = rs.SquadExample(
qas_id=1,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
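# Note: each entry passed to read_squad_data is expected to look like the dicts
# under "data" in the __main__ example below, i.e. {"id", "question", "context"}.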
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
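# Subtracting max_score before exponentiating keeps math.exp from overflowing on
# large logits; e.g. _compute_softmax([1.0, 2.0, 3.0]) ~= [0.090, 0.245, 0.665].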
def process_inputs(input_data):
bert_config = modeling.BertConfig.from_json_file(rs.FLAGS.bert_config_file)
eval_examples = read_squad_data(input_data,is_training=False)
eval_features = []
eval_writer = rs.FeatureWriter(
filename=os.path.join("./colab_output", "train.tf_record"),
is_training=False)
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
    # configure the tokenizer with the vocabulary file and the do_lower_case flag
tokenizer = tokenization.FullTokenizer(
vocab_file=rs.FLAGS.vocab_file, do_lower_case=rs.FLAGS.do_lower_case)
rs.convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=rs.FLAGS.max_seq_length,
doc_stride=rs.FLAGS.doc_stride,
max_query_length=rs.FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
return eval_examples, eval_features
def process_result(result):
# unique_id = int(result["unique_ids"].int64_val[0])
# start_logits = [float(x) for x in result["start_logits"].float_val]
# end_logits = [float(x) for x in result["end_logits"].float_val]
unique_id = int(result["unique_ids"][0])
start_logits= result["start_logits"].tolist()
end_logits = result["end_logits"].tolist()
# start_logits = np.array(start_logits).reshape(batch_size, max_seq_length)
# end_logits = np.array(end_logits).reshape(batch_size, max_seq_length)
formatted_result = rs.RawResult(
unique_id=unique_id,
start_logits=start_logits[0],
end_logits=end_logits[0])
return formatted_result
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, version_2_with_negative ):
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
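      # enumerate candidate (start, end) spans from the n-best start/end logits,
      # keeping only spans that map back into the original document tokens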
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = rs.get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > rs.FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json
def process_output(all_results,
eval_examples,
eval_features,
input_data,
n_best, n_best_size, max_answer_length):
output_prediction_file = os.path.join(rs.FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(rs.FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(rs.FLAGS.output_dir, "null_odds.json")
all_predictions, all_nbest_json = write_predictions(eval_examples,
eval_features,
all_results,
n_best_size=n_best_size,
max_answer_length=max_answer_length,
do_lower_case=True,
version_2_with_negative=False)
return all_predictions, all_nbest_json
# re = []
# for i in range(len(all_predictions)):
# id_ = input_data[i]["id"]
# if n_best:
# re.append(collections.OrderedDict({
# "id": id_,
# "question": input_data[i]["question"],
# "best_prediction": all_predictions[id_],
# "n_best_predictions": all_nbest_json[id_]
# }))
# else:
# re.append(collections.OrderedDict({
# "id": id_,
# "question": input_data[i]["question"],
# "best_prediction": all_predictions[id_]
# }))
# return re
if __name__ == "__main__":
input_data = {
"options": {
"n_best": True,
"n_best_size": 3,
"max_answer_length": 30
},
"data": [
{
"id": "001",
"question": "Who invented LSTM?",
"context": "Many aspects of speech recognition were taken over by a deep learning method called long short-term memory (LSTM), a recurrent neural network published by Hochreiter and Schmidhuber in 1997.[51] LSTM RNNs avoid the vanishing gradient problem and can learn \"Very Deep Learning\" tasks[2] that require memories of events that happened thousands of discrete time steps before, which is important for speech. In 2003, LSTM started to become competitive with traditional speech recognizers on certain tasks.[52] Later it was combined with connectionist temporal classification (CTC)[53] in stacks of LSTM RNNs.[54] In 2015, Google's speech recognition reportedly experienced a dramatic performance jump of 49% through CTC-trained LSTM, which they made available through Google Voice Search."
}
]
}
url="http://localhost:8501/v1/models/korquad_cpu_model:predict"
print(type(input_data))
print(type(json.dumps(input_data)))
json_input=json.dumps(input_data)
example = process_inputs(input_data)
# p_result=process_result(example[1][0])
input_ids = []
input_mask = []
segment_ids = []
unique_id= str(example[1][0].unique_id)
for e in example[1][0].input_ids:
input_ids.append(str(e))
for e in example[1][0].input_mask:
input_mask.append(str(e))
for e in example[1][0].segment_ids:
segment_ids.append(str(e))
pred_input = {
"inputs":{
"examples":{
"unique_id": example[1][0].unique_id,
"input_ids": example[1][0].input_ids,
"input_mask": example[1][0].input_mask,
"segment_ids": example[1][0].segment_ids,
}
}
}
pred_input5 = {
"inputs": {
"examples": {
"unique_id": unique_id,
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
}
}
}
pred_input2 = {
"inputs": {
"examples": [ input_ids, input_mask, segment_ids]
}
}
pred_input3 = {
"instances": [ unique_id, input_ids, input_mask, segment_ids]
}
# {
# "unique_id": example[1][0].unique_id,
# "input_ids": example[1][0].input_ids,
# "input_mask": example[1][0].input_mask,
# "segment_ids": example[1][0].segment_ids,
# }
# [
# example[1][0].unique_id,
# example[1][0].input_ids,
# example[1][0].input_mask,
# example[1][0].segment_ids,
# ]
# pred_input={
# "instances":[
# 0,
# example[1][0].input_ids,
# example[1][0].input_mask,
# example[1][0].segment_ids,
# 0,
# ]
#
# }
print(pred_input3)
# post
j_data=json.dumps(pred_input3)
# base64_data=base64.b64encode(j_data)
r = requests.post(url, data=j_data)
print(r.status_code)
print(r.text)
|
py | 1a3ea8ddc40d246ad35a4a372ba4a18d1a0c6b40 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 09:47:26 2019
@author: Artem Los
"""
import xml.etree.ElementTree
import json
import base64
import datetime
import copy
import time
from licensing.internal import HelperMethods
class ActivatedMachine:
def __init__(self, IP, Mid, Time, FriendlyName="", FloatingExpires = ""):
self.IP = IP
self.Mid = Mid
# TODO: check if time is int, and convert to datetime in this case.
self.Time = Time
self.FriendlyName = FriendlyName
self.FloatingExpires = FloatingExpires
class Reseller:
"""
Information about the reseller.
"""
def __init__(self, Id, InviteId, ResellerUserId, Created, Name, Url, Email, Phone, Description):
self.Id = Id
self.InviteId = InviteId
self.ResellerUserId = ResellerUserId
self.Created = Created
self.Name = Name
self.Url = Url
self.Email = Email
self.Phone = Phone
self.Description = Description
class LicenseKey:
def __init__(self, ProductId, ID, Key, Created, Expires, Period, F1, F2,\
F3, F4, F5, F6, F7, F8, Notes, Block, GlobalId, Customer, \
ActivatedMachines, TrialActivation, MaxNoOfMachines, \
AllowedMachines, DataObjects, SignDate, Reseller, RawResponse):
self.product_id = ProductId
self.id = ID
self.key = Key
self.created = Created
self.expires = Expires
self.period = Period
self.f1 = F1
self.f2 = F2
self.f3 = F3
self.f4 = F4
self.f5 = F5
self.f6 = F6
self.f7 = F7
self.f8 = F8
self.notes = Notes
self.block = Block
self.global_id = GlobalId
self.customer = Customer
self.activated_machines = ActivatedMachines
self.trial_activation = TrialActivation
self.max_no_of_machines = MaxNoOfMachines
self.allowed_machines = AllowedMachines
self.data_objects = DataObjects
self.sign_date = SignDate
self.reseller = Reseller
self.raw_response = RawResponse
@staticmethod
def from_response(response):
if response.result == "1":
raise ValueError("The response did not contain any license key object since it was unsuccessful. Message '{0}'.".format(response.message))
obj = json.loads(base64.b64decode(response.license_key).decode('utf-8'))
reseller = None
if "Reseller" in obj and obj["Reseller"] != None:
reseller = Reseller(**obj["Reseller"])
return LicenseKey(obj["ProductId"], obj["ID"], obj["Key"], datetime.datetime.fromtimestamp(obj["Created"]),\
datetime.datetime.fromtimestamp(obj["Expires"]), obj["Period"], obj["F1"], obj["F2"], \
obj["F3"], obj["F4"],obj["F5"],obj["F6"], obj["F7"], \
obj["F8"], obj["Notes"], obj["Block"], obj["GlobalId"],\
obj["Customer"], LicenseKey.__load_activated_machines(obj["ActivatedMachines"]), obj["TrialActivation"], \
obj["MaxNoOfMachines"], obj["AllowedMachines"], obj["DataObjects"], \
datetime.datetime.fromtimestamp(obj["SignDate"]),reseller, response)
def save_as_string(self):
"""
Save the license as a string that can later be read by load_from_string.
"""
res = copy.copy(self.raw_response.__dict__)
res["licenseKey"] = res["license_key"]
res.pop("license_key", None)
return json.dumps(res)
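    # Round-trip sketch (hypothetical objects): s = license_key.save_as_string()
    # followed by LicenseKey.load_from_string(rsa_pub_key, s) yields an
    # equivalent LicenseKey, or None if verification fails.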
@staticmethod
def load_from_string(rsa_pub_key, string, signature_expiration_interval = -1):
"""
Loads a license from a string generated by save_as_string.
Note: if an error occurs, None will be returned. An error can occur
if the license string has been tampered with or if the public key is
incorrectly formatted.
:param signature_expiration_interval: If the license key was signed,
this method will check so that no more than "signatureExpirationInterval"
days have passed since the last activation.
"""
response = Response("","","","")
try:
response = Response.from_string(string)
except Exception as ex:
return None
if response.result == "1":
return None
else:
try:
pubKey = RSAPublicKey.from_string(rsa_pub_key)
if HelperMethods.verify_signature(response, pubKey):
licenseKey = LicenseKey.from_response(response)
if signature_expiration_interval > 0 and \
(licenseKey.sign_date + datetime.timedelta(days=1*signature_expiration_interval) < datetime.datetime.utcnow()):
return None
return licenseKey
else:
return None
except Exception:
return None
@staticmethod
def __load_activated_machines(obj):
if obj == None:
return None
arr = []
for item in obj:
arr.append(ActivatedMachine(**item))
return arr
class Response:
def __init__(self, license_key, signature, result, message):
self.license_key = license_key
self.signature = signature
self.result = result
self.message = message
@staticmethod
def from_string(responseString):
obj = json.loads(responseString)
licenseKey = ""
signature = ""
result = 0
message = ""
if "licenseKey" in obj:
licenseKey = obj["licenseKey"]
if "signature" in obj:
signature = obj["signature"]
if "message" in obj:
message = obj["message"]
if "result" in obj:
result = obj["result"]
else:
result = 1
return Response(licenseKey, signature, result, message)
class RSAPublicKey:
def __init__(self, modulus, exponent):
self.modulus = modulus
self.exponent = exponent
@staticmethod
def from_string(rsaPubKeyString):
"""
The rsaPubKeyString can be found at https://app.cryptolens.io/User/Security.
It should be of the following format:
<RSAKeyValue><Modulus>...</Modulus><Exponent>AQAB</Exponent></RSAKeyValue>
"""
rsaKey = xml.etree.ElementTree.fromstring(rsaPubKeyString)
return RSAPublicKey(rsaKey.find('Modulus').text, rsaKey.find('Exponent').text)
|
py | 1a3eaba586f0ef77d1530b67eca1ce0a4c3ee2ac | try:
# Python 2
from proxycrawl import ProxyCrawlMiddleware
from request import ProxyCrawlRequest
from response import ProxyCrawlResponse, ProxyCrawlTextResponse
except ImportError:
# Python 3
from .proxycrawl import ProxyCrawlMiddleware
from .request import ProxyCrawlRequest
|
py | 1a3eac5cacc346b2fdb014bc1c3d6a35f477ac14 |
import theano
import theano.tensor
class ScalarSoftsign(theano.scalar.UnaryScalarOp):
    """Scalar softsign op: f(x) = x / (1 + abs(x))."""
@staticmethod
def static_impl(x):
return x / (1.0 + abs(x))
def impl(self, x):
return ScalarSoftsign.static_impl(x)
def grad(self, inp, grads):
x, = inp
gz, = grads
if 'float' in x.type.dtype:
d = (1.0 + abs(x))
return [gz / (d * d)]
else:
return NotImplemented
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in [theano.scalar.float32,
theano.scalar.float64]:
return "%(z)s = %(x)s / (1.0+fabs(%(x)s));" % locals()
raise NotImplementedError('only floating point x is implemented')
scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float,
name='scalar_softsign')
softsign = theano.tensor.Elemwise(scalar_softsign, name='softsign')
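# Usage sketch: softsign(x) builds an elementwise graph op computing
# x / (1 + |x|) for a tensor x, and can be compiled with theano.function
# like any other elementwise op.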
|
py | 1a3ead1517d2d6b91b808f7c760a29254ca3ddb3 | """
Django settings for Dj_RAC project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ph1qxtay7$&7*$t7e$a5c&3prory396fb$3#!$h)qrvh-pico9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # Adding the REST framework - required for all RESTful projects
'firstApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Dj_RAC.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Dj_RAC.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'employeedb',
'USER': 'root',
'PASSWORD': 'admin'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a3eae286a8b3bf1af8227b746b238c0e635ed27 | import abc
from collections import OrderedDict
from torch import nn as nn
from utils.logging import logger
import utils.eval_util as eval_util
from utils.rng import get_global_pkg_rng_state
import utils.pytorch_util as ptu
import gtimer as gt
from replay_buffer import ReplayBuffer
from path_collector import MdpPathCollector, RemoteMdpPathCollector
from tqdm import trange
import ray
import torch
import numpy as np
import random
class BatchRLAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
trainer,
exploration_data_collector: MdpPathCollector,
remote_eval_data_collector: RemoteMdpPathCollector,
replay_buffer: ReplayBuffer,
batch_size,
max_path_length,
num_epochs,
num_eval_steps_per_epoch,
num_expl_steps_per_train_loop,
num_trains_per_train_loop,
num_train_loops_per_epoch=1,
min_num_steps_before_training=0,
optimistic_exp_hp=None,
):
super().__init__()
"""
The class state which should not mutate
"""
self.batch_size = batch_size
self.max_path_length = max_path_length
self.num_epochs = num_epochs
self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
self.num_trains_per_train_loop = num_trains_per_train_loop
self.num_train_loops_per_epoch = num_train_loops_per_epoch
self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
self.min_num_steps_before_training = min_num_steps_before_training
self.optimistic_exp_hp = optimistic_exp_hp
"""
The class mutable state
"""
self._start_epoch = 0
"""
        This class sets up the main training loop, so it needs references to the other
        high-level objects in the algorithm.
        Those high-level objects maintain their own state
        and are responsible for saving and restoring it for checkpointing.
"""
self.trainer = trainer
self.expl_data_collector = exploration_data_collector
self.remote_eval_data_collector = remote_eval_data_collector
self.replay_buffer = replay_buffer
def train(self, start_epoch=0):
self._start_epoch = start_epoch
self._train()
def _train(self):
# Fill the replay buffer to a minimum before training starts
if self.min_num_steps_before_training > self.replay_buffer.num_steps_can_sample():
init_expl_paths = self.expl_data_collector.collect_new_paths(
self.trainer.policy,
self.max_path_length,
self.min_num_steps_before_training,
discard_incomplete_paths=False,
)
self.replay_buffer.add_paths(init_expl_paths)
self.expl_data_collector.end_epoch(-1)
for epoch in gt.timed_for(
trange(self._start_epoch, self.num_epochs),
save_itrs=True,
):
            # To evaluate the policy remotely, we ship the policy params to the
            # remote evaluator. This could be made more efficient, but it is
            # currently very cheap because the network is small.
pol_state_dict = ptu.state_dict_cpu(self.trainer.policy)
remote_eval_obj_id = self.remote_eval_data_collector.async_collect_new_paths.remote(
self.max_path_length,
self.num_eval_steps_per_epoch,
discard_incomplete_paths=True,
deterministic_pol=True,
pol_state_dict=pol_state_dict)
gt.stamp('remote evaluation submit')
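            # exploration and training below run locally while the remote
            # evaluator works in parallel; we only block on it after training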
for _ in range(self.num_train_loops_per_epoch):
new_expl_paths = self.expl_data_collector.collect_new_paths(
self.trainer.policy,
self.max_path_length,
self.num_expl_steps_per_train_loop,
discard_incomplete_paths=False,
optimistic_exploration=self.optimistic_exp_hp['should_use'],
optimistic_exploration_kwargs=dict(
policy=self.trainer.policy,
qfs=[self.trainer.qf1, self.trainer.qf2],
hyper_params=self.optimistic_exp_hp
)
)
gt.stamp('exploration sampling', unique=False)
self.replay_buffer.add_paths(new_expl_paths)
gt.stamp('data storing', unique=False)
for _ in range(self.num_trains_per_train_loop):
train_data = self.replay_buffer.random_batch(
self.batch_size)
self.trainer.train(train_data)
gt.stamp('training', unique=False)
# Wait for eval to finish
ray.get([remote_eval_obj_id])
gt.stamp('remote evaluation wait')
self._end_epoch(epoch)
def _end_epoch(self, epoch):
self._log_stats(epoch)
self.expl_data_collector.end_epoch(epoch)
ray.get([self.remote_eval_data_collector.end_epoch.remote(epoch)])
self.replay_buffer.end_epoch(epoch)
self.trainer.end_epoch(epoch)
# We can only save the state of the program
# after we call end epoch on all objects with internal state.
# This is so that restoring from the saved state will
        # lead to an identical result, as if the program had been left running.
if epoch > 0:
snapshot = self._get_snapshot(epoch)
logger.save_itr_params(epoch, snapshot)
gt.stamp('saving')
logger.record_dict(_get_epoch_timings())
logger.record_tabular('Epoch', epoch)
write_header = True if epoch == 0 else False
logger.dump_tabular(with_prefix=False, with_timestamp=False,
write_header=write_header)
def _get_snapshot(self, epoch):
snapshot = dict(
trainer=self.trainer.get_snapshot(),
exploration=self.expl_data_collector.get_snapshot(),
evaluation_remote=ray.get(
self.remote_eval_data_collector.get_snapshot.remote()),
evaluation_remote_rng_state=ray.get(
self.remote_eval_data_collector.get_global_pkg_rng_state.remote()
),
replay_buffer=self.replay_buffer.get_snapshot()
)
        # The epoch number records that the program state was snapshotted at
        # the end of this epoch, not at the beginning of it.
snapshot['epoch'] = epoch
# Save the state of various rng
snapshot['global_pkg_rng_state'] = get_global_pkg_rng_state()
return snapshot
def _log_stats(self, epoch):
logger.log("Epoch {} finished".format(epoch), with_timestamp=True)
"""
Replay Buffer
"""
logger.record_dict(
self.replay_buffer.get_diagnostics(),
prefix='replay_buffer/'
)
"""
Trainer
"""
logger.record_dict(self.trainer.get_diagnostics(), prefix='trainer/')
"""
Exploration
"""
logger.record_dict(
self.expl_data_collector.get_diagnostics(),
prefix='exploration/'
)
expl_paths = self.expl_data_collector.get_epoch_paths()
logger.record_dict(
eval_util.get_generic_path_information(expl_paths),
prefix="exploration/",
)
"""
Remote Evaluation
"""
logger.record_dict(
ray.get(self.remote_eval_data_collector.get_diagnostics.remote()),
prefix='remote_evaluation/',
)
remote_eval_paths = ray.get(
self.remote_eval_data_collector.get_epoch_paths.remote())
logger.record_dict(
eval_util.get_generic_path_information(remote_eval_paths),
prefix="remote_evaluation/",
)
"""
Misc
"""
gt.stamp('logging')
def to(self, device):
for net in self.trainer.networks:
net.to(device)
def _get_epoch_timings():
times_itrs = gt.get_times().stamps.itrs
times = OrderedDict()
epoch_time = 0
for key in sorted(times_itrs):
time = times_itrs[key][-1]
epoch_time += time
times['time/{} (s)'.format(key)] = time
times['time/epoch (s)'] = epoch_time
times['time/total (s)'] = gt.get_times().total
return times
|
py | 1a3eae90338e0f93671d186552fed055c463101c | from cvxopt import matrix
from cvxopt.lapack import syev
import numpy as np
class LatentPCA:
""" Structured Extension for Principle Component Analysis.
Written by Nico Goernitz, TU Berlin, 2014
"""
def __init__(self, sobj):
self.sobj = sobj # structured object
self.sol = None # (vector) solution vector (after training, of course)
self.latent = None
def fit(self, max_iter=50):
""" Solve the optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
"""
samples = self.sobj.get_num_samples()
dims = self.sobj.get_num_dims()
self.latent = np.random.randint(0, self.sobj.get_num_states(), samples)
self.sol = np.random.randn(dims)
psi = np.zeros((dims, samples))
old_psi = np.zeros((dims, samples))
threshold = 0.
iter = 0
# terminate if objective function value doesn't change much
while iter < max_iter and (iter < 2 or np.sum(np.abs(psi-old_psi)) >= 0.001):
print('Starting iteration {0}.'.format(iter))
print(np.sum(np.abs(psi-old_psi)))
iter += 1
old_psi = psi.copy()
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
mean = np.zeros(dims)
for i in range(samples):
_, self.latent[i], psi[:, i] = self.sobj.argmax(self.sol, i)
mean += psi[:, i]
mean /= np.float(samples)
mpsi = psi - mean.reshape((dims, 1))
# 2. solve the intermediate convex optimization problem
A = mpsi.dot(mpsi.T)
W = np.zeros((dims, dims))
syev(matrix(A), matrix(W), jobz='V')
self.sol = np.array(A[:, dims-1]).ravel()
return self.sol, self.latent, threshold
def apply(self, pred_sobj):
""" Application of the StructuredPCA:
score = max_z <sol*,\Psi(x,z)>
latent_state = argmax_z <sol*,\Psi(x,z)>
"""
samples = pred_sobj.get_num_samples()
vals = np.zeros(samples)
structs = []
for i in range(samples):
vals[i], struct, _ = pred_sobj.argmax(self.sol, i)
structs.append(struct)
return vals, structs
|
py | 1a3eaeb0fec58e7bff5d49f796181be510f60976 | # -*- coding: utf-8 -*-
from brawlpython.sessions import SyncSession
from brawlpython.api_toolkit import unique, same
from configobj import ConfigObj
import pytest
import time
url_uuid = "http://httpbin.org/uuid"
config = ConfigObj("config.ini")
api_key = config["DEFAULT"].get("API_KEY")
@pytest.yield_fixture
def factory():
client = None
def maker(*args, **kwargs):
nonlocal client
client = SyncSession(*args, **kwargs)
return client
yield maker
if client is not None:
client.close()
@pytest.yield_fixture
def client(factory):
return factory(api_key, cache_ttl=1)
def test_sync_init():
client = SyncSession(api_key)
assert isinstance(client, SyncSession)
def test_closing(client):
assert not client.closed
for _ in 1, 2:
client.close()
assert client.closed
def test_cache(client):
responses = [client.get(url_uuid) for _ in range(2)]
assert same(responses)
time.sleep(2)
assert client.get(url_uuid) != responses[0]
def test_no_cache(factory):
client = factory(api_key, use_cache=False)
assert unique([client.get(url_uuid) for _ in range(2)])
assert unique(client.gets([url_uuid] * 2))
if __name__ == "__main__":
import run_tests
run_tests.run(__file__)
|
py | 1a3eb07cabd75a0a752cb3ab9b78c29cfffd8367 | #!c:\users\rowsu\onedrive\desktop\state7hotel\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
py | 1a3eb1139759f1fc6f26eb56c5bbc73882be0ce8 | # -*- coding: utf-8 -*-
"""
String formatting functionality for some primitive types. We do this since
it depends on several object implementations at once (e.g. Buffer and String),
which themselves need say, integers.
"""
from __future__ import print_function, division, absolute_import
import math
import flypy.types
from flypy import jit
#===------------------------------------------------------------------===
# Formatters
#===------------------------------------------------------------------===
@jit
def sprintf(buf, fmt, x):
"""
Print `x` to `buf` according to `format`.
Returns the number of characters written.
"""
fmt = flypy.runtime.obj.core.as_cstring(fmt)
n = len(buf)
result = flypy.libc.snprintf(buf.pointer(), n, fmt, x)
#if result >= n:
# raise ValueError("Unable to print to buffer:")
return result + 1 # n bytes + '\0
@jit
def format_static(fmt, x, n):
"""
Format 'x' according to 'fmt' using a static buffer size 'n'.
- upcast to a double
- use snprintf
- resize buffer according to # of bytes written
"""
buf = flypy.runtime.obj.core.newbuffer(flypy.types.char, n)
n = sprintf(buf, fmt, x)
buf.resize(n)
return flypy.types.String(buf)
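# Usage sketch (illustrative values): format_static("%f", 3.14, 32) formats the
# number into a fresh 32-byte char buffer, trims it to the bytes snprintf
# actually wrote, and wraps the result in a flypy String.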
|
py | 1a3eb1249acf9b4a2b1ebaf2986ff36d812f4bd0 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .state_management_bot import StateManagementBot
__all__ = ['StateManagementBot'] |
py | 1a3eb13504ccbf1c83b1c3c15bad61c888dd60d2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketOrderNotifyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.servicemarket.order.notify'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
py | 1a3eb139634ea92563d3b03f15b847d55ab5aa3b | from gehm.utils.distances import *
from tests.test_data import create_test_data
import pytest
import numpy as np
import torch
@pytest.mark.distances
def test_embedding_first_order_proximity():
positions=torch.as_tensor(np.random.rand(4,2))
pos1=positions[1,:]
pos2=positions[3,:]
distance=np.round(1-np.sqrt(np.square(abs(pos1-pos2)).sum()),4)
sim_matrix=np.round(embedding_first_order_proximity(positions,False),4)
assert (distance==sim_matrix[1,3]), "Distances do not fit, {} != {}".format(distance,sim_matrix[1,3])
|
py | 1a3eb2b6e20be72a2dee255abaae3b387320e70e | # -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from ....cart.app import cart_app
from ....cart.models import Cart, CART_SESSION_KEY
from ....delivery.tests import TestDeliveryProvider
from ....order import handler as order_handler
from ....order.models import Order
from ....payment import ConfirmationFormNeeded
from ....payment.tests import TestPaymentProvider
from ....product.tests import DeadParrot
from ..common.views import prepare_order, confirmation
from . import views
urlpatterns = patterns('',
url(r'^cart/', include(cart_app.urls)),
url(r'^checkout/', include('satchless.contrib.checkout.singlestep.urls')),
)
class TestPaymentProviderWithConfirmation(TestPaymentProvider):
def confirm(self, order):
raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')
class CheckoutTest(TestCase):
urls = 'satchless.contrib.checkout.singlestep.tests'
def _setup_settings(self, custom_settings):
original_settings = {}
for setting_name, value in custom_settings.items():
if hasattr(settings, setting_name):
original_settings[setting_name] = getattr(settings, setting_name)
setattr(settings, setting_name, value)
return original_settings
def _teardown_settings(self, original_settings, custom_settings=None):
custom_settings = custom_settings or {}
for setting_name, value in custom_settings.items():
if setting_name in original_settings:
setattr(settings, setting_name, value)
else:
delattr(settings, setting_name)
def setUp(self):
self.parrot = DeadParrot.objects.create(slug='parrot', species="Hyacinth Macaw")
self.dead_parrot = self.parrot.variants.create(color='blue', looks_alive=False)
self.custom_settings = {
'SATCHLESS_DELIVERY_PROVIDERS': [TestDeliveryProvider],
'SATCHLESS_PAYMENT_PROVIDERS': [TestPaymentProviderWithConfirmation],
}
self.original_settings = self._setup_settings(self.custom_settings)
order_handler.init_queues()
self.anon_client = Client()
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
order_handler.init_queues()
def _test_status(self, url, method='get', *args, **kwargs):
status_code = kwargs.pop('status_code', 200)
client = kwargs.pop('client_instance', Client())
data = kwargs.pop('data', {})
response = getattr(client, method)(url, data=data)
self.assertEqual(response.status_code, status_code,
'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
url.decode('utf-8'), args, kwargs, status_code, response.status_code,
response.content.decode('utf-8')))
return response
def _get_or_create_cart_for_client(self, client, typ='satchless_cart'):
self._test_status(reverse('satchless-cart-view'), client_instance=client)
return Cart.objects.get(pk=client.session[CART_SESSION_KEY % typ], typ=typ)
def _get_or_create_order_for_client(self, client):
self._test_status(reverse(prepare_order), method='post',
client_instance=client, status_code=302)
order_pk = client.session.get('satchless_order', None)
return Order.objects.get(pk=order_pk)
def _get_order_items(self, order):
order_items = set()
for group in order.groups.all():
order_items.update(group.items.values_list('product_variant', 'quantity'))
return order_items
def test_checkout_view_passes_with_correct_data(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': '[email protected]'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = '[email protected]'
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
order = Order.objects.get(pk=order.pk)
self.assertRedirects(response, reverse(confirmation,
kwargs={'order_token':
order.token}))
self.assertEqual(order.status, 'payment-pending')
def test_confirmation_view_redirects_when_order_or_payment_is_missing(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.dead_parrot, 1)
order = self._get_or_create_order_for_client(self.anon_client)
# without payment
self._test_status(reverse(confirmation, kwargs={'order_token':
order.token}),
client_instance=self.anon_client, status_code=302)
# finish checkout view
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
data={'email': '[email protected]'})
dg = response.context['delivery_group_forms']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
for g, typ, form in dg:
data[form.add_prefix('email')] = '[email protected]'
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=302, method='post', data=data,
follow=True)
self._test_status(reverse(confirmation, kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
|
py | 1a3eb402d91cf44e9a94a271702cc9b6f38c939b | from rest_framework import serializers
from data_ocean.models import Status, Authority, TaxpayerType, Register
class StatusSerializer(serializers.ModelSerializer):
class Meta:
model = Status
fields = ['name']
class AuthoritySerializer(serializers.ModelSerializer):
class Meta:
model = Authority
fields = ['id', 'name', 'code']
class TaxpayerTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(max_length=200)
class Meta:
model = TaxpayerType
fields = ['id', 'name']
class RegisterSerializer(serializers.ModelSerializer):
status = serializers.CharField(source='get_status_display')
class Meta:
model = Register
fields = [
'id',
'name',
'name_eng',
'source_name',
'source_url_address',
'source_api_address',
'source_register_id',
'api_list',
'api_detail',
'status',
'total_records',
'updated_at'
]
|
py | 1a3eb48066773f3458f8cd32a0d389c086fd3213 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping existing analysis utils."""
# pylint: disable=invalid-name
from . import _ffi_api
def expr_deep_equal(lhs, rhs):
"""Deeply compare two nested expressions.
Parameters
----------
lhs : PrimExpr
The left operand.
rhs : PrimExpr
The right operand.
Returns
-------
result : bool
The comparison result
Note
----
This function does not remap variable bindings, it will not
return true for (let x = 1 in x + 1) vs (let y = 1 in y + 1), unless x.same_as(y).
Use py:func:`tvm.ir.structural_equal` to handle structural variable remapping.
Due to the restriction of not remapping variables, this function can run
faster than StructuralEqual and can be used as a utility function during arithmetic
simplifications.
Always consider py:func:`tvm.ir.structural_equal` first, which handles
the structural remapping.
See Also
--------
tvm.ir.structural_equal
"""
return _ffi_api.expr_deep_equal(lhs, rhs)
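# A small usage sketch (kept as a comment; assumes the public tvm.tir / tvm.ir APIs):
#   import tvm
#   x = tvm.tir.Var("x", "int32")
#   y = tvm.tir.Var("y", "int32")
#   expr_deep_equal(x + 1, x + 1)                              # True: same var, same structure
#   expr_deep_equal(x + 1, y + 1)                              # False: no variable remapping
#   tvm.ir.structural_equal(x + 1, y + 1, map_free_vars=True)  # True: remapping is handled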
def verify_ssa(func):
"""Verify if the func is in SSA form.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_ssa(func)
def verify_memory(func):
"""Verify if func contains illegal host side direct memory access.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_memory(func)
def verify_gpu_code(func, constraints):
"""Verify if module contains illegal host side direct memory access.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
constraints : Dict[str, int]
The attribute constraints.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_gpu_code(func, constraints)
|
py | 1a3eb48c62a55493725990953e9060ae9f1940b2 | # -*- coding: utf-8 -*-
# From https://github.com/wiseodd/hipsternet/blob/master/hipsternet/im2col.py
import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
out_height = int((H + 2 * padding - field_height) / stride + 1)
out_width = int((W + 2 * padding - field_width) / stride + 1)
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return k.astype(int), i.astype(int), j.astype(int)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
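# Shape sketch for the helpers above (illustrative values only):
#   x = np.random.randn(2, 3, 32, 32)                      # (N, C, H, W)
#   cols = im2col_indices(x, 3, 3, padding=1, stride=1)    # -> (3*3*3, 2*32*32) == (27, 2048)
#   x_back = col2im_indices(cols, x.shape, 3, 3, padding=1, stride=1)
#   # x_back has x's shape; overlapping patch positions are summed, so it is not equal to x.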
|
py | 1a3eb4ad3be6db41a96392242313b1ab7389a901 | __author__ = 'guorongxu'
import sys
import logging
import correlator
def calculate(output_file, input_file_0, input_file_1, threshold):
if threshold > 0:
correlator.correlate(output_file, input_file_0, input_file_1, spearman="spearman", rho_threshold = threshold)
else:
correlator.correlate(output_file, input_file_0, input_file_1, spearman="spearman")
if __name__ == "__main__":
output_file = sys.argv[1]
input_file_0 = sys.argv[2]
input_file_1 = sys.argv[3]
threshold = float(sys.argv[4])
logging.basicConfig(filename='search_engine.log',level=logging.DEBUG)
logging.info("Input file 0: " + input_file_0)
logging.info("Input file 1: " + input_file_1)
logging.info("Output file: " + output_file)
calculate(output_file, input_file_0, input_file_1, threshold)
|
py | 1a3eb5625ce388e53e06ecc9289bffe0a28a12a9 | #!/usr/bin/env python3
# coding:utf-8
import email
message = open("email.txt", "rb").read().decode() # save all of this puzzle's commented content as email.txt
mail = email.message_from_string(message)
audio = mail.get_payload(0).get_payload(decode=True)
f = open("indian.wav", "wb") # 音频内容:sorry
f.write(audio)
f.close()
|
py | 1a3eb5980623f744d31bb4790466319cbb5e0bf6 | def nlps_knitro(*args,nout=5,oc=None):
if oc == None:
from ....oc_api import oc_matpower
oc = oc_matpower()
return oc.nlps_knitro(*args,nout=nout)
|
py | 1a3eb5e38a1da2a6d6e15c5c1ca41c955191ee6b | from zzcore import StdAns
import requests
import sxtwl
from datetime import datetime
from config import HFWEATHERKEY
class Ans(StdAns):
def GETMSG(self):
msg = f'早上好,今天是{calendar()}\n\n'
msg += getWeather() + '\n\n'
# t = requests.get('https://v1.hitokoto.cn/?c=k&encode=text').text
        t = "只要不失去你的崇高,整个世界都会向你敞开"
msg += t
return msg
def getWeather(id='101120206'):
def wemoji(text):
if '雪' in text:
return text + '🌨'
if '雨' in text:
return text + '🌧️'
if '阴' in text:
return text + '⛅'
if '云' in text:
return text + '🌤'
if '晴' in text:
return text + '☀️'
return text
url = 'https://devapi.heweather.net/v7/weather/3d'
params = {
'location': id,
'key': HFWEATHERKEY,
}
r = requests.get(url=url, params=params).json()
tdw = r['daily'][0]
# ndw = r['daily'][1]
# weather = f"今日日间{wemoji(tdw['textDay'])},温度{tdw['tempMin']}~{tdw['tempMax']}℃,{tdw['windDirDay']}{tdw['windScaleDay']}级;夜间{wemoji(tdw['textNight'])},{tdw['windDirNight']}{tdw['windScaleNight']}级。明日日间{wemoji(ndw['textDay'])},温度{ndw['tempMin']}~{ndw['tempMax']}℃。"
weather = f"今日日间{wemoji(tdw['textDay'])},温度{tdw['tempMin']}~{tdw['tempMax']}℃,{tdw['windDirDay']}{tdw['windScaleDay']}级;夜间{wemoji(tdw['textNight'])},{tdw['windDirNight']}{tdw['windScaleNight']}级。"
if float(tdw['precip']) > 0:
weather += '\n记得收好衣服,出门带伞~'
return weather
def calendar():
    # NowStatus options include 教学 (term time), 寒假 (winter break), 暑假 (summer break), etc.
NowStatus = "暑假"
    # StartWeek: the week number of this year at which the schedule starts counting
StartWeek = 28
    # start date of this year's postgraduate entrance exam (考研)
KaoYanDate = datetime(2021, 12, 21)
ymc = ["冬", "腊", "正", "二", "三", "四", "五", "六", "七", "八", "九", "十"]
rmc = ["初一", "初二", "初三", "初四", "初五", "初六", "初七", "初八", "初九", "初十", "十一", "十二", "十三", "十四", "十五",
"十六", "十七", "十八", "十九", "二十", "廿一", "廿二", "廿三", "廿四", "廿五", "廿六", "廿七", "廿八", "廿九", "三十", "卅一"]
zmc = ["一", "二", "三", "四", "五", "六", "天"]
nowdate = datetime.now()
djs = (KaoYanDate - nowdate).days -1
y = nowdate.year
m = nowdate.month
d = nowdate.day
zc = int(nowdate.strftime("%W")) - StartWeek
z = zmc[nowdate.weekday()]
lunar = sxtwl.Lunar()
lunarday = lunar.getDayBySolar(y, m, d)
lunardaychinese = f"{ymc[lunarday.Lmc]}月{rmc[lunarday.Ldi]}"
if lunarday.Lleap:
lunardaychinese = "闰" + lunardaychinese
cal = f"{m}月{d}日,{lunardaychinese},{NowStatus}第{zc}周,星期{z}\n\n距离 2022 考研还有 {djs} 天"
return cal
|
py | 1a3eb711bf4ef245a4451124f28a57c2119429e4 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Parsimony LLC and contributors
# For license information, please see license.txt
from frappe.model.document import Document
class ShipstationWarehouse(Document):
pass
|
py | 1a3eb79f9d52254df9b13731272637e6becb8f74 | from mldesigner import command_component
from azure.ai.ml.entities._job.resource_configuration import ResourceConfiguration
resources = ResourceConfiguration()
resources.instance_count = 2
@command_component(resources = resources)
def basic_component(
port1: str,
param1: int,
):
""" module run logic goes here """
return port1
|
py | 1a3eb7dcdb390fafd3e3e19785a540027fd6096a | # -*- encoding: utf-8 -*-
'''
Created on: 2016
Author: Mizael Martinez
'''
from base_datos import *
import threading
from time import sleep
import time
from Controlador import *
def main():
t = Test()
t.go()
try:
join_threads(t.threads)
except KeyboardInterrupt:
print "\nKeyboardInterrupt catched."
class Test(object):
def __init__(self):
self.running = True
self.threads = []
self.contador=0
self.bd=BaseDatos(True)
def procesarPendiente(self,pendiente):
id_pendiente=pendiente[0]
id_imagen=pendiente[1]
c=Controlador_Compresion()
c.prepararCompresion(id_pendiente,id_imagen)
def foo(self):
while(self.running):
pendientes=self.bd.obtenerPendientes()
print pendientes,
print len(pendientes)
if pendientes != 0:
for pendiente in pendientes:
hora=str(time.time())
hilo = threading.Thread(target=self.procesarPendiente,name=hora,args=(pendiente,))
hilo.daemon=True
hilo.start()
self.threads.append(hilo)
sleep(1)
def get_user_input(self):
while True:
x = raw_input("Enter 'e' for exit: ")
if x.lower() == 'e':
self.running = False
break
def go(self):
hora=str(time.time())
hilo = threading.Thread(target=self.foo,name=hora)
hilo.daemon = True
hilo.start()
self.threads.append(hilo)
def join_threads(threads):
for t in threads:
while t.isAlive():
t.join(5)
if __name__ == "__main__":
main() |
py | 1a3eb86dd9a642ad6259a245b4767964298f06d9 | from typing import Any, List, Union, Optional, Dict
import gym
import numpy as np
import pettingzoo
from functools import reduce
from ding.envs import BaseEnv, BaseEnvTimestep, FrameStackWrapper
from ding.torch_utils import to_ndarray, to_list
from ding.envs.common.common_function import affine_transform
from ding.utils import ENV_REGISTRY, import_module
@ENV_REGISTRY.register('petting_zoo')
class PettingZooEnv(BaseEnv):
# Now only supports simple_spread_v2.
# All agents' observations should have the same shape.
def __init__(self, cfg: dict) -> None:
self._cfg = cfg
self._init_flag = False
self._replay_path = None
self._env_family = self._cfg.env_family
self._env_id = self._cfg.env_id
# self._num_agents = self._cfg.n_agent
self._num_landmarks = self._cfg.n_landmark
self._continuous_actions = self._cfg.get('continuous_actions', False)
self._max_cycles = self._cfg.get('max_cycles', 25)
self._act_scale = self._cfg.get('act_scale', False)
self._agent_specific_global_state = self._cfg.get('agent_specific_global_state', False)
if self._act_scale:
assert self._continuous_actions, 'Only continuous action space env needs act_scale'
def reset(self) -> np.ndarray:
if not self._init_flag:
import_module(['pettingzoo.{}.{}'.format(self._env_family, self._env_id)])
self._env = pettingzoo.__dict__[self._env_family].__dict__[self._env_id].parallel_env(
N=self._cfg.n_agent, continuous_actions=self._continuous_actions, max_cycles=self._max_cycles
)
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
np_seed = 100 * np.random.randint(1, 1000)
self._env.seed(self._seed + np_seed)
elif hasattr(self, '_seed'):
self._env.seed(self._seed)
if self._replay_path is not None:
self._env = gym.wrappers.Monitor(
self._env, self._replay_path, video_callable=lambda episode_id: True, force=True
)
obs = self._env.reset()
if not self._init_flag:
# Because agents cannot be accessed before reset
self._agents = self._env.agents
self._num_agents = len(self._env.agents)
self._action_space = gym.spaces.Dict({agent: self._env.action_space(agent) for agent in self._agents})
            single_agent_action_space = self._env.action_space(self._agents[0])
            if isinstance(single_agent_action_space, gym.spaces.Box):
                self._action_dim = single_agent_action_space.shape
            elif isinstance(single_agent_action_space, gym.spaces.Discrete):
                self._action_dim = (single_agent_action_space.n, )
            else:
                raise Exception('Only support `Box` or `Discrete` action space for single agent.')
# only for env 'simple_spread_v2', n_agent = 5
# now only for the case that each agent in the team have the same obs structure and corresponding shape.
if not self._cfg.agent_obs_only:
self._observation_space = gym.spaces.Dict({
'agent_state':
gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30)
dtype=np.float32
) ,
'global_state':
gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(70,),
dtype=np.float32
),
'agent_alone_state':
gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(self._num_agents, 22),
dtype=np.float32
),
'agent_alone_padding_state':
gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30)
dtype=np.float32
),
'action_mask':
gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(self._num_agents, self._action_dim[0]), # (self._num_agents, 5)
dtype=np.float32
)})
                # whether to use agent_specific_global_state. It is usually used in actor-critic multi-agent algos, e.g., MAPPO, MASAC.
if self._agent_specific_global_state:
                    agent_specific_global_state = gym.spaces.Box(
low = float("-inf"),
high = float("inf"),
shape = (self._num_agents, self._env.observation_space('agent_0').shape[0] + 70),
dtype = np.float32
)
                    self._observation_space['global_state'] = agent_specific_global_state
else:
# for case when env.agent_obs_only=True
self._observation_space = gym.spaces.Box(
low=float("-inf"),
high=float("inf"),
shape=(self._num_agents, self._env.observation_space('agent_0').shape[0]), # (self._num_agents, 30)
dtype=np.float32
)
self._reward_space = gym.spaces.Dict(
{
agent: gym.spaces.Box(low=float("-inf"), high=float("inf"), shape=(1, ), dtype=np.float32)
for agent in self._agents
}
)
self._init_flag = True
# self._final_eval_reward = {agent: 0. for agent in self._agents}
self._final_eval_reward = 0.
self._step_count = 0
obs_n = self._process_obs(obs)
return obs_n
def close(self) -> None:
if self._init_flag:
self._env.close()
self._init_flag = False
def render(self) -> None:
self._env.render()
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def step(self, action: np.ndarray) -> BaseEnvTimestep:
self._step_count += 1
assert isinstance(action, np.ndarray), type(action)
action = self._process_action(action)
if self._act_scale:
for agent in self._agents:
# print(action[agent])
# print(self.action_space[agent])
# print(self.action_space[agent].low, self.action_space[agent].high)
action[agent] = affine_transform(
action[agent], min_val=self.action_space[agent].low, max_val=self.action_space[agent].high
)
obs, rew, done, info = self._env.step(action)
obs_n = self._process_obs(obs)
rew_n = np.array([sum([rew[agent] for agent in self._agents])])
# collide_sum = 0
# for i in range(self._num_agents):
# collide_sum += info['n'][i][1]
# collide_penalty = self._cfg.get('collide_penal', self._num_agent)
# rew_n += collide_sum * (1.0 - collide_penalty)
# rew_n = rew_n / (self._cfg.get('max_cycles', 25) * self._num_agent)
self._final_eval_reward += rew_n
# occupied_landmarks = info['n'][0][3]
# if self._step_count >= self._max_step or occupied_landmarks >= self._n_agent \
# or occupied_landmarks >= self._num_landmarks:
# done_n = True
# else:
# done_n = False
done_n = reduce(lambda x, y: x and y, done.values()) or self._step_count >= self._max_cycles
# for agent in self._agents:
# self._final_eval_reward[agent] += rew[agent]
if done_n: # or reduce(lambda x, y: x and y, done.values())
info['final_eval_reward'] = self._final_eval_reward
# for agent in rew:
# rew[agent] = to_ndarray([rew[agent]])
return BaseEnvTimestep(obs_n, rew_n, done_n, info)
def enable_save_replay(self, replay_path: Optional[str] = None) -> None:
if replay_path is None:
replay_path = './video'
self._replay_path = replay_path
def _process_obs(self, obs: 'torch.Tensor') -> np.ndarray: # noqa
obs = np.array([obs[agent] for agent in self._agents]).astype(np.float32)
if self._cfg.get('agent_obs_only', False):
return obs
ret = {}
# Raw agent observation structure is --
# [self_vel, self_pos, landmark_rel_positions, other_agent_rel_positions, communication]
# where `communication` are signals from other agents (two for each agent in `simple_spread_v2`` env)
# agent_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2).
# Stacked observation. Contains
# - agent itself's state(velocity + position)
# - position of items that the agent can observe(e.g. other agents, landmarks)
# - communication
ret['agent_state'] = obs
# global_state: Shape (n_agent * (2 + 2) + n_landmark * 2 + n_agent * (n_agent - 1) * 2, ).
# 1-dim vector. Contains
# - all agents' state(velocity + position) +
# - all landmarks' position +
# - all agents' communication
ret['global_state'] = np.concatenate(
[
obs[0, 2:-(self._num_agents - 1) * 2], # all agents' position + all landmarks' position
obs[:, 0:2].flatten(), # all agents' velocity
obs[:, -(self._num_agents - 1) * 2:].flatten() # all agents' communication
]
)
# agent_specific_global_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2 + n_agent * (2 + 2) + n_landmark * 2 + n_agent * (n_agent - 1) * 2).
# 2-dim vector. contains
# - agent_state info
# - global_state info
if self._agent_specific_global_state:
ret['global_state'] = np.concatenate(
[
ret['agent_state'],
np.expand_dims(ret['global_state'], axis=0).repeat(5, axis=0)
],
axis=1
)
# agent_alone_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2).
# Stacked observation. Exclude other agents' positions from agent_state. Contains
# - agent itself's state(velocity + position) +
# - landmarks' positions (do not include other agents' positions)
# - communication
ret['agent_alone_state'] = np.concatenate(
[
obs[:, 0:(4 + self._num_agents * 2)], # agent itself's state + landmarks' position
obs[:, -(self._num_agents - 1) * 2:], # communication
],
1
)
# agent_alone_padding_state: Shape (n_agent, 2 + 2 + n_landmark * 2 + (n_agent - 1) * 2 + (n_agent - 1) * 2).
# Contains the same information as agent_alone_state;
# But 0-padding other agents' positions.
ret['agent_alone_padding_state'] = np.concatenate(
[
obs[:, 0:(4 + self._num_agents * 2)], # agent itself's state + landmarks' position
np.zeros((self._num_agents,
(self._num_agents - 1) * 2), np.float32), # Other agents' position(0-padding)
obs[:, -(self._num_agents - 1) * 2:] # communication
],
1
)
# action_mask: All actions are of use(either 1 for discrete or 5 for continuous). Thus all 1.
ret['action_mask'] = np.ones((self._num_agents, *self._action_dim))
return ret
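    # Rough shape summary of the dict above for the default simple_spread_v2 setup with
    # n_agent = 5 and n_landmark = 5 (illustrative numbers, derived from the comments above):
    #   agent_state:               (5, 30)    # 2 + 2 + 5*2 + 4*2 + 4*2
    #   global_state:              (70,)      # 5*4 + 5*2 + 5*4*2
    #   global_state (per-agent):  (5, 100)   # 30 + 70 when agent_specific_global_state is set
    #   agent_alone_state:         (5, 22)    # 2 + 2 + 5*2 + 4*2
    #   agent_alone_padding_state: (5, 30)
    #   action_mask:               (5, 5)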
def _process_action(self, action: 'torch.Tensor') -> Dict[str, np.ndarray]: # noqa
dict_action = {}
for i, agent in enumerate(self._agents):
agent_action = action[i]
if agent_action.shape == (1, ):
agent_action = agent_action.squeeze() # 0-dim array
dict_action[agent] = agent_action
return dict_action
def random_action(self) -> np.ndarray:
random_action = self.action_space.sample()
for k in random_action:
if isinstance(random_action[k], np.ndarray):
pass
elif isinstance(random_action[k], int):
random_action[k] = to_ndarray([random_action[k]], dtype=np.int64)
return random_action
def __repr__(self) -> str:
return "DI-engine PettingZoo Env"
@property
def agents(self) -> List[str]:
return self._agents
@property
def observation_space(self) -> gym.spaces.Space:
return self._observation_space
@property
def action_space(self) -> gym.spaces.Space:
return self._action_space
@property
def reward_space(self) -> gym.spaces.Space:
return self._reward_space
|
py | 1a3eb9523447ab62e834de8ea492fc604a9be835 | config = {
"interfaces": {
"google.datastore.v1.Datastore": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000
}
},
"methods": {
"Lookup": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"RunQuery": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"BeginTransaction": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"Commit": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"Rollback": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"AllocateIds": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"ReserveIds": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
|
py | 1a3eb99228b9a87f679bfd2ea193a6024fc577d9 | import copy
from datetime import datetime
import threading
import uuid
from optuna import distributions # NOQA
from optuna.exceptions import DuplicatedStudyError
from optuna.storages import base
from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX
from optuna.study import StudyDirection
from optuna.study import StudySummary
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
class InMemoryStorage(base.BaseStorage):
"""Storage class that stores data in memory of the Python process.
This class is not supposed to be directly accessed by library users.
"""
def __init__(self):
# type: () -> None
self._trial_id_to_study_id_and_number = {} # type: Dict[int, Tuple[int, int]]
self._study_name_to_id = {} # type: Dict[str, int]
self._studies = {} # type: Dict[int, _StudyInfo]
self._max_study_id = -1
self._max_trial_id = -1
self._lock = threading.RLock()
def __getstate__(self):
# type: () -> Dict[Any, Any]
state = self.__dict__.copy()
del state["_lock"]
return state
def __setstate__(self, state):
# type: (Dict[Any, Any]) -> None
self.__dict__.update(state)
self._lock = threading.RLock()
def create_new_study(self, study_name=None):
# type: (Optional[str]) -> int
with self._lock:
study_id = self._max_study_id + 1
self._max_study_id += 1
if study_name is not None:
if study_name in self._study_name_to_id:
raise DuplicatedStudyError
else:
study_uuid = str(uuid.uuid4())
study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid
self._studies[study_id] = _StudyInfo(study_name)
self._study_name_to_id[study_name] = study_id
return study_id
def delete_study(self, study_id):
# type: (int) -> None
with self._lock:
self._check_study_id(study_id)
for trial in self._studies[study_id].trials:
del self._trial_id_to_study_id_and_number[trial._trial_id]
study_name = self._studies[study_id].name
del self._study_name_to_id[study_name]
del self._studies[study_id]
def set_study_direction(self, study_id, direction):
# type: (int, StudyDirection) -> None
with self._lock:
self._check_study_id(study_id)
study = self._studies[study_id]
if study.direction != StudyDirection.NOT_SET and study.direction != direction:
raise ValueError(
"Cannot overwrite study direction from {} to {}.".format(
study.direction, direction
)
)
study.direction = direction
def set_study_user_attr(self, study_id, key, value):
# type: (int, str, Any) -> None
with self._lock:
self._check_study_id(study_id)
self._studies[study_id].user_attrs[key] = value
def set_study_system_attr(self, study_id, key, value):
# type: (int, str, Any) -> None
with self._lock:
self._check_study_id(study_id)
self._studies[study_id].system_attrs[key] = value
def get_study_id_from_name(self, study_name):
# type: (str) -> int
with self._lock:
if study_name not in self._study_name_to_id:
raise KeyError("No such study {}.".format(study_name))
return self._study_name_to_id[study_name]
def get_study_id_from_trial_id(self, trial_id):
# type: (int) -> int
with self._lock:
self._check_trial_id(trial_id)
return self._trial_id_to_study_id_and_number[trial_id][0]
def get_study_name_from_id(self, study_id):
# type: (int) -> str
with self._lock:
self._check_study_id(study_id)
return self._studies[study_id].name
def get_study_direction(self, study_id):
# type: (int) -> StudyDirection
with self._lock:
self._check_study_id(study_id)
return self._studies[study_id].direction
def get_study_user_attrs(self, study_id):
# type: (int) -> Dict[str, Any]
with self._lock:
self._check_study_id(study_id)
return self._studies[study_id].user_attrs
def get_study_system_attrs(self, study_id):
# type: (int) -> Dict[str, Any]
with self._lock:
self._check_study_id(study_id)
return self._studies[study_id].system_attrs
def get_all_study_summaries(self):
# type: () -> List[StudySummary]
with self._lock:
return [self._build_study_summary(study_id) for study_id in self._studies.keys()]
def _build_study_summary(self, study_id: int) -> StudySummary:
study = self._studies[study_id]
return StudySummary(
study_name=study.name,
direction=study.direction,
best_trial=copy.deepcopy(self._get_trial(study.best_trial_id))
if study.best_trial_id is not None
else None,
user_attrs=copy.deepcopy(study.user_attrs),
system_attrs=copy.deepcopy(study.system_attrs),
n_trials=len(study.trials),
datetime_start=min(
[trial.datetime_start for trial in self.get_all_trials(study_id, deepcopy=False)]
)
if study.trials
else None,
study_id=study_id,
)
def create_new_trial(self, study_id, template_trial=None):
# type: (int, Optional[FrozenTrial]) -> int
with self._lock:
self._check_study_id(study_id)
if template_trial is None:
trial = self._create_running_trial()
else:
trial = copy.deepcopy(template_trial)
trial_id = self._max_trial_id + 1
self._max_trial_id += 1
trial.number = len(self._studies[study_id].trials)
trial._trial_id = trial_id
self._trial_id_to_study_id_and_number[trial_id] = (study_id, trial.number)
self._studies[study_id].trials.append(trial)
self._update_cache(trial_id, study_id)
return trial_id
@staticmethod
def _create_running_trial():
# type: () -> FrozenTrial
return FrozenTrial(
trial_id=-1, # dummy value.
number=-1, # dummy value.
state=TrialState.RUNNING,
params={},
distributions={},
user_attrs={},
system_attrs={},
value=None,
intermediate_values={},
datetime_start=datetime.now(),
datetime_complete=None,
)
def set_trial_state(self, trial_id, state):
# type: (int, TrialState) -> bool
with self._lock:
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
trial = copy.copy(trial)
self.check_trial_is_updatable(trial_id, trial.state)
if state == TrialState.RUNNING and trial.state != TrialState.WAITING:
return False
trial.state = state
if state.is_finished():
trial.datetime_complete = datetime.now()
self._set_trial(trial_id, trial)
study_id = self._trial_id_to_study_id_and_number[trial_id][0]
self._update_cache(trial_id, study_id)
else:
self._set_trial(trial_id, trial)
return True
def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
# type: (int, str, float, distributions.BaseDistribution) -> bool
with self._lock:
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
study_id = self._trial_id_to_study_id_and_number[trial_id][0]
# Check param distribution compatibility with previous trial(s).
if param_name in self._studies[study_id].param_distribution:
distributions.check_distribution_compatibility(
self._studies[study_id].param_distribution[param_name], distribution
)
# Check param has not been set; otherwise, return False.
if param_name in trial.params:
return False
# Set param distribution.
self._studies[study_id].param_distribution[param_name] = distribution
# Set param.
trial = copy.copy(trial)
trial.params = copy.copy(trial.params)
trial.params[param_name] = distribution.to_external_repr(param_value_internal)
trial.distributions = copy.copy(trial.distributions)
trial.distributions[param_name] = distribution
self._set_trial(trial_id, trial)
return True
def get_trial_number_from_id(self, trial_id):
# type: (int) -> int
with self._lock:
self._check_trial_id(trial_id)
return self._trial_id_to_study_id_and_number[trial_id][1]
def get_best_trial(self, study_id):
# type: (int) -> FrozenTrial
with self._lock:
self._check_study_id(study_id)
best_trial_id = self._studies[study_id].best_trial_id
if best_trial_id is None:
raise ValueError("No trials are completed yet.")
return self.get_trial(best_trial_id)
def get_trial_param(self, trial_id, param_name):
# type: (int, str) -> float
with self._lock:
trial = self._get_trial(trial_id)
distribution = trial.distributions[param_name]
return distribution.to_internal_repr(trial.params[param_name])
def set_trial_value(self, trial_id, value):
# type: (int, float) -> None
with self._lock:
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
trial = copy.copy(trial)
self.check_trial_is_updatable(trial_id, trial.state)
trial.value = value
self._set_trial(trial_id, trial)
def _update_cache(self, trial_id: int, study_id: int) -> None:
trial = self._get_trial(trial_id)
if trial.state != TrialState.COMPLETE:
return
best_trial_id = self._studies[study_id].best_trial_id
if best_trial_id is None:
self._studies[study_id].best_trial_id = trial_id
return
best_trial = self._get_trial(best_trial_id)
assert best_trial is not None
best_value = best_trial.value
new_value = trial.value
if best_value is None:
self._studies[study_id].best_trial_id = trial_id
return
# Complete trials do not have `None` values.
assert new_value is not None
if self.get_study_direction(study_id) == StudyDirection.MAXIMIZE:
if best_value < new_value:
self._studies[study_id].best_trial_id = trial_id
else:
if best_value > new_value:
self._studies[study_id].best_trial_id = trial_id
def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
# type: (int, int, float) -> bool
with self._lock:
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
self.check_trial_is_updatable(trial_id, trial.state)
trial = copy.copy(trial)
values = copy.copy(trial.intermediate_values)
if step in values:
return False
values[step] = intermediate_value
trial.intermediate_values = values
self._set_trial(trial_id, trial)
return True
def set_trial_user_attr(self, trial_id, key, value):
# type: (int, str, Any) -> None
with self._lock:
self._check_trial_id(trial_id)
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
self.check_trial_is_updatable(trial_id, trial.state)
trial = copy.copy(trial)
trial.user_attrs = copy.copy(trial.user_attrs)
trial.user_attrs[key] = value
self._set_trial(trial_id, trial)
def set_trial_system_attr(self, trial_id, key, value):
# type: (int, str, Any) -> None
with self._lock:
trial = self._get_trial(trial_id)
self.check_trial_is_updatable(trial_id, trial.state)
self.check_trial_is_updatable(trial_id, trial.state)
trial = copy.copy(trial)
trial.system_attrs = copy.copy(trial.system_attrs)
trial.system_attrs[key] = value
self._set_trial(trial_id, trial)
def get_trial(self, trial_id):
# type: (int) -> FrozenTrial
with self._lock:
return self._get_trial(trial_id)
def _get_trial(self, trial_id: int) -> FrozenTrial:
self._check_trial_id(trial_id)
study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id]
return self._studies[study_id].trials[trial_number]
def _set_trial(self, trial_id: int, trial: FrozenTrial) -> None:
study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id]
self._studies[study_id].trials[trial_number] = trial
def get_all_trials(self, study_id, deepcopy=True):
# type: (int, bool) -> List[FrozenTrial]
with self._lock:
self._check_study_id(study_id)
if deepcopy:
return copy.deepcopy(self._studies[study_id].trials)
else:
return self._studies[study_id].trials[:]
def get_n_trials(self, study_id, state=None):
# type: (int, Optional[TrialState]) -> int
with self._lock:
self._check_study_id(study_id)
if state is None:
return len(self._studies[study_id].trials)
return sum(
trial.state == state for trial in self.get_all_trials(study_id, deepcopy=False)
)
def _check_study_id(self, study_id):
# type: (int) -> None
if study_id not in self._studies:
raise KeyError("No study with study_id {} exists.".format(study_id))
def _check_trial_id(self, trial_id: int) -> None:
if trial_id not in self._trial_id_to_study_id_and_number:
raise KeyError("No trial with trial_id {} exists.".format(trial_id))
class _StudyInfo:
def __init__(self, name: str) -> None:
self.trials = [] # type: List[FrozenTrial]
self.param_distribution = {} # type: Dict[str, distributions.BaseDistribution]
self.user_attrs = {} # type: Dict[str, Any]
self.system_attrs = {} # type: Dict[str, Any]
self.name = name # type: str
self.direction = StudyDirection.NOT_SET
self.best_trial_id = None # type: Optional[int]
|
py | 1a3ebab1f42ff2a8cd348d6eed94f8f8f682bc13 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Permission
from majora2 import models
from tatl import models as tmodels
from django.utils import timezone
class Command(BaseCommand):
help = "Load a list of organisations"
def add_arguments(self, parser):
parser.add_argument('filename')
def handle(self, *args, **options):
su = User.objects.get(is_superuser=True)
fh = open(options["filename"])
for line in fh:
fields = line.strip().split('\t')
code = fields[1]
name = fields[0]
org, created = models.Institute.objects.get_or_create(code=code, name=name)
org.save()
if created:
treq = tmodels.TatlPermFlex(
user = su,
substitute_user = None,
used_permission = "majora2.management.commands.load_orgs",
timestamp = timezone.now(),
content_object = org,
)
treq.save()
|
py | 1a3ebb6edb1ad8878fbbc1c262fb112b400e2a77 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Contains the classes for the global used variables:
- Request
- Response
- Session
"""
from gluon.storage import Storage, List
from gluon.streamer import streamer, stream_file_or_304_or_206, DEFAULT_CHUNK_SIZE
from gluon.xmlrpc import handler
from gluon.contenttype import contenttype
from gluon.html import xmlescape, TABLE, TR, PRE, URL
from gluon.http import HTTP, redirect
from gluon.fileutils import up
from gluon.serializers import json, custom_json
import gluon.settings as settings
from gluon.utils import web2py_uuid, secure_dumps, secure_loads
from gluon.settings import global_settings
from gluon import recfile
import hashlib
import portalocker
try:
import cPickle as pickle
except:
import pickle
from pickle import Pickler, MARK, DICT, EMPTY_DICT
from types import DictionaryType
import cStringIO
import datetime
import re
import copy_reg
import Cookie
import os
import sys
import traceback
import threading
import cgi
import urlparse
import copy
import tempfile
from gluon.cache import CacheInRam
from gluon.fileutils import copystream
FMT = '%a, %d-%b-%Y %H:%M:%S PST'
PAST = 'Sat, 1-Jan-1971 00:00:00'
FUTURE = 'Tue, 1-Dec-2999 23:59:59'
try:
from gluon.contrib.minify import minify
have_minify = True
except ImportError:
have_minify = False
try:
import simplejson as sj # external installed library
except:
try:
import json as sj # standard installed library
except:
import gluon.contrib.simplejson as sj # pure python library
regex_session_id = re.compile('^([\w\-]+/)?[\w\-\.]+$')
__all__ = ['Request', 'Response', 'Session']
current = threading.local() # thread-local storage for request-scope globals
css_template = '<link href="%s" rel="stylesheet" type="text/css" />'
js_template = '<script src="%s" type="text/javascript"></script>'
coffee_template = '<script src="%s" type="text/coffee"></script>'
typescript_template = '<script src="%s" type="text/typescript"></script>'
less_template = '<link href="%s" rel="stylesheet/less" type="text/css" />'
css_inline = '<style type="text/css">\n%s\n</style>'
js_inline = '<script type="text/javascript">\n%s\n</script>'
# IMPORTANT:
# this is required so that pickled dict(s) and class.__dict__
# are sorted and web2py can detect without ambiguity when a session changes
class SortingPickler(Pickler):
def save_dict(self, obj):
self.write(EMPTY_DICT if self.bin else MARK+DICT)
self.memoize(obj)
self._batch_setitems([(key, obj[key]) for key in sorted(obj)])
SortingPickler.dispatch = copy.copy(Pickler.dispatch)
SortingPickler.dispatch[DictionaryType] = SortingPickler.save_dict
def sorting_dumps(obj, protocol=None):
file = cStringIO.StringIO()
SortingPickler(file, protocol).dump(obj)
return file.getvalue()
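# Quick sanity sketch of why the sorted pickling matters (illustrative only):
#   d1 = {'a': 1, 'b': 2}
#   d2 = {'b': 2, 'a': 1}
#   sorting_dumps(d1) == sorting_dumps(d2)          # byte-identical regardless of key order
#   pickle.loads(sorting_dumps(d1)) == d1           # still a plain pickle stream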
# END #####################################################################
def copystream_progress(request, chunk_size=10 ** 5):
"""
Copies request.env.wsgi_input into request.body
and stores progress upload status in cache_ram
X-Progress-ID:length and X-Progress-ID:uploaded
"""
env = request.env
if not env.get('CONTENT_LENGTH', None):
return cStringIO.StringIO()
source = env['wsgi.input']
try:
size = int(env['CONTENT_LENGTH'])
except ValueError:
raise HTTP(400, "Invalid Content-Length header")
try: # Android requires this
dest = tempfile.NamedTemporaryFile()
except NotImplementedError: # and GAE this
dest = tempfile.TemporaryFile()
if not 'X-Progress-ID' in request.get_vars:
copystream(source, dest, size, chunk_size)
return dest
cache_key = 'X-Progress-ID:' + request.get_vars['X-Progress-ID']
cache_ram = CacheInRam(request) # same as cache.ram because meta_storage
cache_ram(cache_key + ':length', lambda: size, 0)
cache_ram(cache_key + ':uploaded', lambda: 0, 0)
while size > 0:
if size < chunk_size:
data = source.read(size)
cache_ram.increment(cache_key + ':uploaded', size)
else:
data = source.read(chunk_size)
cache_ram.increment(cache_key + ':uploaded', chunk_size)
length = len(data)
if length > size:
(data, length) = (data[:size], size)
size -= length
if length == 0:
break
dest.write(data)
if length < chunk_size:
break
dest.seek(0)
cache_ram(cache_key + ':length', None)
cache_ram(cache_key + ':uploaded', None)
return dest
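# A controller can poll the same cache keys to report progress back to the browser; a minimal
# sketch (hypothetical action name, assumes the usual cache.ram object is available):
#   def upload_progress():
#       cache_key = 'X-Progress-ID:' + request.vars['X-Progress-ID']
#       return dict(length=cache.ram(cache_key + ':length', lambda: 0, None),
#                   uploaded=cache.ram(cache_key + ':uploaded', lambda: 0, None))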
class Request(Storage):
"""
Defines the request object and the default values of its members
- env: environment variables, by gluon.main.wsgibase()
- cookies
- get_vars
- post_vars
- vars
- folder
- application
- function
- args
- extension
- now: datetime.datetime.now()
- utcnow : datetime.datetime.utcnow()
- is_local
- is_https
- restful()
"""
def __init__(self, env):
Storage.__init__(self)
self.env = Storage(env)
self.env.web2py_path = global_settings.applications_parent
self.env.update(global_settings)
self.cookies = Cookie.SimpleCookie()
self._get_vars = None
self._post_vars = None
self._vars = None
self._body = None
self.folder = None
self.application = None
self.function = None
self.args = List()
self.extension = 'html'
self.now = datetime.datetime.now()
self.utcnow = datetime.datetime.utcnow()
self.is_restful = False
self.is_https = False
self.is_local = False
self.global_settings = settings.global_settings
def parse_get_vars(self):
"""Takes the QUERY_STRING and unpacks it to get_vars
"""
query_string = self.env.get('QUERY_STRING', '')
dget = urlparse.parse_qs(query_string, keep_blank_values=1) # Ref: https://docs.python.org/2/library/cgi.html#cgi.parse_qs
get_vars = self._get_vars = Storage(dget)
for (key, value) in get_vars.iteritems():
if isinstance(value, list) and len(value) == 1:
get_vars[key] = value[0]
def parse_post_vars(self):
"""Takes the body of the request and unpacks it into
post_vars. application/json is also automatically parsed
"""
env = self.env
post_vars = self._post_vars = Storage()
body = self.body
# if content-type is application/json, we must read the body
is_json = env.get('content_type', '')[:16] == 'application/json'
if is_json:
try:
json_vars = sj.load(body)
except:
# incoherent request bodies can still be parsed "ad-hoc"
json_vars = {}
pass
# update vars and get_vars with what was posted as json
if isinstance(json_vars, dict):
post_vars.update(json_vars)
body.seek(0)
# parse POST variables on POST, PUT, BOTH only in post_vars
if (body and not is_json
and env.request_method in ('POST', 'PUT', 'DELETE', 'BOTH')):
query_string = env.pop('QUERY_STRING', None)
dpost = cgi.FieldStorage(fp=body, environ=env, keep_blank_values=1)
try:
post_vars.update(dpost)
except:
pass
if query_string is not None:
env['QUERY_STRING'] = query_string
# The same detection used by FieldStorage to detect multipart POSTs
body.seek(0)
def listify(a):
return (not isinstance(a, list) and [a]) or a
try:
keys = sorted(dpost)
except TypeError:
keys = []
for key in keys:
if key is None:
continue # not sure why cgi.FieldStorage returns None key
dpk = dpost[key]
# if an element is not a file replace it with
# its value else leave it alone
pvalue = listify([(_dpk if _dpk.filename else _dpk.value)
for _dpk in dpk]
if isinstance(dpk, list) else
(dpk if dpk.filename else dpk.value))
if len(pvalue):
post_vars[key] = (len(pvalue) > 1 and pvalue) or pvalue[0]
@property
def body(self):
if self._body is None:
try:
self._body = copystream_progress(self)
except IOError:
raise HTTP(400, "Bad Request - HTTP body is incomplete")
return self._body
def parse_all_vars(self):
"""Merges get_vars and post_vars to vars
"""
self._vars = copy.copy(self.get_vars)
for key, value in self.post_vars.iteritems():
if not key in self._vars:
self._vars[key] = value
else:
if not isinstance(self._vars[key], list):
self._vars[key] = [self._vars[key]]
self._vars[key] += value if isinstance(value, list) else [value]
@property
def get_vars(self):
"""Lazily parses the query string into get_vars
"""
if self._get_vars is None:
self.parse_get_vars()
return self._get_vars
@property
def post_vars(self):
"""Lazily parse the body into post_vars
"""
if self._post_vars is None:
self.parse_post_vars()
return self._post_vars
@property
def vars(self):
"""Lazily parses all get_vars and post_vars to fill vars
"""
if self._vars is None:
self.parse_all_vars()
return self._vars
def compute_uuid(self):
self.uuid = '%s/%s.%s.%s' % (
self.application,
self.client.replace(':', '_'),
self.now.strftime('%Y-%m-%d.%H-%M-%S'),
web2py_uuid())
return self.uuid
def user_agent(self):
from gluon.contrib import user_agent_parser
session = current.session
user_agent = session._user_agent
if user_agent:
return user_agent
user_agent = user_agent_parser.detect(self.env.http_user_agent)
for key, value in user_agent.items():
if isinstance(value, dict):
user_agent[key] = Storage(value)
user_agent = session._user_agent = Storage(user_agent)
return user_agent
def requires_https(self):
"""
If request comes in over HTTP, redirects it to HTTPS
and secures the session.
"""
cmd_opts = global_settings.cmd_options
# checking if this is called within the scheduler or within the shell
# in addition to checking if it's not a cronjob
if ((cmd_opts and (cmd_opts.shell or cmd_opts.scheduler))
or global_settings.cronjob or self.is_https):
current.session.secure()
else:
current.session.forget()
redirect(URL(scheme='https', args=self.args, vars=self.vars))
def restful(self):
def wrapper(action, self=self):
def f(_action=action, _self=self, *a, **b):
self.is_restful = True
method = _self.env.request_method
if len(_self.args) and '.' in _self.args[-1]:
_self.args[-1], _, self.extension = self.args[-1].rpartition('.')
current.response.headers['Content-Type'] = \
contenttype('.' + _self.extension.lower())
rest_action = _action().get(method, None)
if not (rest_action and method == method.upper()
and callable(rest_action)):
raise HTTP(405, "method not allowed")
try:
return rest_action(*_self.args, **getattr(_self, 'vars', {}))
except TypeError, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
if len(traceback.extract_tb(exc_traceback)) == 1:
raise HTTP(400, "invalid arguments")
else:
raise
f.__doc__ = action.__doc__
f.__name__ = action.__name__
return f
return wrapper
class Response(Storage):
"""
Defines the response object and the default values of its members
response.write( ) can be used to write in the output html
"""
def __init__(self):
Storage.__init__(self)
self.status = 200
self.headers = dict()
self.headers['X-Powered-By'] = 'web2py'
self.body = cStringIO.StringIO()
self.session_id = None
self.cookies = Cookie.SimpleCookie()
self.postprocessing = []
self.flash = '' # used by the default view layout
self.meta = Storage() # used by web2py_ajax.html
self.menu = [] # used by the default view layout
self.files = [] # used by web2py_ajax.html
self._vars = None
self._caller = lambda f: f()
self._view_environment = None
self._custom_commit = None
self._custom_rollback = None
self.generic_patterns = ['*']
self.delimiters = ('{{', '}}')
self.formstyle = 'table3cols'
self.form_label_separator = ': '
def write(self, data, escape=True):
if not escape:
self.body.write(str(data))
else:
self.body.write(xmlescape(data))
def render(self, *a, **b):
from compileapp import run_view_in
if len(a) > 2:
raise SyntaxError(
'Response.render can be called with two arguments, at most')
elif len(a) == 2:
(view, self._vars) = (a[0], a[1])
elif len(a) == 1 and isinstance(a[0], str):
(view, self._vars) = (a[0], {})
elif len(a) == 1 and hasattr(a[0], 'read') and callable(a[0].read):
(view, self._vars) = (a[0], {})
elif len(a) == 1 and isinstance(a[0], dict):
(view, self._vars) = (None, a[0])
else:
(view, self._vars) = (None, {})
self._vars.update(b)
self._view_environment.update(self._vars)
if view:
import cStringIO
(obody, oview) = (self.body, self.view)
(self.body, self.view) = (cStringIO.StringIO(), view)
run_view_in(self._view_environment)
page = self.body.getvalue()
self.body.close()
(self.body, self.view) = (obody, oview)
else:
run_view_in(self._view_environment)
page = self.body.getvalue()
return page
def include_meta(self):
s = "\n";
for meta in (self.meta or {}).iteritems():
k, v = meta
if isinstance(v,dict):
s = s+'<meta'+''.join(' %s="%s"' % (xmlescape(key), xmlescape(v[key])) for key in v) +' />\n'
else:
s = s+'<meta name="%s" content="%s" />\n' % (k, xmlescape(v))
self.write(s, escape=False)
def include_files(self, extensions=None):
"""
Caching method for writing out files.
By default, caches in ram for 5 minutes. To change,
response.cache_includes = (cache_method, time_expire).
Example: (cache.disk, 60) # caches to disk for 1 minute.
"""
from gluon import URL
files = []
has_js = has_css = False
for item in self.files:
if extensions and not item.split('.')[-1] in extensions:
continue
if item in files:
continue
if item.endswith('.js'):
has_js = True
if item.endswith('.css'):
has_css = True
files.append(item)
if have_minify and ((self.optimize_css and has_css) or (self.optimize_js and has_js)):
# cache for 5 minutes by default
key = hashlib.md5(repr(files)).hexdigest()
cache = self.cache_includes or (current.cache.ram, 60 * 5)
def call_minify(files=files):
return minify.minify(files,
URL('static', 'temp'),
current.request.folder,
self.optimize_css,
self.optimize_js)
if cache:
cache_model, time_expire = cache
files = cache_model('response.files.minified/' + key,
call_minify,
time_expire)
else:
files = call_minify()
s = ''
for item in files:
if isinstance(item, str):
f = item.lower().split('?')[0]
# if static_version we need also to check for
# static_version_urls. In that case, the _.x.x.x
# bit would have already been added by the URL()
# function
if self.static_version and not self.static_version_urls:
item = item.replace(
'/static/', '/static/_%s/' % self.static_version, 1)
if f.endswith('.css'):
s += css_template % item
elif f.endswith('.js'):
s += js_template % item
elif f.endswith('.coffee'):
s += coffee_template % item
elif f.endswith('.ts'):
# http://www.typescriptlang.org/
s += typescript_template % item
elif f.endswith('.less'):
s += less_template % item
elif isinstance(item, (list, tuple)):
f = item[0]
if f == 'css:inline':
s += css_inline % item[1]
elif f == 'js:inline':
s += js_inline % item[1]
self.write(s, escape=False)
def stream(self,
stream,
chunk_size=DEFAULT_CHUNK_SIZE,
request=None,
attachment=False,
filename=None
):
"""
If in a controller function::
return response.stream(file, 100)
the file content will be streamed at 100 bytes at the time
Args:
stream: filename or read()able content
chunk_size(int): Buffer size
request: the request object
attachment(bool): prepares the correct headers to download the file
as an attachment. Usually creates a pop-up download window
on browsers
filename(str): the name for the attachment
Note:
for using the stream name (filename) with attachments
the option must be explicitly set as function parameter (will
default to the last request argument otherwise)
"""
headers = self.headers
# for attachment settings and backward compatibility
keys = [item.lower() for item in headers]
if attachment:
if filename is None:
attname = ""
else:
attname = filename
headers["Content-Disposition"] = \
'attachment;filename="%s"' % attname
if not request:
request = current.request
if isinstance(stream, (str, unicode)):
stream_file_or_304_or_206(stream,
chunk_size=chunk_size,
request=request,
headers=headers,
status=self.status)
# ## the following is for backward compatibility
if hasattr(stream, 'name'):
filename = stream.name
if filename and not 'content-type' in keys:
headers['Content-Type'] = contenttype(filename)
if filename and not 'content-length' in keys:
try:
headers['Content-Length'] = \
os.path.getsize(filename)
except OSError:
pass
env = request.env
# Internet Explorer < 9.0 will not allow downloads over SSL unless caching is enabled
if request.is_https and isinstance(env.http_user_agent, str) and \
not re.search(r'Opera', env.http_user_agent) and \
re.search(r'MSIE [5-8][^0-9]', env.http_user_agent):
headers['Pragma'] = 'cache'
headers['Cache-Control'] = 'private'
if request and env.web2py_use_wsgi_file_wrapper:
wrapped = env.wsgi_file_wrapper(stream, chunk_size)
else:
wrapped = streamer(stream, chunk_size=chunk_size)
return wrapped
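    # Illustrative usage sketch (file name assumed, not from the original
    # source): a controller action that streams a private file as a download
    # using the method above:
    #     def serve_report():
    #         path = os.path.join(request.folder, 'private', 'report.pdf')
    #         return response.stream(path, chunk_size=4096,
    #                                attachment=True, filename='report.pdf')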
def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
"""
Example of usage in controller::
def download():
return response.download(request, db)
Downloads from http://..../download/filename
"""
from pydal.exceptions import NotAuthorizedException, NotFoundException
current.session.forget(current.response)
if not request.args:
raise HTTP(404)
name = request.args[-1]
items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*').match(name)
if not items:
raise HTTP(404)
(t, f) = (items.group('table'), items.group('field'))
try:
field = db[t][f]
except AttributeError:
raise HTTP(404)
try:
(filename, stream) = field.retrieve(name, nameonly=True)
except NotAuthorizedException:
raise HTTP(403)
except NotFoundException:
raise HTTP(404)
except IOError:
raise HTTP(404)
headers = self.headers
headers['Content-Type'] = contenttype(name)
if download_filename is None:
download_filename = filename
if attachment:
headers['Content-Disposition'] = \
                'attachment; filename="%s"' % download_filename.replace('"', '\\"')
return self.stream(stream, chunk_size=chunk_size, request=request)
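    # Illustrative variant (table/field names assumed): serve the stored file
    # inline rather than as an attachment, under a friendlier name:
    #     def preview():
    #         return response.download(request, db, attachment=False,
    #                                  download_filename='preview.jpg')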
def json(self, data, default=None):
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = 'application/json'
return json(data, default=default or custom_json)
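    # Illustrative usage sketch (payload assumed): return JSON from an action,
    # passing a fallback serializer for types the json module cannot handle:
    #     def api_status():
    #         return response.json({'ok': True, 'when': request.now},
    #                              default=str)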
def xmlrpc(self, request, methods):
"""
assuming::
def add(a, b):
return a+b
        in a controller function \"func\"::
return response.xmlrpc(request, [add])
the controller will be able to handle xmlrpc requests for
the add function. Example::
import xmlrpclib
connection = xmlrpclib.ServerProxy(
'http://hostname/app/contr/func')
print connection.add(3, 4)
"""
return handler(request, self, methods)
def toolbar(self):
        from html import DIV, SCRIPT, BEAUTIFY, TAG, URL, A, TABLE, TR, PRE  # TABLE, TR and PRE are used below
BUTTON = TAG.button
admin = URL("admin", "default", "design", extension='html',
args=current.request.application)
from gluon.dal import DAL
dbstats = []
dbtables = {}
infos = DAL.get_instances()
for k, v in infos.iteritems():
dbstats.append(TABLE(*[TR(PRE(row[0]), '%.2fms' % (row[1]*1000))
for row in v['dbstats']]))
dbtables[k] = dict(defined=v['dbtables']['defined'] or '[no defined tables]',
lazy=v['dbtables']['lazy'] or '[no lazy tables]')
u = web2py_uuid()
backtotop = A('Back to top', _href="#totop-%s" % u)
# Convert lazy request.vars from property to Storage so they
# will be displayed in the toolbar.
request = copy.copy(current.request)
request.update(vars=current.request.vars,
get_vars=current.request.get_vars,
post_vars=current.request.post_vars)
return DIV(
BUTTON('design', _onclick="document.location='%s'" % admin),
BUTTON('request',
_onclick="jQuery('#request-%s').slideToggle()" % u),
BUTTON('response',
_onclick="jQuery('#response-%s').slideToggle()" % u),
BUTTON('session',
_onclick="jQuery('#session-%s').slideToggle()" % u),
BUTTON('db tables',
_onclick="jQuery('#db-tables-%s').slideToggle()" % u),
BUTTON('db stats',
_onclick="jQuery('#db-stats-%s').slideToggle()" % u),
DIV(BEAUTIFY(request), backtotop,
_class="w2p-toolbar-hidden", _id="request-%s" % u),
DIV(BEAUTIFY(current.session), backtotop,
_class="w2p-toolbar-hidden", _id="session-%s" % u),
DIV(BEAUTIFY(current.response), backtotop,
_class="w2p-toolbar-hidden", _id="response-%s" % u),
DIV(BEAUTIFY(dbtables), backtotop,
_class="w2p-toolbar-hidden", _id="db-tables-%s" % u),
DIV(BEAUTIFY(dbstats), backtotop,
_class="w2p-toolbar-hidden", _id="db-stats-%s" % u),
SCRIPT("jQuery('.w2p-toolbar-hidden').hide()"),
_id="totop-%s" % u
)
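    # Illustrative usage sketch: during development the toolbar is typically
    # embedded at the bottom of a layout view with {{=response.toolbar()}}, so
    # the request/response/session/db panels built above can be toggled in the
    # rendered page.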
class Session(Storage):
"""
Defines the session object and the default values of its members (None)
- session_storage_type : 'file', 'db', or 'cookie'
- session_cookie_compression_level :
- session_cookie_expires : cookie expiration
- session_cookie_key : for encrypted sessions in cookies
- session_id : a number or None if no session
- session_id_name :
- session_locked :
- session_masterapp :
- session_new : a new session obj is being created
- session_hash : hash of the pickled loaded session
    - session_pickled : pickled session
if session in cookie:
- session_data_name : name of the cookie for session data
if session in db:
- session_db_record_id
- session_db_table
- session_db_unique_key
if session in file:
- session_file
- session_filename
"""
def connect(self,
request=None,
response=None,
db=None,
tablename='web2py_session',
masterapp=None,
migrate=True,
separate=None,
check_client=False,
cookie_key=None,
cookie_expires=None,
compression_level=None
):
"""
        Used in models; allows customization of Session handling
Args:
request: the request object
response: the response object
db: to store/retrieve sessions in db (a table is created)
tablename(str): table name
            masterapp(str): points to another app's sessions. This enables an
                "SSO" environment among apps
migrate: passed to the underlying db
            separate: with True, stores session files in a subfolder named
                after the last 2 characters of the session id. Can also be a
                function, e.g. ::
separate=lambda(session_name): session_name[-2:]
check_client: if True, sessions can only come from the same ip
cookie_key(str): secret for cookie encryption
cookie_expires: sets the expiration of the cookie
compression_level(int): 0-9, sets zlib compression on the data
before the encryption
"""
from gluon.dal import Field
request = request or current.request
response = response or current.response
masterapp = masterapp or request.application
cookies = request.cookies
self._unlock(response)
response.session_masterapp = masterapp
response.session_id_name = 'session_id_%s' % masterapp.lower()
response.session_data_name = 'session_data_%s' % masterapp.lower()
response.session_cookie_expires = cookie_expires
response.session_client = str(request.client).replace(':', '.')
response.session_cookie_key = cookie_key
response.session_cookie_compression_level = compression_level
# check if there is a session_id in cookies
try:
old_session_id = cookies[response.session_id_name].value
except KeyError:
old_session_id = None
response.session_id = old_session_id
# if we are supposed to use cookie based session data
if cookie_key:
response.session_storage_type = 'cookie'
elif db:
response.session_storage_type = 'db'
else:
response.session_storage_type = 'file'
# why do we do this?
# because connect may be called twice, by web2py and in models.
# the first time there is no db yet so it should do nothing
if (global_settings.db_sessions is True
or masterapp in global_settings.db_sessions):
return
if response.session_storage_type == 'cookie':
# check if there is session data in cookies
if response.session_data_name in cookies:
session_cookie_data = cookies[response.session_data_name].value
else:
session_cookie_data = None
if session_cookie_data:
data = secure_loads(session_cookie_data, cookie_key,
compression_level=compression_level)
if data:
self.update(data)
response.session_id = True
# else if we are supposed to use file based sessions
elif response.session_storage_type == 'file':
response.session_new = False
response.session_file = None
            # check if the session_id points to a valid session filename
if response.session_id:
if not regex_session_id.match(response.session_id):
response.session_id = None
else:
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
try:
response.session_file = \
recfile.open(response.session_filename, 'rb+')
portalocker.lock(response.session_file,
portalocker.LOCK_EX)
response.session_locked = True
self.update(pickle.load(response.session_file))
response.session_file.seek(0)
oc = response.session_filename.split('/')[-1].split('-')[0]
if check_client and response.session_client != oc:
raise Exception("cookie attack")
except:
response.session_id = None
if not response.session_id:
uuid = web2py_uuid()
response.session_id = '%s-%s' % (response.session_client, uuid)
separate = separate and (lambda session_name: session_name[-2:])
if separate:
prefix = separate(response.session_id)
response.session_id = '%s/%s' % (prefix, response.session_id)
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
response.session_new = True
# else the session goes in db
elif response.session_storage_type == 'db':
if global_settings.db_sessions is not True:
global_settings.db_sessions.add(masterapp)
            # if we already had a session open on file, close it (yes, it can happen)
if response.session_file:
self._close(response)
            # on GAE, tickets also go in the DB
if settings.global_settings.web2py_runtime_gae:
request.tickets_db = db
if masterapp == request.application:
table_migrate = migrate
else:
table_migrate = False
tname = tablename + '_' + masterapp
table = db.get(tname, None)
# Field = db.Field
if table is None:
db.define_table(
tname,
Field('locked', 'boolean', default=False),
Field('client_ip', length=64),
Field('created_datetime', 'datetime',
default=request.now),
Field('modified_datetime', 'datetime'),
Field('unique_key', length=64),
Field('session_data', 'blob'),
migrate=table_migrate,
)
table = db[tname] # to allow for lazy table
response.session_db_table = table
if response.session_id:
# Get session data out of the database
try:
(record_id, unique_key) = response.session_id.split(':')
record_id = long(record_id)
except (TypeError, ValueError):
record_id = None
# Select from database
if record_id:
row = table(record_id, unique_key=unique_key)
# Make sure the session data exists in the database
if row:
# rows[0].update_record(locked=True)
# Unpickle the data
session_data = pickle.loads(row.session_data)
self.update(session_data)
response.session_new = False
else:
record_id = None
if record_id:
response.session_id = '%s:%s' % (record_id, unique_key)
response.session_db_unique_key = unique_key
response.session_db_record_id = record_id
else:
response.session_id = None
response.session_new = True
# if there is no session id yet, we'll need to create a
# new session
else:
response.session_new = True
        # set the cookie now if we know the session_id so the user can set
        # cookie attributes in controllers/models; the cookie may still be
        # reset later
# Removed comparison between old and new session ids - should send
# the cookie all the time
if isinstance(response.session_id, str):
response.cookies[response.session_id_name] = response.session_id
response.cookies[response.session_id_name]['path'] = '/'
if cookie_expires:
response.cookies[response.session_id_name]['expires'] = \
cookie_expires.strftime(FMT)
session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_hash = hashlib.md5(session_pickled).hexdigest()
if self.flash:
(response.flash, self.flash) = (self.flash, None)
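    # Illustrative usage sketches (secrets and the db object are assumed)
    # showing how the three storage back-ends selected above are chosen:
    #     session.connect(request, response)                 # file-based (default)
    #     session.connect(request, response, db=db)          # db-based
    #     session.connect(request, response,
    #                     cookie_key='a-long-random-secret',
    #                     compression_level=9)               # cookie-based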
def renew(self, clear_session=False):
if clear_session:
self.clear()
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
if response.session_storage_type == 'cookie':
return
# if the session goes in file
if response.session_storage_type == 'file':
self._close(response)
uuid = web2py_uuid()
response.session_id = '%s-%s' % (response.session_client, uuid)
separate = (lambda s: s[-2:]) if session and response.session_id[2:3] == "/" else None
if separate:
prefix = separate(response.session_id)
response.session_id = '%s/%s' % \
(prefix, response.session_id)
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
response.session_new = True
# else the session goes in db
elif response.session_storage_type == 'db':
table = response.session_db_table
# verify that session_id exists
if response.session_file:
self._close(response)
if response.session_new:
return
# Get session data out of the database
if response.session_id is None:
return
(record_id, sep, unique_key) = response.session_id.partition(':')
if record_id.isdigit() and long(record_id) > 0:
new_unique_key = web2py_uuid()
row = table(record_id)
if row and row.unique_key == unique_key:
table._db(table.id == record_id).update(unique_key=new_unique_key)
else:
record_id = None
if record_id:
response.session_id = '%s:%s' % (record_id, new_unique_key)
response.session_db_record_id = record_id
response.session_db_unique_key = new_unique_key
else:
response.session_new = True
def _fixup_before_save(self):
response = current.response
rcookies = response.cookies
if self._forget and response.session_id_name in rcookies:
del rcookies[response.session_id_name]
elif self._secure and response.session_id_name in rcookies:
rcookies[response.session_id_name]['secure'] = True
def clear_session_cookies(self):
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
rcookies = response.cookies
# if not cookie_key, but session_data_name in cookies
# expire session_data_name from cookies
if response.session_data_name in cookies:
rcookies[response.session_data_name] = 'expired'
rcookies[response.session_data_name]['path'] = '/'
rcookies[response.session_data_name]['expires'] = PAST
if response.session_id_name in rcookies:
del rcookies[response.session_id_name]
def save_session_id_cookie(self):
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
rcookies = response.cookies
# if not cookie_key, but session_data_name in cookies
# expire session_data_name from cookies
if not response.session_cookie_key:
if response.session_data_name in cookies:
rcookies[response.session_data_name] = 'expired'
rcookies[response.session_data_name]['path'] = '/'
rcookies[response.session_data_name]['expires'] = PAST
if response.session_id:
rcookies[response.session_id_name] = response.session_id
rcookies[response.session_id_name]['path'] = '/'
expires = response.session_cookie_expires
if isinstance(expires, datetime.datetime):
expires = expires.strftime(FMT)
if expires:
rcookies[response.session_id_name]['expires'] = expires
def clear(self):
# see https://github.com/web2py/web2py/issues/735
response = current.response
if response.session_storage_type == 'file':
target = recfile.generate(response.session_filename)
try:
os.unlink(target)
except:
pass
elif response.session_storage_type == 'db':
table = response.session_db_table
if response.session_id:
(record_id, sep, unique_key) = response.session_id.partition(':')
if record_id.isdigit() and long(record_id) > 0:
table._db(table.id == record_id).delete()
Storage.clear(self)
def is_new(self):
if self._start_timestamp:
return False
else:
self._start_timestamp = datetime.datetime.today()
return True
def is_expired(self, seconds=3600):
now = datetime.datetime.today()
if not self._last_timestamp or \
self._last_timestamp + datetime.timedelta(seconds=seconds) > now:
self._last_timestamp = now
return False
else:
return True
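    # Illustrative usage sketch (controller/action names assumed; relies on
    # web2py's global redirect()/URL() helpers): a model-level auto-logout
    # check built on is_expired():
    #     if session.is_expired(seconds=1800):
    #         session.flash = 'Session expired'
    #         redirect(URL('default', 'user', args='login'))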
def secure(self):
self._secure = True
def forget(self, response=None):
self._close(response)
self._forget = True
def _try_store_in_cookie(self, request, response):
if self._forget or self._unchanged(response):
# self.clear_session_cookies()
self.save_session_id_cookie()
return False
name = response.session_data_name
compression_level = response.session_cookie_compression_level
value = secure_dumps(dict(self),
response.session_cookie_key,
compression_level=compression_level)
rcookies = response.cookies
rcookies.pop(name, None)
rcookies[name] = value
rcookies[name]['path'] = '/'
expires = response.session_cookie_expires
if isinstance(expires, datetime.datetime):
expires = expires.strftime(FMT)
if expires:
rcookies[name]['expires'] = expires
return True
def _unchanged(self, response):
session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_pickled = session_pickled
session_hash = hashlib.md5(session_pickled).hexdigest()
return response.session_hash == session_hash
def _try_store_in_db(self, request, response):
# don't save if file-based sessions,
# no session id, or session being forgotten
# or no changes to session (Unless the session is new)
if (not response.session_db_table
or self._forget
or (self._unchanged(response) and not response.session_new)):
if (not response.session_db_table
and global_settings.db_sessions is not True
and response.session_masterapp in global_settings.db_sessions):
global_settings.db_sessions.remove(response.session_masterapp)
# self.clear_session_cookies()
self.save_session_id_cookie()
return False
table = response.session_db_table
record_id = response.session_db_record_id
if response.session_new:
unique_key = web2py_uuid()
else:
unique_key = response.session_db_unique_key
session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
dd = dict(locked=False,
client_ip=response.session_client,
modified_datetime=request.now,
session_data=session_pickled,
unique_key=unique_key)
if record_id:
if not table._db(table.id == record_id).update(**dd):
record_id = None
if not record_id:
record_id = table.insert(**dd)
response.session_id = '%s:%s' % (record_id, unique_key)
response.session_db_unique_key = unique_key
response.session_db_record_id = record_id
self.save_session_id_cookie()
return True
def _try_store_in_cookie_or_file(self, request, response):
if response.session_storage_type == 'file':
return self._try_store_in_file(request, response)
if response.session_storage_type == 'cookie':
return self._try_store_in_cookie(request, response)
def _try_store_in_file(self, request, response):
try:
if (not response.session_id or self._forget
or self._unchanged(response)):
# self.clear_session_cookies()
self.save_session_id_cookie()
return False
if response.session_new or not response.session_file:
            # Test whether the session sub-folder exists; if not, create it
session_folder = os.path.dirname(response.session_filename)
if not os.path.exists(session_folder):
os.mkdir(session_folder)
response.session_file = recfile.open(response.session_filename, 'wb')
portalocker.lock(response.session_file, portalocker.LOCK_EX)
response.session_locked = True
if response.session_file:
session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_file.write(session_pickled)
response.session_file.truncate()
finally:
self._close(response)
self.save_session_id_cookie()
return True
def _unlock(self, response):
if response and response.session_file and response.session_locked:
try:
portalocker.unlock(response.session_file)
response.session_locked = False
except: # this should never happen but happens in Windows
pass
def _close(self, response):
if response and response.session_file:
self._unlock(response)
try:
response.session_file.close()
del response.session_file
except:
pass
def pickle_session(s):
return Session, (dict(s),)
copy_reg.pickle(Session, pickle_session)
|
py | 1a3ebb88b8c2e2e6612d093c3b13c28f1d646c70 | import pandas as pd
import numpy as np
import datetime
import zipfile
import urllib.request
import os
import shutil
def get_confirmados_por_semana(file_path=None, activos=False):
"""Regresa la serie de tiempo con agregados y acumulados por municipio y por semana."""
serie_municipios = pd.read_csv(file_path,
dtype={'ENTIDAD_UM': str,
'ENTIDAD_NAC': str,
'ENTIDAD_RES': str,
'MUNICIPIO_RES': str}, encoding='latin1')
serie_municipios.loc[:, 'municipio_cvegeo'] = serie_municipios['ENTIDAD_RES'] + \
serie_municipios['MUNICIPIO_RES']
confirmados_municipios = serie_municipios.loc[serie_municipios['RESULTADO'] == 1, [
'FECHA_SINTOMAS', 'FECHA_INGRESO', 'RESULTADO', 'municipio_cvegeo']].copy()
confirmados_municipios.loc[:, 'FECHA_INGRESO'] = pd.to_datetime(
confirmados_municipios['FECHA_INGRESO'], format="%Y-%m-%d")
confirmados_municipios.loc[:, 'FECHA_SINTOMAS'] = pd.to_datetime(
confirmados_municipios['FECHA_SINTOMAS'], format="%Y-%m-%d")
if activos:
print("Calculando casos activos")
intervalo = pd.Timedelta('14 days')
por_fecha = (confirmados_municipios.sort_values(by='FECHA_SINTOMAS')
.sort_values(by='FECHA_SINTOMAS')
.set_index('FECHA_SINTOMAS')
.groupby(['municipio_cvegeo', 'FECHA_INGRESO'])
.rolling(intervalo).sum()[['RESULTADO']]
.rename({'RESULTADO':'total'}, axis=1)
.reset_index()
.drop('FECHA_SINTOMAS', axis=1)
)
else:
por_fecha = (confirmados_municipios.groupby(['municipio_cvegeo', 'FECHA_INGRESO'])
.size()
.reset_index()
.rename({0: 'total'}, axis=1)
)
confirmados_municipios_dia = (por_fecha
.pivot_table("total", "FECHA_INGRESO", "municipio_cvegeo")
.unstack()
.reset_index()
.fillna(0)
.rename({0: 'total'}, axis=1)
.set_index(['FECHA_INGRESO', 'municipio_cvegeo'])
)
por_semana = (confirmados_municipios_dia
.groupby(['municipio_cvegeo', pd.Grouper(level='FECHA_INGRESO', freq='W')])[['total']]
.sum()
)
por_semana = (por_semana
.reset_index()
.set_index(['FECHA_INGRESO', 'municipio_cvegeo'])
)
por_semana = por_semana.reset_index()
por_semana = (por_semana
.set_index('FECHA_INGRESO')
.groupby('municipio_cvegeo')
.apply(lambda d: d.reindex(pd.date_range(min(por_semana.FECHA_INGRESO),
max(por_semana.FECHA_INGRESO),
freq='W')))
.drop('municipio_cvegeo', axis=1)
.fillna(method='ffill')
)
por_semana.index.names = ['municipio_cvegeo', 'FECHA_INGRESO']
por_semana.reset_index('municipio_cvegeo', inplace=True)
por_semana['acumulados'] = por_semana.groupby('municipio_cvegeo').cumsum()
return por_semana
def get_defunciones_por_semana(file_path=None):
"""Regresa la serie de tiempo con agregados y acumulados por municipio y por semana."""
serie_municipios = pd.read_csv(file_path,
dtype={'ENTIDAD_UM': str,
'ENTIDAD_NAC': str,
'ENTIDAD_RES': str,
'MUNICIPIO_RES': str}, encoding='latin1')
serie_municipios.loc[:, 'municipio_cvegeo'] = serie_municipios['ENTIDAD_RES'] + \
serie_municipios['MUNICIPIO_RES']
confirmados_municipios = serie_municipios.loc[(serie_municipios['RESULTADO'] == 1) & (serie_municipios['FECHA_DEF'] != '9999-99-99'), [
'FECHA_SINTOMAS', 'FECHA_DEF', 'RESULTADO', 'municipio_cvegeo']].copy()
confirmados_municipios.loc[:, 'FECHA_DEF'] = pd.to_datetime(
confirmados_municipios['FECHA_DEF'], format="%Y-%m-%d")
confirmados_municipios.loc[:, 'FECHA_SINTOMAS'] = pd.to_datetime(
confirmados_municipios['FECHA_SINTOMAS'], format="%Y-%m-%d")
por_fecha = (confirmados_municipios.groupby(['municipio_cvegeo', 'FECHA_DEF'])
.size()
.reset_index()
.rename({0: 'total'}, axis=1)
)
confirmados_municipios_dia = (por_fecha
.pivot_table("total", "FECHA_DEF", "municipio_cvegeo")
.unstack()
.reset_index()
.fillna(0)
.rename({0: 'total'}, axis=1)
.set_index(['FECHA_DEF', 'municipio_cvegeo'])
)
por_semana = (confirmados_municipios_dia
.groupby(['municipio_cvegeo', pd.Grouper(level='FECHA_DEF', freq='W')])[['total']]
.sum()
)
por_semana = (por_semana
.reset_index()
.set_index(['FECHA_DEF', 'municipio_cvegeo'])
)
por_semana = por_semana.reset_index()
por_semana = (por_semana
.set_index('FECHA_DEF')
.groupby('municipio_cvegeo')
.apply(lambda d: d.reindex(pd.date_range(min(por_semana.FECHA_DEF),
max(por_semana.FECHA_DEF),
freq='W')))
.drop('municipio_cvegeo', axis=1)
.fillna(method='ffill')
)
por_semana.index.names = ['municipio_cvegeo', 'FECHA_DEF']
por_semana.reset_index('municipio_cvegeo', inplace=True)
por_semana['acumulados'] = por_semana.groupby('municipio_cvegeo').cumsum()
return por_semana
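# Illustrative usage sketch (the CSV path below is an assumed local copy of the
# official open-data file, not part of this module):
#     confirmados = get_confirmados_por_semana('datos_abiertos_covid19.csv')
#     defunciones = get_defunciones_por_semana('datos_abiertos_covid19.csv')
#     cdmx = confirmados[confirmados.municipio_cvegeo == '09015']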
|
py | 1a3ebbbc2ba811328c93faf5486ac400fca83666 | """
My attempt at a python implementation of BLS.
"""
import os
import sys
import numpy as np
from numpy import array
import pdb
from scipy import ndimage
from keptoy import P2a,a2tdur,ntrans
# Global parameters:
cad = 30./60./24.
# The minimum acceptable points to report s2n
nptsMi =10
# bpt - the number of bins in folded transit.
bpt = 5.
# In order for a region to qualify as having a transit, it must have
# fillFact of the expected points
fillFact = .75
# Minimum number of filled transits in order for a point to be factored
# into S/N
ntMi = 3
# blsw tries millions of (P, phase) combinations.  We want to
# compute the FAP by fitting the PDF and extrapolating to the high S/N
# regime.  nS2nFAP is the number of s2n points to return.
nS2nFAP = 1e6
def blsw(t0,f0,PGrid,retph=False,retdict=False):
"""
Compute BLS statistic for a the range of periods specified in PGrid.
Returns:
return s2nGrid,s2nFAP,ntrials
"""
# Protect the input vectors.
t = t0.copy()
f = f0.copy()
# For a given period, there is an expected transit duration
    # assuming a solar-type star and an equatorial transit.  ftdur sets
    # how far from this ideal case we will search.
ftdur = array([0.75,1.25])
ng = len(PGrid)
t -= t[0]
f -= np.mean(f)
ff = f**2
s2nGrid = np.zeros(ng) - 1
phGrid = np.zeros(ng)
if retph:
phGridl = []
s2nGridl = []
s2nFAP = array([])
tbase = t.ptp()
ntrials = 0. # Counter for the total number of (P,ph) tests
# calculate maximum eggress
npad = ftdur[1] * a2tdur( P2a( max(PGrid) ) ) / cad
npad *= 2 #
for i in range(ng):
# Phase fold the data according to the trial period.
P = PGrid[i]
ntmax = np.ceil(tbase/P)
ph = np.mod(t/P,1.)
# For this particular period, this is the expected transit
# duration.
tdur = a2tdur( P2a(P) )
phdur = tdur / P
# We bin so there are bpt bins per transit
# Force there to be an integer number of bins per period
bwd = tdur / bpt
nbP = np.ceil(P/bwd)
bwd = P/nbP
bins = np.linspace(0,ntmax*P,nbP*ntmax+1)
# How many points do we expect in our transit?
fb = ntrans(tbase,P,.9)*(bwd/cad)
# Calculate the following quantities in each bin.
# 1. sum of f
# 2. sum of f**2
# 3. number of points in each bin.
sb,be = np.histogram(t,weights=f,bins=bins)
ssb,be = np.histogram(t,weights=ff,bins=bins)
cb,be = np.histogram(t,bins=bins)
# Reshape arrays. This is the phase folding
sb = sb.reshape(ntmax, nbP).transpose()
ssb = ssb.reshape(ntmax,nbP).transpose()
cb = cb.reshape(ntmax, nbP).transpose()
# We only need to retain transit information about the
# counts. So we can sum the sb, and ssb and perform 1 dim
# convolution
sb = sb.sum(axis=1)
ssb = ssb.sum(axis=1)
# We compute the sums of sb, ssb, and cb over a trial transit
# width using a convolution kernel. We will let that kernel
# have a small range of widths, which will let us be sensitive
# to transits of different lengths.
kwdMi = int( ftdur[0] * bpt )
kwdMa = int( ftdur[1] * bpt ) + 1
kwdArr = np.arange(kwdMi,kwdMa+1)
# The number of transit durations we'll try
ntdur = kwdMa-kwdMi+1
s2n = np.empty((nbP,ntdur))
for j in range(ntdur):
# Construct kernel
kwd = kwdArr[j]
kern = np.zeros((kwdMa,ntmax))
kern[:kwd,0] = 1
            # Given the cadence and the box width, we expect a
            # certain number of points in each box
nExp = kwd*bwd / cad
# Sum the following quantities in transit
# 1. f, data values
# 2. ff, square of the data values
# 3. n, number of data points.
st = ndimage.convolve(sb ,kern[::,0] , mode='wrap')
sst = ndimage.convolve(ssb,kern[::,0] , mode='wrap')
# Number of points in box of kwd*bwd
nBox = ndimage.convolve(cb ,kern,mode='wrap')
# Number of points in box after folding
nfBox = nBox.sum(axis=1)
boolFill = (nBox > nExp * fillFact).astype(int)
nTrans = boolFill.sum(axis=1)
idGap = np.where(nTrans < ntMi)[0]
# Average depth
df = st / nfBox
            # Standard deviation of the points in transit
sigma = np.sqrt( sst / nfBox - df**2 )
s2n[::,j] = -df / (sigma / np.sqrt(nfBox) )
s2n[idGap,j] = 0
# Compute the maximum over trial phases
s2nFlat = s2n.max(axis=1)
idMa = np.nanargmax(s2nFlat)
s2nGrid[i] = s2nFlat[idMa]
phGrid[i] = idMa / nbP
if retph:
            phGridl.append(np.linspace(0, len(df)/nbP, len(df)))  # nbP bins per period ('nb' was undefined)
s2nGridl.append(s2nFlat)
if retph:
return phGridl,s2nGridl
else:
d = {'phGrid':phGrid,'s2nGrid':s2nGrid,'PGrid':PGrid}
return d
def grid(tbase,ftdurmi,Pmin=100.,Pmax=None,Psmp=0.5):
"""
Make a grid in (P,ph) for BLS to search over.
ftdurmi - Minimum fraction of tdur that we'll look for.
    Pmin - minimum period in grid
Pmax - Maximum period. Defaults to tbase/2, the maximum period
that has a possibility of having 3 transits
phsmp - How finely to sample phase (in units of minimum tdur
allowed)
Psmp - How finely to sample period? The last transit of the
latest trial period of neighboring periods must only be off by a
fraction of a transit duration.
"""
if Pmax == None:
Pmax = tbase/2.
P,ph = array([]),array([])
P0 = Pmin
while P0 < Pmax:
tdur = a2tdur( P2a(P0) ) # Expected transit time.
tdurmi = ftdurmi * tdur
P = np.append( P, P0 )
# Threshold is tighter because of accordian effect.
dP = Psmp * tdurmi #/ (nt -1)
P0 += dP
return P
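# Illustrative usage sketch (synthetic light curve; values assumed): build a
# period grid with grid() and feed it to blsw() to get the S/N spectrum:
#     t = np.arange(0, 90, cad)              # 90 days at 30-minute cadence
#     f = np.random.normal(0, 1e-4, t.size)  # flat light curve plus noise
#     PGrid = grid(t.ptp(), 0.75, Pmin=5., Pmax=40.)
#     d = blsw(t, f, PGrid)                  # d['PGrid'], d['phGrid'], d['s2nGrid']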
def fap(t0nl, f0nl):
    """
    Estimate the false alarm probability by fitting the s2n PDF and
    extrapolating to the high S/N regime.
    """
    nb = 20
    # Histogram the values between s2n 3 and 5
    hist, bin_edges = np.histogram(s2nFAP, range=[0, 20], bins=nb)
    x = (bin_edges[:-1] + bin_edges[1:]) / 2
    id = np.where((x > 3) & (x < 5))[0]
    if len(np.where(hist[id] < 10)[0]) != 0:
        sys.stderr.write('Not enough points to fit PDF\n')
        print x[id], hist[id]
        return None
    p = np.polyfit(x[id], np.log10(hist[id]), 1)
    s2nfit = np.polyval(p, s2n)
    s2nfit = 10**s2nfit
    return s2nfit
def eblspro(tl,fl,PGrid,i):
sys.stderr.write("%i\n" % i)
return ebls.blsw(tl,fl,PGrid)
|
py | 1a3ebc25eb62e1ec7ff6d163b086bb94a2d72bc6 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import classproperty, overrides
from ._nucleotide_sequence import NucleotideSequence
from ._iupac_sequence import IUPACSequence
class RNA(NucleotideSequence):
"""Store RNA sequence data and optional associated metadata.
Only characters in the IUPAC RNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the RNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC RNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC RNA characters.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
complement_map
See Also
--------
DNA
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import RNA
>>> s = RNA('ACCGAAU')
>>> s
RNA('ACCGAAU', length=7, has_metadata=False, has_positional_metadata=False)
Convert lowercase characters to uppercase:
>>> s = RNA('AcCGaaU', lowercase=True)
>>> s
RNA('ACCGAAU', length=7, has_metadata=False, has_positional_metadata=False)
"""
@classproperty
@overrides(NucleotideSequence)
def complement_map(cls):
comp_map = {
'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N'
}
comp_map.update({c: c for c in cls.gap_chars})
return comp_map
@classproperty
@overrides(IUPACSequence)
def nondegenerate_chars(cls):
return set("ACGU")
@classproperty
@overrides(IUPACSequence)
def degenerate_map(cls):
return {
"R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
"W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
"H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
}
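# Illustrative usage sketch (assumes the complement()/reverse_complement()
# methods inherited from NucleotideSequence):
#     RNA('ACCGAAU').complement()           # -> RNA('UGGCUUA')
#     RNA('ACCGAAU').reverse_complement()   # -> RNA('AUUCGGU')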
|
py | 1a3ebd787979100d303b1a63b6451e764141b50a | from .base_token import BaseToken
class LiteralToken(BaseToken):
def __init__(self, command, parent=None) -> None:
super().__init__(command, parent)
def get_type(self) -> str:
return "literal" |
py | 1a3ebdea8c8910829e9b56688dfeddf5977510eb | from time import time
from typing import Callable, Optional, Union, Tuple
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.optimizers import RMSprop
##### Hide lines below until Lab 4
import wandb
from wandb.keras import WandbCallback
##### Hide lines above until Lab 4
from text_recognizer.datasets.base import Dataset
from text_recognizer.models.base import Model
from training.gpu_util_sampler import GPUUtilizationSampler
EARLY_STOPPING = True
GPU_UTIL_SAMPLER = True
def train_model(model: Model, dataset: Dataset, epochs: int, batch_size: int, gpu_ind: Optional[int]=None, use_wandb=False) -> Model:
callbacks = []
if EARLY_STOPPING:
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=4, verbose=1, mode='auto')
callbacks.append(early_stopping)
if GPU_UTIL_SAMPLER and gpu_ind is not None:
gpu_utilization = GPUUtilizationSampler(gpu_ind)
callbacks.append(gpu_utilization)
##### Hide lines below until Lab 4
if use_wandb:
wandb = WandbCallback()
callbacks.append(wandb)
##### Hide lines above until Lab 4
model.network.summary()
t = time()
history = model.fit(dataset, batch_size, epochs, callbacks)
print('Training took {:2f} s'.format(time() - t))
if GPU_UTIL_SAMPLER and gpu_ind is not None:
gpu_utilizations = gpu_utilization.samples
print(f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}')
return model
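# Illustrative usage sketch (SomeDataset/SomeModel are placeholders for
# concrete subclasses of the Dataset/Model base classes imported above):
#     dataset = SomeDataset()
#     model = SomeModel()
#     model = train_model(model, dataset, epochs=3, batch_size=32, gpu_ind=None)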
|
py | 1a3ec0097e104b371a7115ab47f218508b7b2b42 | """
The unit test module for AutoTVM dialect.
"""
# pylint:disable=missing-docstring, redefined-outer-name, invalid-name
# pylint:disable=unused-argument, unused-import, wrong-import-position, ungrouped-imports
import argparse
import glob
import os
import tempfile
from copy import deepcopy
import json
import mock
import numpy as np
import pytest
from moto import mock_dynamodb2
from lorien.util import is_dialect_enabled
if not is_dialect_enabled("autotvm"):
pytest.skip("AutoTVM dialect is not available", allow_module_level=True)
from tvm import autotvm
from tvm.autotvm.measure import MeasureInput, MeasureResult
from tvm.autotvm.task.space import ConfigEntity
from lorien.database.table import create_table
from lorien.dialect.tvm_dial.autotvm_dial.job import (
AutoTVMJob,
AutoTVMJobConfigs,
create_autotvm_tuner,
)
from lorien.dialect.tvm_dial.autotvm_dial.extract_from_model import extract_from_models
from lorien.dialect.tvm_dial.autotvm_dial.extract_from_record import extract_from_records
from lorien.dialect.tvm_dial.autotvm_dial.result import AutoTVMRecords, AutoTVMTuneResult
from lorien.dialect.tvm_dial.autotvm_dial.workload import AutoTVMWorkload
from lorien.tune.result import TuneErrorCode
from lorien.util import dump_to_yaml, load_from_yaml
@pytest.fixture
def fixture_autotvm_workload():
# This fixture workload has 429 configs.
workload = AutoTVMWorkload()
workload.task_name = "dense_nopack.x86"
workload.args = [
["TENSOR", [1, 9216], "float32"],
["TENSOR", [4096, 9216], "float32"],
None,
"float32",
]
workload.lib = "topi"
workload.target = "llvm"
return workload
def gen_x86_conv2d_log_record(target, n_config, data, weight, stride, padding, dilation):
records = []
# Generate configs for 3 different data layouts to test the commit mechanism.
layouts = [(1, 8), (2, 16), (4, 4)]
assert n_config % len(layouts) == 0
# Generate records to mimic tuning logs.
inp = [
target,
"conv2d_NCHWc.x86",
[
["TENSOR", data, "float32"],
["TENSOR", weight, "float32"],
(stride, stride),
(padding, padding, padding, padding),
(dilation, dilation),
"NCHW",
"NCHW",
"float32",
],
{},
]
latencies = np.random.uniform(1e-6, 1e-5, n_config)
for idx in range(0, n_config, len(layouts)):
for sid, layout in enumerate(layouts):
entity = [
["tile_ic", "sp", [-1, layout[0]]],
["tile_oc", "sp", [-1, layout[1]]],
["tile_ow", "sp", [-1, 1]],
["unroll_kw", "ot", False],
]
records.append(
{
"input": inp,
"config": {"index": idx, "code_hash": "", "entity": entity},
"result": [[latencies[idx + sid]], 0, 0, idx + sid],
"version": 0.2,
"tvm_version": "0.7.dev1",
}
)
return records
def gen_cuda_conv2d_log_record(n_config, data, weight, stride, padding, dilation):
records = []
# Generate records to mimic tuning logs.
inp = [
"cuda",
"conv2d_nchw.cuda",
[
["TENSOR", data, "float32"],
["TENSOR", weight, "float32"],
(stride, stride),
(padding, padding, padding, padding),
(dilation, dilation),
"float32",
],
{},
]
latencies = np.random.uniform(1e-6, 1e-5, n_config)
for idx in range(n_config):
entity = [
["tile_f", "sp", [-1, 1, 1, idx + 1]],
["tile_y", "sp", [-1, 1, 1, 1]],
["tile_x", "sp", [-1, 1, 1, 1]],
["tile_rc", "sp", [-1, 1]],
["tile_ry", "sp", [-1, 1]],
["tile_rx", "sp", [-1, 1]],
["auto_unroll_max_step", "ot", 0],
["unroll_explicit", "ot", 0],
]
records.append(
{
"input": inp,
"config": {"index": idx, "code_hash": "", "entity": entity},
"result": [[latencies[idx]], 0, 0, idx],
"version": 0.2,
"tvm_version": "0.7.dev1",
}
)
return records
def gen_dense_log_record_w_cblas(target, n_config, shape_a, shape_b):
records = []
# Generate records to mimic tuning logs.
assert n_config > 1, "Must have at least one non-vendor library record"
n_config -= 1
inp = [
target,
"dense_pack.x86",
[["TENSOR", shape_a, "float32"], ["TENSOR", shape_b, "float32"], None, "float32"],
{},
]
latencies = np.random.uniform(1e-6, 1e-5, n_config)
for idx in range(n_config):
entity = [
["tile_y", "sp", [-1, 1, idx + 1]],
["tile_x", "sp", [-1, 1, 1]],
["tile_k", "sp", [-1, 1]],
]
records.append(
{
"input": inp,
"config": {"index": idx, "code_hash": "", "entity": entity},
"result": [[latencies[idx]], 0, 0, idx],
"version": 0.2,
"tvm_version": "0.7.dev1",
}
)
# Add one vendor library record.
inp = [
target,
"dense_cblas.x86",
[["TENSOR", shape_a, "float32"], ["TENSOR", shape_b, "float32"], None, "float32"],
{},
]
records.append(
{
"input": inp,
"config": {"index": 0, "code_hash": "", "entity": []},
"result": [[5e-7], 0, 0, 0],
"version": 0.2,
"tvm_version": "0.7.dev1",
}
)
return records
def test_workload():
# pylint:disable=missing-docstring, redefined-outer-name
workload = AutoTVMWorkload()
workload.target = "cuda -model=v100 -libs=cublas"
# Test invalid arguments caused task creation failure
workload.args = [[1, 3, 224, 224], [32, 3, 3, 3]]
with pytest.raises(RuntimeError):
workload.to_task()
workload.args = [
["TENSOR", [1, 3, 224, 224], "float32"],
["TENSOR", [32, 3, 3, 3], "float32"],
]
# Test missing task definition
with pytest.raises(RuntimeError):
workload.to_task()
workload.task_name = "conv2d_nchw_winograd.cuda"
    # Test invalid workload for the TOPI schedule. conv2d winograd on CUDA only accepts stride 1.
workload.args += [[2, 2], [1, 1, 1, 1], [1, 1], "float32"]
with pytest.raises(RuntimeError):
workload.to_task()
workload.args[-4] = [1, 1]
task = workload.to_task()
assert isinstance(workload.to_job(), AutoTVMJob)
# Test load from task. -libs should be removed from target since conv2d_nchw_winograd.cuda
# does not depend on it.
workload_from_task = AutoTVMWorkload.from_task(task)
assert (
workload_from_task.target
== "cuda -keys=cuda,gpu -max_num_threads=1024 -model=v100 -thread_warp_size=32"
)
# Other than that should be identical.
workload_from_task.target = workload.target
assert workload == workload_from_task
task.target = None
with pytest.raises(RuntimeError):
AutoTVMWorkload.from_task(task)
# Test dump and load from YAML
workload_str = dump_to_yaml(workload)
assert workload == load_from_yaml(workload_str, AutoTVMWorkload)
workload2 = deepcopy(workload)
# Different argument values.
workload2.args[-2] = [0, 0]
assert workload > workload2
# Different argument numbers.
workload2.args = workload2.args[:-1]
assert workload > workload2
# Different target.
workload2.target = "cuda -model=zz"
assert workload < workload2
# Test loading invalid workload
with pytest.raises(RuntimeError):
load_from_yaml(workload_str.replace("TENSOR", ""), AutoTVMWorkload)
# Test mutation
workload = AutoTVMWorkload()
workload.task_name = "conv2d_NCHWc.x86"
workload.target = "llvm"
workload.args = [
["TENSOR", [1, 3, 224, 224], "float32"],
["TENSOR", [32, 3, 3, 3], "float32"],
[1, 1],
[1, 1, 1, 1],
[1, 1],
"NCHW",
"NCHW",
"float32",
]
# A rule to mutate batch size and channel
rules = {(0, 1, 0): "[1, 2, 3, 4]", (0, 1, 1): "[v, v * 2, v * 4]"}
mutated = workload.mutate(rules)
assert len(mutated) == 12
# Wrong index
rules = {(0, 1, 0, 0): "[1, 2, 3, 4]"}
with pytest.raises(RuntimeError):
workload.mutate(rules)
# Wrong description
rules = {(0, 1, 0): "[a, a * 2]"}
with pytest.raises(RuntimeError):
workload.mutate(rules)
def test_create_autotvm_tuner(fixture_autotvm_workload):
task = fixture_autotvm_workload.to_task()
create_autotvm_tuner("xgb", task)
create_autotvm_tuner("ga", task)
create_autotvm_tuner("random", task)
create_autotvm_tuner("gridsearch", task)
with pytest.raises(RuntimeError):
create_autotvm_tuner("wrong-tuner", task)
@mock_dynamodb2
def test_job_n_configs_n_commit_n_query(mocker, fixture_autotvm_workload):
table_name = "lorien-test"
arn = create_table(table_name, region_name="us-west-2")
workload = fixture_autotvm_workload
job = workload.to_job()
assert isinstance(job, AutoTVMJob)
assert not job.is_target_compatible("cuda")
task = workload.to_task()
configs = argparse.Namespace(
tuner="random",
ntrial=4,
test=1,
repeat=1,
min=400,
db="{ region_name: us-west-2 }",
commit_table_name=table_name,
commit_nbest=1,
commit_workload=False,
commit_log_to=None,
)
job_configs = job.create_job_configs(configs)
job_configs.commit_options["table-arn"] = arn
assert isinstance(job_configs, AutoTVMJobConfigs)
assert job_configs.tune_options
assert job_configs.measure_options
assert job_configs.check_tvm_build_config()
# Localize with RPC runner
rpc_config = argparse.Namespace(device="test-device", runner_port=188875)
job_configs.localize("llvm", configs=rpc_config)
with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-commit-") as temp_dir:
# Localize with local runner
job_configs = job.create_job_configs(configs)
job_configs.tune_options["tune_dir"] = temp_dir
job_configs.commit_options["table-arn"] = arn
job_configs.tvm_build_config = {}
job_configs.localize("llvm")
def mock_tuner_no_valid(_, task):
class MockTuner:
def tune(self, n_trial, early_stopping, measure_option, callbacks):
for _ in range(2):
res = mock.MagicMock()
res.error_no = 2
callbacks[1](None, [None], [res])
return MockTuner()
mocker.patch(
"lorien.dialect.tvm_dial.autotvm_dial.job.create_autotvm_tuner"
).side_effect = mock_tuner_no_valid
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert job.result.error_code == TuneErrorCode.NO_VALID_RESULT
def mock_tuner(_, task):
class MockTuner:
def __init__(self, task):
self.task = task
def tune(self, n_trial, early_stopping, measure_option, callbacks):
# Write to log file to test commit
inp = MeasureInput("llvm", self.task, ConfigEntity(0, "", {}, []))
ret = MeasureResult([10], 0, 20, 0)
callbacks[0](None, [inp], [ret])
inp = MeasureInput("llvm", self.task, ConfigEntity(1, "", {}, []))
ret = MeasureResult([1e8], 2, 20, 0)
callbacks[0](None, [inp], [ret])
# Update metadata
res = mock.MagicMock()
res.error_no = 0
res.costs = [1, 1, 1]
inp = mock.MagicMock()
inp.task = mock.MagicMock()
inp.task.flop = 1e9
callbacks[1](None, [inp], [res])
return MockTuner(task)
mocker.patch(
"lorien.dialect.tvm_dial.autotvm_dial.job.create_autotvm_tuner"
).side_effect = mock_tuner
# Do not commit
job.tune(job_configs.tune_options, job_configs.measure_options, commit_options=None)
assert job.result.error_code == TuneErrorCode.NORMAL
assert "tune_logs" in job.result.metadata
# Success
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert job.result.error_code == TuneErrorCode.NORMAL
assert "tune_logs" not in job.result.metadata
# Test failed to localize
mock_check_tvm_build_config = mock.MagicMock()
mock_check_tvm_build_config.return_value = False
job_configs.check_tvm_build_config = mock_check_tvm_build_config
with pytest.raises(RuntimeError):
job_configs.localize("llvm")
log_file = os.path.join(temp_dir, "tune.log")
inps = [
MeasureInput("llvm", task, ConfigEntity(1, "", {}, [])),
MeasureInput("llvm", task, ConfigEntity(2, "", {}, [])),
]
ress = [MeasureResult([1e8], 2, 20, 0), MeasureResult([1e2], 0, 20, 0)]
autotvm.callback.log_to_file(log_file)(None, inps, ress)
# Add other records to test the filter.
with open(log_file, "a") as filep:
records = gen_dense_log_record_w_cblas(
"llvm -mcpu=core-avx2 -libs=cblas", 5, [100, 1024], [256, 1024]
)
for record in records:
filep.write("{}\n".format(json.dumps(record)))
records = AutoTVMTuneResult.create_records_by_workloads(log_file, 1, workload)
assert len(records) == 1
assert records[0].target_key == "llvm -keys=cpu -link-params=0", records[0].target_key
assert records[0].alter_key == "llvm_cpu", records[0].alter_key
assert ( # pylint: disable=line-too-long
records[0].workload_key
== "dense_nopack.x86#_TENSOR__1_9216__float32_#_TENSOR__4096_9216__float32_#None#float32"
), records[0].workload_key
job.result.commit_tuning_log(
workload, log_file, table_name, nbest=1, region_name="us-west-2"
)
job.result.commit_tuning_log(None, log_file, table_name, nbest=1, region_name="us-west-2")
records = AutoTVMRecords(task.target, workload.get_workload_key())
records.query(table_name, region_name="us-west-2")
assert len(records) == 1
records = AutoTVMRecords(task.target, workload.get_workload_key())
records.query(table_name, use_alter_key=True, region_name="us-west-2")
assert len(records) == 1
# Do not provide workload key to query all records with the same target
records = AutoTVMRecords("llvm", workload_key=None)
records.query(table_name, region_name="us-west-2")
assert len(records) == 1
def test_extract_from_model():
configs = argparse.Namespace(
gcv=["alexnet", "alexnet: { data: [1, 3, 224, 224]}"],
target=["llvm -libs=cblas"],
tf=[],
tflite=[],
onnx=[],
keras=[],
torch=[],
mxnet=[],
)
workloads = extract_from_models(configs)
assert len(workloads) == 14, "\nWorkloads:\n%s" % "\n".join([str(wkl) for wkl in workloads])
# Test failure.
configs = argparse.Namespace(
gcv=["alexnet_wrong_name"],
target=["llvm"],
tf=[],
tflite=[],
onnx=[],
keras=[],
torch=[],
mxnet=[],
)
workloads = extract_from_models(configs)
assert len(workloads) == 0
@mock_dynamodb2
def test_extract_from_record(mocker):
# Mock a table.
records = gen_x86_conv2d_log_record(
"llvm -mcpu=core-avx2 -libs=cblas", 6, [1, 1024, 32, 32], [16, 1024, 3, 3], 1, 1, 1
)
records += gen_dense_log_record_w_cblas(
"llvm -mcpu=core-avx2 -libs=cblas", 5, [100, 1024], [256, 1024]
)
table_name = "lorien-test"
with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-layout-") as temp_dir:
create_table(table_name, region_name="us-west-2")
log_file = "{}/fake.log".format(temp_dir)
with open(log_file, "w") as filep:
for record in records:
filep.write("{}\n".format(json.dumps(record)))
AutoTVMTuneResult().commit_tuning_log(None, log_file, table_name, region_name="us-west-2")
# Test layout transform workload generation.
configs = argparse.Namespace(
table_name=table_name,
db='{ "region_name": "us-west-2" }',
target=["llvm"],
ignore_target_attrs=False,
)
# The target "llvm" does not match "llvm -mcpu=core-avx2" so it should get nothing
# unless we enable ignore-target-attrs.
assert len(extract_from_records(configs)) == 0
# "gen_x86_conv2d_log_record" generates 3 layouts, but one of them has the same
    # input and output layout so it should be ignored when generating layout transform workloads.
# In addition, all records from "gen_dense_log_record_w_cblas" should be ignored because layout
# transform does not support dense.
configs.ignore_target_attrs = True
assert len(extract_from_records(configs)) == 2
# Intend to fail all task creations.
mocker.patch(
"lorien.dialect.tvm_dial.autotvm_dial.extract_from_record.autotvm.task.create"
).side_effect = Exception()
assert not extract_from_records(configs)
def test_gen_feature():
with tempfile.TemporaryDirectory(prefix="lorien-test-autotvm-feature-") as temp_dir:
log_dir = os.path.join(temp_dir, "logs")
os.mkdir(log_dir)
# Generate the first log file, which includes conv2d_NCHWc.x86
log_file = os.path.join(log_dir, "fake1.log")
with open(log_file, "w") as filep:
records = gen_x86_conv2d_log_record(
"llvm -mcpu=core-avx2", 6, [1, 1024, 32, 32], [16, 1024, 3, 3], 1, 1, 1
)
failed_record = deepcopy(records[0])
failed_record["result"][1] = 1 # let error code be non-zero.
records.append(failed_record)
for record in records:
filep.write("{}\n".format(json.dumps(record)))
# Generate the second log file, which includes dense_cblas.x86 and dense_pack.x86
log_file = os.path.join(log_dir, "fake2.log")
with open(log_file, "w") as filep:
records = gen_dense_log_record_w_cblas(
"llvm -mcpu=core-avx2", 5, [100, 1024], [256, 1024]
)
for record in records:
filep.write("{}\n".format(json.dumps(record)))
feature_dir = os.path.join(temp_dir, "features")
AutoTVMTuneResult.gen_features(log_dir, feature_dir)
# The lock files should be removed.
assert not glob.glob("{}/**/*.lock".format(feature_dir), recursive=True)
def check_helper(name, n_data, n_numeric_features, n_category_features):
"""Check dumped feature files."""
csv_file = os.path.join(feature_dir, "{}.csv".format(name))
meta_file = os.path.join(feature_dir, "{}.meta".format(name))
assert os.path.exists(csv_file), "Missing %s" % csv_file
assert os.path.exists(meta_file), "Missing %s" % meta_file
with open(csv_file, "r") as filep:
features = filep.readline().replace("\n", "").split(",")
assert len(features) == n_numeric_features + n_category_features + 1
assert len(filep.read().split("\n")) == n_data + 1
with open(meta_file, "r") as filep:
n_numeric = 0
n_category = 0
for line in filep:
tokens = line.split(",")
if tokens[1] == "numeric":
n_numeric += 1
elif tokens[1] == "category":
n_category += 1
assert n_numeric == n_numeric_features
assert n_category == n_category_features
check_helper("conv2d_NCHWc.x86", 7, 22, 6)
check_helper("dense_cblas.x86", 1, 4, 4)
check_helper("dense_pack.x86", 4, 12, 4)
def test_extract_feature(fixture_autotvm_workload):
task = fixture_autotvm_workload.to_task()
config_dict = {
"index": 7,
"code_hash": "some_hash",
"entity": [
("tile", "sp", [16, 4]),
("reorder", "re", [0, 2, 1]),
("annotate", "an", "unroll"),
("other", "ot", "auto"),
],
}
config = ConfigEntity.from_json_dict(config_dict)
inp = MeasureInput("llvm", task, config)
features = AutoTVMTuneResult.extract_feature(inp)
expected_features = {
"in_0": 1,
"in_1": 9216,
"in_2": "float32",
"in_3": 4096,
"in_4": 9216,
"in_5": "float32",
"attr_0": None,
"attr_1": "float32",
"sp_tile_0": 16,
"sp_tile_1": 4,
"re_reorder": "0;2;1",
"an_annotate": "unroll",
"ot_other": "auto",
}
assert features == expected_features
|
py | 1a3ec09c8fa5546f187c26ce3916ae8796060ad9 | import java.code_to_gast.java_router as java_router
import javalang
def for_loop_to_gast(node):
"""
Decides what type of for loop it is based on its children
"""
if type(node.control) == javalang.tree.ForControl:
return for_range_to_gast(node)
else:
return for_of_to_gast(node)
def for_range_to_gast(node):
"""
Handle java range loops recursively using the router
"""
gast = {"type": "forRangeStatement"}
gast["body"] = java_router.node_to_gast(node.body)
gast["init"] = java_router.node_to_gast(node.control.init)
gast["test"] = java_router.node_to_gast(node.control.condition)
gast["update"] = java_router.node_to_gast(node.control.update[0])
return gast
def for_of_to_gast(node):
"""
Handle java for of loops that iterate over elements in an array, dictionary, etc.
"""
gast = {"type": "forOfStatement"}
    # TODO: reevaluate how we do variable assignment to account for this type of var assignment
gast["init"] = java_router.node_to_gast(
node.control.var.declarators[0].name)
gast["body"] = java_router.node_to_gast(node.body)
gast["iter"] = java_router.node_to_gast(node.control.iterable.member)
return gast
def while_statement_to_gast(node):
"""
    Handle Java while statements by converting them to gast
"""
gast = {"type": "whileStatement"}
gast["body"] = java_router.node_to_gast(node.body)
gast["test"] = java_router.node_to_gast(node.condition)
return gast
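# Illustrative sketch (hand-written, not produced by running this module) of the
# gast dict these converters aim to build for a Java loop such as
# `while (i < 10) { i++; }`:
#     {"type": "whileStatement",
#      "test": <gast for `i < 10`>,
#      "body": [<gast for `i++`>]}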
|
py | 1a3ec0da296577ece29081617a2097b11efe1269 | """
This file offers the methods to automatically retrieve the graph Hydrogenophaga flava NBRC 102514.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HydrogenophagaFlavaNbrc102514(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Hydrogenophaga flava NBRC 102514 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Hydrogenophaga flava NBRC 102514 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HydrogenophagaFlavaNbrc102514",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
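# Illustrative usage sketch: calling the retrieval function downloads (and
# caches) the STRING edge list and returns an ensmallen Graph instance:
#     graph = HydrogenophagaFlavaNbrc102514(directed=False, version="links.v11.5")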
|
py | 1a3ec17d7576ba3abbfb6a9307e43202bd568829 | # Define the validate function
def validate(hand):
    # Add control flow based on the value of hand
    if hand < 0 or hand > 2:
        return False
    else:
        return True
def print_hand(hand, name='Guest'):
    hands = ['Rock', 'Paper', 'Scissors']
    print(name + ' chose: ' + hands[hand])
print('Starting the Rock Paper Scissors game!')
player_name = input('Enter your name: ')
print('Pick a hand: (0: Rock, 1: Paper, 2: Scissors)')
player_hand = int(input('Enter a number (0-2): '))
# Add control flow based on the return value of the validate function
if validate(player_hand):
    print_hand(player_hand, player_name)
else:
    print('Please enter a valid number') |
py | 1a3ec19d7ece24163086967a831579e75964cc5d | import argparse
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
This is the main function for running performance microbenchmark tests.
It also registers existing benchmark tests via Python module imports.
"""
def main():
parser = argparse.ArgumentParser(
description="Run microbenchmarks.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'--tag_filter',
        help='tag_filter can be used to run the shapes which match the tag. (all is used to run all the shapes)',
default='short')
# This option is used to filter test cases to run.
parser.add_argument(
'--operators',
help='Filter tests based on comma-delimited list of operators to test',
default=None)
parser.add_argument(
'--operator_range',
help='Filter tests based on operator_range(e.g. a-c or b,c-d)',
default=None)
parser.add_argument(
'--test_name',
help='Run tests that have the provided test_name',
default=None)
parser.add_argument(
'--list_ops',
help='List operators without running them',
action='store_true')
parser.add_argument(
'--list_tests',
help='List all test cases without running them',
action='store_true')
parser.add_argument(
"--iterations",
help="Repeat each operator for the number of iterations",
type=int
)
parser.add_argument(
"--num_runs",
help="Run each test for num_runs. Each run executes an operator for number of <--iterations>",
type=int,
default=1,
)
parser.add_argument(
"--min_time_per_test",
help="Set the minimum time (unit: seconds) to run each test",
type=int,
default=0,
)
parser.add_argument(
"--warmup_iterations",
help="Number of iterations to ignore before measuring performance",
default=100,
type=int
)
parser.add_argument(
"--omp_num_threads",
help="Number of OpenMP threads used in PyTorch/Caffe2 runtime",
default=None,
type=int
)
parser.add_argument(
"--mkl_num_threads",
help="Number of MKL threads used in PyTorch/Caffe2 runtime",
default=None,
type=int
)
parser.add_argument(
"--ai_pep_format",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Print result when running on AI-PEP"
)
parser.add_argument(
"--use_jit",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Run operators with PyTorch JIT mode"
)
parser.add_argument(
"--forward_only",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Only run the forward path of operators"
)
parser.add_argument(
'--framework',
help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
default="Caffe2,PyTorch")
parser.add_argument(
'--device',
help='Run tests on the provided architecture (cpu, cuda)',
default='None')
args, _ = parser.parse_known_args()
if args.omp_num_threads:
# benchmark_utils.set_omp_threads sets the env variable OMP_NUM_THREADS
# which doesn't have any impact as C2 init logic has already been called
# before setting the env var.
# In general, OMP_NUM_THREADS (and other OMP env variables) needs to be set
# before the program is started.
# From Chapter 4 in OMP standard: https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
# "Modifications to the environment variables after the program has started,
# even if modified by the program itself, are ignored by the OpenMP implementation"
benchmark_utils.set_omp_threads(args.omp_num_threads)
if benchmark_utils.is_pytorch_enabled(args.framework):
torch.set_num_threads(args.omp_num_threads)
if args.mkl_num_threads:
benchmark_utils.set_mkl_threads(args.mkl_num_threads)
benchmark_core.BenchmarkRunner(args).run()
if __name__ == "__main__":
main()
|
py | 1a3ec2305e043705b9de2db9bda449e352c7b264 | #INSERTION SORT
def insertion_sort(array):
# We start from 1 since the first element is trivially sorted
for index in range(1, len(array)):
currentValue = array[index]
currentPosition = index
while currentPosition > 0 and array[currentPosition - 1] > currentValue:
array[currentPosition] = array[currentPosition -1]
currentPosition = currentPosition - 1
array[currentPosition] = currentValue
print("array now : ")
print(array)
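# Quick illustrative check (hypothetical input): insertion_sort([5, 2, 4, 1])
# rearranges the list in place to [1, 2, 4, 5].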
if __name__ == '__main__':
array = []
n = int(input("Enter number of elements : "))
for i in range(0, n):
ele = int(input())
array.append(ele) # adding the element
print("sorted array: " + str(insertion_sort(array)))
|
py | 1a3ec2af53b6085a4ffa4ff23d55ff6398c92e92 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Syndicate Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, syndicated will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transaction in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
import os
import time
from test_framework.test_framework import SyndicateTestFramework
from test_framework.util import *
class MempoolPersistTest(SyndicateTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
self.start_node(1) # Give this one a head-start, so we can be "extra-sure" that it didn't load anything later
self.start_node(0)
self.start_node(2)
# Give syndicated a second to reload the mempool
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
# Give syndicated a second to reload the mempool
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)
self.log.debug("Prevent syndicated from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are setting bad permissions on a tmp file called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
pass
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.remove(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
|
py | 1a3ec437cdca64fa26c03a5138cfe665f7f2c426 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from octavia.amphorae.drivers.health import heartbeat_udp
from octavia.common import service
from octavia.controller.healthmanager import health_manager
from octavia.controller.healthmanager import update_db
from octavia import version
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def hm_listener():
    # TODO(german): use stevedore to load those drivers
udp_getter = heartbeat_udp.UDPStatusGetter(
update_db.UpdateHealthDb(),
update_db.UpdateStatsDb())
while True:
udp_getter.check()
def hm_health_check():
hm = health_manager.HealthManager()
while True:
hm.health_check()
def main():
service.prepare_service(sys.argv)
gmr.TextGuruMeditation.setup_autorun(version)
processes = []
hm_listener_proc = multiprocessing.Process(name='HM_listener',
target=hm_listener)
processes.append(hm_listener_proc)
hm_health_check_proc = multiprocessing.Process(name='HM_health_check',
target=hm_health_check)
processes.append(hm_health_check_proc)
LOG.info("Health Manager listener process starts:")
hm_listener_proc.start()
LOG.info("Health manager check process starts:")
hm_health_check_proc.start()
try:
for process in processes:
process.join()
except KeyboardInterrupt:
LOG.info("Health Manager existing due to signal")
hm_listener_proc.terminate()
hm_health_check_proc.terminate()
|
py | 1a3ec4fb593ccfed1f74af06582dc271cde9d24c | from pipeline.interface.commandline import CommandlineInterface
from pipeline.level.controller import LevelController
from optparse import OptionParser
import sys
class LevelCommandlineInterface(CommandlineInterface):
name = "Level"
def make(self, args):
controller = LevelController()
controller.make(args)
def set(self, args):
controller = LevelController()
controller.set(args)
def remove(self, args):
controller = LevelController()
controller.remove(args)
def list(self, args):
controller = LevelController()
list_ = controller.list(args)
self.view = True
self.levels = list_
|
py | 1a3ec50c83bb7d89d714ed75e59c2af519c45b93 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
class ChannelType(object):
"""
Define if channel type is input or output.
These values must be set according to Bpod firmware specification.
"""
#: Input channel
INPUT = 1
#: Output channel
OUTPUT = 2
class ChannelName(object):
"""
Available channel names.
These values must be set according to Bpod firmware specification.
"""
#: Output channel with PWM support (e.g. Led)
PWM = "PWM"
#: Output channel for connecting a valve
VALVE = "Valve"
#: BNC channel
BNC = "BNC"
#: Wire channel
WIRE = "Wire"
#: Serial channel
SERIAL = "Serial"
#: Flex channel
FLEX = "Flex"
class EventsPositions(object):
"""
"""
def __init__(self):
self.Event_USB = 0 # type: int
self.Event_Port = 0 # type: int
self.Event_BNC = 0 # type: int
self.Event_Wire = 0 # type: int
self.Event_Flex = 0 # type: int
self.globalTimerStart = 0 # type: int
self.globalTimerEnd = 0 # type: int
self.globalTimerTrigger = 0 # type: int
self.globalTimerCancel = 0 # type: int
self.globalCounter = 0 # type: int
self.globalCounterReset = 0 # type: int
self.condition = 0 # type: int
self.jump = 0 # type: int
self.Tup = 0 # type: int
self.output_USB = 0 # type: int
self.output_VALVE = 0 # type: int
self.output_BNC = 0 # type: int
self.output_Wire = 0 # type: int
self.output_PWM = 0 # type: int
self.output_Flex = 0 # type: int
self.analogThreshEnable = 0 # type: int
self.analogThreshDisable = 0 # type: int
def __str__(self):
return (
"Events Positions\n"
"Event_USB: {Event_USB}\n"
"Event_Port: {Event_Port}\n"
"Event_BNC: {Event_BNC}\n"
"Event_Wire {Event_Wire}\n"
"Event_Flex: {Event_Flex}\n"
"globalTimerStart: {globalTimerStart}\n"
"globalTimerEnd: {globalTimerEnd}\n"
"globalTimerTrigger: {globalTimerTrigger}\n"
"globalTimerCancel: {globalTimerCancel}\n"
"globalCounter: {globalCounter}\n"
"globalCounterReset: {globalCounterReset}\n"
"condition: {condition}\n"
"jump: {jump}\n"
"Tup: {Tup}\n"
"output_USB: {output_USB}\n"
"output_VALVE: {output_VALVE}\n"
"output_BNC: {output_BNC}\n"
"output_Wire: {output_Wire}\n"
"output_PWM: {output_PWM}\n"
"output_Flex: {output_Flex}\n"
"analogThreshEnable: {analogThreshEnable}\n"
"analogThreshDisable: {analogThreshDisable}\n"
"".format(
Event_USB=self.Event_USB,
Event_Port=self.Event_Port,
Event_BNC=self.Event_BNC,
Event_Wire=self.Event_Wire,
Event_Flex=self.Event_Flex,
globalTimerStart=self.globalTimerStart,
globalTimerEnd=self.globalTimerEnd,
globalTimerTrigger=self.globalTimerTrigger,
globalTimerCancel=self.globalTimerCancel,
globalCounter=self.globalCounter,
globalCounterReset=self.globalCounterReset,
condition=self.condition,
jump=self.jump,
Tup=self.Tup,
output_USB=self.output_USB,
output_VALVE=self.output_VALVE,
output_BNC=self.output_BNC,
output_Wire=self.output_Wire,
output_PWM=self.output_PWM,
output_Flex=self.output_Flex,
analogThreshEnable=self.analogThreshEnable,
analogThreshDisable=self.analogThreshDisable
)
)
class Channels(object):
"""
Bpod main class
"""
def __init__(self):
self.event_names = []
self.input_channel_names = []
self.output_channel_names = []
self.events_positions = EventsPositions()
def setup_input_channels(self, hardware, modules):
"""
Generate event and input channel names
"""
Pos = 0
nUSB = 0
nUART = 0
nBNCs = 0
nWires = 0
nPorts = 0
nFlex = 0
for i in range(len(hardware.inputs)):
if hardware.inputs[i] == "U":
nUART += 1
module = modules[nUART - 1]
module_name = ""
if module.connected:
module_name = module.name
self.input_channel_names += [module_name]
else:
module_name = "Serial" + str(nUART)
self.input_channel_names += [module_name]
n_module_event_names = len(module.event_names)
for j in range(module.n_serial_events):
if j < n_module_event_names:
self.event_names += [module_name + "_" + module.event_names[j]]
else:
self.event_names += [module_name + "_" + str(j + 1)]
Pos += 1
elif hardware.inputs[i] == "X":
if nUSB == 0:
self.events_positions.Event_USB = Pos
nUSB += 1
self.input_channel_names += ["USB" + str(nUSB)]
loops_n = int(hardware.max_serial_events / (len(modules) + 1))
for j in range(loops_n):
self.event_names += ["SoftCode" + str(j + 1)]
Pos += 1
elif hardware.inputs[i] == "P":
if nPorts == 0:
self.events_positions.Event_Port = Pos
nPorts += 1
self.input_channel_names += ["Port" + str(nPorts)]
self.event_names += [self.input_channel_names[-1] + "In"]
Pos += 1
self.event_names += [self.input_channel_names[-1] + "Out"]
Pos += 1
elif hardware.inputs[i] == "B":
if nBNCs == 0:
self.events_positions.Event_BNC = Pos
nBNCs += 1
self.input_channel_names += ["BNC" + str(nBNCs)]
self.event_names += [self.input_channel_names[-1] + "High"]
Pos += 1
self.event_names += [self.input_channel_names[-1] + "Low"]
Pos += 1
elif hardware.inputs[i] == "W":
if nWires == 0:
self.events_positions.Event_Wire = Pos
nWires += 1
self.input_channel_names += ["Wire" + str(nWires)]
self.event_names += [self.input_channel_names[-1] + "High"]
Pos += 1
self.event_names += [self.input_channel_names[-1] + "Low"]
Pos += 1
elif hardware.inputs[i] == "F":
if nFlex == 0:
self.events_positions.Event_Flex = Pos
# Check if channel is configured for digital input
if hardware.flex_channel_types[nFlex] == 0:
nFlex += 1
self.input_channel_names += ["Flex" + str(nFlex)]
self.event_names += [self.input_channel_names[-1] + "High"]
Pos += 1
self.event_names += [self.input_channel_names[-1] + "Low"]
Pos += 1
# Check if channel is configured for analog input
elif hardware.flex_channel_types[nFlex] == 2:
nFlex += 1
self.input_channel_names += ["Flex" + str(nFlex)]
self.event_names += [self.input_channel_names[-1] + "Trig1"]
Pos += 1
self.event_names += [self.input_channel_names[-1] + "Trig2"]
Pos += 1
# This means the flex channel must be configured as output
else:
self.input_channel_names += ["---"] # Placeholder to maintain appropriate index
self.event_names += ["---"] # Placeholder for "high"/"trig1"
Pos += 1
self.event_names += ["---"] # Placeholder for "low"/"trig2"
Pos += 1
nFlex += 1 # increment to maintain flex_channel_types index
self.events_positions.globalTimerStart = Pos
for i in range(hardware.n_global_timers):
self.event_names += ["GlobalTimer" + str(i + 1) + "_Start"]
Pos += 1
self.events_positions.globalTimerEnd = Pos
for i in range(hardware.n_global_timers):
self.event_names += ["GlobalTimer" + str(i + 1) + "_End"]
self.input_channel_names += ["GlobalTimer" + str(i + 1)]
Pos += 1
self.events_positions.globalCounter = Pos
for i in range(hardware.n_global_counters):
self.event_names += ["GlobalCounter" + str(i + 1) + "_End"]
Pos += 1
self.events_positions.condition = Pos
for i in range(hardware.n_conditions):
self.event_names += ["Condition" + str(i + 1)]
Pos += 1
self.event_names += ["Tup"]
self.events_positions.Tup = Pos
Pos += 1
logger.debug("event_names: %s", self.event_names)
logger.debug("events_positions: %s", self.events_positions)
def setup_output_channels(self, hardware, modules):
"""
Generate output channel names
"""
nUSB = 0
nUART = 0
nVALVE = 0
nBNCs = 0
nWires = 0
nPorts = 0
nFlex = 0
for i in range(len(hardware.outputs)):
if hardware.outputs[i] == "U":
nUART += 1
module = modules[nUART - 1]
module_name = ""
if module.connected:
module_name = module.name
self.output_channel_names += [module_name]
else:
module_name = "Serial" + str(nUART)
self.output_channel_names += [module_name]
elif hardware.outputs[i] == "X":
if nUSB == 0:
self.events_positions.output_USB = len(self.output_channel_names)
nUSB += 1
self.output_channel_names += ["SoftCode"]
elif hardware.outputs[i] == "V":
if nVALVE == 0:
self.events_positions.output_VALVE = len(self.output_channel_names)
nVALVE += 1
self.output_channel_names += ["Valve" + str(nVALVE)] # Assume an SPI shift register mapping bits of a byte to 8 valves
elif hardware.outputs[i] == "B":
if nBNCs == 0:
self.events_positions.output_BNC = len(self.output_channel_names)
nBNCs += 1
self.output_channel_names += ["BNC" + str(nBNCs)]
elif hardware.outputs[i] == "W":
if nWires == 0:
self.events_positions.output_Wire = len(self.output_channel_names)
nWires += 1
self.output_channel_names += ["Wire" + str(nWires)]
elif hardware.outputs[i] == "P":
if nPorts == 0:
self.events_positions.output_PWM = len(self.output_channel_names)
nPorts += 1
self.output_channel_names += ["PWM" + str(nPorts)]
elif hardware.outputs[i] == "F":
if nFlex == 0:
self.events_positions.output_Flex = len(self.output_channel_names)
# Check if channel is configured for digital output
if hardware.flex_channel_types[nFlex] == 1:
nFlex += 1
self.output_channel_names += ["Flex" + str(nFlex) + "DO"]
# Check if channel is configured for analog output
elif hardware.flex_channel_types[nFlex] == 3:
nFlex += 1
self.output_channel_names += ["Flex" + str(nFlex) + "AO"]
# This means the flex channel must be configured as input
else:
self.output_channel_names += ["---"] # placeholder to maintain appropriate index.
nFlex += 1 # increment to maintain the flex_channel_types index
self.output_channel_names += ["GlobalTimerTrig"]
self.events_positions.globalTimerTrigger = len(self.output_channel_names) - 1
self.output_channel_names += ["GlobalTimerCancel"]
self.events_positions.globalTimerCancel = len(self.output_channel_names) - 1
self.output_channel_names += ["GlobalCounterReset"]
self.events_positions.globalCounterReset = len(self.output_channel_names) - 1
if hardware.machine_type > 3:
self.output_channel_names += ["AnalogThreshEnable"]
self.events_positions.analogThreshEnable = len(self.output_channel_names) - 1
self.output_channel_names += ["AnalogThreshDisable"]
self.events_positions.analogThreshDisable = len(self.output_channel_names) - 1
logger.debug("output_channel_names: %s", self.output_channel_names)
logger.debug("events_positions: %s", self.events_positions)
def get_event_name(self, event_idx):
"""
:param event_idx:
:return:
"""
try:
event_name = self.event_names[event_idx]
except IndexError:
event_name = "unknown event name"
return event_name
def __str__(self):
buff = "\n****************** EVENTS ******************\n"
for idx, event in enumerate(self.event_names):
buff += "{0: >3} : {1: <24}".format(idx, event)
if ((idx + 1) % 3) == 0 and idx != 0:
buff += "\n"
buff += "\n\n****************** INPUT CHANNELS ******************\n"
for idx, channel in enumerate(self.input_channel_names):
buff += "{0: >3} : {1: <24}".format(idx, channel)
if ((idx + 1) % 3) == 0 and idx != 0:
buff += "\n"
buff += "\n\n****************** OUTPUT CHANNELS ******************\n"
for idx, channel in enumerate(self.output_channel_names):
buff += "{0: >3} : {1: <24}".format(idx, channel)
if ((idx + 1) % 3) == 0 and idx != 0:
buff += "\n"
return "SMA Channels\n" + buff + "\n\n"
|
py | 1a3ec673a9d94d4e7150aedaf10c7353fa212561 | from pydex.core.designer import Designer
import numpy as np
import sobol_seq
"""
Setting : a non-dynamic experimental system with 2 time-invariant control variables
and 1 response.
Problem : design optimal experiment for a order 2 polynomial.
Solution : 3^2 factorial design, varying efforts depending on chosen criterion:
~ D-optimal: well distributed.
~ A-optimal: slight central-focus.
~ E-optimal: strong central-focus.
"""
def simulate(ti_controls, model_parameters):
return np.array([
# constant term
model_parameters[0] +
# linear term
model_parameters[1] * ti_controls[0] +
model_parameters[2] * ti_controls[1] +
# linear-linear terms
model_parameters[3] * ti_controls[0] * ti_controls[1] +
# squared terms
model_parameters[4] * ti_controls[0] ** 2 +
model_parameters[5] * ti_controls[1] ** 2
])
designer = Designer()
designer.simulate = simulate
designer.model_parameters = np.ones(6) # values won't affect design, but still needed
designer.ti_controls_candidates = designer.enumerate_candidates(
bounds=[
[-1, 1],
[-1, 1],
],
levels=[
11,
11,
],
)
designer.start_logging()
designer.initialize(verbose=2) # 0: silent, 1: overview, 2: detailed, 3: very detailed
designer.ti_controls_names = [r"$x_1$", r"$x_2$"]
""" cvxpy optimizers """
package, optimizer = ("cvxpy", "MOSEK")
# package, optimizer = ("cvxpy", "SCS")
# package, optimizer = ("cvxpy", "CVXOPT") # only for A-optimal
""" scipy optimizers, all supported, but many require unconstrained form """
# package, optimizer = ("scipy", "powell")
# package, optimizer = ("scipy", "cg")
# package, optimizer = ("scipy", "tnc")
# package, optimizer = ("scipy", "l-bfgs-b")
# package, optimizer = ("scipy", "bfgs")
# package, optimizer = ("scipy", "nelder-mead")
# package, optimizer = ("scipy", "SLSQP") # supports constrained form
designer.eval_sensitivities(method="central", num_steps=3)
""" designing experiment """
criterion = designer.d_opt_criterion
designer.design_experiment(
criterion=criterion,
package=package,
optimizer=optimizer,
write=False,
)
designer.print_optimal_candidates()
designer.apportion(9)
designer.plot_optimal_efforts()
designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False)
criterion = designer.a_opt_criterion
designer.design_experiment(
criterion=criterion,
package=package,
optimizer=optimizer,
write=False,
)
designer.print_optimal_candidates()
designer.apportion(9)
designer.plot_optimal_efforts()
designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False)
criterion = designer.e_opt_criterion
designer.design_experiment(
criterion=criterion,
package=package,
optimizer=optimizer,
write=False,
)
designer.print_optimal_candidates()
designer.apportion(11)
designer.plot_optimal_efforts()
designer.plot_optimal_controls(non_opt_candidates=True, title=True, write=False)
designer.stop_logging()
designer.show_plots()
|
py | 1a3ec67eee549b81258edf475c5b891f17d90a8b | import logging
from .models import TwitterBotResponseLog, TwitterBotVisitLog
logger = logging.getLogger(__name__)
class LogTwitterbotLinkVisitMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
param = 'twitterbot_log_id'
if param in request.GET:
response_log = TwitterBotResponseLog.objects.get(id=request.GET[param])
logger.info(
f'{self.__class__.__name__} - Someone visit {request.path} from status {response_log.tweet_url}'
)
TwitterBotVisitLog.objects.create(request_path=request.path, response_log=response_log)
response = self.get_response(request)
return response
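# To activate this logger (illustrative dotted path; adjust to the real module
# location), add the class to Django's MIDDLEWARE setting, e.g.
# "myapp.middleware.LogTwitterbotLinkVisitMiddleware".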
|
py | 1a3ec74bf154045a350eb2527f0af167804bdb68 | import pickle
import random
import numpy as np
from soepy.simulate.simulate_python import simulate
from soepy.soepy_config import TEST_RESOURCES_DIR
from development.tests.auxiliary.auxiliary import cleanup
def test1():
"""This test runs a random selection of test regression tests from
our regression test battery.
"""
vault = TEST_RESOURCES_DIR / "regression_vault.soepy.pkl"
with open(vault, "rb") as file:
tests = pickle.load(file)
for i in random.sample(range(0, 100), 10):
(
model_spec_init_dict,
random_model_params_df,
exog_educ_shares,
exog_child_age_shares,
exog_partner_shares,
exog_exper_shares_pt,
exog_exper_shares_ft,
exog_child_info,
exog_partner_arrival_info,
exog_partner_separation_info,
expected_df,
) = tests[i]
exog_educ_shares.to_pickle("test.soepy.educ.shares.pkl")
exog_child_age_shares.to_pickle("test.soepy.child.age.shares.pkl")
exog_child_info.to_pickle("test.soepy.child.pkl")
exog_partner_shares.to_pickle("test.soepy.partner.shares.pkl")
exog_exper_shares_pt.to_pickle("test.soepy.pt.exp.shares.pkl")
exog_exper_shares_ft.to_pickle("test.soepy.ft.exp.shares.pkl")
exog_partner_arrival_info.to_pickle("test.soepy.partner.arrival.pkl")
exog_partner_separation_info.to_pickle("test.soepy.partner.separation.pkl")
calculated_df = simulate(random_model_params_df, model_spec_init_dict)
for col in expected_df.columns.tolist():
np.testing.assert_array_almost_equal(
expected_df[col],
calculated_df[col],
)
cleanup()
|
py | 1a3ec7a40dde3738dcd9dfd31345803a51fc549f | import ctypes
import time, math, random
from random import randint
import win32gui, win32con, win32api
dx=10
def OnPaint(hwnd, msg, wp, lp):
global dx
font=win32gui.LOGFONT()
font.lfFaceName="Consolas"
font.lfHeight=48
# font.lfWidth=font.lfHeight
# font.lfWeight=150
# font.lfItalic=1
# font.lfUnderline=1
hfont=win32gui.CreateFontIndirect(font)
dc, ps=win32gui.BeginPaint(hwnd)
win32gui.SetGraphicsMode(dc, win32con.GM_ADVANCED)
l,t,r,b=win32gui.GetClientRect(hwnd)
br=win32gui.CreateSolidBrush(win32api.RGB(0,0,255))
bitmap=win32gui.CreateBitmap(20,5,4,1,None)
win32gui.SelectObject(dc, bitmap)
win32gui.SelectObject(dc, br)
win32gui.SelectObject(dc, hfont)
win32gui.SetTextColor(dc,win32api.RGB(randint(1,255),randint(1,255),randint(1,255)));
win32gui.DrawText(dc,'hello',-1,(100,100,300,300),0)
win32gui.FillRect(dc,(200+dx,200+dx,100+dx,100+dx),br)
dx=(dx+10)%100
win32gui.EndPaint(hwnd, ps)
return 0
wc = win32gui.WNDCLASS()
wc.lpszClassName = 'win32'
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hbrBackground = win32con.COLOR_WINDOW+1
wndproc={win32con.WM_PAINT:OnPaint}
wc.lpfnWndProc=wndproc
wc.hCursor = win32gui.LoadCursor (None, win32con.IDC_ARROW)
class_atom=win32gui.RegisterClass(wc)
hwnd = win32gui.CreateWindow(class_atom,'hello',
win32con.WS_OVERLAPPEDWINDOW|win32con.WS_VISIBLE,
350,120,640,480, 0, 0, 0, None)
for _ in range(30):
win32gui.InvalidateRect(hwnd,None,True)
win32gui.PumpWaitingMessages()
time.sleep(0.1)
win32gui.DestroyWindow(hwnd)
win32gui.UnregisterClass(class_atom,None)
|
py | 1a3eca029f21fdf01566707f15747fcb7a1cd7e9 | # Generated by Django 3.2.9 on 2021-11-10 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Foods',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('res_name', models.CharField(max_length=30)),
('res_adrs', models.CharField(max_length=50)),
('category', models.CharField(default='', max_length=20)),
('subcategory', models.CharField(default='', max_length=40)),
('price', models.IntegerField()),
('desc', models.CharField(max_length=10)),
('images', models.ImageField(default='', upload_to='foodie/images')),
],
),
]
|
py | 1a3ecbc0c5dcd79439c1bb0124e5a6311b6540f3 | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis, leif strand
# orthologue
# (c) 1998-2020 all rights reserved
#
# externals
import socket
# my interface
from .Channel import Channel
# declaration
class Socket(socket.socket, Channel):
"""
A channel that uses sockets as the communication mechanism
This class captures the part of the {socket} interface that is independent of the type of
socket. The implementation of the remainder of the {Channel} interface is provided by
subclasses.
"""
# types
from ..schemata import inet
# access to the individual channel end points
@property
def inbound(self):
"""
Retrieve the channel end point that can be read
"""
# easy enough
return self
@property
def outbound(self):
"""
Retrieve the channel end point that can be written
"""
# easy enough
return self
# access to the socket properties
@property
def peer(self):
"""
Return the address of my peer, i.e. the remote endpoint of the socket
"""
# get the raw address
address = self.getpeername()
# parse it, decorate it and return it
return self.inet().recognize(family=self.family, address=address)
# interface
def accept(self):
"""
Wait for a connection attempt, build a channel around the socket to the peer, and
return it along with the address of the remote process
"""
# bypass the socket interface because it calls the wrong constructor explicitly
fd, address = self._accept()
# build the channel
channel = type(self)(self.family, self.type, self.proto, fileno=fd)
# build the address
address = self.inet().recognize(family=self.family, address=address)
# adjust the socket flags; see {socket.py} in the standard library for more details
if socket.getdefaulttimeout() is None and self.gettimeout(): channel.setblocking(True)
# return the channel to and the address of the peer process
return channel, address
# meta-methods
def __str__(self):
return "socket to {.peer}".format(self)
# implementation details
__slots__ = () # socket has it, so why not...
# end of file
|
py | 1a3ecc192dbcede20de43ae21aa4de9992d1c0a8 | from rest_framework import viewsets, mixins
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from . import serializers
from .models import Adventure, Author, Room, Artifact, Effect, Monster, Player, PlayerProfile, Hint, ActivityLog
def index(request, path=''):
"""
The home page
"""
return render(request, 'index.html')
def about(request):
"""
The "about" page
"""
return render(request, 'about.html')
def privacy_policy(request):
"""
The "privacy policy" page
"""
return render(request, 'privacy.html')
def main_hall(request):
"""
The container for the "main hall" react app
"""
return render(request, 'main-hall.html')
def adventure(request, slug):
"""
The container for the "core" a.k.a. "adventure" angular app
"""
return render(request, 'adventure.html', {'slug': slug})
def adventure_list(request):
adventures = Adventure.objects.filter(active=True).order_by('name')
return render(request, 'adventure-list.html', {'adventures': adventures})
def manual(request):
return render(request, 'manual.html')
class AuthorViewSet(viewsets.ReadOnlyModelViewSet):
"""
For listing or retrieving authors.
"""
queryset = Author.objects.filter()
serializer_class = serializers.AuthorSerializer
def get_queryset(self):
queryset = self.queryset
return queryset
class AdventureViewSet(viewsets.ReadOnlyModelViewSet):
"""
For listing or retrieving adventure data.
"""
queryset = Adventure.objects.filter(active=True)
serializer_class = serializers.AdventureSerializer
def get_queryset(self):
queryset = Adventure.objects.filter(active=True)
return queryset
def retrieve(self, request, pk=None):
queryset = self.queryset
adv = get_object_or_404(queryset, slug=pk)
serializer = serializers.AdventureSerializer(adv)
return Response(serializer.data)
class RoomViewSet(viewsets.ReadOnlyModelViewSet):
"""
Lists room data for an adventure.
"""
queryset = Room.objects.all()
serializer_class = serializers.RoomSerializer
def get_queryset(self):
adventure_id = self.kwargs['adventure_id']
return self.queryset.filter(adventure__slug=adventure_id)
class ArtifactViewSet(viewsets.ReadOnlyModelViewSet):
"""
Lists artifact data for an adventure.
"""
queryset = Artifact.objects.order_by('artifact_id')
serializer_class = serializers.ArtifactSerializer
def get_queryset(self):
adventure_id = self.kwargs['adventure_id']
return self.queryset.filter(adventure__slug=adventure_id)
class EffectViewSet(viewsets.ReadOnlyModelViewSet):
"""
Lists effect data for an adventure.
"""
queryset = Effect.objects.all()
serializer_class = serializers.EffectSerializer
def get_queryset(self):
adventure_id = self.kwargs['adventure_id']
return self.queryset.filter(adventure__slug=adventure_id)
class MonsterViewSet(viewsets.ReadOnlyModelViewSet):
"""
Lists monster data for an adventure.
"""
queryset = Monster.objects.all().order_by('monster_id')
serializer_class = serializers.MonsterSerializer
def get_queryset(self):
adventure_id = self.kwargs['adventure_id']
return self.queryset.filter(adventure__slug=adventure_id)
class HintViewSet(viewsets.ReadOnlyModelViewSet):
"""
Lists hints for an adventure.
"""
queryset = Hint.objects.all()
serializer_class = serializers.HintSerializer
def get_queryset(self):
adventure_id = self.kwargs['adventure_id']
return self.queryset.filter(Q(adventure__slug=adventure_id) | Q(question="EAMON GENERAL HELP.", edx="E001")).order_by('index')
class PlayerProfileViewSet(viewsets.ModelViewSet):
"""
API endpoints for user data. This is read/write.
"""
serializer_class = serializers.PlayerProfileSerializer
queryset = PlayerProfile.objects.all()
permission_classes = (AllowAny,)
def retrieve(self, request, *args, **kwargs):
pass
def create(self, request, *args, **kwargs):
"""
This is actually an "upsert" for users
"""
social_id = self.request.data['social_id']
request_uuid = self.request.data['uuid']
# create a profile if not found
pl, created = PlayerProfile.objects.get_or_create(social_id=social_id)
db_uuid = pl.uuid
if created:
pl.social_id = social_id
pl.uuid = request_uuid
pl.save()
# look for any player characters with the browser's old UUID, and update them to match the profile's UUID
players = Player.objects.filter(uuid=request_uuid).exclude(uuid=db_uuid)
print("Updating players...")
for p in players:
print("Updating player: " + p.name)
print("Old UUID: " + p.uuid)
print("New UUID: " + db_uuid)
p.uuid = db_uuid
p.save()
serializer = serializers.PlayerProfileSerializer(pl)
return Response(serializer.data)
class PlayerViewSet(viewsets.ModelViewSet):
"""
API endpoints for player data. This is read/write.
"""
queryset = Player.objects.all()
serializer_class = serializers.PlayerSerializer
permission_classes = (AllowAny,)
"""
Override the default query set to filter by the UUID which is passed in the query string.
This prevents people from seeing each other's adventurers.
"""
def get_queryset(self):
uuid = self.request.query_params.get('uuid', None)
if uuid is None:
# in a PUT request the uuid is in the body rather than the query string
uuid = self.request.data.get('uuid', None)
queryset = self.queryset
if uuid is not None:
# filter the list by the UUID provided in the query string
queryset = queryset.filter(uuid=uuid)
else:
# prevent showing all players if no UUID was passed
queryset = queryset.filter(uuid='This will match nothing')
return queryset.order_by('name')
"""
API URL to update a player. Overrides the parent class.
"""
def update(self, request, *args, **kwargs):
# uuid = self.request.query_params.get('uuid', None)
# if uuid is not None:
# raise PermissionError
data = request.data
instance = self.get_object()
# flatten the weapon and spell abilities into the columns Django wants
data['wpn_axe'] = data['weapon_abilities']['1']
data['wpn_bow'] = data['weapon_abilities']['2']
data['wpn_club'] = data['weapon_abilities']['3']
data['wpn_spear'] = data['weapon_abilities']['4']
data['wpn_sword'] = data['weapon_abilities']['5']
# spell abilities. use the "original" values which include skill improvements during the adventure,
# but don't count reduced odds due to caster fatigue.
data['spl_blast'] = data['spell_abilities_original']['blast']
data['spl_heal'] = data['spell_abilities_original']['heal']
data['spl_power'] = data['spell_abilities_original']['power']
data['spl_speed'] = data['spell_abilities_original']['speed']
# to pass validation, need to fix some values on the inventory items
for key, value in enumerate(data['inventory']):
data['inventory'][key]['type'] = int(data['inventory'][key]['type'])
if 'weapon_type' not in data['inventory'][key] or data['inventory'][key]['weapon_type'] == 0:
data['inventory'][key]['weapon_type'] = None
data['inventory'][key]['player'] = instance.id
serializer = self.get_serializer(instance, data=request.data, partial=False)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data)
class LogViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
"""
API endpoints for the logger. This is read/write.
"""
queryset = ActivityLog.objects.all()
serializer_class = serializers.ActivityLogSerializer
permission_classes = (AllowAny,)
|
py | 1a3ecc61f3a2c1b56b9c35b9e216083ad9a3effd | """Base segment definitions.
Here we define:
- BaseSegment. This is the root class for all segments, and is
designed to hold other subsegments.
- UnparsableSegment. A special wrapper to indicate that the parse
function failed on this block of segments and to prevent further
analysis.
"""
from io import StringIO
from cached_property import cached_property
from typing import Any, Callable, Optional, List, Tuple, NamedTuple, Iterator
import logging
from sqlfluff.core.string_helpers import (
frame_msg,
curtail_string,
)
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.match_logging import parse_match_logging
from sqlfluff.core.parser.match_wrapper import match_wrapper
from sqlfluff.core.parser.helpers import (
check_still_complete,
trim_non_code_segments,
)
from sqlfluff.core.parser.matchable import Matchable
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.context import ParseContext
# Instantiate the linter logger (only for use in methods involved with fixing.)
linter_logger = logging.getLogger("sqlfluff.linter")
class FixPatch(NamedTuple):
"""An edit patch for a templated file."""
templated_slice: slice
fixed_raw: str
    # The patch category exists mostly for debugging and explanation rather
    # than for function. It allows traceability of *why* this patch was
    # generated. It has no significance for processing.
patch_category: str
class BaseSegment:
"""The base segment element.
This defines the base element which drives both Lexing, Parsing and Linting.
A large chunk of the logic which defines those three operations are centered
here. Much of what is defined in the BaseSegment is also used by its many
subclasses rather than directly here.
For clarity, the `BaseSegment` is mostly centered around a segment which contains
other subsegments. For segments which don't have *children*, refer to the `RawSegment`
class (which still inherits from this one).
Segments are used both as instances to hold chunks of text, but also as classes
themselves where they function a lot like grammars, and return instances of themselves
    when they match. The many classmethods in this class mostly exist to serve
    that matching purpose.
"""
# `type` should be the *category* of this kind of segment
type = "base"
parse_grammar: Optional[Matchable] = None
# We define the type here but no value. Subclasses must provide a value.
match_grammar: Matchable
comment_seperate = False
optional = False # NB: See the sequence grammar for details
_name: Optional[str] = None
is_meta = False
# Are we able to have non-code at the start or end?
can_start_end_non_code = False
# Can we allow it to be empty? Usually used in combination
# with the can_start_end_non_code.
allow_empty = False
# What other kwargs need to be copied when applying fixes.
additional_kwargs: List[str] = []
def __init__(self, segments, pos_marker=None, name: Optional[str] = None):
# A cache variable for expandable
self._is_expandable = None
# Surrogate name option.
self._surrogate_name = name
if len(segments) == 0:
raise RuntimeError(
"Setting {} with a zero length segment set. This shouldn't happen.".format(
self.__class__
)
)
if hasattr(segments, "matched_segments"):
# Safely extract segments from a match
self.segments = segments.matched_segments
elif isinstance(segments, tuple):
self.segments = segments
elif isinstance(segments, list):
self.segments = tuple(segments)
else:
raise TypeError(f"Unexpected type passed to BaseSegment: {type(segments)}")
if not pos_marker:
# If no pos given, it's the pos of the first segment.
if isinstance(segments, (tuple, list)):
pos_marker = PositionMarker.from_child_markers(
*(seg.pos_marker for seg in segments)
)
else:
raise TypeError(
f"Unexpected type passed to BaseSegment: {type(segments)}"
)
self.pos_marker: PositionMarker = pos_marker
def __eq__(self, other):
# NB: this should also work for RawSegment
return (
# Same class NAME. (could be constructed elsewhere)
self.__class__.__name__ == other.__class__.__name__
and (self.raw == other.raw)
# Both must have a non-null position marker to compare.
and self.pos_marker
and other.pos_marker
# We only match that the *start* is the same. This means we can
            # still effectively construct searches to look for segments.
# This is important for .apply_fixes().
and (
self.pos_marker.start_point_marker()
== other.pos_marker.start_point_marker()
)
)
def __repr__(self):
return f"<{self.__class__.__name__}: ({self.pos_marker})>"
# ################ PRIVATE PROPERTIES
@property
def _comments(self):
"""Returns only the comment elements of this segment."""
return [seg for seg in self.segments if seg.is_type("comment")]
@property
def _non_comments(self):
"""Returns only the non-comment elements of this segment."""
return [seg for seg in self.segments if not seg.is_type("comment")]
# ################ PUBLIC PROPERTIES
@property
def name(self):
"""The name of this segment.
The reason for three routes for names is that some subclasses
might want to override the name rather than just getting
the class name. Instances may also override this with the
_surrogate_name.
Name should be specific to this kind of segment, while `type`
should be a higher level descriptor of the kind of segment.
For example, the name of `+` is 'plus' but the type might be
'binary_operator'.
"""
return self._surrogate_name or self._name or self.__class__.__name__
@property
def is_expandable(self):
"""Return true if it is meaningful to call `expand` on this segment.
We need to do this recursively because even if *this* segment doesn't
need expanding, maybe one of its children does.
Once a segment is *not* expandable, it can never become so, which is
why the variable is cached.
"""
if self._is_expandable is False:
return self._is_expandable
elif self.parse_grammar:
return True
elif self.segments and any(s.is_expandable for s in self.segments):
return True
else:
# Cache the variable
self._is_expandable = False
return False
@cached_property
def is_code(self):
"""Return True if this segment contains any code."""
return any(seg.is_code for seg in self.segments)
@cached_property
def is_comment(self):
"""Return True if this is entirely made of comments."""
return all(seg.is_comment for seg in self.segments)
@cached_property
def is_whitespace(self):
"""Return True if this segment is entirely whitespace."""
return all(seg.is_whitespace for seg in self.segments)
@cached_property
def raw(self):
"""Make a string from the segments of this segment."""
return self._reconstruct()
@cached_property
def raw_upper(self):
"""Make an uppercase string from the segments of this segment."""
return self._reconstruct().upper()
@cached_property
def matched_length(self):
"""Return the length of the segment in characters."""
return sum(seg.matched_length for seg in self.segments)
# ################ STATIC METHODS
@staticmethod
def segs_to_tuple(segs, **kwargs):
"""Return a tuple structure from an iterable of segments."""
return tuple(seg.to_tuple(**kwargs) for seg in segs)
@staticmethod
def _suffix():
"""Return any extra output required at the end when logging.
NB Override this for specific subclasses if we want extra output.
"""
return ""
@staticmethod
def expand(segments, parse_context):
"""Expand the list of child segments using their `parse` methods."""
segs = ()
for stmt in segments:
try:
if not stmt.is_expandable:
parse_context.logger.info(
"[PD:%s] Skipping expansion of %s...",
parse_context.parse_depth,
stmt,
)
segs += (stmt,)
continue
except Exception as err:
parse_context.logger.error(
"%s has no attribute `is_expandable`. This segment appears poorly constructed.",
stmt,
)
raise err
if not hasattr(stmt, "parse"):
raise ValueError(
"{} has no method `parse`. This segment appears poorly constructed.".format(
stmt
)
)
parse_depth_msg = "Parse Depth {}. Expanding: {}: {!r}".format(
parse_context.parse_depth,
stmt.__class__.__name__,
curtail_string(stmt.raw, length=40),
)
parse_context.logger.info(frame_msg(parse_depth_msg))
res = stmt.parse(parse_context=parse_context)
if isinstance(res, BaseSegment):
segs += (res,)
else:
# We might get back an iterable of segments
segs += tuple(res)
# Basic Validation
check_still_complete(segments, segs, ())
return segs
@classmethod
def _position_segments(cls, segments, parent_pos=None):
"""Refresh positions of segments within a span.
This does two things:
- Assign positions to any segments without them.
- Updates the working line_no and line_pos for all
segments during fixing.
New segments are assumed to be metas or insertions
and so therefore have a zero-length position in the
source and templated file.
"""
# If there are no segments, there's no need to reposition.
if not segments:
return segments
# Work out our starting position for working through
if parent_pos:
line_no = parent_pos.working_line_no
line_pos = parent_pos.working_line_pos
# If we don't have it, infer it from the first position
# in this segment that does have a position.
else:
for fwd_seg in segments:
if fwd_seg.pos_marker:
line_no = fwd_seg.pos_marker.working_line_no
line_pos = fwd_seg.pos_marker.working_line_pos
break
else:
linter_logger.warning("SEG: %r, POS: %r", segments, parent_pos)
raise ValueError("Unable to find working position.")
# Use the index so that we can look forward
# and backward.
for idx, segment in enumerate(segments):
# Fill any that don't have a position.
if not segment.pos_marker:
# Can we get a position from the previous?
if idx > 0:
segment.pos_marker = segments[idx - 1].pos_marker.end_point_marker()
# Can we get it from the parent?
elif parent_pos:
segment.pos_marker = parent_pos.start_point_marker()
# Search forward for a following one, if we have to?
else:
for fwd_seg in segments[idx + 1 :]:
if fwd_seg.pos_marker:
segments[
idx
].pos_marker = fwd_seg.pos_marker.start_point_marker()
break
else:
raise ValueError("Unable to position new segment")
# Update the working position.
segment.pos_marker = segment.pos_marker.with_working_position(
line_no,
line_pos,
)
line_no, line_pos = segment.pos_marker.infer_next_position(
segment.raw, line_no, line_pos
)
# If this segment has children, recurse and reposition them too.
if segment.segments:
segment.segments = cls._position_segments(
segment.segments, parent_pos=segment.pos_marker
)
return segments
# ################ CLASS METHODS
@classmethod
def simple(cls, parse_context: ParseContext) -> Optional[List[str]]:
"""Does this matcher support an uppercase hash matching route?
This should be true if the MATCH grammar is simple. Most more
complicated segments will be assumed to overwrite this method
if they wish to be considered simple.
"""
if cls.match_grammar:
return cls.match_grammar.simple(parse_context=parse_context)
else:
# Other segments will either override this method, or aren't
# simple.
return None
@classmethod
def is_optional(cls):
"""Return True if this segment is optional.
This is used primarily in sequence matching, where optional
segments can be skipped.
"""
return cls.optional
@classmethod
def class_is_type(cls, *seg_type):
"""Is this segment class (or its parent) of the given type."""
# Do we match on the type of _this_ class.
if cls.type in seg_type:
return True
# If not, check types of parents.
for base_class in cls.__bases__:
if base_class is object:
break
elif base_class.type in seg_type:
return True
elif base_class.type == "base":
break
return False
@classmethod
def structural_simplify(cls, elem):
"""Simplify the structure recursively so it serializes nicely in json/yaml."""
if isinstance(elem, tuple):
# Does this look like an element?
if len(elem) == 2 and isinstance(elem[0], str):
# This looks like a single element, make a dict
elem = {elem[0]: cls.structural_simplify(elem[1])}
elif isinstance(elem[0], tuple):
# This looks like a list of elements.
keys = [e[0] for e in elem]
# Any duplicate elements?
if len(set(keys)) == len(keys):
# No, we can use a mapping tuple
elem = {e[0]: cls.structural_simplify(e[1]) for e in elem}
else:
# Yes, this has to be a list :(
elem = [cls.structural_simplify(e) for e in elem]
return elem
@classmethod
@match_wrapper(v_level=4)
def match(
cls, segments: Tuple["BaseSegment", ...], parse_context: ParseContext
) -> MatchResult:
"""Match a list of segments against this segment.
Note: Match for segments is done in the ABSTRACT.
When dealing with concrete then we're always in parse.
Parse is what happens during expand.
Matching can be done from either the raw or the segments.
This raw function can be overridden, or a grammar defined
on the underlying class.
"""
# Edge case, but it's possible that we have *already matched* on
        # a previous cycle. We should first check whether this is a case
# of that.
if len(segments) == 1 and isinstance(segments[0], cls):
# This has already matched. Winner.
parse_match_logging(
cls.__name__,
"_match",
"SELF",
parse_context=parse_context,
v_level=3,
symbol="+++",
)
return MatchResult.from_matched(segments)
elif len(segments) > 1 and isinstance(segments[0], cls):
parse_match_logging(
cls.__name__,
"_match",
"SELF",
parse_context=parse_context,
v_level=3,
symbol="+++",
)
# This has already matched, but only partially.
return MatchResult((segments[0],), segments[1:])
if cls.match_grammar:
# Call the private method
with parse_context.deeper_match() as ctx:
m = cls.match_grammar.match(segments=segments, parse_context=ctx)
# Calling unify here, allows the MatchResult class to do all the type checking.
if not isinstance(m, MatchResult):
raise TypeError(
"[PD:{} MD:{}] {}.match. Result is {}, not a MatchResult!".format(
parse_context.parse_depth,
parse_context.match_depth,
cls.__name__,
type(m),
)
)
# Once unified we can deal with it just as a MatchResult
if m.has_match():
return MatchResult(
(cls(segments=m.matched_segments),), m.unmatched_segments
)
else:
return MatchResult.from_unmatched(segments)
else:
raise NotImplementedError(
f"{cls.__name__} has no match function implemented"
)
# ################ PRIVATE INSTANCE METHODS
def _reconstruct(self):
"""Make a string from the segments of this segment."""
return "".join(seg.raw for seg in self.segments)
def _preface(self, ident, tabsize):
"""Returns the preamble to any logging."""
padded_type = "{padding}{modifier}{type}".format(
padding=" " * (ident * tabsize),
modifier="[META] " if self.is_meta else "",
type=self.get_type() + ":",
)
preface = "{pos:20}|{padded_type:60} {suffix}".format(
pos=str(self.pos_marker) if self.pos_marker else "-",
padded_type=padded_type,
suffix=self._suffix() or "",
)
# Trim unnecessary whitespace before returning
return preface.rstrip()
# ################ PUBLIC INSTANCE METHODS
def get_type(self):
"""Returns the type of this segment as a string."""
return self.type
def is_type(self, *seg_type):
"""Is this segment (or its parent) of the given type."""
return self.class_is_type(*seg_type)
def invalidate_caches(self):
"""Invalidate the cached properties.
This should be called whenever the segments within this
segment is mutated.
"""
for key in ["is_code", "is_comment", "raw", "raw_upper", "matched_length"]:
self.__dict__.pop(key, None)
def get_start_point_marker(self):
"""Get a point marker at the start of this segment."""
return self.pos_marker.start_point_marker()
def get_end_point_marker(self):
"""Get a point marker at the end of this segment."""
return self.pos_marker.end_point_marker()
def get_start_loc(self):
"""Get a location tuple at the start of this segment."""
return self.pos_marker.working_loc
def get_end_loc(self):
"""Get a location tuple at the end of this segment."""
return self.pos_marker.working_loc_after(
self.raw,
)
def stringify(self, ident=0, tabsize=4, code_only=False):
"""Use indentation to render this segment and its children as a string."""
buff = StringIO()
preface = self._preface(ident=ident, tabsize=tabsize)
buff.write(preface + "\n")
if not code_only and self.comment_seperate and len(self._comments) > 0:
if self._comments:
buff.write((" " * ((ident + 1) * tabsize)) + "Comments:" + "\n")
for seg in self._comments:
buff.write(
seg.stringify(
ident=ident + 2,
tabsize=tabsize,
code_only=code_only,
)
)
if self._non_comments:
buff.write((" " * ((ident + 1) * tabsize)) + "Code:" + "\n")
for seg in self._non_comments:
buff.write(
seg.stringify(
ident=ident + 2,
tabsize=tabsize,
code_only=code_only,
)
)
else:
for seg in self.segments:
# If we're in code_only, only show the code segments, otherwise always true
if not code_only or seg.is_code:
buff.write(
seg.stringify(
ident=ident + 1,
tabsize=tabsize,
code_only=code_only,
)
)
return buff.getvalue()
def to_tuple(self, code_only=False, show_raw=False, include_meta=False):
"""Return a tuple structure from this segment."""
# works for both base and raw
if show_raw and not self.segments:
result = (self.get_type(), self.raw)
elif code_only:
result = (
self.get_type(),
tuple(
seg.to_tuple(
code_only=code_only,
show_raw=show_raw,
include_meta=include_meta,
)
for seg in self.segments
if seg.is_code and not seg.is_meta
),
)
else:
result = (
self.get_type(),
tuple(
seg.to_tuple(
code_only=code_only,
show_raw=show_raw,
include_meta=include_meta,
)
for seg in self.segments
if include_meta or not seg.is_meta
),
)
return result
def as_record(self, **kwargs):
"""Return the segment as a structurally simplified record.
This is useful for serialization to yaml or json.
kwargs passed to to_tuple
"""
return self.structural_simplify(self.to_tuple(**kwargs))
def raw_list(self):
"""Return a list of raw elements, mostly for testing or searching."""
buff = []
for s in self.segments:
buff += s.raw_list()
return buff
def iter_raw_seg(self):
"""Iterate raw segments, mostly for searching."""
for s in self.segments:
yield from s.iter_raw_seg()
def iter_segments(self, expanding=None, pass_through=False):
"""Iterate raw segments, optionally expanding some chldren."""
for s in self.segments:
if expanding and s.is_type(*expanding):
yield from s.iter_segments(
expanding=expanding if pass_through else None
)
else:
yield s
def iter_unparsables(self):
"""Iterate through any unparsables this segment may contain."""
for s in self.segments:
yield from s.iter_unparsables()
def type_set(self):
"""Return a set of the types contained, mostly for testing."""
typs = {self.type}
for s in self.segments:
typs |= s.type_set()
return typs
def is_raw(self):
"""Return True if this segment has no children."""
return len(self.segments) == 0
def get_child(self, *seg_type):
"""Retrieve the first of the children of this segment with matching type."""
for seg in self.segments:
if seg.is_type(*seg_type):
return seg
return None
def get_children(self, *seg_type):
"""Retrieve the all of the children of this segment with matching type."""
buff = []
for seg in self.segments:
if seg.is_type(*seg_type):
buff.append(seg)
return buff
def select_children(
self,
start_seg: Optional["BaseSegment"] = None,
stop_seg: Optional["BaseSegment"] = None,
select_if: Optional[Callable[["BaseSegment"], Any]] = None,
loop_while: Optional[Callable[["BaseSegment"], Any]] = None,
):
"""Retrieve subset of children based on range and filters.
        Often used by linter rules when generating fixes, e.g. to find
        the whitespace segments between two already-known segments (see the
        illustrative call sketched after this method).
"""
start_index = self.segments.index(start_seg) if start_seg else -1
stop_index = self.segments.index(stop_seg) if stop_seg else len(self.segments)
buff = []
for seg in self.segments[start_index + 1 : stop_index]:
if loop_while and not loop_while(seg):
break
if not select_if or select_if(seg):
buff.append(seg)
return buff
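    # Hedged illustration (not part of the original): a linter rule might use
    # select_children() to grab the whitespace sitting between two known
    # child segments, e.g.
    #   parent.select_children(
    #       start_seg=keyword_seg,
    #       stop_seg=expression_seg,
    #       select_if=lambda seg: seg.is_type("whitespace"),
    #   )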
def recursive_crawl(self, *seg_type, recurse_into=True):
"""Recursively crawl for segments of a given type.
Args:
seg_type: :obj:`str`: one or more type of segment
to look for.
recurse_into: :obj:`bool`: When an element of type "seg_type" is
found, whether to recurse into it.
"""
# Check this segment
if self.is_type(*seg_type):
match = True
yield self
else:
match = False
if recurse_into or not match:
# Recurse
for seg in self.segments:
yield from seg.recursive_crawl(*seg_type, recurse_into=recurse_into)
def path_to(self, other):
"""Given a segment which is assumed within self, get the intermediate segments.
Returns:
:obj:`list` of segments, including the segment we're looking for.
None if not found.
"""
# Return self if we've found the segment.
if self is other:
return [self]
# Are we in the right ballpark?
# NB: Comparisons have a higher precedence than `not`.
if not self.get_start_loc() <= other.get_start_loc() <= self.get_end_loc():
return None
# Do we have any child segments at all?
if not self.segments:
return None
# Check through each of the child segments
for seg in self.segments:
res = seg.path_to(other)
if res:
return [self] + res
return None
def parse(self, parse_context=None, parse_grammar=None):
"""Use the parse grammar to find subsegments within this segment.
A large chunk of the logic around this can be found in the `expand` method.
Use the parse setting in the context for testing, mostly to check how deep to go.
True/False for yes or no, an integer allows a certain number of levels.
Optionally, this method allows a custom parse grammar to be
provided which will override any existing parse grammar
on the segment.
"""
        # Clear the blacklist cache to avoid missteps
if parse_context:
parse_context.blacklist.clear()
# the parse_depth and recurse kwargs control how deep we will recurse for testing.
if not self.segments:
# This means we're a root segment, just return an unmutated self
return self
# Check the Parse Grammar
parse_grammar = parse_grammar or self.parse_grammar
if parse_grammar is None:
# No parse grammar, go straight to expansion
parse_context.logger.debug(
"{}.parse: no grammar. Going straight to expansion".format(
self.__class__.__name__
)
)
else:
# For debugging purposes. Ensure that we don't have non-code elements
            # at the start or end of the segments. They should always be in the middle,
# or in the parent expression.
segments = self.segments
if self.can_start_end_non_code:
pre_nc, segments, post_nc = trim_non_code_segments(segments)
else:
pre_nc = ()
post_nc = ()
if (not segments[0].is_code) and (not segments[0].is_meta):
raise ValueError(
"Segment {} starts with non code segment: {!r}.\n{!r}".format(
self, segments[0].raw, segments
)
)
if (not segments[-1].is_code) and (not segments[-1].is_meta):
raise ValueError(
"Segment {} ends with non code segment: {!r}.\n{!r}".format(
self, segments[-1].raw, segments
)
)
# NOTE: No match_depth kwarg, because this is the start of the matching.
with parse_context.matching_segment(self.__class__.__name__) as ctx:
m = parse_grammar.match(segments=segments, parse_context=ctx)
if not isinstance(m, MatchResult):
raise TypeError(
"[PD:{}] {}.match. Result is {}, not a MatchResult!".format(
parse_context.parse_depth, self.__class__.__name__, type(m)
)
)
# Basic Validation, that we haven't dropped anything.
check_still_complete(segments, m.matched_segments, m.unmatched_segments)
if m.has_match():
if m.is_complete():
# Complete match, happy days!
self.segments = pre_nc + m.matched_segments + post_nc
else:
# Incomplete match.
                    # For now this means the parsing has failed. Let's add the unmatched bit at the
# end as something unparsable.
# TODO: Do something more intelligent here.
self.segments = (
pre_nc
+ m.matched_segments
+ (
UnparsableSegment(
segments=m.unmatched_segments + post_nc,
expected="Nothing...",
),
)
)
elif self.allow_empty and not segments:
                # Very rare edge case: some segments are allowed to be empty apart from non-code
self.segments = pre_nc + post_nc
else:
# If there's no match at this stage, then it's unparsable. That's
# a problem at this stage so wrap it in an unparsable segment and carry on.
self.segments = (
pre_nc
+ (
UnparsableSegment(
segments=segments,
expected=self.name,
), # NB: tuple
)
+ post_nc
)
# Recurse if allowed (using the expand method to deal with the expansion)
parse_context.logger.debug(
"{}.parse: Done Parse. Plotting Recursion. Recurse={!r}".format(
self.__class__.__name__, parse_context.recurse
)
)
parse_depth_msg = "###\n#\n# Beginning Parse Depth {}: {}\n#\n###\nInitial Structure:\n{}".format(
parse_context.parse_depth + 1, self.__class__.__name__, self.stringify()
)
if parse_context.may_recurse():
parse_context.logger.debug(parse_depth_msg)
with parse_context.deeper_parse() as ctx:
self.segments = self.expand(self.segments, parse_context=ctx)
return self
def apply_fixes(self, fixes):
"""Apply an iterable of fixes to this segment.
Used in applying fixes if we're fixing linting errors.
If anything changes, this should return a new version of the segment
rather than mutating the original.
Note: We need to have fixes to apply AND this must have children. In the case
of raw segments, they will be replaced or removed by their parent and
so this function should just return self.
"""
if fixes and not self.is_raw():
# Get a reference to self to start with, but this will rapidly
# become a working copy.
r = self
# Make a working copy
seg_buffer = []
todo_buffer = list(self.segments)
while True:
if len(todo_buffer) == 0:
break
else:
seg = todo_buffer.pop(0)
fix_buff = fixes.copy()
unused_fixes = []
while fix_buff:
f = fix_buff.pop()
# Look for identity not just equality.
# This handles potential positioning ambiguity.
if f.anchor is seg:
linter_logger.debug(
"Matched fix against segment: %s -> %s", f, seg
)
if f.edit_type == "delete":
# We're just getting rid of this segment.
seg = None
elif f.edit_type in ("edit", "create"):
# We're doing a replacement (it could be a single segment or an iterable)
if isinstance(f.edit, BaseSegment):
seg_buffer.append(f.edit)
else:
for s in f.edit:
seg_buffer.append(s)
if f.edit_type == "create":
# in the case of a creation, also add this segment on the end
seg_buffer.append(seg)
else:
raise ValueError(
"Unexpected edit_type: {!r} in {!r}".format(
f.edit_type, f
)
)
# We've applied a fix here. Move on, this also consumes the fix
# TODO: Maybe deal with overlapping fixes later.
break
else:
# We've not used the fix so we should keep it in the list for later.
unused_fixes.append(f)
else:
seg_buffer.append(seg)
                # Switch over to the unused list
fixes = unused_fixes + fix_buff
# Invalidate any caches
self.invalidate_caches()
# Then recurse (i.e. deal with the children) (Requeueing)
seg_queue = seg_buffer
seg_buffer = []
for seg in seg_queue:
s, fixes = seg.apply_fixes(fixes)
seg_buffer.append(s)
# Reform into a new segment
r = r.__class__(
# Realign the segments within
segments=self._position_segments(
tuple(seg_buffer), parent_pos=r.pos_marker
),
pos_marker=r.pos_marker,
# Pass through any additional kwargs
**{k: getattr(self, k) for k in self.additional_kwargs},
)
# Return the new segment with any unused fixes.
return r, fixes
else:
return self, fixes
def iter_patches(self, templated_str: str) -> Iterator[FixPatch]:
"""Iterate through the segments generating fix patches.
The patches are generated in TEMPLATED space. This is important
so that we defer dealing with any loops until later. At this stage
everything *should* happen in templated order.
Occasionally we have an insertion around a placeholder, so we also
return a hint to deal with that.
"""
# Does it match? If so we can ignore it.
matches = self.raw == templated_str[self.pos_marker.templated_slice]
if matches:
return
# If we're here, the segment doesn't match the original.
# If it's all literal, then we don't need to recurse.
if self.pos_marker.is_literal():
# Yield the position in the source file and the patch
yield FixPatch(
self.pos_marker.templated_slice, self.raw, patch_category="literal"
)
# Can we go deeper?
elif not self.segments:
            # It's not literal, but it's also a raw segment. If we were going
# to yield a change, we would have done it from the parent, so
# we just abort from here.
return
else:
# This segment isn't a literal, but has changed, we need to go deeper.
# Iterate through the child segments
templated_idx = self.pos_marker.templated_slice.start
insert_buff = ""
for seg_idx, segment in enumerate(self.segments):
# First check for insertions.
                # We know it's an insertion if it has raw length but occupies no space in the templated file.
if segment.raw and segment.pos_marker.is_point():
# Add it to the insertion buffer if it has length:
if segment.raw:
insert_buff += segment.raw
linter_logger.debug(
"Appending insertion buffer. %r @idx: %s",
insert_buff,
templated_idx,
)
continue
# If we get here, then we know it's an original.
                # Check for deletions before this segment (vs the TEMPLATED).
start_diff = segment.pos_marker.templated_slice.start - templated_idx
# Check to see whether there's a discontinuity before the current segment
if start_diff > 0 or insert_buff:
# If we have an insert buffer, then it's an edit, otherwise a deletion.
yield FixPatch(
slice(
segment.pos_marker.templated_slice.start
- max(start_diff, 0),
segment.pos_marker.templated_slice.start,
),
insert_buff,
patch_category="mid_point",
)
insert_buff = ""
# Now we deal with any changes *within* the segment itself.
yield from segment.iter_patches(templated_str=templated_str)
# Once we've dealt with any patches from the segment, update
# our position markers.
templated_idx = segment.pos_marker.templated_slice.stop
# After the loop, we check whether there's a trailing deletion
# or insert. Also valid if we still have an insertion buffer here.
end_diff = self.pos_marker.templated_slice.stop - templated_idx
if end_diff or insert_buff:
yield FixPatch(
slice(
self.pos_marker.templated_slice.stop - end_diff,
self.pos_marker.templated_slice.stop,
),
insert_buff,
patch_category="end_point",
)
class BracketedSegment(BaseSegment):
"""A segment containing a bracketed expression."""
type = "bracketed"
additional_kwargs = ["start_bracket", "end_bracket"]
def __init__(
self,
*args,
# These are tuples of segments but we're expecting them to
# be tuples of length 1. This is because we'll almost always
# be doing tuple arithmetic with the results and constructing
# 1-tuples on the fly is very easy to misread.
start_bracket: Tuple[BaseSegment] = None,
end_bracket: Tuple[BaseSegment] = None,
**kwargs,
):
"""Stash the bracket segments for later."""
if not start_bracket or not end_bracket:
raise ValueError(
"Attempted to construct Bracketed segment without specifying brackets."
)
self.start_bracket = start_bracket
self.end_bracket = end_bracket
super().__init__(*args, **kwargs)
@classmethod
def simple(cls, parse_context: ParseContext) -> Optional[List[str]]:
"""Simple methods for bracketed and the persitent brackets."""
start_brackets = [
start_bracket
for _, start_bracket, _, persistent in parse_context.dialect.sets(
"bracket_pairs"
)
if persistent
]
start_simple = []
for ref in start_brackets:
start_simple += parse_context.dialect.ref(ref).simple(parse_context)
return start_simple
@classmethod
def match(
cls, segments: Tuple["BaseSegment", ...], parse_context: ParseContext
) -> MatchResult:
"""Only useful as a terminator."""
if segments and isinstance(segments[0], cls):
return MatchResult((segments[0],), segments[1:])
return MatchResult.from_unmatched(segments)
class UnparsableSegment(BaseSegment):
"""This is a segment which can't be parsed. It indicates a error during parsing."""
type = "unparsable"
# From here down, comments are printed separately.
comment_seperate = True
_expected = ""
def __init__(self, *args, expected="", **kwargs):
self._expected = expected
super().__init__(*args, **kwargs)
def _suffix(self):
"""Return any extra output required at the end when logging.
NB Override this for specific subclasses if we want extra output.
"""
return f"!! Expected: {self._expected!r}"
def iter_unparsables(self):
"""Iterate through any unparsables.
As this is an unparsable, it should yield itself.
"""
yield self
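# ---------------------------------------------------------------------------
# Illustrative usage (a hedged sketch, not part of the original module).
# It assumes the public `sqlfluff.core.Linter` entry point, that
# `parse_string` returns an object exposing the root segment as `.tree`,
# and that the ANSI dialect labels column references with the
# "column_reference" type; adjust those names if the assumptions don't hold.
if __name__ == "__main__":
    from sqlfluff.core import Linter  # assumed public API

    tree = Linter(dialect="ansi").parse_string("SELECT col_a FROM tbl").tree
    # Walk every column reference and show the chain of parents leading to it.
    for ref in tree.recursive_crawl("column_reference"):
        print(ref.raw, "->", [seg.get_type() for seg in tree.path_to(ref)])
    # A compact, code-only structural view of the parsed statement.
    print(tree.stringify(code_only=True))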
|
py | 1a3eccea037433288c196d21004f95be7bbe84a6 | from typing import Any, Optional, Union
from castutils.builtins.strings import to_str
from castutils.types import GenericType
def as_float(obj: Any, /) -> float:
if isinstance(obj, float):
return obj
else:
raise TypeError("Object is not of instance float")
def as_float_or(obj: Any, fallback: GenericType, /) -> Union[float, GenericType]:
try:
return as_float(obj)
except TypeError:
return fallback
def to_float(
obj: Any,
/,
encoding: Optional[str] = None,
errors: Optional[str] = None,
) -> float:
try:
if isinstance(obj, float):
return obj
elif isinstance(obj, (str, bytes)):
return float(to_str(obj, encoding=encoding, errors=errors))
elif isinstance(obj, bool):
return float(obj)
return float(obj)
except Exception as exception:
raise ValueError("Object cannot transform to float") from exception
def to_float_or(
obj: Any,
fallback: GenericType,
/,
encoding: Optional[str] = None,
errors: Optional[str] = None,
) -> Union[float, GenericType]:
try:
return to_float(obj, encoding=encoding, errors=errors)
except ValueError:
return fallback
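# Illustrative usage (a hedged sketch, not part of the original module).
# `as_float` only passes through values that are already floats, while
# `to_float` coerces strs/bytes/bools via `to_str`/float(); the *_or
# variants return the fallback instead of raising. Assumes `to_str`
# returns a str unchanged when no encoding/errors are supplied.
if __name__ == "__main__":
    assert as_float(1.5) == 1.5
    assert as_float_or("1.5", 0.0) == 0.0        # rejected: not already a float
    assert to_float("1.5") == 1.5                # coerced from str
    assert to_float_or("not a number", -1.0) == -1.0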
|
py | 1a3ecd308031043c53b1361f29c36d46a317353b | """testModels.py: Trying to fit some models to the weather and price data.
Also calculating some evaluation metrics. Trying to plot using pandas plot
functionality.
"""
__author__ = "Cameron Roach"
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn import cross_validation, linear_model
plt.style.use("ggplot")
os.chdir("/Users/cameronroach/Documents/PyCharm Projects/PriceForecast/tests")
#region Load and transform data
# Load data with pandas. Easier than csv module.
pricePT = pd.read_csv("../data/HistData/price_PT.csv", header=0, sep=";")
weather = pd.read_csv("../data/HistWeather/weather_hist.csv")
locations = pd.read_csv("../data/HistWeather/locations.csv")
# Rename columns
pricePT.rename(columns = {"Price":"price",
"date (UTC)":"ts"}, inplace=True)
weather.rename(columns = {"prediction_date":"ts"}, inplace=True)
# Convert date columns to datetime type.
pricePT["ts"] = pricePT["ts"].apply(pd.to_datetime)
weather["ts"] = weather["ts"].apply(pd.to_datetime)
# Group weather stations in same countries and take simple average of
# temperatures, wind speeds, etc.
weatherMean = pd.merge(weather, locations)
#weather.query("Country=='Spain'")
# TODO: calculate the difference between the two country averages and maybe avg countries to get one average temp value and one difference between countries value. Only do this if there is strong correlation between the two country average temperatures - CHECK!
weatherMean = (
#weatherMean.groupby(["ts", "Country"], as_index=False)
weatherMean.groupby(["ts", "Country"])
[["temperature"]]
.mean()
#.reset_index() #ungroups
.unstack() #Used for MultiIndex. Similar to cast.
)
#sns.lmplot(x="ts", y="temperature", col="Country", data=weatherMean)
# weatherMean currently has a multiIndex (temperature and country. Need to
# convert to single index so that merge can work.
weatherMean.columns = ["_".join(col).strip() for col in
weatherMean.columns.values]
weatherMean = weatherMean.reset_index()
# Merge data frames
price = pd.merge(pricePT, weatherMean)
# Want the date as the index so that the resample call below works and NaN's
# appear for missing data.
# TODO: Figure out if it's possible to do this without setting the index - just specify a column instead. Makes more sense that way! See: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling
price = price.set_index("ts").resample("60 min").reset_index()
# Add calendar variables to price dataframe. Period of day, day of week,
# weekends, month, season, etc.
price = (price.assign(Year = price["ts"].dt.year)
.assign(Month = price["ts"].dt.month)
.assign(Hour = price["ts"].dt.hour)
.assign(DoW = price["ts"].dt.dayofweek) #Monday=0
.assign(DoY = price["ts"].dt.dayofyear)
.assign(Date = price["ts"].dt.date))
# TODO: Instead of >=, should really use isin; a plain "in [5, 6]" check raises an error on a Series.
price = (price.assign(Weekend = np.where(price["DoW"] >= 5, "Weekend",
"Weekday")))
# Add hourly lags for weather variables
# TODO: See how gaps in the data are affecting the lags. Could possibly have
# a situation where the accuracy introduced by including lags is offset by
# exluding more rows with NaNs. Maybe replace missing lagged values with
# average?
price = (price
.assign(temperature_Portugal_lag1 = price[
"temperature_Portugal"].shift(1))
.assign(temperature_Portugal_lag2 = price[
"temperature_Portugal"].shift(2))
.assign(temperature_Portugal_lag3 = price[
"temperature_Portugal"].shift(3))
.assign(temperature_Spain_lag1 = price[
"temperature_Spain"].shift(1))
.assign(temperature_Spain_lag2 = price[
"temperature_Spain"].shift(2))
.assign(temperature_Spain_lag3 = price[
"temperature_Spain"].shift(3)))
# TODO: Add lags for previous day. Same time yesterday or avg temp?
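# Hedged sketch (not in the original script): one way to add the previous-day
# lags flagged in the TODO above is to shift the hourly series by 24 rows,
# i.e. "same time yesterday" on this hourly index. Left commented out so the
# existing feature set is unchanged.
# price = price.assign(
#     temperature_Portugal_lag24=price["temperature_Portugal"].shift(24),
#     temperature_Spain_lag24=price["temperature_Spain"].shift(24),
# )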
#endregion
#region Plots
# Plot of average temperature and demand for Spain and Portugal
color_dict = {"Weekend":"red", "Weekday":"blue"}
fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.scatter(x=price["temperature_Portugal"], y=price["price"],
c=[color_dict[i] for i in price["Weekend"]])
ax.set_title(str("Portugal electricity price and temperature"))
ax.set_xlabel("Temperature")
ax.set_ylabel("Price $/MWh")
ax = fig.add_subplot(1,2,2)
ax.scatter(x=price["temperature_Spain"], y=price["price"],
c=[color_dict[i] for i in price["Weekend"]])
ax.set_title(str("Spain electricity price and temperature"))
ax.set_xlabel("Temperature")
ax.set_ylabel("Price $/MWh")
# This plot is the reason resample had to happen above. Ensures that
# interpolation doesn't happen for missing values because we now have NaN
# values instead.
ax = price.plot(x="ts", y=["temperature_Portugal", "temperature_Spain"],
subplots=True, sharex=True, title="Average temperatures in Spain "
"and Portugal",
color="blue")
ax = price.plot(x="ts", y="price", title="Electricity price in Portugal")
#This is one way of splitting up the boxplots. Looks gross though.
# ax = price[["DoW", "price"]].groupby("DoW").boxplot()
#But this way is better
# TODO: Figure out a way to get it to ignore that so that there aren't so many NaNs, without requiring an index - like tidyr's spread
ax = price[["DoW", "price"]].pivot(columns="DoW").boxplot()
ax = price[["Date", "Hour", "price"]].pivot(
index="Date", columns="Hour").boxplot()
#endregion
#region Fit models
# Remove NaNs and unwanted columns for modelling and put into training data
# dataframe
# TODO: could try a better method for dealing with NaNs, e.g., fill in with the median, but ignoring for the moment. See: https://www.kaggle.com/c/titanic/details/getting-started-with-python-ii
all_data = price.dropna().copy() #copy needed or get SettingWithCopyWarning
all_data["Weekend"] = all_data["Weekend"]\
.map( {"Weekend": 1, "Weekday": 0} )\
.astype(int) #numpy and sklearn need numerics
rndm_idx = np.random.rand(len(all_data)) < 0.8
train_data = all_data[rndm_idx].copy()
test_data = all_data[~rndm_idx].copy()
x_train = train_data.drop(["Year", "DoW", "DoY", "ts", "Date", "price"],
axis=1)
x_test = test_data.drop(["Year", "DoW", "DoY", "ts", "Date", "price"],
axis=1)
y_train = train_data["price"]
y_test = test_data["price"]
# Baseline models
baseline_avg = y_train.mean()
# TODO: baseline_naive = last value. Need to do k-fold cross validation for
# this with the folds equal to the size of the forecast horizon (5 days)
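# Hedged sketch (not in the original script): a naive persistence baseline for
# the 5-day horizon mentioned above could be built on the hourly `price` frame
# before the random split, e.g.
#   price["price_bl_naive"] = price["price"].shift(5 * 24)
# and then compared against price_bl_avg in the evaluation section below.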
# Fit random forest
forest = RandomForestRegressor(n_estimators = 100)
forest_fit = forest.fit(x_train, y_train)
# TODO: setup k-fold cross validation
scores = cross_validation.cross_val_score(forest, x_train, y_train, cv=5,
scoring="mean_absolute_error")
print("Mean absolute error: %0.2f (+/- %0.2f)" % (scores.mean(),
scores.std() * 1.96))
print(forest_fit.feature_importances_)
# Fit a regression model
lr1 = linear_model.LinearRegression()
lr1.fit(x_train, y_train)
scores = cross_validation.cross_val_score(lr1, x_train, y_train, cv=5,
scoring="mean_absolute_error")
print("Mean absolute error: %0.2f (+/- %0.2f)" % (scores.mean(),
scores.std() * 1.96))
print('Coefficients: \n', lr1.coef_)
plt.scatter(y_test, lr1.predict(x_test), color='black')
#endregion
#region Evaluation metrics for test data
# Compare predictions against actuals
test_data["price_bl_avg"] = baseline_avg
test_data["price_rf"] = forest_fit.predict(x_test)
test_data["price_lr1"] = lr1.predict(x_test)
test_data.plot(x="ts", y=["price", "price_bl_avg", "price_rf", "price_lr1"],
title="Price predictions compared to actuals")
# Note: this should only really be looked at once the best model setup has
# been decided from the cross-validation steps.
test_data["ae_bl_avg"] = abs(test_data["price"]-test_data["price_bl_avg"])
test_data["ape_bl_avg"] = test_data["ae_bl_avg"]/test_data["price"]
test_data["ae_rf"] = abs(test_data["price"]-test_data["price_rf"])
test_data["ape_rf"] = test_data["ae_rf"]/test_data["price"]
test_data["ae_lr1"] = abs(test_data["price"]-test_data["price_lr1"])
test_data["ape_lr1"] = test_data["ae_lr1"]/test_data["price"]
test_data.plot(x="ts", y=["ae_rf", "ae_bl_avg", "ae_lr1"])
test_data.plot(x="ts", y=["ape_rf", "ape_bl_avg", "ape_lr1"])
plt.scatter(x=test_data["temperature_Portugal"],
y=test_data["ae_rf"])
plt.scatter(x=test_data["temperature_Portugal"],
y=test_data["ape_rf"])
print("Mean absolute errors:")
print test_data[["ae_bl_avg", "ae_rf", "ae_lr1"]].mean(axis=0)
print("Mean absolute percentage errors:")
print test_data[["ape_bl_avg", "ape_rf", "ape_lr1"]].mean(axis=0)
#endregion |
py | 1a3ece3b49e6e7901d088b748a11e4b43a2e9bce | from p5 import *
def setup():
title("🐍 Jörgs Python Sketch 🐍".encode("utf-8"))
def draw():
background(245, 245, 245)
run() |
py | 1a3ece4a570655c444736bc666c6979c77f156fd | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# slice paddle model generator
#
import sys
import os
import numpy as np
import paddle
from save_model import exportModel
from save_model import saveModel
data_type = 'float32'
def slice(name : str, x, axes : list, start : list, end : list):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
out = paddle.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def slice_dyn(test_shape=[2,8,10,10]):
paddle.disable_static()
data = paddle.rand(shape=test_shape, dtype='float32')
'''
slice w/ decrease_axis
'''
@paddle.jit.to_static
def test_slice_decrease_axis(x):
return x[0, 1:3, :, 5]
exportModel('slice_decrease_axis', test_slice_decrease_axis, [data], target_dir=sys.argv[1]) # output shape (2, 10)
'''
slice w/o decrease_axis
'''
@paddle.jit.to_static
def test_slice(x):
return paddle.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6])
    # exportModel('slice_dyn', test_slice, [data], target_dir=sys.argv[1]) # output shape (1, 2, 10, 1) # disabled by default as this kind of test model is already there; it's for comparison only.
'''
slice w/ decrease_axis of all dims
'''
@paddle.jit.to_static
def test_slice_decrease_axis_all(x):
return x[0, 0, 0, 0]
exportModel('slice_decrease_axis_all', test_slice_decrease_axis_all, [data], target_dir=sys.argv[1]) # output shape (1,)
'''
slice w/o decrease_axis of all dims
'''
@paddle.jit.to_static
def test_slice_alldim(x):
return paddle.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1])
    # exportModel('slice_alldim', test_slice_alldim, [data], target_dir=sys.argv[1]) # output shape (1, 1, 1, 1) # disabled by default as this kind of test model is already there; it's for comparison only.
'''
a test case simulating the last reshape2 of ocrnet, which accepts a slice (with decrease_axis on all dims) as its parent.
'''
def slice_reshape(B=1, C=256, H=16, W=32):
paddle.disable_static()
data = paddle.rand(shape=[B, C, H*W], dtype='float32')
@paddle.jit.to_static
def test_model(x):
x2 = paddle.assign([-1, -1, 16, 32]).astype('int32')
node_reshape = paddle.reshape(x, [0, 256, x2[2], x2[3]])
return node_reshape
exportModel('slice_reshape', test_model, [data], target_dir=sys.argv[1])
def main():
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
slice("slice_1d", x, axes=[0], start=[0], end=[1])
if __name__ == "__main__":
main()
slice_dyn()
slice_reshape() |