Dataset schema (one record per source file; nullable columns were marked ⌀ in the original dump):

| Column | Type | Range / size |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
hexsha: 9699da91536be3b5f7938a488f2471ecc9d269c7 | size: 1,318 | ext: py | lang: Python
repo_path: app/web/obtain_url.py | repo_name: gpp0725/EchoProxy | repo_head_hexsha: 0273f47397b76fa0292db267d99eeb9dccc4e869 | licenses: ["Apache-2.0"] (identical across max_stars / max_issues / max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/4 0004 2:09
# @Author : Gpp
# @File : obtain_url.py
from app.web import api
from flask_restful import Resource
from flask import make_response, send_from_directory, jsonify
from app.helper.encrypt import two_encrypting
from app.crud.proxy_crud import ProtocolCrud
from app.helper.get_one_encrypt import get_one_encrypt_data
from app.helper.update_subscribe import add_proxy
# @api.resource('/generate')
# class Generate(Resource):
# def get(self):
# proxies = ProtocolCrud.get_all_share()
# one_encrypt = get_one_encrypt_data(proxies)
# result = add_proxy(two_encrypting(''.join(one_encrypt)))
# return jsonify(result)
@api.resource('/generate/<proxy_information>')
class GetUrl(Resource):
def get(self, proxy_information):
        # fetch proxy metadata
proxies = ProtocolCrud.get_all_share()
one_encrypt = get_one_encrypt_data(proxies)
add_proxy(two_encrypting(''.join(one_encrypt)))
        # fetch real-time proxy information
        # fetch Prometheus data and store it under an alias
        # generate the subscription link
response = make_response(send_from_directory('url_file', f'{proxy_information}.txt', as_attachment=True))
response.headers["Content-Disposition"] = f"attachment; filename={proxy_information}.txt"
return response
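# Usage sketch (hypothetical subscription name, assuming a file
# url_file/mysub.txt already exists on the server):
#   GET /generate/mysub
# re-encrypts the current proxy list via add_proxy() and returns
# mysub.txt as a downloadable attachment.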
avg_line_length: 33.794872 | max_line_length: 113 | alphanum_fraction: 0.707132 | classes: 594 (score 0.431686) | generators: 0 (score 0) | decorators: 641 (score 0.465843) | async_functions: 0 (score 0) | documentation: 637 (score 0.462936)
hexsha: 969a1ae2aa1e6f093f672d0dc08a8182fddc7227 | size: 920 | ext: py | lang: Python
repo_path: oshino/run.py | repo_name: CodersOfTheNight/oshino | repo_head_hexsha: 08e35d004aa16a378d87d5e548649a1bc1f5dc17 | licenses: ["MIT"] (identical across max_stars / max_issues / max_forks)
max_stars_count: 6 (2016-11-06T17:47:57.000Z to 2020-04-08T12:20:59.000Z) | max_issues_count: 24 (2016-11-15T06:20:50.000Z to 2019-02-08T18:54:57.000Z) | max_forks_count: null
import logging
from argparse import ArgumentParser
from dotenv import load_dotenv, find_dotenv
from .config import load
from .core.heart import start_loop
logger = logging.getLogger(__name__)
try:
load_dotenv(find_dotenv())
except Exception as ex:
logger.error("Error while loading .env: '{}'. Ignoring.".format(ex))
def main(args):
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
cfg = load(args.config)
start_loop(cfg, args.noop)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", help="Config file", default="config.yaml")
parser.add_argument("--noop", action="store_true", default=False, help="Events will be processed, but not sent to Riemann")
parser.add_argument("--debug", action="store_true", default=False, help="Debug mode")
main(parser.parse_args())
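# Usage sketch (hypothetical config path; flags as defined above):
#   python run.py --config my_config.yaml --debug --noop
# runs the event loop at DEBUG verbosity without sending events to Riemann.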
avg_line_length: 27.058824 | max_line_length: 127 | alphanum_fraction: 0.71413 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 193 (score 0.209783)
hexsha: 969aa0f8463c5dac76a29b27b5b12bf01e79a4cf | size: 4,113 | ext: py | lang: Python
repo_path: sendmail_win_cs.py | repo_name: Fatman13/gta_swarm | repo_head_hexsha: 1c4603f39cd7831f5907fd619594452b3320f75f | licenses: ["MIT"] (identical across max_stars / max_issues / max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
#!/usr/bin/env python
# coding=utf-8
import glob
import click
import os
import json
import datetime
import re
import csv
from requests.exceptions import ConnectionError
from exchangelib import DELEGATE, IMPERSONATION, Account, Credentials, ServiceAccount, \
EWSDateTime, EWSTimeZone, Configuration, NTLM, CalendarItem, Message, \
Mailbox, Attendee, Q, ExtendedProperty, FileAttachment, ItemAttachment, \
HTMLBody, Build, Version
sendmail_secret = None
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'secrets.json')) as data_file:
sendmail_secret = (json.load(data_file))['sendmail_win']
TO_REGISTER = 'Confirmed (to register)'
def dump_csv(res, output_filename, from_date):
keys = res[0].keys()
final_output_filename = '_'.join(['Output_sendmail',
output_filename,
from_date.strftime('%y%m%d'),
datetime.datetime.now().strftime('%H%M')
]) + '.csv'
with open(final_output_filename, 'w', newline='', encoding='utf-8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(res)
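# Usage sketch for dump_csv (hypothetical rows): writes the list of dicts to
# Output_sendmail_<name>_<yymmdd>_<HHMM>.csv with a header row, e.g.
#   dump_csv([{'hotel': 'A', 'ref': '123'}], 'ctrip', datetime.datetime.now())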
@click.command()
@click.option('--filename', default='output_hotel_ref_')
@click.option('--email', default='[email protected]')
def sendmail_win_cs(filename, email):
target_filename = filename + '*.csv'
newest = max(glob.iglob(target_filename), key=os.path.getctime)
print('newest file: ' + newest)
today_date = datetime.datetime.now().strftime('%y%m%d')
try:
        newest_date = re.search(filename + r'(\d+)', newest).group(1)
except AttributeError:
newest_date = ''
print('newest date: ' + newest_date)
if newest_date != today_date:
        print('Error: newest date != today date.. manual intervention needed..')
return
print('Setting account..')
# Username in WINDOMAIN\username format. Office365 wants usernames in PrimarySMTPAddress
# ('[email protected]') format. UPN format is also supported.
credentials = Credentials(username='APACNB\\809452', password=sendmail_secret['password'])
print('Discovering..')
# If the server doesn't support autodiscover, use a Configuration object to set the server
# location:
config = Configuration(server='emailuk.kuoni.com', credentials=credentials)
try:
account = Account(primary_smtp_address=email, config=config,
autodiscover=False, access_type=DELEGATE)
except ConnectionError as e:
print('Fatal: Connection Error.. aborted..')
return
print('Logged in as: ' + str(email))
recipient_email = '[email protected]'
recipient_email1 = '[email protected]'
recipient_email2 = '[email protected]'
recipient_email3 = '[email protected]'
recipient_email4 = '[email protected]'
recipient_email6 = '[email protected]'
recipient_email5 = '[email protected]'
body_text = 'FYI\n' + \
'Best\n' + \
'-Yu'
title_text = '[[[ Ctrip hotel reference ]]]'
# Or, if you want a copy in e.g. the 'Sent' folder
m = Message(
account=account,
folder=account.sent,
sender=Mailbox(email_address=email),
author=Mailbox(email_address=email),
subject=title_text,
body=body_text,
# to_recipients=[Mailbox(email_address=recipient_email1),
# Mailbox(email_address=recipient_email2),
# Mailbox(email_address=recipient_email3)
# ]
# to_recipients=[Mailbox(email_address=recipient_email1),
# Mailbox(email_address=recipient_email2),
# Mailbox(email_address=recipient_email3),
# Mailbox(email_address=recipient_email4),
# Mailbox(email_address=recipient_email5)
# ]
to_recipients=[Mailbox(email_address=recipient_email1),
Mailbox(email_address=recipient_email2),
Mailbox(email_address=recipient_email3),
Mailbox(email_address=recipient_email4)
]
)
with open(newest, 'rb') as f:
update_csv = FileAttachment(name=newest, content=f.read())
m.attach(update_csv)
m.send_and_save()
print('Message sent.. ')
if __name__ == '__main__':
sendmail_win_cs()
avg_line_length: 34.855932 | max_line_length: 102 | alphanum_fraction: 0.706054 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 2,899 (score 0.704838) | async_functions: 0 (score 0) | documentation: 1,448 (score 0.352054)
hexsha: 969ab9ef2de1a8573dbf6d90268f74dbc9e16fef | size: 29,479 | ext: py | lang: Python
repo_path: utilities/access_concatenate_daily.py | repo_name: pizzathief/PyFluxPro | repo_head_hexsha: c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | licenses: ["BSD-3-Clause"] (identical across max_stars / max_issues / max_forks)
max_stars_count: 1 (2021-01-17T20:53:39.000Z to 2021-01-17T20:53:39.000Z) | max_issues_count: null | max_forks_count: null
"""
Purpose:
Reads the hourly ACCESS files pulled from the BoM OPeNDAP site
and concatenates them into a single file.
This script file takes a control file name on the command line.
The control file lists the sites to be processed and the variables
to be processed.
Normal usage is to process all files in a monthly sub-directory.
Usage:
python access_concat.py access_concat.txt
Author: PRI
Date: September 2015
"""
# Python modules
import configobj
import datetime
import glob
import logging
import netCDF4
import numpy
import os
import pytz
import pdb
from scipy.interpolate import interp1d
import sys
# since the scripts directory is there, try importing the modules
sys.path.append('../scripts')
# PFP
import constants as c
import meteorologicalfunctions as mf
import qcio
import qcutils
# !!! classes !!!
class ACCESSData(object):
def __init__(self):
self.globalattr = {}
self.globalattr["file_list"] = []
self.variables = {}
self.varattr = {}
# !!! start of function definitions !!!
def get_info_dict(cf,site):
info = {}
in_path = cf["Sites"][site]["in_filepath"]
in_name = cf["Sites"][site]["in_filename"]
info["in_filename"] = os.path.join(in_path,in_name)
out_path = cf["Sites"][site]["out_filepath"]
if not os.path.exists(out_path): os.makedirs(out_path)
out_name = cf["Sites"][site]["out_filename"]
info["out_filename"] = os.path.join(out_path,out_name)
info["interpolate"] = True
if not cf["Sites"][site].as_bool("interpolate"):
info["interpolate"] = False
info["site_name"] = cf["Sites"][site]["site_name"]
info["site_timezone"] = cf["Sites"][site]["site_timezone"]
info["site_tz"] = pytz.timezone(info["site_timezone"])
return info
def get_datetime(ds_60minutes,f,info):
valid_date = f.variables["valid_date"][:]
nRecs = len(valid_date)
valid_time = f.variables["valid_time"][:]
dl = [datetime.datetime.strptime(str(int(valid_date[i])*10000+int(valid_time[i])),"%Y%m%d%H%M") for i in range(0,nRecs)]
dt_utc_all = numpy.array(dl)
time_step = numpy.array([(dt_utc_all[i]-dt_utc_all[i-1]).total_seconds() for i in range(1,len(dt_utc_all))])
time_step = numpy.append(time_step,3600)
idx = numpy.where(time_step!=0)[0]
dt_utc = dt_utc_all[idx]
dt_utc = [x.replace(tzinfo=pytz.utc) for x in dt_utc]
dt_loc = [x.astimezone(info["site_tz"]) for x in dt_utc]
dt_loc = [x-x.dst() for x in dt_loc]
dt_loc = [x.replace(tzinfo=None) for x in dt_loc]
ds_60minutes.series["DateTime"] = {}
ds_60minutes.series["DateTime"]["Data"] = dt_loc
nRecs = len(ds_60minutes.series["DateTime"]["Data"])
ds_60minutes.globalattributes["nc_nrecs"] = nRecs
return idx
def set_globalattributes(ds_60minutes,info):
ds_60minutes.globalattributes["time_step"] = 60
ds_60minutes.globalattributes["time_zone"] = info["site_timezone"]
ds_60minutes.globalattributes["site_name"] = info["site_name"]
ds_60minutes.globalattributes["xl_datemode"] = 0
ds_60minutes.globalattributes["nc_level"] = "L1"
return
def get_accessdata(cf,ds_60minutes,f,info):
# latitude and longitude, chose central pixel of 3x3 grid
ds_60minutes.globalattributes["latitude"] = f.variables["lat"][1]
ds_60minutes.globalattributes["longitude"] = f.variables["lon"][1]
# list of variables to process
var_list = cf["Variables"].keys()
# get a series of Python datetimes and put this into the data structure
valid_date = f.variables["valid_date"][:]
nRecs = len(valid_date)
valid_time = f.variables["valid_time"][:]
dl = [datetime.datetime.strptime(str(int(valid_date[i])*10000+int(valid_time[i])),"%Y%m%d%H%M") for i in range(0,nRecs)]
dt_utc_all = numpy.array(dl)
time_step = numpy.array([(dt_utc_all[i]-dt_utc_all[i-1]).total_seconds() for i in range(1,len(dt_utc_all))])
time_step = numpy.append(time_step,3600)
idxne0 = numpy.where(time_step!=0)[0]
idxeq0 = numpy.where(time_step==0)[0]
idx_clipped = numpy.where((idxeq0>0)&(idxeq0<nRecs))[0]
idxeq0 = idxeq0[idx_clipped]
dt_utc = dt_utc_all[idxne0]
dt_utc = [x.replace(tzinfo=pytz.utc) for x in dt_utc]
dt_loc = [x.astimezone(info["site_tz"]) for x in dt_utc]
dt_loc = [x-x.dst() for x in dt_loc]
dt_loc = [x.replace(tzinfo=None) for x in dt_loc]
flag = numpy.zeros(len(dt_loc),dtype=numpy.int32)
ds_60minutes.series["DateTime"] = {}
ds_60minutes.series["DateTime"]["Data"] = dt_loc
ds_60minutes.series["DateTime"]["Flag"] = flag
ds_60minutes.series["DateTime_UTC"] = {}
ds_60minutes.series["DateTime_UTC"]["Data"] = dt_utc
ds_60minutes.series["DateTime_UTC"]["Flag"] = flag
nRecs = len(ds_60minutes.series["DateTime"]["Data"])
ds_60minutes.globalattributes["nc_nrecs"] = nRecs
# we're done with valid_date and valid_time, drop them from the variable list
for item in ["valid_date","valid_time","lat","lon"]:
if item in var_list: var_list.remove(item)
# create the QC flag with all zeros
nRecs = ds_60minutes.globalattributes["nc_nrecs"]
flag_60minutes = numpy.zeros(nRecs,dtype=numpy.int32)
# get the UTC hour
hr_utc = [x.hour for x in dt_utc]
attr = qcutils.MakeAttributeDictionary(long_name='UTC hour')
qcutils.CreateSeries(ds_60minutes,'Hr_UTC',hr_utc,Flag=flag_60minutes,Attr=attr)
# now loop over the variables listed in the control file
for label in var_list:
# get the name of the variable in the ACCESS file
access_name = qcutils.get_keyvaluefromcf(cf,["Variables",label],"access_name",default=label)
# warn the user if the variable not found
if access_name not in f.variables.keys():
msg = "Requested variable "+access_name
msg = msg+" not found in ACCESS data"
logging.error(msg)
continue
        # get the variable attributes
attr = get_variableattributes(f,access_name)
# loop over the 3x3 matrix of ACCESS grid data supplied
for i in range(0,3):
for j in range(0,3):
label_ij = label+'_'+str(i)+str(j)
if len(f.variables[access_name].shape)==3:
series = f.variables[access_name][:,i,j]
elif len(f.variables[access_name].shape)==4:
series = f.variables[access_name][:,0,i,j]
                else:
                    msg = "Unrecognised variable ("+label
                    msg = msg+") dimension in ACCESS file"
                    logging.error(msg)
                    continue
                series = series[idxne0]
qcutils.CreateSeries(ds_60minutes,label_ij,series,
Flag=flag_60minutes,Attr=attr)
return
def get_variableattributes(f,access_name):
attr = {}
# following code for netCDF4.MFDataset()
# for vattr in f.variables[access_name].ncattrs():
# attr[vattr] = getattr(f.variables[access_name],vattr)
# following code for access_read_mfiles2()
attr = f.varattr[access_name]
attr["missing_value"] = c.missing_value
return attr
def changeunits_airtemperature(ds_60minutes):
attr = qcutils.GetAttributeDictionary(ds_60minutes,"Ta_00")
if attr["units"] == "K":
for i in range(0,3):
for j in range(0,3):
label = "Ta_"+str(i)+str(j)
Ta,f,a = qcutils.GetSeriesasMA(ds_60minutes,label)
Ta = Ta - c.C2K
attr["units"] = "C"
qcutils.CreateSeries(ds_60minutes,label,Ta,Flag=f,Attr=attr)
return
def changeunits_soiltemperature(ds_60minutes):
attr = qcutils.GetAttributeDictionary(ds_60minutes,"Ts_00")
if attr["units"] == "K":
for i in range(0,3):
for j in range(0,3):
label = "Ts_"+str(i)+str(j)
Ts,f,a = qcutils.GetSeriesasMA(ds_60minutes,label)
Ts = Ts - c.C2K
attr["units"] = "C"
qcutils.CreateSeries(ds_60minutes,label,Ts,Flag=f,Attr=attr)
return
def changeunits_pressure(ds_60minutes):
attr = qcutils.GetAttributeDictionary(ds_60minutes,"ps_00")
if attr["units"] == "Pa":
for i in range(0,3):
for j in range(0,3):
label = "ps_"+str(i)+str(j)
ps,f,a = qcutils.GetSeriesasMA(ds_60minutes,label)
ps = ps/float(1000)
attr["units"] = "kPa"
qcutils.CreateSeries(ds_60minutes,label,ps,Flag=f,Attr=attr)
return
def get_windspeedanddirection(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
u_label = "u_"+str(i)+str(j)
v_label = "v_"+str(i)+str(j)
Ws_label = "Ws_"+str(i)+str(j)
u,f,a = qcutils.GetSeriesasMA(ds_60minutes,u_label)
v,f,a = qcutils.GetSeriesasMA(ds_60minutes,v_label)
Ws = numpy.sqrt(u*u+v*v)
attr = qcutils.MakeAttributeDictionary(long_name="Wind speed",
units="m/s",height="10m")
qcutils.CreateSeries(ds_60minutes,Ws_label,Ws,Flag=f,Attr=attr)
# wind direction from components
for i in range(0,3):
for j in range(0,3):
u_label = "u_"+str(i)+str(j)
v_label = "v_"+str(i)+str(j)
Wd_label = "Wd_"+str(i)+str(j)
u,f,a = qcutils.GetSeriesasMA(ds_60minutes,u_label)
v,f,a = qcutils.GetSeriesasMA(ds_60minutes,v_label)
Wd = float(270) - numpy.ma.arctan2(v,u)*float(180)/numpy.pi
index = numpy.ma.where(Wd>360)[0]
if len(index)>0: Wd[index] = Wd[index] - float(360)
attr = qcutils.MakeAttributeDictionary(long_name="Wind direction",
units="degrees",height="10m")
qcutils.CreateSeries(ds_60minutes,Wd_label,Wd,Flag=f,Attr=attr)
return
def get_relativehumidity(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
q_label = "q_"+str(i)+str(j)
Ta_label = "Ta_"+str(i)+str(j)
ps_label = "ps_"+str(i)+str(j)
RH_label = "RH_"+str(i)+str(j)
q,f,a = qcutils.GetSeriesasMA(ds_60minutes,q_label)
Ta,f,a = qcutils.GetSeriesasMA(ds_60minutes,Ta_label)
ps,f,a = qcutils.GetSeriesasMA(ds_60minutes,ps_label)
RH = mf.RHfromspecifichumidity(q, Ta, ps)
attr = qcutils.MakeAttributeDictionary(long_name='Relative humidity',
units='%',standard_name='not defined')
qcutils.CreateSeries(ds_60minutes,RH_label,RH,Flag=f,Attr=attr)
return
def get_absolutehumidity(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
Ta_label = "Ta_"+str(i)+str(j)
RH_label = "RH_"+str(i)+str(j)
Ah_label = "Ah_"+str(i)+str(j)
Ta,f,a = qcutils.GetSeriesasMA(ds_60minutes,Ta_label)
RH,f,a = qcutils.GetSeriesasMA(ds_60minutes,RH_label)
Ah = mf.absolutehumidityfromRH(Ta, RH)
attr = qcutils.MakeAttributeDictionary(long_name='Absolute humidity',
units='g/m3',standard_name='not defined')
qcutils.CreateSeries(ds_60minutes,Ah_label,Ah,Flag=f,Attr=attr)
return
def changeunits_soilmoisture(ds_60minutes):
attr = qcutils.GetAttributeDictionary(ds_60minutes,"Sws_00")
for i in range(0,3):
for j in range(0,3):
label = "Sws_"+str(i)+str(j)
Sws,f,a = qcutils.GetSeriesasMA(ds_60minutes,label)
Sws = Sws/float(100)
attr["units"] = "frac"
qcutils.CreateSeries(ds_60minutes,label,Sws,Flag=f,Attr=attr)
return
def get_radiation(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
label_Fn = "Fn_"+str(i)+str(j)
label_Fsd = "Fsd_"+str(i)+str(j)
label_Fld = "Fld_"+str(i)+str(j)
label_Fsu = "Fsu_"+str(i)+str(j)
label_Flu = "Flu_"+str(i)+str(j)
label_Fn_sw = "Fn_sw_"+str(i)+str(j)
label_Fn_lw = "Fn_lw_"+str(i)+str(j)
Fsd,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fsd)
Fld,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fld)
Fn_sw,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fn_sw)
Fn_lw,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fn_lw)
Fsu = Fsd - Fn_sw
Flu = Fld - Fn_lw
Fn = (Fsd-Fsu)+(Fld-Flu)
attr = qcutils.MakeAttributeDictionary(long_name='Up-welling long wave',
standard_name='surface_upwelling_longwave_flux_in_air',
units='W/m2')
qcutils.CreateSeries(ds_60minutes,label_Flu,Flu,Flag=f,Attr=attr)
attr = qcutils.MakeAttributeDictionary(long_name='Up-welling short wave',
standard_name='surface_upwelling_shortwave_flux_in_air',
units='W/m2')
qcutils.CreateSeries(ds_60minutes,label_Fsu,Fsu,Flag=f,Attr=attr)
attr = qcutils.MakeAttributeDictionary(long_name='Calculated net radiation',
standard_name='surface_net_allwave_radiation',
units='W/m2')
qcutils.CreateSeries(ds_60minutes,label_Fn,Fn,Flag=f,Attr=attr)
return
def get_groundheatflux(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
label_Fg = "Fg_"+str(i)+str(j)
label_Fn = "Fn_"+str(i)+str(j)
label_Fh = "Fh_"+str(i)+str(j)
label_Fe = "Fe_"+str(i)+str(j)
Fn,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fn)
Fh,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fh)
Fe,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fe)
Fg = Fn - Fh - Fe
attr = qcutils.MakeAttributeDictionary(long_name='Calculated ground heat flux',
standard_name='downward_heat_flux_in_soil',
units='W/m2')
qcutils.CreateSeries(ds_60minutes,label_Fg,Fg,Flag=f,Attr=attr)
return
def get_availableenergy(ds_60minutes):
for i in range(0,3):
for j in range(0,3):
label_Fg = "Fg_"+str(i)+str(j)
label_Fn = "Fn_"+str(i)+str(j)
label_Fa = "Fa_"+str(i)+str(j)
Fn,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fn)
Fg,f,a = qcutils.GetSeriesasMA(ds_60minutes,label_Fg)
Fa = Fn - Fg
attr = qcutils.MakeAttributeDictionary(long_name='Calculated available energy',
standard_name='not defined',units='W/m2')
qcutils.CreateSeries(ds_60minutes,label_Fa,Fa,Flag=f,Attr=attr)
return
def perdelta(start,end,delta):
curr = start
while curr <= end:
yield curr
curr += delta
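# Usage sketch for perdelta (hypothetical dates): yields datetimes from
# start to end inclusive, stepping by delta, e.g.
#   list(perdelta(datetime.datetime(2015, 9, 1, 0, 0),
#                 datetime.datetime(2015, 9, 1, 1, 0),
#                 datetime.timedelta(minutes=30)))
# gives the three stamps 00:00, 00:30 and 01:00 on 2015-09-01.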
def interpolate_to_30minutes(ds_60minutes):
ds_30minutes = qcio.DataStructure()
# copy the global attributes
for this_attr in ds_60minutes.globalattributes.keys():
ds_30minutes.globalattributes[this_attr] = ds_60minutes.globalattributes[this_attr]
# update the global attribute "time_step"
ds_30minutes.globalattributes["time_step"] = 30
# generate the 30 minute datetime series
dt_loc_60minutes = ds_60minutes.series["DateTime"]["Data"]
dt_loc_30minutes = [x for x in perdelta(dt_loc_60minutes[0],dt_loc_60minutes[-1],datetime.timedelta(minutes=30))]
nRecs_30minutes = len(dt_loc_30minutes)
dt_utc_60minutes = ds_60minutes.series["DateTime_UTC"]["Data"]
dt_utc_30minutes = [x for x in perdelta(dt_utc_60minutes[0],dt_utc_60minutes[-1],datetime.timedelta(minutes=30))]
# update the global attribute "nc_nrecs"
ds_30minutes.globalattributes['nc_nrecs'] = nRecs_30minutes
ds_30minutes.series["DateTime"] = {}
ds_30minutes.series["DateTime"]["Data"] = dt_loc_30minutes
flag = numpy.zeros(len(dt_loc_30minutes),dtype=numpy.int32)
ds_30minutes.series["DateTime"]["Flag"] = flag
ds_30minutes.series["DateTime_UTC"] = {}
ds_30minutes.series["DateTime_UTC"]["Data"] = dt_utc_30minutes
flag = numpy.zeros(len(dt_utc_30minutes),dtype=numpy.int32)
ds_30minutes.series["DateTime_UTC"]["Flag"] = flag
# get the year, month etc from the datetime
qcutils.get_xldatefromdatetime(ds_30minutes)
qcutils.get_ymdhmsfromdatetime(ds_30minutes)
# interpolate to 30 minutes
nRecs_60 = len(ds_60minutes.series["DateTime"]["Data"])
nRecs_30 = len(ds_30minutes.series["DateTime"]["Data"])
x_60minutes = numpy.arange(0,nRecs_60,1)
x_30minutes = numpy.arange(0,nRecs_60-0.5,0.5)
varlist_60 = ds_60minutes.series.keys()
# strip out the date and time variables already done
for item in ["DateTime","DateTime_UTC","xlDateTime","Year","Month","Day","Hour","Minute","Second","Hdh","Hr_UTC"]:
if item in varlist_60: varlist_60.remove(item)
    # now do the interpolation (it's OK to interpolate accumulated precipitation)
for label in varlist_60:
series_60minutes,flag,attr = qcutils.GetSeries(ds_60minutes,label)
ci_60minutes = numpy.zeros(len(series_60minutes))
idx = numpy.where(abs(series_60minutes-float(c.missing_value))<c.eps)[0]
ci_60minutes[idx] = float(1)
int_fn = interp1d(x_60minutes,series_60minutes)
series_30minutes = int_fn(x_30minutes)
int_fn = interp1d(x_60minutes,ci_60minutes)
ci_30minutes = int_fn(x_30minutes)
idx = numpy.where(abs(ci_30minutes-float(0))>c.eps)[0]
series_30minutes[idx] = numpy.float64(c.missing_value)
flag_30minutes = numpy.zeros(nRecs_30, dtype=numpy.int32)
flag_30minutes[idx] = numpy.int32(1)
qcutils.CreateSeries(ds_30minutes,label,series_30minutes,Flag=flag_30minutes,Attr=attr)
# get the UTC hour
hr_utc = [float(x.hour)+float(x.minute)/60 for x in dt_utc_30minutes]
attr = qcutils.MakeAttributeDictionary(long_name='UTC hour')
flag_30minutes = numpy.zeros(nRecs_30, dtype=numpy.int32)
qcutils.CreateSeries(ds_30minutes,'Hr_UTC',hr_utc,Flag=flag_30minutes,Attr=attr)
return ds_30minutes
def get_instantaneous_precip30(ds_30minutes):
hr_utc,f,a = qcutils.GetSeries(ds_30minutes,'Hr_UTC')
for i in range(0,3):
for j in range(0,3):
label = "Precip_"+str(i)+str(j)
# get the accumulated precipitation
accum,flag,attr = qcutils.GetSeries(ds_30minutes,label)
# get the 30 minute precipitation
precip = numpy.ediff1d(accum,to_begin=0)
# now we deal with the reset of accumulated precipitation at 00, 06, 12 and 18 UTC
# indices of analysis times 00, 06, 12, and 18
idx1 = numpy.where(numpy.mod(hr_utc,6)==0)[0]
# set 30 minute precipitation at these times to half of the analysis value
precip[idx1] = accum[idx1]/float(2)
            # now get the indices of the 30 minute period immediately before the analysis time
# these values will have been interpolated between the last forecast value
# and the analysis value, they need to be set to half of the analysis value
idx2 = idx1-1
# remove negative indices
idx2 = idx2[idx2>=0]
# set these 30 minute times to half the analysis value
precip[idx2] = accum[idx2+1]/float(2)
# set precipitations less than 0.01 mm to 0
idx3 = numpy.ma.where(precip<0.01)[0]
precip[idx3] = float(0)
            # set instantaneous precipitation to missing when accumulated precipitation was missing
idx = numpy.where(flag!=0)[0]
precip[idx] = float(c.missing_value)
# set some variable attributes
attr["long_name"] = "Precipitation total over time step"
attr["units"] = "mm/30 minutes"
qcutils.CreateSeries(ds_30minutes,label,precip,Flag=flag,Attr=attr)
return
def get_instantaneous_precip60(ds_60minutes):
hr_utc,f,a = qcutils.GetSeries(ds_60minutes,'Hr_UTC')
for i in range(0,3):
for j in range(0,3):
label = "Precip_"+str(i)+str(j)
# get the accumulated precipitation
accum,flag,attr = qcutils.GetSeries(ds_60minutes,label)
            # get the 60 minute precipitation
precip = numpy.ediff1d(accum,to_begin=0)
# now we deal with the reset of accumulated precipitation at 00, 06, 12 and 18 UTC
# indices of analysis times 00, 06, 12, and 18
idx1 = numpy.where(numpy.mod(hr_utc,6)==0)[0]
            # set 60 minute precipitation at these times to the analysis value
precip[idx1] = accum[idx1]
            # set accumulated precipitations less than 0.01 mm to 0
idx2 = numpy.ma.where(precip<0.01)[0]
precip[idx2] = float(0)
            # set instantaneous precipitation to missing when accumulated precipitation was missing
idx = numpy.where(flag!=0)[0]
precip[idx] = float(c.missing_value)
# set some variable attributes
attr["long_name"] = "Precipitation total over time step"
attr["units"] = "mm/60 minutes"
qcutils.CreateSeries(ds_60minutes,label,precip,Flag=flag,Attr=attr)
def access_read_mfiles2(file_list,var_list=[]):
f = ACCESSData()
# check that we have a list of files to process
if len(file_list)==0:
print "access_read_mfiles: empty file_list received, returning ..."
return f
# make sure latitude and longitude are read
if "lat" not in var_list: var_list.append("lat")
if "lon" not in var_list: var_list.append("lon")
# make sure valid_date and valid_time are read
if "valid_date" not in var_list: var_list.append("valid_date")
if "valid_time" not in var_list: var_list.append("valid_time")
for file_name in file_list:
# open the netCDF file
ncfile = netCDF4.Dataset(file_name)
# check the number of records
dims = ncfile.dimensions
shape = (len(dims["time"]),len(dims["lat"]),len(dims["lon"]))
        # move to the next file if this file doesn't have exactly 1 time record
if shape[0]!=1:
print "access_read_mfiles: length of time dimension in "+file_name+" is "+str(shape[0])+" (expected 1)"
continue
# move to the next file if this file doesn't have 3 latitude records
if shape[1]!=3:
print "access_read_mfiles: length of lat dimension in "+file_name+" is "+str(shape[1])+" (expected 3)"
continue
# move to the next file if this file doesn't have 3 longitude records
if shape[2]!=3:
print "access_read_mfiles: length of lon dimension in "+file_name+" is "+str(shape[2])+" (expected 3)"
continue
# seems OK to continue with this file ...
# add the file name to the file_list in the global attributes
f.globalattr["file_list"].append(file_name)
# get the global attributes
for gattr in ncfile.ncattrs():
if gattr not in f.globalattr:
f.globalattr[gattr] = getattr(ncfile,gattr)
# if no variable list was passed to this routine, use all variables
if len(var_list)==0:
var_list=ncfile.variables.keys()
# load the data into the data structure
for var in var_list:
# get the name of the variable in the ACCESS file
access_name = qcutils.get_keyvaluefromcf(cf,["Variables",var],"access_name",default=var)
# check that the requested variable exists in the ACCESS file
if access_name in ncfile.variables.keys():
# check to see if the variable is already in the data structure
if access_name not in f.variables.keys():
f.variables[access_name] = ncfile.variables[access_name][:]
else:
f.variables[access_name] = numpy.concatenate((f.variables[access_name],ncfile.variables[access_name][:]),axis=0)
                # now copy the variable attributes
# create the variable attribute dictionary
if access_name not in f.varattr: f.varattr[access_name] = {}
# loop over the variable attributes
for this_attr in ncfile.variables[access_name].ncattrs():
                    # check to see if this attribute has already been added
if this_attr not in f.varattr[access_name].keys():
# add the variable attribute if it's not there already
f.varattr[access_name][this_attr] = getattr(ncfile.variables[access_name],this_attr)
else:
print "access_read_mfiles: ACCESS variable "+access_name+" not found in "+file_name
if access_name not in f.variables.keys():
f.variables[access_name] = makedummyseries(shape)
else:
f.variables[access_name] = numpy.concatenate((f.variables[access_name],makedummyseries(shape)),axis=0)
# close the netCDF file
ncfile.close()
# return with the data structure
return f
def makedummyseries(shape):
return numpy.ma.masked_all(shape)
# !!! end of function definitions !!!
# !!! start of main program !!!
# start the logger
logging.basicConfig(filename='access_concat.log',level=logging.DEBUG)
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
console.setFormatter(formatter)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
# get the control file name from the command line
#cf_name = sys.argv[1]
cf_name = qcio.get_controlfilename(path='../controlfiles',title='Choose a control file')
# get the control file contents
logging.info('Reading the control file')
cf = configobj.ConfigObj(cf_name)
# get stuff from the control file
logging.info('Getting control file contents')
site_list = cf["Sites"].keys()
var_list = cf["Variables"].keys()
# loop over sites
#site_list = ["AdelaideRiver"]
for site in site_list:
info = get_info_dict(cf,site)
logging.info("Processing site "+info["site_name"])
# instance the data structures
logging.info('Creating the data structures')
ds_60minutes = qcio.DataStructure()
# get a sorted list of files that match the mask in the control file
file_list = sorted(glob.glob(info["in_filename"]))
# read the netcdf files
logging.info('Reading the netCDF files for '+info["site_name"])
f = access_read_mfiles2(file_list,var_list=var_list)
# get the data from the netCDF files and write it to the 60 minute data structure
logging.info('Getting the ACCESS data')
get_accessdata(cf,ds_60minutes,f,info)
# set some global attributes
logging.info('Setting global attributes')
set_globalattributes(ds_60minutes,info)
# check for time gaps in the file
logging.info("Checking for time gaps")
if qcutils.CheckTimeStep(ds_60minutes):
qcutils.FixTimeStep(ds_60minutes)
# get the datetime in some different formats
logging.info('Getting xlDateTime and YMDHMS')
qcutils.get_xldatefromdatetime(ds_60minutes)
qcutils.get_ymdhmsfromdatetime(ds_60minutes)
#f.close()
# get derived quantities and adjust units
logging.info("Changing units and getting derived quantities")
# air temperature from K to C
changeunits_airtemperature(ds_60minutes)
# soil temperature from K to C
changeunits_soiltemperature(ds_60minutes)
# pressure from Pa to kPa
changeunits_pressure(ds_60minutes)
# wind speed from components
get_windspeedanddirection(ds_60minutes)
# relative humidity from temperature, specific humidity and pressure
get_relativehumidity(ds_60minutes)
# absolute humidity from temperature and relative humidity
get_absolutehumidity(ds_60minutes)
# soil moisture from kg/m2 to m3/m3
changeunits_soilmoisture(ds_60minutes)
# net radiation and upwelling short and long wave radiation
get_radiation(ds_60minutes)
# ground heat flux as residual
get_groundheatflux(ds_60minutes)
# Available energy
get_availableenergy(ds_60minutes)
if info["interpolate"]:
        # interpolate from 60 minute time step to 30 minute time step
logging.info("Interpolating data to 30 minute time step")
ds_30minutes = interpolate_to_30minutes(ds_60minutes)
# get instantaneous precipitation from accumulated precipitation
get_instantaneous_precip30(ds_30minutes)
# write to netCDF file
logging.info("Writing 30 minute data to netCDF file")
ncfile = qcio.nc_open_write(info["out_filename"])
qcio.nc_write_series(ncfile, ds_30minutes,ndims=1)
else:
# get instantaneous precipitation from accumulated precipitation
get_instantaneous_precip60(ds_60minutes)
# write to netCDF file
logging.info("Writing 60 minute data to netCDF file")
ncfile = qcio.nc_open_write(info["out_filename"])
qcio.nc_write_series(ncfile, ds_60minutes,ndims=1)
logging.info('All done!')
avg_line_length: 47.546774 | max_line_length: 132 | alphanum_fraction: 0.641915 | classes: 174 (score 0.005903) | generators: 111 (score 0.003765) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 8,246 (score 0.279725)
hexsha: 969bb241fcdc0d7ab1f0ae016a66c74578107f98 | size: 639 | ext: py | lang: Python
repo_path: AMAO/apps/Avaliacao/views/exibir.py | repo_name: arruda/amao | repo_head_hexsha: 83648aa2c408b1450d721b3072dc9db4b53edbb8 | licenses: ["MIT"] (identical across max_stars / max_issues / max_forks)
max_stars_count: 2 (2017-04-26T14:08:02.000Z to 2017-09-01T13:10:17.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from Aluno.views.utils import aluno_exist
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Avaliacao.models import *
from Aluno.models import *
@render_to('avaliacao/exibir.html')
@aluno_exist
def exibir(request,template_id):
aluno = request.user.aluno_set.get()
    avaliacao = Avaliacao.objects.get(pk=template_id)
    questoes = avaliacao.questoes.all()
return locals()
avg_line_length: 27.782609 | max_line_length: 57 | alphanum_fraction: 0.791862 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 233 (score 0.364632) | async_functions: 0 (score 0) | documentation: 46 (score 0.071987)
hexsha: 969bbfec8ddf57f2a21ea2c8536548a16473aafe | size: 2,771 | ext: py | lang: Python
repo_path: avem_theme/functions/sanitize.py | repo_name: mverleg/django-boots-plain-theme | repo_head_hexsha: 2355270293ddb3db4762470a43c72311bf11be07 | licenses: ["BSD-3-Clause"] (identical across max_stars / max_issues / max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.conf import settings
DEFAULT_NOSCR_ALLOWED_TAGS = 'strong:title b i em:title p:title h1:title h2:title h3:title h4:title h5:title ' + \
'div:title span:title ol ul li:title a:href:title:rel img:src:alt:title dl td:title dd:title' + \
'table:cellspacing:cellpadding thead tbody th tr td:title:colspan:rowspan br'
def sanitize_html(text, add_nofollow = False,
allowed_tags = getattr(settings, 'NOSCR_ALLOWED_TAGS', DEFAULT_NOSCR_ALLOWED_TAGS)):
"""
Cleans an html string:
* remove any not-whitelisted tags
- remove any potentially malicious tags or attributes
- remove any invalid tags that may break layout
    * escape any <, > and & from remaining text (by bs4); this prevents
> >> <<script>script> alert("Haha, I hacked your page."); </</script>script>\
* optionally add nofollow attributes to foreign anchors
* removes comments
    * (not implemented) optionally replace some tags with others
:arg text: Input html.
:arg allowed_tags: Argument should be in form 'tag2:attr1:attr2 tag2:attr1 tag3', where tags are allowed HTML
tags, and attrs are the allowed attributes for that tag.
:return: Sanitized html.
This is based on https://djangosnippets.org/snippets/1655/
"""
try:
from bs4 import BeautifulSoup, Comment, NavigableString
except ImportError:
raise ImportError('to use sanitize_html() and |noscr, you need to install beautifulsoup4')
""" function to check if urls are absolute
note that example.com/path/file.html is relative, officially and in Firefox """
is_relative = lambda url: not bool(urlparse(url).netloc)
""" regex to remove javascript """
#todo: what exactly is the point of this? is there js in attribute values?
#js_regex = compile(r'[\s]*(&#x.{1,7})?'.join(list('javascript')))
""" allowed tags structure """
allowed_tags = [tag.split(':') for tag in allowed_tags.split()]
allowed_tags = {tag[0]: tag[1:] for tag in allowed_tags}
""" create comment-free soup """
    soup = BeautifulSoup(text, 'html.parser')
for comment in soup.findAll(text = lambda text: isinstance(text, Comment)):
comment.extract()
for tag in soup.find_all(recursive = True):
if tag.name not in allowed_tags:
""" hide forbidden tags (keeping content) """
tag.hidden = True
else:
""" whitelisted tags """
tag.attrs = {attr: val for attr, val in tag.attrs.items() if attr in allowed_tags[tag.name]}
""" add nofollow to external links if requested """
if add_nofollow and tag.name == 'a' and 'href' in tag.attrs:
if not is_relative(tag.attrs['href']):
tag.attrs['rel'] = (tag.attrs['rel'] if 'rel' in tag.attrs else []) + ['nofollow']
""" return as unicode """
return soup.renderContents().decode('utf8')
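# Usage sketch (hypothetical input): non-whitelisted tags are hidden but
# their text is kept, and non-whitelisted attributes are dropped, so roughly
#   sanitize_html('<p onclick="x()">hi <script>bad()</script></p>')
# renders as '<p>hi bad()</p>'.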
avg_line_length: 37.958904 | max_line_length: 114 | alphanum_fraction: 0.714904 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 1,646 (score 0.594009)
hexsha: 969bdd00695dbe7a914d09d8df086240e345cdbb | size: 15,054 | ext: py | lang: Python
repo_path: plotDiff_log.py | repo_name: kmoskovtsev/Electrons-on-Helium-Scripts | repo_head_hexsha: b7325c64a62def9b963b66bfb078ee82553c2ed4 | licenses: ["Unlicense"] (identical across max_stars / max_issues / max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import gsd
import gsd.fl
import numpy as np
import os
import sys
import datetime
import time
import pickle
from shutil import copyfile
import inspect
import md_tools27 as md_tools
from multiprocessing import Pool
"""
This script plots diffusion vs Gamma in log(D)-log(Gamma) or log(D)-gamma format. The data from a .dat file is used, must be precalculated by plotDiff_pG_parallel.py.
Arguments: --cmfree, --cmfixed for the free-moving center of mass regime, and v_cm subtracted respectively.
--sf <subfolder>: subfolder to process (e.g. p32)
--NP <number>: number of subprocesses to use for parallelization. Very efficient acceleration by a factor of <number>.
"""
#Use LaTex for text
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
def read_log(path):
coulomb_status = ''
with open(path + '/log.txt', 'r') as f:
for i, line in enumerate(f.readlines()):
if i == 0:
timestamp = line.rstrip()
if line[:10] == '# Periodic':
words = line.split(' ')
p = int(words[9])
A = float(words[6])
if line[:4] == '# a ':
words = line.split(' ')
repeat_x = int(words[6])
repeat_y = int(words[9])
Np = 2*repeat_x*repeat_y
if line[:7] == '# Gamma':
words = line.split(' ')
dt = float(words[9])
if line[:9] == '# Coulomb':
words = line.split(' ')
coulomb_status = words[-1]
if line[:9] == '# N_therm':
words = line.split(' ')
snap_period = int(float(words[5]))
# T_gamma = 31.8265130646
if line[:9] == '# T_gamma':
words = line.split(' ')
T_gamma = float(words[3])
return {'timestamp': timestamp,'A':A, 'p':p, 'Np': Np, 'coulomb_status':coulomb_status, 'snap_period':snap_period,\
'dt':dt, 'T_gamma':T_gamma}
def OLS(x, y):
'''OLS: x must be a vertical two-dimensional array'''
X = np.hstack((np.reshape(np.ones(x.shape[0]), (-1,1)), x))#.transpose()
Xpr = X.transpose()
beta = np.dot(np.dot(np.linalg.inv(np.dot(Xpr, X)), Xpr), y)
#Estimate errors
sigma_sq = np.dot(y - np.dot(X, beta), y - np.dot(X, beta))/(len(y) - 1.)
sigma_beta_sq = sigma_sq*np.linalg.inv(np.dot(Xpr, X))
return beta, sigma_beta_sq # = [f_0, df/d(A^2)]
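# Usage sketch for OLS (hypothetical data): fit y = 2 + 3*x by the normal
# equations; beta comes back as [intercept, slope] and sigma_beta_sq as the
# estimated covariance of the coefficients. With x a column vector of
# shape (n, 1), e.g. arange(5.0).reshape(-1, 1), and y = 2 + 3*x[:, 0]:
#   beta, sigma_beta_sq = OLS(x, y)  # beta ~ [2.0, 3.0]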
def diffusion_from_transport_gsd(folder_path, f_name, center_fixed = True, useframes = -1):
"""
Diffusion constant D is calculated from 4Dt = <(r(t) - r(0))^2>, or 2D_x*t = <(x(t) - x(0))^2>.
The average is calculated over all particles and over different time origins.
Time origins go from 0 to n_frames/2, and t goes from 0 to n_frames/2. This way,
the data are always within the trajectory.
    center_fixed = True: eliminate overall motion of center of mass
return D_x, D_y
D_x, D_y diffusion for x- and y-coordinates;
"""
params = read_log(folder_path)
if folder_path[-1] != '/':
folder_path = folder_path + '/'
with gsd.fl.GSDFile(folder_path + f_name, 'rb') as f:
n_frames = f.nframes
box = f.read_chunk(frame=0, name='configuration/box')
        half_frames = int(n_frames/2) - 1  # slightly less than half to avoid out-of-bounds indexing
if useframes < 1 or useframes > half_frames:
useframes = half_frames
t_step = f.read_chunk(frame=0, name='configuration/step')
n_p = f.read_chunk(frame=0, name='particles/N')
x_sq_av = np.zeros(useframes)
y_sq_av = np.zeros(useframes)
for t_origin in range(n_frames - useframes - 1):
pos_0 = f.read_chunk(frame=t_origin, name='particles/position')
mean_pos_0 = np.mean(pos_0, axis = 0)
pos = pos_0
pos_raw = pos_0
for j_frame in range(useframes):
pos_m1 = pos
pos_m1_raw = pos_raw
pos_raw = f.read_chunk(frame=j_frame + t_origin, name='particles/position') - pos_0
pos = md_tools.correct_jumps(pos_raw, pos_m1, pos_m1_raw, box[0], box[1])
if center_fixed:
pos -= np.mean(pos, axis = 0) - mean_pos_0 #correct for center of mass movement
x_sq_av[j_frame] += np.mean(pos[:,0]**2)
y_sq_av[j_frame] += np.mean(pos[:,1]**2)
x_sq_av /= (n_frames - useframes - 1)
y_sq_av /= (n_frames - useframes - 1)
# OLS estimate for beta_x[0] + beta_x[1]*t = <|x_i(t) - x_i(0)|^2>
a = np.ones((useframes, 2)) # matrix a = ones(half_frames) | (0; dt; 2dt; 3dt; ...)
a[:,1] = params['snap_period']*params['dt']*np.cumsum(np.ones(useframes), axis = 0) - params['dt']
b_cutoff = int(useframes/10) #cutoff to get only linear part of x_sq_av, makes results a bit more clean
beta_x = np.linalg.lstsq(a[b_cutoff:, :], x_sq_av[b_cutoff:], rcond=-1)
beta_y = np.linalg.lstsq(a[b_cutoff:, :], y_sq_av[b_cutoff:], rcond=-1)
fig, ax = plt.subplots(1,1, figsize=(7,5))
ax.scatter(a[:,1], x_sq_av, label='$\\langle x^2\\rangle$')
ax.scatter(a[:,1], y_sq_av, label='$\\langle y^2\\rangle$')
ax.legend(loc=7)
ax.set_xlabel('$t$')
ax.set_ylabel('$\\langle r_i^2 \\rangle$')
if center_fixed:
center_fixed_str = 'cm_fixed'
else:
center_fixed_str = 'cm_free'
fig.savefig(folder_path + 'r2_diff_' + f_name +'_' + center_fixed_str + '.png')
plt.close('all')
D_x = beta_x[0][1]/2
D_y = beta_y[0][1]/2
print('D_x = {}'.format(D_x))
print('D_y = {}'.format(D_y))
return (D_x, D_y)
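# Usage sketch (hypothetical folder and file names): compute the in-plane
# diffusion constants from a GSD trajectory with center-of-mass drift removed:
#   D_x, D_y = diffusion_from_transport_gsd('diffusion_data/a32x32_G120',
#                                           'traj.gsd', center_fixed=True)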
def diffusion_helper(arg_dict):
return diffusion_from_transport_gsd(arg_dict['sf'], arg_dict['fname'], center_fixed=arg_dict['center_fixed'], useframes = arg_dict['useframes'])
def Teff_from_gsd(args):
fpath = args['sf'] + '/' + args['fname']
with gsd.fl.GSDFile(fpath, 'rb') as f:
n_frames = f.nframes
N = f.read_chunk(frame=0, name='particles/N')
v = np.zeros((n_frames, int(N), 2))
for t in range(n_frames):
v_t = f.read_chunk(frame=t, name='particles/velocity')
v[t, :, 0] = v_t[:,0]
v[t, :, 1] = v_t[:,1]
#v_cm = np.mean(v, axis=1)
#mean_v_cmx = np.mean(v_cm[:,0])
#print("mean v_cm = {}".format(mean_v_cmx))
#sigma_v_cmx = np.sqrt(np.mean((v_cm[:,0] - mean_v_cmx)**2))/np.sqrt(n_frames)
#print("error = {}".format(sigma_v_cmx))
#mean_v_cmy = np.mean(v_cm[:,1])
#print("mean v_cm_y = {}".format(mean_v_cmy))
#sigma_v_cmy = np.sqrt(np.mean((v_cm[:,1] - mean_v_cmy)**2))/np.sqrt(n_frames)
#print("error_y = {}".format(sigma_v_cmy))
#v_rel = np.swapaxes(v, 0,1) - v_cm
v_swap = np.swapaxes(v, 0,1)
#T_eff = 0.5*np.mean(v_rel[:,:,0]**2 + v_rel[:,:,1]**2, axis = 0)
T_eff = 0.5*np.mean(v_swap[:,:,0]**2 + v_swap[:,:,1]**2, axis = 0)
print('T_eff = {}'.format(np.mean(T_eff)))
return np.mean(T_eff)
def print_help():
print('This script plots diffusion vs Gamma for data taken in diffusion measurements.')
print('===========================================================')
print('Usage: python plotDiff_pG.py diffusion_data/a32x32_* [--options]')
print('This will process all folders that match mobility_data/a32x32_*')
print('===========================================================')
print('Options:')
print('\t--cmfixed will subtract the displacement of the center of mass in diffusion calculation (default behavior)')
    print('\t--cmfree will NOT subtract the displacement of the center of mass in diffusion calculation')
print('\t--showtext will print text info on the plots')
print('\t--NP N - will use N parallel processes in the calculations')
print('\t--sf [subfolder] - will only process the specified subfolder in all folders')
print('\t--help or -h will print this help')
## =======================================================================
# Units
unit_M = 9.10938356e-31 # kg, electron mass
unit_D = 1e-6 # m, micron
unit_E = 1.38064852e-23 # m^2*kg/s^2
unit_t = np.sqrt(unit_M*unit_D**2/unit_E) # = 2.568638150515e-10 s
epsilon_0 = 8.854187817e-12 # F/m = C^2/(J*m), vacuum permittivity
hbar = 1.0545726e-27/(unit_E*1e7)/unit_t
m_e = 9.10938356e-31/unit_M
unit_Q = np.sqrt(unit_E*1e7*unit_D*1e2) # Coulombs
unit_Qe = unit_Q/4.8032068e-10 # e, unit charge in units of elementary charge e
e_charge = 1/unit_Qe # electron charge in units of unit_Q
curr_fname = inspect.getfile(inspect.currentframe())
curr_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
##=======================================================================
# Make a list of folders we want to process
cm_fixed = True #default that can be changed by --cmfree
cm_fixed_str = 'cm_fixed'
show_text = False
Nproc = 1
selected_subfolders = []
folder_list = []
for i in range(len(sys.argv)):
if os.path.isdir(sys.argv[i]):
folder_list.append(sys.argv[i])
elif sys.argv[i] == '--sf':
try:
selected_subfolders.append(sys.argv[i+1])
except:
            raise RuntimeError('Could not recognize the value of --sf. argv={}'.format(sys.argv))
elif sys.argv[i] == '--showtext':
show_text = True
elif sys.argv[i] == '--GC':
gamma_c = float(sys.argv[i+1])
elif sys.argv[i] == '--help' or sys.argv[i] == '-h':
print_help()
exit()
try:
print('Gamma_c = {}'.format(gamma_c))
except:
raise RuntimeError('Gamma_c not specified. Use --GC argument.')
print('Selected subfolders: {}'.format(selected_subfolders))
# Make a list of subfolders p### in each folders
subfolder_lists = []
for folder in folder_list:
sf_list = []
for item in os.walk(folder):
# subfolder name and contained files
sf_list.append((item[0], item[2]))
sf_list = sf_list[1:]
subfolder_lists.append(sf_list)
##=======================================================================
for ifold, folder in enumerate(folder_list):
print('==========================================================')
print(folder)
print('==========================================================')
# Keep only selected subfolders in the list is there is selection
if len(selected_subfolders) > 0:
sf_lists_to_go = []
for isf, sf in enumerate(subfolder_lists[ifold]):
sf_words = sf[0].split('/')
if sf_words[-1] in selected_subfolders:
sf_lists_to_go.append(sf)
else:
sf_lists_to_go = subfolder_lists[ifold]
for isf, sf in enumerate(sf_lists_to_go):
sf_words = sf[0].split('/')
print(sf_words[-1])
if sf_words[-1][0] != 'p':
raise ValueError("Expected subfolder name to start with `p`, in {}".format(fname))
log_data = read_log(sf[0])
folder_name = folder.split('/')[-1]
if sf[0][-1] == '/':
sf[0] = sf[0][:-1]
sf_name = sf[0].split('/')[-1]
#Read Dx Dy vs Gamma from the .dat file
#DxDy_data = {'Dx_arr':Dx_arr, 'Dy_arr':Dy_arr, 'Dx_arr_gauss': Dx_arr*cm2s_convert, 'Dy_arr_gauss':Dy_arr*cm2s_convert, \
# 'gamma_arr':gamma_arr, 'gamma_eff_arr':gamma_eff_arr}
cm_fixed_str = 'cm_fixed'
with open(sf[0] + '/DxDy_data_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.dat', 'r') as ff:
DxDy_data = pickle.load(ff)
Dx_arr = DxDy_data['Dx_arr']
Dy_arr = DxDy_data['Dy_arr']
gamma_eff_arr = DxDy_data['gamma_eff_arr']
# Remove points where gamma > gamma_c
clip_ind = np.where(gamma_eff_arr < gamma_c)[0]
Dx_arr_clip = Dx_arr[clip_ind]
Dy_arr_clip = Dy_arr[clip_ind]
gamma_arr_clip = gamma_eff_arr[clip_ind]
print('Dx_arr = {}'.format(Dx_arr_clip))
print('Dy_arr = {}'.format(Dy_arr_clip))
## ======================================================================
## Plot Dx,Dy vs effective G (calculated from data rather then read from the log)
# in Gaussian units
labelfont = 28
tickfont = labelfont - 4
legendfont = labelfont - 4
cm2s_convert = unit_D**2/unit_t*1e4
fig, ax1 = plt.subplots(1,1, figsize=(7,6))
scatter1 = ax1.scatter(gamma_arr_clip, np.log(Dx_arr_clip*cm2s_convert), label='$D_\\perp$', color = 'green', marker='o')
ax1.set_xlabel('$\\Gamma$', fontsize=labelfont)
ax1.set_ylabel('$\\log(D/D_0)$', fontsize=labelfont)
scatter2 = ax1.scatter(gamma_arr_clip, np.log(Dy_arr_clip*cm2s_convert), label='$D_\\parallel$', color = 'red', marker='s')
#ax1.set_xlim([np.min(gamma_eff_arr) - 2, np.max(gamma_eff_arr) + 2])
ax1.legend(loc=1, fontsize=legendfont)
ax1.tick_params(labelsize= tickfont)
ax1.locator_params(nbins=6, axis='y')
formatter = mticker.ScalarFormatter(useMathText=True)
formatter.set_powerlimits((-3,2))
ax1.yaxis.set_major_formatter(formatter)
#Place text
if show_text:
text_list = ['$\\Gamma_c = {:.1f}$'.format(gamma_c)]
y_lim = ax1.get_ylim()
x_lim = ax1.get_xlim()
h = y_lim[1] - y_lim[0]
w = x_lim[1] - x_lim[0]
text_x = x_lim[0] + 0.5*w
text_y = y_lim[1] - 0.05*h
if type(text_list) == list:
n_str = len(text_list)
for i_fig in range(n_str):
ax1.text(text_x, text_y - 0.05*h*i_fig, text_list[i_fig])
elif type(text_list) == str:
ax1.text(text_x, text_y, text_list)
else:
raise TypeError('text_list must be a list of strings or a string')
#fig.patch.set_alpha(alpha=1)
plt.tight_layout()
fig.savefig(folder + '/' + 'DxDy_G_log_' + sf_name + '_' + folder_name + '_{:.2f}'.format(gamma_c) + '.pdf')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.png')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.eps')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.pdf')
plt.close('all')
avg_line_length: 43.634783 | max_line_length: 167 | alphanum_fraction: 0.561778 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 5,518 (score 0.366547)
hexsha: 969c409f7ce05c9902d3127ae8558f487796543d | size: 1,609 | ext: py | lang: Python
repo_path: backuppy/cli/put.py | repo_name: drmorr0/backuppy | repo_head_hexsha: ed6c60b049aaeb6107a073af2d81ccbe0a9abc59 | licenses: ["Apache-2.0"] (identical across max_stars / max_issues / max_forks)
max_stars_count: 4 (2021-08-20T02:51:59.000Z to 2022-01-06T18:18:53.000Z) | max_issues_count: 26 (2019-06-06T02:23:29.000Z to 2021-07-29T06:43:04.000Z) | max_forks_count: null
import argparse
import staticconf
from backuppy.args import add_name_arg
from backuppy.args import subparser
from backuppy.manifest import lock_manifest
from backuppy.manifest import Manifest
from backuppy.stores import get_backup_store
def main(args: argparse.Namespace) -> None:
staticconf.YamlConfiguration(args.config, flatten=False)
backup_set_config = staticconf.read('backups')[args.name]
staticconf.DictConfiguration(backup_set_config, namespace=args.name)
backup_store = get_backup_store(args.name)
if args.manifest:
manifest = Manifest(args.filename)
private_key_filename = backup_store.config.read('private_key_filename', default='')
lock_manifest(
manifest,
private_key_filename,
backup_store._save,
backup_store._load,
backup_store.options,
)
else:
with backup_store.unlock():
backup_store.save_if_new(args.filename)
HELP_TEXT = '''
WARNING: this command is considered "plumbing" and should be used for debugging or
exceptional cases only. You can render your backup store inaccessible if it is used
incorrectly. Use at your own risk!
'''
@subparser('put', HELP_TEXT, main)
def add_put_parser(subparser) -> None: # pragma: no cover
add_name_arg(subparser)
subparser.add_argument(
dest='filename',
help='File to store in the backup'
)
subparser.add_argument(
'--manifest',
action='store_true',
help='Save the file as manifest in the backup store (THIS CAN RENDER YOUR BACKUP UNUSABLE)',
)
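# Usage sketch (hypothetical CLI entry point; the 'put' subcommand, backup
# name and filename arguments come from the parsers defined above):
#   backuppy put <backup-name> <filename> [--manifest]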
avg_line_length: 30.942308 | max_line_length: 100 | alphanum_fraction: 0.705407 | classes: 0 (score 0) | generators: 0 (score 0) | decorators: 409 (score 0.254195) | async_functions: 0 (score 0) | documentation: 416 (score 0.258546)
hexsha: 969caf4ae896145b97abded195e8a8ae66368a89 | size: 6,349 | ext: py | lang: Python
repo_path: OnePy/feeds/feedbase.py | repo_name: sibuzu/OnePy | repo_head_hexsha: 464fca1c68a10f90ad128da3bfb03f05d2fc24bc | licenses: ["MIT"] (identical across max_stars / max_issues / max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
from abc import abstractmethod, ABCMeta
import csv
from datetime import datetime
import funcy as fy
from OnePy.barbase import Current_bar, Bar
from OnePy.event import events, MarketEvent
class FeedMetabase(metaclass=ABCMeta):
dtformat = "%Y-%m-%d %H:%M:%S"
tmformat = "%H:%M:%S"
timeindex = None
def __init__(self, instrument, fromdate, todate):
self.instrument = instrument
self.fromdate = fromdate
self.todate = todate
self.cur_bar = Current_bar()
# self.bar_dict = {self.instrument: []}
self.bar = Bar(instrument)
self.preload_bar_list = []
self.continue_backtest = True
        # the following attributes are initialized via the setters below
self._per_comm = None
self._commtype = None
self._mult = None
self._per_margin = None
self._executemode = None
self._trailingstop_executemode = None
self._iteral_buffer = None
self._buffer_days = None
self._iteral_data = None
def set_per_comm(self, value):
self._per_comm = value
def set_commtype(self, value):
self._commtype = value
def set_mult(self, value):
self._mult = value
def set_per_margin(self, value):
self._per_margin = value
def set_executemode(self, value):
self._executemode = value
def set_trailingstop_executemode(self, value):
self._trailingstop_executemode = value
def set_iteral_buffer(self, value):
self._iteral_buffer = value
def set_buffer_days(self, value):
self._buffer_days = value
@property
def per_comm(self):
return self._per_comm
@property
def commtype(self):
return self._commtype
@property
def mult(self):
return self._mult
@property
def per_margin(self):
return self._per_margin
@property
def executemode(self):
return self._executemode
@property
def trailingstop_executemode(self):
return self._trailingstop_executemode
@property
def iteral_buffer(self):
return self._iteral_buffer
@property
def buffer_days(self):
return self._buffer_days
@abstractmethod
def load_data(self):
"""读取数据"""
raise NotImplementedError("load_data shold be overrided")
@abstractmethod
def get_new_bar(self):
"""获得新行情"""
raise NotImplementedError("get_new_bar shold be overrided")
@abstractmethod
def preload(self):
"""为indicator缓存数据"""
raise NotImplementedError("preload shold be overrided")
def run_once(self):
"""先load一次,以便cur_bar能够缓存两条数据"""
self._iteral_data = self.load_data()
self.get_new_bar()
self.preload() # preload for indicator
def __update_bar(self):
"""更新行情"""
self.bar.set_instrument(self.instrument)
self.bar.add_new_bar(self.cur_bar.cur_data)
def start(self):
pass
def prenext(self):
self.get_new_bar()
def next(self):
self.__update_bar()
events.put(MarketEvent(self))
class CSVFeedBase(FeedMetabase):
"""自动识别CSV数据中有open,high,low,close,volume数据,但要说明日期格式"""
dtformat = "%Y-%m-%d %H:%M:%S"
tmformat = "%H:%M:%S"
timeindex = None
def __init__(self, datapath, instrument, fromdate=None, todate=None):
super(CSVFeedBase, self).__init__(instrument, fromdate, todate)
self.datapath = datapath
self.__set_date()
def __set_date(self):
"""将日期转化为datetime对象"""
if self.fromdate:
self.fromdate = datetime.strptime(self.fromdate, "%Y-%m-%d")
if self.todate:
self.todate = datetime.strptime(self.todate, "%Y-%m-%d")
def __set_dtformat(self, bar):
"""识别日期"""
date = bar["date"]
dt = "%Y-%m-%d %H:%M:%S"
if self.timeindex:
date = datetime.strptime(str(date), self.dtformat).strftime("%Y-%m-%d")
return date + " " + bar[self.timeindex.lower()]
else:
return datetime.strptime(str(date), self.dtformat).strftime(dt)
def get_new_bar(self):
def __update():
new_bar = next(self._iteral_data)
new_bar = fy.walk_keys(lambda x: x.lower(), new_bar)
new_bar["date"] = self.__set_dtformat(new_bar)
for i in new_bar:
try:
                    new_bar[i] = float(new_bar[i])  # convert numeric fields to float
except ValueError:
pass
return new_bar
try:
new_bar = __update()
            # date-range filtering
dt = "%Y-%m-%d %H:%M:%S"
if self.fromdate:
while datetime.strptime(new_bar["date"], dt) < self.fromdate:
new_bar = __update()
if self.todate:
while datetime.strptime(new_bar["date"], dt) > self.todate:
raise StopIteration
self.cur_bar.add_new_bar(new_bar)
except StopIteration:
self.continue_backtest = False # stop backtest
def load_data(self):
return csv.DictReader(open(self.datapath))
def preload(self):
"""
        Runs only once: load every bar dated before fromdate into
        preload_bar_list. If fromdate is not set, nothing needs to be preloaded.
"""
self.set_iteral_buffer(self.load_data()) # for indicator
def _update():
bar = next(self.iteral_buffer)
bar = fy.walk_keys(lambda x: x.lower(), bar)
bar["date"] = self.__set_dtformat(bar)
for i in bar:
try:
                    bar[i] = float(bar[i])  # convert numeric fields to float
except ValueError:
pass
return bar
try:
bar = _update()
            # date-range filtering
dt = "%Y-%m-%d %H:%M:%S"
if self.fromdate:
while datetime.strptime(bar["date"], dt) < self.fromdate:
bar = _update()
self.preload_bar_list.append(bar)
else:
                    self.preload_bar_list.pop(-1)  # verified fix: drop the duplicated last bar
elif self.fromdate is None:
pass
else:
raise SyntaxError("Catch a Bug!")
except IndexError:
pass
except StopIteration:
print("???")
self.preload_bar_list.reverse()
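
# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal driver loop, assuming a CSV with date/open/high/low/close/volume
# columns; the file name and instrument below are placeholders.
if __name__ == "__main__":
    feed = CSVFeedBase("EUR_USD.csv", "EUR_USD",
                       fromdate="2017-01-01", todate="2017-06-30")
    feed.run_once()              # prime cur_bar and preload indicator history
    while feed.continue_backtest:
        feed.next()              # publish the current bar as a MarketEvent
        feed.prenext()           # advance to the next bar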
| 26.902542 | 83 | 0.575839 | 6,431 | 0.970424 | 0 | 0 | 968 | 0.146069 | 0 | 0 | 1,001 | 0.151049 |
969ce91c3c9eb7731f2a4d716dfbab07efce7259
| 4,912 |
py
|
Python
|
conanfile.py
|
hsdk123/corrade
|
0d624d1f980f0376b2227356759f1d6e8761e6a3
|
[
"MIT",
"Unlicense"
] | null | null | null |
conanfile.py
|
hsdk123/corrade
|
0d624d1f980f0376b2227356759f1d6e8761e6a3
|
[
"MIT",
"Unlicense"
] | null | null | null |
conanfile.py
|
hsdk123/corrade
|
0d624d1f980f0376b2227356759f1d6e8761e6a3
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
from conans.errors import ConanException
import os
import shutil
def sort_libs(correct_order, libs, lib_suffix='', reverse_result=False):
# Add suffix for correct string matching
    correct_order[:] = [s + lib_suffix for s in correct_order]
result = []
for expectedLib in correct_order:
for lib in libs:
if expectedLib == lib:
result.append(lib)
if reverse_result:
# Linking happens in reversed order
result.reverse()
return result
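
# Illustrative self-check for sort_libs (added; hypothetical debug-suffixed
# names); runs only when this file is executed directly, never during a build.
if __name__ == "__main__":
    _ordered = sort_libs(correct_order=["CorradeUtility", "CorradeTestSuite"],
                         libs=["CorradeTestSuite-d", "CorradeUtility-d"],
                         lib_suffix="-d", reverse_result=True)
    # dependents first, dependencies last -- the order a single-pass linker needs
    assert _ordered == ["CorradeTestSuite-d", "CorradeUtility-d"]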
class CorradeConan(ConanFile):
name = "corrade"
version = "2019.10"
description = "Corrade is a multiplatform utility library written \
in C++11/C++14. It's used as a base for the Magnum \
graphics engine, among other things."
# topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
topics = ("conan", "corrad", "magnum", "filesystem", "console", "environment", "os")
url = "https://github.com/mosra/corrade"
homepage = "https://magnum.graphics/corrade"
author = "helmesjo <[email protected]>"
license = "MIT" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
exports = ["COPYING"]
exports_sources = ["CMakeLists.txt", "src/*", "package/conan/*", "modules/*"]
generators = "cmake"
short_paths = True # Some folders go out of the 260 chars path length scope (windows)
# Options may need to change depending on the packaged library.
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_deprecated": [True, False],
"with_interconnect": [True, False],
"with_pluginmanager": [True, False],
"with_rc": [True, False],
"with_testsuite": [True, False],
"with_utility": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"build_deprecated": True,
"with_interconnect": True,
"with_pluginmanager": True,
"with_rc": True,
"with_testsuite": True,
"with_utility": True,
}
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
if self.settings.compiler == 'Visual Studio' and int(self.settings.compiler.version.value) < 14:
raise ConanException("{} requires Visual Studio version 14 or greater".format(self.name))
def source(self):
# Wrap the original CMake file to call conan_basic_setup
shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
shutil.move(os.path.join("package", "conan", "CMakeLists.txt"), "CMakeLists.txt")
def _configure_cmake(self):
cmake = CMake(self)
def add_cmake_option(option, value):
var_name = "{}".format(option).upper()
value_str = "{}".format(value)
var_value = "ON" if value_str == 'True' else "OFF" if value_str == 'False' else value_str
cmake.definitions[var_name] = var_value
for option, value in self.options.items():
add_cmake_option(option, value)
# Corrade uses suffix on the resulting 'lib'-folder when running cmake.install()
# Set it explicitly to empty, else Corrade might set it implicitly (eg. to "64")
add_cmake_option("LIB_SUFFIX", "")
add_cmake_option("BUILD_STATIC", not self.options.shared)
if self.settings.compiler == 'Visual Studio':
add_cmake_option("MSVC2015_COMPATIBILITY", int(self.settings.compiler.version.value) == 14)
add_cmake_option("MSVC2017_COMPATIBILITY", int(self.settings.compiler.version.value) == 17)
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("COPYING", dst="licenses", src=".")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
# See dependency order here: https://doc.magnum.graphics/magnum/custom-buildsystems.html
allLibs = [
#1
"CorradeUtility",
"CorradeContainers",
#2
"CorradeInterconnect",
"CorradePluginManager",
"CorradeTestSuite",
]
# Sort all built libs according to above, and reverse result for correct link order
suffix = '-d' if self.settings.build_type == "Debug" else ''
builtLibs = tools.collect_libs(self)
self.cpp_info.libs = sort_libs(correct_order=allLibs, libs=builtLibs, lib_suffix=suffix, reverse_result=True)
| 36.932331 | 125 | 0.631311 | 4,302 | 0.875814 | 0 | 0 | 0 | 0 | 0 | 0 | 1,964 | 0.399837 |
969d12ed4be6b78b744d2cdccc1f9a2142ee0a79
| 416 |
py
|
Python
|
tests/test_null.py
|
StephenNneji/python-fastjsonschema
|
e7441c2efa40f5ac099a7788b8dafe6942146cf0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_null.py
|
StephenNneji/python-fastjsonschema
|
e7441c2efa40f5ac099a7788b8dafe6942146cf0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_null.py
|
StephenNneji/python-fastjsonschema
|
e7441c2efa40f5ac099a7788b8dafe6942146cf0
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from fastjsonschema import JsonSchemaException
exc = JsonSchemaException('data must be null', value='{data}', name='data', definition='{definition}', rule='type')
@pytest.mark.parametrize('value, expected', [
(0, exc),
(None, None),
(True, exc),
('abc', exc),
([], exc),
({}, exc),
])
def test_null(asserter, value, expected):
asserter({'type': 'null'}, value, expected)
| 24.470588 | 115 | 0.632212 | 0 | 0 | 0 | 0 | 235 | 0.564904 | 0 | 0 | 87 | 0.209135 |
969d7989d597d987141a08864cd0542293d4eb73
| 644 |
py
|
Python
|
server/api/python/comprehension.py
|
DigitalCompanion/trustometer
|
acd7a2ab4927195ee5455d3274efff9f76e1395f
|
[
"MIT"
] | 8 |
2018-10-27T14:47:09.000Z
|
2019-06-13T15:11:04.000Z
|
server/api/python/comprehension.py
|
DigitalCompanion/trustometer
|
acd7a2ab4927195ee5455d3274efff9f76e1395f
|
[
"MIT"
] | 3 |
2020-08-18T12:17:05.000Z
|
2020-08-18T12:17:46.000Z
|
server/api/python/comprehension.py
|
futurityab/trustometer
|
acd7a2ab4927195ee5455d3274efff9f76e1395f
|
[
"MIT"
] | 3 |
2019-06-13T15:06:09.000Z
|
2020-05-09T08:23:49.000Z
|
import boto3
import json
def init(ACCESS_KEY, SECRET_KEY):
return boto3.client(service_name='comprehend', region_name="us-west-2", aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
def get_entities(client, title):
return client.detect_entities(Text=title, LanguageCode='en').get('Entities')
def get_key_phrases(client, title):
return client.detect_key_phrases(Text=title, LanguageCode='en').get('KeyPhrases')
def get_sentiment(client, title):
sentiment = client.detect_sentiment(Text=title, LanguageCode='en')
return [sentiment.get('Sentiment').title(), sentiment.get('SentimentScore')]
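
# --- Illustrative usage sketch (added); the key values are placeholders and
# the call requires valid AWS credentials for Comprehend in us-west-2. ---
if __name__ == "__main__":
    client = init("YOUR_ACCESS_KEY", "YOUR_SECRET_KEY")
    title = "Local researchers publish breakthrough battery study"
    print(get_entities(client, title))      # list of detected entities
    print(get_key_phrases(client, title))   # list of key phrases
    print(get_sentiment(client, title))     # e.g. ['Positive', {...score dict...}]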
| 35.777778 | 140 | 0.751553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.130435 |
969d8f8281712317dc2a93dac04a3282f946abb9
| 394 |
py
|
Python
|
checkrightrotate.py
|
parasshaha/Python-
|
6c0bdae04cf74aa2742585ebcedb2274075fa644
|
[
"Unlicense"
] | null | null | null |
checkrightrotate.py
|
parasshaha/Python-
|
6c0bdae04cf74aa2742585ebcedb2274075fa644
|
[
"Unlicense"
] | null | null | null |
checkrightrotate.py
|
parasshaha/Python-
|
6c0bdae04cf74aa2742585ebcedb2274075fa644
|
[
"Unlicense"
] | null | null | null |
def checkrot(str1, str2):
    # str2 is a rotation of str1 iff both strings have the same length and
    # str2 occurs inside str1 concatenated with itself.
    if len(str1) == len(str2):
        str3 = str1 + str1
        if str2 in str3:
            print("It is a right rotation")
        else:
            print("It is not a right rotation")
    else:
        print("Invalid input: the strings differ in length")
def main():
str1=input(" Enter the first String : ")
str2=input("Enter the second String: ")
checkrot(str1,str2)
if __name__ == '__main__':
main()
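
# Example (illustrative): checkrot("abcde", "deabc") prints "It is a right
# rotation", while checkrot("abcde", "edcba") reports that it is not.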
| 19.7 | 45 | 0.664975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.36802 |
969e155793ce7396e91744cc2b8d9f9238771262
| 6,781 |
py
|
Python
|
bot.py
|
Fido2603/WatchDog
|
4607b374fdd29d2c82ea9a2a4a8de10f2ed3a94f
|
[
"MIT"
] | null | null | null |
bot.py
|
Fido2603/WatchDog
|
4607b374fdd29d2c82ea9a2a4a8de10f2ed3a94f
|
[
"MIT"
] | null | null | null |
bot.py
|
Fido2603/WatchDog
|
4607b374fdd29d2c82ea9a2a4a8de10f2ed3a94f
|
[
"MIT"
] | 3 |
2018-11-12T14:02:57.000Z
|
2020-04-13T21:48:02.000Z
|
import discord
from discord.ext import commands
from discord import Embed, Permissions
from Util import logger
import os
import database
# Import the config
try:
import config
except ImportError:
print("Couldn't import config.py! Exiting!")
exit()
# Import a monkey patch, if that exists
try:
import monkeyPatch
except ImportError:
print("DEBUG: No Monkey patch found!")
bot = commands.Bot(command_prefix=os.getenv('prefix'), description='Well boys, we did it. Baddies are no more.',
activity=discord.Game(name="with the banhammer"))
startup_extensions = ["essentials",
"moderation",
"info",
"listenerCog"]
# Function to update the database on startup
async def updateDatabase():
# Fetch bans from the banlistguild, and smack them into the db
banguild = bot.get_guild(int(os.getenv('banlistguild')))
ban_list = await banguild.bans()
for BanEntry in ban_list:
if BanEntry.reason is not None:
if "not global" in BanEntry.reason.lower():
continue
if not database.isBanned(BanEntry.user.id):
database.newBan(userid=BanEntry.user.id, discordtag=BanEntry.user.name + "#" + BanEntry.user.discriminator,
avatarurl=BanEntry.user.avatar_url)
# Make sure appeal guild is set up properly
async def checkAppealGuild():
appealguild = bot.get_guild(int(os.getenv('appealguild')))
appealchannel = None
for channel in appealguild.channels:
if channel.name == "appeal-here":
appealchannel = channel
break
if appealchannel is None:
await logger.log("No appealchannel found! Trying to create one!", bot, "INFO")
try:
overwrites = {
appealguild.default_role: discord.PermissionOverwrite(read_messages=True, send_messages=False),
appealguild.me: discord.PermissionOverwrite(read_messages=True, send_messages=True,
manage_messages=True, embed_links=True,
add_reactions=True)
}
appealchannel = await appealguild.create_text_channel("appeal-here", overwrites=overwrites)
except Exception as e:
await logger.log("Could not create an appeal channel! Returning! - " + str(e), bot, "ERROR")
return
history = await appealchannel.history(limit=5).flatten()
# check if no messages
if len(history) == 0: # no messages
# Sending the message
await logger.log("Sending the appeal channel message", bot, "INFO")
message = await appealchannel.send(content="Hello there! Welcome to the WatchDog Appeal Server!\n" +
"\nTo begin your appeal process, please click this reaction!")
# now we add a reaction to the message
await message.add_reaction("✅")
@bot.event
async def on_connect():
logger.logDebug("----------[LOGIN SUCESSFULL]----------", "INFO")
logger.logDebug(" Username: " + bot.user.name, "INFO")
logger.logDebug(" UserID: " + str(bot.user.id), "INFO")
logger.logDebug("--------------------------------------", "INFO")
print("\n")
logger.logDebug("Updating the database!", "INFO")
await updateDatabase()
logger.logDebug("Done updating the database!", "INFO")
print("\n")
# Ban appeal server setup
await checkAppealGuild()
# Bot done starting up
await logger.log("Bot startup done!", bot, "INFO", "Bot startup done.\n")
@bot.event
async def on_ready():
# Bot startup is now done...
logger.logDebug("WatchDog has (re)connected to Discord!")
@bot.event
async def on_command_error(ctx: commands.Context, error):
if isinstance(error, commands.NoPrivateMessage):
await ctx.send("This command cannot be used in private messages")
elif isinstance(error, commands.BotMissingPermissions):
await ctx.send(
embed=Embed(color=discord.Color.red(), description="I need the permission `Ban Members` to sync the bans!"))
elif isinstance(error, commands.MissingPermissions):
await ctx.send(
embed=Embed(color=discord.Color.red(), description="You are missing the permission `Ban Members`!"))
elif isinstance(error, commands.CheckFailure):
return
elif isinstance(error, commands.CommandOnCooldown):
return
elif isinstance(error, commands.MissingRequiredArgument):
return
elif isinstance(error, commands.BadArgument):
return
elif isinstance(error, commands.CommandNotFound):
return
else:
await ctx.send("Something went wrong while executing that command... Sorry!")
await logger.log("%s" % error, bot, "ERROR")
@bot.event
async def on_guild_join(guild):
await logger.log("Joined a new guild (`%s` - `%s`)" % (guild.name, guild.id), bot, "INFO")
# Check the bot's ban permission
    if guild.get_member(bot.user.id).guild_permissions.ban_members:
# Get bans from db
bans = database.getBans()
# make new list for userid in bans, if member is in guild
ban_members = [userid for userid in bans if guild.get_member(userid)]
logger.logDebug(str(ban_members))
# Ban the found users
for userid in ban_members:
await guild.ban(bot.get_user(int(userid)), reason="WatchDog - Global Ban")
logger.logDebug("Banned user in guild hahayes")
@bot.event
async def on_message(message: discord.Message):
if message.author.bot:
return
ctx: commands.Context = await bot.get_context(message)
if message.content.startswith(os.getenv('prefix')):
if ctx.command is not None:
if isinstance(message.channel, discord.DMChannel):
await logger.log("`%s` (%s) used the `%s` command in their DM's" % (
ctx.author.name, ctx.author.id, ctx.invoked_with), bot, "INFO")
else:
await logger.log("`%s` (%s) used the `%s` command in the guild `%s` (%s), in the channel `%s` (%s)" % (
ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name, ctx.guild.id, ctx.channel.name,
ctx.channel.id), bot, "INFO")
await bot.invoke(ctx)
else:
return
if __name__ == '__main__':
logger.setup_logger()
# Load extensions
for extension in startup_extensions:
try:
bot.load_extension(f"cogs.{extension}")
except Exception as e:
logger.logDebug(f"Failed to load extension {extension}. - {e}", "ERROR")
bot.run(os.getenv('token'))
| 38.748571 | 120 | 0.627636 | 0 | 0 | 0 | 0 | 3,424 | 0.504791 | 5,574 | 0.82176 | 1,897 | 0.27967 |
969e2f3ff112021f4be66464e152ec69c802c02b
| 320 |
py
|
Python
|
connect/eaas/exceptions.py
|
bdjilka/connect-extension-runner
|
7930b34dae92addb3807984fd553debc2b78ac23
|
[
"Apache-2.0"
] | null | null | null |
connect/eaas/exceptions.py
|
bdjilka/connect-extension-runner
|
7930b34dae92addb3807984fd553debc2b78ac23
|
[
"Apache-2.0"
] | null | null | null |
connect/eaas/exceptions.py
|
bdjilka/connect-extension-runner
|
7930b34dae92addb3807984fd553debc2b78ac23
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of the Ingram Micro CloudBlue Connect EaaS Extension Runner.
#
# Copyright (c) 2021 Ingram Micro. All Rights Reserved.
#
class EaaSError(Exception):
pass
class MaintenanceError(EaaSError):
pass
class CommunicationError(EaaSError):
pass
class StopBackoffError(EaaSError):
pass
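
# Illustrative demonstration of the hierarchy (added): every specific error
# can be caught through the shared EaaSError base class.
if __name__ == "__main__":
    try:
        raise MaintenanceError()
    except EaaSError as exc:
        print(type(exc).__name__)  # -> MaintenanceError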
| 16 | 80 | 0.74375 | 167 | 0.521875 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.43125 |
969ea553ff4cdd6978d9da12725a1d04afc89e38
| 354 |
py
|
Python
|
tests/i18n/patterns/urls/wrong_namespace.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/i18n/patterns/urls/wrong_namespace.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/i18n/patterns/urls/wrong_namespace.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
app_name = 'account'
urlpatterns = i18n_patterns(
url(_(r'^register/$'), view, name='register'),
)
| 29.5 | 56 | 0.757062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.127119 |
969ea9cfc35b7e706cf517d502bb8ce349a6ac08
| 2,004 |
py
|
Python
|
gsheetsdb/url.py
|
JagritiG/gsheet-db-api-plus
|
620247bb7ce36b327fc91feab8b48fc70e8c158f
|
[
"MIT"
] | null | null | null |
gsheetsdb/url.py
|
JagritiG/gsheet-db-api-plus
|
620247bb7ce36b327fc91feab8b48fc70e8c158f
|
[
"MIT"
] | null | null | null |
gsheetsdb/url.py
|
JagritiG/gsheet-db-api-plus
|
620247bb7ce36b327fc91feab8b48fc70e8c158f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
def get_url(url, headers=0, gid=0, sheet=None):
parts = parse.urlparse(url)
if parts.path.endswith('/edit'):
path = parts.path[:-len('/edit')]
else:
path = parts.path
path = '/'.join((path.rstrip('/'), 'gviz/tq'))
qs = parse.parse_qs(parts.query)
if 'headers' in qs:
headers = int(qs['headers'][-1])
if 'gid' in qs:
gid = qs['gid'][-1]
if 'sheet' in qs:
sheet = qs['sheet'][-1]
if parts.fragment.startswith('gid='):
gid = parts.fragment[len('gid='):]
args = OrderedDict()
if headers > 0:
args['headers'] = headers
if sheet is not None:
args['sheet'] = sheet
else:
args['gid'] = gid
params = parse.urlencode(args)
return parse.urlunparse(
(parts.scheme, parts.netloc, path, None, params, None))
def extract_url(sql):
try:
url = parse_sql(sql)['from']
except pyparsing.ParseException:
# fallback to regex to extract from
match = FROM_REGEX.search(sql)
if match:
return match.group(1).strip('"')
return
while isinstance(url, dict):
url = url['value']['from']
return url
# Function to extract url from any sql statement
def url_from_sql(sql):
"""
    Extract the first spreadsheet URL from an arbitrary SQL statement.

    :param sql: SQL string that may contain a quoted spreadsheet URL
    :return: the URL if one is found, otherwise None
"""
try:
parsed_sql = re.split('[( , " )]', str(sql))
for i, val in enumerate(parsed_sql):
if val.startswith('https:'):
sql_url = parsed_sql[i]
return sql_url
except Exception as e:
print("Error: {}".format(e))
| 21.782609 | 63 | 0.597804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.168663 |
969f792ffed604b2cbdb6448c4f912247a60d7f2
| 5,676 |
py
|
Python
|
weechat/.weechat/python/autoload/weechat_bot2human.py
|
CoelacanthusHex/dotfiles
|
e9cc372ba1c5d90e29fdcb1a81c8eb06b6f83bc5
|
[
"Unlicense"
] | 10 |
2021-01-22T08:40:51.000Z
|
2022-01-01T12:14:37.000Z
|
weechat/.weechat/python/autoload/weechat_bot2human.py
|
CoelacanthusHex/dotfiles
|
e9cc372ba1c5d90e29fdcb1a81c8eb06b6f83bc5
|
[
"Unlicense"
] | 1 |
2020-04-18T16:47:51.000Z
|
2020-05-20T20:46:30.000Z
|
weechat/.weechat/python/autoload/weechat_bot2human.py
|
ayalhw/dotfiles
|
c43d0d8543e62a7196c3eddadf66df045bdbbdeb
|
[
"Unlicense"
] | 1 |
2021-10-02T12:02:01.000Z
|
2021-10-02T12:02:01.000Z
|
# -*- coding:utf-8 -*-
# Bot2Human
#
# Replaces messages from bots to humans
# typically used in channels that are connected with other IMs using bots
#
# For example, if a bot send messages from XMPP is like `[nick] content`,
# weechat would show `bot | [nick] content` which looks bad; this script
# make weecaht display `nick | content` so that the messages looks like
# normal IRC message
#
# Options
#
# plugins.var.python.bot2human.bot_nicks
# space seperated nicknames to forwarding bots
# example: teleboto toxsync tg2arch
#
# plugins.var.python.nick_content_re.X
# X is a 0-2 number. This options specifies regex to match nickname
# and content. Default regexes are r'\[(?P<nick>.+?)\] (?P<text>.*)',
# r'\((?P<nick>.+?)\) (?P<text>.*)', and r'<(?P<nick>.+?)> (?P<text>.*)'
#
# plugins.var.python.nick_re_count
# Number of rules defined
#
# Changelog:
# 0.3.0: Add relayed nicks into nicklist, enabling completion
# 0.2.2: Support ZNC timestamp
# 0.2.1: Color filtering only applies on nicknames
# More than 3 nick rules can be defined
# 0.2.0: Filter mIRC color and other control seq from message
# 0.1.1: Bug Fixes
# 0.1: Initial Release
#
import weechat as w
import re
SCRIPT_NAME = "bot2human"
SCRIPT_AUTHOR = "Justin Wong & Hexchain & quietlynn"
SCRIPT_DESC = "Replace IRC message nicknames with regex match from chat text"
SCRIPT_VERSION = "0.3.0"
SCRIPT_LICENSE = "GPLv3"
DEFAULTS = {
'nick_re_count': '4',
'nick_content_re.0': r'\[(?:\x03[0-9,]+)?(?P<nick>[^:]+?)\x0f?\] (?P<text>.*)',
'nick_content_re.1': r'(?:\x03[0-9,]+)?\[(?P<nick>[^:]+?)\]\x0f? (?P<text>.*)',
'nick_content_re.2': r'\((?P<nick>[^:]+?)\) (?P<text>.*)',
'nick_content_re.3': r'<(?:\x03[0-9,]+)?(?P<nick>[^:]+?)\x0f?> (?P<text>.*)',
'bot_nicks': "",
'znc_ts_re': r'\[\d\d:\d\d:\d\d\]\s+',
}
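# For instance, with the defaults above an IRC payload "[Alice] hello" relayed
# by a nick listed in bot_nicks matches nick_content_re.0 with nick="Alice"
# and text="hello", so weechat shows "Alice | hello" instead of
# "telebot | [Alice] hello".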
CONFIG = {
'nick_re_count': -1,
'nick_content_res': [],
'bot_nicks': [],
'znc_ts_re': None,
}
def parse_config():
for option, default in DEFAULTS.items():
# print(option, w.config_get_plugin(option))
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default)
CONFIG['nick_re_count'] = int(w.config_get_plugin('nick_re_count'))
CONFIG['bot_nicks'] = w.config_get_plugin('bot_nicks').split(' ')
for i in range(CONFIG['nick_re_count']):
option = "nick_content_re.{}".format(i)
CONFIG['nick_content_res'].append(
re.compile(w.config_get_plugin(option))
)
CONFIG['znc_ts_re'] = re.compile(w.config_get_plugin('znc_ts_re'))
def config_cb(data, option, value):
parse_config()
return w.WEECHAT_RC_OK
def filter_color(msg):
# filter \x01 - \x19 control seq
# filter \x03{foreground}[,{background}] color string
return re.sub(r'\x03[\d,]+|[\x00-\x1f]', '', msg)
def msg_cb(data, modifier, modifier_data, string):
# w.prnt("blue", "test_msg_cb " + string)
parsed = w.info_get_hashtable("irc_message_parse", {'message': string})
# w.prnt("", "%s" % parsed)
matched = False
for bot in CONFIG['bot_nicks']:
# w.prnt("", "%s, %s" % (parsed["nick"], bot))
if parsed['nick'] == bot:
t = parsed.get(
'text',
parsed["arguments"][len(parsed["channel"])+2:]
)
# ZNC timestamp
ts = ""
mts = CONFIG['znc_ts_re'].match(t)
if mts:
ts = mts.group()
t = t[mts.end():]
for r in CONFIG['nick_content_res']:
# parsed['text'] only exists in weechat version >= 1.3
m = r.match(t)
if not m:
continue
nick, text = m.group('nick'), m.group('text')
nick = filter_color(nick)
nick = re.sub(r'\s', '_', nick)
parsed['host'] = parsed['host'].replace(bot, nick)
parsed['text'] = ts + text
matched = True
buffer = w.info_get("irc_buffer", "%s,%s" % (modifier_data, parsed['channel']))
add_nick(nick, buffer, "")
break
if matched:
break
else:
return string
return ":{host} {command} {channel} :{text}".format(**parsed)
def add_nick(name, buffer, group):
group = get_nick_group(buffer, 'bot2human')
if not w.nicklist_search_nick(buffer, group, name):
w.nicklist_add_nick(buffer, group, name, "weechat.color.nicklist_group", "~", "lightgreen", 1)
return w.WEECHAT_RC_OK
def get_nick_group(buffer, group_name):
group = w.nicklist_search_group(buffer, "", group_name)
if not group:
group = w.nicklist_add_group(buffer, "", group_name, "weechat.color.nicklist_group", 1)
return group
def nicklist_nick_added_cb(data, signal, buffer):
group = get_nick_group(buffer, 'bot2human')
return w.WEECHAT_RC_OK
if __name__ == '__main__':
w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", "")
parse_config()
w.hook_modifier("irc_in_privmsg", "msg_cb", "")
w.hook_config("plugins.var.python."+SCRIPT_NAME+".*", "config_cb", "")
# Glowing Bear will choke if a nick is added into a newly created group.
# As a workaround, we add the group as soon as possible BEFORE Glowing Bear loads groups,
# and we must do that AFTER EVERY nicklist reload. nicklist_nick_added satisfies both.
# TODO(quietlynn): Find better signals to hook instead.
w.hook_signal("nicklist_nick_added", "nicklist_nick_added_cb", "")
# vim: ts=4 sw=4 sts=4 expandtab
| 33.988024 | 102 | 0.604651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,928 | 0.515856 |
96a3b55fdb3ad0865f22a54baf973a421e94d7be
| 10,713 |
py
|
Python
|
MS-thesis/excel-format/sir/Updated/New folder/test.py
|
iffishells/Pushto-TTS-FYP
|
7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140
|
[
"Apache-2.0"
] | 2 |
2021-12-06T04:28:18.000Z
|
2021-12-20T03:33:00.000Z
|
MS-thesis/excel-format/sir/Updated/New folder/test.py
|
iffishells/Pushto-TTS-FYP
|
7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140
|
[
"Apache-2.0"
] | null | null | null |
MS-thesis/excel-format/sir/Updated/New folder/test.py
|
iffishells/Pushto-TTS-FYP
|
7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140
|
[
"Apache-2.0"
] | 1 |
2021-12-29T16:44:59.000Z
|
2021-12-29T16:44:59.000Z
|
import os
import re
import sys
import time
import xml.etree.ElementTree as etree

import pandas
import pandas as pd
import xlrd
from openpyxl import load_workbook
from xlrd import open_workbook

import nltk
from nltk import PCFG, induce_pcfg
from nltk.draw.util import CanvasFrame
from nltk.parse import pchart
from nltk.parse.generate import generate
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.tree import *

sys.setrecursionlimit(5000)
##start = time.time()
##PERIOD_OF_TIME = 15 # 5min
##while True :
sen = input("Enter your sentence: ")
sent = word_tokenize(sen)
#sen = "مهربانی وکړه بیاي ووايه . يوسف غلے شو . دیړ وخت وشو نہ خکاری"
##for i in sent_tokenize(sen):
## print(i)
##
##gram =("""
##S -> NP VP [1.0]
##NP -> ADJ [0.0041666667] | N [0.0041666667] | N N [0.3] | PN [0.0041666667] | ADJ N [0.0041666667] | AV N [0.0041666667] | N ADJ [0.1] | NU NU [0.5] | NU AP [0.0041666667] | ADJ AP [0.0041666667] | AV [0.0041666667] | ADJ AP [0.0041666667] | N PN [0.0041666667] | VP N [0.0041666667] | PN ADV [0.0041666667] | AV ADV [0.0041666667] | N VP [0.0041666667] | NU N [0.0041666667] | NU [0.0041666667] | V [0.0041666667] | AV AP [0.0041666667] | ADJ VP [0.0041666667] | N AP [0.0041666667] | ADJ AP [0.0041666667] | ADJ NP [0.0041666667] | N NP [0.0041666667]
##VP -> V AP [0.557] | ADJ V [0.05] | AP [0.00625] | NP [0.00625] | AV PN [0.056] | V ADV [0.00625] | V [0.00625] | AV AP [0.00625] | N ADV [0.00625] | N [0.00625] | NU N [0.1] | N V [0.0375] | ADJ AP [0.00625] | N AV [0.10] | V ADJ [0.00625] | ADJ NP [0.00625] | N AP [0.00625] | N NP [0.00625] | NP NP [0.00625] | AV VP [0.00625] | ADJ VP [0.00625] | N VP [0.00625]
##AP -> AV V [0.056] | V NP [0.166] | ADJ V [0.051] | NP VP [0.0142857143] | AV NP [0.0142857143] | PN NP [0.0142857143] | N V [0.037] | NU N [0.2] | AV N [0.2] | ADJ PN [0.066] | V VP [0.0142857143] | N ADV [0.0142857143] | PN AV [0.024] | ADJ VP [0.0142857143] | PN N [0.1] | AV ADV [0.0142857143]
##ADV -> ADV ADJ [0.4] | PN VP [0.025] | N AP [0.025] | AV AV [0.5] | V AP [0.025] | N V [0.025]
##""")
#0.0769231
gram = ("""
S -> NP NP RP VP RP NP PRP VP [0.0769230769]
NP -> N [0.0294118]
NP -> PRP N [0.0294118]
VP -> V [0.05]
NP -> N N [0.0294118]
VP -> V [0.05]
S -> NP RP POP NP NP PP ADJ VP [0.0769230769]
NP -> PRP N [0.0294118]
NP -> N [0.0294118]
NP -> PRP N [0.0294118]
PP -> NP POP [0.2]
NP -> PRP N [0.0294118]
VP -> V [0.05]
S -> ADVP INT CO PP ADV INT RP ADJ PP NP ADV VP [0.0769230769]
ADVP -> ADV NP [0.333333]
NP -> N [0.0294118]
PP -> NP POP [0.6]
NP -> N [0.0294118]
NP -> N [0.0294118]
NP -> PRN [0.0294118]
VP -> V [0.1]
S -> NP PP NP NP VP [0.0769230769]
NP -> N [0.0294118]
PP -> PRP NP [0.2]
NP -> PRP N [0.0294118]
NP -> PRP N [0.0294118]
NP -> PRP N N [0.0294118]
VP -> V [0.05]
S -> NP ADJP ADVP VP [0.0769230769]
NP -> NP CO NP [0.0294118]
NP -> PRP N [0.0294118]
NP -> PRP N [0.0294118]
ADJP -> ADJ ADJ NP [0.333333]
NP -> N [0.0294118]
ADVP -> ADV NP [0.333333]
NP -> N [0.0294118]
VP -> V [0.05]
S -> PP VP CO NP VP [0.0769230769]
NP -> N N [0.0294118]
VP -> V [0.05]
NP -> N [0.0294118]
VP -> V [0.05]
S -> NP NP NP VP VP [0.0769230769]
NP -> PRN [0.0294118]
NP -> PRP N N [0.0294118]
NP -> PRP N [0.0294118]
VP -> V [0.05]
VP -> V [0.1]
S -> NP NP VP [0.0769230769]
NP -> PRN [0.0294118]
NP -> N [0.0294118]
VP -> V [0.05]
S -> NP ADJP VP [0.0769230769]
NP -> PRN [0.0294118]
ADJP -> ADJ NP [0.333333]
NP -> N N [0.0294118]
VP -> V [0.05]
S -> NP ADJP VP VP [0.0769230769]
NP -> PRN [0.0294118]
ADJP -> ADJ NP [0.333333]
NP -> N [0.0294118]
VP -> V [0.05]
VP -> V [0.05]
S -> NP ADJ VP VP [0.0769230769]
NP -> PRN [0.0588235]
VP -> V [0.1]
S -> NP VP VP VP [0.0769230769]
VP -> V [0.05]
S -> NP ADVP VP [0.0769230769]
NP -> PRN [0.0294118]
ADVP -> PRP ADV RP [0.333333]
VP -> V [0.05]
""")
##gram =("""
##S -> NP VP [1.0]
##NP -> ADJ [0] | N [0] | N N [0.4] | PN [0] | ADJ N [0] | AV N [0] | N ADJ [0.1] | NU NU [0.5] | NU AP [0] | ADJ AP [0] | AV [0] | ADJ AP [0] | N PN [0] | VP N [0] | PN ADV [0] | AV ADV [0] | N VP [0] | NU N [0] | NU [0] | V [0] | AV AP [0] | ADJ VP [0] | N AP [0] | ADJ AP [0] | ADJ NP [0] | N NP [0]
##VP -> V AP [0.557] | ADJ V [0.05] | AP [0.00625] | NP [0.00625] | AV PN [0.056] | V ADV [0.00625] | V [0.00625] | AV AP [0.00625] | N ADV [0.00625] | N [0.00625] | NU N [0.1] | N V [0.0375] | ADJ AP [0.00625] | N AV [0.10] | V ADJ [0.00625] | ADJ NP [0.00625] | N AP [0.00625] | N NP [0.00625] | NP NP [0.00625] | AV VP [0.00625] | ADJ VP [0.00625] | N VP [0.00625]
##AP -> AV V [0.056] | V NP [0.166] | ADJ V [0.051] | NP VP [0.0142857143] | AV NP [0.0142857143] | PN NP [0.0142857143] | N V [0.037] | NU N [0.2] | AV N [0.2] | ADJ PN [0.066] | V VP [0.0142857143] | N ADV [0.0142857143] | PN AV [0.024] | ADJ VP [0.0142857143] | PN N [0.1] | AV ADV [0.0142857143]
##ADV -> ADV ADJ [0.4] | PN VP [0.025] | N AP [0.025] | AV AV [0.5] | V AP [0.025] | N V [0.025]
##""")
##
##د هغه ناوړه ملګري وویل
##
##gram = ("""
##S -> NP VP [1.0]
##NP -> AV [0.5] | ADJ AP [0.5]
##VP -> AP [1.0]
##AP -> PN NP [0.5] | N V [0.5]
##AV -> "د" [1.0]
##PN -> "هغه" [1.0]
##ADJ -> "ناوړه" [1.0]
##V -> "وویل" [1.0]
##N -> "ملګري" [1.0]
##""")
##یوه وفاداره میرمن جوړه شوه
##gram = ("""
##S -> NP VP
##NP -> NU | N N
##VP -> NP NP
##
##""")
#دویم تن وویل
##gram =("""
##S -> NP VP
##NP -> V
##VP -> N V
##""")
##dic = pandas.read_csv("dictionary.csv")
##doc = pandas.read_csv("corpus2.csv", quotechar='"', delimiter=',')
#book = open_workbook("Pastho dictionary2.xlsx")
##for sheet in book.sheets():
## for rowidx in range(sheet.nrows):
## row = sheet.row(rowidx)
## for i in sent:
## for colidx,cell in enumerate(row):
## if cell.value == i:#row value
## #print ("Found Row Element")
## #print(rowidx, colidx)
## #print(cell.value)
## print(row)
## print('\n')
##
##book = load_workbook("Pastho dictionary2.xlsx")
##worksheet = book.sheetnames
##sheet = book["Sheet1"]
##c=1
##for i in sheet:
## d = sheet.cell(row=c, column=2)
##
## if(d.value is None):
## print(" Try Again ")
##
##
## elif (d.value == " Noun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "N ->" + "'" + cell.value + "'" + " " + "[0.0000851934]" + "\n"
##
##
## elif (d.value == "Noun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "N ->" + "'" + cell.value + "'" + " " + "[0.0000851934]" + "\n"
##
##
## elif (d.value == " Verb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "V ->" + "'" + cell.value + "'" + " " + "[0.0005530973]" + "\n"
##
##
## elif (d.value == "Verb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "V ->" + "'" + cell.value + "'" + " " + "[0.0005530973]" + "\n"
##
##
## elif (d.value == " Adjective "):
##
## cell = sheet.cell(row=c, column=1)
## gram = gram + "ADJ ->" + "'" + cell.value + "'" + " " + "[0.000280112]" + "\n"
##
##
## elif (d.value == "Adjective "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "ADJ ->" + "'" + cell.value + "'" + " " + "[0.000280112]" + "\n"
##
##
## elif (d.value == " Participles "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PP ->" + "'" + cell.value + "'" + " " + "[0.0588235294]" + "\n"
## #print("hi")
##
## elif (d.value == " Adverb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "AV ->" + "'" + cell.value + "'" + " " + "[0.0025380711]" + "\n"
##
##
## elif (d.value == "Adverb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "AV ->" + "'" + cell.value + "'" + " " + "[0.0025380711]" + "\n"
##
##
## elif (d.value == " numerical "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "NU ->" + "'" + cell.value + "'" + " " + "[0.0222222222]" + "\n"
##
##
## elif (d.value == "numerical "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "NU ->" + "'" + cell.value + "'" + " " + "[0.0222222222]" + "\n"
##
##
## elif (d.value == " proNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == " ProNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == "ProNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == " suffix "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "SA ->" + "'" + cell.value + "'" + " " + "[0.0476190476]" + "\n"
##
##
##
## elif (d.value == " Suffix "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "SA ->" + "'" + cell.value + "'" + " " + "[0.0476190476]" + "\n"
## c=c+1
#print(gram)
grammar1 = nltk.PCFG.fromstring(gram)
sr_parser = nltk.ViterbiParser(grammar1)
#max=0
for tree in sr_parser.parse(sent):
print(tree)
##
## with open("prob.txt", "a", encoding='utf-8') as output:
## output.write(str(tree))
## output.write("\n")
##
## if (tree.prob() > max):
## max=tree.prob()
## max_tree=tree
##
##print(max)
##print(max_tree)
##sr_parser = nltk.parse.chart.ChartParser(grammar1)
#sr_parser = nltk.RecursiveDescentParser(grammar1)
#sr_parser = nltk.ShiftReduceParser(grammar1)
##for tree in sr_parser.parse(sent):
## #values = tree
##
## with open("test.txt", "a", encoding='utf-8') as output:
## output.write(str(tree))
## output.write("\n")
##
## print(tree)
## #break
##
| 31.508824 | 576 | 0.477364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,812 | 0.903666 |
96a3d255da97bc30ed9f93ea22fbcadc0ebc221e
| 1,013 |
py
|
Python
|
RFEM/SpecialObjects/intersection.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 16 |
2021-10-13T21:00:11.000Z
|
2022-03-21T11:12:09.000Z
|
RFEM/SpecialObjects/intersection.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 49 |
2021-10-19T13:18:51.000Z
|
2022-03-30T08:20:17.000Z
|
RFEM/SpecialObjects/intersection.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 7 |
2021-10-13T06:06:24.000Z
|
2022-03-29T17:48:39.000Z
|
from RFEM.initModel import Model, clearAtributes
class Instersection():
def __init__(self,
no: int = 1,
surface_1: int = 1,
surface_2: int = 2,
comment: str = '',
params: dict = None,
model = Model):
# Client model | Intersection
clientObject = model.clientModel.factory.create('ns0:intersection')
        # Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Intersection No.
clientObject.no = no
# Assigned surfaces
clientObject.surface_a = surface_1
clientObject.surface_b = surface_2
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if params:
for key in params:
clientObject[key] = params[key]
# Add Intersection to client model
model.clientModel.service.set_intersection(clientObject)
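
# --- Illustrative usage sketch (added); requires RFEM to be running. The
# Model(new_model, model_name) call below follows the library's convention
# and should be treated as an assumption. ---
if __name__ == "__main__":
    Model(True, "Demo")  # connect to RFEM and create a model named "Demo"
    # Creates intersection no. 1 between surfaces 1 and 2 in the active model.
    Instersection(no=1, surface_1=1, surface_2=2, comment='roof/wall cut')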
| 28.942857 | 75 | 0.582428 | 962 | 0.949654 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.2231 |
96a494380c4f8173563744e5544c96b9515e8e78
| 6,760 |
py
|
Python
|
tests/test_storage.py
|
HumanCellAtlas/data-store
|
6b27d0f7e0110c62b3079151708689ab5145f15b
|
[
"MIT"
] | 46 |
2017-03-24T15:56:09.000Z
|
2021-03-15T19:49:07.000Z
|
tests/test_storage.py
|
HumanCellAtlas/DCC
|
6b27d0f7e0110c62b3079151708689ab5145f15b
|
[
"MIT"
] | 1,799 |
2017-04-04T17:54:28.000Z
|
2020-11-19T12:30:13.000Z
|
tests/test_storage.py
|
HumanCellAtlas/DCC
|
6b27d0f7e0110c62b3079151708689ab5145f15b
|
[
"MIT"
] | 13 |
2017-03-27T23:49:35.000Z
|
2021-01-18T07:39:49.000Z
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import string
import unittest
from uuid import uuid4
from unittest import mock
from random import random, randint
from datetime import datetime, timedelta
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import dss
from dss import Replica
from dss.util.version import datetime_to_version_format
from dss.storage.identifiers import UUID_REGEX, TOMBSTONE_SUFFIX
from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles
from dss.logging import configure_test_logging
from tests.infra import testmode, MockStorageHandler
class MockCloudBlobstoreHandle:
bundle_uuid: str = None
tombstoned_bundles: list = None
untombstoned_bundles: list = None
tombstones: list = None
listing: list = None
@classmethod
def list(cls, bucket, pfx):
for fqid in cls.listing:
yield fqid
@classmethod
def gen_bundle_listing(cls,
number_of_versions: int,
versioned_tombstone_probability: float=0.0,
unversioned_tombstone_probability: float=0.0):
cls.bundle_uuid = str(uuid4())
untombstoned_bundles = list()
tombstoned_bundles = list()
tombstones = list()
for _ in range(number_of_versions):
random_date = datetime.utcnow() - timedelta(days=randint(0, 364),
hours=randint(0, 23),
minutes=randint(0, 59))
bundle_fqid = f"{cls.bundle_uuid}.{datetime_to_version_format(random_date)}"
bundle_key = f"bundles/{bundle_fqid}"
if random() <= versioned_tombstone_probability:
tombstones.append(f"{bundle_key}.{TOMBSTONE_SUFFIX}")
tombstoned_bundles.append(bundle_key)
else:
untombstoned_bundles.append(bundle_key)
cls.tombstoned_bundles = tombstoned_bundles
cls.untombstoned_bundles = untombstoned_bundles
cls.tombstones = tombstones
listing = untombstoned_bundles + tombstoned_bundles + tombstones
if random() <= unversioned_tombstone_probability:
listing.append(f"bundles/{cls.bundle_uuid}.{TOMBSTONE_SUFFIX}")
cls.listing = sorted(listing)
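# Example (illustrative): MockCloudBlobstoreHandle.gen_bundle_listing(3,
# versioned_tombstone_probability=1.0) fills cls.listing with, for one fresh
# uuid U, three "bundles/U.<version>" keys plus a matching
# "bundles/U.<version>.<TOMBSTONE_SUFFIX>" tombstone for each, sorted
# lexicographically.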
def setUpModule():
configure_test_logging()
@testmode.standalone
class TestRegexIdentifiers(unittest.TestCase):
def test_REGEX_MATCHING(self):
chars = string.ascii_lowercase + string.digits
for i, c in enumerate(chars):
uuid = f'{c*8}-{c*4}-{c*4}-{c*4}-{c*12}'
self.assertTrue(UUID_REGEX.match(uuid), uuid)
for i in range(100):
uuid = str(uuid4())
self.assertTrue(UUID_REGEX.match(uuid), uuid)
@testmode.standalone
class TestStorageBundles(unittest.TestCase):
@classmethod
def setUpClass(cls):
dss.Config.set_config(dss.BucketConfig.TEST)
@mock.patch("dss.Config.get_blobstore_handle")
def test_uuid_enumeration(self, mock_list_v2):
mock_list_v2.return_value = MockStorageHandler()
resp = enumerate_available_bundles(replica='aws')
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
@mock.patch("dss.Config.get_blobstore_handle")
def test_tombstone_pages(self, mock_list_v2):
mock_list_v2.return_value = MockStorageHandler()
for tests in MockStorageHandler.test_per_page:
test_size = tests['size']
last_good_bundle = tests['last_good_bundle']
resp = enumerate_available_bundles(replica='aws', per_page=test_size)
page_one = resp['bundles']
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
self.assertDictEqual(last_good_bundle, resp['bundles'][-1])
search_after = resp['search_after']
resp = enumerate_available_bundles(replica='aws', per_page=test_size,
search_after=search_after)
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
self.assertNotIn(x, page_one)
# TODO add test to enumerate list and ensure all bundles that should be present are there.
# TODO: Add test for dss.storage.bundles.get_bundle_manifest
# TODO: Add test for dss.storage.bundles.save_bundle_manifest
@mock.patch("dss.storage.bundles.Config.get_blobstore_handle", return_value=MockCloudBlobstoreHandle)
def test_get_tombstoned_bundles(self, _):
with self.subTest("Retrieve bundle fqid associated with versioned tombstone"):
mock_handle = MockCloudBlobstoreHandle
mock_handle.gen_bundle_listing(1, versioned_tombstone_probability=1.0)
for e in get_tombstoned_bundles(Replica.aws, mock_handle.tombstones[-1]):
self.assertEqual(mock_handle.tombstoned_bundles[0], e)
with self.subTest("Retrieve bundle fqids associated with unversioned tombstone"):
mock_handle.gen_bundle_listing(10,
versioned_tombstone_probability=0.5,
unversioned_tombstone_probability=1.0)
unversioned_tombstone_key = f"bundles/{mock_handle.bundle_uuid}.{TOMBSTONE_SUFFIX}"
listed_keys = {e for e in get_tombstoned_bundles(Replica.aws, unversioned_tombstone_key)}
expected_keys = {e for e in mock_handle.untombstoned_bundles}
unexpected_keys = {e for e in mock_handle.tombstoned_bundles}
self.assertEqual(listed_keys, expected_keys)
self.assertNotIn(unversioned_tombstone_key, listed_keys)
self.assertEqual(0, len(unexpected_keys.intersection(listed_keys)))
with self.subTest("Passing in non-tombstone key should raise"):
mock_handle.gen_bundle_listing(1, versioned_tombstone_probability=1.0)
with self.assertRaises(ValueError):
for e in get_tombstoned_bundles(Replica.aws, "asdf"):
pass
if __name__ == '__main__':
unittest.main()
| 45.066667 | 117 | 0.660059 | 5,934 | 0.877811 | 83 | 0.012278 | 5,778 | 0.854734 | 0 | 0 | 1,017 | 0.150444 |
96a6810cf017f549b181521c6cc7573fff263c40
| 11,035 |
py
|
Python
|
headless/ches_prod_test_titles_headless.py
|
sherrli/Testing-Automation
|
d5a59ed10613b782cd4a8dc29d084c78ee883300
|
[
"MIT"
] | 1 |
2019-04-05T15:51:30.000Z
|
2019-04-05T15:51:30.000Z
|
headless/ches_prod_test_titles_headless.py
|
shli17/Testing-Automation
|
d5a59ed10613b782cd4a8dc29d084c78ee883300
|
[
"MIT"
] | null | null | null |
headless/ches_prod_test_titles_headless.py
|
shli17/Testing-Automation
|
d5a59ed10613b782cd4a8dc29d084c78ee883300
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
# coding=utf-8
# Headless firefox title test for jenkins build.
intro="""
----------------------------------------------------------------
File : ches_prod_test_titles_headless.py
Description : Headless firefox title test for ches prod sites.
Author : Sherri Li
----------------------------------------------------------------
"""
print(intro)
from selenium import webdriver
from xvfbwrapper import Xvfb
import unittest
import os
import sys
sys.path.append("..")
import json
import time
import datetime
import timeit
import logging
import write_log
import check_status
import create_log
import spreadsheet
# Generate log folder and file.
# Default log level is INFO (everything). Go to create_log.py to change.
folderName = create_log.createLog("ChesProdTitle")
class ChesProdTitleTest(unittest.TestCase):
# Name of the JSON file containing the sites to test
json_file = "sites-prod.json" # can modify later
today = str(datetime.datetime.now())
# Here are the class variables shared among all instances "self" of ChesProdTitleTest
ERRORCOLOR = "\u001b[31m" #red
WARNINGCOLOR = "\u001b[33m" #yellow
SUCCESSCOLOR = "\u001b[32m" #green
DEFAULTCOLOR = "\u001b[0m" #white
browser = None
browserType = 'firefox'
sites = None
timeStart = None
###################################
# SETUP FUNCTIONS #################
###################################
def __init__(self, *args, **kwargs):
super(ChesProdTitleTest, self).__init__(*args, **kwargs)
if self.sites is None:
self.setupSites()
# Initialize a Spreadsheet object!
self.spreadsheet = spreadsheet.Spreadsheet()
self.spreadsheet.open_sheet(str(type(self).__name__))
self.next_row = self.spreadsheet.next_available_row()
def __del__(self):
if self.browser is not None:
try:
self.browser.quit()
except:
pass
try:
del self.spreadsheet
except Exception as e:
print(e)
# NOTE: timer integrated into helper function to making gspread logging easier.
# def setUp(self):
# if self.timeStart is None:
# self.timeStart = timeit.default_timer()
#
# def tearDown(self):
# if self.timeStart is not None:
# timeElapsed = timeit.default_timer() - self.timeStart
# write_log.logSummary(self.browserType, timeElapsed)
def setupSites(self):
#use json credentials file
with open(os.getcwd() + '/' + self.json_file) as data_file:
data = json.load(data_file)
self.sites = {}
for siteData in data['sites']:
site = {}
site['url']= siteData['url']
site['title']= siteData['title']
self.sites[siteData['siteName']] = site
###################################
# TEST FUNCTIONS ##################
###################################
# def test_admissions(self):
# self.run_test('admissions', self.next_row)
#
# def test_admstats(self):
# self.run_test("admstats", self.next_row+1)
#
# def test_btconference(self):
# self.run_test("btconference", self.next_row+2)
#
# def test_conrooms(self):
# self.run_test("conrooms", self.next_row+3)
#
# def test_careercentersecure_student(self):
# self.run_test("careercentersecure-student", self.next_row+4)
#
# def test_careercentersecure_staff(self):
# self.run_test("careercentersecure-staff", self.next_row+5)
#
# def test_careercentersecure_public(self):
# self.run_test("careercentersecure-public", self.next_row+6)
#
# def test_eop(self):
# self.run_test("eop", self.next_row+7)
#
# def test_eop2(self):
# self.run_test("eop2", self.next_row+8)
#
# def test_fixit(self):
# self.run_test("fixit", self.next_row+9)
#
# def test_fixit_queue(self):
# self.run_test("fixit-queue", self.next_row+10)
#
# def test_jdbs(self):
# self.run_test("jdbs", self.next_row+11)
#
# def test_orientation(self):
# self.run_test("orientation", self.next_row+12)
# For loop
def test_title(self):
with Xvfb() as xvfb:
try:
driver = webdriver.Firefox()
driver.implicitly_wait(30)
self.browser = driver
            except Exception:
                write_log.logSetupError("firefox")
                print("Unable to load firefox")
                self.next_row += 1
assert(self.browser is not None)
assert(self.sites is not None)
browser = self.browser
print('\n')
# Grab each site from the dictionary.
for siteName in self.sites:
site = self.sites[siteName]
# Populate spreadsheet with app name, class name, current date.
self.spreadsheet.write_cell(self.next_row,1,siteName)
self.spreadsheet.write_cell(self.next_row,2,type(self).__name__)
self.spreadsheet.write_cell(self.next_row,3,self.today[:16])
# Call the function to get status code from file check_status.py.
result = check_status.checkStatus(site['url'], [200, 301, 302])
if result==False:
print("FAIL: "+site['url']+" returns invalid http response.")
self.spreadsheet.write_cell(self.next_row,5,"fail\ninvalid http response")
self.next_row += 1
continue # Skip to next test in loop
else:
# Begin timer
self.timeStart = timeit.default_timer()
browser.get(site['url'])
write_log.logInfoMsg(siteName, "title test started")
print("Testing " + siteName)# + " with " + browser.name.capitalize() + " Found site['title'] " + browser.title)
browser.implicitly_wait(30)
# You can also search for 'text' in browser.page_source rather than browser.title
if site['title'] not in browser.title:
self.spreadsheet.write_cell(self.next_row,5,site['title'] + " not found")
write_log.logErrorMsg(siteName+'\n', "Desired title '" + site['title'] + "' not found")
print(self.ERRORCOLOR+"ERROR:"+self.DEFAULTCOLOR+ "desired title '" + site['title'] + "' not found on " + siteName)
browser.save_screenshot(folderName+'/error_'+siteName+'.png')
else:
#END TIME
timeElapsed = timeit.default_timer() - self.timeStart
write_log.logSummary(self.browserType, timeElapsed)
# Populate spreadsheet with time and result.
self.spreadsheet.write_cell(self.next_row,4,round(timeElapsed, 5))
self.spreadsheet.write_cell(self.next_row,5,"pass")
write_log.logSuccess(siteName+'\n', "title")
print(self.SUCCESSCOLOR+"passed:"+self.DEFAULTCOLOR+ "'" + site['title'] + "' found on " + siteName)
self.next_row += 1
###################################
# HELPER FUNCTION #################
###################################
# def run_test(self,siteName,row):
# with Xvfb() as xvfb:
# try:
# driver = webdriver.Firefox()
# driver.implicitly_wait(30)
# self.browser = driver
# except:
# write_log.logSetupError("firefox")
# print("Unable to load firefox")
#
# assert(self.browser is not None)
# assert(self.sites is not None)
# browser = self.browser
# print('\n')
#
# # Check that the site url exists.
# try:
# site = self.sites[siteName]
# except:
# write_log.logErrorMsg("your disk. Check to make sure the "+self.json_file+" is up to date.\n", "Test terminated prematurely. You are missing the "+siteName+" url")
# print(self.ERRORCOLOR+"ERROR: "+self.DEFAULTCOLOR + siteName + " credentials not found on your disk.")
# return
#
# site = self.sites[siteName]
# # Populate spreadsheet with app name, class name, current date.
# self.spreadsheet.write_cell(row,1,siteName)
# self.spreadsheet.write_cell(row,2,type(self).__name__)
# self.spreadsheet.write_cell(row,3,self.today[:16])
#
# # Once the site is found, make sure HTTP status code is 200, 301, or 302.
# # Call the function to get status code from file check_status.py.
# result = check_status.checkStatus(site['url'], [200, 301, 302])
# if result==False:
# self.spreadsheet.write_cell(row,5,"fail\ninvalid http response")
# return # exit test
# else:
# #BEGIN TIME
# self.timeStart = timeit.default_timer()
# browser.get(site['url'])
# write_log.logInfoMsg(siteName, "title test started")
# print("Testing " + siteName)# + " with " + browser.name.capitalize() + " Found site['title'] " + browser.title)
# browser.implicitly_wait(30)
#
# # You can also search for 'text' in browser.page_source rather than browser.title
# if site['title'] not in browser.title:
# self.spreadsheet.write_cell(row,5,site['title'] + " not found")
# write_log.logErrorMsg(siteName+'\n', "Desired title '" + site['title'] + "' not found")
# print(self.ERRORCOLOR+"ERROR:"+self.DEFAULTCOLOR+ "desired title '" + site['title'] + "' not found on " + siteName)
# browser.save_screenshot(folderName+'/error_'+siteName+'.png')
# else:
# #END TIME
# timeElapsed = timeit.default_timer() - self.timeStart
# write_log.logSummary(self.browserType, timeElapsed)
# # Populate spreadsheet with time and result.
# self.spreadsheet.write_cell(row,4,round(timeElapsed, 5))
# self.spreadsheet.write_cell(row,5,"pass")
# write_log.logSuccess(siteName+'\n', "title")
# print(self.SUCCESSCOLOR+"passed:"+self.DEFAULTCOLOR+ "'" + site['title'] + "' found on " + siteName)
#
# Kick off the test!
if __name__ == "__main__":
#print("\n\u001b[33smAll test logs and screenshots will be saved to the following folder in your current directory:\n" + folderName + "\n\u001b[0m")
unittest.main()
| 41.175373 | 181 | 0.553512 | 6,607 | 0.598731 | 0 | 0 | 0 | 0 | 0 | 0 | 6,523 | 0.591119 |
96a69bc47e9c073ff2335f4ac224effa211b40aa
| 4,579 |
py
|
Python
|
pyteiser/matchmaker.py
|
goodarzilab/pyteiser
|
3ac78604c768957022cc7751ccdd337960a816f2
|
[
"MIT"
] | 6 |
2020-12-01T08:10:07.000Z
|
2022-01-17T02:09:13.000Z
|
pyteiser/matchmaker.py
|
goodarzilab/pyteiser
|
3ac78604c768957022cc7751ccdd337960a816f2
|
[
"MIT"
] | 4 |
2021-05-19T06:24:30.000Z
|
2022-01-27T20:18:44.000Z
|
pyteiser/matchmaker.py
|
goodarzilab/pyteiser
|
3ac78604c768957022cc7751ccdd337960a816f2
|
[
"MIT"
] | 5 |
2020-07-04T02:05:30.000Z
|
2021-06-26T10:24:16.000Z
|
import numba
import time
from . import glob_var
from . import structures
# For some reason, caching this function fails the run on Columbia HPC.
# Disabling the cache barely matters for speed: the function compiles once,
# however many times it is called afterwards.
@numba.jit(cache=False, nopython=True, nogil=True)
def match_motif_seq(n_motif, n_sequence, ind, is_degenerate = False):
# this function only works with n_motif and n_sequence classes,
# not with w_motif and w_sequence
left_index = ind
right_index = left_index + n_motif.linear_length - 1
for i in range(n_motif.length):
if is_degenerate:
is_correct_nt = n_sequence.nt_is_a_degenerate(left_index, n_motif.sequence[i])
else:
is_correct_nt = n_sequence.nt_is_a(left_index, n_motif.sequence[i])
if n_motif.structure[i] == glob_var._stem:
if not is_correct_nt or not n_sequence.is_paired(left_index, right_index):
# either the sequence does not match or the left and right bases cannot form a Watson-Crick base pair
return False
left_index += 1
right_index -= 1
else:
if not is_correct_nt: # the sequence does not match
return False
left_index += 1
return True
# For some reason, caching this function fails the run on Columbia HPC.
# Disabling the cache barely matters for speed: the function compiles once,
# however many times it is called afterwards.
@numba.jit(cache=False, nopython=True, nogil=True)
def is_there_motif_instance(n_motif, n_sequence, is_degenerate = False):
# this function only works with n_motif and n_sequence classes,
# not with w_motif and w_sequence
for i in range(n_sequence.length - n_motif.linear_length + 1):
# sequence_string = sequence.print(return_string = True)
# print(sequence_string[i : i + motif.linear_length])
if match_motif_seq(n_motif, n_sequence, i, is_degenerate):
return True
return False
@numba.jit(cache=True, nopython=True, nogil=True)
def find_all_motif_instances(n_motif, n_sequence, is_degenerate = False):
# this function only works with n_motif and n_sequence classes,
# not with w_motif and w_sequence
motif_instances = []
for i in range(n_sequence.length - n_motif.linear_length + 1):
# sequence_string = sequence.print(return_string = True)
# print(sequence_string[i : i + motif.linear_length])
if match_motif_seq(n_motif, n_sequence, i, is_degenerate):
motif_instances.append(i)
return motif_instances
# I have tried really hard to improve performance of this step with numba
# the main problem is that I have a list of n_sequence objects and their size can vary
# therefore, I can't pass them to the function as a numpy array in any of the standard formats
# I could make a numpy array with an object dtype (like dtype=structures.n_sequence), but Numba does not support it
# for more detailed explanation, see https://stackoverflow.com/questions/14639496/how-to-create-a-numpy-array-of-arbitrary-length-strings
# numba will deprecate standard python lists too
# there is also numba typed list structure (from numba.typed import List) but it's an experimental feature so far so I don't want to rely on it
# see here https://numba.pydata.org/numba-doc/dev/reference/pysupported.html
# so there is no way to pass a bunch of variable-sized sequence objects to numba in the way that would make the iterations faster
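# Illustration of the limitation described above: np.array(n_seqs_list,
# dtype=object) does build an object array, but numba's nopython mode cannot
# iterate over it, so the per-sequence loop below stays in plain Python and
# only the inner scan (is_there_motif_instance) is jitted.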
def calculate_profile_one_motif(motif, n_seqs_list, is_degenerate = False):
start_time = time.time()
current_profile = structures.w_profile(len(n_seqs_list))
for i, seq in enumerate(n_seqs_list):
match = is_there_motif_instance(motif, seq, is_degenerate)
if match:
current_profile.values[i] = True
end_time = time.time()
time_spent = end_time - start_time
return current_profile, time_spent
def calculate_profiles_list_motifs(n_motifs_list, n_seqs_list,
do_print=False,
is_degenerate = False):
profiles_list = [0] * len(n_motifs_list)
for i, motif in enumerate(n_motifs_list):
current_profile, time_spent = calculate_profile_one_motif(motif, n_seqs_list, is_degenerate)
profiles_list[i] = current_profile.values
if do_print:
print("Motif number %d binds %d sequences. It took %.2f seconds"
% (i, current_profile.sum(), time_spent))
return profiles_list
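# Minimal usage sketch (illustrative only; assumes n_motifs_list and
# n_seqs_list were already built from w_motif / w_sequence objects via the
# structures module):
# profiles = calculate_profiles_list_motifs(n_motifs_list, n_seqs_list, do_print=True)
# profiles[k] is then a boolean array marking which sequences motif k binds.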
| 42.794393 | 143 | 0.70736 | 0 | 0 | 0 | 0 | 2,163 | 0.472374 | 0 | 0 | 1,963 | 0.428696 |
96a6c1aaacc3e456bbd64b90f5f744423a7befea
| 4,574 |
py
|
Python
|
clean.py
|
stephtdouglas/k2spin
|
9a73e35e99b925015a91e37b5fd785440adf78f9
|
[
"MIT"
] | null | null | null |
clean.py
|
stephtdouglas/k2spin
|
9a73e35e99b925015a91e37b5fd785440adf78f9
|
[
"MIT"
] | null | null | null |
clean.py
|
stephtdouglas/k2spin
|
9a73e35e99b925015a91e37b5fd785440adf78f9
|
[
"MIT"
] | null | null | null |
"""Basic cleanup on lightcurves (trimming, sigma-clipping)."""
import logging
import numpy as np
import matplotlib.pyplot as plt
import k2spin.utils as utils
from k2spin import detrend
def trim(time, flux, unc_flux):
"""Remove infs, NaNs, and negative flux values.
Inputs
------
time, flux, unc_flux: array_like
Outputs
-------
trimmed_time, trimmed_flux, trimmed_unc: arrays
good: boolean mask, locations that were kept
"""
good = np.where((np.isfinite(flux)==True) & (flux>0) &
(np.isfinite(unc_flux)==True) &
(np.isfinite(time)==True) & (time>2061.5))[0]
trimmed_time = time[good]
trimmed_flux = flux[good]
trimmed_unc = unc_flux[good]
return trimmed_time, trimmed_flux, trimmed_unc, good
def smooth_and_clip(time, flux, unc_flux, clip_at=3, to_plot=False):
"""Smooth the lightcurve, then clip based on residuals."""
if to_plot:
plt.figure(figsize=(8,4))
ax = plt.subplot(111)
ax.plot(time,flux,'k.',label="orig")
# Simple sigma clipping first to get rid of really big outliers
ct, cf, cu, to_keep = sigma_clip(time, flux, unc_flux, clip_at=clip_at)
logging.debug("c len t %d f %d u %d tk %d", len(ct), len(cf),
len(cu), len(to_keep))
if to_plot: ax.plot(ct, cf, '.',label="-1")
# Smooth with supersmoother without much bass enhancement
for i in range(3):
det_out = detrend.simple_detrend(ct, cf, cu, phaser=0)
detrended_flux, detrended_unc, bulk_trend = det_out
# Take the difference, and find the standard deviation of the residuals
# logging.debug("flux, bulk trend, diff")
# logging.debug(cf[:5])
# logging.debug(bulk_trend[:5])
f_diff = cf - bulk_trend
# logging.debug(f_diff[:5])
diff_std = np.zeros(len(f_diff))
diff_std[ct<=2102] = np.std(f_diff[ct<=2102])
diff_std[ct>2102] = np.std(f_diff[ct>2102])
# logging.debug("std %f %f",diff_std[0], diff_std[-1])
if to_plot:
ax.plot(ct, bulk_trend)
logging.debug("%d len tk %d diff %d", i, len(to_keep), len(f_diff))
# Clip outliers based on residuals this time
to_keep = to_keep[abs(f_diff)<=(diff_std*clip_at)]
ct = time[to_keep]
cf = flux[to_keep]
cu = unc_flux[to_keep]
if to_plot:
ax.plot(ct, cf, '.',label=str(i))
if to_plot:
ax.legend()
clip_time = time[to_keep]
clip_flux = flux[to_keep]
clip_unc_flux = unc_flux[to_keep]
return clip_time, clip_flux, clip_unc_flux, to_keep
def sigma_clip(time, flux, unc_flux, clip_at=6):
"""Perform sigma-clipping on the lightcurve.
Inputs
------
time, flux, unc_flux: array_like
clip_at: float (optional)
how many sigma to clip at. Defaults to 6.
Outputs
-------
clipped_time, clipped_flux, clipped_unc: arrays
to_keep: boolean mask of locations that were kept
"""
# Compute statistics on the lightcurve
med, stdev = utils.stats(flux, unc_flux)
# Sigma-clip the lightcurve
outliers = abs(flux-med)>(stdev*clip_at)
to_clip = np.where(outliers==True)[0]
to_keep = np.where(outliers==False)[0]
logging.debug("Sigma-clipping")
logging.debug(to_clip)
clipped_time = np.delete(time, to_clip)
clipped_flux = np.delete(flux, to_clip)
clipped_unc = np.delete(unc_flux, to_clip)
# Return clipped lightcurve
return clipped_time, clipped_flux, clipped_unc, to_keep
def prep_lc(time, flux, unc_flux, clip_at=3):
"""Trim, sigma-clip, and calculate stats on a lc.
Inputs
------
time, flux, unc_flux: array_like
clip_at: float (optional)
How many sigma to clip at. Defaults to 3.
Set to None for no sigma clipping
Outputs
-------
clean_time, clean_flux, clean_unc: arrays
"""
# Trim the lightcurve, remove bad values
t_time, t_flux, t_unc, t_kept = trim(time, flux, unc_flux)
# Run smoothing and sigma-clipping if desired
if clip_at is not None:
c_time, c_flux, c_unc, c_kept = smooth_and_clip(t_time, t_flux, t_unc,
clip_at=clip_at)
else:
c_time, c_flux, c_unc, c_kept = t_time, t_flux, t_unc, t_kept
all_kept = t_kept[c_kept]
# Calculate statistics on lightcurve
c_med, c_stdev = utils.stats(c_flux, c_unc)
# Return cleaned lightcurve and statistics
return c_time, c_flux, c_unc, c_med, c_stdev, all_kept
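if __name__ == "__main__":
    # Minimal demo on synthetic data (a sketch, not part of the original
    # module; running it requires the k2spin detrend dependencies).
    t = np.linspace(2062., 2132., 500)
    f = 1. + np.random.normal(0., 0.01, t.size)
    u = np.full_like(t, 0.01)
    f[::97] += 0.5  # inject a few outliers
    ct, cf, cu, med, stdev, kept = prep_lc(t, f, u, clip_at=3)
    print("kept %d of %d points, median flux %.3f" % (len(ct), len(t), med))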
| 29.895425 | 79 | 0.630302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,784 | 0.390031 |
96a6e69d914f940d6ce83071f9858c2504a877e2
| 140 |
py
|
Python
|
nested ternary.py
|
ps2809/Python-Examples
|
0574f53787af28bf5bd011c139d340091454a4f9
|
[
"MIT"
] | 1 |
2021-07-30T06:15:18.000Z
|
2021-07-30T06:15:18.000Z
|
nested ternary.py
|
ps2809/Python-Examples
|
0574f53787af28bf5bd011c139d340091454a4f9
|
[
"MIT"
] | null | null | null |
nested ternary.py
|
ps2809/Python-Examples
|
0574f53787af28bf5bd011c139d340091454a4f9
|
[
"MIT"
] | null | null | null |
a=int(input('enter a:'))
b=int(input('enter b:'))
c=int(input('enter c:'))
min_value = a if a < b and a < c else b if b < c else c
print(min_value)
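# The nested conditional expression above unrolls to:
# if a < b and a < c: min_value = a
# elif b < c: min_value = b
# else: min_value = c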
| 28 | 48 | 0.657143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.214286 |
96a81b3b0875d5b95d7dd34bd4be73ffcfb6fd0c
| 758 |
py
|
Python
|
pylinsql/timing.py
|
hunyadi/pylinsql
|
bba0017322edbda25a5a2c87f5b46407eea9a00a
|
[
"MIT"
] | null | null | null |
pylinsql/timing.py
|
hunyadi/pylinsql
|
bba0017322edbda25a5a2c87f5b46407eea9a00a
|
[
"MIT"
] | null | null | null |
pylinsql/timing.py
|
hunyadi/pylinsql
|
bba0017322edbda25a5a2c87f5b46407eea9a00a
|
[
"MIT"
] | null | null | null |
import asyncio
import functools
import time
def _log_func_timing(f, args, kw, sec: float):
print("func: %r args: [%r, %r] took: %2.4f sec" % (f.__name__, args, kw, sec))
def timing(f):
"Decorator to log"
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrap(*args, **kw):
ts = time.time()
result = await f(*args, **kw)
te = time.time()
_log_func_timing(f, args, kw, te - ts)
return result
else:
@functools.wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
_log_func_timing(f, args, kw, te - ts)
return result
return wrap
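if __name__ == "__main__":
    # Minimal demo (not part of the original module): the same decorator
    # handles both sync and async callables.
    @timing
    def busy(n):
        return sum(range(n))

    @timing
    async def busy_async(n):
        await asyncio.sleep(0)
        return sum(range(n))

    busy(10**6)
    asyncio.run(busy_async(10**6))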
| 22.294118 | 82 | 0.51847 | 0 | 0 | 0 | 0 | 454 | 0.598945 | 205 | 0.270449 | 59 | 0.077836 |
96a839ea7a6be1421d492c4092e290ebd78292b8
| 715 |
py
|
Python
|
examples/wsgi_usage/apache_modwsgi_server_example.py
|
digimatspa/python-jsonrpc
|
7f8a022c112f8957cee18c54fc48557690cfe417
|
[
"MIT"
] | 97 |
2015-01-06T14:29:31.000Z
|
2022-02-17T07:27:11.000Z
|
examples/wsgi_usage/apache_modwsgi_server_example.py
|
HoverHell/python-jsonrpc
|
41bcd48dd7879ca780481605dc1ffb611ead9100
|
[
"MIT"
] | 37 |
2015-01-03T11:00:48.000Z
|
2021-04-23T06:12:45.000Z
|
examples/wsgi_usage/apache_modwsgi_server_example.py
|
HoverHell/python-jsonrpc
|
41bcd48dd7879ca780481605dc1ffb611ead9100
|
[
"MIT"
] | 63 |
2015-02-04T20:14:48.000Z
|
2022-02-17T07:27:13.000Z
|
#!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
#
# See http://tools.cherrypy.org/wiki/ModWSGI
#
import cherrypy
from pyjsonrpc.cp import CherryPyJsonRpc, rpcmethod
class Root(CherryPyJsonRpc):
@rpcmethod
def add(self, a, b):
"""Test method"""
return a + b
index = CherryPyJsonRpc.request_handler
# WSGI-Application
application = cherrypy.Application(Root())
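# Example JSON-RPC 2.0 exchange against this application (a sketch; the
# actual URL depends on where mod_wsgi mounts it):
# request body: {"jsonrpc": "2.0", "method": "add", "params": [1, 2], "id": 1}
# response body: {"jsonrpc": "2.0", "result": 3, "id": 1}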
| 21.029412 | 79 | 0.706294 | 161 | 0.225175 | 0 | 0 | 82 | 0.114685 | 0 | 0 | 261 | 0.365035 |
96a84245805dc4fa3773a993afd163825be5f67d
| 27,511 |
py
|
Python
|
dicomToProjection/convertDicoms.py
|
tarolangner/mri_biometry
|
8c52f48c2c9ff823a300c5298ea3992b53440816
|
[
"MIT"
] | null | null | null |
dicomToProjection/convertDicoms.py
|
tarolangner/mri_biometry
|
8c52f48c2c9ff823a300c5298ea3992b53440816
|
[
"MIT"
] | null | null | null |
dicomToProjection/convertDicoms.py
|
tarolangner/mri_biometry
|
8c52f48c2c9ff823a300c5298ea3992b53440816
|
[
"MIT"
] | null | null | null |
import os
import sys
import io
import time
import zipfile
import pydicom
import numpy as np
import scipy.interpolate
import numba_interpolate
from skimage import filters
import nrrd
import cv2
c_out_pixel_spacing = np.array((2.23214293, 2.23214293, 3.))
c_resample_tolerance = 0.01 # Only interpolate voxels further off of the voxel grid than this
c_interpolate_seams = True # If yes, cut overlaps between stations to at most c_max_overlap and interpolate along them, otherwise cut at center of overlap
c_correct_intensity = True # If yes, apply intensity correction along overlap
c_max_overlap = 8 # Used in interpolation, any station overlaps are cut to be most this many voxels in size
c_trim_axial_slices = 4 # Trim this many axial slices from the output volume to remove folding artefacts
c_use_gpu = True # If yes, use numba for gpu access, otherwise use scipy on cpu
c_store_mip = True # If yes, extract 2d mean intensity projections as .npy
c_store_ff_slice = False # If yes, extract a single fat fraction slice with liver coverage
c_store_volumes = False # If yes, extract 3d volumes as .nrrd
def main(argv):
input_file = "paths_input.txt"
output_path = "output/"
#ignore_errors = True
ignore_errors = False
if not os.path.exists(os.path.dirname(output_path)): os.makedirs(os.path.dirname(output_path))
with open(input_file) as f: input_paths = f.read().splitlines()
start_time = time.time()
for i in range(len(input_paths)):
dicom_path = input_paths[i]
subject_id = os.path.basename(dicom_path).split("_")[0]
output_file = output_path + "{}".format(subject_id)
print("Processing subject {}: {}".format(i, subject_id))
if ignore_errors:
try:
convertDicom(dicom_path, output_file)
except:
print(" Something went wrong with patient {}".format(subject_id))
else:
convertDicom(dicom_path, output_file)
end_time = time.time()
print("Elapsed time: {}".format(end_time - start_time))
##
# Extract mean intensity projection from input UK Biobank style DICOM zip
def convertDicom(input_path_zip, output_path):
if not os.path.exists(input_path_zip):
print(" ERROR: Could not find input file {}".format(input_path_zip))
return
# Get water and fat signal stations
(voxels_w, voxels_f, positions, pixel_spacings) = getSignalStations(input_path_zip)
origin = np.amin(np.array(positions), axis=0)
# Resample stations onto output volume voxel grid
(voxels_w, _, _, _) = resampleStations(voxels_w, positions, pixel_spacings)
(voxels_f, W, W_end, W_size) = resampleStations(voxels_f, positions, pixel_spacings)
# Cut station overlaps to at most c_max_overlap
(_, _, _, _, voxels_w) = trimStationOverlaps(W, W_end, W_size, voxels_w)
(overlaps, W, W_end, W_size, voxels_f) = trimStationOverlaps(W, W_end, W_size, voxels_f)
# Combine stations to volumes
volume_w = fuseVolume(W, W_end, W_size, voxels_w, overlaps)
volume_f = fuseVolume(W, W_end, W_size, voxels_f, overlaps)
# Create and store mean intensity projections
storeOutput(volume_w, volume_f, output_path, origin)
def storeOutput(volume_w, volume_f, output_path, origin):
if c_store_mip:
mip_w = formatMip(volume_w)
mip_f = formatMip(volume_f)
#mip_out = np.dstack((mip_w, mip_f, np.zeros(mip_w.shape)))
#cv2.imwrite(output_path + ".png", mip_out)
mip_out = np.dstack((mip_w, mip_f))
np.save(output_path + ".npy", mip_out.transpose(2, 0, 1))
if c_store_ff_slice or c_store_volumes:
(volume_wf, volume_ff, volume_mask) = calculateFractions(volume_w, volume_f)
if c_store_ff_slice:
slice_ff = formatSliceFF(volume_ff, volume_mask)
np.save(output_path + "_ff.npy", slice_ff)
if c_store_volumes:
storeNrrd(volume_w, output_path + "_W", origin)
storeNrrd(volume_f, output_path + "_F", origin)
storeNrrd(volume_wf, output_path + "_WF", origin)
storeNrrd(volume_ff, output_path + "_FF", origin)
storeNrrd(volume_mask, output_path + "_mask", origin)
def calculateFractions(volume_w, volume_f):
volume_sum = volume_w + volume_f
volume_sum[volume_sum == 0] = 1
volume_wf = 1000 * volume_w / volume_sum
volume_ff = 1000 * volume_f / volume_sum
# Calculate body mask by getting otsu thresholds
# for all coronal slices and applying their mean
ts = np.zeros(volume_sum.shape[1])
for i in range(volume_sum.shape[1]):
ts[i] = filters.threshold_otsu(volume_sum[:, i, :])
t = np.mean(ts)
volume_mask = np.ones(volume_w.shape).astype("uint8")
volume_mask[volume_sum < t] = 0
return (volume_wf, volume_ff, volume_mask)
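# Worked example of the encoding above: a voxel with water signal 300 and
# fat signal 100 gives volume_ff = 1000 * 100 / 400 = 250, i.e. fractions
# are stored in permille (here 25.0 % fat).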
def storeNrrd(volume, output_path, origin):
# See: http://teem.sourceforge.net/nrrd/format.html
header = {'dimension': 3}
header['type'] = "float"
header['sizes'] = volume.shape
# Spacing info compatible with 3D Slicer
header['space dimension'] = 3
header['space directions'] = np.array(c_out_pixel_spacing * np.eye(3,3))
header['space origin'] = origin
header['space units'] = "\"mm\" \"mm\" \"mm\""
header['encoding'] = 'gzip'
#
nrrd.write(output_path + ".nrrd", volume, header, compression_level=1)
def formatSliceFF(volume, mask):
bed_width = 22
volume = volume[:, :volume.shape[1]-bed_width, :]
mask = mask[:, :mask.shape[1]-bed_width, :]
# Determine mass of body mask
mass = np.count_nonzero(mask) # total mass of the body mask
mass_sag_half = np.count_nonzero(mask[:int(mask.shape[0] / 2), :, :]) # mass of the right half of the body
# Coronal slice at centre of mass
com_cor = getSliceOfMass(mass / 2, mask, 1)
slice_cor = formatFractionSlice(volume[:, com_cor, :])
# Sagittal slice at centre of mass of right body half
com_sag = getSliceOfMass(mass_sag_half / 2, mask, 0)
slice_sag = formatFractionSlice(volume[com_sag, :, :])
# Combine to single output
slice_out = np.concatenate((slice_cor, slice_sag), 1)
slice_out = slice_out[:176, :]
slice_out = cv2.resize(slice_out, (376, 176))
slice_out = slice_out.reshape(1, 176, 376)
return slice_out
def getSliceOfMass(mass, mask, axis):
com_i = 0
shifts = np.array(mask.shape)
for i in range(mask.shape[axis]):
shifts[axis] = i
mass_i = np.count_nonzero(mask[:shifts[0], :shifts[1], :shifts[2]])
if mass_i >= mass:
com_i = i
break
return com_i
def formatFractionSlice(img):
img = np.rot90(img, 1)
img = np.clip(img / 500., 0, 1) * 255 # Encode percentages of 0-50%
img = img.astype("uint8")
return img
# Generate mean intensity projection
def formatMip(volume):
bed_width = 22
volume = volume[:, :volume.shape[1]-bed_width, :]
# Coronal projection
slice_cor = np.sum(volume, axis = 1)
slice_cor = np.rot90(slice_cor, 1)
# Sagittal projection
slice_sag = np.sum(volume, axis = 0)
slice_sag = np.rot90(slice_sag, 1)
# Normalize intensities
slice_cor = (normalize(slice_cor) * 255).astype("uint8")
slice_sag = (normalize(slice_sag) * 255).astype("uint8")
# Combine to single output
slice_out = np.concatenate((slice_cor, slice_sag), 1)
slice_out = cv2.resize(slice_out, (256, 256))
return slice_out
def normalize(img):
img = img.astype("float")
img = (img - np.amin(img)) / (np.amax(img) - np.amin(img))
return img
def getSignalStations(input_path_zip):
# Get stations from DICOM
(stat_voxels, stat_names, stat_positions, stat_pixel_spacings, stat_timestamps) = stationsFromDicom(input_path_zip)
# Find water and fat signal station data
(voxels_w, positions_w, pixel_spacings, timestamps_w) = extractStationsForModality("_W", stat_names, stat_voxels, stat_positions, stat_pixel_spacings, stat_timestamps)
(voxels_f, positions_f, _, timestamps_f) = extractStationsForModality("_F", stat_names, stat_voxels, stat_positions, stat_pixel_spacings, stat_timestamps)
# Ensure that water and fat stations match in position and size and non-redundant
(stations_consistent, voxels_w, voxels_f, positions, pixel_spacings) = ensureStationConsistency(voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings)
if not stations_consistent:
print(" ERROR: Stations are inconsistent!")
return
return (voxels_w, voxels_f, positions, pixel_spacings)
def ensureStationConsistency(voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings):
# Abort if water and fat stations are not in the same positions
if not np.allclose(positions_w, positions_f):
print("ABORT: Water and fat stations are not in the same position!")
return (False, voxels_w, voxels_f, positions_w)
(voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings) = removeDeprecatedStations(voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings)
# Crop corresponding stations to same size where necessary
for i in range(len(positions_w)):
if not np.array_equal(voxels_w[i].shape, voxels_f[i].shape):
print("WARNING: Corresponding stations {} have different dimensions: {} vs {} (Water vs Fat)".format(i, voxels_w[i].shape, voxels_f[i].shape))
print(" Cutting to largest common size")
# Cut to common size
min_size = np.amin(np.vstack((voxels_w[i].shape, voxels_f[i].shape)), axis=0)
voxels_w[i] = np.ascontiguousarray(voxels_w[i][:min_size[0], :min_size[1], :min_size[2]])
voxels_f[i] = np.ascontiguousarray(voxels_f[i][:min_size[0], :min_size[1], :min_size[2]])
# Sort by position
pos_z = np.array(positions_w)[:, 2]
(pos_z, pos_indices) = zip(*sorted(zip(pos_z, np.arange(len(pos_z))), reverse=True))
voxels_w = [voxels_w[i] for i in pos_indices]
positions_w = [positions_w[i] for i in pos_indices]
timestamps_w = [timestamps_w[i] for i in pos_indices]
voxels_f = [voxels_f[i] for i in pos_indices]
positions_f = [positions_f[i] for i in pos_indices]
timestamps_f = [timestamps_f[i] for i in pos_indices]
pixel_spacings = [pixel_spacings[i] for i in pos_indices]
return (True, voxels_w, voxels_f, positions_w, pixel_spacings)
def removeDeprecatedStations(voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings):
# In case of redundant stations, choose the newest
if len(np.unique(positions_w, axis=0)) != len(positions_w):
seg_select = []
for pos in np.unique(positions_w, axis=0):
# Find stations at current position
offsets = np.array(positions_w) - np.tile(pos, (len(positions_w), 1))
dist = np.sum(np.abs(offsets), axis=1)
indices_p = np.where(dist == 0)[0]
if len(indices_p) > 1:
# Choose newest station
timestamps_w_p = [str(x).replace(".", "") for f, x in enumerate(timestamps_w) if f in indices_p]
# If you get scanned around midnight it's your own fault
recent_p = np.argmax(np.array(timestamps_w_p))
print("WARNING: Image stations ({}) are superimposed. Choosing most recently imaged one ({})".format(indices_p, indices_p[recent_p]))
seg_select.append(indices_p[recent_p])
else:
seg_select.append(indices_p[0])
voxels_w = [x for f,x in enumerate(voxels_w) if f in seg_select]
positions_w = [x for f,x in enumerate(positions_w) if f in seg_select]
timestamps_w = [x for f,x in enumerate(timestamps_w) if f in seg_select]
voxels_f = [x for f,x in enumerate(voxels_f) if f in seg_select]
positions_f = [x for f,x in enumerate(positions_f) if f in seg_select]
timestamps_f = [x for f,x in enumerate(timestamps_f) if f in seg_select]
pixel_spacings = [x for f,x in enumerate(pixel_spacings) if f in seg_select]
return (voxels_w, voxels_f, positions_w, positions_f, timestamps_w, timestamps_f, pixel_spacings)
def fuseVolume(W, W_end, W_size, voxels, overlaps):
S = len(voxels)
# Cast to datatype
for i in range(S):
voxels[i] = voxels[i].astype("float32")
# Taper off station edges linearly for later addition
if c_interpolate_seams:
voxels = fadeStationEdges(overlaps, W_size, voxels)
# Adjust mean intensity of overlapping slices
if c_correct_intensity:
voxels = correctOverlapIntensity(overlaps, W_size, voxels)
# Combine stations into volume by addition
volume = combineStationsToVolume(W, W_end, voxels)
# Remove slices affected by folding
if c_trim_axial_slices > 0:
start = c_trim_axial_slices
end = volume.shape[2] - c_trim_axial_slices
volume = volume[:, :, start:end]
return volume
def combineStationsToVolume(W, W_end, voxels):
S = len(voxels)
volume_dim = np.amax(W_end, axis=0).astype("int")
volume = np.zeros(volume_dim)
for i in range(S):
volume[W[i, 0]:W_end[i, 0], W[i, 1]:W_end[i, 1], W[i, 2]:W_end[i, 2]] += voxels[i][:, :, :]
#
volume = np.flip(volume, 2)
volume = np.swapaxes(volume, 0, 1)
return volume
def extractStationsForModality(tag, stat_names, stat_voxels, stat_positions, stat_pixel_spacings, stat_timestamps):
# Merge all stats with given tag
indices_t = [f for f, x in enumerate(stat_names) if str(tag) in str(x)]
voxels_t = [x for f, x in enumerate(stat_voxels) if f in indices_t]
positions_t = [x for f, x in enumerate(stat_positions) if f in indices_t]
pixel_spacings_t = [x for f, x in enumerate(stat_pixel_spacings) if f in indices_t]
timestamps_t = [x for f, x in enumerate(stat_timestamps) if f in indices_t]
return (voxels_t, positions_t, pixel_spacings_t, timestamps_t)
def getSignalSliceNamesInZip(z):
file_names = [f.filename for f in z.infolist()]
# Search for manifest file (name may be misspelled)
csv_name = [f for f in file_names if "manifest" in f][0]
with z.open(csv_name) as f0:
data = f0.read() # Decompress into memory
entries = str(data).split("\\n")
entries.pop(-1)
# Remove trailing blank lines
entries = [f for f in entries if f != ""]
# Get indices of relevant columns
header_elements = entries[0].split(",")
column_filename = [f for f,x in enumerate(header_elements) if "filename" in x][0]
# Search for tags such as "Dixon_noBH_F". The manifest header can not be relied on
for e in entries:
entry_parts = e.split(",")
column_desc = [f for f,x in enumerate(entry_parts) if "Dixon_noBH_F" in x]
if column_desc:
column_desc = column_desc[0]
break
# Get slice descriptions and filenames
descriptions = [f.split(",")[column_desc] for f in entries]
filenames = [f.split(",")[column_filename] for f in entries]
# Extract signal images only
chosen_rows = [f for f,x in enumerate(descriptions) if "_W" in x or "_F" in x]
chosen_filenames = [x for f,x in enumerate(filenames) if f in chosen_rows]
return chosen_filenames
##
# Return, for S stations:
# R: station start coordinates, shape Sx3
# R_end: station end coordinates, shape Sx3
# dims: station extents, shape Sx3
#
# Coordinates in R and R_end are in the voxel space of the first station
def getReadCoordinates(voxels, positions, pixel_spacings):
S = len(voxels)
# Convert from list to arrays
positions = np.array(positions)
pixel_spacings = np.array(pixel_spacings)
# Get dimensions of stations
dims = np.zeros((S, 3))
for i in range(S):
dims[i, :] = voxels[i].shape
# Get station start coordinates
R = positions
origin = np.array(R[0])
for i in range(S):
R[i, :] = (R[i, :] - origin) / c_out_pixel_spacing
R[:, 0] -= np.amin(R[:, 0])
R[:, 1] -= np.amin(R[:, 1])
R[:, 2] *= -1
R[:, [0, 1]] = R[:, [1, 0]]
# Get station end coordinates
R_end = np.array(R)
for i in range(S):
R_end[i, :] += dims[i, :] * pixel_spacings[i, :] / c_out_pixel_spacing
return (R, R_end, dims)
##
# Linearly taper off voxel values along overlap of two stations,
# so that their addition leads to a linear interpolation.
def fadeStationEdges(overlaps, W_size, voxels):
S = len(voxels)
for i in range(S):
# Only fade inwards facing edges for outer stations
fadeToPrev = (i > 0)
fadeToNext = (i < (S - 1))
# Fade ending edge (facing to next station)
if fadeToNext:
for j in range(overlaps[i]):
factor = (j+1) / (float(overlaps[i]) + 1) # exclude 0 and 1
voxels[i][:, :, W_size[i, 2] - 1 - j] *= factor
# Fade starting edge (facing to previous station)
if fadeToPrev:
for j in range(overlaps[i-1]):
factor = (j+1) / (float(overlaps[i-1]) + 1) # exclude 0 and 1
voxels[i][:, :, j] *= factor
return voxels
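# Worked example of the taper above: with an overlap of 3 slices the
# outgoing station is weighted 3/4, 2/4, 1/4 towards its edge and the
# incoming station 1/4, 2/4, 3/4, so the summed weights across the seam
# always add up to 1.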
##
# Take mean intensity of slices at the edge of the overlap between stations i and (i+1)
# Adjust mean intensity of each slice along the overlap to linear gradient between these means
def correctOverlapIntensity(overlaps, W_size, voxels):
S = len(voxels)
for i in range(S - 1):
overlap = overlaps[i]
# Get average intensity at outer ends of overlap
edge_a = voxels[i+1][:, :, overlap]
edge_b = voxels[i][:, :, W_size[i, 2] - 1 - overlap]
mean_a = np.mean(edge_a)
mean_b = np.mean(edge_b)
for j in range(overlap):
# Get desired mean intensity along gradient
factor = (j+1) / (float(overlap) + 1)
target_mean = mean_b + (mean_a - mean_b) * factor
# Get current mean of slice when both stations are summed
slice_b = voxels[i][:, :, W_size[i, 2] - overlap + j]
slice_a = voxels[i+1][:, :, j]
slice_mean = np.mean(slice_a) + np.mean(slice_b)
# Get correction factor
correct = target_mean / slice_mean
# correct intensity to match linear gradient
voxels[i][:, :, W_size[i, 2] - overlap + j] *= correct
voxels[i+1][:, :, j] *= correct
return voxels
##
# Ensure that the stations i and (i + 1) overlap by at most c_max_overlap.
# Trim any excess symmetrically
# Update their extents in W and W_end
def trimStationOverlaps(W, W_end, W_size, voxels):
W = np.array(W)
W_end = np.array(W_end)
W_size = np.array(W_size)
S = len(voxels)
overlaps = np.zeros(S).astype("int")
for i in range(S - 1):
# Get overlap between current and next station
overlap = W_end[i, 2] - W[i + 1, 2]
# No overlap
if overlap <= 0:
print("WARNING: No overlap between stations {} and {}. Image might be faulty.".format(i, i+1))
# Small overlap which can be used for interpolation
elif overlap <= c_max_overlap and c_interpolate_seams:
print("WARNING: Overlap between stations {} and {} is only {}. Using this overlap for interpolation".format(i, i+1, overlap))
# Large overlap which must be cut
else:
if c_interpolate_seams:
# Keep an overlap of at most c_max_overlap
cut_a = (overlap - c_max_overlap) / 2.
overlap = c_max_overlap
else:
# Cut at center of seam
cut_a = overlap / 2.
overlap = 0
cut_b = int(np.ceil(cut_a))
cut_a = int(np.floor(cut_a))
voxels[i] = voxels[i][:, :, 0:(W_size[i, 2] - cut_a)]
voxels[i + 1] = voxels[i + 1][:, :, cut_b:]
#
W_end[i, 2] = W_end[i, 2] - cut_a
W_size[i, 2] -= cut_a
W[i + 1, 2] = W[i + 1, 2] + cut_b
W_size[i + 1, 2] -= cut_b
overlaps[i] = overlap
return (overlaps, W, W_end, W_size, voxels)
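# Worked example: an overlap of 20 slices with c_max_overlap = 8 is trimmed
# by (20 - 8) / 2 = 6 slices on each side, leaving an 8-slice overlap for
# the interpolation step.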
##
# Station voxels are positioned at R to R_end, not necessarily aligned with output voxel grid
# Resample stations onto voxel grid of output volume
def resampleStations(voxels, positions, pixel_spacings):
# R: station positions off grid respective to output volume
# W: station positions on grid after resampling
(R, R_end, dims) = getReadCoordinates(voxels, positions, pixel_spacings)
# Get coordinates of voxels to write to
W = np.around(R).astype("int")
W_end = np.around(R_end).astype("int")
W_size = W_end - W
result_data = []
#
for i in range(len(voxels)):
# Get largest offset off of voxel grid
offsets = np.concatenate((R[i, :].flatten(), R_end[i, :].flatten()))
offsets = np.abs(offsets - np.around(offsets))
max_offset = np.amax(offsets)
# Get difference in voxel counts
voxel_count_out = np.around(W_size[i, :])
voxel_count_dif = np.sum(voxel_count_out - dims[i, :])
# No resampling if station voxels are already aligned with output voxel grid
doResample = (max_offset > c_resample_tolerance or voxel_count_dif != 0)
result = None
if doResample:
if c_use_gpu:
# Use numba implementation on gpu:
scalings = (R_end[i, :] - R[i, :]) / dims[i, :]
offsets = R[i, :] - W[i, :]
result = numba_interpolate.interpolate3d(W_size[i, :], voxels[i], scalings, offsets)
else:
# Use scipy CPU implementation:
# Define positions of station voxels (off of output volume grid)
x_s = np.linspace(int(R[i, 0]), int(R_end[i, 0]), int(dims[i, 0]))
y_s = np.linspace(int(R[i, 1]), int(R_end[i, 1]), int(dims[i, 1]))
z_s = np.linspace(int(R[i, 2]), int(R_end[i, 2]), int(dims[i, 2]))
# Define positions of output volume voxel grid
y_v = np.linspace(W[i, 0], W_end[i, 0], W_size[i, 0])
x_v = np.linspace(W[i, 1], W_end[i, 1], W_size[i, 1])
z_v = np.linspace(W[i, 2], W_end[i, 2], W_size[i, 2])
xx_v, yy_v, zz_v = np.meshgrid(x_v, y_v, z_v)
pts = np.zeros((xx_v.size, 3))
pts[:, 1] = xx_v.flatten()
pts[:, 0] = yy_v.flatten()
pts[:, 2] = zz_v.flatten()
# Resample stations onto output voxel grid
rgi = scipy.interpolate.RegularGridInterpolator((x_s, y_s, z_s), voxels[i], bounds_error=False, fill_value=None)
result = rgi(pts)
else:
# No resampling necessary
result = voxels[i]
result_data.append(result.reshape(W_size[i, :]))
return (result_data, W, W_end, W_size)
def groupSlicesToStations(sl_pixels, sl_series, sl_names, sl_positions, sl_pixel_spacings, sl_times):
# Group by series into stats
unique_series = np.unique(sl_series)
#
stat_voxels = []
stat_series = []
stat_names = []
stat_positions = []
stat_voxel_spacings = []
stat_times = []
# Each series forms one station
for s in unique_series:
# Get slice indices for series s
indices_s = [f for f, x in enumerate(sl_series) if x == s]
# Get physical positions of slice
sl_positions_s = [x for f, x in enumerate(sl_positions) if f in indices_s]
position_max = np.amax(np.array(sl_positions_s).astype("float"), axis=0)
stat_positions.append(position_max)
# Combine slices to stations
voxels_s = slicesToStationData(indices_s, sl_positions_s, sl_pixels)
stat_voxels.append(voxels_s)
# Get index of first slice
sl_0 = indices_s[0]
stat_series.append(sl_series[sl_0])
stat_names.append(sl_names[sl_0])
stat_times.append(sl_times[sl_0])
# Get 3d voxel spacing
voxel_spacing_2d = sl_pixel_spacings[sl_0]
# Get third dimension by dividing station extent by slice count
z_min = np.amin(np.array(sl_positions_s)[:, 2].astype("float"))
z_max = np.amax(np.array(sl_positions_s)[:, 2].astype("float"))
z_spacing = (z_max - z_min) / (len(sl_positions_s) - 1)
voxel_spacing = np.hstack((voxel_spacing_2d, z_spacing))
stat_voxel_spacings.append(voxel_spacing)
return (stat_voxels, stat_names, stat_positions, stat_voxel_spacings, stat_times)
def getDataFromDicom(ds):
pixels = ds.pixel_array
series = ds.get_item(["0020", "0011"]).value
series = int(series)
name = ds.get_item(["0008", "103e"]).value
position = ds.get_item(["0020", "0032"]).value
position = np.array(position.decode().split("\\")).astype("float32")
pixel_spacing = ds.get_item(["0028", "0030"]).value
pixel_spacing = np.array(pixel_spacing.decode().split("\\")).astype("float32")
start_time = ds.get_item(["0008", "0031"]).value
return (pixels, series, name, position, pixel_spacing, start_time)
def slicesToStationData(slice_indices, slice_positions, slices):
# Get size of output volume station
slice_count = len(slice_indices)
slice_shape = slices[slice_indices[0]].shape
# Get slice positions
slices_z = np.zeros(slice_count)
for z in range(slice_count):
slices_z[z] = slice_positions[z][2]
# Sort slices by position
(slices_z, slice_indices) = zip(*sorted(zip(slices_z, slice_indices), reverse=True))
# Write slices to volume station
dim = np.array((slice_shape[0], slice_shape[1], slice_count))
station = np.zeros(dim)
for z in range(dim[2]):
slice_z_index = slice_indices[z]
station[:, :, z] = slices[slice_z_index]
return station
def stationsFromDicom(input_path_zip):
# Get slice info
pixels = []
series = []
names = []
positions = []
pixel_spacings = []
times = []
#
z = zipfile.ZipFile(input_path_zip)
signal_slice_names = getSignalSliceNamesInZip(z)
for i in range(len(signal_slice_names)):
# Read signal slices in memory
with z.open(signal_slice_names[i]) as f0:
data = f0.read() # Decompress into memory
ds = pydicom.read_file(io.BytesIO(data)) # Read from byte stream
(pixels_i, series_i, name_i, position_i, spacing_i, time_i) = getDataFromDicom(ds)
pixels.append(pixels_i)
series.append(series_i)
names.append(name_i)
positions.append(position_i)
pixel_spacings.append(spacing_i)
times.append(time_i)
z.close()
(stat_voxels, stat_names, stat_positions, stat_voxel_spacings, stat_times) = groupSlicesToStations(pixels, series, names, positions, pixel_spacings, times)
return (stat_voxels, stat_names, stat_positions, stat_voxel_spacings, stat_times)
if __name__ == '__main__':
main(sys.argv)
| 32.947305 | 211 | 0.637963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,182 | 0.22471 |
96a94e5f66df21e992b1df975469b8edd292ca16
| 3,285 |
py
|
Python
|
ffttest.py
|
teslaworksumn/Reactor
|
ba6d2d80bd606047e81a5e1ccc0f1af26497feb7
|
[
"MIT"
] | null | null | null |
ffttest.py
|
teslaworksumn/Reactor
|
ba6d2d80bd606047e81a5e1ccc0f1af26497feb7
|
[
"MIT"
] | null | null | null |
ffttest.py
|
teslaworksumn/Reactor
|
ba6d2d80bd606047e81a5e1ccc0f1af26497feb7
|
[
"MIT"
] | null | null | null |
# From http://julip.co/2012/05/arduino-python-soundlight-spectrum/
# Python 2.7 code to analyze sound and interface with Arduino
import pyaudio # from http://people.csail.mit.edu/hubert/pyaudio/
import serial # from http://pyserial.sourceforge.net/
import numpy # from http://numpy.scipy.org/
import audioop
import sys
import math
import struct
'''
Sources
http://www.swharden.com/blog/2010-03-05-realtime-fft-graph-of-audio-wav-file-or-microphone-input-with-python-scipy-and-wckgraph/
http://macdevcenter.com/pub/a/python/2001/01/31/numerically.html?page=2
'''
MAX = 0
NUM = 20
def list_devices():
# List all audio input devices
p = pyaudio.PyAudio()
i = 0
n = p.get_device_count()
while i < n:
dev = p.get_device_info_by_index(i)
if dev['maxInputChannels'] > 0:
print str(i)+'. '+dev['name']
i += 1
def fft():
chunk = 2**11 # Change if too fast/slow, never less than 2**11
scale = 25 # Change if too dim/bright
exponent = 3 # Change if too little/too much difference between loud and quiet sounds
samplerate = 44100
# CHANGE THIS TO CORRECT INPUT DEVICE
# Enable stereo mixing in your sound card
# to make you sound output an input
# Use list_devices() to list all your input devices
device = 1 # Mic
#device = 4 # SF2
p = pyaudio.PyAudio()
stream = p.open(format = pyaudio.paInt16,
channels = 1,
rate = 44100,
input = True,
frames_per_buffer = chunk,
input_device_index = device)
print "Starting, use Ctrl+C to stop"
try:
ser = serial.Serial(
port='/dev/ttyS0',
timeout=1
)
while True:
data = stream.read(chunk)
# Do FFT
levels = calculate_levels(data, chunk, samplerate)
# Make it look better and send to serial
for level in levels:
level = max(min(level / scale, 1.0), 0.0)
level = level**exponent
level = int(level * 255)
#ser.write(chr(level))
#sys.stdout.write(str(level)+' ')
#sys.stdout.write('\n')
#s = ser.read(6)
except KeyboardInterrupt:
pass
finally:
print "\nStopping"
stream.close()
p.terminate()
#ser.close()
def calculate_levels(data, chunk, samplerate):
# Use FFT to calculate volume for each frequency
global MAX
# Convert raw sound data to Numpy array
fmt = "%dH"%(len(data)/2)
data2 = struct.unpack(fmt, data)
data2 = numpy.array(data2, dtype='h')
# Apply FFT
fourier = numpy.fft.fft(data2)
ffty = numpy.abs(fourier[0:len(fourier)/2])/1000
ffty1=ffty[:len(ffty)/2]
ffty2=ffty[len(ffty)/2::]+2
ffty2=ffty2[::-1]
ffty=ffty1+ffty2
ffty=numpy.log(ffty)-2
fourier = list(ffty)[4:-4]
fourier = fourier[:len(fourier)/2]
size = len(fourier)
# Add up into NUM frequency bands
levels = [sum(fourier[i:(i+size/NUM)]) for i in xrange(0, size, size/NUM)][:NUM]
return levels
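# Example of the grouping above: with size = 1000 and NUM = 20 the spectrum
# is summed in chunks of size/NUM = 50 bins, one level per output band.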
if __name__ == '__main__':
#list_devices()
fft()
| 28.076923 | 128 | 0.578082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,216 | 0.370167 |
96aa0527808e7632054573910aceede43a35b2b3
| 6,422 |
py
|
Python
|
monitoring/perf-monitor-test.py
|
abhisheksawarkar/gcp-ml-ops
|
462780e6caad370781e191f530f1fd4a4a57431c
|
[
"Apache-2.0"
] | 30 |
2021-04-14T16:52:19.000Z
|
2022-03-17T20:39:42.000Z
|
monitoring/perf-monitor-test.py
|
shashank3959/gcp-ml-ops
|
afa7885e0230c580296724d6dcc5e619a115f24c
|
[
"Apache-2.0"
] | null | null | null |
monitoring/perf-monitor-test.py
|
shashank3959/gcp-ml-ops
|
afa7885e0230c580296724d6dcc5e619a115f24c
|
[
"Apache-2.0"
] | 4 |
2021-04-14T16:52:28.000Z
|
2022-01-13T19:05:26.000Z
|
# Copyright (c) 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import logging
import argparse
import sys
import warnings
import sys
import time
import json
import cudf
from sklearn import metrics
import pandas as pd
import tritonclient.http as httpclient
import tritonclient.grpc as grpcclient
from tritonclient.utils import *
from google.cloud import pubsub_v1
from google.protobuf.json_format import MessageToJson
from google.pubsub_v1.types import Encoding
def publish_batch(project_id, topic_id, current_batch, pred_label):
# Initialize a Publisher client.
client = pubsub_v1.PublisherClient()
topic_path = client.topic_path(project_id, topic_id)
batch_size = len(pred_label)
df = current_batch.to_pandas()
for i in range(batch_size):
row = df.iloc[i]
frame = {
"input0": row[CONTINUOUS_COLUMNS].values.tolist(),
"input1": row[CATEGORICAL_COLUMNS].values.tolist(),
"trueval": row['label'],
"predval": response.as_numpy("OUTPUT0")[i].astype('float64')
}
payload = json.dumps(frame).encode('utf-8')
# When you publish a message, the client returns a future.
api_future = client.publish(topic_path, data=''.encode(), payload=payload)
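# Shape of the message payload published above (illustrative values only):
# {"input0": [<13 continuous features>], "input1": [<26 categorical ids>],
#  "trueval": 0.0, "predval": 0.0312}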
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-u',
'--triton_grpc_url',
type=str,
required=False,
default='localhost:8001',
help='URL to Triton gRPC Endpoint')
parser.add_argument('-m',
'--model_name',
type=str,
required=False,
default='dcn_ens',
help='Name of the model ensemble to load')
parser.add_argument('-d',
'--test_data',
type=str,
required=False,
default='/crit_int_pq/day_23.parquet',
help='Path to a test .parquet file. Default')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=64,
help='Batch size. Max is 64 at the moment, but this max size could be specified when creating the model and the ensemble.')
parser.add_argument('-n',
'--n_batches',
type=int,
required=False,
default=1,
help='Number of batches of data to send')
parser.add_argument('-v',
'--verbose',
type=bool,
required=False,
default=False,
help='Verbosity, True or False')
parser.add_argument("--project_id",
type=str,
required=True,
default="dl-tme",
help="Google Cloud project ID")
parser.add_argument("--topic_id",
type=str,
required=True,
default="pubsub",
help="Pub/Sub topic ID")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%d-%m-%y %H:%M:%S')
logging.info(f"Args: {args}")
# warnings can be disabled
if not sys.warnoptions:
warnings.simplefilter("ignore")
try:
triton_client = grpcclient.InferenceServerClient(url=args.triton_grpc_url, verbose=args.verbose)
logging.info("Triton client created.")
triton_client.is_model_ready(args.model_name)
logging.info(f"Model {args.model_name} is ready!")
except Exception as e:
logging.error(f"Channel creation failed: {str(e)}")
sys.exit()
# Load the dataset
CATEGORICAL_COLUMNS = ['C' + str(x) for x in range(1,27)]
CONTINUOUS_COLUMNS = ['I' + str(x) for x in range(1,14)]
LABEL_COLUMNS = ['label']
col_names = CATEGORICAL_COLUMNS + CONTINUOUS_COLUMNS
col_dtypes = [np.int32]*26 + [np.int64]*13
logging.info("Reading dataset..")
all_batches = cudf.read_parquet(args.test_data, num_rows=args.batch_size*args.n_batches)
results=[]
with grpcclient.InferenceServerClient(url=args.triton_grpc_url) as client:
for batch in range(args.n_batches):
logging.info(f"Requesting inference for batch {batch}..")
start_idx = batch*args.batch_size
end_idx = (batch+1)*(args.batch_size)
# Convert the batch to Triton inputs
current_batch = all_batches[start_idx:end_idx]
columns = [(col, current_batch[col]) for col in col_names]
inputs = []
for i, (name, col) in enumerate(columns):
d = col.values_host.astype(col_dtypes[i])
d = d.reshape(len(d), 1)
inputs.append(grpcclient.InferInput(name, d.shape, np_to_triton_dtype(col_dtypes[i])))
inputs[i].set_data_from_numpy(d)
outputs = []
outputs.append(grpcclient.InferRequestedOutput("OUTPUT0"))
response = client.infer(args.model_name, inputs, request_id=str(1), outputs=outputs)
results.extend(response.as_numpy("OUTPUT0"))
publish_batch(args.project_id, args.topic_id,
current_batch,
response.as_numpy("OUTPUT0"))
logging.info(f"ROC AUC Score: {metrics.roc_auc_score(all_batches[LABEL_COLUMNS].values.tolist(), results)}")
| 34.902174 | 145 | 0.575833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,814 | 0.282467 |
96aa3bc5e94ffc210e626376f0da8dd2ffc01f94
| 3,996 |
py
|
Python
|
daemon/core/gui/dialogs/throughput.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/dialogs/throughput.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/dialogs/throughput.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
"""
throughput dialog
"""
import tkinter as tk
from tkinter import ttk
from typing import TYPE_CHECKING
from core.gui.dialogs.colorpicker import ColorPickerDialog
from core.gui.dialogs.dialog import Dialog
from core.gui.themes import FRAME_PAD, PADX, PADY
if TYPE_CHECKING:
from core.gui.app import Application
class ThroughputDialog(Dialog):
def __init__(self, master: "Application", app: "Application"):
super().__init__(master, app, "Throughput Config", modal=False)
self.app = app
self.canvas = app.canvas
self.show_throughput = tk.IntVar(value=1)
self.exponential_weight = tk.IntVar(value=1)
self.transmission = tk.IntVar(value=1)
self.reception = tk.IntVar(value=1)
self.threshold = tk.DoubleVar(value=self.canvas.throughput_threshold)
self.width = tk.IntVar(value=self.canvas.throughput_width)
self.color = self.canvas.throughput_color
self.color_button = None
self.top.columnconfigure(0, weight=1)
self.draw()
def draw(self):
button = ttk.Checkbutton(
self.top,
variable=self.show_throughput,
text="Show Throughput Level On Every Link",
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top,
variable=self.exponential_weight,
text="Use Exponential Weighted Moving Average",
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top, variable=self.transmission, text="Include Transmissions"
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top, variable=self.reception, text="Include Receptions"
)
button.grid(sticky="ew")
label_frame = ttk.LabelFrame(self.top, text="Link Highlight", padding=FRAME_PAD)
label_frame.columnconfigure(0, weight=1)
label_frame.grid(sticky="ew")
scale = ttk.Scale(
label_frame,
from_=0,
to=1000,
value=0,
orient=tk.HORIZONTAL,
variable=self.threshold,
)
scale.grid(sticky="ew", pady=PADY)
frame = ttk.Frame(label_frame)
frame.grid(sticky="ew")
frame.columnconfigure(1, weight=1)
label = ttk.Label(frame, text="Threshold Kbps (0 disabled)")
label.grid(row=0, column=0, sticky="ew", padx=PADX)
entry = ttk.Entry(frame, textvariable=self.threshold)
entry.grid(row=0, column=1, sticky="ew", pady=PADY)
label = ttk.Label(frame, text="Width")
label.grid(row=1, column=0, sticky="ew", padx=PADX)
entry = ttk.Entry(frame, textvariable=self.width)
entry.grid(row=1, column=1, sticky="ew", pady=PADY)
label = ttk.Label(frame, text="Color")
label.grid(row=2, column=0, sticky="ew", padx=PADX)
self.color_button = tk.Button(
frame,
text=self.color,
command=self.click_color,
bg=self.color,
highlightthickness=0,
)
self.color_button.grid(row=2, column=1, sticky="ew")
self.draw_spacer()
frame = ttk.Frame(self.top)
frame.grid(sticky="ew")
for i in range(2):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Save", command=self.click_save)
button.grid(row=0, column=0, sticky="ew", padx=PADX)
button = ttk.Button(frame, text="Cancel", command=self.destroy)
button.grid(row=0, column=1, sticky="ew")
def click_color(self):
color_picker = ColorPickerDialog(self, self.app, self.color)
self.color = color_picker.askcolor()
self.color_button.config(bg=self.color, text=self.color, bd=0)
def click_save(self):
self.canvas.throughput_threshold = self.threshold.get()
self.canvas.throughput_width = self.width.get()
self.canvas.throughput_color = self.color
self.destroy()
| 36 | 88 | 0.62037 | 3,676 | 0.91992 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.082082 |
96aa991d741b497c4ac277aabd1b587505844ad6
| 5,488 |
py
|
Python
|
backend/api/tests/unit_tests/test_cards.py
|
hieutt99/aidudu
|
00dff59e8dff109904b340cd7ae763d6201773e8
|
[
"MIT"
] | 3 |
2021-10-31T19:32:20.000Z
|
2022-01-02T15:31:11.000Z
|
backend/api/tests/unit_tests/test_cards.py
|
hieutt99/aidudu
|
00dff59e8dff109904b340cd7ae763d6201773e8
|
[
"MIT"
] | 49 |
2021-10-31T16:08:35.000Z
|
2022-01-04T16:29:06.000Z
|
backend/api/tests/unit_tests/test_cards.py
|
hieutt99/aidudu
|
00dff59e8dff109904b340cd7ae763d6201773e8
|
[
"MIT"
] | 2 |
2021-12-19T17:03:22.000Z
|
2022-01-03T08:27:01.000Z
|
from django.test.client import BOUNDARY
from api.tests.unit_tests.utils import *
class CardTest(APITestCase):
# url = reverse('card-detail')
def setUp(self):
hook_init_APITestCase(self)
self.test_workspace = Workspace.objects.create(
name='test_workspace',
# default value for other fields
)
self.test_workspace.save()
self.workspace_membership = WorkspaceMembership.objects.create(
workspace=self.test_workspace,
user=self.me,
# default value for other fields
)
self.workspace_membership.save()
self.test_board = Board.objects.create(
name='test_board',
workspace=self.test_workspace,
# default value for other fields
)
self.test_board.save()
self.board_membership = BoardMembership.objects.create(
user=self.me,
board=self.test_board,
# default value for other fields
)
self.test_list = List.objects.create(
name='test_list',
board=self.test_board
# default value for other fields
)
self.test_list.save()
test_card_1= Card.objects.create(
list=self.test_list,
title='test_card 1',
description='This is card number 1'
# default value for other fields
)
test_card_1.save()
test_card_2= Card.objects.create(
list=self.test_list,
title='test_card 2',
description='This is card number 2'
# default value for other fields
)
test_card_2.save()
self.test_card = [test_card_1, test_card_2]
self.card_membership = CardMembership.objects.create(
user=self.me,
card=self.test_card[0]
)
self.test_label = Label.objects.create(
board=self.test_board,
name='test label'
# default value for other fields
)
self.test_label.save()
def tearDown(self):
if self.test_card is not None:
for card in self.test_card:
card.delete()
if self.card_membership is not None:
self.card_membership.delete()
if self.test_label is not None:
self.test_label.delete()
if self.workspace_membership is not None:
self.workspace_membership.delete()
if self.test_list is not None:
self.test_list.delete()
if self.test_board is not None:
self.test_board.delete()
if self.test_workspace is not None:
self.test_workspace.delete()
if self.me is not None:
self.me.delete()
def test_success_get_card(self):
resp = self.client.get(reverse('card-detail', args=[self.test_card[0].id]))
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.data['title'], self.test_card[0].title)
self.assertEqual(resp.data['description'], self.test_card[0].description)
self.assertEqual(resp.data['list'], self.test_list.id)
def test_success_create_card(self):
data = {
'list': self.test_list.id,
'title': 'card 2',
'description': 'This is card number 2'
}
resp = self.client.post(reverse('card-list'), data)
self.assertEqual(201, resp.status_code)
def test_success_update_card(self):
data = {
'list': self.test_list.id,
'title': 'modified title',
'description': 'modified desc'
}
resp = self.client.put(reverse('card-detail', args=[self.test_card[0].id]), data=data)
self.assertEqual(200, resp.status_code)
# check if the card is updated
resp = self.client.get(reverse('card-detail', args=[self.test_card[0].id]))
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.data['title'], 'modified title')
self.assertEqual(resp.data['description'], 'modified desc')
def test_success_add_label_card(self):
data = {
'id': self.test_label.id
}
resp = self.client.post(reverse('card-handle-labels-in-card', args=[self.test_card[0].id]), data=data)
self.assertEqual(204, resp.status_code)
def test_success_remove_label(self):
data = {
'id': self.test_label.id
}
resp = self.client.post(reverse('card-handle-labels-in-card', args=[self.test_card[0].id]), data=data)
self.assertEqual(204, resp.status_code)
def test_success_add_member_card(self):
data = {
'id': self.me.id
}
resp = self.client.post(reverse('card-handle-members-in-card', args=[self.test_card[1].id]), data=data)
self.assertEqual(204, resp.status_code)
def test_success_remove_member(self):
data = {
'id': self.me.id
}
resp = self.client.post(reverse('card-handle-members-in-card', args=[self.test_card[1].id]), data=data)
self.assertEqual(204, resp.status_code)
def test_success_delete_card(self):
resp = self.client.delete(reverse('card-detail', args=[self.test_card[0].id]))
self.assertEqual(204, resp.status_code)
# check if card is deleted
resp = self.client.get(reverse('card-detail', args=[self.test_card[0].id]))
self.assertEqual(404, resp.status_code)
| 35.179487 | 111 | 0.598397 | 5,405 | 0.984876 | 0 | 0 | 0 | 0 | 0 | 0 | 857 | 0.156159 |
96ab9f2c7f20292bca2815ee86e2e792b39a18da
| 1,412 |
py
|
Python
|
mouse.py
|
Ra-Na/android-mouse-cursor
|
b9f0a8394871cb17a2d6ec1a0cc2548b86990ce0
|
[
"MIT"
] | 7 |
2019-12-05T13:34:37.000Z
|
2022-01-15T09:58:11.000Z
|
mouse.py
|
Ra-Na/android-mouse-cursor
|
b9f0a8394871cb17a2d6ec1a0cc2548b86990ce0
|
[
"MIT"
] | null | null | null |
mouse.py
|
Ra-Na/android-mouse-cursor
|
b9f0a8394871cb17a2d6ec1a0cc2548b86990ce0
|
[
"MIT"
] | 5 |
2019-07-27T02:28:04.000Z
|
2022-02-14T15:10:25.000Z
|
import socket
def getch(): # define non-Windows version
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
# get your phone's IP by visiting https://www.whatismyip.com/
# then specify your IPv6 here like so
UDP_IP = "2a01:30:2a04:3c1:c83c:2315:9d2b:9a40" # IPv6
UDP_PORT = 9999
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print ""
print "W, A, S, D - Move mouse"
print "Space - Click"
print "Q - Quit"
# IPv6
sock = socket.socket(socket.AF_INET6, # Internet
socket.SOCK_DGRAM) # UDP
# IPv4
# sock = socket.socket(socket.AF_INET, # Internet
# socket.SOCK_DGRAM) # UDP
while True:
key = ord(getch())
if key == 119: # W
# print 'up'
sock.sendto('0', (UDP_IP, UDP_PORT))
elif key == 97: # A
# print 'left'
sock.sendto('2', (UDP_IP, UDP_PORT))
elif key == 115: # S
# print 'down'
sock.sendto('1', (UDP_IP, UDP_PORT))
elif key == 100: # D
# print 'right'
sock.sendto('3', (UDP_IP, UDP_PORT))
elif key == 113: # Q
break
elif key == 32: # Space
# print 'click'
sock.sendto('4', (UDP_IP, UDP_PORT))
| 25.214286 | 62 | 0.576487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.353399 |
96adbd6c68f6247e87e6ccdd7457197d2e799780
| 4,278 |
py
|
Python
|
routes/process_tag.py
|
PowerSaucisse/QuarKEY-api-server
|
ba327d3a49e8ea35efbb989550cb8a1429098b15
|
[
"MIT"
] | 5 |
2021-07-26T14:46:35.000Z
|
2021-07-26T22:50:56.000Z
|
routes/process_tag.py
|
PowerSaucisse/quarkey-api-server
|
ba327d3a49e8ea35efbb989550cb8a1429098b15
|
[
"MIT"
] | null | null | null |
routes/process_tag.py
|
PowerSaucisse/quarkey-api-server
|
ba327d3a49e8ea35efbb989550cb8a1429098b15
|
[
"MIT"
] | null | null | null |
from utils.security.auth import AccountAuthToken
import falcon, uuid, datetime
from routes.middleware import AuthorizeResource
from utils.base import api_validate_form, api_message
from utils.config import AppState
class ProcessTag:
def __init__(self) -> None:
self._token_controller = AccountAuthToken('', '')
@falcon.before(AuthorizeResource(roles=['standard']))
def on_post(self, req, resp):
resp.status = falcon.HTTP_BAD_REQUEST
payload = self._token_controller.decode(req.get_header('Authorization'))
tag_id = uuid.uuid4().hex
q1 = None
with AppState.Database.CONN.cursor() as cur:
cur.execute("SELECT t1.id, t1.name FROM tags AS t1 WHERE t1.f_owner = %s AND t1.name = %s", (payload["uid"], req.media["name"]))
q1 = cur.fetchall()
api_message("d", f'tag SQL request content {q1}')
if len(q1) > 0:
resp.media = {"title": "BAD_REQUEST", "description": "tag already exist"}
return
with AppState.Database.CONN.cursor() as cur:
try:
cur.execute(
"INSERT INTO tags (id, f_owner, name, color) VALUES (%s, %s, %s, %s)",
(
tag_id,
payload["uid"],
req.media["name"],
req.media["color"]
)
)
AppState.Database.CONN.commit()
except Exception as e:
AppState.Database.CONN.rollback()
api_message("e", f'Failed transaction : {e}')
raise falcon.HTTPBadRequest()
resp.status = falcon.HTTP_CREATED
resp.media = {"title": "CREATED", "description": "tag created successful", "content": {"tag_id": tag_id}}
@falcon.before(AuthorizeResource(roles=["standard"]))
def on_delete(self, req, resp):
resp.status = falcon.HTTP_BAD_REQUEST
payload = self._token_controller.decode(req.get_header('Authorization'))
tag_id = req.get_param("tag_id")
tag_name = req.get_param("tag_name")
q1 = None
with AppState.Database.CONN.cursor() as cur:
if tag_name is not None and tag_name != 'global':
cur.execute("SELECT id FROM tags WHERE f_owner = %s AND name = %s", (payload["uid"], tag_name))
else:
cur.execute("SELECT id FROM tags WHERE f_owner = %s AND id = %s AND name != 'global'", (payload["uid"], tag_id))
q1 = cur.fetchone()
# Guard before logging: fetchone() may return None
if q1 is None or len(q1) < 1:
return
api_message("d", f'tag id by request : {q1[0]}, type : {type(q1[0])}')
tag_id = uuid.UUID(q1[0]).hex
with AppState.Database.CONN.cursor() as cur:
try:
cur.execute("DELETE FROM password_tag_linkers WHERE f_tag = %s", (tag_id,))
cur.execute("DELETE FROM tags AS t1 WHERE t1.id = %s", (tag_id,))
AppState.Database.CONN.commit()
except Exception as e:
AppState.Database.CONN.rollback()
api_message("e", f'Failed transaction : {e}')
raise falcon.HTTPBadRequest()
resp.status = falcon.HTTP_OK
@falcon.before(AuthorizeResource(roles=["standard"]))
def on_get(self, req, resp):
resp.status = falcon.HTTP_400
payload = self._token_controller.decode(req.get_header('Authorization'))
q1 = None
with AppState.Database.CONN.cursor() as cur:
cur.execute("SELECT id, name, color FROM tags WHERE f_owner = %s AND name != 'global'", (payload["uid"],))
q1 = cur.fetchall()
if len(q1) < 1:
resp.status = falcon.HTTP_200
resp.media = {"title": "OK", "description": "Empty tag list"}
return
results: list = []
for x in q1:
tag_itm: dict = {}
tag_itm["id"] = uuid.UUID(x[0]).hex
tag_itm["name"] = x[1]
tag_itm["color"] = x[2]
results.append(tag_itm)
resp.status = falcon.HTTP_OK
resp.media = {"title": "OK", "description": "tags getted successful", "content": results}
return
| 39.981308 | 140 | 0.556568 | 4,060 | 0.949042 | 0 | 0 | 3,932 | 0.919121 | 0 | 0 | 974 | 0.227676 |
96aeee51e8b4208d515dafe2237e76a19c17dd76
| 895 |
py
|
Python
|
players/human.py
|
pikatyuu/deep-learning-othello
|
d9f149b01f079f5d021ba9655445cd43a847a628
|
[
"MIT"
] | null | null | null |
players/human.py
|
pikatyuu/deep-learning-othello
|
d9f149b01f079f5d021ba9655445cd43a847a628
|
[
"MIT"
] | null | null | null |
players/human.py
|
pikatyuu/deep-learning-othello
|
d9f149b01f079f5d021ba9655445cd43a847a628
|
[
"MIT"
] | null | null | null |
class Human():
def __init__(self, name="Human"):
self.name = name
def action(self, game):
safe_input = False
while not safe_input:
pos = input("choose a position: ")
if pos == "draw":
game.draw()
elif pos == "exit":
import sys
sys.exit()
elif pos == "movable":
print(game.movable)
elif len(pos) == 2:
clone = game.clone()
pos = tuple(map(int, tuple(pos)))
if clone.can_play(pos):
safe_input = True
else:
print("// Error: Can't put it down //")
else:
print("Error: Invaild input")
return game.play(pos)
def game_finished(self, game):
pass
def all_game_finished(self):
pass
| 27.96875 | 59 | 0.444693 | 894 | 0.998883 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.115084 |
96af356d59393d735c1df16fcdd2f437e70407ca
| 2,338 |
py
|
Python
|
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/SeatingArrangement.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/SeatingArrangement.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/SeatingArrangement.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Akash and Vishal are quite fond of travelling. They mostly travel by railways. They were travelling in a train one day and they got interested in the seating arrangement of their compartment. The compartment looked something like
So they got interested to know the seat number facing them and the seat type facing them. The seats are denoted as follows :
Window Seat : WS
Middle Seat : MS
Aisle Seat : AS
You will be given a seat number, find out the seat number facing you and the seat type, i.e. WS, MS or AS.
INPUT:
First line of input will consist of a single integer T denoting number of test-cases. Each test-case consists of a single integer N denoting the seat-number.
OUTPUT:
For each test case, print the facing seat-number and the seat-type, separated by a single space in a new line.
CONSTRAINTS:
1 ≤ T ≤ 10^5
1 ≤ N ≤ 10^8
"""
__author__ = "Cristian Chitiva"
__date__ = "March 17, 2019"
__email__ = "[email protected]"
T = int(input())

# Seats repeat in blocks of 12. Within a block, seat p faces seat 13 - p, and
# facing seats always share the same type (the WS/MS/AS pattern below).
SEAT_TYPE = {1: 'WS', 2: 'MS', 3: 'AS', 4: 'AS', 5: 'MS', 6: 'WS',
             7: 'WS', 8: 'MS', 9: 'AS', 10: 'AS', 11: 'MS', 12: 'WS'}

while T > 0:
    N = int(input())
    block = (N - 1) // 12
    position = (N - 1) % 12 + 1
    facing = block * 12 + (13 - position)
    print(str(facing) + ' ' + SEAT_TYPE[position])
    T -= 1
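
# Worked example (illustrative): N = 18 falls in block 1 at position 6, so the
# facing seat is 12*1 + (13 - 6) = 19 and both seats are window seats (WS).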
| 32.027397 | 230 | 0.582977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,015 | 0.432651 |
96af8b4a48adf5297e31757c90f73a77f6edf704
| 101 |
py
|
Python
|
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | 1 |
2018-02-20T12:11:45.000Z
|
2018-02-20T12:11:45.000Z
|
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | null | null | null |
vault_password.py
|
RMuskovets/empireofcode
|
a2a9cfe2c43c7f28999b426601063dd0af352db5
|
[
"Apache-2.0"
] | null | null | null |
def golf(p): return len(p)>9 and p!=p.lower() and p!=p.upper() and any('0'<=l and l<='9' for l in p)
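# Illustrative checks: a valid password needs 10+ characters with upper case,
# lower case and at least one digit.
assert golf("Passw0rdXY")
assert not golf("password1")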
| 50.5 | 100 | 0.584158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.059406 |
96af9cf77f54780c67f68a366b9f2da0eae70db7
| 3,149 |
py
|
Python
|
analysis/marc_verification_sharp.py
|
maxschalz/studious_potato
|
a368aa88036c1f0ffcd494e994b0975be2575210
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/marc_verification_sharp.py
|
maxschalz/studious_potato
|
a368aa88036c1f0ffcd494e994b0975be2575210
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/marc_verification_sharp.py
|
maxschalz/studious_potato
|
a368aa88036c1f0ffcd494e994b0975be2575210
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import numpy as np
from multi_isotope_calculator import Multi_isotope
import plotsettings as ps
plt.style.use('seaborn-darkgrid')
plt.rcParams.update(ps.tex_fonts())
def main():
plot()
#figure5()
def figure1():
"""Compare data to Sharp paper (tails U234 vs product U235)"""
data = np.genfromtxt("../data/sharp_fig1.csv", delimiter=",")
data = data[np.argsort(data[:,0])]
composition = {'234': 5.5e-3, '235': (0.72, 3, 0.2)}
calculator = Multi_isotope(composition, feed=1, process='diffusion',
downblend=False)
results = np.empty(shape=data.shape, dtype=float)
for i, xp in enumerate(data[:,0]):
calculator.set_product_enrichment(xp*100)
calculator.calculate_staging()
results[i,0] = calculator.xp[3]
results[i,1] = calculator.xt[2]
data *= 100
results *= 100
pulls = 100 * (data[:,1]-results[:,1]) / data[:,1]
ylims = (1e299, 0)
for values in (data, results):
ylims = (min(ylims[0], min(values[:,1])),
max(ylims[1], max(values[:,1])))
return data, results, pulls
def figure5():
"""Compare data to Sharp paper (tails qty vs product qty)"""
sharp = np.genfromtxt("../data/sharp_fig5.csv", delimiter=",")
sharp = sharp[np.argsort(sharp[:,0])]
calc = Multi_isotope({'235': (0.711, 5, 0.2)}, max_swu=15000,
process='diffusion', downblend=False)
results = np.empty(shape=sharp.shape, dtype=float)
for i, xp in enumerate(sharp[:,0]):
calc.set_product_enrichment(xp*100)
calc.calculate_staging()
results[i,0] = calc.xp[3] * 100
results[i,1] = calc.t
sharp[:,0] *= 100
pulls = 100 * (sharp[:,1]-results[:,1]) / sharp[:,1]
return sharp, results, pulls
def plot():
fig1 = figure1()
fig5 = figure5()
figsize = ps.set_size(subplots=(2,2))
fig, ax = plt.subplots(figsize=figsize, nrows=2, ncols=2)
plt.rcParams.update({'lines.markersize': 4})
for i, (data, result, pulls) in enumerate((fig1, fig5)):
ax[0,i].plot(result[:,0], result[:,1], color=ps.colors(0),
label="MARC algorithm", zorder=2, linewidth=1)
ax[0,i].scatter(data[::3,0], data[::3,1], marker="x",
color=ps.colors(1), label="Sharp 2013", zorder=3)
ax[1,i].scatter(data[:,0], pulls, s=1, zorder=2)
ax[0,i].legend()
ax[0,i].set_xlim(0, 100)
ax[1,i].set_xlim(0, 100)
ax[1,i].set_xlabel(r"$x_{235,P}$ [\%at]")
ax[1,i].axhline(0, color="C3", zorder=1, linewidth=1)
ax[0,1].ticklabel_format(axis="y", style="sci", scilimits=(-2,2))
ax[0,0].set_ylabel(r"$x_{234,T}$ [\%at]")
ax[1,0].set_ylabel(r"relative difference [%]")
ax[0,1].set_ylabel(r"$T$ [kg/yr]")
ax[1,1].set_ylabel(r"relative difference [%]")
plt.tight_layout()
plt.savefig("../plots/checks_marc_sharp1.pdf")
plt.close()
return
if __name__=='__main__':
main()
| 30.278846 | 73 | 0.580502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.152429 |
96b02e8ac66ecd2c65e6e010e248801adc096f97
| 497 |
py
|
Python
|
clase6/clases.py
|
Tank3-TK3/codigo-basico-Python
|
580e8d284fa8a4d70b2a264762c91bd64c89ab80
|
[
"MIT"
] | 7 |
2021-04-19T01:32:49.000Z
|
2021-06-04T17:38:04.000Z
|
clase6/clases.py
|
Tank3-TK3/codigo-basico-Python
|
580e8d284fa8a4d70b2a264762c91bd64c89ab80
|
[
"MIT"
] | null | null | null |
clase6/clases.py
|
Tank3-TK3/codigo-basico-Python
|
580e8d284fa8a4d70b2a264762c91bd64c89ab80
|
[
"MIT"
] | null | null | null |
class Animals:
def comer(self):
print("Comiendo")
def dormir(self):
print("Durmiendo")
class Perro:
def __init__(self, nombre):
self.nombre = nombre
def comer(self):
print("Comiendo")
def dormir(self):
print("Durmiendo")
def ladrar(self):
print("Ladrando")
print("--------------------------------------------------")
firulais = Perro("Firulais")
firulais.comer()
firulais.dormir()
firulais.ladrar()
print("--------------------------------------------------")
| 23.666667 | 60 | 0.525151 | 285 | 0.573441 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.334004 |
96b0583a014d7b5a8ac9ea17b0f8eea2bc40f0eb
| 3,103 |
py
|
Python
|
homeworks_advanced/homework2_attention_in_seq2seq/modules.py
|
BiscuitsLayer/ml-mipt
|
24917705189d2eb97a07132405b4f93654cb1aaf
|
[
"MIT"
] | 1 |
2021-08-01T11:29:11.000Z
|
2021-08-01T11:29:11.000Z
|
homeworks_advanced/homework2_attention_in_seq2seq/modules.py
|
ivasio/ml-mipt
|
9c8896b4dfe46ee02bc5fdbca47acffbeca6828e
|
[
"MIT"
] | null | null | null |
homeworks_advanced/homework2_attention_in_seq2seq/modules.py
|
ivasio/ml-mipt
|
9c8896b4dfe46ee02bc5fdbca47acffbeca6828e
|
[
"MIT"
] | null | null | null |
import random
import torch
from torch import nn
from torch.nn import functional as F
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
def forward(self, src):
embedded = self.dropout(self.embedding(src))
output, (hidden, cell) = self.rnn(embedded)
return output, hidden, cell
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim):
super().__init__()
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
        self.attn = None  # <YOUR CODE HERE>
def forward(self, hidden, encoder_outputs):
# <YOUR CODE HERE>
return
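
# One possible completion of the template above (illustrative sketch, not the
# course's reference solution): additive, Bahdanau-style attention scoring
# every encoder state against the current decoder hidden state.
#
#     # in __init__:
#     self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim)
#     self.v = nn.Linear(dec_hid_dim, 1, bias=False)
#
#     # in forward(hidden: [batch, dec hid], encoder_outputs: [src len, batch, enc hid]):
#     src_len = encoder_outputs.shape[0]
#     hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
#     enc = encoder_outputs.permute(1, 0, 2)
#     energy = torch.tanh(self.attn(torch.cat((hidden, enc), dim=2)))
#     return F.softmax(self.v(energy).squeeze(2), dim=1)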
class DecoderWithAttention(nn.Module):
def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
super().__init__()
self.emb_dim = emb_dim
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.output_dim = output_dim
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = None  # <YOUR CODE HERE>
        self.out = None  # <YOUR CODE HERE>
self.dropout = nn.Dropout(dropout)
    def forward(self, input, hidden, encoder_outputs):
        # <YOUR CODE HERE>
        raise NotImplementedError
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.hid_dim == decoder.dec_hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
#src = [src sent len, batch size]
#trg = [trg sent len, batch size]
#teacher_forcing_ratio is probability to use teacher forcing
#e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time
# Again, now batch is the first dimention instead of zero
batch_size = trg.shape[1]
max_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
#tensor to store decoder outputs
outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
#last hidden state of the encoder is used as the initial hidden state of the decoder
enc_states, hidden, cell = self.encoder(src)
#first input to the decoder is the <sos> tokens
input = trg[0,:]
for t in range(1, max_len):
output, hidden = self.decoder(input, hidden, enc_states)
outputs[t] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.max(1)[1]
input = (trg[t] if teacher_force else top1)
return outputs
| 29.836538 | 92 | 0.638737 | 3,006 | 0.96874 | 0 | 0 | 0 | 0 | 0 | 0 | 574 | 0.184982 |
96b22687eb09935202fe84c81b4d3c7659c65ad8
| 1,753 |
py
|
Python
|
zipselected/__init__.py
|
raguay/ZipSelected
|
8663623498db6e87beded2aaecac65cd0979788d
|
[
"MIT"
] | 6 |
2017-01-26T09:09:51.000Z
|
2021-12-14T11:38:54.000Z
|
zipselected/__init__.py
|
raguay/ZipSelected
|
8663623498db6e87beded2aaecac65cd0979788d
|
[
"MIT"
] | 2 |
2017-03-17T11:24:26.000Z
|
2018-02-22T13:47:41.000Z
|
zipselected/__init__.py
|
raguay/ZipSelected
|
8663623498db6e87beded2aaecac65cd0979788d
|
[
"MIT"
] | 2 |
2017-10-16T06:19:27.000Z
|
2020-05-15T13:42:26.000Z
|
from fman import DirectoryPaneCommand, show_alert
import os
import zipfile
from fman.url import as_human_readable
from fman.url import as_url
def zipdir(rootZip, path, ziph):
    # ziph is an open zipfile handle; returns the number of files added.
    # os.walk recurses by itself, so combining it with manual recursion into
    # `dirs` would add nested files more than once; walk one level at a time.
    numf = 0
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            numf += zipdir(os.path.join(rootZip, entry), full, ziph)
        else:
            ziph.write(full, os.path.join(rootZip, entry))
            numf += 1
    return numf
class ZipSelected(DirectoryPaneCommand):
def __call__(self):
selected_files = self.pane.get_selected_files()
output = ""
if len(selected_files) >= 1 or (len(selected_files) == 0 and self.get_chosen_files()):
if len(selected_files) == 0 and self.get_chosen_files():
selected_files.append(self.get_chosen_files()[0])
dirPath = os.path.dirname(as_human_readable(selected_files[0]))
dirName = os.path.basename(dirPath)
zipName = os.path.join(dirPath, dirName + ".zip")
numf = 0
zipf = zipfile.ZipFile(zipName, 'w')
for file in selected_files:
file = as_human_readable(file)
if os.path.isdir(file):
numf += zipdir(os.path.join(dirName, os.path.basename(file)),file,zipf)
else:
zipf.write(file, os.path.join(dirName, os.path.basename(file)))
numf += 1
output += str(numf) + " files were zipped!"
zipf.close()
else:
output += "No files or directories selected"
show_alert(output)
| 39.840909 | 100 | 0.58243 | 1,185 | 0.675984 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.051911 |
96b3255531b199084f95bb09b62e2c476d0885f5
| 626 |
py
|
Python
|
functions/aurora_check_status.py
|
aws-samples/aws-stepfunctions-aurora-clone
|
ca60dbb1e98bb337662ac6140a2749fa03363d48
|
[
"MIT-0"
] | 7 |
2022-02-22T16:23:00.000Z
|
2022-03-18T18:44:06.000Z
|
functions/aurora_check_status.py
|
aws-samples/aws-stepfunctions-aurora-clone
|
ca60dbb1e98bb337662ac6140a2749fa03363d48
|
[
"MIT-0"
] | null | null | null |
functions/aurora_check_status.py
|
aws-samples/aws-stepfunctions-aurora-clone
|
ca60dbb1e98bb337662ac6140a2749fa03363d48
|
[
"MIT-0"
] | null | null | null |
import boto3
client = boto3.client('rds')
def lambda_handler(event, context):
target_db_cluster_identifier=event['TargetDBClusterIdentifier']
payload = event.copy()
try:
response = client.describe_db_clusters(DBClusterIdentifier=target_db_cluster_identifier)
payload['status'] = response['DBClusters'][0]['Status']
return payload
except client.exceptions.DBClusterNotFoundFault as e:
print(e)
payload['status'] = 'not-found'
payload['message'] = 'There is no cluster to remove...'
return payload
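
# Example event (illustrative):
#   {"TargetDBClusterIdentifier": "my-cluster-clone"}
# The handler returns a copy of the event with a "status" key added, e.g.
# "available" or "creating", or "not-found" when the clone does not exist.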
| 25.04 | 96 | 0.629393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.194888 |
96b4507fab2d696dd5272cc8fb8efb5a6fdf9e81
| 6,545 |
py
|
Python
|
smellCatalog/InputProcessor.py
|
neilernst/smells
|
c093ee72a12f62693d8635359b7ca4958ecba0e0
|
[
"MIT"
] | null | null | null |
smellCatalog/InputProcessor.py
|
neilernst/smells
|
c093ee72a12f62693d8635359b7ca4958ecba0e0
|
[
"MIT"
] | null | null | null |
smellCatalog/InputProcessor.py
|
neilernst/smells
|
c093ee72a12f62693d8635359b7ca4958ecba0e0
|
[
"MIT"
] | 1 |
2019-07-15T14:16:37.000Z
|
2019-07-15T14:16:37.000Z
|
import re
from Smell import Smell
from SmellCategory import SmellCategory
from Reference import Reference
SMELL = "\[smell\]"
SMELL_ID = "\[smell-id\]"
SMELL_NAME = "\[smell-name\]"
SMELL_END = "\[smell-end\]"
SMELL_DES = "\[smell-description\]"
SMELL_AKA = "\[smell-aka\]"
SMELL_CATEGORY = "\[smell-category\]"
SMELL_SUBCATEGORY = "\[smell-subcategory\]"
SMELL_REF = "\[smell-ref\]"
SCAT = "\[define-smell-category\]"
SCAT_ID = "\[smell-category-id\]"
SCAT_NAME = "\[smell-category-name\]"
SCAT_PARENT = "\[smell-category-parent\]"
SCAT_END = "\[define-smell-category-end\]"
REF = "\[reference\]"
REF_ID = "\[ref-id\]"
REF_TEXT = "\[ref-text\]"
REF_IMAGE = "\[ref-image\]"
REF_URL = "\[ref-url\]"
REF_END = "\[ref-end\]"
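
# The catalog file is expected to contain marker-delimited blocks; an
# illustrative (hypothetical) smell entry would look like:
#
#   [smell]
#   [smell-id] SM1
#   [smell-name] God Class
#   [smell-description] A class that centralises too much behaviour.
#   [smell-category] CAT1
#   [smell-ref] REF1
#   [smell-end]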
class InputProcessor(object):
def __init__(self, path):
self.input_file_path = path
self.smell_list = []
def process(self):
cur_smell_obj = None
with open(self.input_file_path, "r", errors='ignore') as reader:
for line in reader:
line = line.strip()
if (line == ""):
continue
smell_pattern = re.compile(SMELL)
id_pattern = re.compile(SMELL_ID)
name_pattern = re.compile(SMELL_NAME)
des_pattern = re.compile(SMELL_DES)
aka_pattern = re.compile(SMELL_AKA)
end_pattern = re.compile(SMELL_END)
cat_pattern = re.compile(SMELL_CATEGORY)
sub_pattern = re.compile(SMELL_SUBCATEGORY)
ref_pattern = re.compile(SMELL_REF)
if(re.search(smell_pattern, line) != None):
cur_smell_obj = Smell()
elif (re.search(end_pattern, line)):
self.smell_list.append(cur_smell_obj)
elif (re.search(id_pattern, line) != None):
cur_smell_obj.id = re.split(SMELL_ID, line)[1].strip()
elif (re.search(name_pattern, line) != None):
cur_smell_obj.name = re.split(SMELL_NAME, line)[1].strip()
elif (re.search(des_pattern, line) != None):
cur_smell_obj.description = re.split(SMELL_DES, line)[1].strip()
elif (re.search(aka_pattern, line) != None):
cur_smell_obj.aka.append(re.split(SMELL_AKA, line)[1].strip())
elif (re.search(cat_pattern, line) != None):
cur_smell_obj.category = re.split(SMELL_CATEGORY, line)[1].strip()
elif (re.search(sub_pattern, line) != None):
cur_smell_obj.sub_category = re.split(SMELL_SUBCATEGORY, line)[1].strip()
elif (re.search(ref_pattern, line) != None):
cur_smell_obj.reference = re.split(SMELL_REF, line)[1].strip()
return self.smell_list
def find_parent(self, parent_id, category_list):
for cat in category_list:
if (cat.id == parent_id):
return cat
return None
def link_categories(self, category_list):
for cat in category_list:
cat.parent_obj = self.find_parent(cat.parent, category_list)
def get_category_list(self, SMELL_CATEGORY_FILE_PATH):
category_list = []
cur_category_obj = None
with open(SMELL_CATEGORY_FILE_PATH, "r", errors='ignore') as reader:
for line in reader:
line = line.strip()
if (line == ""):
continue
scat_pattern = re.compile(SCAT)
scat_id_pattern = re.compile(SCAT_ID)
scat_name_pattern = re.compile(SCAT_NAME)
scat_parent_pattern = re.compile(SCAT_PARENT)
scat_end_pattern = re.compile(SCAT_END)
if(re.search(scat_pattern, line) != None):
cur_category_obj = SmellCategory()
elif (re.search(scat_end_pattern, line)):
category_list.append(cur_category_obj)
elif (re.search(scat_id_pattern, line) != None):
cur_category_obj.id = re.split(SCAT_ID, line)[1].strip()
elif (re.search(scat_name_pattern, line) != None):
cur_category_obj.name = re.split(SCAT_NAME, line)[1].strip()
elif (re.search(scat_parent_pattern, line) != None):
cur_category_obj.parent = re.split(SCAT_PARENT, line)[1].strip()
self.link_categories(category_list)
return category_list
def get_ref_list(self, REF_FILE_PATH):
cur_ref_obj = None
ref_list = []
with open(REF_FILE_PATH, "r", errors='ignore') as reader:
for line in reader:
line = line.strip()
if (line == ""):
continue
ref_pattern = re.compile(REF)
ref_id_pattern = re.compile(REF_ID)
ref_text_pattern = re.compile(REF_TEXT)
ref_image_pattern = re.compile(REF_IMAGE)
ref_url_pattern = re.compile(REF_URL)
ref_end_pattern = re.compile(REF_END)
if(re.search(ref_pattern, line) != None):
cur_ref_obj = Reference()
elif (re.search(ref_end_pattern, line) != None):
ref_list.append(cur_ref_obj)
elif (re.search(ref_id_pattern, line) != None):
cur_ref_obj.id = re.split(REF_ID, line)[1].strip()
elif (re.search(ref_text_pattern, line) != None):
cur_ref_obj.text = re.split(REF_TEXT, line)[1].strip()
elif (re.search(ref_url_pattern, line) != None):
cur_ref_obj.url = re.split(REF_URL, line)[1].strip()
elif (re.search(ref_image_pattern, line) != None):
cur_ref_obj.image = re.split(REF_IMAGE, line)[1].strip()
return ref_list
def populate_aka_obj(self, smell_list):
for smell in smell_list:
for aka in smell.aka:
if (aka != ''): # NErnst prevents empty AKA matches
smell_obj = self.find_smell_obj(aka, smell_list)
if smell_obj == None:
print("Related smell not found: " + aka)
else:
smell.aka_obj_list.append(smell_obj)
def find_smell_obj(self, aka, smell_list):
for smell in smell_list:
if (smell.id == aka):
return smell
return None
| 42.5 | 93 | 0.559664 | 5,816 | 0.888617 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.071811 |
96b5076f3752a0f19a06b6d629287742be1b298b
| 414 |
py
|
Python
|
YorForger/modules/redis/afk_redis.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
YorForger/modules/redis/afk_redis.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
YorForger/modules/redis/afk_redis.py
|
Voidxtoxic/kita
|
b2a3007349727280e149dcca017413d7dc2e7648
|
[
"MIT"
] | null | null | null |
from YorForger import REDIS
# AFK
def is_user_afk(userid):
rget = REDIS.get(f"is_afk_{userid}")
return bool(rget)
def start_afk(userid, reason):
REDIS.set(f"is_afk_{userid}", reason)
def afk_reason(userid):
return strb(REDIS.get(f"is_afk_{userid}"))
def end_afk(userid):
REDIS.delete(f"is_afk_{userid}")
return True
# Helpers
def strb(redis_string):
    # NOTE: assumes the Redis client decodes responses; on raw bytes, str()
    # would yield "b'...'" instead of the stored text.
    return str(redis_string)
| 15.333333 | 46 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.207729 |
96b51c0b082319955c9c8c901bb9467463e9b730
| 859 |
py
|
Python
|
mini_event.py
|
shubhamjain/earphone-event
|
0513a06904ea98c3962015d6edaf5f63943a03b7
|
[
"MIT"
] | 6 |
2018-08-16T21:38:40.000Z
|
2020-11-19T05:53:09.000Z
|
mini_event.py
|
shubhamjain/earphone-event
|
0513a06904ea98c3962015d6edaf5f63943a03b7
|
[
"MIT"
] | 1 |
2020-10-21T17:55:07.000Z
|
2020-10-21T17:55:07.000Z
|
mini_event.py
|
shubhamjain/earphone-event
|
0513a06904ea98c3962015d6edaf5f63943a03b7
|
[
"MIT"
] | 1 |
2021-09-08T15:05:52.000Z
|
2021-09-08T15:05:52.000Z
|
import threading
class mini_event:
BUTTON_DOWN = 1
BUTTON_UP = 2
BUTTON_HOLD = 3
    # NOTE: defined at class level, so every instance shares the same
    # subscriber lists and hold-stop flag.
    subscribers = {BUTTON_DOWN: [], BUTTON_UP: [], BUTTON_HOLD: []}
    trigger_hold_stop = False  # Set to True when the running hold-event callback should stop.
def add_subscriber( self, callback, event ):
self.subscribers[event].append( callback )
def fire_event( self, event ):
if( event == self.BUTTON_UP ):
self.trigger_hold_stop = True
if( event == self.BUTTON_DOWN ):
self.trigger_hold_stop = False
for callback in self.subscribers[event]:
if( event == self.BUTTON_HOLD ):
thread = threading.Thread(target=callback, args=[self.BUTTON_HOLD, self])
thread.start()
else:
callback(event)
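
# Minimal usage sketch (illustrative; `on_hold` is a hypothetical callback):
#
#   def on_hold(event, emitter):
#       while not emitter.trigger_hold_stop:
#           pass  # e.g. keep adjusting volume until BUTTON_UP arrives
#
#   events = mini_event()
#   events.add_subscriber(on_hold, mini_event.BUTTON_HOLD)
#   events.fire_event(mini_event.BUTTON_DOWN)
#   events.fire_event(mini_event.BUTTON_HOLD)  # on_hold runs in a thread
#   events.fire_event(mini_event.BUTTON_UP)    # sets trigger_hold_stop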
| 28.633333 | 115 | 0.604191 | 839 | 0.976717 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.098952 |
96b58e236e198367799150eb2cf1c9825aebfff3
| 13,225 |
py
|
Python
|
tin/utils.py
|
balazsdukai/tin2stardb
|
efb160ba744f757c4a6d4674c7abec8bf0694415
|
[
"MIT"
] | null | null | null |
tin/utils.py
|
balazsdukai/tin2stardb
|
efb160ba744f757c4a6d4674c7abec8bf0694415
|
[
"MIT"
] | null | null | null |
tin/utils.py
|
balazsdukai/tin2stardb
|
efb160ba744f757c4a6d4674c7abec8bf0694415
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Various utility functions for handling geometry etc."""
import math
from statistics import mean
from typing import Tuple, Union, Iterable, Generator, Mapping
import logging
MODULE_MATPLOTLIB_AVAILABLE = True
try:
import matplotlib.pyplot as plt
import matplotlib.lines as lines
except ImportError as e:
MODULE_MATPLOTLIB_AVAILABLE = False
log = logging.getLogger(__name__)
def __ccw__(vertices, star, link):
"""Sort the link in CounterClockWise order around the star"""
x, y, z = 0, 1, 2
localized = [(vertices[v][x] - vertices[star][x],
vertices[v][y] - vertices[star][y]) for v in link]
rev_lookup = {localized[i]: a for i, a in enumerate(link)}
return rev_lookup, sorted(localized, key=lambda p: math.atan2(p[1], p[0]))
def sort_ccw(vertices, stars) -> Generator:
"""Sort vertices in counter-clockwise order."""
for star, link in stars.items():
rev_lookup, ccw = __ccw__(vertices, star, link)
yield star, [rev_lookup[co] for co in ccw]
def link_is_ccw(vertices, stars) -> Generator:
"""Check if the link of the star is ordered CounterClockWise."""
for star, link in stars.items():
rev_lookup, ccw = __ccw__(vertices, star, link)
yield star, all(rev_lookup[co]==link[i] for i,co in enumerate(ccw))
def link_is_consistent(stars) -> Generator:
"""Checks if the links are consistent, thus vertex A is also present in the
link of vertex B, if vertex B is in the link of vertex A."""
for star, link in stars.items():
yield star, all(star in stars[_star] for _star in link)
def triangle_is_consistent(stars, triangles) -> Generator:
"""Check that each adjacent triangle's vertices are consistent in it's star
with the current one."""
def __stars_are_consistent(tri, stars):
for i, star in enumerate(tri):
link = stars[star]
try:
# star is vertex 1 (v1) of the triangle (tri)
idx_v2 = link.index(tri[i-2])
idx_v3 = link.index(tri[i-1])
                _idx_v3 = idx_v2 + 1 if idx_v2 + 1 < len(link) else 0
                yield _idx_v3 == idx_v3
except ValueError:
yield False
for tri in triangles:
yield tri, all(__stars_are_consistent(tri, stars))
def distance(a,b) -> float:
"""Distance between point a and point b"""
x,y = 0,1
return math.sqrt((a[x] - b[x])**2 + (a[y] - b[y])**2)
def orientation(a: Tuple[float, float], b: Tuple[float, float],
c: Tuple[float, float]):
"""
Determine if point (p) is LEFT, RIGHT, COLLINEAR with line segment (ab).
:param a: Point 1
:param b: Point 2
:param c: Point which orientation to is determined with respect to (a,b)
:return: 1 if (a,b,p) is CCW, 0 if p is collinear, -1 if (a,b,p) is CW
>>> orientation((0.0, 0.0), (1.0, 0.0), (2.0, 0.0))
0
>>> orientation((0.0, 0.0), (1.0, 0.0), (0.5, 0.0))
0
>>> orientation((0.0, 0.0), (1.0, 0.0), (0.5, 1.0))
1
>>> orientation((0.0, 0.0), (1.0, 0.0), (0.5, -1.0))
-1
"""
x,y = 0,1
re = ((a[x] - c[x]) * (b[y] - c[y])) - ((a[y] - c[y]) * (b[x] - c[x]))
if re > 0:
return 1
elif re == 0:
return 0
else:
return -1
def is_between(a,c,b) -> bool:
"""Return True if point c is on the segment ab
Ref.: https://stackoverflow.com/a/328193
"""
return math.isclose(distance(a,c) + distance(c,b), distance(a,b))
def in_bbox(tri: Tuple, bbox: Tuple) -> bool:
"""Evaluates if a triangle is in the provided bounding box.
A triangle is in the BBOX if it's centorid is either completely within
the BBOX, or overlaps with the South (lower) or West (left) boundaries
of the BBOX.
:param tri: A triangle defined as a tuple of three cooridnates of (x,y,z)
:param bbox: Bounding Box as (minx, miny, maxx, maxy)
"""
if not bbox or not tri:
return False
x,y,z = 0,1,2
minx, miny, maxx, maxy = bbox
# mean x,y,z coordinate of the triangle
centroid = (mean(v[x] for v in tri),
mean(v[y] for v in tri))
within = ((minx < centroid[x] < maxx) and
(miny < centroid[y] < maxy))
on_south_bdry = is_between((minx, miny), centroid, (maxx, miny))
on_west_bdry = is_between((minx, miny), centroid, (minx, maxy))
return any((within, on_south_bdry, on_west_bdry))
def bbox(polygon) -> Tuple[float, float, float, float]:
"""Compute the Bounding Box of a polygon.
:param polygon: List of coordinate pairs (x,y)
"""
x,y = 0,1
vtx = polygon[0]
minx, miny, maxx, maxy = vtx[x], vtx[y], vtx[x], vtx[y]
    for vtx in polygon[1:]:
        # independent checks: a single vertex can extend the bbox in both the
        # x and y direction at once, so `elif` would miss updates
        if vtx[x] < minx:
            minx = vtx[x]
        if vtx[y] < miny:
            miny = vtx[y]
        if vtx[x] > maxx:
            maxx = vtx[x]
        if vtx[y] > maxy:
            maxy = vtx[y]
return minx, miny, maxx, maxy
def get_polygon(feature):
"""Get the polygon boundaries from a GeoJSON feature."""
if not feature['geometry']['type'] == 'Polygon':
log.warning(f"Feature ID {feature['properties']['id']} is not a Polygon")
else:
return feature['geometry']['coordinates'][0]
def find_side(polygon: Iterable[Tuple[float, ...]],
neighbor: Iterable[Tuple[float, ...]],
abs_tol: float = 0.0) ->\
Union[Tuple[None, None],
Tuple[str, Tuple[Tuple[float, float], Tuple[float, float]]]]:
"""Determines on which side does the neighbor polygon is located.
.. warning::
Assumes touching BBOXes of equal dimensions.
:param polygon: The base polygon. A list of coordinate tuples.
:param neighbor: The neighbor polygon.
:param abs_tol: Absolute coordinate tolerance. Passed on to `:math.isclose`
:returns: One of ['E', 'N', 'W', 'S'], the touching line segment
"""
minx, miny, maxx, maxy = 0,1,2,3
bbox_base = bbox(polygon)
bbox_nbr = bbox(neighbor)
if math.isclose(bbox_nbr[minx], bbox_base[maxx], abs_tol=abs_tol) \
and math.isclose(bbox_nbr[miny], bbox_base[miny], abs_tol=abs_tol):
return 'E', ((bbox_base[maxx], bbox_base[miny]), (bbox_base[maxx], bbox_base[maxy]))
elif math.isclose(bbox_nbr[minx], bbox_base[minx], abs_tol=abs_tol) \
and math.isclose(bbox_nbr[miny], bbox_base[maxy], abs_tol=abs_tol):
return 'N', ((bbox_base[maxx], bbox_base[maxy]), (bbox_base[minx], bbox_base[maxy]))
elif math.isclose(bbox_nbr[maxx], bbox_base[minx], abs_tol=abs_tol) \
and math.isclose(bbox_nbr[maxy], bbox_base[maxy], abs_tol=abs_tol):
return 'W', ((bbox_base[minx], bbox_base[maxy]), (bbox_base[minx], bbox_base[miny]), )
elif math.isclose(bbox_nbr[maxx], bbox_base[maxx], abs_tol=abs_tol) \
and math.isclose(bbox_nbr[maxy], bbox_base[miny], abs_tol=abs_tol):
return 'S', ((bbox_base[minx], bbox_base[miny]), (bbox_base[maxx], bbox_base[miny]), )
else:
return None,None
def plot_star(vid, stars, vertices):
"""Plots the location of a vertex and its incident vertices in its link.
:Example: plot_star(1, stars, vertices)
:param vid: Vertex ID
:param stars: List with the Link of the vertex
:param vertices: List with vertex coordinates (used as lookup)
:return: Plots a plot on screen
"""
if not MODULE_MATPLOTLIB_AVAILABLE:
raise ModuleNotFoundError("matplotlib is not installed, cannot plot")
plt.clf()
pts = [vertices[vid]] + [vertices[v] for v in stars[vid]]
r = list(zip(*pts))
plt.scatter(*r[0:2])
labels = [vid] + stars[vid]
# zip joins x and y coordinates in pairs
for i, e in enumerate(labels):
if e == vid:
plt.annotate(e, # this is the text
(pts[i][0], pts[i][1]), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0, 10), # distance from text to points (x,y)
ha='center',
# horizontal alignment can be left, right or center
color='red')
else:
plt.annotate(e, # this is the text
(pts[i][0], pts[i][1]),
textcoords="offset points",
xytext=(0, 10),
ha='center')
plt.show()
def mean_coordinate(points: Iterable[Tuple]) -> Tuple[float, float]:
"""Compute the mean x- and y-coordinate from a list of points.
:param points: An iterable of coordinate tuples where the first two elements
of the tuple are the x- and y-coordinate respectively.
:returns: A tuple of (mean x, mean y) coordinates
"""
mean_x = mean(pt[0] for pt in points)
mean_y = mean(pt[1] for pt in points)
return mean_x, mean_y
# Computing Morton-code. Reference: https://github.com/trevorprater/pymorton ---
def __part1by1_64(n):
"""64-bit mask"""
n &= 0x00000000ffffffff # binary: 11111111111111111111111111111111, len: 32
n = (n | (n << 16)) & 0x0000FFFF0000FFFF # binary: 1111111111111111000000001111111111111111, len: 40
n = (n | (n << 8)) & 0x00FF00FF00FF00FF # binary: 11111111000000001111111100000000111111110000000011111111, len: 56
n = (n | (n << 4)) & 0x0F0F0F0F0F0F0F0F # binary: 111100001111000011110000111100001111000011110000111100001111, len: 60
n = (n | (n << 2)) & 0x3333333333333333 # binary: 11001100110011001100110011001100110011001100110011001100110011, len: 62
n = (n | (n << 1)) & 0x5555555555555555 # binary: 101010101010101010101010101010101010101010101010101010101010101, len: 63
return n
def __unpart1by1_64(n):
n &= 0x5555555555555555 # binary: 101010101010101010101010101010101010101010101010101010101010101, len: 63
n = (n ^ (n >> 1)) & 0x3333333333333333 # binary: 11001100110011001100110011001100110011001100110011001100110011, len: 62
n = (n ^ (n >> 2)) & 0x0f0f0f0f0f0f0f0f # binary: 111100001111000011110000111100001111000011110000111100001111, len: 60
n = (n ^ (n >> 4)) & 0x00ff00ff00ff00ff # binary: 11111111000000001111111100000000111111110000000011111111, len: 56
n = (n ^ (n >> 8)) & 0x0000ffff0000ffff # binary: 1111111111111111000000001111111111111111, len: 40
n = (n ^ (n >> 16)) & 0x00000000ffffffff # binary: 11111111111111111111111111111111, len: 32
return n
def interleave(*args):
"""Interleave two integers"""
if len(args) != 2:
raise ValueError('Usage: interleave2(x, y)')
for arg in args:
if not isinstance(arg, int):
print('Usage: interleave2(x, y)')
raise ValueError("Supplied arguments contain a non-integer!")
return __part1by1_64(args[0]) | (__part1by1_64(args[1]) << 1)
def deinterleave(n):
if not isinstance(n, int):
print('Usage: deinterleave2(n)')
raise ValueError("Supplied arguments contain a non-integer!")
return __unpart1by1_64(n), __unpart1by1_64(n >> 1)
def morton_code(x: float, y: float):
"""Takes an (x,y) coordinate tuple and computes their Morton-key.
    Casts floats to integers by multiplying them by 100 (centimeter precision).
"""
return interleave(int(x * 100), int(y * 100))
def rev_morton_code(morton_key: int) -> Tuple[float, float]:
"""Get the coordinates from a Morton-key"""
x,y = deinterleave(morton_key)
return float(x)/100.0, float(y)/100.0
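
# Round-trip example (illustrative): rev_morton_code(morton_code(12.25, 56.5))
# returns (12.25, 56.5), since both values are exactly representable in binary.
# Note int() truncates rather than rounds, so coordinates whose product with
# 100 is not exact may come back one centimeter off.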
# Compute tile range -----------------------------------------------------------
def tilesize(tin_paths) -> Tuple[float, float]:
"""Compute the tile size from Morton-codes for the input TINs.
.. note:: Assumes regular grid.
:returns: The x- and y-dimensions of a tile
"""
    centroids = []
    # use a local name that does not shadow the morton_code() function above
    for i, mc in enumerate(tin_paths):
        if i == 2:
            break
        centroids.append(rev_morton_code(mc))
return abs(centroids[0][0] - centroids[1][0]), abs(centroids[0][1] - centroids[1][1])
def __in_bbox__(point, range):
    """Check if a point is within a BBOX given as (minx, miny, maxx, maxy)."""
    # mirrors the inline containment test used in compute_8neighbors below
    minx, miny, maxx, maxy = range
    return (minx < point[0] < maxx) and (miny < point[1] < maxy)
def compute_8neighbors(tin_paths: Mapping, tilesize: Tuple) -> Generator:
"""Computes the 8 neighbors for the tiles, using a predefined search range."""
for mc, filepath in tin_paths.items():
center = rev_morton_code(mc)
neighbours = set()
# Search range
minx = center[0] - tilesize[0]
miny = center[1] - tilesize[1]
maxx = center[0] + tilesize[0]
maxy = center[1] + tilesize[1]
for mc_nbr, filepath_nbr in tin_paths.items():
if mc_nbr != mc:
center_nbr = rev_morton_code(mc_nbr)
within = ((minx < center_nbr[0] < maxx) and
(miny < center_nbr[1] < maxy))
if within:
neighbours.update([filepath_nbr.with_suffix('.pickle'),])
yield mc, (filepath, neighbours)
| 38.444767 | 127 | 0.607108 | 0 | 0 | 2,419 | 0.182911 | 0 | 0 | 0 | 0 | 5,046 | 0.38155 |
96b5abda600a3ff8286fd10ad76e69e6c1844b69
| 7,748 |
py
|
Python
|
tfworker/cli.py
|
objectrocket/terraform-worker
|
5a3c81a465d31bf7c9186fa59be2bfa8f4578449
|
[
"Apache-2.0"
] | 6 |
2020-02-10T21:53:18.000Z
|
2021-07-21T18:10:02.000Z
|
tfworker/cli.py
|
RSS-Engineering/terraform-worker
|
98b04eacd828448985bc9ded3a46497f06d7c6ae
|
[
"Apache-2.0"
] | 4 |
2020-09-20T13:04:26.000Z
|
2021-03-23T21:20:57.000Z
|
tfworker/cli.py
|
RSS-Engineering/terraform-worker
|
98b04eacd828448985bc9ded3a46497f06d7c6ae
|
[
"Apache-2.0"
] | 3 |
2020-06-12T18:38:33.000Z
|
2020-09-15T21:01:53.000Z
|
#!/usr/bin/env python
# Copyright 2020 Richard Maynard ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
import sys
import click
from tfworker import constants as const
from tfworker.commands import CleanCommand, RootCommand, TerraformCommand
from tfworker.commands.root import get_platform
from tfworker.commands.version import VersionCommand
def validate_deployment(ctx, deployment, name):
"""Validate the deployment is no more than 16 characters."""
if len(name) > 16:
click.secho("deployment must be less than 16 characters", fg="red")
raise SystemExit(2)
return name
def validate_gcp_creds_path(ctx, path, value):
if value:
if not os.path.isabs(value):
value = os.path.abspath(value)
if os.path.isfile(value):
return value
click.secho(f"Could not resolve GCP credentials path: {value}", fg="red")
raise SystemExit(3)
def validate_host():
"""Ensure that the script is being run on a supported platform."""
supported_opsys = ["darwin", "linux"]
supported_machine = ["amd64"]
opsys, machine = get_platform()
if opsys not in supported_opsys:
click.secho(
f"this application is currently not known to support {opsys}",
fg="red",
)
raise SystemExit(2)
if machine not in supported_machine:
click.secho(
f"this application is currently not known to support running on {machine} machines",
fg="red",
)
if struct.calcsize("P") * 8 != 64:
click.secho(
"this application can only be run on 64 bit hosts, in 64 bit mode", fg="red"
)
raise SystemExit(2)
return True
@click.group()
@click.option(
"--aws-access-key-id",
envvar="AWS_ACCESS_KEY_ID",
help="AWS Access key",
)
@click.option(
"--aws-secret-access-key",
envvar="AWS_SECRET_ACCESS_KEY",
help="AWS access key secret",
)
@click.option(
"--aws-session-token",
envvar="AWS_SESSION_TOKEN",
help="AWS access key token",
)
@click.option(
"--aws-role-arn",
envvar="AWS_ROLE_ARN",
help="If provided, credentials will be used to assume this role (complete ARN)",
)
@click.option(
"--aws-external-id",
envvar="AWS_EXTERNAL_ID",
help="If provided, will be used to assume the role specified by --aws-role-arn",
)
@click.option(
"--aws-region",
envvar="AWS_DEFAULT_REGION",
default=const.DEFAULT_AWS_REGION,
help="AWS Region to build in",
)
@click.option(
"--aws-profile",
envvar="AWS_PROFILE",
help="The AWS/Boto3 profile to use",
)
@click.option(
"--gcp-region",
envvar="GCP_REGION",
default=const.DEFAULT_GCP_REGION,
help="Region to build in",
)
@click.option(
"--gcp-creds-path",
envvar="GCP_CREDS_PATH",
help=(
"Relative path to the credentials JSON file for the service account to be used."
),
callback=validate_gcp_creds_path,
)
@click.option(
"--gcp-project",
envvar="GCP_PROJECT",
help="GCP project name to which work will be applied",
)
@click.option(
"--config-file",
default=const.DEFAULT_CONFIG,
envvar="WORKER_CONFIG_FILE",
required=True,
)
@click.option(
"--repository-path",
default=const.DEFAULT_REPOSITORY_PATH,
envvar="WORKER_REPOSITORY_PATH",
required=True,
help="The path to the terraform module repository",
)
@click.option(
"--backend",
type=click.Choice(["s3", "gcs"]),
help="State/locking provider. One of: s3, gcs",
)
@click.option(
"--backend-bucket",
help="Bucket (must exist) where all terraform states are stored",
)
@click.option(
"--backend-prefix",
default=const.DEFAULT_BACKEND_PREFIX,
help=f"Prefix to use in backend storage bucket for all terraform states (DEFAULT: {const.DEFAULT_BACKEND_PREFIX})",
)
@click.option(
"--backend-region",
default=const.DEFAULT_AWS_REGION,
help="Region where terraform rootc/lock bucket exists",
)
@click.option(
"--create-backend-bucket/--no-create-backend-bucket",
default=True,
help="Create the backend bucket if it does not exist",
)
@click.option(
"--config-var",
multiple=True,
default=[],
help='key=value to be supplied as jinja variables in config_file under "var" dictionary, can be specified multiple times',
)
@click.pass_context
def cli(context, **kwargs):
"""CLI for the worker utility."""
validate_host()
config_file = kwargs["config_file"]
try:
context.obj = RootCommand(args=kwargs)
except FileNotFoundError:
click.secho(f"configuration file {config_file} not found", fg="red", err=True)
raise SystemExit(1)
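
# Example invocation (illustrative; the entry-point, bucket and deployment
# names below are hypothetical):
#
#   worker --backend s3 --backend-bucket my-tf-states \
#          terraform --apply --limit network my-deployment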
@cli.command()
@click.option("--limit", help="limit operations to a single definition", multiple=True)
@click.argument("deployment", callback=validate_deployment)
@click.pass_obj
def clean(rootc, *args, **kwargs): # noqa: E501
""" clean up terraform state """
# clean just items if limit supplied, or everything if no limit
CleanCommand(rootc, *args, **kwargs).exec()
@cli.command()
def version():
""" display program version """
VersionCommand().exec()
sys.exit(0)
@cli.command()
@click.option(
"--clean/--no-clean",
default=True,
help="clean up the temporary directory created by the worker after execution",
)
@click.option(
"--apply/--no-apply",
"tf_apply",
default=False,
help="apply the terraform configuration",
)
@click.option(
"--force/--no-force",
"force",
default=False,
help="force apply/destroy without plan change",
)
@click.option(
"--destroy/--no-destroy",
default=False,
help="destroy a deployment instead of create it",
)
@click.option(
"--show-output/--no-show-output",
default=True,
help="show output from terraform commands",
)
@click.option(
"--terraform-bin",
help="The complate location of the terraform binary",
)
@click.option(
"--b64-encode-hook-values/--no--b64-encode-hook-values",
"b64_encode",
default=False,
help=(
"Terraform variables and outputs can be complex data structures, setting this"
" open will base64 encode the values for use in hook scripts"
),
)
@click.option(
"--terraform-modules-dir",
default="",
help=(
"Absolute path to the directory where terraform modules will be stored."
"If this is not set it will be relative to the repository path at ./terraform-modules"
),
)
@click.option("--limit", help="limit operations to a single definition", multiple=True)
@click.argument("deployment", callback=validate_deployment)
@click.pass_obj
def terraform(rootc, *args, **kwargs):
""" execute terraform orchestration """
tfc = TerraformCommand(rootc, *args, **kwargs)
click.secho(f"building deployment {kwargs.get('deployment')}", fg="green")
click.secho(f"using temporary Directory: {tfc.temp_dir}", fg="yellow")
# common setup required for all definitions
click.secho("downloading plugins", fg="green")
tfc.plugins.download()
click.secho("preparing modules", fg="green")
tfc.prep_modules()
tfc.exec()
sys.exit(0)
if __name__ == "__main__":
cli()
| 28.277372 | 126 | 0.672303 | 0 | 0 | 0 | 0 | 5,434 | 0.701342 | 0 | 0 | 3,963 | 0.511487 |
96b74e78276fe832497e5e00ed9a762980bd1fbc
| 3,777 |
py
|
Python
|
shs/input/dialogs/ac_init.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | 1 |
2016-06-22T13:30:25.000Z
|
2016-06-22T13:30:25.000Z
|
shs/input/dialogs/ac_init.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | 1 |
2017-12-01T04:49:45.000Z
|
2017-12-01T04:49:45.000Z
|
shs/input/dialogs/ac_init.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | null | null | null |
import wx
from wx.lib.agw.floatspin import FloatSpin
from shs.input.fdf_options import ChoiceLine, MeasuredLine, NumberLine, ThreeNumberLine
try:
from geom import Geom
except ImportError:
from shs.geom import Geom
class Bravais(ChoiceLine):
label = 'Composition'
choices = ['BCC', 'FCC', 'SC']
optional = False
class LatticeConstant(MeasuredLine):
label = 'Lattice constant'
value = 1.
digits = 2
increment = 0.01
units = ['Bohr', 'Ang']
optional = False
class DistortionLevel(NumberLine):
label = 'Distortion level (in %)'
value = 0.
digits = 0
increment = 1.
range_val = (0., 100.)
optional = False
class SuperCell(ThreeNumberLine):
label = 'Supercell'
optional = False
class ACInitDialog(wx.Dialog):
def __init__(self, *args, **kwds):
self.types = kwds.pop('types')
wx.Dialog.__init__(self, *args, **kwds)
self.bravais = Bravais(self)
self.type_label = []
self.typefs = []
if len(self.types) == 0:
self.add_type_btn = wx.Button(self, -1, "Add type")
self.add_type_btn.Bind(wx.EVT_BUTTON, self.add_type)
else:
for t in self.types:
self.type_label.append(wx.StaticText(self, -1, t))
self.typefs.append(FloatSpin(self, -1, min_val=0, value=1., digits=0))
self.sc = SuperCell(self)
self.alat = LatticeConstant(self)
self.dist = DistortionLevel(self)
self.__set_properties()
self.__do_layout()
def __set_properties(self):
self.SetTitle("Initialize geometry")
def __do_layout(self):
comp_label = wx.StaticBox(self, -1, 'Composition')
comp_sizer = wx.StaticBoxSizer(comp_label, wx.HORIZONTAL)
self.comp_inside = wx.GridSizer(2, len(self.types), 2, 2)
for l in self.type_label:
self.comp_inside.Add(l, 0, wx.ALIGN_CENTER, 0)
for fs in self.typefs:
self.comp_inside.Add(fs, 0, wx.ALIGN_CENTER, 0)
comp_sizer.Add(self.comp_inside, 1, wx.ALL | wx.EXPAND, 5)
if len(self.types) == 0:
comp_sizer.Add(self.add_type_btn, 0, wx.ALL | wx.EXPAND, 5)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.bravais.sizer, 0, wx.EXPAND, 0)
sizer.Add(comp_sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.alat.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.sc.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.dist.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.CreateSeparatedButtonSizer(wx.OK|wx.CANCEL), 0, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer)
self.Fit()
self.Layout()
def add_type(self, evt):
self.comp_inside.Clear()
self.comp_inside.SetCols(self.comp_inside.GetCols()+1)
self.type_label.append(wx.TextCtrl(self, -1))
self.typefs.append(FloatSpin(self, -1, min_val=0, value=1., digits=0))
for l in self.type_label:
self.comp_inside.Add(l, 0, wx.ALIGN_CENTER, 0)
for fs in self.typefs:
self.comp_inside.Add(fs, 0, wx.ALIGN_CENTER, 0)
self.Fit()
self.Layout()
def init_geom(self):
bravais = self.bravais.GetValue()
alat, unit = self.alat.GetValue()
sc = self.sc.GetValue()
dist = self.dist.GetValue()
if len(self.types) == 0:
comp = dict(zip([il.GetValue() for il in self.type_label], [ifs.GetValue() for ifs in self.typefs]))
else:
comp = dict(zip(self.types, [ifs.GetValue() for ifs in self.typefs]))
g = Geom()
g.initialize(bravais, comp, sc, alat, unit, dist_level=dist)
g.geom2opts()
return g.opts["AtomicCoordinatesAndAtomicSpecies"]
| 32.282051 | 112 | 0.605507 | 3,538 | 0.936722 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.047127 |
96b8879f01bcc6f2a6fb4f8f1c990b4167027165
| 5,377 |
py
|
Python
|
mgs/v1.0/data_server.py
|
vt-rocksat-2017/dashboard
|
e99a71edc74dd8b7f3eec023c381524561a7b6e4
|
[
"MIT"
] | 1 |
2017-08-09T19:57:38.000Z
|
2017-08-09T19:57:38.000Z
|
mgs/v1.0/data_server.py
|
vt-rocksat-2017/dashboard
|
e99a71edc74dd8b7f3eec023c381524561a7b6e4
|
[
"MIT"
] | null | null | null |
mgs/v1.0/data_server.py
|
vt-rocksat-2017/dashboard
|
e99a71edc74dd8b7f3eec023c381524561a7b6e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#########################################
# Title: Rocksat Data Server Class #
# Project: Rocksat #
# Version: 1.0 #
# Date: August, 2017 #
# Author: Zach Leffke, KJ4QLP #
# Comment: Initial Version #
#########################################
import socket
import threading
import sys
import os
import errno
import time
import binascii
import struct
import numpy
import datetime as dt
from logger import *
class Data_Server(threading.Thread):
def __init__ (self, options):
threading.Thread.__init__(self,name = 'DataServer')
self._stop = threading.Event()
self.ip = options.ip
self.port = options.port
self.id = options.id
self.ts = options.ts
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP Socket
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.connected = False
self.log_fh = setup_logger(self.id, 'main', self.ts)
self.logger = logging.getLogger('main')
self.last_frame_ts = dt.datetime.utcnow() #Time Stamp of last received frame
self.frame_count = 0
self.adsb_count = 0
self.ais_count = 0
self.hw_count = 0
def run(self):
print "Data Server Running..."
try:
self.sock.connect((self.ip, self.port))
self.connected = True
print self.utc_ts() + "Connected to Modem..."
except Exception as e:
self.Handle_Connection_Exception(e)
while (not self._stop.isSet()):
if self.connected == True:
data = self.sock.recv(4096)
if len(data) == 256:
self.Decode_Frame(data, dt.datetime.utcnow())
else:
self.connected = False
elif self.connected == False:
print self.utc_ts() + "Disconnected from modem..."
time.sleep(1)
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP Socket
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect((self.ip, self.port))
self.connected = True
print self.utc_ts() + "Connected to Modem..."
except Exception as e:
self.Handle_Connection_Exception(e)
sys.exit()
def Decode_Frame(self, rx_frame, ts):
self.frame_count += 1
self.last_frame_ts = ts
#print str(self.frame_count) + ',' + binascii.hexlify(rx_frame)
self.logger.info(str(self.frame_count) + ',' + binascii.hexlify(rx_frame))
self.Decode_Header(rx_frame)
def Decode_Header(self, rx_frame):
callsign = str(rx_frame[0:6]) #Callsign
dn_pkt_id = numpy.uint16(struct.unpack('>H',rx_frame[6:8]))[0] #downlink frame id
up_pkt_id = numpy.uint16(struct.unpack('>H',rx_frame[8:10]))[0] #uplink frame id
msg_type = numpy.uint8(struct.unpack('>B',rx_frame[10]))[0] #message type, 0=ADSB, 1=AIS, 2=HW
msg_type_str = ""
if msg_type == 0: msg_type_str = 'ADSB'
elif msg_type == 1: msg_type_str = ' AIS'
elif msg_type == 2: msg_type_str = ' HW'
print self.last_frame_ts, self.frame_count, callsign, dn_pkt_id, up_pkt_id, msg_type_str
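
    # Example (illustrative): a 256-byte frame whose first 11 bytes are
    #   b'KJ4QLP' + b'\x00\x2a' + b'\x00\x07' + b'\x01'
    # decodes to callsign 'KJ4QLP', downlink id 42, uplink id 7, type ' AIS'.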
def Handle_Connection_Exception(self, e):
#print e, type(e)
errorcode = e[0]
if errorcode==errno.ECONNREFUSED:
pass
#print errorcode, "Connection refused"
elif errorcode==errno.EISCONN:
print errorcode, "Transport endpoint is already connected"
self.sock.close()
else:
print e
self.sock.close()
self.connected = False
    def get_frame_counts(self):
        # NOTE: the self.valid, self.fault and self.recon trackers are assumed
        # to be attached to this object elsewhere; they are not set in __init__.
        self.valid_count = len(self.valid.time_tx)
self.fault_count = len(self.fault.time_tx)
self.recon_count = len(self.recon.time_tx)
self.total_count = self.valid_count + self.fault_count + self.recon_count
#print self.utc_ts(), self.total_count, self.valid_count, self.fault_count, self.recon_count
return self.total_count, self.valid_count, self.fault_count, self.recon_count
def set_start_time(self, start):
print self.utc_ts() + "Mission Clock Started"
ts = start.strftime('%Y%m%d_%H%M%S')
self.log_file = "./log/rocksat_"+ self.id + "_" + ts + ".log"
log_f = open(self.log_file, 'a')
msg = "Rocksat Receiver ID: " + self.id + "\n"
msg += "Log Initialization Time Stamp: " + str(start) + " UTC\n\n"
log_f.write(msg)
log_f.close()
self.log_flag = True
print self.utc_ts() + "Logging Started: " + self.log_file
self.valid_start = True
self.start_time = start
for i in range(len(self.valid.time_rx)):
self.valid.rx_offset[i] = (self.valid.time_rx[i]-self.start_time).total_seconds()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def utc_ts(self):
return str(dt.datetime.utcnow()) + " UTC | "
| 38.407143 | 109 | 0.565929 | 4,864 | 0.904594 | 0 | 0 | 0 | 0 | 0 | 0 | 1,055 | 0.196206 |
96b888fef4eb174221ced8eecdc0b4280bce51d8
| 3,932 |
py
|
Python
|
handledata.py
|
bioPunkKitchen/climate.local
|
ccd29da3d84542d5f9c73a5d75bc3ceefeef1f08
|
[
"MIT"
] | 1 |
2019-05-28T18:33:49.000Z
|
2019-05-28T18:33:49.000Z
|
handledata.py
|
bioPunkKitchen/climate.local
|
ccd29da3d84542d5f9c73a5d75bc3ceefeef1f08
|
[
"MIT"
] | 1 |
2019-12-30T14:52:02.000Z
|
2020-01-04T11:41:08.000Z
|
handledata.py
|
bioPunkKitchen/climate.local
|
ccd29da3d84542d5f9c73a5d75bc3ceefeef1f08
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import datetime
import time
import os
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
class handle_data:
data_file = "./data/data.log"
data_list = []
def __init__(self):
pass
def insert_data(self, timestamp, temp, state_onoff, state_light, state_cooling, state_heating):
"""
Insert data to log file and add timestamp.
"""
if state_onoff == 'on':
state_onoff = 1
else:
state_onoff = 0
if state_light == 'on':
state_light = 1
else:
state_light = 0
if state_cooling == 'on':
state_cooling = 1
else:
state_cooling = 0
if state_heating == 'on':
state_heating = 1
else:
state_heating = 0
data_string = str(timestamp) + ";" + str(temp) + ";" + str(state_onoff) + ";" + str(state_light) + ";" + str(state_cooling) + ";" + str(state_heating) + "\n"
self.data_list.append(data_string)
#print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tInserted data: data_list.append len=", len(self.data_list))
return
def append_data_to_file(self):
"""
Append data to log file.
"""
try:
with open(self.data_file, "a") as outfile:
for entry in self.data_list:
outfile.write(str(entry))
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for appending data")
self.data_list.clear()
return
def clean_file(self):
"""
Clean log file in order to reset measurement.
"""
try:
with open(self.data_file, "w") as outfile:
outfile.write("Timestamp; Temp; State_onoff; State_light; State_cooling; State_heating\n")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for writing")
return
def update_graph(self, path):
"""
Generate or update graph from data file.
"""
lines = sum(1 for _ in open(self.data_file))
if lines > 1:
data=np.genfromtxt(self.data_file, delimiter=';', skip_header=1, names=['Time', 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating'], dtype=([('Time', '<U30'), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), ('Cooling', '<f8'), ('Heating', '<f8')]))
fig, ax1 = plt.subplots()
if data['Temp'].shape:
if data['Temp'].shape[0] > 120:
ax1.plot(data['Temp'][((data['Temp'].shape[0])-120):(data['Temp'].shape[0])], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
ax1.set_xlim([0,120])
ax1.set_xticks([0,30,60,90,120])
ax1.set_ylabel('Temp (°C)', color='r')
ax1.tick_params('y', colors='r')
yt=range(-1,41,1)
ax1.set_yticks(yt, minor=True)
ax1.set_xlabel('last two hours (scale:min.)')
"""
ax2 = ax1.twinx()
ax2.plot(data['Light'], color = 'g', label = 'Light', marker = 'o')
ax2.plot(data['Onoff'], color = 'y', label = 'Onoff', marker = '*')
ax2.plot(data['Heating'], color = 'r', label = 'Heating')
ax2.plot(data['Cooling'], color = 'b', label = 'Cooling')
ax2.set_ylabel('Light (on=1/off=0)', color='b')
ax2.tick_params('y', colors='b')
ax2.set_yticks([0,1], minor=False)
"""
fig.tight_layout()
#plt.legend(['Temp. inside'], loc='upper left')
plt.savefig(path, bbox_inches='tight')
plt.close(fig)
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tGraph generated/updated.")
else:
#os.remove(path)
#os.mknod(path)
#os.chmod(path, 0o644)
try:
with open(path, "w") as outfile:
outfile.write("")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError: Could not generate empty graph file.")
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tNo data, graph is empty.")
return
# Test:
if __name__ == '__main__':
hd = handle_data()
#hd.clean_file()
hd.update_graph('./static/data_log.png')
| 30.015267 | 252 | 0.625127 | 3,671 | 0.933384 | 0 | 0 | 0 | 0 | 0 | 0 | 1,634 | 0.415459 |
96b9956367c551043c19348764e4606177dd4559
| 555 |
py
|
Python
|
day01/python/beckel/solution.py
|
clssn/aoc-2019
|
a978e5235855be937e60a1e7f88d1ef9b541be15
|
[
"MIT"
] | 22 |
2019-11-27T08:28:46.000Z
|
2021-04-27T05:37:08.000Z
|
day01/python/wiedmann/solution.py
|
sancho1241/aoc-2019
|
e0f63824c8250e0f84a42805e1a7ff7d9232002c
|
[
"MIT"
] | 77 |
2019-11-16T17:22:42.000Z
|
2021-05-10T20:36:36.000Z
|
day01/python/wiedmann/solution.py
|
sancho1241/aoc-2019
|
e0f63824c8250e0f84a42805e1a7ff7d9232002c
|
[
"MIT"
] | 43 |
2019-11-27T06:36:51.000Z
|
2021-11-03T20:56:48.000Z
|
import math
def fuel_needed(mass):
return math.floor(int(mass)/3 - 2)
def fuel_needed_recursive(mass):
fuel_needed_i = fuel_needed(mass)
if (fuel_needed_i <= 0):
return 0
return fuel_needed_i + fuel_needed_recursive(fuel_needed_i)
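
# Worked examples from the puzzle statement (sanity check):
assert fuel_needed(14) == 2 and fuel_needed(1969) == 654
assert fuel_needed_recursive(1969) == 966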
total_fuel = 0
total_fuel_recursive = 0
with open("input.txt", "r") as fp:
for line in fp:
total_fuel += fuel_needed(line)
total_fuel_recursive += fuel_needed_recursive(line)
print("Total fuel: " + str(total_fuel))
print("Total fuel recursive: " + str(total_fuel_recursive))
| 25.227273 | 63 | 0.704505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.093694 |
96b9a2d50c1e158d5bd73be619a6523cec7b4cfa
| 45,634 |
py
|
Python
|
arraytool_rc.py
|
zinka/arraytool_gui
|
c1ba763e170f7efde99414a29946410c4994e924
|
[
"BSD-3-Clause"
] | 11 |
2017-04-20T20:08:04.000Z
|
2022-03-29T22:30:24.000Z
|
arraytool_rc.py
|
zinka/arraytool_gui
|
c1ba763e170f7efde99414a29946410c4994e924
|
[
"BSD-3-Clause"
] | null | null | null |
arraytool_rc.py
|
zinka/arraytool_gui
|
c1ba763e170f7efde99414a29946410c4994e924
|
[
"BSD-3-Clause"
] | 7 |
2018-01-28T12:59:45.000Z
|
2022-03-19T12:34:25.000Z
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt4 (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = "\
\x00\x00\x03\x97\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x14\x49\x44\
\x41\x54\x38\x8d\x9d\x95\x4f\x68\x1c\x55\x1c\xc7\x3f\x6f\x66\x76\
\x36\x59\xdb\x22\x68\x0e\x36\x20\x45\x89\x50\x04\x0f\x1e\xa4\xb5\
\x88\x7f\xd0\x3d\x88\x47\x11\x64\x4c\x73\xf2\x56\xcf\x1e\xbd\x8a\
\x78\xea\x45\xeb\x41\x44\xcc\xa9\x67\xc1\x28\x88\x42\x6a\x6d\x83\
\xff\xb0\x17\xcd\xa1\x96\x6c\x43\x9a\xad\x49\x37\xdd\x9d\x9d\x79\
\xf3\x7b\xbf\x9f\x87\xdd\x6e\x77\xd3\xa8\x49\xbf\xf0\x98\xf9\x31\
\xcc\xe7\x7d\xdf\xf7\xfd\xe6\x8d\xcb\xb2\x8c\xff\x52\x15\xca\xc2\
\x39\x17\x9b\x99\x73\xce\x01\x70\xe7\x26\xad\xa5\x7f\xf4\xf3\xf2\
\xf9\x34\x4d\xdb\xbb\xdf\x4b\x00\xbc\xf7\xf1\xec\xec\xec\xe1\xbd\
\xc0\xeb\x1b\xad\xfa\x67\x9f\x7e\x8e\x73\x6e\x62\x00\x2c\x2e\x7e\
\x71\x7c\xe9\x9b\xa5\x15\xe0\x29\x60\xe7\x1e\xf0\xdc\xdc\xdc\xef\
\xce\xb9\xc7\x01\xdb\x0d\x36\x33\xd6\xd6\xd6\x28\xcb\x72\x54\x87\
\x10\x50\x55\x4e\x9f\x5e\xa0\xd3\xe9\x3c\xba\xf2\xd3\xca\x85\xb2\
\xef\x9f\x49\xd3\xb4\x3f\x01\x16\x91\xe3\xf3\xf3\xf3\x84\x10\x30\
\x33\x54\x15\x00\x55\xe5\x83\x0f\xdf\x27\x8a\x22\x2e\x5d\xbe\x34\
\x31\xe1\xc9\x13\x27\x00\x38\x73\xe6\x1d\x77\xee\x93\x8f\x9f\x5c\
\xbe\xb0\x7c\x16\xe3\xed\xdd\x60\x13\x11\xd7\x6a\xb5\x10\x11\x54\
\x15\x55\xc5\xcc\x70\xce\x91\xa6\x29\xcd\x57\x9a\x98\xd9\xc8\x35\
\x40\xbb\xdd\x46\x44\x78\xf6\xe4\x29\x77\xf1\xc7\x1f\xde\x50\xd9\
\x05\x0e\x21\x98\xaa\x3a\x11\x19\x2d\xf3\xce\x00\x48\x92\x84\xa5\
\xaf\xbf\x22\x84\x30\x72\xfc\xe2\x0b\x2f\xe1\xbd\x1f\x14\x0e\x54\
\x35\x82\x68\x32\x63\x91\xca\x44\x2a\x42\x08\xf8\xea\x2e\x58\x44\
\xc0\xc5\xe0\x62\x9a\xcd\x57\x27\xdc\x8e\x5f\x93\xa4\x8e\x59\xa4\
\xeb\xb5\x97\xa7\xbe\x3b\x76\xcd\x67\xab\xab\xea\xb2\x2c\xa3\xd1\
\x68\xc8\xc2\xc2\x42\x7c\xf1\x4a\x8b\xf3\xbf\x08\x51\x74\x77\x66\
\x33\x23\x72\x63\xf5\xae\xfd\x1d\xf2\x87\xab\x73\xc1\x39\xd7\xbe\
\x7c\xee\xcd\xa3\xa3\x8c\x43\x08\x6c\xde\x56\x9e\x7b\xfa\x18\xaf\
\x9d\x7a\x62\xf4\x02\xc0\xb0\xbb\xf6\xd4\xf8\xa3\xdc\x4b\xfc\xde\
\x47\xdf\xce\x64\xd9\x5b\x36\xca\x58\x44\xc8\x4b\x63\x66\xba\xce\
\x66\xa7\xe0\xfa\x56\xff\x5f\x50\x7b\x6b\xaa\x16\xf3\xf0\xe1\x1a\
\x51\x1c\x75\x61\xb2\x2b\xe8\x55\xf0\xd8\x03\x75\x30\x48\xc6\xe2\
\xd8\x8f\xd4\xa0\xf4\x81\xc8\x45\x9d\x11\xb8\xaa\x06\x9b\x97\x57\
\x8e\x23\x8d\x3a\x66\x90\xc4\xfb\x07\xf7\x0a\x61\x3a\x8d\xf1\x5e\
\x70\x11\xdb\x23\xf0\x20\x8a\x40\xee\xe1\x50\xa3\x06\x40\xb2\x0f\
\x6e\x50\x23\x2f\x03\x12\x94\xe9\xb4\x46\xd1\xf7\x00\x37\xc7\x1d\
\x13\x42\xa0\xa8\x00\x1c\x5e\xf4\x1e\x80\x1a\xc3\xaf\x72\x50\x7b\
\x51\xd4\x26\x3b\xa4\xd3\x2b\x31\xb5\x1b\xe3\x8e\x55\x44\xe8\x7b\
\xc3\x07\xa3\x5b\x04\x7a\xa5\xe0\x45\xf7\x04\xec\x19\x47\x19\xb3\
\x79\x2b\x47\x54\xd7\x27\x1c\x57\x22\xf8\x60\x6c\x77\x2b\x5a\x5b\
\x7d\x82\xfe\x3f\x6c\x5c\x3b\xb5\x88\xad\x4e\x51\x59\xd0\xcd\x11\
\x18\xd0\xbc\x0c\xd4\x6b\x31\xb7\xfa\x82\x1c\x10\x0a\x50\x56\xc6\
\x76\xb7\xf4\x38\xf7\xf7\x38\x98\x6e\xa1\x3c\xf2\xd0\x11\x1e\x6c\
\x24\x1c\x9a\x3a\x58\xab\x01\x44\xce\xd1\xeb\x95\x62\x6a\x5b\x00\
\xc9\xe2\xe2\x62\xd2\x6c\x36\xed\xcf\x8d\x92\x9b\x9d\x82\xef\x7f\
\xbe\x7a\x60\x28\x40\x30\xa5\xdb\xcb\x53\xe9\x6d\x5f\x23\x1e\x38\
\xae\xe7\x79\xfe\xe5\xaf\xab\xed\xd7\xab\x46\x3d\xb9\xdd\x2b\xee\
\x03\xeb\xcc\xcc\xa4\xdc\xd9\x58\xbe\x72\xfe\xdd\xab\xbf\x0d\x4f\
\x81\x1a\x70\x14\x98\x61\x2c\x9a\xfb\x90\x32\xf8\x3d\xfd\x95\x65\
\x59\x91\x00\x02\x5c\x07\x6e\x30\x79\xa6\x1c\x54\x36\x84\x57\x00\
\xff\x00\xf1\x00\xf2\xbe\xa6\x77\x61\xc0\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x3c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\xb9\x49\x44\
\x41\x54\x38\x8d\x8d\x95\xcf\x6b\x1d\x55\x14\xc7\x3f\xf7\xde\x99\
\x3b\xf3\x7e\x98\xa6\x79\x4d\x8a\x3e\x42\x63\x4c\x84\xb6\x21\xa5\
\x92\x55\x0b\xba\x68\x8c\xb8\x0b\xb4\x22\x2d\x6a\x31\x58\xc1\x8d\
\x74\xd9\x14\x04\x37\x21\x42\x71\x53\xf5\x1f\xa8\x88\x88\x5d\xb9\
\x10\x5c\x25\x8b\x4a\xba\x68\x5a\x5c\x34\xd0\x52\x5e\xc1\x1a\xac\
\xf0\x12\x5f\x26\xef\xe7\x9d\x1f\xd7\xc5\xfb\xe1\x7b\xc9\x0b\x7a\
\xe0\x30\xc3\xcc\xe1\x33\xdf\x7b\xe6\xfc\x10\xd6\x5a\xfe\x87\x09\
\x40\x02\x4e\xeb\x0a\x60\x81\x08\x88\x5b\xf7\x3d\xe6\x00\xbc\xf7\
\xc1\xa5\x25\x63\xcc\xf5\x83\xa8\x4a\x29\xd2\xe9\x34\x03\x03\x87\
\xf0\x3d\x0f\x80\x30\x0a\x71\x5d\x7d\x73\x79\xe9\x8b\x6b\x40\x7d\
\x2f\xdc\x01\x30\xc6\x5c\x7f\xe7\xfc\xbb\x14\x0a\x05\xaa\xd5\x2a\
\xb5\x5a\x8d\x5a\xad\x86\x31\x06\x63\x0c\x52\x4a\xa6\xa7\xa6\xb9\
\x72\xe5\x23\xa4\x6c\x0a\xae\xd7\x6b\x7c\xf5\xcd\xd7\x9f\x02\x4b\
\x80\x69\x29\xef\x05\x03\x78\x9e\x87\xd6\x9a\x30\x0c\x89\xa2\x88\
\x38\x8e\x49\x92\x04\x00\xad\x35\x43\x43\x43\x28\xa5\x50\x4a\x01\
\x90\x4e\x67\x18\x19\x1e\x06\x48\x03\xdb\x7d\x53\x01\xe0\xfb\x3e\
\x5a\x6b\xa2\x28\x22\x49\x12\xda\xb9\x37\xc6\xe0\x79\x1e\x99\x4c\
\x06\x21\x24\x42\x34\x15\x0b\x01\x5a\x7b\x3d\x8c\xbe\x60\xad\x35\
\xbe\xef\xf7\x40\x85\x10\x48\x29\xd1\x5a\xe3\xb5\x72\xdb\x6d\xad\
\xb4\x88\x7e\x60\xd9\x1d\xe4\x38\x0e\x9e\xe7\xe1\xfb\x3e\xbe\xef\
\xe3\x79\x5e\xc7\xb5\xd6\x88\xbe\x08\xf8\x21\x9b\x7d\xe3\x7b\x21\
\x6e\xf5\x55\x2c\xa5\xec\xb8\x52\x0a\xc7\x71\x98\x98\x98\x20\x8e\
\x63\x8a\xc5\x22\xae\xeb\xf6\x85\xfe\x3c\x3d\x7d\xf1\xf0\xd4\xd4\
\xf5\xe0\xd1\x23\xf5\x9d\x10\x1f\xb7\x1e\x47\x1d\xc5\xed\x63\x77\
\xfb\xe6\xe6\x26\xbe\xef\x33\x39\x39\xd9\xf9\x69\x6d\xb3\x49\x82\
\x73\xeb\x5b\xb2\xf9\xfc\xe2\x6b\x57\xaf\x7a\x4a\xa9\x48\x2a\x55\
\x96\x8e\x53\x16\x50\xec\x49\xfc\x5e\xb0\xb5\x96\x42\xa1\xc0\xa9\
\x53\xa7\x30\xc6\x74\xe2\xc2\x20\xe0\xce\xfc\x3c\xe3\xc3\xc3\x8c\
\x9f\x3d\xeb\xdb\x52\x89\xd7\x6f\xdc\xf0\xdb\xef\x7f\x59\x58\xf0\
\x7b\xc0\x7b\x55\x67\x32\x19\x66\x66\x66\xb8\x7b\xf7\x2e\xc7\x8f\
\x1f\x07\xa0\xf2\xf4\x29\xab\x73\x73\x8c\xce\xcc\x30\x3c\x3a\xca\
\xee\xca\x0a\x58\x8b\x50\x0a\x9b\x24\x0c\xcc\xce\x22\xa4\x64\x9f\
\x62\x21\x44\xe7\x03\x27\x4e\x9c\xe0\xd9\xb3\x67\x94\x4a\x25\x52\
\xa9\x14\x00\xbf\x5e\xb8\xc0\x91\x91\x11\x06\x7d\x9f\xdd\xd5\x55\
\x6c\x14\x91\x54\x2a\x24\x95\x0a\x32\x95\x22\x75\xf2\x24\x08\xc1\
\x81\x8a\xf3\xf9\x3c\xd5\x6a\x95\x87\x0f\x1f\x92\xcd\x66\x71\x9c\
\x66\xe8\xe9\x9b\x37\xb9\x33\x3f\x8f\x63\x0c\x2f\x00\xf5\x8d\x0d\
\x6c\xab\x91\x00\x1a\x4f\x9e\x60\xe3\x78\x7f\x71\xb7\xc1\x5b\x5b\
\x5b\xec\xec\xec\x90\x4e\xa7\xd1\x5a\x03\x90\x24\x09\x47\xce\x9c\
\xe1\xcd\xb5\x35\x56\xce\x9d\xe3\x70\x36\xc3\xd1\xd3\xa7\x91\x03\
\x03\xcd\x8e\x51\x8a\xb8\x5a\x05\xfa\x74\x4d\x1b\x1c\x86\x61\xa7\
\xc4\xa4\x94\x14\x0a\x05\xa4\x94\x8c\x8d\x8d\x91\x7b\x65\x9c\xd9\
\x7b\xf7\xf8\xe9\xec\x19\xea\x9b\x9b\xf5\x51\xc7\xf1\x7f\x5b\x5b\
\x33\xf6\xdf\xbe\xf8\x5b\x1e\x04\xee\xae\x67\xc7\x71\x28\x95\x4a\
\xec\x04\x3b\x54\x2a\x65\xe2\x38\x46\x0d\x1e\xa2\xb1\x78\x8d\x72\
\xad\xb6\xf2\x78\x63\xa3\x66\x21\xb9\x64\xad\xdb\xf2\x17\xf7\x81\
\xdb\x70\xa5\x14\x5a\x6b\x8a\xc5\x22\xeb\xf7\xd7\xd1\x9e\x26\xff\
\x52\x9e\x5c\x2e\x47\x14\x45\x18\x63\xa8\x9a\x06\xe7\xb7\xb7\x3f\
\xa9\x95\xcb\x5f\x0a\x78\xdc\xcd\xe8\x3b\x40\xb4\xd6\x04\x41\xc0\
\xfa\xfa\x3d\x5e\x1e\x1f\xe3\xf2\xe5\xf7\x39\x76\x6c\xac\x39\xfd\
\xa2\x90\x30\x0a\x89\xe3\x98\x60\x77\x17\xa0\x71\x31\x8a\x3e\x07\
\x3e\xeb\x0b\xb6\xd6\xa2\x94\xc2\x5a\xcb\x83\x07\xf7\x39\x34\x38\
\xc0\x87\x0b\x97\xc9\xe5\x72\xb8\xae\x4b\x1c\x87\xd4\xea\x11\x02\
\xb0\x16\x92\x24\x26\x08\x02\x80\x06\x07\x6d\x90\x38\x8e\x91\x52\
\xf2\xfc\xaf\xe7\x08\x2c\x6f\xbd\x3d\x47\x2a\xe5\xe3\xba\x6e\x73\
\xd8\x87\x61\xcf\x08\xb3\x34\x5b\xfa\x3f\xc1\xc1\x4e\xc0\x1f\x7f\
\xfe\xce\xc4\xab\xcd\xe3\x22\x2c\x8d\x46\x9d\x86\x69\x34\x81\x1d\
\xaa\xe8\x30\x92\xc4\x52\xa9\x94\xa1\xb9\x3d\xfa\x83\x47\x8e\x8e\
\x2c\xdf\xfe\xf1\xf6\x62\x77\xe7\x89\x83\x66\x64\x57\xea\xb4\xd6\
\xcb\xec\x59\x49\x6d\xfb\x07\xe7\xa5\x7a\x91\x9a\x1b\x94\x49\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xb4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xfc\x00\xe9\x00\x4f\x34\xd7\
\xb1\x0d\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\
\x0d\xd7\x01\x42\x28\x9b\x78\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd5\x02\x12\x0e\x25\x30\xca\x43\x26\x09\x00\x00\x02\x41\x49\x44\
\x41\x54\x38\xcb\xad\x95\xcd\x6b\x13\x61\x10\x87\x9f\x99\xdd\x54\
\x9a\xe2\x07\x4d\x9b\x35\x08\x9e\x3c\x88\x78\x12\xc4\x56\xf0\x1f\
\xf0\xd2\x83\x27\x6f\xe2\xd5\xb3\x08\x62\x2f\xa2\xe2\xc7\x3f\xe1\
\xc1\x8b\x78\x90\xdc\x45\x11\x04\x35\xa5\x17\x0b\x7a\xb2\xb9\xd4\
\x90\x18\x28\x22\x31\xa6\xd9\x64\xc7\xc3\x7e\x66\x53\xed\x6a\x7d\
\x21\x64\x77\xd8\xf7\x79\x67\x7e\xbf\x99\x5d\x61\x9f\x6b\xd4\x59\
\x11\xa0\x04\xf8\xae\x57\xb7\x38\x2e\xf1\xc5\xfd\x87\x77\x6f\x03\
\xab\x7f\x03\x55\x09\xa8\x1c\x19\x50\x5b\xec\xe1\xa8\xb1\x33\xd4\
\xc7\x17\x2f\x34\xaf\xb9\x5e\xbd\xef\x66\x9e\x5b\xbd\x71\xfd\x66\
\x61\xa8\x99\x0f\xfe\x47\x82\xfe\x4b\x7c\xab\x60\x94\xd9\xfe\xfa\
\xe9\x8a\xe1\xbc\x1a\x75\x56\x9e\xba\xf9\x0d\xcd\x66\x13\xc7\x71\
\x30\x33\x44\x04\x91\xb0\xa8\xec\x35\x18\x25\x69\x51\xd6\x17\x38\
\xa5\x59\x6c\xe6\x12\xe2\xcc\x73\x68\xe1\x35\x16\x3c\xbb\x2c\xf6\
\x6d\x5d\xf3\xe0\x18\x10\x43\xcc\x0c\xcc\x30\x33\x2c\x30\xcc\x00\
\xc6\xa8\xf4\x10\xd9\xc1\x64\x01\x74\x1e\x50\xe0\x00\x30\x2e\x03\
\x87\xf7\x04\x23\x40\x1c\x53\x41\x15\x44\x1c\x02\x39\x48\xc0\x22\
\x32\xda\x04\xff\x0d\x04\x1d\x86\x83\x06\x62\x3f\x36\x80\xed\xdf\
\x82\x55\x35\xfc\x17\x41\x44\xd3\xc3\x44\x51\x71\x08\x38\xca\x80\
\x65\x86\x76\x02\x86\x6b\xb8\xfe\x73\x36\x3f\x6f\x21\x04\x4f\x80\
\x96\xbb\x1b\x58\x55\x53\x8d\x11\x90\xac\xc6\x02\x2a\x08\x33\x98\
\x1c\xc3\xa7\xc2\x98\x53\xcc\xce\x95\x68\x6c\x7c\xe7\xcc\xc9\xd6\
\x07\xe0\xe7\x54\xc6\x6b\xeb\x8d\x49\xb3\x32\xd9\x27\x3f\xe2\x2a\
\x1c\x4c\xe7\x30\xe7\x38\x32\x73\x9a\x5e\xbf\x84\xeb\xd5\xfb\xae\
\x57\xb7\x29\xf0\xb9\xb3\x4b\xd4\x6a\xb5\x9c\xa6\xa4\x72\xa8\x44\
\xf7\x51\x75\xa2\x61\x55\x93\x63\xc1\xae\x52\xb4\xdb\xed\x70\x93\
\x69\x28\x43\x52\x3e\x49\x15\x91\xa7\x44\x0f\x4c\x2d\xf7\x4f\x5d\
\x61\x62\x49\x76\xf9\x5e\x4e\x0e\x10\x41\x29\x00\x4e\x36\x01\x1a\
\x99\x15\x6a\x0a\xa2\xd1\x7d\x1c\x8f\x92\x0d\xe3\xb9\x71\xcf\x07\
\xde\xbd\x7f\x3b\x95\x91\x48\x28\x45\x0c\xcd\x6a\x2c\xb1\x09\x7b\
\x81\x97\x97\xce\x53\xad\x56\xd3\x1e\xd6\xc8\x44\x34\x32\x4e\x10\
\x14\xd5\x6c\x97\x68\x31\x8d\xbb\xdd\xee\x44\xbb\x31\xf1\x9e\x90\
\xe4\x80\x6c\x65\x85\x34\x4e\x4f\xd1\x8c\x79\x11\x90\x78\xc4\x49\
\xf4\x2f\x0c\x4e\xb2\xd1\xd4\xb8\x88\x17\x5b\x99\x4e\xa3\x49\xa6\
\x8f\x0b\x66\x9c\xb8\x1e\x8f\x36\x93\xc6\x85\x55\x50\x2c\x63\xcf\
\xf3\xf8\x1f\x2b\x01\x07\x41\x70\xe7\xc1\xa3\x7b\xb7\xf6\x03\xfb\
\xb2\xd5\xba\x3a\xf5\xcd\xcb\xab\xf0\x8f\xec\xe4\x63\xfa\x0b\x23\
\xfb\x93\xa3\x4d\x98\xb6\xe0\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x04\x7e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\
\x0d\xd7\x01\x42\x28\x9b\x78\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd5\x0b\x0a\x0d\x34\x15\x48\x65\x72\x06\x00\x00\x04\x0b\x49\x44\
\x41\x54\x38\xcb\xad\x95\x5d\x6c\x14\x55\x14\xc7\x7f\x33\xfb\x39\
\x9d\x6d\xbb\xd3\x65\x97\xdd\xd6\x65\x59\x3e\x96\x22\x05\xda\x92\
\x56\x8b\x1f\x58\x13\x1f\x30\x42\x34\x46\x7c\xd0\x17\xd3\xf8\x00\
\x35\x48\x55\x52\x03\xfa\x20\x46\x02\x18\x8b\x69\x6c\x1f\x44\x1f\
\x34\x8a\x92\xc8\x8b\x0f\x68\x0c\x31\x36\xc6\x88\x34\x41\x54\xac\
\x25\xb4\xb5\x36\xc5\xd6\xdd\xb6\xd3\x76\xbf\x67\x67\x77\x7c\x68\
\x77\x6c\xa9\x50\x4c\xfc\x27\x37\xe7\xe6\x9e\x73\xfe\xf7\x7f\xcf\
\x3d\x33\x57\x60\x01\x9e\x7b\xa3\xfd\xfd\x3f\x12\x95\x8f\xc6\xa6\
\xb4\x8a\x85\xeb\x92\xcd\x48\xfa\x5c\xf1\xbe\x95\xf2\x6f\x6d\x9d\
\xaf\x7e\xf6\x1d\xb7\x01\xa1\x38\xe9\xfa\xa0\xad\xf6\xec\x8f\xe1\
\x8b\xab\x23\x1b\xb3\xa1\x95\x3e\x31\x9e\x13\x9d\x55\x65\x4e\xf1\
\xcf\xb8\xc6\x54\x3a\xcb\xf4\xd5\xf3\x43\xa9\x64\xc6\x11\x56\xfa\
\xdb\xbb\x5f\x3b\xfd\xf1\x72\xc4\x96\x22\xe9\xf9\xfe\xc0\xd9\x55\
\x5b\x1e\x70\x0f\x0f\xf4\xdb\x07\x87\x87\x6d\x23\xbf\x0f\x8a\x23\
\x13\x93\xd8\x4a\x2b\x50\x9c\x76\x9c\xde\x75\xca\x74\xba\x50\x76\
\x3d\xe6\xdc\xb9\xeb\x11\xcf\xa5\x8b\xdf\xfc\x3a\x70\x2b\x62\x11\
\xe0\x87\x6b\xce\xe3\x77\xac\xb0\x0e\x27\x06\xbe\xba\xda\xe8\xbd\
\xf0\x74\x4f\xc7\x3e\xf1\xbe\x50\xef\xce\xdc\xcc\xf5\xd9\xd1\xc1\
\x2b\x46\x74\x36\x4e\x3c\x99\x40\xf1\x05\x90\x7d\x41\xc7\x2f\xb1\
\xad\x9f\xbe\xf4\xfa\x2e\x79\x59\x62\x35\x25\xad\xb3\xe4\x13\x33\
\xf7\xaf\x1f\x7d\xe8\xe8\x8b\xef\x7d\x02\x70\xec\xe0\x47\x5f\x36\
\x04\x47\x8f\x24\x54\x55\x48\xce\x4e\xa1\x65\x52\x00\xf8\x82\xeb\
\x45\xaf\xac\x8d\x4c\x68\xe1\xee\x65\x6b\xfc\xd4\xa1\x17\x3e\x0f\
\x3b\x72\x3f\xcf\xaa\x85\xc3\x0b\x9d\x0e\x49\xe0\xdb\xa9\x10\xab\
\x37\xd5\x2f\x4a\x72\xa7\xfb\x98\xf8\x6b\x9c\xca\xc2\xf4\x8d\x7c\
\xed\x9d\x6f\x77\x9d\x00\xb0\x02\x04\xcb\xe2\x5f\xa4\x26\xed\xdd\
\xc7\x8e\xbe\xb9\x64\xe7\x07\xdb\xba\x96\xac\x65\x3c\x77\x31\x3a\
\xd4\xc3\x87\x27\x17\xe9\xe0\xe5\x43\x07\x8f\x03\x27\xcc\x52\xa4\
\xc6\xed\xd7\x14\xb7\x97\x54\x6a\xee\xb8\xc9\x64\xd2\x1c\x26\x59\
\xfa\x9f\xb9\xc5\x66\x5f\x14\x37\x32\x3a\x42\x32\x99\xc4\xeb\xf5\
\xb2\xff\x40\x6b\xad\xa9\x18\x58\xeb\xf7\x07\x00\x48\x24\x13\xb8\
\x64\xd7\xbf\x2a\x2d\xc2\x28\x14\x00\x78\xec\xc8\x69\x74\x4d\xe3\
\xcc\xe1\x3d\x00\x28\x8a\x42\x2c\x16\x8b\x00\x97\xc5\xf9\xd8\xdd\
\x75\x5b\xeb\x49\xa7\xd3\x08\x08\xa6\xd2\xea\x86\x1d\x54\x37\xec\
\xa0\x5c\xf1\x02\x90\xcd\x66\xe6\x6c\x3a\xce\xba\xda\xbb\x59\x53\
\xd3\x00\x40\x89\x54\x02\xc0\x3d\x4d\xf7\x02\x6c\x36\x4b\x01\x3c\
\x5c\x59\x59\xb5\x48\xd5\xbb\xcf\x37\x33\xf0\xd3\xf7\x00\x8c\x8d\
\x0e\x81\x61\x90\xd7\x73\x64\xd2\x49\xe2\x33\x2a\x00\x43\x57\x7a\
\x79\x67\xdf\x76\x33\x27\x18\x0c\x01\xbc\x02\x20\xee\x3f\xd0\x5a\
\x2b\x49\x25\xd8\xed\x76\x3c\x1e\x8f\x19\x14\xf0\x86\xe8\x68\xd9\
\x46\x7f\x6f\x0f\x15\xde\x4a\x0a\x86\x31\x5f\x07\x83\x32\xb7\x87\
\x81\xcb\x17\xe8\x68\xd9\x46\xa8\x32\x02\x80\x2c\xcb\xf3\xe5\xf0\
\xb0\xff\x40\x6b\xad\x08\x44\x56\xfa\xfc\x00\xa8\xaa\x8a\x2c\xcb\
\xc8\xb2\x8c\x28\x8a\x84\xab\x36\xd0\xb9\xb7\xc9\x24\x17\x05\x01\
\x57\x79\x05\x03\x97\x2f\xd0\xb9\xb7\x89\x70\xd5\x06\x33\x5e\x55\
\xe7\x4e\x51\x5e\x56\x0e\x10\x11\x81\xcd\x77\x6e\xdc\x44\x2e\x97\
\x33\xac\x56\x0b\x33\x33\x33\xa8\xaa\x8a\x24\x49\xc8\xb2\x4c\x4d\
\xa4\x9e\x53\x6d\xcd\xf4\xf7\xf6\xe0\x5e\x11\x60\xb8\xef\x12\xa7\
\xda\x9a\xa9\x89\xd4\x9b\x84\xaa\xaa\x92\xcf\xeb\x24\x12\x09\x63\
\xed\x9a\xb5\x00\x4d\x62\x36\x9b\xdd\xe2\xf7\xfb\x71\xb9\x5c\x82\
\xae\xe7\x29\xcc\xdf\x78\x31\x41\x55\x55\x2a\x4a\x03\x74\x3c\xdb\
\x40\x7f\x6f\x0f\x6f\x3d\x53\x4f\x45\x69\xc0\xf4\x01\xe4\xf3\x3a\
\x08\xa0\x28\x8a\x10\x0c\xae\x22\x93\xc9\x96\x5a\x1d\x0e\xc7\xee\
\x48\x24\x62\xb6\xcb\xcd\xa0\x28\x8d\x7c\x7d\xb2\x71\xd9\xdf\x65\
\x75\x75\x35\x4e\xa7\xa3\xc5\x0a\x10\x8b\xc5\x98\x9c\x9c\xbc\x79\
\xdf\x16\x2f\xee\x36\x7c\x5e\xef\x5c\x6b\x16\x3f\x10\x34\x4d\xc3\
\xe3\xf1\x70\xee\xdc\x39\x1c\x0e\x07\xb2\xab\x84\x52\x57\x19\xa2\
\x28\x22\x49\x12\x82\x20\x20\x49\x12\x2e\x97\x0b\x55\x55\xf1\xf9\
\x7c\x44\xa3\xd1\x25\xb6\x48\x2c\x2e\xdc\x4d\xd7\x75\x2c\x16\x0b\
\x86\x61\x20\x0a\x16\xb2\xd9\x2c\x9a\xa6\x61\x18\x06\x36\x9b\xed\
\x96\xca\x6f\x84\xa9\x58\x10\x04\xd2\xe9\x34\x75\x75\xb5\xe8\x7a\
\x1e\x8b\xc5\x82\xae\xeb\x66\xa0\xdd\x6e\xe7\xbf\xc0\x0a\x10\x8d\
\x46\x0d\xbf\xdf\x3f\xff\x4c\x79\x96\x4d\x72\xbb\xdd\x00\x84\xc3\
\xe1\x25\x76\x6c\x6c\xcc\x00\x04\xe1\x89\x27\x1f\xdf\x13\x08\xf8\
\xcf\xf0\x3f\x62\x6c\x6c\x7c\xfb\xdf\xe7\x60\x9f\x53\x70\x22\xd1\
\x6a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xd5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\
\x67\x9b\xee\x3c\x1a\x00\x00\x04\x67\x49\x44\x41\x54\x38\x8d\xa5\
\x93\x7d\x68\xd5\x55\x1c\xc6\x3f\xe7\xf7\x7a\xef\xfd\xdd\xdd\x3b\
\xb7\xb6\xb9\x79\xa7\x6d\xbe\xcd\x19\xe9\x72\x58\x62\x43\x7b\x93\
\x62\x25\x46\x82\x34\xc8\x7f\xf2\x6a\x50\xa9\x11\x44\x21\xcd\x5c\
\x85\x95\x98\x53\x4a\x21\x15\x35\x19\x51\x52\x19\x68\x8a\x60\x66\
\x8a\xba\x4d\xa7\xd3\xcd\x97\x4d\x45\xd7\x72\x2f\x6d\x73\x6f\xf7\
\xde\xdd\x97\xdf\xef\xf4\x47\x57\xc9\x5a\xa0\xf8\x85\x87\x73\xe0\
\x70\x3e\xe7\xe1\xe1\x39\x62\xdd\xcb\x8a\x1f\x58\xa4\x08\xb1\x5c\
\x0a\x91\x2a\xa4\xb3\x61\x69\x95\xbd\x82\xfb\x1c\x45\x08\xde\xcd\
\x19\x53\xf8\xf1\xac\xb9\xc1\xc0\x93\xa5\xaf\x78\x3d\xbe\x11\xcb\
\x2a\xcb\xd4\x05\xf7\x0d\x56\x55\x3d\x58\x58\x3c\xdb\x0c\x5f\x3b\
\x48\xbc\xf7\x0a\x53\x1f\x9d\x63\x29\x82\x75\x95\x65\xaa\x7e\x5f\
\x60\xc7\x8e\xc7\x9c\x44\x02\x45\x35\x18\x6c\x3b\x8b\x65\x79\xc8\
\x1a\x35\xd6\x0f\x2c\xd9\xba\x52\xbc\xb0\xe3\x23\xf3\xd8\x57\x4b\
\xc4\x3d\x3f\xa2\x00\xdb\xcf\xd5\x1c\x88\xf8\xf2\x67\x63\x65\x4d\
\xa6\xbf\xa5\x86\x82\xa2\x12\x8f\x99\xc6\x1a\xc3\x95\xf2\xad\x94\
\x4c\x13\x39\xe4\xdd\xbb\x63\xc9\x8a\xee\xf6\x6b\x95\x47\xf6\x7f\
\x33\xa4\x8d\x98\x80\x99\xf6\x20\x2d\x9d\x47\x48\x1b\x9b\xee\x9a\
\xb7\x70\xb9\x3b\x3b\x77\x5c\x54\x24\x78\xf8\x5e\xc1\x42\x4a\x09\
\x40\x65\x99\x3a\x5f\xd3\xb5\xaf\xd3\x27\x7a\xdd\xbe\x0c\x8b\x19\
\x4f\x3d\x87\xcb\xf0\x72\xe6\xad\x3d\xf4\xfc\xd4\x6c\x0b\x5b\x6c\
\x2a\xb5\xed\x37\xef\xda\xf1\xad\x8d\x7f\xa2\x53\xe7\xce\x8b\xf5\
\x3d\x10\x30\x65\xd1\x8c\xc9\x44\x7e\x3f\x45\xcd\xb3\xdb\xe0\x70\
\x27\x85\x23\x03\xaa\xad\xc8\xd7\x36\x97\x8b\x85\x9b\xcb\xef\x2e\
\x6f\x05\x60\x6b\xb9\x98\x29\xa1\xae\x70\xea\xb8\x8c\x49\x53\x0b\
\x44\x6f\x6d\x13\x0d\xf3\xce\x92\xd6\xad\x13\xb0\x52\xb9\xf8\xe7\
\x0d\x52\x17\x17\x68\x39\xb9\xb9\x1b\x55\x4d\x5c\xdf\x5c\x2e\xe6\
\xdf\x55\x14\xdb\x56\x89\xbd\x52\xe8\x73\x52\xd3\xfc\x61\x4f\x63\
\xcc\xe3\xec\x08\x69\x05\x19\xa3\x18\x8a\xc7\xb8\xda\xd7\xc5\x98\
\x4f\xc6\x93\xf5\x4c\x00\x77\x4a\x31\x7d\x3d\xed\xd4\xfe\xb6\x3f\
\xdc\xd7\xd3\xd1\x18\x8f\x3b\x65\xc1\x0a\x79\xf9\x7f\xc1\x00\x5b\
\x56\x89\x00\x0e\x93\x46\x7e\xa6\x7c\x37\x21\x6d\x64\x6a\x77\x34\
\x44\xb7\x16\x25\x6f\xbd\xc4\x33\x5a\xc5\x34\x3d\xe8\x86\x85\xcb\
\xfb\x08\x9a\xa7\x88\x3f\xae\x5d\x90\xc7\x0f\xee\x09\xc7\xa2\x83\
\xab\x1d\x47\x7e\x1a\xac\x90\x89\x61\xc1\xb7\x66\xaf\xaa\xbe\x6f\
\xab\xf2\x83\x94\x59\xf9\x22\xe7\x9d\xeb\x42\x71\x49\x34\xcd\xc4\
\x34\x3d\x18\xa6\x07\x4d\x33\xd1\x35\x2f\xba\x35\x8d\xb8\xcc\xe7\
\x74\xf5\x2f\x91\x2b\x0d\x47\x7a\x63\xb1\xe8\x2a\x60\x7b\xb0\x42\
\x46\x87\x05\x03\x6c\x59\x29\xa2\xa5\x65\xaf\x1b\xa1\xae\x9d\x48\
\x27\x84\xa6\x19\x98\xa6\x85\x61\x5a\xe8\x7a\x1a\xc8\x28\x6e\xb7\
\x07\x55\x4f\x05\xb3\x88\xfe\x01\x8b\xfa\xea\x7d\xe1\xab\x17\x6a\
\xe3\x52\x3a\x6b\x1d\x87\x35\xca\x7f\xa8\x80\xa2\xd2\x1a\x1e\xec\
\x46\x35\x32\x01\x90\x52\x22\xf9\xdb\x80\xa2\x0c\xa1\x99\x01\x22\
\xa1\x28\xd2\x8e\x21\xa2\x27\xf1\xbb\xea\x79\xfc\xe9\x52\xcf\xf3\
\x65\xcb\xfc\x52\x28\x2b\x6e\xb7\xe2\xdf\x23\x1d\x76\x77\xb5\xb5\
\x38\x86\x6b\x3c\x20\x49\xc4\x13\x48\xdb\x01\x4c\xec\x84\x8d\xae\
\x86\xd0\x3d\x13\xe9\xed\xe9\xc7\x91\x06\x20\x51\x54\x2f\x97\x1b\
\xab\xa3\x9a\x42\x55\xb0\x42\x0e\x0d\x0b\x76\x1c\xbe\x6f\x3e\x7f\
\x26\xac\xbb\x27\x20\x84\x0b\x29\x1d\x62\xb1\x04\x52\x4a\x6c\x27\
\x85\x48\xa8\x17\x97\x5b\xc1\xe5\x2b\xa4\xb3\xad\x0f\x5b\x2f\xe1\
\x62\xfd\xf1\x44\xd3\xb9\xea\x96\x78\xdc\x79\x63\x58\xc7\x42\x08\
\x65\xf1\x87\xd4\x47\x42\xe1\x4b\x1d\xad\x4d\xd2\xf0\x4e\x41\x51\
\x21\x11\xb7\x89\x86\x7b\xd1\x5d\xe9\x0c\x45\x24\xe1\xde\x2b\x78\
\xfc\xf9\xb8\xfc\x25\xd4\x1e\xda\x19\x3f\xf1\xeb\x8f\x97\x7a\x7a\
\xed\x92\x60\x85\x8c\xdc\x01\x16\x42\x28\x42\x08\x1f\x90\x05\x64\
\x1f\x3e\xc9\xda\xb3\xb5\xd5\x51\xd3\x2a\x46\xd5\x33\x31\x3c\x99\
\x84\x07\x87\x88\x85\xaf\x92\x92\x31\x9d\xee\x6e\x95\x81\x7e\xc9\
\xf1\xc3\x7b\xa2\xa7\xeb\x1a\x4f\xac\xdc\x98\x58\xf0\xf6\xe7\x28\
\x42\x08\x9f\x10\x42\x28\x49\xa8\x00\xac\xa4\x52\x00\x5f\xd5\xcf\
\xdc\x68\xef\xe8\x69\xb8\x50\x77\x20\x61\xa5\xcf\xc5\x71\xe2\xb8\
\x7d\x63\xe9\xbb\x09\xaa\x91\x87\x61\x8d\x63\xff\xae\xf5\xb1\x93\
\xa7\x9a\xf6\x2d\x5d\x1d\x2f\x6f\xed\xc0\x4c\xde\xb5\x00\xeb\x76\
\xdd\x84\x10\xde\xe4\x81\x37\xa9\x94\x40\x16\x59\xef\xbd\xca\x17\
\x8f\xcd\x7a\x22\x23\x3b\x37\x53\x18\xae\x6c\xfa\xda\x1b\x69\xbd\
\xde\x6c\x5f\x3c\xdf\x12\xde\x77\xd4\xd9\xb0\xfb\x10\xc7\x80\x41\
\x60\x20\xb9\x0e\x02\x5d\x77\xf4\x58\x08\xe1\x02\xfc\x80\x27\x29\
\x77\x71\x21\xa3\x83\x2f\x89\x6d\x33\xe7\xcc\x4b\x89\x45\xda\x38\
\x53\x5d\x1b\xeb\xec\x72\x8e\xae\xaf\x92\x1b\xda\xba\xb8\x09\x44\
\x92\x0a\x03\x37\x81\x01\x29\xa5\x3d\xec\x07\x49\x46\x64\x00\x2e\
\xc0\x58\xf4\x22\x53\xa6\x3f\xc4\x0f\x71\x9b\x53\x0d\xcd\x7c\xb9\
\x69\x17\x35\x40\x1c\x88\x01\x43\x40\x14\x48\xc8\x7f\xc0\xfe\x02\
\xac\xc3\xe6\x28\xcc\x5c\x0b\x9f\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x04\x49\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\
\x0d\xd7\x01\x42\x28\x9b\x78\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd5\x0b\x0a\x0d\x39\x23\x32\x71\x99\xd2\x00\x00\x03\xd6\x49\x44\
\x41\x54\x38\xcb\xad\x94\x5f\x4c\x5b\x55\x1c\xc7\x3f\xf7\x52\xfa\
\x87\x0b\x94\xdb\xae\xa5\x2d\x56\x56\x60\x1d\x73\x6c\x43\x96\x4e\
\x99\x7f\x26\x26\x3e\xcc\xb8\x45\x63\x9c\x0f\x1a\x63\xc2\xd3\xc6\
\x02\xd4\x6d\x62\x36\x7d\x70\xea\x1c\xd3\x30\x25\x42\x8c\xe8\x83\
\x46\xa7\x4b\xdc\x8b\x2f\x1a\xb3\x07\x89\x31\xe2\x48\x26\xea\x44\
\xb6\x41\xc5\xa6\x13\xa4\x8c\x0b\xf4\x7f\x0b\xbd\x3e\xb4\x54\xea\
\xdc\xc0\x84\x6f\x72\x73\xce\x3d\xe7\xfb\xfb\x9e\xef\xf9\xde\x73\
\xae\xc0\x32\x1c\x7c\xad\xe3\x83\x3f\xc2\x8e\x47\x83\x33\x49\xd3\
\xf2\x71\x43\xa1\x1a\xb1\x16\x87\x86\xcb\xa5\xdf\xbc\xdd\x2f\x7d\
\xfe\x1d\xab\x80\xb0\xd4\xe9\xf9\xd0\x5b\x7f\xee\x47\xd7\x85\xf5\
\xee\x4d\x89\xca\x72\xab\x18\x4a\x89\xfa\x8a\x52\xbd\xf8\x67\x28\
\xc9\x4c\x2c\xc1\xec\xe5\xf3\xbe\x68\x24\xae\x73\xc9\x23\x1d\xbd\
\x2f\x9f\xf9\x64\x25\xe1\x82\x25\xd1\xf3\x23\xf6\x73\xb7\x6f\x7d\
\xa0\x6c\x7c\x74\x44\x3b\x36\x3e\x5e\xe8\xff\x7d\x4c\xf4\x4f\x5f\
\xa7\xb0\xc4\x84\xac\xd7\xa2\xb7\xd4\xc8\xb3\xb1\x74\xe9\xb5\xa0\
\x7e\xf7\x9e\x47\xcc\x17\x2f\x7c\xf3\xeb\xe8\xad\x84\x45\x80\x1f\
\xae\xea\x3b\x6f\x5b\xa7\x19\x0f\x8f\x7e\x7d\x79\x87\x65\xe0\xe9\
\xfe\xae\x03\xe2\x7d\x95\x83\xbb\x53\x73\xd7\xe6\x03\x63\x97\xd4\
\xa9\xf9\x10\xa1\x48\x18\xd9\x6a\x47\xb2\x3a\x75\xbf\x04\xb7\x7d\
\x76\xf8\x95\x3d\xd2\x8a\xc2\x4a\xd4\x50\x53\xb0\x18\x9e\xbb\x7f\
\x43\xe0\xa1\x13\x87\xde\xff\x14\xe0\xe4\x91\x8f\xbf\xf2\x38\x03\
\xc7\xc3\x8a\x22\x44\xe6\x67\x48\xc6\xa3\x00\x58\x9d\x1b\x44\x8b\
\x94\xf4\x4f\x27\x5d\xbd\x2b\x66\xfc\xd4\xd1\xe7\xbe\x70\xe9\x52\
\x3f\xcf\x2b\xe9\x63\xcb\x27\x75\x06\x81\x6f\x67\x2a\x59\xbf\xb9\
\x21\xaf\xa8\x2c\x36\xcc\xf4\x5f\x93\x38\xd2\xb3\xff\xd6\xeb\xe8\
\x7e\xab\xe7\x14\x80\x06\xc0\x59\x1a\xfa\x32\x7a\x5d\xdb\x7b\xf2\
\xc4\x1b\x37\xac\xfc\xa0\xb7\xe7\x86\xb1\xb8\xf9\x2e\x02\xbe\x7e\
\x3e\x3a\x9d\xe7\x83\x17\x8e\x1e\xe9\x04\x4e\xe5\xa2\x88\x4e\x6a\
\xaf\xca\x65\x16\xa2\xd1\xcc\x76\x23\x91\x48\xee\xc9\x89\xc5\xfe\
\xe9\x17\x14\x6a\xf3\x78\xfe\x80\x9f\x48\x24\x82\xc5\x62\xa1\xb5\
\xbd\xa5\x3e\xe7\x18\xa8\xb6\xd9\xec\x00\x84\x23\x61\x8a\xa5\xe2\
\xff\x74\xba\x04\x35\x9d\x06\xe0\xb1\xe3\x67\x58\x48\x26\x39\x7b\
\x6c\x1f\x00\xb2\x2c\x13\x0c\x06\xdd\xc0\x90\x98\xe5\xee\xbd\x73\
\x5b\x03\xb1\x58\x0c\x01\x21\xe7\xb4\xd6\xb3\x8b\x5a\xcf\x2e\x8c\
\xb2\x05\x80\x44\x22\x9e\x69\x63\x21\x6a\xea\xef\xa6\xaa\xce\x03\
\x40\x91\xa1\x08\x80\x7b\x1a\xef\x05\xd8\x92\x8b\x02\x78\xd8\xe1\
\xa8\xc8\x73\xf5\x5e\x5b\x13\xa3\x3f\x7d\x0f\xc0\x44\xc0\x07\xaa\
\xca\xe2\x42\x8a\x78\x2c\x42\x68\x4e\x01\xc0\x77\x69\x90\x77\x0e\
\xec\xcc\xd5\x38\x9d\x95\x00\x2f\x02\x88\xad\xed\x2d\xf5\x06\x43\
\x11\x5a\xad\x16\xb3\xd9\x9c\x23\xd9\x2d\x95\x74\x35\x6f\x67\x64\
\xb0\x1f\x93\xc5\x41\x5a\x55\xb3\x39\xa8\x94\x96\x99\x19\x1d\x1a\
\xa0\xab\x79\x3b\x95\x0e\x37\x00\x92\x24\x65\xe3\x30\xd3\xda\xde\
\x52\x2f\x02\xee\x72\xab\x0d\x00\x45\x51\x90\x24\x09\x49\x92\x10\
\x45\x11\x57\xc5\x46\xba\xf7\x37\xe6\xc4\x45\x41\xa0\xd8\x68\x62\
\x74\x68\x80\xee\xfd\x8d\xb8\x2a\x36\xe6\xf8\x8a\x92\xd9\x85\xb1\
\xd4\x08\xe0\x16\x81\x2d\x77\x6c\xda\x4c\x2a\x95\x52\x35\x9a\x02\
\xe6\xe6\xe6\x50\x14\x05\x83\xc1\x80\x24\x49\xd4\xb9\x1b\xe8\xf3\
\x36\x31\x32\xd8\x4f\xd9\x3a\x3b\xe3\xc3\x17\xe9\xf3\x36\x51\xe7\
\x6e\xc8\x09\x2a\x8a\xc2\xe2\xe2\x02\xe1\x70\x58\xad\xae\xaa\x06\
\x68\xd4\x24\x12\x89\xad\x36\x9b\x8d\xd7\x3b\x5f\x15\x6e\x75\x93\
\x3c\x25\x49\x06\x07\x61\x47\xc9\x15\xfa\xde\xbd\x72\xd3\x0b\xf7\
\xec\x33\xcd\xc4\xe3\x89\x12\x8d\x4e\xa7\xdb\xeb\x76\x67\x72\x6a\
\x6f\x3b\x8c\xa0\xaa\xa8\x08\x80\x0a\x82\x00\xaa\xba\xd4\x64\x4b\
\x85\x2c\x07\xaa\xaa\xaa\xf0\xf9\x7c\x08\x42\xe6\x12\x9f\x7e\xfb\
\x4d\x6a\x6b\x6b\xd1\xeb\x75\xcd\x1a\x80\x60\x30\x98\x3d\x36\x06\
\xfc\x7e\xff\x6a\x7e\xb7\xd4\xd4\xd4\x00\x60\x32\xc9\xd9\xc3\x95\
\x59\x79\x49\x4b\x5c\x4e\x36\xc9\x26\x56\x0b\x59\x96\x33\x1f\xcb\
\x58\x86\xd1\x58\x8a\xd1\x68\xcc\x9b\xd7\x2c\x7f\x29\xd4\x16\xe2\
\xf1\x78\x58\x0b\xe4\x09\xb7\x79\x0f\xfe\xaf\xe2\xd6\xf6\x96\x9b\
\xd6\x68\x00\xa6\xa6\xa6\xd4\x43\xde\xe7\x85\xb5\x70\x3a\x31\x31\
\xa1\x02\x82\xf0\xc4\x93\x8f\xef\xb3\xdb\x6d\x67\x59\x43\x4c\x4c\
\x4c\xee\xfc\x1b\x0f\xd8\x70\x1b\x27\x6e\xb0\x4a\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\xd0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\
\x0d\xd7\x01\x42\x28\x9b\x78\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd6\x01\x05\x0d\x1e\x16\x2a\x40\x95\xcd\x00\x00\x05\x5d\x49\x44\
\x41\x54\x38\xcb\x8d\x95\x5d\x6c\x54\xc7\x15\xc7\x7f\x33\xf7\xde\
\xfd\x5e\x7b\xd7\x4b\xfc\xb1\xc4\x26\x36\x35\xb8\x24\xc1\xc1\x5e\
\x27\x0d\x21\x98\xd0\xa4\x69\x95\x14\x35\x44\x4d\xa5\xc4\x4d\x8b\
\x88\xa5\x7e\xa8\xe9\x53\xfa\x56\x59\x4d\x9e\xe8\x53\x55\xa9\x6a\
\x25\x2b\xb4\x2e\x8b\xd2\x56\x6d\x20\xa1\x2f\xc5\x0a\x15\x6d\x6a\
\x0a\xac\x1d\x30\x06\x4c\x83\x03\x98\x85\xf5\xae\xd7\xbb\x5e\xef\
\xf7\xbd\xbb\xf7\xf6\x21\xbb\xc6\x4e\xab\xaa\xe7\x69\x74\x66\xce\
\xef\xfc\x75\xce\x9c\x19\xc1\xff\x61\x3b\x5f\xfe\x69\x5c\x77\x07\
\x9b\xd7\xb8\xe2\x91\xd1\xa1\xd6\xff\x15\xa3\x7e\xd6\x11\x1a\x0e\
\xf7\x03\x44\x46\x87\x26\xeb\x3e\xdd\x1d\x6c\x7e\xe9\xd1\x66\xf2\
\xc5\x12\x7a\x41\x67\x7c\xae\xd4\xf2\x99\xf3\x47\x81\x57\xd6\xc6\
\xc8\x75\xd0\xd7\x8e\x7c\x13\x3d\x7f\x6e\x53\xb3\xeb\x5c\x68\x38\
\x7c\x60\x7d\x4a\x93\x1b\xd1\x24\x46\x45\x5f\x05\x86\x86\xc3\x03\
\xc2\x28\xfc\xe5\x41\xbf\xd9\x0d\x9c\xac\x8b\x02\x10\xab\xd0\x83\
\x63\x03\x4d\x32\xfb\xe1\x1b\xaf\x0e\xda\xe6\xa3\x71\x7e\x79\x6a\
\x61\x15\xf9\xc5\x1d\xed\x6c\x09\x28\x9c\x3a\x7b\x8d\x87\xba\x36\
\xf0\xa7\x8b\xd9\x7b\x7b\x9d\x82\x4d\xf7\x05\x88\xa6\x8a\x9c\xbc\
\x9e\x5f\x89\x8c\x0e\x35\xae\x07\x1f\x38\x9c\xf9\xfa\xae\x76\x6f\
\x57\xd0\x2f\x02\x3e\x0f\xc2\x32\x51\xa5\xc5\xcd\xd8\x0a\xa9\xe5\
\x3c\x91\x99\x5b\x08\x21\xd0\x54\x85\xe6\x26\x2f\xb9\x95\x14\xfe\
\xc0\x06\x12\xc9\x2c\x5d\x6d\x4d\xb4\x78\xed\x1c\x9d\x4a\x1a\xa9\
\x42\xe5\xf1\xc8\xe8\xd0\xe4\xbd\x52\xa8\xb6\xef\xff\xf9\xf4\xd5\
\xf4\x06\x9f\x13\xb0\x58\xce\x96\x49\xa4\x4b\x4c\xcd\xdc\x66\x7a\
\xf6\x0e\x0e\x4d\xc3\xa1\x69\x68\x52\x21\x93\x29\x62\x09\x17\xe9\
\x74\x91\x56\x7f\x03\x7a\xc5\xe2\xed\x33\x0b\x66\x1d\xba\x4e\xf1\
\xa7\x35\xfe\x6d\x28\xe0\x30\xcf\x8d\x1c\xd8\x25\x92\xe9\x3c\x4b\
\xe9\x1c\x97\x66\xef\xd0\x11\xf4\xd3\xd5\x71\x1f\x00\xa5\xb2\x41\
\x22\x99\xe5\xc6\xed\x24\x52\x08\x3a\x83\x01\x4e\x4c\xce\x5b\x8b\
\xba\x3a\x12\x19\x1d\x7a\xeb\xbf\x36\x4f\x29\x2e\x05\x37\xb5\x78\
\xb0\x4c\x0b\x55\x91\x78\xdd\x0e\x76\x0f\x74\xe3\x73\x49\xeb\x83\
\x0f\xa7\xaa\xff\x8c\xcc\x90\x4e\x2d\xe3\xb0\x69\x3c\xd6\xdb\x89\
\x43\xd3\x48\x65\x0a\x0c\x6e\x76\x0b\xaa\x95\x57\xd6\xb2\x44\x68\
\x38\xfc\x77\x60\x17\x40\x4f\x47\x13\x2f\x0f\x76\xa2\x58\x26\xd9\
\x5c\x89\x72\xb9\x42\x26\x93\x34\xc7\x4e\x47\xcf\x54\x35\xf7\xf3\
\x91\xd1\xa1\xe5\xbd\xfb\x7e\xf8\xd2\xd7\xbe\xfa\x74\xd8\xeb\xf1\
\x6b\x8a\x22\x29\x14\x74\xda\x9a\xbd\xbc\x75\xec\xea\x5a\xee\x9b\
\x4a\xb0\x7f\xff\xe1\xd7\xbf\xb4\x91\x67\x7a\x5b\x78\x6a\x7b\x10\
\x01\xc4\xe2\x19\x0a\x85\x32\x5b\x3a\x03\xfc\xfc\xf7\x13\xc5\xb2\
\xcd\x1f\x8a\x8c\x0e\x2d\x03\xdc\xb8\x76\xf6\xb2\xea\xb8\xbf\xf5\
\xd1\xfe\xed\x03\xdb\xb6\x6e\xc4\xed\xb4\x63\xd7\x34\xb6\xb7\xb8\
\xd8\x11\x74\xb2\x23\x68\x63\x32\x5a\x1c\x94\x00\x1d\xc1\x20\x86\
\xa1\x12\x4f\x16\xb8\x3e\x97\x60\x71\x31\xcb\xc2\xc2\x0a\x96\x69\
\x92\x57\x1a\x1d\x75\x68\xdd\xb2\x99\xa5\x23\x85\x42\x8e\xc6\x06\
\x27\xa5\x82\xc1\xb5\x7f\xc5\x59\x59\xd1\xa9\x56\x54\x7c\x8d\x81\
\x7b\x35\x9e\xba\x30\x6b\xc5\x62\x8b\x2c\xa5\xf2\xa4\xd2\x79\xaa\
\x15\x13\xbb\x4d\x45\x48\x81\xcd\x2c\x19\xa1\xe1\xb0\x6f\x2d\xd8\
\xe5\xf5\x3f\xbe\x6d\xeb\x03\xe8\xe5\x0a\xb1\xd8\x32\x1e\xb7\x9d\
\x06\x8f\xc4\x32\xf3\x56\x3c\x7e\xd7\x04\x50\x82\x7d\x2f\xf4\x5c\
\x8a\xeb\x0f\x5f\xbc\x5b\x24\x72\x73\x99\x3d\x3d\x2d\x58\x55\x0b\
\x4d\x51\xf0\x35\x78\x70\x92\x53\x67\x13\x95\x50\xb0\xff\xc5\x13\
\x77\xa7\xde\x2d\xed\xdc\xff\xe3\xcd\x4f\x86\x7a\xfe\xf8\x58\xff\
\x83\xda\x9d\x68\x9a\x62\xde\xc0\xeb\x71\x70\xf8\xbd\xf1\xfc\xdf\
\xe2\x76\xdb\xc5\x05\x5d\x00\x6f\xae\xbb\x6e\xcf\xbd\xf0\xfa\x3e\
\xfb\xe6\x9d\xc7\xbf\xbc\xad\x4d\x48\x21\x90\x42\xf0\x48\x6f\x3b\
\xff\x98\x9a\xe1\xe4\xc4\xac\xe5\x75\xd9\x45\x5f\x77\x90\xfd\xcf\
\x3f\x89\x94\x82\xf3\xe7\x6e\x22\x85\x20\xbe\xb4\xc0\xd8\x99\xe8\
\xcc\xd9\xb1\xef\x3c\xfc\x1f\x23\x5d\x7b\x2b\x3e\xd8\x11\x74\xee\
\xf9\x4a\x5f\xa7\x2c\xe4\x74\xa4\x10\x08\x21\xb8\xbf\xdd\x4f\x6b\
\x4b\x23\x52\x0a\xf2\xb9\x32\xd5\x8a\x09\x40\xf4\x76\x1a\x4d\x53\
\x38\x7f\x79\x86\xf7\x3e\xae\x64\x2d\xab\xba\x77\xf2\xed\x6f\x4f\
\x02\xd6\x2a\xb8\xff\xb5\x23\xa7\x7c\x2e\x75\xd7\x77\xf7\x3e\xa0\
\x08\xc3\x26\x2d\xd3\x5a\x05\x4b\x29\x6a\x6b\x50\x15\xc9\xa6\xce\
\x00\x20\x88\xde\x4a\x21\x85\xa0\x8a\xce\xf4\x27\x51\x7e\x37\x9d\
\x31\xcc\x4a\xf9\x89\xa9\x5f\x1f\x8c\xa8\x00\xfd\x07\x7f\x13\x12\
\x96\xb9\xfb\x5b\x4f\x74\x08\xb7\xe6\x91\x86\x59\xc1\xe3\xb3\x23\
\x85\xa4\x54\x30\x3e\xed\x72\x0d\x2e\x85\x20\xbd\x54\xa4\xb5\xad\
\x01\xd3\x2a\xe3\x75\x35\x62\x59\x76\x9a\x1b\x1c\xc8\x6a\x42\x35\
\xf4\x92\x02\x08\x05\x10\xb1\x8f\x8e\x2f\xb4\xf5\x3e\xf7\x85\xb9\
\x78\x6e\xf3\x9e\xcf\xb7\x8b\x7c\x29\xcb\xc8\xfb\x57\x38\x3f\x3d\
\x9d\x5a\x5c\xc9\x39\xfa\xba\x83\xc2\xa6\xa8\xa8\x52\x62\xb3\x49\
\xa6\xe6\xa2\xd6\xc8\xd1\x09\x7d\xfc\x72\x34\xe7\xd2\xb0\xd9\x9c\
\x0e\x31\x39\x3d\x53\xfd\x38\x59\xfc\x45\xd5\x28\x5d\x4d\x5c\x19\
\xbf\xab\xd4\xea\xac\xc4\x2e\x9c\x38\xd6\xb8\xe5\xa9\xed\x91\xf9\
\xec\xd6\x6b\xf1\x02\x4b\xe9\xd4\x47\x13\x63\x3f\xf8\x5e\xae\xf1\
\x21\xfe\x7a\x4b\xef\x7d\x71\xa0\x0b\x2c\x81\x40\x72\xec\xc2\x1d\
\x31\x3b\x33\xf1\xb3\x2b\xc7\x47\x0e\xc5\xbd\xbd\xbe\x4b\xf3\x8b\
\xdd\x97\xe6\x93\xe1\xe9\x3f\xbc\x71\x28\x71\x65\xfc\x06\x50\xad\
\x37\x50\x01\xdc\x40\x5b\xcf\xbe\x91\x1f\x3d\xf2\xea\xaf\xce\x03\
\x83\xc0\x33\xc0\xb3\xa1\xe1\xb0\x35\x77\x3d\x61\x45\x4e\x7f\x62\
\xcd\x5d\x4f\x58\xa1\xe1\xb0\x05\x3c\x0b\x3c\x0d\xec\x06\xfa\x80\
\xcf\x01\xad\x40\x03\x60\xab\x7f\x4d\x16\x50\x51\x34\x67\x69\xf6\
\xfd\x9f\xbc\x03\x9c\x00\x1c\x80\xbd\x96\x14\x00\xff\x46\xcf\xba\
\x01\xac\x29\xd3\x81\x3c\x50\x00\x4a\x40\x05\x30\xd5\x1a\x14\xa0\
\x52\x35\x8a\x45\xa0\x2a\x55\xbb\x6e\x73\x37\x39\x4b\x99\x98\x04\
\x64\x79\x65\xf1\xdd\x6f\x1c\x3a\xb9\xbf\x4e\x34\x0a\x99\x77\x80\
\x78\x5d\x50\x0d\x58\xae\x25\x31\x00\xf3\xdf\xf0\x11\x56\x82\x70\
\x21\x0b\x3b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x7f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x11\x00\x00\
\x0b\x11\x01\x7f\x64\x5f\x91\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xd5\x09\x1a\x0b\x18\x25\x79\xd9\x51\x49\x00\x00\x04\x0c\x49\x44\
\x41\x54\x38\xcb\xa5\x95\x7f\x4c\x94\x75\x1c\xc7\x5f\xcf\x71\x02\
\x29\xe0\xa1\x96\x72\x84\x88\xcd\x69\x56\xc2\x09\xa3\x0d\xee\x16\
\x85\xb8\x16\x0a\xce\x65\x69\xb2\xa6\x16\xcb\x6a\xfd\xe1\x58\x0b\
\x19\xee\xdb\x93\xc4\x51\xff\xd0\x06\xe4\xc8\xd6\x9a\x63\x2d\xcc\
\x2d\x73\xda\x26\xb1\x69\x2d\xd8\x94\x1f\x67\x3b\xa9\x43\x25\x76\
\xbb\x38\x90\x1e\x91\x2e\x40\xb8\x3b\x9e\xa7\x3f\xe4\xb9\x1d\xe7\
\x41\x6d\x7d\xb7\x67\x7b\x3e\xcf\xbe\xcf\x6b\xef\xef\xe7\xf3\x7e\
\x3f\x8f\x44\xc4\x12\x42\x08\xe0\xfd\xb9\xf2\x3d\xa0\x57\x96\xe5\
\x76\xfe\xef\x12\x42\x68\xa3\xb7\x47\xb4\x99\x99\x69\xcd\xe5\xfa\
\x4d\x13\x42\x68\x15\x15\x15\xc5\x80\xf4\x2f\xaf\x4a\x80\x41\x2f\
\x0c\x51\x36\x98\x9b\x3e\x3d\x81\xa6\xaa\xa4\x9a\xcd\xec\xdb\xb7\
\x97\x84\x84\x84\xf3\x80\xb6\x10\x3c\xa7\xbc\x25\xe5\x83\x13\xe7\
\xd4\x9c\xf2\x96\x53\x91\x60\x49\x08\x91\x05\x20\xcb\xf2\x30\x60\
\xb6\xd7\x7d\xc4\xcc\xf4\x3d\x32\x32\x32\x22\x55\x45\x42\xb3\x00\
\xef\xb1\xc3\x3b\x31\x2f\x67\x4d\x4e\x79\x4b\xbb\x0e\x96\xaa\xab\
\xab\x65\xc0\x21\x84\x38\xae\xc3\xad\x56\x2b\x92\xf4\xc0\x81\xd4\
\x28\x82\x1d\x5d\x9f\xed\xa7\xa7\xa7\x87\x83\xf9\xcb\x0a\x2d\xe9\
\xf1\x85\x00\x46\x60\x49\x4c\x4c\xcc\xb1\x37\x0f\x97\x63\x88\x89\
\x7d\x17\xa8\xde\xb2\x65\x0b\x9b\x36\x6d\x44\xd3\x54\x24\x49\xc2\
\x3f\x33\x79\x45\x08\x91\x25\xcb\xf2\xb5\x28\x6a\x19\x18\x18\xc0\
\xe3\xf1\xf0\xe1\xf7\x93\x00\xe6\xd0\xd1\x9a\x9b\x9b\x9d\xa5\x25\
\x3b\x9f\x5c\xba\xf4\x21\x02\x7e\x3f\x9a\xa6\xa1\x69\x5a\x08\x90\
\x64\x4a\xa6\xb6\xd6\x0e\x60\x9e\x6b\x95\x0e\x75\x74\x36\xbd\x44\
\xde\xdb\xa7\xf5\xad\x96\xee\x93\x65\xd7\x42\x60\x21\x44\x4a\x7c\
\x7c\xfc\xaf\xcf\x6f\x2f\x34\x3d\xfe\xc4\x53\x48\x92\x14\x9a\x95\
\x6f\xfc\xee\x7d\xf8\x72\x13\xb5\xf6\x3a\x00\xf3\x05\xef\x86\xd5\
\x80\xe3\x6a\xf3\x2b\xc1\xdc\x37\xbe\x32\x02\x45\xdd\x27\xcb\xda\
\xa3\x0e\x43\x08\x91\x95\x9e\x9e\xfe\xb9\xdb\xed\xce\xd6\x9f\xe5\
\xe6\xe6\x06\xb6\x6d\x2b\x5c\xf2\xf7\x5f\xe3\x68\x9a\xc6\xb2\xc4\
\x24\x5a\x5b\x5b\x69\xea\x8c\xe5\xe7\x86\x17\xb1\xbe\x73\x66\x9e\
\xca\x70\xae\xb4\x88\x9f\x53\x00\x1b\xd0\x5a\x55\x75\x94\xb1\x3b\
\x0a\xd7\x6f\xfe\xc1\xc7\x67\xdd\x7c\x7b\xbc\x98\x82\x23\x67\x17\
\x84\x02\x9a\xf4\x1f\x02\x93\x05\x38\xd6\x5a\x8a\x39\x71\xe1\x06\
\xdf\x54\xe5\x53\xf5\x45\x37\x25\x9b\x03\xb8\x5c\xfd\x96\xb0\x81\
\xea\xfd\x8b\xee\xcb\x70\xd3\x2b\x43\x1e\xaf\x5e\xaf\x4a\x4d\xe3\
\x72\xfd\x2e\x0e\xd4\xb5\xb1\xcb\x12\xcf\xcb\x25\x45\xd4\xd4\xd4\
\xe8\x03\x1d\x05\x66\x17\x35\x7c\x38\x74\xcf\xf6\x8d\x64\x6e\x5e\
\x8f\xa2\x8c\xf1\x49\xcb\x15\x56\xa5\xa6\x51\xb1\x23\x05\xb3\xc9\
\xc8\xfa\x8c\x75\xa4\x3e\x9a\x86\x2c\xcb\x00\x96\x48\x2b\x1a\x16\
\x83\xbe\x5a\x6a\x45\x9d\x50\x48\x34\x99\x28\xb4\x24\xa1\x0c\x79\
\x70\xf5\xfe\x84\xcd\x66\x23\x3e\x2e\x96\x3b\x7f\xde\xe6\xfe\x37\
\x0b\x87\x9e\xdc\xa8\x60\x1d\xfa\xfa\xee\x4c\x0e\xed\x7e\x86\xb6\
\x8b\x3f\xf0\x63\x4f\x3f\x18\x8c\xcc\x06\x83\x21\xaf\xca\xb2\xcc\
\xca\x87\x57\x23\x49\xd2\x82\x70\x43\x24\xf4\xe8\x6b\x56\xf6\x17\
\xe7\x71\xfa\xbb\x76\xba\xfa\x06\xc9\xb3\xd9\x70\xb9\x6e\x11\xf0\
\x4f\x03\xe0\xf3\xf9\x14\xbf\xdf\x9f\x2d\xcb\x32\x2b\x56\x3d\x02\
\x10\x82\xc7\xc5\xc5\x85\xe0\x06\x3d\x45\xca\x90\xc7\x2b\xbf\xf5\
\x2c\x7b\xb6\x67\x73\xea\xcc\x45\x06\x3d\x5e\x8a\x8a\x4b\x70\xb9\
\x6e\xe1\x75\xdf\xa4\xa3\x5f\xc5\x18\x1c\xdd\x50\x5f\x5f\x7f\xd7\
\x6e\xb7\xdf\x18\x19\x19\xb1\xea\xca\x67\xfc\x7e\x0e\x1e\x3c\x40\
\x65\x65\x25\x80\x23\x04\x56\x86\x3c\x8e\x23\x65\x4f\x53\x5a\x90\
\x49\xeb\xb9\xcb\x0c\x7a\xbc\xd8\x9e\x2b\x9c\x07\x0d\x8c\xf6\x5a\
\xa6\x14\x77\x12\xb0\x02\x58\xe9\x74\x3a\x03\x4e\xa7\xb3\x58\x96\
\x65\xcc\xa9\x69\x34\x36\x36\xe2\xf3\xf9\x42\x9d\x30\xea\x77\xe9\
\xeb\xd2\x98\x55\x35\x46\x87\x06\x1f\x80\x4e\xfc\xde\x56\xa0\xb8\
\x2e\x05\xe6\x84\x24\x03\x86\xce\xce\xce\x20\x30\x3c\x31\x31\xb1\
\xb7\xa1\xa1\xe1\xeb\xb1\xb1\x31\x54\x55\xbd\x1a\x08\x04\xec\xa1\
\xe4\xad\x7b\xc1\xae\xed\xc8\x37\x63\xc9\xb6\xf0\x58\x8a\x89\x5f\
\xfa\x6e\x71\xb5\xab\x8b\x8e\x7e\x95\x29\xf7\xa5\xfc\xd1\xbe\x36\
\x25\x62\x26\x6a\x98\x55\x67\x81\x19\x60\x12\x98\x06\x82\xc0\xac\
\x11\x60\xc5\x9a\x94\xad\xe7\x3b\xbc\xbd\x7a\x83\xee\x4d\x4d\xd0\
\xd1\xaf\x12\x67\x98\x2c\x50\x93\x53\xc7\x01\x25\xcc\xf7\x3a\x50\
\xff\xa3\xa8\x73\xf0\xa0\x0e\x9d\x17\xe9\xad\x87\xbe\xb4\x8c\x8d\
\x0c\xf7\xea\x75\x42\x62\xec\xda\xeb\xad\x15\x23\xe1\x31\x0d\x83\
\x4a\x51\x62\xac\xce\x5d\x1a\xa0\xfd\x03\x8c\xf6\xde\xf1\x62\xa0\
\xea\x2a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x4f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\
\x67\x9b\xee\x3c\x1a\x00\x00\x04\xe1\x49\x44\x41\x54\x38\x8d\x8d\
\x94\x6b\x6c\x54\x45\x18\x86\x9f\x99\x33\xe7\xb6\x6d\x81\x82\x6d\
\x97\x3b\xbb\x6c\x2d\x97\x2d\x77\xe4\x2a\x01\x09\x24\xb2\x20\x26\
\x1a\x12\x12\x43\x54\x22\x28\x28\x26\x46\xa4\x5e\x12\x7f\x18\x65\
\x49\x48\xd0\x88\xc8\x1f\xf8\x01\x51\x13\x13\x23\x21\x6c\x00\xc1\
\x72\x11\xa4\x10\x25\x94\x96\x52\xe8\x65\x5b\x0a\x94\x36\x45\x28\
\xb4\xdd\xb6\xdb\x73\x8e\x3f\x76\xdb\xa0\x80\x38\xc9\x9b\xf9\x33\
\xf3\xcc\x97\x79\xbf\xf7\x13\x9e\xe7\xf1\xa4\x15\x88\x44\x0d\x20\
\x0c\xdc\x03\x6a\xe2\xb1\xa2\x27\x5e\x12\x8f\x03\x07\x22\xd1\x19\
\xc0\x3a\x60\x36\x30\x12\xe8\x04\x64\x5a\x97\x81\x13\xc0\xd6\x78\
\xac\xe8\xe6\xff\x02\xa7\xab\xdb\x0c\xac\x0d\x87\xfc\x19\x73\x26\
\x8e\x24\x1c\xf2\x53\x98\xef\x07\xa0\xbc\xba\x89\xb2\xea\x46\x4e\
\x5f\xa8\x4b\x96\x57\x37\x25\x80\xb7\xe2\xb1\xa2\xef\xff\x13\x1c\
\x88\x44\xc7\x02\x31\xdb\x54\xb9\x9b\x5e\x5b\x90\xb1\x78\x56\x3e\
\xc5\xe7\x6a\x28\xaf\x6d\xa2\xa2\xb6\x19\x29\x24\x85\x21\x3f\xe1\
\x50\x1e\xf3\xa7\x06\x38\xf4\xfb\x55\xa2\xbb\x8f\xb5\x25\xba\x92\
\x27\x80\x15\xf1\x58\x51\xc7\x43\xe0\x40\x24\x6a\x02\x95\xe3\x82\
\x79\x23\xbf\xf9\xf0\x45\x71\xae\xbc\x81\xad\x7b\x7f\xa3\x3b\xe9\
\xa0\x94\x86\xa6\x49\xa4\x94\x08\x21\x10\x02\x32\x6d\x83\x77\x57\
\xce\x66\x7c\x30\x87\x77\xa2\xfb\xbb\xca\xaa\x1b\xf7\xc6\x63\x45\
\x6f\x3c\x0a\xbc\xcd\xb6\xf4\x35\x07\xb7\xbf\xee\xdb\xba\xe7\x24\
\x27\xce\xd7\x31\x7d\xfc\x70\x5e\x5a\x38\x9e\x09\x21\x3f\x39\xd9\
\x19\x24\x7b\x1c\x1a\x6f\xb7\x71\xe8\x4c\x15\xfb\x4f\x56\xe2\xb8\
\x1e\x0b\xa6\x06\x58\xb5\x64\x12\xcf\xaf\xdf\xd5\x9e\xe8\x4a\x2e\
\x8b\xc7\x8a\x8e\xf5\x81\xd3\x46\x15\x7f\xb6\x6e\xb1\x4f\xd7\x35\
\x3e\xdf\x75\x1c\xd3\xd0\xd9\xb8\xea\x59\x56\x2c\x2a\x7c\xa4\xb9\
\x3f\xfe\x7a\x89\x3d\xb1\x52\x1c\xd7\xe5\xbd\x95\x33\x69\xb9\xdb\
\xce\xc7\x5f\x1f\x6c\xf6\x3c\x46\xc7\x63\x45\x6d\x32\x7d\xee\xed\
\x49\x05\x83\x7d\xcf\x3d\x13\x62\xdb\x77\xa7\x31\x4d\x1d\xd3\xd4\
\xf1\x80\xc3\x25\xd5\x7c\xb2\xb3\x98\xb5\x5b\x0e\xf0\xc3\x91\x4b\
\x7d\xe0\x85\xd3\x02\xd8\xb6\x89\x6d\x19\xec\x3e\x70\x81\xc5\x33\
\xf3\x99\x3c\x66\x68\x06\x10\x21\xdd\x3a\x00\xb3\xe7\x4e\x0e\x70\
\xa4\xa4\x8a\x1e\xc7\xc3\xd0\x15\xa6\xa1\xb3\xf7\x50\x19\x3b\x7f\
\xfe\x93\xab\xd7\xef\xd0\x96\x70\x38\x5d\x76\xbd\x0f\xac\x34\x89\
\xcf\x36\xb0\x2d\x03\xc7\x13\x9c\xab\xb8\xc9\xbc\x69\xa3\x7d\xc0\
\x4c\x00\x19\x88\x44\x2d\x60\xc4\x84\xfc\xc1\x54\xc4\x9b\xd1\x75\
\x0d\x43\x57\x18\x86\x42\x57\x1a\xa6\xa1\x63\x99\x06\x3e\xdb\x60\
\xf5\xd2\x49\x7d\xe0\x8b\xb5\x2d\xd8\x96\x89\x65\x19\x58\xa6\x41\
\x5d\xe3\x5d\x0a\x43\x7e\x21\xa5\x98\xd7\x5b\xf1\x44\x20\x51\x98\
\xef\xa7\xb2\xae\x05\xa5\x34\x74\x5d\xf5\xc9\x30\x52\xdf\xb2\x76\
\xf9\x64\x26\x3f\x9d\x07\xc0\xb5\xa6\x7b\xc4\x4a\xea\xb0\x2d\x03\
\xdb\x34\x30\x4d\x9d\xfa\xa6\xfb\x8c\x1d\x95\x83\xe7\x79\x63\x01\
\x14\xd0\x06\x48\x21\x04\x52\x93\x28\x4d\x43\x69\x12\x3d\xfd\x80\
\xa1\x2b\x5e\x98\x1b\x62\xda\x98\x54\x40\xea\x9b\xee\xb3\xf7\xe8\
\x55\x74\x43\x47\x48\x89\x94\x02\x04\xe8\x4a\xa5\x5a\x11\xd1\xd9\
\x5b\xf1\x65\x40\x55\xd4\x36\x13\x0e\xe6\x21\xa5\x44\xd3\x52\x52\
\x9a\x86\x52\x1a\x13\x43\xb9\x7d\x5f\x50\x5c\x7a\x13\x5d\xd7\xb1\
\x2d\x03\xcb\xd4\x31\x0c\x1d\x5d\x29\x46\x0f\xcb\xe6\xea\xb5\x16\
\x84\x14\x17\x00\x54\x3c\x56\xe4\x06\x22\xd1\xca\x8a\x9a\x5b\x13\
\xc7\x05\x73\x29\xb9\x74\x03\x29\x53\x61\xd0\xa4\x40\xd3\x24\x3b\
\xf6\x5d\xc4\x67\x1b\x58\x96\x91\x82\xda\x06\x3d\x8e\x8b\x00\x1c\
\xc7\x25\xa9\x24\xc1\x21\xfd\xa9\xa8\x6e\x74\x1d\xc7\x3d\xf9\x60\
\x57\x9c\x38\x55\x5a\xef\xcc\x9f\x1a\xc0\x32\x54\x2a\x39\x90\x4e\
\x99\xe0\x95\x45\x63\x58\x13\x09\xf3\xea\xa2\x02\xb2\xb3\x2c\x7c\
\xb6\x89\xae\x34\xa4\x26\x11\x52\x60\x1b\x8a\xc2\x60\x0e\xc5\x67\
\xab\x3a\x81\x92\x07\xc1\x5b\x4a\x4a\xeb\x13\x67\xcb\x1b\x58\xbd\
\x7c\x0a\x00\x1e\xe0\xe1\xe1\x79\x1e\xb9\xd9\x3e\xfc\x03\x7d\xe4\
\x0e\xb0\xc1\x73\x49\x74\x76\xd3\xd3\xe3\xe0\xba\x2e\x9e\xeb\xb1\
\x74\x56\x90\x53\x17\xea\xbd\xf3\x95\x37\x2a\x80\xc3\x7d\xe0\xf4\
\xe8\x5b\xff\xe9\xb7\xbf\x74\xcc\x0c\x0f\x63\x4a\xc1\x60\x5c\xd7\
\xc5\x75\x3c\x5c\xd7\x4d\xbd\x92\x5e\x5d\xdd\x3d\x24\x12\x5d\x74\
\x76\x25\xe9\x4e\xf6\x50\x30\xac\x3f\x01\x7f\x16\x1f\x7d\x15\xeb\
\x70\x1c\x77\x45\xef\xac\xfe\xc7\x74\x0b\x2e\x8d\x1e\x99\x51\x38\
\x72\xde\xb6\xf7\x97\x19\xe7\xaf\xdc\xe2\xa7\xe3\x95\x08\x4d\xc3\
\x32\x7b\x8d\x52\x28\xa5\x21\x85\x40\x69\x82\xc5\x53\x87\x13\xf0\
\xf7\x63\xfd\xe6\x7d\x1d\x7f\x5c\x6a\xd8\x58\x7b\x60\xd3\x8e\x5e\
\xd6\xbf\xc7\x66\xa6\x10\x62\x7b\x56\x86\xf9\xf2\x17\x1b\x96\x64\
\x4c\x1f\x37\x94\xf3\x57\x6e\x71\xad\xf9\x3e\x37\x5a\xda\xd1\x75\
\x8d\x51\xfe\x01\x8c\xc8\xcb\xa2\x30\x30\x88\x33\x65\x0d\xde\xa6\
\x2f\x0f\x74\xb4\x77\x74\x7f\xf0\x20\xf4\x21\x30\x29\xc3\x32\xfd\
\xb3\xde\x8c\xd8\xd9\x23\x76\xcc\x99\x12\xf4\xcd\x9f\x16\x32\xc3\
\xa3\xf3\x44\xfe\x88\x41\x00\x54\x37\xfc\x45\x79\xcd\x2d\xf7\xe8\
\x99\x2b\xc9\xb3\xa5\x35\xb5\xb7\xab\x8e\x6d\x68\xad\x3e\x76\x19\
\x68\xf5\x3c\xaf\xed\x91\x60\x21\x44\x36\x30\x10\x18\xa4\x67\xe4\
\x0c\xe9\x17\x98\xbb\xd0\xc8\xcc\x09\x9b\x59\x4f\x15\xa0\x32\x72\
\x04\x8e\xeb\x74\xb6\x36\x27\x5a\x1b\xab\xba\x5b\x1b\x2f\xb7\xd6\
\x1c\x3f\x0d\xde\x6d\xe0\x0e\x70\x1b\xb8\xe9\x79\x5e\xe2\x71\x15\
\x2b\xc0\x06\x7c\x40\x56\x5a\xfd\x80\xcc\xb4\xd9\x09\x52\x69\xbd\
\x9f\xde\xdb\xd3\xea\xf4\x1e\x80\xfd\x0d\xc2\x22\xe7\x20\xf7\x3e\
\x8c\x40\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x05\
\x00\x35\x9b\x52\
\x00\x32\
\x00\x32\x00\x78\x00\x32\x00\x32\
\x00\x04\
\x00\x06\x87\x73\
\x00\x61\
\x00\x70\x00\x70\x00\x73\
\x00\x0a\
\x0b\xeb\xbe\x83\
\x00\x63\
\x00\x61\x00\x74\x00\x65\x00\x67\x00\x6f\x00\x72\x00\x69\x00\x65\x00\x73\
\x00\x07\
\x07\xab\x06\x93\
\x00\x61\
\x00\x63\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\
\x00\x11\
\x01\xa6\xc4\x87\
\x00\x64\
\x00\x6f\x00\x63\x00\x75\x00\x6d\x00\x65\x00\x6e\x00\x74\x00\x2d\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x12\
\x00\x03\x49\x87\
\x00\x73\
\x00\x79\x00\x73\x00\x74\x00\x65\x00\x6d\x00\x2d\x00\x6c\x00\x6f\x00\x67\x00\x2d\x00\x6f\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x10\
\x0c\xbc\x2e\x67\
\x00\x64\
\x00\x6f\x00\x63\x00\x75\x00\x6d\x00\x65\x00\x6e\x00\x74\x00\x2d\x00\x6e\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x0f\xe3\xd5\x67\
\x00\x64\
\x00\x6f\x00\x63\x00\x75\x00\x6d\x00\x65\x00\x6e\x00\x74\x00\x2d\x00\x73\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x0d\x8b\x39\xe7\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x2d\x00\x63\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x0b\xa9\xab\x27\
\x00\x64\
\x00\x6f\x00\x63\x00\x75\x00\x6d\x00\x65\x00\x6e\x00\x74\x00\x2d\x00\x73\x00\x61\x00\x76\x00\x65\x00\x2d\x00\x61\x00\x73\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0d\x58\x3e\xe7\
\x00\x61\
\x00\x70\x00\x70\x00\x6c\x00\x69\x00\x63\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\x00\x2d\x00\x73\x00\x79\x00\x73\x00\x74\
\x00\x65\x00\x6d\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x01\x70\xe1\x87\
\x00\x70\
\x00\x72\x00\x65\x00\x66\x00\x65\x00\x72\x00\x65\x00\x6e\x00\x63\x00\x65\x00\x73\x00\x2d\x00\x73\x00\x79\x00\x73\x00\x74\x00\x65\
\x00\x6d\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0f\xad\xca\x47\
\x00\x68\
\x00\x65\x00\x6c\x00\x70\x00\x2d\x00\x62\x00\x72\x00\x6f\x00\x77\x00\x73\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\
\x00\x00\x00\x20\x00\x02\x00\x00\x00\x01\x00\x00\x00\x0f\
\x00\x00\x00\x48\x00\x02\x00\x00\x00\x06\x00\x00\x00\x09\
\x00\x00\x00\x2e\x00\x02\x00\x00\x00\x02\x00\x00\x00\x07\
\x00\x00\x01\x80\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x0f\
\x00\x00\x01\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x18\x3b\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x03\x9b\
\x00\x00\x00\x5c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x13\xee\
\x00\x00\x00\xae\x00\x00\x00\x00\x00\x01\x00\x00\x07\xdb\
\x00\x00\x00\xfc\x00\x00\x00\x00\x00\x01\x00\x00\x0f\x15\
\x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x0a\x93\
\x00\x00\x01\xb2\x00\x00\x00\x00\x00\x01\x00\x00\x22\x92\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 59.887139 | 129 | 0.725884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45,276 | 0.992155 |
96bb265549d6f2b01a8d5a363f1cef448dfbda43
| 581 |
py
|
Python
|
xinyu/python/node/graphicNode/turtle/base_graphics.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | 1 |
2020-11-06T20:39:11.000Z
|
2020-11-06T20:39:11.000Z
|
xinyu/python/node/graphicNode/turtle/base_graphics.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | 1 |
2021-08-28T02:29:51.000Z
|
2021-08-28T02:29:51.000Z
|
xinyu/python/node/graphicNode/turtle/base_graphics.py
|
xzhuah/codingDimension
|
9b90b93a3a3b8afee28e3a2a571050ca3f86f066
|
[
"Apache-2.0"
] | null | null | null |
# Created by Xinyu Zhu on 2021/6/6, 21:08
from turtle import Turtle
import turtle
def draw_rectangle(pen: Turtle, llx, lly, width, height):
    # Draw a filled rectangle given its lower-left corner and size.
    # (The parameter is named `pen` to avoid shadowing the turtle module.)
    pen.up()
    pen.goto(llx, lly)
    pen.begin_fill()
    pen.down()
    pen.goto(llx + width, lly)
    pen.goto(llx + width, lly + height)
    pen.goto(llx, lly + height)
    pen.goto(llx, lly)  # close the outline back to the starting corner
    pen.end_fill()


if __name__ == '__main__':
    tur = Turtle()
    wn = turtle.Screen()
    wn.title("Turtle Demo")
    wn.setworldcoordinates(0, 0, 500, 500)
    tur.speed(0)  # fastest drawing speed
    draw_rectangle(tur, 0, 0, 500, 500)
    input()  # keep the window open until Enter is pressed
| 22.346154 | 60 | 0.636833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.110155 |
96bc131385537becfa54518e6876cbcdcb1526f8
| 2,439 |
py
|
Python
|
deform_conv/cnn.py
|
lone17/deform-conv
|
3502cedbeae61c961d7e988382c55b9d45fd1873
|
[
"MIT"
] | null | null | null |
deform_conv/cnn.py
|
lone17/deform-conv
|
3502cedbeae61c961d7e988382c55b9d45fd1873
|
[
"MIT"
] | null | null | null |
deform_conv/cnn.py
|
lone17/deform-conv
|
3502cedbeae61c961d7e988382c55b9d45fd1873
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division
from keras.layers import (Input, Conv2D, Activation, BatchNormalization,
                          MaxPooling2D, GlobalAvgPool2D, Dense)
from deform_conv.layers import ConvOffset2D
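# ConvOffset2D (this repo's layer) learns per-position sampling offsets and
# returns the input resampled at those offsets, so the Conv2D that follows it
# effectively becomes a deformable convolution.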
def get_cnn():
inputs = l = Input((None, None, 1), name='input')
# conv11
l = Conv2D(32, (3, 3), padding='same', name='conv11')(l)
l = Activation('relu', name='conv11_relu')(l)
l = BatchNormalization(name='conv11_bn')(l)
# conv12
l = Conv2D(64, (3, 3), padding='same', strides=(2, 2), name='conv12')(l)
l = Activation('relu', name='conv12_relu')(l)
l = BatchNormalization(name='conv12_bn')(l)
# conv21
l = Conv2D(128, (3, 3), padding='same', name='conv21')(l)
l = Activation('relu', name='conv21_relu')(l)
l = BatchNormalization(name='conv21_bn')(l)
# conv22
l = Conv2D(128, (3, 3), padding='same', strides=(2, 2), name='conv22')(l)
l = Activation('relu', name='conv22_relu')(l)
l = BatchNormalization(name='conv22_bn')(l)
# out
l = GlobalAvgPool2D(name='avg_pool')(l)
l = Dense(10, name='fc1')(l)
outputs = l = Activation('softmax', name='out')(l)
return inputs, outputs
def get_deform_cnn(trainable, channel_wise=True):
inputs = l = Input((None, None, 1), name='input')
# conv11
l = Conv2D(32, (3, 3), activation='relu', padding='same', name='conv11',
trainable=trainable)(l)
l = BatchNormalization(name='conv11_bn')(l)
# conv12
l_offset = ConvOffset2D(32, channel_wise=channel_wise, name='conv12_offset')(l)
l = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv12',
trainable=trainable)(l_offset)
l = MaxPooling2D((2, 2))(l)
l = BatchNormalization(name='conv12_bn')(l)
# conv21
l_offset = ConvOffset2D(64, channel_wise=channel_wise, name='conv21_offset')(l)
l = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv21',
trainable=trainable)(l_offset)
l = BatchNormalization(name='conv21_bn')(l)
# conv22
l_offset = ConvOffset2D(128, channel_wise=channel_wise, name='conv22_offset')(l)
l = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv22',
trainable=trainable)(l_offset)
l = MaxPooling2D((2, 2))(l)
l = BatchNormalization(name='conv22_bn')(l)
# out
l = GlobalAvgPool2D(name='avg_pool')(l)
outputs = Dense(10, activation='softmax', name='fc1',
trainable=trainable)(l)
return inputs, outputs
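

# Usage sketch (illustrative addition, not part of the original module):
# builds a trainable model from the layer graph above. Assumes the standard
# keras.models.Model API and one-hot labels for the 10-way softmax output.
if __name__ == '__main__':
    from keras.models import Model

    inputs, outputs = get_deform_cnn(trainable=True)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()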
| 32.959459 | 84 | 0.622796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.199262 |
96bc9e10f5eba6df7448344bf718f39170c04f04
| 1,861 |
py
|
Python
|
perf/unit/ledger_rest.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | null | null | null |
perf/unit/ledger_rest.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | 30 |
2018-03-18T05:58:32.000Z
|
2022-01-19T23:21:31.000Z
|
perf/unit/ledger_rest.py
|
jancajthaml-openbank/e2e
|
a2ef84b6564022e95de76438fc795e2ef927aa2b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from unit.common import Unit
from helpers.eventually import eventually
from helpers.shell import execute
import os


class LedgerRest(Unit):
def __init__(self):
(code, result) = execute([
"systemctl", "start", 'ledger-rest'
], silent=True)
assert code == 0, str(result)
def __repr__(self):
return 'LedgerRest()'
def teardown(self):
@eventually(5)
def eventual_teardown():
(code, result) = execute([
'systemctl', 'stop', 'ledger-rest'
], silent=True)
assert code == 0, str(result)
eventual_teardown()
def restart(self) -> bool:
@eventually(2)
def eventual_restart():
(code, result) = execute([
"systemctl", "restart", 'ledger-rest'
], silent=True)
assert code == 0, str(result)
eventual_restart()
return self.is_healthy
def reconfigure(self, params) -> None:
d = dict()
if os.path.exists('/etc/ledger/conf.d/init.conf'):
with open('/etc/ledger/conf.d/init.conf', 'r') as f:
for line in f:
(key, val) = line.rstrip().split('=')
d[key] = val
for k, v in params.items():
key = 'LEDGER_{0}'.format(k)
if key in d:
d[key] = v
os.makedirs('/etc/ledger/conf.d', exist_ok=True)
with open('/etc/ledger/conf.d/init.conf', 'w') as f:
f.write('\n'.join("{!s}={!s}".format(key,val) for (key,val) in d.items()))
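        # Property access below runs the retrying health check, asserting the
        # service still reports SubState=running after the config rewrite.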
self.is_healthy
@property
def is_healthy(self) -> bool:
try:
@eventually(10)
def eventual_check():
(code, result) = execute([
"systemctl", "show", "-p", "SubState", "ledger-rest"
], silent=True)
assert "SubState=running" == str(result).strip(), str(result)
eventual_check()
        except Exception:
return False
return True
| 23.858974 | 80 | 0.587319 | 1,669 | 0.89683 | 0 | 0 | 718 | 0.385814 | 0 | 0 | 361 | 0.193982 |
96bcc512ded27d54238d89fca3c8655f2d09789e
| 1,431 |
py
|
Python
|
pythonneat/neat/Population.py
|
SananR/PythonNEAT
|
951615b89d8211a22e147bc03446bf597576a6fc
|
[
"MIT"
] | 2 |
2020-06-08T19:39:45.000Z
|
2022-01-20T18:21:38.000Z
|
pythonneat/neat/Population.py
|
SananR/PythonNEAT
|
951615b89d8211a22e147bc03446bf597576a6fc
|
[
"MIT"
] | null | null | null |
pythonneat/neat/Population.py
|
SananR/PythonNEAT
|
951615b89d8211a22e147bc03446bf597576a6fc
|
[
"MIT"
] | null | null | null |
from pythonneat.neat.Species import Species
import pythonneat.neat.Speciation as Speciation
import pythonneat.neat.utils.Parameters as Parameters
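# Module-level registry of the Species currently alive in the population.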
current_genomes = []
def add_genome(genome):
"""Adds genome to the species list based on its
compatability distance to already existing species
Inputs:
genome: The genome to add. type: Genome
"""
for specie in current_genomes:
first = specie.get_champion()
if Speciation.compatibility_distance(genome, first) < Parameters.COMPATABILITY_THRESHOLD:
specie.add_genome(genome)
return
s = Species()
s.add_genome(genome)
current_genomes.append(s)
return
def remove_genome(genome):
for specie in current_genomes:
if genome in specie.genomes:
specie.remove_genome(genome)
def cleanup_species():
for specie in current_genomes:
if specie.get_average_fitness() - specie.prev_fitness >= Parameters.SPECIES_STAGNATE_MIN_IMPROVEMENT:
specie.consec_stagnate = 0
specie.prev_fitness = specie.get_average_fitness()
else:
# Stagnate
specie.consec_stagnate += 1
if specie.consec_stagnate >= Parameters.SPECIES_STAGNATE_GEN_COUNT:
specie.reproduce = False
def population_size():
pop = 0
for specie in current_genomes:
for _ in specie.genomes:
pop += 1
return pop
| 28.62 | 109 | 0.678546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.12369 |
96be495bd3261e63c1a53206e1ecae309a118594
| 387 |
py
|
Python
|
container_service_extension/pksclient/api/__init__.py
|
tschoergez/container-service-extension
|
e1fbaf7e9c242a416d3f580880c1051286847cfd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
container_service_extension/pksclient/api/__init__.py
|
tschoergez/container-service-extension
|
e1fbaf7e9c242a416d3f580880c1051286847cfd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
container_service_extension/pksclient/api/__init__.py
|
tschoergez/container-service-extension
|
e1fbaf7e9c242a416d3f580880c1051286847cfd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from container_service_extension.pksclient.api.cluster_api import ClusterApi
from container_service_extension.pksclient.api.plans_api import PlansApi
from container_service_extension.pksclient.api.profile_api import ProfileApi
from container_service_extension.pksclient.api.users_api import UsersApi
| 38.7 | 76 | 0.881137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.113695 |
96beda9b3aae1f2d6cee27edea34723ea5136c59
| 1,733 |
py
|
Python
|
examples/applications/clustering/agglomerative.py
|
SahanJayasinghe/sentence-transformers
|
0ec07c6b2a996a5998129d2168ccafface49877a
|
[
"Apache-2.0"
] | 2 |
2021-08-24T13:28:33.000Z
|
2021-08-24T13:28:42.000Z
|
examples/applications/clustering/agglomerative.py
|
SahanJayasinghe/sentence-transformers
|
0ec07c6b2a996a5998129d2168ccafface49877a
|
[
"Apache-2.0"
] | null | null | null |
examples/applications/clustering/agglomerative.py
|
SahanJayasinghe/sentence-transformers
|
0ec07c6b2a996a5998129d2168ccafface49877a
|
[
"Apache-2.0"
] | null | null | null |
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
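# On unit-length vectors, squared Euclidean distance equals 2 - 2 * cosine
# similarity, so the Euclidean-based clustering below behaves like a
# cosine-similarity clustering.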
# Perform agglomerative clustering with a distance threshold (no fixed cluster count)
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i+1)
print(cluster)
print("")
| 37.673913 | 148 | 0.725332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 771 | 0.444893 |
96bee57e7d78263abb2c0dde497d36d9e3def948
| 1,364 |
py
|
Python
|
generated-libraries/python/netapp/vserver/vserver_aggr_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2 |
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/vserver/vserver_aggr_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/vserver/vserver_aggr_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class VserverAggrInfo(NetAppObject):
"""
Assigned aggregate name and available size.
"""
_aggr_availsize = None
@property
def aggr_availsize(self):
"""
Assigned aggregate available size.
Attributes: non-creatable, non-modifiable
"""
return self._aggr_availsize
@aggr_availsize.setter
def aggr_availsize(self, val):
if val != None:
self.validate('aggr_availsize', val)
self._aggr_availsize = val
_aggr_name = None
@property
def aggr_name(self):
"""
Assigned aggregate name.
Attributes: non-creatable, modifiable
"""
return self._aggr_name
@aggr_name.setter
def aggr_name(self, val):
if val != None:
self.validate('aggr_name', val)
self._aggr_name = val
@staticmethod
def get_api_name():
return "vserver-aggr-info"
@staticmethod
def get_desired_attrs():
return [
'aggr-availsize',
'aggr-name',
]
def describe_properties(self):
return {
'aggr_availsize': { 'class': int, 'is_list': False, 'required': 'optional' },
'aggr_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| 26.230769 | 91 | 0.577713 | 1,316 | 0.964809 | 0 | 0 | 868 | 0.636364 | 0 | 0 | 433 | 0.317449 |
96bfcdd0287b23d40e6c42cd64034c753cbc7300
| 133 |
py
|
Python
|
sample4.py
|
vswamy/python
|
51835bf7cfec894059a41f2929509026fe611119
|
[
"Apache-2.0"
] | null | null | null |
sample4.py
|
vswamy/python
|
51835bf7cfec894059a41f2929509026fe611119
|
[
"Apache-2.0"
] | null | null | null |
sample4.py
|
vswamy/python
|
51835bf7cfec894059a41f2929509026fe611119
|
[
"Apache-2.0"
] | null | null | null |
# Learning Python
list = [1, 2, 3]
# Using a list as a queue
print(list)
list.pop(0)
print(list)
list.append(5)
print(list)
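# Editor's note: a hedged sketch of the idiomatic queue type. list.pop(0)
# shifts every remaining element (O(n)), while collections.deque pops from
# the left in O(1); a distinct name also avoids shadowing the built-in list.
from collections import deque
queue = deque([1, 2, 3])
print(queue)
queue.popleft()
queue.append(5)
print(queue)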
| 9.5 | 23 | 0.691729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.293233 |
7367174cab478d0699640581faa954e034871a9e
| 3,199 |
py
|
Python
|
python/hongong/ch05/05_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/hongong/ch05/05_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/hongong/ch05/05_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
wine = pd.read_csv('https://bit.ly/wine-date')
# wine = pd.read_csv('../data/wine.csv')
print(wine.head())
data = wine[['alcohol', 'sugar', 'pH']].to_numpy()
target = wine['class'].to_numpy()
from sklearn.model_selection import train_test_split
train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42)
print(train_input.shape, test_input.shape)
sub_input, val_input, sub_target, val_target = train_test_split(train_input, train_target, test_size=0.2, random_state=42)
print(sub_input.shape, val_input.shape)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=42)
dt.fit(sub_input, sub_target)
print(dt.score(sub_input, sub_target))
print(dt.score(val_input, val_target))
from sklearn.model_selection import cross_validate
scores = cross_validate(dt, train_input, train_target)
print(scores)
import numpy as np
print(np.mean(scores['test_score']))
from sklearn.model_selection import StratifiedKFold
scores = cross_validate(dt, train_input, train_target, cv=StratifiedKFold())
print(np.mean(scores['test_score']))
splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = cross_validate(dt, train_input, train_target, cv=splitter)
print(np.mean(scores['test_score']))
from sklearn.model_selection import GridSearchCV
params = {'min_impurity_decrease': [0.0001, 0.0002, 0.0003, 0.0004, 0.0005]}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=1)
gs.fit(train_input, train_target)
dt = gs.best_estimator_
print(dt.score(train_input, train_target))
print(gs.best_params_)
print(gs.cv_results_['mean_test_score'])
best_index = np.argmax(gs.cv_results_['mean_test_score'])
print(gs.cv_results_['params'][best_index])
params = {'min_impurity_decrease': np.arange(0.0001, 0.001, 0.0001),
'max_depth': range(5, 20, 1),
'min_samples_split': range(2, 100, 10)
}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
from scipy.stats import uniform, randint
rgen = randint(0, 10)
print(rgen.rvs(10))
print(np.unique(rgen.rvs(1000), return_counts=True))
ugen = uniform(0, 1)
print(ugen.rvs(10))
params = {'min_impurity_decrease': uniform(0.0001, 0.001),
'max_depth': randint(20, 50),
'min_samples_split': randint(2, 25),
'min_samples_leaf': randint(1, 25)
}
from sklearn.model_selection import RandomizedSearchCV
gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
# Exercise: repeat the randomized search with a random splitter
gs = RandomizedSearchCV(DecisionTreeClassifier(splitter='random', random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
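# Editor's note: a hedged follow-up sketch. After the randomized search it is
# useful to inspect the shape of the winning tree (get_depth/get_n_leaves are
# standard DecisionTreeClassifier accessors in scikit-learn >= 0.21).
best_dt = gs.best_estimator_
print('depth of best tree:', best_dt.get_depth())
print('leaves in best tree:', best_dt.get_n_leaves())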
| 28.81982 | 131 | 0.758987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.119725 |
73675fa4c6cc91d3e8f132bfb335856070974495
| 1,016 |
py
|
Python
|
junk/dot_classifier_tf/potential.py
|
jpzwolak/quantum-ml
|
aebe3496516be3bc0fc4392aaf7805ab5faf98dc
|
[
"MIT"
] | 4 |
2018-06-27T17:20:19.000Z
|
2021-05-30T06:21:01.000Z
|
junk/dot_classifier_tf/potential.py
|
jpzwolak/quantum-ml
|
aebe3496516be3bc0fc4392aaf7805ab5faf98dc
|
[
"MIT"
] | null | null | null |
junk/dot_classifier_tf/potential.py
|
jpzwolak/quantum-ml
|
aebe3496516be3bc0fc4392aaf7805ab5faf98dc
|
[
"MIT"
] | 4 |
2018-11-30T20:34:17.000Z
|
2022-02-16T23:06:37.000Z
|
# Module to build a potential landscape
import numpy as np
def gauss(x,mean=0.0,stddev=0.02,peak=1.0):
    '''
    Input:
    x : x-coordinates
    Output:
    f(x) where f is a Gaussian with the given mean and peak value
    Note: the stddev argument is overridden below; the width is tied to the
    grid spacing so that barriers scale with the resolution of x.
    '''
    stddev = 5*(x[1] - x[0])
    return peak*np.exp(-(x-mean)**2/(2*stddev**2))
def init_ndot(x,n_dot):
'''
Input:
x : 1d grid for the dots
    n_dot : number of dots
    Output:
    y : potential values on the grid, with n_dot dots
The potential barriers are modelled as gaussians
'''
# n dots imply n+1 barriers
bar_centers = x[0] + (x[-1] - x[0])*np.random.rand(n_dot+1)
bar_heights = np.random.rand(n_dot+1)
#bar_heights = 0.5*np.ones(n_dot+1)
N = len(x)
y = np.zeros(N)
# no need to optimize here really since the dot number is generally small, the calculation of the gauss function is already done in a vectorised manner
for j in range(n_dot+1):
y += gauss(x-bar_centers[j],peak=bar_heights[j])
return y
| 27.459459 | 155 | 0.629921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.567913 |
73680345e2e353c1eaf1fb045f543678e6921793
| 878 |
py
|
Python
|
src/data/879.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/879.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/879.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
from sys import stdin
input = stdin.readline
from collections import deque
N, Q = map(int, input().split())
tree = [[] for _ in range(N + 1)]
level = [0] * (N + 1)
for _ in range(N - 1):
a, b = map(int, input().split())
tree[a].append(b)
tree[b].append(a)
visited = [False] * (N + 1)
def bfs(st):
global level
q = deque()
q.append([st, 0])
visited[st] = True
while q:
for _ in range(len(q)):
now, lvl = q.popleft()
for next in tree[now]:
if not visited[next]:
q.append([next, lvl + 1])
level[next] = lvl + 1
visited[next] = True
bfs(1)
def solve(a, b):
    # A tree is bipartite: two walkers moving one edge per turn meet at a
    # vertex exactly when their depths have the same parity. For the path
    # 1-2-3, solve(1, 3) -> 'Town' (depths 0 and 2), solve(1, 2) -> 'Road'.
    if abs(level[a] - level[b]) % 2 == 1:
        return 'Road'
    else:
        return 'Town'
for _ in range(Q):
x, y = map(int, input().split())
print(solve(x, y))
| 19.954545 | 45 | 0.490888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.013667 |
736815ffba5524694e4bf07787408fa70f5b7ab8
| 1,614 |
py
|
Python
|
objectfactory/nested.py
|
devinaconley/py-object-factory
|
6c97821feea8c47f7ad909cedbe57938c92761aa
|
[
"MIT"
] | 4 |
2019-05-28T15:20:35.000Z
|
2022-03-18T20:53:57.000Z
|
objectfactory/nested.py
|
devinaconley/py-object-factory
|
6c97821feea8c47f7ad909cedbe57938c92761aa
|
[
"MIT"
] | 3 |
2019-03-17T00:27:28.000Z
|
2019-12-04T16:07:11.000Z
|
objectfactory/nested.py
|
devinaconley/py-object-factory
|
6c97821feea8c47f7ad909cedbe57938c92761aa
|
[
"MIT"
] | null | null | null |
"""
nested field
implements marshmallow field for objectfactory nested objects
"""
# lib
import marshmallow
# src
from .serializable import Serializable
from .factory import create
class NestedFactoryField( marshmallow.fields.Field ):
def __init__( self, field_type=None, **kwargs ):
super().__init__( **kwargs )
self._field_type = field_type
    def _serialize( self, value, attr, obj, **kwargs ):
        """
        dump serializable object within the interface of marshmallow field
        :param value: the Serializable object to dump
        :param attr: name of the attribute being serialized
        :param obj: the parent object the value was pulled from
        :param kwargs: passed through from marshmallow
        :return: serialized dict (an empty dict if value is not Serializable)
        """
if not isinstance( value, Serializable ):
return {}
return value.serialize( **obj._serialize_kwargs )
    def _deserialize( self, value, attr, data, **kwargs ):
        """
        create serializable object with factory through interface of marshmallow field
        :param value: raw dict to deserialize
        :param attr: name of the attribute being deserialized
        :param data: the full raw input payload
        :param kwargs: passed through from marshmallow
        :return: deserialized Serializable instance (None if value is None)
        """
if value is None:
return
if '_type' in value:
obj = create( value )
if self._field_type and not isinstance( obj, self._field_type ):
raise ValueError(
'{} is not an instance of type: {}'.format(
type( obj ).__name__, self._field_type.__name__ )
)
elif self._field_type:
obj = self._field_type()
obj.deserialize( value )
else:
raise ValueError( 'Cannot infer type information' )
return obj
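# Editor's note: a hedged usage sketch; the `register` decorator and `String`
# field are assumed from the surrounding objectfactory package, and the class
# names are illustrative:
#
#     @register
#     class Child( Serializable ):
#         name = String()
#
#     @register
#     class Parent( Serializable ):
#         child = NestedFactoryField( field_type=Child )
#
# Serialized payloads carry a '_type' tag, which lets _deserialize dispatch
# back through the factory's create(); without the tag, the declared
# field_type is instantiated directly.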
| 26.032258 | 86 | 0.57311 | 1,427 | 0.884139 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.351301 |
7368bcef3513f858130a78b597825be9b12f3327
| 1,709 |
py
|
Python
|
spacy/cli/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 2 |
2017-06-23T20:54:31.000Z
|
2022-01-06T08:11:49.000Z
|
spacy/cli/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1 |
2021-03-01T19:01:37.000Z
|
2021-03-01T19:01:37.000Z
|
spacy/cli/__init__.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1 |
2021-06-21T07:17:48.000Z
|
2021-06-21T07:17:48.000Z
|
from wasabi import msg
from ._util import app, setup_cli # noqa: F401
# These are the actual functions, NOT the wrapped CLI commands. The CLI commands
# are registered automatically and won't have to be imported here.
from .download import download # noqa: F401
from .info import info # noqa: F401
from .package import package # noqa: F401
from .profile import profile # noqa: F401
from .train import train_cli # noqa: F401
from .pretrain import pretrain # noqa: F401
from .debug_data import debug_data # noqa: F401
from .debug_config import debug_config # noqa: F401
from .debug_model import debug_model # noqa: F401
from .evaluate import evaluate # noqa: F401
from .convert import convert # noqa: F401
from .init_pipeline import init_pipeline_cli # noqa: F401
from .init_config import init_config, fill_config # noqa: F401
from .validate import validate # noqa: F401
from .project.clone import project_clone # noqa: F401
from .project.assets import project_assets # noqa: F401
from .project.run import project_run # noqa: F401
from .project.dvc import project_update_dvc # noqa: F401
from .project.push import project_push # noqa: F401
from .project.pull import project_pull # noqa: F401
from .project.document import project_document # noqa: F401
@app.command("link", no_args_is_help=True, deprecated=True, hidden=True)
def link(*args, **kwargs):
"""As of spaCy v3.0, symlinks like "en" are deprecated. You can load trained
pipeline packages using their full names or from a directory path."""
msg.warn(
"As of spaCy v3.0, model symlinks are deprecated. You can load trained "
"pipeline packages using their full names or from a directory path."
)
| 44.973684 | 80 | 0.752487 | 0 | 0 | 0 | 0 | 432 | 0.252779 | 0 | 0 | 706 | 0.413107 |
73697b6fc24a0e06b73e768d5f059255782d3e66
| 490 |
py
|
Python
|
code/example code/introducing-python-master/1st_edition/art/panda1.py
|
ChouHsuan-Cheng/Learning_Python_Base
|
857873466463e6b20f24b1e8946c837c318f2536
|
[
"CNRI-Python"
] | null | null | null |
code/example code/introducing-python-master/1st_edition/art/panda1.py
|
ChouHsuan-Cheng/Learning_Python_Base
|
857873466463e6b20f24b1e8946c837c318f2536
|
[
"CNRI-Python"
] | null | null | null |
code/example code/introducing-python-master/1st_edition/art/panda1.py
|
ChouHsuan-Cheng/Learning_Python_Base
|
857873466463e6b20f24b1e8946c837c318f2536
|
[
"CNRI-Python"
] | null | null | null |
from direct.showbase.ShowBase import ShowBase
class MyApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
# Load the environment model.
self.environ = self.loader.loadModel("models/environment")
# Reparent the model to render.
self.environ.reparentTo(self.render)
# Apply scale and position transforms on the model.
self.environ.setScale(0.25, 0.25, 0.25)
self.environ.setPos(-8, 42, 0)
app = MyApp()
app.run()
| 27.222222 | 66 | 0.657143 | 417 | 0.85102 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.267347 |
736a64ec89b619ffc454f1a8592cdcb1f2263f5a
| 16,104 |
py
|
Python
|
btclib/ssa.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
btclib/ssa.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
btclib/ssa.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Elliptic Curve Schnorr Signature Algorithm (ECSSA).
This implementation is according to BIP340-Schnorr:
https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki
Unlike ECDSA, the BIP340-Schnorr scheme supports
messages of size hsize only.
It also uses as public key the x-coordinate (field element)
of the curve point associated with the private key 0 < q < n.
Therefore, for secp256k1 the public key size is 32 bytes.
Arguably, the knowledge of q as the discrete logarithm of Q
also implies the knowledge of n-q as discrete logarithm of -Q.
As such, {q, n-q} can be considered a single private key and
{Q, -Q} the associated public key characterized by the shared x_Q.
Also, BIP340 advocates its own SHA256 modification as hash function:
TaggedHash(tag, x) = SHA256(SHA256(tag)||SHA256(tag)||x)
The rationale is to make BIP340 signatures invalid for anything else
but Bitcoin and vice versa.
TaggedHash is used for both the challenge (with tag 'BIPSchnorr')
and the deterministic nonce (with tag 'BIPSchnorrDerive').
To allow for secure batch verification of multiple signatures,
BIP340-Schnorr uses a challenge that prevents public key recovery
from signature: c = TaggedHash('BIPSchnorr', x_k||x_Q||msg).
A custom deterministic algorithm for the ephemeral key (nonce)
is used for signing, instead of the RFC6979 standard:
k = TaggedHash('BIPSchnorrDerive', q||msg)
Finally, BIP340-Schnorr adopts a robust [r][s] custom serialization
format, instead of the loosely specified ASN.1 DER standard.
The signature size is p-size + n-size, where p-size is the field element
(curve point coordinate) byte size and n-size is the scalar
(curve point multiplication coefficient) byte size.
For secp256k1 the resulting signature size is 64 bytes.
"""
import secrets
from hashlib import sha256
from typing import List, Optional, Sequence, Tuple, Union
from .alias import (
HashF,
Integer,
JacPoint,
Octets,
Point,
SSASig,
SSASigTuple,
String,
)
from .bip32 import BIP32Key
from .curve import Curve, secp256k1
from .curvegroup import _double_mult, _mult, _multi_mult
from .hashes import reduce_to_hlen
from .numbertheory import mod_inv
from .to_prvkey import PrvKey, int_from_prvkey
from .to_pubkey import point_from_pubkey
from .utils import bytes_from_octets, hex_string, int_from_bits
# TODO relax the p_ThreeModFour requirement
# hex-string or bytes representation of an int
# 33 or 65 bytes or hex-string
# BIP32Key as dict or String
# tuple Point
BIP340PubKey = Union[Integer, Octets, BIP32Key, Point]
def point_from_bip340pubkey(x_Q: BIP340PubKey, ec: Curve = secp256k1) -> Point:
"""Return a verified-as-valid BIP340 public key as Point tuple.
It supports:
- BIP32 extended keys (bytes, string, or BIP32KeyData)
- SEC Octets (bytes or hex-string, with 02, 03, or 04 prefix)
- BIP340 Octets (bytes or hex-string, p-size Point x-coordinate)
- native tuple
"""
# BIP 340 key as integer
if isinstance(x_Q, int):
y_Q = ec.y_quadratic_residue(x_Q, True)
return x_Q, y_Q
else:
# (tuple) Point, (dict or str) BIP32Key, or 33/65 bytes
try:
x_Q = point_from_pubkey(x_Q, ec)[0]
y_Q = ec.y_quadratic_residue(x_Q, True)
return x_Q, y_Q
except Exception:
pass
# BIP 340 key as bytes or hex-string
if isinstance(x_Q, (str, bytes)):
Q = bytes_from_octets(x_Q, ec.psize)
x_Q = int.from_bytes(Q, "big")
y_Q = ec.y_quadratic_residue(x_Q, True)
return x_Q, y_Q
raise ValueError("not a BIP340 public key")
def _validate_sig(r: int, s: int, ec: Curve) -> None:
# BIP340 is defined for curves whose field prime p = 3 % 4
ec.require_p_ThreeModFour()
# Fail if r is not a field element, i.e. not a valid x-coordinate
ec.y(r)
# Fail if s is not [0, n-1].
if not 0 <= s < ec.n:
err_msg = "scalar s not in 0..n-1: "
err_msg += f"'{hex_string(s)}'" if s > 0xFFFFFFFF else f"{s}"
raise ValueError(err_msg)
def deserialize(sig: SSASig, ec: Curve = secp256k1) -> SSASigTuple:
"""Return the verified components of the provided BIP340 signature.
The BIP340 signature can be represented as (r, s) tuple
or as binary [r][s] compact representation.
"""
if isinstance(sig, tuple):
r, s = sig
else:
if isinstance(sig, str):
# hex-string of the serialized signature
sig2 = bytes.fromhex(sig)
else:
sig2 = bytes_from_octets(sig, ec.psize + ec.nsize)
r = int.from_bytes(sig2[: ec.psize], byteorder="big")
s = int.from_bytes(sig2[ec.nsize :], byteorder="big")
_validate_sig(r, s, ec)
return r, s
def serialize(x_K: int, s: int, ec: Curve = secp256k1) -> bytes:
"Return the BIP340 signature as [r][s] compact representation."
_validate_sig(x_K, s, ec)
return x_K.to_bytes(ec.psize, "big") + s.to_bytes(ec.nsize, "big")
def gen_keys(prvkey: PrvKey = None, ec: Curve = secp256k1) -> Tuple[int, int]:
"Return a BIP340 private/public (int, int) key-pair."
# BIP340 is defined for curves whose field prime p = 3 % 4
ec.require_p_ThreeModFour()
if prvkey is None:
q = 1 + secrets.randbelow(ec.n - 1)
else:
q = int_from_prvkey(prvkey, ec)
QJ = _mult(q, ec.GJ, ec)
x_Q = ec._x_aff_from_jac(QJ)
if not ec.has_square_y(QJ):
q = ec.n - q
return q, x_Q
# TODO move to hashes
# This implementation can be sped up by storing the midstate after hashing
# tag_hash instead of rehashing it all the time.
def _tagged_hash(tag: str, m: bytes, hf: HashF) -> bytes:
t = tag.encode()
h1 = hf()
h1.update(t)
tag_hash = h1.digest()
h2 = hf()
h2.update(tag_hash + tag_hash + m)
return h2.digest()
def __det_nonce(m: bytes, q: int, ec: Curve, hf: HashF) -> Tuple[int, int]:
# assume the random oracle model for the hash function,
# i.e. hash values can be considered uniformly random
# Note that in general, taking a uniformly random integer
# modulo the curve order n would produce a biased result.
# However, if the order n is sufficiently close to 2^hlen,
# then the bias is not observable:
# e.g. for secp256k1 and sha256 1-n/2^256 it is about 1.27*2^-128
# the unbiased implementation is provided here,
# which works also for very-low-cardinality test curves
t = q.to_bytes(ec.nsize, "big") + m
while True:
t = _tagged_hash("BIPSchnorrDerive", t, hf)
# The following lines would introduce a bias
# k = int.from_bytes(t, 'big') % ec.n
# k = int_from_bits(t, ec.nlen) % ec.n
k = int_from_bits(t, ec.nlen) # candidate k
if 0 < k < ec.n: # acceptable value for k
return gen_keys(k, ec) # successful candidate
def _det_nonce(
m: Octets, prvkey: PrvKey, ec: Curve = secp256k1, hf: HashF = sha256
) -> Tuple[int, int]:
"""Return a BIP340 deterministic ephemeral key (nonce)."""
# The message m: a hlen array
hlen = hf().digest_size
m = bytes_from_octets(m, hlen)
q, _ = gen_keys(prvkey, ec)
return __det_nonce(m, q, ec, hf)
def det_nonce(
msg: String, prvkey: PrvKey, ec: Curve = secp256k1, hf: HashF = sha256
) -> Tuple[int, int]:
"""Return a BIP340 deterministic ephemeral key (nonce)."""
m = reduce_to_hlen(msg, hf)
return _det_nonce(m, prvkey, ec, hf)
def __challenge(m: bytes, x_Q: int, r: int, ec: Curve, hf: HashF) -> int:
# note that only x_Q is needed
# if Q is Jacobian y_Q calculation can be avoided
t = r.to_bytes(ec.psize, "big")
t += x_Q.to_bytes(ec.psize, "big")
# m size must have been already checked to be equal to hsize
t += m
t = _tagged_hash("BIPSchnorr", t, hf)
# if c == 0 then private key is removed from the equations,
# so the signature is valid for any private/public key pair
# if c == 0:
# raise RuntimeError("invalid zero challenge")
return int_from_bits(t, ec.nlen) % ec.n
def _challenge(
m: Octets, xQ: BIP340PubKey, r: int, ec: Curve = secp256k1, hf: HashF = sha256
) -> int:
# The message m: a hlen array
hlen = hf().digest_size
m = bytes_from_octets(m, hlen)
x_Q, _ = point_from_bip340pubkey(xQ, ec)
return __challenge(m, x_Q, r, ec, hf)
def challenge(
msg: String, xQ: BIP340PubKey, r: int, ec: Curve = secp256k1, hf: HashF = sha256
) -> int:
m = reduce_to_hlen(msg, hf)
return _challenge(m, xQ, r, ec, hf)
def __sign(c: int, q: int, k: int, x_K: int, ec: Curve) -> SSASigTuple:
# s=0 is ok: in verification there is no inverse of s
s = (k + c * q) % ec.n
return x_K, s
def _sign(
m: Octets,
prvkey: PrvKey,
k: Optional[PrvKey] = None,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> SSASigTuple:
"""Sign message according to BIP340 signature algorithm."""
# BIP340 is defined for curves whose field prime p = 3 % 4
ec.require_p_ThreeModFour()
# The message m: a hlen array
hlen = hf().digest_size
m = bytes_from_octets(m, hlen)
q, x_Q = gen_keys(prvkey, ec)
# The nonce k: an integer in the range 1..n-1.
k, x_K = __det_nonce(m, q, ec, hf) if k is None else gen_keys(k, ec)
# Let c = int(hf(bytes(x_K) || bytes(x_Q) || m)) mod n.
c = __challenge(m, x_Q, x_K, ec, hf)
return __sign(c, q, k, x_K, ec)
def sign(
msg: String,
prvkey: PrvKey,
k: Optional[PrvKey] = None,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> SSASigTuple:
"""Sign message according to BIP340 signature algorithm.
The message msg is first processed by hf, yielding the value
m = hf(msg),
a sequence of bits of length *hlen*.
Normally, hf is chosen such that its output length *hlen* is
roughly equal to *nlen*, the bit-length of the group order *n*,
since the overall security of the signature scheme will depend on
the smallest of *hlen* and *nlen*; however, ECSSA
supports all combinations of *hlen* and *nlen*.
"""
m = reduce_to_hlen(msg, hf)
return _sign(m, prvkey, k, ec, hf)
def __assert_as_valid(c: int, QJ: JacPoint, r: int, s: int, ec: Curve) -> None:
# Private function for test/dev purposes
# It raises Errors, while verify should always return True or False
# BIP340 is defined for curves whose field prime p = 3 % 4
ec.require_p_ThreeModFour()
# Let K = sG - eQ.
# in Jacobian coordinates
KJ = _double_mult(ec.n - c, QJ, s, ec.GJ, ec)
# Fail if infinite(KJ).
# Fail if jacobi(y_K) ≠ 1.
if not ec.has_square_y(KJ):
raise RuntimeError("y_K is not a quadratic residue")
# Fail if x_K ≠ r
assert KJ[0] == KJ[2] * KJ[2] * r % ec.p, "signature verification failed"
def _assert_as_valid(
m: Octets, Q: BIP340PubKey, sig: SSASig, ec: Curve = secp256k1, hf: HashF = sha256
) -> None:
# Private function for test/dev purposes
# It raises Errors, while verify should always return True or False
r, s = deserialize(sig, ec)
x_Q, y_Q = point_from_bip340pubkey(Q, ec)
# Let c = int(hf(bytes(r) || bytes(Q) || m)) mod n.
c = _challenge(m, x_Q, r, ec, hf)
__assert_as_valid(c, (x_Q, y_Q, 1), r, s, ec)
def assert_as_valid(
msg: String, Q: BIP340PubKey, sig: SSASig, ec: Curve = secp256k1, hf: HashF = sha256
) -> None:
m = reduce_to_hlen(msg, hf)
_assert_as_valid(m, Q, sig, ec, hf)
def _verify(
m: Octets, Q: BIP340PubKey, sig: SSASig, ec: Curve = secp256k1, hf: HashF = sha256
) -> bool:
"""Verify the BIP340 signature of the provided message."""
# try/except wrapper for the Errors raised by _assert_as_valid
try:
_assert_as_valid(m, Q, sig, ec, hf)
except Exception:
return False
else:
return True
def verify(
msg: String, Q: BIP340PubKey, sig: SSASig, ec: Curve = secp256k1, hf: HashF = sha256
) -> bool:
    """Verify the BIP340 Schnorr signature of the provided message."""
m = reduce_to_hlen(msg, hf)
return _verify(m, Q, sig, ec, hf)
def __recover_pubkey(c: int, r: int, s: int, ec: Curve) -> int:
# Private function provided for testing purposes only.
if c == 0:
raise ValueError("invalid zero challenge")
KJ = r, ec.y_quadratic_residue(r, True), 1
e1 = mod_inv(c, ec.n)
QJ = _double_mult(ec.n - e1, KJ, e1 * s, ec.GJ, ec)
assert QJ[2] != 0, "how did you do that?!?"
return ec._x_aff_from_jac(QJ)
# FIXME add crack_prvkey
def _crack_prvkey(
m1: Octets,
sig1: SSASig,
m2: Octets,
sig2: SSASig,
Q: BIP340PubKey,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> Tuple[int, int]:
m1 = bytes_from_octets(m1, hf().digest_size)
m2 = bytes_from_octets(m2, hf().digest_size)
r1, s1 = deserialize(sig1, ec)
r2, s2 = deserialize(sig2, ec)
if r1 != r2:
raise ValueError("not the same r in signatures")
if s1 == s2:
raise ValueError("identical signatures")
x_Q = point_from_bip340pubkey(Q, ec)[0]
c1 = _challenge(m1, x_Q, r1, ec, hf)
c2 = _challenge(m2, x_Q, r2, ec, hf)
q = (s1 - s2) * mod_inv(c2 - c1, ec.n) % ec.n
k = (s1 + c1 * q) % ec.n
return q, k
def _batch_verify(
ms: Sequence[Octets],
Qs: Sequence[BIP340PubKey],
sigs: Sequence[SSASig],
ec: Curve,
hf: HashF,
) -> None:
batch_size = len(Qs)
if len(ms) != batch_size:
errMsg = f"mismatch between number of pubkeys ({batch_size}) "
errMsg += f"and number of messages ({len(ms)})"
raise ValueError(errMsg)
if len(sigs) != batch_size:
errMsg = f"mismatch between number of pubkeys ({batch_size}) "
errMsg += f"and number of signatures ({len(sigs)})"
raise ValueError(errMsg)
if batch_size < 2:
return _assert_as_valid(ms[0], Qs[0], sigs[0], ec, hf)
# BIP340 is defined for curves whose field prime p = 3 % 4
ec.require_p_ThreeModFour()
t = 0
scalars: List[int] = list()
points: List[JacPoint] = list()
for i, (m, Q, sig) in enumerate(zip(ms, Qs, sigs)):
m = bytes_from_octets(m, hf().digest_size)
r, s = deserialize(sig, ec)
KJ = r, ec.y_quadratic_residue(r, True), 1
x_Q, y_Q = point_from_bip340pubkey(Q, ec)
QJ = x_Q, y_Q, 1
c = _challenge(m, x_Q, r, ec, hf)
# a in [1, n-1]
# deterministically generated using a CSPRNG seeded by a
# cryptographic hash (e.g., SHA256) of all inputs of the
# algorithm, or randomly generated independently for each
# run of the batch verification algorithm
a = 1 if i == 0 else 1 + secrets.randbelow(ec.n - 1)
scalars.append(a)
points.append(KJ)
scalars.append(a * c % ec.n)
points.append(QJ)
t += a * s
TJ = _mult(t, ec.GJ, ec)
RHSJ = _multi_mult(scalars, points, ec)
# return T == RHS, checked in Jacobian coordinates
RHSZ2 = RHSJ[2] * RHSJ[2]
TZ2 = TJ[2] * TJ[2]
precondition = TJ[0] * RHSZ2 % ec.p == RHSJ[0] * TZ2 % ec.p
if not precondition:
raise ValueError("signature verification precondition failed")
valid_sig = TJ[1] * RHSZ2 * RHSJ[2] % ec.p == RHSJ[1] * TZ2 * TJ[2] % ec.p
assert valid_sig, "signature verification failed"
def batch_verify(
m: Sequence[Octets],
Q: Sequence[BIP340PubKey],
sig: Sequence[SSASig],
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> bool:
"""Batch verification of BIP340 signatures."""
# try/except wrapper for the Errors raised by _batch_verify
try:
_batch_verify(m, Q, sig, ec, hf)
except Exception:
return False
else:
return True
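# Editor's note: a hedged end-to-end sketch of this module; the message is
# illustrative and the key pair is freshly generated.
if __name__ == "__main__":
    q, x_Q = gen_keys()                       # BIP340 private/public key pair
    sig = sign("Hello, BIP340!", q)           # (r, s) tuple
    assert verify("Hello, BIP340!", x_Q, sig)
    assert deserialize(serialize(*sig)) == sig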
| 30.442344 | 88 | 0.648162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,060 | 0.438292 |
736b9802fb2c5a179b409bf71bdd9ff72225db52
| 998 |
py
|
Python
|
13. REST API using OpenAPI, Flask & Connexions/source_code/test-api/src/test_api/core/pets.py
|
Edmartt/articles
|
93d62086ff141f5646193afb868973e94f33f1e6
|
[
"MIT"
] | 31 |
2020-03-01T20:27:03.000Z
|
2022-02-15T14:53:09.000Z
|
13. REST API using OpenAPI, Flask & Connexions/source_code/test-api/src/test_api/core/pets.py
|
hmajid2301/articles
|
27f38cc6c2dd470d879b30d54d1e804a7d76caab
|
[
"MIT"
] | 24 |
2020-04-04T12:18:25.000Z
|
2022-03-29T08:41:57.000Z
|
13. REST API using OpenAPI, Flask & Connexions/source_code/test-api/src/test_api/core/pets.py
|
Edmartt/articles
|
93d62086ff141f5646193afb868973e94f33f1e6
|
[
"MIT"
] | 52 |
2020-02-29T04:01:10.000Z
|
2022-03-11T07:54:16.000Z
|
import json
def get_all_pets():
pets = read_from_file()
pets_in_store = []
for k, v in pets.items():
current_pet = {"id": k, **v}
pets_in_store.append(current_pet)
return pets
def remove_pet(id):
pets = read_from_file()
del pets[id]
write_to_file(pets)
def update_pet(id, pet):
pets = read_from_file()
ids = pets.keys()
pets[id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def add_pet(pet):
pets = read_from_file()
ids = pets.keys()
new_id = int(ids[-1]) + 1
pets[new_id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def get_pet(id):
pets = read_from_file()
pet = pets[id]
pet["id"] = id
return pet
def write_to_file(content):
with open("./pets.json", "w") as pets:
pets.write(json.dumps(content))
def read_from_file():
with open("./pets.json", "r") as pets:
return json.loads(pets.read())
| 19.96 | 77 | 0.603206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.08016 |
736d59ff49e38e22e2651e066d945a62407f38ec
| 73 |
py
|
Python
|
CodeWars/8 Kyu/Price of Mangoes.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Price of Mangoes.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Price of Mangoes.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def mango(quantity, price):
return (quantity - quantity // 3) * price
| 36.5 | 45 | 0.671233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
736eb235587fea9084624307afb075d1bfa93603
| 5,582 |
py
|
Python
|
car-number-plate.project/car number plate.py
|
SumanthKumarS/mrucode-car-numberplate-detection-
|
46f759a5dec01ee551080db68ca250b064a25a01
|
[
"Apache-2.0"
] | null | null | null |
car-number-plate.project/car number plate.py
|
SumanthKumarS/mrucode-car-numberplate-detection-
|
46f759a5dec01ee551080db68ca250b064a25a01
|
[
"Apache-2.0"
] | null | null | null |
car-number-plate.project/car number plate.py
|
SumanthKumarS/mrucode-car-numberplate-detection-
|
46f759a5dec01ee551080db68ca250b064a25a01
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
import cv2
import imutils
import pytesseract as pt
from tkinter import *
from tkinter import messagebox
# ploting the images
def plot_img(img1, img2, title1="", title2=""):
fig = plt.figure(figsize=[5, 5])
# axis 1
ax1 = fig.add_subplot(121)
ax1.imshow(img1, cmap="gray")
ax1.set(xticks=[], yticks=[], title=title1)
# axis 2
ax2 = fig.add_subplot(122)
ax2.imshow(img2, cmap="gray")
ax2.set(xticks=[], yticks=[], title=title2)
# read the image using numpy
print("\n1.car-1\n2.car-2\n3.car-3")
a = int(input("Enter the choice of car : "))
if a == 1:
path = "./image/a.jpg"
elif a == 2:
path = "./image/b.jpg"
else:
path = "./image/c.jpg"
image = cv2.imread(path)
# resizing the image
image = imutils.resize(image, width=500)
cv2.imshow("original image", image)
# delaying the next image till this image gets closed
cv2.waitKey(8000) #delaying till 5 sec
cv2.destroyAllWindows()
plot_img(image, image, title1="original1", title2="original1")
# image color to gray
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plot_img(image, gray, title1="original1", title2="gray")
cv2.imshow('gray image', gray)
cv2.waitKey(8000)
cv2.destroyAllWindows()
# Noise removal with iterative bilateral filters(which removes the noise while filtering the edges)
blur = cv2.bilateralFilter(gray, 11, 90, 90)
plot_img(gray, blur, title1="gray", title2="Blur")
cv2.imshow("blurred image:", blur)
cv2.waitKey(8000)
cv2.destroyAllWindows()
# blurring the edges of grayscale image
edges = cv2.Canny(blur, 30, 200)
plot_img(blur, edges, title1="Blur", title2="Edges")
cv2.imshow("canny image:", edges)
cv2.waitKey(8000)
cv2.destroyAllWindows()
# Finding the contours based edges
cnts, new = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# coping the image as secondary
image_copy = image.copy()
# Drawing all the contours edges of the original image
_ = cv2.drawContours(image_copy, cnts, -1, (255, 0, 255), 2)
plot_img(edges, image_copy, title1="Edges", title2="Contours")
cv2.imshow("contours image:", image_copy)
cv2.waitKey(8000)
cv2.destroyAllWindows()
print("number of iteration of draw counter has passed: ", len(cnts))
# sort the contours keeping the minimum area as 30
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
image_reduce_cnts = image.copy()
_ = cv2.drawContours(image_reduce_cnts, cnts, -1, (255, 0, 255), 2)
plot_img(image_copy, image_reduce_cnts , title1="Contours", title2="Reduced")
cv2.imshow("reduced image:" , image_reduce_cnts)
cv2.waitKey(8000)
cv2.destroyAllWindows()
print("number of iteration passed by reducing the edges : ", len(cnts))
plate = None
for c in cnts:
perimeter = cv2.arcLength(c, True)
edges_count = cv2.approxPolyDP(c, 0.02 * perimeter , True)
if len(edges_count) == 4 :
x, y, w, h = cv2.boundingRect(c)
plate = image[y:y + h, x:x + w]
break
cv2.imwrite("plate.png", plate)
plot_img(plate, plate, title1="plate", title2="plate")
cv2.imshow("Number Plate Image : ", plate)
cv2.waitKey(8000)
cv2.destroyAllWindows()
pt.pytesseract.tesseract_cmd = r'C:\Users\admin\AppData\Local\Tesseract.exe'
no_plate = pt.image_to_string(plate, lang='eng')
print("the number plate of car is: ", no_plate)
def convert():
c_entry = input_entry.get()
if c_entry == 'HR26DK8337':
string_display = "Name : harish\nAddress : ministori visual tech in bangalore in vijayanagar\nPhone no : 9582645123"
label2 = Label(root)
label2["text"] = string_display
label2.grid(row=1 , column=1)
cv2.imshow("original image", image)
messagebox.showinfo("Car number plate Detector", "Successfully Number plate has been analysed : "+no_plate)
if c_entry == 'KLOLCC 5995':
string_display = "Name : chandran\nAddress : manthon niyali megalaya-552326\nPhone no : 9529876123"
label2 = Label(root)
label2["text"] = string_display
label2.grid(row=1 , column=1)
cv2.imshow("original image", image)
messagebox.showinfo("Car number plate Detector", "Successfully Number plate has been analysed : "+no_plate)
if c_entry == 'DZI7 YXR':
string_display = "Name : vijaya\nAddress : kadoor village nprayya nagar haydrabad\nPhone no : 92954611233"
label2 = Label(root)
label2["text"] = string_display
label2.grid(row=1 , column=1)
cv2.imshow("original image", image)
messagebox.showinfo("Car number plate Detector", "Successfully Number plate has been analysed : "+no_plate)
# creating Tk window
root = Tk()
# setting geometry of tk window
root.geometry('500x350+100+200')
#title of project
root.title('Car Number Plate Detector - (owner file address)')
# Back ground colour
root.config(bg="dark orange")
# Lay out widgets
root.grid_columnconfigure(1, weight=1)
root.grid_rowconfigure(1, weight=1)
inputNumber = StringVar()
var = StringVar()
input_label = Label(root, text="car plate number", font=("times new roman", 20, "bold"), bg="white", fg="green", background="#09A3BA", foreground="#FFF").place(x=150,y=40)
input_entry = Entry(root, textvariable=inputNumber, font=("times new roman", 15), bg="lightgray")
input_entry.grid(row=1, columnspan=2)
result_button = Button(root, text="Details", command=convert, font=("times new roman", 20, "bold"), bg="cyan")
result_button.grid(row=3, column=1)
root.mainloop()
| 34.45679 | 172 | 0.682551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,966 | 0.352204 |
736ef7d551671fb41b699b2055b5a873b3f9d021
| 13,229 |
py
|
Python
|
IBMWatson_Examples/WatsonNLU.py
|
sptennak/TextAnalytics
|
dde30337dc4d769ce7fb31b6f3021721bcd0b056
|
[
"Apache-2.0"
] | 4 |
2018-07-11T06:58:53.000Z
|
2020-09-06T13:17:54.000Z
|
IBMWatson_Examples/WatsonNLU.py
|
sptennak/TextAnalytics
|
dde30337dc4d769ce7fb31b6f3021721bcd0b056
|
[
"Apache-2.0"
] | null | null | null |
IBMWatson_Examples/WatsonNLU.py
|
sptennak/TextAnalytics
|
dde30337dc4d769ce7fb31b6f3021721bcd0b056
|
[
"Apache-2.0"
] | 1 |
2020-09-06T13:18:00.000Z
|
2020-09-06T13:18:00.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 18 22:15:35 2018
@author: Sumudu Tennakoon
References:
[1] https://www.ibm.com/watson/developercloud/natural-language-understanding/api/v1/
"""
from watson_developer_cloud import NaturalLanguageUnderstandingV1, WatsonException, WatsonApiException
from watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, RelationsOptions
import pandas as pd
import numpy as np
from timeit import default_timer as timer
import multiprocessing
import sys
###############################################################################
def IAM_Auth(APIKey, Version='2018-03-16'):
ServiceAuthentication = NaturalLanguageUnderstandingV1(
version= Version,
iam_api_key= APIKey
)
ServiceAuthentication.set_url('https://gateway-fra.watsonplatform.net/natural-language-understanding/api')
#To prevent IBM from accessing user input and Watson responses... https://www.ibm.com/watson/developercloud/conversation/api/v1/python.html?python#data-collection
ServiceAuthentication.set_default_headers({'x-watson-learning-opt-out': "true"})
return ServiceAuthentication
def Basic_Auth(UserName, Password, Version='2018-03-16'):
ServiceAuthentication = NaturalLanguageUnderstandingV1(
version= Version,
username= UserName,
password= Password
)
ServiceAuthentication.set_url('https://gateway-fra.watsonplatform.net/natural-language-understanding/api')
#To prevent IBM from accessing user input and Watson responses... https://www.ibm.com/watson/developercloud/conversation/api/v1/python.html?python#data-collection
ServiceAuthentication.set_default_headers({'x-watson-learning-opt-out': "true"})
return ServiceAuthentication
###############################################################################
def TextNLU(ServiceAuthentication, TextID, Text, ModelID=None, Emotion=False, Sentiment=False, Mentions =False, EntityLimit=50, TextLimit=50000, ReturnText=True):
Notes = ''
try:
Response = ServiceAuthentication.analyze(
text=Text,
features=Features(
relations=RelationsOptions(
model = ModelID,
),
entities=EntitiesOptions(
emotion=Emotion,
sentiment=Sentiment,
mentions=Mentions,
model = ModelID,
limit=EntityLimit
),
),
limit_text_characters = TextLimit, #https://console.bluemix.net/docs/services/natural-language-understanding/usage-limits.html#usage-limits
return_analyzed_text=ReturnText
)
Notes='RECIEVED'
except:
EXP = sys.exc_info()
        Notes = str(EXP[0])+'['+''.join(map(str, EXP[1].args))+']'
Notes = 'NLU:'+Notes
# Process Response Header
WatsonResponseHeader = pd.DataFrame({'TextID':[TextID]})
try:
WatsonResponseHeader['language'] = Response['language']
WatsonResponseHeader['text_characters'] = Response['usage']['text_characters'] #Number of characters processed
WatsonResponseHeader['text_units'] = Response['usage']['text_units'] #Number of characters processed
WatsonResponseHeader['features'] = Response['usage']['features'] #Number of features used, such as entities, sentiment, etc.
WatsonResponseHeader['entities'] = len(Response['entities'])
WatsonResponseHeader['analyzed_text'] = Response['analyzed_text']
except:
EXP = sys.exc_info()
        Notes= Notes+ '\tHEADER:' + str(EXP[0])+'['+''.join(map(str, EXP[1].args))+']'
# Process Response Details
try:
if len(Response['entities']) != 0:
WatsonResponseDetail = pd.DataFrame(Response['entities'])
WatsonResponseDetail.insert(0, 'TextID', TextID)
if 'sentiment' in WatsonResponseDetail.columns:
Split= WatsonResponseDetail.sentiment.apply(pd.Series)
WatsonResponseDetail['sentiment_'+Split.columns]= Split
WatsonResponseDetail.drop('sentiment', axis=1, inplace=True)
else:
raise Exception('NO ENTITIES FOUND')
except:
EXP = sys.exc_info()
        Notes= Notes+ '\tDETAIL:' + str(EXP[0])+'['+''.join(map(str, EXP[1].args))+']'
WatsonResponseDetail = pd.DataFrame()
WatsonResponseHeader['Notes'] = Notes
return WatsonResponseHeader, WatsonResponseDetail
###############################################################################
# GUI
###############################################################################
import tkinter as tk #(https://wiki.python.org/moin/TkInter)
from tkinter import filedialog
from tkinter import scrolledtext
import configparser #(https://docs.python.org/3.4/library/configparser.html)
import traceback
class ApplicationWindow(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.UserName = tk.StringVar()
self.Password = tk.StringVar()
self.APIKey = tk.StringVar()
self.Version = tk.StringVar()
self.ModelID = tk.StringVar()
self.ConfigFile = tk.StringVar()
self.InputTextFile = tk.StringVar()
self.Input = tk.StringVar()
self.CreateWidgets()
def CreateWidgets(self):
#Menu
MenuBar = tk.Menu(self.master)
self.master.config(menu=MenuBar)
FileMenu = tk.Menu(MenuBar)
MenuBar.add_cascade(label='File', menu=FileMenu)
FileMenu.add_command(label='Load Config', command=None)
FileMenu.add_command(label='Save Config', command=self.SaveConfig)
FileMenu.add_command(label='Save Config As', command=self.SaveConfigAs)
FileMenu.add_command(label='Close', command=root.destroy)
HelpMenu = tk.Menu(MenuBar)
MenuBar.add_cascade(label='Help', menu=HelpMenu)
HelpMenu.add_command(label='About', command=None)
#Field
self.Btn_InputTextFile = tk.Button(self, text='Input Text File', fg='blue', command=self.OpenInputTextFile)
self.Ent_InputTextFile = tk.Entry(self, textvariable=self.InputTextFile)
self.Btn_ConfigFile = tk.Button(self, text='Config File', fg='blue', command=self.OpenConfigFile)
self.Ent_ConfigFile = tk.Entry(self, textvariable=self.ConfigFile)
self.Lbl_UserName = tk.Label(self, text='User Name')
self.Ent_UserName = tk.Entry(self, textvariable=self.UserName)
self.Lbl_Password = tk.Label(self, text='Password')
self.Ent_Password = tk.Entry(self, textvariable=self.Password)
self.Lbl_APIKey = tk.Label(self, text='APIKey')
self.Ent_APIKey = tk.Entry(self, textvariable=self.APIKey)
self.Lbl_Version = tk.Label(self, text='Version')
self.Ent_Version = tk.Entry(self, textvariable=self.Version)
self.Lbl_ModelID = tk.Label(self, text='Model ID')
self.Ent_ModelID = tk.Entry(self, textvariable=self.ModelID)
# Input Text
self.Txt_Input= scrolledtext.ScrolledText(self, height=15)
# Output Textbox
self.Txt_Output= scrolledtext.ScrolledText(self, height=15)
# Buttons
self.Btn_Start = tk.Button(self, text='START', fg='green', command=self.Start)
self.Btn_Close = tk.Button(self, text='CLOSE WINDOW', fg='red', command=root.destroy)
#######################################################################
# Pack Wigdgets
self.Btn_InputTextFile.grid(row=0,column=0, padx=10)
self.Ent_InputTextFile.grid(row=0,column=1, padx=10)
self.Btn_ConfigFile.grid(row=1,column=0, padx=10)
self.Ent_ConfigFile.grid(row=1,column=1, padx=10)
self.Lbl_Version.grid(row=2,column=0, padx=10)
self.Ent_Version.grid(row=2,column=1, padx=10)
self.Lbl_UserName.grid(row=3,column=0, padx=10)
self.Ent_UserName.grid(row=3,column=1, padx=10)
self.Lbl_Password.grid(row=4,column=0, padx=10)
self.Ent_Password.grid(row=4,column=1, padx=10)
self.Lbl_APIKey.grid(row=5,column=0, padx=10)
self.Ent_APIKey.grid(row=5,column=1, padx=10)
self.Lbl_ModelID.grid(row=6,column=0)
self.Ent_ModelID.grid(row=6,column=1, padx=10)
self.Btn_Start.grid(row=7,column=0, columnspan=2)
self.Btn_Close.grid(row=8,column=0, columnspan=2, pady=10)
self.Txt_Input.grid(row=0,column=2, rowspan=6, columnspan=2, padx=10, pady=10)
self.Txt_Input.insert(tk.END, 'Hello World')
self.Txt_Output.grid(row=6,column=2, rowspan=3, columnspan=2, padx=10, pady=10)
self.Txt_Output.insert(tk.END, '>')
#######################################################################
    def Start(self):
        try:
            if not self.Version.get():
                # Fall back to a default API version if none was provided.
                self.Version.set('2018-03-16')
            TextID = 'GUI'
            InputText = self.Txt_Input.get(1.0, tk.END)
            Version = self.Version.get()
            ModelID = self.ModelID.get()
            Emotion = True
            Sentiment = True
            UserName = self.UserName.get()
            Password = self.Password.get()
            APIKey = self.APIKey.get()
            ServiceAuthentication = Basic_Auth(UserName, Password, Version)
            WatsonResponseHeader, WatsonResponseDetail = TextNLU(
                ServiceAuthentication, TextID, InputText,
                ModelID=ModelID if ModelID else None,
                Emotion=Emotion, Sentiment=Sentiment)
            print('Application Started')
            # Build the status text in a separate variable so the input text
            # is not overwritten before it is echoed back.
            Output = '> Version:{}\n UserName:{}\n Password:{}\n APIKey:{}\n ModelID:{}\n\n'.format(
                Version, UserName, Password, APIKey, ModelID)
            Output = Output + ' Text: {}\n\n'.format(InputText)
            self.Txt_Output.insert(tk.END, Output)
        except:
            traceback.print_exc()
def OpenInputTextFile(self):
try:
FileName = filedialog.askopenfilename(title = 'Select Input Text File',filetypes = (('Text Files','*.txt'), ('All files','*.*')))
if FileName!='':
self.Txt_Input.delete(1.0, tk.END)
self.InputTextFile.set(FileName)
with open(FileName, 'r') as inputfile:
Text = inputfile.read()
self.Txt_Input.insert(tk.END , Text)
self.Input.set(Text)
else:
pass
print(FileName)
except:
            traceback.print_exc()
def OpenConfigFile(self):
try:
config = configparser.ConfigParser()
FileName = filedialog.askopenfilename(title = 'Select Config File',filetypes = (('Config Files','*.cfg'),('Text Files','*.txt'), ('All files','*.*')))
if FileName!='':
self.ConfigFile.set(FileName)
config.read(FileName)
self.Version.set(config['DEFAULT']['version'])
self.UserName.set(config['DEFAULT']['username'])
self.Password.set(config['DEFAULT']['password'])
self.APIKey.set(config['DEFAULT']['apikey'])
self.ModelID.set(config['DEFAULT']['modelid'])
self.Txt_Output.insert(tk.END, 'Config File Loded: {}\n>'.format(FileName))
else:
pass
except:
            traceback.print_exc()
def SaveConfig(self):
FileName = self.ConfigFile.get()
try:
if FileName != '':
config = configparser.ConfigParser()
config['DEFAULT'] = {'Version': self.Version.get(), 'UserName': self.UserName.get(), 'Password': self.Password.get(), 'APIKey': self.APIKey.get(), 'ModelID': self.ModelID.get()}
with open(FileName, 'w') as configfile:
config.write(configfile)
self.Txt_Output.insert(tk.END, 'Config File Saved: {}\n>'.format(FileName))
except:
            traceback.print_exc()
def SaveConfigAs(self):
try:
File = filedialog.asksaveasfile(mode='w',defaultextension=".cfg")
FileName=File.name
if File is None:
pass
else:
config = configparser.ConfigParser()
config['DEFAULT'] = {'Version': self.Version.get(), 'UserName': self.UserName.get(), 'Password': self.Password.get(), 'APIKey': self.APIKey.get(), 'ModelID': self.ModelID.get()}
config.write(File)
File.close()
self.Txt_Output.insert(tk.END, 'Config File Saved As: {}\n>'.format(FileName))
except:
            traceback.print_exc()
root = tk.Tk()
AppWindow = ApplicationWindow(master=root)
AppWindow.master.title('IBM Watson Natural Language Processing')
#AppWindow.master.maxsize(1024, 768)
AppWindow.mainloop()
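# Editor's note: a hedged headless usage sketch (credentials and text are
# placeholders) for driving the NLU helpers above without the GUI:
#
#     auth = Basic_Auth('<username>', '<password>', Version='2018-03-16')
#     header, detail = TextNLU(auth, TextID='doc-1',
#                              Text='IBM opened a research lab in Paris.')
#     print(header)   # language, character counts, entity count, notes
#     print(detail)   # one row per detected entity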
| 45.150171 | 205 | 0.595434 | 8,142 | 0.615466 | 0 | 0 | 0 | 0 | 0 | 0 | 2,918 | 0.220576 |
7370be693eff3bd55bdb03b72b2306e42f8caced
| 6,813 |
py
|
Python
|
invenio_drafts_resources/resources/records/resource.py
|
fenekku/invenio-drafts-resources
|
fadae86fb9b36073cef13713fbc174ef771e49ec
|
[
"MIT"
] | null | null | null |
invenio_drafts_resources/resources/records/resource.py
|
fenekku/invenio-drafts-resources
|
fadae86fb9b36073cef13713fbc174ef771e49ec
|
[
"MIT"
] | null | null | null |
invenio_drafts_resources/resources/records/resource.py
|
fenekku/invenio-drafts-resources
|
fadae86fb9b36073cef13713fbc174ef771e49ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Drafts-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Invenio Drafts Resources module to create REST APIs."""
import marshmallow as ma
from flask import g
from flask_resources import JSONSerializer, ResponseHandler, \
resource_requestctx, response_handler, route, with_content_negotiation
from invenio_records_resources.resources import \
RecordResource as RecordResourceBase
from invenio_records_resources.resources.records.resource import \
request_data, request_headers, request_read_args, request_search_args, \
request_view_args
from invenio_records_resources.resources.records.utils import es_preference
from .errors import RedirectException
class RecordResource(RecordResourceBase):
"""Draft-aware RecordResource."""
def create_blueprint(self, **options):
"""Create the blueprint."""
# We avoid passing url_prefix to the blueprint because we need to
# install URLs under both /records and /user/records. Instead we
# add the prefix manually to each route (which is anyway what Flask
# does in the end)
options["url_prefix"] = ""
return super().create_blueprint(**options)
def create_url_rules(self):
"""Create the URL rules for the record resource."""
routes = self.config.routes
def p(route):
"""Prefix a route with the URL prefix."""
return f"{self.config.url_prefix}{route}"
def s(route):
"""Suffix a route with the URL prefix."""
return f"{route}{self.config.url_prefix}"
rules = [
route("GET", p(routes["list"]), self.search),
route("POST", p(routes["list"]), self.create),
route("GET", p(routes["item"]), self.read),
route("PUT", p(routes["item"]), self.update),
route("DELETE", p(routes["item"]), self.delete),
route("GET", p(routes["item-versions"]), self.search_versions),
route("POST", p(routes["item-versions"]), self.new_version),
route("GET", p(routes["item-latest"]), self.read_latest),
route("GET", p(routes["item-draft"]), self.read_draft),
route("POST", p(routes["item-draft"]), self.edit),
route("PUT", p(routes["item-draft"]), self.update_draft),
route("DELETE", p(routes["item-draft"]), self.delete_draft),
route("POST", p(routes["item-publish"]), self.publish),
route("GET", s(routes["user-prefix"]), self.search_user_records),
]
if self.service.draft_files:
rules.append(route(
"POST",
p(routes["item-files-import"]),
self.import_files,
apply_decorators=False
))
return rules
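    # Editor's note: an illustrative expansion of the helpers above (the route
    # patterns are assumptions, not the shipped config). With url_prefix set
    # to "/records":
    #   p("/<pid_value>") -> "/records/<pid_value>"   (record endpoints)
    #   s("/user")        -> "/user/records"          (user records search)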
@request_search_args
@request_view_args
@response_handler(many=True)
    def search_user_records(self):
        """Perform a search over the user's records.
GET /user/records
"""
hits = self.service.search_drafts(
identity=g.identity,
params=resource_requestctx.args,
es_preference=es_preference(),
)
return hits.to_dict(), 200
@request_search_args
@request_view_args
@response_handler(many=True)
def search_versions(self):
"""Perform a search over the record's versions.
GET /records/:pid_value/versions
"""
hits = self.service.search_versions(
resource_requestctx.view_args["pid_value"],
identity=g.identity,
params=resource_requestctx.args,
es_preference=es_preference()
)
return hits.to_dict(), 200
@request_view_args
@response_handler()
def new_version(self):
"""Create a new version.
POST /records/:pid_value/versions
"""
item = self.service.new_version(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 201
@request_view_args
@response_handler()
def edit(self):
"""Edit a record.
POST /records/:pid_value/draft
"""
item = self.service.edit(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 201
@request_view_args
@response_handler()
def publish(self):
"""Publish the draft."""
item = self.service.publish(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 202
@request_view_args
@with_content_negotiation(
response_handlers={
'application/json': ResponseHandler(JSONSerializer())
},
default_accept_mimetype='application/json',
)
@response_handler(many=True)
def import_files(self):
"""Import files from previous record version."""
files = self.service.import_files(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return files.to_dict(), 201
@request_view_args
def read_latest(self):
"""Redirect to latest record.
GET /records/:pid_value/versions/latest
"""
item = self.service.read_latest(
resource_requestctx.view_args["pid_value"],
g.identity,
)
raise RedirectException(item["links"]["self"])
@request_read_args
@request_view_args
@response_handler()
def read_draft(self):
"""Edit a draft.
GET /records/:pid_value/draft
"""
item = self.service.read_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 200
@request_headers
@request_view_args
@request_data
@response_handler()
def update_draft(self):
"""Update a draft.
PUT /records/:pid_value/draft
"""
item = self.service.update_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
resource_requestctx.data or {},
revision_id=resource_requestctx.headers.get("if_match"),
)
return item.to_dict(), 200
@request_headers
@request_view_args
def delete_draft(self):
"""Delete a draft.
DELETE /records/:pid_value/draft
"""
self.service.delete_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
revision_id=resource_requestctx.headers.get("if_match"),
)
return "", 204
| 31.541667 | 77 | 0.60957 | 5,932 | 0.870688 | 0 | 0 | 3,773 | 0.553794 | 0 | 0 | 1,929 | 0.283135 |
73719b129e4d31a646493cafb373317395215b7e
| 56,465 |
py
|
Python
|
pyscreener/preprocessing/gypsum_dl/Steps/SMILES/dimorphite_dl/dimorphite_dl.py
|
futianfan/pyscreener
|
15cce4ca8002ba083254aefa716d0e9c3ef00dba
|
[
"MIT"
] | 28 |
2020-12-11T22:10:16.000Z
|
2022-02-25T05:00:51.000Z
|
molpal/objectives/pyscreener/preprocessing/gypsum_dl/Steps/SMILES/dimorphite_dl/dimorphite_dl.py
|
ashuein/molpal
|
1e17a0c406516ceaeaf273a6983d06206bcfe76f
|
[
"MIT"
] | 3 |
2021-09-17T14:14:53.000Z
|
2021-09-23T11:04:10.000Z
|
molpal/objectives/pyscreener/preprocessing/gypsum_dl/Steps/SMILES/dimorphite_dl/dimorphite_dl.py
|
ashuein/molpal
|
1e17a0c406516ceaeaf273a6983d06206bcfe76f
|
[
"MIT"
] | 9 |
2021-03-03T12:10:10.000Z
|
2022-02-15T06:53:11.000Z
|
# Copyright 2020 Jacob D. Durrant
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script identifies and enumerates the possible protonation sites of SMILES
strings.
"""
from __future__ import print_function
import copy
import os
import argparse
import sys
try:
# Python2
from StringIO import StringIO
except ImportError:
# Python3
from io import StringIO
def print_header():
"""Prints out header information."""
# Always let the user know a help file is available.
print("\nFor help, use: python dimorphite_dl.py --help")
# And always report citation information.
print("\nIf you use Dimorphite-DL in your research, please cite:")
print("Ropp PJ, Kaminsky JC, Yablonski S, Durrant JD (2019) Dimorphite-DL: An")
print(
"open-source program for enumerating the ionization states of drug-like small"
)
print("molecules. J Cheminform 11:14. doi:10.1186/s13321-019-0336-9.\n")
try:
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
# Disable the unnecessary RDKit warnings
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
except:
msg = "Dimorphite-DL requires RDKit. See https://www.rdkit.org/"
print(msg)
raise Exception(msg)
def main(params=None):
"""The main definition run when you call the script from the commandline.
:param params: The parameters to use. Entirely optional. If absent,
defaults to None, in which case argments will be taken from
those given at the command line.
    :type params: dict, optional
    :return: Returns a list of the SMILES strings if the return_as_list
             parameter is True. Otherwise, returns None.
"""
parser = ArgParseFuncs.get_args()
args = vars(parser.parse_args())
if not args["silent"]:
print_header()
# Add in any parameters in params.
if params is not None:
for k, v in params.items():
args[k] = v
# If being run from the command line, print out all parameters.
if __name__ == "__main__":
if not args["silent"]:
print("\nPARAMETERS:\n")
for k in sorted(args.keys()):
print(k.rjust(13) + ": " + str(args[k]))
print("")
if args["test"]:
# Run tests.
TestFuncs.test()
else:
# Run protonation
if "output_file" in args and args["output_file"] is not None:
# An output file was specified, so write to that.
with open(args["output_file"], "w") as file:
for protonated_smi in Protonate(args):
file.write(protonated_smi + "\n")
elif "return_as_list" in args and args["return_as_list"] == True:
return list(Protonate(args))
else:
# No output file specified. Just print it to the screen.
for protonated_smi in Protonate(args):
print(protonated_smi)
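# Editor's note: a hedged programmatic-usage sketch of main(); the parameter
# names mirror the command-line options defined below in ArgParseFuncs:
#
#     protonated = main({
#         "smiles": "CCC(=O)O",
#         "min_ph": 6.4,
#         "max_ph": 8.4,
#         "return_as_list": True,
#         "silent": True,
#     })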
class MyParser(argparse.ArgumentParser):
"""Overwrite default parse so it displays help file on error. See
https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu"""
def error(self, message):
"""Overwrites the default error message.
:param message: The default error message.
"""
self.print_help()
msg = "ERROR: %s\n\n" % message
print(msg)
raise Exception(msg)
def print_help(self, file=None):
"""Overwrite the default print_help function
:param file: Output file, defaults to None
"""
print("")
if file is None:
file = sys.stdout
self._print_message(self.format_help(), file)
print(
"""
examples:
python dimorphite_dl.py --smiles_file sample_molecules.smi
python dimorphite_dl.py --smiles "CCC(=O)O" --min_ph -3.0 --max_ph -2.0
python dimorphite_dl.py --smiles "CCCN" --min_ph -3.0 --max_ph -2.0 --output_file output.smi
python dimorphite_dl.py --smiles_file sample_molecules.smi --pka_precision 2.0 --label_states
python dimorphite_dl.py --test"""
)
print("")
class ArgParseFuncs:
"""A namespace for storing functions that are useful for processing
command-line arguments. To keep things organized."""
@staticmethod
def get_args():
"""Gets the arguments from the command line.
:return: A parser object.
"""
parser = MyParser(
description="Dimorphite 1.2.4: Creates models of "
+ "appropriately protonated small moleucles. "
+ "Apache 2.0 License. Copyright 2020 Jacob D. "
+ "Durrant."
)
parser.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="minimum pH to consider (default: 6.4)",
)
parser.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="maximum pH to consider (default: 8.4)",
)
parser.add_argument(
"--pka_precision",
metavar="PRE",
type=float,
default=1.0,
help="pKa precision factor (number of standard devations, default: 1.0)",
)
parser.add_argument(
"--smiles", metavar="SMI", type=str, help="SMILES string to protonate"
)
parser.add_argument(
"--smiles_file",
metavar="FILE",
type=str,
help="file that contains SMILES strings to protonate",
)
parser.add_argument(
"--output_file",
metavar="FILE",
type=str,
help="output file to write protonated SMILES (optional)",
)
parser.add_argument(
"--max_variants",
metavar="MXV",
type=int,
default=128,
help="limit number of variants per input compound (default: 128)",
)
parser.add_argument(
"--label_states",
action="store_true",
help="label protonated SMILES with target state "
+ '(i.e., "DEPROTONATED", "PROTONATED", or "BOTH").',
)
parser.add_argument(
"--silent",
action="store_true",
help="do not print any messages to the screen",
)
parser.add_argument(
"--test", action="store_true", help="run unit tests (for debugging)"
)
return parser
@staticmethod
def clean_args(args):
"""Cleans and normalizes input parameters
:param args: A dictionary containing the arguments.
:type args: dict
:raises Exception: No SMILES in params.
"""
defaults = {
"min_ph": 6.4,
"max_ph": 8.4,
"pka_precision": 1.0,
"label_states": False,
"test": False,
"max_variants": 128,
}
for key in defaults:
if key not in args:
args[key] = defaults[key]
keys = list(args.keys())
for key in keys:
if args[key] is None:
del args[key]
if not "smiles" in args and not "smiles_file" in args:
msg = "Error: No SMILES in params. Use the -h parameter for help."
print(msg)
raise Exception(msg)
# If the user provides a smiles string, turn it into a file-like StringIO
# object.
if "smiles" in args:
if isinstance(args["smiles"], str):
args["smiles_file"] = StringIO(args["smiles"])
args["smiles_and_data"] = LoadSMIFile(args["smiles_file"], args)
return args
class UtilFuncs:
"""A namespace to store functions for manipulating mol objects. To keep
things organized."""
@staticmethod
def neutralize_mol(mol):
"""All molecules should be neuralized to the extent possible. The user
should not be allowed to specify the valence of the atoms in most cases.
:param rdkit.Chem.rdchem.Mol mol: The rdkit Mol objet to be neutralized.
:return: The neutralized Mol object.
"""
# Get the reaction data
rxn_data = [
[
"[Ov1-1:1]",
"[Ov2+0:1]-[H]",
], # To handle O- bonded to only one atom (add hydrogen).
[
"[#7v4+1:1]-[H]",
"[#7v3+0:1]",
], # To handle N+ bonded to a hydrogen (remove hydrogen).
[
"[Ov2-:1]",
"[Ov2+0:1]",
], # To handle O- bonded to two atoms. Should not be Negative.
[
"[#7v3+1:1]",
"[#7v3+0:1]",
], # To handle N+ bonded to three atoms. Should not be positive.
[
"[#7v2-1:1]",
"[#7+0:1]-[H]",
], # To handle N- Bonded to two atoms. Add hydrogen.
# ['[N:1]=[N+0:2]=[N:3]-[H]', '[N:1]=[N+1:2]=[N+0:3]-[H]'], # To handle bad azide. Must be
# protonated. (Now handled
# elsewhere, before SMILES
# converted to Mol object.)
[
"[H]-[N:1]-[N:2]#[N:3]",
"[N:1]=[N+1:2]=[N:3]-[H]",
] # To handle bad azide. R-N-N#N should
# be R-N=[N+]=N
]
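        # Each entry will grow to four fields: [reactant SMARTS, product
        # SMARTS, compiled substructure query, cached RDKit reaction]. The
        # reaction itself is compiled lazily, on first match, in the while
        # loop below.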
# Add substructures and reactions (initially none)
for i, rxn_datum in enumerate(rxn_data):
rxn_data[i].append(Chem.MolFromSmarts(rxn_datum[0]))
rxn_data[i].append(None)
# Add hydrogens (respects valence, so incomplete).
mol.UpdatePropertyCache(strict=False)
mol = Chem.AddHs(mol)
while True: # Keep going until all these issues have been resolved.
current_rxn = None # The reaction to perform.
current_rxn_str = None
for i, rxn_datum in enumerate(rxn_data):
(
reactant_smarts,
product_smarts,
substruct_match_mol,
rxn_placeholder,
) = rxn_datum
if mol.HasSubstructMatch(substruct_match_mol):
if rxn_placeholder is None:
current_rxn_str = reactant_smarts + ">>" + product_smarts
current_rxn = AllChem.ReactionFromSmarts(current_rxn_str)
rxn_data[i][3] = current_rxn # Update the placeholder.
else:
current_rxn = rxn_data[i][3]
break
# Perform the reaction if necessary
if current_rxn is None: # No reaction left, so break out of while loop.
break
else:
mol = current_rxn.RunReactants((mol,))[0][0]
mol.UpdatePropertyCache(strict=False) # Update valences
        # The mol has been altered by the reactions described above, so we
        # need to resanitize it and make sure aromatic rings are shown as
        # such. Passing sanitizeOps and catchErrors catches all RDKit errors;
        # without them, Chem.SanitizeMol can crash the program.
sanitize_string = Chem.SanitizeMol(
mol,
sanitizeOps=rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL,
catchErrors=True,
)
return mol if sanitize_string.name == "SANITIZE_NONE" else None
@staticmethod
def convert_smiles_str_to_mol(smiles_str):
"""Given a SMILES string, check that it is actually a string and not a
None. Then try to convert it to an RDKit Mol Object.
:param string smiles_str: The SMILES string.
:return: A rdkit.Chem.rdchem.Mol object, or None if it is the wrong type or
if it fails to convert to a Mol Obj
"""
        # Check that there are no type errors, i.e., Nones or non-strings. A
        # non-string type will cause RDKit to hard crash.
if smiles_str is None or type(smiles_str) is not str:
return None
# Try to fix azides here. They are just tricky to deal with.
smiles_str = smiles_str.replace("N=N=N", "N=[N+]=N")
smiles_str = smiles_str.replace("NN#N", "N=[N+]=N")
# Now convert to a mol object. Note the trick that is necessary to
# capture RDKit error/warning messages. See
# https://stackoverflow.com/questions/24277488/in-python-how-to-capture-the-stdout-from-a-c-shared-library-to-a-variable
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_pipe = os.pipe()
os.dup2(stderr_pipe[1], stderr_fileno)
os.close(stderr_pipe[1])
mol = Chem.MolFromSmiles(smiles_str)
os.close(stderr_fileno)
os.close(stderr_pipe[0])
os.dup2(stderr_save, stderr_fileno)
os.close(stderr_save)
        # Note that mol may be None here. Chem.MolFromSmiles runs with
        # sanitization on, which means that even a small error in the SMILES
        # (kekulization, nitrogen charge, ...) yields mol = None. For example,
        # Chem.MolFromSmiles("C[N]=[N]=[N]") returns None because of a
        # nitrogen charge error.
        return mol
@staticmethod
def eprint(*args, **kwargs):
"""Error messages should be printed to STDERR. See
https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python"""
print(*args, file=sys.stderr, **kwargs)
class LoadSMIFile(object):
"""A generator class for loading in the SMILES strings from a file, one at
a time."""
def __init__(self, filename, args):
"""Initializes this class.
:param filename: The filename or file object (i.e., StringIO).
:type filename: str or StringIO
"""
self.args = args
if type(filename) is str:
# It's a filename
self.f = open(filename, "r")
else:
# It's a file object (i.e., StringIO)
self.f = filename
def __iter__(self):
"""Returns this generator object.
:return: This generator object.
:rtype: LoadSMIFile
"""
return self
def __next__(self):
"""Ensure Python3 compatibility.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
return self.next()
def next(self):
"""Get the data associated with the next line.
        :raises StopIteration: If there are no more lines left in the file.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
line = self.f.readline()
if line == "":
# EOF
self.f.close()
            raise StopIteration()
# Divide line into smi and data
splits = line.split()
if len(splits) != 0:
# Generate mol object
smiles_str = splits[0]
# Convert from SMILES string to RDKIT Mol. This series of tests is
# to make sure the SMILES string is properly formed and to get it
# into a canonical form. Filter if failed.
mol = UtilFuncs.convert_smiles_str_to_mol(smiles_str)
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
            # Handle neutralizing the molecules. Filter if failed.
mol = UtilFuncs.neutralize_mol(mol)
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
# Remove the hydrogens.
try:
mol = Chem.RemoveHs(mol)
except:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
# Regenerate the smiles string (to standardize).
new_mol_string = Chem.MolToSmiles(mol, isomericSmiles=True)
return {"smiles": new_mol_string, "data": splits[1:]}
else:
# Blank line? Go to next one.
return self.next()
class Protonate(object):
"""A generator class for protonating SMILES strings, one at a time."""
def __init__(self, args):
"""Initialize the generator.
:param args: A dictionary containing the arguments.
:type args: dict
"""
        # Make the args an object variable.
self.args = args
# A list to store the protonated SMILES strings associated with a
# single input model.
self.cur_prot_SMI = []
# Clean and normalize the args
self.args = ArgParseFuncs.clean_args(args)
# Make sure functions in ProtSubstructFuncs have access to the args.
ProtSubstructFuncs.args = args
# Load the substructures that can be protonated.
self.subs = ProtSubstructFuncs.load_protonation_substructs_calc_state_for_ph(
self.args["min_ph"], self.args["max_ph"], self.args["pka_precision"]
)
def __iter__(self):
"""Returns this generator object.
:return: This generator object.
:rtype: Protonate
"""
return self
def __next__(self):
"""Ensure Python3 compatibility.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
return self.next()
def next(self):
"""Return the next protonated SMILES string.
        :raises StopIteration: If there are no more lines left in the file.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
# If there are any SMILES strings in self.cur_prot_SMI, just return
# the first one and update the list to include only the remaining.
if len(self.cur_prot_SMI) > 0:
first, self.cur_prot_SMI = self.cur_prot_SMI[0], self.cur_prot_SMI[1:]
return first
# self.cur_prot_SMI is empty, so try to add more to it.
# Get the next SMILES string from the input file.
try:
smile_and_datum = self.args["smiles_and_data"].next()
except StopIteration:
# There are no more input smiles strings...
raise StopIteration()
# Keep track of the original smiles string for reporting, starting the
# protonation process, etc.
orig_smi = smile_and_datum["smiles"]
# Dimorphite-DL may protonate some sites in ways that produce invalid
# SMILES. We need to keep track of all smiles so we can "rewind" to
# the last valid one, should things go south.
properly_formed_smi_found = [orig_smi]
# Everything on SMILES line but the SMILES string itself (e.g., the
# molecule name).
data = smile_and_datum["data"]
# Collect the data associated with this smiles (e.g., the molecule
# name).
tag = " ".join(data)
# sites is a list of (atom index, "PROTONATED|DEPROTONATED|BOTH",
# reaction name, mol). Note that the second entry indicates what state
# the site SHOULD be in (not the one it IS in per the SMILES string).
        # It's calculated based on the probabilistic distributions obtained
# during training.
(
sites,
mol_used_to_idx_sites,
) = ProtSubstructFuncs.get_prot_sites_and_target_states(orig_smi, self.subs)
new_mols = [mol_used_to_idx_sites]
if len(sites) > 0:
for site in sites:
# Make a new smiles with the correct protonation state. Note that
# new_smis is a growing list. This is how multiple protonation
# sites are handled.
new_mols = ProtSubstructFuncs.protonate_site(new_mols, site)
if len(new_mols) > self.args["max_variants"]:
new_mols = new_mols[: self.args["max_variants"]]
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Limited number of variants to "
+ str(self.args["max_variants"])
+ ": "
+ orig_smi
)
# Go through each of these new molecules and add them to the
# properly_formed_smi_found, in case you generate a poorly
# formed SMILES in the future and have to "rewind."
properly_formed_smi_found += [Chem.MolToSmiles(m) for m in new_mols]
else:
# Deprotonate the mols (because protonate_site never called to do
# it).
mol_used_to_idx_sites = Chem.RemoveHs(mol_used_to_idx_sites)
new_mols = [mol_used_to_idx_sites]
# Go through each of these new molecules and add them to the
# properly_formed_smi_found, in case you generate a poorly formed
# SMILES in the future and have to "rewind."
properly_formed_smi_found.append(Chem.MolToSmiles(mol_used_to_idx_sites))
# In some cases, the script might generate redundant molecules.
# Phosphonates, when the pH is between the two pKa values and the
# stdev value is big enough, for example, will generate two identical
# BOTH states. Let's remove this redundancy.
new_smis = list(
set(
[
Chem.MolToSmiles(m, isomericSmiles=True, canonical=True)
for m in new_mols
]
)
)
# Sometimes Dimorphite-DL generates molecules that aren't actually
# possible. Simply convert these to mol objects to eliminate the bad
# ones (that are None).
new_smis = [
s for s in new_smis if UtilFuncs.convert_smiles_str_to_mol(s) is not None
]
# If there are no smi left, return the input one at the very least.
# All generated forms have apparently been judged
# inappropriate/malformed.
if len(new_smis) == 0:
properly_formed_smi_found.reverse()
for smi in properly_formed_smi_found:
if UtilFuncs.convert_smiles_str_to_mol(smi) is not None:
new_smis = [smi]
break
# If the user wants to see the target states, add those to the ends of
# each line.
if self.args["label_states"]:
states = "\t".join([x[1] for x in sites])
new_lines = [x + "\t" + tag + "\t" + states for x in new_smis]
else:
new_lines = [x + "\t" + tag for x in new_smis]
self.cur_prot_SMI = new_lines
return self.next()
class ProtSubstructFuncs:
"""A namespace to store functions for loading the substructures that can
be protonated. To keep things organized."""
args = {}
@staticmethod
def load_substructre_smarts_file():
"""Loads the substructure smarts file. Similar to just using readlines,
except it filters out comments (lines that start with "#").
:return: A list of the lines in the site_substructures.smarts file,
except blank lines and lines that start with "#"
"""
pwd = os.path.dirname(os.path.realpath(__file__))
site_structures_file = "{}/{}".format(pwd, "site_substructures.smarts")
lines = [
l
for l in open(site_structures_file, "r")
if l.strip() != "" and not l.startswith("#")
]
return lines
@staticmethod
def load_protonation_substructs_calc_state_for_ph(
min_ph=6.4, max_ph=8.4, pka_std_range=1
):
"""A pre-calculated list of R-groups with protonation sites, with their
likely pKa bins.
:param float min_ph: The lower bound on the pH range, defaults to 6.4.
:param float max_ph: The upper bound on the pH range, defaults to 8.4.
:param pka_std_range: Basically the precision (stdev from predicted pKa to
consider), defaults to 1.
:return: A dict of the protonation substructions for the specified pH
range.
"""
subs = []
for line in ProtSubstructFuncs.load_substructre_smarts_file():
line = line.strip()
sub = {}
if line is not "":
splits = line.split()
sub["name"] = splits[0]
sub["smart"] = splits[1]
sub["mol"] = Chem.MolFromSmarts(sub["smart"])
pka_ranges = [splits[i : i + 3] for i in range(2, len(splits) - 1, 3)]
prot = []
for pka_range in pka_ranges:
site = pka_range[0]
std = float(pka_range[2]) * pka_std_range
mean = float(pka_range[1])
protonation_state = ProtSubstructFuncs.define_protonation_state(
mean, std, min_ph, max_ph
)
prot.append([site, protonation_state])
sub["prot_states_for_pH"] = prot
subs.append(sub)
return subs
@staticmethod
def define_protonation_state(mean, std, min_ph, max_ph):
"""Updates the substructure definitions to include the protonation state
based on the user-given pH range. The size of the pKa range is also based
on the number of standard deviations to be considered by the user param.
:param float mean: The mean pKa.
:param float std: The precision (stdev).
:param float min_ph: The min pH of the range.
:param float max_ph: The max pH of the range.
:return: A string describing the protonation state.
"""
min_pka = mean - std
max_pka = mean + std
        # Determine the state from the overlap between the pKa window
        # [min_pka, max_pka] and the user's pH window [min_ph, max_ph].
if min_pka <= max_ph and min_ph <= max_pka:
protonation_state = "BOTH"
elif mean > max_ph:
protonation_state = "PROTONATED"
else:
protonation_state = "DEPROTONATED"
return protonation_state
@staticmethod
def get_prot_sites_and_target_states(smi, subs):
"""For a single molecule, find all possible matches in the protonation
R-group list, subs. Items that are higher on the list will be matched
first, to the exclusion of later items.
:param string smi: A SMILES string.
:param list subs: Substructure information.
:return: A list of protonation sites (atom index), pKa bin.
('PROTONATED', 'BOTH', or 'DEPROTONATED'), and reaction name.
Also, the mol object that was used to generate the atom index.
"""
# Convert the Smiles string (smi) to an RDKit Mol Obj
mol_used_to_idx_sites = UtilFuncs.convert_smiles_str_to_mol(smi)
        # Check that the conversion worked
if mol_used_to_idx_sites is None:
UtilFuncs.eprint("ERROR: ", smi)
return []
        # Try to add hydrogens; if that fails, return []
try:
mol_used_to_idx_sites = Chem.AddHs(mol_used_to_idx_sites)
except:
UtilFuncs.eprint("ERROR: ", smi)
return []
        # Check that adding hydrogens worked
if mol_used_to_idx_sites is None:
UtilFuncs.eprint("ERROR: ", smi)
return []
ProtectUnprotectFuncs.unprotect_molecule(mol_used_to_idx_sites)
protonation_sites = []
for item in subs:
smart = item["mol"]
if mol_used_to_idx_sites.HasSubstructMatch(smart):
matches = ProtectUnprotectFuncs.get_unprotected_matches(
mol_used_to_idx_sites, smart
)
prot = item["prot_states_for_pH"]
for match in matches:
# We want to move the site from being relative to the
# substructure, to the index on the main molecule.
for site in prot:
proton = int(site[0])
category = site[1]
new_site = (match[proton], category, item["name"])
                    if new_site not in protonation_sites:
# Because sites must be unique.
protonation_sites.append(new_site)
ProtectUnprotectFuncs.protect_molecule(mol_used_to_idx_sites, match)
return protonation_sites, mol_used_to_idx_sites
@staticmethod
def protonate_site(mols, site):
"""Given a list of molecule objects, we protonate the site.
:param list mols: The list of molecule objects.
:param tuple site: Information about the protonation site.
(idx, target_prot_state, prot_site_name)
:return: A list of the appropriately protonated molecule objects.
"""
# Decouple the atom index and its target protonation state from the
# site tuple
idx, target_prot_state, prot_site_name = site
state_to_charge = {"DEPROTONATED": [-1], "PROTONATED": [0], "BOTH": [-1, 0]}
charges = state_to_charge[target_prot_state]
# Now make the actual smiles match the target protonation state.
output_mols = ProtSubstructFuncs.set_protonation_charge(
mols, idx, charges, prot_site_name
)
return output_mols
@staticmethod
def set_protonation_charge(mols, idx, charges, prot_site_name):
"""Sets the atomic charge on a particular site for a set of SMILES.
:param list mols: A list of the input molecule
objects.
:param int idx: The index of the atom to consider.
:param list charges: A list of the charges (ints) to
assign at this site.
:param string prot_site_name: The name of the protonation site.
:return: A list of the processed (protonated/deprotonated) molecule
objects.
"""
# Sets up the output list and the Nitrogen charge
output = []
for charge in charges:
# The charge for Nitrogens is 1 higher than others (i.e.,
# protonated state is positively charged).
nitrogen_charge = charge + 1
# But there are a few nitrogen moieties where the acidic group is
# the neutral one. Amides are a good example. I gave some thought
# re. how to best flag these. I decided that those
# nitrogen-containing moieties where the acidic group is neutral
# (rather than positively charged) will have "*" in the name.
if "*" in prot_site_name:
nitrogen_charge = nitrogen_charge - 1 # Undo what was done previously.
for mol in mols:
# Make a copy of the molecule.
mol_copy = copy.deepcopy(mol)
# Remove hydrogen atoms.
try:
mol_copy = Chem.RemoveHs(mol_copy)
except:
if "silent" in ProtSubstructFuncs.args and not ProtSubstructFuncs.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: "
+ Chem.MolToSmiles(mol_copy)
)
continue
atom = mol_copy.GetAtomWithIdx(idx)
explicit_bond_order_total = sum(
[b.GetBondTypeAsDouble() for b in atom.GetBonds()]
)
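                # GetBondTypeAsDouble() gives 1.0 for single, 2.0 for double,
                # 3.0 for triple, and 1.5 for aromatic bonds, so this sum is
                # the atom's explicit bond order to its neighbors.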
# Assign the protonation charge, with special care for
# nitrogens
element = atom.GetAtomicNum()
if element == 7:
atom.SetFormalCharge(nitrogen_charge)
# Need to figure out how many hydrogens to add.
if nitrogen_charge == 1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(3)
elif nitrogen_charge == 1 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(2)
elif nitrogen_charge == 1 and explicit_bond_order_total == 3:
atom.SetNumExplicitHs(1)
elif nitrogen_charge == 0 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(2)
elif nitrogen_charge == 0 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(1)
elif nitrogen_charge == -1 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(0)
elif nitrogen_charge == -1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(1)
#### JDD
else:
atom.SetFormalCharge(charge)
if element == 8 or element == 16: # O and S
if charge == 0 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(1)
elif charge == -1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(0)
# Deprotonating protonated aromatic nitrogen gives [nH-]. Change this
# to [n-].
if "[nH-]" in Chem.MolToSmiles(mol_copy):
atom.SetNumExplicitHs(0)
mol_copy.UpdatePropertyCache(strict=False)
# prod.UpdatePropertyCache(strict=False)
output.append(mol_copy)
return output
class ProtectUnprotectFuncs:
"""A namespace for storing functions that are useful for protecting and
unprotecting molecules. To keep things organized. We need to identify and
mark groups that have been matched with a substructure."""
@staticmethod
def unprotect_molecule(mol):
"""Sets the protected property on all atoms to 0. This also creates the
property for new molecules.
        :param rdkit.Chem.rdchem.Mol mol: The rdkit Mol object, whose atoms
            are marked as unprotected in place.
"""
for atom in mol.GetAtoms():
atom.SetProp("_protected", "0")
@staticmethod
def protect_molecule(mol, match):
"""Given a 'match', a list of molecules idx's, we set the protected status
of each atom to 1. This will prevent any matches using that atom in the
future.
:param rdkit.Chem.rdchem.Mol mol: The rdkit Mol object to protect.
        :param list match: A list of atom idx's.
"""
for idx in match:
atom = mol.GetAtomWithIdx(idx)
atom.SetProp("_protected", "1")
@staticmethod
def get_unprotected_matches(mol, substruct):
"""Finds substructure matches with atoms that have not been protected.
Returns list of matches, each match a list of atom idxs.
:param rdkit.Chem.rdchem.Mol mol: The Mol object to consider.
        :param rdkit.Chem.rdchem.Mol substruct: The substructure query (built
            from a SMARTS string) to match.
:return: A list of the matches. Each match is itself a list of atom idxs.
"""
matches = mol.GetSubstructMatches(substruct)
unprotected_matches = []
for match in matches:
if ProtectUnprotectFuncs.is_match_unprotected(mol, match):
unprotected_matches.append(match)
return unprotected_matches
@staticmethod
def is_match_unprotected(mol, match):
"""Checks a molecule to see if the substructure match contains any
protected atoms.
:param rdkit.Chem.rdchem.Mol mol: The Mol object to check.
:param list match: The match to check.
        :return: A boolean: True if the match contains only unprotected atoms.
"""
for idx in match:
atom = mol.GetAtomWithIdx(idx)
protected = atom.GetProp("_protected")
if protected == "1":
return False
return True
class TestFuncs:
"""A namespace for storing functions that perform tests on the code. To
keep things organized."""
@staticmethod
def test():
"""Tests all the 38 groups."""
# fmt: off
smis = [
# input smiles, protonated, deprotonated, category
["C#CCO", "C#CCO", "C#CC[O-]", "Alcohol"],
["C(=O)N", "NC=O", "[NH-]C=O", "Amide"],
["CC(=O)NOC(C)=O", "CC(=O)NOC(C)=O", "CC(=O)[N-]OC(C)=O", "Amide_electronegative"],
["COC(=N)N", "COC(N)=[NH2+]", "COC(=N)N", "AmidineGuanidine2"],
["Brc1ccc(C2NCCS2)cc1", "Brc1ccc(C2[NH2+]CCS2)cc1", "Brc1ccc(C2NCCS2)cc1", "Amines_primary_secondary_tertiary"],
["CC(=O)[n+]1ccc(N)cc1", "CC(=O)[n+]1ccc([NH3+])cc1", "CC(=O)[n+]1ccc(N)cc1", "Anilines_primary"],
["CCNc1ccccc1", "CC[NH2+]c1ccccc1", "CCNc1ccccc1", "Anilines_secondary"],
["Cc1ccccc1N(C)C", "Cc1ccccc1[NH+](C)C", "Cc1ccccc1N(C)C", "Anilines_tertiary"],
["BrC1=CC2=C(C=C1)NC=C2", "Brc1ccc2[nH]ccc2c1", "Brc1ccc2[n-]ccc2c1", "Indole_pyrrole"],
["O=c1cc[nH]cc1", "O=c1cc[nH]cc1", "O=c1cc[n-]cc1", "Aromatic_nitrogen_protonated"],
["C-N=[N+]=[N@H]", "CN=[N+]=N", "CN=[N+]=[N-]", "Azide"],
["BrC(C(O)=O)CBr", "O=C(O)C(Br)CBr", "O=C([O-])C(Br)CBr", "Carboxyl"],
["NC(NN=O)=N", "NC(=[NH2+])NN=O", "N=C(N)NN=O", "AmidineGuanidine1"],
["C(F)(F)(F)C(=O)NC(=O)C", "CC(=O)NC(=O)C(F)(F)F", "CC(=O)[N-]C(=O)C(F)(F)F", "Imide"],
["O=C(C)NC(C)=O", "CC(=O)NC(C)=O", "CC(=O)[N-]C(C)=O", "Imide2"],
["CC(C)(C)C(N(C)O)=O", "CN(O)C(=O)C(C)(C)C", "CN([O-])C(=O)C(C)(C)C", "N-hydroxyamide"],
["C[N+](O)=O", "C[N+](=O)O", "C[N+](=O)[O-]", "Nitro"],
["O=C1C=C(O)CC1", "O=C1C=C(O)CC1", "O=C1C=C([O-])CC1", "O=C-C=C-OH"],
["C1CC1OO", "OOC1CC1", "[O-]OC1CC1", "Peroxide2"],
["C(=O)OO", "O=COO", "O=CO[O-]", "Peroxide1"],
["Brc1cc(O)cc(Br)c1", "Oc1cc(Br)cc(Br)c1", "[O-]c1cc(Br)cc(Br)c1", "Phenol"],
["CC(=O)c1ccc(S)cc1", "CC(=O)c1ccc(S)cc1", "CC(=O)c1ccc([S-])cc1", "Phenyl_Thiol"],
["C=CCOc1ccc(C(=O)O)cc1", "C=CCOc1ccc(C(=O)O)cc1", "C=CCOc1ccc(C(=O)[O-])cc1", "Phenyl_carboxyl"],
["COP(=O)(O)OC", "COP(=O)(O)OC", "COP(=O)([O-])OC", "Phosphate_diester"],
["CP(C)(=O)O", "CP(C)(=O)O", "CP(C)(=O)[O-]", "Phosphinic_acid"],
["CC(C)OP(C)(=O)O", "CC(C)OP(C)(=O)O", "CC(C)OP(C)(=O)[O-]", "Phosphonate_ester"],
["CC1(C)OC(=O)NC1=O", "CC1(C)OC(=O)NC1=O", "CC1(C)OC(=O)[N-]C1=O", "Ringed_imide1"],
["O=C(N1)C=CC1=O", "O=C1C=CC(=O)N1", "O=C1C=CC(=O)[N-]1", "Ringed_imide2"],
["O=S(OC)(O)=O", "COS(=O)(=O)O", "COS(=O)(=O)[O-]", "Sulfate"],
["COc1ccc(S(=O)O)cc1", "COc1ccc(S(=O)O)cc1", "COc1ccc(S(=O)[O-])cc1", "Sulfinic_acid"],
["CS(N)(=O)=O", "CS(N)(=O)=O", "CS([NH-])(=O)=O", "Sulfonamide"],
["CC(=O)CSCCS(O)(=O)=O", "CC(=O)CSCCS(=O)(=O)O", "CC(=O)CSCCS(=O)(=O)[O-]", "Sulfonate"],
["CC(=O)S", "CC(=O)S", "CC(=O)[S-]", "Thioic_acid"],
["C(C)(C)(C)(S)", "CC(C)(C)S", "CC(C)(C)[S-]", "Thiol"],
["Brc1cc[nH+]cc1", "Brc1cc[nH+]cc1", "Brc1ccncc1", "Aromatic_nitrogen_unprotonated"],
["C=C(O)c1c(C)cc(C)cc1C", "C=C(O)c1c(C)cc(C)cc1C", "C=C([O-])c1c(C)cc(C)cc1C", "Vinyl_alcohol"],
["CC(=O)ON", "CC(=O)O[NH3+]", "CC(=O)ON", "Primary_hydroxyl_amine"],
            # Note: not testing Internal_phosphate_polyphos_chain and
            # Initial_phosphate_like_in_ATP_ADP here because there is no way
            # to generate monoprotic compounds to test them. See the Other
            # Tests section below.
]
smis_phos = [
# [input smiles, protonated, deprotonated1, deprotonated2, category]
["O=P(O)(O)OCCCC", "CCCCOP(=O)(O)O", "CCCCOP(=O)([O-])O", "CCCCOP(=O)([O-])[O-]", "Phosphate"],
["CC(P(O)(O)=O)C", "CC(C)P(=O)(O)O", "CC(C)P(=O)([O-])O", "CC(C)P(=O)([O-])[O-]", "Phosphonate"],
]
# fmt: on
cats_with_two_prot_sites = [inf[4] for inf in smis_phos]
# Load the average pKa values.
average_pkas = {
l.split()[0].replace("*", ""): float(l.split()[3])
for l in ProtSubstructFuncs.load_substructre_smarts_file()
if l.split()[0] not in cats_with_two_prot_sites
}
average_pkas_phos = {
l.split()[0].replace("*", ""): [float(l.split()[3]), float(l.split()[6])]
for l in ProtSubstructFuncs.load_substructre_smarts_file()
if l.split()[0] in cats_with_two_prot_sites
}
print("Running Tests")
print("=============")
print("")
print("Very Acidic (pH -10000000)")
print("--------------------------")
print("")
args = {
"min_ph": -10000000,
"max_ph": -10000000,
"pka_precision": 0.5,
"smiles": "",
"label_states": True,
"silent": True
}
for smi, protonated, deprotonated, category in smis:
args["smiles"] = smi
TestFuncs.test_check(args, [protonated], ["PROTONATED"])
# Test phosphates separately
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
TestFuncs.test_check(args, [protonated], ["PROTONATED"])
args["min_ph"] = 10000000
args["max_ph"] = 10000000
print("")
print("Very Basic (pH 10000000)")
print("------------------------")
print("")
for smi, protonated, deprotonated, category in smis:
args["smiles"] = smi
TestFuncs.test_check(args, [deprotonated], ["DEPROTONATED"])
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
TestFuncs.test_check(args, [deprotonated], ["DEPROTONATED"])
print("")
print("pH is Category pKa")
print("------------------")
print("")
for smi, protonated, deprotonated, category in smis:
avg_pka = average_pkas[category]
args["smiles"] = smi
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(args, [protonated, deprotonated], ["BOTH"])
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
avg_pka = average_pkas_phos[category][0]
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(args, [mix, protonated], ["BOTH"])
avg_pka = average_pkas_phos[category][1]
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(
args, [mix, deprotonated], ["DEPROTONATED", "DEPROTONATED"]
)
avg_pka = 0.5 * (
average_pkas_phos[category][0] + average_pkas_phos[category][1]
)
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
args["pka_precision"] = 5 # Should give all three
TestFuncs.test_check(
args, [mix, deprotonated, protonated], ["BOTH", "BOTH"]
)
print("")
print("Other Tests")
print("-----------")
print("")
# Make sure no carbanion (old bug).
smi = "Cc1nc2cc(-c3[nH]c4cc5ccccc5c5c4c3CCN(C(=O)O)[C@@H]5O)cc3c(=O)[nH][nH]c(n1)c23"
output = list(Protonate({"smiles": smi, "test": False, "silent": True}))
if "[C-]" in "".join(output).upper():
msg = "Processing " + smi + " produced a molecule with a carbanion!"
raise Exception(msg)
else:
print("(CORRECT) No carbanion: " + smi)
# Make sure max number of variants is limited (old bug).
smi = "CCCC[C@@H](C(=O)N)NC(=O)[C@@H](NC(=O)[C@@H](NC(=O)[C@@H](NC(=O)[C@H](C(C)C)NC(=O)[C@@H](NC(=O)[C@H](Cc1c[nH]c2c1cccc2)NC(=O)[C@@H](NC(=O)[C@@H](Cc1ccc(cc1)O)N)CCC(=O)N)C)C)Cc1nc[nH]c1)Cc1ccccc1"
output = list(Protonate({"smiles": smi, "test": False, "silent": True}))
if len(output) != 128:
msg = "Processing " + smi + " produced more than 128 variants!"
raise Exception(msg)
else:
print("(CORRECT) Produced 128 variants: " + smi)
        # Make sure ATP and NAD work at different pHs (because
        # Internal_phosphate_polyphos_chain and
        # Initial_phosphate_like_in_ATP_ADP can't be tested with monoprotic
        # examples).
specific_examples = [
[
"O=P(O)(OP(O)(OP(O)(OCC1OC(C(C1O)O)N2C=NC3=C2N=CN=C3N)=O)=O)O", # input, ATP
(
0.5,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)(O)OP(=O)(O)OP(=O)(O)O)C(O)C1O",
),
(
1.0,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)(O)OP(=O)([O-])OP(=O)(O)O)C(O)C1O",
),
(
2.6,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)([O-])OP(=O)([O-])OP(=O)([O-])O)C(O)C1O",
),
(
7.0,
"Nc1ncnc2c1ncn2C1OC(COP(=O)([O-])OP(=O)([O-])OP(=O)([O-])[O-])C(O)C1O",
),
],
[
"O=P(O)(OP(O)(OCC1C(O)C(O)C(N2C=NC3=C(N)N=CN=C32)O1)=O)OCC(O4)C(O)C(O)C4[N+]5=CC=CC(C(N)=O)=C5", # input, NAD
(
0.5,
"NC(=O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c([NH3+])ncnc54)C(O)C3O)C(O)C2O)c1",
),
(
2.5,
"NC(=O)c1ccc[n+](C2OC(COP(=O)([O-])OP(=O)([O-])OCC3OC(n4cnc5c([NH3+])ncnc54)C(O)C3O)C(O)C2O)c1",
),
(
7.4,
"NC(=O)c1ccc[n+](C2OC(COP(=O)([O-])OP(=O)([O-])OCC3OC(n4cnc5c(N)ncnc54)C(O)C3O)C(O)C2O)c1",
),
],
]
for example in specific_examples:
smi = example[0]
for ph, expected_output in example[1:]:
output = list(
Protonate(
{
"smiles": smi,
"test": False,
"min_ph": ph,
"max_ph": ph,
"pka_precision": 0,
"silent": True
}
)
)
if output[0].strip() == expected_output:
print(
"(CORRECT) "
+ smi
+ " at pH "
+ str(ph)
+ " is "
+ output[0].strip()
)
else:
msg = (
smi
+ " at pH "
+ str(ph)
+ " should be "
+ expected_output
+ ", but it is "
+ output[0].strip()
)
raise Exception(msg)
@staticmethod
def test_check(args, expected_output, labels):
"""Tests most ionizable groups. The ones that can only loose or gain a single proton.
:param args: The arguments to pass to protonate()
:param expected_output: A list of the expected SMILES-strings output.
:param labels: The labels. A list containing combo of BOTH, PROTONATED,
DEPROTONATED.
:raises Exception: Wrong number of states produced.
:raises Exception: Unexpected output SMILES.
:raises Exception: Wrong labels.
"""
output = list(Protonate(args))
output = [o.split() for o in output]
num_states = len(expected_output)
if len(output) != num_states:
msg = (
args["smiles"]
+ " should have "
+ str(num_states)
+ " states at at pH "
+ str(args["min_ph"])
+ ": "
+ str(output)
)
UtilFuncs.eprint(msg)
raise Exception(msg)
if len(set([l[0] for l in output]) - set(expected_output)) != 0:
msg = (
args["smiles"]
+ " is not "
+ " AND ".join(expected_output)
+ " at pH "
+ str(args["min_ph"])
+ " - "
+ str(args["max_ph"])
+ "; it is "
+ " AND ".join([l[0] for l in output])
)
UtilFuncs.eprint(msg)
raise Exception(msg)
if len(set([l[1] for l in output]) - set(labels)) != 0:
msg = (
args["smiles"]
+ " not labeled as "
+ " AND ".join(labels)
+ "; it is "
+ " AND ".join([l[1] for l in output])
)
UtilFuncs.eprint(msg)
raise Exception(msg)
ph_range = sorted(list(set([args["min_ph"], args["max_ph"]])))
ph_range_str = "(" + " - ".join("{0:.2f}".format(n) for n in ph_range) + ")"
print(
"(CORRECT) "
+ ph_range_str.ljust(10)
+ " "
+ args["smiles"]
+ " => "
+ " AND ".join([l[0] for l in output])
)
def run(**kwargs):
"""A helpful, importable function for those who want to call Dimorphite-DL
from another Python script rather than the command line. Note that this
function accepts keyword arguments that match the command-line parameters
exactly. If you want to pass and return a list of RDKit Mol objects, import
run_with_mol_list() instead.
:param **kwargs: For a complete description, run dimorphite_dl.py from the
command line with the -h option.
:type kwargs: dict
"""
# Run the main function with the specified arguments.
main(kwargs)
def run_with_mol_list(mol_lst, **kwargs):
"""A helpful, importable function for those who want to call Dimorphite-DL
from another Python script rather than the command line. Note that this
function is for passing Dimorphite-DL a list of RDKit Mol objects, together
with command-line parameters. If you want to use only the same parameters
that you would use from the command line, import run() instead.
:param mol_lst: A list of rdkit.Chem.rdchem.Mol objects.
:type mol_lst: list
:raises Exception: If the **kwargs includes "smiles", "smiles_file",
"output_file", or "test" parameters.
:return: A list of properly protonated rdkit.Chem.rdchem.Mol objects.
:rtype: list
"""
# Do a quick check to make sure the user input makes sense.
for bad_arg in ["smiles", "smiles_file", "output_file", "test"]:
if bad_arg in kwargs:
msg = (
"You're using Dimorphite-DL's run_with_mol_list(mol_lst, "
+ '**kwargs) function, but you also passed the "'
+ bad_arg
+ '" argument. Did you mean to use the '
+ "run(**kwargs) function instead?"
)
UtilFuncs.eprint(msg)
raise Exception(msg)
# Set the return_as_list flag so main() will return the protonated smiles
# as a list.
kwargs["return_as_list"] = True
# Having reviewed the code, it will be very difficult to rewrite it so
    # that a list of Mol objects can be used directly. Instead, convert this
# list of mols to smiles and pass that. Not efficient, but it will work.
protonated_smiles_and_props = []
for m in mol_lst:
props = m.GetPropsAsDict()
kwargs["smiles"] = Chem.MolToSmiles(m, isomericSmiles=True)
protonated_smiles_and_props.extend(
[(s.split("\t")[0], props) for s in main(kwargs)]
)
# Now convert the list of protonated smiles strings back to RDKit Mol
# objects. Also, add back in the properties from the original mol objects.
mols = []
for s, props in protonated_smiles_and_props:
m = Chem.MolFromSmiles(s)
if m:
for prop, val in props.items():
if type(val) is int:
m.SetIntProp(prop, val)
elif type(val) is float:
m.SetDoubleProp(prop, val)
elif type(val) is bool:
m.SetBoolProp(prop, val)
else:
m.SetProp(prop, str(val))
mols.append(m)
else:
UtilFuncs.eprint(
"WARNING: Could not process molecule with SMILES string "
+ s
+ " and properties "
+ str(props)
)
return mols
if __name__ == "__main__":
main()
| 38.860977 | 209 | 0.535819 | 49,526 | 0.87711 | 0 | 0 | 36,872 | 0.653006 | 0 | 0 | 26,647 | 0.471921 |
73722b13a366409a78c447bdbc55cbb010f2c490
| 568 |
py
|
Python
|
src/visuanalytics/tests/analytics/transform/transform_test_helper.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 3 |
2020-08-24T19:02:09.000Z
|
2021-05-27T20:22:41.000Z
|
src/visuanalytics/tests/analytics/transform/transform_test_helper.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 342 |
2020-08-13T10:24:23.000Z
|
2021-08-12T14:01:52.000Z
|
src/visuanalytics/tests/analytics/transform/transform_test_helper.py
|
visuanalytics/visuanalytics
|
f9cce7bc9e3227568939648ddd1dd6df02eac752
|
[
"MIT"
] | 8 |
2020-09-01T07:11:18.000Z
|
2021-04-09T09:02:11.000Z
|
from visuanalytics.analytics.control.procedures.step_data import StepData
from visuanalytics.analytics.transform.transform import transform
def prepare_test(values: list, data, expected_data: dict, config=None):
if config is None:
config = {}
step_data = StepData(config, "0", 0)
step_data.insert_data("_req", data, {})
transform({"transform": values}, step_data)
# removed Temporary set data
step_data.data.pop("_conf")
step_data.data.pop("_pipe_id")
step_data.data.pop("_job_id")
return step_data.data, expected_data
| 29.894737 | 73 | 0.721831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.130282 |
737252b8db4b5f48d4c98ee3b57ca3749e94a02f
| 693 |
py
|
Python
|
configs/diseased/resnet50_cancer_adddata.py
|
jiangwenj02/mmclassification
|
4c3657c16f370ace9013b160aa054c87fd27a055
|
[
"Apache-2.0"
] | null | null | null |
configs/diseased/resnet50_cancer_adddata.py
|
jiangwenj02/mmclassification
|
4c3657c16f370ace9013b160aa054c87fd27a055
|
[
"Apache-2.0"
] | null | null | null |
configs/diseased/resnet50_cancer_adddata.py
|
jiangwenj02/mmclassification
|
4c3657c16f370ace9013b160aa054c87fd27a055
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/resnet50.py', '../_base_/datasets/cancer_bs32_pil_resize.py',
'../_base_/schedules/imagenet_bs256_coslr.py', '../_base_/default_runtime.py'
]
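# The overrides below refine the inherited base configs: a two-class
# classification head, cancer dataset paths, an SGD optimizer, and
# ImageNet-pretrained ResNet-50 weights to initialize from.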
model = dict(
head=dict(
num_classes=2,
topk=(1,))
)
data = dict(
train=dict(
data_prefix='/data3/zzhang/tmp/classification/train'),
val=dict(
data_prefix='/data3/zzhang/tmp/classification/test'),
test=dict(
data_prefix='/data3/zzhang/tmp/classification/test'))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
load_from = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth'
| 34.65 | 121 | 0.689755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.551227 |
7373df1f357495c213b36ad2e30241e90eab5f96
| 4,907 |
py
|
Python
|
polyaxon/scheduler/dockerizer_scheduler.py
|
vfdev-5/polyaxon
|
3e1511a993dc1a03e0a0827de0357f4adcc0015f
|
[
"MIT"
] | null | null | null |
polyaxon/scheduler/dockerizer_scheduler.py
|
vfdev-5/polyaxon
|
3e1511a993dc1a03e0a0827de0357f4adcc0015f
|
[
"MIT"
] | null | null | null |
polyaxon/scheduler/dockerizer_scheduler.py
|
vfdev-5/polyaxon
|
3e1511a993dc1a03e0a0827de0357f4adcc0015f
|
[
"MIT"
] | null | null | null |
import logging
import traceback
from kubernetes.client.rest import ApiException
from django.conf import settings
import auditor
from constants.jobs import JobLifeCycle
from db.models.build_jobs import BuildJob
from docker_images.image_info import get_tagged_image
from event_manager.events.build_job import BUILD_JOB_STARTED, BUILD_JOB_STARTED_TRIGGERED
from libs.paths.exceptions import VolumeNotFoundError
from scheduler.spawners.dockerizer_spawner import DockerizerSpawner
from scheduler.spawners.utils import get_job_definition
_logger = logging.getLogger('polyaxon.scheduler.dockerizer')
def check_image(build_job):
from docker import APIClient
docker = APIClient(version='auto')
return docker.images(get_tagged_image(build_job))
def create_build_job(user, project, config, code_reference):
"""Get or Create a build job based on the params.
If a build job already exists, then we check if the build has already an image created.
If the image does not exists, and the job is already done we force create a new job.
Returns:
tuple: (build_job, image_exists[bool], build_status[bool])
"""
build_job, rebuild = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference)
if build_job.succeeded and not rebuild:
        # Check if the image was built less than 6 hours ago
return build_job, True, False
if check_image(build_job=build_job):
# Check if image exists already
return build_job, True, False
if build_job.is_done:
build_job, _ = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference,
nocache=True)
if not build_job.is_running:
# We need to build the image first
auditor.record(event_type=BUILD_JOB_STARTED_TRIGGERED,
instance=build_job,
actor_id=user.id,
actor_name=user.username)
build_status = start_dockerizer(build_job=build_job)
else:
build_status = True
return build_job, False, build_status
def start_dockerizer(build_job):
    # Update job status to show that it's started
build_job.set_status(JobLifeCycle.SCHEDULED)
spawner = DockerizerSpawner(
project_name=build_job.project.unique_name,
project_uuid=build_job.project.uuid.hex,
job_name=build_job.unique_name,
job_uuid=build_job.uuid.hex,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True)
error = {}
try:
results = spawner.start_dockerizer(resources=build_job.resources,
node_selector=build_job.node_selector,
affinity=build_job.affinity,
tolerations=build_job.tolerations)
auditor.record(event_type=BUILD_JOB_STARTED,
instance=build_job)
build_job.definition = get_job_definition(results)
build_job.save(update_fields=['definition'])
return True
except ApiException:
_logger.error('Could not start build job, please check your polyaxon spec',
exc_info=True)
error = {
'raised': True,
'traceback': traceback.format_exc(),
'message': 'Could not start build job, encountered a Kubernetes ApiException.'
}
except VolumeNotFoundError as e:
_logger.error('Could not start build job, please check your volume definitions.',
exc_info=True)
error = {
'raised': True,
'traceback': traceback.format_exc(),
'message': 'Could not start build job, encountered a volume definition problem. %s' % e
}
except Exception as e:
_logger.error('Could not start build job, please check your polyaxon spec.',
exc_info=True)
error = {
'raised': True,
'traceback': traceback.format_exc(),
            'message': 'Could not start build job; encountered an {} exception.'.format(
e.__class__.__name__
)
}
finally:
if error.get('raised'):
build_job.set_status(
JobLifeCycle.FAILED,
message=error.get('message'),
traceback=error.get('traceback'))
def stop_dockerizer(project_name, project_uuid, build_job_name, build_job_uuid):
spawner = DockerizerSpawner(
project_name=project_name,
project_uuid=project_uuid,
job_name=build_job_name,
job_uuid=build_job_uuid,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True)
return spawner.stop_dockerizer()
| 34.801418 | 99 | 0.648665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,022 | 0.208274 |
737528bac9620b8ee07a8513acd084d73b0adc0c
| 9,587 |
py
|
Python
|
test/test_pyini.py
|
limodou/uliweb3
|
560fe818047c8ee8b4b775e714d9c637f0d23651
|
[
"BSD-2-Clause"
] | 16 |
2018-09-12T02:50:28.000Z
|
2021-08-20T08:38:31.000Z
|
test/test_pyini.py
|
limodou/uliweb3
|
560fe818047c8ee8b4b775e714d9c637f0d23651
|
[
"BSD-2-Clause"
] | 21 |
2018-11-29T06:41:08.000Z
|
2022-01-18T13:27:38.000Z
|
test/test_pyini.py
|
limodou/uliweb3
|
560fe818047c8ee8b4b775e714d9c637f0d23651
|
[
"BSD-2-Clause"
] | 1 |
2018-10-08T10:02:56.000Z
|
2018-10-08T10:02:56.000Z
|
#coding=utf8
from uliweb.utils.pyini import *
def test_sorteddict():
"""
>>> d = SortedDict()
>>> d
<SortedDict {}>
>>> d.name = 'limodou'
>>> d['class'] = 'py'
>>> d
<SortedDict {'class':'py', 'name':'limodou'}>
>>> d.keys()
['name', 'class']
>>> d.values()
['limodou', 'py']
>>> d['class']
'py'
>>> d.name
'limodou'
>>> d.get('name', 'default')
'limodou'
>>> d.get('other', 'default')
'default'
>>> 'name' in d
True
>>> 'other' in d
False
>>> print (d.other)
None
>>> try:
... d['other']
... except Exception as e:
... print (e)
'other'
>>> del d['class']
>>> del d['name']
>>> d
<SortedDict {}>
>>> d['name'] = 'limodou'
>>> d.pop('other', 'default')
'default'
>>> d.pop('name')
'limodou'
>>> d
<SortedDict {}>
>>> d.update({'class':'py', 'attribute':'border'})
>>> d
<SortedDict {'attribute':'border', 'class':'py'}>
"""
def test_section():
"""
>>> s = Section('default', "#comment")
>>> print (s)
#comment
[default]
<BLANKLINE>
>>> s.name = 'limodou'
>>> s.add_comment('name', '#name')
>>> s.add_comment(comments='#change')
>>> print (s)
#change
[default]
#name
name = 'limodou'
<BLANKLINE>
>>> del s.name
>>> print (s)
#change
[default]
<BLANKLINE>
"""
def test_ini1():
"""
>>> x = Ini()
>>> s = x.add('default')
>>> print (x)
#coding=utf-8
[default]
<BLANKLINE>
>>> s['abc'] = 'name'
>>> print (x)
#coding=utf-8
[default]
abc = 'name'
<BLANKLINE>
"""
def test_ini2():
"""
>>> x = Ini()
>>> x['default'] = Section('default', "#comment")
>>> x.default.name = 'limodou'
>>> x.default['class'] = 'py'
>>> x.default.list = ['abc']
>>> print (x)
#coding=utf-8
#comment
[default]
name = 'limodou'
class = 'py'
list = ['abc']
<BLANKLINE>
    >>> x.default.list = ['cde'] # mutable objects merge their data, including dicts
>>> print (x.default.list)
['abc', 'cde']
>>> x.default.d = {'a':'a'}
>>> x.default.d = {'b':'b'}
>>> print (x.default.d)
{'a': 'a', 'b': 'b'}
"""
def test_gettext():
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = Ini(env={'_':_})
>>> x['default'] = Section('default')
>>> x.default.option = _('Hello')
>>> x.keys()
['_', 'gettext_lazy', 'set', 'default']
"""
def test_replace():
"""
>>> x = Ini()
>>> x['default'] = Section('default')
>>> x.default.option = ['a']
>>> x.default.option
['a']
>>> x.default.option = ['b']
>>> x.default.option
['a', 'b']
>>> x.default.add('option', ['c'], replace=True)
>>> x.default.option
['c']
>>> print (x.default)
[default]
option <= ['c']
<BLANKLINE>
"""
def test_set_var():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> print (x)
#coding=utf-8
[default]
key = 'name'
<BLANKLINE>
>>> x.set_var('default/key/name', 'hello')
True
>>> print (x)
#coding=utf-8
[default]
key = 'name'
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key')
'name'
>>> x.get_var('default/no')
>>> x.get_var('defaut/no', 'no')
'no'
>>> x.del_var('default/key')
True
>>> print (x)
#coding=utf-8
[default]
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key/name')
'hello'
>>> x.get_var('default')
<Section {'key/name':'hello'}>
"""
def test_update():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> d = {'default/key':'limodou', 'default/b':123}
>>> x.update(d)
>>> print (x)
#coding=utf-8
[default]
key = 'limodou'
b = 123
<BLANKLINE>
"""
def test_uni_print():
"""
>>> a = ()
>>> uni_prt(a, 'utf-8')
'()'
>>> a = (1,2)
>>> uni_prt(a)
'(1, 2)'
"""
def test_triple_string():
"""
>>> from io import StringIO
>>> buf = StringIO(\"\"\"
... #coding=utf8
... [DEFAULT]
... a = '''hello
... 中文
... '''
... \"\"\")
>>> x = Ini()
>>> x.read(buf)
>>> print (repr(x.DEFAULT.a))
'hello\\n\\u4e2d\\u6587\\n'
"""
def test_save():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = 'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = 'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = 'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = 'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_merge_data():
"""
>>> from uliweb.utils.pyini import merge_data
>>> a = [[1,2,3], [2,3,4], [4,5]]
>>> b = [{'a':[1,2], 'b':{'a':[1,2]}}, {'a':[2,3], 'b':{'a':['b'], 'b':2}}]
>>> c = [set([1,2,3]), set([2,4])]
>>> print (merge_data(a))
[1, 2, 3, 4, 5]
>>> print (merge_data(b))
{'a': [1, 2, 3], 'b': {'a': [1, 2, 'b'], 'b': 2}}
>>> print (merge_data(c))
{1, 2, 3, 4}
>>> print (merge_data([2]))
2
"""
def test_lazy():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = 'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = 'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> x.freeze()
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = 'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = 'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_multiple_read():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = 'abc'
... [other]
... option = default.option
... option1 = '{{option}} xxx'
... option2 = '{{default.option}}'
... option3 = '{{other.option}}'
... \"\"\")
>>> x.read(buf)
>>> buf1 = StringIO(\"\"\"
... [default]
... option = 'hello'
... \"\"\")
>>> x.read(buf1)
>>> x.freeze()
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = 'hello'
[other]
option = 'hello'
option1 = 'hello xxx'
option2 = 'hello'
option3 = 'hello'
<BLANKLINE>
"""
def test_chinese():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... option = '中文'
... option2 = _('中文')
... option3 = '{{option}}'
... [other]
... x = '中文 {{default.option}}'
... x1 = '中文 {{default.option}}'
... x2 = 'xbd {{default.option}}'
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=utf-8
[default]
option = '中文'
option2 = _('中文')
option3 = '中文'
[other]
x = '中文 中文'
x1 = '中文 中文'
x2 = 'xbd 中文'
<BLANKLINE>
>>> print (repr(x.other.x1))
'中文 中文'
>>> x.keys()
['_', 'gettext_lazy', 'set', 'default', 'other']
"""
def test_set():
"""
>>> from io import StringIO
>>> x = Ini()
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {1,2,3}
... set2 = set([1,2,3])
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=utf-8
[default]
set1 = {1, 2, 3}
set2 = {1, 2, 3}
<BLANKLINE>
>>> buf2 = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {5,3}
... \"\"\")
>>> x.read(buf2)
>>> print (x.default.set1)
{1, 2, 3, 5}
"""
| 22.295349 | 93 | 0.456973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,178 | 0.951384 |
7375e7557e967afa603dac5a97005866394c65de
| 797 |
py
|
Python
|
src/game.py
|
cwainwright/think-inside-the-box
|
dd537e72229a42f8f5f7074151799d3b07dfdfbd
|
[
"MIT"
] | null | null | null |
src/game.py
|
cwainwright/think-inside-the-box
|
dd537e72229a42f8f5f7074151799d3b07dfdfbd
|
[
"MIT"
] | null | null | null |
src/game.py
|
cwainwright/think-inside-the-box
|
dd537e72229a42f8f5f7074151799d3b07dfdfbd
|
[
"MIT"
] | null | null | null |
import threading
from queue import Queue
from blessed import Terminal
FPS = 60
class Game:
"""The top level class for the game"""
def __init__(self, manager_cls: type):
self.manager_cls = manager_cls
def run(self) -> None:
"""The run method for the game, handling the TUI"""
term = Terminal()
input_queue = Queue()
manager = self.manager_cls(input_queue, term)
manager_thread = threading.Thread(target=manager)
manager_thread.start()
with term.fullscreen(), term.raw(), term.hidden_cursor(), term.location():
while manager_thread.is_alive():
inp = term.inkey(1 / FPS)
if inp != '':
input_queue.put(inp)
print(term.normal + term.clear)
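# Usage sketch (hypothetical manager class): Game(MyManager).run() starts the
# manager on its own thread and forwards each keypress from the terminal into
# its input queue until the manager thread exits.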
| 24.90625 | 82 | 0.595985 | 713 | 0.894605 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.114178 |
73760d51c39df213af720ac9a7cf8ca846fad61d
| 1,366 |
py
|
Python
|
alice_scripts/skill.py
|
borzunov/alice_scripts
|
db4cd08226ae5429ec8083ffedc0edef8b44adeb
|
[
"MIT"
] | 27 |
2018-07-30T19:35:17.000Z
|
2021-09-12T18:18:22.000Z
|
alice_scripts/skill.py
|
borzunov/alice_scripts
|
db4cd08226ae5429ec8083ffedc0edef8b44adeb
|
[
"MIT"
] | 2 |
2018-11-01T09:49:48.000Z
|
2020-12-17T13:39:23.000Z
|
alice_scripts/skill.py
|
borzunov/alice_scripts
|
db4cd08226ae5429ec8083ffedc0edef8b44adeb
|
[
"MIT"
] | 7 |
2018-10-24T18:39:30.000Z
|
2021-11-25T13:55:41.000Z
|
import logging
import threading
import flask
from .requests import Request
__all__ = ['Skill']
class Skill(flask.Flask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sessions = {}
self._session_lock = threading.RLock()
def script(self, generator):
@self.route("/", methods=['POST'])
def handle_post():
flask.g.request = Request(flask.request.get_json())
logging.debug('Request: %r', flask.g.request)
content = self._switch_state(generator)
response = {
'version': flask.g.request['version'],
'session': flask.g.request['session'],
'response': content,
}
logging.debug('Response: %r', response)
return flask.jsonify(response)
return generator
def _switch_state(self, generator):
session_id = flask.g.request['session']['session_id']
with self._session_lock:
if session_id not in self._sessions:
state = self._sessions[session_id] = generator()
else:
state = self._sessions[session_id]
content = next(state)
if content['end_session']:
with self._session_lock:
del self._sessions[session_id]
return content
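# Usage sketch (hypothetical skill, inferred from _switch_state): the
# decorated generator takes no arguments and yields Alice "response"
# payloads; the session ends when 'end_session' is true.
#
#     skill = Skill(__name__)
#
#     @skill.script
#     def script():
#         yield {'text': 'Hello!', 'end_session': False}
#         yield {'text': 'Bye!', 'end_session': True}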
| 26.784314 | 64 | 0.572474 | 1,264 | 0.925329 | 0 | 0 | 518 | 0.379209 | 0 | 0 | 123 | 0.090044 |
737c8fcb95ea540c79cfba48d2fa31a9bd9f57a9
| 1,227 |
py
|
Python
|
src/main/fileextractors/fileextractor.py
|
michael-stanin/Subtitles-Distributor
|
e4638d952235f96276729239596dc31d9ccc2ee1
|
[
"MIT"
] | 1 |
2017-06-03T19:42:05.000Z
|
2017-06-03T19:42:05.000Z
|
src/main/fileextractors/fileextractor.py
|
michael-stanin/Subtitles-Distributor
|
e4638d952235f96276729239596dc31d9ccc2ee1
|
[
"MIT"
] | null | null | null |
src/main/fileextractors/fileextractor.py
|
michael-stanin/Subtitles-Distributor
|
e4638d952235f96276729239596dc31d9ccc2ee1
|
[
"MIT"
] | null | null | null |
import logging
from main.fileextractors.compressedfile import get_compressed_file
from main.utilities.fileutils import dir_path
from main.utilities.subtitlesadjuster import ArchiveAdjuster
class FileExtractor:
def __init__(self, subname, movfile):
self.sn, self.mn = subname, movfile
self.subzip = get_compressed_file(self.sn)
self.log = logging.getLogger(__name__)
def run(self):
if self.subzip:
return self._extractfile() and self._adjust_subs()
return False
def _adjust_subs(self):
return ArchiveAdjuster(self.subzip, self.sn, self.mn).adjust()
def _extractfile(self):
self.log.info("Start extracting %s to: %s", self.sn, dir_path(self.mn))
extracted = self._extract_subtitles_to_movie_dir()
self.log.info("End extracting %s to: %s - with result %s", self.sn, dir_path(self.mn), repr(extracted))
return extracted
def _extract_subtitles_to_movie_dir(self):
extracted = False
try:
self.subzip.accessor.extractall(dir_path(self.mn))
extracted = True
except Exception as e:
self.log.exception("Failed to extract: %s", e)
return extracted
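

# --- Usage sketch (added; paths are hypothetical) ---
# FileExtractor expects the path of a subtitle archive and the path of the
# movie file into whose directory the subtitles should be extracted.
if __name__ == '__main__':
    extractor = FileExtractor('/tmp/Some.Movie.2017.srt.zip',
                              '/movies/Some.Movie.2017.mkv')
    if extractor.run():
        print('subtitles extracted and adjusted')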
| 35.057143 | 111 | 0.673187 | 1,035 | 0.843521 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.07661 |
7380bfdbf0d2f900bab496e56a02fad07f1e4ac8
| 476 |
py
|
Python
|
cjson/body.py
|
tslight/cjson
|
1ab08400347e5ff33d3efd9e9879a54a9066a80c
|
[
"0BSD"
] | null | null | null |
cjson/body.py
|
tslight/cjson
|
1ab08400347e5ff33d3efd9e9879a54a9066a80c
|
[
"0BSD"
] | null | null | null |
cjson/body.py
|
tslight/cjson
|
1ab08400347e5ff33d3efd9e9879a54a9066a80c
|
[
"0BSD"
] | null | null | null |
import curses
from get_json import get_json
def body(screen):
div = curses.newwin(curses.LINES - 2, curses.COLS, 1, 0)
div.box() # draw border around container window
    # use a sub-window so we don't clobber the container window's border.
txt = div.subwin(curses.LINES - 5, curses.COLS - 4, 2, 2)
# update internal window data structures
screen.noutrefresh()
div.noutrefresh()
# redraw the screen
curses.doupdate()
return div, txt
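

# --- Usage sketch (added) ---
# curses.wrapper initializes and restores the terminal; `demo` is illustrative.
def demo(screen):
    div, txt = body(screen)
    txt.addstr(0, 0, 'hello from the sub-window')
    txt.refresh()
    screen.getch()  # wait for a keypress before restoring the terminal


if __name__ == '__main__':
    curses.wrapper(demo)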
| 29.75 | 77 | 0.684874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.355042 |
7382da4a97a03a9bab8ad1771db18f2352be8d95
| 5,518 |
py
|
Python
|
SDis_Self-Training/plotting/createScatterPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
SDis_Self-Training/plotting/createScatterPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
SDis_Self-Training/plotting/createScatterPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
import sys, os, seaborn as sns, rasterio, pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config.definitions import ROOT_DIR, ancillary_path, city,year
attr_value ="totalpop"
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
#prP = ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)
def scatterplot(prP):
cp = "C:/Users/NM12LQ/OneDrive - Aalborg Universitet/PopNetV2_backup/data_prep/ams_ProjectData/temp_tif/ams_CLC_2012_2018Reclas3.tif"
srcC= rasterio.open(cp)
corine = srcC.read(1)
name = prP.split(".tif")[0].split("/")[-1]
print(name)
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
srcPR= rasterio.open(prP)
popPR = srcPR.read(1)
popPR[(np.where(popPR <= -9999))] = 0
print(popPR.min(),popPR.max(), popPR.mean())
cr=corine.flatten()
x=popGT.flatten()
y=popPR.flatten()
df = pd.DataFrame(data={"gt": x, "predictions":y, "cr":cr})
plt.figure(figsize=(20,20))
g= sns.lmplot(data=df, x="gt", y="predictions", hue="cr", palette=["#0d2dc1","#ff9c1c","#71b951","#24f33d","#90308f", "#a8a8a8"],ci = None, order=2, scatter_kws={"s":0.5, "alpha": 0.5}, line_kws={"lw":2, "alpha": 0.5}, legend=False)
plt.legend(title= "Land Cover", labels= ['Water','Urban Fabric', 'Agriculture', 'Green Spaces','Industry','Transportation' ], loc='lower right', fontsize=5)
plt.title('{0}'.format( name), fontsize=11)
# Set x-axis label
plt.xlabel('Ground Truth (persons)', fontsize=11)
# Set y-axis label
plt.ylabel('Predictions (persons)', fontsize=11)
    # alternative axis settings for total population:
    #plt.xscale('log')
    #plt.yscale('log')
    # alternative axis limits for mobile adults:
    #plt.xlim((0,200))
    #plt.ylim((-100,500))
plt.axis('square')
plt.xlim((0,400))
plt.ylim((0,350))
plt.tight_layout()
#plt.show()
plt.savefig(ROOT_DIR + "/Evaluation/{0}/ScatterPlots/SP4_{2}.png".format(city,attr_value, name),format='png',dpi=300)
evalFiles = [#gtP,
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
]
evalFilesMAEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/mae_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/mae_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/mae_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_3AIL5_12IL_it10_ag_{1}.tif".format(city,attr_value)]
evalFilesPEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/div_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/div_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/div_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)]
for i in evalFiles:
scatterplot(i)
| 66.481928 | 237 | 0.702791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,983 | 0.540594 |
7382ea8531ce700712937018018e99ffb94c7c1d
| 562 |
py
|
Python
|
codepack/service/delivery_service/delivery_service.py
|
ihnokim/codepack
|
9d043b2db977de503faf7f5f1370c1424c6cb19f
|
[
"MIT"
] | 2 |
2021-04-18T17:51:49.000Z
|
2021-06-22T10:21:30.000Z
|
codepack/service/delivery_service/delivery_service.py
|
ihnokim/codepack
|
9d043b2db977de503faf7f5f1370c1424c6cb19f
|
[
"MIT"
] | 24 |
2021-12-23T18:02:01.000Z
|
2022-03-27T03:03:38.000Z
|
codepack/service/delivery_service/delivery_service.py
|
ihnokim/codepack
|
9d043b2db977de503faf7f5f1370c1424c6cb19f
|
[
"MIT"
] | 1 |
2021-09-13T12:56:40.000Z
|
2021-09-13T12:56:40.000Z
|
import abc
from codepack.service.service import Service
class DeliveryService(Service, metaclass=abc.ABCMeta):
def __init__(self):
super().__init__()
@abc.abstractmethod
def send(self, id, serial_number, item=None, timestamp=None):
"""send item"""
@abc.abstractmethod
def receive(self, serial_number):
"""receive item"""
@abc.abstractmethod
def cancel(self, serial_number):
"""cancel delivery"""
@abc.abstractmethod
def check(self, serial_number):
"""check arrival of delivery"""
| 23.416667 | 65 | 0.658363 | 503 | 0.895018 | 0 | 0 | 374 | 0.66548 | 0 | 0 | 85 | 0.151246 |
7386b0f7b4c54bd5b874bd75d2eaef2e32ff4344
| 23,056 |
py
|
Python
|
nengo/tests/test_learning_rules.py
|
pedrombmachado/nengo
|
abc85e1a75ce2f980e19eef195d98081f95efd28
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/tests/test_learning_rules.py
|
pedrombmachado/nengo
|
abc85e1a75ce2f980e19eef195d98081f95efd28
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/tests/test_learning_rules.py
|
pedrombmachado/nengo
|
abc85e1a75ce2f980e19eef195d98081f95efd28
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import pytest
import nengo
from nengo.builder import Builder
from nengo.builder.operator import Reset, Copy
from nengo.builder.signal import Signal
from nengo.dists import UniformHypersphere
from nengo.exceptions import ValidationError
from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja
from nengo.processes import WhiteSignal
from nengo.synapses import Alpha, Lowpass
def best_weights(weight_data):
return np.argmax(np.sum(np.var(weight_data, axis=0), axis=0))
def _test_pes(
Simulator,
nl,
plt,
seed,
allclose,
pre_neurons=False,
post_neurons=False,
weight_solver=False,
vin=np.array([0.5, -0.5]),
vout=None,
n=200,
function=None,
transform=np.array(1.0),
rate=1e-3,
):
vout = np.array(vin) if vout is None else vout
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node(output=vin)
target = nengo.Node(output=vout)
pre = nengo.Ensemble(n, dimensions=stim.size_out)
post = nengo.Ensemble(n, dimensions=stim.size_out)
error = nengo.Ensemble(n, dimensions=target.size_out)
nengo.Connection(stim, pre)
postslice = post[: target.size_out] if target.size_out < stim.size_out else post
pre = pre.neurons if pre_neurons else pre
post = post.neurons if post_neurons else postslice
conn = nengo.Connection(
pre,
post,
function=function,
transform=transform,
learning_rule_type=PES(rate),
)
if weight_solver:
conn.solver = nengo.solvers.LstsqL2(weights=True)
nengo.Connection(target, error, transform=-1)
nengo.Connection(postslice, error)
nengo.Connection(error, conn.learning_rule)
post_p = nengo.Probe(postslice, synapse=0.03)
error_p = nengo.Probe(error, synapse=0.03)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
with Simulator(model) as sim:
sim.run(0.5)
t = sim.trange()
weights = sim.data[weights_p]
plt.subplot(211)
plt.plot(t, sim.data[post_p])
plt.ylabel("Post decoded value")
plt.subplot(212)
plt.plot(t, sim.data[error_p])
plt.ylabel("Error decoded value")
plt.xlabel("Time (s)")
tend = t > 0.4
assert allclose(sim.data[post_p][tend], vout, atol=0.05)
assert allclose(sim.data[error_p][tend], 0, atol=0.05)
assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
def test_pes_ens_ens(Simulator, nl_nodirect, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(Simulator, nl_nodirect, plt, seed, allclose, function=function)
def test_pes_weight_solver(Simulator, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, function=function, weight_solver=True
)
def test_pes_ens_slice(Simulator, plt, seed, allclose):
vin = [0.5, -0.5]
vout = [vin[0] ** 2 + vin[1] ** 2]
function = lambda x: [x[0] - x[1]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, vin=vin, vout=vout, function=function
)
def test_pes_neuron_neuron(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=4e-4, size=(n, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=True,
n=n,
transform=initial_weights,
rate=7e-4,
)
def test_pes_neuron_ens(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=1e-4, size=(2, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=False,
n=n,
transform=initial_weights,
)
def test_pes_transform(Simulator, seed, allclose):
"""Test behaviour of PES when function and transform both defined."""
n = 200
# error must be with respect to transformed vector (conn.size_out)
T = np.asarray([[0.5], [-0.5]]) # transform to output
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(output=[1])
a = nengo.Ensemble(n, dimensions=1)
b = nengo.Node(size_in=2)
e = nengo.Node(size_in=1)
nengo.Connection(u, a)
learned_conn = nengo.Connection(
a,
b,
function=lambda x: [0],
transform=T,
learning_rule_type=nengo.PES(learning_rate=1e-3),
)
assert T.shape[0] == learned_conn.size_out
assert T.shape[1] == learned_conn.size_mid
nengo.Connection(b[0], e, synapse=None)
nengo.Connection(nengo.Node(output=-1), e)
nengo.Connection(e, learned_conn.learning_rule, transform=T, synapse=None)
p_b = nengo.Probe(b, synapse=0.05)
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.7
assert allclose(sim.data[p_b][tend], [1, -1], atol=1e-2)
def test_pes_multidim_error(Simulator, seed):
"""Test that PES works on error connections mapping from N to 1 dims.
Note that the transform is applied before the learning rule, so the error
signal should be 1-dimensional.
"""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[0])
ens1 = nengo.Ensemble(20, 3)
ens2 = nengo.Ensemble(10, 1)
# Case 1: ens -> ens, weights=False
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=False),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 2: ens -> ens, weights=True
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=True),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 3: neurons -> ens
conn = nengo.Connection(
ens1.neurons,
ens2,
transform=np.ones((1, ens1.n_neurons)),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
with Simulator(net) as sim:
sim.run(0.01)
@pytest.mark.parametrize("pre_synapse", [0, Lowpass(tau=0.05), Alpha(tau=0.005)])
def test_pes_synapse(Simulator, seed, pre_synapse, allclose):
rule = PES(pre_synapse=pre_synapse)
with nengo.Network(seed=seed) as model:
stim = nengo.Node(output=WhiteSignal(0.5, high=10))
x = nengo.Ensemble(100, 1)
nengo.Connection(stim, x, synapse=None)
conn = nengo.Connection(x, x, learning_rule_type=rule)
p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse)
p_pes = nengo.Probe(conn.learning_rule, "activities")
with Simulator(model) as sim:
sim.run(0.5)
assert allclose(sim.data[p_neurons][1:, :], sim.data[p_pes][:-1, :])
@pytest.mark.parametrize("weights", [False, True])
def test_pes_recurrent_slice(Simulator, seed, weights, allclose):
"""Test that PES works on recurrent connections from N to 1 dims."""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[-1])
stim = nengo.Node(output=[0, 0])
post = nengo.Ensemble(50, 2, radius=2)
nengo.Connection(stim, post)
conn = nengo.Connection(
post,
post[1],
function=lambda x: 0.0,
solver=nengo.solvers.LstsqL2(weights=weights),
learning_rule_type=nengo.PES(learning_rate=5e-4),
)
nengo.Connection(err, conn.learning_rule)
p = nengo.Probe(post, synapse=0.025)
with Simulator(net) as sim:
sim.run(0.2)
# Learning rule should drive second dimension high, but not first
assert allclose(sim.data[p][-10:, 0], 0, atol=0.2)
assert np.all(sim.data[p][-10:, 1] > 0.8)
def test_pes_cycle(Simulator):
"""Test that PES works when connection output feeds back into error."""
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Node(size_in=1)
c = nengo.Connection(a, b, synapse=None, learning_rule_type=nengo.PES())
nengo.Connection(b, c.learning_rule, synapse=None)
with Simulator(net):
# just checking that this builds without error
pass
@pytest.mark.parametrize(
"rule_type, solver",
[
(BCM(learning_rate=1e-8), False),
(Oja(learning_rate=1e-5), False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], True),
],
)
def test_unsupervised(Simulator, rule_type, solver, seed, rng, plt, allclose):
n = 200
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(WhiteSignal(0.5, high=10), size_out=2)
a = nengo.Ensemble(n, dimensions=2)
b = nengo.Ensemble(n + 1, dimensions=2)
nengo.Connection(u, a)
if solver:
conn = nengo.Connection(a, b, solver=nengo.solvers.LstsqL2(weights=True))
else:
initial_weights = rng.uniform(high=1e-3, size=(b.n_neurons, a.n_neurons))
conn = nengo.Connection(a.neurons, b.neurons, transform=initial_weights)
conn.learning_rule_type = rule_type
inp_p = nengo.Probe(u)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
ap = nengo.Probe(a, synapse=0.03)
up = nengo.Probe(b, synapse=0.03)
with Simulator(m, seed=seed + 1) as sim:
sim.run(0.5)
t = sim.trange()
plt.subplot(2, 1, 1)
plt.plot(t, sim.data[inp_p], label="Input")
plt.plot(t, sim.data[ap], label="Pre")
plt.plot(t, sim.data[up], label="Post")
plt.legend(loc="best", fontsize="x-small")
plt.subplot(2, 1, 2)
best_ix = best_weights(sim.data[weights_p])
plt.plot(sim.trange(sample_every=0.01), sim.data[weights_p][..., best_ix])
plt.xlabel("Time (s)")
plt.ylabel("Weights")
assert not allclose(
sim.data[weights_p][0], sim.data[weights_p][-1], record_rmse=False
)
def learning_net(learning_rule=nengo.PES, net=None, rng=np.random):
net = nengo.Network() if net is None else net
with net:
if learning_rule is nengo.PES:
learning_rule_type = learning_rule(learning_rate=1e-5)
else:
learning_rule_type = learning_rule()
u = nengo.Node(output=1.0)
pre = nengo.Ensemble(10, dimensions=1)
post = nengo.Ensemble(10, dimensions=1)
initial_weights = rng.uniform(high=1e-3, size=(pre.n_neurons, post.n_neurons))
conn = nengo.Connection(
pre.neurons,
post.neurons,
transform=initial_weights,
learning_rule_type=learning_rule_type,
)
if learning_rule is nengo.PES:
err = nengo.Ensemble(10, dimensions=1)
nengo.Connection(u, err)
nengo.Connection(err, conn.learning_rule)
net.activity_p = nengo.Probe(pre.neurons, synapse=0.01)
net.weights_p = nengo.Probe(conn, "weights", synapse=None, sample_every=0.01)
return net
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_dt_dependence(Simulator, plt, learning_rule, seed, rng, allclose):
"""Learning rules should work the same regardless of dt."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
trans_data = []
    # Using dts at or above tau_ref (0.002 by default) causes learning to
    # differ due to lowered presynaptic firing rates
dts = (0.0001, 0.001)
colors = ("b", "g", "r")
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
for c, dt in zip(colors, dts):
with Simulator(m, dt=dt) as sim:
sim.run(0.1)
trans_data.append(sim.data[m.weights_p])
best_ix = best_weights(sim.data[m.weights_p])
ax1.plot(
sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c=c
)
ax2.plot(sim.trange(), sim.data[m.activity_p], c=c)
ax1.set_xlim(right=sim.trange()[-1])
ax1.set_ylabel("Connection weight")
ax2.set_xlim(right=sim.trange()[-1])
ax2.set_ylabel("Presynaptic activity")
assert allclose(trans_data[0], trans_data[1], atol=3e-3)
assert not allclose(
sim.data[m.weights_p][0], sim.data[m.weights_p][-1], record_rmse=False
)
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_reset(Simulator, learning_rule, plt, seed, rng, allclose):
"""Make sure resetting learning rules resets all state."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
with Simulator(m) as sim:
sim.run(0.1)
sim.run(0.2)
first_t = sim.trange()
first_t_trans = sim.trange(sample_every=0.01)
first_activity_p = np.array(sim.data[m.activity_p], copy=True)
first_weights_p = np.array(sim.data[m.weights_p], copy=True)
sim.reset()
sim.run(0.3)
plt.subplot(2, 1, 1)
plt.ylabel("Neural activity")
plt.plot(first_t, first_activity_p, c="b")
plt.plot(sim.trange(), sim.data[m.activity_p], c="g")
plt.subplot(2, 1, 2)
plt.ylabel("Connection weight")
best_ix = best_weights(first_weights_p)
plt.plot(first_t_trans, first_weights_p[..., best_ix], c="b")
plt.plot(sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c="g")
assert allclose(sim.trange(), first_t)
assert allclose(sim.trange(sample_every=0.01), first_t_trans)
assert allclose(sim.data[m.activity_p], first_activity_p)
assert allclose(sim.data[m.weights_p], first_weights_p)
def test_learningruletypeparam():
"""LearningRuleTypeParam must be one or many learning rules."""
class Test:
lrp = LearningRuleTypeParam("lrp", default=None)
inst = Test()
assert inst.lrp is None
inst.lrp = Oja()
assert isinstance(inst.lrp, Oja)
inst.lrp = [Oja(), Oja()]
for lr in inst.lrp:
assert isinstance(lr, Oja)
# Non-LR no good
with pytest.raises(ValueError):
inst.lrp = "a"
# All elements in list must be LR
with pytest.raises(ValueError):
inst.lrp = [Oja(), "a", Oja()]
def test_learningrule_attr(seed):
"""Test learning_rule attribute on Connection"""
def check_rule(rule, conn, rule_type):
assert rule.connection is conn and rule.learning_rule_type is rule_type
with nengo.Network(seed=seed):
a, b, e = [nengo.Ensemble(10, 2) for i in range(3)]
T = np.ones((10, 10))
r1 = PES()
c1 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r1)
check_rule(c1.learning_rule, c1, r1)
r2 = [PES(), BCM()]
c2 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r2, transform=T)
assert isinstance(c2.learning_rule, list)
for rule, rule_type in zip(c2.learning_rule, r2):
check_rule(rule, c2, rule_type)
r3 = dict(oja=Oja(), bcm=BCM())
c3 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r3, transform=T)
assert isinstance(c3.learning_rule, dict)
assert set(c3.learning_rule) == set(r3) # assert same keys
for key in r3:
check_rule(c3.learning_rule[key], c3, r3[key])
def test_voja_encoders(Simulator, nl_nodirect, rng, seed, allclose):
"""Tests that voja changes active encoders to the input."""
n = 200
learned_vector = np.asarray([0.3, -0.4, 0.6])
learned_vector /= np.linalg.norm(learned_vector)
n_change = n // 2 # modify first half of the encoders
# Set the first half to always fire with random encoders, and the
# remainder to never fire due to their encoder's dot product with the input
intercepts = np.asarray([-1] * n_change + [0.99] * (n - n_change))
rand_encoders = UniformHypersphere(surface=True).sample(
n_change, len(learned_vector), rng=rng
)
encoders = np.append(rand_encoders, [-learned_vector] * (n - n_change), axis=0)
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(
n,
dimensions=len(learned_vector),
intercepts=intercepts,
encoders=encoders,
max_rates=nengo.dists.Uniform(300.0, 400.0),
radius=2.0,
) # to test encoder scaling
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(learning_rate=1e-1)
)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
p_enc_ens = nengo.Probe(x, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
tend = t > 0.5
# Voja's rule relies on knowing exactly how the encoders were scaled
# during the build process, because it modifies the scaled_encoders signal
# proportional to this factor. Therefore, we should check that its
# assumption actually holds.
encoder_scale = (sim.data[x].gain / x.radius)[:, np.newaxis]
assert allclose(sim.data[x].encoders, sim.data[x].scaled_encoders / encoder_scale)
# Check that the last half kept the same encoders throughout the simulation
assert allclose(sim.data[p_enc][0, n_change:], sim.data[p_enc][:, n_change:])
# and that they are also equal to their originally assigned value
assert allclose(
sim.data[p_enc][0, n_change:] / encoder_scale[n_change:], -learned_vector
)
# Check that the first half converged to the input
assert allclose(
sim.data[p_enc][tend, :n_change] / encoder_scale[:n_change],
learned_vector,
atol=0.01,
)
# Check that encoders probed from ensemble equal encoders probed from Voja
assert allclose(sim.data[p_enc], sim.data[p_enc_ens])
def test_voja_modulate(Simulator, nl_nodirect, seed, allclose):
"""Tests that voja's rule can be modulated on/off."""
n = 200
learned_vector = np.asarray([0.5])
def control_signal(t):
"""Modulates the learning on/off."""
return 0 if t < 0.5 else -1
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
control = nengo.Node(output=control_signal)
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(n, dimensions=len(learned_vector))
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(post_synapse=None)
)
nengo.Connection(control, conn.learning_rule, synapse=None)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.5
# Check that encoders stop changing after 0.5s
assert allclose(sim.data[p_enc][tend], sim.data[p_enc][-1])
# Check that encoders changed during first 0.5s
i = np.where(tend)[0][0] # first time point after changeover
assert not allclose(sim.data[p_enc][0], sim.data[p_enc][i], record_rmse=False)
def test_frozen():
"""Test attributes inherited from FrozenObject"""
a = PES(learning_rate=2e-3, pre_synapse=4e-3)
b = PES(learning_rate=2e-3, pre_synapse=4e-3)
c = PES(learning_rate=2e-3, pre_synapse=5e-3)
assert hash(a) == hash(a)
assert hash(b) == hash(b)
assert hash(c) == hash(c)
assert a == b
assert hash(a) == hash(b)
assert a != c
assert hash(a) != hash(c) # not guaranteed, but highly likely
assert b != c
assert hash(b) != hash(c) # not guaranteed, but highly likely
with pytest.raises((ValueError, RuntimeError)):
a.learning_rate = 1e-1
def test_pes_direct_errors():
"""Test that applying a learning rule to a direct ensemble errors."""
with nengo.Network():
pre = nengo.Ensemble(10, 1, neuron_type=nengo.Direct())
post = nengo.Ensemble(10, 1)
conn = nengo.Connection(pre, post)
with pytest.raises(ValidationError):
conn.learning_rule_type = nengo.PES()
def test_custom_type(Simulator, allclose):
"""Test with custom learning rule type.
A custom learning type may have ``size_in`` not equal to 0, 1, or None.
"""
class TestRule(nengo.learning_rules.LearningRuleType):
modifies = "decoders"
def __init__(self):
super().__init__(1.0, size_in=3)
@Builder.register(TestRule)
def build_test_rule(model, test_rule, rule):
error = Signal(np.zeros(rule.connection.size_in))
model.add_op(Reset(error))
model.sig[rule]["in"] = error[: rule.size_in]
model.add_op(Copy(error, model.sig[rule]["delta"]))
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Ensemble(10, 1)
conn = nengo.Connection(
a.neurons, b, transform=np.zeros((1, 10)), learning_rule_type=TestRule()
)
err = nengo.Node([1, 2, 3])
nengo.Connection(err, conn.learning_rule, synapse=None)
p = nengo.Probe(conn, "weights")
with Simulator(net) as sim:
sim.run(sim.dt * 5)
assert allclose(sim.data[p][:, 0, :3], np.outer(np.arange(1, 6), np.arange(1, 4)))
assert allclose(sim.data[p][:, :, 3:], 0)
@pytest.mark.parametrize("LearningRule", (nengo.PES, nengo.BCM, nengo.Voja, nengo.Oja))
def test_tau_deprecation(LearningRule):
params = [
("pre_tau", "pre_synapse"),
("post_tau", "post_synapse"),
("theta_tau", "theta_synapse"),
]
kwargs = {}
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
kwargs[p0] = i
with pytest.warns(DeprecationWarning):
l_rule = LearningRule(learning_rate=0, **kwargs)
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
assert getattr(l_rule, p0) == i
assert getattr(l_rule, p1) == Lowpass(i)
def test_slicing(Simulator, seed, allclose):
with nengo.Network(seed=seed) as model:
a = nengo.Ensemble(50, 1)
b = nengo.Ensemble(30, 2)
conn = nengo.Connection(
a, b, learning_rule_type=PES(), function=lambda x: (0, 0)
)
nengo.Connection(nengo.Node(1.0), a)
err1 = nengo.Node(lambda t, x: x - 0.75, size_in=1)
nengo.Connection(b[0], err1)
nengo.Connection(err1, conn.learning_rule[0])
err2 = nengo.Node(lambda t, x: x + 0.5, size_in=1)
nengo.Connection(b[1], err2)
nengo.Connection(err2, conn.learning_rule[1])
p = nengo.Probe(b, synapse=0.03)
with Simulator(model) as sim:
sim.run(1.0)
t = sim.trange() > 0.8
assert allclose(sim.data[p][t, 0], 0.75, atol=0.15)
assert allclose(sim.data[p][t, 1], -0.5, atol=0.15)
| 33.126437 | 88 | 0.627906 | 226 | 0.009802 | 0 | 0 | 6,866 | 0.297797 | 0 | 0 | 2,910 | 0.126214 |
7387856755f04e2fce184f38847164fa54bfabcd
| 922 |
py
|
Python
|
joplin_web/api.py
|
foxmask/joplin-web
|
eb261e515b9ecf9c878a1d6492aba06ddf6d97c6
|
[
"BSD-3-Clause"
] | 382 |
2018-08-20T07:51:11.000Z
|
2022-03-11T14:52:53.000Z
|
joplin_web/api.py
|
marph91/joplin-web
|
eb261e515b9ecf9c878a1d6492aba06ddf6d97c6
|
[
"BSD-3-Clause"
] | 71 |
2018-10-01T07:01:20.000Z
|
2022-02-22T07:17:47.000Z
|
joplin_web/api.py
|
marph91/joplin-web
|
eb261e515b9ecf9c878a1d6492aba06ddf6d97c6
|
[
"BSD-3-Clause"
] | 67 |
2018-10-01T07:09:50.000Z
|
2022-03-19T09:30:09.000Z
|
# coding: utf-8
"""
joplin-web
"""
from django.conf import settings
from django.http.response import JsonResponse
from django.urls import reverse
from joplin_api import JoplinApiSync
from joplin_web.utils import nb_notes_by_tag, nb_notes_by_folder
import logging
from rich import console
console = console.Console()
logger = logging.getLogger("joplin_web.app")
joplin = JoplinApiSync(token=settings.JOPLIN_WEBCLIPPER_TOKEN)
def get_folders(request):
"""
    List all the folders with their note counts.
    :param request: the incoming HTTP request
    :return: JsonResponse with the folders data
"""
res = joplin.get_folders()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_folder(json_data)
logger.debug(data)
return JsonResponse(data, safe=False)
def get_tags(request):
res = joplin.get_tags()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_tag(json_data)
return JsonResponse(data, safe=False)
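

# --- Wiring sketch (added; URL paths are illustrative) ---
# These are plain Django function views, so a minimal urls.py entry is
# assumed to look like this:
#
# from django.urls import path
# from joplin_web import api
#
# urlpatterns = [
#     path('api/folders/', api.get_folders),
#     path('api/tags/', api.get_tags),
# ]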
| 24.918919 | 64 | 0.729935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.145336 |
738921989a2bdec68647069a9b524b0c70e83266
| 1,449 |
py
|
Python
|
blousebrothers/confs/management/commands/update_stats.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 1 |
2022-01-27T11:58:10.000Z
|
2022-01-27T11:58:10.000Z
|
blousebrothers/confs/management/commands/update_stats.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 5 |
2021-03-19T00:01:54.000Z
|
2022-03-11T23:46:21.000Z
|
blousebrothers/confs/management/commands/update_stats.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | null | null | null |
import numpy as np
from django.core.management.base import BaseCommand
from oscar.core.loading import get_classes
StatsSpe, StatsItem, Test, Speciality, Item, Conference = get_classes(
'confs.models',
(
"StatsSpe", "StatsItem", "Test", "Speciality", "Item", "Conference"
)
)
class Command(BaseCommand):
    help = 'Evaluate new stats for all specialities and items'
def handle(self, *args, **options):
for spe in Speciality.objects.all():
stats = StatsSpe.objects.get_or_create(speciality=spe)[0]
l = [
test.score for test
in Test.objects.filter(conf__specialities__in=[spe], finished=True).all()
]
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for item in Item.objects.all():
stats = StatsItem.objects.get_or_create(item=item)[0]
l = [
test.score for test
in Test.objects.filter(conf__items__in=[item], finished=True).all()
]
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for conf in Conference.objects.filter(tests__isnull=False, for_sale=True).distinct():
conf.update_stats()
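

# --- Invocation note (added) ---
# As a Django management command, this module runs via manage.py under the
# name of its file:
#
#   python manage.py update_stats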
| 32.2 | 93 | 0.569358 | 1,148 | 0.792271 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.082816 |
738989c5716d2f2f6127adc48d74596868c20221
| 6,403 |
py
|
Python
|
ssd_project/functions/multiboxloss.py
|
ilijagjorgjiev/SSD_FascadeParsing
|
a31346a3828f3bda9687a9013a40389dab446cef
|
[
"MIT"
] | 1 |
2020-09-27T03:57:18.000Z
|
2020-09-27T03:57:18.000Z
|
ssd_project/functions/multiboxloss.py
|
ilijagjorgjiev/SSD_FascadeParsing
|
a31346a3828f3bda9687a9013a40389dab446cef
|
[
"MIT"
] | null | null | null |
ssd_project/functions/multiboxloss.py
|
ilijagjorgjiev/SSD_FascadeParsing
|
a31346a3828f3bda9687a9013a40389dab446cef
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
import collections
import numpy as np
import itertools
from ssd_project.utils.utils import *
from ssd_project.utils.global_variables import *
device = DEVICE
class MultiBoxLoss(nn.Module):
"""
    For our SSD we use a combined loss function called MultiBoxLoss.
    The loss is split into:
    1. Localization loss, from the predicted bounding boxes with respect to the ground truth objects
    2. Confidence loss, from the predicted class scores with respect to the ground truth object classes
"""
def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
super(MultiBoxLoss, self).__init__()
self.priors_cxcy = priors_cxcy
self.priors_xy = decode_center_size(self.priors_cxcy)
self.threshold = threshold
self.neg_pos_ratio = neg_pos_ratio
self.alpha = alpha
#L1 loss is used for the predicted localizations w.r.t ground truth.
self.smooth_l1 = nn.L1Loss()
#CrossEntropyLoss is used for the predicted confidence scores w.r.t ground truth.
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')  # per-element losses ('reduce=False' is deprecated)
    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        Each time the model predicts new localization and confidence scores,
        they are compared to the ground truth objects and classes.
        Args:
            :predicted_locs: predicted localizations from the model w.r.t. prior-boxes. Shape: (batch_size, 8732, 4)
            :predicted_scores: confidence scores for each class for each localization box. Shape: (batch_size, 8732, n_classes)
            :boxes: ground truth objects per image: Shape(batch_size)
            :labels: ground truth classes per image: Shape(batch_size)
        Return:
            Loss - a scalar
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = predicted_locs.size(0)
num_priors = self.priors_cxcy.size(0)
num_classes = predicted_scores.size(2)
assert num_priors == predicted_locs.size(1) == predicted_scores.size(1)
true_locs, true_classes = self.match_priors_objs(boxes, labels, num_priors, num_classes, batch_size)
# Identify priors that are positive (object/non-background)
non_bck_priors = true_classes != 0 # (N, 8732)
# LOCALIZATION LOSS
# Localization loss is computed only over positive (non-background) priors
loc_loss = self.smooth_l1(predicted_locs[non_bck_priors], true_locs[non_bck_priors]) # (), scalar
# CONFIDENCE LOSS
# Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
# Number of positive and hard-negative priors per image
num_positives = non_bck_priors.sum(dim=1) # (N)
num_hard_negatives = self.neg_pos_ratio * num_positives # (N)
# First, find the loss for all priors
confidence_loss = self.cross_entropy(predicted_scores.view(-1, num_classes), true_classes.view(-1)) # (N * 8732)
confidence_loss = confidence_loss.view(batch_size, num_priors) # (N, 8732)
# We already know which priors are positive
confidence_loss_non_bck = confidence_loss[non_bck_priors]
# Next, find which priors are hard-negative
# To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
confidence_loss_negative = confidence_loss.clone() # (N, 8732)
confidence_loss_negative[non_bck_priors] = 0. # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
confidence_loss_negative, _ = confidence_loss_negative.sort(dim=1, descending=True) # (N, 8732), sorted by decreasing hardness
hardness_ranks = torch.LongTensor(range(num_priors)).unsqueeze(0).expand_as(confidence_loss_negative).to(device) # (N, 8732)
hard_negatives = hardness_ranks < num_hard_negatives.unsqueeze(1) # (N, 8732)
confidence_loss_hard_neg = confidence_loss_negative[hard_negatives] # (sum(n_hard_negatives))
# As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
conf_loss = (confidence_loss_hard_neg.sum() + confidence_loss_non_bck.sum()) / num_positives.sum().float()
# TOTAL LOSS
return conf_loss + self.alpha * loc_loss
def match_priors_objs(self, boxes, labels, num_priors, num_classes, batch_size):
"""
Helper function:
        Assign a class ("background", "window", "door", "building") to each prior.
        This is done by checking the overlap between each prior and the ground truth objects.
        If the overlap does not reach the threshold (0.5), the prior is considered background.
"""
true_locs = torch.zeros((batch_size, num_priors, 4), dtype=torch.float).to(device) # (batch_size, 8732, 4)
true_classes = torch.zeros((batch_size, num_priors), dtype=torch.long).to(device) # (batch_size, 8732)
for i, bboxes_img in enumerate(boxes):
#For each img and its objects, compute jaccard overlap between ground truth objects and priors
num_objects = bboxes_img.size(0)
obj_prior_overlap = jaccard_overlap(bboxes_img, self.priors_xy) #(num_objects, 8732)
#Get best object per prior
overlap_prior, obj_prior = obj_prior_overlap.max(dim = 0) #(8732)
#Get best prior per object
overlap_obj, prior_obj = obj_prior_overlap.max(dim = 1) #(num_objects)
#Fix that every object has been set to its respective best prior
obj_prior[prior_obj] = torch.LongTensor(range(num_objects)).to(device)
overlap_prior[prior_obj] = 1
#Give a label to the prior
label_prior = labels[i][obj_prior]
label_prior[overlap_prior < self.threshold] = 0
label_prior = label_prior.squeeze()
true_classes[i] = label_prior
#Encode it in boxes w.r.t to prior boxes format
true_locs[i] = encode_xy_to_gcxgcy(bboxes_img[obj_prior], self.priors_cxcy)
return true_locs, true_classes
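

# --- Usage sketch (added) ---
# `create_prior_boxes` is a hypothetical helper assumed to return the
# (8732, 4) tensor of priors in center-size form that this loss expects.
#
# priors = create_prior_boxes().to(DEVICE)
# criterion = MultiBoxLoss(priors_cxcy=priors)
# loss = criterion(predicted_locs,    # (N, 8732, 4) from the SSD head
#                  predicted_scores,  # (N, 8732, n_classes)
#                  boxes, labels)     # per-image ground-truth lists
# loss.backward()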
| 45.091549 | 135 | 0.687178 | 6,146 | 0.959863 | 0 | 0 | 0 | 0 | 0 | 0 | 2,774 | 0.433234 |
73899046274e7f34b8512a7c9032b640315aef48
| 1,574 |
py
|
Python
|
glitter2/tests/app.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
glitter2/tests/app.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
glitter2/tests/app.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
import trio
from kivy.config import Config
Config.set('graphics', 'width', '1600')
Config.set('graphics', 'height', '900')
Config.set('modules', 'touchring', '')
for items in Config.items('input'):
Config.remove_option('input', items[0])
from glitter2.main import Glitter2App
from kivy.tests.async_common import UnitKivyApp
__all__ = ('Glitter2TestApp', 'touch_widget')
async def touch_widget(app, widget, pos=None, duration=.2):
async for _ in app.do_touch_down_up(
widget=widget, pos=pos, duration=duration):
pass
await app.wait_clock_frames(2)
class Glitter2TestApp(Glitter2App, UnitKivyApp):
def __init__(self, data_path, **kwargs):
self._data_path = data_path
super().__init__(**kwargs)
async def async_sleep(self, dt):
await trio.sleep(dt)
def check_close(self):
return True
def handle_exception(self, msg, exc_info=None, level='error', *largs):
super().handle_exception(msg, exc_info, level, *largs)
if isinstance(exc_info, str):
self.get_logger().error(msg)
self.get_logger().error(exc_info)
elif exc_info is not None:
tp, value, tb = exc_info
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
elif level in ('error', 'exception'):
raise Exception(msg)
| 28.107143 | 74 | 0.606734 | 985 | 0.625794 | 0 | 0 | 0 | 0 | 265 | 0.168361 | 138 | 0.087675 |
738a30149882a96a75590cfa02fa03b482ae6233
| 589 |
py
|
Python
|
Gateway/WSService/Controller.py
|
reability/BruteScanner
|
bc352ec93c672f4743cf34d37e3e580bf07a7a73
|
[
"MIT"
] | null | null | null |
Gateway/WSService/Controller.py
|
reability/BruteScanner
|
bc352ec93c672f4743cf34d37e3e580bf07a7a73
|
[
"MIT"
] | null | null | null |
Gateway/WSService/Controller.py
|
reability/BruteScanner
|
bc352ec93c672f4743cf34d37e3e580bf07a7a73
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp import WSMsgType
from Settings import log
class WebSocket(web.View):
async def get(self):
ws = web.WebSocketResponse()
await ws.prepare(self.request)
self.request.app['websockets'].append(ws)
async for msg in ws:
if msg.type == WSMsgType.text:
if msg.data == 'close':
await ws.close()
            elif msg.type == WSMsgType.error:
log.debug('ws connection closed with exception %s' % ws.exception())
self.request.app['websockets'].remove(ws)
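

# --- Wiring sketch (added; the '/ws' path is illustrative) ---
# The view assumes the application keeps its open sockets in a list under
# app['websockets']:
#
# app = web.Application()
# app['websockets'] = []
# app.router.add_view('/ws', WebSocket)
# web.run_app(app)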
| 25.608696 | 84 | 0.59253 | 506 | 0.859083 | 0 | 0 | 0 | 0 | 475 | 0.806452 | 71 | 0.120543 |
738a85e82da68aa322a25cf87d2adf64e784db74
| 2,056 |
py
|
Python
|
data/kbqa/parse_kbqa.py
|
UKPLab/TWEAC-qa-agent-selection
|
ed4f0cafa87aefd4820cca0d7f4881d2de99a9f0
|
[
"MIT"
] | 9 |
2021-04-16T12:45:45.000Z
|
2022-01-29T10:52:52.000Z
|
data/kbqa/parse_kbqa.py
|
UKPLab/TWEAC-qa-agent-selection
|
ed4f0cafa87aefd4820cca0d7f4881d2de99a9f0
|
[
"MIT"
] | 1 |
2021-11-25T04:16:25.000Z
|
2021-11-25T09:54:29.000Z
|
data/kbqa/parse_kbqa.py
|
UKPLab/TWEAC-qa-agent-selection
|
ed4f0cafa87aefd4820cca0d7f4881d2de99a9f0
|
[
"MIT"
] | 3 |
2021-04-16T12:43:41.000Z
|
2021-11-25T04:21:43.000Z
|
import json
import os
def qald(in_folder, out_folder):
train = json.load(open(os.path.join(in_folder, "qald-7-train-en-wikidata.json")))
test = json.load(open(os.path.join(in_folder, "qald-7-test-en-wikidata-withoutanswers.json")))
train_q = []
test_q = []
for qs in train["questions"]:
for q in qs["question"]:
train_q.append(q["string"])
split_idx = int(len(train_q)*0.75)
dev_q = train_q[split_idx:]
train_q = train_q[:split_idx]
for qs in test["questions"]:
for q in qs["question"]:
test_q.append(q["string"])
for qs, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
os.makedirs(os.path.join(out_folder, split), exist_ok=True)
with open(os.path.join(out_folder, split, "qald-7.txt"), "w", encoding="utf-8") as f:
for q in qs:
f.write(q+"\n")
def websqp(in_folder, out_folder):
train = json.load(open(os.path.join(in_folder, "WebQSP.train.json"), encoding="utf-8"))
test = json.load(open(os.path.join(in_folder, "WebQSP.test.json"), encoding="utf-8"))
train_q = []
test_q = []
for q in train["Questions"]:
train_q.append(q["RawQuestion"])
split_idx = int(len(train_q)*0.75)
dev_q = train_q[split_idx:]
train_q = train_q[:split_idx]
for q in test["Questions"]:
test_q.append(q["RawQuestion"])
for qs, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
os.makedirs(os.path.join(out_folder, split), exist_ok=True)
with open(os.path.join(out_folder, split, "webqsp.txt"), "w", encoding="utf-8") as f:
for q in qs:
f.write(q+"\n")
if __name__ == "__main__":
qald(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\qald", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
websqp(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\WebQSP\data", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
| 38.792453 | 170 | 0.634728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.309339 |
738b1d73ae1addd61c4193601b402b8a17cc0fd6
| 1,112 |
py
|
Python
|
flink_rest_client/common.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/common.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/common.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
import requests
class RestException(Exception):
"""
Exception to catch REST API related exceptions.
"""
def __init__(self, *args: object) -> None:
super().__init__(*args)
def _execute_rest_request(
url,
http_method=None,
accepted_status_code=None,
files=None,
params=None,
data=None,
json=None,
):
if http_method is None:
http_method = "GET"
if params is None:
params = {}
if data is None:
data = {}
    # If accepted_status_code is None, fall back to the default (200 OK).
if accepted_status_code is None:
accepted_status_code = 200
response = requests.request(
method=http_method, url=url, files=files, params=params, data=data, json=json
)
if response.status_code == accepted_status_code:
return response.json()
else:
if "errors" in response.json().keys():
error_str = "\n".join(response.json()["errors"])
else:
error_str = ""
raise RestException(
f"REST response error ({response.status_code}): {error_str}"
)
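

# --- Usage sketch (added; the URL is illustrative) ---
# Flink's REST API exposes endpoints such as /jobs on the JobManager port.
if __name__ == '__main__':
    jobs = _execute_rest_request('http://localhost:8081/jobs')
    print(jobs)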
| 24.173913 | 85 | 0.610612 | 179 | 0.160971 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.188849 |
738bc5924597cda0fc1b0821b35e4dee0b3c08ce
| 9,696 |
py
|
Python
|
functions.py
|
emiliozamorano15/arvato-udacity-capstone
|
ce550eebefbf13cebacfe111134b0391a73789a4
|
[
"MIT"
] | null | null | null |
functions.py
|
emiliozamorano15/arvato-udacity-capstone
|
ce550eebefbf13cebacfe111134b0391a73789a4
|
[
"MIT"
] | null | null | null |
functions.py
|
emiliozamorano15/arvato-udacity-capstone
|
ce550eebefbf13cebacfe111134b0391a73789a4
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
def missing_dict(df):
'''
Function to build a dictionary of indicators of missing information per feature
INPUT:
df: pandas dataframe with features, description, and values that mean "unknown"
    OUTPUT:
    unknown_dict: dictionary of values that mean "unknown" per feature
'''
unknown_values = []
for val in df.Value:
## evaluate whether missing 'value' is an integer (one digit)
if isinstance(val, int):
unknown_values.append([val])
## evaluate whether attribute has more than one value (a string object in the dataframe)
elif isinstance(val, str):
split_list = val.split(',')
int_list = [int(x) for x in split_list]
unknown_values.append(int_list)
unknown_dict = {}
for attr, value_list in zip(df.Attribute, unknown_values):
unknown_dict[attr] = value_list
unknown_dict['ALTERSKATEGORIE_FEIN'] = [0]
unknown_dict['GEBURTSJAHR'] = [0]
return unknown_dict
def find_cat_cols(df):
'''
Function to find the names of categorical columns
INPUT
df: pandas dataframe
OUTPUT
cat_cols: list of names of columns with categorical values
'''
cat_cols = list(df.select_dtypes(['object']).columns)
return cat_cols
def find_binary_cols(df):
'''
Function to find the names numerical columns with binary (1/0) values
INPUT
df: pandas dataframe
OUTPUT
bin_cols: list of names of columns with binary values
'''
bin_cols = []
for col in df.select_dtypes(['float64', 'int64']).columns:
n_unique = df[col].dropna().nunique()
if n_unique == 2:
bin_cols.append(col)
return bin_cols
def clean_data(df, drop_rows = [], drop_cols = []):
'''
Function to clean Arvato's datasets. It mainly changes data format for certain columns,
and drops columns (rows) which exceed a given threshold of missing values.
INPUT
df: pandas dataframe (from Arvato's )
drop_rows: list of row indices to drop
drop_cols: list of col names to drop
OUTPUT
    clean_df: pandas dataframe with cleaned data
'''
    clean_df = df.copy()
    if len(drop_cols) > 0:
        clean_df = clean_df.drop(drop_cols, axis = 1)
    if len(drop_rows) > 0:
        clean_df = clean_df.loc[~clean_df.index.isin(drop_rows)]
## Cast CAMEO_DEUG_2015 to int
clean_df['CAMEO_DEUG_2015'] = clean_df['CAMEO_DEUG_2015'].replace('X',np.nan)
clean_df['CAMEO_DEUG_2015'] = clean_df['CAMEO_DEUG_2015'].astype('float')
## Transform EINGEFUEGT_AM to date format (only year part)
clean_df['EINGEFUEGT_AM'] = pd.to_datetime(clean_df['EINGEFUEGT_AM'], format = '%Y-%m-%d').dt.year
### Label-encode OST_WEST_KZ
clean_df['OST_WEST_KZ'] = clean_df['OST_WEST_KZ'].replace('W',1).replace('O', 0)
clean_df['OST_WEST_KZ'] = pd.to_numeric(clean_df['OST_WEST_KZ'], errors = 'coerce')
return clean_df
def scree_plot(pca):
"""
Function to make a scree plot out of a PCA object
INPUT
pca: PCA fitted object
OUTPUT
scree plot
"""
import matplotlib.pyplot as plt
nc = len(pca.explained_variance_ratio_)
ind = np.arange(nc)
vals = pca.explained_variance_ratio_
cumvals = np.cumsum(vals)
fig = plt.figure(figsize=(12,6))
ax = plt.subplot()
ax.bar(ind, vals)
ax.plot(ind, cumvals)
plt.xlabel('No. of Components')
plt.ylabel('Cum. explained variance')
plt.title('Scree plot PCA')
def get_cluster_centers(cluster_pipeline, num_cols, col_names):
"""
    Function to inverse-transform the PCA cluster centers back to the original feature space.
INPUT:
cluster: object of cluster_pipeline
num_cols: list of numerical attributes which were rescaled
col_names: names of all columns after Column Transformer operation
OUTPUT:
        df (DataFrame): DataFrame of cluster_centers with their attribute values
"""
pca_components = cluster_pipeline.named_steps['reduction']
kmeans = cluster_pipeline.named_steps['clustering']
transformer = cluster_pipeline.named_steps['transform']
centers = pca_components.inverse_transform(kmeans.cluster_centers_)
df = pd.DataFrame(centers, columns = col_names)
num_scale = transformer.named_transformers_['num'].named_steps['num_scale']
df[num_cols] = num_scale.inverse_transform(df[num_cols])
return df
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5),
verbose=0):
"""
Generate 3 plots: the test and training learning curve, the training
samples vs fit times curve, the fit times vs score curve.
Source: [https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html]
Parameters
----------
estimator : estimator instance
An estimator instance implementing `fit` and `predict` methods which
will be cloned for each validation.
title : str
Title for the chart.
X : array-like of shape (n_samples, n_features)
Training vector, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
y : array-like of shape (n_samples) or (n_samples, n_features)
Target relative to ``X`` for classification or regression;
None for unsupervised learning.
axes : array-like of shape (3,), default=None
Axes to use for plotting the curves.
ylim : tuple of shape (2,), default=None
Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like of shape (n_ticks,)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the ``dtype`` is float, it is regarded
as a fraction of the maximum size of the training set (that is
determined by the selected validation method), i.e. it has to be within
(0, 1]. Otherwise it is interpreted as absolute sizes of the training
sets. Note that for classification the number of samples usually have
to be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = \
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True,
verbose=verbose)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
# Plot learning curve
axes[0].grid()
axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes[0].legend(loc="best")
# Plot n_samples vs fit_times
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, 'o-')
axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std, alpha=0.1)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
# Plot fit_time vs score
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
if __name__ == '__main__':
pass
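
# --- Usage sketch (added; file and column names are illustrative) ---
# attrs = pd.read_excel('DIAS Attributes - Values 2017.xlsx')
# unknown = missing_dict(attrs)          # e.g. {'AGER_TYP': [-1, 0], ...}
# azdias = pd.read_csv('Udacity_AZDIAS_052018.csv', sep=';')
# azdias = clean_data(azdias, drop_rows=[], drop_cols=['LNR'])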
| 34.261484 | 102 | 0.653465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,116 | 0.52764 |
738c97be8d45d5cf7a790774eb0b1a71db20018a
| 1,133 |
py
|
Python
|
PYTHON_POO/AFmain.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
PYTHON_POO/AFmain.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
PYTHON_POO/AFmain.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
# Create a database. The user can add, delete, and list clients (which have an id and a name).
# *use encapsulation.
class Clientes:
def __init__(self):
        self.__lista = {}  # It is strongly recommended not to modify this variable directly
def adicionar_cliente(self, id, nome):
if 'clientes' not in self.__lista:
self.__lista['clientes'] = {id: nome}
else:
self.__lista['clientes'].update({id: nome})
def listar_clientes(self):
if 'clientes' not in self.__lista:
            print('The list is empty.')
else:
for id, nome in self.__lista['clientes'].items():
print(id, nome)
def deletar_cliente(self, id):
del self.__lista['clientes'][id]
user = Clientes()
user.adicionar_cliente(189, 'Davi')
user.adicionar_cliente(123, 'yan')
user.adicionar_cliente(198, 'lorena')
user.__lista = 'Something else'  # This creates a new attribute on the instance. To access
# the class's private variable, you would have to use name mangling: user._Clientes__lista
user.listar_clientes()
user.deletar_cliente(123)
user.listar_clientes()
| 32.371429 | 103 | 0.66902 | 633 | 0.555263 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.385965 |
738d10783ee6f1c6ba70fb6d0517987a990ac096
| 2,321 |
py
|
Python
|
env/lib/python3.4/site-packages/jsonrpc/tests/test_utils.py
|
Organizational-Proof-Of-Work/clearinghoused_build
|
7bab4ccb516015913bad41cfdc9eb15d3fbfcaf4
|
[
"MIT"
] | null | null | null |
env/lib/python3.4/site-packages/jsonrpc/tests/test_utils.py
|
Organizational-Proof-Of-Work/clearinghoused_build
|
7bab4ccb516015913bad41cfdc9eb15d3fbfcaf4
|
[
"MIT"
] | null | null | null |
env/lib/python3.4/site-packages/jsonrpc/tests/test_utils.py
|
Organizational-Proof-Of-Work/clearinghoused_build
|
7bab4ccb516015913bad41cfdc9eb15d3fbfcaf4
|
[
"MIT"
] | null | null | null |
""" Test utility functionality."""
import datetime
import decimal
import json
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from mock import patch
from ..utils import JSONSerializable, DatetimeDecimalEncoder
class TestJSONSerializable(unittest.TestCase):
""" Test JSONSerializable functionality."""
def setUp(self):
class A(JSONSerializable):
@property
def json(self):
pass
self._class = A
def test_abstract_class(self):
with self.assertRaises(TypeError):
JSONSerializable()
self._class()
    def test_defines_serialize_deserialize(self):
""" Test classmethods of inherited class."""
self.assertEqual(self._class.serialize({}), "{}")
self.assertEqual(self._class.deserialize("{}"), {})
def test_from_json(self):
self.assertTrue(isinstance(self._class.from_json('{}'), self._class))
def test_from_json_incorrect(self):
with self.assertRaises(ValueError):
self._class.from_json('[]')
class TestDatetimeDecimalEncoder(unittest.TestCase):
""" Test DatetimeDecimalEncoder functionality."""
def test_date_encoder(self):
obj = datetime.date.today()
with self.assertRaises(TypeError):
json.dumps(obj)
self.assertEqual(
json.dumps(obj, cls=DatetimeDecimalEncoder),
'"{0}"'.format(obj.isoformat()),
)
def test_datetime_encoder(self):
obj = datetime.datetime.now()
with self.assertRaises(TypeError):
json.dumps(obj)
self.assertEqual(
json.dumps(obj, cls=DatetimeDecimalEncoder),
'"{0}"'.format(obj.isoformat()),
)
def test_decimal_encoder(self):
obj = decimal.Decimal('0.1')
with self.assertRaises(TypeError):
json.dumps(obj)
result = json.dumps(obj, cls=DatetimeDecimalEncoder)
self.assertTrue(isinstance(result, str))
self.assertEqual(float(result), float(0.1))
def test_default(self):
encoder = DatetimeDecimalEncoder()
with patch.object(json.JSONEncoder, 'default') as json_default:
encoder.default("")
self.assertEqual(json_default.call_count, 1)
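# Illustrative usage (not from the original tests): the encoder exercised above
# is meant to be passed straight to json.dumps, e.g.
#   json.dumps({"when": datetime.datetime.now(),
#               "amount": decimal.Decimal("0.1")},
#              cls=DatetimeDecimalEncoder)
# which serializes dates/datetimes via isoformat() and decimals as floats.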
| 26.078652 | 77 | 0.635502 | 2,051 | 0.883671 | 0 | 0 | 58 | 0.024989 | 0 | 0 | 216 | 0.093063 |
738d3ae3312a3ea39b2dd401e3c5ee88d3d77ab6
| 18,859 |
py
|
Python
|
264_nth_ugly_number.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 2 |
2018-04-24T19:17:40.000Z
|
2018-04-24T19:33:52.000Z
|
264_nth_ugly_number.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | null | null | null |
264_nth_ugly_number.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 3 |
2020-06-17T05:48:52.000Z
|
2021-01-02T06:08:25.000Z
|
# 264. Ugly Number II
#
# Write a program to find the n-th ugly number.
#
# Ugly numbers are positive numbers whose prime factors only include
# 2, 3, 5. For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence
# of the first 10 ugly numbers.
#
# Note that 1 is typically treated as an ugly number.
class Solution(object):
# brute force
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num == 0:
return False
while num % 2 == 0:
num /= 2
while num % 3 == 0:
num /= 3
while num % 5 == 0:
num /= 5
return num == 1
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 0:
return 0
number = 0
uglyFound = 0
while uglyFound < n:
number += 1
if self.isUgly(number):
uglyFound += 1
return number
# https://www.hrwhisper.me/leetcode-ugly-number-i-ii/
    # The first ugly number is 1; consider the case n > 1.
    # Since an ugly number is divisible only by 2, 3, and 5, we grow the
    # sequence from 1, each step multiplying an earlier ugly number by 2, 3, or 5.
    # From 1, multiplying by 2, 3, 5 gives [2, 3, 5]; 2 is clearly the smallest,
    # so the 2nd ugly number is 2.
    # The 3rd? Clearly 3, obtained from 1 * 3.
    # The 4th is different: it comes from 2 * 2.
    # What is the pattern? Each factor keeps its own index into the ugly numbers
    # produced so far (initially the first, 1). When the product for some factor
    # is no larger than the other two candidates, the new ugly number comes from
    # that factor, and on the next round that factor multiplies the next ugly
    # number in the sequence.
    # In other words, once a factor's product with ugly[i] becomes the new ugly
    # number, that factor moves on to ugly[i+1]. This keeps every candidate as
    # small as possible and guarantees nothing is skipped.
def nthUglyNumber(self, n):
ugly = [1] * n # ugly[0] = 1; first ugly is 1.
i2 = i3 = i5 = 0 # index for candiate multiply by 2,3,5 separately
for i in range(1, n):
# find min among all candidates
ugly[i] = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
            # advance every index whose candidate equals the new minimum.
            # note: do not use elif — all matching indices must advance, or a
            # duplicate such as 6 (= 2*3 = 3*2) would be emitted twice.
if ugly[i] == ugly[i2] * 2: i2 += 1
if ugly[i] == ugly[i3] * 3: i3 += 1
if ugly[i] == ugly[i5] * 5: i5 += 1
return ugly[n-1]
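    # Worked trace (illustrative, not from the original) for n = 5:
    #   i=1: min(1*2, 1*3, 1*5) = 2 -> ugly[1] = 2, i2 -> 1
    #   i=2: min(2*2, 1*3, 1*5) = 3 -> ugly[2] = 3, i3 -> 1
    #   i=3: min(2*2, 2*3, 1*5) = 4 -> ugly[3] = 4, i2 -> 2
    #   i=4: min(3*2, 2*3, 1*5) = 5 -> ugly[4] = 5, i5 -> 1
    # When a value such as 6 (= 3*2 = 2*3) is reached, both i2 and i3 advance
    # in the same round, which is why the three ifs above are not elifs.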
# https://www.youtube.com/watch?v=ZG86C_U-vRg
def nthUglyNumber(self, n):
ugly = [1] # first ugly number is 1
i2 = 0
i3 = 0
i5 = 0
# calculate rest n-1 ugly numbers
for _ in range(1, n):
next2 = ugly[i2] * 2
next3 = ugly[i3] * 3
next5 = ugly[i5] * 5
next = min(next2, next3, next5)
ugly.append(next)
if next == next2:
i2 += 1
if next == next3:
i3 += 1
if next == next5:
i5 += 1
return ugly[-1]
# Lookup-table method
# precompute all ugly numbers
class Solution(object):
ugly = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000, 10125, 10240, 10368, 10800, 10935, 11250, 11520, 11664, 12000, 12150, 12288, 12500, 12800, 12960, 13122, 13500, 13824, 14400, 14580, 15000, 15360, 15552, 15625, 16000, 16200, 16384, 16875, 17280, 17496, 18000, 18225, 18432, 18750, 19200, 19440, 19683, 20000, 20250, 20480, 20736, 21600, 21870, 22500, 23040, 23328, 24000, 24300, 24576, 25000, 25600, 25920, 26244, 27000, 27648, 28125, 28800, 29160, 30000, 30375, 30720, 31104, 31250, 32000, 32400, 32768, 32805, 33750, 34560, 34992, 36000, 36450, 36864, 37500, 38400, 38880, 39366, 40000, 40500, 40960, 41472, 43200, 43740, 45000, 46080, 46656, 46875, 48000, 48600, 49152, 50000, 50625, 51200, 51840, 52488, 54000, 54675, 55296, 56250, 57600, 58320, 59049, 60000, 60750, 61440, 62208, 62500, 64000, 64800, 65536, 65610, 67500, 69120, 69984, 72000, 72900, 73728, 75000, 76800, 77760, 78125, 78732, 80000, 81000, 81920, 82944, 84375, 86400, 87480, 90000, 91125, 92160, 93312, 93750, 96000, 97200, 98304, 98415, 100000, 101250, 102400, 103680, 104976, 108000, 109350, 110592, 112500, 115200, 116640, 118098, 120000, 121500, 122880, 124416, 125000, 128000, 129600, 131072, 131220, 135000, 138240, 139968, 140625, 144000, 145800, 147456, 150000, 151875, 153600, 155520, 156250, 157464, 160000, 162000, 163840, 164025, 165888, 168750, 172800, 174960, 177147, 180000, 182250, 184320, 186624, 187500, 192000, 194400, 196608, 196830, 200000, 202500, 204800, 207360, 209952, 216000, 218700, 221184, 225000, 230400, 233280, 234375, 236196, 240000, 243000, 245760, 248832, 250000, 253125, 256000, 259200, 262144, 262440, 270000, 273375, 276480, 279936, 281250, 288000, 291600, 294912, 295245, 300000, 303750, 307200, 311040, 312500, 314928, 320000, 324000, 327680, 328050, 331776, 337500, 345600, 349920, 354294, 360000, 364500, 368640, 373248, 375000, 384000, 388800, 390625, 393216, 393660, 400000, 405000, 409600, 414720, 419904, 421875, 432000, 437400, 442368, 450000, 455625, 460800, 466560, 468750, 472392, 480000, 486000, 491520, 492075, 497664, 500000, 506250, 512000, 518400, 524288, 524880, 531441, 540000, 546750, 552960, 559872, 562500, 576000, 583200, 589824, 590490, 600000, 607500, 614400, 622080, 625000, 629856, 640000, 648000, 655360, 656100, 663552, 675000, 691200, 699840, 703125, 708588, 720000, 729000, 737280, 746496, 750000, 759375, 768000, 777600, 781250, 786432, 787320, 800000, 810000, 819200, 820125, 829440, 839808, 843750, 864000, 874800, 884736, 885735, 900000, 911250, 921600, 933120, 937500, 944784, 960000, 972000, 983040, 984150, 995328, 1000000, 1012500, 1024000, 1036800, 1048576, 1049760, 1062882, 1080000, 1093500, 1105920, 1119744, 1125000, 
1152000, 1166400, 1171875, 1179648, 1180980, 1200000, 1215000, 1228800, 1244160, 1250000, 1259712, 1265625, 1280000, 1296000, 1310720, 1312200, 1327104, 1350000, 1366875, 1382400, 1399680, 1406250, 1417176, 1440000, 1458000, 1474560, 1476225, 1492992, 1500000, 1518750, 1536000, 1555200, 1562500, 1572864, 1574640, 1594323, 1600000, 1620000, 1638400, 1640250, 1658880, 1679616, 1687500, 1728000, 1749600, 1769472, 1771470, 1800000, 1822500, 1843200, 1866240, 1875000, 1889568, 1920000, 1944000, 1953125, 1966080, 1968300, 1990656, 2000000, 2025000, 2048000, 2073600, 2097152, 2099520, 2109375, 2125764, 2160000, 2187000, 2211840, 2239488, 2250000, 2278125, 2304000, 2332800, 2343750, 2359296, 2361960, 2400000, 2430000, 2457600, 2460375, 2488320, 2500000, 2519424, 2531250, 2560000, 2592000, 2621440, 2624400, 2654208, 2657205, 2700000, 2733750, 2764800, 2799360, 2812500, 2834352, 2880000, 2916000, 2949120, 2952450, 2985984, 3000000, 3037500, 3072000, 3110400, 3125000, 3145728, 3149280, 3188646, 3200000, 3240000, 3276800, 3280500, 3317760, 3359232, 3375000, 3456000, 3499200, 3515625, 3538944, 3542940, 3600000, 3645000, 3686400, 3732480, 3750000, 3779136, 3796875, 3840000, 3888000, 3906250, 3932160, 3936600, 3981312, 4000000, 4050000, 4096000, 4100625, 4147200, 4194304, 4199040, 4218750, 4251528, 4320000, 4374000, 4423680, 4428675, 4478976, 4500000, 4556250, 4608000, 4665600, 4687500, 4718592, 4723920, 4782969, 4800000, 4860000, 4915200, 4920750, 4976640, 5000000, 5038848, 5062500, 5120000, 5184000, 5242880, 5248800, 5308416, 5314410, 5400000, 5467500, 5529600, 5598720, 5625000, 5668704, 5760000, 5832000, 5859375, 5898240, 5904900, 5971968, 6000000, 6075000, 6144000, 6220800, 6250000, 6291456, 6298560, 6328125, 6377292, 6400000, 6480000, 6553600, 6561000, 6635520, 6718464, 6750000, 6834375, 6912000, 6998400, 7031250, 7077888, 7085880, 7200000, 7290000, 7372800, 7381125, 7464960, 7500000, 7558272, 7593750, 7680000, 7776000, 7812500, 7864320, 7873200, 7962624, 7971615, 8000000, 8100000, 8192000, 8201250, 8294400, 8388608, 8398080, 8437500, 8503056, 8640000, 8748000, 8847360, 8857350, 8957952, 9000000, 9112500, 9216000, 9331200, 9375000, 9437184, 9447840, 9565938, 9600000, 9720000, 9765625, 9830400, 9841500, 9953280, 10000000, 10077696, 10125000, 10240000, 10368000, 10485760, 10497600, 10546875, 10616832, 10628820, 10800000, 10935000, 11059200, 11197440, 11250000, 11337408, 11390625, 11520000, 11664000, 11718750, 11796480, 11809800, 11943936, 12000000, 12150000, 12288000, 12301875, 12441600, 12500000, 12582912, 12597120, 12656250, 12754584, 12800000, 12960000, 13107200, 13122000, 13271040, 13286025, 13436928, 13500000, 13668750, 13824000, 13996800, 14062500, 14155776, 14171760, 14348907, 14400000, 14580000, 14745600, 14762250, 14929920, 15000000, 15116544, 15187500, 15360000, 15552000, 15625000, 15728640, 15746400, 15925248, 15943230, 16000000, 16200000, 16384000, 16402500, 16588800, 16777216, 16796160, 16875000, 17006112, 17280000, 17496000, 17578125, 17694720, 17714700, 17915904, 18000000, 18225000, 18432000, 18662400, 18750000, 18874368, 18895680, 18984375, 19131876, 19200000, 19440000, 19531250, 19660800, 19683000, 19906560, 20000000, 20155392, 20250000, 20480000, 20503125, 20736000, 20971520, 20995200, 21093750, 21233664, 21257640, 21600000, 21870000, 22118400, 22143375, 22394880, 22500000, 22674816, 22781250, 23040000, 23328000, 23437500, 23592960, 23619600, 23887872, 23914845, 24000000, 24300000, 24576000, 24603750, 24883200, 25000000, 25165824, 25194240, 25312500, 25509168, 25600000, 25920000, 
26214400, 26244000, 26542080, 26572050, 26873856, 27000000, 27337500, 27648000, 27993600, 28125000, 28311552, 28343520, 28697814, 28800000, 29160000, 29296875, 29491200, 29524500, 29859840, 30000000, 30233088, 30375000, 30720000, 31104000, 31250000, 31457280, 31492800, 31640625, 31850496, 31886460, 32000000, 32400000, 32768000, 32805000, 33177600, 33554432, 33592320, 33750000, 34012224, 34171875, 34560000, 34992000, 35156250, 35389440, 35429400, 35831808, 36000000, 36450000, 36864000, 36905625, 37324800, 37500000, 37748736, 37791360, 37968750, 38263752, 38400000, 38880000, 39062500, 39321600, 39366000, 39813120, 39858075, 40000000, 40310784, 40500000, 40960000, 41006250, 41472000, 41943040, 41990400, 42187500, 42467328, 42515280, 43046721, 43200000, 43740000, 44236800, 44286750, 44789760, 45000000, 45349632, 45562500, 46080000, 46656000, 46875000, 47185920, 47239200, 47775744, 47829690, 48000000, 48600000, 48828125, 49152000, 49207500, 49766400, 50000000, 50331648, 50388480, 50625000, 51018336, 51200000, 51840000, 52428800, 52488000, 52734375, 53084160, 53144100, 53747712, 54000000, 54675000, 55296000, 55987200, 56250000, 56623104, 56687040, 56953125, 57395628, 57600000, 58320000, 58593750, 58982400, 59049000, 59719680, 60000000, 60466176, 60750000, 61440000, 61509375, 62208000, 62500000, 62914560, 62985600, 63281250, 63700992, 63772920, 64000000, 64800000, 65536000, 65610000, 66355200, 66430125, 67108864, 67184640, 67500000, 68024448, 68343750, 69120000, 69984000, 70312500, 70778880, 70858800, 71663616, 71744535, 72000000, 72900000, 73728000, 73811250, 74649600, 75000000, 75497472, 75582720, 75937500, 76527504, 76800000, 77760000, 78125000, 78643200, 78732000, 79626240, 79716150, 80000000, 80621568, 81000000, 81920000, 82012500, 82944000, 83886080, 83980800, 84375000, 84934656, 85030560, 86093442, 86400000, 87480000, 87890625, 88473600, 88573500, 89579520, 90000000, 90699264, 91125000, 92160000, 93312000, 93750000, 94371840, 94478400, 94921875, 95551488, 95659380, 96000000, 97200000, 97656250, 98304000, 98415000, 99532800, 100000000, 100663296, 100776960, 101250000, 102036672, 102400000, 102515625, 103680000, 104857600, 104976000, 105468750, 106168320, 106288200, 107495424, 108000000, 109350000, 110592000, 110716875, 111974400, 112500000, 113246208, 113374080, 113906250, 114791256, 115200000, 116640000, 117187500, 117964800, 118098000, 119439360, 119574225, 120000000, 120932352, 121500000, 122880000, 123018750, 124416000, 125000000, 125829120, 125971200, 126562500, 127401984, 127545840, 128000000, 129140163, 129600000, 131072000, 131220000, 132710400, 132860250, 134217728, 134369280, 135000000, 136048896, 136687500, 138240000, 139968000, 140625000, 141557760, 141717600, 143327232, 143489070, 144000000, 145800000, 146484375, 147456000, 147622500, 149299200, 150000000, 150994944, 151165440, 151875000, 153055008, 153600000, 155520000, 156250000, 157286400, 157464000, 158203125, 159252480, 159432300, 160000000, 161243136, 162000000, 163840000, 164025000, 165888000, 167772160, 167961600, 168750000, 169869312, 170061120, 170859375, 172186884, 172800000, 174960000, 175781250, 176947200, 177147000, 179159040, 180000000, 181398528, 182250000, 184320000, 184528125, 186624000, 187500000, 188743680, 188956800, 189843750, 191102976, 191318760, 192000000, 194400000, 195312500, 196608000, 196830000, 199065600, 199290375, 200000000, 201326592, 201553920, 202500000, 204073344, 204800000, 205031250, 207360000, 209715200, 209952000, 210937500, 212336640, 212576400, 214990848, 215233605, 216000000, 
218700000, 221184000, 221433750, 223948800, 225000000, 226492416, 226748160, 227812500, 229582512, 230400000, 233280000, 234375000, 235929600, 236196000, 238878720, 239148450, 240000000, 241864704, 243000000, 244140625, 245760000, 246037500, 248832000, 250000000, 251658240, 251942400, 253125000, 254803968, 255091680, 256000000, 258280326, 259200000, 262144000, 262440000, 263671875, 265420800, 265720500, 268435456, 268738560, 270000000, 272097792, 273375000, 276480000, 279936000, 281250000, 283115520, 283435200, 284765625, 286654464, 286978140, 288000000, 291600000, 292968750, 294912000, 295245000, 298598400, 300000000, 301989888, 302330880, 303750000, 306110016, 307200000, 307546875, 311040000, 312500000, 314572800, 314928000, 316406250, 318504960, 318864600, 320000000, 322486272, 324000000, 327680000, 328050000, 331776000, 332150625, 335544320, 335923200, 337500000, 339738624, 340122240, 341718750, 344373768, 345600000, 349920000, 351562500, 353894400, 354294000, 358318080, 358722675, 360000000, 362797056, 364500000, 368640000, 369056250, 373248000, 375000000, 377487360, 377913600, 379687500, 382205952, 382637520, 384000000, 387420489, 388800000, 390625000, 393216000, 393660000, 398131200, 398580750, 400000000, 402653184, 403107840, 405000000, 408146688, 409600000, 410062500, 414720000, 419430400, 419904000, 421875000, 424673280, 425152800, 429981696, 430467210, 432000000, 437400000, 439453125, 442368000, 442867500, 447897600, 450000000, 452984832, 453496320, 455625000, 459165024, 460800000, 466560000, 468750000, 471859200, 472392000, 474609375, 477757440, 478296900, 480000000, 483729408, 486000000, 488281250, 491520000, 492075000, 497664000, 500000000, 503316480, 503884800, 506250000, 509607936, 510183360, 512000000, 512578125, 516560652, 518400000, 524288000, 524880000, 527343750, 530841600, 531441000, 536870912, 537477120, 540000000, 544195584, 546750000, 552960000, 553584375, 559872000, 562500000, 566231040, 566870400, 569531250, 573308928, 573956280, 576000000, 583200000, 585937500, 589824000, 590490000, 597196800, 597871125, 600000000, 603979776, 604661760, 607500000, 612220032, 614400000, 615093750, 622080000, 625000000, 629145600, 629856000, 632812500, 637009920, 637729200, 640000000, 644972544, 645700815, 648000000, 655360000, 656100000, 663552000, 664301250, 671088640, 671846400, 675000000, 679477248, 680244480, 683437500, 688747536, 691200000, 699840000, 703125000, 707788800, 708588000, 716636160, 717445350, 720000000, 725594112, 729000000, 732421875, 737280000, 738112500, 746496000, 750000000, 754974720, 755827200, 759375000, 764411904, 765275040, 768000000, 774840978, 777600000, 781250000, 786432000, 787320000, 791015625, 796262400, 797161500, 800000000, 805306368, 806215680, 810000000, 816293376, 819200000, 820125000, 829440000, 838860800, 839808000, 843750000, 849346560, 850305600, 854296875, 859963392, 860934420, 864000000, 874800000, 878906250, 884736000, 885735000, 895795200, 900000000, 905969664, 906992640, 911250000, 918330048, 921600000, 922640625, 933120000, 937500000, 943718400, 944784000, 949218750, 955514880, 956593800, 960000000, 967458816, 972000000, 976562500, 983040000, 984150000, 995328000, 996451875, 1000000000, 1006632960, 1007769600, 1012500000, 1019215872, 1020366720, 1024000000, 1025156250, 1033121304, 1036800000, 1048576000, 1049760000, 1054687500, 1061683200, 1062882000, 1073741824, 1074954240, 1076168025, 1080000000, 1088391168, 1093500000, 1105920000, 1107168750, 1119744000, 1125000000, 1132462080, 1133740800, 1139062500, 1146617856, 1147912560, 
1152000000, 1162261467, 1166400000, 1171875000, 1179648000, 1180980000, 1194393600, 1195742250, 1200000000, 1207959552, 1209323520, 1215000000, 1220703125, 1224440064, 1228800000, 1230187500, 1244160000, 1250000000, 1258291200, 1259712000, 1265625000, 1274019840, 1275458400, 1280000000, 1289945088, 1291401630, 1296000000, 1310720000, 1312200000, 1318359375, 1327104000, 1328602500, 1342177280, 1343692800, 1350000000, 1358954496, 1360488960, 1366875000, 1377495072, 1382400000, 1399680000, 1406250000, 1415577600, 1417176000, 1423828125, 1433272320, 1434890700, 1440000000, 1451188224, 1458000000, 1464843750, 1474560000, 1476225000, 1492992000, 1500000000, 1509949440, 1511654400, 1518750000, 1528823808, 1530550080, 1536000000, 1537734375, 1549681956, 1555200000, 1562500000, 1572864000, 1574640000, 1582031250, 1592524800, 1594323000, 1600000000, 1610612736, 1612431360, 1620000000, 1632586752, 1638400000, 1640250000, 1658880000, 1660753125, 1677721600, 1679616000, 1687500000, 1698693120, 1700611200, 1708593750, 1719926784, 1721868840, 1728000000, 1749600000, 1757812500, 1769472000, 1771470000, 1791590400, 1793613375, 1800000000, 1811939328, 1813985280, 1822500000, 1836660096, 1843200000, 1845281250, 1866240000, 1875000000, 1887436800, 1889568000, 1898437500, 1911029760, 1913187600, 1920000000, 1934917632, 1937102445, 1944000000, 1953125000, 1966080000, 1968300000, 1990656000, 1992903750, 2000000000, 2013265920, 2015539200, 2025000000, 2038431744, 2040733440, 2048000000, 2050312500, 2066242608, 2073600000, 2097152000, 2099520000, 2109375000, 2123366400]
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
return self.ugly[n-1]
if __name__ == "__main__":
#print (Solution().nthUglyNumber(10))
#print (Solution().nthUglyNumber(1500))
print (Solution().nthUglyNumber(1690))
| 162.577586 | 15,773 | 0.74198 | 18,823 | 0.972815 | 0 | 0 | 0 | 0 | 0 | 0 | 1,871 | 0.096698 |
7391ce7ef2ad24d97f65315f42ffbecced2389a8
| 3,563 |
py
|
Python
|
neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py
|
osic-neutron/neutron-ipcapacity
|
678cbadb0be57203e0cc4c493082d2d54afc7c17
|
[
"Apache-2.0"
] | 1 |
2019-01-13T04:42:21.000Z
|
2019-01-13T04:42:21.000Z
|
neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py
|
osic-neutron/neutron-ipcapacity
|
678cbadb0be57203e0cc4c493082d2d54afc7c17
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py
|
osic-neutron/neutron-ipcapacity
|
678cbadb0be57203e0cc4c493082d2d54afc7c17
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
""" Add default security group table
Revision ID: 14be42f3d0a5
Revises: 26b54cf9024d
Create Date: 2014-12-12 14:54:11.123635
"""
# revision identifiers, used by Alembic.
revision = '14be42f3d0a5'
down_revision = '26b54cf9024d'
from alembic import op
import six
import sqlalchemy as sa
from neutron._i18n import _
from neutron.common import exceptions
# Models can change in time, but migration should rely only on exact
# model state at the current moment, so a separate model is created
# here.
security_group = sa.Table('securitygroups', sa.MetaData(),
sa.Column('id', sa.String(length=36),
nullable=False),
sa.Column('name', sa.String(255)),
sa.Column('tenant_id', sa.String(255)))
class DuplicateSecurityGroupsNamedDefault(exceptions.Conflict):
message = _("Some tenants have more than one security group named "
"'default': %(duplicates)s. All duplicate 'default' security "
"groups must be resolved before upgrading the database.")
def upgrade():
table = op.create_table(
'default_security_group',
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('security_group_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id'),
sa.ForeignKeyConstraint(['security_group_id'],
['securitygroups.id'],
ondelete="CASCADE"))
sel = (sa.select([security_group.c.tenant_id,
security_group.c.id])
.where(security_group.c.name == 'default'))
ins = table.insert(inline=True).from_select(['tenant_id',
'security_group_id'], sel)
op.execute(ins)
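    # Illustrative (an assumption about the SQL Alembic emits, not from the
    # original migration): the insert-from-select above is equivalent to
    #   INSERT INTO default_security_group (tenant_id, security_group_id)
    #   SELECT tenant_id, id FROM securitygroups WHERE name = 'default';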
def check_sanity(connection):
res = get_duplicate_default_security_groups(connection)
if res:
raise DuplicateSecurityGroupsNamedDefault(
duplicates='; '.join('tenant %s: %s' %
(tenant_id, ', '.join(groups))
for tenant_id, groups in six.iteritems(res)))
def get_duplicate_default_security_groups(connection):
insp = sa.engine.reflection.Inspector.from_engine(connection)
if 'securitygroups' not in insp.get_table_names():
return {}
session = sa.orm.Session(bind=connection.connect())
subq = (session.query(security_group.c.tenant_id)
.filter(security_group.c.name == 'default')
.group_by(security_group.c.tenant_id)
.having(sa.func.count() > 1)
.subquery())
sg = (session.query(security_group)
.join(subq, security_group.c.tenant_id == subq.c.tenant_id)
.filter(security_group.c.name == 'default')
.all())
res = {}
for s in sg:
res.setdefault(s.tenant_id, []).append(s.id)
return res
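# Illustrative return shape (not from the original): a mapping of offending
# tenants to their duplicate 'default' security group ids, e.g.
#   {'tenant-a': ['sg-1', 'sg-2']}
# which check_sanity() folds into the DuplicateSecurityGroupsNamedDefault message.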
| 37.114583 | 78 | 0.634858 | 288 | 0.080831 | 0 | 0 | 0 | 0 | 0 | 0 | 1,358 | 0.381139 |
739221f14ebd9dfa18ce38c36afe1cd0d2d397f6
| 2,126 |
py
|
Python
|
coredis/response/callbacks/script.py
|
alisaifee/aredis
|
c5764a5a2a29c4ed25278548aa54eece94974440
|
[
"MIT"
] | null | null | null |
coredis/response/callbacks/script.py
|
alisaifee/aredis
|
c5764a5a2a29c4ed25278548aa54eece94974440
|
[
"MIT"
] | null | null | null |
coredis/response/callbacks/script.py
|
alisaifee/aredis
|
c5764a5a2a29c4ed25278548aa54eece94974440
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from coredis.response.callbacks import ResponseCallback
from coredis.response.types import LibraryDefinition
from coredis.response.utils import flat_pairs_to_dict
from coredis.typing import Any, AnyStr, Mapping, Union
from coredis.utils import EncodingInsensitiveDict
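# Illustrative assumption (not from the original module): flat_pairs_to_dict
# turns a flat RESP reply such as [b"library_name", b"mylib", b"engine", b"LUA"]
# into {b"library_name": b"mylib", b"engine": b"LUA"}, and EncodingInsensitiveDict
# allows those keys to be looked up with either str or bytes.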
class FunctionListCallback(ResponseCallback):
def transform(
self, response: Any, **options: Any
) -> Mapping[str, LibraryDefinition]:
libraries = [
EncodingInsensitiveDict(flat_pairs_to_dict(library)) for library in response
]
transformed = EncodingInsensitiveDict()
for library in libraries:
lib_name = library["library_name"]
functions = EncodingInsensitiveDict({})
for function in library.get("functions", []):
function_definition = EncodingInsensitiveDict(
flat_pairs_to_dict(function)
)
functions[function_definition["name"]] = function_definition
functions[function_definition["name"]]["flags"] = set(
function_definition["flags"]
)
library["functions"] = functions
transformed[lib_name] = EncodingInsensitiveDict( # type: ignore
LibraryDefinition(
name=library["name"],
engine=library["engine"],
description=library["description"],
functions=library["functions"],
library_code=library["library_code"],
)
)
return transformed
class FunctionStatsCallback(ResponseCallback):
def transform(
self, response: Any, **options: Any
) -> Mapping[AnyStr, Union[AnyStr, Mapping]]:
transformed = flat_pairs_to_dict(response)
key = b"engines" if b"engines" in transformed else "engines"
engines = flat_pairs_to_dict(transformed.pop(key))
for engine, stats in engines.items():
transformed.setdefault(key, {})[engine] = flat_pairs_to_dict(stats)
return transformed
| 40.884615 | 88 | 0.629351 | 1,816 | 0.854186 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.073848 |
7393a024a0f2a49dd9e4ca3dcf823461e29e512f
| 885 |
py
|
Python
|
controllers/editor.py
|
matumaros/BomberApe
|
d71616192fd54d9a595261c258e4c7367d2eac5d
|
[
"Apache-2.0"
] | null | null | null |
controllers/editor.py
|
matumaros/BomberApe
|
d71616192fd54d9a595261c258e4c7367d2eac5d
|
[
"Apache-2.0"
] | null | null | null |
controllers/editor.py
|
matumaros/BomberApe
|
d71616192fd54d9a595261c258e4c7367d2eac5d
|
[
"Apache-2.0"
] | null | null | null |
from models.tilemap import TileMap
class EditorController:
def __init__(self, view):
self.view = view
self.tilemap = TileMap()
def place_tile(self, coord, ttype):
self.tilemap.add_tile(coord, ttype)
self.view.board.update_tiles({coord: ttype})
def place_spawn(self, coord):
self.tilemap.add_spawn(coord)
self.view.board.update_spawns({coord: 'None'})
def get_tiles(self):
layers = self.tilemap.layers
tiles = layers['ground'].copy()
tiles.update(layers['util'])
tiles.update(layers['powerup'])
tiles.update(layers['wall'])
return tiles
def save(self):
self.tilemap.save()
def load(self, map_path):
self.tilemap.load(map_path)
self.view.board.update_tiles(self.get_tiles())
self.view.board.update_spawns(self.tilemap.spawns)
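# Minimal usage sketch (illustrative; assumes a view object exposing a `board`
# with update_tiles/update_spawns, as the calls above imply):
#   controller = EditorController(view)
#   controller.place_tile((1, 2), 'wall')
#   controller.place_spawn((0, 0))
#   controller.save()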
| 26.029412 | 58 | 0.632768 | 845 | 0.954802 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.039548 |
739647d67e5d34152efe879eebab2aba747ceb26
| 815 |
py
|
Python
|
src/Pages/LoginPage.py
|
Artem0791/Hackathon18_09
|
15f7e6c14264a574dc3efc42c5edd03e39b8dab8
|
[
"MIT"
] | 1 |
2021-09-17T18:26:33.000Z
|
2021-09-17T18:26:33.000Z
|
src/Pages/LoginPage.py
|
Artem0791/Hackathon18_09
|
15f7e6c14264a574dc3efc42c5edd03e39b8dab8
|
[
"MIT"
] | null | null | null |
src/Pages/LoginPage.py
|
Artem0791/Hackathon18_09
|
15f7e6c14264a574dc3efc42c5edd03e39b8dab8
|
[
"MIT"
] | 3 |
2021-09-18T10:06:32.000Z
|
2021-09-18T20:50:29.000Z
|
from .BasePage import BasePage
from src.Locators import LoginPage
from src.Services.Faker.FakeDataGenerator import DataGenerator
class Login(BasePage):
def create_account(self):
data = DataGenerator()
self.select(LoginPage.CreateCustomer.title, 'Mr.')
self.input(LoginPage.CreateCustomer.first_name, data.get_name())
self.input(LoginPage.CreateCustomer.last_name, data.get_name())
self.input(LoginPage.CreateCustomer.email, data.get_email())
self.input(LoginPage.CreateCustomer.password, 'evetah799')
self.input(LoginPage.CreateCustomer.confirm_password, 'evetah799')
self.click(LoginPage.CreateCustomer.consent_checkbox)
self.click(LoginPage.CreateCustomer.terms_checkbox)
self.click(LoginPage.CreateCustomer.register_button)
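# Minimal usage sketch (illustrative; assumes BasePage wraps a webdriver-style
# object and that the LoginPage locators behave as the calls above imply):
#   Login(driver).create_account()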
| 40.75 | 74 | 0.75092 | 681 | 0.835583 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.033129 |
7398394632b763b7e8c94ec433a660e60ba8425e
| 2,777 |
py
|
Python
|
setup.py
|
willamm/dragonchain
|
c3a619e452b6256920ed15ccf5e5263a33dc33e1
|
[
"Apache-2.0"
] | 3 |
2017-10-24T23:12:58.000Z
|
2017-10-24T23:15:28.000Z
|
setup.py
|
willamm/dragonchain
|
c3a619e452b6256920ed15ccf5e5263a33dc33e1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
willamm/dragonchain
|
c3a619e452b6256920ed15ccf5e5263a33dc33e1
|
[
"Apache-2.0"
] | 1 |
2018-01-23T00:32:05.000Z
|
2018-01-23T00:32:05.000Z
|
"""
Copyright 2016 Disney Connected and Advanced Technologies
Licensed under the Apache License, Version 2.0 (the "Apache License")
with the following modification; you may not use this file except in
compliance with the Apache License and the following modification to it:
Section 6. Trademarks. is deleted and replaced with:
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor
and its affiliates, except as required to comply with Section 4(c) of
the License and to reproduce the content of the NOTICE file.
You may obtain a copy of the Apache License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the Apache License with the above modification is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the Apache License for the specific
language governing permissions and limitations under the Apache License.
"""
__author__ = "Joe Roets, Brandon Kite, Dylan Yelton, Michael Bachtel"
__copyright__ = "Copyright 2016, Disney Connected and Advanced Technologies"
__license__ = "Apache"
__version__ = "2.0"
__maintainer__ = "Joe Roets"
__email__ = "[email protected]"
from distutils.errors import DistutilsError
from distutils.spawn import find_executable
from setuptools import setup, Command
from glob import glob
import os.path
# If we have a thrift compiler installed, let's use it to re-generate
# the .py files. If not, we'll use the pre-generated ones.
class gen_thrift(Command):
    user_options = []
def initialize_options(self):
self.root = None
self.thrift = None
def finalize_options(self):
self.root = os.path.abspath(os.path.dirname(__file__))
self.thrift = find_executable('thrift1')
if self.thrift is None:
self.thrift = find_executable('thrift')
def run(self):
if self.thrift is None:
raise DistutilsError(
'Apache Thrift binary not found. Please install Apache Thrift or use pre-generated Thrift classes.')
self.mkpath(os.path.join(self.root, 'blockchain', 'gen'))
for f in glob(os.path.join(self.root, 'thrift', '*.thrift')):
self.spawn([self.thrift, '-out', os.path.join(self.root, 'blockchain', 'gen'),
'-r', '--gen', 'py',
os.path.join(self.root, 'thrift', f)])
setup(name = 'Blockchain',
version = '0.0.2',
description = 'blockchain stuff',
author = 'Folks',
packages = ['blockchain'],
cmdclass = {
'gen_thrift': gen_thrift
}
)
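# Illustrative usage (not from the original file): regenerate the Thrift
# bindings with a locally installed Apache Thrift compiler via
#   python setup.py gen_thrift
# before building/installing the package.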
| 38.041096 | 117 | 0.687432 | 902 | 0.324811 | 0 | 0 | 0 | 0 | 0 | 0 | 1,637 | 0.589485 |