id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses, 7 values) |
---|---|---|
1736149 | import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, optim
from model import MyAwesomeModel
class TrainOREvaluate(object):
""" Helper class that will help launch class methods as commands
from a single script
"""
def __init__(self):
parser = argparse.ArgumentParser(
description="Script for either training or evaluating",
usage="python main.py <command>"
)
parser.add_argument("command", help="Subcommand to run")
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def train(self):
loss_list = []
print("Training day and night")
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--lr', default=0.003)
# add any additional argument that you want
args = parser.parse_args(sys.argv[2:])
print(args)
# TODO: Implement training loop here
model = MyAwesomeModel()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=float(args.lr))
#print(os.listdir('./MyNetworkDay1/data/processed'))
train_data, train_label = torch.load('../../data/processed/training.pt')
train_data = torch.unsqueeze(train_data,1)
trainloader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(*(train_data,train_label)), batch_size=64, shuffle=True)
#import pdb
#pdb.set_trace()
epochs = 5
steps = 0
model.train()
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
optimizer.zero_grad()
log_ps = model(images.float())
loss = criterion(log_ps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
loss_list.append(running_loss/len(trainloader))
print(f"Training loss: {running_loss/len(trainloader)}")
plt.figure()
epoch = np.arange(len(loss_list))
print(len(loss_list))
print(epoch)
plt.plot(epoch, loss_list)
plt.legend(['Training loss'])
plt.xlabel('Epochs'), plt.ylabel('Loss')
plt.savefig('../../reports/figures/loss_curve')
plt.show()
torch.save(model, '../../models/model.pth')
def evaluate(self):
accuracy_list = []
print("Evaluating until hitting the ceiling")
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--load_model_from', default='../../models/model.pth')
# add any additional argument that you want
args = parser.parse_args(sys.argv[2:])
print(args)
# TODO: Implement evaluation logic here
if args.load_model_from:
model = torch.load(args.load_model_from)
test_data, test_label = torch.load('../../data/processed/test.pt')
test_data = torch.unsqueeze(test_data,1)
testloader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(*(test_data,test_label)), batch_size=64, shuffle=True)
model.eval()
with torch.no_grad():
for images, labels in testloader:
#images, labels = next(iter(testloader))
ps = torch.exp(model(images.float()))
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy = torch.mean(equals.type(torch.FloatTensor))
accuracy_list.append(accuracy.item()*100)
else:
print(f'Accuracy: {accuracy.item()*100}%')
epoch = np.arange(len(accuracy_list))
print("mean of accuracy = ", np.mean(accuracy_list))
plt.figure()
plt.plot(epoch, accuracy_list)
plt.legend(['Test set accuracy'])
plt.xlabel('Epochs'), plt.ylabel('Accuracy')
plt.show()
torch.save(model, '../../models/model.pth')
if __name__ == '__main__':
TrainOREvaluate()
| StarcoderdataPython |
3364168 | '''
All Blueprint routes regarding rendering ag templates
'''
from flask import Blueprint, render_template
from app.models.ag import AG, AGSchema, AGSchemaIntern, AGMessageSchema
from app.models import db
from app.util import requires_auth
from app.util.assocations import requires_mentor, requires_membership, requires_ag_message_rights
from config.regex import AGRegex, MessageRegex
bp = Blueprint('ag', __name__)
ag_schema = AGSchema()
ag_schema_intern = AGSchemaIntern()
ags_schema = AGSchema(many=True)
@bp.route('/add', methods=['GET'])
@requires_auth()
def create_ag():
return render_template('ag/add.html', title='Create AG',
ag_regex=AGRegex)
@bp.route('/<ag_name>', methods=['GET'])
@requires_auth()
@requires_membership()
def ag_dashboard(ag_name, ag, user_ag):
schema = AGSchemaIntern()
schema.context = {'ag_id': ag.id}
return render_template('ag/dashboard.html', my_role=user_ag.role, ag=schema.dump(ag),\
title=ag.display_name)
@bp.route('/<ag_name>/invite', methods=['GET'])
@requires_auth()
@requires_mentor()
def invite_ag(ag_name, ag, user_ag):
return render_template('ag/invite.html', ag=ag_schema_intern.dump(ag),\
title=f'Invite {ag.display_name}')
# Events
@bp.route('/<ag_name>/event/add', methods=['GET'])
@requires_auth()
@requires_mentor()
def create_event(ag_name, ag, user_ag):
return render_template('ag/event/add.html', ag=ag_schema_intern.dump(ag),\
title=f'New Event {ag.display_name}')
@bp.route('/<ag_name>/settings', methods=['GET'])
@requires_auth()
@requires_mentor()
def ag_settings(ag_name, ag, user_ag):
schema = AGSchemaIntern()
schema.context = {'ag_id': ag.id}
return render_template('ag/settings.html', title='Create AG', ag=schema.dump(ag))
@bp.route('/discover', methods=['GET'])
def discover():
ags = db.session.query(AG).all()
schema = AGSchema(many=True)
return render_template('ag/discover.html', ags=schema.dump(ags))
@bp.route('<ag_name>/events/<event_name>/edit')
@requires_auth()
@requires_mentor()
def edit_event():
pass
@bp.route('<ag_name>/messages/write', methods=['GET'])
@requires_auth()
@requires_mentor()
def write_message(ag_name, ag, user_ag):
return render_template('ag/write_message.html', title=f'Write Message for {ag.display_name}',\
message_regex=MessageRegex, ag_name=ag_name)
@bp.route('<ag_name>/messages/view/<message_id>')
@requires_auth()
@requires_ag_message_rights()
def view_message(ag_name, message_id, ag, user_ag, ag_message, user_ag_message):
message_schema = AGMessageSchema()
user_ag_message.read = True
db.session.add(user_ag_message)
db.session.commit()
return render_template('ag/view_message.html', title='View Message',\
message=message_schema.dump(ag_message), my_role=user_ag.role)
| StarcoderdataPython |
52746 | <gh_stars>1-10
"""
These settings are only needed if you are planning to push to S3, GitHub or data.world.
If you are only saving to local files, then these are not needed.
"""
S3_BUCKETS = {
# 'bucket': name of the bucket
# 'key': syntax: a_folder/another_folder
#
# For the 'scrub' bucket, one sub-folder named 'data' is created
# For the 'raw' bucket, two sub-folders are created: 'original-files' & 'raw-data'
'scrub': {'bucket': 'THE_PUBLIC_BUCKET_TO_SAVE_TO', 'key': 'THE_DIRECTORY_TO_SAVE_TO'},
'raw': {'bucket': 'THE_PRIVATE_BUCKET_TO_SAVE_TO', 'key': 'THE_DIRECTORY_TO_SAVE_TO'}
}
GITHUB_API_TOKEN = 'YOUR_GITHUB_TOKEN_HERE'
GITHUB_REPOS = {
# 'name': syntax: repo_owner/repo_name
'scrub': {'name': 'THE_PUBLIC_REPO_YOU_WANT_TO_PUSH_TO', 'branch': 'THE_BRANCH_YOU_WANT_TO_PUSH_TO'},
'raw': {'name': 'THE_PRIVATE_REPO_YOU_WANT_TO_PUSH_TO', 'branch': 'THE_BRANCH_YOU_WANT_TO_PUSH_TO'}
}
DATAWORLD_API_TOKEN = 'YOUR_DATAWORLD_TOKEN_HERE'
DATAWORLD_DATASETS = {
'scrub': {'owner': 'DATASET_OWNER', 'name': 'THE_PUBLIC_DATASET_YOU_WANT_TO_PUSH_TO'},
'raw': {'owner': 'DATASET_OWNER', 'name': 'THE_PRIVATE_DATASET_YOU_WANT_TO_PUSH_TO'}
}
| StarcoderdataPython |
3218036 | <reponame>pbh/pybrid<gh_stars>1-10
import pybrid
import os
class WolffhermanReport(pybrid.PybridReport):
AUTHOR = 'isabellamills'
NAME = 'wolffhermanreport'
GROUPS = ['bauchsauer', 'group1']
def write(self, output_dir):
f = file(os.path.join(output_dir, 'index.html'), 'w')
f.write("cumque repellat sequi")
f.close()
| StarcoderdataPython |
81115 | linkedin_email = # place your linkedin login email
linkedin_password = # place your linkedin login password | StarcoderdataPython |
1670733 | import numpy as np
import torch
import torch.nn as nn
def aggr_by_one(model, index_list=None):
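# For each (out_channel, in_channel) kernel, zero out the smallest-magnitude remaining weight
# and record it in aggr_mask so it stays pruned on later passes.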
if not hasattr(model, 'aggr_mask'):
model.aggr_mask = dict()
if index_list is None:
index_list = model.conv_index[1:-1]
for ind in index_list:
W = model.features[ind].weight.data
W_arr = W.cpu().numpy()
if ind not in model.aggr_mask.keys():
model.aggr_mask[ind] = np.ones_like(W_arr)
ch_out, ch_in, ksize, _ = W_arr.shape
for i in range(ch_out):
for j in range(ch_in):
this_kernel = np.squeeze(np.abs(W_arr[i, j, ...]))
this_kernel[this_kernel == 0] = 1000.
m_ind = np.argmin(this_kernel)
m_row = int(m_ind / ksize)
m_col = m_ind % ksize
W_arr[i, j, m_row, m_col] = 0.
model.aggr_mask[ind][i, j, m_row, m_col] = 0.
model.features[ind].weight = nn.Parameter(torch.from_numpy(W_arr).cuda())
def mask_aggr_gradient(model, index_list=None):
if index_list is None:
index_list = model.conv_index[1:-1] # we do not operate on the first and last conv layer
for ind in index_list:
if ind not in model.aggr_mask.keys(): # not yet aggr.
continue
# print(type(self.features[ind].weight.grad))
# print(type(self.aggr_mask[ind]))
mask = model.aggr_mask[ind]
if type(mask) == np.ndarray:
mask = torch.from_numpy(mask).cuda()
model.features[ind].weight.grad.data = torch.mul(model.features[ind].weight.grad.data, mask)
def aggr_select_layer(model, index, aggr_method='max', mode='cpu', get_mask=False):
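# Collapse every 3x3 kernel of the selected conv layer to a single surviving weight, chosen by
# 'max', 'sum', or a 'weighted' centroid rule; optionally store the resulting binary mask in aggr_mask.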
if not hasattr(model, 'aggr_mask'):
model.aggr_mask = dict()
W = model.features[index].weight.data
if mode == 'cpu':
W_arr = W.cpu().numpy()
if get_mask:
mask = np.zeros_like(W_arr)
ch_out, ch_in, ksize, _ = W_arr.shape
assert ksize == 3
for i in range(ch_out):
for j in range(ch_in):
m_ind = np.argmax(np.abs(W_arr[i, j, ...]))
m_row = int(m_ind / ksize)
m_col = m_ind % ksize
if aggr_method == 'max':
m_val = W_arr[i, j, m_row, m_col]
elif aggr_method == 'sum':
m_val = np.sum(W_arr[i, j, ...]) # TODO
elif aggr_method == 'weighted':
ss_x = 0.
ss_y = 0.
for k_i in range(ksize):
for k_j in range(ksize):
ss_x += k_i * W_arr[i, j, k_i, k_j]
ss_y += k_j * W_arr[i, j, k_i, k_j]
ss_x /= np.sum(W_arr[i, j, ...])
ss_y /= np.sum(W_arr[i, j, ...])
m_row = int(round(ss_x))
m_col = int(round(ss_y))
m_val = np.sum(W_arr[i, j, ...])
else:
raise NotImplementedError
if get_mask:
mask[i, j, m_row, m_col] = 1.  # only the largest value is preserved
W_arr[i, j, ...] = np.zeros([ksize, ksize])
W_arr[i, j, m_row, m_col] = m_val
del model.features[index].weight
model.features[index].weight = nn.Parameter(torch.from_numpy(W_arr).cuda())
if get_mask:
model.aggr_mask[index] = torch.from_numpy(mask).cuda()
else:
raise NotImplementedError
def reset_aggr_mask(model):
if hasattr(model, 'aggr_mask'):
del model.aggr_mask
model.aggr_mask = dict()
| StarcoderdataPython |
1783703 | <filename>stupidfuckingbot.py
#i have no idea wtf i am doing
import os
import discord
import configparser
import random
from discord.ext import commands
config = configparser.ConfigParser()
config.read('settings.ini')
client = commands.Bot(command_prefix = 'l.')
frogdir = "animals/frog"
@client.event
async def on_ready():
print(f'Logged in as\n{client.user.name}\n{client.user.id}\n------')
@client.command()
async def ping(ctx):
await ctx.send(f'Ping: {round(client.latency * 1000)} ms')
@client.command()
async def frog(ctx):
frogl = os.listdir(frogdir)
frogp = discord.File(os.path.join(frogdir, random.choice(frogl)))
print(os.path.join(frogdir, random.choice(frogl)))
await ctx.send(file=frogp)
@client.event
async def on_message(message):
if message.author != client.user:
if "BERD" in message.content.upper():
berdp = discord.File("animals/berd.png")
await message.channel.send(file=berdp)
await client.process_commands(message)
client.run(config['Bot']['Token']) | StarcoderdataPython |
1632450 | <filename>rest_registration/api/views/__init__.py<gh_stars>100-1000
from .change_password import change_password # noqa
from .login import login, logout # noqa
from .profile import profile # noqa
from .register import register, verify_registration # noqa
from .register_email import register_email, verify_email # noqa
from .reset_password import reset_password, send_reset_password_link # noqa
| StarcoderdataPython |
14463 | <reponame>aiddun/jazzCNN
import numpy as np
from numpy import random
import glob
import scipy.io.wavfile
np.random.seed(4)
def preprocess(periods, testCategoryNum):
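# Collect .wav file paths and period labels, balance the classes, shuffle,
# and split off a fixed number of test recordings per class.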
periodList = periods
catNum = len(periodList)
def createpathlist():
print("Loading file paths.")
x = []
y = []
for recDirIndex in range(len(periodList)):
rnge = periodList[recDirIndex]
bottomrange = rnge[0]
toprange = rnge[1]
for i in range(bottomrange, toprange):
recYearDirIndex = glob.glob("..//FINAL//" + str(i) + "//*.wav")
for n in range(len(recYearDirIndex)):
path = recYearDirIndex[n]
x.append(path)
y.append(recDirIndex)
#Created 2 original arrays for readability
print("Done.")
return np.array(x), np.array(y)
def truncateData():
x, y = createpathlist()
#Least prevalent category
originalLengths = []
for n in range(catNum):
originalLengths.append(np.count_nonzero(y == n))
minimumNum = min(originalLengths)
for n in range(catNum):
while( y.tolist().count(n) > minimumNum ):
#First occurring instance
for q in range(y.size):
if y[q] == n:
y = np.delete(y, q)
x = np.delete(x, q)
break
return x, y
def psudoRandomOrder():
x, y = truncateData()
print("")
print("Psudo-randomising Data")
randOrder = np.random.permutation(x.shape[0])
x, y = x[randOrder], y[randOrder]
print("Shuffled.")
return x, y
def BatchSeparator():
x, y = psudoRandomOrder()
print("")
print("Separating data into testing and training set.")
x_test = []
y_test = []
for n in range(catNum):
while( y_test.count(n) < testCategoryNum ):
#first occurring instance
for q in range(y.size):
if y[q] == n:
x_test.append(x[q])
x = np.delete(x, q)
y_test.append(y[q])
y = np.delete(y, q)
break
x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = np.array(x)
y_train = np.array(y)
return x_train, y_train, x_test, y_test
x_train, y_train, x_test, y_test = BatchSeparator()
print("Created training set of " + str(y_train.size) + " recordings and a testing set of " + str(y_test.size) + " recordings.")
print("Preproccessing complete.")
return x_train, y_train, x_test, y_test | StarcoderdataPython |
3256465 | <filename>filters.py
from astropy.io import fits
import numpy as np
from catalog_builder import build_catalog
from astropy.table import Table
hdu_list = fits.open("data/magphys_in.fits")
#print(hdu_list.info())
#print(hdu_list[1].header)
#print(hdu_list[1].columns.info())
#print(hdu_list[1].data)
data_field = hdu_list[1].data
list_of_used_ones = []
for element in data_field:
list_of_used_ones.append(element[0])
full_catalog = build_catalog()
filter_fluxes = []
filter_errors = []
filter_refnrs = []
filter_wavelengths = []
filter_bandwidths = []
#filter
filter_fluxes.append('fU38')
filter_errors.append('efU38')
filter_refnrs.append(249)
filter_wavelengths.append(0.3676)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fU')
filter_errors.append('efU')
filter_refnrs.append(363)
filter_wavelengths.append(0.37651)
filter_bandwidths.append(0.0)
#filter
# VIMOS R, since Rc is from WFI
filter_fluxes.append('fR')
filter_errors.append('efR')
filter_refnrs.append(364)
filter_wavelengths.append(0.63755)
filter_bandwidths.append(0.0)
#filter
# For sure from WFI, no Rc filter, so R filter from Hildebrandt et al. (2006)
filter_fluxes.append('fRc')
filter_errors.append('efRc')
filter_refnrs.append(252)
filter_wavelengths.append(0.64284)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fB')
filter_errors.append('efB')
filter_refnrs.append(250)
filter_wavelengths.append(0.44562)
filter_bandwidths.append(0.0)
#filter
# Also WFI
filter_fluxes.append('fV')
filter_errors.append('efV')
filter_refnrs.append(251)
filter_wavelengths.append(0.53401)
filter_bandwidths.append(0.0)
#filter
# Also WFI
filter_fluxes.append('fI')
filter_errors.append('efI')
filter_refnrs.append(253)
filter_wavelengths.append(0.81454)
filter_bandwidths.append(0.0)
#filter
# ISAAC J band b/c in GOOD-S
filter_fluxes.append('fJ')
filter_errors.append('efJ')
filter_refnrs.append(211)
filter_wavelengths.append(1.24279)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fH')
filter_errors.append('efH')
filter_refnrs.append(212)
filter_wavelengths.append(1.63661)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fKs')
filter_errors.append('efKs')
filter_refnrs.append(213)
filter_wavelengths.append(2.15217)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA427')
filter_errors.append('efIA427')
filter_refnrs.append(265)
filter_wavelengths.append(0.4263)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA505')
filter_errors.append('efIA505')
filter_refnrs.append(268)
filter_wavelengths.append(0.5063)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA527')
filter_errors.append('efIA527')
filter_refnrs.append(269)
filter_wavelengths.append(0.5261)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA574')
filter_errors.append('efIA574')
filter_refnrs.append(270)
filter_wavelengths.append(0.5764)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA624')
filter_errors.append('efIA624')
filter_refnrs.append(271)
filter_wavelengths.append(0.6233)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA679')
filter_errors.append('efIA679')
filter_refnrs.append(272)
filter_wavelengths.append(0.6781)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA738')
filter_errors.append('efIA738')
filter_refnrs.append(274)
filter_wavelengths.append(0.7361)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIA767')
filter_errors.append('efIA767')
filter_refnrs.append(275)
filter_wavelengths.append(0.7684)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f125w')
filter_errors.append('ef125w')
filter_refnrs.append(328)
filter_wavelengths.append(1.2486)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f140w')
filter_errors.append('ef140w')
filter_refnrs.append(329)
filter_wavelengths.append(1.3923)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f606w')
filter_errors.append('ef606w')
filter_refnrs.append(94)
filter_wavelengths.append(0.5959)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f814w')
filter_errors.append('ef814w')
filter_refnrs.append(95)
filter_wavelengths.append(0.818644)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f435w')
filter_errors.append('ef435w')
filter_refnrs.append(214)
filter_wavelengths.append(0.4328)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f775w')
filter_errors.append('ef775w')
filter_refnrs.append(216)
filter_wavelengths.append(0.7705)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f850lp')
filter_errors.append('ef850lp')
filter_refnrs.append(217)
filter_wavelengths.append(0.9048)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f160w')
filter_errors.append('ef160w')
filter_refnrs.append(298)
filter_wavelengths.append(1.5419)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIRAC1')
filter_errors.append('efIRAC1')
filter_refnrs.append(153)
filter_wavelengths.append(3.5634)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIRAC2')
filter_errors.append('efIRAC2')
filter_refnrs.append(154)
filter_wavelengths.append(4.5110)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIRAC3')
filter_errors.append('efIRAC3')
filter_refnrs.append(155)
filter_wavelengths.append(5.7593)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fIRAC4')
filter_errors.append('efIRAC4')
filter_refnrs.append(156)
filter_wavelengths.append(7.9594)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f24um')
filter_errors.append('ef24um')
filter_refnrs.append(157)
filter_wavelengths.append(24.0)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('fnu_1mm')
filter_errors.append('efnu_1mm')
filter_refnrs.append(350) #possibly incorrect
filter_wavelengths.append(1000)
filter_bandwidths.append(0.0)
#filter
filter_fluxes.append('f125w')
filter_errors.append('ef125w')
filter_refnrs.append(328)
filter_wavelengths.append(1.23049)
filter_bandwidths.append(0.0)
#filter
# This is Wircam
filter_fluxes.append('ftJ')
filter_errors.append('eftJ')
filter_refnrs.append(277)
filter_wavelengths.append(1.24815)
filter_bandwidths.append(0.0)
#filter
# This is Wircam, TENIS Ks band
filter_fluxes.append('ftK')
filter_errors.append('eftK')
filter_refnrs.append(279)
filter_wavelengths.append(2.13378)
filter_bandwidths.append(0.0)
print(full_catalog.columns)
size_f = 0
print("Missing from Filter set")
for name in full_catalog.columns:
if "flag_" not in name and "f" in name and "ef" not in name and name not in filter_fluxes:
print(name)
size_f += 1
print(size_f)
print("In Filter Set but not in Full Catalog")
size_f = 0
for name in filter_fluxes:
if name not in full_catalog.columns:
print(name)
size_f += 1
print(size_f)
print("In Filter Set")
size_f = 0
for name in filter_fluxes:
if "ef" not in name and "flag_" not in name and "f" in name and name in full_catalog.columns \
and "sfr" not in name:
print(name)
size_f += 1
print(size_f)
print("Length of Filter Set: " + str(len(filter_fluxes)))
for name in filter_fluxes:
print(name)
# TODO: B,V,I,J,H,1.1mm,tJ,Rc,R,U,tK,K | StarcoderdataPython |
3380126 | import asyncio
import json
import logging
import os
import re
import shutil
import string
from collections import Counter
from dataclasses import dataclass, asdict, field
from random import random
from typing import List, Optional, Dict, Generator
import aiofiles
import github
import time
import github_util
git_hub = github_util.load_github()
github_util.print_current_rate_limit()
class ShallowUpdateNotAllowedException(Exception):
pass
class CouldNotReadFromRemoteRepositoryException(Exception):
pass
class CLRFReplacementException(Exception):
pass
class PullRequestAlreadyExistsException(Exception):
pass
class ForkAlreadyExistsException(Exception):
pass
class AmbiguousObjectNameHeadException(Exception):
pass
async def subprocess_run(args: List[str], cwd: str) -> Optional[str]:
proc = await asyncio.create_subprocess_exec(
args[0],
*args[1:],
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=cwd
)
stdout, stderr = await proc.communicate()
print(f'[{args!r} exited with {proc.returncode}]')
if stdout:
print(f'[stdout]\n{stdout.decode()}')
if proc.returncode != 0:
if stderr:
msg = stderr.decode()
error_msg = f'[stderr]\n{msg}'
if 'timeout' in msg:
raise TimeoutError(error_msg)
if 'shallow update not allowed' in msg:
raise ShallowUpdateNotAllowedException(error_msg)
if 'Could not read from remote repository' in msg:
raise CouldNotReadFromRemoteRepositoryException(error_msg)
if 'A pull request already exists' in msg:
raise PullRequestAlreadyExistsException(error_msg)
if 'Error creating fork' in msg and 'already exists on github.com' in msg:
raise ForkAlreadyExistsException(error_msg)
if ' Ambiguous object name: \'HEAD\'' in msg:
raise AmbiguousObjectNameHeadException(error_msg)
raise RuntimeError(error_msg)
else:
if stderr:
msg = stderr.decode()
error_msg = f'[stderr]\n{msg}'
if 'warning: CRLF will be replaced by LF' in msg:
raise CLRFReplacementException(stderr)
print(error_msg)
if stdout:
return stdout.decode()
else:
return None
@dataclass
class VulnerabilityFixModule:
branch_name: str
clone_repos_location: str
data_base_dir: str
save_point_location: str
pr_message_file_absolute_path: str
commit_message: str
_cached_vulnerable_projects: List['VulnerableProjectFiles'] = field(default_factory=list)
def clean_previous_run(self):
# Cleanup method to get rid of previous files
logging.info('Beginning Cleanup')
if os.path.isdir(self.clone_repos_location):
shutil.rmtree(self.clone_repos_location)
os.mkdir(self.clone_repos_location)
if not os.path.isdir(self.save_point_location):
os.mkdir(self.save_point_location)
logging.info('Cleanup Complete')
def _list_all_json_files(self) -> Generator[str, None, None]:
directory = os.fsencode(self.data_base_dir)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.startswith('g__') and filename.endswith('.json'):
yield self.data_base_dir + '/' + filename
def should_accept_project(self, project_name: str) -> bool:
return False
@staticmethod
def _read_repository_and_file_names(json_file_name: str) -> 'VulnerableProjectFiles':
with open(json_file_name) as jsonFile:
data = json.load(jsonFile)
project_name: str = data['project']['name']
# Counter is a Dict[file name, count] representation
files = Counter([obj[0]['file'] for obj in data['data']])
return VulnerableProjectFiles(project_name, files)
def _load_vulnerable_projects(self) -> List['VulnerableProjectFiles']:
vulnerable_projects: List[VulnerableProjectFiles] = []
for json_file in self._list_all_json_files():
vulnerable = self._read_repository_and_file_names(json_file)
if not self.should_accept_project(vulnerable.project_name):
continue
vulnerable.print()
vulnerable_projects.append(vulnerable)
return vulnerable_projects
def get_vulnerable_project_files(self) -> List['VulnerableProjectFiles']:
if len(self._cached_vulnerable_projects) == 0:
self._cached_vulnerable_projects = self._load_vulnerable_projects()
return self._cached_vulnerable_projects
def save_point_file_name(self, project_files: 'VulnerableProjectFiles') -> str:
project_as_file_name = project_files.project_name.replace('/', '__')
return f'{self.save_point_location}/g__{project_as_file_name}.json'
async def do_fix_vulnerable_file(self, project_name: str, file: str, expected_fix_count: int) -> int:
"""
Fixes the vulnerabilities in the file passed.
:param project_name: The name of the project being fixed.
:param file: The file to fix the vulnerabilities in.
:param expected_fix_count: The expected number of vulnerabilities to be fixed.
:return: The actual number of vulnerabilities fixed.
"""
pass
@dataclass(frozen=True)
class VulnerabilityFixReport:
files_fixed: int
vulnerabilities_fixed: int
file_name_fixed: List[str]
@dataclass
class VulnerableProjectFiles:
project_name: str
files: Dict[str, int]
def print(self):
print(self.project_name)
for file in self.files:
print('\t', '/' + file + ': ' + str(self.files[file]))
@dataclass
class VulnerabilityFixerEngine:
fix_module: VulnerabilityFixModule
project_files: VulnerableProjectFiles
def _project_name(self):
return self.project_files.project_name
def project_file_name(self) -> str:
return self.fix_module.clone_repos_location + '/' + self._project_name()
def save_point_file_name(self) -> str:
return self.fix_module.save_point_file_name(self.project_files)
@staticmethod
async def do_resilient_hub_call(args: List[str], cwd: str, lock=None) -> Optional[str]:
"""
Make a call to hub that is resilient to timeout exceptions.
:return: stdout output if successful
"""
async def do_call(wait_time, previous_wait_time=0) -> Optional[str]:
try:
if lock is not None:
async with lock:
# GitHub documentation says to wait 1 second between writes
# https://docs.github.com/en/rest/guides/best-practices-for-integrators#dealing-with-abuse-rate-limits
await asyncio.sleep(1)
return await subprocess_run(args, cwd=cwd)
else:
return await subprocess_run(args, cwd=cwd)
except TimeoutError as e:
if wait_time > 70:
raise TimeoutError(f'Gave up after waiting {previous_wait_time} seconds') from e
# This serves a double purpose as informational and also a 'sane'
# way to slow down this script reasonably
github_util.print_current_rate_limit()
await asyncio.sleep(wait_time)
return await do_call(wait_time * 2 + random(), previous_wait_time=wait_time)
return await do_call(1)
async def do_clone(self):
# Deal with fskobjects https://stackoverflow.com/a/41029655/3708426
await self.do_resilient_hub_call(
[
'hub',
'clone',
self._project_name(),
self._project_name(), # This is the directory to clone into
'--config',
'transfer.fsckobjects=false',
'--config',
'receive.fsckobjects=false',
'--config',
'fetch.fsckobjects=false'
],
cwd=self.fix_module.clone_repos_location
)
async def do_run_in(self, args: List[str]) -> Optional[str]:
assert args[0] != 'hub', 'This method is unsuitable for calling `hub`. Use `do_run_hub_in` instead!'
return await subprocess_run(args, cwd=self.project_file_name())
async def do_run_hub_in(self, args: List[str], lock) -> Optional[str]:
return await self.do_resilient_hub_call(args=args, cwd=self.project_file_name(), lock=lock)
async def do_fix_vulnerable_file(self, file: str, expected_fix_count: int) -> int:
file_being_fixed: str = self.project_file_name() + file
# Sanity check, verify the file still exists, the data may be out of date
if not os.path.exists(file_being_fixed):
logging.warning(
'Fix for `%s` in file `%s` can not be applied as file does not exist!',
self._project_name(),
file
)
return 0
return await self.fix_module.do_fix_vulnerable_file(
self._project_name(),
file_being_fixed,
expected_fix_count
)
def submodule_files(self) -> List[str]:
"""
List all of the git submodule files in this project.
We're not going to be fixing pom files in Git submodules so this allows us to filter them out.
"""
files: List[str] = []
submodule_file_path: str = self.project_file_name() + '/.gitmodules'
if not os.path.isfile(submodule_file_path):
return []
with open(submodule_file_path) as submodule_file:
for line in submodule_file:
if 'path' in line:
files.append('/' + line.split('= ')[1][0:-1])
return files
async def do_fix_vulnerabilities(self) -> VulnerabilityFixReport:
project_vulnerabilities_fixed = 0
project_files_fixed = 0
submodules = self.submodule_files()
files_fixed: List[str] = []
for file in self.project_files.files:
# Skip submodule files
skip = next((True for submodule in submodules if file.startswith(submodule)), False)
if not skip:
file_vulnerabilities_fixed = await self.do_fix_vulnerable_file(file, self.project_files.files[file])
if file_vulnerabilities_fixed > 0:
project_vulnerabilities_fixed += file_vulnerabilities_fixed
project_files_fixed += 1
files_fixed.append(file)
return VulnerabilityFixReport(
project_files_fixed,
project_vulnerabilities_fixed,
files_fixed
)
async def do_create_branch(self):
await self.do_run_in(['git', 'checkout', '-b', self.fix_module.branch_name])
async def do_stage_changes(self, project_report: VulnerabilityFixReport):
command = ['git', 'add']
# Only run add on the files we've modified
# This hopefully limits CRLF changes
files_trimmed = [file_name.lstrip('/') for file_name in project_report.file_name_fixed]
command.extend(files_trimmed)
await self.do_run_in(command)
async def do_commit_changes(self):
msg = self.fix_module.commit_message
await self.do_run_in(['git', 'commit', '-m', msg])
async def do_fork_repository(self, lock, index: int = 0):
org_name = 'BulkSecurityGeneratorProject'
if index == 0:
use_org_name = org_name
else:
use_org_name = f'{org_name}{index}'
try:
await self.do_run_hub_in(
[
'hub',
'fork',
'--remote-name',
'origin',
'--org',
use_org_name
],
lock
)
except ForkAlreadyExistsException as e:
if index >= 46:
raise e
else:
return await self.do_fork_repository(lock, index + 1)
async def do_push_changes(self, retry_count: int = 5):
try:
# Don't use '--force-with-lease' here, it doesn't work. Trust me.
await self.do_run_in(['git', 'push', 'origin', self.fix_module.branch_name, '--force'])
except ShallowUpdateNotAllowedException:
# A shallow update isn't allowed against this repo (I must have forked it before)
await self.do_run_in(['git', 'fetch', '--unshallow'])
# Now re-run the push
# Don't use '--force-with-lease' here, it doesn't work. Trust me.
await self.do_run_in(['git', 'push', 'origin', self.fix_module.branch_name, '--force'])
except CouldNotReadFromRemoteRepositoryException as e:
logging.warning(f'Could not read from remote repository {5 - retry_count}/5')
if retry_count <= 0:
raise e
else:
# Forking is an async operation, so we may need to wait a bit for it
await asyncio.sleep((5 - retry_count) * 2 + random())
await self.do_push_changes(retry_count - 1)
async def do_create_pull_request(self, lock) -> str:
try:
stdout = await self.do_run_hub_in(
['hub', 'pull-request', '-p', '--file', self.fix_module.pr_message_file_absolute_path],
lock
)
pattern = re.compile(r'(https://.*)')
match = pattern.search(stdout)
return match.group(1)
except PullRequestAlreadyExistsException:
return 'ALREADY_EXISTS'
async def do_create_save_point(self, report: VulnerabilityFixReport, pr_url: str):
json_body = {
'project_name': self.project_files.project_name,
'files': self.project_files.files,
'pull_request': pr_url,
'report': asdict(report)
}
async with aiofiles.open(self.save_point_file_name(), 'w') as json_file_to_write:
await json_file_to_write.write(json.dumps(json_body, indent=4))
async def execute_vulnerability_fixer_engine(engine: VulnerabilityFixerEngine, lock) -> VulnerabilityFixReport:
engine.project_files.print()
await engine.do_clone()
project_report: VulnerabilityFixReport = await engine.do_fix_vulnerabilities()
pr_url = ''
# If the LGTM data is out-of-date, there can be cases where no vulnerabilities are fixed
if project_report.vulnerabilities_fixed != 0:
await engine.do_create_branch()
await engine.do_stage_changes(project_report)
await engine.do_commit_changes()
if not engine.project_files.project_name.lower().startswith('jlleitschuh'):
await engine.do_fork_repository(lock)
await engine.do_push_changes()
pr_url = await engine.do_create_pull_request(lock)
await engine.do_create_save_point(project_report, pr_url)
return project_report
async def execute_vulnerability_fixer_engine_checked(
engine: VulnerabilityFixerEngine,
lock
) -> Optional[VulnerabilityFixReport]:
try:
return await execute_vulnerability_fixer_engine(engine, lock)
except AmbiguousObjectNameHeadException:
# They named their main branch 'HEAD'... Why?! No fix for them.
return None
except BaseException as e:
if 'CancelledError' in e.__class__.__name__:
raise e
logging.error(
f'Failed while processing project `{engine.project_files.project_name}`. Exception type: {type(e)}.\n{e!s}')
raise e
def is_archived_git_hub_repository(project: VulnerableProjectFiles) -> bool:
try:
return git_hub.get_repo(project.project_name).archived
except github.UnknownObjectException:
# The repository was removed, so treat it as the same
return True
class EnginesExecutionException(Exception):
pass
async def _do_execute_engines(engines: List[VulnerabilityFixerEngine]):
github_hub_lock = asyncio.Lock()
waiting_reports = []
try:
for engine in engines:
waiting_reports.append(
execute_vulnerability_fixer_engine_checked(engine, github_hub_lock)
)
projects_fixed = 0
files_fixed = 0
vulnerabilities_fixed = 0
print(f'Processing {len(waiting_reports)} Projects:')
all_reports = await asyncio.gather(*waiting_reports)
for report in all_reports:
if report is None:
continue
if report.vulnerabilities_fixed > 0:
projects_fixed += 1
files_fixed += report.files_fixed
vulnerabilities_fixed += report.vulnerabilities_fixed
print('Done!')
print(f'Fixed {vulnerabilities_fixed} vulnerabilities in {files_fixed} files across {projects_fixed} projects!')
except Exception as e:
raise EnginesExecutionException('Engine execution failed!') from e
async def _do_execute_fix_module(fix_module: VulnerabilityFixModule, starting_letter: str):
fix_module.clean_previous_run()
vulnerable_projects = fix_module.get_vulnerable_project_files()
print()
print(f'Loading Async Project Executions for {len(vulnerable_projects)} Projects:')
engines = []
for vulnerable_project in vulnerable_projects:
if not vulnerable_project.project_name.startswith(starting_letter):
continue
# Check this first, it's going to be faster
if os.path.exists(fix_module.save_point_file_name(vulnerable_project)):
logging.info(f'Skipping project {vulnerable_project.project_name} since save point file already exists')
continue
# Check this second, it's going to be slower
if is_archived_git_hub_repository(vulnerable_project):
logging.info(f'Skipping project {vulnerable_project.project_name} since it is archived')
continue
print(f'Loading Execution for: {vulnerable_project.project_name}')
engine = VulnerabilityFixerEngine(
fix_module=fix_module,
project_files=vulnerable_project
)
engines.append(engine)
# Break the engine list into sub-lists of size 100
size = 100
engine_lists = [engines[i:i + size] for i in range(0, len(engines), size)]
for engine_list in engine_lists:
await _do_execute_engines(engine_list)
# try:
# await _do_execute_engines(engine_list)
# except EnginesExecutionException as e:
# logging.exception(f'Failed while processing engine group. {str(e)}')
def do_execute_fix_module(fix_module: VulnerabilityFixModule):
start = time.monotonic()
for char in string.ascii_letters + string.digits:
asyncio.run(_do_execute_fix_module(fix_module, starting_letter=char))
end = time.monotonic()
duration_seconds = end - start
print(f'Execution took {duration_seconds} seconds')
github_util.print_current_rate_limit()
| StarcoderdataPython |
4647 | #!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gtfsobjectbase import GtfsObjectBase
import problems as problems_module
import util
class Transfer(GtfsObjectBase):
"""Represents a transfer in a schedule"""
_REQUIRED_FIELD_NAMES = ['from_stop_id', 'to_stop_id', 'transfer_type']
_FIELD_NAMES = _REQUIRED_FIELD_NAMES + ['min_transfer_time']
_TABLE_NAME = 'transfers'
_ID_COLUMNS = ['from_stop_id', 'to_stop_id']
def __init__(self, schedule=None, from_stop_id=None, to_stop_id=None, transfer_type=None,
min_transfer_time=None, field_dict=None):
self._schedule = None
if field_dict:
self.__dict__.update(field_dict)
else:
self.from_stop_id = from_stop_id
self.to_stop_id = to_stop_id
self.transfer_type = transfer_type
self.min_transfer_time = min_transfer_time
if getattr(self, 'transfer_type', None) in ("", None):
# Use the default, recommended transfer, if attribute is not set or blank
self.transfer_type = 0
else:
try:
self.transfer_type = util.NonNegIntStringToInt(self.transfer_type)
except (TypeError, ValueError):
pass
if hasattr(self, 'min_transfer_time'):
try:
self.min_transfer_time = util.NonNegIntStringToInt(self.min_transfer_time)
except (TypeError, ValueError):
pass
else:
self.min_transfer_time = None
if schedule is not None:
# Note from Tom, Nov 25, 2009: Maybe calling __init__ with a schedule
# should output a DeprecationWarning. A schedule factory probably won't
# use it and other GenericGTFSObject subclasses don't support it.
schedule.AddTransferObject(self)
def ValidateFromStopIdIsPresent(self, problems):
if util.IsEmpty(self.from_stop_id):
problems.MissingValue('from_stop_id')
return False
return True
def ValidateToStopIdIsPresent(self, problems):
if util.IsEmpty(self.to_stop_id):
problems.MissingValue('to_stop_id')
return False
return True
def ValidateTransferType(self, problems):
if not util.IsEmpty(self.transfer_type):
if (not isinstance(self.transfer_type, int)) or \
(self.transfer_type not in range(0, 4)):
problems.InvalidValue('transfer_type', self.transfer_type)
return False
return True
def ValidateMinimumTransferTime(self, problems):
if not util.IsEmpty(self.min_transfer_time):
if self.transfer_type != 2:
problems.MinimumTransferTimeSetWithInvalidTransferType(
self.transfer_type)
# If min_transfer_time is negative, equal to or bigger than 24h, issue
# an error. If smaller than 24h but bigger than 3h issue a warning.
# These errors are not blocking, and should not prevent the transfer
# from being added to the schedule.
if (isinstance(self.min_transfer_time, int)):
if self.min_transfer_time < 0:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="This field cannot contain a negative " \
"value.")
elif self.min_transfer_time >= 24*3600:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="The value is very large for a " \
"transfer time and most likely " \
"indicates an error.")
elif self.min_transfer_time >= 3*3600:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
type=problems_module.TYPE_WARNING,
reason="The value is large for a transfer " \
"time and most likely indicates " \
"an error.")
else:
# It has a value, but it is not an integer
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="If present, this field should contain " \
"an integer value.")
return False
return True
def GetTransferDistance(self):
from_stop = self._schedule.stops[self.from_stop_id]
to_stop = self._schedule.stops[self.to_stop_id]
distance = util.ApproximateDistanceBetweenStops(from_stop, to_stop)
return distance
def ValidateFromStopIdIsValid(self, problems):
if self.from_stop_id not in self._schedule.stops.keys():
problems.InvalidValue('from_stop_id', self.from_stop_id)
return False
return True
def ValidateToStopIdIsValid(self, problems):
if self.to_stop_id not in self._schedule.stops.keys():
problems.InvalidValue('to_stop_id', self.to_stop_id)
return False
return True
def ValidateTransferDistance(self, problems):
distance = self.GetTransferDistance()
if distance > 10000:
problems.TransferDistanceTooBig(self.from_stop_id,
self.to_stop_id,
distance)
elif distance > 2000:
problems.TransferDistanceTooBig(self.from_stop_id,
self.to_stop_id,
distance,
type=problems_module.TYPE_WARNING)
def ValidateTransferWalkingTime(self, problems):
if util.IsEmpty(self.min_transfer_time):
return
if self.min_transfer_time < 0:
# Error has already been reported, and it does not make sense
# to calculate walking speed with negative times.
return
distance = self.GetTransferDistance()
# If min_transfer_time + 120s isn't enough for someone walking very fast
# (2m/s) then issue a warning.
#
# Stops that are close together (less than 240m apart) never trigger this
# warning, regardless of min_transfer_time.
FAST_WALKING_SPEED= 2 # 2m/s
if self.min_transfer_time + 120 < distance / FAST_WALKING_SPEED:
problems.TransferWalkingSpeedTooFast(from_stop_id=self.from_stop_id,
to_stop_id=self.to_stop_id,
transfer_time=self.min_transfer_time,
distance=distance)
def ValidateBeforeAdd(self, problems):
result = True
result = self.ValidateFromStopIdIsPresent(problems) and result
result = self.ValidateToStopIdIsPresent(problems) and result
result = self.ValidateTransferType(problems) and result
result = self.ValidateMinimumTransferTime(problems) and result
return result
def ValidateAfterAdd(self, problems):
valid_stop_ids = True
valid_stop_ids = self.ValidateFromStopIdIsValid(problems) and valid_stop_ids
valid_stop_ids = self.ValidateToStopIdIsValid(problems) and valid_stop_ids
# We need both stop IDs to be valid to able to validate their distance and
# the walking time between them
if valid_stop_ids:
self.ValidateTransferDistance(problems)
self.ValidateTransferWalkingTime(problems)
def Validate(self,
problems=problems_module.default_problem_reporter):
if self.ValidateBeforeAdd(problems) and self._schedule:
self.ValidateAfterAdd(problems)
def _ID(self):
return tuple(self[i] for i in self._ID_COLUMNS)
def AddToSchedule(self, schedule, problems):
schedule.AddTransferObject(self, problems)
| StarcoderdataPython |
51669 | <reponame>zbowling/mojo<filename>mojo/tools/testing/mojom_fetcher/mojom_gn_tests.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import io
import os.path
import unittest
from fakes import FakeMojomFile
from fetcher.mojom_directory import MojomDirectory
from fetcher.repository import Repository
from mojom_gn import BuildGNGenerator
class FakeRepository(Repository):
def get_all_external_mojom_directories(self):
mojom = FakeMojomFile(
self, os.path.join(self.get_external_directory(),
"domokit.org/bar/baz/foo.mojom"))
mojom.add_dependency("example.com/dir/example.mojom")
mojom.add_dependency("example.com/dir/dir.mojom")
mojom.add_dependency("buzz.mojom")
mojom.add_dependency("foo/bar.mojom")
mojom.add_dependency(
"mojo/public/interfaces/application/shell.mojom")
directory = MojomDirectory(
os.path.join(self.get_external_directory(),
"domokit.org/bar/baz"))
directory.add_mojom(mojom)
return [directory]
class FakeBuildGNGenerator(BuildGNGenerator):
def __init__(self, *args, **kwargs):
self.opened_files = {}
BuildGNGenerator.__init__(self, *args, **kwargs)
def _open(self, filepath, mode):
if mode != "w":
raise Exception("Invalid mode " + str(mode))
self.opened_files[filepath] = io.StringIO()
return self.opened_files[filepath]
class TestBuildGNGenerator(unittest.TestCase):
BAZ_BUILD_GN = u"""import("//build/module_args/mojo.gni")
import("$mojo_sdk_root/mojo/public/tools/bindings/mojom.gni")
mojom("baz") {
deps = [
":foo",
]
}
mojom("foo") {
sources = [
"foo.mojom",
]
import_dirs = [
get_path_info("..", "abspath"),
]
mojo_sdk_deps = [
"mojo/public/interfaces/application",
]
deps = [
"//third_party/external/example.com/dir:example",
"//third_party/external/example.com/dir:dir_mojom",
":buzz",
"../foo:bar",
]
}"""
def test_generate(self):
self.maxDiff = None
repository = FakeRepository("/base/repo", "third_party/external")
gn_generator = FakeBuildGNGenerator(repository, os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../../public/tools/mojom_fetcher")))
gn_generator.generate()
output_stream = gn_generator.opened_files[
"/base/repo/third_party/external/domokit.org/bar/baz/BUILD.gn"]
self.assertEquals(prepare_string(self.BAZ_BUILD_GN),
prepare_string(output_stream.getvalue()))
def prepare_string(value):
lines = value.split("\n")
lines = map(lambda l: l.strip().replace(" ", ""), lines)
lines = filter(lambda l: not l.startswith("#"), lines)
return ''.join(lines)
| StarcoderdataPython |
1758210 | <filename>year_2020/day_18_2020.py
from typing import List
from operator import add, mul
from util.helpers import solution_timer
from util.input_helper import read_entire_input
data = read_entire_input(2020,18)
def parse(data) -> List[List[str]]:
return [tokenise(element) for element in data]
def tokenise(exp: str) -> List[str]:
exp = exp.replace(" ","")
output = []
numeric_token = ''
for character in exp:
if character in '+*()':
if numeric_token:
output.append(int(numeric_token))
output.append(character)
numeric_token = ''
else:
numeric_token += character
if numeric_token:
output.append(int(numeric_token))
return output
def evaluate_bracketed_expression(exp, left_to_right=True):
i=0
while True:
if ")" in exp:
i_close = exp.index(")",i)
for index in range(i_close,-1,-1):
if exp[index] == "(":
i_open = index
break
if left_to_right:
exp[i_open:i_close+1] = [evaluate_simple_expression_left_to_rigt(exp[i_open+1:i_close])]
else:
exp[i_open:i_close+1] = [evaluate_simple_expression_reverse(exp[i_open+1:i_close])]
i=i_open
else:
if left_to_right:
return evaluate_simple_expression_left_to_rigt(exp)
else:
return evaluate_simple_expression_reverse(exp)
def evaluate_simple_expression_left_to_rigt(exp):
if len(exp)==1:
return exp[0]
if len(exp) == 0:
return
if '(' in exp:
print(exp)
raise ValueError("Cannot parse '(' in simple expression")
n1 = int(exp[0])
op = exp[1]
n2 = int(exp[2])
res = opl[op](n1,n2)
exp = [res] + exp[3:]
return evaluate_simple_expression_left_to_rigt(exp)
def evaluate_simple_expression_reverse(exp):
if len(exp)==1:
return exp[0]
if len(exp) == 0:
return
if '(' in exp:
print(exp)
raise ValueError("Cannot parse '(' in simple expression")
if "+" in exp:
i = exp.index("+")
n1 = int(exp[i-1])
n2 = int(exp[i+1])
op = "+"
res = opl[op](n1,n2)
exp = exp[:i-1] + [res] + exp[i+2:]
else:
n1 = int(exp[0])
op = exp[1]
n2 = int(exp[2])
res = opl[op](n1,n2)
exp = [res] + exp[3:]
return evaluate_simple_expression_reverse(exp)
opl = {"+":add,
"*":mul}
@solution_timer(2020,18,1)
def part_one(data):
tokens = parse(data)
return sum(evaluate_bracketed_expression(i) for i in tokens)
@solution_timer(2020,18,2)
def part_two(data):
tokens = parse(data)
return sum(evaluate_bracketed_expression(i, left_to_right=False) for i in tokens)
if __name__ == "__main__":
data = read_entire_input(2020,18)
part_one(data)
part_two(data) | StarcoderdataPython |
4814301 | from .fixtures import *
from tenable.errors import *
def test_event_field_name_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events((1, 'gt', '2018-01-01'))
def test_event_filter_operator_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(('date', 1, '2018-01-01'))
def test_event_filter_value_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(('date', 'gt', 1))
def test_event_limit_typeerror(api):
with pytest.raises(TypeError):
api.audit_log.events(limit='nope')
def test_events_standard_user_permissionerror(stdapi):
with pytest.raises(PermissionError):
stdapi.audit_log.events()
def test_events(api):
events = api.audit_log.events(('date', 'gt', '2018-01-01'), limit=100) | StarcoderdataPython |
97116 | """
*Attribute-Exact-Value-Select*
Select based on attribute value.
"""
from abc import ABCMeta
from ._select import AttributeValueSelect
__all__ = ["AttributeExactValueSelect"]
class AttributeExactValueSelect(
AttributeValueSelect,
):
__metaclass__ = ABCMeta
| StarcoderdataPython |
1673785 | import datetime
import pandas as pd
from ut_calendar_scraper.holiday import Holiday
class Semester:
def __init__(self,title,start_year,start_month,start_day,end_year,end_month,end_day,holidays=[]):
self.set_title(title)
self.set_start_date(start_year,start_month,start_day)
self.set_end_date(end_year,end_month,end_day)
self.set_holidays(holidays)
def get_title(self):
return self.__title
def get_start_date(self):
return self.__start_date
def get_end_date(self):
return self.__end_date
def get_holidays(self):
return self.__holidays
def set_title(self,title):
self.__title = title
def set_start_date(self,year,month,day):
self.__start_date = datetime.date(year,month,day)
def set_end_date(self,year,month,day):
self.__end_date = datetime.date(year,month,day)
def set_holidays(self,holidays):
self.__holidays = holidays
def get_observed_holidays(self):
observed_holiday_list = []
start_timestamp = datetime.datetime(self.get_start_date().year,self.get_start_date().month,self.get_start_date().day)
end_timestamp = datetime.datetime(self.get_end_date().year,self.get_end_date().month,self.get_end_date().day)
timestamps = pd.date_range(start_timestamp,end_timestamp,freq='D')
for timestamp in timestamps:
year = timestamp.year
month = timestamp.month
day = timestamp.day
name = Holiday.get_observed_holiday_name(year,month,day)
if name:
holiday = Holiday(name,year,month,day,year,month,day)
observed_holiday_list.append(holiday)
else:
continue
self.set_holidays(self.get_holidays() + observed_holiday_list)
def date_is_in_semester(self,year,month,day):
date = datetime.date(year,month,day)
return date >= self.get_start_date() and date <= self.get_end_date()
def date_is_in_holidays(self,year,month,day):
result = (False, None)
for holiday in self.get_holidays():
if holiday.date_is_in_holiday(year,month,day):
result = (True, holiday)
break
return result
def __str__(self):
string = (
'title: ' + self.get_title()
+ '\nstart_date: ' + str(self.get_start_date())
+ '\nend_date: ' + str(self.get_end_date())
+ '\nholidays:'
)
if self.get_holidays() is not None:
for holiday in self.get_holidays():
string += '\n....' + str(holiday)
else:
string += ' None'
return string | StarcoderdataPython |
179975 | from pyinspect import Report
from pyinspect._colors import orange, mocassin
from rich.bar import Bar
from rich.color import Color
from .note import Note
from ._metadata import make_note_metadata
class Todo(Note):
def __init__(self, note_name, raise_error=True):
"""
A special type of note for Todo lists.
"""
Note.__init__(self, note_name, raise_error=raise_error)
try:
self.metadata["is_todo"] = True
except AttributeError:
self.metadata = make_note_metadata(note_name)
self.metadata["is_todo"] = True
@classmethod
def from_string(cls, string, name):
note = cls(name, raise_error=False)
note.raw_content = string
# make empty metadata
note.metadata = make_note_metadata(name)
return note
def number_of_tasks(self):
self.raw_content = self.raw_content.replace("-[", "- [")
completed = self.raw_content.count("- [x]")
todo = self.raw_content.count("- [ ]")
return completed + todo, completed, todo
def show(self):
show = Report(
title=f"Todo list: [b]{self.name}",
show_info=True,
color=orange,
accent=orange,
)
show._type = f":memo: {self.name}"
show.width = 120
# Show the number of tasks in the note
tot, completed, todo = self.number_of_tasks()
n = completed / tot * 200
color = Color.from_rgb(200 - n, n, 0)
show.add(
f"[{mocassin}]Completed[/{mocassin}] [b {orange}]{completed}/{tot}[/b {orange}][{mocassin}] tasks"
)
n = completed / tot * 200
color = Color.from_rgb(200 - n, n, 0)
bar = Bar(
size=tot, begin=0, end=completed, color=color, bgcolor="#1a1a1a"
)
show.add(bar, "rich")
# parse note content
show.spacer()
show.add("Tasks:")
show = self._parse_content(show)
show.spacer()
# print
show.print()
| StarcoderdataPython |
1781122 | <filename>server/core/authMech/jwt.py<gh_stars>0
"""
This is a JSON Web Token (JWT) module for the authorization mechanism in endpoint/API services.
It follows RFC 7519 guidelines, is easy to maintain, and has minimal computational needs.
It features configurable token expiry and token hash algorithm validation.
"""
# Author : Krishnasagar <<EMAIL>>
import json
import base64
import hashlib
import hmac
import time
# References:
# https://hdknr.github.io/docs/identity/impl_jws.html
# https://codereview.stackexchange.com/questions/150063/validating-hmac-sha256-signature-in-python
# https://jwt.io/introduction/
# http://www.seedbox.com/en/blog/2015/06/05/oauth-2-vs-json-web-tokens-comment-securiser-un-api/
# https://zapier.com/engineering/apikey-oauth-jwt/
# claim keys derived from RFC7519
KEY_ISSUED_AT = 'iat'
KEY_SUBJECT = "sub"
KEY_AUDIENCE = "aud"
class JWTException(Exception):
pass
class JWT(object):
"""
JSON Web Token(JWT) protocol customized.
Base guidelines set to RFC7519.
Default algorithm = HmacSHA256
Token formulation:
header, payload,
signature ==> HMACSHA256(base64URLEncode(header) +"."+ base64URLEncode(payload), SK)
Payload and SK(Secret Key) should be provided for token generation.
"""
__version__ = '0.1.0'
def __init__(self, SK, algo='HS256', token_expiry=None):
"""
SK = Secret Key for signature type string
Optional parameters:
algo = Algorithm for signature type string
token_expiry = expiry for token in seconds
"""
self.header = {
"alg": algo,
"typ": "JWT"
}
self.tkalg = algo
self.SK = SK.encode('utf-8')
self.encsafe_str = "."
self.tk_expiry = token_expiry and token_expiry * 1000
@staticmethod
def encode(data):
"""
Encodes to base64 urlsafe.
:param data: input to encode
:return: string plain text
"""
# data is converted to json string as any typed data can be encoded.
return base64.urlsafe_b64encode(json.dumps(data).encode('utf-8')).decode('utf-8')
@staticmethod
def decode(enc_data):
"""
Decodes encoded data from base64 urlsafe.
:param enc_data: encoded data input.
:return: decoded data
"""
return json.loads(base64.urlsafe_b64decode(enc_data))
def sign_prep(self, packet):
if self.header['alg'] == self.tkalg:
return hmac.new(self.SK, packet.encode('utf-8'), hashlib.sha256).hexdigest()
def curr_time(self):
return int(time.time() * 1000) # milliseconds
def tokenize(self, payload=None):
"""
Generate JWT with:
payload type preference = dict
returns JWT
"""
if isinstance(payload, dict):
payload[KEY_ISSUED_AT] = self.curr_time()
msg = self.encode(self.header) + self.encsafe_str + self.encode(payload)
return msg + self.encsafe_str + self.sign_prep(msg)
def parse_token(self, rtoken):
"""
Parses received token and raised for any tamper of token as per formation
:param rtoken: JWT token string
:return: payload object
"""
try:
rheader, rpayload, rsign = rtoken.split(self.encsafe_str)
if hmac.compare_digest(rsign, self.sign_prep(rheader + self.encsafe_str + rpayload)):
# Signature matched. Token is genuine.
rpayload = self.decode(rpayload)
# Check for token expiry.
if not rpayload[KEY_ISSUED_AT]:
raise JWTException('No timestamp in token found.')
elif self.tk_expiry and (abs(rpayload[KEY_ISSUED_AT] - self.curr_time()) > self.tk_expiry):
raise JWTException('Received token is expired.')
return rpayload
else:
raise JWTException('Received token signature is invalid.')
except Exception as e:
raise e
def detokenize(self, rtoken):
"""
Detokenizes of received JWT. It will return JWT data derived from rtoken
:param rtoken: input JWT
:return: Decoded payload of received JWT
"""
try:
return self.parse_token(rtoken)
except Exception as e:
print(str(e))
raise JWTException(e)
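# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how this class could back an endpoint authorization
# check. The secret value and the `authorize_request` helper are hypothetical
# names introduced only for illustration.
def authorize_request(auth_header, secret='change-me', expiry_seconds=3600):
    """Return the token payload if the bearer token is valid, otherwise None."""
    if not auth_header or not auth_header.startswith('Bearer '):
        return None
    try:
        return JWT(secret, token_expiry=expiry_seconds).detokenize(auth_header.split(' ', 1)[1])
    except JWTException:
        return None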
if __name__ == '__main__':
    # Use a distinct variable name so the JWT class itself is not shadowed.
    jwt_handler = JWT('dfd98724548asbd#%%^*&!@#$', token_expiry=2)
    tk = jwt_handler.tokenize({'key': '<KEY>'})
    print("Token generated %s" % (tk))
    time.sleep(1)
    print(jwt_handler.detokenize(tk))
    tk = jwt_handler.tokenize({'key': '<KEY>>>'})
    print("Token generated %s" % (tk))
    time.sleep(3)
    # This token was issued with a 2 second expiry, so the call below raises JWTException.
    print(jwt_handler.detokenize(tk)) | StarcoderdataPython
3202501 | <filename>commands/Utility.py
from discord import guild, Spotify
from discord.ext import commands
import discord
import json
class Utility(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.bot_has_guild_permissions(send_messages=True)
@commands.cooldown(1, 60, commands.BucketType.guild)
@commands.command()
    async def prefix(self, ctx, *, prefix):
        # Load the stored per-guild prefixes, update this guild's entry,
        # then persist the change before confirming to the user.
        with open(r"./utils/resources/prefixes.json") as f:
            prefixes = json.load(f)
        prefixes[str(ctx.guild.id)] = prefix
        with open(r"./utils/resources/prefixes.json", "w") as f:
            json.dump(prefixes, f, indent=4)
        await ctx.send(f"Guild prefix set to '{prefix}'.")
def setup(bot):
bot.add_cog(Utility(bot))
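# --- Illustrative sketch (not part of the original cog) ---
# The prefixes written above are usually consumed by a command_prefix callable
# when the Bot is constructed. The helper below is a hypothetical example; the
# JSON path mirrors the one used in the cog, "!" is an assumed fallback, and a
# guild context is assumed (DMs have message.guild == None).
def get_prefix(bot, message):
    with open(r"./utils/resources/prefixes.json") as f:
        prefixes = json.load(f)
    return prefixes.get(str(message.guild.id), "!")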
| StarcoderdataPython |
3358077 | import time
from model import *
from utils import *
from game import VisibleGame
import p5
def run_model(mode):
actor = load_model('actor')
actor.eval()
critic = load_model('critic')
critic.eval()
def run_episode(show=True):
'''
play the game and remember what happened
'''
game = VisibleGame(10,10, run=False)
# playing vars
state = state_to_tensor(game.return_state())
gameOver = False
def draw():
            nonlocal show, game, gameOver, state, actor, critic
if show:
game.draw()
# play an episode
if gameOver:
print(game.status())
p5.exit()
if mode in [REINFORCE, ACTOR_CRITIC]:
_, action_index = actor.choose_action(state)
else:
action_index = critic.choose_action(state)
# observe next state and collect reward
_, nextState, gameOver = game.move_player(action_index)
# print(reward)
nextState = state_to_tensor(nextState)
# update state
state = nextState
p5.run(game.setup, draw, 144)
run_episode()
if __name__ == '__main__':
run_model(Q_BASIC)
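# --- Illustrative note (not part of the original script) ---
# Q_BASIC, REINFORCE and ACTOR_CRITIC are assumed to be mode constants pulled
# in via the star imports above; the other policies could be visualised the
# same way:
#   run_model(REINFORCE)
#   run_model(ACTOR_CRITIC)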
| StarcoderdataPython |
3224435 | <gh_stars>1-10
import numpy as np
import random as rand
import matplotlib.pyplot as plt
class component:
    """Disjoint-set (union-find) structure used for graph-based image segmentation."""
    def __init__(self, num_node):
        self.num_node = num_node
        self.parent = [i for i in range(num_node)]  # parent pointer of each node
        self.weight = [0 for i in range(num_node)]  # largest internal edge weight per component
        self.size = [1 for i in range(num_node)]    # number of pixels per component
def find(self,root):
if root==self.parent[root]:
return root
else:
self.parent[root]=self.find(self.parent[root])
return self.parent[root]
def get_size(self,u):
return self.size[u]
def get_dif(self,u,v,k):
return min(self.weight[u]+k/self.size[u],self.weight[v]+k/self.size[v])
def merge(self,u,v,w):
self.parent[v]=u
self.size[u]+=self.size[v]
self.weight[u]=w
self.weight[v]=w
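    # (Illustrative note, not in the original source) merge() is the union step
    # of the disjoint-set: v's root is attached under u, the component sizes are
    # combined, and w records the weight of the edge that joined the two regions,
    # which get_dif() then uses when deciding whether further merges are allowed.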
def image(self,shape):
# img = np.zeros(shape)
img = np.ones(shape)
cnt = 0
color_map = {}
c = lambda: [rand.random() , rand.random() , rand.random()]
for i in range(self.num_node):
root = self.find(i)
if root not in color_map.keys():
color_map[root] = c()
cnt+=1
img[i//shape[1],i%shape[1]] = color_map[root]
return img
def show_images(self,src_img):
shape = src_img.shape
m = {}
cnt = 0
for i in range(self.num_node):
root = self.find(i)
if root not in m.keys():
m[root]= set()
cnt+=1
m[root].add((i//shape[1],i%shape[1]))
img = src_img
cnt+=1
row = int(round(cnt/5+0.5))
plt.subplot(row,5, 1)
plt.imshow(img)
index = 1
for key, value in m.items():
index += 1
sub_img = np.ones(img.shape, dtype=int) * 255
for item in value:
sub_img[item[0], item[1]] = img[item[0], item[1]]
plt.subplot(row,5, index)
plt.imshow(sub_img)
plt.show() | StarcoderdataPython |
1697497 | import requests
from bs4 import BeautifulSoup
class YtQueryParser:
"""
parses youtube page with search query
parses all the attrbutes for a search query results
"""
def __init__(self, query):
self.yt_query_url = "http://youtube.com/results?search_query=" + query
print(self.yt_query_url)
self.yt_links_duration = []
self.yt_links_href = []
self.yt_links_title = []
self.yt_links_views = []
self.yt_links_age = []
self.yt_links_thumbs = []
self.yt_links_artist = []
self.yt_search_list = []
self.yt_search_json = []
self.page = self.get_page()
try:
self.get_duration()
self.get_links_title()
self.get_views_age()
# self.get_thumbnail()
self.get_artist()
self.create_search_object()
except IndexError:
pass
print(len(self.yt_search_list))
a = self.create_json()
def get_page(self):
r = requests.get(self.yt_query_url)
html_doc = r.text
page = BeautifulSoup(html_doc, 'html.parser')
return page
def get_links_title(self):
h3_tags_class = self.page.find_all('h3', {'class': 'yt-lockup-title'})
for h3_tag in h3_tags_class:
self.yt_links_href.append(h3_tag.find_all('a')[0].get('href'))
self.yt_links_title.append(h3_tag.find_all('a')[0].get('title'))
def get_views_age(self):
meta_data = self.page.find_all('ul', {'class': 'yt-lockup-meta-info'})
for data in meta_data:
try:
self.yt_links_age.append(data.find_all('li')[0].get_text())
self.yt_links_views.append(data.find_all('li')[1].get_text())
except IndexError:
self.yt_links_age.append('0')
self.yt_links_views.append('0')
# def get_thumbnail(self):
# thumbs = self.page.find_all('span', {'class': 'yt-thumb-simple'})
# for thumb in thumbs:
# img = thumb.find_all('img')[0].get('src')
# self.yt_links_thumbs.append(img)
def get_duration(self):
video_time_list = self.page.find_all('span', {'class': 'video-time'})
for video_time in video_time_list:
self.yt_links_duration.append(video_time.get_text())
def get_artist(self):
div = self.page.find_all('div', {'class': 'yt-lockup-content'})
for artist in div:
a = artist.find_all('a')[0].get_text()
self.yt_links_artist.append(a)
def create_search_object(self):
i = 0
for x in self.yt_links_title:
if self.yt_links_age[i] == '0' or self.yt_links_views[i] == '0':
i += 1
else:
video_object = YtVideo()
video_object.yt_title = self.yt_links_title[i]
video_object.yt_href = self.yt_links_href[i]
video_object.yt_duration = self.yt_links_duration[i]
video_object.yt_artist = self.yt_links_artist[i]
video_object.yt_views = self.yt_links_views[i]
video_object.yt_thumbnail = "http://img.youtube.com/vi/%s/default.jpg" % video_object.yt_href[9:]
video_object.yt_id = self.yt_links_href[i][9:]
self.yt_search_list.append(video_object)
i += 1
def create_json(self):
for video in self.yt_search_list:
dictionary = dict()
dictionary["id"] = video.yt_id
dictionary["title"] = video.yt_title
dictionary["href"] = video.yt_href
dictionary["duration"] = video.yt_duration
dictionary["artist"] = video.yt_artist
dictionary["views"] = video.yt_views
dictionary["thumbnail"] = video.yt_thumbnail
self.yt_search_json.append(dictionary)
return 1
class YtVideo:
"""
video object containing properties of a video
"""
def __init__(self):
self.yt_title = ""
self.yt_href = ""
self.yt_duration = ""
self.yt_artist = ""
self.yt_views = ""
self.yt_thumbnail = ""
self.yt_id = ""
"""
just for some debugging purposes
"""
# if __name__ == "__main__":
# x = YtQueryParser("hello")
# print(len(x.yt_links_title))
# for i in x.yt_links_thumbs:
# print(i)
# print("\n")
| StarcoderdataPython |
2755 | <gh_stars>0
from haven import haven_chk as hc
from haven import haven_results as hr
from haven import haven_utils as hu
import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import itertools
import os
import pylab as plt
import exp_configs
import time
import numpy as np
from src import models
from src import datasets
from src import utils as ut
from pprint import pformat
import argparse
from torch.utils.data import sampler
from torch.utils.data.sampler import RandomSampler
from torch.backends import cudnn
from torch.nn import functional as F
from torch.utils.data import DataLoader
cudnn.benchmark = True
import logging
def setupLogging():
"""Setup the logger for this module
"""
# Create the Logger
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
logger_formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
handler.setFormatter(logger_formatter)
root_logger.addHandler(handler)
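# Note (illustrative, not in the original source): setupLogging() configures the
# *root* logger at DEBUG level, so any logging.getLogger(...) call made after it
# inherits the '%(name)s - %(levelname)s - %(message)s' format defined above.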
def trainval(exp_dict, savedir_base, datadir, reset=False, num_workers=0):
    # bookkeeping stuff
# ==================
pprint.pprint(exp_dict)
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
hc.delete_and_backup_experiment(savedir)
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, "exp_dict.json"), exp_dict)
print("Experiment saved in %s" % savedir)
    logging.getLogger(__name__).info(f'start trainval with experiment dict {pformat(exp_dict)}')
input('press enter')
# set seed
# ==================
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Dataset
# ==================
# train set
train_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="train",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# val set
val_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="val",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# test set
test_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="test",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# val_sampler = torch.utils.data.SequentialSampler(val_set)
val_loader = DataLoader(val_set,
# sampler=val_sampler,
batch_size=1,
collate_fn=ut.collate_fn,
num_workers=num_workers)
test_loader = DataLoader(test_set,
# sampler=val_sampler,
batch_size=1,
collate_fn=ut.collate_fn,
num_workers=num_workers)
# Model
# ==================
print('get model')
model = models.get_model(model_dict=exp_dict['model'],
exp_dict=exp_dict,
train_set=train_set).cuda()
# model.opt = optimizers.get_optim(exp_dict['opt'], model)
model_path = os.path.join(savedir, "model.pth")
score_list_path = os.path.join(savedir, "score_list.pkl")
print(model)
if os.path.exists(score_list_path):
# resume experiment
model.load_state_dict(hu.torch_load(model_path))
score_list = hu.load_pkl(score_list_path)
s_epoch = score_list[-1]['epoch'] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Train & Val
# ==================
print("Starting experiment at epoch %d" % (s_epoch))
model.waiting = 0
model.val_score_best = -np.inf
train_sampler = torch.utils.data.RandomSampler(
train_set, replacement=True,
num_samples=2*len(test_set))
train_loader = DataLoader(train_set,
sampler=train_sampler,
collate_fn=ut.collate_fn,
batch_size=exp_dict["batch_size"],
drop_last=True,
num_workers=num_workers)
for e in range(s_epoch, exp_dict['max_epoch']):
# Validate only at the start of each cycle
score_dict = {}
test_dict = model.val_on_loader(test_loader,
savedir_images=os.path.join(savedir, "images"),
n_images=3)
# Train the model
train_dict = model.train_on_loader(train_loader)
# Validate the model
val_dict = model.val_on_loader(val_loader)
score_dict["val_score"] = val_dict["val_score"]
# Get new score_dict
score_dict.update(train_dict)
score_dict["epoch"] = e
score_dict["waiting"] = model.waiting
model.waiting += 1
# Add to score_list and save checkpoint
score_list += [score_dict]
# Save Best Checkpoint
score_df = pd.DataFrame(score_list)
if score_dict["val_score"] >= model.val_score_best:
test_dict = model.val_on_loader(test_loader,
savedir_images=os.path.join(savedir, "images"),
n_images=3)
score_dict.update(test_dict)
hu.save_pkl(os.path.join(savedir, "score_list_best.pkl"), score_list)
# score_df.to_csv(os.path.join(savedir, "score_best_df.csv"))
hu.torch_save(os.path.join(savedir, "model_best.pth"),
model.get_state_dict())
model.waiting = 0
model.val_score_best = score_dict["val_score"]
print("Saved Best: %s" % savedir)
# Report & Save
score_df = pd.DataFrame(score_list)
# score_df.to_csv(os.path.join(savedir, "score_df.csv"))
print("\n", score_df.tail(), "\n")
hu.torch_save(model_path, model.get_state_dict())
hu.save_pkl(score_list_path, score_list)
print("Checkpoint Saved: %s" % savedir)
if model.waiting > 100:
break
    print('Experiment completed at epoch %d' % e)
if __name__ == "__main__":
setupLogging()
parser = argparse.ArgumentParser()
logger = logging.getLogger(__name__)
parser.add_argument('-e', '--exp_group_list', nargs="+")
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-d', '--datadir', default=None)
parser.add_argument("-r", "--reset", default=0, type=int)
parser.add_argument("-ei", "--exp_id", default=None)
parser.add_argument("-j", "--run_jobs", default=0, type=int)
parser.add_argument("-nw", "--num_workers", type=int, default=0)
args = parser.parse_args()
# Collect experiments
# ===================
if args.exp_id is not None:
# select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, "exp_dict.json"))
exp_list = [exp_dict]
else:
# select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
for exp_dict in exp_list:
# do trainval
trainval(exp_dict=exp_dict,
savedir_base=args.savedir_base,
datadir=args.datadir,
reset=args.reset,
num_workers=args.num_workers)
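# --- Illustrative invocation (not part of the original script) ---
# The valid experiment group names come from exp_configs.EXP_GROUPS; a typical
# run could look like this (script name, group name and paths are hypothetical):
#   python trainval.py -e my_exp_group -sb ./results -d ./data -r 1 -nw 4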
| StarcoderdataPython |
32911 | #!/usr/bin/env python
import setpath
from bike.testutils import *
from bike.transformer.save import save
from moveToModule import *
class TestMoveClass(BRMTestCase):
def test_movesTheText(self):
src1=trimLines("""
def before(): pass
class TheClass:
pass
def after(): pass
""")
src1after=trimLines("""
def before(): pass
def after(): pass
""")
src2after=trimLines("""
class TheClass:
pass
""")
try:
createPackageStructure(src1, "")
moveClassToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
class TestMoveFunction(BRMTestCase):
def test_importsNameReference(self):
src1=trimLines("""
a = 'hello'
def theFunction(self):
print a
""")
src2after=trimLines("""
from a.foo import a
def theFunction(self):
print a
""")
self.helper(src1, src2after)
def test_importsExternalReference(self):
src0=("""
a = 'hello'
""")
src1=trimLines("""
from top import a
def theFunction(self):
print a
""")
src2after=trimLines("""
from top import a
def theFunction(self):
print a
""")
try:
createPackageStructure(src1, "", src0)
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
def test_doesntImportRefCreatedInFunction(self):
src1=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
src2after=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
self.helper(src1, src2after)
def test_addsImportStatementToOriginalFileIfRequired(self):
src1=trimLines("""
def theFunction(self):
pass
b = theFunction()
""")
src1after=trimLines("""
from a.b.bah import theFunction
b = theFunction()
""")
try:
createPackageStructure(src1,"")
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
finally:
removePackageStructure()
def test_updatesFromImportStatementsInOtherModules(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInOtherModules(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInTargetModule(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
#print file(pkgstructureFile0).read()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportInTargetModule(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def helper(self, src1, src2after):
try:
createPackageStructure(src1, "")
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3216724 | <gh_stars>10-100
"""user note relation
Revision ID: 501e2f945ff9
Revises: <PASSWORD>
Create Date: 2015-07-06 21:21:47.558753
"""
# revision identifiers, used by Alembic.
revision = '501e2f945ff9'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'contract_note', sa.Column('taken_by_id', sa.Integer(), nullable=True))
op.create_foreign_key('contract_note_user_id_fkey', 'contract_note', 'users', ['taken_by_id'], ['id'])
op.create_index(op.f('ix_document_id'), 'document', ['id'], unique=False)
op.drop_index('ix_documents_id', table_name='document')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_documents_id', 'document', ['id'], unique=False)
op.drop_index(op.f('ix_document_id'), table_name='document')
op.drop_constraint('contract_note_user_id_fkey', 'contract_note', type_='foreignkey')
op.drop_column(u'contract_note', 'taken_by_id')
### end Alembic commands ###
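# --- Illustrative note (not part of the original migration) ---
# With the standard Alembic CLI, this revision would be applied or reverted as:
#   alembic upgrade 501e2f945ff9   (or: alembic upgrade head)
#   alembic downgrade -1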
| StarcoderdataPython |
3245928 | import unittest
from io import StringIO
from collections import namedtuple
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import xlsxwriter
import gptables
from gptables.core.wrappers import GPWorkbook
from gptables.core.wrappers import GPWorksheet
from gptables import Theme
from gptables import gptheme
import pytest
Tb = namedtuple("Testbook", "wb ws")
valid_text_elements = [ # Not None
"This is a string",
["More than ", {"italic": True}, "just ", "a string"]
]
test_text_list = [
"This has a $$reference$$",
"This one doesn't",
"Here's another $$one$$"
]
exp_text_list = [
"This has a (1)",
"This one doesn't",
"Here's another (2)"
]
invalid_text_elements = [
dict(),
set(),
42,
3.14,
True
]
@pytest.fixture()
def testbook():
# See https://github.com/jmcnamara/XlsxWriter/issues/746#issuecomment-685869888
wb = GPWorkbook(options={'in_memory': True})
ws = wb.add_worksheet()
yield Tb(wb, ws)
wb.fileclosed = 1
class TestGPWorksheetInit:
"""
Test that default attributes are set when GPWorksheets are created.
"""
def test_subclass(self):
"""
Test that the GPWorksheet class is a subclass of the XlsxWriter
Worksheet class.
"""
assert issubclass(
GPWorksheet,
xlsxwriter.worksheet.Worksheet
)
def test_default_theme_set(self, testbook):
"""
Test that the default theme (gptheme) is used when no theme is set.
"""
assert testbook.wb.theme == gptheme
def test_default_gridlines(self, testbook):
"""
Test that print and screen gridlines are hidden by default.
"""
assert testbook.ws.print_gridlines == 0
assert testbook.ws.screen_gridlines == 0
def test_wb_reference(self, testbook):
"""
Test that GPWorksheets reference their parent GPWorkbook.
"""
assert testbook.ws._workbook == testbook.wb
@pytest.mark.parametrize("not_a_gptable", [
dict(),
set(),
[],
1,
3.14,
"test_string",
pd.DataFrame()
])
def test_invalid_write_gptable(self, not_a_gptable, testbook):
"""
Test that write_gptable() raises a TypeError when argument is not a
gptables.GPTable object.
"""
with pytest.raises(TypeError):
testbook.ws.write_gptable(not_a_gptable)
class TestGPWorksheetWriting:
"""
Test that additional writing methods correctly write to GPWorksheet object.
"""
def test__smart_write_str(self, testbook):
"""
Test that strings are stored in the GPWorksheet as expected.
"""
testbook.ws._smart_write(0, 0, valid_text_elements[0], {})
# Strings are stored in a lookup table for efficiency
got_string = testbook.ws.str_table.string_table
exp_string = {valid_text_elements[0]: 0}
assert got_string == exp_string
# String is referenced using a named tuple (string, Format)
# Here we get first element, which references string lookup location
got_lookup = testbook.ws.table[0][0][0]
exp_lookup = 0
assert got_lookup == exp_lookup
def test__smart_write_formatted_str(self, testbook):
testbook.ws._smart_write(1, 2, valid_text_elements[0], {"bold": True})
# Strings are stored in a lookup table for efficiency
got_string = testbook.ws.str_table.string_table
exp_string = {valid_text_elements[0]: 0}
assert got_string == exp_string
# String is referenced using a named tuple (string, Format)
# Here we get first element, which references string lookup location
cell = testbook.ws.table[1][2]
got_lookup = cell[0]
exp_lookup = 0
assert got_lookup == exp_lookup
format_obj = cell[1]
assert format_obj.bold
def test__smart_write_rich_text(self, testbook):
testbook.wb.set_theme(Theme({}))
testbook.ws._smart_write(0, 0, valid_text_elements[1], {})
# Strings are stored in a lookup table for efficiency
got_string = testbook.ws.str_table.string_table
exp_string = {'<r><t xml:space="preserve">More than </t></r><r><rPr><i'
'/><sz val="11"/><color theme="1"/><rFont val="Calibri"/'
'><family val="2"/><scheme val="minor"/></rPr><t xml:spa'
'ce="preserve">just </t></r><r><rPr><sz val="11"/><color'
' theme="1"/><rFont val="Calibri"/><family val="2"/><sch'
'eme val="minor"/></rPr><t>a string</t></r>': 0}
assert got_string == exp_string
# String is referenced using a named tuple (string, Format)
# Here we get first element, which references string lookup location
got_lookup = testbook.ws.table[0][0][0]
exp_lookup = 0
assert got_lookup == exp_lookup
def test__smart_write_formatted_rich_text(self, testbook):
testbook.wb.set_theme(Theme({}))
testbook.ws._smart_write(1, 2, valid_text_elements[1], {"bold": True})
# Strings are stored in a lookup table for efficiency
got_string = testbook.ws.str_table.string_table
exp_string = {'<r><t xml:space="preserve">More than </t></r><r><rPr><b'
'/><i/><sz val="11"/><color theme="1"/><rFont val="Calib'
'ri"/><family val="2"/><scheme val="minor"/></rPr><t xml'
':space="preserve">just </t></r><r><rPr><b/><sz val="11"'
'/><color theme="1"/><rFont val="Calibri"/><family val="'
'2"/><scheme val="minor"/></rPr><t>a string</t></r>': 0}
assert got_string == exp_string
# String is referenced using a named tuple (string, Format)
# Here we get first element, which references string lookup location
cell = testbook.ws.table[1][2]
got_lookup = cell[0]
exp_lookup = 0
assert got_lookup == exp_lookup
format_obj = cell[1]
assert format_obj.bold
class TestGPWorksheetFooterText:
"""
Test that GPTable footer elements are modified correctly by GPWorksheet
during write_gptable().
"""
@pytest.mark.parametrize("text,enclosed", [
("", "()"),
("This has a $$reference$$", "(This has a $$reference$$)"),
("(Already have some)", "((Already have some))")
]
)
def test__enclose_text_string(self, text, enclosed, testbook):
"""
Test that strings are correctly flanked with parentheses.
"""
test_text = testbook.ws._enclose_text(text)
assert test_text == enclosed
def test__enclose_text_list(self, testbook):
"""
Test that rich text is flanked with parentheses.
"""
test_list = ["An", {"bold": True}, "example", "note"]
got = testbook.ws._enclose_text(test_list)
exp = ["(", "An", {"bold": True}, "example", "note", ")"]
assert got == exp
@pytest.mark.parametrize("text", test_text_list)
def test__replace_reference(self, text, testbook):
"""
Test that references ($$ref$$ style) in strings are replaced with
numbers, in order of appearance. Also tests replacement in lists.
"""
got_output = []
ordered_refs = []
got_output = [testbook.ws._replace_reference(text, ordered_refs) for text in test_text_list]
exp_refs = ["reference", "one"]
assert ordered_refs == exp_refs
assert got_output == exp_text_list
@pytest.mark.parametrize("text,refs,output",
zip(test_text_list,
[["reference"], [], ["one"]],
["This has a (1)", "This one doesn't", "Here's another (1)"]
))
def test__replace_reference_in_attr_str(self, text, refs, output, testbook):
"""
Test that references are replaced in a single string.
"""
ordered_refs = []
got_text = testbook.ws._replace_reference_in_attr(
text,
ordered_refs
)
assert ordered_refs == refs
assert got_text == output
def test__replace_reference_in_attr_dict(self, testbook):
"""
Test that references are replaced in dictionary values, but not keys.
"""
ordered_refs = []
test_text_dict = {
"$$key$$": "This is a value with a $$reference$$",
"another_key": "Another value",
"third_key": "$$one$$ more reference"
}
got_text = testbook.ws._replace_reference_in_attr(
test_text_dict,
ordered_refs
)
assert ordered_refs == ["reference", "one"]
exp_text_dict = {
"$$key$$": "This is a value with a (1)",
"another_key": "Another value",
"third_key": "(2) more reference"
}
assert got_text == exp_text_dict
class TestGPWorksheetFormatUpdate:
"""
Test that GPWorksheet format updating methods work as expected.
"""
def test__apply_format_dict(self, testbook):
test = dict()
format_dict = {"bold": True}
testbook.ws._apply_format(test, format_dict)
exp = {"bold": True}
assert test == exp
def test__apply_format_series(self, testbook):
test = pd.Series([{} for n in range(3)])
format_dict = {"bold": True}
testbook.ws._apply_format(test, format_dict)
exp = pd.Series([{"bold": True} for n in range(3)])
assert_series_equal(test, exp)
def test__apply_format_dataframe(self, testbook):
test = pd.DataFrame(columns=[0, 1, 2], index = [0, 1])
test.iloc[0] = [{} for n in range(3)]
test.iloc[1] = [{} for n in range(3)]
format_dict = {"bold": True}
testbook.ws._apply_format(test, format_dict)
exp = pd.DataFrame(columns=[0, 1, 2], index = [0, 1])
exp.iloc[0] = [{"bold": True} for n in range(3)]
exp.iloc[1] = [{"bold": True} for n in range(3)]
assert_frame_equal(test, exp)
class TestGPWorkbookStatic:
@pytest.mark.parametrize("input, expected", [
("no references", "no references"),
("ref at end$$1$$", "ref at end"),
("$$1$$ref at start", "ref at start"),
("two$$1$$ refs$$2$$", "two refs"),
("three$$1$$ refs$$2$$, wow$$3$$", "three refs, wow")
])
def test__strip_annotation_references(self, input, expected):
assert GPWorksheet._strip_annotation_references(input) == expected
class TestGPWorkbook:
"""
Test that GPWorkbook initialisation and methods work as expected.
"""
def test_subclass(self):
"""
Test that the GPWorkbook class is a subclass of the XlsxWriter
Workbook class.
"""
assert issubclass(
GPWorkbook,
xlsxwriter.Workbook
)
def test_default_theme_set(self, testbook):
"""
Test that the workbook theme is set to gptheme by default.
"""
assert testbook.wb.theme == gptheme
def test_valid_set_theme(self, testbook):
"""
Test that setting a new theme with a Theme object works as expected.
Essentially, make sure that gptheme is not used.
"""
theme_config = {"title": {"bold": True}}
theme = gptables.Theme(theme_config)
testbook.wb.set_theme(theme)
assert testbook.wb.theme == gptables.Theme(theme_config)
@pytest.mark.parametrize("not_a_theme", [
dict(),
set(),
[],
1,
3.14,
"test_string",
pd.DataFrame()
])
def test_invalid_set_theme(self, not_a_theme, testbook):
"""
Test that setting theme with an object that is not a gptables.Theme
raises a TypeError.
"""
with pytest.raises(TypeError):
testbook.wb.set_theme(not_a_theme)
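# --- Illustrative note (not part of the original test module) ---
# These tests target pytest; from the project root they would typically be
# collected and run with something like (path is hypothetical):
#   pytest tests/test_wrappers.py -q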
| StarcoderdataPython |
198482 | import asyncio
import contextlib
import logging
from typing import (Any, Dict, Iterator, List,
Optional, Sequence, Set, Tuple, Union)
from opentrons import types, hardware_control as hc, commands as cmds
from opentrons.commands import CommandPublisher
import opentrons.config.robot_configs as rc
from opentrons.config import feature_flags as fflags
from opentrons.hardware_control import adapters, modules
from opentrons.hardware_control.simulator import Simulator
from opentrons.hardware_control.types import CriticalPoint, Axis
from .labware import (Well, Labware, load, get_labware_definition,
load_from_definition, load_module,
ModuleGeometry, quirks_from_any_parent,
ThermocyclerGeometry, OutOfTipsError,
select_tiprack_from_list, filter_tipracks_to_start,
LabwareDefinition)
from .util import (FlowRates, PlungerSpeeds, Clearances, AxisMaxSpeeds,
HardwareManager)
from . import geometry
from . import transfers
MODULE_LOG = logging.getLogger(__name__)
ModuleTypes = Union[
'TemperatureModuleContext',
'MagneticModuleContext',
'ThermocyclerContext'
]
AdvancedLiquidHandling = Union[
Well,
types.Location,
List[Union[Well, types.Location]],
List[List[Well]]]
class ProtocolContext(CommandPublisher):
""" The Context class is a container for the state of a protocol.
It encapsulates many of the methods formerly found in the Robot class,
including labware, instrument, and module loading, as well as core
functions like pause and resume.
Unlike the old robot class, it is designed to be ephemeral. The lifetime
of a particular instance should be about the same as the lifetime of a
protocol. The only exception is the one stored in
:py:attr:`.legacy_api.api.robot`, which is provided only for back
compatibility and should be used less and less as time goes by.
"""
def __init__(self,
loop: asyncio.AbstractEventLoop = None,
hardware: hc.API = None,
broker=None,
bundled_labware: Dict[str, LabwareDefinition] = None,
bundled_data: Dict[str, bytes] = None,
extra_labware: Dict[str, LabwareDefinition] = None
) -> None:
""" Build a :py:class:`.ProtocolContext`.
:param loop: An event loop to use. If not specified, this ctor will
(eventually) call :py:meth:`asyncio.get_event_loop`.
:param hardware: An optional hardware controller to link to. If not
specified, a new simulator will be created.
:param broker: An optional command broker to link to. If not
specified, a dummy one is used.
:param bundled_labware: A dict mapping labware URIs to definitions.
This is used when executing bundled protocols,
and if specified will be the only allowed
source for labware definitions, excluding the
built in definitions and anything in
``extra_labware``.
:param bundled_data: A dict mapping filenames to the contents of data
files. Can be used by the protocol, since it is
exposed as
:py:attr:`.ProtocolContext.bundled_data`
:param extra_labware: A dict mapping labware URIs to definitions. These
URIs are searched during :py:meth:`.load_labware`
in addition to the system definitions (if
``bundled_labware`` was not specified). Used to
provide custom labware definitions.
"""
super().__init__(broker)
self._loop = loop or asyncio.get_event_loop()
self._deck_layout = geometry.Deck()
self._instruments: Dict[types.Mount, Optional[InstrumentContext]]\
= {mount: None for mount in types.Mount}
self._modules: Set[ModuleContext] = set()
self._last_moved_instrument: Optional[types.Mount] = None
self._location_cache: Optional[types.Location] = None
self._hw_manager = HardwareManager(hardware)
self._log = MODULE_LOG.getChild(self.__class__.__name__)
self._commands: List[str] = []
self._unsubscribe_commands = None
self.clear_commands()
self._bundled_labware = bundled_labware
self._extra_labware = extra_labware or {}
self._bundled_data: Dict[str, bytes] = bundled_data or {}
self._default_max_speeds = AxisMaxSpeeds()
self._load_trash()
def _load_trash(self):
if fflags.short_fixed_trash():
trash_name = 'opentrons_1_trash_850ml_fixed'
else:
trash_name = 'opentrons_1_trash_1100ml_fixed'
if self.deck['12']:
del self.deck['12']
self.load_labware(trash_name, '12')
@property
def bundled_data(self) -> Dict[str, bytes]:
""" Accessor for data files bundled with this protocol, if any.
This is a dictionary mapping the filenames of bundled datafiles, with
extensions but without paths (e.g. if a file is stored in the bundle as
``data/mydata/aspirations.csv`` it will be in the dict as
``'aspirations.csv'``) to the bytes contents of the files.
"""
return self._bundled_data
def set_bundle_contents(
self,
bundled_labware: Dict[str, LabwareDefinition] = None,
bundled_data: Dict[str, bytes] = None,
extra_labware: Dict[str, LabwareDefinition] = None):
""" Specify bundle contents after the context is created. Replaces the
old values.
"""
self._bundled_labware = bundled_labware
self._extra_labware = extra_labware or {}
self._bundled_data = bundled_data or {}
self._load_trash()
def __del__(self):
if getattr(self, '_unsubscribe_commands', None):
self._unsubscribe_commands()
@property
def max_speeds(self) -> AxisMaxSpeeds:
""" Per-axis speed limits when moving this instrument.
Changing this value changes the speed limit for each non-plunger
        axis of the robot, when moving this pipette. Note that this
only sets a limit on how fast movements can be; movements can
still be slower than this. However, it is useful if you require
the robot to move much more slowly than normal when using this
pipette.
This is a dictionary mapping string names of axes to float values
limiting speeds. To change a speed, set that axis's value. To
reset an axis's speed to default, delete the entry for that axis
or assign it to ``None``.
For instance,
.. code-block:: py
def run(protocol):
                protocol.comment(str(protocol.max_speeds))  # '{}' - all default
protocol.max_speeds['A'] = 10 # limit max speed of
# right pipette Z to 10mm/s
del protocol.max_speeds['A'] # reset to default
protocol.max_speeds['X'] = 10 # limit max speed of x to
# 10 mm/s
protocol.max_speeds['X'] = None # reset to default
"""
return self._default_max_speeds
def commands(self):
return self._commands
def clear_commands(self):
self._commands.clear()
if self._unsubscribe_commands:
self._unsubscribe_commands()
def on_command(message):
payload = message.get('payload')
text = payload.get('text')
if text is None:
return
if message['$'] == 'before':
self._commands.append(text.format(**payload))
self._unsubscribe_commands = self.broker.subscribe(
cmds.types.COMMAND, on_command)
@contextlib.contextmanager
def temp_connect(self, hardware: hc.API):
""" Connect temporarily to the specified hardware controller.
This should be used as a context manager:
.. code-block :: python
with ctx.temp_connect(hw):
# do some tasks
ctx.home()
# after the with block, the context is connected to the same
# hardware control API it was connected to before, even if
# an error occurred in the code inside the with block
"""
old_hw = self._hw_manager.hardware
try:
self._hw_manager.set_hw(hardware)
yield self
finally:
self._hw_manager.set_hw(old_hw)
def connect(self, hardware: hc.API):
""" Connect to a running hardware API.
This can be either a simulator or a full hardware controller.
Note that there is no true disconnected state for a
:py:class:`.ProtocolContext`; :py:meth:`disconnect` simply creates
a new simulator and replaces the current hardware with it.
"""
self._hw_manager.set_hw(hardware)
self._hw_manager.hardware.cache_instruments()
def disconnect(self):
""" Disconnect from currently-connected hardware and simulate instead
"""
self._hw_manager.reset_hw()
def is_simulating(self) -> bool:
return self._hw_manager.hardware.get_is_simulator()
def load_labware_from_definition(
self,
labware_def: LabwareDefinition,
location: types.DeckLocation,
label: str = None
) -> Labware:
""" Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware definition specified by `labware_def`
to the location specified by `location`.
:param labware_def: The labware definition to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
"""
parent = self.deck.position_for(location)
labware_obj = load_from_definition(labware_def, parent, label)
self._deck_layout[location] = labware_obj
return labware_obj
def load_labware(
self,
load_name: str,
location: types.DeckLocation,
label: str = None,
namespace: str = None,
version: int = None
) -> Labware:
""" Load a labware onto the deck given its name.
For labware already defined by Opentrons, this is a convenient way
to collapse the two stages of labware initialization (creating
the labware and adding it to the protocol) into one.
This function returns the created and initialized labware for use
later in the protocol.
:param load_name: A string to use for looking up a labware definition
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
:param str label: An optional special name to give the labware. If
specified, this is the name the labware will appear
as in the run log and the calibration view in the
Opentrons app.
:param str namespace: The namespace the labware definition belongs to.
If unspecified, will search 'opentrons' then 'custom_beta'
:param int version: The version of the labware definition. If
unspecified, will use version 1.
"""
labware_def = get_labware_definition(
load_name, namespace, version,
bundled_defs=self._bundled_labware,
extra_defs=self._extra_labware)
return self.load_labware_from_definition(labware_def, location, label)
def load_labware_by_name(
self,
load_name: str,
location: types.DeckLocation,
label: str = None,
namespace: str = None,
version: int = 1
) -> Labware:
MODULE_LOG.warning(
'load_labware_by_name is deprecated and will be removed in '
'version 3.12.0. please use load_labware')
return self.load_labware(
load_name, location, label, namespace, version)
@property
def loaded_labwares(self) -> Dict[int, Union[Labware, ModuleGeometry]]:
""" Get the labwares that have been loaded into the protocol context.
Slots with nothing in them will not be present in the return value.
.. note::
If a module is present on the deck but no labware has been loaded
into it with :py:meth:`.ModuleContext.load_labware`, there will
be no entry for that slot in this value. That means you should not
use ``loaded_labwares`` to determine if a slot is available or not,
only to get a list of labwares. If you want a data structure of all
objects on the deck regardless of type, see :py:attr:`deck`.
:returns: Dict mapping deck slot number to labware, sorted in order of
the locations.
"""
def _only_labwares() -> Iterator[
Tuple[int, Union[Labware, ModuleGeometry]]]:
for slotnum, slotitem in self._deck_layout.items():
if isinstance(slotitem, Labware):
yield slotnum, slotitem
elif isinstance(slotitem, ModuleGeometry):
if slotitem.labware:
yield slotnum, slotitem.labware
return dict(_only_labwares())
def load_module(
self, module_name: str,
location: Optional[types.DeckLocation] = None) -> ModuleTypes:
""" Load a module onto the deck given its name.
This is the function to call to use a module in your protocol, like
:py:meth:`load_instrument` is the method to call to use an instrument
in your protocol. It returns the created and initialized module
context, which will be a different class depending on the kind of
module loaded.
A map of deck positions to loaded modules can be accessed later
using :py:attr:`loaded_modules`.
:param str module_name: The name of the module.
:param location: The location of the module. This is usually the
name or number of the slot on the deck where you
will be placing the module. Some modules, like
the Thermocycler, are only valid in one deck
location. You do not have to specify a location
when loading a Thermocycler - it will always be
in Slot 7.
:type location: str or int or None
:returns ModuleContext: The loaded and initialized
:py:class:`ModuleContext`.
"""
resolved_name = ModuleGeometry.resolve_module_name(module_name)
resolved_location = self._deck_layout.resolve_module_location(
resolved_name, location)
geometry = load_module(resolved_name,
self._deck_layout.position_for(
resolved_location))
hc_mod_instance = None
hw = self._hw_manager.hardware._api._backend
mod_class = {
'magdeck': MagneticModuleContext,
'tempdeck': TemperatureModuleContext,
'thermocycler': ThermocyclerContext}[resolved_name]
for mod in self._hw_manager.hardware.discover_modules():
if mod.name() == resolved_name:
hc_mod_instance = mod
break
if isinstance(hw, Simulator) and hc_mod_instance is None:
mod_type = {
'magdeck': modules.magdeck.MagDeck,
'tempdeck': modules.tempdeck.TempDeck,
'thermocycler': modules.thermocycler.Thermocycler
}[resolved_name]
hc_mod_instance = adapters.SynchronousAdapter(mod_type(
port='', simulating=True, loop=self._loop))
if hc_mod_instance:
mod_ctx = mod_class(self,
hc_mod_instance,
geometry,
self._loop)
else:
raise RuntimeError(
f'Could not find specified module: {resolved_name}')
self._modules.add(mod_ctx)
self._deck_layout[resolved_location] = geometry
return mod_ctx
@property
def loaded_modules(self) -> Dict[int, 'ModuleContext']:
""" Get the modules loaded into the protocol context.
This is a map of deck positions to modules loaded by previous calls
to :py:meth:`load_module`. It is not necessarily the same as the
modules attached to the robot - for instance, if the robot has a
Magnetic Module and a Temperature Module attached, but the protocol
has only loaded the Temperature Module with :py:meth:`load_module`,
only the Temperature Module will be present.
:returns Dict[str, ModuleContext]: Dict mapping slot name to module
contexts. The elements may not be
ordered by slot number.
"""
def _modules() -> Iterator[Tuple[int, 'ModuleContext']]:
for module in self._modules:
yield int(module.geometry.parent), module
return dict(_modules())
def load_instrument(
self,
instrument_name: str,
mount: Union[types.Mount, str],
tip_racks: List[Labware] = None,
replace: bool = False) -> 'InstrumentContext':
""" Load a specific instrument required by the protocol.
This value will actually be checked when the protocol runs, to
ensure that the correct instrument is attached in the specified
location.
:param str instrument_name: The name of the instrument model, or a
prefix. For instance, 'p10_single' may be
used to request a P10 single regardless of
the version.
:param mount: The mount in which this instrument should be attached.
This can either be an instance of the enum type
:py:class:`.types.Mount` or one of the strings `'left'`
and `'right'`.
:type mount: types.Mount or str
:param tip_racks: A list of tip racks from which to pick tips if
:py:meth:`.InstrumentContext.pick_up_tip` is called
without arguments.
:type tip_racks: List[:py:class:`.Labware`]
:param bool replace: Indicate that the currently-loaded instrument in
`mount` (if such an instrument exists) should be
replaced by `instrument_name`.
"""
if isinstance(mount, str):
try:
checked_mount = types.Mount[mount.upper()]
except KeyError:
raise ValueError(
"If mount is specified as a string, it should be either"
"'left' or 'right' (ignoring capitalization, which the"
" system strips), not {}".format(mount))
elif isinstance(mount, types.Mount):
checked_mount = mount
else:
raise TypeError(
"mount should be either an instance of opentrons.types.Mount"
" or a string, but is {}.".format(mount))
self._log.info("Trying to load {} on {} mount"
.format(instrument_name, checked_mount.name.lower()))
instr = self._instruments[checked_mount]
if instr and not replace:
raise RuntimeError("Instrument already present in {} mount: {}"
.format(checked_mount.name.lower(),
instr.name))
attached = {att_mount: instr.get('model', None)
for att_mount, instr
in self._hw_manager.hardware.attached_instruments.items()}
attached[checked_mount] = instrument_name
self._log.debug("cache instruments expectation: {}"
.format(attached))
self._hw_manager.hardware.cache_instruments(attached)
# If the cache call didn’t raise, the instrument is attached
new_instr = InstrumentContext(
ctx=self,
hardware_mgr=self._hw_manager,
mount=checked_mount,
tip_racks=tip_racks,
log_parent=self._log)
self._instruments[checked_mount] = new_instr
self._log.info("Instrument {} loaded".format(new_instr))
return new_instr
@property
def loaded_instruments(self) -> Dict[str, Optional['InstrumentContext']]:
""" Get the instruments that have been loaded into the protocol.
This is a map of mount name to instruments previously loaded with
:py:meth:`load_instrument`. It is not necessarily the same as the
instruments attached to the robot - for instance, if the robot has
an instrument in both mounts but your protocol has only loaded one
of them with :py:meth:`load_instrument`, the unused one will not
be present.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount. If no instrument is loaded in the mount,
it will not be present
"""
return {mount.name.lower(): instr for mount, instr
in self._instruments.items()
if instr}
def reset(self):
""" Reset the state of the context and the hardware.
For instance, this call will
- reset all cached knowledge about attached tips
- unload all labware
- unload all instruments
- clear all location and instrument caches
The only state that will be kept is the position of the robot.
"""
raise NotImplementedError
@cmds.publish.both(command=cmds.pause)
def pause(self, msg=None):
""" Pause execution of the protocol until resume is called.
This function returns immediately, but the next function call that
is blocked by a paused robot (anything that involves moving) will
not return until :py:meth:`resume` is called.
:param str msg: A message to echo back to connected clients.
"""
self._hw_manager.hardware.pause()
@cmds.publish.both(command=cmds.resume)
def resume(self):
""" Resume a previously-paused protocol """
self._hw_manager.hardware.resume()
@cmds.publish.both(command=cmds.comment)
def comment(self, msg):
""" Add a user-readable comment string that will be echoed to the
Opentrons app. """
pass
@cmds.publish.both(command=cmds.delay)
def delay(self, seconds=0, minutes=0, msg=None):
""" Delay protocol execution for a specific amount of time.
:param float seconds: A time to delay in seconds
:param float minutes: A time to delay in minutes
If both `seconds` and `minutes` are specified, they will be added.
"""
delay_time = seconds + minutes * 60
self._hw_manager.hardware.delay(delay_time)
@property
def config(self) -> rc.robot_config:
""" Get the robot's configuration object.
:returns .robot_config: The loaded configuration.
"""
return self._hw_manager.hardware.config
def update_config(self, **kwargs):
""" Update values of the robot's configuration.
        `kwargs` should contain keys of the robot's configuration. For instance,
`update_config(name='<NAME>')` would change the name of the robot
Documentation on keys can be found in the documentation for
:py:class:`.robot_config`.
"""
self._hw_manager.hardware.update_config(**kwargs)
def home(self):
""" Homes the robot.
"""
self._log.debug("home")
self._location_cache = None
self._hw_manager.hardware.home()
@property
def location_cache(self) -> Optional[types.Location]:
""" The cache used by the robot to determine where it last was.
"""
return self._location_cache
@location_cache.setter
def location_cache(self, loc: Optional[types.Location]):
self._location_cache = loc
@property
def deck(self) -> geometry.Deck:
""" The object holding the deck layout of the robot.
This object behaves like a dictionary with keys for both numeric
and string slot numbers (for instance, ``protocol.deck[1]`` and
``protocol.deck['1']`` will both return the object in slot 1). If
nothing is loaded into a slot, ``None`` will be present. This object
is useful for determining if a slot in the deck is free. Rather than
filtering the objects in the deck map yourself, you can also use
:py:attr:`loaded_labwares` to see a dict of labwares and
:py:attr:`loaded_modules` to see a dict of modules.
"""
return self._deck_layout
@property
def fixed_trash(self) -> Labware:
""" The trash fixed to slot 12 of the robot deck. """
trash = self._deck_layout['12']
if not trash:
raise RuntimeError("Robot must have a trash container in 12")
return trash # type: ignore
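# --- Illustrative usage sketch (not part of the original module) ---
# A minimal protocol against this API could look like the function below.
# The labware load names and the 'p300_single' pipette name are examples and
# must match definitions/instruments available on the robot; pick_up_tip() and
# drop_tip() are assumed InstrumentContext methods.
def _example_protocol(protocol):
    """Sketch of a simple transfer using ProtocolContext and InstrumentContext."""
    plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '1')
    tips = protocol.load_labware('opentrons_96_tiprack_300ul', '2')
    pipette = protocol.load_instrument('p300_single', 'right', tip_racks=[tips])
    pipette.pick_up_tip()
    pipette.aspirate(100, plate['A1'])
    pipette.dispense(100, plate['B1'])
    pipette.drop_tip()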
class InstrumentContext(CommandPublisher):
""" A context for a specific pipette or instrument.
This can be used to call methods related to pipettes - moves or
aspirates or dispenses, or higher-level methods.
Instances of this class bundle up state and config changes to a
pipette - for instance, changes to flow rates or trash containers.
Action methods (like :py:meth:`aspirate` or :py:meth:`distribute`) are
defined here for convenience.
In general, this class should not be instantiated directly; rather,
    instances are returned from :py:meth:`ProtocolContext.load_instrument`.
"""
def __init__(self,
ctx: ProtocolContext,
hardware_mgr: HardwareManager,
mount: types.Mount,
log_parent: logging.Logger,
tip_racks: List[Labware] = None,
trash: Labware = None,
default_speed: float = 400.0,
**config_kwargs) -> None:
super().__init__(ctx.broker)
self._hw_manager = hardware_mgr
self._ctx = ctx
self._mount = mount
self._tip_racks = tip_racks or list()
for tip_rack in self.tip_racks:
assert tip_rack.is_tiprack
if trash is None:
self.trash_container = self._ctx.fixed_trash
else:
self.trash_container = trash
self._default_speed = default_speed
self._last_location: Union[Labware, Well, None] = None
self._last_tip_picked_up_from: Union[Well, None] = None
self._log = log_parent.getChild(repr(self))
self._log.info("attached")
self._well_bottom_clearance = Clearances(
default_aspirate=1.0, default_dispense=1.0)
self._flow_rates = FlowRates(self)
self._speeds = PlungerSpeeds(self)
self._starting_tip: Union[Well, None] = None
@property
def starting_tip(self) -> Union[Well, None]:
""" The starting tip from which the pipette pick up
"""
return self._starting_tip
@starting_tip.setter
def starting_tip(self, location: Union[Well, None]):
self._starting_tip = location
def reset_tipracks(self):
""" Reload all tips in each tip rack and reset starting tip
"""
for tiprack in self.tip_racks:
tiprack.reset()
self.starting_tip = None
@property
def default_speed(self) -> float:
""" The speed at which the robot's gantry moves.
By default, 400 mm/s. Changing this value will change the speed of the
pipette when moving between labware. In addition to changing the
default, the speed of individual motions can be changed with the
``speed`` argument to :py:meth:`InstrumentContext.move_to`.
"""
return self._default_speed
@default_speed.setter
def default_speed(self, speed: float):
self._default_speed = speed
def aspirate(self,
volume: float = None,
location: Union[types.Location, Well] = None,
rate: float = 1.0) -> 'InstrumentContext':
"""
Aspirate a volume of liquid (in microliters/uL) using this pipette
from the specified location
If only a volume is passed, the pipette will aspirate
from its current position. If only a location is passed (as in
``instr.aspirate(location=wellplate['A1'])``,
:py:meth:`aspirate` will default to the amount of volume available.
:param volume: The volume to aspirate, in microliters. If not
specified, :py:attr:`max_volume`.
:type volume: int or float
:param location: Where to aspirate from. If `location` is a
:py:class:`.Well`, the robot will aspirate from
:py:attr:`well_bottom_clearance```.aspirate`` mm
above the bottom of the well. If `location` is a
:py:class:`.Location` (i.e. the result of
:py:meth:`.Well.top` or :py:meth:`.Well.bottom`), the
robot will aspirate from the exact specified location.
If unspecified, the robot will aspirate from the
current position.
:param rate: The relative plunger speed for this aspirate. During
this aspirate, the speed of the plunger will be
`rate` * :py:attr:`aspirate_speed`. If not specified,
defaults to 1.0 (speed will not be modified).
:type rate: float
:returns: This instance.
.. note::
If ``aspirate`` is called with a single argument, it will not try
to guess whether the argument is a volume or location - it is
required to be a volume. If you want to call ``aspirate`` with only
a location, specify it as a keyword argument:
``instr.aspirate(location=wellplate['A1'])``
"""
self._log.debug("aspirate {} from {} at {}"
.format(volume,
location if location else 'current position',
rate))
if isinstance(location, Well):
point, well = location.bottom()
dest = types.Location(
point + types.Point(0, 0,
self.well_bottom_clearance.aspirate),
well)
elif isinstance(location, types.Location):
dest = location
elif location is not None:
raise TypeError(
'location should be a Well or Location, but it is {}'
.format(location))
elif self._ctx.location_cache:
dest = self._ctx.location_cache
else:
raise RuntimeError(
"If aspirate is called without an explicit location, another"
" method that moves to a location (such as move_to or "
"dispense) must previously have been called so the robot "
"knows where it is.")
if self.current_volume == 0:
# Make sure we're at the top of the labware and clear of any
# liquid to prepare the pipette for aspiration
if isinstance(dest.labware, Well):
self.move_to(dest.labware.top())
else:
# TODO(seth,2019/7/29): This should be a warning exposed via
# rpc to the runapp
self._log.warning(
"When aspirate is called on something other than a well"
" relative position, we can't move to the top of the well"
" to prepare for aspiration. This might cause over "
" aspiration if the previous command is a blow_out.")
self._hw_manager.hardware.prepare_for_aspirate(self._mount)
self.move_to(dest)
elif dest != self._ctx.location_cache:
self.move_to(dest)
cmds.do_publish(self.broker, cmds.aspirate, self.aspirate,
'before', None, None, self, volume, dest, rate)
self._hw_manager.hardware.aspirate(self._mount, volume, rate)
cmds.do_publish(self.broker, cmds.aspirate, self.aspirate,
'after', self, None, self, volume, dest, rate)
return self
def dispense(self,
volume: float = None,
location: Union[types.Location, Well] = None,
rate: float = 1.0) -> 'InstrumentContext':
"""
Dispense a volume of liquid (in microliters/uL) using this pipette
into the specified location.
If only a volume is passed, the pipette will dispense from its current
position. If only a location is passed (as in
``instr.dispense(location=wellplate['A1'])``), all of the liquid
aspirated into the pipette will be dispensed (this volume is accessible
through :py:attr:`current_volume`).
:param volume: The volume of liquid to dispense, in microliters. If not
specified, defaults to :py:attr:`current_volume`.
:type volume: int or float
:param location: Where to dispense into. If `location` is a
:py:class:`.Well`, the robot will dispense into
:py:attr:`well_bottom_clearance```.dispense`` mm
above the bottom of the well. If `location` is a
:py:class:`.Location` (i.e. the result of
:py:meth:`.Well.top` or :py:meth:`.Well.bottom`), the
robot will dispense into the exact specified location.
If unspecified, the robot will dispense into the
current position.
:param rate: The relative plunger speed for this dispense. During
this dispense, the speed of the plunger will be
`rate` * :py:attr:`dispense_speed`. If not specified,
defaults to 1.0 (speed will not be modified).
:type rate: float
:returns: This instance.
.. note::
If ``dispense`` is called with a single argument, it will not try
to guess whether the argument is a volume or location - it is
required to be a volume. If you want to call ``dispense`` with only
a location, specify it as a keyword argument:
``instr.dispense(location=wellplate['A1'])``
"""
self._log.debug("dispense {} from {} at {}"
.format(volume,
location if location else 'current position',
rate))
if isinstance(location, Well):
if 'fixedTrash' in quirks_from_any_parent(location):
loc = location.top()
else:
point, well = location.bottom()
loc = types.Location(
point + types.Point(0, 0,
self.well_bottom_clearance.dispense),
well)
self.move_to(loc)
elif isinstance(location, types.Location):
loc = location
self.move_to(location)
elif location is not None:
raise TypeError(
'location should be a Well or Location, but it is {}'
.format(location))
elif self._ctx.location_cache:
loc = self._ctx.location_cache
else:
raise RuntimeError(
"If dispense is called without an explicit location, another"
" method that moves to a location (such as move_to or "
"aspirate) must previously have been called so the robot "
"knows where it is.")
cmds.do_publish(self.broker, cmds.dispense, self.dispense,
'before', None, None, self, volume, loc, rate)
self._hw_manager.hardware.dispense(self._mount, volume, rate)
cmds.do_publish(self.broker, cmds.dispense, self.dispense,
'after', self, None, self, volume, loc, rate)
return self
@cmds.publish.both(command=cmds.mix)
def mix(self,
repetitions: int = 1,
volume: float = None,
location: Union[types.Location, Well] = None,
rate: float = 1.0) -> 'InstrumentContext':
"""
Mix a volume of liquid (uL) using this pipette.
If no location is specified, the pipette will mix from its current
position. If no volume is passed, ``mix`` will default to the
pipette's :py:attr:`max_volume`.
:param repetitions: how many times the pipette should mix (default: 1)
:param volume: number of microlitres to mix (default:
:py:attr:`max_volume`)
:param location: a Well or a position relative to well.
e.g, `plate.rows()[0][0].bottom()`
:type location: types.Location
:param rate: Set plunger speed for this mix, where,
``speed = rate * (aspirate_speed or dispense_speed)``
:raises NoTipAttachedError: If no tip is attached to the pipette.
:returns: This instance
.. note::
All the arguments to ``mix`` are optional; however, if you do
not want to specify one of them, all arguments after that one
should be keyword arguments. For instance, if you do not want
to specify volume, you would call
``pipette.mix(1, location=wellplate['A1'])``. If you do not
want to specify repetitions, you would call
``pipette.mix(volume=10, location=wellplate['A1'])``. Unlike
previous API versions, ``mix`` will not attempt to guess your
inputs; the first argument will always be interpreted as
``repetitions``, the second as ``volume``, and the third as
``location`` unless you use keywords.
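A usage sketch (assuming ``pipette`` is this instrument and ``wellplate`` is an already-loaded labware):
.. code-block:: python
    pipette.mix(3, 50, wellplate['A1'])                # 3 repetitions of 50 uL
    pipette.mix(volume=10, location=wellplate['A1'])   # default repetitions=1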
"""
self._log.debug(
'mixing {}uL with {} repetitions in {} at rate={}'.format(
volume, repetitions,
location if location else 'current position', rate))
if not self.hw_pipette['has_tip']:
raise hc.NoTipAttachedError('Pipette has no tip. Aborting mix()')
self.aspirate(volume, location, rate)
while repetitions - 1 > 0:
self.dispense(volume, rate=rate)
self.aspirate(volume, rate=rate)
repetitions -= 1
self.dispense(volume, rate=rate)
return self
@cmds.publish.both(command=cmds.blow_out)
def blow_out(self,
location: Union[types.Location, Well] = None
) -> 'InstrumentContext':
"""
Blow liquid out of the tip.
If :py:attr:`dispense` is used to completely empty a pipette,
usually a small amount of liquid will remain in the tip. This
method moves the plunger past its usual stops to fully remove
any remaining liquid from the tip. Regardless of how much liquid
was in the tip when this function is called, after it is done
the tip will be empty.
:param location: The location to blow out into. If not specified,
defaults to the current location of the pipette
:type location: :py:class:`.Well` or :py:class:`.Location` or None
:raises RuntimeError: If no location is specified and location cache is
None. This should happen if `blow_out` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance
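A usage sketch (assuming ``instr`` is this pipette and ``wellplate`` is an already-loaded labware):
.. code-block:: python
    instr.dispense(location=wellplate['A1'])
    instr.blow_out()                        # blow out at the current location
    instr.blow_out(wellplate['A1'].top())   # or at an explicit location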
"""
if isinstance(location, Well):
if location.parent.is_tiprack:
self._log.warning('Blow_out being performed on a tiprack. '
'Please re-check your code')
loc = location.top()
self.move_to(loc)
elif isinstance(location, types.Location):
loc = location
self.move_to(loc)
elif location is not None:
raise TypeError(
'location should be a Well or Location, but it is {}'
.format(location))
elif self._ctx.location_cache:
# if location cache exists, pipette blows out immediately at
# current location, no movement is needed
pass
else:
raise RuntimeError(
"If blow out is called without an explicit location, another"
" method that moves to a location (such as move_to or "
"dispense) must previously have been called so the robot "
"knows where it is.")
self._hw_manager.hardware.blow_out(self._mount)
return self
@cmds.publish.both(command=cmds.touch_tip)
def touch_tip(self,
location: Well = None,
radius: float = 1.0,
v_offset: float = -1.0,
speed: float = 60.0) -> 'InstrumentContext':
"""
Touch the pipette tip to the sides of a well, with the intent of
removing left-over droplets
:param location: If no location is passed, pipette will
touch tip at current well's edges
:type location: :py:class:`.Well` or None
:param radius: Describes the proportion of the target well's
radius. When `radius=1.0`, the pipette tip will move to
the edge of the target well; when `radius=0.5`, it will
move to 50% of the well's radius. Default: 1.0 (100%)
:type radius: float
:param v_offset: The offset in mm from the top of the well to touch tip
A positive offset moves the tip higher above the well,
while a negative offset moves it lower into the well
Default: -1.0 mm
:type v_offset: float
:param speed: The speed for touch tip motion, in mm/s.
Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s
:type speed: float
:raises NoTipAttachedError: if no tip is attached to the pipette
:raises RuntimeError: If no location is specified and location cache is
None. This should happen if `touch_tip` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance
.. note::
This is a behavior change from the legacy API (which accepts any
:py:class:`.Placeable` as the ``location`` parameter)
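A usage sketch (assuming ``instr`` is this pipette, with a tip attached, and ``wellplate`` is an already-loaded labware):
.. code-block:: python
    instr.touch_tip(wellplate['A1'], radius=0.75, v_offset=-2.0, speed=40)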
"""
if not self.hw_pipette['has_tip']:
raise hc.NoTipAttachedError('Pipette has no tip to touch_tip()')
if speed > 80.0:
self._log.warning('Touch tip speed above limit. Setting to 80mm/s')
speed = 80.0
elif speed < 20.0:
self._log.warning('Touch tip speed below min. Setting to 20mm/s')
speed = 20.0
# If location is a valid well, move to the well first
if location is None:
if not self._ctx.location_cache:
raise RuntimeError('No valid current location cache present')
else:
location = self._ctx.location_cache.labware # type: ignore
# type checked below
if isinstance(location, Well):
if location.parent.is_tiprack:
self._log.warning('Touch_tip being performed on a tiprack. '
'Please re-check your code')
self.move_to(location.top())
else:
raise TypeError(
'location should be a Well, but it is {}'.format(location))
# Determine the touch_tip edges/points
offset_pt = types.Point(0, 0, v_offset)
well_edges = [
# right edge
location._from_center_cartesian(x=radius, y=0, z=1) + offset_pt,
# left edge
location._from_center_cartesian(x=-radius, y=0, z=1) + offset_pt,
# back edge
location._from_center_cartesian(x=0, y=radius, z=1) + offset_pt,
# front edge
location._from_center_cartesian(x=0, y=-radius, z=1) + offset_pt
]
for edge in well_edges:
self._hw_manager.hardware.move_to(self._mount, edge, speed)
return self
@cmds.publish.both(command=cmds.air_gap)
def air_gap(self,
volume: float = None,
height: float = None) -> 'InstrumentContext':
"""
Pull air into the pipette current tip at the current location
:param volume: The amount in uL to aspirate air into the tube.
(Default will use all remaining volume in tip)
:type volume: float
:param height: The number of millimeters to move above the current Well
to air-gap aspirate. (Default: 5mm above current Well)
:type height: float
:raises NoTipAttachedError: If no tip is attached to the pipette
:raises RuntimeError: If location cache is None.
This should happen if ``air_gap`` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance
.. note::
Both ``volume`` and ``height`` are optional, but unlike previous API
versions, if you want to specify only ``height`` you must do it
as a keyword argument: ``pipette.air_gap(height=2)``. If you
call ``air_gap`` with only one unnamed argument, it will always
be interpreted as a volume.
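A usage sketch (assuming ``instr`` is this pipette and ``wellplate`` is an already-loaded labware):
.. code-block:: python
    instr.aspirate(50, wellplate['A1'])
    instr.air_gap(10)          # pull 10 uL of air above the liquid
    instr.air_gap(height=2)    # or specify only the height above the well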
"""
if not self.hw_pipette['has_tip']:
raise hc.NoTipAttachedError('Pipette has no tip. Aborting air_gap')
if height is None:
height = 5
loc = self._ctx.location_cache
if not loc or not isinstance(loc.labware, Well):
raise RuntimeError('No previous Well cached to perform air gap')
target = loc.labware.top(height)
self.move_to(target)
self.aspirate(volume)
return self
@cmds.publish.both(command=cmds.return_tip)
def return_tip(self, home_after: bool = True) -> 'InstrumentContext':
"""
If a tip is currently attached to the pipette, then it will return the
tip to its location in the tiprack.
It will not reset tip tracking so the well flag will remain False.
:returns: This instance
"""
if not self.hw_pipette['has_tip']:
self._log.warning('Pipette has no tip to return')
loc = self._last_tip_picked_up_from
if not isinstance(loc, Well):
raise TypeError('Last tip location should be a Well but it is: '
'{}'.format(loc))
bot = loc.bottom()
bot = bot._replace(point=bot.point._replace(z=bot.point.z + 10))
self.drop_tip(bot, home_after=home_after)
try:
loc.parent.return_tips(loc, self.channels)
except AssertionError:
# The failure mode here is "can't return the tip", and might
# happen because another pipette took a tip from the tiprack
# since this pipette did. In this case just don't return the
# tip to the tip tracker
self._log.exception('Could not return tip to tip tracker')
return self
def _next_available_tip(self) -> Tuple[Labware, Well]:
start = self.starting_tip
if start is None:
return select_tiprack_from_list(
self.tip_racks, self.channels)
else:
return select_tiprack_from_list(
filter_tipracks_to_start(start, self.tip_racks),
self.channels, start)
def pick_up_tip( # noqa(C901)
self, location: Union[types.Location, Well] = None,
presses: int = None,
increment: float = 1.0) -> 'InstrumentContext':
"""
Pick up a tip for the pipette to run liquid-handling commands with
If no location is passed, the Pipette will pick up the next available
tip in its :py:attr:`InstrumentContext.tip_racks` list.
The tip to pick up can be manually specified with the `location`
argument. The `location` argument can be specified in several ways:
* If the only thing to specify is which well from which to pick
up a tip, `location` can be a :py:class:`.Well`. For instance,
if you have a tip rack in a variable called `tiprack`, you can
pick up a specific tip from it with
``instr.pick_up_tip(tiprack.wells()[0])``. This style of call can
be used to make the robot pick up a tip from a tip rack that
was not specified when creating the :py:class:`.InstrumentContext`.
* If the position to move to in the well needs to be specified,
for instance to tell the robot to run its pick up tip routine
starting closer to or farther from the top of the tip,
`location` can be a :py:class:`.types.Location`; for instance,
you can call ``instr.pick_up_tip(tiprack.wells()[0].top())``.
:param location: The location from which to pick up a tip.
:type location: :py:class:`.types.Location` or :py:class:`.Well` to
pick up a tip from.
:param presses: The number of times to lower and then raise the pipette
when picking up a tip, to ensure a good seal (0 [zero]
will result in the pipette hovering over the tip but
not picking it up--generally not desirable, but could
be used for dry-run).
:type presses: int
:param increment: The additional distance to travel on each successive
press (e.g.: if `presses=3` and `increment=1.0`, then
the first press will travel down into the tip by
3.5mm, the second by 4.5mm, and the third by 5.5mm).
:type increment: float
:returns: This instance
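A usage sketch (assuming ``instr`` is this pipette and ``tiprack`` is a tip rack in its ``tip_racks`` list):
.. code-block:: python
    instr.pick_up_tip()                           # next available tip
    instr.drop_tip()
    instr.pick_up_tip(tiprack.wells()[0].top())   # a specific tip, with an explicit position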
"""
if location and isinstance(location, types.Location):
if isinstance(location.labware, Labware):
tiprack = location.labware
target: Well = tiprack.next_tip(self.channels) # type: ignore
if not target:
raise OutOfTipsError
elif isinstance(location.labware, Well):
tiprack = location.labware.parent
target = location.labware
elif location and isinstance(location, Well):
tiprack = location.parent
target = location
elif not location:
tiprack, target = self._next_available_tip()
else:
raise TypeError(
"If specified, location should be an instance of "
"types.Location (e.g. the return value from "
"tiprack.wells()[0].top()) or a Well (e.g. tiprack.wells()[0]."
" However, it is a {}".format(location))
assert tiprack.is_tiprack, "{} is not a tiprack".format(str(tiprack))
cmds.do_publish(self.broker, cmds.pick_up_tip, self.pick_up_tip,
'before', None, None, instrument=self, location=target)
self.move_to(target.top())
self._hw_manager.hardware.set_current_tiprack_diameter(
self._mount, target.diameter)
self._hw_manager.hardware.pick_up_tip(
self._mount, self._tip_length_for(tiprack), presses, increment)
# Note that the hardware API pick_up_tip action includes homing z after
cmds.do_publish(self.broker, cmds.pick_up_tip, self.pick_up_tip,
'after', self, None, instrument=self, location=target)
self._hw_manager.hardware.set_working_volume(
self._mount, target.max_volume)
tiprack.use_tips(target, self.channels)
self._last_tip_picked_up_from = target
return self
def drop_tip( # noqa(C901)
self,
location: Union[types.Location, Well] = None,
home_after: bool = True)\
-> 'InstrumentContext':
"""
Drop the current tip.
If no location is passed, the Pipette will drop the tip into its
:py:attr:`trash_container`, which if not specified defaults to
the fixed trash in slot 12.
The location in which to drop the tip can be manually specified with
the `location` argument. The `location` argument can be specified in
several ways:
- If the only thing to specify is which well into which to drop
a tip, `location` can be a :py:class:`.Well`. For instance,
if you have a tip rack in a variable called `tiprack`, you can
drop a tip into a specific well on that tiprack with the call
`instr.drop_tip(tiprack.wells()[0])`. This style of call can
be used to make the robot drop a tip into arbitrary labware.
- If the position to drop the tip from as well as the
:py:class:`.Well` to drop the tip into needs to be specified,
for instance to tell the robot to drop a tip from an unusually
large height above the tiprack, `location`
can be a :py:class:`.types.Location`; for instance, you can call
`instr.drop_tip(tiprack.wells()[0].top())`.
.. note::
OT1 required homing the plunger after dropping tips, so the prior
version of `drop_tip` automatically homed the plunger. This is no
longer needed in OT2. If you need to home the plunger, use
:py:meth:`home_plunger`.
:param location: The location to drop the tip
:type location: :py:class:`.types.Location` or :py:class:`.Well` or
None
:returns: This instance
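A usage sketch (assuming ``instr`` currently holds a tip and ``tiprack`` is an already-loaded tip rack):
.. code-block:: python
    instr.drop_tip()                      # drop into the fixed trash
    instr.drop_tip(tiprack.wells()[0])    # or return it to a specific well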
"""
if location and isinstance(location, types.Location):
if isinstance(location.labware, Well):
target = location
else:
raise TypeError(
"If a location is specified as a types.Location (for "
"instance, as the result of a call to "
"tiprack.wells()[0].top()) it must be a location "
"relative to a well, since that is where a tip is "
"dropped. The passed location, however, is in "
"reference to {}".format(location.labware))
elif location and isinstance(location, Well):
if 'fixedTrash' in quirks_from_any_parent(location):
target = location.top()
else:
bot = location.bottom()
target = bot._replace(
point=bot.point._replace(z=bot.point.z + 10))
elif not location:
target = self.trash_container.wells()[0].top()
else:
raise TypeError(
"If specified, location should be an instance of "
"types.Location (e.g. the return value from "
"tiprack.wells()[0].top()) or a Well (e.g. tiprack.wells()[0]."
" However, it is a {}".format(location))
cmds.do_publish(self.broker, cmds.drop_tip, self.drop_tip,
'before', None, None, instrument=self, location=target)
self.move_to(target)
self._hw_manager.hardware.drop_tip(self._mount, home_after=home_after)
cmds.do_publish(self.broker, cmds.drop_tip, self.drop_tip,
'after', self, None, instrument=self, location=target)
if isinstance(target.labware, Well)\
and target.labware.parent.is_tiprack:
# If this is a tiprack we can try and add the tip back to the
# tracker
try:
target.labware.parent.return_tips(
target.labware, self.channels)
except AssertionError:
# Similarly to :py:meth:`return_tips`, the failure case here
# just means the tip can't be reused, so don't actually stop
# the protocol
self._log.exception(f'Could not return tip to {target}')
self._last_tip_picked_up_from = None
return self
def home(self) -> 'InstrumentContext':
""" Home the robot.
:returns: This instance.
"""
def home_dummy(mount): pass
cmds.do_publish(self.broker, cmds.home, home_dummy,
'before', None, None, self._mount.name.lower())
self._hw_manager.hardware.home_z(self._mount)
self._hw_manager.hardware.home_plunger(self._mount)
cmds.do_publish(self.broker, cmds.home, home_dummy,
'after', self, None, self._mount.name.lower())
return self
def home_plunger(self) -> 'InstrumentContext':
""" Home the plunger associated with this mount
:returns: This instance.
"""
self._hw_manager.hardware.home_plunger(self.mount)
return self
@cmds.publish.both(command=cmds.distribute)
def distribute(self,
volume: float,
source: Well,
dest: List[Well],
*args, **kwargs) -> 'InstrumentContext':
"""
Move a volume of liquid from one source to multiple destinations.
:param volume: The amount of volume to distribute to each destination
well.
:param source: A single well from where liquid will be aspirated.
:param dest: List of Wells where liquid will be dispensed to.
:param kwargs: See :py:meth:`transfer`. Some arguments are changed.
Specifically,
- ``mix_after``, if specified, is ignored.
- ``disposal_volume``, if not specified, is set to the
minimum volume of the pipette
:returns: This instance
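A usage sketch (assuming ``instr`` is this pipette and ``plate`` is an already-loaded well plate):
.. code-block:: python
    instr.distribute(30, plate['A1'], [plate['B1'], plate['B2'], plate['B3']])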
"""
self._log.debug("Distributing {} from {} to {}"
.format(volume, source, dest))
kwargs['mode'] = 'distribute'
kwargs['disposal_volume'] = kwargs.get(
'disposal_volume', self.min_volume)
kwargs['mix_after'] = (0, 0)
return self.transfer(volume, source, dest, **kwargs)
@cmds.publish.both(command=cmds.consolidate)
def consolidate(self,
volume: float,
source: List[Well],
dest: Well,
*args, **kwargs) -> 'InstrumentContext':
"""
Move liquid from multiple wells (sources) to a single well (destination).
:param volume: The amount of volume to consolidate from each source
well.
:param source: List of wells from where liquid will be aspirated.
:param dest: The single well into which liquid will be dispensed.
:param kwargs: See :py:meth:`transfer`. Some arguments are changed.
Specifically,
- ``mix_before``, if specified, is ignored.
- ``disposal_volume`` is ignored and set to 0.
:returns: This instance
"""
self._log.debug("Consolidate {} from {} to {}"
.format(volume, source, dest))
kwargs['mode'] = 'consolidate'
kwargs['mix_before'] = (0, 0)
kwargs['disposal_volume'] = 0
return self.transfer(volume, source, dest, **kwargs)
@cmds.publish.both(command=cmds.transfer)
def transfer(self,
volume: Union[float, Sequence[float]],
source: AdvancedLiquidHandling,
dest: AdvancedLiquidHandling,
trash=True,
**kwargs) -> 'InstrumentContext':
# source: Union[Well, List[Well], List[List[Well]]],
# dest: Union[Well, List[Well], List[List[Well]]],
# TODO: Reach consensus on kwargs
# TODO: Decide if to use a disposal_volume
# TODO: Accordingly decide if remaining liquid should be blown out to
# TODO: ..trash or the original well.
# TODO: What should happen if the user passes a non-first-row well
# TODO: ..as src/dest *while using multichannel pipette?
r"""
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :py:class:`InstrumentContext` commands, like :py:meth:`aspirate`
and :py:meth:`dispense`, designed to make protocol writing easier at
the cost of specificity.
:param volume: The amount of volume to aspirate from each source and
dispense to each destination.
If volume is a list, each volume will be used for the
sources/targets at the matching index. If volume is a
tuple with two elements, like `(20, 100)`, then a list
of volumes will be generated with a linear gradient
between the two volumes in the tuple.
:param source: A single well or a list of wells from where liquid
will be aspirated.
:param dest: A single well or a list of wells where liquid
will be dispensed to.
:param \**kwargs: See below
:Keyword Arguments:
* *new_tip* (``string``) --
- 'never': no tips will be picked up or dropped during transfer
- 'once': (default) a single tip will be used for all commands.
- 'always': use a new tip for each transfer.
* *trash* (``boolean``) --
If `True` (default behavior), tips will be
dropped in the trash container attached to this `Pipette`.
If `False` tips will be returned to tiprack.
* *touch_tip* (``boolean``) --
If `True`, a :py:meth:`touch_tip` will occur following each
:py:meth:`aspirate` and :py:meth:`dispense`. If set to `False`
(default behavior), no :py:meth:`touch_tip` will occur.
* *blow_out* (``boolean``) --
If `True`, a :py:meth:`blow_out` will occur following each
:py:meth:`dispense`, but only if the pipette has no liquid left
in it. If set to `False` (default), no :py:meth:`blow_out` will
occur.
* *mix_before* (``tuple``) --
The tuple, if specified, gives the amount of volume to
:py:meth:`mix` preceding each :py:meth:`aspirate` during the
transfer. The tuple is interpreted as (repetitions, volume).
* *mix_after* (``tuple``) --
The tuple, if specified, gives the amount of volume to
:py:meth:`mix` after each :py:meth:`dispense` during the
transfer. The tuple is interpreted as (repetitions, volume).
* *disposal_volume* (``float``) --
(:py:meth:`distribute` only) Volume of liquid to be disposed of
after distributing. When dispensing multiple times from the same
tip, it is recommended to aspirate an extra amount of liquid to
be disposed of after distributing.
* *carryover* (``boolean``) --
If `True` (default), any `volume` that exceeds the maximum volume
of this Pipette will be split into multiple smaller volumes.
* *gradient* (``lambda``) --
Function for calculating the curve used for gradient volumes.
When `volume` is a tuple of length 2, its values are used to
create a list of gradient volumes. The default curve for this
gradient is linear (lambda x: x), however a method can be passed
with the `gradient` keyword argument to create a custom curve.
:returns: This instance
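A usage sketch (assuming ``instr`` is this pipette and ``plate`` is an already-loaded well plate):
.. code-block:: python
    instr.transfer(100, plate['A1'], plate['B1'])
    instr.transfer([20, 40, 60], plate['A1'],
                   [plate['B1'], plate['B2'], plate['B3']],
                   new_tip='always', mix_after=(2, 50))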
"""
self._log.debug("Transfer {} from {} to {}".format(
volume, source, dest))
kwargs['mode'] = kwargs.get('mode', 'transfer')
mix_strategy, mix_opts = self._mix_from_kwargs(kwargs)
if trash:
drop_tip = transfers.DropTipStrategy.TRASH
else:
drop_tip = transfers.DropTipStrategy.RETURN
new_tip = kwargs.get('new_tip')
if isinstance(new_tip, str):
new_tip = types.TransferTipPolicy[new_tip.upper()]
blow_out = None
if kwargs.get('blow_out'):
blow_out = transfers.BlowOutStrategy.TRASH
if new_tip != types.TransferTipPolicy.NEVER:
tr, next_tip = self._next_available_tip()
max_volume = min(next_tip.max_volume, self.max_volume)
else:
max_volume = self.hw_pipette['working_volume']
touch_tip = None
if kwargs.get('touch_tip'):
touch_tip = transfers.TouchTipStrategy.ALWAYS
default_args = transfers.Transfer()
disposal = kwargs.get('disposal_volume')
if disposal is None:
disposal = default_args.disposal_volume
transfer_args = transfers.Transfer(
new_tip=new_tip or default_args.new_tip,
air_gap=kwargs.get('air_gap') or default_args.air_gap,
carryover=kwargs.get('carryover') or default_args.carryover,
gradient_function=(kwargs.get('gradient_function') or
default_args.gradient_function),
disposal_volume=disposal,
mix_strategy=mix_strategy,
drop_tip_strategy=drop_tip,
blow_out_strategy=blow_out or default_args.blow_out_strategy,
touch_tip_strategy=(touch_tip or
default_args.touch_tip_strategy)
)
transfer_options = transfers.TransferOptions(transfer=transfer_args,
mix=mix_opts)
plan = transfers.TransferPlan(volume, source, dest, self, max_volume,
kwargs['mode'], transfer_options)
self._execute_transfer(plan)
return self
def _execute_transfer(self, plan: transfers.TransferPlan):
for cmd in plan:
getattr(self, cmd['method'])(*cmd['args'], **cmd['kwargs'])
@staticmethod
def _mix_from_kwargs(
top_kwargs: Dict[str, Any])\
-> Tuple[transfers.MixStrategy, transfers.Mix]:
def _mix_requested(kwargs, opt):
"""
Helper for determining mix options from :py:meth:`transfer` kwargs
Mixes can be ignored in kwargs by either
- Not specifying the kwarg
- Specifying it as None
- Specifying it as (0, 0)
This handles all these cases.
"""
val = kwargs.get(opt)
if val is None:
return False
if val == (0, 0):
return False
return True
mix_opts = transfers.Mix()
if _mix_requested(top_kwargs, 'mix_before')\
and _mix_requested(top_kwargs, 'mix_after'):
mix_strategy = transfers.MixStrategy.BOTH
before_opts = top_kwargs['mix_before']
after_opts = top_kwargs['mix_after']
mix_opts = mix_opts._replace(
mix_after=mix_opts.mix_after._replace(
repetitions=after_opts[0], volume=after_opts[1]),
mix_before=mix_opts.mix_before._replace(
repetitions=before_opts[0], volume=before_opts[1]))
elif _mix_requested(top_kwargs, 'mix_before'):
mix_strategy = transfers.MixStrategy.BEFORE
before_opts = top_kwargs['mix_before']
mix_opts = mix_opts._replace(
mix_before=mix_opts.mix_before._replace(
repetitions=before_opts[0], volume=before_opts[1]))
elif _mix_requested(top_kwargs, 'mix_after'):
mix_strategy = transfers.MixStrategy.AFTER
after_opts = top_kwargs['mix_after']
mix_opts = mix_opts._replace(
mix_after=mix_opts.mix_after._replace(
repetitions=after_opts[0], volume=after_opts[1]))
else:
mix_strategy = transfers.MixStrategy.NEVER
return mix_strategy, mix_opts
def delay(self):
return self._ctx.delay()
def move_to(self, location: types.Location, force_direct: bool = False,
minimum_z_height: float = None,
speed: float = None
) -> 'InstrumentContext':
""" Move the instrument.
:param location: The location to move to.
:type location: :py:class:`.types.Location`
:param force_direct: If set to true, move directly to destination
without arc motion.
:param minimum_z_height: When specified, this Z margin is able to raise
(but never lower) the mid-arc height.
:param speed: The speed at which to move. By default,
:py:attr:`InstrumentContext.default_speed`. This controls
the straight linear speed of the motion; to limit
individual axis speeds, you can use
:py:attr:`.ProtocolContext.max_speeds`.
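A usage sketch (assuming ``instr`` is this pipette and ``plate`` is an already-loaded labware):
.. code-block:: python
    instr.move_to(plate['A1'].top())                                   # arc move to the well top
    instr.move_to(plate['A1'].bottom(), force_direct=True, speed=100)  # direct move at 100 mm/s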
"""
if self._ctx.location_cache:
from_lw = self._ctx.location_cache.labware
else:
from_lw = None
if not speed:
speed = self.default_speed
from_center = 'centerMultichannelOnWells'\
in quirks_from_any_parent(from_lw)
cp_override = CriticalPoint.XY_CENTER if from_center else None
from_loc = types.Location(
self._hw_manager.hardware.gantry_position(
self._mount, critical_point=cp_override),
from_lw)
moves = geometry.plan_moves(from_loc, location, self._ctx.deck,
force_direct=force_direct,
minimum_z_height=minimum_z_height)
self._log.debug("move_to: {}->{} via:\n\t{}"
.format(from_loc, location, moves))
try:
for move in moves:
self._hw_manager.hardware.move_to(
self._mount, move[0], critical_point=move[1], speed=speed,
max_speeds=self._ctx.max_speeds.data)
except Exception:
self._ctx.location_cache = None
raise
else:
self._ctx.location_cache = location
return self
@property
def mount(self) -> str:
""" Return the name of the mount this pipette is attached to """
return self._mount.name.lower()
@property
def speed(self) -> 'PlungerSpeeds':
""" The speeds (in mm/s) configured for the pipette plunger.
This is an object with attributes ``aspirate``, ``dispense``, and
``blow_out`` holding the plunger speeds for the corresponding
operation.
.. note::
This property is equivalent to :py:attr:`flow_rate`; the only
difference is the units in which this property is specified.
Specifying this attribute uses the units of the linear speed of
the plunger inside the pipette, while :py:attr:`flow_rate` uses
the units of the volumetric flow rate of liquid into or out of the
tip. Because :py:attr:`speed` and :py:attr:`flow_rate` modify the
same values, setting one will override the other.
For instance, to set the plunger speed during an aspirate action, do
.. code-block :: python
instrument.speed.aspirate = 50
"""
return self._speeds
@property
def flow_rate(self) -> 'FlowRates':
""" The speeds (in uL/s) configured for the pipette.
This is an object with attributes ``aspirate``, ``dispense``, and
``blow_out`` holding the flow rates for the corresponding operation.
.. note::
This property is equivalent to :py:attr:`speed`; the only
difference is the units in which this property is specified.
Specifying this property uses the units of the volumetric flow rate
of liquid into or out of the tip, while :py:attr:`speed` uses the
units of the linear speed of the plunger inside the pipette.
Because :py:attr:`speed` and :py:attr:`flow_rate` modify the
same values, setting one will override the other.
For instance, to change the flow rate for aspiration on an instrument
you would do
.. code-block :: python
instrument.flow_rate.aspirate = 50
"""
return self._flow_rates
@property
def pick_up_current(self) -> float:
"""
The current (amperes) the pipette mount's motor will use
while picking up a tip. Specified in amps.
"""
raise NotImplementedError
@pick_up_current.setter
def pick_up_current(self, amps: float):
""" Set the current used when picking up a tip.
:param amps: The current, in amperes. Acceptable values: (0.0, 2.0)
"""
raise NotImplementedError
@property
def type(self) -> str:
""" One of `'single'` or `'multi'`.
"""
model = self.name
if 'single' in model:
return 'single'
elif 'multi' in model:
return 'multi'
else:
raise RuntimeError("Bad pipette name: {}".format(model))
@property
def tip_racks(self) -> List[Labware]:
"""
The tip racks that have been linked to this pipette.
This is the property used to determine which tips to pick up next when
calling :py:meth:`pick_up_tip` without arguments.
"""
return self._tip_racks
@tip_racks.setter
def tip_racks(self, racks: List[Labware]):
self._tip_racks = racks
@property
def trash_container(self) -> Labware:
""" The trash container associated with this pipette.
This is the property used to determine where to drop tips and blow out
liquids when calling :py:meth:`drop_tip` or :py:meth:`blow_out` without
arguments.
"""
return self._trash
@trash_container.setter
def trash_container(self, trash: Labware):
self._trash = trash
@property
def name(self) -> str:
"""
The name string for the pipette (e.g. 'p300_single')
"""
return self.hw_pipette['name']
@property
def model(self) -> str:
"""
The model string for the pipette (e.g. 'p300_single_v1.3')
"""
return self.hw_pipette['model']
@property
def min_volume(self) -> float:
return self.hw_pipette['min_volume']
@property
def max_volume(self) -> float:
"""
The maximum volume, in microliters, this pipette can hold.
"""
return self.hw_pipette['max_volume']
@property
def current_volume(self) -> float:
"""
The current amount of liquid, in microliters, held in the pipette.
"""
return self.hw_pipette['current_volume']
@property
def hw_pipette(self) -> Dict[str, Any]:
""" View the information returned by the hardware API directly.
:raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette is
no longer attached (should not happen).
"""
pipette = self._hw_manager.hardware.attached_instruments[self._mount]
if pipette is None:
raise types.PipetteNotAttachedError
return pipette
@property
def channels(self) -> int:
""" The number of channels on the pipette. """
return self.hw_pipette['channels']
@property
def well_bottom_clearance(self) -> 'Clearances':
""" The distance above the bottom of a well to aspirate or dispense.
This is an object with attributes ``aspirate`` and ``dispense``,
describing the default heights of the corresponding operation. The
default is 1.0mm for both aspirate and dispense.
When :py:meth:`aspirate` or :py:meth:`dispense` is given a
:py:class:`.Well` rather than a full :py:class:`.Location`, the robot
will move this distance above the bottom of the well to aspirate or
dispense.
To change, set the corresponding attribute. For instance,
.. code-block:: python
instr.well_bottom_clearance.aspirate = 1
"""
return self._well_bottom_clearance
def __repr__(self):
return '<{}: {} in {}>'.format(self.__class__.__name__,
self.hw_pipette['model'],
self._mount.name)
def __str__(self):
return '{} on {} mount'.format(self.hw_pipette['display_name'],
self._mount.name.lower())
def _tip_length_for(self, tiprack: Labware) -> float:
""" Get the tip length, including overlap, for a tip from this rack """
tip_overlap = self.hw_pipette['tip_overlap'].get(
tiprack.uri,
self.hw_pipette['tip_overlap']['default'])
tip_length = tiprack.tip_length
return tip_length - tip_overlap
class ModuleContext(CommandPublisher):
""" An object representing a connected module. """
def __init__(self, ctx: ProtocolContext, geometry: ModuleGeometry) -> None:
""" Build the ModuleContext.
This usually should not be instantiated directly; instead, modules
should be loaded using :py:meth:`ProtocolContext.load_module`.
:param ctx: The parent context for the module
:param geometry: The :py:class:`.ModuleGeometry` for the module
"""
super().__init__(ctx.broker)
self._geometry = geometry
self._ctx = ctx
def load_labware_object(self, labware: Labware) -> Labware:
""" Specify the presence of a piece of labware on the module.
:param labware: The labware object. This object should be already
initialized and its parent should be set to this
module's geometry. To initialize and load a labware
onto the module in one step, see
:py:meth:`load_labware`.
:returns: The properly-linked labware object
"""
mod_labware = self._geometry.add_labware(labware)
self._ctx.deck.recalculate_high_z()
return mod_labware
def load_labware(self, name: str) -> Labware:
""" Specify the presence of a piece of labware on the module.
:param name: The name of the labware object.
:returns: The initialized and loaded labware object.
"""
lw = load(
name, self._geometry.location,
bundled_defs=self._ctx._bundled_labware)
return self.load_labware_object(lw)
def load_labware_by_name(self, name: str) -> Labware:
MODULE_LOG.warning(
'load_labware_by_name is deprecated and will be removed in '
'version 3.12.0. please use load_labware')
return self.load_labware(name)
@property
def labware(self) -> Optional[Labware]:
""" The labware (if any) present on this module. """
return self._geometry.labware
@property
def geometry(self) -> ModuleGeometry:
""" The object representing the module as an item on the deck
:returns: ModuleGeometry
"""
return self._geometry
def __repr__(self):
return "{} at {} lw {}".format(self.__class__.__name__,
self._geometry,
self.labware)
class TemperatureModuleContext(ModuleContext):
""" An object representing a connected Temperature Module.
It should not be instantiated directly; instead, it should be
created through :py:meth:`.ProtocolContext.load_module` using:
``ctx.load_module('Temperature Module', slot_number)``.
A minimal protocol with a Temperature module would look like this:
.. code-block:: python
def run(ctx):
slot_number = 10
temp_mod = ctx.load_module('Temperature Module', slot_number)
temp_plate = temp_mod.load_labware(
'biorad_96_wellplate_200ul_pcr')
temp_mod.set_temperature(45.5)
temp_mod.wait_for_temp()
temp_mod.deactivate()
.. note::
In order to prevent physical obstruction of other slots, place the
Temperature Module in a slot on the horizontal edges of the deck (such
as 1, 4, 7, or 10 on the left or 3, 6, or 9 on the right), with the USB
cable and power cord pointing away from the deck.
"""
def __init__(self, ctx: ProtocolContext,
hw_module: modules.tempdeck.TempDeck,
geometry: ModuleGeometry,
loop: asyncio.AbstractEventLoop) -> None:
self._module = hw_module
self._loop = loop
super().__init__(ctx, geometry)
@cmds.publish.both(command=cmds.tempdeck_set_temp)
def set_temperature(self, celsius: float):
""" Set the target temperature, in C.
Must be between 4 and 95C based on Opentrons QA.
:param celsius: The target temperature, in C
"""
return self._module.set_temperature(celsius)
@cmds.publish.both(command=cmds.tempdeck_deactivate)
def deactivate(self):
""" Stop heating (or cooling) and turn off the fan.
"""
return self._module.deactivate()
def wait_for_temp(self):
""" Block until the module reaches its setpoint.
"""
self._module.wait_for_temp()
@property
def temperature(self):
""" Current temperature in C"""
return self._module.temperature
@property
def target(self):
""" Current target temperature in C"""
return self._module.target
class MagneticModuleContext(ModuleContext):
""" An object representing a connected Temperature Module.
It should not be instantiated directly; instead, it should be
created through :py:meth:`.ProtocolContext.load_module`.
"""
def __init__(self,
ctx: ProtocolContext,
hw_module: modules.magdeck.MagDeck,
geometry: ModuleGeometry,
loop: asyncio.AbstractEventLoop) -> None:
self._module = hw_module
self._loop = loop
super().__init__(ctx, geometry)
@cmds.publish.both(command=cmds.magdeck_calibrate)
def calibrate(self):
""" Calibrate the Magnetic Module.
The calibration is used to establish the position of the labware on
top of the magnetic module.
"""
self._module.calibrate()
def load_labware_object(self, labware: Labware) -> Labware:
"""
Load labware onto a Magnetic Module, checking if it is compatible
"""
if labware.magdeck_engage_height is None:
MODULE_LOG.warning(
"This labware ({}) is not explicitly compatible with the"
" Magnetic Module. You will have to specify a height when"
" calling engage().")
return super().load_labware_object(labware)
@cmds.publish.both(command=cmds.magdeck_engage)
def engage(self, height: float = None, offset: float = None):
""" Raise the Magnetic Module's magnets.
The destination of the magnets can be specified in several different
ways, based on internally stored default heights for labware:
- If neither `height` nor `offset` is specified, the magnets will
raise to a reasonable default height based on the specified
labware.
- If `height` is specified, it should be a distance in mm from the
home position of the magnets.
- If `offset` is specified, it should be an offset in mm from the
default position. A positive number moves the magnets higher and
a negative number moves the magnets lower.
Only certain labwares have defined engage heights for the Magnetic
Module. If a labware that does not have a defined engage height is
loaded on the Magnetic Module (or if no labware is loaded), then
`height` must be specified.
:param height: The height to raise the magnets to, in mm from home.
:param offset: An offset relative to the default height for the labware
in mm
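A usage sketch (assuming ``mag_mod`` is a loaded Magnetic Module context):
.. code-block:: python
    mag_mod.engage()             # default height for compatible labware
    mag_mod.engage(height=12)    # or an explicit height in mm from home
    mag_mod.disengage()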
"""
if height:
dist = height
elif self.labware and self.labware.magdeck_engage_height is not None:
dist = self.labware.magdeck_engage_height
if offset:
dist += offset
else:
raise ValueError(
"Currently loaded labware {} does not have a known engage "
"height; please specify explicitly with the height param"
.format(self.labware))
self._module.engage(dist)
@cmds.publish.both(command=cmds.magdeck_disengage)
def disengage(self):
""" Lower the magnets back into the Magnetic Module.
"""
self._module.deactivate()
@property
def status(self):
""" The status of the module. either 'engaged' or 'disengaged' """
return self._module.status
class ThermocyclerContext(ModuleContext):
""" An object representing a connected Temperature Module.
It should not be instantiated directly; instead, it should be
created through :py:meth:`.ProtocolContext.load_module`.
"""
def __init__(self,
ctx: ProtocolContext,
hw_module: modules.thermocycler.Thermocycler,
geometry: ThermocyclerGeometry,
loop: asyncio.AbstractEventLoop) -> None:
self._module = hw_module
self._loop = loop
super().__init__(ctx, geometry)
def _prepare_for_lid_move(self):
loaded_instruments = [instr for mount, instr in
self._ctx.loaded_instruments.items()
if instr is not None]
try:
instr = loaded_instruments[0]
except IndexError:
MODULE_LOG.warning(
"Cannot assure a safe gantry position to avoid colliding"
" with the lid of the Thermocycler Module.")
else:
self._ctx._hw_manager.hardware.retract(instr._mount)
high_point = self._ctx._hw_manager.hardware.current_position(
instr._mount)
trash_top = self._ctx.fixed_trash.wells()[0].top()
safe_point = trash_top.point._replace(
z=high_point[Axis.by_mount(instr._mount)])
instr.move_to(types.Location(safe_point, None), force_direct=True)
@cmds.publish.both(command=cmds.thermocycler_open)
def open_lid(self):
""" Opens the lid"""
self._prepare_for_lid_move()
self._geometry.lid_status = self._module.open()
return self._geometry.lid_status
@cmds.publish.both(command=cmds.thermocycler_close)
def close_lid(self):
""" Closes the lid"""
self._prepare_for_lid_move()
self._geometry.lid_status = self._module.close()
return self._geometry.lid_status
@cmds.publish.both(command=cmds.thermocycler_set_block_temp)
def set_block_temperature(self,
temperature: float,
hold_time_seconds: float = None,
hold_time_minutes: float = None,
ramp_rate: float = None):
""" Set the target temperature for the well block, in °C.
Valid operational range yet to be determined.
:param temperature: The target temperature, in °C.
:param hold_time_minutes: The number of minutes to hold, after reaching
``temperature``, before proceeding to the
next command.
:param hold_time_seconds: The number of seconds to hold, after reaching
``temperature``, before proceeding to the
next command. If ``hold_time_minutes`` and
``hold_time_seconds`` are not specified,
the Thermocycler will proceed to the next
command after ``temperature`` is reached.
:param ramp_rate: The target rate of temperature change, in °C/sec.
If ``ramp_rate`` is not specified, it will default
to the maximum ramp rate as defined in the device
configuration.
.. note:
If ``hold_time_minutes`` and ``hold_time_seconds`` are not
specified, the Thermocycler will proceed to the next command
after ``temperature`` is reached.
"""
return self._module.set_temperature(
temperature=temperature,
hold_time_seconds=hold_time_seconds,
hold_time_minutes=hold_time_minutes,
ramp_rate=ramp_rate)
@cmds.publish.both(command=cmds.thermocycler_set_lid_temperature)
def set_lid_temperature(self, temperature: float):
""" Set the target temperature for the heated lid, in °C.
:param temperature: The target temperature, in °C clamped to the
range 20°C to 105°C.
.. note:
The Thermocycler will proceed to the next command after
``temperature`` has been reached.
"""
self._module.set_lid_temperature(temperature)
@cmds.publish.both(command=cmds.thermocycler_execute_profile)
def execute_profile(self,
steps: List[modules.types.ThermocyclerStep],
repetitions: int):
""" Execute a Thermocycler Profile defined as a cycle of
:py:attr:`steps` to repeat for a given number of :py:attr:`repetitions`
:param steps: List of unique steps that make up a single cycle.
Each list item should be a dictionary that maps to
the parameters of the :py:meth:`set_block_temperature`
method with keys 'temperature', 'hold_time_seconds',
and 'hold_time_minutes'.
:param repetitions: The number of times to repeat the cycled steps.
.. note:
Unlike the :py:meth:`set_block_temperature`, either or both of
'hold_time_minutes' and 'hold_time_seconds' must be defined
and finite for each step.
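A usage sketch (assuming ``tc_mod`` is a loaded Thermocycler context):
.. code-block:: python
    steps = [{'temperature': 95, 'hold_time_seconds': 30},
             {'temperature': 57, 'hold_time_seconds': 30},
             {'temperature': 72, 'hold_time_seconds': 60}]
    tc_mod.execute_profile(steps=steps, repetitions=20)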
"""
if repetitions <= 0:
raise ValueError("repetitions must be a positive integer")
for step in steps:
if step.get('temperature') is None:
raise ValueError(
"temperature must be defined for each step in cycle")
hold_mins = step.get('hold_time_minutes')
hold_secs = step.get('hold_time_seconds')
if hold_mins is None and hold_secs is None:
raise ValueError(
"either hold_time_minutes or hold_time_seconds must be"
"defined for each step in cycle")
return self._module.cycle_temperatures(
steps=steps, repetitions=repetitions)
@cmds.publish.both(command=cmds.thermocycler_deactivate_lid)
def deactivate_lid(self):
""" Turn off the heated lid """
self._module.stop_lid_heating()
@cmds.publish.both(command=cmds.thermocycler_deactivate_block)
def deactivate_block(self):
""" Turn off the well block """
self._module.deactivate()
@cmds.publish.both(command=cmds.thermocycler_deactivate)
def deactivate(self):
""" Turn off the well block, and heated lid """
self.deactivate_lid()
self.deactivate_block()
@property
def lid_position(self):
""" Lid open/close status string"""
return self._module.lid_status
@property
def block_temperature_status(self):
return self._module.status
@property
def lid_temperature_status(self):
return self._module.lid_temp_status
@property
def block_temperature(self):
""" Current temperature in degrees C"""
return self._module.temperature
@property
def block_target_temperature(self):
""" Target temperature in degrees C"""
return self._module.target
@property
def lid_temperature(self):
""" Current temperature in degrees C"""
return self._module.lid_temp
@property
def lid_target_temperature(self):
""" Target temperature in degrees C"""
return self._module.lid_target
@property
def ramp_rate(self):
""" Current ramp rate in degrees C/sec"""
return self._module.ramp_rate
@property
def hold_time(self):
""" Remaining hold time in sec"""
return self._module.hold_time
@property
def total_cycle_count(self):
""" Number of repetitions for current set cycle"""
return self._module.total_cycle_count
@property
def current_cycle_index(self):
""" Index of the current set cycle repetition"""
return self._module.current_cycle_index
@property
def total_step_count(self):
""" Number of steps within the current cycle"""
return self._module.total_step_count
@property
def current_step_index(self):
""" Index of the current step within the current cycle"""
return self._module.current_step_index
#%%
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix
from nltk.corpus import stopwords
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from sklearn import naive_bayes
import preprocess
#%%
def printMatrix(matrix):
tn,fp,fn,tp = matrix.flatten()
print(f"True negative: {tn}")
print(f"False negative: {fn}")
print(f"True positive: {tp}")
print(f"False positive: {fp}")
#Export both the vectorizer and model for future use
def export(vec, model):
with open("vecSen.pickle", "wb") as out:
pickle.dump(vec, out)
with open("modelSen.pickle", "wb") as out:
pickle.dump(model, out)
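#The pickles written above can be reloaded later in another script, for example
#(a sketch; the file names simply mirror the ones used in export()):
# with open("vecSen.pickle", "rb") as f:
#     vec = pickle.load(f)
# with open("modelSen.pickle", "rb") as f:
#     model = pickle.load(f)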
#Load the data from the excel file
fileData = pd.read_excel("SentimentAnalysisData.xlsx")
#Create the training and testing lists.
training = []
testing = []
#Create stopwords
stopwords = set(stopwords.words('english'))
#Create the configure the vectorizer
vec = CountVectorizer(lowercase= True, analyzer='word', stop_words=stopwords, ngram_range=(1,3), strip_accents='ascii')
#Create the naive bayes classifier
#Classifier = naive_bayes.ComplementNB()
Classifier = LogisticRegression()
#Get the data and randomize it
fileData = fileData.sample(frac=1)
Text = fileData[:]
#Preprocess data, get data and labels
data, labels = preprocess.preprocess(Text, vec)
#Test to see if label and data size is the same
print(f"Training size: {data.shape[0]}")
print(f"Labels size: {len(labels)}")
#Split data into training and testing
train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.2, shuffle=True)
#Train the model
nb = Classifier.fit(train_x, train_y)
#Export the vectorizer and the model for use in other programs.
export(vec,nb)
#Display a % prediction for each of the test samples 1 through 9.
for index in range(1, 10):
res = nb.predict_proba(test_x[index])
res = int(res[0][1]*100)
print(f"{res}%")
#Display the accuracy of the model
print(f"Accuracy is: {nb.score(test_x,test_y)*100}%")
#Predict the test set again so it can be used in the confusion matrix.
#%%
pred_y = nb.predict(test_x)
plot_confusion_matrix(nb, test_x, test_y)
plt.show()
# sozu/py-pyracmon
"""
This module exports types and functions for configurations.
`PyracmonConfiguration` is a class exposing configurable attributes.
An instance of the class held in this module is treated as the global configuration. `pyracmon` is the only way to change it.
Changes made in the ``with`` block are activated after the block finishes without error.
>>> assert default_config().name == "default"
>>> with pyracmon() as cfg:
>>> cfg.name = "my_config"
>>> assert default_config().name == "my_config"
See the attributes of `PyracmonConfiguration` to know all configuration keys and their effects.
"""
import logging
from functools import wraps
from typing import *
from .model import Table, Column
from .model_graph import ConfigurableSpec
from .util import Configurable
class PyracmonConfiguration:
"""
A class to hold all configurable values in attributes.
:param name: Name of this configuration. This value has no effect on any behavior of modules. Default is ``default`` .
:param logger: Logger or the name of logger used for internal logs such as query logging. Default is ``None`` .
:param log_level: Logging level of internal logs. Default is `logging.DEBUG` .
:param sql_log_length: Maximum length of query log. Queries longer than this value are output being trimmed. Default is ``4096`` .
:param parameter_log: Flag to log query parameters also. Default is ``False`` .
:param paramstyle: Parameter style defined in DB-API 2.0. This value overwrites the style obtained via DB module. Default is ``None`` .
:param type_mapping: Function estimating python type from type name in database and optional DBMS dependent keys. Default is ``None`` .
:param graph_spec: Graph specification used as default. Default is ``None`` .
:param fixture_mapping: Function generating fixture value for a column and an index. Default is ``None`` .
:param fixture_tz_aware: Flag to make fixture datetime being aware of timezone. Default is ``True`` .
:param fixture_ignore_fk: Flag not to generate fixture value on foreign key columns. Default is ``True`` .
:param fixture_ignore_nullable: Flag not to generate fixture value on nullable columns. Default is ``True`` .
:param timedelta_unit: Default keyword arguments to pass ``datetime.timedelta()`` used in `near` matcher.
"""
def __init__(
self,
name: str = None,
logger: Union[str, logging.Logger] = None,
log_level: int = None,
sql_log_length: int = None,
parameter_log: bool = None,
paramstyle: str = None,
type_mapping: Callable[[str], type] = None,
graph_spec: ConfigurableSpec = None,
fixture_mapping: Callable[[Table, Column, int], Any] = None,
fixture_tz_aware: bool = None,
fixture_ignore_fk: bool = None,
fixture_ignore_nullable: bool = None,
timedelta_unit: Dict[str, Any] = None,
):
self.name = name
self.logger = logger
self.log_level = log_level
self.sql_log_length = sql_log_length
self.parameter_log = parameter_log
self.paramstyle = paramstyle
self.type_mapping = type_mapping
self.graph_spec = graph_spec or ConfigurableSpec.create()
self.fixture_mapping = fixture_mapping
self.fixture_tz_aware = fixture_tz_aware
self.fixture_ignore_fk = fixture_ignore_fk
self.fixture_ignore_nullable = fixture_ignore_nullable
self.timedelta_unit = timedelta_unit
def derive(self, **kwargs: Any) -> 'PyracmonConfiguration':
"""
Creates new configuration instance deriving this configuration.
Keys of arguments are attribute names of this class.
You don't need to supply a value for every key. Values left untouched or set to ``None`` are inherited from the base configuration.
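For example (a sketch using only attributes defined above):
>>> base = default_config()
>>> derived = base.derive(name="experiment", sql_log_length=1024)
>>> assert derived.name == "experiment"
>>> assert derived.log_level == base.log_level  # untouched keys fall back to the base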
"""
def attr(k):
v = getattr(self, k)
return v.clone() if isinstance(v, Configurable) else None
attrs = {k:attr(k) for k in vars(self) if attr(k) is not None}
attrs.update(kwargs)
return DerivingConfiguration(self, **attrs)
class DerivingConfiguration(PyracmonConfiguration):
"""
:meta private:
"""
def __init__(self, base, **kwargs):
super().__init__(**kwargs)
self.base = base
def __getattribute__(self, key):
value = object.__getattribute__(self, key)
return value if value is not None else getattr(self.base, key)
def set(self, **kwargs):
for k, v in kwargs.items():
if hasattr(self.base, k):
setattr(self, k, v)
else:
raise KeyError(f"Unknown configuration key: {k}")
return self
def default_config(config=PyracmonConfiguration(
name = "default",
logger = None,
log_level = logging.DEBUG,
sql_log_length = 4096,
parameter_log = False,
paramstyle = None,
type_mapping = None,
graph_spec = None,
fixture_mapping = None,
fixture_tz_aware = True,
fixture_ignore_fk = True,
fixture_ignore_nullable = True,
timedelta_unit = dict(seconds=1),
)):
return config
def pyracmon(**kwargs: Any) -> PyracmonConfiguration:
"""
Starts a `with` block to change the global configuration.
The returned object is not an instance of `PyracmonConfiguration` but allows access to equivalent attributes.
Changes made in the block are activated after the block finishes without error.
>>> assert default_config().name == "default"
>>> with pyracmon() as cfg:
>>> cfg.name = "my_config"
>>> ...
>>> assert default_config().name == "my_config"
"""
class Configurable:
def __init__(self):
self.config = default_config().derive()
def __enter__(self):
return self.config
def __exit__(self, exc_type, exc_value, traceback):
if not exc_value:
target = default_config()
for k in vars(target):
v = getattr(self.config, k)
if isinstance(v, Configurable):
getattr(target, k).replace(v)
elif v is not None:
setattr(target, k, v)
return False
return Configurable()
import time
import torch
import onnx
import os
import numpy as np
from mmcv.tensorrt import (TRTWrapper, onnx2trt, save_trt_engine,
is_tensorrt_plugin_loaded)
assert is_tensorrt_plugin_loaded(), 'Requires TensorRT plugins to be compiled in mmcv'
def gen_trt(onnx_file='sample.onnx', trt_file='sample.trt'):
onnx_model = onnx.load(onnx_file)
## Model input
inputs = torch.rand(1, 3, 608, 1088).cuda()
## Model input shape info
opt_shape_dict = {
'input.1': [list(inputs.shape),
list(inputs.shape),
list(inputs.shape)]
}
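## Note: the three shape lists above are assumed to be the [min, opt, max] shapes
## of the TensorRT optimization profile; they are identical here because the
## input resolution is fixed at 1x3x608x1088.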
## Create TensorRT engine
max_workspace_size = 1 << 30
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
max_workspace_size=max_workspace_size)
## Save TensorRT engine
save_trt_engine(trt_engine, trt_file)
def run_inference(trt_file, num=1):
inputs = torch.rand(1, 3, 608, 1088).cuda()
## Run inference with TensorRT
# trt_model = TRTWrapper(trt_file, ['input.1'], ['922', '925', '928', '931'])
trt_model = TRTWrapper(trt_file, ['input.1'], ['hm', 'reg', 'wh', 'id_feature'])
sum = []
for i in range(num):
torch.cuda.synchronize()
t1 = time.time()
trt_outputs = trt_model({'input.1': inputs})
torch.cuda.synchronize()
# outputs = [trt_outputs['922'], trt_outputs['925'], trt_outputs['928'], trt_outputs['931']]
outputs = [trt_outputs['hm'], trt_outputs['reg'], trt_outputs['wh'], trt_outputs['id_feature']]
time_cost = time.time() - t1
sum.append(time_cost)
print(f"{i} th inference cost: {time_cost}")
print(f"average time: {np.mean(sum)}")
def main():
# onnx_name = 'fairmot_dla34_mmcv.onnx'
# onnx_name = 'fairmot_dla34_mmcv_opt.onnx'
# onnx_name = 'fairmot_dla34_whole_mmcv.onnx'
onnx_name = 'fairmot_dla34_whole_mmcv_opt13.onnx'
save_name = onnx_name.replace('.onnx', '.trt')
# onnx_name = 'fairmot_dla34_whole_mmcv.onnx'
# save_name = 'fairmot_dla34_whole_mmcv.trt'
# onnx_name = 'yolo_lite_mmcv.onnx'
# save_name = 'yolo_lite_mmcv.trt'
# onnx_file = os.path.join('/home/zzzj/Projects/models/onnx_engine/', onnx_name)
# trt_file = os.path.join('/home/zzzj/Projects/models/onnx_engine/', save_name)
# gen_trt(onnx_file, trt_file)
trt_file = os.path.join('/home/zzzj/Projects/models', 'test.engine')
run_inference(trt_file, 100)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
import argparse, grpc, sys, os, socket, random, struct, time
from time import sleep
import time
import Queue
import socket
import struct
from scapy.all import *
import matplotlib.pyplot as plt
import thread
import csv
from fcntl import ioctl
import IN
AVERAGE_NUM = 30
AVERAGE_NUM2 = 1
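# AVERAGE_NUM is the sliding-window length (in samples) of the moving average
# applied below to smooth the raw link-utilization readings before plotting.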
def main():
time_list_LU1 = []
data_list_LU1 = []
new_data_list_LU1 = []
time_list_LU2 = []
data_list_LU2 = []
new_data_list_LU2 = []
time_list_LU3 = []
data_list_LU3 = []
new_data_list_LU3 = []
time_list_LU4 = []
data_list_LU4 = []
new_data_list_LU4 = []
time_list_LU5 = []
data_list_LU5 = []
new_data_list_LU5 = []
time_list_LU6 = []
data_list_LU6 = []
new_data_list_LU6 = []
time_list_LU7 = []
data_list_LU7 = []
new_data_list_LU7 = []
time_list_LU8 = []
data_list_LU8 = []
new_data_list_LU8 = []
with open("Exp_1/link_utilization_0_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU1.append(float(new_line[0]))
data_list_LU1.append(float(new_line[1]))
with open("Exp_2/link_utilization_8_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU2.append(float(new_line[0]))
data_list_LU2.append(float(new_line[1]))
with open("Exp_3/link_utilization_16_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU3.append(float(new_line[0]))
data_list_LU3.append(float(new_line[1]))
with open("Exp_4/link_utilization_32_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU4.append(float(new_line[0]))
data_list_LU4.append(float(new_line[1]))
with open("Exp_5/link_utilization_64_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU5.append(float(new_line[0]))
data_list_LU5.append(float(new_line[1]))
with open("Exp_6/link_utilization_128_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU6.append(float(new_line[0]))
data_list_LU6.append(float(new_line[1]))
with open("Exp_7/link_utilization_256_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU7.append(float(new_line[0]))
data_list_LU7.append(float(new_line[1]))
with open("Exp_8/link_utilization_1024_64_8.txt",'r') as file:
for line in file:
new_line = line[:-1]
new_line = new_line[1:-1]
new_line = new_line.split(",")
new_line[1] = new_line[1][1:]
time_list_LU8.append(float(new_line[0]))
data_list_LU8.append(float(new_line[1]))
for i in range(len(data_list_LU1)):
if (i>=AVERAGE_NUM):
new_data_list_LU1.append(sum(data_list_LU1[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU1.remove(time_list_LU1[i])
for i in range(len(data_list_LU2)):
if (i>=AVERAGE_NUM):
new_data_list_LU2.append(sum(data_list_LU2[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU2.remove(time_list_LU2[i])
for i in range(len(data_list_LU3)):
if (i>=AVERAGE_NUM):
new_data_list_LU3.append(sum(data_list_LU3[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU3.remove(time_list_LU3[i])
for i in range(len(data_list_LU4)):
if (i>=AVERAGE_NUM):
new_data_list_LU4.append(sum(data_list_LU4[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU4.remove(time_list_LU4[i])
for i in range(len(data_list_LU5)):
if (i>=AVERAGE_NUM):
new_data_list_LU5.append(sum(data_list_LU5[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU5.remove(time_list_LU5[i])
for i in range(len(data_list_LU6)):
if (i>=AVERAGE_NUM):
new_data_list_LU6.append(sum(data_list_LU6[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU6.remove(time_list_LU6[i])
for i in range(len(data_list_LU7)):
if (i>=AVERAGE_NUM):
new_data_list_LU7.append(sum(data_list_LU7[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU7.remove(time_list_LU7[i])
for i in range(len(data_list_LU8)):
if (i>=AVERAGE_NUM):
new_data_list_LU8.append(sum(data_list_LU8[i-AVERAGE_NUM:i])/AVERAGE_NUM)
else:
time_list_LU8.remove(time_list_LU8[i])
##########
fig,(ax1) = plt.subplots()
title ="Controller - Switch Link Utilization\n Miss Threshold = 64"
fig.suptitle(title)
# make a plot
ax1.plot(time_list_LU1,new_data_list_LU1,color='red')
ax1.plot(time_list_LU2,new_data_list_LU2,color='green')
ax1.plot(time_list_LU3,new_data_list_LU3,color='blue')
#ax1.plot(time_list_LU4,new_data_list_LU4,color='')
#ax1.plot(time_list_LU5,new_data_list_LU5,color='')
#ax1.plot(time_list_LU6,new_data_list_LU6,color='')
#ax1.plot(time_list_LU7,new_data_list_LU7,color='')
ax1.plot(time_list_LU8,new_data_list_LU8,color='brown')
#ax2.plot(time_list_LU1,new_data_list_LU1,color='red')
#ax2.plot(time_list_LU2,new_data_list_LU2,color='green')
#ax2.plot(time_list_LU3,new_data_list_LU3,color='blue')
#ax2.plot(time_list_LU4,new_data_list_LU4,color='')
#ax2.plot(time_list_LU5,new_data_list_LU5,color='')
#ax2.plot(time_list_LU6,new_data_list_LU6,color='')
#ax2.plot(time_list_LU7,new_data_list_LU7,color='')
#ax2.plot(time_list_LU8,new_data_list_LU8,color='brown')
#plt.title("Inserts and Deletes")
#ax2.plot(time_list_deletions,new_data_list_deletions)
#ax2.plot(time_list_deletions,new_data_list_insertions)
#ax1.legend(["Cache Size = 32","Cache Size = 16","Cache Size = 8","Cache Size = 64","Cache Size = 128"])
#ax1.legend(["No Cache"],loc='upper left')
ax1.legend(["No Cache","Cache Size = 8","Cache Size = 16","Cache Size = 1024"],loc='upper left')
#ax2.set_xlabel("Simulation Time [Seconds]",color="black",fontsize=10)
#ax2.set_ylabel('Link Utilization [Packets]',color="black",fontsize=10)
ax1.set_xlabel("Simulation Time [Seconds]",color="black",fontsize=10)
ax1.set_ylabel('Link Utilization [Packets]',color="black",fontsize=10)
#fig.text(0.5, 0.04, 'Simulation Time [Seconds]', ha='center',fontsize=14)
#fig.text(0.07, 0.5, 'Link Utilization [Packets]', va='center', rotation='vertical',fontsize=14)
fig.tight_layout(pad=3.0)
fig.show()
plt.show()
if __name__ == '__main__':
main()
| StarcoderdataPython |
133312 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import unittest
from django.test import TestCase
from django.test import Client
from django.core.files.uploadedfile import SimpleUploadedFile
import django
from rest_framework.test import APIRequestFactory
from rest_framework.parsers import JSONParser
from rest_framework import status
from . import fakedata
from .models.Audit import Audit
from .serializers import AuditSerializer
from .models.CreditTrade import CreditTrade
from .serializers import CreditTradeSerializer
from .models.CreditTradeHistory import CreditTradeHistory
from .serializers import CreditTradeHistorySerializer
from .models.CreditTradeStatus import CreditTradeStatus
from .serializers import CreditTradeStatusSerializer
from .models.CreditTradeType import CreditTradeType
from .serializers import CreditTradeTypeSerializer
from .models.CreditTradeZeroReason import CreditTradeZeroReason
from .serializers import CreditTradeZeroReasonSerializer
from .models.CurrentUserViewModel import CurrentUserViewModel
from .serializers import CurrentUserViewModelSerializer
from .models.FuelSupplier import FuelSupplier
from .serializers import FuelSupplierSerializer
from .models.FuelSupplierActionsType import FuelSupplierActionsType
from .serializers import FuelSupplierActionsTypeSerializer
from .models.FuelSupplierAttachment import FuelSupplierAttachment
from .serializers import FuelSupplierAttachmentSerializer
from .models.FuelSupplierAttachmentTag import FuelSupplierAttachmentTag
from .serializers import FuelSupplierAttachmentTagSerializer
from .models.FuelSupplierBalance import FuelSupplierBalance
from .serializers import FuelSupplierBalanceSerializer
from .models.FuelSupplierCCData import FuelSupplierCCData
from .serializers import FuelSupplierCCDataSerializer
from .models.FuelSupplierContact import FuelSupplierContact
from .serializers import FuelSupplierContactSerializer
from .models.FuelSupplierHistory import FuelSupplierHistory
from .serializers import FuelSupplierHistorySerializer
from .models.FuelSupplierStatus import FuelSupplierStatus
from .serializers import FuelSupplierStatusSerializer
from .models.Notification import Notification
from .serializers import NotificationSerializer
from .models.NotificationEvent import NotificationEvent
from .serializers import NotificationEventSerializer
from .models.NotificationType import NotificationType
from .serializers import NotificationTypeSerializer
from .models.NotificationViewModel import NotificationViewModel
from .serializers import NotificationViewModelSerializer
from .models.Permission import Permission
from .serializers import PermissionSerializer
from .models.PermissionViewModel import PermissionViewModel
from .serializers import PermissionViewModelSerializer
from .models.Role import Role
from .serializers import RoleSerializer
from .models.RolePermission import RolePermission
from .serializers import RolePermissionSerializer
from .models.RolePermissionViewModel import RolePermissionViewModel
from .serializers import RolePermissionViewModelSerializer
from .models.RoleViewModel import RoleViewModel
from .serializers import RoleViewModelSerializer
from .models.User import User
from .serializers import UserSerializer
from .models.UserDetailsViewModel import UserDetailsViewModel
from .serializers import UserDetailsViewModelSerializer
from .models.UserFavourite import UserFavourite
from .serializers import UserFavouriteSerializer
from .models.UserFavouriteViewModel import UserFavouriteViewModel
from .serializers import UserFavouriteViewModelSerializer
from .models.UserRole import UserRole
from .serializers import UserRoleSerializer
from .models.UserRoleViewModel import UserRoleViewModel
from .serializers import UserRoleViewModelSerializer
from .models.UserViewModel import UserViewModel
from .serializers import UserViewModelSerializer
# Custom API test cases.
# If an API operation does not contain generated code, then it is tested in this
# file.
#
class Test_Api_Custom(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# needed to setup django
django.setup()
def createContact(self, fuelSupplierId):
testContactUrl = "/api/fuelsuppliercontacts"
# Create:
payload = fakedata.FuelSupplierContactTestDataCreate()
payload['fuelSupplierFK'] = fuelSupplierId
jsonString = json.dumps(payload)
response = self.client.post(testContactUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
contactId = data['id']
return contactId
def createFuelSupplierStatus(self):
testUrl = "/api/fuelsupplierstatuses"
payload = fakedata.FuelSupplierStatusTestDataCreate()
payload['effectiveDate'] = '2017-01-01'
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createFuelSupplierActionType(self):
testUrl = "/api/fuelsupplieractionstypes"
payload = fakedata.FuelSupplierActionsTypeTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createFuelSupplier(self):
statusId = self.createFuelSupplierStatus()
actionsTypeId = self.createFuelSupplierActionType()
testUrl = "/api/fuelsuppliers"
# Create:
payload = {
'name': "Initial",
'status': "Initial",
'createdDate': '2000-01-01',
# 'primaryContact': contactId ,
# 'contacts': [contactId],
'notes': [],
'attachments': [],
'history': [],
'fuelSupplierStatusFK': statusId,
'fuelSupplierActionsTypeFK': actionsTypeId,
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId, statusId, actionsTypeId
def createRole(self):
testUrl = "/api/roles"
# Create:
fakeRole = fakedata.RoleTestDataCreate()
payload = {
'name': fakeRole['name'],
'description': fakeRole['description']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createPermission(self):
testUrl = "/api/permissions"
# Create:
fakePermission = fakedata.PermissionTestDataCreate()
payload = {
'code': fakePermission['code'],
'name': fakePermission['name'],
'description': fakePermission['description']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createUser(self, fuelsupplierId):
testUserUrl = "/api/users"
# Create:
fakeUser = fakedata.UserTestDataCreate()
payload = {
'givenName': fakeUser['givenName'],
'surname':fakeUser['surname'],
'email':fakeUser['email'],
'status':'Active',
'userFK':fakeUser['userId'],
'guid':fakeUser['guid'],
'authorizationDirectory':fakeUser['authorizationDirectory'],
'fuelSupplier': fuelsupplierId
}
jsonString = json.dumps(payload)
response = self.client.post(testUserUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
userId = data['id']
return userId
def createCreditTradeType(self):
testUrl = "/api/credittradetypes"
payload = fakedata.CreditTradeTypeTestDataCreate()
payload['expirationDate'] = '2017-01-02'
payload['effectiveDate'] = '2017-01-01'
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createCreditTradeStatus(self):
testUrl = "/api/credittradestatuses"
payload = fakedata.CreditTradeStatusTestDataCreate()
payload['effectiveDate'] = '2017-01-01'
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createCreditTrade(self, fuelSupplierId, userId):
typeId = self.createCreditTradeType()
statusId = self.createCreditTradeStatus()
testUrl = "/api/credittrades"
payload = {
'status':'Active',
'initiator':fuelSupplierId,
'respondent': fuelSupplierId,
'initiatorLastUpdateBy': userId,
'respondentLastUpdatedBy': None,
'reviewedRejectedBy': None,
'approvedRejectedBy': None,
'cancelledBy': None,
'tradeExecutionDate': '2017-01-01',
# TODO: replace transactionType
'transactionType':'Type',
'fairMarketValuePrice': '100.00',
'fuelSupplierBalanceBeforeTransaction':'2017-01-01',
'notes':[],
'attachments':[],
'history':[],
'creditTradeTypeFK': typeId,
'creditTradeStatusFK': statusId,
'respondentFK': fuelSupplierId,
}
fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
payload.update(fakeCreditTrade)
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId, typeId, statusId
def createNotificationEvent(self):
testUrl = "/api/notificationevents"
payload = {
'eventTime': '2017-01-01',
}
event = fakedata.NotificationEventTestDataCreate()
payload.update(event)
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createAndVerifyNotification(self):
testUrl = "/api/notifications"
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
userId = self.createUser(fuelSupplierId)
notificationEventId = self.createNotificationEvent()
payload = fakedata.NotificationTestDataCreate()
payload['userFK'] = userId
payload['notificationEventFK'] = notificationEventId
request = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=request)
assert status.HTTP_201_CREATED == response.status_code
return json.loads(response.content.decode("utf-8"))
def createUserFavourite(self, userId):
url = "/api/users/" + str(userId) + "/favourites"
payload = fakedata.UserFavouriteTestDataCreate()
request = json.dumps(payload)
response = self.client.post(url, content_type='application/json', data=request)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
return data['id']
def deleteContact(self, contactId):
# cleanup the contact
deleteUrl = "/api/fuelsuppliercontacts/" + str(contactId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteRole(self, roleId):
deleteUrl = "/api/roles/" + str(roleId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteNotificationEvent(self, notificationEventId):
deleteUrl = "/api/notificationevents/" + str(notificationEventId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteUser(self, userId):
deleteUrl = "/api/users/" + str(userId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteFuelSupplier(self, fuelsupplierId):
deleteUrl = "/api/fuelsuppliers/" + str(fuelsupplierId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteCreditTrade(self, creditTradeId):
deleteUrl = "/api/credittrades/" + str(creditTradeId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deletePermission(self, permissionId):
deleteUrl = "/api/permissions/" + str(permissionId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
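
    def postJson(self, url, payload, expectedStatus=status.HTTP_201_CREATED):
        # Hedged convenience sketch (not used by the existing tests): wraps the
        # create/assert/parse pattern that the helper methods above repeat.
        response = self.client.post(
            url, content_type='application/json', data=json.dumps(payload))
        assert expectedStatus == response.status_code
        return json.loads(response.content.decode("utf-8"))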
def test_credittradesSearchGet(self):
fsId, _, _ = self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
credId, credTypeId, _ = self.createCreditTrade(fsId, userId)
testUrl = "/api/credittrades/search"
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert len(data) == 1
self.deleteCreditTrade(credId)
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersCurrentFavouritesIdDeletePost(self):
fsId, _, _= self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
userFavId = self.createUserFavourite(userId)
url = "/api/users/current/favourites/" + str(userFavId) + "/delete"
response = self.client.post(url)
assert status.HTTP_200_OK == response.status_code
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersCurrentFavouritesPut(self):
fsId, _, _= self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
url = "/api/users/current/favourites"
payload = fakedata.UserFavouriteTestDataCreate()
request = json.dumps(payload)
response = self.client.post(url, content_type='application/json', data=request)
assert status.HTTP_200_OK == response.status_code
payload = [fakedata.UserFavouriteTestDataUpdate()]
request = json.dumps(payload)
response = self.client.put(url, content_type='application/json', data=request)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert data[0]["value"] == "Changed"
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersCurrentFavouritesSearchGet(self):
fsId, _, _= self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
userFavId = self.createUserFavourite(userId)
url = "/api/users/current/favourites/search"
response = self.client.get(url)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert len(data) == 1
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersCurrentGet(self):
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
userId = self.createUser(fuelSupplierId)
testUrl="/api/users/current"
# List:
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
self.deleteUser (userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_fuelsuppliersIdAttachmentsGet(self):
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
uploadUrl = "/api/fuelsuppliers/"
uploadUrl += str(fuelSupplierId) + "/attachments"
payload = fakedata.FuelSupplierAttachmentTestDataCreate()
payload['fuelSupplierFK'] = fuelSupplierId
rawData = "TEST"
jsonString = json.dumps(payload)
fileData = SimpleUploadedFile("file.txt", rawData.encode('utf-8') )
form = {
"file": fileData,
"item": jsonString,
}
response = self.client.post(uploadUrl, data=form)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
testUrl = "/api/fuelsupplierattachments"
# download the attachment.
downloadUrl = testUrl + "/" + str(createdId)
response = self.client.get(downloadUrl)
# Check that the response is 200 OK.
result = response.content.decode("utf-8")
assert status.HTTP_200_OK == response.status_code
parsed = response.content.decode("utf-8")
# response should match the contents sent.
# TODO: check that raw data matched returned parsed data
# assert rawData==parsed
# Cleanup:
deleteUrl = "/api/fuelsupplierattachments/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_fuelsuppliersIdHistoryGet(self):
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
testUrl = "/api/fuelsuppliers/" + str(fuelSupplierId) + "/history"
payload = fakedata.FuelSupplierHistoryTestDataCreate()
payload['fuelSupplierFK'] = fuelSupplierId
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Cleanup the History
deleteUrl = "/api/fuelsupplierhistories/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_fuelsuppliersSearchGet(self):
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
# do a search
testUrl = "/api/fuelsuppliers/search"
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_rolesIdPermissionsGet(self):
# create a group.
roleId = self.createRole()
# create a permission.
permissionId = self.createPermission()
rolePermissionUrl = "/api/roles/" + str(roleId) + "/permissions"
# create a new group membership.
payload = {'roleFK':roleId, 'permissionFK':permissionId}
jsonString = json.dumps(payload)
response = self.client.post(rolePermissionUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
rolePermissionId = data['id']
# test the get
response = self.client.get(rolePermissionUrl)
assert status.HTTP_200_OK == response.status_code
# test the put. This will also delete the RolePermission.
payload = []
jsonString = json.dumps(payload)
response = self.client.put(rolePermissionUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteRole(roleId)
self.deletePermission(permissionId)
def test_rolesIdUsersGet(self):
roleId = self.createRole()
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
userId = self.createUser(fuelSupplierId)
userRoleUrl = "/api/users/" + str(userId) + "/roles"
# create a new UserRole.
payload = {
'effectiveDate': '2000-01-01',
'expiryDate': None,
'user': userId,
'role': roleId
}
jsonString = json.dumps(payload)
response = self.client.post(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the get
response = self.client.get(userRoleUrl)
assert status.HTTP_200_OK == response.status_code
testUrl = "/api/roles/" + str(roleId)
# get the users in the group.
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# test the PUT - this will clear the user role map.
payload = []
jsonString = json.dumps(payload)
response = self.client.put(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteRole(roleId)
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_usersIdFavouritesGet(self):
fsId, _, _= self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
url = "/api/users/" + str(userId) + "/favourites"
payload = fakedata.UserFavouriteTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(url, content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
payload = [fakedata.UserFavouriteTestDataUpdate()]
jsonString = json.dumps(payload)
response = self.client.put(url, content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert data[0]["value"] == "Changed"
response = self.client.get(url)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert len(data) > 0
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersIdNotificationsGet(self):
fsId, fsTypeId, _ = self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
credId, credTypeId, _ = self.createCreditTrade(fsId, userId)
notificationEventId = self.createNotificationEvent()
# add notification to user.
userNotificationUrl = "/api/users/" + str(userId) + "/notifications"
# create a new UserRole.
payload = {
'notificationEventFK': notificationEventId,
'hasBeenViewed': False,
'isWatchNotification': False,
'userFK':userId
}
jsonString = json.dumps(payload)
response = self.client.post(userNotificationUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the Get
response = self.client.get(userNotificationUrl)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteNotificationEvent(notificationEventId)
self.deleteCreditTrade(credId)
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersIdPermissionsGet(self):
# create a user.
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
userId = self.createUser(fuelSupplierId)
        # create a second user (this fixture is not used further in this test)
        notificationEventId = self.createUser(fuelSupplierId)
# assign permissions to the user.
#TODO add that.
userPermissionUrl = "/api/users/" + str(userId) + "/permissions"
# test the Get
response = self.client.get(userPermissionUrl)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteUser (userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_usersIdRolesGet(self):
fsId, _, _= self.createFuelSupplier()
contactId = self.createContact(fsId)
userId = self.createUser(fsId)
roleId = self.createRole()
url = "/api/users/" + str(userId) + "/roles"
payload = fakedata.UserRoleTestDataCreate()
payload['user'] = userId
payload['role'] = roleId
jsonString = json.dumps(payload)
response = self.client.post(url, content_type='application/json', data=jsonString)
assert response.status_code == status.HTTP_200_OK
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
payload = [fakedata.UserRoleTestDataUpdate()]
payload[0]['user'] = userId
payload[0]['role'] = roleId
jsonString = json.dumps(payload)
response = self.client.put(url, content_type='application/json', data=jsonString)
assert response.status_code == status.HTTP_200_OK
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert data[0]['userFK'] == userId
assert data[0]['roleFK'] == roleId
self.deleteRole(roleId)
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fsId)
def test_usersSearchGet(self):
fuelSupplierId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
userId = self.createUser(fuelSupplierId)
# do a search
testUrl = "/api/users/search"
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteUser(userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
def test_createCreditTradeNegativeNumberOfCredits(self):
fsId, _, _ = self.createFuelSupplier()
userId = self.createUser(fsId)
typeId = self.createCreditTradeType()
statusId = self.createCreditTradeStatus()
testUrl = "/api/credittrades"
payload = {
'creditTradeStatusFK': statusId,
'creditTradeTypeFK': typeId,
'fairMarketValuePrice': '100.00',
'historySet':[],
'initiator': fsId,
'respondentFK': fsId,
'tradeEffectiveDate': '2017-01-01',
}
fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
payload.update(fakeCreditTrade)
payload['numberOfCredits'] = -1
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_422_UNPROCESSABLE_ENTITY == response.status_code
self.deleteUser(userId)
self.deleteFuelSupplier(fsId)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3250864 | <gh_stars>1-10
import hashlib
def sha512half(s):
return hashlib.sha512(s).digest()[0:32]
def hash160(s):
h = hashlib.sha256(s).digest()
m = hashlib.new('ripemd160')
m.update(h)
t = m.digest()
return t
def sha256hash(s):
    s = s if s else b' '  # fall back to a single space so empty input still hashes (bytes for hashlib)
hash1 = hashlib.sha256(s).digest()
hash2 = hashlib.sha256(hash1).digest()
return hash2
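
# Illustrative usage (assumes inputs are passed as bytes, e.g. under Python 3):
if __name__ == '__main__':
    payload = b'example'
    print(sha512half(payload).hex())   # first 32 bytes of SHA-512
    print(hash160(payload).hex())      # RIPEMD-160 of SHA-256
    print(sha256hash(payload).hex())   # double SHA-256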
| StarcoderdataPython |
1727852 | <reponame>wayneferdon/WallpaperEngine.NeteaseMusicLyricDesktop
from pykakasi import kakasi
import datetime
import json
import re
import os
import requests
import time
import sqlite3
from enum import Enum
APPDATA = os.getenv("LOCALAPPDATA")
LOGPATH = os.path.expanduser(APPDATA + "/Netease/CloudMusic/cloudmusic.elog")
DATABASE = os.path.expanduser(APPDATA + "/Netease/CloudMusic/Library/webdb.dat")
OUTPUT = 'OutPut.html'
HEADERS = {
    'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/80.0.3987.87 Safari/537.36'
}
class PlayState(Enum):
STOPPED = 0
PLAYING = 1
EXITED = 2
class LogValidInfo(Enum):
NONE = 0
APPEXIT = 1
PLAY = 2
LOAD = 3
SETPOS = 4
RESUME = 5
PAUSE = 6
class NeteaseMusicStatus:
def __init__(self):
self.LogCount = 0
self.PlayState = PlayState.STOPPED
self.CurrentSong = False
self.CurrentSongLrc = dict()
self.CurrentSongLength = 0
self.LastUpdate = 0
self.kakasi = kakasi()
self.LastLog = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.LastResumeTime = 0
self.LastPauseTime = 0
self.LastPosition = 0
self.CurrentLrc = [
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''}
]
self.NextLrcTime = 0
self.SongLrcKeyTime = list()
self.OutPutHtml = str()
self.LocalMusicInfo = LoadSongDataBase()
with open("./Hanzi2Kanji.json", "r") as KanjiLib:
self.Hanzi2KanjiLib = KanjiLib.readlines()
LibJson = ""
for line in self.Hanzi2KanjiLib:
LibJson += line
self.Hanzi2KanjiLib = json.loads(LibJson)
try:
self.LogFile = open(LOGPATH, 'rb')
self.FileSize = os.path.getsize(LOGPATH)
self.LogFile.seek(0, 2)
except Exception:
raise
LineList = self.GetLastLines(1000)
LineList = self.Decode(LineList[0])
if LineList is not None:
LineIndex = 0
while True:
try:
LineIndex -= 1
LineData = LineList[LineIndex]
try:
self.CallbackLog(LineData, True)
except Exception:
pass
except IndexError:
break
self.LastLog = LineList[-1]
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write('')
if self.CurrentSong:
CurrentTimePosition = self.LastPosition
if self.PlayState == PlayState.PLAYING:
CurrentTimePosition += time.time() - self.LastResumeTime
self.GetLrc()
self.SetCurrentLrc(CurrentTimePosition)
self.OutPutCurrentLrc()
def Decode(self,data):
a = [
[56,"0"],
[41,"1"],
[26,"2"],
[11,"3"],
[124,"4"],
[109,"5"],
[94,"6"],
[79,"7"],
[176,"8"],
[161,"9"],
[44,"a"],
[31,"b"],
[14,"c"],
[121,"d"],
[104,"e"],
[91,"f"],
[74,"g"],
[181,"h"],
[164,"i"],
[151,"j"],
[12,"C"],
[134,"k"],
[241,"l"],
[224,"m"],
[211,"n"],
[194,"o"],
[60,"p"],
# [0,"q"],
[30,"r"],
[15,"s"],
[120,"t"],
[105,"u"],
# [0,"v"],
[75,"w"],
# [0,"x"],
[165,"y"],
[150,"z"],
# [0,"A"],
[167,"B"],
[107,"U"],
[123,"D"],
[69,"E"],
[89,"F"],
[72,"G"],
# [0,"H"],
[166,"I"],
# [0,"J"],
# [0,"K"],
[110,"L"],
# [0,"M"],
[209,"N"],
[192,"O"],
# [0,"P"],
# [0,"Q"],
[28,"R"],
[13,"S"],
[122,"T"],
# [0,"U"],
# [0,"V"],
[46,"A"],
[73,"_w"],
# [0,"X"],
[148,"Y"],
# [0,"Z"],
[193,"_"],
[177,"("],
[133,"["],
[227,"]"],
[57,""],
[25,"[STX]"],
[146,":"],
[198,"/"],
[228,"-"],
[130,"+"],
[125,"$"],
[27,"\""],
[162,"\t"],
[199,"?"],
[245,","],
[240,"|"],
[215,"."],
[145,"\n"],
[40,"!"],
[243,"L"],
[160,")"],
[226,"M"],
[88,"V"],
[90,"v"],
[183,"H"],
[62,"P"],
[45,"q"],
[135,"{"],
[106,"E"],
[29,"E"],
[242,""],
[229,"="],
[225,"}"],
[108,"G"],
[131," "],
[180,"x"],
]
lista = list()
string = ""
for eachData in data:
found = False
for each in a:
if(each[0] == eachData):
found = True
string += each[1]
break
if not found:
if(eachData not in lista):
lista.append(eachData)
string += "【" + str(eachData) +"】"
continue
result = list()
for each in string.split("\n"):
if each != "":
result.append(each)
result.reverse()
return result
def GetLastLines(self, Length):
try:
FileSize = os.path.getsize(LOGPATH)
if FileSize == 0:
return None
else:
# to use seek from end, must use mode 'rb'
with open(LOGPATH, 'rb') as TargetFile:
Offset = -Length # initialize offset
while -Offset < FileSize: # offset cannot exceed file size
# read offset chars from eof(represent by number'2')
TargetFile.seek(Offset, 2)
Lines = TargetFile.readlines() # read from fp to eof
if len(Lines) >= 2: # if contains at least 2 lines
return Lines # then last line is totally included
else:
Offset *= 2 # enlarge offset
TargetFile.seek(0)
return TargetFile.readlines()
except FileNotFoundError:
            return None
def GetSongNameAndArtists(self):
Result = dict()
if str(self.CurrentSong) in self.LocalMusicInfo.keys():
try:
JsonDate = json.loads(
self.LocalMusicInfo[str(self.CurrentSong)])
SongName = JsonDate["album"]["name"]
Artists = JsonDate['artists']
SongArtist = 'by: '
for Artist in Artists:
if SongArtist != 'by: ':
SongArtist += ' / '
SongArtist += Artist['name']
Result = {
0: {'Lrc': '无歌词', 'Translation': ''},
1: {'Lrc': SongName, 'Translation': ''},
float("inf"): {'Lrc': SongArtist, 'Translation': ''}
}
except KeyError:
pass
if not Result:
Url = 'https://music.163.com/api/song/detail/' \
'?id=' + str(self.CurrentSong) + \
'&ids=[' + str(self.CurrentSong) + ']'
JsonDate = json.loads(requests.get(Url, headers=HEADERS).text)
JsonDate = JsonDate['songs'][0]
SongName = JsonDate['name']
Artists = JsonDate['artists']
SongArtist = 'by: '
for Artist in Artists:
if SongArtist != 'by: ':
SongArtist += ' / '
SongArtist += Artist['name']
if SongArtist != 'by: ':
Result = {
0: {'Lrc': '无歌词', 'Translation': ''},
1: {'Lrc': SongName, 'Translation': ''},
float("inf"): {'Lrc': SongArtist, 'Translation': ''}
}
else:
Result[0] = {'Lrc': '无歌词', 'Translation': ''}
return Result
def ReloadMonitorPath(self):
try:
self.LogFile.close()
except Exception:
pass
try:
self.LogFile.close()
self.LogFile = open(LOGPATH, "rb")
self.FileSize = os.path.getsize(LOGPATH)
self.LogFile.seek(0, 2)
return True
except Exception:
return False
def CallbackLog(self, Content, Initializing=False):
ValidInfo = LogValidInfo.NONE
LogTime = 0
if 'Appexit.' in Content:
if self.PlayState == PlayState.PLAYING:
self.LastPosition += time.time() - self.LastResumeTime
self.PlayState = PlayState.EXITED
LogTime = time.time()
ValidInfo = LogValidInfo.APPEXIT
print(time.time(),"\t", "Appexit")
elif "[info]" in Content:
Content = Content.strip().strip('\n')
Result = re.split('\\[info]', Content)
LogInfo = Result[1]
LogTime = re.split('\\[(.*?)]', Result[0])
LogTime = time.mktime(datetime.datetime.fromisoformat(LogTime[5]).timetuple())
if 'player._$play' in LogInfo:
self.CurrentSong = re.split('_', re.split('"', LogInfo)[1])[0]
if not Initializing:
self.GetLrc()
if self.PlayState != PlayState.EXITED:
self.LastPosition = 0
self.NextLrcTime = 0
# require load and resume
self.PlayState = PlayState.STOPPED
ValidInfo = LogValidInfo.PLAY
print(time.time(),"\t", "current song:",self.CurrentSong)
elif '???__onAudioPlayerLoad' in LogInfo:
self.CurrentSongLength = json.loads(
re.split('\t', LogInfo)[0])['duration']
ValidInfo = LogValidInfo.LOAD
print(time.time(),"\t", "duration:",self.CurrentSongLength)
elif '???_$setPosition' in LogInfo:
self.LastPosition = json.loads(LogInfo.split('\t')[0])['ratio'] * self.CurrentSongLength
ValidInfo = LogValidInfo.SETPOS
if self.PlayState == PlayState.PLAYING:
self.LastResumeTime = LogTime
print(time.time(),"\t", "Last Position:",self.LastPosition)
elif 'player._$resumedo' in LogInfo:
self.PlayState = PlayState.PLAYING
self.LastResumeTime = LogTime
ValidInfo = LogValidInfo.RESUME
print(time.time(),"\t", "resume")
elif 'player._$pausedo' in LogInfo:
ValidInfo = LogValidInfo.PAUSE
if self.PlayState == PlayState.PLAYING:
self.LastPosition += LogTime - self.LastResumeTime
self.LastPauseTime = LogTime
self.PlayState = PlayState.STOPPED
print(time.time(),"\t", "pause")
if ValidInfo == LogValidInfo.NONE:
return False
if Initializing:
if (
self.CurrentSong
and self.CurrentSongLength
and self.LastPosition
):
return True
self.LastUpdate = LogTime
return False
if ValidInfo in [LogValidInfo.SETPOS, LogValidInfo.RESUME]:
self.SetCurrentLrc(self.LastPosition)
self.OutPutCurrentLrc()
if ValidInfo == LogValidInfo.APPEXIT:
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write('')
return True
def Start(self,interval = 0.001):
lastModified=0
while True:
modified = os.path.getmtime(LOGPATH)
if lastModified < modified:
lastModified = modified
CurrentFileSize = os.path.getsize(LOGPATH)
if CurrentFileSize < self.FileSize:
TryCount = 0
while TryCount < 10:
if not self.ReloadMonitorPath():
TryCount += 1
else:
TryCount = 0
self.FileSize = os.path.getsize(LOGPATH)
break
time.sleep(0.1)
if TryCount == 10:
raise Exception("Open %s failed after try 10 times"
% LOGPATH)
else:
self.FileSize = CurrentFileSize
LineList = self.GetLastLines(1)
LineList = self.Decode(LineList[-1])
NewLines = list()
lastLogUpdated = False
lastLog = self.LastLog
for i in range(len(LineList)):
if LineList[i] == lastLog:
break
if not lastLogUpdated:
self.LastLog = LineList[i]
lastLogUpdated = True
NewLines.append(LineList[i])
for i in range(len(NewLines)):
line = NewLines[-i]
self.CallbackLog(line)
if self.PlayState == PlayState.PLAYING:
self.SetCurrentLrc()
self.OutPutCurrentLrc()
time.sleep(interval)
def OutPutCurrentLrc(self):
NewOutPut = GetOutPut(self.CurrentLrc)
if NewOutPut == self.OutPutHtml:
return
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write(NewOutPut)
self.OutPutHtml = NewOutPut
@staticmethod
def GetSplitTimeLrc(LrcList):
NewList = dict()
if LrcList:
LrcList = re.split('\n', LrcList)
for LrcItem in LrcList:
LrcItem = re.split('\\[(.*?)]', LrcItem)
try:
LrcTime = LrcItem[1]
if 'by' in LrcTime:
continue
LrcItem = LrcItem[2]
if LrcItem == '':
continue
LrcTime = re.split('\\:', LrcTime.replace(".", ":"))
Minute = int(LrcTime[0])
Second = int(LrcTime[1])
try:
Millisecond = int(LrcTime[2])
except IndexError:
Millisecond = 0
LrcTime = Minute * 60000 + Second * 1000 + Millisecond
NewList[LrcTime] = LrcItem
except Exception:
pass
return NewList
def GetHiraganaLrc(self, Lrc):
LrcSplit = list()
for Split in Lrc:
for each in self.kakasi.convert(Split):
for Item in SplitAll(each['orig'], "((.*?)){1}"):
LrcSplit += self.kakasi.convert(Item)
LrcConverted = ""
LrcRomajinn = ""
PriorHira = ""
IsPreJP = True
for Split in LrcSplit:
orig = Split['orig']
hira = Split['hira']
roma = Split['hepburn']
if not IsPreJP:
orig = orig.replace(" ", " ")
if IsOnlyEnglishOrPunctuation(orig):
LrcConverted += orig + " "
LrcRomajinn += orig + " "
PriorHira = ""
IsPreJP = False
continue
IsPreJP = True
if hira == "":
KanjiLrc = ""
for EachStr in orig:
if EachStr in self.Hanzi2KanjiLib.keys():
KanjiLrc += self.Hanzi2KanjiLib[EachStr][0]
else:
KanjiLrc += EachStr
orig = KanjiLrc
hira = ""
roma = ""
for newEach in kakasi().convert(orig):
hira += newEach['hira']
roma += newEach['hepburn']
if hira == orig:
if hira == PriorHira:
orig = ""
roma = ""
PriorHira = ""
else:
hiraLen = len(hira)
origLen = len(orig)
isDuplicated = False
for i in range(min(hiraLen,origLen)):
if hira[-i-1] == orig[-i-1]:
isDuplicated = True
continue
if isDuplicated:
orig = orig[0:-i] + "(" + hira[0:-i] + ")" + hira[-i:-1] + hira[-1]
PriorHira = ""
break
if not isDuplicated:
PriorHira = "(" + hira + ")"
LrcConverted += orig + PriorHira
LrcRomajinn += roma + " "
return {
"Lrc": LrcConverted,
"Roma": LrcRomajinn
}
def SplitLrc(self, Lrc):
Lrc = Lrc\
.replace("(", "(")\
.replace(")", ")")\
.replace(" ", " ")\
.replace(" ", "//split// //split//")\
.replace("、", "//split//、//split//")\
.replace("。", "//split//、//split//")
Lrc = re.split("//split//", Lrc)
LrcSplit = list()
Index = -1
while Index >= -len(Lrc):
Item = Lrc[Index]
if(Item is None):
Index -= 2
else:
Index -= 1
LrcSplit.append(Item)
LrcSplit.reverse()
Lrc = RemoveAll(LrcSplit, "")
return Lrc
def FormatLrc(self, Lrc, Translation):
def SimpleFormat(Source):
Source = ReplaceAll(Source, " ", " ")
Source = ReplaceAll(Source, " ", " ")
Source = ReplaceAll(Source, "( ", "(")
return Source.replace(" :", " :").replace(": ", ": ").replace(": ",":")
Roma = SimpleFormat(Lrc['Roma'])
Lrc = SimpleFormat(Lrc['Lrc'])
if IsOnlyEnglishOrPunctuation(Lrc):
Lrc = Lrc.replace(" ", " ")
if Translation != "":
Translation = "译:" + Translation + "\t|\t"
Translation += "音:" + Roma
Translation = SimpleFormat(Translation)
return {
"Lrc": ReplaceAll(Lrc, " ", " "),
"Translation": ReplaceAll(Translation, " ", " ")
}
def GetConvertedLrc(self, SplitTimeLrc, SplitTimeTranslation, IsJapanese):
Result = dict()
for TimeItem in SplitTimeLrc.keys():
Lrc = SplitTimeLrc[TimeItem]
if TimeItem in SplitTimeTranslation.keys():
Translation = SplitTimeTranslation[TimeItem]
else:
Translation = ""
Result[TimeItem] = {
"Lrc": Lrc,
"Translation": Translation
}
if not IsJapanese:
Result[TimeItem] = {
"Lrc": Lrc,
"Translation": Translation
}
continue
Lrc = self.SplitLrc(Lrc)
Lrc = self.GetHiraganaLrc(Lrc)
Lrc = self.FormatLrc(Lrc, Translation)
Result[TimeItem] = Lrc
return Result
def GetLrc(self):
self.CurrentLrc = [
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''}
]
Url = "http://music.163.com/api/song/lyric?" +\
"id=" + str(self.CurrentSong) + "&lv=1&kv=1&tv=-1"
        text = requests.get(Url, headers=HEADERS).text
        JsonDate = json.loads(text)
        if text is not None and 'nolyric' in JsonDate.keys():
Result = self.GetSongNameAndArtists()
else:
LyricData = str()
TranslationData = str()
try:
LyricData = JsonDate['lrc']['lyric']
except KeyError:
pass
try:
TranslationData = JsonDate['tlyric']['lyric']
except KeyError:
pass
SplitTimeLrc = self.GetSplitTimeLrc(LyricData)
SplitTimeTranslation = self.GetSplitTimeLrc(TranslationData)
if not SplitTimeLrc:
Result = self.GetSongNameAndArtists()
else:
Result = self.GetConvertedLrc(
SplitTimeLrc,
SplitTimeTranslation,
IsContainJapanese(LyricData)
)
self.CurrentSongLrc = Result
self.SongLrcKeyTime = list(Result.keys())
self.SongLrcKeyTime.sort()
def SetCurrentLrc(self, TargetTime=None):
if TargetTime is None:
CurrentTime = time.time() - self.LastResumeTime + self.LastPosition
if self.NextLrcTime is None:
pass
else:
if (
CurrentTime * 1000< self.NextLrcTime
or self.PlayState != PlayState.PLAYING
):
return
try:
self.CurrentLrc[0] = self.CurrentLrc[1]
self.CurrentLrc[1] = self.CurrentLrc[2]
CurrentLrcIndex = self.SongLrcKeyTime.index(
self.NextLrcTime)
CurrentLrcTime = self.SongLrcKeyTime[CurrentLrcIndex]
if (len(self.SongLrcKeyTime) - 1) <= CurrentLrcIndex:
self.NextLrcTime = None
self.CurrentLrc[2] = {'Lrc': '', 'Translation': ''}
return
self.NextLrcTime = self.SongLrcKeyTime[CurrentLrcIndex + 1]
self.CurrentLrc[2] = self.CurrentSongLrc[self.NextLrcTime]
except Exception as e:
pass
else:
KeyTime = None
for KeyTime in self.SongLrcKeyTime:
if KeyTime >= TargetTime * 1000:
break
try:
TimeIndex = self.SongLrcKeyTime.index(KeyTime)
CurrentLrcTime = self.SongLrcKeyTime[TimeIndex - 1]
if len(self.SongLrcKeyTime) > 1:
self.NextLrcTime = self.SongLrcKeyTime[TimeIndex]
self.CurrentLrc[2] = self.CurrentSongLrc[self.NextLrcTime]
if TimeIndex > 1:
PreviousLrcTime = self.SongLrcKeyTime[TimeIndex - 2]
self.CurrentLrc[0] = self.CurrentSongLrc[PreviousLrcTime]
else:
                        self.CurrentLrc[0] = {'Lrc': '', 'Translation': ''}
else:
self.NextLrcTime = None
self.CurrentLrc[2] = {'Lrc': '', 'Translation': ''}
self.CurrentLrc[1] = self.CurrentSongLrc[CurrentLrcTime]
except Exception as e:
pass
def RemoveAll(Source, Target):
while Target in Source:
Source.remove(Target)
return Source
def ReplaceAll(Source, Target, New):
while Target in Source:
Source = Source.replace(Target, New)
return Source
def SplitAll(Source, Target, Retainterget=True):
FindResult = re.compile(Target).findall(Source)
NewList = list()
if FindResult:
FindResult = FindResult[0]
if isinstance(FindResult, tuple):
FindResult = FindResult[0]
Source = Source.split(FindResult)
for Key in range(len(Source)):
Result = SplitAll(Source[Key], Target)
if Result:
NewList += Result
else:
NewList.append(Source[Key])
if(Retainterget and Key != len(Source)-1):
NewList.append(FindResult)
RemoveAll(NewList, "")
return NewList
NewList.append(Source)
RemoveAll(NewList, "")
return NewList
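def _split_all_example():
    # Hedged usage sketch: SplitAll keeps the matched delimiter as its own element,
    # e.g. the call below returns ["abc", "、", "def。ghi"].
    return SplitAll("abc、def。ghi", "、")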
def GetOutPut(CurrentLrc):
OutPut = ""
for i in range(3):
for key in CurrentLrc[i]:
Lrc = CurrentLrc[i][key]
if Lrc is None:
Lrc = ""
OutPut += '<div class="' + key + str(i) + '">' + Lrc + '</div>'
OutPut += '\n'
return OutPut
def LoadSongDataBase():
cursor = sqlite3.connect(DATABASE).cursor()
CursorResults = cursor.execute(
'SELECT tid, track '
'FROM web_track'
).fetchall()
cursor.close()
SongData = dict()
for Result in CursorResults:
SongData[str(Result[0])] = Result[1]
return SongData
def IsContainJapanese(Source):
SearchRanges = [
'[\u3040-\u3090]', # hiragana
'[\u30a0-\u30ff]' # katakana
]
for Range in SearchRanges:
if RemoveAll(re.compile(Range).findall(Source), '一'):
return True
return False
def IsContainChinese(Source):
return RemoveAll(re.compile('[\u4e00-\u9fa5]').findall(Source), '一')
def IsOnlyEnglishOrPunctuation(Source):
SearchRanges = [
'[\u0000-\u007f]',
'[\u3000-\u303f]',
'[\ufb00-\ufffd]'
]
if Source == " " or Source == "":
return True
if IsContainJapanese(Source) or IsContainChinese(Source):
return False
for Range in SearchRanges:
if RemoveAll(re.compile(Range).findall(Source), '一'):
return True
return False
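def _language_detection_example():
    # Hedged usage sketch of the detection helpers above.
    assert IsContainJapanese("君の名は")            # hiragana present
    assert IsContainChinese("你好")                 # CJK ideographs present
    assert IsOnlyEnglishOrPunctuation("Hello!")     # ASCII letters/punctuation only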
if __name__ == '__main__':
MainProgress = NeteaseMusicStatus()
while True:
try:
MainProgress.Start()
except Exception as e:
print(time.time(),"Exception",e)
MainProgress = NeteaseMusicStatus()
pass
| StarcoderdataPython |
3301575 | <reponame>vishwanath1306/mltrace
from mltrace.db.base import Base
from mltrace.db.models import ComponentRun, PointerTypeEnum
from sqlalchemy import create_engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.schema import (
DropConstraint,
DropTable,
MetaData,
Table,
ForeignKeyConstraint,
)
import hashlib
import inspect
import joblib
import os
import pandas as pd
import random
import sqlalchemy
import string
import sys
import time
import typing
def _create_engine_wrapper(
uri: str, max_retries=5
) -> sqlalchemy.engine.base.Engine:
"""Creates engine using sqlalchemy API. Includes max retries parameter."""
retries = 0
while retries < max_retries:
try:
engine = create_engine(uri)
return engine
except Exception as e:
print(f"DB could not be created with exception {e}. Trying again.")
retries += 1
raise RuntimeError("Max retries hit.")
def _initialize_db_tables(engine: sqlalchemy.engine.base.Engine):
"""Initializes tables using sqlalchemy API."""
Base.metadata.create_all(engine)
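# Hedged usage sketch (the connection URI below is an assumption, not project configuration):
#   engine = _create_engine_wrapper("postgresql://user:pass@localhost:5432/mltrace")
#   _initialize_db_tables(engine)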
def _drop_everything(engine: sqlalchemy.engine.base.Engine):
"""(On a live db) drops all foreign key constraints before dropping all
tables. Workaround for SQLAlchemy not doing DROP ## CASCADE for drop_all()
(https://github.com/pallets/flask-sqlalchemy/issues/722)
"""
con = engine.connect()
trans = con.begin()
inspector = Inspector.from_engine(engine)
# We need to re-create a minimal metadata with only the required things to
# successfully emit drop constraints and tables commands for
# postgres (based on the actual schema of the running instance)
meta = MetaData()
tables = []
all_fkeys = []
for view_name in inspector.get_view_names():
con.execute(
"DROP MATERIALIZED VIEW IF EXISTS {} CASCADE".format(view_name)
)
for table_name in inspector.get_table_names():
fkeys = []
for fkey in inspector.get_foreign_keys(table_name):
if not fkey["name"]:
continue
fkeys.append(ForeignKeyConstraint((), (), name=fkey["name"]))
tables.append(Table(table_name, meta, *fkeys))
all_fkeys.extend(fkeys)
for fkey in all_fkeys:
con.execute(DropConstraint(fkey))
for table in tables:
con.execute(DropTable(table))
trans.commit()
Base.metadata.drop_all(engine)
def _map_extension_to_enum(filename: str) -> PointerTypeEnum:
"""Infers the relevant enum for the filename."""
if "data" in filename.lower():
return PointerTypeEnum.DATA
if "model" in filename.lower():
return PointerTypeEnum.MODEL
data_extensions = [
"csv",
"pq",
"parquet",
"txt",
"md",
"rtf",
"tsv",
"xml",
"pdf",
"mlt",
]
model_extensions = [
"h5",
"hdf5",
"joblib",
"pkl",
"pickle",
"ckpt",
"mlmodel",
]
words = filename.split(".")
if len(words) < 1:
return PointerTypeEnum.UNKNOWN
extension = words[-1].lower()
if extension in data_extensions:
return PointerTypeEnum.DATA
if extension in model_extensions:
return PointerTypeEnum.MODEL
# TODO(shreyashankar): figure out how to handle output i
return PointerTypeEnum.UNKNOWN
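# Hedged usage sketch:
#   _map_extension_to_enum("training_data.csv")  -> PointerTypeEnum.DATA
#   _map_extension_to_enum("classifier.joblib")  -> PointerTypeEnum.MODEL
#   _map_extension_to_enum("notes")              -> PointerTypeEnum.UNKNOWN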
def _hash_value(value: typing.Any = "") -> bytes:
"""Hashes a value using the sqlalchemy API."""
if isinstance(value, str) and value == "":
return b""
return hashlib.sha256(repr(value).encode()).digest()
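# Note: hashing is repr-based, so equal reprs give equal digests, and the empty
# string maps to the b"" sentinel, e.g. _hash_value() == b"".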
# TODO(shreyashankar): add cases for other types
# (e.g., sklearn model, xgboost model, etc)
def _get_data_and_model_args(**kwargs):
"""Returns a subset of args that may correspond to data and models."""
data_model_args = {}
for key, value in kwargs.items():
# Check if data or model is in the name of the key
if "data" in key or "model" in key:
data_model_args[key] = value
elif isinstance(value, pd.DataFrame):
data_model_args[key] = value
elif sys.getsizeof(value) > 1e6:
data_model_args[key] = value
return data_model_args
def _load(pathname: str, from_client=True) -> typing.Any:
"""Loads joblib file at pathname."""
obj = joblib.load(pathname)
# Set frame locals
if from_client:
client_frame = inspect.currentframe().f_back.f_back
if "_mltrace_loaded_artifacts" not in client_frame.f_locals:
client_frame.f_locals["_mltrace_loaded_artifacts"] = {}
client_frame.f_locals["_mltrace_loaded_artifacts"].update(
{pathname: obj}
)
return obj
# TODO(shreyashankar): add cases for other types
# (e.g., sklearn model, xgboost model, etc)
def _save(
obj, pathname: str = None, var_name: str = "", from_client=True
) -> str:
"""Saves joblib object to pathname."""
if pathname is None:
# If being called with a component context, use the component name
_identifier = "".join(
random.choice(string.ascii_lowercase) for i in range(5)
)
pathname = (
f'{var_name}_{_identifier}{time.strftime("%Y%m%d%H%M%S")}.mlt'
)
old_frame = (
inspect.currentframe().f_back.f_back.f_back
if from_client
else inspect.currentframe().f_back.f_back
)
if "component_run" in old_frame.f_locals:
prefix = (
old_frame.f_locals["component_run"]
.component_name.lower()
.replace(" ", "_")
)
pathname = os.path.join(prefix, pathname)
# Prepend with save directory
pathname = os.path.join(
os.environ.get(
"SAVE_DIR", os.path.join(os.path.expanduser("~"), ".mltrace")
),
pathname,
)
os.makedirs(os.path.dirname(pathname), exist_ok=True)
joblib.dump(obj, pathname)
# Set frame locals
if from_client:
client_frame = inspect.currentframe().f_back.f_back
if "_mltrace_saved_artifacts" not in client_frame.f_locals:
client_frame.f_locals["_mltrace_saved_artifacts"] = {}
client_frame.f_locals["_mltrace_saved_artifacts"].update(
{pathname: obj}
)
return pathname
def _get_view_name(task_name: str, window_size: int) -> str:
"""Returns the view name for a given task name."""
return f"{task_name}_{window_size}_view"
| StarcoderdataPython |
1779540 | <reponame>animesh/DeepRT
import pandas as pd
from pydicom import read_file
import re
import logging
class DicomTable:
def __init__(self, dicom_path):
self.dicom_path = dicom_path
self.dicom_file = read_file( self.dicom_path )
self.record_lookup = self.get_patient_data()
self.record_id = self.get_record_id()
def get_record_id(self):
if self.dicom_file == None:
return None
return self.record_lookup.patient_id[0] + "_" + self.record_lookup.laterality[0] + "_" + \
self.record_lookup.study_date[0] + "_" + str(self.record_lookup.series_number[0].astype(int))
def get_oct_data(self):
image_positions = []
stack_positions = []
x_scales = []
y_scales = []
x_starts = []
y_starts = []
x_ends = []
y_ends = []
for i in range( 0, len( self.dicom_file.PerFrameFunctionalGroupsSequence ) ):
image_positions.append(
self.dicom_file.PerFrameFunctionalGroupsSequence[i].PlanePositionSequence[0].ImagePositionPatient )
stack_positions.append(
self.dicom_file.PerFrameFunctionalGroupsSequence[i].FrameContentSequence[0].InStackPositionNumber )
x_scales.append(
self.dicom_file.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing[1] )
y_scales.append(
self.dicom_file.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing[0] )
y_starts.append( self.dicom_file.PerFrameFunctionalGroupsSequence[i].OphthalmicFrameLocationSequence[
0].ReferenceCoordinates[0] )
x_starts.append( self.dicom_file.PerFrameFunctionalGroupsSequence[i].OphthalmicFrameLocationSequence[
0].ReferenceCoordinates[1] )
y_ends.append( self.dicom_file.PerFrameFunctionalGroupsSequence[i].OphthalmicFrameLocationSequence[
0].ReferenceCoordinates[2] )
x_ends.append( self.dicom_file.PerFrameFunctionalGroupsSequence[i].OphthalmicFrameLocationSequence[
0].ReferenceCoordinates[3] )
return image_positions, stack_positions, x_scales, y_scales, y_starts, x_starts, y_ends, x_ends
def filter_dicom(self):
manuf = self.dicom_file.Manufacturer
study_descr = self.dicom_file.StudyDescription
series_description = self.dicom_file.SeriesDescription
pixel_shape = self.dicom_file.pixel_array.shape
if (manuf == "Heidelberg Engineering") & (study_descr == 'Makula (OCT)') & (series_description == 'Volume IR') \
& (pixel_shape[0] == 49):
return self.dicom_file
else:
logging.info(
"Dicom did not contain correct data, see values for mauf, study desc, series desc and pixel shape: "
"{},{},{},{}".format( manuf, study_descr, series_description, pixel_shape ) )
return (None)
def get_patient_data(self):
patient_dict = {}
oct_dict = {}
# load all dicom files and append to dicom list
# try if dicom has all files
self.dicom_file = self.filter_dicom()
if self.dicom_file is None:
return None
if self.dicom_file is not None:
# remove all non digits from string
patient_dict["patient_id"] = re.findall( r'\d+', self.dicom_file.PatientID )
patient_dict["laterality"] = self.dicom_file.ImageLaterality
patient_dict["study_date"] = self.dicom_file.StudyDate
patient_dict["series_number"] = self.dicom_file.SeriesNumber
# get all oct data
image_positions, stack_positions, x_scales, y_scales, y_starts, x_starts \
, y_ends, x_ends = self.get_oct_data()
oct_dict["image_positions"] = image_positions
oct_dict["stack_positions"] = stack_positions
oct_dict["x_scales"] = x_scales
oct_dict["y_scales"] = y_scales
oct_dict["y_starts"] = y_starts
oct_dict["x_starts"] = x_starts
oct_dict["y_ends"] = y_ends
oct_dict["x_ends"] = x_ends
# create dataframe with all relevant data from dicom
patient_pd = pd.DataFrame.from_dict( patient_dict )
oct_pd = pd.DataFrame.from_dict( oct_dict )
patient_full_pd = pd.concat( (patient_pd, oct_pd), axis = 1 )
return patient_full_pd
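# Hedged usage sketch (the path is hypothetical; only Heidelberg 'Volume IR' macula OCT
# volumes with 49 frames survive filter_dicom()):
#
#   # table = DicomTable("/data/oct/volume_ir.dcm")
#   # if table.record_lookup is not None:
#   #     print(table.record_id)   # "<patient>_<laterality>_<study date>_<series number>"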
| StarcoderdataPython |
123068 | <gh_stars>0
import click
from .core import search
from .common import copy_to_clipboard, find_subtitle
@click.command()
@click.argument('moviename', required=False)
@click.option('--subtitle', '-s', help='Given keyword(usually the file name) to search')
def main(moviename, subtitle):
if moviename:
search(moviename)
elif subtitle:
link = find_subtitle(subtitle)
print(link)
copy_to_clipboard(link)
print('Subtitle download link copied')
if __name__ == "__main__":
main()
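# Hedged invocation sketch (the installed command name is an assumption; the script can
# also be run through its package entry point):
#
#   # moviesub "Inception"                              # search by movie name
#   # moviesub --subtitle "Inception.2010.1080p.mkv"    # resolve a subtitle download link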
| StarcoderdataPython |
3206400 | <gh_stars>0
from django.contrib.auth import authenticate, login
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from django.contrib.auth.views import LoginView, LogoutView
from app_users.forms import ChatUserRegistration
class ChatUserRegisterView(FormView):
form_class = ChatUserRegistration
template_name = 'app_users/register.html'
success_url = reverse_lazy('chats_list')
def form_valid(self, form):
form.save()
username = self.request.POST['username']
raw_password = self.request.POST['<PASSWORD>']
user = authenticate(
username=username,
password=<PASSWORD>,
)
login(self.request, user)
return super().form_valid(form)
class UserLoginView(LoginView):
template_name = 'app_users/login.html'
class UserLogoutView(LogoutView):
template_name = 'app_users/logout.html'
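# Hedged wiring sketch (URL paths and names are assumptions, not taken from this app):
#
#   # app_users/urls.py
#   # urlpatterns = [
#   #     path('register/', ChatUserRegisterView.as_view(), name='register'),
#   #     path('login/', UserLoginView.as_view(), name='login'),
#   #     path('logout/', UserLogoutView.as_view(), name='logout'),
#   # ]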
| StarcoderdataPython |
1660459 | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.collector_attribute import CollectorAttribute # noqa: F401,E501
class AwsDynamodbCollectorAttribute(CollectorAttribute):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'aws_range_value': 'str',
'aws_query_range_value': 'str',
'aws_dynamodb_attr_type': 'str',
'aws_query_range_op': 'str',
'aws_key_value': 'str',
'aws_attribute_name': 'str',
'aws_query_index_type': 'str',
'aws_query_index_name': 'str',
'aws_query_key_value': 'str'
}
attribute_map = {
'name': 'name',
'aws_range_value': 'awsRangeValue',
'aws_query_range_value': 'awsQueryRangeValue',
'aws_dynamodb_attr_type': 'awsDynamodbAttrType',
'aws_query_range_op': 'awsQueryRangeOp',
'aws_key_value': 'awsKeyValue',
'aws_attribute_name': 'awsAttributeName',
'aws_query_index_type': 'awsQueryIndexType',
'aws_query_index_name': 'awsQueryIndexName',
'aws_query_key_value': 'awsQueryKeyValue'
}
def __init__(self, name=None, aws_range_value=None, aws_query_range_value=None, aws_dynamodb_attr_type=None, aws_query_range_op=None, aws_key_value=None, aws_attribute_name=None, aws_query_index_type=None, aws_query_index_name=None, aws_query_key_value=None): # noqa: E501
"""AwsDynamodbCollectorAttribute - a model defined in Swagger""" # noqa: E501
self._name = None
self._aws_range_value = None
self._aws_query_range_value = None
self._aws_dynamodb_attr_type = None
self._aws_query_range_op = None
self._aws_key_value = None
self._aws_attribute_name = None
self._aws_query_index_type = None
self._aws_query_index_name = None
self._aws_query_key_value = None
self.discriminator = None
self.name = name
if aws_range_value is not None:
self.aws_range_value = aws_range_value
if aws_query_range_value is not None:
self.aws_query_range_value = aws_query_range_value
if aws_dynamodb_attr_type is not None:
self.aws_dynamodb_attr_type = aws_dynamodb_attr_type
if aws_query_range_op is not None:
self.aws_query_range_op = aws_query_range_op
if aws_key_value is not None:
self.aws_key_value = aws_key_value
if aws_attribute_name is not None:
self.aws_attribute_name = aws_attribute_name
if aws_query_index_type is not None:
self.aws_query_index_type = aws_query_index_type
if aws_query_index_name is not None:
self.aws_query_index_name = aws_query_index_name
if aws_query_key_value is not None:
self.aws_query_key_value = aws_query_key_value
@property
def name(self):
"""Gets the name of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The name of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AwsDynamodbCollectorAttribute.
:param name: The name of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def aws_range_value(self):
"""Gets the aws_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_range_value
@aws_range_value.setter
def aws_range_value(self, aws_range_value):
"""Sets the aws_range_value of this AwsDynamodbCollectorAttribute.
:param aws_range_value: The aws_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_range_value = aws_range_value
@property
def aws_query_range_value(self):
"""Gets the aws_query_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_query_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_query_range_value
@aws_query_range_value.setter
def aws_query_range_value(self, aws_query_range_value):
"""Sets the aws_query_range_value of this AwsDynamodbCollectorAttribute.
:param aws_query_range_value: The aws_query_range_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_query_range_value = aws_query_range_value
@property
def aws_dynamodb_attr_type(self):
"""Gets the aws_dynamodb_attr_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_dynamodb_attr_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_dynamodb_attr_type
@aws_dynamodb_attr_type.setter
def aws_dynamodb_attr_type(self, aws_dynamodb_attr_type):
"""Sets the aws_dynamodb_attr_type of this AwsDynamodbCollectorAttribute.
:param aws_dynamodb_attr_type: The aws_dynamodb_attr_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_dynamodb_attr_type = aws_dynamodb_attr_type
@property
def aws_query_range_op(self):
"""Gets the aws_query_range_op of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_query_range_op of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_query_range_op
@aws_query_range_op.setter
def aws_query_range_op(self, aws_query_range_op):
"""Sets the aws_query_range_op of this AwsDynamodbCollectorAttribute.
:param aws_query_range_op: The aws_query_range_op of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_query_range_op = aws_query_range_op
@property
def aws_key_value(self):
"""Gets the aws_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_key_value
@aws_key_value.setter
def aws_key_value(self, aws_key_value):
"""Sets the aws_key_value of this AwsDynamodbCollectorAttribute.
:param aws_key_value: The aws_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_key_value = aws_key_value
@property
def aws_attribute_name(self):
"""Gets the aws_attribute_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_attribute_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_attribute_name
@aws_attribute_name.setter
def aws_attribute_name(self, aws_attribute_name):
"""Sets the aws_attribute_name of this AwsDynamodbCollectorAttribute.
:param aws_attribute_name: The aws_attribute_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_attribute_name = aws_attribute_name
@property
def aws_query_index_type(self):
"""Gets the aws_query_index_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_query_index_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_query_index_type
@aws_query_index_type.setter
def aws_query_index_type(self, aws_query_index_type):
"""Sets the aws_query_index_type of this AwsDynamodbCollectorAttribute.
:param aws_query_index_type: The aws_query_index_type of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_query_index_type = aws_query_index_type
@property
def aws_query_index_name(self):
"""Gets the aws_query_index_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_query_index_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_query_index_name
@aws_query_index_name.setter
def aws_query_index_name(self, aws_query_index_name):
"""Sets the aws_query_index_name of this AwsDynamodbCollectorAttribute.
:param aws_query_index_name: The aws_query_index_name of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_query_index_name = aws_query_index_name
@property
def aws_query_key_value(self):
"""Gets the aws_query_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:return: The aws_query_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:rtype: str
"""
return self._aws_query_key_value
@aws_query_key_value.setter
def aws_query_key_value(self, aws_query_key_value):
"""Sets the aws_query_key_value of this AwsDynamodbCollectorAttribute.
:param aws_query_key_value: The aws_query_key_value of this AwsDynamodbCollectorAttribute. # noqa: E501
:type: str
"""
self._aws_query_key_value = aws_query_key_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AwsDynamodbCollectorAttribute, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AwsDynamodbCollectorAttribute):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| StarcoderdataPython |
3256880 | <reponame>tmanabe/PairwisePreferenceMultileave
import numpy as np
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import utils.rankings as rnk
from multileaving.ProbabilisticMultileave import ProbabilisticMultileave
class SampleOnlyScoredMultileave(ProbabilisticMultileave):
def __init__(self, *args, **kwargs):
ProbabilisticMultileave.__init__(self, *args, **kwargs)
self._name = 'Sample Only Scored Multileave'
self.needs_descending = True
def next_index_to_add(self, inter_result, inter_n, ranking, index):
while index < ranking.shape[0] and np.any(ranking[index] == inter_result[:inter_n]):
index += 1
return index
def make_multileaving(self, descending_rankings, inverted_rankings):
self._last_inverted_rankings = inverted_rankings
self._last_n_rankers = inverted_rankings.shape[0]
rankings = descending_rankings
n_rankings = rankings.shape[0]
k = min(self._k,rankings.shape[1])
teams = np.zeros(k,dtype=np.int32)
multileaved = np.zeros(k,dtype=np.int32)
multi_i = 0
while multi_i < k and np.all(rankings[1:,multi_i]==rankings[0,multi_i]):
multileaved[multi_i] = rankings[0][multi_i]
teams[multi_i] = -1
multi_i += 1
indices = np.zeros(n_rankings, dtype=np.int32) + multi_i
assign_i = n_rankings
while multi_i < k:
if assign_i == n_rankings:
assignment = np.arange(n_rankings)
np.random.shuffle(assignment)
assign_i = 0
rank_i = assignment[assign_i]
indices[rank_i] = self.next_index_to_add(multileaved, multi_i, rankings[rank_i,:], indices[rank_i])
multileaved[multi_i] = rankings[rank_i,indices[rank_i]]
teams[multi_i] = rank_i
indices[rank_i] += 1
multi_i += 1
assign_i += 1
return multileaved
def infer_preferences(self, result_list, clicked_docs):
if np.any(clicked_docs):
return self.preferences_of_list(result_list, self._last_inverted_rankings, clicked_docs.astype(bool))
else:
return np.zeros((self._last_n_rankers, self._last_n_rankers))
def preferences_of_list(self, result_list, doc_scores, clicks):
'''
ARGS: (all np.array of docids)
- result_list: the multileaved list
- doc_scores: matrix (rankers x documents) where [x,y] corresponds to the score of doc y in ranker x
ranking is done descendingly in score
- clicks: boolean array with clicked documents (lenght: multileaved.length)
- tau: tau used in softmax-functions for rankers
RETURNS:
- preference matrix: matrix (rankers x rankers) in this matrix [x,y] > 0 means x won over y and [x,y] < 0 means x lost from y
the value is analogous to the (average) degree of preference
'''
# n = result_list.shape[0]
# # normalization denominator for the complete ranking
# sigmoid_total = np.sum(float(1) / (np.arange(n) + 1) ** tau)
sample_ranking = rnk.rank_query(doc_scores[:,result_list],inverted=True)
sigmas = 1./(sample_ranking[:,clicks]+1.)**self._tau
scores = np.sum(sigmas,axis=1)
return np.sign(scores[:,None] - scores[None,:])
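# Hedged usage sketch (constructor arguments and array shapes are assumptions based on
# the ProbabilisticMultileave base class):
#
#   # ml = SampleOnlyScoredMultileave(k=10)
#   # result_list = ml.make_multileaving(descending_rankings, inverted_rankings)
#   # clicks = np.zeros(len(result_list)); clicks[0] = 1     # user clicked the top result
#   # prefs = ml.infer_preferences(result_list, clicks)      # (n_rankers, n_rankers) matrix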
| StarcoderdataPython |
1717608 | from Python_lab02_The_Life.models.position_model import PositionModel
class CellModel:
"""Cell representation"""
def __init__(self, position: PositionModel):
"""C'stor"""
self.__alive = False
self.__position = position
def change(self) -> None:
"""Change living state of cell"""
self.__alive = not self.__alive
@property
def alive(self) -> bool:
"""Check, is cell is alive"""
return self.__alive
@alive.setter
def alive(self, value: bool) -> None:
"""Setting alive"""
self.__alive = value
@property
def position(self) -> PositionModel:
"""Return position"""
return self.__position
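# Minimal usage sketch (PositionModel is assumed to take x/y coordinates):
#
#   # cell = CellModel(PositionModel(0, 0))
#   # cell.change()
#   # assert cell.alive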
| StarcoderdataPython |
3396007 | import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
out_file = sys.argv[1]
x_label = sys.argv[2]
y_label = sys.argv[3]
names = []
X = []
Y = []
i = 0
for l in sys.stdin:
A = l.rstrip().split()
if len(A) == 3:
names.append(A[0])
X.append(float(A[1]))
Y.append(float(A[2]))
if len(A) == 2:
X.append(float(A[0]))
Y.append(float(A[1]))
elif len(A) == 1:
X.append(float(i))
Y.append(float(A[0]))
i+=1
width=3
height=3
fig = plt.figure(figsize=(width,height),dpi=300)
ax = fig.add_subplot(1,1,1)
ax.plot(X, Y, '.', ms=3, alpha=1)
if len(names) > 0:
for i in range(len(names)):
ax.text(X[i],Y[i],names[i], fontsize=4)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
plt.savefig(out_file,bbox_inches='tight')
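# Example invocation (file names are hypothetical). Each stdin line may hold 1, 2 or 3
# whitespace-separated fields: y only, "x y", or "name x y" (names are drawn as labels).
#
#   paste xs.txt ys.txt | python plot_xy.py scatter.png "x label" "y label"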
| StarcoderdataPython |
4828158 | <reponame>jessamynsmith/django-enumfields
# -- encoding: UTF-8 --
import uuid
try:
from django.contrib.auth import get_user_model
except ImportError: # `get_user_model` only exists from Django 1.5 on.
from django.contrib.auth.models import User
get_user_model = lambda: User
from django.core.urlresolvers import reverse
from django.test import Client
import pytest
from enumfields import EnumIntegerField
from .models import MyModel
@pytest.fixture
def client():
return Client()
SUPERUSER_USERNAME = "superuser"
SUPERUSER_PASS = "<PASSWORD>"
@pytest.fixture
def superuser():
return get_user_model().objects.create_superuser(username=SUPERUSER_USERNAME, password=<PASSWORD>,
email="<EMAIL>")
@pytest.fixture
def superuser_client(client, superuser):
client.login(username=SUPERUSER_USERNAME, password=<PASSWORD>)
return client
@pytest.mark.django_db
@pytest.mark.urls('tests.urls')
def test_model_admin(superuser_client):
url = reverse("admin:tests_mymodel_add")
secret_uuid = str(uuid.uuid4())
post_data = {
'color': MyModel.Color.RED.value,
'taste': MyModel.Taste.UMAMI.value,
'taste_int': MyModel.Taste.SWEET.value,
'random_code': secret_uuid
}
response = superuser_client.post(url, follow=True, data=post_data)
response.render()
text = response.content
assert b"This field is required" not in text
assert b"Select a valid choice" not in text
try:
inst = MyModel.objects.get(random_code=secret_uuid)
except MyModel.DoesNotExist:
assert False, "Object wasn't created in the database"
assert inst.color == MyModel.Color.RED, "Redness not assured"
assert inst.taste == MyModel.Taste.UMAMI, "Umami not there"
assert inst.taste_int == MyModel.Taste.SWEET, "Not sweet enough"
def test_django_admin_lookup_value_for_integer_enum_field():
field = EnumIntegerField(MyModel.Taste)
assert field.get_prep_value(str(MyModel.Taste.BITTER)) == 3, "get_prep_value should be able to convert from strings"
| StarcoderdataPython |
14953 | <filename>src/quality_control/bin/createSpotDetectionQCHTML.py
import json
from bs4 import BeautifulSoup
import pandas as pd
import sys
# Argparsing
argument_index = 1
template = sys.argv[argument_index]
argument_index +=1
recall_json = sys.argv[argument_index]
argument_index +=1
recall_plot = sys.argv[argument_index]
argument_index +=1
precision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))]
precision_rows_list = []
# convert jsons back to dicts for html conversion
for json_path in precision_jsons_list:
with open(json_path, 'r') as json_file:
data = json.load(json_file)
precision_rows_list.append(data)
precision_df = pd.DataFrame(precision_rows_list)
precision_df = precision_df.sort_values(by='Round #')
precision_html_table = precision_df.to_html(index=False)
# Same for recall json
recall_rows_list = []
with open(recall_json, 'r') as json_file:
data=json.load(json_file)
recall_rows_list.append(data)
recall_df = pd.DataFrame(recall_rows_list)
recall_html_table = recall_df.to_html(index=False)
# Create html
with open(template, 'r') as template_file:
contents = template_file.read()
template_soup = BeautifulSoup(contents, features="html.parser")
p_list = template_soup.find_all('p')
p_index = 0
# Read recall table tag
recall_soup = BeautifulSoup(recall_html_table, features="html.parser")
table_tag = recall_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
image_tag = template_soup.new_tag('img')
image_tag['src']= f"./recall/{recall_plot}"
image_tag['width']= 700
image_tag['height']= 500
p_list[p_index].insert_after(image_tag)
p_index+=1
precision_soup = BeautifulSoup(precision_html_table, features="html.parser")
table_tag = precision_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
with open('spot_detection_qc_report.html', 'w') as result_file:
result_file.write(str( template_soup ))
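# Hedged invocation sketch (file names invented; arguments follow the positional parsing
# above: template, recall json, recall plot, then any number of precision jsons):
#
#   # python createSpotDetectionQCHTML.py template.html recall.json recall_plot.png \
#   #     precision_round1.json precision_round2.json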
| StarcoderdataPython |
3222047 | from gameComponents import gameVars
# Defining a win or lose function
def winorlose(status):
if status == "won":
pre_message = "You are the yuuuuuuugest winner ever! "
else:
pre_message = "You done trumped it, loser! "
print(pre_message + "Would you like to play again?")
choice = input("Y / N? ")
if choice == "Y" or choice == "y":
gameVars.player_lives = 5
gameVars.computer_lives = 5
gameVars.player = False
elif choice == "N" or choice == "n":
print("You chose to quit. Better luck next time!")
exit()
else:
print("Make a valid choice - Y or N")
choice = input("Y / N")
if choice == "Y" or choice == "y":
gameVars.player_lives = 5
gameVars.computer_lives = 5
gameVars.player = False
else:
print("You chose to quit. Better luck next time!")
exit()
| StarcoderdataPython |
1656100 | <gh_stars>0
__version__ = '0.74.0'
| StarcoderdataPython |
1623482 | <reponame>sourcery-ai-bot/Python-Curso-em-Video
# Write a program to approve a bank loan for buying a house.
# Ask for the price of the house, the buyer's salary and over how many years it will be paid.
# The monthly installment cannot exceed 30% of the salary, otherwise the loan is denied.
vlrcasa = float(input('What is the price of the house to be financed? '))
salario = float(input('What is your salary? '))
anos = int(input('Over how many years will the financing run? '))
prestação = vlrcasa/(anos*12)
if prestação > salario*0.30:
    print('Over {} years that is {} installments of R$ {:.2f}. This exceeds 30% of your salary, so the loan cannot be granted.'.format(anos, anos*12, prestação))
else:
    print('Your financing was APPROVED! The installment will be R$ {:.2f} over {} payments.'.format(prestação, anos*12))
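# Worked example: a 120000 house over 15 years means 180 installments of 666.67;
# approval then requires a salary of at least 666.67 / 0.30 ≈ 2222.22.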
| StarcoderdataPython |
3329720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import subprocess
import sys
from string import Template
DOCKER_IMAGE_NAME_RE = re.compile(r"^([a-zA-Z0-9_.]+/)?[a-zA-Z0-9_.]+$")
DOCKER_IMAGE_TAG_RE = re.compile(r"^[a-zA-Z0-9_.]+$")
ARCHIVE_NAME_VALID_CHAR_RE = re.compile(r"^[a-zA-Z0-9_]")
def docker_image_str(v):
if DOCKER_IMAGE_NAME_RE.match(v):
return v
else:
raise argparse.ArgumentTypeError("'{}' is not a valid Docker image name".format(v))
def docker_tag_str(v):
if DOCKER_IMAGE_TAG_RE.match(v):
return v
else:
raise argparse.ArgumentTypeError("'{}' is not a valid Docker tag name".format(v))
def run(args=[], wd=os.getcwd(), verbose=False):
args_str = " ".join(args)
if verbose:
print "--- Running '{}'...".format(args_str)
returncode = subprocess.call(args, cwd=wd)
sys.stdout.flush()
if returncode != 0:
print "--- Error while running '{}'! See above for details".format(args_str)
return False
else:
return True
else:
try:
output = subprocess.check_output(
args,
stderr=subprocess.STDOUT,
cwd=wd)
return True
except subprocess.CalledProcessError, e:
print "--- Error while running '{}'! See below for details".format(args_str)
print e.output
print "---"
return False
def templated_run(templated_args=[], cfg_dict={}, wd=os.getcwd(), verbose=False):
args = []
for templates_arg in templated_args:
arg = Template(templates_arg).substitute(**cfg_dict)
args.append(arg)
return run(args=args, wd=wd, verbose=verbose)
def load_configuration(cfg_file="package.json"):
# Loading the configuration file
cfg_dict = dict()
with open(cfg_file) as cfg_file_data:
cfg_dict = json.load(cfg_file_data)
# Setupping the 'file_dir' variable
file_dir = os.path.dirname(os.path.abspath(cfg_file))
docker_file_dir = file_dir
if sys.platform == "win32":
drive, path = os.path.splitdrive(file_dir)
drive_letter = drive.replace(":","").lower()
path_to = path.replace("\\","/")
docker_file_dir = "/" + drive_letter + path_to
cfg_dict["file_dir"] = file_dir
cfg_dict["docker_file_dir"] = docker_file_dir
cfg_dict["file"] = cfg_file
return cfg_dict
def create_args_parser(cfg_dict):
# Parse command line arguments
args_parser = argparse.ArgumentParser(
description="Build '{}' Docker image".format(cfg_dict["name"]),
epilog="Configuration (incl. default parameters value) are loaded from '{}'".format(cfg_dict["file"]))
args_parser.add_argument(
"--name",
dest="image_name",
type=docker_image_str,
help="Docker image name (default is '%(default)s')")
args_parser.add_argument(
"--tag",
dest="image_tag",
type=docker_tag_str,
help="Docker image tag (default is '%(default)s')")
args_parser.add_argument(
"--version",
help="Version (default is '%(default)s')")
args_parser.add_argument(
"--build",
help="Build identifier (default is '%(default)s')",
default="internal")
args_parser.add_argument(
"-v", "--verbose",
action="store_true",
help="Increase verbosity")
args_parser.add_argument(
"--skip_build",
dest="do_build",
action="store_false",
help="Skip the image build (make sure the image has been built before)")
if "image_test" in cfg_dict:
args_parser.add_argument(
"--test",
dest="test",
action="store_true",
help="Test the image")
args_parser.add_argument(
"--save",
dest="save",
action="store_true",
help="Save the image as a '.tar'")
args_parser.add_argument(
"--push",
dest="push",
action="store_true",
help="Push the image to Docker Hub")
return args_parser
def parse_args(args_parser, cfg_dict):
args_parser.set_defaults(**cfg_dict)
cfg_dict.update(vars(args_parser.parse_args()))
cfg_dict["full_image_name"] = cfg_dict["image_name"] + ":" + cfg_dict["image_tag"]
cfg_dict["image_out_file"] = os.path.join(
"out",
"{}_{}_{}.tar".format(
"".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "_" for c in cfg_dict["image_name"]),
"".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "_" for c in cfg_dict["image_tag"]),
"".join(c if ARCHIVE_NAME_VALID_CHAR_RE.match(c) else "" for c in cfg_dict["build"])))
return cfg_dict
def build(cfg_file="package.json"):
# Load the configuration
cfg_dict = load_configuration(cfg_file)
# Create the cli agument parser
args_parser = create_args_parser(cfg_dict)
## Parse the cli arguments
cfg_dict = parse_args(args_parser, cfg_dict)
print ("Building docker image for '{}', version '{}' ({})...".format(
cfg_dict["name"],
cfg_dict["version"],
cfg_dict["build"]))
if cfg_dict["do_build"]:
if not templated_run(
templated_args=["docker", "build", "-t", "${full_image_name}", "."],
cfg_dict=cfg_dict,
wd=cfg_dict["file_dir"],
verbose=cfg_dict["verbose"]):
exit(1)
print ("-- Docker image '{}' built successfully".format(cfg_dict["full_image_name"]))
if "image_test" in cfg_dict and cfg_dict["test"]:
success = True
for docker_test_raw_args in cfg_dict["image_test"]:
if not templated_run(
templated_args=docker_test_raw_args,
cfg_dict=cfg_dict,
wd=cfg_dict["file_dir"],
verbose=cfg_dict["verbose"]):
success = False
if not success:
exit(2)
print ("-- Docker image '{}' tested successfully".format(image_name))
if cfg_dict["save"]:
image_package_path = os.path.join(cfg_dict["file_dir"], cfg_dict["image_out_file"])
if not os.path.exists(os.path.dirname(image_package_path)):
os.makedirs(os.path.dirname(image_package_path))
if not templated_run(
templated_args=["docker", "save", "-o", "${docker_file_dir}/${image_out_file}", "${full_image_name}"],
cfg_dict=cfg_dict,
wd=cfg_dict["file_dir"],
verbose=cfg_dict["verbose"]):
exit(3)
print ("-- Docker image successfully saved to '{}'".format(image_package_path))
if cfg_dict["push"]:
if not templated_run(
templated_args=["docker", "push", "${full_image_name}"],
cfg_dict=cfg_dict,
wd=cfg_dict["file_dir"],
verbose=cfg_dict["verbose"]):
exit(4)
print ("-- Docker image successfully pushed to Docker Hub")
| StarcoderdataPython |
1661858 | <gh_stars>0
import random
from jmetal.core.problem import Problem
from jmetal.core.solution import FloatSolution, CompositeSolution, IntegerSolution
from AlgoritmoGenetico.Problema.Reducao_Regras import Reducao
from SistemaFuzzy.Model.Regra import Regra
from SistemaFuzzy.Raciocinio.Geral import Classificacao
from AlgoritmoGenetico.Problema.Semente import Semente
from copy import copy
class MixedIntegerFloatProblem(Problem):
    def __init__(self, particoes, regras, instancias, classes): # PROBLEM CONFIGURATION
super(MixedIntegerFloatProblem, self).__init__()
self.particoes = particoes
self.regras = regras
self.instancias = instancias
self.classes = classes
self.isSeed = True
self.semente = Semente(self.regras, self.instancias, self.particoes)
self.antecedentes, self.consequentes = self.semente.preProcessamentoRegra()
self.lower_centroids, self.centroids, self.upper_centroids, variacao_max_conjuntos = self.semente.preProcessamentoCentroid()
tamanho_antecedentes = len(self.antecedentes)
tamanho_consequentes = len(self.consequentes)
self.int_lower_bound_attribute = [-1 for _ in range(tamanho_antecedentes)]
self.int_upper_bound_attribute = []
for _ in range(0, len(self.antecedentes), self.semente.qtdAntecedenteRegra):
self.int_upper_bound_attribute += variacao_max_conjuntos
self.int_lower_bound_label = [0 for _ in range(tamanho_consequentes)]
self.int_upper_bound_label = [len(classes) - 1 for _ in range(tamanho_consequentes)]
        # JMETAL VALUES
self.number_of_objectives = 2
self.number_of_variables = 3
self.number_of_constraints = 0
self.obj_directions = [self.MINIMIZE]
self.obj_labels = ['Ones']
self.controle = 1
    def create_solution(self) -> CompositeSolution: # PROBLEM INITIALIZATION
attributes_solution = IntegerSolution(self.int_lower_bound_attribute, self.int_upper_bound_attribute,
self.number_of_objectives, self.number_of_constraints)
labels_solution = IntegerSolution(self.int_lower_bound_label, self.int_upper_bound_label,
self.number_of_objectives, self.number_of_constraints)
points_solution = FloatSolution(self.lower_centroids, self.upper_centroids,
self.number_of_objectives, self.number_of_constraints)
if self.isSeed:
attributes_solution.variables = self.antecedentes
labels_solution.variables = self.consequentes
points_solution.variables = self.centroids
self.isSeed = False
else:
attributes_solution.variables = \
[random.randint(self.int_lower_bound_attribute[i], self.int_upper_bound_attribute[i]) for i in
range(len(self.int_lower_bound_attribute))]
labels_solution.variables = \
[random.randint(self.int_lower_bound_label[i], self.int_upper_bound_label[i]) for i in
range(len(self.int_lower_bound_label))]
points_solution.variables = \
[random.uniform(self.lower_centroids[i], self.upper_centroids[i]) for i
in range(len(self.lower_centroids))]
return CompositeSolution([attributes_solution, labels_solution, points_solution])
def get_name(self) -> str:
return "Mixed Integer Float Problem"
def alterar_centroids(self, cromossomo_centroids):
index = 0
particoes = []
for particao in self.particoes:
particao = copy(particao)
tamanhoPontoCentral = len(particao.pontosCentrais)
p_centrais_atuais = cromossomo_centroids[index:index + tamanhoPontoCentral]
particao.setPontosCentrais(p_centrais_atuais)
index += tamanhoPontoCentral
particoes.append(particao)
return particoes
def cromossomo_para_regras(self, cromossomo_antecedentes, cromossomo_consequente, tam_antecedentes, particoes):
regras = []
for index_classe, salto in enumerate(range(0, len(cromossomo_antecedentes), tam_antecedentes)):
antecedentes = cromossomo_antecedentes[salto:salto+tam_antecedentes]
consequente = cromossomo_consequente[index_classe]
regra = Regra(antecedentes, consequente, 0)
regras.append(regra)
return Reducao(regras, self.instancias, particoes).reduzir()
def evaluate(self, solution: CompositeSolution) -> CompositeSolution:
antecedentes = solution.variables[0].variables
consequentes = solution.variables[1].variables
centroides = solution.variables[2].variables
particoes = self.alterar_centroids(centroides)
new_regras = self.cromossomo_para_regras(antecedentes, consequentes, self.semente.qtdAntecedenteRegra, particoes)
classificacao = Classificacao(particoes, new_regras, self.instancias, self.classes)
acuracia, interpretabilidadeCondicoes = classificacao.classificar()
solution.objectives[0] = -acuracia
solution.objectives[1] = -interpretabilidadeCondicoes
        return solution
| StarcoderdataPython |
100342 | <reponame>calebho/gameanalysis
"""Module for performing game analysis"""
__version__ = '8.0.3'
| StarcoderdataPython |
3249463 | import time
from debugwire import DWException
def hexdump(data):
return " ".join("{:02x}".format(b) for b in data)
class BaseInterface:
def __init__(self, enable_log=False):
self.enable_log = enable_log
def _log(self, msg):
if self.enable_log:
print(msg)
class BaseSerialInterface(BaseInterface):
def _detect_baudrate(self):
# TODO: Make an actual auto-detection algorithm
for guess in [62500, 12500, 7812, 5000, 6250]:
self.dev.baudrate = guess
if 0x55 in self.send_break():
self._log("Baudrate detected as {}".format(guess))
return self.dev.baudrate
raise DWException("Failed to autodetect baudrate.")
def close(self):
if self.dev:
self.dev.close()
self.dev = None
def write(self, data):
data = bytes(data)
self._log(">"+ hexdump(data))
start = time.time()
nwrite = 0
while nwrite < len(data):
nwrite += self.dev.write(data[nwrite:])
if time.time() - start >= self.timeout:
raise DWException("Write timeout. Check connections and make sure debugWIRE is enabled.")
self.read(nwrite, _log=False)
def read(self, nread, _log=True):
start = time.time()
buf = b""
while len(buf) < nread:
buf += self.dev.read(nread - len(buf))
if time.time() - start >= self.timeout:
raise DWException("Read timeout. Check connections and make sure debugWIRE is enabled.")
if _log:
self._log("<" + hexdump(buf))
return buf
class FTDIInterface(BaseSerialInterface):
def __init__(self, baudrate, timeout=2, enable_log=False):
super().__init__(enable_log)
self.port = "FTDI"
self.baudrate = baudrate
self.timeout = timeout
self.dev = None
def open(self):
from pylibftdi.serial_device import SerialDevice
self.dev = SerialDevice()
if self.baudrate is None:
self.baudrate = self._detect_baudrate()
else:
self.dev.baudrate = self.baudrate
self.dev.read(1024)
return self.dev.baudrate
def send_break(self):
self._log(">break")
self.dev.ftdi_fn.ftdi_set_line_property2(8, 0, 0, 1)
time.sleep(0.002)
self.dev.ftdi_fn.ftdi_usb_purge_rx_buffer()
self.dev.read(1024)
self.dev.ftdi_fn.ftdi_set_line_property2(8, 0, 0, 0)
time.sleep(0.002)
return self.read(1)
class SerialInterface(BaseSerialInterface):
def __init__(self, port, baudrate, timeout=2, enable_log=False):
super().__init__(enable_log)
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self.dev = None
def open(self):
from serial import Serial
if self.port is None:
self._detect_port()
self.dev = Serial(
port=self.port,
baudrate=self.baudrate or 9600,
timeout=self.timeout,
write_timeout=self.timeout)
self.dev.reset_input_buffer()
if self.baudrate is None:
self.baudrate = self._detect_baudrate()
return self.baudrate
def _detect_port(self):
from serial.tools.list_ports import comports
for p in comports():
if p.vid:
self.port = p.device
break
else:
raise DWException("Failed to find a USB serial adapter.")
def send_break(self):
self._log(">break")
self.dev.break_condition = True
time.sleep(0.002)
self.dev.break_condition = False
time.sleep(0.002)
return self.read(2)
interfaces = {
"serial": SerialInterface,
"ftdi": FTDIInterface,
}
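# Hedged usage sketch (port and baudrate are examples; requires pyserial/pylibftdi and a
# debugWIRE-enabled target wired to the adapter):
#
#   # iface = interfaces["serial"](port="/dev/ttyUSB0", baudrate=None, enable_log=True)
#   # detected = iface.open()      # baudrate is auto-detected when None is given
#   # iface.send_break()           # a responsive target answers with 0x55
#   # iface.close()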
| StarcoderdataPython |
91956 | <gh_stars>0
"""
Produces color tables on stdout
"""
from mdv import tools
from mdv.plugs import plugins
from mdv.plugins import color_table_256, color_table_256_true
def colors_4():
return dict([(str(k - 10) + ' ', str(k)) for k in range(40, 48)])
def colors_8():
return color_table_256.colors
def colors_8_true():
return color_table_256_true.colors
def colors_24():
return plugins.colors_web.colors
W = [0]
tables = [
[
'24 Bit (Web) Colors',
'CSS e.g. {color: rgb(255, 0, 0)} or #F00 or hsl() or name...',
colors_24,
'48;2;',
],
[
'8 Bit Colors Absolute',
'Config: color_table_256_true, then CSS: e.g. {color: ansi(9)})',
colors_8_true,
'48;2;',
],
['8 Bit Colors, 0-15 Themable', 'CSS: {color: ansi(<0-255>)}', colors_8, '48;5;'],
['3/4 Bit Colors', 'CSS: {color: ansi("<30-37>")}', colors_4, ''],
]
def add_table(title, subt, table, r, prefix, max_, filt):
title = title.ljust(W[0])
r.extend(['\n\n\x1b[1;47;30m', title, '\x1b[0m\n'])
r.extend([subt, '\n'])
l = max([len(k) for k in table])
cols = int(W[0] / l) - 1
colors = []
add = r.append
def flush_colors():
add('\n')
r.extend(colors)
colors.clear()
add('\n')
return 0
i = -1
j = 0
msg = ''
for k, v in table.items():
if filt and not filt in k and not filt in str(v):
continue
i += 1
j += 1
if j > max_ and max_:
msg = '(%s more colors)' % (len(table) - j)
break
if i > cols:
i = flush_colors()
add(('%s ' % k)[:l])
colors.append('\x1b[%s%sm%s\x1b[0m ' % (prefix, v, ' ' * (l - 1)))
flush_colors()
if msg:
r.append(msg)
def run(filter='', max=0):
"""
Displays Various Colortables on stdout
- filter: When supplied we skip non matching entries. If a title matches we don't
display the others but all colors within the matching table.
- max: Maximum entries displayed per table
"""
r = []
W[0] = tools.C['width']
for title, subt, func, prefix in tables:
table = func()
if filter and filter in title:
r.clear()
            filter = False
add_table(title, subt, table, r, prefix, max, filter)
if filter == False:
break
print(''.join(r))
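# Hedged usage sketch (how mdv dispatches plugin commands is not shown here, so this is a
# direct call): run(filter="red", max=16) prints only entries whose name or value
# contains "red", with at most 16 swatches per table.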
| StarcoderdataPython |
1673075 | <reponame>BubuLK/sfepy
#!/usr/bin/env python
"""
Plot mesh connectivities, facet orientations, global and local DOF ids etc.
To switch off plotting some mesh entities, set the corresponding color to
`None`.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from sfepy.base.base import output
from sfepy.base.conf import dict_from_string
from sfepy.discrete.fem import Mesh, FEDomain
import sfepy.postprocess.plot_cmesh as pc
helps = {
'vertex_opts' : 'plotting options for mesh vertices'
' [default: %(default)s]',
'edge_opts' : 'plotting options for mesh edges'
' [default: %(default)s]',
'face_opts' : 'plotting options for mesh faces'
' [default: %(default)s]',
'cell_opts' : 'plotting options for mesh cells'
' [default: %(default)s]',
'wireframe_opts' : 'plotting options for mesh wireframe'
' [default: %(default)s]',
'no_axes' :
'do not show the figure axes',
'no_show' :
'do not show the mesh plot figure',
}
def main():
default_vertex_opts = """color='k', label_global=12,
label_local=8"""
default_edge_opts = """color='b', label_global=12,
label_local=8"""
default_face_opts = """color='g', label_global=12,
label_local=8"""
default_cell_opts = """color='r', label_global=12"""
default_wireframe_opts = "color='k'"
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--vertex-opts', metavar='dict-like',
action='store', dest='vertex_opts',
default=default_vertex_opts,
help=helps['vertex_opts'])
parser.add_argument('--edge-opts', metavar='dict-like',
action='store', dest='edge_opts',
default=default_edge_opts,
help=helps['edge_opts'])
parser.add_argument('--face-opts', metavar='dict-like',
action='store', dest='face_opts',
default=default_face_opts,
help=helps['face_opts'])
parser.add_argument('--cell-opts', metavar='dict-like',
action='store', dest='cell_opts',
default=default_cell_opts,
help=helps['cell_opts'])
parser.add_argument('--wireframe-opts', metavar='dict-like',
action='store', dest='wireframe_opts',
default=default_wireframe_opts,
help=helps['wireframe_opts'])
parser.add_argument('--no-axes',
action='store_false', dest='axes',
help=helps['no_axes'])
parser.add_argument('-n', '--no-show',
action='store_false', dest='show',
help=helps['no_show'])
parser.add_argument('filename')
parser.add_argument('figname', nargs='?')
options = parser.parse_args()
entities_opts = [
dict_from_string(options.vertex_opts),
dict_from_string(options.edge_opts),
dict_from_string(options.face_opts),
dict_from_string(options.cell_opts),
]
wireframe_opts = dict_from_string(options.wireframe_opts)
filename = options.filename
mesh = Mesh.from_file(filename)
output('Mesh:')
output(' dimension: %d, vertices: %d, elements: %d'
% (mesh.dim, mesh.n_nod, mesh.n_el))
domain = FEDomain('domain', mesh)
output(domain.cmesh)
domain.cmesh.cprint(1)
dim = domain.cmesh.dim
if dim == 2: entities_opts.pop(2)
ax = pc.plot_cmesh(None, domain.cmesh,
wireframe_opts=wireframe_opts,
entities_opts=entities_opts)
ax.axis('image')
if not options.axes:
ax.axis('off')
plt.tight_layout()
if options.figname:
fig = ax.figure
fig.savefig(options.figname, bbox_inches='tight')
if options.show:
plt.show()
if __name__ == '__main__':
main()
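# Hedged invocation sketch (file names are placeholders; per the module docstring,
# setting a color to None hides that entity class, and -n suppresses the plot window):
#
#   # python plot_mesh.py path/to/mesh.vtk fig.png --cell-opts "color=None" -n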
| StarcoderdataPython |
87512 | #
# This script will reboot the AWS instance based on the instance-id
#
# Prerequisite: the AWS CLI must be installed and configured first!
#
import os
import time
print(time.ctime())
#time.sleep(600)
aws_cmd1 = 'aws ec2 describe-instances --query "Reservations[].Instances[].InstanceId" --filter "Name=instance-state-name,Values=running" "Name=image-id,Values=ami-0cf5905679d7747fd" --no-verify --output text > instanceid.txt'
os.system(aws_cmd1)
print(time.ctime())
filename = "instanceid.txt"
try:
with open(filename, "r") as fp:
lines = fp.readlines()
print (lines)
#cnt = 1
        for line in lines:
            # strip the trailing newline from readlines() so the CLI gets a clean instance id
            instance_id = line.strip()
            #print("Line {}: {}".format(cnt, instance_id))
            print("the instance id is:", instance_id)
            aws_cmd2 = 'aws ec2 reboot-instances --no-verify --instance-ids %s' % instance_id
            #print (aws_cmd2)
            os.system(aws_cmd2)
# do stuff here
# aws ec2 reboot-instances --instance-ids %instance_id% --no-verify
finally:
fp.close()
| StarcoderdataPython |
4803068 | <filename>pewpew/hdf5/reader.py
from ..base import StreamElement
import logging
import h5py
import os
class Reader(StreamElement):
log = logging.getLogger('pewpew.hdf5.reader')
def on_start(self):
self.file_list = self.config.get('file_list', [])
self.repeat = self.config.get('repeat', False)
self.file = None
self.file_iter = None
self.event_iter = None
self._read_list_ = None
self.metadata = None
self.data = {}
self.event_number = None
def _file_iter_(self):
for file_ in self.file_list:
yield h5py.File(file_, 'r')
def get_dataset(self, input_file):
def walk_tree(obj, path='/'):
ret = [path]
t = type(obj)
if t == h5py._hl.files.File or t == h5py._hl.group.Group:
for i in obj:
child = obj[i]
ret = ret + walk_tree(child, os.path.join(path, i))
return ret
def filter(item, input_file):
return type(input_file[item]) == h5py._hl.dataset.Dataset
ret = [i for i in walk_tree(input_file) if filter(i, input_file)]
return ret
def get_metadata(self):
ret = {}
for i in self._read_list_:
ret[i] = {}
for j in self.file[i].attrs:
ret[i][j] = self.file[i].attrs[j]
return ret
def process(self, data=None):
while self.file is None:
if self.file_iter is None:
self.file_iter = self._file_iter_()
try:
self.file = next(self.file_iter)
self.log.debug("opening {}".format(self.file))
self._read_list_ = self.get_dataset(self.file)
event_range = self.file[self._read_list_[0]].shape[0]
self.event_iter = iter(range(event_range))
self.data['meta'] = self.get_metadata()
self.data['data'] = {}
except StopIteration:
self.log.info("hit end of file iter")
if self.repeat:
self.file = None
self.file_iter = None
else:
return None
except Exception as e:
self.log.warning(e)
self.file = None
continue
try:
self.event_number = next(self.event_iter)
except StopIteration:
msg = "hit end of file at {} event(s)"
self.log.info(msg.format(self.event_number + 1))
self.file = None
return self.process(data)
for dataset in self._read_list_:
try:
tmp = self.file[dataset][self.event_number]
self.data['data'][dataset] = tmp
except Exception as e:
self.log.warning(e)
self.file = None
return self.process(data)
return self.data
| StarcoderdataPython |
1732947 | <filename>mython/trampoline.py
#! /usr/bin/env python
# ______________________________________________________________________
"""
Defines a set of utilities for LL(1) parsing using a trampoline
instead of a call stack.
The trampoline uses generators and a heap-based stack instead of the
Python call stack.
"""
# ______________________________________________________________________
# Module imports
import token
# ______________________________________________________________________
# Module data
__DEBUG__ = False
# ______________________________________________________________________
# Function definitions
class TokenStream (object):
def __init__ (self, tokenizer):
self.tokenizer = tokenizer
self.next_token = None
def tokenize (self):
return next(self.tokenizer)
def get_token (self):
ret_val = None
if self.next_token is None:
ret_val = self.tokenize()
else:
ret_val = self.next_token
self.next_token = None
return ret_val
def get_lookahead (self):
ret_val = None
if self.next_token is None:
ret_val = self.tokenize()
self.next_token = ret_val
else:
ret_val = self.next_token
return ret_val
def test_lookahead (self, *tokens):
ret_val = False
lookahead = self.get_lookahead()
if lookahead[0] in tokens:
ret_val = True
elif lookahead[1] in tokens:
ret_val = True
return ret_val
def expect (self, *tokens):
crnt_token = self.get_token()
if (crnt_token[0] not in tokens) and (crnt_token[1] not in tokens):
raise SyntaxError("Got %s, expected %s." % (str(crnt_token),
str(token)))
return crnt_token
# ______________________________________________________________________
class ExclusiveTokenStream (TokenStream):
def __init__ (self, tokenizer, exclusion_set):
TokenStream.__init__(self, tokenizer)
self.excludes = exclusion_set
def tokenize (self):
ret_val = next(self.tokenizer)
while ret_val[0] in self.excludes:
ret_val = next(self.tokenizer)
return ret_val
# ______________________________________________________________________
class TreeBuilder (object):
def __init__ (self):
self.tree = ('start', [])
self.stack = [self.tree]
def push (self, elem):
node = (elem, [])
self.stack[-1][1].append(node)
self.stack.append(node)
return node
def pop (self):
return self.stack.pop()
def pushpop (self, elem):
node = (elem, [])
self.stack[-1][1].append(node)
return node
# ______________________________________________________________________
def trampoline_parse (handlers, instream, outtree = None):
"""Parse a lexical stream using a set of handler generators."""
if outtree is None:
outtree = TreeBuilder()
generator_stack = [handlers['start'](instream, outtree)]
while generator_stack:
try:
next_gen = next(generator_stack[-1])
generator_stack.append(handlers[next_gen](instream, outtree))
except StopIteration:
del generator_stack[-1]
return outtree
# ______________________________________________________________________
def pgen_grammar_to_handlers (grammar, handlers):
"""Extend a trampoline map with handlers for a pgen grammar tuple."""
dfas, labels, start, accel = grammar
label_map = {}
i = 0
for label in labels:
label_map[label] = i
i += 1
# Note that the old version of classify() (see
# basil.lang.python.DFAParser) was very inefficient, doing a
# linear search through the grammar labels. Using a dictionary
# should be faster.
def classify (intoken):
tok_type, tok_name, tok_start, tok_stop, tok_line = intoken
if (tok_type == token.NAME) and ((tok_type, tok_name) in label_map):
return label_map[(tok_type, tok_name)]
return label_map.get((tok_type, None), -1)
# TODO: Check for and add accelerators...
assert accel
for dfa in dfas:
handler = dfa_to_handler(classify, dfa, labels)
handlers[dfa[0]] = handler
handlers[dfa[1]] = handler
return handlers
# ______________________________________________________________________
def dfa_to_handler (classify, dfa, symbol_tab = None):
"""Convert a DFA to a generator compatible with trampoline_parse.
Accepts a classify function used to map from a token to a symbol
in the grammar (these are the indicies used for state
transitions/accelerators), a deterministic state automaton tuple,
and an optional symbol table. Returns a generator function that
conforms to the trampoline parser protocol.
"""
dfa_num, dfa_name, dfa_initial, states = (dfa[0], dfa[1], dfa[2], dfa[3])
def _parse_dfa (instream, outtree):
if __DEBUG__:
print("Parse:%s" % dfa_name)
outtree.push(dfa_name)
state = states[dfa_initial]
while 1:
arcs, (accel_upper, accel_lower, accel_table), accept = state
crnt_token = instream.get_lookahead()
ilabel = classify(crnt_token)
if __DEBUG__:
symbol_str = ""
if symbol_tab:
symbol_str = " %r" % (symbol_tab[ilabel],)
print("%r %r%s %r" % (crnt_token, ilabel, symbol_str,
ilabel-accel_lower))
if (accel_lower <= ilabel) and (ilabel < accel_upper):
accel_result = accel_table[ilabel - accel_lower]
if -1 != accel_result:
if (accel_result & (1<<7)):
# PUSH
nt = (accel_result >> 8) + token.NT_OFFSET
if __DEBUG__:
print("PUSH %d" % nt)
yield nt
state = states[accel_result & ((1<<7) - 1)]
else:
# SHIFT
if __DEBUG__:
print("SHIFT %r" % (crnt_token,))
outtree.pushpop(instream.get_token())
state = states[accel_result]
if state[2] and len(state[0]) == 1:
break
continue
if accept:
break
else:
# TODO: Make the error string more instructive, like
# the older DFAParser stuff did.
candidates = [symbol_tab[accel_lower + accel_index]
for accel_result, accel_index in
zip(accel_table, range(len(accel_table)))
if accel_result != -1]
if __DEBUG__:
label_index = accel_lower
for accel_result in accel_table:
if accel_result != -1:
symbol_str = ""
if symbol_tab:
symbol_str = " %r" % (symbol_tab[label_index],)
print("%r%s => %d" % (label_index, symbol_str,
accel_result))
label_index += 1
print("len(%r) = %d" % (accel_table, len(accel_table)))
line_no, column_no = crnt_token[2]
token_str = crnt_token[1]
fmt_tup = (line_no, column_no, token_str)
raise SyntaxError("Line %d, column %d, unexpected '%s'." %
fmt_tup)
if __DEBUG__:
print("POP %s" % dfa_name)
outtree.pop()
return
return _parse_dfa
# ______________________________________________________________________
# End of trampoline.py
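# Hedged usage sketch: a hand-written handler map for a toy grammar, bypassing pgen.
# Handler names and the token source below are invented for illustration only.
#
#   # def _start(instream, outtree):
#   #     while not instream.test_lookahead(token.ENDMARKER):
#   #         yield 'item'                       # trampoline pushes handlers['item']
#   #
#   # def _item(instream, outtree):
#   #     outtree.pushpop(instream.expect(token.NAME))
#   #     return
#   #     yield                                  # unreachable; keeps this a generator
#   #
#   # tree = trampoline_parse({'start': _start, 'item': _item},
#   #                         TokenStream(iter(my_token_tuples)))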
| StarcoderdataPython |
1684238 | # -*- coding: utf-8 -*-
"""Generate the coverage.rst and coverage.rst files from test
results."""
from __future__ import print_function
import os
import sys
from docs_common import check_cclib
# Import cclib and check we are using the version from a subdirectory.
import cclib
check_cclib(cclib)
def generate_coverage():
"""Generate a string containing a reStructuredTest table
representation of which parsers support which attributes, based on
test results.
"""
lines = []
# Change directory to where tests are and add it to the path. Because there are
# separate directories for different branches/versions, and we use a symlink to
# point to the one we want, we need to test the real path this link resolves to.
if "cclib_prod" in os.path.realpath('cclib'):
testpath = "_build/cclib_prod"
else:
assert "cclib_dev" in os.path.realpath('cclib')
testpath = "_build/cclib_dev"
os.chdir(testpath)
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, thispath)
from test.test_data import (all_modules, all_parsers, parser_names, DataSuite)
import inspect
ds_args = inspect.getargspec(DataSuite.__init__).args
logpath = thispath + "/coverage.tests.log"
try:
with open(logpath, "w") as flog:
stdout_backup = sys.stdout
sys.stdout = flog
alltests = {}
for p in parser_names:
assert 'parsers' in ds_args
suite = DataSuite(parsers={p: all_parsers[p]}, modules=all_modules, stream=flog)
suite.testall()
alltests[p] = [{'data': t.data} for t in suite.alltests]
sys.stdout = stdout_backup
except Exception as e:
print("Unit tests did not run correctly. Check log file for errors:")
with open(logpath) as fh:
print(fh.read())
print(e)
sys.exit(1)
ncols = len(parser_names) + 1
colwidth = 20
colfmt = "%%-%is" % colwidth
dashes = ("=" * (colwidth - 1) + " ") * ncols
lines.append(dashes)
lines.append(colfmt * ncols % tuple(["attributes"] + parser_names))
lines.append(dashes)
# Eventually we want to move this to cclib, too.
not_applicable = {
'ADF' : ['aonames', 'ccenergies', 'mpenergies'],
'DALTON' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESS' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESSUK' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Gaussian' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Jaguar' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Molpro' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'NWChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'ORCA' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Psi' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'QChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
}
not_possible = {
'Psi' : ['aooverlaps', 'vibirs'],
'QChem' : ['aooverlaps'],
}
# For each attribute, get a list of Boolean values for each parser that flags
# if it has been parsed by at least one unit test. Substitute an OK sign or
# T/D appropriately, with the exception of attributes that have been explicitely
# designated as N/A.
attributes = sorted(cclib.parser.data.ccData._attrlist)
for attr in attributes:
parsed = [any([attr in t['data'].__dict__ for t in alltests[p]]) for p in parser_names]
for ip, p in enumerate(parsed):
if p:
parsed[ip] = "√"
else:
if attr in not_applicable.get(parser_names[ip], []):
parsed[ip] = "N/A"
elif attr in not_possible.get(parser_names[ip], []):
parsed[ip] = "N/P"
else:
parsed[ip] = "T/D"
lines.append(colfmt*ncols % tuple(["`%s`_" % attr] + parsed))
lines.append(dashes)
lines.append("")
for attr in attributes:
lines.append(".. _`%s`: data_notes.html#%s" % (attr, attr))
return "\n".join(lines)
if __name__ == "__main__":
print(generate_coverage())
| StarcoderdataPython |
3228769 | <reponame>jhm-/nhlscrappo
from nhlscrappo import __version__
from distutils.core import setup
from setuptools import find_packages
def _read(file):
return open(file, 'rb').read()
setup(name="nhlscrappo",
version=__version__,
description="Web scraping API for NHL.com Real Time Shot System (RTSS) reports",
long_description=_read('README.md').decode('utf-8'),
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="https://github.com/jhm-/nhlscrappo",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=['lxml', 'beautifulsoup4']
)
| StarcoderdataPython |
164891 | <filename>home/migrations/0006_remove_banner_site.py
# Generated by Django 3.1.7 on 2022-02-28 18:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0005_banner'),
]
operations = [
migrations.RemoveField(
model_name='banner',
name='site',
),
]
| StarcoderdataPython |
1640273 | import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.phototourism import build_tourism
from models.nerf import build_nerf
from models.rendering import get_rays_tourism, sample_points, volume_render
from utils.tour_video import create_interpolation_video
def inner_loop(model, optim, img, rays_o, rays_d, bound, num_samples, raybatch_size, inner_steps):
"""
train the inner model for a specified number of iterations
"""
pixels = img.reshape(-1, 3)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
num_rays = rays_d.shape[0]
for step in range(inner_steps):
indices = torch.randint(num_rays, size=[raybatch_size])
raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]
pixelbatch = pixels[indices]
t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],
num_samples, perturb=True)
optim.zero_grad()
rgbs, sigmas = model(xyz)
colors = volume_render(rgbs, sigmas, t_vals)
loss = F.mse_loss(colors, pixelbatch)
loss.backward()
optim.step()
def report_result(model, img, rays_o, rays_d, bound, num_samples, raybatch_size):
"""
report synthesis result on heldout view
"""
pixels = img.reshape(-1, 3)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],
num_samples, perturb=False)
synth = []
num_rays = rays_d.shape[0]
with torch.no_grad():
for i in range(0, num_rays, raybatch_size):
rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])
color_batch = volume_render(rgbs_batch, sigmas_batch, t_vals[i:i+raybatch_size])
synth.append(color_batch)
synth = torch.cat(synth, dim=0)
error = F.mse_loss(synth, pixels)
psnr = -10*torch.log10(error)
return psnr
def test():
parser = argparse.ArgumentParser(description='phototourism with meta-learning')
parser.add_argument('--config', type=str, required=True,
help='config file for the scene')
parser.add_argument('--weight-path', type=str, required=True,
help='path to the meta-trained weight file')
args = parser.parse_args()
with open(args.config) as config:
info = json.load(config)
for key, value in info.items():
args.__dict__[key] = value
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_set = build_tourism(image_set="test", args=args)
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
model = build_nerf(args)
model.to(device)
checkpoint = torch.load(args.weight_path, map_location=device)
meta_state_dict = checkpoint['meta_model_state_dict']
test_psnrs = []
for idx, (img, pose, kinv, bound) in enumerate(test_loader):
img, pose, kinv, bound = img.to(device), pose.to(device), kinv.to(device), bound.to(device)
img, pose, kinv, bound = img.squeeze(), pose.squeeze(), kinv.squeeze(), bound.squeeze()
rays_o, rays_d = get_rays_tourism(img.shape[0], img.shape[1], kinv, pose)
# optimize on the left half, test on the right half
left_width = img.shape[1]//2
right_width = img.shape[1] - left_width
tto_img, test_img = torch.split(img, [left_width, right_width], dim=1)
tto_rays_o, test_rays_o = torch.split(rays_o, [left_width, right_width], dim=1)
tto_rays_d, test_rays_d = torch.split(rays_d, [left_width, right_width], dim=1)
model.load_state_dict(meta_state_dict)
optim = torch.optim.SGD(model.parameters(), args.tto_lr)
inner_loop(model, optim, tto_img, tto_rays_o, tto_rays_d,
bound, args.num_samples, args.tto_batchsize, args.tto_steps)
psnr = report_result(model, test_img, test_rays_o, test_rays_d, bound,
args.num_samples, args.test_batchsize)
print(f"test view {idx+1}, psnr:{psnr:.3f}")
test_psnrs.append(psnr)
test_psnrs = torch.stack(test_psnrs)
print("----------------------------------")
print(f"test dataset mean psnr: {test_psnrs.mean():.3f}")
print("\ncreating interpolation video ...\n")
create_interpolation_video(args, model, meta_state_dict, test_set, device)
print("\ninterpolation video created!")
if __name__ == '__main__':
test() | StarcoderdataPython |
157423 | <reponame>niooss-ledger/tpm2-pytss
"""
SPDX-License-Identifier: BSD-2
"""
from distutils import spawn
import logging
import os
import random
import socket
import subprocess
import sys
import tempfile
import time
import unittest
from time import sleep
from ctypes import cdll
from tpm2_pytss import *
class BaseTpmSimulator(object):
def __init__(self):
self.tpm = None
@staticmethod
def ready(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
def start(self):
logger = logging.getLogger("DEBUG")
logger.debug('Setting up simulator: "{}"'.format(self.tpm))
tpm = None
for _ in range(0, 10):
random_port = random.randrange(2321, 65534)
tpm = self._start(port=random_port)
if tpm:
# Wait to ensure that the simulator is ready for clients.
time.sleep(1)
if not self.ready(random_port):
continue
self.tpm = tpm
break
if not tpm:
raise SystemError("Could not start simulator")
def close(self):
self.tpm.terminate()
def __str__(self):
return self.exe
class SwtpmSimulator(BaseTpmSimulator):
exe = "swtpm"
libname = "libtss2-tcti-swtpm.so"
def __init__(self):
self._port = None
super().__init__()
self.working_dir = tempfile.TemporaryDirectory()
def _start(self, port):
cmd = [
"swtpm",
"socket",
"--tpm2",
"--server",
"port={}".format(port),
"--ctrl",
"type=tcp,port={}".format(port + 1),
"--flags",
"not-need-init",
"--tpmstate",
"dir={}".format(self.working_dir.name),
]
tpm = subprocess.Popen(cmd)
sleep(2)
if not tpm.poll():
self._port = port
return tpm
return None
@property
def tcti_name_conf(self):
if self._port is None:
return None
return f"swtpm:port={self._port}"
def get_tcti(self):
if self._port is None:
return None
return TCTILdr("swtpm", f"port={self._port}")
class IBMSimulator(BaseTpmSimulator):
exe = "tpm_server"
libname = "libtss2-tcti-mssim.so"
def __init__(self):
self._port = None
super().__init__()
self.working_dir = tempfile.TemporaryDirectory()
def _start(self, port):
cwd = os.getcwd()
os.chdir(self.working_dir.name)
try:
cmd = ["tpm_server", "-rm", "-port", "{}".format(port)]
tpm = subprocess.Popen(cmd)
sleep(2)
if not tpm.poll():
self._port = port
return tpm
return None
finally:
os.chdir(cwd)
@property
def tcti_name_conf(self):
if self._port is None:
return None
return f"mssim:port={self._port}"
def get_tcti(self):
if self._port is None:
return None
return TCTILdr("mssim", f"port={self._port}")
class TpmSimulator(object):
SIMULATORS = [
SwtpmSimulator,
IBMSimulator,
]
@staticmethod
def getSimulator():
for sim in TpmSimulator.SIMULATORS:
exe = spawn.find_executable(sim.exe)
if not exe:
print(f'Could not find executable: "{sim.exe}"', file=sys.stderr)
continue
try:
cdll.LoadLibrary(sim.libname)
except OSError as e:
print(
f'Could not load libraries: "{sim.exe}", error: {e}',
file=sys.stderr,
)
continue
return sim()
raise RuntimeError(
"Expected to find a TPM 2.0 Simulator, tried {}, got None".format(
TpmSimulator.SIMULATORS
)
)
class TSS2_BaseTest(unittest.TestCase):
def setUp(self):
self.tpm = TpmSimulator.getSimulator()
self.tpm.start()
def tearDown(self):
self.tpm.close()
class TSS2_EsapiTest(TSS2_BaseTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tcti = None
self.ectx = None
def setUp(self):
super().setUp()
try:
# use str initializer here to test string inputs to ESAPI constructor
with ESAPI(self.tpm.tcti_name_conf) as ectx:
ectx.startup(TPM2_SU.CLEAR)
except Exception as e:
self.tpm.close()
raise e
self.tcti = self.tpm.get_tcti()
self.ectx = ESAPI(self.tcti)
def tearDown(self):
self.ectx.close()
self.tcti.close()
super().tearDown()
| StarcoderdataPython |
64283 | <filename>tests/units/test_prefixes.py
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the hepunits.units.prefixes module.
"""
from pytest import approx
from math import log
from hepunits.units import mega, micro, yotta, yocto, kibi, tebi
def test_prefixes_e6():
assert 4 * mega == 1. / 0.25 / micro
def test_prefixes_e24():
assert yotta * yocto == approx(1.)
def test_prefixes_binary():
assert log(kibi, 2) == 10
assert log(tebi, 2) == 40
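# Illustrative extra check (added; not part of the original test file): reciprocal
# SI prefixes cancel to unity, using only the prefixes already imported above.
def test_prefixes_reciprocal():
    assert 2 * mega * micro == approx(2.)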
| StarcoderdataPython |
1785016 | <gh_stars>1-10
# Generated by Django 3.0.4 on 2020-03-30 16:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('client', '0010_auto_20200402_1202'),
]
operations = [
migrations.RemoveField(
model_name='volunteer',
name='full_name',
),
]
| StarcoderdataPython |
4821926 | <reponame>moshi4/GBKviz<filename>tests/test_draw_genbank_fig.py
from pathlib import Path
from typing import List
from gbkviz.draw_genbank_fig import DrawGenbankFig
from gbkviz.genbank import Genbank
def test_draw_genbank_fig(genbank_files: List[Path], tmp_path: Path):
"""test draw_genbank_fig"""
gbk_list = [Genbank(gf, gf.name) for gf in genbank_files]
gdf = DrawGenbankFig(gbk_list)
fig_png_outfile = tmp_path / "fig.png"
fig_svg_outfile = tmp_path / "fig.svg"
gdf.write_figure(fig_png_outfile)
gdf.write_figure(fig_svg_outfile)
assert fig_png_outfile.exists() and fig_svg_outfile.exists()
| StarcoderdataPython |
74995 | <gh_stars>1-10
"""
Shows a random photo from a random gallery of a random castle (it may take a while - 3 synchronous requests).
"""
from asyncio import run
from io import BytesIO
from random import choice
from PIL import Image # pip install pillow
from pymondis import Client
async def main():
async with Client() as client:
for castle_galleries in await client.get_castles():
if not castle_galleries.active:
continue
async for gallery in castle_galleries:
if gallery.empty:
continue
photos = await gallery.get_photos()
if not photos:
continue
photo_bytes = await choice(photos).large.get()
break
else:
continue
break
else:
print("Nie znaleziono żadnych zdjęć.")
Image.open(BytesIO(photo_bytes)).show()
if __name__ == "__main__":
run(main())
| StarcoderdataPython |
147652 | <reponame>beckerr-rzht/python-eduvpn-client
from typing import Optional, Iterable, Callable, List, Dict
import enum
from functools import lru_cache
from gi.repository import Gtk, GObject
from eduvpn.server import (
AnyServer as Server, InstituteAccessServer,
OrganisationServer, SecureInternetLocation, CustomServer)
from .utils import show_ui_component
class ServerGroup(enum.Enum):
INSTITUTE_ACCESS = enum.auto()
SECURE_INTERNET = enum.auto()
OTHER = enum.auto()
group_tree_component = {
ServerGroup.INSTITUTE_ACCESS: 'instituteTreeView',
ServerGroup.SECURE_INTERNET: 'secureInternetTreeView',
ServerGroup.OTHER: 'otherServersTreeView',
}
group_header_component = {
ServerGroup.INSTITUTE_ACCESS: 'instituteAccessHeader',
ServerGroup.SECURE_INTERNET: 'secureInternetHeader',
ServerGroup.OTHER: 'otherServersHeader',
}
gtk_results_components = [
# These are the components that need
# to be shown on all server list pages.
'findYourInstitutePage',
'instituteTreeView',
'secureInternetTreeView',
'otherServersTreeView',
]
gtk_search_components = [
# These are the components that need
# to be shown on all search pages.
'findYourInstituteSpacer',
'findYourInstituteImage',
'findYourInstituteLabel',
'findYourInstituteSearch',
]
# typing aliases
Model = Gtk.ListStore
@lru_cache()
def get_group_model(group: ServerGroup) -> Model:
# Model: (name: str, server: ServerType)
return Gtk.ListStore( # type: ignore
GObject.TYPE_STRING,
GObject.TYPE_PYOBJECT)
def server_to_model_data(server: Server) -> list:
return [str(server), server]
def show_result_components(builder, show: bool):
"""
Set the visibility of essential server list related components.
"""
for name in gtk_results_components:
show_ui_component(builder, name, show)
def show_search_components(builder, show: bool):
"""
Set the visibility of essential search related components.
"""
for name in gtk_search_components:
show_ui_component(builder, name, show)
def show_search_results(builder, show: bool):
"""
Set the visibility of the tree of the search result component in the UI.
"""
show_ui_component(builder, 'findYourInstituteScrolledWindow', show)
def group_servers(
servers: Iterable[Server]) -> Dict[ServerGroup, List[Server]]:
"""
Separate the servers into three groups.
"""
groups: Dict[ServerGroup, List[Server]] = {
ServerGroup.INSTITUTE_ACCESS: [],
ServerGroup.SECURE_INTERNET: [],
ServerGroup.OTHER: [],
}
for server in servers:
if isinstance(server, InstituteAccessServer):
groups[ServerGroup.INSTITUTE_ACCESS].append(server)
elif isinstance(server, (OrganisationServer, SecureInternetLocation)):
groups[ServerGroup.SECURE_INTERNET].append(server)
elif isinstance(server, CustomServer):
groups[ServerGroup.OTHER].append(server)
else:
raise TypeError(server)
return groups
def show_group_tree(builder, group: ServerGroup, show: bool):
"""
Set the visibility of the tree of result for a server type.
"""
tree_component_name = group_tree_component[group]
show_ui_component(builder, tree_component_name, show)
header_component_name = group_header_component[group]
show_ui_component(builder, header_component_name, show)
def init_server_search(builder):
"Initialize the search page components."
text_cell = Gtk.CellRendererText()
text_cell.set_property("size-points", 14)
for group in group_tree_component:
component_name = group_tree_component[group]
tree_view = builder.get_object(component_name)
if len(tree_view.get_columns()) == 0:
# Only add this column once.
column = Gtk.TreeViewColumn(None, text_cell, text=0)
tree_view.append_column(column)
model = get_group_model(group)
tree_view.set_model(model)
def exit_server_search(builder):
"Hide the search page components."
for group in group_tree_component:
show_group_tree(builder, group, False)
    show_search_results(builder, False)
def connect_selection_handlers(builder, select_callback: Callable):
"Connect the selection callback handlers for each server type."
for group in group_tree_component:
component_name = group_tree_component[group]
tree_view = builder.get_object(component_name)
selection = tree_view.get_selection()
selection.connect("changed", select_callback)
def disconnect_selection_handlers(builder, select_callback: Callable):
"Disconnect the selection callback handlers for each server type."
for group in group_tree_component:
component_name = group_tree_component[group]
tree_view = builder.get_object(component_name)
selection = tree_view.get_selection()
selection.disconnect_by_func(select_callback)
def update_search_results_for_type(builder,
group: ServerGroup,
servers: Iterable[Server]):
"""
Update the UI with the search results
for a single type of server.
"""
model = get_group_model(group) # type: ignore
# Remove the old search results.
model.clear() # type: ignore
# Add the new search results.
for server in servers:
model_data = server_to_model_data(server)
model.append(model_data) # type: ignore
# Update the UI.
model_has_results = len(model) > 0 # type: ignore
show_group_tree(builder, group, show=model_has_results)
def update_results(builder, servers: Optional[Iterable[Server]]):
"""
Update the UI with the search results.
"""
if servers is None:
        show_search_results(builder, False)
return
server_map = group_servers(servers)
for group in group_tree_component:
update_search_results_for_type(
builder,
group,
server_map.get(group, []),
)
    show_search_results(builder, True)
| StarcoderdataPython |
116451 | #! /usr/bin/env python
# IDLE Behavior
# WARNING! if person tracking is acting crazy, check that
# the person tracker node is using the correct camera!!
import rospy
import actionlib
import behavior_common.msg
import time
import rospkg
import rosparam
from std_msgs.msg import Float64
from std_msgs.msg import String
from sensor_msgs.msg import JointState
import random
from math import radians, degrees
import tf
import os, thread
from playsound import playsound
# for talking
import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
from random import randint
# SHELDON Only
# from dynamixel_controllers.srv import TorqueEnable, SetServoTorqueLimit, SetSpeed
from sheldon_servos.servo_joint_list import head_joints
from sheldon_servos.head_servo_publishers import *
from sheldon_servos.standard_servo_positions import *
from sheldon_servos.set_servo_speed import *
from sheldon_servos.set_servo_torque import *
# TB2S ONLY
# from tb2s_pantilt.set_servo_speed import *
# from sheldon_servos.set_servo_torque import *
from body_tracker_msgs.msg import BodyTracker, BodyTrackerArray
from geometry_msgs.msg import PointStamped, Point, PoseStamped, Pose, Pose2D
#import geometry_msgs.msg
import tf
# TB2S ONLY
#pub_head_pan = rospy.Publisher('/head_pan_joint/command', Float64, queue_size=1)
#pub_head_tilt = rospy.Publisher('/head_tilt_joint/command', Float64, queue_size=1)
class BehaviorAction(object):
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name,
behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
rospy.loginfo('%s: Initializing Python behavior service' % (self._action_name))
# constants
self.MAX_PAN = 1.5708 # 90 degrees
self.MAX_TILT = 0.60 # Limit vertical to assure good tracking
self.DEADBAND_ANGLE = 0.0872665 # 5 deg deadband in middle to prevent osc
self.DEFAULT_TILT_ANGLE = 0.00 # TB2S: tilt head up slightly to find people more easily
self.NAME_TIMEOUT_SECS = 8.0
self.HELLO_TIMEOUT_SECS = (10.0 * 60.0)
#====================================================================
# Behavior Settings
# Load this behavior's parameters to the ROS parameter server
rospack = rospkg.RosPack()
pkg_path = rospack.get_path(self._action_name.strip("/")) # remove leading slash
param_file_path = pkg_path + '/param/param.yaml'
rospy.loginfo('%s: Loading Params from %s', self._action_name, param_file_path)
paramlist = rosparam.load_file(param_file_path, default_namespace=self._action_name)
for params, ns in paramlist:
rosparam.upload_params(ns,params)
# Get this behavior's parameters
self.enable_body_tracking = rospy.get_param('~enable_body_tracking', True)
rospy.loginfo('%s: PARAM: enable_body_tracking = %s', self._action_name,
self.enable_body_tracking)
self.enable_random_head_movement = rospy.get_param('~enable_random_head_movement', True)
rospy.loginfo('%s: PARAM: enable_random_head_movement = %s', self._action_name,
self.enable_random_head_movement)
self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
rospy.loginfo('%s: PARAM: head_pan_joint = %s', self._action_name,
self.head_pan_joint)
self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
rospy.loginfo('%s: PARAM: head_tilt_joint = %s', self._action_name,
self.head_tilt_joint)
self.sound_effects_dir = rospy.get_param('~sound_effects_dir',
'/home/system/catkin_robot/src/sheldon/sheldon_behaviors/resources/sounds/sound_effects')
rospy.loginfo('%s: PARAM: sound_effects_dir = %s', self._action_name,
self.sound_effects_dir)
self.ding_path = os.path.join(self.sound_effects_dir, "ding.wav")
rospy.loginfo("DBG: DING PATH: %s", self.ding_path)
# playsound(self.ding_path) # test sound
#====================================================================
self.tracking = False
self.joint_state = JointState() # for reading servo positions
#self.astra_target = list()
# Remember which person we are tracking
#self.current_person_id = 0 # 0 = not tracking anyone
#self.current_person_id_time = rospy.Time.now() # start timer
self.named_person = ""
self.named_person_id = 0
self.named_person_time = rospy.Time.now() # start timer
self.named_people_seen_today = {}
# Publish current person by name, if recognized
self.pub_current_user_name = rospy.Publisher('/person/name', String, queue_size=2)
rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
self.speech_client = actionlib.SimpleActionClient("/speech_service", \
audio_and_speech_common.msg.speechAction)
self.speech_client.wait_for_server()
rospy.sleep(2)
rospy.loginfo("testing speech")
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="testing speech system")
self.speech_client.send_goal(goal)
# Initialize tf listener
#self.tf = tf.TransformListener()
# Allow tf to catch up
#rospy.sleep(2)
def joint_state_cb(self, msg):
#rospy.loginfo("%s: joint_state_cb called", self._action_name)
try:
test = msg.name.index(self.head_pan_joint)
self.joint_state = msg
except:
return
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)]
except:
return
#rospy.loginfo("%s: joint_state_cb: Current Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
#====================================================================
# 2D Tracking: Message contains person horizontal (x) and vertical (y)
# position is relative to the depth image.
def position_cb(self, msg):
#rospy.loginfo('%s: got position_cb message' % (self._action_name))
# determine highest priority person to track
# Message contains an array of people being tracked
# Note: tracking while stationary / idle is different from when person-following
person_to_track_id = 0
person_to_track_index = 0
# Priority 1: someone making a gesture
for i, person in enumerate(msg.detected_list):
if person.gesture > -1:
person_to_track_id = person.body_id
person_to_track_index = i
rospy.loginfo("DBG: GOT A GESTURE ")
break
# Priority 2: Track the closest person
# this allows robot to change focus to different people,
# whoever is closest he will talk to.
# TODO? Track closest person that has face detected to reject tracking objects?
if person_to_track_id == 0:
closest_person_distance = 100000
closest_person_index = 0
for i, person in enumerate(msg.detected_list):
if person.position2d.z < closest_person_distance:
closest_person_distance = person.position2d.z
person_to_track_id = person.body_id
person_to_track_index = i
if person_to_track_id != 0:
# found someone to track
person_info = msg.detected_list[person_to_track_index]
# DEBUG if person_info.face_found == True:
# rospy.loginfo("%s: Face Found. Name = " + person_info.name, self._action_name)
if person_info.name != "":
# We recognized this person!
rospy.loginfo("%s: Tracking Name " + person_info.name, self._action_name)
self.named_person = person_info.name
self.named_person_id = person_to_track_id # associate ID with the name
self.named_person_time = rospy.Time.now()
self.pub_current_user_name.publish(self.named_person)
# Say hello the first time we see someone in a while
if self.named_person not in self.named_people_seen_today:
#current_time = rospy.get_rostime()
self.named_people_seen_today[self.named_person] = rospy.get_rostime()
rospy.loginfo("=========== Saying Hello ===========")
goal = audio_and_speech_common.msg.speechGoal( \
text_to_speak = "hello " + self.named_person)
self.speech_client.send_goal(goal)
else:
# we've said hello to this person, but lets see how long ago it was
time_since_hello = rospy.get_rostime() - self.named_people_seen_today[self.named_person]
#rospy.loginfo("%s: DEBUG time_since_hello = %f", \
# self._action_name, time_since_hello.to_sec())
if time_since_hello > rospy.Duration.from_sec(self.HELLO_TIMEOUT_SECS):
self.named_people_seen_today[self.named_person] = rospy.get_rostime()
rospy.loginfo("=========== Saying Hello Again ===========")
goal = audio_and_speech_common.msg.speechGoal( \
text_to_speak = "hello again " + self.named_person)
self.speech_client.send_goal(goal)
else:
if self.named_person != "":
# We are tracking a specific person by name, but did not get a name this frame
if person_to_track_id != self.named_person_id:
# different user, clear the name
rospy.loginfo("%s: Lost user %s", self._action_name, self.named_person)
self.named_person = ""
self.named_person_id = 0
self.pub_current_user_name.publish(self.named_person)
else:
# still the same ID, but sometimes ID's don't get changed
time_since_last_name = rospy.Time.now() - self.named_person_time
#rospy.loginfo("%s: DEBUG time_since_last_name = %f", \
# self._action_name, time_since_last_name.to_sec())
if time_since_last_name > rospy.Duration.from_sec(self.NAME_TIMEOUT_SECS):
rospy.loginfo("%s: User Name %s Timed out", self._action_name, self.named_person)
self.named_person = ""
self.named_person_id = 0
self.pub_current_user_name.publish(self.named_person)
#self.current_person_id = person_to_track_id
#self.current_person_id_time = rospy.Time.now()
# Track person
# position in radians from center of camera lens
delta_angle_x = person_info.position2d.x
delta_angle_y = person_info.position2d.y
# Uncomment this to debug
# rospy.loginfo("%s: Tracking Person Index: %d, ID: %d x: %f y: %f", \
# self._action_name, person_to_track_index, person_to_track_id, delta_angle_x, delta_angle_y )
# Get the current servo pan and tilt position
try:
current_pan = self.joint_state.position[
self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[
self.joint_state.name.index(self.head_tilt_joint)] * -1.0
except:
return
#rospy.loginfo("%s: Body Tracker: Current Servo: Pan = %f, Tilt = %f",
# self._action_name, current_pan, current_tilt)
# add target position to current servo position
pan_angle = current_pan + (delta_angle_x * 0.95) #shoot for less
tilt_angle = current_tilt + (delta_angle_y * 0.95)
# rospy.loginfo("%s: Body Tracker: Servo Command: Pan = %f, Tilt = %f",
# self._action_name, pan_angle, tilt_angle)
# command servos to move to target, if not in deadband
pan_on_target = True
tilt_on_target = True
if abs(delta_angle_x) > self.DEADBAND_ANGLE:
if abs(pan_angle) < self.MAX_PAN:
pub_head_pan.publish(pan_angle) # Send servo command
pan_on_target = False
if abs(delta_angle_y) > self.DEADBAND_ANGLE:
if abs(pan_angle) < self.MAX_TILT:
pub_head_tilt.publish(-tilt_angle) # Send servo command
tilt_on_target = False
#if pan_on_target and tilt_on_target:
# rospy.loginfo("%s: On target ID %d", self._action_name, person_id)
#else:
# rospy.loginfo("%s: ID %d: Pan delta = %f, Tilt Delta = %f",
# self._action_name, person_id, delta_angle_x, delta_angle_y)
# Max pan/tilt is constrained by system. Add additional constraints if needed
self.tracking = True # don't do idle movements
# SHELDON ONLY
#side_tilt_angle = 0.0
#pub_head_sidetilt.publish(side_tilt_angle)
#self.last_target_time = rospy.Time.now() # reset timer
#====================================================================
# Main loop
def execute_cb(self, goal):
# Idle Behavior has gone Active!
# Set servos speed and torque
SetServoTorque(0.5, head_joints)
SetServoSpeed(0.35, head_joints)
# Move head and arms to ready position
all_home()
# Center Camera Head
pub_head_pan.publish(0.0)
pub_head_tilt.publish(self.DEFAULT_TILT_ANGLE) # tilt head up to find people more easily
#pub_head_sidetilt.publish(0.0) # SHELDON ONLY
if self.enable_random_head_movement:
rospy.loginfo('%s: random head movements enabled...' % (self._action_name))
else:
rospy.loginfo('%s: random head movements DISABLED' % (self._action_name))
if self.enable_body_tracking:
rospy.loginfo('%s: waiting for person tracking...' % (self._action_name))
else:
rospy.loginfo('%s: body tracking DISABLED' % (self._action_name))
if self.enable_body_tracking:
# Enable Subscribers
#position_sub = rospy.Subscriber("/body_tracker/position", \
# BodyTracker, self.position_cb, queue_size=1)
position_sub = rospy.Subscriber("/body_tracker_array/people", \
BodyTrackerArray, self.position_cb, queue_size=1)
# pose2d_sub = rospy.Subscriber("/body_tracker/pose2d", Pose2D, self.pose_2d_cb, queue_size=1)
servo_sub = rospy.Subscriber('/joint_states', JointState, self.joint_state_cb) # servos
while True:
if self._as.is_preempt_requested():
break
if not self.tracking and self.enable_random_head_movement:
# Idle: Move head to constrained random location, at random intervals
tiltAmt = random.uniform(-0.3, 0.3)
pub_head_tilt.publish(tiltAmt)
rospy.loginfo('%s: Doing Random Movement' % (self._action_name))
panAmt = random.uniform(-0.5, 0.5)
pub_head_pan.publish(panAmt)
# SHELDON ONLY
sidetiltAmt = random.uniform(-0.05, 0.05)
pub_head_sidetilt.publish(sidetiltAmt)
self.tracking = False # do Idle if tracking gets lost
# delay before next loop
randSleep = random.randint(10, 35) # tenth seconds
for i in range(1, randSleep):
if self._as.is_preempt_requested():
break
else:
time.sleep(0.1)
# Behavior Exit / Cleanup
if self.enable_body_tracking:
position_sub.unregister()
servo_sub.unregister()
# Idle always runs until preempted
rospy.loginfo('%s: Behavior preempted' % self._action_name)
self._as.set_preempted()
if __name__ == '__main__':
rospy.init_node('idle_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
| StarcoderdataPython |
82374 | <gh_stars>0
from __future__ import unicode_literals
from .models import elb_backend
mock_elb = elb_backend.decorator
| StarcoderdataPython |
3222336 | <filename>solutions/1.5.py
# One Away
def one_away(string_1, string_2):
# One Character Removed from String 2
if (len(string_1) - len(string_2)) == 1:
        # iterate over the shorter string so indexing never runs past its end
        for pos in range(len(string_2)):
if string_1[pos] != string_2[pos]:
if string_1[pos + 1:] == string_2[pos:]:
return True
return False
return True
# One Character Added
elif (len(string_2) - len(string_1)) == 1:
        for pos in range(len(string_1)):
if string_2[pos] != string_1[pos]:
if string_2[pos + 1:] == string_1[pos:]:
return True
return False
return True
# Same Amount of Characters
elif len(string_2) == len(string_1):
if string_1 == string_2:
return True
for pos in range(len(string_2)):
if string_1[pos] != string_2[pos]:
if string_1[pos + 1:] == string_2[pos + 1:]:
return True
return False
return True
else:
return False
if __name__ == '__main__':
print(one_away("tast", "test"))
| StarcoderdataPython |
1675412 | <reponame>adriangrepo/qreservoir
import unittest
import logging
from PyQt4.QtGui import QApplication, QWidget
from PyQt4.QtTest import QTest
import sys
from db.test.dummydbsetup import DummyDbSetup
from gui.wellplot.model.wellplotmodelaccess import WellPlotModelAccess
from db.windows.wellplot.template.wellplottemplatedao import WellPlotTemplateDao
from statics.templates.wellplottype import WellPlotType
from gui.util.wellplotutils import WellPlotUtils
from gui.wellplot.track.domaintrackwidget import DomainTrackWidget
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class DomainTrackWidgetTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DomainTrackWidgetTest, self).__init__(*args, **kwargs)
self.dummyDbSetup = DummyDbSetup()
self.app = QApplication(sys.argv)
def getWellPLotdata(self):
self.dummyDbSetup.setupDatabase()
well = self.dummyDbSetup.createWell()
logs = self.dummyDbSetup.create3Logs(well.id)
wellPlotModelAccess = WellPlotModelAccess()
uid = 42
templateDao = WellPlotTemplateDao()
allTemplates = templateDao.getAllWellPlotTemplates()
template = None
for item in allTemplates:
if item.uid == WellPlotType.ALLLOGS.uid:
template = item
wellPlotData = wellPlotModelAccess.createWellPlotData(logs, uid, well, template)
return wellPlotData
def test_generateDomainPlot(self):
#QWidget: Must construct a QApplication before a QPaintDevice
app = QApplication(sys.argv)
self.dummyDbSetup.setupDatabase()
well = self.dummyDbSetup.createWell()
logs = self.dummyDbSetup.create1Log(well.id)
track = QWidget()
track.setFixedWidth(180)
wellPlotData = self.getWellPLotdata()
self.assertIsNotNone(wellPlotData, "wellPlotData is None")
domainStart, domainStop, domainStep= WellPlotUtils.calculateStartStopStep(wellPlotData.getLogTrackDatas())
plots = []
i = 0
for domainTrackData in wellPlotData.getZAxisDatas():
if i == 0:
domainTrackWidget = DomainTrackWidget(well, isPrimaryDomainTrack = True)
else:
domainTrackWidget = DomainTrackWidget(well, isPrimaryDomainTrack = False)
domainTrackWidget.generateDomainPlot(domainTrackData, wellPlotData, domainStart, domainStop, domainStep)
plots.append(domainTrackWidget)
self.assertEqual(1, len(plots), "length incorrect")
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
2671 | import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
# resolve path relative to the path file
self._cache_path = path(cache_path)
# ad a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
# reload cache object form disc, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
# size is not considering child's
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
None values are considered as invalid results (ToughRequest) is producing none for exceptions
set -1 if you want to store "No distance"
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
            self._cache[key] = self._pack(value)
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
helper to cache multi (string)key values.
They are sorted before concatenation, therefore an order is determined.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
        list_of_keys = sorted(list_of_keys)
return "#".join(list_of_keys)
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
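# Minimal usage sketch (added for illustration; not part of the original module).
# The cache name below is made up, and the location resolved by root.path() must be
# writable for persist() to succeed.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo = KeyValueCache('demo_cache')
    demo.cache('distance#berlin#paris', 878)
    print(demo.get('distance#berlin#paris'))
    # multi-key helper: key order does not matter because keys are sorted first
    demo.cache_complex(['paris', 'berlin'], 878)
    print(demo.get_complex(['berlin', 'paris']))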
| StarcoderdataPython |
164005 | """
Internal tools needed to query the index based on rectangles
and position/radius. Based on tools in argodata:
https://github.com/ArgoCanada/argodata/blob/master/R/utils.R#L54-L165
"""
import warnings
import numpy as np
def geodist_rad(long1, lat1, long2, lat2, R=6371.010):
delta_long = long2 - long1
delta_lat = lat2 - lat1
a = np.sin(delta_lat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(delta_long / 2) ** 2
c = 2 * np.arcsin(np.minimum(1, np.sqrt(a)))
return R * c
def geodist_lnglat(xy1, xy2, R=6371.010):
return geodist_rad(
xy1['x'] * np.pi / 180, xy1['y'] * np.pi / 180,
xy2['x'] * np.pi / 180, xy2['y'] * np.pi / 180,
R=R
)
def rect_intersects(r1, r2):
limits = {
'xmin': np.maximum(r1['xmin'], r2['xmin']),
'xmax': np.minimum(r1['xmax'], r2['xmax']),
'ymin': np.maximum(r1['ymin'], r2['ymin']),
'ymax': np.minimum(r1['ymax'], r2['ymax'])
}
return (limits['xmax'] >= limits['xmin']) & (limits['ymax'] >= limits['ymin'])
def rect_contains(r, xy):
return (xy['x'] >= r['xmin']) & \
(xy['x'] <= r['xmax']) & \
(xy['y'] >= r['ymin']) & \
(xy['y'] <= r['ymax'])
def rect_split_dateline(r):
is_wrap = r['xmax'] < r['xmin']
xmin1 = np.asarray(r['xmin']).copy()
xmin1[is_wrap] = -180
xmin2 = r['xmin']
xmax1 = r['xmax']
xmax2 = np.asarray(r['xmax']).copy()
xmax2[is_wrap] = 180
return (
{'xmin': xmin1, 'ymin': r['ymin'], 'xmax': xmax1, 'ymax': r['ymax']},
{'xmin': xmin2, 'ymin': r['ymin'], 'xmax': xmax2, 'ymax': r['ymax']}
)
def normalize_lat(latitude):
# some latitude values are -99.999 instead of missing
latitude = np.asfarray(latitude).copy()
latitude[latitude == -99.999] = np.nan
return latitude
def normalize_lng(longitude):
# -999.999 is occasionally used to denote missing in the profile index
# some longitudes are greater than 180, but we handle that more robustly
# here.
with warnings.catch_warnings():
# Suppress warnings of 'invalid remainder' because we know there are
# nan values already.
warnings.simplefilter("ignore")
longitude = np.asfarray(longitude).copy()
longitude[longitude == -999.999] = np.nan
longitude_inf = np.isinf(longitude)
normalized = np.asfarray(((longitude + 180.0) % 360) - 180.0)
normalized[longitude == 180.0] = 180.0
normalized[longitude_inf] = longitude[longitude_inf]
return normalized
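# Small self-check (added for illustration; not part of the original module):
# great-circle distance between two lon/lat points and a rectangle containment test.
if __name__ == '__main__':
    paris = {'x': 2.3522, 'y': 48.8566}
    berlin = {'x': 13.4050, 'y': 52.5200}
    print(geodist_lnglat(paris, berlin))  # roughly 878 km
    box = {'xmin': 0.0, 'xmax': 15.0, 'ymin': 45.0, 'ymax': 55.0}
    print(rect_contains(box, berlin))     # True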
| StarcoderdataPython |
1690121 | ### please change the corresponding path prefix ${PATH}
import sys, os, errno
import numpy as np
import csv
import json
import copy
assert len(sys.argv) == 2, "Usage: python log_analysis.py <test_log>"
log = sys.argv[1]
with open(log, 'r') as f:
lines = f.read().splitlines()
split='test'
with open('${PATH}/Charades/Charades_v1_%s.csv'%split, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
data = [row for row in reader][1:]
vid_length={}
for i, row in enumerate(data):
vid = row[0]
length= float(row[10])
vid_length[vid]=length
def nms(dets, thresh=0.4):
"""Pure Python NMS baseline."""
if len(dets) == 0: return []
x1 = dets[:, 0]
x2 = dets[:, 1]
scores = dets[:, 2]
lengths = x2 - x1
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
inter = np.maximum(0.0, xx2 - xx1)
ovr = inter / (lengths[i] + lengths[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def get_segments(data):
segments = []
vid = 'Background'
find_next = False
tmp = {'label' : 'c0', 'segment': [0, 0, 0]}
for l in data:
# video name and sliding window length
if "fg_name :" in l:
vid = l.split('/')[7]
continue
# frame index, time, confident score
elif "frames :" in l:
start_frame=int(l.split()[4])
stride = int(l.split()[6].split(']')[0])
elif "activity:" in l:
label = int(l.split()[1])
tmp['label'] ='c%03d' % (label-1)
find_next = True
elif "im_detect" in l:
return vid, segments
elif find_next:
tmp1 = copy.deepcopy(tmp)
left = ( float(l.split()[1])*stride + start_frame) / 25.0
right = ( float(l.split()[2])*stride + start_frame) / 25.0
score = float(l.split()[3].split(']')[0])
tmp1['segment'] = [left, right, score]
segments.append(tmp1)
segmentations = {}
predict_data = []
for l in lines:
if "gt_classes :" in l:
predict_data = []
predict_data.append(l)
if "im_detect:" in l:
vid, segments = get_segments(predict_data)
if vid not in segmentations:
segmentations[vid] = []
segmentations[vid] += segments
res = {}
for vid, vinfo in segmentations.iteritems():
labels = list(set([d['label'] for d in vinfo]))
res[vid] = []
for lab in labels:
nms_in = [d['segment'] for d in vinfo if d['label'] == lab]
keep = nms(np.array(nms_in), thresh=0.4)
for i in keep:
tmp = {'label':lab, 'segment': nms_in[i]}
res[vid].append(tmp)
SAMPLE = 25
text_file = open("results.txt", "w")
text_file.close()
text_file = open("results.txt", "w")
for vid, vinfo in res.iteritems():
length = len(os.listdir('../../../preprocess/charades/frames/'+vid))
for i in xrange(SAMPLE):
tmp = '%s %d' % (vid, i)
t = i *vid_length[vid] * 1.0 / SAMPLE
select = [d for d in vinfo if d['segment'][0] <= t and d['segment'][1] >= t]
scores = {}
for d in select:
if d['label'] not in scores:
scores[d['label']] = d['segment'][2]
else:
if d['segment'][2] > scores[d['label']]:
scores[d['label']] = d['segment'][2]
for j in xrange(157):
lab = 'c%03d'%j
tmp += ' ' + (str(scores[lab]) if lab in scores else '0')
text_file.write(tmp + '\n')
text_file.close()
| StarcoderdataPython |
4803100 | """
Cokriging example from [Forrester 2007] to show
MultiFiMetaModel and MultiFiCoKrigingSurrogate usage
"""
import numpy as np
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import CaseIteratorDriver
from openmdao.lib.components.api import MultiFiMetaModel
from openmdao.lib.surrogatemodels.api import MultiFiCoKrigingSurrogate, KrigingSurrogate
class Model(Component):
x = Float(0, iotype="in")
f_x = Float(0.0, iotype="out")
def execute(self):
x = self.x
self.f_x = ((6*x-2)**2)*np.sin((6*x-2)*2)
class LowFidelityModel(Component):
x = Float(0.0, iotype="in")
f_x = Float(0.0, iotype="out")
def execute(self):
x = self.x
self.f_x = 0.5*((6*x-2)**2)*np.sin((6*x-2)*2)+(x-0.5)*10. - 5
class HighFidelityModel(Model):
pass
class CasesBuilder(Assembly):
def __init__(self, model, cases):
self.instance = model
self.cases = cases
super(CasesBuilder, self).__init__()
def configure(self):
self.add("model", self.instance)
self.add("driver", CaseIteratorDriver())
self.driver.workflow.add('model')
self.driver.add_parameter("model.x", low=0, high=1)
self.driver.add_response("model.f_x")
self.driver.case_inputs.model.x = self.cases
self.create_passthrough('driver.case_inputs.model.x')
self.create_passthrough('driver.case_outputs.model.f_x')
class Simulation(Assembly):
def __init__(self, surrogate, nfi=1):
self.surrogate = surrogate
self.nfi = nfi
super(Simulation, self).__init__()
def configure(self):
# Expensive and Cheap DOE (note: have to be nested)
doe_e = [0.0, 0.4, 0.6, 1.0]
doe_c = [0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9] + doe_e
self.add('hifi_cases', CasesBuilder(HighFidelityModel(), doe_e))
self.add('lofi_cases', CasesBuilder(LowFidelityModel(), doe_c))
# MetaModel
self.add("meta_model", MultiFiMetaModel(params=('x', ),
responses=('f_x', ), nfi=self.nfi))
self.meta_model.default_surrogate = self.surrogate
self.connect('hifi_cases.x' , 'meta_model.params.x')
self.connect('hifi_cases.f_x', 'meta_model.responses.f_x')
if self.nfi > 1:
self.connect('lofi_cases.x' , 'meta_model.params.x_fi2')
self.connect('lofi_cases.f_x', 'meta_model.responses.f_x_fi2')
# Iteration Hierarchy
self.add('mm_checker', CaseIteratorDriver())
self.add('model', Model())
self.mm_checker.add_parameter("meta_model.x", low=0, high=1)
self.mm_checker.add_parameter("model.x", low=0, high=1)
self.mm_checker.add_response("model.f_x")
self.mm_checker.add_response("meta_model.f_x")
ngrid = 100
self.mm_checker.case_inputs.meta_model.x = np.linspace(0,1,ngrid)
self.mm_checker.case_inputs.model.x = np.linspace(0,1,ngrid)
self.driver.workflow.add('hifi_cases')
if self.nfi > 1:
self.driver.workflow.add('lofi_cases')
self.driver.workflow.add('mm_checker')
if __name__ == "__main__":
surrogate = MultiFiCoKrigingSurrogate()
# Co-kriging with 2 levels of fidelity
sim_cok = Simulation(surrogate, nfi=2)
sim_cok.run()
predicted_cok = np.array([d.mu for d in sim_cok.mm_checker.case_outputs.meta_model.f_x])
sigma_cok = np.array([d.sigma for d in sim_cok.mm_checker.case_outputs.meta_model.f_x])
# Co-kriging with 1 level of fidelity a.k.a. kriging
    surrogate = KrigingSurrogate()  # single-fidelity Kriging, for comparison with co-kriging
sim_k = Simulation(surrogate, nfi=1)
sim_k.run()
predicted_k = np.array([d.mu for d in sim_k.mm_checker.case_outputs.meta_model.f_x])
sigma_k = np.array([d.sigma for d in sim_k.mm_checker.case_outputs.meta_model.f_x])
actual = sim_k.mm_checker.case_outputs.model.f_x
check = sim_k.mm_checker.case_inputs.meta_model.x
import pylab as plt
plt.figure(2)
plt.ioff()
plt.plot(check, actual, 'k', label='True f')
plt.plot(sim_cok.hifi_cases.x, sim_cok.hifi_cases.f_x,'ok',label="High Fi")
plt.plot(sim_cok.lofi_cases.x, sim_cok.lofi_cases.f_x,'or',label="Low Fi")
plt.plot(check, predicted_cok, 'g', label='Co-kriging')
plt.plot(check, predicted_cok + 2*sigma_cok, 'g', alpha=0.5, label='I95%')
plt.plot(check, predicted_cok - 2*sigma_cok, 'g', alpha=0.5)
plt.fill_between(check, predicted_cok + 2*sigma_cok,
predicted_cok - 2*sigma_cok, facecolor='g', alpha=0.2)
plt.plot(check, predicted_k, 'b', label='Krigring')
plt.plot(check, predicted_k + 2*sigma_k, 'b', alpha=0.5, label='I95%')
plt.plot(check, predicted_k - 2*sigma_k, 'b', alpha=0.5)
plt.fill_between(check, predicted_k + 2*sigma_k,
predicted_k - 2*sigma_k, facecolor='b', alpha=0.2)
plt.legend(loc='best')
plt.show()
# RMSE CoKriging
error = 0.
for a,p in zip(actual,predicted_cok):
error += (a-p)**2
error = (error/len(actual))
print "RMSE Cokriging = %g" % error
# RMSE Kriging
error = 0.
for a,p in zip(actual, predicted_k):
error += (a-p)**2
error = (error/len(actual))
print "RMSE Kriging = %g" % error | StarcoderdataPython |
4811517 | <filename>record_a_gif.py<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 17:16:29 2020
@author: yiningma
"""
import os
import imageio
import numpy as np
import gym
from Utils.envGym import envGym
from Utils.model_loader import model_loader
def record(model,env,seed,max_len,num):
video_length = max_len
game = model.config['rl_params']['env_name']
images = []
obs = env.reset()
img = env.render(mode='rgb_array')
images.append(img)
_r = 0
_ret = 0
print('run ',num)
for i in range(video_length+1):
print("\r", f'{i}/{video_length}', end = '')
action = model.get_action(obs, False)
obs, rew, done, info = env.step(action)
_r += rew
_ret += int(rew>0) - int(rew<0)
img = env.render(mode='rgb_array')
images.append(img)
if done:
break
env.close()
output_dir = f'./saved_gifs/{game}'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
imageio.mimsave(f'{output_dir}/{env_id}_r{num}_r{_r}({_ret}).gif', [np.array(img) for i, img in enumerate(images) if i%2 == 0], fps=29)
print('\n done', f'{output_dir}/{env_id}_r{num}_r{_r}({_ret}).gif saved.')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, help='choices:SpaceInvaders, Breakout, BeamRider, Qbert, ...'
, required=True)
parser.add_argument('--model_id', type=str, required=True)
parser.add_argument('--seed', type=str, default='3')
parser.add_argument('--num', type=int, default='5')
parser.add_argument('--max_len', type=int, default='6000')
args = parser.parse_args()
params = vars(args)
env_id = params['env']
model_id = params['model_id']
seed = params['seed']
max_len = params['max_len']
num = params['num']
model_dir = 'saved_models/sac_discrete_atari_'+env_id+'-v4/sac_discrete_atari_'+env_id+'-v4_s'+seed+'/'
model_save_name = 'tf1_save' + model_id
model = model_loader(model_dir, model_save_name)
original_env = gym.make(model.config['rl_params']['env_name'])
env = envGym(original_env, 4)
env.reset()
for _ in range(num):
record(model,env,seed,max_len,_) | StarcoderdataPython |
3225363 | <gh_stars>10-100
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: __init__
:synopsis: module that contains classes that mapped to the configuration file.
"""
from abc import abstractmethod
from typing import (
List,
Union,
Dict,
Tuple
)
from mockintosh.constants import PYBARS, JINJA
from mockintosh.performance import PerformanceProfile
from mockintosh.exceptions import (
CommaInTagIsForbidden
)
from mockintosh.templating import TemplateRenderer
class ConfigService:
services = []
def __init__(
self,
_type: str,
name: Union[str, None],
internal_service_id: Union[int, None]
):
self.type = _type
self.name = name
self.external_file_paths = []
self._impl = None
if internal_service_id is None:
self.internal_service_id = len(ConfigService.services)
ConfigService.services.append(self)
else:
self.internal_service_id = internal_service_id
ConfigService.services[internal_service_id] = self
def get_name(self) -> str:
return self.name if self.name is not None else ''
@abstractmethod
def get_hint(self):
raise NotImplementedError
def add_external_file_path(self, external_file_path) -> None:
self.external_file_paths.append(external_file_path)
def destroy(self) -> None:
for external_file_path in self.external_file_paths:
external_file_path.destroy()
class ConfigContainsTag:
def forbid_comma_in_tag(self, data: list):
for row in data:
if isinstance(row, (str, ConfigExternalFilePath)):
return
elif isinstance(row, dict):
for key, value in row.items():
if key != 'tag':
continue
if ',' in value: # pragma: no cover
raise CommaInTagIsForbidden(value)
else:
if row.tag is not None and ',' in row.tag:
raise CommaInTagIsForbidden(row.tag)
class ConfigExternalFilePath:
files = []
def __init__(self, path: str, service: ConfigService = None):
self.path = path
self._index = len(ConfigExternalFilePath.files)
ConfigExternalFilePath.files.append(self)
if service is not None:
service.add_external_file_path(self)
def destroy(self) -> None:
ConfigExternalFilePath.files.pop(self._index)
for i, external_file_path in enumerate(ConfigExternalFilePath.files):
external_file_path._index = i
class ConfigDataset(ConfigContainsTag):
def __init__(self, payload: Union[List[dict], str, ConfigExternalFilePath]):
self.payload = payload
if isinstance(self.payload, list):
self.forbid_comma_in_tag(self.payload)
class ConfigSchema:
def __init__(self, payload: Union[dict, ConfigExternalFilePath]):
self.payload = payload
class ConfigHeaders:
def __init__(self, payload: Dict[str, Union[str, List[str], ConfigExternalFilePath]]):
self.payload = payload
class ConfigAmqpProperties:
def __init__(
self,
content_type=None,
content_encoding=None,
delivery_mode=None,
priority=None,
correlation_id=None,
reply_to=None,
expiration=None,
message_id=None,
timestamp=None,
_type=None,
user_id=None,
app_id=None,
cluster_id=None
):
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_mode = delivery_mode
self.priority = priority
self.correlation_id = correlation_id
self.reply_to = reply_to
self.expiration = expiration
self.message_id = message_id
self.timestamp = timestamp
self.type = _type
self.user_id = user_id
self.app_id = app_id
self.cluster_id = cluster_id
class ConfigConsume:
def __init__(
self,
queue: str,
group: Union[str, None] = None,
key: Union[str, None] = None,
schema: Union[ConfigSchema, None] = None,
value: Union[str, None] = None,
headers: Union[ConfigHeaders, None] = None,
amqp_properties: Union[ConfigAmqpProperties, None] = None,
capture: int = 1
):
self.queue = queue
self.group = group
self.key = key
self.schema = schema
self.value = value
self.headers = headers
self.amqp_properties = amqp_properties
self.capture = capture
class ConfigProduce:
def __init__(
self,
queue: str,
value: Union[str, ConfigExternalFilePath],
create: bool = False,
tag: Union[str, None] = None,
key: Union[str, None] = None,
headers: Union[ConfigHeaders, None] = None,
amqp_properties: Union[ConfigAmqpProperties, None] = None
):
self.queue = queue
self.value = value
self.create = create
self.tag = tag
self.key = key
self.headers = headers
self.amqp_properties = amqp_properties
class ConfigMultiProduce:
def __init__(self, produce_list: List[ConfigProduce]):
self.produce_list = produce_list
class ConfigActor:
def __init__(
self,
name: Union[str, None] = None,
dataset: Union[ConfigDataset, None] = None,
produce: Union[ConfigMultiProduce, ConfigProduce, None] = None,
consume: Union[ConfigConsume, None] = None,
delay: Union[int, float, None] = None,
limit: Union[int, None] = None,
multi_payloads_looped: bool = True,
dataset_looped: bool = True,
):
self.name = name
self.dataset = dataset
self.produce = produce
self.consume = consume
self.delay = delay
self.limit = limit
self.multi_payloads_looped = multi_payloads_looped
self.dataset_looped = dataset_looped
class ConfigAsyncService(ConfigService):
services = []
def __init__(
self,
_type: str,
address: Union[str, None] = None,
actors: List[ConfigActor] = [],
name: Union[str, None] = None,
ssl: bool = False,
internal_service_id: Union[int, None] = None
):
super().__init__(_type, name, internal_service_id)
ConfigAsyncService.services.append(self)
self.type = _type
self.address = address
self.actors = actors
self.ssl = ssl
def get_hint(self):
return '%s://%s' % (self.type, self.address) if self.name is None else self.name
def address_template_renderer(
self,
template_engine: str,
rendering_queue,
) -> Tuple[str, dict]:
if template_engine == PYBARS:
from mockintosh.hbs.methods import env
elif template_engine == JINJA:
from mockintosh.j2.methods import env
renderer = TemplateRenderer()
self.address, _ = renderer.render(
template_engine,
self.address,
rendering_queue,
inject_methods=[
env
]
)
class ConfigResponse:
def __init__(
self,
headers: Union[ConfigHeaders, None] = None,
status: Union[str, int] = 200,
body: Union[str, ConfigExternalFilePath, None] = None,
use_templating: bool = True,
templating_engine: str = PYBARS,
tag: Union[str, None] = None,
trigger_async_producer: Union[str, int, None] = None
):
self.headers = headers
self.status = status
self.body = body
self.use_templating = use_templating
self.templating_engine = templating_engine
self.tag = tag
self.trigger_async_producer = trigger_async_producer
def oas(self, status_data: dict):
new_headers = {k.title(): v for k, v in self.headers.payload.items()}
if 'Content-Type' in new_headers:
if new_headers['Content-Type'].startswith('application/json'):
status_data = {
'content': {
'application/json': {
'schema': {}
}
}
}
status_data['headers'] = {}
for key in new_headers.keys():
status_data['headers'][key] = {
'schema': {
'type': 'string'
}
}
class ConfigMultiResponse(ConfigContainsTag):
def __init__(self, payload: List[Union[ConfigResponse, ConfigExternalFilePath, str]]):
self.payload = payload
self.forbid_comma_in_tag(self.payload)
class ConfigBody:
def __init__(
self,
schema: ConfigSchema = None,
text: Union[str, None] = None,
graphql_query: Union[str, ConfigExternalFilePath, None] = None,
graphql_variables: Dict[str, str] = None,
urlencoded: Dict[str, str] = None,
multipart: Dict[str, str] = None
):
self.schema = schema
self.text = text
self.urlencoded = urlencoded
self.multipart = multipart
self.graphql_query = graphql_query
self.graphql_variables = graphql_variables
class ConfigEndpoint:
def __init__(
self,
path: str,
_id: Union[str, None] = None,
comment: Union[str, None] = None,
method: str = 'GET',
query_string: Dict[str, str] = {},
headers: Dict[str, str] = {},
body: Union[ConfigBody, None] = None,
dataset: Union[ConfigDataset, None] = None,
response: Union[ConfigResponse, ConfigExternalFilePath, str, ConfigMultiResponse, None] = None,
multi_responses_looped: bool = True,
dataset_looped: bool = True,
performance_profile: Union[str, None] = None
):
self.path = path
self.id = _id
self.comment = comment
self.method = method.upper()
self.query_string = query_string
self.headers = headers
self.body = body
self.dataset = dataset
self.response = response
self.multi_responses_looped = multi_responses_looped
self.dataset_looped = dataset_looped
self.performance_profile = performance_profile
class ConfigHttpService(ConfigService):
def __init__(
self,
port: int,
name: Union[str, None] = None,
hostname: Union[str, None] = None,
ssl: bool = False,
ssl_cert_file: Union[str, None] = None,
ssl_key_file: Union[str, None] = None,
management_root: Union[str, None] = None,
oas: Union[str, ConfigExternalFilePath, None] = None,
endpoints: List[ConfigEndpoint] = [],
performance_profile: Union[str, None] = None,
fallback_to: Union[str, None] = None,
internal_service_id: Union[int, None] = None
):
super().__init__('http', name, internal_service_id)
self.port = port
self.hostname = hostname
self.ssl = ssl
self.ssl_cert_file = ssl_cert_file
self.ssl_key_file = ssl_key_file
self.management_root = management_root
self.oas = oas
self.endpoints = endpoints
self.performance_profile = performance_profile
self.fallback_to = fallback_to
def get_hint(self):
return '%s://%s:%s%s' % (
'https' if self.ssl else 'http',
self.hostname if self.hostname is not None else (
'localhost'
),
self.port,
' - %s' % self.name if self.name is not None else ''
)
class ConfigGlobals:
def __init__(
self,
headers: Union[ConfigHeaders, None],
performance_profile: Union[str, None] = None
):
self.headers = headers
self.performance_profile = performance_profile
class ConfigManagement:
def __init__(
self,
port: str,
ssl: bool = False,
ssl_cert_file: Union[str, None] = None,
ssl_key_file: Union[str, None] = None
):
self.port = port
self.ssl = ssl
self.ssl_cert_file = ssl_cert_file
self.ssl_key_file = ssl_key_file
class ConfigPerformanceProfile:
def __init__(
self,
ratio: Union[int, float],
delay: Union[int, float] = 0.0,
faults: Union[dict, None] = None
):
self.ratio = ratio
self.delay = delay
self.faults = {} if faults is None else faults
self.actuator = PerformanceProfile(
self.ratio,
delay=self.delay,
faults=self.faults
)
class ConfigRoot:
def __init__(
self,
services: List[Union[ConfigHttpService, ConfigAsyncService]],
management: Union[ConfigManagement, None] = None,
templating_engine: str = PYBARS,
_globals: Union[ConfigGlobals, None] = None,
performance_profiles: Dict[str, ConfigPerformanceProfile] = {}
):
self.services = services
self.management = management
self.templating_engine = templating_engine
self.globals = _globals
self.performance_profiles = performance_profiles
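# Minimal construction sketch (added for illustration; it only exercises the classes
# defined above and does not claim to mirror how mockintosh wires them up internally).
if __name__ == '__main__':
    hello = ConfigEndpoint(
        path='/hello',
        method='GET',
        response=ConfigResponse(status=200, body='hello world'),
    )
    service = ConfigHttpService(port=8001, name='demo', endpoints=[hello])
    root = ConfigRoot(services=[service])
    print(root.services[0].get_hint())  # http://localhost:8001 - demo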
| StarcoderdataPython |
1606474 | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:55:22 2020
@author: Lucas
"""
print ("Conversor de unidade de medida")
velocidade = float(input("Digite sua velocidade: "))
print ("""Para qual velocidade pretende converter?
[1] Para Km/m
[2] Para m/s""")
opção = int(input("Sua opção: "))
if opção == 1:
k = velocidade*3.6
print(f"V = {k} m/s")
else:
m = velocidade/3.6
print(f"V = {m} Km/h")
| StarcoderdataPython |
3202946 | from typing import Tuple
import matplotlib.pyplot as plt
import pandas as pd
from polar_bearings.opt_pah_finder_robotics.potential_field_planning import (
potential_field_planning,
)
def main(
filepath: str = "ice_thickness_01-01-2020.csv",
rescaling_factor: int = 2,
grid_size: float = 0.1,
robot_radius: float = 0.01,
):
"""Loads the ice thickness data and plans a route over safe ice."""
df = pd.read_csv(filepath)
df_rescaled = df.iloc[::rescaling_factor, :]
gx, gy, sx, sy, ox, oy = process_data(df_rescaled)
plt.grid(True)
plt.axis("equal")
# path generation
_, _ = potential_field_planning(sx, sy, gx, gy, ox, oy, grid_size, robot_radius)
plt.show()
def process_data(
single_day_df: pd.DataFrame,
safety_threshold: float = 1.0,
):
"""Rescales data, then provides the coordinates needed for the pathfinder."""
sx, sy, gx, gy = find_start_end(single_day_df)
single_day_df = single_day_df.fillna(safety_threshold) # NaN values are land
unsafe = single_day_df[single_day_df.sithick < safety_threshold]
ox = unsafe.longitude.values.tolist()
oy = unsafe.latitude.values.tolist()
print(f"{len(ox)}/{len(single_day_df)} co-ordinates considered as dangerous ice.")
return gx, gy, sx, sy, ox, oy
def find_closest(df, lat, lon):
dist = (df["latitude"] - lat).abs() + (df["longitude"] - lon).abs()
return df.loc[dist.idxmin()]
def find_start_end(df_rescaled: pd.DataFrame) -> Tuple[int, int, int, int]:
"""Finds start and end points of ulukhaktok and sachs harbour, then scales their coordinate values to the origin."""
df_rescaled["longitude"] = df_rescaled.longitude
df_rescaled["latitude"] = df_rescaled.latitude
ulukhaktok_y, ulukhaktok_x = (
70.74025296172513,
-117.77122885607929,
)
sachs_y, sachs_x = 71.98715823380064, -125.24848194895534
closest = find_closest(df_rescaled, ulukhaktok_y, ulukhaktok_x)
sy, sx = closest["latitude"], closest["longitude"]
closest = find_closest(df_rescaled, sachs_y, sachs_x)
gy, gx = closest["latitude"], closest["longitude"]
return sx, sy, gx, gy
| StarcoderdataPython |
1721576 | <gh_stars>0
# MIT License
#
# Copyright (c) 2018 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from thetaetl.domain.vote import ThetaVote
class ThetaVoteMapper(object):
def json_dict_to_vote(self, json_dict):
vote = ThetaVote()
vote.Block = json_dict.get('Block')
vote.Epoch = json_dict.get('Epoch')
vote.Height = json_dict.get('Height')
vote.ID = json_dict.get('ID')
vote.Signature = json_dict.get('Signature')
return vote
def vote_to_dict(self, vote):
return {
'type': 'vote',
'Block': vote.Block,
'Epoch': vote.Epoch,
'Height': vote.Height,
'ID': vote.ID,
'Signature': vote.Signature
} | StarcoderdataPython |
3352654 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 19:55:38 2018
@author: saschajecklin
"""
import sys
import os
sys.path.append("..")
#from scores.score_logger import ScoreLogger
from connect4game import Connect4
import random
import numpy as np
from collections import deque
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Flatten, Input, Conv2D
from keras.optimizers import RMSprop
from keras.regularizers import l2
from keras.backend.tensorflow_backend import set_session
from keras.layers.merge import Add
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation
import shutil
GAMMA = 0.95
MEMORY_SIZE = 1000000
BATCH_SIZE = 2048
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.998
NUMBER_OF_EVAL_GAMES = 10
SAVE_EVERY_K_GAMES = 12
GAMES_RECORDED_PER_EVAL = 10
DEMO_MODE = 0
class DQNSolver:
def __init__(self, observation_space, action_space):
self.exploration_rate = EXPLORATION_MAX
self.observation_space = observation_space
self.action_space = action_space
self.memory = deque(maxlen=MEMORY_SIZE)
i = 0 #remove old Evaluations
while os.path.exists("Evaluation%s" % i):
shutil.rmtree("Evaluation%s" % i)
i += 1
in_x = x = Input((self.observation_space[0], self.observation_space[1], 2)) # stack of own(6x7) and enemy(6x7) field
x = Conv2D(128, 3, padding="same", kernel_regularizer=l2(1e-4),
data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
for _ in range(2):
x = self._build_residual_block(x)
x = Conv2D(filters=2, kernel_size=1, padding="same", kernel_regularizer=l2(1e-4),
data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
x = Flatten()(x)
x = Dense(256, kernel_regularizer=l2(1e-4), activation="relu")(x)
policy_out = Dense(action_space, kernel_regularizer=l2(1e-4), activation="softmax", name="policy_out")(x)
self.model = Model(in_x, policy_out, name="connect4_model")
self.optimizer = RMSprop(lr=0.25, rho=0.95, epsilon=1e-2, decay=0.0) #SGD(lr=1e-2, momentum=0.9
# 'deepmind_rmsprop_learning_rate': 0.00025,
# 'deepmind_rmsprop_rho': .95,
# 'deepmind_rmsprop_epsilon': 0.01,)
# self.optimizer = SGD(lr=1e-2, momentum=0.9)
self.model.compile(optimizer=self.optimizer, loss='categorical_crossentropy')
def _build_residual_block(self, x):
in_x = x
x = Conv2D(filters=128, kernel_size=3, padding="same",
kernel_regularizer=l2(1e-4), data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
x = Conv2D(filters=128, kernel_size=3, padding="same",
kernel_regularizer=l2(1e-4), data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Add()([in_x, x])
x = Activation("relu")(x)
return x
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
# mirror state, next_state and action to produce twice as much training data
self.memory.append((np.flip(state, 1), (self.action_space-1)-action, reward, np.flip(next_state, 1), done))
def pop(self):
for i in range(2): # pop 2 becauses mirrored entries in remeber()
self.memory.pop()
def act(self, state, env): # state doesnt have to be the state in env. could be inverted
if np.random.rand() < self.exploration_rate:
return env.sample()
state = np.expand_dims(state, axis=0)
q_values = self.model.predict(state)
mask = (np.expand_dims(env.validMoves(),0) == 0)
q_values[mask] = float('-inf') # guard for valid moves
return np.argmax(q_values[0])
def experience_replay(self):
if len(self.memory) < BATCH_SIZE:
return
batch = random.sample(self.memory, BATCH_SIZE)
state_batch = np.zeros((BATCH_SIZE, self.observation_space[0], self.observation_space[1], 2))
q_values_batch = np.zeros((BATCH_SIZE, self.action_space))
idx = 0
for state, action, reward, state_next, terminal in batch:
q_update = reward
if not terminal:
state_next = np.expand_dims(state_next, axis=0)
q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0]))
state = np.expand_dims(state, axis=0)
q_values = self.model.predict(state)
q_values[0][action] = q_update
state_batch[idx, ...] = state
q_values_batch[idx, ...] = q_values
idx = idx + 1
self.model.fit(state_batch, q_values_batch, verbose=0)
self.exploration_rate *= EXPLORATION_DECAY
self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)
def save(self, path='weights.h5'):
self.model.save_weights(filepath=path)
def load(self, path='weights.h5'):
self.model.load_weights(filepath=path)
def connect4dqn():
env = Connect4()
#score_logger = ScoreLogger('Connect4')
# player1won = 0
# player2won = 0
observation_space = env.reset().shape
action_space = env.validMoves().size
# Assign GPU to DGX
config = tf.ConfigProto(
device_count = {'GPU': 2}
)
sess = tf.Session(config=config)
set_session(sess)
dqn_solver = DQNSolver(observation_space, action_space)
run = 0
state = env.reset() #moved one loop up. otherwise player two wont be able to start if player one wins
while True:
run += 1
#if run % SAVE_EVERY_K_GAMES == 0 :
# print('Saving weights and starting evaluation...')
# dqn_solver.save()
# score, ties = evaluate_dqn(env, dqn_solver, NUMBER_OF_EVAL_GAMES)
# score_logger.add_score(score + ties, run) #logging ties as success
step = 0
while True:
step += 1
player = env.getNextPlayer()
if player == 1:
action_player1 = dqn_solver.act(state, env)
state_next, reward_player1, terminal, info = env.makeMove(player, action_player1, DEMO_MODE)
state_copy = np.copy(state)
state_next_copy = np.copy(state_next)
if terminal:
dqn_solver.pop() # if player 1 wins, pop player 2's last move from and give it a negative reward
dqn_solver.remember(normalized_state, action_player2, reward_player1*-1, normalized_state_next, terminal)
dqn_solver.remember(state, action_player1, reward_player1, state_next, terminal)
state = state_next
else:
normalized_state = np.roll(state, 1, axis = -1)
action_player2 = dqn_solver.act(normalized_state, env)
state_next, reward_player2, terminal, info = env.makeMove(player, action_player2, DEMO_MODE)
normalized_state_next = np.roll(state_next, 1, axis = -1)
if terminal:
dqn_solver.pop() # if player 2 wins, pop player 1's last move from and give it a negative reward
dqn_solver.remember(state_copy, action_player1, reward_player2*-1, state_next_copy, terminal)
dqn_solver.remember(normalized_state, action_player2, reward_player2, normalized_state_next, terminal)
state = state_next
if terminal:
# if player == 1:
# player1won += 1
# else:
# player2won += 1
# try:
# winRatio = player1won/player2won
# except ZeroDivisionError:
# winRatio = 0
# print('Win ratio: {}'.format(winRatio)) #debug stuff
print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", moves: " + str(step))
break
dqn_solver.experience_replay()
def evaluate_dqn(env, dqn, numberOfGames = 1000):
print('Testing AI vs. random move. AI is yellow (Player 1)')
tmp_exploration_rate = dqn.exploration_rate
dqn.exploration_rate = 0
run = 0
aiWin = 0
randomWin = 0
tieCOunter = 0
state = env.reset()
i = 0
while os.path.exists("Evaluation%s" % i):
i += 1
os.makedirs("Evaluation%s" % i)
os.chdir("Evaluation%s" % i)
while run < numberOfGames:
terminal = 0
#print('Game number {}'.format(run))
while not terminal:
if env.getNextPlayer() == 1: #fixed player number, because looser starts next game
action = dqn.act(state, env)
state_next, reward, terminal, info = env.makeMove(1, action, printflag = 0, imageflag = run < GAMES_RECORDED_PER_EVAL)
state = state_next
if terminal and reward > 0:
aiWin += 1
if terminal and reward == 0:
tieCOunter += 1
else:
state_next, reward, terminal, info = env.makeMove(2, env.sample(), printflag = 0, imageflag = run < GAMES_RECORDED_PER_EVAL)
state = state_next
if terminal and reward > 0:
randomWin += 1
if terminal and reward == 0:
tieCOunter += 1
if run < GAMES_RECORDED_PER_EVAL:
env.makeVideo(run)
run += 1
os.chdir("..")
print('AI won {} out of {} games. {} ties'.format(aiWin, numberOfGames, tieCOunter))
dqn.exploration_rate = tmp_exploration_rate
return aiWin, tieCOunter
if __name__ == "__main__":
connect4dqn()
| StarcoderdataPython |
3327959 | <reponame>faircloth-lab/itero
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2018 <NAME> || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 14 April 2018 16:10 CDT (-0500)
"""
from __future__ import absolute_import
descr = "Get help info on a itero command."
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
'help',
description=descr,
help=descr
)
p.add_argument(
"command",
metavar="COMMAND",
action="store",
nargs='?',
help="print help information for COMMAND "
"(same as: itero COMMAND -h)",
)
p.set_defaults(func=execute)
def execute(args, parser):
if not args.command:
parser.print_help()
return
import sys
import subprocess
subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) | StarcoderdataPython |
68350 | <reponame>HatsuneMiku4/reaver
import reaver.envs
import reaver.models
import reaver.agents
import reaver.utils
| StarcoderdataPython |
3391316 | ########################################################################
# amara/bindery/model/examplotron.py
"""
Examplotron specialization of bindery node XML model tools
"""
__all__ = [
'examplotron_model',
]
import sys
#import re
import warnings
import copy
from cStringIO import StringIO
from itertools import *
from functools import *
from operator import *
from amara import tree
from amara.lib.xmlstring import U
from amara.bindery import BinderyError
from amara.bindery.model import document_model, constraint, child_element_constraint, named_node_test, NODE_ID_MARKER
from amara.xpath import datatypes, expressions
from amara.xpath.util import top_namespaces, named_node_test, node_test
from amara.xpath import context, parser
from amara.namespaces import AKARA_NAMESPACE, EG_NAMESPACE
class examplotron_model(document_model):
'''
XML model information and metadata extraction cues from an examplotron document
'''
def __init__(self, egdoc):
from amara import bindery
self.model_document = bindery.parse(egdoc)
self.model_document.xml_model.prefixes = top_namespaces(self.model_document)
self.setup_model()
return
def setup_model(self, parent=None):
'''
Process an examplotron document for constraints
'''
NSS = {u'ak': AKARA_NAMESPACE, u'eg': EG_NAMESPACE}
parent = parent if parent is not None else self.model_document
allowed_elements_test = []
if isinstance(parent, tree.element):
#for a in parent.xml_attributes:
#FIXME: Hack until this issue is fixed: http://trac.xml3k.org/ticket/8
for a in dict(parent.xml_attributes.items()):
if a[0] not in [EG_NAMESPACE, AKARA_NAMESPACE]:
parent.xml_model.attribute_types[a] = (self.model_document.xml_new_pname_mapping(a[0], a[1], iselement=False, update_class=False), None)
for e in parent.xml_elements:
#Constraint info
eg_occurs = e.xml_attributes.get((EG_NAMESPACE, 'occurs'))
if not (e.xml_namespace, e.xml_local) in parent.xml_model.element_types:
parent.xml_model.element_types[e.xml_namespace, e.xml_local] = (self.model_document.xml_new_pname_mapping(e.xml_namespace, e.xml_local, update_class=False), None)
if not eg_occurs in [u'?', u'*']:
c = child_element_constraint(e.xml_namespace, e.xml_local)
parent.xml_model.add_constraint(c)
if not eg_occurs in [u'+', u'*']:
parent.xml_model.add_constraint(
constraint(u'count(%s) = 1'%named_node_test(e.xml_namespace, e.xml_local, parent), msg=u'Only one instance of element allowed')
)
allowed_elements_test.append(named_node_test(e.xml_namespace, e.xml_local, parent))
#Metadata extraction cues
#FIXME: Compile these XPath expressions
mod = e.xml_model
rattr = e.xml_select(u'@ak:resource', NSS)
if rattr:
#ak:resource="" should default to a generated ID
mod.metadata_resource_expr = rattr[0].xml_value or NODE_ID_MARKER
#rattr = e.xml_select(u'string(@ak:resource)', NSS)
#if rattr: mod.metadata_resource_expr = rattr
relattr = e.xml_select(u'@ak:rel', NSS)
if relattr:
if relattr[0].xml_value:
mod.metadata_rel_expr = parser.parse(relattr[0].xml_value)
else:
mod.metadata_rel_expr = parser.parse(u'local-name()')
valattr = e.xml_select(u'@ak:value', NSS)
if valattr:
if valattr[0].xml_value:
mod.metadata_value_expr = parser.parse(valattr[0].xml_value)
else:
mod.metadata_value_expr = parser.parse(u'.')
context_attr = e.xml_select(u'@ak:context', NSS)
if context_attr:
mod.metadata_context_expr = parser.parse(context_attr[0].xml_value)
else:
#If it doesn't state context, don't check context
mod.metadata_context_expr = None
#mod.metadata_context_expr = node_test(parent, e, 'parent')
#Apply default relationship or value expression
#If there's ak:rel but no ak:value or ak:resource, ak:value=u'.'
#If there's ak:value but no ak:rel or ak:resource, ak:rel=u'local-name()'
if mod.metadata_resource_expr:
if (mod.metadata_value_expr
and not mod.metadata_rel_expr):
mod.metadata_rel_expr = parser.parse(u'local-name()')
else:
if (mod.metadata_rel_expr
and not mod.metadata_value_expr):
mod.metadata_value_expr = parser.parse(u'.')
elif (mod.metadata_value_expr
and not mod.metadata_rel_expr):
mod.metadata_rel_expr = parser.parse(u'local-name()')
if mod.metadata_resource_expr not in (NODE_ID_MARKER, None):
if not isinstance(mod.metadata_resource_expr, expressions.expression): mod.metadata_resource_expr = parser.parse(mod.metadata_resource_expr)
#if mod.metadata_rel_expr is not None:
# mod.metadata_rel_expr = parser.parse(mod.metadata_rel_expr)
#if mod.metadata_value_expr is not None:
# mod.metadata_value_expr = parser.parse(mod.metadata_value_expr)
relelem = e.xml_select(u'ak:rel', NSS)
for rel in relelem:
mod.other_rel_exprs.append((unicode(rel.name),unicode(rel.value)))
#print e.xml_name, (mod.metadata_resource_expr, mod.metadata_rel_expr, mod.metadata_value_expr)
#Recurse to process children
self.setup_model(e)
if allowed_elements_test:
parent.xml_model.add_constraint(
constraint(u'count(%s) = count(*)'%u'|'.join(allowed_elements_test), msg=u'Unexpected elements present')
)
else:
parent.xml_model.add_constraint(
constraint(u'not(*)', msg=u'Element expected to be empty')
)
#To do:
#Add <ak:product ak:name="AVT" ak:value="AVT"/>
| StarcoderdataPython |
1690734 | #!/usr/bin/env python
import os
import sys
sys.path.append('/home/bithika/src/House-Number-Detection')
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
from skimage import color
from skimage import io
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
#import torch
import argparse
import h5py
plt.rcParams['figure.figsize'] = (16.0, 4.0)
###############################################################################
###############################################################################
#device = torch.device("cpu")
from preprocess_utils import *
from plot_utils import *
###############################################################################
###############################################################################
# Argument Parsing
#
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('--base-dir', type=str,
default='/home/bithika/src/House-Number-Detection', help='Input base directory ')
parser.add_argument('--train-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/raw/train_32x32.mat', help='Input data directory')
parser.add_argument('--test-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/raw/test_32x32.mat', help='Input data directory')
parser.add_argument('--output-dir', type=str,
default='/home/bithika/src/House-Number-Detection/reports', help='Input data directory')
parser.add_argument('--processed-data-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/processed', help='processed data directory')
parser.add_argument('--validation-data-fraction', type=float,
default=0.1, help='validation dataset split fraction (default: 0.1)')
args = parser.parse_args()
###############################################################################
###############################################################################
# Load dataset
#
# Reading the .mat files
X_train, y_train = load_data(args.train_dir)
X_test, y_test = load_data(args.test_dir)
print("Training Set", X_train.shape, y_train.shape)
print("Test Set", X_test.shape, y_test.shape)
# Calculate the total number of images
num_images = X_train.shape[0] + X_test.shape[0]
print("Total Number of Images", num_images)
# Transpose image arrays
# (width, height, channels, size) -> (size, width, height, channels)
X_train, y_train = X_train.transpose((3,0,1,2)), y_train[:,0]
X_test, y_test = X_test.transpose((3,0,1,2)), y_test[:,0]
print("Training Set", X_train.shape)
print("Test Set", X_test.shape)
print('')
# Plot some training set images
plot_images(X_train, y_train, 2, 8, args.output_dir, 'train_images.png')
# Plot some test set images
plot_images(X_test, y_test, 2, 8, args.output_dir, 'test_images.png')
# check for unique labesl
print(np.unique(y_train))
# data distribution
plot_data_distribution(y_train, y_test, args.output_dir, 'class_distribution.png')
# distributions are skewed in the positive direction i.e lesser data on the higher values
convert_labels_10to0(y_train)
convert_labels_10to0(y_test)
# check for unique labesl
print(np.unique(y_train))
# split training data into train and validation
#X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.13, random_state=7, stratify = y_train)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=args.validation_data_fraction, random_state=7)
plot_data_distribution(y_train, y_val, args.output_dir, 'train_val_class_distribution.png')
print(y_train.shape, y_val.shape, y_test.shape)
# convert to float for numpy computation
train_greyscale = rgb2gray(X_train).astype(np.float32)
test_greyscale = rgb2gray(X_test).astype(np.float32)
val_greyscale = rgb2gray(X_val).astype(np.float32)
print("Training Set", train_greyscale.shape)
print("Validation Set", val_greyscale.shape)
print("Test Set", test_greyscale.shape)
print('')
# remove RGB train, test and val set from RAM
del X_train, X_val, X_test
plot_images(train_greyscale, y_train, 1, 10,args.output_dir, 'train_images_greyscale.png' )
# Normalisation
# Liang et al. 2015 report that the pre-processed the images by removing the per-pixel mean value calculated over
#the entire set.
#Goodfellow et al. 2013 report that they subtract the mean from every image.
train_greyscale_norm, test_greyscale_norm, val_greyscale_norm = normalize(train_greyscale, test_greyscale, val_greyscale)
plot_images(train_greyscale, y_train, 1, 10, args.output_dir, 'train_images_greyscale_norm.png' )
#one hot label encoding
y_train, y_test, y_val = one_hot_labels(y_train, y_test, y_val )
print("Training set", y_train.shape)
print("Validation set", y_val.shape)
print("Test set", y_test.shape)
store_data('SVHN_grey.h5',
args.processed_data_dir,
train_greyscale_norm, test_greyscale_norm, val_greyscale_norm,
y_train, y_test, y_val)
| StarcoderdataPython |
3393073 | # coding: utf8
"""
ref
1. http://disi.unitn.it/moschitti/Tree-Kernel.htm
2. http://disi.unitn.it/moschitti/Teaching-slides/slides-AINLP-2016/SVMs-Kernel-Methods.pdf
3. code: http://joedsm.altervista.org/pythontreekernels.htm
4. wiki: https://en.wikipedia.org/wiki/Tree_kernel
"""
from __future__ import print_function
import math
from copy import deepcopy
from . import tree
class Kernel():
#Common routines for kernel functions
def kernel(self,a,b):
#compute the tree kernel on the trees a and b
if not isinstance(a, tree.Tree):
print("ERROR: first parameter has to be a Tree Object")
return ""
if not isinstance(b, tree.Tree):
print("ERROR: second parameter has to be a Tree Object")
return ""
self.preProcess(a)
self.preProcess(b)
return self.evaluate(a,b)
def preProcess(self,a):
#Create any data structure useful for computing the kernel
#To be instantiated in subclasses
print("ERROR: prepProcess() must be executed in subclasses")
pass
def evaluate(self,a,b):
#To be instantiated in subclasses
print("ERROR: evaluated() must be executed in subclasses")
pass
def printKernelMatrix(self,dataset):
if not isinstance(dataset, tree.Dataset):
print("ERROR: the first Parameter must be a Dataset object")
return
ne = len(dataset)
for i in range(ne):
for j in range(i,ne):
print("%d %d %.2f" % (i, j, self.kernel(dataset.getExample(i), dataset.getExample(j))))
class KernelST(Kernel):
def __init__(self,l,savememory=1,hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.savememory = savememory
def preProcess(self,a):
if hasattr(a,'kernelstrepr'): #already preprocessed
return
if not hasattr(a.root, 'stsize'):
a.root.setSubtreeSize()
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelstrepr = tree.SubtreeIDSubtreeSizeList(a.root)
a.kernelstrepr.sort()
if self.savememory==1:
a.deleteRootTreeNode()
def evaluate(self,a,b):
ha, hb = (a.kernelstrepr, b.kernelstrepr)
#Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize)
#a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i,j,k,toti,totj = (0,0,0,len(ha), len(hb))
while i < toti and j < totj:
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
ci,cj=(i,j)
while i < toti and ha.getSubtreeID(i)==ha.getSubtreeID(ci):
i += 1
while j < totj and hb.getSubtreeID(j)==hb.getSubtreeID(cj):
j += 1
k += (i-ci)*(j-cj)*(self.l**ha.getSubtreeSize(ci))
elif ha.getSubtreeID(i) < hb.getSubtreeID(j):
i += 1
else:
j += 1
return k
class KernelSST(Kernel):
def __init__(self,l,hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self,a):
if hasattr(a,'kernelsstrepr'): #already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelsstrepr = tree.ProdSubtreeList(a.root)
a.kernelsstrepr.sort()
def CSST(self,c,d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
else:
prod = self.l
nc = c.getOutdegree()
if nc==d.getOutdegree():
for ci in range(nc):
if c.getChild(ci).getProduction() == d.getChild(ci).getProduction():
prod *= (1 + self.CSST(c.getChild(ci),d.getChild(ci)))
else:
cid, did = (c.getChild(ci).getSubtreeID(),d.getChild(ci).getSubtreeID())
if cid < did:
self.cache.insert(str(cid) + str(did), 0)
else:
self.cache.insert(str(did) + str(cid), 0)
self.cache.insert(tmpkey, prod)
return float(prod)
def evaluate(self,a,b):
pa,pb=(a.kernelsstrepr, b.kernelsstrepr)
self.cache.removeAll()
i,j,k,toti,totj = (0,0,0,len(pa),len(pb))
while i < toti and j < totj:
if pa.getProduction(i) == pb.getProduction(j):
ci,cj=(i,j)
while i < toti and pa.getProduction(i)==pa.getProduction(ci):
j = cj
while j < totj and pb.getProduction(j)==pb.getProduction(cj):
k += self.CSST(pa.getTree(i),pb.getTree(j))
j += 1
i += 1
elif len(pa.getProduction(i))<len(pb.getProduction(j)) or (len(pa.getProduction(i))==len(pb.getProduction(j)) and pa.getProduction(i) < pb.getProduction(j)):
i += 1
else:
j += 1
return k
class KernelPT(Kernel):
def __init__(self,l,m,hashsep="#"):
self.l = float(l)
self.m = float(m)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self,a):
if hasattr(a,'kernelptrepr'): #already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelptrepr = tree.LabelSubtreeList(a.root)
a.kernelptrepr.sort()
def DeltaSk(self, a, b,nca, ncb):
DPS = [[0 for i in range(ncb+1)] for j in range(nca+1)]
DP = [[0 for i in range(ncb+1)] for j in range(nca+1)]
kmat = [0]*(nca+1)
for i in range(1,nca+1):
for j in range(1,ncb+1):
if a.getChild(i-1).getLabel() == b.getChild(j-1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i-1),b.getChild(j-1))
kmat[0] += DPS[i][j]
else:
DPS[i][j] = 0
for s in range(1,min(nca,ncb)):
for i in range(nca+1):
DP[i][s-1] = 0
for j in range(ncb+1):
DP[s-1][j] = 0
for i in range(s,nca+1):
for j in range(s,ncb+1):
DP[i][j] = DPS[i][j] + self.l*DP[i-1][j] + self.l*DP[i][j-1] - self.l**2*DP[i-1][j-1]
if a.getChild(i-1).getLabel() == b.getChild(j-1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i-1),b.getChild(j-1))*DP[i-1][j-1]
kmat[s] += DPS[i][j]
return sum(kmat)
def CPT(self,c,d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return self.cache.read(tmpkey)
else:
if c.getOutdegree()==0 or d.getOutdegree()==0:
prod = self.m*self.l**2
else:
prod = self.m*(self.l**2+self.DeltaSk(c, d,c.getOutdegree(),d.getOutdegree()))
self.cache.insert(tmpkey, prod)
return prod
def evaluate(self,a,b):
self.cache.removeAll()
la,lb = (a.kernelptrepr,b.kernelptrepr)
i,j,k,toti,totj = (0,0,0,len(la),len(lb))
while i < toti and j < totj:
if la.getLabel(i) == lb.getLabel(j):
ci,cj=(i,j)
while i < toti and la.getLabel(i) == la.getLabel(ci):
j = cj
while j < totj and lb.getLabel(j) == lb.getLabel(cj):
k += self.CPT(la.getTree(i),lb.getTree(j))
j += 1
i += 1
elif la.getLabel(i) <= lb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdak(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.kernelpdakrepr = tree.SubtreePositionIDLabelSubtreeSizeList(t.root)
def mergetrees_with_depth(self, tree1, tree2):
merge = {}
for key in tree1:
if key in tree2:
merge[key] = ({(tree1[key][0],tree1[key][2]):{tree1[key][1]:1}},{(tree2[key][0],tree2[key][2]):{tree2[key][1]:1}})
del tree2[key]
else: merge[key] = ({(tree1[key][0],tree1[key][2]):{tree1[key][1]:1}},None)
for key in tree2:
merge[key] = (None,{(tree2[key][0],tree2[key][2]):{tree2[key][1]:1}})
return merge
def visit_with_depth(self,jtree,node,depth,param,lambda_par,gamma_par):
kvalue = 0
if node is not None :
child = 0
key = str(hash(node+'#'+str(child)))
while key in jtree :
kvalue = kvalue + self.visit_with_depth(jtree,key,depth+1,param,lambda_par,gamma_par)
if jtree[key][0] is not None:
if jtree[node][0] is None:
#jtree[node][0] = jtree[key][0]
jtree[node] = (jtree[key][0], jtree[node][1])
else:
for tmpkey in jtree[key][0]:
if tmpkey in jtree[node][0]:
for tmpkey2 in jtree[key][0][tmpkey]:
if tmpkey2 in jtree[node][0][tmpkey]:
jtree[node][0][tmpkey][tmpkey2] = jtree[node][0][tmpkey][tmpkey2] + jtree[key][0][tmpkey][tmpkey2]
else: jtree[node][0][tmpkey][tmpkey2] = jtree[key][0][tmpkey][tmpkey2]
else: jtree[node][0][tmpkey] = jtree[key][0][tmpkey]
if jtree[key][1] is not None:
if jtree[node][1] is None:
#jtree[node][1]=jtree[key][1]
jtree[node]=(jtree[node][0],jtree[key][1])
else:
for tmpkey in jtree[key][1]:
if tmpkey in jtree[node][1]:
for tmpkey2 in jtree[key][1][tmpkey]:
if tmpkey2 in jtree[node][1][tmpkey]:
jtree[node][1][tmpkey][tmpkey2] = jtree[node][1][tmpkey][tmpkey2] + jtree[key][1][tmpkey][tmpkey2]
else: jtree[node][1][tmpkey][tmpkey2] = jtree[key][1][tmpkey][tmpkey2]
else: jtree[node][1][tmpkey] = jtree[key][1][tmpkey]
child = child + 1
key = str(hash(node+'#'+str(child)))
# print jtree[node]
if (jtree[node][0] is not None) and (jtree[node][1] is not None):
for lkey in jtree[node][0]:
if lkey in jtree[node][1]:
tmpk = 0
for fkey1 in jtree[node][0][lkey]:
for fkey2 in jtree[node][1][lkey]:
tmpk = tmpk + lambda_par**lkey[1]*jtree[node][0][lkey][fkey1]*jtree[node][1][lkey][fkey2]*math.exp(-param*(fkey1 + fkey2))
kvalue = kvalue + (gamma_par**depth)*tmpk*math.exp(2*param*depth)
return kvalue
def evaluate(self,a,b):
tree1 = deepcopy(a.kernelpdakrepr.sids)
tree2 = deepcopy(b.kernelpdakrepr.sids)
m = self.mergetrees_with_depth(tree1,tree2)
kvalue = self.visit_with_depth(m,str(hash('0')),1,self.l, self.gamma, self.beta)
del m, tree1, tree2
return kvalue
class KernelPdakMine(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
self.cache = Cache()
self.cachesize = 10000
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.computeRoutes()
t.kernelpdakrepr = tree.SubtreeIDSubtreeSizeRouteList(t.root)
t.kernelpdakrepr.sort()
#print t.kernelpdakrepr.sids
def ntk(self, ra, da, rb, db, hra, hrb):
if hra < hrb:
tmpkey = str(hra) + "#" + str(hrb)
else:
tmpkey = str(hrb) + "#" + str(hra)
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
lena,lenb = len(ra), len(rb)
c, p, minlen = 0, 0, min(lena,lenb)
while c < minlen and ra[c] == rb[c]:
if ra[c] == "#": p += 1
c += 1
#print "p = ", p, "da, db", da, db, ra, rb
if self.gamma == 1:
r = (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
else:
r = (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
if len(self.cache) > self.cachesize:
self.cache.removeAll()
self.cache.insert(tmpkey,r)
return r
# if self.gamma == 1:
# return (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
# else:
# return (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
def evaluate(self,a,b):
ha, hb = (a.kernelpdakrepr, b.kernelpdakrepr)
#print ha, hb
#Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize, route)
#a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i,j,k,toti,totj = (0,0,0,len(ha), len(hb))
while i < toti and j < totj:
if ha.getLabel(i) == hb.getLabel(j):
ci, cj = (i, j)
while i < toti and ha.getLabel(i)==ha.getLabel(ci):
j = cj
while j < totj and hb.getLabel(j)==hb.getLabel(cj):
cst = self.l
if ha.getSubtreeID(i)==hb.getSubtreeID(j):
cst += self.l**ha.getSubtreeSize(i)
#print ha.getLabel(i), hb.getLabel(j), cst, self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j))
k += cst*self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j), ha.getRouteHash(i), hb.getRouteHash(j))
j += 1
i += 1
elif ha.getLabel(i) < hb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdakFast(KernelPdak):
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
a = tree.SubtreePositionIDSubtreeIDSubtreeSizeListLabel(t.root)
t.kernelpdakrepr = (a.sids, a.pinv)
def mergetrees_with_depth_del_labels(self, tree1,labels1, tree2,labels2):
merge = {}
match = 0
for key in tree1:
if key in tree2:
if tree1[key][0] in labels2:
match = match+1
if tree2[key][0] in labels1:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{(tree2[key][0],tree2[key][1]):0})
else:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{})
else:
if tree2[key][0] in labels1:
merge[key] = ({},{(tree2[key][0],tree2[key][1]):0})
match = match+1
else: merge[key] = ({},{})
del tree2[key]
else:
if tree1[key][0] in labels2:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{})
match = match+1
else: merge[key] = ({},{})
for key in tree2:
if tree2[key][0] in labels1:
merge[key] = ({},{(tree2[key][0],tree2[key][1]):0})
match = match+1
else: merge[key] = ({},{})
return (merge,match)
def visit_with_depth(self,jtree,node,depth,param,lambda_par,gamma_par):
kvalue = 0
tmpk = 0
if node is not None :
child = 0
key = str(hash(node+'#'+str(child)))
startkey = key
max_size = [0,None]
while key in jtree :
kvalue = kvalue + self.visit_with_depth(jtree,key,depth+1,param,lambda_par,gamma_par)
if (len(jtree[key][0]) + len(jtree[key][1])) > max_size[0]:
max_size[0] = len(jtree[key][0]) + len(jtree[key][1])
max_size[1] = key
child = child + 1
key = str(hash(node+'#'+str(child)))
#print 'max_size',max_size[0]
if max_size[0] > 0 :
child = 0
while startkey in jtree :
if startkey != max_size[1] :
if jtree[startkey][0] is not {}:
for tmpkey in jtree[startkey][0]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = tmpk + (gamma_par**(depth+1) - gamma_par)/(gamma_par - 1)*lambda_par**tmpkey[1]*jtree[startkey][0][tmpkey]*jtree[max_size[1]][1][tmpkey]
else: tmpk = tmpk + depth*lambda_par**tmpkey[1]*jtree[startkey][0][tmpkey]*jtree[max_size[1]][1][tmpkey]
# fine calcolo kernel, inizio inserimento
if jtree[startkey][1] is not {}:
for tmpkey in jtree[startkey][1]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = tmpk + (gamma_par**(depth+1) - gamma_par)/(gamma_par - 1)*lambda_par**tmpkey[1]*jtree[startkey][1][tmpkey]*jtree[max_size[1]][0][tmpkey]
else: tmpk = tmpk + depth*lambda_par**tmpkey[1]*jtree[startkey][1][tmpkey]*jtree[max_size[1]][0][tmpkey]
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = jtree[max_size[1]][1][tmpkey] + jtree[startkey][1][tmpkey]
else: jtree[max_size[1]][1][tmpkey] = jtree[startkey][1][tmpkey]
# inserisco anche hash 0
for tmpkey in jtree[startkey][0]:
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = jtree[max_size[1]][0][tmpkey] + jtree[startkey][0][tmpkey]
else: jtree[max_size[1]][0][tmpkey] = jtree[startkey][0][tmpkey]
# next child
child = child + 1
startkey = str(hash(node+'#'+str(child)))
# fine while figli
if jtree[node][0] is not {}:
for tmpkey in jtree[node][0]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = tmpk + (gamma_par**(depth+1) - gamma_par)/(gamma_par - 1)*lambda_par**tmpkey[1]*math.exp(-param*depth)*jtree[max_size[1]][1][tmpkey]
else: tmpk = tmpk + depth*lambda_par**tmpkey[1]*math.exp(-param*depth)*jtree[max_size[1]][1][tmpkey]
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = jtree[max_size[1]][0][tmpkey] + math.exp(-param*depth)
else: jtree[max_size[1]][0][tmpkey] = math.exp(-param*depth)
if jtree[node][1] is not {}:
for tmpkey in jtree[node][1]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = tmpk + (gamma_par**(depth+1) - gamma_par)/(gamma_par - 1)*lambda_par**tmpkey[1]*math.exp(-param*depth)*jtree[max_size[1]][0][tmpkey]
else: tmpk = tmpk + depth*lambda_par**tmpkey[1]*math.exp(-param*depth)*jtree[max_size[1]][0][tmpkey]
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = jtree[max_size[1]][1][tmpkey] + math.exp(-param*depth)
else: jtree[max_size[1]][1][tmpkey] = math.exp(-param*depth)
jtree[node] = (jtree[max_size[1]][0],jtree[max_size[1]][1])
else:
for tmpkey in jtree[node][0]:
jtree[node][0][tmpkey] = math.exp(-param*depth)
for tmpkey in jtree[node][1]:
jtree[node][1][tmpkey] = math.exp(-param*depth)
if jtree[node][0] is not {} and jtree[node][1] is not {}:
for tmpkey in jtree[node][0]:
if tmpkey in jtree[node][1]:
if gamma_par != 1.0:
tmpk = tmpk + (gamma_par**(depth+1) - gamma_par)/(gamma_par - 1)*lambda_par**tmpkey[1]*jtree[node][0][tmpkey]*jtree[node][1][tmpkey]
else: tmpk = tmpk + depth*lambda_par**tmpkey[1]*jtree[node][0][tmpkey]*jtree[node][1][tmpkey]
return kvalue + tmpk*math.exp(2*param*depth)
def evaluate(self,a,b):
tree1 = deepcopy(a.kernelpdakrepr)
tree2 = deepcopy(b.kernelpdakrepr)
(m,match) = self.mergetrees_with_depth_del_labels(tree1, tree2)
kvalue = 0
if match > 0:
kvalue = self.visit_with_depth(m,str(hash('0')),1,self.l, self.gamma, self.beta)
del m, tree1, tree2
return kvalue
####
class Cache():
#An extremely simple cache
def __init__(self):
self.cache = {}
self.size = 0
def exists(self,key):
return key in self.cache
def existsPair(self,keya,keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
return tmpkey in self.cache
def insert(self,key,value):
self.cache[key] = value
self.size += 1
def insertPairIfNew(self,keya,keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
if not tmpkey in self.cache:
self.insert(tmpkey)
def remove(self,key):
del self.cache[key]
self.size -= 1
def removeAll(self):
self.cache = {}
self.size = 0
def read(self,key):
return self.cache[key]
def __len__(self):
return self.size
| StarcoderdataPython |
30527 | <reponame>payoto/graphcore_examples
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Copyright 2021 RangiLyu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore Ltd.
import numpy as np
import torch
from .xml_dataset import XMLDataset
from utils import logger
if logger.GLOBAL_LOGGER is not None:
print = logger.GLOBAL_LOGGER.log_str
def calc_area(boxes):
# boxes: n,4
# return
x1, y1, x2, y2 = np.split(boxes, 4, 1)
areas = (y2 - y1) * (x2 - x1) # n,1
return areas[:, 0]
class XMLDatasetForRcnn(XMLDataset):
def __init__(self,
preset_indices=None,
area_filter_thrd=0.0,
num_gtboxes=20,
specified_length=None,
extra_layer=None,
**kwargs):
self.area_filter_thrd = area_filter_thrd
self.num_gtboxes = num_gtboxes
self.preset_indices = preset_indices
self._cur_for_preset_indices = 0
super(XMLDatasetForRcnn, self).__init__(**kwargs)
self.real_length = len(self.data_info)
self.length = self.real_length * 2 if specified_length is None else specified_length
self.extra_layer = extra_layer
def get_train_data(self, idx):
"""
Load image and annotation
:param idx:
:return: meta-data (a dict containing image, annotation and other information)
filter zero area boxes
"""
if self.preset_indices is None:
pass
else:
idx = self.preset_indices[self._cur_for_preset_indices]
self._cur_for_preset_indices += 1
idx = int(idx % self.real_length)
meta = super().get_train_data(idx)
# filter boxes and labels by area
areas = calc_area(meta['gt_bboxes'])
mask = areas > self.area_filter_thrd
meta['gt_bboxes'] = meta['gt_bboxes'][mask, :]
meta['gt_labels'] = meta['gt_labels'][mask]
meta['db_inds'] = idx
#
# pad boxes and inds
boxes = np.zeros((self.num_gtboxes, 4)).astype(np.float32)
num_boxes = meta['gt_bboxes'].shape[0]
boxes[:num_boxes, :] = meta['gt_bboxes'][:self.num_gtboxes]
meta['gt_bboxes'] = torch.from_numpy(boxes)
labels = np.asarray([0] * self.num_gtboxes)
labels[:num_boxes] = meta['gt_labels'][:self.num_gtboxes]
meta['gt_labels'] = torch.from_numpy(labels)
meta['num_boxes'] = num_boxes
if num_boxes == 0:
return None # return None will re-run this function
# proc data in extra layer
if self.extra_layer is not None:
meta = self.extra_layer(meta)
return meta
def __len__(self):
return self.length
| StarcoderdataPython |
3248548 | <gh_stars>0
# MIT License
#
# Copyright (c) 2017 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from src.models.RecurrentNeuralNetwork import RecurrentNeuralNetwork
class ClockworkRNN(RecurrentNeuralNetwork):
"""This model represents a simple clockwork RNN, based on the
equally named paper."""
def __init__(self, config):
"""Constructs a new CW-RNN.
Args:
config: The configuration parameters
unique_name: Define the unique name of this lstm
num_input: The number of input units per step.
num_output: The number of output units per step.
num_hidden: The number of units in the hidden layer.
num_cells: The number of cells per layer
num_layers: Define number of time-step unfolds.
clip_norm: The norm, to which a gradient should be clipped
batch_size: This represents the batch size used for training.
minimizer: Select the appropriate minimizer
seed: Represents the seed for this model
momentum: The momentum if the minimizer is momentum
lr_rate: The initial learning rate
lr_decay_steps: The steps until a decay should happen
lr_decay_rate: How much should the learning rate be reduced
num_modules: The number of modules with different clocks
module_size: The number of neurons per clocked module
"""
# create the clockwork mask
config['unique_name'] = "CWRNN_" + config['unique_name']
config['num_cells'] = 1
config['num_hidden'] = config['num_modules'] * config['module_size']
cw_mask = np.ones((config['num_hidden'], config['num_hidden']), dtype=np.int32)
# fill the other values with ones
ms = config['module_size']
for y in range(1, config['num_modules']):
for x in range(y):
cw_mask[ms*y:ms*(y+1), ms*x:ms*(x+1)] = np.zeros((ms, ms), dtype=np.int32)
# create the constant mask
self.cw_mask = tf.constant(cw_mask, dtype=tf.float32)
# create clock periods
self.clock_periods = np.power(2, np.arange(1, config['num_modules'] + 1) - 1)
# Perform the super call
super().__init__(config)
def get_h(self):
"""Gets a reference to the step h."""
size = self.config['num_hidden'] * self.config['num_cells']
h = tf.zeros([size, 1], tf.float32)
return [h]
def get_initial_h(self):
"""Gets a reference to the step h."""
return self.h
def get_step_h(self):
"""Retrieve the step h"""
size = self.config['num_hidden'] * self.config['num_cells']
h = tf.placeholder(tf.float32, [size, 1], name="step_h")
return [h]
def get_current_h(self):
"""Deliver current h"""
size = self.config['num_hidden'] * self.config['num_cells']
h = np.zeros([size, 1])
return [h]
def init_cell(self, name):
"""This method creates the parameters for a cell with
the given name
Args:
name: The name for this cell, e.g. 1
"""
with tf.variable_scope(name, reuse=None):
# extract values
H = self.config['num_hidden']
I = self.config['num_input']
# init the layers appropriately
tf.get_variable("W", [H, I], dtype=tf.float32, initializer=self.weights_initializer)
tf.get_variable("R", [H, H], dtype=tf.float32, initializer=self.weights_initializer)
tf.get_variable("b", [H, 1], dtype=tf.float32, initializer=self.bias_initializer)
def create_cell(self, name, x, h_state, num_cell):
"""This method creates a LSTM cell. It basically uses the
previously initialized weights.
Args:
x: The input to the layer.
h_state: The hidden input to the layer.
Returns:
new_h: The new hidden vector
"""
[h] = h_state
out_h = list()
with tf.variable_scope(name, reuse=True):
# short form
ms = self.config['module_size']
# get matrices appropriately
WH = tf.get_variable("W")
RH = self.cw_mask * tf.get_variable("R")
b = tf.get_variable("b")
# create the modules itself
for t in range(self.config['num_modules']):
# extract the block row
block_row = tf.slice(RH, [ms * t, 0], [ms, self.config['num_hidden']])
# make conditional if full or zero
condition = tf.equal(tf.mod(self.step_num + num_cell, tf.constant(self.clock_periods[t], dtype=tf.int64)), tf.constant(0))
filter_row = tf.cond(condition, lambda: tf.identity(block_row), lambda: tf.zeros([ms, self.config['num_hidden']]))
# retrieve block b and wh
block_b = tf.slice(b, [ms * t, 0], [ms, 1])
block_w = tf.slice(WH, [ms * t, 0], [ms, self.config['num_input']])
# append to output list
out_h.append(tf.nn.relu(block_w @ x + filter_row @ h + block_b))
# create new hidden vector
new_h = tf.concat(out_h, axis=0)
# pass back the new hidden state
return [new_h]
| StarcoderdataPython |
126556 | <reponame>Geson-anko/JARVIS3
import torch
from torch.nn import (
Module,Linear,
Conv1d,BatchNorm1d,AvgPool1d,
ConvTranspose1d,Upsample,
)
import os
import random
from .config import config
class Encoder(Module):
input_size:tuple = (1,config.futures,config.length,2)
output_size:tuple = (1,64,8)
insize = (-1,*input_size[1:])
def __init__(self):
super().__init__()
self.reset_seed()
# Model layers
self.pool = AvgPool1d(2)
self.dense = Linear(2,1)
self.conv1 = Conv1d(513,256,4)
self.norm1 = BatchNorm1d(256)
self.conv2 = Conv1d(256,128,8)
self.norm2 = BatchNorm1d(128)
self.conv3 = Conv1d(128,64,4)
self.norm3 = BatchNorm1d(64)
def forward(self,x):
x = x.view(self.insize)
x = torch.relu(self.dense(x)).squeeze(-1)
x = torch.relu(self.norm1(self.conv1(x)))
x = self.pool(x)
x = torch.relu(self.norm2(self.conv2(x)))
x = self.pool(x)
x = torch.tanh(self.norm3(self.conv3(x)))
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class Decoder(Module):
input_size = Encoder.output_size
output_size = Encoder.input_size
insize = (-1,*Encoder.output_size[1:])
def __init__(self):
super().__init__()
self.reset_seed()
# Model layers
self.upper = Upsample(scale_factor=2)
self.dcon0 = ConvTranspose1d(64,128,7)
self.norm0 = BatchNorm1d(128)
self.dcon1 = ConvTranspose1d(128,256,3)
self.norm1 = BatchNorm1d(256)
self.dcon2 = ConvTranspose1d(256,513,4)
self.norm2 = BatchNorm1d(513)
self.dense = Linear(1,2)
def forward(self,x):
x = x.view(self.insize)
x = torch.relu(self.norm0(self.dcon0(x)))
x = self.upper(x)
x = torch.relu(self.norm1(self.dcon1(x)))
x = self.upper(x)
x = torch.relu(self.norm2(self.dcon2(x))).unsqueeze(-1)
x = self.dense(x)
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class AutoEncoder(Module):
def __init__(self):
"""
This class is used training only.
How about using it like this?
>>> model = AutoEncoder()
>>> # -- Training Model Process --
>>> torch.save(model.encoder.state_dict(),encoder_name)
>>> torch.save(model.decoder.state_dict(),decoder_name)
"""
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self,x):
x = self.encoder(x)
x = self.decoder(x)
return x
""" Documentation
""" | StarcoderdataPython |
1686676 | <filename>termapp/overlay_event.py
#!/usr/bin/env python3
import urwid
class OverlayEvent(urwid.WidgetWrap):
def __init__(
self,
first_widget,
second_widget,
width = 15,
height = 10,
vertical_align = "middle",
horizontal_align = "center"
):
# Create widget overlay
overlay = urwid.Overlay(
first_widget,
second_widget,
align = horizontal_align,
valign = vertical_align,
width = width,
height = height
)
# Set properties
self.foregroundWidget = first_widget
self.backgroundWidget = second_widget
self._w = overlay
def keypress(self, size, key):
pass
| StarcoderdataPython |
3391271 | <filename>fluent.syntax/tests/syntax/__init__.py
import textwrap
def dedent_ftl(text):
return textwrap.dedent(f"{text.rstrip()}\n")
| StarcoderdataPython |
3395252 | <reponame>LemuelPuglisi/TutoratoTap<filename>Lesson_n5/examples/titanic_survival_prediction_pipeline.py<gh_stars>1-10
""" Train an Random Forest Classifier model that can predict if an actor
survived on the Titanic.
"""
import shutil
import pyspark.sql.functions as funcs
from pyspark import SparkFiles
from pyspark.sql import SparkSession
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import Pipeline
APP_NAME = 'Titanic Survival'
APP_DATASET_PATH = './data/titanic.csv'
APP_DATASET_FILE = 'titanic.csv'
def clear():
shutil.rmtree('./model', ignore_errors=True)
def main():
spark = SparkSession.builder.appName(APP_NAME) \
.config("spark.files.overwrite", "true") \
.getOrCreate()
spark.sparkContext.addFile(APP_DATASET_PATH)
dataset_path = SparkFiles.get(APP_DATASET_FILE)
df = spark.read.format('csv') \
.option('header', True) \
.load(dataset_path)
dataset = df.select(
funcs.col('Survived').cast('float'),
funcs.col('Pclass').cast('float'),
funcs.col('Sex'),
funcs.col('Age').cast('float'),
funcs.col('Fare').cast('float'),
funcs.col('Embarked')
)
dataset = dataset.replace('?', None).dropna(how='any')
required_features = ['Pclass', 'Age', 'Fare', 'Gender', 'Boarded']
mlp_parameters = {
"labelCol": 'Survived',
"featuresCol": 'features',
"maxIter": 100,
"layers": [5, 12, 2],
"blockSize": 64,
"seed": 1234
}
stage_1 = StringIndexer(inputCol='Sex', outputCol='Gender', handleInvalid='keep')
stage_2 = StringIndexer(inputCol='Embarked', outputCol='Boarded', handleInvalid='keep')
stage_3 = VectorAssembler(inputCols=required_features, outputCol='features')
stage_4 = MultilayerPerceptronClassifier(**mlp_parameters)
pipeline = Pipeline(stages=[stage_1, stage_2, stage_3, stage_4])
(training_data, test_data) = dataset.randomSplit([ .8, .2 ])
pipeline_model = pipeline.fit(training_data)
predictions = pipeline_model.transform(test_data)
evaluator = MulticlassClassificationEvaluator(
labelCol='Survived',
predictionCol='prediction',
metricName='accuracy'
)
accuracy = evaluator.evaluate(predictions)
print('Test accuracy = ', accuracy)
pipeline_model.save('model')
if __name__ == '__main__':
clear()
main() | StarcoderdataPython |
1744943 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from test_import import *
if __name__ == "__main__":
test_name = sys.argv[1]
test_dir = sys.argv[2]
tmp_dir = sys.argv[3]
rc = run_pyfunnel(test_dir)
assert rc == 0, "Binary status code for {}: {}.".format(test_dir, rc)
dif_err = dif_test(test_dir)
test_log(test_name, test_dir, tmp_dir, dif_err)
assert max(dif_err) == 0,\
"Fraction of points that differ between current and reference error values ({}): {} on x, {} on y.".format(
test_dir, dif_err[0], dif_err[1])
| StarcoderdataPython |
3383100 | from dbkcore.core import BaseObject, trace, Log
from pyspark.sql.dataframe import DataFrame as PyDataFrame
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
from pyspark.sql.utils import AnalysisException
from enum import Enum
# from abc import ABC, abstractmethod
from abc import abstractmethod
import pandas as pd
from pandas import DataFrame as PdDataFrame
from pandas.api.types import is_numeric_dtype
from typing import Union, List
from typeguard import typechecked
from pathlib import Path
import functools as _functools
class DataDirection(str, Enum):
IN = "in"
OUT = "out"
class DataStepDataframe(BaseObject):
# TODO: Change name to name_or_path
def __init__(self, name: str, dataframe: Union[PyDataFrame, PdDataFrame], cache=False):
self.name = name
self.dataframe = dataframe
# Note: it has been removed for spark since the dataframe has to be read
self.__rows = None
self.columns_count = len(dataframe.columns) if isinstance(dataframe, PyDataFrame) else dataframe.shape[1]
self.columns_names = dataframe.columns
self.cache = cache
self.__columns_negative = None
self.__columns_null = None
def to_pandas(self) -> PdDataFrame:
if self.is_pandas:
return self.dataframe
elif self.is_pyspark:
return self.dataframe.toPandas()
@property
def rows(self):
if not self.__rows:
# self.__rows = self.dataframe.cache().count() if self.cache else dataframe.count()
# TODO: improve me
if self.cache:
self.__rows = self.dataframe.cache().count() if isinstance(self.dataframe, PyDataFrame) else self.dataframe.shape[0]
else:
self.__rows = self.dataframe.count() if isinstance(self.dataframe, PyDataFrame) else self.dataframe.shape[0]
return self.__rows
@trace
def columns_negative(self) -> List[str]:
"""
Identifies the columns with negative values
Returns
-------
List[str]
Column names
"""
columns = []
if not self.__columns_negative:
if isinstance(self.dataframe, PyDataFrame):
for column in self.columns_names:
count = 0
try:
count = self.dataframe.filter((F.col(column) < 0)).count()
except AnalysisException:
pass
if count > 0:
columns.append(column)
elif isinstance(self.dataframe, pd.DataFrame):
for column in self.columns_names:
if is_numeric_dtype(self.dataframe[column]):
dt_filtered = self.dataframe[self.dataframe[column] < 0]
count = dt_filtered.shape[0]
if count > 0:
columns.append(column)
self.__columns_negative = columns
return self.__columns_negative
@trace
def columns_null(self) -> List[str]:
"""
Identifies the columns with null values
Returns
-------
List[str]
Column names
"""
if not self.__columns_null:
columns = []
if isinstance(self.dataframe, PyDataFrame):
for column in self.columns_names:
count = self.dataframe.filter(F.col(column).isNull()).count()
if count > 0:
columns.append(column)
elif isinstance(self.dataframe, pd.DataFrame):
nan_cols = self.dataframe.columns[self.dataframe.isna().any()].tolist()
columns.extend(nan_cols)
self.__columns_null = columns
return self.__columns_null
@property
def is_pandas(self) -> bool:
return isinstance(self.dataframe, PdDataFrame)
@property
def is_pyspark(self) -> bool:
return isinstance(self.dataframe, PyDataFrame)
def log_in(self):
self.log(direction=DataDirection.IN)
def log_out(self):
self.log(direction=DataDirection.OUT)
def log(self, direction: DataDirection):
dt_name = self.name
# dt_tag_prefix = "DT:{}".format(direction.upper(), dt_name)
# dt_name_tag = "{}:NAME".format(dt_tag_prefix)
# dt_rows_tag = "{}:ROWS:COUNT".format(dt_tag_prefix)
# if isinstance(self.dataframe, PyDataFrame):
# dt_rows = self.dataframe.count()
# elif isinstance(self.dataframe, PdDataFrame):
# dt_rows = self.dataframe.shape[0]
# dt_columns_tag = "{}:COLUMNS:COUNT".format(dt_tag_prefix)
# if isinstance(self.dataframe, PyDataFrame):
# dt_columns = len(self.dataframe.columns)
# elif isinstance(self.dataframe, PdDataFrame):
# dt_columns = self.dataframe.shape[1]
# dt_columns_names_tag = "{}:COLUMNS:NAMES".format(dt_tag_prefix)
# dt_columns_names = ', '.join(self.dataframe.columns)
# Log.get_instance().log_info(f"{dt_name_tag}:{dt_name}", prefix=direction, custom_dimension=dimensions)
# Log.get_instance().log_info(f"{dt_rows_tag}:{dt_rows}", prefix=direction)
# Log.get_instance().log_info(f"{dt_columns_tag}:{dt_columns}", prefix=direction)
# Log.get_instance().log_info(f"{dt_columns_names_tag}:{dt_columns_names}", prefix=direction)
dimensions = {
'dataset_name': dt_name,
'rows_count': self.rows,
'columns_count': self.columns_count,
'columns_name': self.columns_names,
'direction': direction
}
Log.get_instance().log_info(f"Processed dataset {dt_name} with {direction.upper()} direction", custom_dimension=dimensions)
def apply_test(func):
"""
Execute test function after the initialize.
Notes
-----
[Example](https://stackoverflow.com/a/15196410)
"""
@_functools.wraps(func)
def wrapper(self, *args, **kwargs):
res = func(self, *args, **kwargs)
self.tests()
return res
return wrapper
def pre_apply_test(func):
"""
Execute test function before the initialize.
Notes
-----
[Example](https://stackoverflow.com/a/15196410)
"""
@_functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.tests()
res = func(self, *args, **kwargs)
return res
return wrapper
def log_output(func):
"""
Decorator for executing test in sequence
Notes
-----
[Example](https://stackoverflow.com/a/15196410)
"""
@_functools.wraps(func)
def wrapper(self, *args, **kwargs):
res = func(self, *args, **kwargs)
self.tests()
return res
return wrapper
# TODO: if works, remove ABC class
# class DataStep(ABC):
@typechecked
class DataStep():
"""
Creates a datastep to be used in a pipeline
Parameters
----------
metaclass : [type], optional
[description], by default abc.ABCMeta
Raises
------
Exception
[description]
"""
@trace
def __init__(
self,
spark: SparkSession,
run_id: str
):
self.spark = spark
self.run_id = run_id
@property
def display_name(self) -> str:
res = type(self).__name__
return res
@trace
def spark_read_table(self, name: str) -> DataStepDataframe:
dt = self.spark.read.table(name)
datastep_dataframe = DataStepDataframe(name=name, dataframe=dt)
datastep_dataframe.log(DataDirection.IN)
return datastep_dataframe
@trace
def spark_read_temp_table(self, name: str) -> DataStepDataframe:
global_temp_db = self.spark.conf.get(f"spark.sql.globalTempDatabase")
dt = self.spark.read.table(f'{global_temp_db}.{name}')
datastep_dataframe = DataStepDataframe(name=name, dataframe=dt)
datastep_dataframe.log_in()
return datastep_dataframe
@trace
def spark_read_parquet_path(self, path: Path, cache=False) -> DataStepDataframe:
path_str = str(path)
dt = self.spark.read.parquet(path_str)
datastep_dataframe = DataStepDataframe(name=path_str, dataframe=dt, cache=cache)
datastep_dataframe.log_in()
return datastep_dataframe
@trace
def pandas_read_csv(self, path: Path) -> DataStepDataframe:
datastep_dataframe = self.spark_read_csv(path)
datastep_dataframe.dataframe = datastep_dataframe.dataframe.toPandas()
datastep_dataframe.log_in()
return datastep_dataframe
@trace
def spark_read_csv(self, path: Path) -> DataStepDataframe:
path_str = str(path)
dt = self.spark.read.format("csv").\
option("inferSchema", "true").\
option("header", "true").\
option("delimiter", ",").\
option("charset", "utf-8").load(path_str)
datastep_dataframe = DataStepDataframe(name=path_str, dataframe=dt)
datastep_dataframe.log_in()
return datastep_dataframe
@trace
def test_rows_leq(self, dt_1: DataStepDataframe, dt_2: DataStepDataframe):
        assert dt_1.rows <= dt_2.rows,\
            "ROWS CHECK: {dt_1_name} ({dt_1_rows}) <= {dt_2_name} ({dt_2_rows})".format(
dt_1_name=dt_1.name,
dt_1_rows=dt_1.rows,
dt_2_name=dt_2.name,
dt_2_rows=dt_2.rows)
print("Asserted")
@trace
def test_rows_eq(self, dt_1: DataStepDataframe, dt_2: DataStepDataframe):
assert dt_1.rows == dt_2.rows,\
"ROWS CHECK: {dt_1_name} ({dt_1_rows}) == {dt_2_name} ({dt_2_rows})".format(
dt_1_name=dt_1.name,
dt_1_rows=dt_1.rows,
dt_2_name=dt_2.name,
dt_2_rows=dt_2.rows)
print("Asserted")
@trace
def test_rows_geq(self, dt_1: DataStepDataframe, dt_2: DataStepDataframe):
assert dt_1.rows >= dt_2.rows,\
"ROWS CHECK: {dt_1_name} ({dt_1_rows}) >= {dt_2_name} ({dt_2_rows})".format(
dt_1_name=dt_1.name,
dt_1_rows=dt_1.rows,
dt_2_name=dt_2.name,
dt_2_rows=dt_2.rows)
print("Asserted")
@trace
def test_rows_diff(self, dt_1: DataStepDataframe, dt_2: DataStepDataframe):
assert dt_1.rows != dt_2.rows,\
"ROWS CHECK: {dt_1_name} ({dt_1_rows}) >= {dt_2_name} ({dt_2_rows})".format(
dt_1_name=dt_1.name,
dt_1_rows=dt_1.rows,
dt_2_name=dt_2.name,
dt_2_rows=dt_2.rows)
print("Asserted")
@trace
def test_negative_values(self, cols: List[str], dt: DataStepDataframe):
for col in cols:
assert col not in dt.columns_negative(), f"Dataset {dt.name} has columns with negative values -> {col}"
@trace
def test_null_values(self, cols: List[str], dt: DataStepDataframe):
for col in cols:
assert col not in dt.columns_null(), f"Dataset {dt.name} has columns with null values -> {col}"
@trace
def test_is_dataframe_empty(self, df: PyDataFrame):
count = df.count()
assert count > 0, "the dataframe count is zero"
@property
def output_data(self) -> DataStepDataframe:
return self.__output_data
@trace
def check_output(self, **kwargs):
if kwargs:
for key, value in kwargs.items():
if isinstance(value, (PyDataFrame, PdDataFrame)):
msg = "Output Pandas or PySpark dataframe must be encapsulated into DataStepDataframe"
Log.get_instance().log_error(msg)
raise ValueError(msg)
elif isinstance(value, DataStepDataframe):
value.log(direction=DataDirection.OUT)
else:
Log.get_instance().log_info(f'{key}:{value}')
# setattr(self, key, value)
@trace
def set_output_data(self, dataframe: Union[PyDataFrame, PdDataFrame], name='', cache: bool = False):
name = self.display_name if not name else name
self.__output_data = DataStepDataframe(
name=name,
dataframe=dataframe,
cache=cache)
self.__output_data.log_out()
@trace
@abstractmethod
def initialize(self):
"""
Define the DataStep logic.
"""
pass
@trace
@abstractmethod
def tests(self):
"""
Define all the the tests that this step must pass
"""
pass
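# ---------------------------------------------------------------------------
# (Added sketch) One way a concrete step could subclass DataStep. The table
# name, column name and run id are assumptions for illustration only, and the
# snippet is left commented out so importing this module stays side-effect free.
#
# class CleanOrders(DataStep):
#     @apply_test
#     def initialize(self):
#         orders = self.spark_read_table("raw_orders")         # logged with IN direction
#         cleaned = orders.dataframe.dropna()
#         self.set_output_data(cleaned, name="orders_clean")   # logged with OUT direction
#
#     def tests(self):
#         self.test_null_values(cols=["order_id"], dt=self.output_data)
#         self.test_is_dataframe_empty(self.output_data.dataframe)
#
# step = CleanOrders(spark=SparkSession.builder.getOrCreate(), run_id="demo-run")
# step.initialize()   # @apply_test runs tests() right after the step logic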
| StarcoderdataPython |
162997 | <reponame>DirkyJerky/Uni<gh_stars>0
import numpy as np
from matplotlib import pyplot as plt
T=60 # final simulation time
N=100 # Step count
h=T/N # Step size
beta = 0.5 # S->I growth
gamma = 0.25 # I->R growth
rho1 = 0.01 # Group 1->2 growth
rho2 = 0.01 # Group 2->1 growth
Phi = np.zeros((N+1,6)) # Array for storing all step results
t = np.linspace(0,T,N+1) # All t values
Phi[0]=np.array([.95,.05,0,1,0,0]) # Initial value
S1, I1, R1, S2, I2, R2 = Phi[0] # ...
def f(t,Phi): # f(t,y) = y'
S1, I1, R1, S2, I2, R2 = Phi # Extract pre-step values
f = np.zeros(6) # Output array
f[0] = -beta*S1*I1-rho1*S1+rho2*S2 # New S1
f[1] = beta*S1*I1-gamma*I1-rho1*I1+rho2*I2 # New I1
f[2] = gamma*I1-rho1*R1+rho2*R2 # New R1
f[3] = -beta*S2*I2-rho2*S2+rho1*S1 # New S2
f[4] = beta*S2*I2-gamma*I2-rho2*I2+rho1*I1 # New I2
f[5] = gamma*I2-rho2*R2+rho1*R1 # New R2
return f # Return new values
for j in np.arange(0,N): # For each step
k1 = f(t[j],Phi[j]) # Compute k1
k2 = f(t[j]+h/2,Phi[j]+h/2*k1) # Compute k2
k3 = f(t[j]+h/2,Phi[j]+h/2*k2) # Compute k3
k4 = f(t[j]+h,Phi[j]+h*k3) # Compute k4
k = (k1+2*k2+2*k3+k4)/6 # Weighted average of ks
Phi[j+1] = Phi[j] + h*k # Perform the step
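# (Added sketch) Sanity check on the integration: the ODE only moves population
# between compartments and between the two groups, so the grand total across all
# six compartments should stay constant; the tolerance below is an assumption.
totals = Phi.sum(axis=1)                            # Total population at each step
assert np.allclose(totals, totals[0], atol=1e-8), "population total not conserved"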
plt.figure() # Start a plot
plt.plot(t,Phi[:,0],'-',linewidth=1) # Plot S1
plt.plot(t,Phi[:,1],'-',linewidth=1) # Plot I1
plt.plot(t,Phi[:,2],'-',linewidth=1) # Plot R1
plt.plot(t,Phi[:,3],'--',linewidth=1) # Plot S2
plt.plot(t,Phi[:,4],'--',linewidth=1) # Plot I2
plt.plot(t,Phi[:,5],'--',linewidth=1) # Plot R2
plt.legend(['S_1','I_1','R_1','S_2','I_2','R_2']) # Label each plot accordingly
plt.xlabel('t') # Label x axis
plt.show()                                          # Finish and show the plot
| StarcoderdataPython |
194473 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, sys
from frappe.modules import load_doctype_module
from frappe.utils.nestedset import rebuild_tree
from frappe.utils import update_progress_bar
import statics, render
all_routes = None
def sync(app=None, verbose=False):
global all_routes
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
render.clear_cache()
all_routes = frappe.db.sql_list("select name from `tabWebsite Route`")
# pages
pages = []
for app in apps:
pages += get_sync_pages(app)
sync_pages(pages)
# sync statics (make generators)
statics_sync = statics.sync(verbose=verbose)
statics_sync.start()
# generators
generators = []
for app in apps:
generators += get_sync_generators(app)
sync_generators(generators)
# delete remaining routes
for r in all_routes:
frappe.delete_doc("Website Route", r, force=True)
def sync_pages(routes):
global all_routes
l = len(routes)
if l:
for i, r in enumerate(routes):
r.autoname()
if frappe.db.exists("Website Route", r.name):
route = frappe.get_doc("Website Route", r.name)
for key in ("page_title", "controller", "template"):
route.set(key, r.get(key))
route.save(ignore_permissions=True)
else:
r.insert(ignore_permissions=True)
if r.name in all_routes:
all_routes.remove(r.name)
update_progress_bar("Updating Pages", i, l)
print ""
def sync_generators(generators):
global all_routes
l = len(generators)
if l:
frappe.flags.in_sync_website = True
for i, g in enumerate(generators):
doc = frappe.get_doc(g[0], g[1])
doc.update_sitemap()
route = doc.get_route()
if route in all_routes:
all_routes.remove(route)
update_progress_bar("Updating Generators", i, l)
sys.stdout.flush()
frappe.flags.in_sync_website = False
rebuild_tree("Website Route", "parent_website_route")
# HACK! update public_read, public_write
for name in frappe.db.sql_list("""select name from `tabWebsite Route` where ifnull(parent_website_route, '')!=''
order by lft"""):
route = frappe.get_doc("Website Route", name)
route.make_private_if_parent_is_private()
route.db_update()
print ""
def get_sync_pages(app):
app_path = frappe.get_app_path(app)
pages = []
path = os.path.join(app_path, "templates", "pages")
if os.path.exists(path):
for fname in os.listdir(path):
fname = frappe.utils.cstr(fname)
page_name, extn = fname.rsplit(".", 1)
if extn in ("html", "xml", "js", "css"):
route_page_name = page_name if extn=="html" else fname
# add website route
route = frappe.new_doc("Website Route")
route.page_or_generator = "Page"
route.template = os.path.relpath(os.path.join(path, fname), app_path)
route.page_name = route_page_name
route.public_read = 1
controller_path = os.path.join(path, page_name + ".py")
if os.path.exists(controller_path):
controller = app + "." + os.path.relpath(controller_path,
app_path).replace(os.path.sep, ".")[:-3]
route.controller = controller
try:
route.page_title = frappe.get_attr(controller + "." + "page_title")
except AttributeError:
pass
pages.append(route)
return pages
def get_sync_generators(app):
generators = []
for doctype in frappe.get_hooks("website_generators", app_name = app):
condition, order_by = "", "name asc"
module = load_doctype_module(doctype)
if hasattr(module, "condition_field"):
condition = " where ifnull({0}, 0)=1 ".format(module.condition_field)
if hasattr(module, "order_by"):
order_by = module.order_by
for name in frappe.db.sql_list("select name from `tab{0}` {1} order by {2}".format(doctype,
condition, order_by)):
generators.append((doctype, name))
return generators
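# (Added sketch) Typical invocation -- assumed, not part of this module; it expects
# the usual frappe.init()/frappe.connect() site setup to have been done first:
#   sync(app="frappe", verbose=True)   # rebuild Website Route records for one app
#   sync()                             # or sync every installed app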
| StarcoderdataPython |
4724 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright © 2012-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the “License”); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS, without
# warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Helper function that gets certificates from VMWare Certificate Authority
# More details. If this module can be used as a main program, include usage information.
""" certool.py : This is the standard library function for
cloudVM/vcenterwindows first boot to integrate with
VMCA Certificate Generation.
if not running under a cloudVM, then it is assumed that
the OS.Environment has the following defined.
VMWARE_SKIP_VISL = True
system.urlhostname
vmdir.ldu-guid
system.hostname.type
vmca.cert.password
vmca.cert.dir
"""
__copyright__ = "Copyright 2012, VMware Inc."
__version__ = 0.1
__author__ = "VMware, Inc."
import logging
import os
import subprocess
class CerTool:
__vislInstall__ = ""
__systemUrlHostname__ = ""
__systemHosttype__ = ""
__vmcaPassword__ = ""
__vmcaCertPath__ = ""
__skipInstallParams__ = False
__certfileName__ = ""
__privateKeyFileName__ = ""
__publicKeyFileName__ = ""
__pfxFileName__ = ""
def __init__(self):
self.FindEnvParams()
self.GetVislParams()
def GetHostName(self):
return self.__systemUrlHostname__
def GetHostType(self):
return self.__systemHosttype__
def GetPassword(self):
return self.__vmcaPassword__
def GetCertDir(self):
return self.__vmcaCertPath__
def GetCertFileName(self):
return self.__certfileName__
def GetPrivateKeyFileName(self):
return self.__privateKeyFile__
def GetPublicKeyFileName(self):
return self.__publicKeyFile__
def GetPfxFileName(self):
return self.__pfxFileName__
def GenCert(self, componentName):
""" Generates the Certificates in the Cert directory"""
# Generate full file names for all artifacts
self.__certfileName__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".crt")
logging.debug("cert File Name : " + self.GetCertFileName())
self.__privateKeyFile__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".priv")
logging.debug("Private Key Name : " + self.GetPrivateKeyFileName())
self.__publicKeyFile__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".pub")
logging.debug("Public Key Name : " + self.GetPublicKeyFileName())
self.__pfxFileName__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".pfx")
logging.debug("pfx file Name : " + self.GetPfxFileName())
dir = os.path.join(self.GetCertDir(),componentName)
logging.debug("Target Dir : " + dir)
try:
if not os.path.exists(dir):
os.makedirs(dir)
logging.debug("Created directory")
except OSError as e:
raise Exception("I/O error({0}): {1}".format(e.errno, e.strerror))
# Generate Private Key and Public Keys First
cmd = [self.GetCertToolPath(),
'--genkey',
'--priv=' + self.GetPrivateKeyFileName(),
'--pub=' + self.GetPublicKeyFileName()]
output = self.RunCmd(cmd)
logging.info(output)
cmd = [self.GetCertToolPath(),
'--genCIScert',
'--priv=' + self.GetPrivateKeyFileName(),
'--cert=' + self.GetCertFileName(),
'--Name=' + componentName]
# if we know the host name, put that into the certificate
if (self.GetHostType() == 'fqdn'):
cmd.append('--FQDN=' + self.GetHostName())
# elif (self.GetHostType() == 'ipv4'):
# # Possible TODO : support IPv4 in certificates
# elif (self.GetHostType() == 'ipv6'):
# # Possible TODO : support IPv6 in certificates
output = self.RunCmd(cmd)
logging.info(output)
# TODO : Replace this with certool PKCS12 capabilities
cmd = [self.GetOpenSSLPath(),
'pkcs12',
'-export',
'-in',
self.GetCertFileName(),
'-inkey',
self.GetPrivateKeyFileName(),
'-out',
self.GetPfxFileName(),
'-name',
componentName,
'-passout',
'pass:' + self.GetPassword()]
output = self.RunCmd(cmd)
logging.info(output)
def FindEnvParams(self):
""" Finds the Default Environment parameters. if you are
not running inside the cloudVM, set VMWARE_SKIP_VISL = True
in your environment. This will enable this script to look
for values in the env. block instead of VISL namespace."""
# Find VISL Install Parameter
INSTALL_PARAM_ENV_VAR = 'VMWARE_INSTALL_PARAMETER'
VMWARE_SKIP_VISL = 'VMWARE_SKIP_VISL'
if INSTALL_PARAM_ENV_VAR in os.environ:
self.__vislInstall__ = os.environ[INSTALL_PARAM_ENV_VAR]
if VMWARE_SKIP_VISL in os.environ:
skip = os.environ[VMWARE_SKIP_VISL]
if (skip in ['true', 'True', 'yes', '1', 'skip']):
self.__skipInstallParams__ = True
if (not self.__vislInstall__ and self.__skipInstallParams__ is False):
errString = 'Unable to find install param script'
logging.error(errString)
raise Exception(errString)
logging.debug('Using install param script : ' + self.__vislInstall__)
def GetInstallParams(self, key):
""" Waits on Install Parameter to return the value from visl.
Or if the VMWARE_SKIP_VISL = True, then reads the value from
the os environment"""
if (self.__skipInstallParams__ is False):
cmd = [self.__vislInstall__, '-d', key]
output = self.RunCmd(cmd)
logging.debug('Install param found :' + output)
return output
else:
            if key in os.environ:
param = os.environ[key]
logging.debug('Env. param found : ' + param)
return param
else:
raise Exception('Requested Value not found in Env : ' + key)
def RunCmd(self, args):
""" Runs a given command"""
logging.info('running %s' % args)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
        # communicate() waits for the process to exit and populates returncode
        output = p.communicate()[0]
        if p.returncode:
            raise Exception('Failed to execute last cmd')
        return output.rstrip()
def GetVislParams(self):
""" Waits for all VISL parameters that VMCA certool needs"""
INSTALL_PARAM_SYSTEM_URL_HOSTNAME = "system.urlhostname"
INSTALL_PARAM_LDU_GUID = "vmdir.ldu-guid"
INSTALL_PARAM_SYSTEM_HOST_TYPE = "system.hostname.type"
INSTALL_PARAM_PASSWORD = "<PASSWORD>"
INSTALL_PARAM_CERT_DIR = "vmca.cert.dir"
# Please note that each of this is a blocking call.
# VISL will wait until these value are populated by the
# appropriate Script
self.__systemUrlHostname__ = \
self.GetInstallParams(INSTALL_PARAM_SYSTEM_URL_HOSTNAME)
self.__systemHosttype__ = \
self.GetInstallParams(INSTALL_PARAM_SYSTEM_HOST_TYPE)
self.__vmcaPassword__ = \
self.GetInstallParams(INSTALL_PARAM_PASSWORD)
self.__vmcaCertPath__ = \
self.GetInstallParams(INSTALL_PARAM_CERT_DIR)
# We really don't need this value,
# it is a technique on waiting for directory
# first boot to finish.
discardldu = self.GetInstallParams(INSTALL_PARAM_LDU_GUID)
def GetCertToolPath(self):
"""returns the path to certool"""
#TODO : Publish Certool Path from VMCA First Boot
if(os.name == "nt"):
PROGRAM_FILES = os.environ['PROGRAMFILES']
return os.path.normpath(PROGRAM_FILES +
'/VMware/CIS/Vmcad/certool.exe')
elif (os.name == 'posix'):
return '/opt/vmware/bin/certool'
def GetOpenSSLPath(self):
if(os.name == "nt"):
PROGRAM_FILES = os.environ['PROGRAMFILES']
return os.path.normpath(PROGRAM_FILES +
'/VMware/CIS/OpenSSL/openssl.exe')
elif (os.name == 'posix'):
return '/usr/lib/vmware-openSSL/openssl'
def main():
""" Example Code Usage """
testComponent = 'sso'
VmcaCertool = CerTool()
VmcaCertool.GenCert(testComponent)
print 'Generated a pfx file : %s' % VmcaCertool.GetPfxFileName()
print 'Using Password : %s' % VmcaCertool.GetPassword()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1774744 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import pi
import funcs as f
from importlib import reload
import dataframe_cleaner
import radar1_class
reload(radar1_class)
from radar1_class import Radar1
import unidecode
from unidecode import unidecode
#final list of columns to choose from
cols2 = ['Player', 'Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
'Crosses', 'Fouled', 'mis_cont', 'AerialsWon', 'Tackles', 'Inter',
'Fouls', 'Clear', 'Blocks']
off_cats = ['Player', 'Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
'Crosses', 'Fouled', 'mis_cont']
cats = ['Player', 'Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
'Fouled', 'mis_cont']
#Defensive currently has 13, need to take out 1 of them
center_def_cats = ['Player', 'Rating', 'AvgP','PS%', 'mis_cont', 'AerialsWon',
'Tackles', 'Inter', 'Fouls', 'Clear', 'Blocks']
outside_def_cats = ['Player', 'Rating', 'Assists', 'Drb','PS%', 'Crosses',
'mis_cont', 'Tackles', 'Inter', 'Fouls', 'Clear', 'Blocks']
#-------------------------------------------------------------------------------
#Need imported clean DF
df = pd.read_csv('/Users/alexdeckwork/Galvanize/Galvrepos/soccer-proj/data/top5.csv')
from dataframe_cleaner import df_cleaner
df = df_cleaner(df)
from funcs import league_seperator
Bundesliga, Prem, La_liga, Ligue_1, Serie_a = league_seperator(df)
from funcs import accent_stripper
Bundesliga['Player'] = f.accent_stripper(Bundesliga['Player'].values)
#--------------------------------------
#Sending in the Premier League to the scaler
from funcs import DF_Scaler
scaled_df = DF_Scaler(Ligue_1)
Ligue_1[Ligue_1['Team']=='Paris Saint-Germain']
#--------------------------------------
chart_prep = scaled_df[off_cats]
#--------------------------------------
Prem[Prem['Player']=='<NAME>']
#Playmakers to radar plot
Mbappe = chart_prep.loc[2343].values
Cavani = chart_prep.loc[2336].values
Neymar = chart_prep.loc[2350].values
Pepe = chart_prep.loc[2427].values
#Figure
fig = plt.figure(figsize=(10, 10))
#Name to appear
titles =['Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
'Crosses', 'Fouled', 'mis_cont']
#Numerical labels to be displayed along each axis
labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,27,6),2),
np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,4.3,6),2),
np.around(np.linspace(0,4.8,6),2), np.around(np.linspace(0,3.2,6),2),
np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,2.8,6),2),
np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,8.5,6),2)]
radar = Radar1(fig, titles, labels)
radar.plot(Mbappe[1:], '-', lw=5, color='r', alpha=0.4, label=Mbappe[0])
radar.plot(Neymar[1:], '-', lw=5, color='b', alpha=0.4, label=Neymar[0])
radar.plot(Cavani[1:], '-', lw=5, color='g', alpha=0.4, label=Cavani[0])
#radar.plot(Pepe[1:], '-', lw=5, color='m', alpha=0.4, label=Pepe[0])
radar.ax.legend()
fig.suptitle('PSG Front Three', fontsize=22)
fig.savefig('PSG_Front_Three.png')
| StarcoderdataPython |
54355 | import argparse
from PIL import Image
import numpy as np
import onnxruntime as rt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="StyleTransferONNX")
parser.add_argument('--model', type=str, default=' ', help='ONNX model file', required=True)
parser.add_argument('--input', type=str, default=' ', help='Input image', required=True)
parser.add_argument('--output', type=str, default=' ', help='learning rate',required=True)
args = parser.parse_args()
    # Load the ONNX model and inspect its input/output metadata
    session = rt.InferenceSession(args.model)
    inputH = session.get_inputs()
    outputH = session.get_outputs()
    # Read the input image and convert it to a 1x3xHxW float32 tensor
    img = Image.open(args.input)
    print('img dim: ', img.width, ' ', img.height)
    inputArray = np.asarray(img)
    inputArray = inputArray.astype(np.float32)
    inputArray = inputArray.transpose([2, 0, 1])
    np.clip(inputArray, 0, 255, out=inputArray)
    inputArray = inputArray.reshape((1, 3, img.height, img.width))
    # Run inference and convert the first output back to an HxWx3 uint8 image
    output_res = session.run(None, {inputH[0].name: inputArray})
    output_img = output_res[0].reshape(3, output_res[0].shape[2], output_res[0].shape[3])
    output_img = output_img.transpose([1, 2, 0])
    output_img = output_img.astype(np.uint8)
    output = Image.fromarray(output_img)
    output.save(args.output)
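# (Added note) Example invocation; the script and model file names are assumptions,
# and the model is expected to take RGB input in the 0-255 range:
#   python style_transfer_onnx.py --model mosaic.onnx --input content.jpg --output stylized.jpg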
| StarcoderdataPython |
1732916 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
def train_knn(X, Y):
"""Trains a K-nearest-neighbors classifier on data X and labels Y, and returns the classifier"""
X = np.array(X)
Y = np.array(Y)
x_train, x_test, y_train, y_test = train_test_split(X,Y, train_size=0.625)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
clf = KNeighborsClassifier(n_neighbors = 1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(accuracy_score(y_test, y_pred))
return clf
def train_forest(X, Y):
"""Trains a Random Forest classifier on data X and labels Y, and returns the classifier"""
X = np.array(X)
Y = np.array(Y)
x_train, x_test, y_train, y_test = train_test_split(X,Y, train_size=0.625)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
clf = RandomForestClassifier(n_estimators=50)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(accuracy_score(y_test, y_pred))
return clf
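if __name__ == '__main__':
    # (Added sketch) Toy usage on synthetic data -- the data and labels below are
    # assumptions used only to demonstrate the two helpers.
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 4)                     # 200 samples, 4 features
    Y_demo = (X_demo[:, 0] > 0.5).astype(int)     # simple separable labels
    knn_clf = train_knn(X_demo, Y_demo)           # prints hold-out accuracy
    forest_clf = train_forest(X_demo, Y_demo)     # prints hold-out accuracy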
| StarcoderdataPython |