id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
11391681
|
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Lookout for Equipment"
prefix = "lookoutequipment"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateDataset = Action("CreateDataset")
CreateInferenceScheduler = Action("CreateInferenceScheduler")
CreateModel = Action("CreateModel")
DeleteDataset = Action("DeleteDataset")
DeleteInferenceScheduler = Action("DeleteInferenceScheduler")
DeleteModel = Action("DeleteModel")
DescribeDataIngestionJob = Action("DescribeDataIngestionJob")
DescribeDataset = Action("DescribeDataset")
DescribeInferenceScheduler = Action("DescribeInferenceScheduler")
DescribeModel = Action("DescribeModel")
ListDataIngestionJobs = Action("ListDataIngestionJobs")
ListDatasets = Action("ListDatasets")
ListInferenceExecutions = Action("ListInferenceExecutions")
ListInferenceSchedulers = Action("ListInferenceSchedulers")
ListModels = Action("ListModels")
ListTagsForResource = Action("ListTagsForResource")
StartDataIngestionJob = Action("StartDataIngestionJob")
StartInferenceScheduler = Action("StartInferenceScheduler")
StopInferenceScheduler = Action("StopInferenceScheduler")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateInferenceScheduler = Action("UpdateInferenceScheduler")
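# Illustrative usage (a sketch, not part of this module): each Action above is
# meant to render as the string "<prefix>:<ActionName>", e.g.
# "lookoutequipment:CreateDataset". Assuming the awacs-style JSONrepr()
# convention for that rendering, a minimal IAM policy statement could look like:
#
#   statement = {
#       "Effect": "Allow",
#       "Action": [CreateDataset.JSONrepr(), ListDatasets.JSONrepr()],
#       "Resource": ["*"],
#   }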
|
StarcoderdataPython
|
4897173
|
<reponame>EvgenDEP1/python-adv
numbers = {chr(el) for el in range(ord('0'), ord('9') + 1)}
numbers.update(['.', ','])
required_symbol = {'.'}
def numbers_is_valid(number):
numbers_as_set = set(number)
if not number or numbers_as_set - numbers:
return False
for tochka in required_symbol:
check = number.count(tochka)
if check != 1:
return False
do, posle = number.split('.')
if len(do) < 1 or len(posle) < 1:
return False
return True
assert numbers_is_valid('1.32')
assert not numbers_is_valid('1')
print(numbers_is_valid('1.32'))
print(numbers_is_valid('.'))
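# A few extra checks (added for illustration) that exercise the edge cases above:
# the comma is an accepted character, but exactly one '.' is still required, and
# both the integer and fractional parts must be non-empty.
assert not numbers_is_valid('')
assert not numbers_is_valid('1,32')
assert not numbers_is_valid('1..2')
assert numbers_is_valid('0.5')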
|
StarcoderdataPython
|
9629912
|
import os
from datetime import timedelta
import django
from django.core.exceptions import ValidationError
from celery import Celery
django.setup()
from db.models import FeedSource
from lib.main import create_new_feed
from utils.parse import parse_new_feeds
celery = Celery(
__name__,
broker=os.environ.get(
"TASKS_QUEUE_HOST",
"amqp://guest:guest@localhost:5672//"
),
)
celery.config_from_object(__name__)
@celery.task
def get_new_feeds():
""" Scheduled tasks to fetch new feeds
"""
feed_sources = FeedSource.objects.filter(status=True)
for source in feed_sources:
retry_limit = 0
status = False
while retry_limit < 3 and not status:
response = parse_new_feeds(source.link)
if response["status"]:
status = True
retry_limit += 1
if response["status"]:
for feed in response["feeds"]:
if retry_limit < 3:
try:
create_new_feed(feed, source)
except ValidationError:
retry_limit += 1
return
CELERYBEAT_SCHEDULE = {
'get_new_feeds': {
'task': 'utils.tasks.get_new_feeds',
'schedule': timedelta(seconds=10),
},
}
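# A minimal way to run this module (an assumption based on the task name
# 'utils.tasks.get_new_feeds', i.e. that this file lives at utils/tasks.py):
#
#   celery -A utils.tasks worker --loglevel=info
#   celery -A utils.tasks beat --loglevel=info
#
# The beat process schedules get_new_feeds every 10 seconds and the worker
# consumes it from the configured broker.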
|
StarcoderdataPython
|
1849005
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created by <NAME>
Interwave Analyzer modelling function
modifications to previous versions must be specified here using WAVE codes:
W-23.05-1.00.0-00
A-01.05-1.00.3-00
V-22.05-1.00.4-00
E-05.05-1.00.4-00
"""
import numpy as np
import math as ma
from internal_module import thermal_stability
def decomposition(L,tau,h, H):
N = 100 # resolution of the refined grid
increment = 3600 # 3600 seconds = 60 minutes = 1 h
k = np.pi/(L) # wave number
# depth classification
a = len(tau)
tau = np.concatenate((tau,[-9999]))
h = np.concatenate((h,[H]))
H = h[-1]
refined_depth = np.linspace(0,H,N)
dz = refined_depth[1] - refined_depth[0]
bv = np.zeros((N),float)
nsq = np.zeros((N),float)
rho, buoy, hmid, glin = thermal_stability(a,h,H,tau)
buoy = np.concatenate((buoy,[np.nan]))
buoy = buoy*60 # = [1/min]
if h[0] < 50: # if the 1st sensor (near the water surface) is not deeper than 50 m
        for i in range(N): # loop over the refined depth grid (N points from 0 to H)
ii = 1 # second index (ii) to evaluate
while h[ii] < refined_depth[i] : #
ii = ii + 1
if buoy[ii-1] > -1:
bv[i] = buoy[ii-1]
else:
if h[ii-1] < 50:
bv[i] = 0.3
else:
bv[i] = 0.08
nsq[i] = bv[i]**2/3600 # buoyancy frequency (N²) - 1/min² to 1/second²
    else: # there is no sensor near the surface (in the first 50 m)
        for i in range(N): # loop over the refined depth grid (N points from 0 to H)
ii = 1 # second index (ii) to evaluate
while h[ii] > refined_depth[i] : #
ii = ii + 1
if buoy[ii-1] > -1:
bv[i] = buoy[ii-1]
else:
if h[ii-1] < 50:
bv[i] = 0.3
else:
bv[i] = 0.08
nsq[i] = bv[i]**2/3600 # buoyancy frequency (N²) - 1/min² to 1/second²
# calculate the first vertical modes w[m,:]
# find the approximated value of p depending on the mode that is defined
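    # The loop below appears to integrate the vertical structure equation for
    # long internal waves,
    #     d2W/dz2 + k^2 * (N^2/omega^2 - 1) * W = 0,   with omega = 2*pi/p,
    # using a second-order central difference, which gives the recursion
    #     W[i] = -W[i-2] + (2 - k^2*dz^2*(N^2*p^2/(2*pi)^2 - 1)) * W[i-1].
    # The candidate period p is increased until W at the last grid point changes
    # sign (a shooting method), and is then refined by interval halving.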
W = np.ones((N),float)
W[0] = 0
W[1] = 1
finnew = 0
p = 0 # internal seiche period
pnew = p
peri = []
conv = []
w = np.zeros((5,N),float)
n = np.zeros((5,N),float)
hor = np.zeros((5,N-1),float)
for m in range(5):
e=0.5
while e >= 0:
for i in range(2,N):
f = 2 - k**2*dz**2*(nsq[i-1]*(p**2)/(2*np.pi)**2 - 1 )
W[i] = -W[i-2] + f * W[i-1]
finold = finnew
finnew = W[-1]
e = finold*finnew
pold = pnew
pnew = p
p = p + increment
if finnew > 0:
randoben = pnew
randunten = pold
else:
randoben = pold
randunten = pnew
finold = finnew
            # halving of the intervals (bisection on the period p)
while abs(randunten-randoben)>0.1:
p=0.5*(randunten+randoben)
for i in range(2,N):
f = 2 - k**2*dz**2*(nsq[i-1]*(p**2)/(2*np.pi)**2 - 1 )
W[i] = -W[i-2] + f * W[i-1]
finnew = W[-1]
if finnew < 0:
randunten = p
else:
randoben = p
normw=np.sqrt(sum((W*nsq)*np.transpose(W)))
for i in range(N):
w[m,i] = W[i]/normw
n[m,i] = np.sqrt(nsq[i])*W[i]/normw
for i in range(N-1):
hor[m,i] = w[m,i+1] - w[m,i]
finnew = finold
peri.append(p/3600) # hour
p = p + increment
conv.append((nsq)*w[m,:])
# hortief = np.zeros((a-1),float)
#
# for i in range(a-1):
# hortief[i] = -1*h[i]
vel = np.transpose(hor)
return vel, conv, refined_depth, peri
def disp_zmodel (pe,ph,he,hh,L,m):
#
# Model Function: 1D non-hydrostatic analytical model for two-layer system
# Returns the period of BSIW modes considering basin length variation
#
gamma = pe/ph
peri_min = biquadratic(L[0],he,hh,gamma,m)
peri_ave = biquadratic(L[1],he,hh,gamma,m)
peri_max = biquadratic(L[2],he,hh,gamma,m)
return peri_min, peri_ave, peri_max
def disp_xmodel3(p1,p2,p3,h1,h2,h3,L,vertical,m):
#
# Model Function: 1D hydrostatic analytical model for three-layer system
# Returns the period of BSIW modes considering basin length variation
#
gamma12 = p1/p2
gamma13 = p1/p3
gamma23 = p2/p3
A = [[h1, h1, h1], [h2*gamma12, h2, h2], [h3*gamma13, h3*gamma23, h3]]
solv = np.linalg.eigvals(A)
pv1_min, pv2_min = eigen3_values(L[0],solv[0],solv[1],m)
pv1_ave, pv2_ave = eigen3_values(L[1],solv[0],solv[1],m)
pv1_max, pv2_max = eigen3_values(L[2],solv[0],solv[1],m)
if(vertical==1):
return pv1_min,pv1_ave,pv1_max
else:
return pv2_min,pv2_ave,pv2_max
def disp_xmodel4(p1,p2,p3,p4,h1,h2,h3,h4,L,vertical,m):
#
# Model Function: 1D hydrostatic analytical model for four-layer system
# Returns the period of BSIW modes considering basin length variation
#
gamma12 = p1/p2
gamma13 = p1/p3
gamma23 = p2/p3
gamma14 = p1/p4
gamma24 = p2/p4
gamma34 = p3/p4
A = [[h1, h1, h1, h1], \
[h2*gamma12, h2, h2, h2],\
[h3*gamma13, h3*gamma23, h3, h3], \
[h4*gamma14, h4*gamma24, h4*gamma34, h4]]
solv = np.linalg.eigvals(A)
#sorted(solv)
pv1_min, pv2_min, pv3_min = eigen4_values(L[0],solv[0],solv[1],solv[2],m)
pv1_ave, pv2_ave, pv3_ave = eigen4_values(L[1],solv[0],solv[1],solv[2],m)
pv1_max, pv2_max, pv3_max = eigen4_values(L[2],solv[0],solv[1],solv[2],m)
if(vertical==1):
return pv1_min,pv1_ave,pv1_max
if(vertical==2):
return pv2_min,pv2_ave,pv2_max
else:
return pv3_min,pv3_ave,pv3_max
def coriolis_effect(fo,to):
#
# Model Function: Correction model to account Coriolis effect
# Returns the period of BSIW modes
#
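    # Equivalent closed form: peri = to / sqrt(1 + (to*fo/(2*pi))^2), i.e. rotation
    # shortens the basin-scale internal wave period relative to the non-rotating one.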
aux = 4*np.power(np.pi,2) + np.power(to,2)*np.power(fo,2)
peri = np.sqrt(4*np.power(np.pi,2)*np.power(to,2)/aux)
return peri
def biquadratic(L,he,hh,gamma,m):
#
# Solver Function: Solver for 2-layer model
#
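    # The two-layer dispersion relation solved here is biquadratic in omega,
    #     p*omega^4 + q*omega^2 + r = 0,
    # and the smaller root of the quadratic in omega^2 is taken below, which
    # corresponds to the slower, baroclinic (internal-wave) branch.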
g = 9.81
k = m*np.pi/L
th = np.tanh(k*hh)
te = np.tanh(k*he)
p = (gamma*te*th + 1)/(k*th)
q = -g*(th+te)/th
r = -ma.pow(g,2)*(gamma-1)*k*te
delta = ma.pow(q,2) - 4*p*r
try:
omega = np.sqrt((-q-np.sqrt(delta))/(2*p))
except RuntimeWarning:
return None
peri = 2*np.pi/omega
return peri
def eigen3_values(L,lambv1,lambv2,m):
#
# Solver Function: Solver for 3-layer model
#
g = 9.81 # m/s²
try:
peri_v1 = 2*L/(m*np.sqrt(g*lambv1)) # V1 interfacial period (sec)
except RuntimeWarning:
peri_v1 = None
try:
peri_v2 = 2*L/(m*np.sqrt(g*lambv2)) # V2 interfacial period (sec)
except RuntimeWarning:
peri_v2 = None
return peri_v1, peri_v2
def eigen4_values(L,lambv1,lambv2,lambv3,m):
#
# Solver Function: Solver for 4-layer model
#
g = 9.81 # m/s²
try:
peri_v1 = 2*L/(m*np.sqrt(g*lambv1)) # V1 interfacial period (sec)
except RuntimeWarning:
peri_v1 = None
try:
peri_v2 = 2*L/(m*np.sqrt(g*lambv2)) # V2 interfacial period (sec)
except RuntimeWarning:
peri_v2 = None
try:
peri_v3 = 2*L/(m*np.sqrt(g*lambv3)) # V3 interfacial period (sec)
except RuntimeWarning:
peri_v3 = None
return peri_v1, peri_v2, peri_v3
def sensitivity_2layer(mean,diff,N,pe,ph,he,hh,fetch,typ):
#
# Sensitivity Function: Sensitivity analysis to check period variation
# based on layer thickness and water density changes (two-layer system)
#
x = np.linspace(mean-diff, mean+diff, N)
period = np.zeros((N),float)
for i in range(N):
if typ == 1:
_,per,_ = np.array(disp_zmodel(x[i], ph, he, hh,fetch,1))
elif typ == 2:
_,per,_ = np.array(disp_zmodel(pe, x[i], he, hh,fetch,1))
elif typ == 3:
_,per,_ = np.array(disp_zmodel(pe, ph, x[i], hh,fetch,1))
elif typ == 4:
_,per,_ = np.array(disp_zmodel(pe, ph, he, x[i],fetch,1))
period[i] = per
return x, period/60/60
def sensitivity_3layer(mean,diff,N,p1,p2,p3,h1,h2,h3,fetch,typ):
#
# Sensitivity Function: Sensitivity analysis to check period variation
# based on layer thickness and water density changes (three-layer system)
#
aux = mean - diff
x = np.linspace(aux, mean+diff, N)
period = np.zeros((N),float)
for i in range(N):
if typ == 1:
_,per,_ = np.array(disp_xmodel3(x[i],p2,p3,h1,h2,h3,fetch,2,1))
elif typ == 2:
_,per,_ = np.array(disp_xmodel3(p1,x[i],p3,h1,h2,h3,fetch,2,1))
elif typ == 3:
_,per,_ = np.array(disp_xmodel3(p1,p2,p3,x[i],h2,h3,fetch,2,1))
elif typ == 4:
_,per,_ = np.array(disp_xmodel3(p1,p2,p3,h1,x[i],h3,fetch,2,1))
period[i] = per
return x, period/60/60
def sensitivity_dimension(L, pe,ph, he, hh):
#
# Sensitivity Function: Sensitivity analysis to check period variation
# Combined with variation on layer thickness and water density, as well as,
# on lake length
#
N = 50
g = 9.81
drho = 4.00
ddep = 0.10
mean_rho = np.sqrt(ph/(ph-pe))
mean_dep = np.sqrt((he+hh)/(he*hh))
aux_rho = mean_rho - drho
aux_dep = mean_dep - ddep
xrho = np.linspace(aux_rho, mean_rho+drho, N)
xdep = np.linspace(aux_dep, mean_dep+ddep, N)
yrho = np.zeros((N),float)
ydep = np.zeros((N),float)
Crho = 2*L/np.sqrt(g*he*hh/(he+hh))
Cdep = 2*L/np.sqrt(g*(ph-pe)/ph)
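    # Note: at the mean values, Crho*xrho and Cdep*xdep both reduce to the
    # classical two-layer (Merian-type) internal seiche period
    #     T = 2*L / sqrt( g * (ph - pe)/ph * he*hh/(he + hh) );
    # the two branches below then vary the density factor and the layer-thickness
    # factor separately around that mean.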
for i in range(N):
yrho[i] = Crho*xrho[i]
ydep[i] = Cdep*xdep[i]
return xrho, xdep, yrho/60/60, ydep/60/60 # period in hours
|
StarcoderdataPython
|
98381
|
<filename>chatbot_m.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
import pandas as pd
import stt
import tts
# from DiabloGPT import Chat
from chinese_convo import chinese_chatbot
import gesture
import time
from multiprocessing import Process, Pipe
class Chatbot:
def __init__(self, isActing=True, sLang='en', lLang='en'):
self.listener = stt.Listener()
self.speaker = tts.Speaker()
# self.chat = Chat()
self.isActing = isActing
self.speaker_lang = sLang
self.listener_lang = lLang
def say(self, text, speed=1, generator=False):
        if generator:
            if self.speaker_lang == 'cn':
                self.speaker.speak(chinese_chatbot(text), speed)
            else:
                # English generator path: the GPT chat model is disabled, so fall
                # back to speaking the text directly.
                self.speaker.speak(text, speed)
                # self.chat.raw(text)  # from GPT
                # self.speaker.speak(self.chat.generated_text(), speed)
        else:
            self.speaker.speak(text, speed)
def listen(self):
return self.listener.listens()
def change_speaker_lang(self, lang='en'):
self.speaker.change_lang(lang)
self.speaker_lang = lang
def change_listener_lang(self, lang='en'):
self.listener.change_lang(lang)
self.listener_lang = lang
class Quiz(Chatbot):
def __init__(self, num_words=10, level=1, pos=0):
super().__init__()
self.pos = pos
self.num_words = num_words
self.level = level
        self.word_list = pd.DataFrame()
        self.quiz_list = pd.DataFrame()
def get_words(self):
hsk = pd.read_csv("hsk_csv-master/hsk{}.csv".format(self.level), names=['chinese', 'pinyin', 'english'])
self.word_list = hsk.iloc[self.pos:self.pos + self.num_words, :]
if self.quiz_list.empty:
self.quiz_list = self.word_list
else:
            self.quiz_list = pd.concat([self.quiz_list, self.word_list])
#print(self.word_list.head())
self.pos += self.num_words
def iter_output(self):
for word in self.quiz_list.iterrows():
for el in word:
Quiz.say(self, el)
def init_quiz(self, child_conn):
num = 0
temp_li = []
score = {}
Quiz.get_words(self)
for index, row in self.quiz_list.iterrows():
temp_li.append((row['chinese'], row['english']))
while temp_li or not num:
random.shuffle(temp_li)
for el in temp_li:
res = random.randint(0, 1)
if res:
Quiz.change_speaker_lang(self, 'en')
Quiz.say(self, "Please provide the definition of" + el[1] + "in chinese") # el[0] is in chinese
else:
Quiz.change_speaker_lang(self, 'cn')
Quiz.say(self, "请告诉我英文怎么说 " + el[0])
user_input = Quiz.listen(self)
child_conn.send('stop')
time.sleep(0.2)
if res:
if user_input == el[0]:
                        score[num] = score.get(num, 0) + 1
temp_li.remove(el)
gesture.correct(2)
stop_robot()
else:
gesture.incorrect(2)
stop_robot()
else:
if user_input == el[1]:
                        score[num] = score.get(num, 0) + 1
temp_li.remove(el)
gesture.correct(2)
stop_robot()
else:
gesture.incorrect(2)
stop_robot()
child_conn.send("cont")
time.sleep(0.2)
num += 1
n = 1
        for s in score.values():
Quiz.say(self, "You got a score of {} in #{} test".format(s, n))
if self.isActing:
child_conn.send('stop')
time.sleep(0.2)
gesture.pass_quiz() if s > .8 else gesture.fail_quiz()
child_conn.send('cont')
time.sleep(0.2)
n += 1
Quiz.change_speaker_lang(self, self.speaker_lang)
Quiz.change_listener_lang(self, self.listener_lang)
def get_quiz_info(chatbot, limit):
error_msg = "Invalid input, please try again"
invalid = invalid2 = invalid3 = True
level = 0
num_words = 0
pos = 0
while invalid:
chatbot.say("What is your hsk level?")
temp = chatbot.listen()
print(temp)
try:
if 1 <= int(temp) <= 6:
level = int(temp)
invalid = False
else:
chatbot.say(error_msg)
except ValueError:
chatbot.say(error_msg)
while invalid2:
chatbot.say("How many words would you like to learn a session?")
temp = chatbot.listen()
print(temp)
try:
num_words = int(temp)
if num_words > limit:
chatbot.say(error_msg)
else:
invalid2 = False
except ValueError:
chatbot.say(error_msg)
while invalid3:
chatbot.say("How many words did you leave off at last time?")
temp = chatbot.listen()
print(temp)
try:
pos = int(temp)
invalid3 = False
except ValueError:
chatbot.say(error_msg)
return num_words, level, pos
def main(child_conn):
pi = Chatbot()
pi.say("Hello, welcome back!", 1)
time.sleep(10)
child_conn.send('stop')
time.sleep(0.2)
gesture.correct()
child_conn.send('cont')
time.sleep(0.2)
try:
while True:
text = pi.listen()
print(text)
text = text.lower()
if pi.speaker_lang == 'cn':
if "换成" and "英文" in text:
pi.say("开始说英文啦")
pi.change_speaker_lang('en')
pi.change_listener_lang('en')
pi.say(text, generator=True)
elif "开始" and "测验" in text:
attrs = list(get_quiz_info(pi, 10000))
quizzer = Quiz(attrs[0], attrs[1], attrs[2])
quizzer.init_quiz(child_conn)
pi.say("测验结束")
elif "再见" in text:
pi.say("下次见")
exit()
else:
pi.say(text, generator=True)
else:
if "switch" and "chinese" in text:
pi.say("let's talk in chinese")
pi.change_speaker_lang('cn')
pi.change_listener_lang('cn')
pi.say(text, generator=True)
elif "start" and "quiz" in text:
# bot asks in english, user replies in chinese
attrs = list(get_quiz_info(pi, 10000))
quizzer = Quiz(attrs[0], attrs[1], attrs[2])
quizzer.init_quiz(child_conn)
pi.say("Quiz completed")
elif "bye" in text:
pi.say("see you")
exit()
elif text == "say yes":
child_conn.send('stop')
time.sleep(0.2)
gesture.correct()
child_conn.send('cont')
time.sleep(0.2)
elif text == "say no":
child_conn.send('stop')
time.sleep(0.2)
gesture.incorrect()
child_conn.send('cont')
time.sleep(0.2)
else:
pi.say(text, generator=True)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    main(child_conn)
|
StarcoderdataPython
|
3565502
|
import numpy as np
import math
from mgcpy.independence_tests.utils.transform_matrices import \
transform_matrices
import scipy.io
import os
def power(independence_test, sample_generator, num_samples=100, num_dimensions=1, theta=0, noise=0.0, repeats=1000, alpha=.05, simulation_type=''):
'''
Estimate power
:param independence_test: an object whose class inherits from the Independence_Test abstract class
:type: Object(Independence_Test)
:param sample_generator: a function used to generate simulation from simulations.py with parameters given by the following arguments
- num_samples: default to 100
- num_dimensions: default to 1
- noise: default to 0
:type: function
:param num_samples: the number of samples generated by the simulation
:type: int
:param num_dimensions: the number of dimensions of the samples generated by the simulation
:type: int
:param noise: the noise used in simulation
:type: float
:param repeats: the number of times we generate new samples to estimate the null/alternative distribution
:type: int
:param alpha: the type I error level
:type: float
:param simulation_type: specify simulation when necessary (default to empty string)
:type: string
:return empirical_power: the estimated power
:type: float
'''
# test statistics under the null, used to estimate the cutoff value under the null distribution
test_stats_null = np.zeros(repeats)
# test statistic under the alternative
test_stats_alternative = np.zeros(repeats)
theta = math.radians(theta)
a = [[0 for x in range(2)] for y in range(2)]
a[0][0] = math.cos(theta)
a[0][1] = math.sin(theta)*(-1)
a[1][0] = math.sin(theta)
a[1][1] = math.cos(theta)
a = np.asarray(a)
for rep in range(repeats):
# generate new samples for each iteration
# the if-else block below is for simulations that have a different argument list
# than the general case
if simulation_type == 'sine_16pi':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, period=np.pi*16)
elif simulation_type == 'multi_noise' or simulation_type == 'multi_indept':
matrix_X, matrix_Y = sample_generator(num_samples, num_dimensions)
elif simulation_type == 'ellipse':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, radius=5)
elif simulation_type == 'diamond':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, period=-np.pi/8)
else:
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise)
data_matrix_X = transform_matrices(matrix_X, matrix_Y)[0]
data_matrix_Y = transform_matrices(matrix_X, matrix_Y)[1]
data_matrix_Y = data_matrix_Y[:, np.newaxis]
data_matrix_X = data_matrix_X.T
data_matrix_X = np.dot(data_matrix_X, a)
# permutation test
permuted_y = np.random.permutation(matrix_Y)
test_stats_null[rep], _ = independence_test.test_statistic(
matrix_X, permuted_y)
test_stats_alternative[rep], _ = independence_test.test_statistic(
matrix_X, matrix_Y)
'''
# if the test is pearson, use absolute value of the test statistic
# so the more extreme test statistic is still in a one-sided interval
if independence_test.get_name() == 'pearson':
test_stats_null[rep] = abs(test_stats_null[rep])
test_stats_alternative[rep] = abs(test_stats_alternative[rep])
'''
# the cutoff is determined so that 1-alpha of the test statistics under the null distribution
# is less than the cutoff
cutoff = np.sort(test_stats_null)[math.ceil(repeats*(1-alpha))]
# the proportion of test statistics under the alternative which is no less than the cutoff (in which case
# the null is rejected) is the empirical power
empirical_power = np.where(test_stats_alternative >= cutoff)[
0].shape[0] / repeats
return empirical_power
def power_given_data(independence_test, simulation_type, data_type='dimension', num_samples=100, num_dimensions=1, repeats=1000, alpha=.05):
# test statistics under the null, used to estimate the cutoff value under the null distribution
test_stats_null = np.zeros(repeats)
# test statistic under the alternative
test_stats_alternative = np.zeros(repeats)
# absolute path to the benchmark directory
dir_name = os.path.dirname(__file__)
if data_type == 'dimension':
file_name_prefix = dir_name + \
'/sample_data_power_dimensions/type_{}_dim_{}'.format(
simulation_type, num_dimensions)
else:
file_name_prefix = dir_name + \
'/sample_data_power_sample_sizes/type_{}_size_{}'.format(
simulation_type, num_samples)
all_matrix_X = scipy.io.loadmat(file_name_prefix + '_X.mat')['X']
all_matrix_Y = scipy.io.loadmat(file_name_prefix + '_Y.mat')['Y']
for rep in range(repeats):
matrix_X = all_matrix_X[:, :, rep]
matrix_Y = all_matrix_Y[:, :, rep]
# permutation test
permuted_y = np.random.permutation(matrix_Y)
test_stats_null[rep], _ = independence_test.test_statistic(
matrix_X, permuted_y)
test_stats_alternative[rep], _ = independence_test.test_statistic(
matrix_X, matrix_Y)
'''
# if the test is pearson, use absolute value of the test statistic
# so the more extreme test statistic is still in a one-sided interval
if independence_test.get_name() == 'pearson':
test_stats_null[rep] = abs(test_stats_null[rep])
test_stats_alternative[rep] = abs(test_stats_alternative[rep])
'''
# the cutoff is determined so that 1-alpha of the test statistics under the null distribution
# is less than the cutoff
cutoff = np.sort(test_stats_null)[math.ceil(repeats*(1-alpha))]
# the proportion of test statistics under the alternative which is no less than the cutoff (in which case
# the null is rejected) is the empirical power
empirical_power = np.where(test_stats_alternative >= cutoff)[
0].shape[0] / repeats
return empirical_power
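# Illustrative usage (a sketch; the import paths below are assumptions about the
# mgcpy package layout and are not verified here):
#
#   from mgcpy.independence_tests.dcorr import DCorr
#   from mgcpy.benchmarks.simulations import linear_sim
#   print(power(DCorr(), linear_sim, num_samples=50, repeats=200))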
|
StarcoderdataPython
|
5078697
|
#!/usr/bin/env python3
# Generate an LED gamma-correction table
gamma = 2.8 # Correction factor
max_in = 255
max_out = 255
print("// Format this before using")
print("const GAMMA8: [u8; 256] = [")
for i in range(max_in + 1):
print(str(int(pow(i/max_in, gamma) * max_out + 0.5)) + ", ")
print("];")
|
StarcoderdataPython
|
8115686
|
<gh_stars>0
import math
def get_factor_list(n):
"""
Use trial division to identify the factors of n.
1 is always a factor of any integer so is added at the start.
We only need to check up to n/2, and then add n after the loop.
"""
factors = [1]
for t in range(2, (math.ceil((n / 2) + 1))):
if n % t == 0:
factors.append(t)
factors.append(n)
return factors
def factors(n):
"""
Generator function leveraging the get_factor_list function.
"""
return iter(get_factor_list(n))
if __name__ == '__main__':
test = [100,1000,386945]
for n in test:
print(list(factors(n)))
|
StarcoderdataPython
|
9680569
|
<reponame>ludoo/wpkit
import sys
import logging
from django.db import connection
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django_nose.tools import *
from wpkit.models import Site, DB_PREFIX
logger = logging.getLogger('wpkit.test.models_wp_taxonomy')
class TestWPTaxonomy(TestCase):
@classmethod
def setUpClass(cls):
site = Site.objects.get_default()
cls.models_1 = site.get_blog(1).models
cls.models_3 = site.get_blog(3).models
def test_terms(self):
cursor = connection.cursor()
cursor.execute("select * from wp_terms order by term_order, name")
assert_equals(
tuple(self.models_1.TaxonomyTerm.objects.values_list()),
cursor.fetchall()
)
cursor.execute("select * from wp_3_terms order by name")
assert_equals(
tuple(self.models_3.TaxonomyTerm.objects.values_list()),
cursor.fetchall()
)
def test_taxonomy(self):
_debug, settings.DEBUG = settings.DEBUG, True
settings.DEBUG = True
cursor = connection.cursor()
cursor.execute("""
select tt.term_taxonomy_id, tt.taxonomy, t.name
from wp_term_taxonomy tt
inner join wp_terms t on t.term_id=tt.term_id
order by t.term_order, t.name
""")
num_queries = len(connection.queries)
blog1_qs = self.models_1.Taxonomy.objects.values_list(
'id', 'taxonomy', 'term__name'
)
assert_equals(
tuple(blog1_qs),
cursor.fetchall()
)
assert_equals(len(connection.queries), num_queries+1)
settings.DEBUG = _debug
def test_taxonomy_manager(self):
categories = list(self.models_1.Taxonomy.objects.categories())
assert_equals(len(categories), 3)
assert_true(all(c.taxonomy == 'category' for c in categories))
def test_taxonomy_posts(self):
taxonomy = self.models_1.Taxonomy.objects.get(
taxonomy='category', term__slug='category-1'
)
cursor = connection.cursor()
cursor.execute(
"select object_id from wp_term_relationships where term_taxonomy_id=%s",
(taxonomy.id,)
)
expected = sorted([r[0] for r in cursor.fetchall()])
qs = taxonomy.posts.filter(post_type='post', status='publish')
result = sorted([r[0] for r in qs.values_list('id')])
assert_equals(expected, result)
|
StarcoderdataPython
|
11212468
|
<filename>core/teacher_student.py
from functools import partial
from pathlib import Path
import argparse
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from core.teachers import MotionToStaticTeacher
from core.eisen import EISEN
from core.utils.segmentation_metrics import measure_static_segmentation_metric
from teachers import get_args
import matplotlib.pyplot as plt
import os
def _get_model_class(name):
cls = None
if 'motion_to_static' in name:
cls = MotionToStaticTeacher
elif 'eisen' in name:
cls = EISEN
else:
raise ValueError("Couldn't identify a model class associated with %s" % name)
return cls
def load_model(model_class, load_path, freeze, **kwargs):
if model_class == 'raft_pretrained':
return None # load from saved flows from pretrained models
cls = _get_model_class(model_class)
assert cls is not None, "Wasn't able to infer model class"
# build model
model = cls(**kwargs)
# load checkpoint
if load_path is not None:
weight_dict = torch.load(load_path)
new_dict = dict()
for k in weight_dict.keys():
if 'module' in k:
new_dict[k.split('module.')[-1]] = weight_dict[k]
did_load = model.load_state_dict(new_dict, strict=False)
print(did_load, type(model).__name__, load_path)
# freeze params if needed
if freeze:
for param in model.parameters():
param.requires_grad = False
return model
class TeacherStudent(nn.Module):
def __init__(self, args):
super().__init__()
assert args.teacher_class in ['raft_pretrained', 'motion_to_static', 'motion_to_static_v1'], f"Unexpected teacher class {args.teacher_class}"
print('Teacher class: ', args.teacher_class)
self.teacher = load_model(args.teacher_class, args.teacher_load_path, freeze=True, **args.teacher_params)
self.student = load_model(args.student_class, args.student_load_path, freeze=False, **args.student_params)
self.args = args
def forward(self, img1, img2, gt_segment, iters, raft_moving=None, get_segments=False):
if self.args.teacher_class == 'raft_pretrained':
target = raft_moving
elif self.args.teacher_class == 'motion_to_static_v1':
motion_thresh = boundary_thresh = 0.5
self.teacher.target_model.motion_thresh = motion_thresh
self.teacher.target_model.boundary_thresh = boundary_thresh
target = self.teacher(img1, img2, flow_iters=24, bootstrap=True)['target'] + 1
print('target.size: ', target.shape)
else:
self.teacher.eval()
with torch.no_grad():
target = self.teacher(img1, img2) + 1 # add 1 so that the background has value zero
# self.visualize_targets(img1, target)
assert target.min() >= 0, target.min()
affinity, loss, pred_segment = self.student(img1, target, get_segments=get_segments)
metric = {'loss': loss.item()}
if get_segments:
seg_metric, visuals = self.measure_segments(pred_segment, gt_segment)
# self.visualize_segments(visuals, target, img1)
self.visualize_segments_all(pred_segment, gt_segment, target, img1 / 255.)
metric['miou'] = seg_metric['metric_pred_segment_mean_ious'].item()
return loss, metric
def measure_segments(self, pred_segment, gt_segment):
return measure_static_segmentation_metric({'pred_segment': pred_segment}, {'gt_segment': gt_segment}, pred_segment.shape[-2:],
segment_key=['pred_segment'],
moving_only=False,
eval_full_res=True)
# def visualize_segments(self, visuals, target, image, prefix=''):
#
# matched_cc_preds, matched_gts, cc_ious = visuals['pred_segment']
#
# H = W = 128
#
# fsz = 19
# num_plots = 2+len(matched_cc_preds[0])*2
# fig = plt.figure(figsize=(num_plots * 4, 5))
# gs = fig.add_gridspec(1, num_plots)
# ax1 = fig.add_subplot(gs[0])
#
# plt.imshow(image[0].permute([1, 2, 0]).cpu())
# plt.axis('off')
# ax1.set_title('Image', fontsize=fsz)
#
#
# # labels = F.interpolate(batched_inputs[0]['gt_moving'].unsqueeze(0).float().cuda(), size=[H, W], mode='nearest')
# ax = fig.add_subplot(gs[1])
#
# if target is None:
# target = torch.zeros(1, 1, H, W)
# plt.imshow(target[0].cpu())
# plt.title('Supervision', fontsize=fsz)
# plt.axis('off')
#
# for i, (cc_pred, gt, cc_iou) in enumerate(zip(matched_cc_preds[0], matched_gts[0], cc_ious[0])):
# ax = fig.add_subplot(gs[2 + i])
# ax.imshow(cc_pred)
# ax.set_title('Pred (IoU: %.2f)' % cc_iou, fontsize=fsz)
# plt.axis('off')
#
# ax = fig.add_subplot(gs[2 + len(matched_cc_preds[0]) + i])
# ax.imshow(gt)
# plt.axis('off')
# ax.set_title('GT %d' % i, fontsize=fsz)
#
# # file_idx = batched_inputs[0]['file_name'].split('/')[-1].split('.hdf5')[0]
# # save_path = os.path.join(self.vis_saved_path, 'step_%smask_%s_%s.png' % (prefix, 'eval' if iter is None else str(iter), file_idx))
# # print('Save fig to ', save_path)
# # plt.savefig(save_path, bbox_inches='tight')
#
# plt.show()
# plt.close()
def visualize_segments_all(self, pred_segment, gt_segment, target, image, prefix=''):
H = W = 64
fsz = 19
num_plots = 4
fig = plt.figure(figsize=(num_plots * 4, 5))
gs = fig.add_gridspec(1, num_plots)
ax1 = fig.add_subplot(gs[0])
plt.imshow(image[0].permute([1, 2, 0]).cpu())
plt.axis('off')
ax1.set_title('Image', fontsize=fsz)
# labels = F.interpolate(batched_inputs[0]['gt_moving'].unsqueeze(0).float().cuda(), size=[H, W], mode='nearest')
ax = fig.add_subplot(gs[1])
if target is None:
target = torch.zeros(1, 1, H, W)
plt.imshow(target[0].cpu())
plt.title('Supervision', fontsize=fsz)
plt.axis('off')
ax = fig.add_subplot(gs[2])
plt.imshow(pred_segment[0].cpu(), cmap='twilight')
plt.title('Pred segments', fontsize=fsz)
plt.axis('off')
ax = fig.add_subplot(gs[3])
plt.imshow(gt_segment[0, 0].cpu(), cmap='twilight')
plt.title('GT segments', fontsize=fsz)
plt.axis('off')
# file_idx = batched_inputs[0]['file_name'].split('/')[-1].split('.hdf5')[0]
# save_path = os.path.join(self.vis_saved_path, 'step_%smask_%s_%s.png' % (prefix, 'eval' if iter is None else str(iter), file_idx))
# print('Save fig to ', save_path)
# plt.savefig(save_path, bbox_inches='tight')
plt.show()
plt.close()
@staticmethod
def visualize_targets(img, seg):
plt.subplot(1, 2, 1)
plt.imshow(img[0].permute(1, 2, 0).cpu() / 255.)
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(seg[0].cpu(), cmap='twilight')
plt.axis('off')
plt.show()
plt.close()
if __name__ == '__main__':
args = get_args()
motion_params = {
'small': False,
}
boundary_params = {
'small': False,
'static_input': False,
'orientation_type': 'regression'
}
teacher_class = 'motion_to_static'
teacher_params = {
'downsample_factor': 4,
'motion_path': args.motion_path,
'motion_model_params': motion_params,
'boundary_path': args.boundary_path,
'boundary_model_params': boundary_params
}
student_class = 'eisen'
student_params = {}
    teacher_student = TeacherStudent(
        argparse.Namespace(
            teacher_class=teacher_class,
            teacher_params=teacher_params,
            teacher_load_path=None,
            student_class=student_class,
            student_params=student_params,
            student_load_path=None,
        )
    )
|
StarcoderdataPython
|
3558045
|
import numpy
def FreedmanDiaconisBinSize(feature_values):
"""
The bin size in FD-binning is given by size = 2 * IQR(x) * n^(-1/3)
More Info: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
If the BinSize ends up being 0 (in the case that all values are the same),
return a BinSize of 1.
"""
    q75, q25 = numpy.percentile(feature_values, [75, 25])
    IQR = q75 - q25
    bin_size = 2.0 * IQR * len(feature_values) ** (-1.0/3.0)
    # Fall back to a bin size of 1 when all values are equal, as documented above.
    return bin_size if bin_size > 0 else 1
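# Illustrative helper (not part of the original module): convert the FD bin size
# into a bin count, e.g. for numpy.histogram. Assumes feature_values is non-empty.
def FreedmanDiaconisBinCount(feature_values):
    bin_size = FreedmanDiaconisBinSize(feature_values)
    spread = max(feature_values) - min(feature_values)
    return int(numpy.ceil(spread / bin_size)) if spread > 0 else 1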
def test():
values = range(0,100)
bin_size = FreedmanDiaconisBinSize(values)
correct_bin_size = 21
bin_size = round(bin_size)
print "FreedmanDiaconisBinSize -- correct size of bins? ", bin_size == correct_bin_size
if __name__=="__main__":
test()
|
StarcoderdataPython
|
1762509
|
<gh_stars>0
import json
from datetime import timedelta
from django.contrib.auth.models import User, Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from news.models import Article, Event, TimePlace
class ModelTestCase(TestCase):
@staticmethod
def create_time_place(event, pub_date_adjust_days, start_time_adjust_seconds,
hidden=TimePlace._meta.get_field("hidden").default):
return TimePlace.objects.create(
event=event,
pub_date=(timezone.now() + timedelta(days=pub_date_adjust_days)).date(),
start_date=timezone.now().date(),
start_time=(timezone.now() + timedelta(seconds=start_time_adjust_seconds)).time(),
hidden=hidden,
)
def test_str(self):
article = Article.objects.create(title='TEST_TITLE')
self.assertEqual(article.title, 'TEST_TITLE')
self.assertEqual(article.title, str(article))
title = 'Test event'
event = Event.objects.create(title=title)
time_place = self.create_time_place(event, 0, 0)
date_str = timezone.now().date().strftime('%Y.%m.%d')
self.assertEqual(str(time_place), "{} - {}".format(title, date_str))
def test_article_manager(self):
Article.objects.create(
title='NOT PUBLISHED',
pub_date=(timezone.now() + timedelta(days=1)).date(),
pub_time=timezone.now().time()
)
Article.objects.create(
title='NOT PUBLISHED',
pub_date=timezone.now().date(),
pub_time=(timezone.now() + timedelta(seconds=1)).time()
)
published1 = Article.objects.create(
title='PUBLISHED',
pub_date=(timezone.now() - timedelta(days=1)).date(),
pub_time=timezone.now().time()
)
published2 = Article.objects.create(
title='PUBLISHED',
pub_date=timezone.now().date(),
pub_time=(timezone.now() - timedelta(seconds=1)).time()
)
self.assertEqual(Article.objects.published().count(), 2)
self.assertEqual(set(Article.objects.published()), {published1, published2})
def test_event_manager(self):
event = Event.objects.create(title='', hidden=False)
hidden_event = Event.objects.create(title='', hidden=True)
not_published = self.create_time_place(event, 1, 1, False)
future = self.create_time_place(event, -1, 1, False)
past = self.create_time_place(event, -1, -1, False)
self.create_time_place(hidden_event, -1, 1)
self.create_time_place(hidden_event, -1, -1)
self.create_time_place(hidden_event, -1, 1, False)
self.create_time_place(hidden_event, -1, -1, False)
self.assertEqual(TimePlace.objects.future().count(), 1)
self.assertEqual(TimePlace.objects.past().count(), 1)
self.assertEqual(TimePlace.objects.future().first(), future)
self.assertEqual(TimePlace.objects.past().first(), past)
class ViewTestCase(TestCase):
def add_permission(self, codename):
permission = Permission.objects.get(codename=codename)
self.user.user_permissions.add(permission)
def setUp(self):
username = 'TEST_USER'
password = '<PASSWORD>'
self.user = User.objects.create_user(username=username, password=password)
self.client.login(username=username, password=password)
self.article = Article.objects.create(
title='PUBLISHED',
image=SimpleUploadedFile(name='img.jpg', content='', content_type='image/jpeg'),
pub_date=timezone.now() - timedelta(days=1),
pub_time=timezone.now().time()
)
self.event = Event.objects.create(
title='FUTURE',
image=SimpleUploadedFile(name='img.jpg', content='', content_type='image/jpeg'),
)
def test_admin(self):
response = self.client.get(reverse('admin-articles'))
self.assertNotEqual(response.status_code, 200)
self.add_permission('change_article')
response = self.client.get(reverse('admin-articles'))
self.assertEqual(response.status_code, 200)
def test_articles(self):
response = self.client.get(reverse('articles'))
self.assertEqual(response.status_code, 200)
def test_article(self):
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
def test_article_create(self):
response = self.client.get(reverse('article-create'))
self.assertNotEqual(response.status_code, 200)
self.add_permission('add_article')
response = self.client.get(reverse('article-create'))
self.assertEqual(response.status_code, 200)
def test_article_edit(self):
response = self.client.get(reverse('article-edit', kwargs={'pk': self.article.pk}))
self.assertNotEqual(response.status_code, 200)
self.add_permission('change_article')
response = self.client.get(reverse('article-edit', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
def test_events(self):
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 200)
def test_event_create(self):
response = self.client.get(reverse('event-create'))
self.assertNotEqual(response.status_code, 200)
self.add_permission('add_event')
response = self.client.get(reverse('event-create'))
self.assertEqual(response.status_code, 200)
def test_event_edit(self):
response = self.client.get(reverse('event-edit', kwargs={'pk': self.event.pk}))
self.assertNotEqual(response.status_code, 200)
self.add_permission('change_event')
response = self.client.get(reverse('event-edit', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 200)
def test_timeplace_duplicate(self):
tp = TimePlace.objects.create(event=self.event)
response = self.client.get(reverse('timeplace-duplicate', args=[tp.pk]))
self.assertNotEqual(response.status_code, 200)
self.add_permission('add_timeplace')
self.add_permission('change_timeplace')
response = self.client.get(reverse('timeplace-duplicate', args=[tp.pk]))
new = TimePlace.objects.exclude(pk=tp.pk).latest('pk')
self.assertRedirects(response, reverse('timeplace-edit', args=[new.pk]))
new_start_date = tp.start_date + timedelta(weeks=1)
new_end_date = (tp.end_date + timedelta(weeks=1)) if tp.end_date else None
self.assertTrue(new.hidden)
self.assertEqual(new.start_date, new_start_date)
self.assertEqual(new.end_date, new_end_date)
    def test_timeplace_duplicate_old(self):
self.add_permission('add_timeplace')
self.add_permission('change_timeplace')
start_date = timezone.now().date() - timedelta(weeks=2, days=3)
end_date = start_date + timedelta(days=1)
new_start_date = start_date + timedelta(weeks=3)
new_end_date = end_date + timedelta(weeks=3)
tp = TimePlace.objects.create(
event=self.event,
start_date=start_date,
end_date=end_date,
hidden=False,
)
response = self.client.get(reverse('timeplace-duplicate', args=[tp.pk]))
self.assertNotEqual(response.status_code, 200)
new = TimePlace.objects.exclude(pk=tp.pk).latest('pk')
self.assertEqual(new.start_date, new_start_date)
self.assertEqual(new.end_date, new_end_date)
def test_admin_article_toggle_view(self):
def toggle(pk, attr):
response = self.client.post(reverse('article-toggle'), {'pk': pk, 'toggle': attr})
self.assertEqual(response.status_code, 200)
return json.loads(response.content)
self.add_permission('change_article')
self.assertEquals(toggle(-1, 'hidden'), {})
self.assertEquals(toggle(self.article.pk, 'ajfal'), {})
hidden = self.article.hidden
self.assertEquals(toggle(self.article.pk, 'hidden'), {'color': 'grey' if hidden else 'yellow'})
self.assertEquals(toggle(self.article.pk, 'hidden'), {'color': 'yellow' if hidden else 'grey'})
class HiddenPrivateTestCase(TestCase):
def add_permission(self, codename):
permission = Permission.objects.get(codename=codename)
self.user.user_permissions.add(permission)
def setUp(self):
username = 'TEST_USER'
password = '<PASSWORD>'
self.user = User.objects.create_user(username=username, password=password)
self.client.login(username=username, password=password)
self.article = Article.objects.create(
title='',
image=SimpleUploadedFile(name='img.jpg', content='', content_type='image/jpeg'),
pub_date=timezone.now() - timedelta(days=1),
pub_time=timezone.now().time(),
hidden=True,
private=False,
)
self.event = Event.objects.create(
title='',
image=SimpleUploadedFile(name='img.jpg', content='', content_type='image/jpeg'),
hidden=True,
private=False,
)
def test_hidden_event(self):
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 404)
self.add_permission('change_event')
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 200)
def test_hidden_article(self):
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 404)
self.add_permission('change_article')
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
def test_private_event(self):
self.event.hidden = False
self.event.save()
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 200)
self.event.private = True
self.event.save()
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 404)
self.add_permission('can_view_private')
response = self.client.get(reverse('event', kwargs={'pk': self.event.pk}))
self.assertEqual(response.status_code, 200)
def test_private_article(self):
self.article.hidden = False
self.article.save()
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
self.article.private = True
self.article.save()
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 404)
self.add_permission('can_view_private')
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
def test_not_published_article(self):
        self.article.pub_date = timezone.now() + timedelta(days=1)
        self.article.save()
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 404)
self.add_permission('change_article')
response = self.client.get(reverse('article', kwargs={'pk': self.article.pk}))
self.assertEqual(response.status_code, 200)
|
StarcoderdataPython
|
6611198
|
<reponame>dlenwell/judo-python-client<gh_stars>1-10
"""
Copyright 2020 <NAME>, Judo Security inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import namedtuple
Request = namedtuple("Request", ["url", "method", "params", "body",
"headers", "timeout", "kwargs"])
Response = namedtuple("Response", ["url", "method", "body", "headers",
"status_code", "client_response"])
Secret = ""
Methods = {'read': ''}
Actions = {
'CreateSecret': {
'method': 'POST',
'uri': '/organization/{organizationId}/CreateSecret',
'uri_params': {
'organizationId': 'String',
},
'post_params': {
'description': "string",
'numberOfShards': 'Integer',
'expiresIn': 'integer'
},
'post_params_optional': {
'allowedIPs': 'list'
}
},
'DeleteSecret': {
'method': 'POST',
'uri': '/secret/{secretId}/DeleteSecret',
'uri_params': {'secretId': 'string'},
},
'ExpireSecret': {
'method': 'POST',
'uri': '/secret/{secretId}/ExpireSecret',
'uri_params': {'secretId': 'string'},
},
'FulfillSecret': {
'method': 'POST',
'uri': '/secret/{secretId}/FulfillSecret',
'uri_params': {'secretId': 'string'},
},
'GetShard': {
'method': 'GET',
'uri': "/shard/{shardId}?s={secretId}&t={transactionId}",
'uri_params': {
'shardId': 'string',
'secretId': 'string',
'transactionId': 'string'
}
},
'SetShard': {
'method': 'POST',
'uri': "/shard/{shardId}?s={secretId}&t={transactionId}",
'uri_params': {
'shardId': 'string',
'secretId': 'string',
'transactionId': 'string'
},
'post_params': {
'data': 'string'
}
},
'EnableUser': {
'method': 'POST',
'uri': '/user/{userId}/Enable',
'uri_params': {'userId': 'string'}
}
}
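# Illustrative helper (an assumption, not part of the published client): an action
# spec above can be turned into the Request namedtuple by formatting its uri with
# caller-supplied values for the declared uri_params.
def build_request(base_url, action_name, uri_args, body=None, headers=None, timeout=30):
    spec = Actions[action_name]
    url = base_url + spec['uri'].format(**uri_args)
    return Request(url=url, method=spec['method'], params=None, body=body,
                   headers=headers or {}, timeout=timeout, kwargs={})

# e.g. build_request("https://api.example.invalid", "DeleteSecret", {"secretId": "abc123"})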
|
StarcoderdataPython
|
1848712
|
import multiprocessing
class holderClass():
def __init__(self):
self.x = 12
def increment(self,in_q,out_q):
while in_q:
object_class = in_q.get()
object_class.x = object_class.x + 1
out_q.put(object_class)
class testClass():
def __init__(self):
self.object = holderClass()
self.x = self.object.x
def process(self):
#process1 = multiprocessing.Process(target=self.test1)
#process1.start()
#process1.join()
process2 = multiprocessing.Process(target=self.object.increment)
process2.start()
process2.join()
def pool(self):
pool = multiprocessing.Pool(1)
#for answer in pool.imap(increment, range(10)):
# print(answer)
#print
for answer in pool.imap(self.square, range(10)):
print(answer)
def test2(self):
print("Hello, world 2")
def square(self, x):
return x * x
def self_square(self):
self.x = 12
def worker(x):
return x*x
def is_even(numbers, q):
for n in numbers:
if n % 2 == 0:
q.put(n)
q.put(None)
def even_is(in_q,out_q):
while in_q:
number = in_q.get()
if number == None:
out_q.put(None)
else:
if number % 2 == 0:
out_q.put(number)
def square(in_q,out_q):
while in_q:
number = in_q.get()
if number == None:
out_q.put(None)
else:
out_q.put(number*number)
|
StarcoderdataPython
|
1631056
|
<gh_stars>0
import redis
from app.config import REDIS_HOST, REDIS_PASSWORD
def redis_connection():
if REDIS_HOST:
return redis.StrictRedis(
host=REDIS_HOST,
            password=REDIS_PASSWORD)
|
StarcoderdataPython
|
1861677
|
from nonebot.rule import to_me
from nonebot.plugin import on_command
from nonebot.typing import Bot, Event
say = on_command("say", to_me())
@say.handle()
async def repeat(bot: Bot, event: Event, state: dict):
await bot.send(message=event.message, event=event)
|
StarcoderdataPython
|
34888
|
<gh_stars>0
import h5py
import numpy
import sklearn
import sklearn.datasets
from matplotlib import pyplot
def load_dataset():
data_dir = '/Users/fpena/Courses/Coursera-Deep-Learning/Assignments/datasets/'
train_dataset = h5py.File(data_dir + 'train_catvnoncat.h5', "r")
train_set_x_orig = numpy.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = numpy.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File(data_dir + 'test_catvnoncat.h5', "r")
test_set_x_orig = numpy.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = numpy.array(test_dataset["test_set_y"][:]) # your test set labels
classes = numpy.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def preprocess_dataset(train_set_x_orig, test_set_x_orig):
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_set_x = train_set_x_flatten / 255.
test_set_x = test_set_x_flatten / 255.
return train_set_x, test_set_x
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = numpy.meshgrid(numpy.arange(x_min, x_max, h), numpy.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(numpy.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
pyplot.contourf(xx, yy, Z, cmap=pyplot.cm.Spectral)
pyplot.ylabel('x2')
pyplot.xlabel('x1')
pyplot.scatter(X[0, :], X[1, :], c=y, cmap=pyplot.cm.Spectral)
# pyplot.show()
def load_planar_dataset():
numpy.random.seed(1)
m = 400 # number of examples
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = numpy.zeros((m, D)) # data matrix where each row is a single example
Y = numpy.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
t = numpy.linspace(j * 3.12, (j + 1) * 3.12, N) + numpy.random.randn(N) * 0.2 # theta
r = a * numpy.sin(4 * t) + numpy.random.randn(N) * 0.2 # radius
X[ix] = numpy.c_[r * numpy.sin(t), r * numpy.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def load_extra_datasets():
N = 200
noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2,
n_classes=2, shuffle=True, random_state=None)
no_structure = numpy.random.rand(N, 2), numpy.random.rand(N, 2)
return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
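# Illustrative usage (assumes a fitted classifier `clf` exposing .predict, e.g. a
# scikit-learn LogisticRegression trained on X.T and Y.T):
#
#   X, Y = load_planar_dataset()
#   plot_decision_boundary(lambda x: clf.predict(x), X, Y)
#   pyplot.show()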
|
StarcoderdataPython
|
1777859
|
<reponame>AnonC0DER/C1Academy<filename>Teacher/migrations/0005_auto_20220123_2338.py
# Generated by Django 3.2.9 on 2022-01-23 20:08
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('Teacher', '0004_auto_20220121_0034'),
]
operations = [
migrations.CreateModel(
name='HomeworkTeacherCanAdd',
fields=[
('title', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='HomeworkImages/')),
('created', models.DateTimeField(auto_now_add=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
],
),
migrations.AddField(
model_name='classroom',
name='home_work',
field=models.ManyToManyField(blank=True, null=True, to='Teacher.HomeworkTeacherCanAdd'),
),
]
|
StarcoderdataPython
|
274749
|
<gh_stars>0
from .api import Sampler, read, run
from .utils import instance_from_map
|
StarcoderdataPython
|
6660043
|
#Write a simple program to simulate the operation of the grep command on Unix.
#Ask the user to enter a regular expression and count the number of lines that matched
#the regular expression:
import re
fname = input('Enter the file name: ') #prompts user for file name to search
try:
fhand = open(fname, 'r') #opens file 'r' is for read only purposes
except:
print('File cannot be opened:', fname) #returns 'File cannot be opened' to user and quits
exit()
userinput = input('Enter regular expression: ')
count = 0
for line in fhand:
    line = line.rstrip()
    if re.search(userinput, line):
        print(line)
        count += 1
print('Number of matching lines:', count)
|
StarcoderdataPython
|
3557671
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##########################################################################
"""
OpenSwitch Test for switchd related configurations.
"""
from pytest import mark
from time import sleep
import yaml
import shutil
import os.path
TOPOLOGY = """
# +-------+
# | ops1 |
# +-------+
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
"""
ovs_vsctl = "/usr/bin/ovs-vsctl "
ovs_appctl = "/usr/bin/ovs-appctl "
ovsdb_tool = "/usr/bin/ovsdb-tool "
def start(dut):
start_ovsdb(dut)
sleep(3)
start_sysd(dut)
wait_until_ovsdb_is_up(dut)
def stop(dut):
stop_sysd(dut)
stop_ovsdb(dut)
sleep(3)
def start_sysd(dut):
dut("/bin/systemctl start ops-sysd", shell="bash")
def stop_sysd(dut):
dut(ovs_appctl + "-t ops-sysd exit", shell="bash")
def start_ovsdb(dut):
"""Create an empty DB file and load it into ovsdb-server."""
# Create an empty database file.
c = (
"{ovsdb_tool} create /var/run/openvswitch/ovsdb.db "
"/usr/share/openvswitch/vswitch.ovsschema".format(
ovsdb_tool=ovsdb_tool
)
)
dut(c, shell="bash")
# Load the newly created DB into ovsdb-server
dut(ovs_appctl + "-t ovsdb-server ovsdb-server/add-db "
"/var/run/openvswitch/ovsdb.db", shell="bash")
def stop_ovsdb(dut):
"""Remove the OpenSwitch DB from ovsdb-server.
It also removes the DB file from the file system.
"""
# Remove the database from the ovsdb-server.
dut(ovs_appctl + "-t ovsdb-server ovsdb-server/remove-db "
"OpenSwitch", shell="bash")
# Remove the DB file from the file system.
dut("/bin/rm -f /var/run/openvswitch/ovsdb.db", shell="bash")
def copy_hw_desc_files(dut):
"""Copy Hardware description files to /etc/openswitch/hwdesc"""
c = "cp -a /tmp/hwdesc/*.yaml etc/openswitch/hwdesc"
dut(c, shell="bash")
def read_yaml_port_file(file_name):
"""Read the local port.yaml file and return the data."""
cur_dir, f = os.path.split(__file__)
yaml_file_loc = cur_dir + "/hwdesc/" + file_name
yaml_data = open(yaml_file_loc, "r")
return yaml.load(yaml_data)
def get_subsystem_other_info(dut, map_key):
"""Get the value from subsystem table other_info column."""
subsystem_uuid = dut(ovs_vsctl + "list subsystem | grep -i uuid|cut -d :"
" -f 2", shell="bash")
uuid = subsystem_uuid.replace('\r\n', '')
c = ovs_vsctl + "get subsystem " + uuid + " other_info:" + map_key
out = dut(c, shell="bash")
return out.replace('\r\n', '')
def get_interface_hw_info(dut, name, map_key):
"""Get the value from interface table hw_intf_info column."""
c = ovs_vsctl + "get interface " + str(name) + \
" hw_intf_info:" + map_key
out = dut(c, shell="bash")
return out.replace('\r\n', '')
def wait_until_ovsdb_is_up(dut):
"""Wait until System table is visible in the ovsdb-server."""
cmd = ovs_vsctl + "list System | grep uuid"
wait_count = 20
while wait_count > 0:
out = dut(cmd, shell="bash")
if "_uuid" in out:
break
wait_count -= 1
sleep(1)
assert wait_count != 0
@mark.skipif(True, reason="OVSDB error when trying to bring up sysd process")
def test_sysd_ct_hw_desc_files(topology, step):
ops1 = topology.get("ops1")
assert ops1 is not None
cur_dir, f = os.path.split(__file__)
test_file_dir = os.path.join(cur_dir, "hwdesc")
shutil.copytree(test_file_dir, ops1.shared_dir + "/hwdesc")
stop(ops1)
copy_hw_desc_files(ops1)
start(ops1)
expected = read_yaml_port_file("ports.yaml")
x, count = get_subsystem_other_info(ops1, "interface_count").splitlines()
exp = '"{out}"'.format(
out=expected['port_info']['number_ports']
)
assert count == exp
x, count = get_subsystem_other_info(ops1, "max_bond_count").splitlines()
exp = str(expected['port_info']['max_lag_count'])
assert count == '"{out}"'.format(out=exp)
x, count = get_subsystem_other_info(ops1,
"max_bond_member_count").splitlines()
exp = str(expected['port_info']['max_lag_member_count'])
assert count == '"{out}"'.format(out=exp)
port_name = expected['ports'][0]['name']
intf_id = get_interface_hw_info(ops1, port_name,
"switch_intf_id")
exp = str(expected['ports'][0]['switch_device_port'])
assert intf_id == '"{out}"'.format(out=exp)
port_name = expected['ports'][0]['name']
connector_type = get_interface_hw_info(ops1, port_name,
"connector")
exp = str(expected['ports'][0]['connector'])
assert connector_type == '"{out}"'.format(out=exp)
name = ops1(ovs_vsctl + "get bridge bridge_normal name",
shell="bash")
name = name.replace('\r\n', '')
exp = "bridge_normal"
assert name == exp
|
StarcoderdataPython
|
9631812
|
# Copyright 2020-, <NAME> and contributors
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import numpy as np
import pytest
from quantumflow import var
funcnames = ["arccos", "arcsin", "arctan", "cos", "exp", "sign", "sin", "sqrt", "tan"]
@pytest.mark.parametrize("funcname", funcnames)
def test_scalar_functions(funcname: str) -> None:
x = var.Symbol("x")
nx = 0.76 # Gives real answer for all functions
subs = {"x": nx}
symfn = getattr(var, funcname)
npfn = getattr(np, funcname)
assert np.isclose(var.asfloat(symfn(x), subs), npfn(nx))
assert np.isclose(var.asfloat(symfn(nx), subs), npfn(nx))
def test_scalar_arctan2() -> None:
x = var.Symbol("x")
nx = 0.76
y = var.Symbol("y")
ny = -0.5
subs = {"x": nx, "y": ny}
assert np.isclose(var.asfloat(var.arctan2(x, y), subs), np.arctan2(nx, ny))
assert np.isclose(var.asfloat(var.arctan2(nx, ny), subs), np.arctan2(nx, ny))
def test_almost_zero() -> None:
assert var.almost_zero(0)
assert var.almost_zero(0.0)
assert var.almost_zero(0.000000000000000001)
assert var.almost_zero(var.Symbol("x").evalf(subs={"x": 0}))
def test_isclose() -> None:
assert var.isclose(1.0, 1.0000000001)
assert not var.isclose(0.0, 0.0000000001, atol=0.000000000001)
assert var.isclose(var.Symbol("x"), var.Symbol("x"))
assert not var.isclose(var.Symbol("x"), 1.0)
def test_asexpression() -> None:
s = var.asexpression(1.0)
assert str(s) == "1"
with pytest.raises(ValueError):
_ = var.asexpression(1.13434538345)
s = var.asexpression(np.pi * 123)
assert str(s) == "123*pi"
s = var.asexpression(np.pi / 64)
assert str(s) == "pi/64"
s = var.asexpression(np.pi * 3 / 64)
assert str(s) == "3*pi/64"
s = var.asexpression(np.pi * 8 / 64)
assert str(s) == "pi/8"
s = var.asexpression(-np.pi * 3 / 8)
assert str(s) == "-3*pi/8"
s = var.asexpression(5 / 8)
assert str(s) == "5/8"
s = var.asexpression(2 / np.pi)
assert str(s) == "2/pi"
# fin
|
StarcoderdataPython
|
9680952
|
<reponame>FapTek/faopy-server
import functools
import logging
import traceback
def t(t):
    """Decorator factory: only call the wrapped method when ``arg`` is exactly of type ``t``."""
    def type_decorator(func):
        @functools.wraps(func)
        def wrapped(self, arg):
            if type(arg) == t:
                return func(self, arg)
            else:
                # Type mismatch: log a warning and implicitly return None.
                logging.warning(f"Type mismatch in call to {func.__name__}: expected {t}, got {type(arg)}")
        return wrapped
    return type_decorator
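# Hedged usage sketch (assumption, not from the original repo): the decorator is meant for
# instance methods taking a single payload argument. The Handler class and handle_text
# method below are hypothetical names used only for illustration.
#
#     class Handler:
#         @t(str)
#         def handle_text(self, payload):
#             return payload.upper()
#
#     Handler().handle_text("hi")  # -> "HI"
#     Handler().handle_text(42)    # logs a type-mismatch warning and returns None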
|
StarcoderdataPython
|
8032016
|
import discord
from discord.ext import commands
class TB_Settings(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='setup', aliases=['tsetup'], description='Initialize guild')
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_channels=True)
@commands.guild_only()
async def create_guild_join(self, ctx):
"""If for some reason your guild was NOT added to the database, this command will force your guild into it.\nWARNING: RUNNING THIS WILL ESSENTIALLY RESET YOUR GUILD. ACTIVE THREADS WILL BE LOST AND WILL NO LONGER BE MANAGED!"""
category = discord.utils.get(ctx.guild.categories, name='THREADS')
if category is None:
try:
category = await ctx.guild.create_category('THREADS', reason='Initial category of threads creation.')
except (discord.HTTPException, discord.InvalidArgument):
await ctx.send("Unable to create channel for threads. Please try creating a category manually and name it `THREADS`, then re-run this command.")
return
self.bot.cache[ctx.guild.id] = {
"prefix" : ".",
"default_thread_channel" : category.id,
"settings" : {
"role_required" : False,
"allowed_roles" : [],
"TTD" : 3,
"cleanup" : True,
"admin_roles" : [],
"admin_bypass" : False,
"cooldown" : {
"enabled": True,
"rate" : 0,
"per" : 0,
"bucket" : "user"
}
},
"active_threads" : {},
"custom_categories" : {}
}
await ctx.send('Setup ran. Guild added to database.')
await self.bot.write_db(ctx.guild)
@commands.command(name='prefix', aliases=['tprefix'], description='Sets the custom prefix')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_prefix(self, ctx, prefix: str):
"""Lets you modify the servers prefix. You must have the `manage_guild` permission to use this."""
self.bot.cache[ctx.guild.id]['prefix'] = prefix
embed=discord.Embed(title="Guild settings updated", description=f"You have changed the **prefix** for this guild.\nThe current prefix is: `{self.bot.cache[ctx.guild.id]['prefix']}`", color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f"To issue commands, you must now type {self.bot.cache[ctx.guild.id]['prefix']} instead of {ctx.prefix} . Example: {self.bot.cache[ctx.guild.id]['prefix']}tmake")
await self.bot.write_db(ctx.guild)
await ctx.send(embed=embed)
@commands.command(name='timetodead', aliases=['tttd'], description='Sets the period to wait before a thread becomes inactive, in days')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_ttd(self, ctx, ttd):
"""Change the number of days the bot will wait before marking a channel for deletion. You must have the `manage_guild` permission to use this."""
self.bot.cache[ctx.guild.id]['settings']['TTD'] = int(ttd)
await self.bot.write_db(ctx.guild)
await ctx.send(f"Inactivity time set to: {ttd} days")
return
@commands.command(name='roles', aliases=['troles','trolls'], description='Toggles and adds/removes roles from running `tmake` command.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_roles(self, ctx, *roles: discord.Role):
"""Running with no arguments will toggle whether a specific set of roles are required or not. If ran with arguments, the arguments should be role mentions, their names, or the role ID which will add or remove the role (Does not delete or modify server roles)."""
if len(roles) == 0:
self.bot.cache[ctx.guild.id]['settings']['role_required'] = not self.bot.cache[ctx.guild.id]['settings']['role_required']
embed=discord.Embed(title="Guild settings updated", description=f"You have changed the **roles_required** flag for this guild.\nThe current value is: `{self.bot.cache[ctx.guild.id]['settings']['role_required']}`", color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f"A True value means users need a specific(s) role in order to use {ctx.prefix}tmake. A False value means anyone in your server can make threads.")
await ctx.send(embed=embed)
else:
added = []
removed = []
for role in roles:
if role.id in self.bot.cache[ctx.guild.id]['settings']['allowed_roles']:
self.bot.cache[ctx.guild.id]['settings']['allowed_roles'].remove(role.id)
removed.append(f"- {role.name}")
else:
self.bot.cache[ctx.guild.id]['settings']['allowed_roles'].append(role.id)
added.append(f"+ {role.name}")
if not added:
added.append("+ No roles added!")
if not removed:
removed.append("- No roles removed")
added_list = "```patch\n"
for role in added:
added_list += f"{role}\n"
added_list += "```"
removed_list = "```patch\n"
for role in removed:
removed_list += f"{role}\n"
removed_list += "```"
current = []
for role in self.bot.cache[ctx.guild.id]['settings']['allowed_roles']:
cur = ctx.guild.get_role(role)
current.append(cur.name)
if not current:
current.append("No configured roles.")
embed = discord.Embed(title=f"{ctx.guild.name} Role Required List", description='The following is a summary of what roles you just added, removed and the current list of allowed roles.', color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.add_field(name="ADDED", value=added_list, inline=False)
embed.add_field(name="REMOVED", value=removed_list, inline=False)
embed.add_field(name="CURRENT LIST", value=" | ".join(current), inline=False)
embed.set_footer(text="Did you add your admin roles? This setting does not care about role or user permissions!")
await ctx.send(embed=embed)
await self.bot.write_db(ctx.guild)
@update_roles.error
async def ur_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
error_str = str(error)
role_name = error_str[6:-12]
message_list = ctx.message.content.split(' ')
arg_list = message_list[1:]
good_roles = []
for arg in arg_list:
good_roles.append(discord.utils.find(lambda r : r.id == arg or r.name == arg or r.mention == arg, ctx.guild.roles))
good_roles = [i for i in good_roles if i]
if not good_roles:
await ctx.send("ran out")
@commands.command(name='aroles', aliases=['taroles'], description='Toggles and adds/removes admin roles from bypassing `tmake` command cooldown.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_aroles(self, ctx, *roles: discord.Role):
"""Running with no arguments will toggle whether admin roles are activate or not. If ran with arguments, the arguments should be role mentions, their names, or the role ID which will add or remove the role (Does not delete or modify server roles)."""
if len(roles) == 0:
self.bot.cache[ctx.guild.id]['settings']['admin_bypass'] = not self.bot.cache[ctx.guild.id]['settings']['admin_bypass']
embed=discord.Embed(title="Guild settings updated", description=f"You have changed the **admin_bypass** flag for this guild.\nThe current value is: `{self.bot.cache[ctx.guild.id]['settings']['admin_bypass']}`", color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f"A True value means admin roles can bypass the cooldown on {ctx.prefix}tmake. A False value means admin roles do not get to bypass the cooldown.")
await ctx.send(embed=embed)
else:
added = []
removed = []
for role in roles:
if role.id in self.bot.cache[ctx.guild.id]['settings']['admin_roles']:
self.bot.cache[ctx.guild.id]['settings']['admin_roles'].remove(role.id)
removed.append(f"- {role.name}")
else:
self.bot.cache[ctx.guild.id]['settings']['admin_roles'].append(role.id)
added.append(f"+ {role.name}")
if not added:
added.append("+ No roles added!")
if not removed:
removed.append("- No roles removed")
added_list = "```patch\n"
for role in added:
added_list += f"{role}\n"
added_list += "```"
removed_list = "```patch\n"
for role in removed:
removed_list += f"{role}\n"
removed_list += "```"
current = []
for role in self.bot.cache[ctx.guild.id]['settings']['admin_roles']:
cur = ctx.guild.get_role(role)
current.append(cur.name)
if not current:
current.append("No configured roles.")
embed = discord.Embed(title=f"{ctx.guild.name} Admin Role List", description='The following is a summary of what roles you just added, removed, and the current list of allowed roles.', color=self.bot.success_color)
embed.add_field(name="ADDED", value=added_list, inline=False)
embed.add_field(name="REMOVED", value=removed_list, inline=False)
embed.add_field(name="CURRENT LIST", value=" | ".join(current), inline=False)
await ctx.send(embed=embed)
await self.bot.write_db(ctx.guild)
@update_aroles.error
async def ar_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
error_str = str(error)
role_name = error_str[6:-12]
message_list = ctx.message.content.split(' ')
arg_list = message_list[1:]
good_roles = []
for arg in arg_list:
good_roles.append(discord.utils.find(lambda r : r.id == arg or r.name == arg or r.mention == arg, ctx.guild.roles))
good_roles = [i for i in good_roles if i]
if not good_roles:
await ctx.send("ran out")
@commands.command(name="bypass", aliases=['tbypass'], description='Toggle if admin roles can bypass cooldown or not.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_bypass(self, ctx):
"""Toggle the ability to allow admin roles to bypass the cooldown."""
self.bot.cache[ctx.guild.id]['settings']['admin_bypass'] = not self.bot.cache[ctx.guild.id]['settings']['admin_bypass']
embed=discord.Embed(title="Guild settings updated", description=f"You have changed the **admin bypass** flag for this guild.\nThe current value is: `{self.bot.cache[ctx.guild.id]['settings']['admin_bypass']}`", color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f"A True value means defined admin roles are able to bypass the cooldown. A False value means they cannot bypass the cooldown.")
await self.bot.write_db(ctx.guild)
await ctx.send(embed=embed)
@commands.command(name='clean', aliases=['tclean'], description='Toggles the flag that controls if the bot should delete messages used to setup threads.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def update_cleaning(self, ctx):
"""Why you would change this is beyond me. Determines if the bot should delete messages used to create threads."""
self.bot.cache[ctx.guild.id]['settings']['cleanup'] = not self.bot.cache[ctx.guild.id]['settings']['cleanup']
embed=discord.Embed(title="Guild settings updated", description=f"You have changed the **cleanup** flag for this guild.\nThe current value is: `{self.bot.cache[ctx.guild.id]['settings']['cleanup']}`", color=self.bot.success_color)
embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f"A True value means commands used to make threads will be deleted when possible. A False value means all messages used to create threads are kept.")
await self.bot.write_db(ctx.guild)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(TB_Settings(bot))
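# Hedged usage sketch (assumption, not part of the original cog): this module is expected
# to be loaded as a discord.py extension from the bot's entry point; the extension path
# "cogs.settings" below is hypothetical.
#
#     bot.load_extension("cogs.settings")  # invokes setup(bot) defined above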
|
StarcoderdataPython
|
6434041
|
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from copy import copy
import unittest
import numpy as np
from coremltools._deps import HAS_SKLEARN
from coremltools.models.utils import evaluate_transformer,\
macos_version, is_macos
if HAS_SKLEARN:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import Normalizer
from coremltools.converters import sklearn
from coremltools.models.datatypes import Array
from sklearn.datasets import load_boston
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class OneHotEncoderScikitTest(unittest.TestCase):
"""
Unit test class for testing scikit-learn converter.
"""
@classmethod
def setUpClass(self):
"""
Set up the unit test by loading the dataset and training a model.
"""
scikit_data = [[0], [1], [2], [4], [3], [2], [4], [5], [6], [7]]
scikit_data_multiple_cols = [[0, 1], [1, 0], [2, 2], [3, 3], [4, 4]]
scikit_model = OneHotEncoder()
scikit_model.fit(scikit_data)
# Save the data and the model
self.scikit_data = np.asarray(scikit_data, dtype = 'd')
self.scikit_data_multiple_cols = np.asarray(scikit_data_multiple_cols, dtype = 'd')
self.scikit_model = scikit_model
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_conversion_one_column(self):
# Fit a single OHE
scikit_model = OneHotEncoder()
scikit_model.fit(self.scikit_data)
spec = sklearn.convert(scikit_model, 'single_feature', 'out').get_spec()
test_data = [{'single_feature': row} for row in self.scikit_data]
scikit_output = [
{'out': row} for row in scikit_model.transform(
self.scikit_data).toarray()]
metrics = evaluate_transformer(spec, test_data, scikit_output)
self.assertIsNotNone(spec)
self.assertIsNotNone(spec.description)
self.assertEquals(metrics['num_errors'], 0)
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_conversion_many_columns(self):
scikit_model = OneHotEncoder()
scikit_model.fit(self.scikit_data_multiple_cols)
spec = sklearn.convert(scikit_model, ['feature_1', 'feature_2'], 'out').get_spec()
test_data = [
{'feature_1': row[0],
'feature_2': row[1]} for row in self.scikit_data_multiple_cols]
scikit_output = [
{'out': row} for row in scikit_model.transform(
self.scikit_data_multiple_cols).toarray()]
metrics = evaluate_transformer(spec, test_data, scikit_output)
self.assertIsNotNone(spec)
self.assertIsNotNone(spec.description)
self.assertEquals(metrics['num_errors'], 0)
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_conversion_one_column_of_several(self):
scikit_model = OneHotEncoder(categorical_features = [0])
scikit_model.fit(copy(self.scikit_data_multiple_cols))
spec = sklearn.convert(scikit_model, ['feature_1', 'feature_2'], 'out').get_spec()
test_data = [
{'feature_1': row[0],
'feature_2': row[1]} for row in self.scikit_data_multiple_cols]
scikit_output = [{'out': row} for row in scikit_model.transform(
self.scikit_data_multiple_cols).toarray()]
metrics = evaluate_transformer(spec, test_data, scikit_output)
self.assertIsNotNone(spec)
self.assertIsNotNone(spec.description)
self.assertEquals(metrics['num_errors'], 0)
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_boston_OHE(self):
data = load_boston()
for categorical_features in [[3], [8], [3, 8], [8, 3]]:
model = OneHotEncoder(
categorical_features = categorical_features, sparse=False)
model.fit(data.data, data.target)
# Convert the model
spec = sklearn.convert(model, data.feature_names, 'out').get_spec()
input_data = [dict(zip(data.feature_names, row))
for row in data.data]
output_data = [{"out": row} for row in model.transform(data.data)]
result = evaluate_transformer(spec, input_data, output_data)
assert result["num_errors"] == 0
# This test still isn't working
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_boston_OHE_pipeline(self):
data = load_boston()
for categorical_features in [ [3], [8], [3, 8], [8,3] ]:
# Put it in a pipeline so that we can test whether the output dimension
# handling is correct.
model = Pipeline(
[("OHE", OneHotEncoder(
categorical_features=categorical_features)),
("Normalizer", Normalizer())])
model.fit(data.data.copy(), data.target)
# Convert the model
spec = sklearn.convert(model, data.feature_names, 'out').get_spec()
input_data = [dict(zip(data.feature_names, row)) for row in data.data]
output_data = [{"out" : row} for row in model.transform(data.data.copy())]
result = evaluate_transformer(spec, input_data, output_data)
assert result["num_errors"] == 0
@unittest.skipUnless(is_macos() and macos_version() >= (10, 13),
'Only supported on macOS 10.13+')
def test_random_sparse_data(self):
n_columns = 8
n_categories = 20
import numpy.random as rn
rn.seed(0)
categories = rn.randint(50000, size = (n_columns, n_categories) )
for dt in ['int32', 'float32', 'float64']:
_X = np.array( [[categories[j, rn.randint(n_categories)]
for j in range(n_columns)]
for i in range(100)], dtype=dt)
# Test this data on a bunch of possible inputs.
for sparse in (True, False):
for categorical_features in ['all', [3], [4], range(2, 8),
range(0, 4), range(0, 8)]:
X = _X.copy()
# This appears to be the only type now working.
assert X.dtype == np.dtype(dt)
model = OneHotEncoder(
categorical_features=categorical_features,
sparse=sparse)
model.fit(X)
# Convert the model
spec = sklearn.convert(
model, [('data', Array(n_columns))], 'out')
X_out = model.transform(X)
if sparse:
X_out = X_out.todense()
input_data = [{'data' : row} for row in X]
output_data = [{"out" : row} for row in X_out]
result = evaluate_transformer(spec, input_data, output_data)
assert result["num_errors"] == 0
# Test normal data inside a pipeline
for sparse in (True, False):
for categorical_features in ['all', [3], [4], range(2, 8),
range(0, 4), range(0, 8)]:
X = _X.copy()
model = Pipeline(
[("OHE", OneHotEncoder(
categorical_features=categorical_features,
sparse=sparse)), ("Normalizer", Normalizer())])
model.fit(X)
# Convert the model
spec = sklearn.convert(
model, [('data', Array(n_columns))], 'out').get_spec()
X_out = model.transform(X)
if sparse:
X_out = X_out.todense()
input_data = [{'data' : row} for row in X]
output_data = [{"out" : row} for row in X_out]
result = evaluate_transformer(spec, input_data, output_data)
assert result["num_errors"] == 0
def test_conversion_bad_inputs(self):
# Error on converting an untrained model
with self.assertRaises(TypeError):
model = OneHotEncoder()
spec = sklearn.convert(model, 'data', 'out')
        # Check the expected class during conversion.
with self.assertRaises(TypeError):
from sklearn.linear_model import LinearRegression
model = LinearRegression()
spec = sklearn.convert(model, 'data', 'out')
|
StarcoderdataPython
|
6594124
|
from aipaca_predictor.dnn import DNN
from aipaca_predictor.layers import PoolLayer
from aipaca_predictor.layers import ConvLayer
from aipaca_predictor.layers import DenseLayer
import requests
import json
from typing import List
from typing import Optional
from tensorflow.python.client import device_lib
from tensorflow.core.framework.device_attributes_pb2 import DeviceAttributes
SUPPORT_GPU_TYPES = {"1080Ti", "K40", "K80", "M60", "P100", "T4", "V100"}
def to_predict(model, batch_size: int, iterations: int, optimizer: str = "sgd") -> int:
"""
Given a Keras model, batch_size, iterations and optimizer, output the estimated training time
"""
gpu_name = _find_gpu_type()
if not gpu_name:
print(f"Your GPU is not supported. not one of {SUPPORT_GPU_TYPES}")
return
print(f"Detected that you use {gpu_name} GPU")
dnn = parse_cnn(model.layers)
data = {
"dnn": dnn,
"batch_size": batch_size,
"iterations": iterations,
"optimizer": optimizer,
"gpu_name": gpu_name,
}
url = "http://172.16.58.3:8000/predict"
resp = requests.post(url, data=json.dumps(data))
print(resp.json())
def parse_cnn(model_layers):
input_shape = model_layers[0]._batch_input_shape
# Image input
if len(input_shape) == 4:
size = input_shape[1]
# Vector or flattened image
else:
size = input_shape[0] if input_shape[0] else 1
num_channel = input_shape[-1]
previous_channel = num_channel
previous_output_size = size
dnn = DNN(num_channel, size)
for layer in model_layers[1:]:
layer_class_name = layer.__class__.__name__
layer_name = layer.name
if layer_class_name == "MaxPooling2D":
layer = PoolLayer(
layer=layer,
input_size=previous_output_size,
channels_in=previous_channel,
)
elif layer_class_name == "Conv2D":
layer = ConvLayer(
layer=layer,
input_size=previous_output_size,
channels_in=previous_channel,
)
elif layer_class_name == "Dense":
layer = DenseLayer(
layer=layer,
input_size=previous_output_size,
channels_in=previous_channel,
)
else:
continue
previous_channel = layer.channels_out
previous_output_size = layer.output_size
dnn.add_layer(layer_name=layer_name, layer_type=layer.type, **vars(layer))
return dnn
def _find_gpu_type() -> Optional[str]:
local_device_protos: List[DeviceAttributes] = device_lib.list_local_devices()
for device in local_device_protos:
for gpu_type in SUPPORT_GPU_TYPES:
if gpu_type.upper() in device.physical_device_desc.upper():
return gpu_type
return None
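# Hedged usage sketch (assumption, not from the original package): to_predict expects a
# Keras model plus training parameters; the toy CNN below is hypothetical and only
# illustrates the expected call shape.
#
#     from tensorflow import keras
#     model = keras.Sequential([
#         keras.layers.Conv2D(8, 3, activation="relu", input_shape=(32, 32, 3)),
#         keras.layers.MaxPooling2D(),
#         keras.layers.Flatten(),
#         keras.layers.Dense(10),
#     ])
#     to_predict(model, batch_size=32, iterations=100, optimizer="sgd")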
|
StarcoderdataPython
|
134008
|
<filename>mlonmcu/context.py
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Definition if the contextmanager for mlonmcu environments."""
import sys
import os
import shutil
import tempfile
from typing import List, Union
from pathlib import Path
import filelock
from mlonmcu.utils import ask_user
from mlonmcu.logging import get_logger, set_log_file
from mlonmcu.session.run import Run
from mlonmcu.session.session import Session
from mlonmcu.setup.cache import TaskCache
import mlonmcu.setup.utils as utils
from mlonmcu.environment.environment import Environment, UserEnvironment
from mlonmcu.environment.list import get_environments_map
from mlonmcu.environment.config import get_environments_dir
logger = get_logger()
def lookup_environment() -> Environment:
"""Helper function to automatically find a suitable environment.
This function is used if neither a name nor a path of the environment was specified by the user.
The lookup follows a predefined order:
- Check current working directory
- Check MLONMCU_HOME environment variable
- Default environment for current user
Returns
-------
environment : Path
The environment (if the lookup was successful).
"""
logger.debug("Starting lookup for mlonmcu environment")
logger.debug("First checking in local working directory")
path = os.path.join(os.getcwd(), "environment.yml")
if os.path.exists(path):
logger.debug("Found environment directory: %s", path)
return path
logger.debug("Next checking environment variables")
home = os.environ.get("MLONMCU_HOME")
if home:
path = os.path.join(home, "environment.yml")
if os.path.exists(path):
logger.debug("Found environment directory: %s", path)
return path
logger.debug("Looking for default environment for current user")
envs_list = get_environments_map()
if "default" in envs_list:
assert "path" in envs_list["default"]
directory = envs_list["default"]["path"]
path = os.path.join(directory, "environment.yml")
if os.path.exists(path):
logger.debug("Found environment directory: %s", path)
return path
return None
def get_environment_by_path(path: Union[str, Path]) -> Environment:
"""Utility to find an environment file using a supplied path.
Parameters
----------
path : str/Path
The path of the environment (or its YAML file).
Returns
-------
Environment:
The environment (if the lookup was successful).
"""
if isinstance(path, str):
path = Path(path)
assert isinstance(path, Path)
if path.is_dir():
path = path / "environment.yml"
if path.is_file():
return path
return None
def get_environment_by_name(name: str) -> Environment:
"""Utility to find an environment file using a supplied name.
Parameters
----------
name : str
The name/alias if the environment.
Returns
-------
Environment :
The environment (if the lookup was successful).
"""
# TODO: parse the ini file instead
environments_dir = get_environments_dir()
if environments_dir.is_dir():
path = environments_dir / name
if path.is_dir():
return get_environment_by_path(path)
return None
def get_ids(directory: Path) -> List[int]:
"""Get a sorted list of ids for sessions/runs found in the given directory.
Parameters
----------
directory : Path
Directory where the sessions/runs are stored.
    Returns
    -------
    list:
        List of integers representing the session numbers. Empty list if directory does not exist.
"""
if not directory.is_dir():
return []
ids = [int(o) for o in os.listdir(directory) if os.path.isdir(directory / o) and not os.path.islink(directory / o)]
return sorted(ids) # TODO: sort by session datetime?
def load_recent_sessions(env: Environment, count: int = None) -> List[Session]:
"""Get a list of recent sessions for the environment.
Parameters
----------
env : Environment
MLonMCU environment which should be used.
count : int
Maximum number of sessions to return. Collect all if None.
Returns
-------
list:
The resulting list of session objects.
"""
if count is not None:
raise NotImplementedError()
sessions = []
sessions_directory = env.paths["temp"].path / "sessions"
# TODO: in the future also strs (custom or hash) should be allowed
session_ids = get_ids(sessions_directory)
for sid in session_ids:
session_directory = sessions_directory / str(sid)
# session_file = sessions_directory / str(sid) / "session.txt"
# if not session_file.is_file():
# continue
runs_directory = session_directory / "runs"
run_ids = get_ids(runs_directory)
runs = []
for rid in run_ids:
run_directory = runs_directory / str(rid)
run_file = run_directory / "run.txt"
# run = Run.from_file(run_file) # TODO: actually implement run restore
run = Run() # TODO: fix
run.archived = True
run.dir = run_directory
runs.append(run)
session = Session(idx=sid, archived=True, dir=session_directory)
session.runs = runs
session.dir = session_directory
sessions.append(session)
return sessions
def resolve_environment_file(name: str = None, path: str = None) -> Path:
"""Utility to find the environment file by a optionally given name or path.
The lookup is performed in a predefined order:
- If specified: name/path
- Else: see lookup_environment()
Parameters
----------
name : str
Hint for the environment name provided by the user.
path : str
Hint for the environment path provided by the user.
Returns
-------
Path :
        Path to the found environment.yml (if successful)
"""
if name and path:
raise RuntimeError("mlonmcu environments are specified either by name OR path")
if name:
env_file = get_environment_by_name(name)
elif path:
env_file = get_environment_by_path(path)
else:
env_file = lookup_environment()
if not env_file:
raise RuntimeError("Lookup for mlonmcu environment was not successful.")
return env_file
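# Hedged usage sketch (assumption, not part of the module): callers pass either an
# environment name registered under the environments directory or an explicit path;
# "default" and the path below are hypothetical values.
#
#     env_file = resolve_environment_file(name="default")
#     env_file = resolve_environment_file(path="/home/user/mlonmcu_environment")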
def setup_logging(environment):
"""Check logging settings for environment and initialize the logs directory.
    Parameters
----------
environment : Environment
The MLonMCU Environment where paths, repos, features,... are configured.
"""
defaults = environment.defaults
level = defaults.log_level
to_file = defaults.log_to_file
rotate = defaults.log_rotate
if to_file:
assert "logs" in environment.paths, "To use a logfile, define a logging directory in your environment.yml"
directory = environment.paths["logs"].path
if not directory.is_dir():
directory.mkdir()
path = directory / "mlonmcu.log"
set_log_file(path, level=level, rotate=rotate)
class MlonMcuContext:
"""Contextmanager for mlonmcu environments.
Attributes
----------
environment : Environment
The MLonMCU Environment where paths, repos, features,... are configured.
lock : bool
        Whether the environment should be limited to only one user.
lockfile : FileLock
The lock for the environment directory (optional).
sessions : list
List of sessions for the current environment.
    session_idx : int
        A counter for determining the next session index.
cache : TaskCache
The cache where paths of installed dependencies can be looked up.
"""
def __init__(self, name: str = None, path: str = None, lock: bool = False):
env_file = resolve_environment_file(name=name, path=path)
assert env_file is not None, "Unable to find a MLonMCU environment"
self.environment = UserEnvironment.from_file(env_file) # TODO: move to __enter__
setup_logging(self.environment)
self.lock = lock
self.lockfile = filelock.FileLock(os.path.join(self.environment.home, ".lock"))
self.sessions = load_recent_sessions(self.environment)
self.session_idx = self.sessions[-1].idx if len(self.sessions) > 0 else -1
logger.debug(f"Restored {len(self.sessions)} recent sessions")
self.cache = TaskCache()
def create_session(self):
"""Create a new session in the current context."""
idx = self.session_idx + 1
logger.debug("Creating a new session with idx %s", idx)
temp_directory = self.environment.paths["temp"].path
sessions_directory = temp_directory / "sessions"
session_dir = sessions_directory / str(idx)
session = Session(idx=idx, dir=session_dir)
self.sessions.append(session)
self.session_idx = idx
# TODO: move this to a helper function
session_link = sessions_directory / "latest"
if os.path.islink(session_link):
os.unlink(session_link)
os.symlink(session_dir, session_link)
return session
def load_cache(self):
"""If available load the cache.ini file in the deps directory"""
if self.environment:
if self.environment.paths:
if "deps" in self.environment.paths:
deps_dir = self.environment.paths["deps"].path
if deps_dir.is_dir():
cache_file = deps_dir / "cache.ini"
if cache_file.is_file():
logger.info("Loading environment cache from file")
self.cache.read_from_file(cache_file)
logger.info("Successfully initialized cache")
return
logger.info("No cache found in deps directory")
def get_session(self, resume=False) -> Session:
"""Get an active session if available, else create a new one.
Returns
-------
Session:
An active session
"""
if resume:
assert len(self.sessions) > 0, "There is no recent session available"
session = self.sessions[-1]
assert False, "The latest session can not be resumed"
raise NotImplementedError
if self.session_idx < 0 or not self.sessions[-1].active:
self.create_session()
return self.sessions[-1]
def __enter__(self):
logger.debug("Enter MlonMcuContext")
if self.lockfile.is_locked:
raise RuntimeError(f"Current context is locked via: {self.lockfile.lock_file}")
if self.lock:
logger.debug("Locking context")
try:
self.lockfile.acquire(timeout=0)
except filelock.Timeout as err:
raise RuntimeError("Lock on current context could not be aquired.") from err
self.load_cache()
return self
def cleanup(self):
"""Clean up the context before leaving the context by closing all active sessions"""
logger.debug("Cleaning up active sessions")
for session in self.sessions:
if session.active:
session.close()
@property
def is_clean(self):
"""Return true if all sessions in the context are inactive"""
return not any(sess.active for sess in self.sessions)
# WARNING: this will remove the actual session directories!
def cleanup_sessions(self, keep=10, interactive=True):
"""Utility to cleanup old sessions from the disk."""
assert self.is_clean
all_sessions = self.sessions
# print("all_sessions", all_sessions)
to_keep = all_sessions[-keep:] if keep > 0 else []
to_remove = self.sessions[:-keep] if keep > 0 else self.sessions
count = len(to_remove)
if count > 0:
temp_dir = self.environment.lookup_path("temp").path
sessions_dir = temp_dir / "sessions"
print(f"The following {count} sessions will be removed from the environments temp directory ({temp_dir}):")
print(" ".join([str(session.idx) for session in to_remove]))
if ask_user("Are your sure?", default=not interactive, interactive=interactive):
for session in to_remove:
session_dir = sessions_dir / str(session.idx)
if not session_dir.is_dir():
# Skip / Dir does not exist
continue
shutil.rmtree(session_dir)
self.sessions = to_keep
self.session_idx = self.sessions[-1].idx if len(self.sessions) > 0 else -1
print("Done")
else:
print("Aborted")
else:
print("No sessions selected for removal")
    # We currently do not support rewriting the indices to start from scratch again, as this would lead to inconsistencies with the paths in the report/cmake build directory.
def export(self, dest, session_ids=None, run_ids=None, interactive=True):
dest = Path(dest)
if (dest.is_file() and dest.exists()) or (dest.is_dir() and utils.is_populated(dest)):
if not ask_user("Destination is already populated! Overwrite?", default=True, interactive=interactive):
print("Aborted")
return
dest_, ext = os.path.splitext(dest)
if session_ids is None:
# Can not select all sessions, fall back to latest session
session_ids = [-1]
if run_ids is not None:
assert len(session_ids) == 1, "Can only choose runs of a single session"
def find_session(sid):
if len(self.sessions) == 0:
return None
if sid == -1:
assert len(self.sessions) > 0
return self.sessions[-1]
for session in self.sessions:
if session.idx == sid:
return session
return None
with tempfile.TemporaryDirectory() as tmpdirname:
tmpdir = Path(tmpdirname)
for sid in session_ids:
session = find_session(sid)
if session is None:
print(
f"Lookup for session id {sid} failed. Available:", " ".join([str(s.idx) for s in self.sessions])
)
sys.exit(1)
if len(session_ids) == 1:
base = tmpdir
else:
base = tmpdir / str(sid)
if run_ids is None:
src = session.dir / "runs"
shutil.copytree(
src, base, dirs_exist_ok=True, symlinks=True
) # Warning: dirs_exist_ok=True requires python 3.8+
else:
base = base / "runs"
for rid in run_ids:
if rid >= len(session.runs):
print(
f"Lookup for run id {rid} failed in session {sid}. Available:",
" ".join([str(i) for i in range(len(session.runs))]),
)
sys.exit(1)
run = session.runs[rid] # TODO: We currently do not check if the index actually exists
if len(run_ids) == 1 and len(session_ids) == 1:
run_base = tmpdir
else:
run_base = base / str(rid)
src = run.dir
shutil.copytree(
src, run_base, dirs_exist_ok=True
) # Warning: dirs_exist_ok=True requires python 3.8+
if ext in [".zip", ".tar"]:
print(f"Creating archive: {dest}")
shutil.make_archive(dest_, ext[1:], tmpdirname)
else:
print(f"Creating directory: {dest}")
if dest.is_dir():
shutil.rmtree(dest) # Cleanup old contents
# dest.mkdir(exist_ok=True)
shutil.move(tmpdirname, str(dest))
print("Done")
def __exit__(self, exception_type, exception_value, traceback):
logger.debug("Exit MlonMcuContext")
self.cleanup()
if self.lock:
logger.debug("Releasing lock on context")
self.lockfile.release()
return False
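# Hedged usage sketch (assumption, not part of the module): the context manager is entered
# with an optional environment name/path and an optional exclusive lock; "default" below
# is a hypothetical environment name.
#
#     with MlonMcuContext(name="default", lock=True) as ctx:
#         session = ctx.get_session()
#         ...  # add runs to the session and process them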
|
StarcoderdataPython
|
6502690
|
import os
import importlib
from abc import ABC, abstractmethod
PLUGINS = {}
class Plugin(ABC):
@abstractmethod
def __call__(self, input: str) -> str:
pass
class NameConflictError(Exception):
def __init__(self, message):
self.message = message
def register_plugin(name: str, description: str, arguments: str):
def wrapper(plugin_class):
if name in PLUGINS:
raise NameConflictError(
f"Plugin name conflict: '{name}'. Double check"
" that all plugins have unique names."
)
plugin = plugin_class()
PLUGINS[name] = {
"name": name,
"description": description,
"arguments": arguments,
"func": plugin,
}
return plugin
return wrapper
def get_plugins():
return PLUGINS
def import_modules(dirname):
    """Import every plugin module in ``dirname`` so their @register_plugin decorators run."""
    for f in os.listdir(dirname):
        if (
            not f.startswith("_")
            and not f.startswith(".")
            # __file__ is a full path, so compare against its basename to actually
            # skip this registry module if it lives inside the plugin directory.
            and f != os.path.basename(__file__)
            and f.endswith(".py")
        ):
            file_name = f[: f.find(".py")]
            importlib.import_module(f"{dirname}.{file_name}")
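# Hedged usage sketch (assumption, not from the original project): a plugin module placed in
# the plugin directory would register itself like this; "reverse" and ReversePlugin are
# hypothetical names.
#
#     @register_plugin(name="reverse", description="Reverse the input", arguments="text")
#     class ReversePlugin(Plugin):
#         def __call__(self, input: str) -> str:
#             return input[::-1]
#
#     get_plugins()["reverse"]["func"]("abc")  # -> "cba"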
|
StarcoderdataPython
|
9716363
|
<reponame>homm/pillow-lut-tools
from __future__ import division, unicode_literals, absolute_import
import warnings
from pillow_lut import operations, generators, ImageFilter, Image
from pillow_lut import (identity_table, transform_lut, resize_lut, amplify_lut,
sample_lut_linear, sample_lut_cubic)
from . import PillowTestCase, disable_numpy
class TestSampleLutLinear(PillowTestCase):
def test_identity_2(self):
identity = identity_table(2)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_17(self):
identity = identity_table(17)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_sizes(self):
identity = identity_table((5, 6, 7))
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_interpolation(self):
lut = ImageFilter.Color3DLUT.generate(3, lambda r, g, b:
(r, g*g, b*b + r))
for point, res in [
(( 0, 0, 0), ( 0, 0, 0)),
((.3, 0, 0), (.3, 0, .3)),
((.6, 0, 0), (.6, 0, .6)),
(( 1, 0, 0), ( 1, 0, 1)),
((0, 0, 0), (0, 0, 0)),
((0, .3, 0), (0,.15, 0)),
((0, .6, 0), (0, .4, 0)),
((0, 1, 0), (0, 1, 0)),
((0, 0, 0), (0, 0, 0)),
((0, 0, .3), (0, 0,.15)),
((0, 0, .6), (0, 0, .4)),
((0, 0, 1), (0, 0, 1)),
(( 0, 0, 0), ( 0, 0, 0)),
((.3, .3, .3), (.3,.15,.45)),
((.6, .6, .6), (.6, .4, 1)),
(( 1, 1, 1), ( 1, 1, 2)),
]:
for l, r in zip(sample_lut_linear(lut, point), res):
self.assertAlmostEqual(l, r)
class TestSampleLutCubic(PillowTestCase):
def test_identity_2(self):
identity = identity_table(2)
with self.assertRaisesRegexp(ValueError, "requires a table of size 4"):
sample_lut_cubic(identity, (0, 0, 0))
def test_identity_4(self):
identity = identity_table(4)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_17(self):
identity = identity_table(17)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_sizes(self):
identity = identity_table((5, 6, 7))
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_interpolation(self):
lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b:
(r, g*g, b*b + r))
for point, res in [
(( 0, 0, 0), ( 0, 0, 0)),
((.3, 0, 0), (.3, 0, .3)),
((.6, 0, 0), (.6, 0, .6)),
(( 1, 0, 0), ( 1, 0, 1)),
((0, 0, 0), (0, 0, 0)),
((0, .3, 0), (0,.09, 0)),
((0, .6, 0), (0,.36, 0)),
((0, 1, 0), (0, 1, 0)),
((0, 0, 0), (0, 0, 0)),
((0, 0, .3), (0, 0,.09)),
((0, 0, .6), (0, 0,.36)),
((0, 0, 1), (0, 0, 1)),
(( 0, 0, 0), ( 0, 0, 0)),
((.3, .3, .3), (.3,.09,.39)),
((.6, .6, .6), (.6,.36,.96)),
(( 1, 1, 1), ( 1, 1, 2)),
]:
for l, r in zip(sample_lut_cubic(lut, point), res):
self.assertAlmostEqual(l, r)
class TestResizeLut(PillowTestCase):
identity7 = identity_table(7)
identity9 = identity_table(9)
lut7_in = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut7_out = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**(1/1.2), g**(1/1.2), b**(1/1.2)))
lut9_in = ImageFilter.Color3DLUT.generate(9,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_wrong_args(self):
with self.assertRaisesRegexp(ValueError, "interpolations"):
result = resize_lut(identity_table(4), 5,
interp=Image.NEAREST)
def test_correct_args(self):
result = resize_lut(identity_table((3, 4, 5), target_mode='RGB'),
(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
result = resize_lut(self.lut5_4c, 3)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
with disable_numpy(operations):
result = resize_lut(self.lut5_4c, 3)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
def test_correctness_linear(self):
res_numpy = resize_lut(self.lut9_in, 7)
self.assertAlmostEqualLuts(res_numpy, self.lut7_in, 6)
with disable_numpy(operations):
res_native = resize_lut(self.lut9_in, 7)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_correctness_cubic(self):
result = resize_lut(self.lut9_in, 7, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in, 7)
def test_fallback_to_linear(self):
lut3 = ImageFilter.Color3DLUT.generate((5, 5, 3),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
lut4 = ImageFilter.Color3DLUT.generate((5, 5, 4),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
with warnings.catch_warnings(record=True) as w:
cubic = resize_lut(lut4, (5, 5, 3), interp=Image.CUBIC)
self.assertEqual(len(w), 0)
linear = resize_lut(lut4, (5, 5, 3))
self.assertNotEqualLutTables(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = resize_lut(lut3, (5, 5, 4), interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = resize_lut(lut3, (5, 5, 4))
self.assertEqualLuts(cubic, linear)
def test_application(self):
im = Image.new('RGB', (10, 10))
lut_numpy = resize_lut(identity_table(5), 4)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
with disable_numpy(operations):
lut_native = resize_lut(identity_table(5), 4)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
with disable_numpy(generators):
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'list')
lut_numpy = resize_lut(args, 4)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'ndarray')
with disable_numpy(operations):
lut_native = resize_lut(args, 4)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
class TestTransformLut(PillowTestCase):
identity7 = identity_table(7)
identity9 = identity_table(9)
lut7_in = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut7_out = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**(1/1.2), g**(1/1.2), b**(1/1.2)))
lut9_in = ImageFilter.Color3DLUT.generate(9,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_wrong_args(self):
with self.assertRaisesRegexp(ValueError, "only 3-channel cubes"):
result = transform_lut(self.lut5_4c, identity_table(3))
with self.assertRaisesRegexp(ValueError, "only 3-channel cubes"):
result = transform_lut(self.lut5_4c, identity_table(3),
target_size=5)
with self.assertRaisesRegexp(ValueError, "interpolations"):
result = transform_lut(identity_table(4), identity_table(4),
interp=Image.NEAREST)
def test_correct_args(self):
result = transform_lut(identity_table((3, 4, 5), target_mode='RGB'),
identity_table((6, 7, 8), target_mode='HSV'))
self.assertEqual(tuple(result.size), (3, 4, 5))
self.assertEqual(result.mode, 'HSV')
self.assertEqual(result.channels, 3)
result = transform_lut(identity_table(3), self.lut5_4c)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
with disable_numpy(operations):
result = transform_lut(identity_table(3), self.lut5_4c)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
result = transform_lut(identity_table(4, target_mode='RGB'),
identity_table(5), target_size=(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
with disable_numpy(operations):
result = transform_lut(identity_table(4, target_mode='RGB'),
identity_table(5), target_size=(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
def test_identity_linear(self):
res_numpy = transform_lut(self.lut7_in, self.identity9)
self.assertAlmostEqualLuts(res_numpy, self.lut7_in)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_in, self.identity9)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = transform_lut(self.identity9, self.lut7_in)
self.assertAlmostEqualLuts(res_numpy, self.lut9_in, 4)
with disable_numpy(operations):
res_native = transform_lut(self.identity9, self.lut7_in)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_identity_cubic(self):
result = transform_lut(self.lut7_in, self.identity9, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in)
result = transform_lut(self.identity7, self.lut9_in, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in, 7)
def test_correctness_linear(self):
res_numpy = transform_lut(self.lut7_in, self.lut7_out)
self.assertAlmostEqualLuts(res_numpy, self.identity7, 4)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_in, self.lut7_out)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = transform_lut(self.lut7_out, self.lut7_in)
self.assertAlmostEqualLuts(res_numpy, self.identity7, 6)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_out, self.lut7_in)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_correctness_cubic(self):
result = transform_lut(self.lut7_in, self.lut7_out, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity7, 4)
result = transform_lut(self.lut7_out, self.lut7_in, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity7, 7)
def test_target_size_correctness_linear(self):
res_numpy = transform_lut(self.lut7_out, self.lut7_in, target_size=9)
self.assertAlmostEqualLuts(res_numpy, self.identity9, 4)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_out, self.lut7_in,
target_size=9)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_target_size_correctness_cubic(self):
result = transform_lut(self.lut7_out, self.lut7_in,
target_size=9, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity9, 4)
def test_fallback_to_linear(self):
lut3 = ImageFilter.Color3DLUT.generate((5, 5, 3),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
lut4 = ImageFilter.Color3DLUT.generate((5, 5, 4),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 3)), lut4,
interp=Image.CUBIC)
self.assertEqual(len(w), 0)
linear = transform_lut(identity_table((5, 5, 3)), lut4)
self.assertNotEqualLutTables(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 4)), lut3,
interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = transform_lut(identity_table((5, 5, 4)), lut3)
self.assertEqualLuts(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 3)), lut4,
target_size=(5, 5, 4), interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = transform_lut(identity_table((5, 5, 3)), lut4,
target_size=(5, 5, 4))
self.assertEqualLuts(cubic, linear)
def test_application(self):
im = Image.new('RGB', (10, 10))
lut_numpy = transform_lut(identity_table(5), identity_table(5))
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
with disable_numpy(operations):
lut_native = transform_lut(identity_table(5), identity_table(5))
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
with disable_numpy(generators):
args = identity_table(5), identity_table(5)
self.assertEqual(args[0].table.__class__.__name__, 'list')
lut_numpy = transform_lut(*args)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
args = identity_table(5), identity_table(5)
self.assertEqual(args[0].table.__class__.__name__, 'ndarray')
with disable_numpy(operations):
lut_native = transform_lut(*args)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
class TestAmplifyLut(PillowTestCase):
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_correct_args(self):
result = amplify_lut(identity_table((3, 4, 5)), -1)
self.assertEqual(tuple(result.size), (3, 4, 5))
self.assertEqual(result.channels, 3)
result = amplify_lut(self.lut5_4c, 5)
self.assertEqual(tuple(result.size), (5, 5, 5))
self.assertEqual(result.channels, 4)
def test_correctness(self):
lut = ImageFilter.Color3DLUT.generate(5,
callback=lambda r, g, b: (r+0.1, g*1.1, b-0.1))
lut_05x = ImageFilter.Color3DLUT.generate(5,
callback=lambda r, g, b: (r+0.05, g*1.05, b-0.05))
lut_2x = ImageFilter.Color3DLUT.generate(5,
callback=lambda r, g, b: (r+0.2, g*1.2, b-0.2))
identity = identity_table(5)
res_numpy = amplify_lut(lut, 1.0)
with disable_numpy(operations):
res_native = amplify_lut(lut, 1.0)
self.assertAlmostEqualLuts(res_numpy, lut)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = amplify_lut(lut, 0)
with disable_numpy(operations):
res_native = amplify_lut(lut, 0)
self.assertEqualLuts(res_numpy, identity)
self.assertEqualLuts(res_native, res_numpy)
res_numpy = amplify_lut(lut, 0.5)
with disable_numpy(operations):
res_native = amplify_lut(lut, 0.5)
self.assertAlmostEqualLuts(res_numpy, lut_05x)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = amplify_lut(lut, 2)
with disable_numpy(operations):
res_native = amplify_lut(lut, 2)
self.assertAlmostEqualLuts(res_numpy, lut_2x)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_correctness_4c(self):
lut = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r+0.1, g*1.1, b-0.1, r+g+b))
lut_2x = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r+0.2, g*1.2, b-0.2, r+g+b))
res_numpy = amplify_lut(lut, 2)
with disable_numpy(operations):
res_native = amplify_lut(lut, 2)
self.assertAlmostEqualLuts(res_numpy, lut_2x)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_application(self):
im = Image.new('RGB', (10, 10))
lut_numpy = amplify_lut(identity_table(5), 2.0)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
with disable_numpy(operations):
lut_native = amplify_lut(identity_table(5), 2.0)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
with disable_numpy(generators):
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'list')
lut_numpy = amplify_lut(args, 2.0)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'ndarray')
with disable_numpy(operations):
lut_native = amplify_lut(args, 2.0)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
|
StarcoderdataPython
|
3206549
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 05 11:55:37 2018
@author: hugonnet
SHELL LIBRARY:
Library of Python functions for file, directory and path manipulation
LIST:
"""
from __future__ import print_function
import os, sys
import shutil
import tarfile, zipfile
from contextlib import contextmanager
import errno
import traceback
# from types import ModuleType, FunctionType
# from gc import get_referents
# from pympler import asizeof
def create_tmp_dir_for_outfile(file_out):
tmp_dir = os.path.join(os.path.dirname(file_out), 'tmp_'+os.path.splitext(os.path.basename(file_out))[0]) + os.path.sep
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
return tmp_dir
def remove_tmp_dir_for_outfile(file_out):
tmp_dir = os.path.join(os.path.dirname(file_out), 'tmp_'+os.path.splitext(os.path.basename(file_out))[0]) + os.path.sep
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir,ignore_errors=True)
def make_pdirs(dir_path):
outdir = os.path.abspath(dir_path)
try:
os.makedirs(outdir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(outdir):
pass
else:
raise
def add_suf_before_ext(basename,suf):
filename=os.path.splitext(basename)[0]
ext=os.path.splitext(basename)[1]
sufname=filename+suf+ext
return sufname
def extract_file_from_tar_gz(tar_in,filename_in,file_out):
#ref: https://stackoverflow.com/questions/37752400/how-do-i-extract-only-the-file-of-a-tar-gz-member
with tarfile.open(tar_in, "r") as tar:
counter = 0
for member in tar:
if member.isfile():
filename = os.path.basename(member.name)
if filename != filename_in: # do your check
continue
with open(file_out, "wb") as output:
print('Extracting '+filename_in + ' from archive '+tar_in+' to '+file_out+'...')
shutil.copyfileobj(tar.fileobj, output, member.size)
break # got our file
counter += 1
if counter % 1000 == 0:
tar.members = [] # free ram... yes we have to do this manually
def extract_file_from_zip(zip_in,filename_in,file_out):
#ref: https://stackoverflow.com/questions/4917284/extract-files-from-zip-without-keeping-the-structure-using-python-zipfile
with zipfile.ZipFile(zip_in) as zip_file:
for member in zip_file.namelist():
# if member.isfile():
filename = os.path.basename(member)
if filename != filename_in:
# skip directories
continue
# copy file (taken from zipfile's extract)
source = zip_file.open(member)
target = open(file_out, "wb")
with source, target:
shutil.copyfileobj(source, target)
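# Hedged usage sketch (assumption): both helpers extract a single member from an archive by
# its basename; the archive and file names below are hypothetical.
#
#     extract_file_from_tar_gz("tiles.tar.gz", "tile_N46E007.tif", "/tmp/tile_N46E007.tif")
#     extract_file_from_zip("tiles.zip", "tile_N46E007.tif", "/tmp/tile_N46E007.tif")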
#redirecting stdout and stderr, source: https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
def fileno(file_or_fd):
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
if stdout is None:
stdout = sys.stdout
stdout_fd = fileno(stdout)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
stdout.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(to), stdout_fd) # $ exec >&to
except ValueError: # filename
with open(to, 'wb') as to_file:
os.dup2(to_file.fileno(), stdout_fd) # $ exec > to
try:
yield stdout # allow code to be run with the redirected stdout
finally:
# restore stdout to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
stdout.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
def merged_stderr_stdout(): # $ exec 2>&1
return stdout_redirected(to=sys.stdout, stdout=sys.stderr)
# def getsize(obj):
# BLACKLIST = type, ModuleType, FunctionType
# #source: https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
# # Custom objects know their class.
# # Function objects seem to know way too much, including modules.
# # Exclude modules as well.
# """sum size of object & members."""
# if isinstance(obj, BLACKLIST):
# raise TypeError('getsize() does not take argument of type: '+ str(type(obj)))
# seen_ids = set()
# size = 0
# objects = [obj]
# while objects:
# need_referents = []
# for obj in objects:
# if not isinstance(obj, BLACKLIST) and id(obj) not in seen_ids:
# seen_ids.add(id(obj))
# size += sys.getsizeof(obj)
# need_referents.append(obj)
# objects = get_referents(*need_referents)
# return size/1000000.
# def getsizeobj(obj):
# return asizeof.asizeof(obj)
# def some_function_with_cached_sys_stdout(stdout=sys.stderr):
# print('cached stdout', file=stdout)
# def some_function_with_stdout_and_error():
# print('stdout in function')
# x = 1/0
#
#
# with open('/home/atom/ongoing/test.log','w') as f:
#
# with stdout_redirected(to=f), merged_stderr_stdout():
# print('stdout goes to devnull')
# print('stderr also goes to stdout that goes to devnull', file=sys.stderr)
# try:
# some_function_with_stdout_and_error()
# except Exception:
# print(traceback.format_exc())
#
# print('stdout is back')
# some_function_with_stdout_and_error()
# print('stderr is back', file=sys.stderr)
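# Additional usage sketch (added for illustration; the output path is hypothetical,
# kept commented to match the examples above):
# if __name__ == '__main__':
#     tmp = create_tmp_dir_for_outfile('/tmp/example_out.tif')
#     print('temporary work dir:', tmp)
#     remove_tmp_dir_for_outfile('/tmp/example_out.tif')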
|
StarcoderdataPython
|
5184961
|
"""Example: using ymlref, resolving references to remote document."""
from io import StringIO
import ymlref
DOCUMENT = """
db_name: people
content:
$ref: https://raw.githubusercontent.com/dexter2206/ymlref/feature/external-references/examples/people.yml
"""
def main():
"""Entrypoint for this example."""
doc = ymlref.loads(DOCUMENT)
print('People in DB: ')
for person in doc['content']:
print(person['first_name'] + ' ' + person['last_name'])
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11216637
|
<filename>python/other_sources/Go_Game.py
#!/usr/bin/env python3
# https://leetcode.com/discuss/interview-question/391195/
import typing
import unittest
class DFSError(Exception):
pass
def go_name(board: typing.List[typing.List], x: int, y: int) -> int:
m = len(board)
if not m:
return 0
n = len(board[0])
if not n:
return 0
def dfs(i: int, j: int, visit) -> typing.List:
if board[i][j] == 'b':
return []
if board[i][j] == 'e':
raise DFSError()
if visit[i][j]:
return []
visit[i][j] = True
stones = [(i, j)]
for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
if ni < 0 or ni >= m or nj < 0 or nj >= n:
continue
stones.extend(dfs(ni, nj, visit))
return stones
def play():
visit = [[False for _ in range(n)] for _ in range(m)]
stones = []
for i in range(m):
for j in range(n):
try:
stones += dfs(i, j, visit)
except DFSError:
continue
for i, j in stones:
board[i][j] = 'b'
return len(stones)
play()
board[x][y] = 'b'
return play()
e, b, w = 'e', 'b', 'w'
class Test(unittest.TestCase):
def test_case0(self):
board = [[e, e, e, e, b, b, b],
[e, e, e, e, b, w, b],
[e, e, e, e, b, e, b],
[e, e, e, e, e, e, e]]
row, col = 2, 5
self.assertEqual(go_name(board, row, col), 1)
def test_case1(self):
board = [[e, e, e, e, b, b, b],
[e, e, e, b, w, w, b],
[e, e, e, e, b, e, b],
[e, e, e, e, e, e, e]]
row, col = 2, 5
self.assertEqual(go_name(board, row, col), 2)
def test_case2(self):
board = [[e, e, b, b, b, b, b],
[e, e, b, w, e, w, b],
[e, e, b, b, b, e, b],
[e, e, e, e, e, e, e]]
row, col = 2, 5
self.assertEqual(go_name(board, row, col), 0)
def test_case3(self):
board = [[b, w, b, b, b, b, b],
[e, b, b, w, e, w, b],
[e, e, b, b, b, b, b],
[e, e, e, e, e, e, e]]
row, col = 1, 4
self.assertEqual(go_name(board, row, col), 2)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6620143
|
<gh_stars>1000+
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from __future__ import with_statement
import os
import os.path
class ChangeDirectory(object):
"""
    ChangeDirectory is a context manager that allows
    you to temporarily change the working directory.
>>> import tempfile
>>> td = os.path.realpath(tempfile.mkdtemp())
>>> currentdirectory = os.getcwd()
>>> with ChangeDirectory(td) as cd:
... assert cd.current == td
... assert os.getcwd() == td
... assert cd.previous == currentdirectory
... assert os.path.normpath(os.path.join(cd.current, cd.relative)) == cd.previous
...
>>> assert os.getcwd() == currentdirectory
>>> with ChangeDirectory(td) as cd:
... os.mkdir('foo')
... with ChangeDirectory('foo') as cd2:
... assert cd2.previous == cd.current
... assert cd2.relative == '..'
... assert os.getcwd() == os.path.join(td, 'foo')
... assert os.getcwd() == td
... assert cd.current == td
... os.rmdir('foo')
...
>>> os.rmdir(td)
>>> with ChangeDirectory('.') as cd:
... assert cd.current == currentdirectory
... assert cd.current == cd.previous
... assert cd.relative == '.'
"""
def __init__(self, directory):
self._dir = directory
self._cwd = os.getcwd()
self._pwd = self._cwd
@property
def current(self):
return self._cwd
@property
def previous(self):
return self._pwd
@property
def relative(self):
c = self._cwd.split(os.path.sep)
p = self._pwd.split(os.path.sep)
l = min(len(c), len(p))
i = 0
while i < l and c[i] == p[i]:
i += 1
return os.path.normpath(os.path.join(*(['.'] + (['..'] * (len(c) - i)) + p[i:])))
def __enter__(self):
self._pwd = self._cwd
os.chdir(self._dir)
self._cwd = os.getcwd()
return self
def __exit__(self, *args):
os.chdir(self._pwd)
self._cwd = self._pwd
if __name__ == '__main__':
import doctest
doctest.testmod()
|
StarcoderdataPython
|
5023775
|
import argparse
arg_lists = []
parser = argparse.ArgumentParser(description='RAM')
def str2bool(v):
return v.lower() in ('true', '1')
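# Worked example (added comment): str2bool('True') -> True, str2bool('no') -> False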
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
# glimpse network params
glimpse_arg = add_argument_group('Glimpse Network Params')
glimpse_arg.add_argument('--patch_size', type=int, default=8,
help='size of extracted patch at highest res')
glimpse_arg.add_argument('--glimpse_scale', type=int, default=2,
help='scale of successive patches')
glimpse_arg.add_argument('--num_patches', type=int, default=1,
help='# of downscaled patches per glimpse')
glimpse_arg.add_argument('--loc_hidden', type=int, default=128,
help='hidden size of loc fc')
glimpse_arg.add_argument('--glimpse_hidden', type=int, default=128,
help='hidden size of glimpse fc')
# core network params
core_arg = add_argument_group('Core Network Params')
core_arg.add_argument('--num_glimpses', type=int, default=6,
help='# of glimpses, i.e. BPTT iterations')
core_arg.add_argument('--hidden_size', type=int, default=256,
help='hidden size of rnn')
# reinforce params
reinforce_arg = add_argument_group('Reinforce Params')
reinforce_arg.add_argument('--std', type=float, default=0.17,
help='gaussian policy standard deviation')
reinforce_arg.add_argument('--M', type=float, default=10,
help='Monte Carlo sampling for valid and test sets')
# data params
data_arg = add_argument_group('Data Params')
data_arg.add_argument('--valid_size', type=float, default=0.1,
help='Proportion of training set used for validation')
data_arg.add_argument('--batch_size', type=int, default=32,
help='# of images in each batch of data')
data_arg.add_argument('--num_workers', type=int, default=4,
help='# of subprocesses to use for data loading')
data_arg.add_argument('--shuffle', type=str2bool, default=True,
help='Whether to shuffle the train and valid indices')
data_arg.add_argument('--show_sample', type=str2bool, default=False,
help='Whether to visualize a sample grid of the data')
# training params
train_arg = add_argument_group('Training Params')
train_arg.add_argument('--is_train', type=str2bool, default=True,
help='Whether to train or test the model')
train_arg.add_argument('--momentum', type=float, default=0.5,
help='Nesterov momentum value')
train_arg.add_argument('--epochs', type=int, default=200,
help='# of epochs to train for')
train_arg.add_argument('--init_lr', type=float, default=3e-4,
help='Initial learning rate value')
train_arg.add_argument('--lr_patience', type=int, default=10,
help='Number of epochs to wait before reducing lr')
train_arg.add_argument('--train_patience', type=int, default=50,
help='Number of epochs to wait before stopping train')
# other params
misc_arg = add_argument_group('Misc.')
misc_arg.add_argument('--use_gpu', type=str2bool, default=False,
help="Whether to run on the GPU")
misc_arg.add_argument('--best', type=str2bool, default=True,
help='Load best model or most recent for testing')
misc_arg.add_argument('--random_seed', type=int, default=1,
help='Seed to ensure reproducibility')
misc_arg.add_argument('--data_dir', type=str, default='./data',
help='Directory in which data is stored')
misc_arg.add_argument('--ckpt_dir', type=str, default='./ckpt',
help='Directory in which to save model checkpoints')
misc_arg.add_argument('--logs_dir', type=str, default='./logs/',
help='Directory in which Tensorboard logs wil be stored')
misc_arg.add_argument('--use_tensorboard', type=str2bool, default=False,
help='Whether to use tensorboard for visualization')
misc_arg.add_argument('--resume', type=str2bool, default=False,
help='Whether to resume training from checkpoint')
misc_arg.add_argument('--print_freq', type=int, default=10,
help='How frequently to print training details')
misc_arg.add_argument('--plot_freq', type=int, default=1,
help='How frequently to plot glimpses')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
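if __name__ == '__main__':
    # Minimal sketch (added for illustration): parse the default arguments and
    # print a few of them. Assumes the script is run without conflicting CLI flags.
    config, unparsed = get_config()
    print(config.batch_size, config.hidden_size, config.num_glimpses)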
|
StarcoderdataPython
|
4991051
|
<reponame>FlanFlanagan/raven<gh_stars>100-1000
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on October 28, 2015
@author: mandd
"""
import os
import numpy as np
from scipy import interpolate
from scipy import integrate
import copy
from .PostProcessorReadyInterface import PostProcessorReadyInterface
from utils import InputData, InputTypes
class HistorySetSampling(PostProcessorReadyInterface):
"""
This Post-Processor performs the conversion from HistorySet to HistorySet
    The conversion is made so that each history H is re-sampled according to a specific sampling strategy.
It can be used to reduce the amount of space required by the HistorySet.
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super().getInputSpecification()
HSSamplingType = InputTypes.makeEnumType("HSSampling", "HSSamplingType", ['uniform','firstDerivative','secondDerivative','filteredFirstDerivative','filteredSecondDerivative'])
inputSpecification.addSub(InputData.parameterInputFactory("samplingType", contentType=HSSamplingType))
inputSpecification.addSub(InputData.parameterInputFactory("numberOfSamples", contentType=InputTypes.IntegerType))
inputSpecification.addSub(InputData.parameterInputFactory("tolerance", contentType=InputTypes.FloatType))
inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType))
HSInterpolationType = InputTypes.makeEnumType("HSInterpolation", "HSInterpolationType", ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'intervalAverage'])
inputSpecification.addSub(InputData.parameterInputFactory("interpolation", contentType=HSInterpolationType))
return inputSpecification
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
    self.pivotParameter = None  # pivotParameter identifies the ID of the temporal variable
self.setInputDataType('dict')
self.keepInputMeta(True)
self.outputMultipleRealizations = True # True indicate multiple realizations are returned
self.validDataType = ['HistorySet'] # The list of accepted types of DataObject
self.samplingType = None
self.numberOfSamples = None
self.tolerance = None
self.interpolation = None
def initialize(self, runInfo, inputs, initDict=None):
"""
Method to initialize the DataClassifier post-processor.
@ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
@ In, inputs, list, list of inputs
@ In, initDict, dict, optional, dictionary with initialization options
@ Out, None
"""
super().initialize(runInfo, inputs, initDict)
if len(inputs)>1:
self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject')
if inputs[0].type != 'HistorySet':
self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type))
def _handleInput(self, paramInput):
"""
Function to handle the parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == 'samplingType':
self.samplingType = child.value
elif child.getName() == 'numberOfSamples':
self.numberOfSamples = child.value
elif child.getName() == 'tolerance':
self.tolerance = child.value
elif child.getName() == 'pivotParameter':
self.pivotParameter = child.value
elif child.getName() == 'interpolation':
self.interpolation = child.value
else:
self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized')
if self.pivotParameter is None:
self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : time ID is not specified')
if self.samplingType == 'uniform' or self.samplingType == 'firstDerivative' or self.samplingType == 'secondDerivative':
if self.numberOfSamples is None or self.numberOfSamples < 0:
self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : number of samples is not specified or less than 0')
if self.samplingType == 'filteredFirstDerivative' or self.samplingType == 'filteredSecondDerivative':
if self.tolerance is None or self.tolerance < 0.0:
self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : tolerance is not specified or less than 0')
def run(self,inputIn):
"""
Method to post-process the dataObjects
@ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects
inputIn = {'Data':listData, 'Files':listOfFiles},
listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with
DataDict is a dictionary that has the format
dataDict['dims'] = dict {varName:independentDimensions}
dataDict['metadata'] = dict {metaVarName:metaVarValue}
dataDict['type'] = str TypeOfDataObject
dataDict['inpVars'] = list of input variables
dataDict['outVars'] = list of output variables
          dataDict['numberRealizations'] = int SizeOfDataObject
dataDict['name'] = str DataObjectName
dataDict['metaKeys'] = list of meta variables
dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)}
@ Out, outputDic, dict, dictionary of resampled histories
"""
_, _, inputDic = inputIn['Data'][0]
outputDic={'data':{}}
# load up the input data into the output
for var in inputDic['inpVars']:
outputDic['data'][var] = copy.deepcopy(inputDic['data'][var])
# loop over realizations and find the desired sample points
for hist in range(inputDic['numberRealizations']):
# set up the realization
rlz={}
for var in inputDic['outVars']:
rlz[var] = inputDic['data'][var][hist]
rlz[self.pivotParameter]=inputDic['data'][self.pivotParameter][hist]
# do the sampling based on what the user requested
if self.samplingType in ['uniform','firstDerivative','secondDerivative']:
outData = self.varsTimeInterp(rlz)
elif self.samplingType in ['filteredFirstDerivative','filteredSecondDerivative']:
outData = timeSeriesFilter(self.pivotParameter,rlz,self.samplingType,self.tolerance)
else:
self.raiseAnError(IOError, 'HistorySetSampling Interfaced Post-Processor ' + str(self.name) + ' : not recognized samplingType')
for var in outData.keys():
if hist == 0:
outputDic['data'][var] = np.zeros(inputDic['numberRealizations'], dtype=object)
outputDic['data'][var][hist] = outData[var]
# add meta variables back
for key in inputDic['metaKeys']:
outputDic['data'][key] = inputDic['data'][key]
outputDic['dims'] = copy.deepcopy(inputDic['dims'])
return outputDic
def varsTimeInterp(self, vars):
"""
This function samples a multi-variate temporal function
      @ In, vars, dict, data set that contains the information of the multi-variate temporal function (this is supposed to be a dictionary:
{'pivotParameter':time_array, 'var1':var1_array, ..., 'varn':varn_array})
@ Out, newVars, dict, data set that is a sampled version of vars
"""
localPivotParameter = vars[self.pivotParameter]
tMin = localPivotParameter[0]
tMax = localPivotParameter[-1]
newVars={}
if self.samplingType == 'uniform':
if self.interpolation == 'intervalAverage':
newTime = np.linspace(tMin,tMax,self.numberOfSamples+1)[0:-1]
else:
newTime = np.linspace(tMin,tMax,self.numberOfSamples)
elif self.samplingType == 'firstDerivative' or self.samplingType == 'secondDerivative':
newTime = self.derivativeTimeValues(vars)
else:
self.raiseAnError(RuntimeError,'type ' + self.samplingType + ' is not a valid type. Function: varsTimeInterp (mathUtils)')
for key in vars.keys():
if key == self.pivotParameter:
newVars[key] = newTime
else:
if self.interpolation == 'intervalAverage':
newVars[key] = np.zeros(shape=newTime.shape)
deltaT = newTime[1]-newTime[0] if len(newTime) > 1 else tMax
for tIdx in range(len(newTime)):
t = newTime[tIdx]
extractCondition = (localPivotParameter>=t) * (localPivotParameter<=t+deltaT)
extractVar = np.extract(extractCondition, vars[key])
extractTime = np.extract(extractCondition, localPivotParameter)
newVars[key][tIdx] = integrate.trapz(extractVar, extractTime) / deltaT
else:
interp = interpolate.interp1d(vars[self.pivotParameter], vars[key], self.interpolation)
newVars[key]=interp(newTime)
return newVars
def derivativeTimeValues(self, var):
"""
This function computes the new temporal variable
      @ In, var, dict, data set that contains the information of the multi-variate temporal function (this is supposed to be a dictionary:
{'pivotParameter':time_array, 'var1':var1_array, ..., 'varn':varn_array})
@ Out, newTime, list, values of the new temporal variable
"""
newTime = np.zeros(self.numberOfSamples)
cumDerivative = np.zeros(var[self.pivotParameter].size)
normalizedVar = {}
for keys in var.keys():
normalizedVar[keys] = var[keys]
if keys != self.pivotParameter:
minVal = np.min(var[keys])
maxVal = np.max(var[keys])
        if maxVal != minVal:
normalizedVar[keys] = (var[keys] - minVal)/(maxVal - minVal)
else:
normalizedVar[keys] = var[keys]/np.float64(1.0)
else:
normalizedVar[keys]=var[keys]/np.float64(1.0)
if self.samplingType=='firstDerivative':
for t in range(1, normalizedVar[self.pivotParameter].shape[0]):
t_contrib=0.0
for keys in normalizedVar.keys():
t_contrib += abs(normalizedVar[keys][t] - normalizedVar[keys][t-1])/(normalizedVar[self.pivotParameter][t]-normalizedVar[self.pivotParameter][t-1])
cumDerivative[t] = cumDerivative[t-1] + t_contrib
elif self.samplingType=='secondDerivative':
for t in range(1, normalizedVar[self.pivotParameter].shape[0]-1):
t_contrib=0.0
for keys in normalizedVar.keys():
t_contrib += abs(normalizedVar[keys][t+1] - 2.0 * normalizedVar[keys][t] + normalizedVar[keys][t-1])/(normalizedVar[self.pivotParameter][t]-normalizedVar[self.pivotParameter][t-1])**2
cumDerivative[t] = cumDerivative[t-1] + t_contrib
cumDerivative[-1] = cumDerivative[normalizedVar[self.pivotParameter].shape[0]-2]
else:
self.raiseAnError(RuntimeError,'type ' + self.samplingType + ' is not a valid type. Function: derivativeTimeValues')
cumDamageInstant = np.linspace(cumDerivative[0],cumDerivative[-1],self.numberOfSamples)
for i in range(self.numberOfSamples-1):
index = (np.abs(cumDerivative - cumDamageInstant[i])).argmin()
newTime[i] = var[self.pivotParameter][index]
newTime[-1] = var[self.pivotParameter][-1]
return newTime
def timeSeriesFilter(pivotParameter, vars, filterType, filterValue):
""" This function sample a multi-variate temporal function
pivotParameter : the ID of the temporal variable
vars : data set that contained the information of the multi-variate temporal function (this is supposed to be a
dictionary: {'pivotParameter':time_array, 'var1':var1_array, ..., 'varn':varn_array})
samplingType: type of sampling used to determine the coordinate of the numSamples samples ('firstDerivative', 'secondDerivative')
filterValue : value associated to the filter
"""
derivative = np.zeros(vars[pivotParameter].size)
if filterType=='filteredFirstDerivative':
for t in range(1, len(vars[pivotParameter])):
t_contrib=0.0
for keys in vars.keys():
if keys != pivotParameter:
t_contrib += abs((vars[keys][t] - vars[keys][t-1])/(vars[pivotParameter][t] - vars[pivotParameter][t-1]))
derivative[t] = t_contrib
elif filterType=='filteredSecondDerivative':
for t in range(1, vars[pivotParameter].size-1):
t_contrib=0.0
for keys in vars.keys():
t_contrib += abs((vars[keys][t+1] - 2.0 * vars[keys][t] + vars[keys][t-1])/(vars[pivotParameter][t] - vars[pivotParameter][t-1])**2)
derivative[t] = t_contrib
derivative[-1] = derivative[len(vars[pivotParameter])-2]
newVars = {}
for key in vars:
newVars[key]=np.array(vars[key][0])
for t in range(derivative.size):
if derivative[t] > filterValue:
for key in vars:
newVars[key]=np.append(newVars[key],vars[key][t])
for key in vars:
newVars[key]=np.append(newVars[key],vars[key][-1])
return newVars
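# Behavior sketch (added comment, not in the original module): timeSeriesFilter keeps
# the first point, every time step whose summed absolute (first or second) derivative
# exceeds filterValue, and the last point, so flat stretches of the histories are
# dropped while jumps are preserved.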
|
StarcoderdataPython
|
12850864
|
from k5test import *
# Test that the kdcpreauth client_keyblock() callback matches the key
# indicated by the etype info, and returns NULL if no key was selected.
testpreauth = os.path.join(buildtop, 'plugins', 'preauth', 'test', 'test.so')
conf = {'plugins': {'kdcpreauth': {'module': 'test:' + testpreauth},
'clpreauth': {'module': 'test:' + testpreauth}}}
realm = K5Realm(create_host=False, get_creds=False, krb5_conf=conf)
realm.run([kadminl, 'modprinc', '+requires_preauth', realm.user_princ])
realm.run([kadminl, 'setstr', realm.user_princ, 'teststring', 'testval'])
realm.run([kadminl, 'addprinc', '-nokey', '+requires_preauth', 'nokeyuser'])
realm.kinit(realm.user_princ, password('<PASSWORD>'), expected_msg='testval')
realm.kinit('nokeyuser', password('<PASSWORD>'), expected_code=1,
expected_msg='no key')
# Preauth type -123 is the test preauth module type; 133 is FAST
# PA-FX-COOKIE; 2 is encrypted timestamp.
# Test normal preauth flow.
mark('normal')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'Decrypted AS reply')
realm.run(['./icred', realm.user_princ, password('<PASSWORD>')],
expected_msg='testval', expected_trace=msgs)
# Test successful optimistic preauth.
mark('optimistic')
expected_trace = ('Attempting optimistic preauth',
'Processing preauth types: -123',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: -123',
'Decrypted AS reply')
realm.run(['./icred', '-o', '-123', realm.user_princ, password('<PASSWORD>')],
expected_trace=expected_trace)
# Test optimistic preauth failing on client, falling back to encrypted
# timestamp.
mark('optimistic (client failure)')
msgs = ('Attempting optimistic preauth',
'Processing preauth types: -123',
'/induced optimistic fail',
'Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Encrypted timestamp (for ',
'module encrypted_timestamp (2) (real) returned: 0/Success',
'preauth for next request: PA-FX-COOKIE (133), PA-ENC-TIMESTAMP (2)',
'Decrypted AS reply')
realm.run(['./icred', '-o', '-123', '-X', 'fail_optimistic', realm.user_princ,
password('<PASSWORD>')], expected_trace=msgs)
# Test optimistic preauth failing on KDC, falling back to encrypted
# timestamp.
mark('optimistic (KDC failure)')
realm.run([kadminl, 'setstr', realm.user_princ, 'failopt', 'yes'])
msgs = ('Attempting optimistic preauth',
'Processing preauth types: -123',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: -123',
'/Preauthentication failed',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Encrypted timestamp (for ',
'module encrypted_timestamp (2) (real) returned: 0/Success',
'preauth for next request: PA-FX-COOKIE (133), PA-ENC-TIMESTAMP (2)',
'Decrypted AS reply')
realm.run(['./icred', '-o', '-123', realm.user_princ, password('<PASSWORD>')],
expected_trace=msgs)
# Leave failopt set for the next test.
# Test optimistic preauth failing on KDC, stopping because the test
# module disabled fallback.
mark('optimistic (KDC failure, no fallback)')
msgs = ('Attempting optimistic preauth',
'Processing preauth types: -123',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: -123',
'/Preauthentication failed')
realm.run(['./icred', '-X', 'disable_fallback', '-o', '-123', realm.user_princ,
password('user')], expected_code=1,
expected_msg='Preauthentication failed', expected_trace=msgs)
realm.run([kadminl, 'delstr', realm.user_princ, 'failopt'])
# Test KDC_ERR_MORE_PREAUTH_DATA_REQUIRED and secure cookies.
mark('second round-trip')
realm.run([kadminl, 'setstr', realm.user_princ, '2rt', 'secondtrip'])
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/More preauthentication data is required',
'Continuing preauth mech -123',
'Processing preauth types: -123, PA-FX-COOKIE (133)',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'Decrypted AS reply')
realm.run(['./icred', realm.user_princ, password('user')],
expected_msg='2rt: secondtrip', expected_trace=msgs)
# Test client-side failure after KDC_ERR_MORE_PREAUTH_DATA_REQUIRED,
# falling back to encrypted timestamp.
mark('second round-trip (client failure)')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/More preauthentication data is required',
'Continuing preauth mech -123',
'Processing preauth types: -123, PA-FX-COOKIE (133)',
'/induced 2rt fail',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Encrypted timestamp (for ',
'module encrypted_timestamp (2) (real) returned: 0/Success',
'preauth for next request: PA-FX-COOKIE (133), PA-ENC-TIMESTAMP (2)',
'Decrypted AS reply')
realm.run(['./icred', '-X', 'fail_2rt', realm.user_princ, password('<PASSWORD>')],
expected_msg='2rt: secondtrip', expected_trace=msgs)
# Test client-side failure after KDC_ERR_MORE_PREAUTH_DATA_REQUIRED,
# stopping because the test module disabled fallback.
mark('second round-trip (client failure, no fallback)')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/More preauthentication data is required',
'Continuing preauth mech -123',
'Processing preauth types: -123, PA-FX-COOKIE (133)',
'/induced 2rt fail')
realm.run(['./icred', '-X', 'fail_2rt', '-X', 'disable_fallback',
realm.user_princ, password('<PASSWORD>')], expected_code=1,
expected_msg='Pre-authentication failed: induced 2rt fail',
expected_trace=msgs)
# Test KDC-side failure after KDC_ERR_MORE_PREAUTH_DATA_REQUIRED,
# falling back to encrypted timestamp.
mark('second round-trip (KDC failure)')
realm.run([kadminl, 'setstr', realm.user_princ, 'fail2rt', 'yes'])
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/More preauthentication data is required',
'Continuing preauth mech -123',
'Processing preauth types: -123, PA-FX-COOKIE (133)',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/Preauthentication failed',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Encrypted timestamp (for ',
'module encrypted_timestamp (2) (real) returned: 0/Success',
'preauth for next request: PA-FX-COOKIE (133), PA-ENC-TIMESTAMP (2)',
'Decrypted AS reply')
realm.run(['./icred', realm.user_princ, password('<PASSWORD>')],
expected_msg='2rt: secondtrip', expected_trace=msgs)
# Leave fail2rt set for the next test.
# Test KDC-side failure after KDC_ERR_MORE_PREAUTH_DATA_REQUIRED,
# stopping because the test module disabled fallback.
mark('second round-trip (KDC failure, no fallback)')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/More preauthentication data is required',
'Continuing preauth mech -123',
'Processing preauth types: -123, PA-FX-COOKIE (133)',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/Preauthentication failed')
realm.run(['./icred', '-X', 'disable_fallback',
realm.user_princ, password('<PASSWORD>')], expected_code=1,
expected_msg='Preauthentication failed', expected_trace=msgs)
realm.run([kadminl, 'delstr', realm.user_princ, 'fail2rt'])
# Test tryagain flow by inducing a KDC_ERR_ENCTYPE_NOSUPP error on the KDC.
mark('tryagain')
realm.run([kadminl, 'setstr', realm.user_princ, 'err', 'testagain'])
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/KDC has no support for encryption type',
'Recovering from KDC error 14 using preauth mech -123',
'Preauth tryagain input types (-123): -123, PA-FX-COOKIE (133)',
'Preauth module test (-123) tryagain returned: 0/Success',
'Followup preauth for next request: -123, PA-FX-COOKIE (133)',
'Decrypted AS reply')
realm.run(['./icred', realm.user_princ, password('<PASSWORD>')],
expected_msg='tryagain: testagain', expected_trace=msgs)
# Test a client-side tryagain failure, falling back to encrypted
# timestamp.
mark('tryagain (client failure)')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/KDC has no support for encryption type',
'Recovering from KDC error 14 using preauth mech -123',
'Preauth tryagain input types (-123): -123, PA-FX-COOKIE (133)',
'/induced tryagain fail',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Encrypted timestamp (for ',
'module encrypted_timestamp (2) (real) returned: 0/Success',
'preauth for next request: PA-FX-COOKIE (133), PA-ENC-TIMESTAMP (2)',
'Decrypted AS reply')
realm.run(['./icred', '-X', 'fail_tryagain', realm.user_princ,
password('<PASSWORD>')], expected_trace=msgs)
# Test a client-side tryagain failure, stopping because the test
# module disabled fallback.
mark('tryagain (client failure, no fallback)')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Processing preauth types:',
'Preauth module test (-123) (real) returned: 0/Success',
'Produced preauth for next request: PA-FX-COOKIE (133), -123',
'/KDC has no support for encryption type',
'Recovering from KDC error 14 using preauth mech -123',
'Preauth tryagain input types (-123): -123, PA-FX-COOKIE (133)',
'/induced tryagain fail')
realm.run(['./icred', '-X', 'fail_tryagain', '-X', 'disable_fallback',
realm.user_princ, password('user')], expected_code=1,
expected_msg='KDC has no support for encryption type',
expected_trace=msgs)
# Test that multiple stepwise initial creds operations can be
# performed with the same krb5_context, with proper tracking of
# clpreauth module request handles.
mark('interleaved')
realm.run([kadminl, 'addprinc', '-pw', 'pw', 'u1'])
realm.run([kadminl, 'addprinc', '+requires_preauth', '-pw', 'pw', 'u2'])
realm.run([kadminl, 'addprinc', '+requires_preauth', '-pw', 'pw', 'u3'])
realm.run([kadminl, 'setstr', 'u2', '2rt', 'extra'])
out = realm.run(['./icinterleave', 'pw', 'u1', 'u2', 'u3'])
if out != ('step 1\nstep 2\nstep 3\nstep 1\nfinish 1\nstep 2\nno attr\n'
'step 3\nno attr\nstep 2\n2rt: extra\nstep 3\nfinish 3\nstep 2\n'
'finish 2\n'):
fail('unexpected output from icinterleave')
success('Pre-authentication framework tests')
|
StarcoderdataPython
|
1813055
|
<gh_stars>10-100
import random
import table
import requests
import crawler
class Proxy:
def __init__(self, proto, url):
self.proto = proto
self.url = url
def proxy_download(self):
try:
result = requests.get(str.rstrip(self.url))
result.raise_for_status()
return result.text
except requests.RequestException:
return False
def form_table(self, clear):
t = table.Table('proxy_list', crawler.read_config())
proxy_exist = t.table_check()
if proxy_exist:
if clear:
t.table_truncate()
else:
t.proxy_make()
try:
http_list = self.proxy_download().split('\r')
except AttributeError:
print("Network error. Can't get proxies.")
return False
if http_list:
table_list = [f'{self.proto}://{str.lstrip(i)}' for i in http_list]
table_list.pop()
http_list.clear()
for i in table_list:
t.proxy_append(self.proto, i)
return len(table_list)
return False
@staticmethod
def read_table_string(list_len):
tbl = table.Table('proxy_list', crawler.read_config())
number = int(random.random() * list_len)
return tbl.table_read(number)
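# Usage sketch (added for illustration; the proxy-list URL is hypothetical and the
# surrounding table/crawler configuration is assumed to be in place):
#   p = Proxy('http', 'https://example.com/proxy-list.txt')
#   stored = p.form_table(clear=True)
#   if stored:
#       print(Proxy.read_table_string(stored))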
|
StarcoderdataPython
|
4986580
|
<filename>wms/views.py
from math import pi
import mapscript
from PIL import Image
from raster.models import RasterTile
from django.http import HttpResponse
from django.views.generic import View
from wms.maps import WmsMap
class WmsView(View):
"""
WMS view class for setting up WMS endpoints.
"""
map_class = None
def __init__(self, **kwargs):
# Setup mapscript IO stream
mapscript.msIO_installStdoutToBuffer()
# Verify that map class has been specified correctly
if not self.map_class or not issubclass(self.map_class, WmsMap):
raise TypeError(
'The map_class attribute is not a subclass of WmsMap. '
'Specify a map in map_class attribute.'
)
# Setup wms view allowing only GET requests
super(WmsView, self).__init__(http_method_names=['get'], **kwargs)
def get(self, request, *args, **kwargs):
"""
Html GET method of WmsView. This view renders WMS requests into
corresponding responses using the attached WmsMap class.
Responses are mainly images and xml files.
"""
# Create response
response = HttpResponse()
# Setup wms request object
ows_request = mapscript.OWSRequest()
# If tile kwargs were provided, add tile parameters to request
tileparams = self.tilemode()
if tileparams:
# Get image format from url
format = {'.png': 'image/png',
'.jpg': 'image/jpeg'}[self.kwargs.get('format')]
# Return empty image if tile cant be found
if not self.tile_exists(*tileparams):
# Get image type and size
imagetype = 'PNG' if format == 'image/png' else 'JPEG'
imagesize = 256, 256
response['Content-Type'] = format
# Create image and save it to response
im = Image.new("RGBA", imagesize, (0, 0, 0, 0))
im.save(response, imagetype)
return response
else:
tilebounds = self.get_tile_bounds(*tileparams)
# Get layer name
layers = self.kwargs.get('layers')
# Setup wms parameter object
request_data = {
'SERVICE': 'WMS',
'REQUEST': 'GetMap',
'VERSION': '1.1.1',
'TRANSPARENT': 'true',
'HEIGHT': '256',
'WIDTH': '256',
'SRS': 'EPSG:3857',
'FORMAT': format,
'LAYERS': layers,
'BBOX': tilebounds,
}
            request.GET = dict(list(request.GET.items()) + list(request_data.items()))
# Set ows parameters from request data
for param, value in request.GET.items():
ows_request.setParameter(param, value)
# Instantiate WmsMap class
self.wmsmap = self.map_class(request, **kwargs)
# Dynamically use host for declaring service endpoint
onlineresource = request.build_absolute_uri().split('?')[0] + '?'
self.wmsmap.map_object.setMetaData('wms_onlineresource',
onlineresource)
# Dispatch map rendering
self.wmsmap.map_object.OWSDispatch(ows_request)
# Strip buffer from headers
mapscript.msIO_stripStdoutBufferContentHeaders()
# Store contenttype
contenttype = mapscript.msIO_stripStdoutBufferContentType()
# Write data to response
response.write(mapscript.msIO_getStdoutBufferBytes())
# Set contenttype
response['Content-Type'] = contenttype
return response
def get_tile_bounds(self, x, y, z):
"""
Calculates tile bounding box from Tile Map Service XYZ indices.
"""
# Setup scale factor for bounds calculations
res = 2 * pi * 6378137
shift = res / 2.0
scale = res / 2**z
# Calculate bounds
minx = x * scale - shift
maxx = (x + 1) * scale - shift
miny = shift - (y + 1) * scale
maxy = shift - y * scale
# Convert bounds to query string
return ','.join([repr(coord) for coord in [minx, miny, maxx, maxy]])
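    # Worked example (added comment): at z=0, x=0, y=0 the scale equals the full
    # web mercator extent, so the returned bounds string is roughly
    # '-20037508.34,-20037508.34,20037508.34,20037508.34'.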
def tilemode(self):
"""
Returns true if the request is for XYZ tiles.
"""
# Try to get tile indices from url
x = self.kwargs.get('x', '')
y = self.kwargs.get('y', '')
z = self.kwargs.get('z', '')
if not (x and y and z):
return False
else:
return int(x), int(y), int(z)
def tile_exists(self, x, y, z):
"""
Returns true if the requested XYZ tile exists.
"""
return RasterTile.objects.filter(
tilex=x,
tiley=y,
tilez=z,
filename=self.kwargs.get('layers', '')
).exists()
|
StarcoderdataPython
|
364605
|
<reponame>bohyn/yawd-elfinder<gh_stars>0
import os
def fs_standard_access(attr, path, volume):
"""
Make dotfiles not readable, not writable, hidden and locked.
Should return None to allow for original attribute value, boolean otherwise.
This can be used in the :ref:`setting-accessControl` setting.
Args:
:attr: One of `read`, `write`, `hidden` and `locked`.
:path: The path to check against.
:volume: The volume responsible for managing the path.
Returns:
``True`` if `path` has `attr` permissions, ``False`` if not and
``None`` to apply the default permission rules.
"""
if os.path.basename(path) in ['.tmb', '.quarantine']:
#keep reserved folder names intact
return None
if volume.name() == 'localfilesystem':
if attr in ['read', 'write'] and os.path.basename(path).startswith('.'):
return False
|
StarcoderdataPython
|
17104
|
<filename>AutocompleteHandler.py
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import simplejson
from QueryHandler import QueryHandler
class AutocompleteHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
        if not self.request.arguments:
            self.render('index.html')
            return
        if 'address' not in self.request.arguments:
            self.render('index.html')
            return
address = self.request.arguments['address'][0]
data = {
'address': address
}
output = QueryHandler.get_addresses(data)
self.write(output)
self.flush()
self.finish()
|
StarcoderdataPython
|
4874225
|
# coding: utf-8
from form import *
from .error import *
from .serve import *
from .sitemap import *
from .warmup import *
from .auth import *
|
StarcoderdataPython
|
1685873
|
<gh_stars>0
from django.urls import path
from . import views
from django.conf.urls.i18n import i18n_patterns
urlpatterns = [
# path('', views.BlogListView.as_view(), name="blog"),
path('', views.BlogList, name="blog"),
path('<int:pk>/', views.BlogDetailView.as_view(), name="single_blog"),
]
|
StarcoderdataPython
|
388093
|
"""
Pillowfight Class to run various workloads and scale tests
"""
import time
import logging
import tempfile
import re
from os import listdir
from os.path import join
from shutil import rmtree
from ocs_ci.utility.spreadsheet.spreadsheet_api import GoogleSpreadSheetAPI
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.utility import utils, templating
log = logging.getLogger(__name__)
class PillowFight(object):
"""
Workload operation using PillowFight
This class was modelled after the RipSaw class in this directory.
"""
WAIT_FOR_TIME = 1800
MIN_ACCEPTABLE_OPS_PER_SEC = 2000
MAX_ACCEPTABLE_RESPONSE_TIME = 2000
def __init__(self, **kwargs):
"""
Initializer function
Args:
kwargs (dict):
Following kwargs are valid
                repo: PillowFight repo to use - a github link
branch: branch to use from the repo
namespace: namespace for the operator
Example Usage:
r1 = PillowFight()
r1.run_pillowfights()
# To run a private yaml
my_custom_bench = my_custom_bench.yaml
run_cmd('oc apply -f my_custom_bench')
          # To get pillowfight data from a log file's contents
          data = r1.parse_pillowfight_log(data_from_log)
# To do basic sanity checking of data
r1.sanity_check(data)
"""
self.args = kwargs
self.namespace = self.args.get("namespace", "couchbase-operator-namespace")
self.ocp = OCP()
self.up_check = OCP(namespace=constants.COUCHBASE_OPERATOR)
self.logs = tempfile.mkdtemp(prefix="pf_logs_")
def run_pillowfights(self, replicas=1, num_items=None, num_threads=None):
"""
loop through all the yaml files extracted from the pillowfight repo
and run them. Run oc logs on the results and save the logs in self.logs
directory
Args:
replicas (int): Number of pod replicas
num_items (int): Number of items to be loaded to the cluster
num_threads (int): Number of threads
"""
ocp_local = OCP(namespace=self.namespace)
self.replicas = replicas
for i in range(self.replicas):
# for basic-fillowfight.yaml
pfight = templating.load_yaml(constants.COUCHBASE_PILLOWFIGHT)
pfight["metadata"]["name"] = "pillowfight-rbd-simple" + f"{i}"
# change the name
pfight["spec"]["template"]["spec"]["containers"][0]["command"][2] = (
f"couchbase://cb-example-000{i}.cb-example."
f"couchbase-operator-namespace.svc:8091/default?select_bucket=true"
)
# num of items
pfight["spec"]["template"]["spec"]["containers"][0]["command"][4] = (
str(num_items) if num_items else "20000"
)
# num of threads
pfight["spec"]["template"]["spec"]["containers"][0]["command"][13] = (
str(num_threads) if num_threads else "20"
)
lpillowfight = OCS(**pfight)
lpillowfight.create()
time.sleep(15)
self.pods_info = {}
for pillowfight_pods in TimeoutSampler(
self.WAIT_FOR_TIME,
9,
get_pod_name_by_pattern,
"pillowfight",
constants.COUCHBASE_OPERATOR,
):
try:
counter = 0
for pf_pod in pillowfight_pods:
pod_info = self.up_check.exec_oc_cmd(f"get pods {pf_pod} -o json")
pf_status = pod_info["status"]["containerStatuses"][0]["state"]
if "terminated" in pf_status:
pf_completion_info = pf_status["terminated"]["reason"]
if pf_completion_info == constants.STATUS_COMPLETED:
counter += 1
self.pods_info.update({pf_pod: pf_completion_info})
elif "running" in pf_status:
pass
if counter == self.replicas:
break
except IndexError:
log.info("Pillowfight not yet completed")
log.info(self.pods_info)
for pod, pf_completion_info in self.pods_info.items():
if pf_completion_info == "Completed":
pf_endlog = f"{pod}.log"
pf_log = join(self.logs, pf_endlog)
data_from_log = ocp_local.exec_oc_cmd(
f"logs -f {pod} --ignore-errors", out_yaml_format=False
)
data_from_log = data_from_log.replace("\x00", "")
with open(pf_log, "w") as fd:
fd.write(data_from_log)
elif pf_completion_info == "Error":
raise Exception("Pillowfight failed to complete")
def analyze_all(self):
"""
Analyze the data extracted into self.logs files
"""
for path in listdir(self.logs):
full_path = join(self.logs, path)
log.info(f"Analyzing {full_path}")
with open(full_path, "r") as fdesc:
data_from_log = fdesc.read()
log_data = self.parse_pillowfight_log(data_from_log)
self.sanity_check(log_data)
def sanity_check(self, stats):
"""
Make sure the worst cases for ops per second and response times are
within an acceptable range.
"""
stat1 = min(stats["opspersec"])
if stat1 < self.MIN_ACCEPTABLE_OPS_PER_SEC:
raise Exception(f"Worst OPS/SEC value reported is {stat1}")
stat2 = max(stats["resptimes"].keys()) / 1000
if stat2 > self.MAX_ACCEPTABLE_RESPONSE_TIME:
raise Exception(f"Worst response time reported is {stat2} milliseconds")
def parse_pillowfight_log(self, data_from_log):
"""
Run oc logs on the pillowfight pod passed in. Cleanup the output
from oc logs to handle peculiarities in the couchbase log results,
and generate a summary of the results.
        The dictionary returned has two keys: 'opspersec' and 'resptimes'.
        opspersec is a list of the ops per second numbers reported.
        resptimes is a dictionary indexed by the max response time of a range.
Each entry in resptimes contains a minimum response time for that range,
and a count of how many messages fall within that range.
Args:
data_from_log (str): log data
Returns:
dict: ops per sec and response time information
"""
        # The data in the couchbase logs is kind of abnormal.
        # It contains histograms with invalid unicode characters for yaml
        # output (which is why out_yaml_format=False is used).
        # It also seems to write a block of text inside another block at
        # an unpredictable location. The cleaned text used below is the output
        # of the log with that data removed.
#
# So what's left is a list of OPS/SEC values and a histogram of
# response times. This routine organizes that data.
ops_per_sec = []
resp_hist = {}
log.info("*******Couchbase raw output log*********\n" f"{data_from_log}")
lines = data_from_log.split("\n")
for dline in lines:
try:
if dline.startswith("OPS/SEC"):
dfields = dline.split(" ")
dnumb = int(dfields[-1].strip())
ops_per_sec.append(dnumb)
if re.match("^\\[\\d+ +- \\d+ *\\][um]s \\|#* - \\d+", dline):
for element in ["[", "]", "|", "-", "#"]:
dline = dline.replace(element, " ")
parts = dline.split()
i1 = int(parts[0])
i2 = int(parts[1])
if parts[2] == "ms":
i1 *= 1000
i2 *= 1000
resp_hist[i2] = {"minindx": i1, "number": int(parts[3])}
except ValueError:
log.info(f"{dline} -- contains invalid data")
ret_data = {"opspersec": ops_per_sec, "resptimes": resp_hist}
return ret_data
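    # Example (added comment): a histogram line the regex above is meant to match
    # looks roughly like '[0 - 9]us |######## - 123', which is recorded as
    # resp_hist[9] = {'minindx': 0, 'number': 123}.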
def export_pfoutput_to_googlesheet(self, sheet_name, sheet_index):
"""
Collect pillowfight output to google spreadsheet
Args:
sheet_name (str): Name of the sheet
sheet_index (int): Index of sheet
"""
# Collect data and export to Google doc spreadsheet
g_sheet = GoogleSpreadSheetAPI(sheet_name=sheet_name, sheet_index=sheet_index)
log.info("Exporting pf data to google spreadsheet")
for path in listdir(self.logs):
full_path = join(self.logs, path)
with open(full_path, "r") as fdesc:
data_from_log = fdesc.read()
log_data = self.parse_pillowfight_log(data_from_log)
g_sheet.insert_row(
[
f"{path}",
min(log_data["opspersec"]),
max(log_data["resptimes"].keys()) / 1000,
],
2,
)
g_sheet.insert_row(["", "opspersec", "resptimes"], 2)
# Capturing versions(OCP, OCS and Ceph) and test run name
g_sheet.insert_row(
[
f"ocp_version:{utils.get_cluster_version()}",
f"ocs_build_number:{utils.get_ocs_build_number()}",
f"ceph_version:{utils.get_ceph_version()}",
f"test_run_name:{utils.get_testrun_name()}",
],
2,
)
def cleanup(self):
"""
Remove pillowfight pods and temp files
"""
rmtree(self.logs)
|
StarcoderdataPython
|
9607233
|
<reponame>GodOfOwls/Lightshield
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
def open_local(filename):
"""Open a file in this directory."""
heredir = os.path.abspath(".")
return open(os.path.join(heredir, filename), "r")
def read_requires(filename):
"""Read installation requirements from pip install files."""
NO_JENKINS = {"psycopg2-binary"}
NO_WINDOWS = {"uvloop"}
with open_local(filename) as reqfile:
lines = [line.strip() for line in reqfile.readlines()]
if os.environ.get("USER") == "jenkins":
lines = [line for line in lines if line.lower() not in NO_JENKINS]
if "win" in sys.platform:
lines = [line for line in lines if line.lower() not in NO_WINDOWS]
return lines
if __name__ == "__main__":
install_requires = read_requires("requirements.txt")
setup(
name="Lightshield",
description="Automatic service structure to keep up to date on the Riot Api.",
version="1.0.0",
packages=find_packages(),
install_requires=install_requires,
license="APL2",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
keywords="riot games api",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Programming Language :: Python :: 3.8",
],
)
|
StarcoderdataPython
|
3335994
|
<reponame>cristicismas/top-budget<gh_stars>0
from rest_framework import serializers
from expenses.models import Expense, Category, Location, Source
class ExpenseSerializer(serializers.ModelSerializer):
class Meta:
model = Expense
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class LocationSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = '__all__'
class SourceSerializer(serializers.ModelSerializer):
class Meta:
model = Source
fields = '__all__'
|
StarcoderdataPython
|
3284628
|
class InstanceReferenceGeometry(GeometryBase,IDisposable,ISerializable):
""" InstanceReferenceGeometry(instanceDefinitionId: Guid,transform: Transform) """
def ConstructConstObject(self,*args):
""" ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int) """
pass
def Dispose(self):
""" Dispose(self: CommonObject,disposing: bool) """
pass
def NonConstOperation(self,*args):
""" NonConstOperation(self: CommonObject) """
pass
def OnSwitchToNonConst(self,*args):
""" OnSwitchToNonConst(self: GeometryBase) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,instanceDefinitionId,transform):
""" __new__(cls: type,instanceDefinitionId: Guid,transform: Transform) """
pass
ParentIdefId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ParentIdefId(self: InstanceReferenceGeometry) -> Guid
"""
Xform=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Xform(self: InstanceReferenceGeometry) -> Transform
"""
|
StarcoderdataPython
|
5070690
|
<reponame>gideontong/CodeQuest<filename>library/python/sort/quick_sort.py
"""
Source: Interactive Python
The quick sort uses divide and conquer to gain the same advantages as the merge sort, while
not using additional storage. As a trade-off, however, it is possible that the list may not
be divided in half. When this happens, we will see that performance is diminished.
"""
def quickSort(alist):
quickSortHelper(alist,0,len(alist)-1)
def quickSortHelper(alist,first,last):
if first<last:
splitpoint = partition(alist,first,last)
quickSortHelper(alist,first,splitpoint-1)
quickSortHelper(alist,splitpoint+1,last)
def partition(alist,first,last):
pivotvalue = alist[first]
leftmark = first+1
rightmark = last
done = False
while not done:
while leftmark <= rightmark and alist[leftmark] <= pivotvalue:
leftmark = leftmark + 1
while alist[rightmark] >= pivotvalue and rightmark >= leftmark:
rightmark = rightmark -1
if rightmark < leftmark:
done = True
else:
temp = alist[leftmark]
alist[leftmark] = alist[rightmark]
alist[rightmark] = temp
temp = alist[first]
alist[first] = alist[rightmark]
alist[rightmark] = temp
return rightmark
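if __name__ == '__main__':
    # Illustrative check (added sketch, not part of the original source):
    # sort a small list in place and print the result.
    alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    quickSort(alist)
    print(alist)  # expected: [17, 20, 26, 31, 44, 54, 55, 77, 93]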
|
StarcoderdataPython
|
1741917
|
from django.apps import AppConfig
class GhuMainConfig(AppConfig):
name = 'ghu_main'
verbose_name = 'GHU (Main)'
|
StarcoderdataPython
|
1958614
|
<reponame>spotx/telepresence
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from pathlib import Path
import telepresence
def parse_args(argv=None, only_for_commands=False):
prog = str(Path(sys.argv[0]).name)
parser = argparse.ArgumentParser(
        allow_abbrev=False,  # abbreviated options can make adding new flags backwards-incompatible
formatter_class=argparse.RawDescriptionHelpFormatter,
usage="{} [options] COMMAND ...".format(prog),
description=(
"Telepresence: local development proxied to a remote Kubernetes "
"cluster.\n\n"
"Documentation: https://telepresence.io\n"
"Real-time help: https://d6e.co/slack\n"
"Issue tracker: https://github.com/datawire/telepresence/issues\n"
)
)
parser.add_argument(
"--version", action="version", version=telepresence.__version__
)
# General options
options_group = parser.add_argument_group("options")
options_group.add_argument(
"--context",
default=None,
help=(
"The Kubernetes context to use. Defaults to the current "
"kubectl context."
)
)
options_group.add_argument(
"--namespace",
default=None,
help=(
"The Kubernetes namespace to use. Defaults to kubectl's default "
"for the current or specified context, "
"""which is usually "default"."""
)
)
options_group.add_argument(
"--logfile",
default="./telepresence.log",
help=(
"""The path to write logs to. "-" means stdout, """
"""default is "./telepresence.log"."""
)
)
options_group.add_argument(
"--verbose",
action="store_true",
help="Enables verbose logging for troubleshooting."
)
# Commands
subparsers = parser.add_subparsers(
title="commands",
prog="{} [options]".format(prog),
description="The following commands are EXPERIMENTAL as of Nov 2018",
metavar="COMMAND",
dest="command"
)
available_commands = []
def add_command(name, *args, **kwargs):
available_commands.append(name)
return subparsers.add_parser(name, *args, **kwargs)
proxy_desc = "Start or stop the Telepresence proxy pod."
proxy_parser = add_command(
"proxy", description=proxy_desc, help=proxy_desc
)
proxy_parser.add_argument(
"start/stop",
choices=("start", "stop"),
help="Whether to start or stop the proxy"
)
outbound_desc = """
Set up the network so that local connections can transparently reach the
cluster. This operation will run a subprocess using "sudo", which may
prompt you for your password.
"""
# outbound_parser =
add_command("outbound", description=outbound_desc, help=outbound_desc)
add_command("version")
# Perform the parsing
show_warning_message = False
if only_for_commands:
# If no supported command is mentioned, do nothing
my_argv = argv or sys.argv
command_found = False
for command_name in available_commands:
if command_name in my_argv:
command_found = True
break
if not command_found:
return None
show_warning_message = True
try:
args = parser.parse_args(argv)
show_warning_message = False
finally:
if show_warning_message:
msg = (
"""\nSee also "{} --help" and "{} --help-experimental" """ +
"for more information"
)
print(msg.format(prog, prog))
if args.command == "version":
parser.parse_args(["--version"])
return args
def show_command_help_and_quit():
parse_args(["--help"], only_for_commands=False)
if __name__ == "__main__":
res = parse_args(None, only_for_commands=True)
print(res)
|
StarcoderdataPython
|
6619594
|
# Space: O(n)
# Time: O(n log n) (the input is deduplicated and sorted first)
class Solution:
def longestConsecutive(self, nums):
if not nums: return 0
nums = sorted(list(set(nums)))
res = 0
counter = 1
index = 1
while index < len(nums):
if nums[index] == nums[index - 1] + 1:
counter += 1
else:
res = max(res, counter)
counter = 1
index += 1
return max(res, counter)
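# Added sanity check (assumption, not part of the original snippet): the
# longest consecutive run in [100, 4, 200, 1, 3, 2] is 1-2-3-4, i.e. length 4.
if __name__ == "__main__":
    solver = Solution()
    assert solver.longestConsecutive([100, 4, 200, 1, 3, 2]) == 4
    assert solver.longestConsecutive([]) == 0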
# --- StarcoderdataPython 11230018: games/views.py ---
from django.shortcuts import render, redirect, get_object_or_404
from .models import *
from .forms import *
from .games.games import games
def browse_view(request):
    boards = Board.boards.all()  # .filter(state__outcome=-1)
return render(request, 'games/browse.html', {
'boards': map(lambda b: b.to_dictionary(), boards)
})
def create_view(request):
if 'game' in request.GET and request.user.is_authenticated:
game_id = int(request.GET['game'])
board = Board.boards.create(game_id=game_id)
Player.objects.create(user=request.user, board=board,
order=1, leader=True)
return redirect('../' + board.code)
return render(request, 'games/create.html', {
'games': games.values()
})
def game_view(request, board_code):
if not Board.boards.filter(code=board_code).exists():
return render(request, 'games/noboard.html', {})
if not request.user.is_authenticated:
return redirect('/users/login?next=/games/' + board_code)
board = Board.boards.get(code=board_code)
if board.stage == 0: setup(request, board)
if 'message' in request.POST and request.user.is_authenticated:
Message.objects.create(
user=request.user,
message=request.POST['message'],
board=board)
return render(request, 'games/game.html', {
'board': board.to_dictionary(),
'state': board.state,
'users': board.users(),
'this_player': board.player(request.user)
})
def board_view(request, board_code):
board = Board.boards.get(code=board_code)
game = board.game()
player = board.player(request.user)
cx, cy = (int(request.GET['cx']), int(request.GET['cy']))\
if 'cx' in request.GET else (-1, -1)
sx, sy = (int(request.GET['sx']), int(request.GET['sy']))\
if 'sx' in request.GET else (-1, -1)
if cx != -1 and board.stage == 1 and board.current(player):
if sx != -1 and board.move_piece(sx, sy, cx, cy):
sx, sy = -1, -1
elif board.place_piece(player.order, cx, cy):
sx, sy = -1, -1
elif board.remove_piece(cx, cy):
sx, sy = -1, -1
elif (sx != cx or sy != cy) and board.selectable(cx, cy):
sx, sy = cx, cy
else: sx, sy = -1, -1
elif not board.current(player):
sx, sy = -1, -1
pieces = board.state.pieces()
return render(request, 'games/board.html', {
'tiles': map(lambda y:
map(lambda x: {
'x': x,
'y': y,
'width': game.scale(x, y)[0],
'height': game.scale(x, y)[1],
'background': game.colour(board.state,
pieces, x, y, x == sx and y == sy),
'piece': pieces[x][y].to_dictionary()
if pieces[x][y] else None,
'selected': x == sx and y == sy
}, range(0, game.width)),
range(game.height-1, -1, -1)),
'selected': {'x': sx, 'y': sy},
'turn': board.current(player)
})
def setup(request, board):
this_user = request.user
this_player = board.players().filter(user=this_user).first()
leader = this_player and this_player.leader
if 'start' in request.GET and leader:
board.start()
elif 'cancel' in request.GET and leader:
board.delete()
if 'user' in request.GET:
other_user = User.objects.get(id=request.GET['user'])
other_player = board.players().filter(user=other_user).first()
me = this_user == other_user
if 'join' in request.GET and not other_player and (leader or me):
board.join(other_user)
if 'leave' in request.GET and other_player and (leader or me):
other_player.leave()
if 'promote' in request.GET and other_player and leader\
and other_player.order != 1:
other_player.promote()
if 'demote' in request.GET and other_player and leader\
and other_player.order != board.players().count():
other_player.demote()
if 'transfer' in request.GET and other_player and leader:
other_player.transfer()
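# Added sketch (assumption, not taken from the original project): one way these
# views could be exposed in a URLconf; in a real project this would live in a
# separate games/urls.py module.
def _example_urlpatterns():
    from django.urls import path
    return [
        path("", browse_view, name="browse"),
        path("create", create_view, name="create"),
        path("<str:board_code>", game_view, name="game"),
        path("<str:board_code>/board", board_view, name="board"),
    ]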
# --- StarcoderdataPython 9651165: src/ToolsTab/__init__.py ---
from .ToolsTab import *
from .Tool import *
# --- StarcoderdataPython 92 ---
"""Timezone helper functions.
This module uses pytz when it's available and falls back to built-in alternatives when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
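# Added usage sketch (assumption, not part of the original module; it requires
# a configured Django settings module providing TIME_ZONE and USE_TZ):
def _timezone_example():
    naive = datetime(2012, 3, 1, 12, 0)
    aware = make_aware(naive, get_default_timezone())
    with override(utc):
        # Within this block the active time zone is UTC.
        converted = localtime(aware)
    return is_aware(converted)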
# --- StarcoderdataPython 64515 ---
import json
import random
from typing import NamedTuple, Any
import numpy
from numpy.testing import assert_array_almost_equal, assert_almost_equal
import torch
import pytest
from flaky import flaky
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import sanitize
from allennlp.nn import util
from allennlp.models import load_archive
class TestNnUtil(AllenNlpTestCase):
def test_get_sequence_lengths_from_binary_mask(self):
binary_mask = torch.tensor(
[
[True, True, True, False, False, False],
[True, True, False, False, False, False],
[True, True, True, True, True, True],
[True, False, False, False, False, False],
]
)
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
def test_get_mask_from_sequence_lengths(self):
sequence_lengths = torch.LongTensor([4, 3, 1, 4, 2])
mask = util.get_mask_from_sequence_lengths(sequence_lengths, 5).data.numpy()
assert_almost_equal(
mask,
[[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 0, 0, 0]],
)
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
        # Tests that the following weird behaviour in PyTorch 0.1.12
        # doesn't happen for our sequence masks:
        #
        # mask = torch.ones([260]).bool()
        # mask.sum() # equals 260.
        # var_mask = torch.autograd.Variable(mask)
        # var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
binary_mask = torch.ones(2, 260).bool()
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))
def test_clamp_tensor(self):
# Test on uncoalesced sparse tensor
i = torch.LongTensor([[0, 1, 1, 0], [2, 0, 2, 2]])
v = torch.FloatTensor([3, 4, -5, 3])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on coalesced sparse tensor
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, -5])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on dense tensor
tensor = torch.tensor([[5, -4, 3], [-3, 0, -30]])
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3)
assert_almost_equal(clamped_tensor, [[3, -3, 3], [-3, 0, -3]])
def test_sort_tensor_by_length(self):
tensor = torch.rand([5, 7, 9])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 1:, :] = 0
tensor[3, 5:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(
tensor, sequence_lengths
)
# Test sorted indices are padded correctly.
numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)
assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))
# Test restoration indices correctly recover the original tensor.
assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
def test_get_final_encoder_states(self):
encoder_outputs = torch.Tensor(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]
)
mask = torch.tensor([[True, True, True], [True, True, False]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=False)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 11, 12], [17, 18, 19, 20]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=True)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 3, 4], [17, 18, 15, 16]])
def test_masked_softmax_no_mask(self):
# Testing the general unmasked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 3.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.090031, 0.244728, 0.665241]])
)
assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.017148, 0.046613, 0.93624]]))
# Testing the unmasked 1D case where the input is all 0s.
vector_zero = torch.FloatTensor([[0.0, 0.0, 0.0]])
vector_zero_softmaxed = util.masked_softmax(vector_zero, None).data.numpy()
assert_array_almost_equal(
vector_zero_softmaxed, numpy.array([[0.33333334, 0.33333334, 0.33333334]])
)
# Testing the general unmasked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847, 0.66524096]]
),
)
        # Testing the unmasked batched case where one of the inputs is all 0s.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.33333334, 0.33333334, 0.33333334]]
),
)
def test_masked_softmax_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
        # one of the masks is all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]])
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]])
)
def test_masked_softmax_memory_efficient_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
        # one of the masks is all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.5, 0.0, 0.5], [0.33333333, 0.33333333, 0.33333333]]),
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.33333333, 0.33333333, 0.33333333], [0.11920292, 0.0, 0.88079708]]),
)
def test_masked_log_softmax_masked(self):
        # Tests replicated from test_masked_softmax_masked - we check that, once
        # exponentiated, the log softmax contains the correct elements (masked
        # elements exponentiate to ~0).
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.01798621, 0.0, 0.98201382]])
)
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.0, 0.0, 0.0, 1.0]])
)
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s. The output here will be arbitrary, but it should not be nan.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert not numpy.isnan(vector_1d_softmaxed).any()
def test_masked_max(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([True, False, True])
vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
assert_array_almost_equal(vector_1d_maxed, 5.0)
# Testing if all masks are zero, the output will be arbitrary, but it should not be nan.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([False, False, False])
vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
assert not numpy.isnan(vector_1d_maxed).any()
# Testing batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_maxed = util.masked_max(matrix, mask, dim=-1).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([5.0, -1.0]))
# Testing keepdim for batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_maxed = util.masked_max(matrix, mask, dim=-1, keepdim=True).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([[5.0], [-1.0]]))
# Testing broadcast
matrix = torch.FloatTensor(
[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
)
mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
matrix_maxed = util.masked_max(matrix, mask, dim=1).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([[5.0, 2.0], [-1.0, -0.5]]))
def test_masked_mean(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([True, False, True])
vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
assert_array_almost_equal(vector_1d_mean, 3.0)
# Testing if all masks are zero, the output will be arbitrary, but it should not be nan.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([False, False, False])
vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
assert not numpy.isnan(vector_1d_mean).any()
# Testing batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_mean = util.masked_mean(matrix, mask, dim=-1).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([3.0, -1.5]))
# Testing keepdim for batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_mean = util.masked_mean(matrix, mask, dim=-1, keepdim=True).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([[3.0], [-1.5]]))
# Testing broadcast
matrix = torch.FloatTensor(
[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
)
mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
matrix_mean = util.masked_mean(matrix, mask, dim=1).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([[3.0, 0.5], [-1.5, -1.75]]))
def test_masked_flip(self):
tensor = torch.FloatTensor(
[[[6, 6, 6], [1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4], [5, 5, 5]]]
)
solution = [[[6, 6, 6], [0, 0, 0]], [[4, 4, 4], [3, 3, 3]]]
response = util.masked_flip(tensor, [1, 2])
assert_almost_equal(response, solution)
tensor = torch.FloatTensor(
[
[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
[[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
]
)
solution = [
[[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
[[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
]
response = util.masked_flip(tensor, [3, 4])
assert_almost_equal(response, solution)
tensor = torch.FloatTensor(
[
[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
[[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
[[1, 1, 1], [2, 2, 2], [0, 0, 0], [0, 0, 0]],
]
)
solution = [
[[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
[[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
[[2, 2, 2], [1, 1, 1], [0, 0, 0], [0, 0, 0]],
]
response = util.masked_flip(tensor, [3, 4, 2])
assert_almost_equal(response, solution)
def test_get_text_field_mask_returns_a_correct_mask(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
"token_characters": torch.LongTensor(
[
[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
[[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
]
),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(),
[[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_character_only_input(self):
text_field_tensors = {
"indexer_name": {
"token_characters": torch.LongTensor(
[
[[1, 2, 3], [3, 0, 1], [2, 1, 0], [0, 0, 0]],
[[5, 5, 5], [4, 6, 0], [0, 0, 0], [0, 0, 0]],
]
)
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(),
[[1, 1, 1, 0], [1, 1, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_list_field(self):
text_field_tensors = {
"indexer_name": {
"list_tokens": torch.LongTensor(
[
[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
[[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
]
)
}
}
actual_mask = (
util.get_text_field_mask(text_field_tensors, num_wrapping_dims=1).long().numpy()
)
expected_mask = (text_field_tensors["indexer_name"]["list_tokens"].numpy() > 0).astype(
"int32"
)
assert_almost_equal(actual_mask, expected_mask)
def test_get_text_field_mask_returns_mask_key(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
"mask": torch.tensor([[False, False, True]]),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(), [[0, 0, 1]]
)
def test_weighted_sum_works_on_simple_input(self):
batch_size = 1
sentence_length = 5
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.FloatTensor([[0.3, 0.4, 0.1, 0, 1.2]])
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, embedding_dim)
expected_array = (
0.3 * sentence_array[0, 0]
+ 0.4 * sentence_array[0, 1]
+ 0.1 * sentence_array[0, 2]
+ 0.0 * sentence_array[0, 3]
+ 1.2 * sentence_array[0, 4]
)
numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
def test_weighted_sum_handles_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
expected_array = (
attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0]
+ attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1]
)
numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array, decimal=5)
def test_weighted_sum_handles_uneven_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_3, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
for i in range(length_1):
for j in range(length_2):
expected_array = (
attention_array[0, i, j, 0] * sentence_array[0, 0]
+ attention_array[0, i, j, 1] * sentence_array[0, 1]
)
numpy.testing.assert_almost_equal(
aggregated_array[0, i, j], expected_array, decimal=5
)
def test_weighted_sum_handles_3d_attention_with_3d_matrix(self):
batch_size = 1
length_1 = 5
length_2 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_2, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, embedding_dim)
for i in range(length_1):
expected_array = (
attention_array[0, i, 0] * sentence_array[0, 0]
+ attention_array[0, i, 1] * sentence_array[0, 1]
)
numpy.testing.assert_almost_equal(aggregated_array[0, i], expected_array, decimal=5)
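    def test_weighted_sum_with_one_hot_attention(self):
        # Added sketch (not part of the original suite): a one-hot attention
        # vector should return exactly the attended row of the matrix.
        matrix = torch.FloatTensor([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])
        attention = torch.FloatTensor([[0.0, 1.0, 0.0]])
        aggregated = util.weighted_sum(matrix, attention).data.numpy()
        assert_array_almost_equal(aggregated, numpy.array([[3.0, 4.0]]))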
def test_viterbi_decode(self):
# Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
transition_matrix = torch.zeros([9, 9])
indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix)
_, argmax_indices = torch.max(sequence_logits, 1)
assert indices == argmax_indices.data.squeeze().tolist()
# Test Viterbi decoding works with start and end transitions
sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
transition_matrix = torch.zeros([9, 9])
allowed_start_transitions = torch.zeros([9])
# Force start tag to be an 8
allowed_start_transitions[:8] = float("-inf")
allowed_end_transitions = torch.zeros([9])
# Force end tag to be a 0
allowed_end_transitions[1:] = float("-inf")
indices, _ = util.viterbi_decode(
sequence_logits.data,
transition_matrix,
allowed_end_transitions=allowed_end_transitions,
allowed_start_transitions=allowed_start_transitions,
)
assert indices[0] == 8
assert indices[-1] == 0
# Test that pairwise potentials affect the sequence correctly and that
# viterbi_decode can handle -inf values.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 3, 5],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
]
)
# The same tags shouldn't appear sequentially.
transition_matrix = torch.zeros([5, 5])
for i in range(5):
transition_matrix[i, i] = float("-inf")
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [4, 3, 4, 3, 4, 3]
# Test that unbalanced pairwise potentials break ties
# between paths with equal unary potentials.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, making the best
# path uniquely to take the 4th tag only.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -10
transition_matrix[3, 4] = -10
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [3, 3, 3, 3, 3, 3]
sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
# Best path would normally be [3, 2, 3] but we add a
# potential from 2 -> 1, making [3, 2, 1] the best path.
transition_matrix = torch.zeros([4, 4])
transition_matrix[0, 0] = 1
transition_matrix[2, 1] = 5
indices, value = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [3, 2, 1]
assert value.numpy() == 18
# Test that providing evidence results in paths containing specified tags.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, making the best
# path to take the 4th tag for every label.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -2
transition_matrix[3, 4] = -2
# The 1st, 4th and 5th sequence elements are observed - they should be
# equal to 2, 0 and 4. The last tag should be equal to 3, because although
# the penalty for transitioning to the 4th tag is -2, the unary potential
# is 7, which is greater than the combination for any of the other labels.
observations = [2, -1, -1, 0, 4, -1]
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, observations)
assert indices == [2, 3, 3, 0, 4, 3]
def test_viterbi_decode_top_k(self):
# Test cases taken from: https://gist.github.com/PetrochukM/afaa3613a99a8e7213d2efdd02ae4762
# Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
sequence_logits = torch.autograd.Variable(torch.rand([5, 9]))
transition_matrix = torch.zeros([9, 9])
indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix, top_k=5)
_, argmax_indices = torch.max(sequence_logits, 1)
assert indices[0] == argmax_indices.data.squeeze().tolist()
        # Test that pairwise potentials affect the sequence correctly and that
# viterbi_decode can handle -inf values.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
]
)
# The same tags shouldn't appear sequentially.
transition_matrix = torch.zeros([5, 5])
for i in range(5):
transition_matrix[i, i] = float("-inf")
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 4, 3, 4, 3, 4]
# Test that unbalanced pairwise potentials break ties
# between paths with equal unary potentials.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 0],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, making the best
# path uniquely to take the 4th tag only.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -10
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 3, 3, 3, 3, 3]
sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
# Best path would normally be [3, 2, 3] but we add a
# potential from 2 -> 1, making [3, 2, 1] the best path.
transition_matrix = torch.zeros([4, 4])
transition_matrix[0, 0] = 1
transition_matrix[2, 1] = 5
indices, value = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 2, 1]
assert value[0] == 18
def _brute_decode(
tag_sequence: torch.Tensor, transition_matrix: torch.Tensor, top_k: int = 5
) -> Any:
"""
            Top-k decoder that uses brute-force search instead of the Viterbi dynamic programming algorithm.
"""
# Create all possible sequences
sequences = [[]] # type: ignore
for i in range(len(tag_sequence)):
new_sequences = [] # type: ignore
for j in range(len(tag_sequence[i])):
for sequence in sequences:
new_sequences.append(sequence[:] + [j])
sequences = new_sequences
# Score
scored_sequences = [] # type: ignore
for sequence in sequences:
emission_score = sum(tag_sequence[i, j] for i, j in enumerate(sequence))
transition_score = sum(
transition_matrix[sequence[i - 1], sequence[i]] for i in range(1, len(sequence))
)
score = emission_score + transition_score
scored_sequences.append((score, sequence))
# Get the top k scores / paths
top_k_sequences = sorted(scored_sequences, key=lambda r: r[0], reverse=True)[:top_k]
scores, paths = zip(*top_k_sequences)
return paths, scores # type: ignore
# Run 100 randomly generated parameters and compare the outputs.
for i in range(100):
num_tags = random.randint(1, 5)
seq_len = random.randint(1, 5)
k = random.randint(1, 5)
sequence_logits = torch.rand([seq_len, num_tags])
transition_matrix = torch.rand([num_tags, num_tags])
viterbi_paths_v1, viterbi_scores_v1 = util.viterbi_decode(
sequence_logits, transition_matrix, top_k=k
)
viterbi_path_brute, viterbi_score_brute = _brute_decode(
sequence_logits, transition_matrix, top_k=k
)
numpy.testing.assert_almost_equal(
list(viterbi_score_brute), viterbi_scores_v1.tolist(), decimal=3
)
numpy.testing.assert_equal(sanitize(viterbi_paths_v1), viterbi_path_brute)
def test_sequence_cross_entropy_with_logits_masks_loss_correctly(self):
# test weight masking by checking that a tensor with non-zero values in
# masked positions returns the same loss as a tensor with zeros in those
# positions.
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
tensor2 = tensor.clone()
tensor2[0, 3:, :] = 2
tensor2[1, 4:, :] = 13
tensor2[2, 2:, :] = 234
tensor2[3, :, :] = 65
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
loss2 = util.sequence_cross_entropy_with_logits(tensor2, targets, weights)
assert loss.data.numpy() == loss2.data.numpy()
def test_sequence_cross_entropy_with_logits_smooths_labels_correctly(self):
tensor = torch.rand([1, 3, 4])
targets = torch.LongTensor(numpy.random.randint(0, 3, [1, 3]))
        weights = torch.ones([1, 3])
loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, label_smoothing=0.1
)
correct_loss = 0.0
for prediction, label in zip(tensor.squeeze(0), targets.squeeze(0)):
prediction = torch.nn.functional.log_softmax(prediction, dim=-1)
correct_loss += prediction[label] * 0.9
# incorrect elements
correct_loss += prediction.sum() * 0.1 / 4
# Average over sequence.
correct_loss = -correct_loss / 3
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_averages_batch_correctly(self):
        # test that the batch-averaged loss equals the sum of the per-sequence
        # losses divided by the number of sequences containing any non-padded tokens.
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
vector_loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, average=None
)
# Batch has one completely padded row, so divide by 4.
assert loss.data.numpy() == vector_loss.sum().item() / 4
@flaky(max_runs=3, min_passes=1)
def test_sequence_cross_entropy_with_logits_averages_token_correctly(self):
# test token average is the same as multiplying the per-batch loss
# with the per-batch weights and dividing by the total weight
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, average="token")
vector_loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, average=None
)
total_token_loss = (vector_loss * weights.float().sum(dim=-1)).sum()
average_token_loss = (total_token_loss / weights.float().sum()).detach()
assert_almost_equal(loss.detach().item(), average_token_loss.item(), decimal=5)
def test_sequence_cross_entropy_with_logits_gamma_correctly(self):
batch = 1
length = 3
classes = 4
gamma = abs(numpy.random.randn()) # [0, +inf)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, gamma=gamma)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
p = torch.nn.functional.softmax(logit, dim=-1)
pt = p[label]
ft = (1 - pt) ** gamma
correct_loss += -pt.log() * ft
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_float_correctly(self):
batch = 1
length = 3
classes = 2 # alpha float for binary class only
alpha = (
numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
) # [0, 1]
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
if label:
at = alpha
else:
at = 1 - alpha
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_single_float_correctly(self):
batch = 1
length = 3
classes = 2 # alpha float for binary class only
alpha = (
numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
) # [0, 1]
alpha = torch.tensor(alpha)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
if label:
at = alpha
else:
at = 1 - alpha
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_list_correctly(self):
batch = 1
length = 3
        classes = 4  # alpha supplied as a per-class list
alpha = abs(numpy.random.randn(classes)) # [0, +inf)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
at = alpha[label]
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_replace_masked_values_replaces_masked_values_with_finite_value(self):
tensor = torch.FloatTensor([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]])
mask = torch.tensor([[True, True, False]])
replaced = util.replace_masked_values(tensor, mask.unsqueeze(-1), 2).data.numpy()
assert_almost_equal(replaced, [[[1, 2, 3, 4], [5, 6, 7, 8], [2, 2, 2, 2]]])
def test_logsumexp(self):
# First a simple example where we add probabilities in log space.
tensor = torch.FloatTensor([[0.4, 0.1, 0.2]])
log_tensor = tensor.log()
log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=False)
assert_almost_equal(log_summed.exp().data.numpy(), [0.7])
log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=True)
assert_almost_equal(log_summed.exp().data.numpy(), [[0.7]])
# Then some more atypical examples, and making sure this will work with how we handle
# log masks.
tensor = torch.FloatTensor([[float("-inf"), 20.0]])
assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
tensor = torch.FloatTensor([[-200.0, 20.0]])
assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
tensor = torch.FloatTensor([[20.0, 20.0], [-200.0, 200.0]])
assert_almost_equal(util.logsumexp(tensor, dim=0).data.numpy(), [20.0, 200.0])
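    def test_logsumexp_roundtrip_added_example(self):
        # Added sketch (not part of the original suite): summing probabilities
        # in log space and exponentiating should recover the plain sum.
        probs = torch.FloatTensor([[0.125, 0.25, 0.5]])
        log_summed = util.logsumexp(probs.log(), dim=-1)
        assert_almost_equal(log_summed.exp().data.numpy(), [0.875], decimal=5)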
def test_flatten_and_batch_shift_indices(self):
indices = numpy.array(
[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 9, 9, 9]], [[2, 1, 0, 7], [7, 7, 2, 3], [0, 0, 4, 2]]]
)
indices = torch.tensor(indices, dtype=torch.long)
shifted_indices = util.flatten_and_batch_shift_indices(indices, 10)
numpy.testing.assert_array_equal(
shifted_indices.data.numpy(),
numpy.array(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 12, 11, 10, 17, 17, 17, 12, 13, 10, 10, 14, 12]
),
)
def test_batched_index_select(self):
indices = numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Each element is a vector of its index.
targets = torch.ones([2, 10, 3]).cumsum(1) - 1
# Make the second batch double its index so they're different.
targets[1, :, :] *= 2
indices = torch.tensor(indices, dtype=torch.long)
selected = util.batched_index_select(targets, indices)
assert list(selected.size()) == [2, 2, 2, 3]
ones = numpy.ones([3])
numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 10)
numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 12)
numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 14)
numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 16)
indices = numpy.array([[[1, 11], [3, 4]], [[5, 6], [7, 8]]])
indices = torch.tensor(indices, dtype=torch.long)
with pytest.raises(ConfigurationError):
util.batched_index_select(targets, indices)
indices = numpy.array([[[1, -1], [3, 4]], [[5, 6], [7, 8]]])
indices = torch.tensor(indices, dtype=torch.long)
with pytest.raises(ConfigurationError):
util.batched_index_select(targets, indices)
def test_batched_span_select(self):
# Each element is a vector of its index.
targets = torch.ones([3, 12, 2]).cumsum(1) - 1
spans = torch.LongTensor(
[
[[0, 0], [1, 2], [5, 8], [10, 10]],
[[i, i] for i in range(3, -1, -1)],
[[0, 3], [1, 4], [2, 5], [10, 11]],
]
)
selected, mask = util.batched_span_select(targets, spans)
selected = torch.where(mask.unsqueeze(-1), selected, torch.empty_like(selected).fill_(-1))
numpy.testing.assert_array_equal(
selected,
[
[
[[0, 0], [-1, -1], [-1, -1], [-1, -1]],
[[2, 2], [1, 1], [-1, -1], [-1, -1]],
[[8, 8], [7, 7], [6, 6], [5, 5]],
[[10, 10], [-1, -1], [-1, -1], [-1, -1]],
],
[[[i, i], [-1, -1], [-1, -1], [-1, -1]] for i in range(3, -1, -1)],
[
[[3, 3], [2, 2], [1, 1], [0, 0]],
[[4, 4], [3, 3], [2, 2], [1, 1]],
[[5, 5], [4, 4], [3, 3], [2, 2]],
[[11, 11], [10, 10], [-1, -1], [-1, -1]],
],
],
)
def test_flattened_index_select(self):
indices = numpy.array([[1, 2], [3, 4]])
targets = torch.ones([2, 6, 3]).cumsum(1) - 1
# Make the second batch double its index so they're different.
targets[1, :, :] *= 2
indices = torch.tensor(indices, dtype=torch.long)
selected = util.flattened_index_select(targets, indices)
assert list(selected.size()) == [2, 2, 2, 3]
ones = numpy.ones([3])
numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)
# Check we only accept 2D indices.
with pytest.raises(ConfigurationError):
util.flattened_index_select(targets, torch.ones([3, 4, 5]))
def test_bucket_values(self):
indices = torch.LongTensor([1, 2, 7, 1, 56, 900])
bucketed_distances = util.bucket_values(indices)
numpy.testing.assert_array_equal(
bucketed_distances.numpy(), numpy.array([1, 2, 5, 1, 8, 9])
)
def test_add_sentence_boundary_token_ids_handles_2D_input(self):
tensor = torch.from_numpy(numpy.array([[1, 2, 3], [4, 5, 0]]))
mask = tensor > 0
bos = 9
eos = 10
new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
expected_new_tensor = numpy.array([[9, 1, 2, 3, 10], [9, 4, 5, 10, 0]])
assert (new_tensor.data.numpy() == expected_new_tensor).all()
assert (new_mask.data.numpy() == (expected_new_tensor > 0)).all()
def test_add_sentence_boundary_token_ids_handles_3D_input(self):
tensor = torch.from_numpy(
numpy.array(
[
[[1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2]],
[[4, 3, 2, 1], [8, 7, 6, 5], [0, 0, 0, 0]],
]
)
)
mask = (tensor > 0).sum(dim=-1) > 0
bos = torch.from_numpy(numpy.array([9, 9, 9, 9]))
eos = torch.from_numpy(numpy.array([10, 10, 10, 10]))
new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
expected_new_tensor = numpy.array(
[
[[9, 9, 9, 9], [1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2], [10, 10, 10, 10]],
[[9, 9, 9, 9], [4, 3, 2, 1], [8, 7, 6, 5], [10, 10, 10, 10], [0, 0, 0, 0]],
]
)
assert (new_tensor.data.numpy() == expected_new_tensor).all()
assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
def test_remove_sentence_boundaries(self):
tensor = torch.from_numpy(numpy.random.rand(3, 5, 7))
mask = torch.from_numpy(
# The mask with two elements is to test the corner case
# of an empty sequence, so here we are removing boundaries
# from "<S> </S>"
numpy.array([[1, 1, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
).bool()
new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)
expected_new_tensor = torch.zeros(3, 3, 7)
expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())
expected_new_mask = torch.from_numpy(numpy.array([[0, 0, 0], [1, 1, 1], [1, 1, 0]])).bool()
assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
def test_add_positional_features(self):
# This is hard to test, so we check that we get the same result as the
# original tensorflow implementation:
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py#L270
tensor2tensor_result = numpy.asarray(
[
[0.00000000e00, 0.00000000e00, 1.00000000e00, 1.00000000e00],
[8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e00],
[9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e00],
]
)
tensor = torch.zeros([2, 3, 4])
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
# Check case with odd number of dimensions.
tensor2tensor_result = numpy.asarray(
[
[
0.00000000e00,
0.00000000e00,
0.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
0.00000000e00,
],
[
8.41470957e-01,
9.99983307e-03,
9.99999902e-05,
5.40302277e-01,
9.99949992e-01,
1.00000000e00,
0.00000000e00,
],
[
9.09297407e-01,
1.99986659e-02,
1.99999980e-04,
-4.16146815e-01,
9.99800026e-01,
1.00000000e00,
0.00000000e00,
],
]
)
tensor = torch.zeros([2, 3, 7])
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
def test_combine_tensors_and_multiply(self):
tensors = [torch.Tensor([[[2, 3]]]), torch.Tensor([[[5, 5]]])]
weight = torch.Tensor([4, 5])
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[8 + 15]]
)
combination = "y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[20 + 25]]
)
combination = "x,y"
weight2 = torch.Tensor([4, 5, 4, 5])
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight2), [[8 + 20 + 15 + 25]]
)
combination = "x-y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[-3 * 4 + -2 * 5]]
)
combination = "y-x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[3 * 4 + 2 * 5]]
)
combination = "y+x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[7 * 4 + 8 * 5]]
)
combination = "y*x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[10 * 4 + 15 * 5]]
)
combination = "y/x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(5 / 2) * 4 + (5 / 3) * 5]],
decimal=4,
)
combination = "x/y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(2 / 5) * 4 + (3 / 5) * 5]],
decimal=4,
)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x+y+y", tensors, weight)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x%y", tensors, weight)
def test_combine_tensors_and_multiply_with_same_batch_size_and_embedding_dim(self):
# This test just makes sure we handle some potential edge cases where the lengths of all
# dimensions are the same, making sure that the multiplication with the weight vector
# happens along the right dimension (it should be the last one).
tensors = [torch.Tensor([[[5, 5], [4, 4]], [[2, 3], [1, 1]]])] # (2, 2, 2)
weight = torch.Tensor([4, 5]) # (2,)
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[20 + 25, 16 + 20], [8 + 15, 4 + 5]],
)
tensors = [
torch.Tensor([[[5, 5], [2, 2]], [[4, 4], [3, 3]]]),
torch.Tensor([[[2, 3]], [[1, 1]]]),
]
weight = torch.Tensor([4, 5])
combination = "x*y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[
[5 * 2 * 4 + 5 * 3 * 5, 2 * 2 * 4 + 2 * 3 * 5],
[4 * 1 * 4 + 4 * 1 * 5, 3 * 1 * 4 + 3 * 1 * 5],
],
)
def test_combine_tensors_and_multiply_with_batch_size_one(self):
seq_len_1 = 10
seq_len_2 = 5
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_combine_tensors_and_multiply_with_batch_size_one_and_seq_len_one(self):
seq_len_1 = 10
seq_len_2 = 1
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_has_tensor(self):
has_tensor = util.has_tensor
tensor = torch.tensor([1, 2, 3])
assert has_tensor(["a", 10, tensor])
assert not has_tensor(["a", 10])
assert has_tensor(("a", 10, tensor))
assert not has_tensor(("a", 10))
assert has_tensor({"a": tensor, "b": 1})
assert not has_tensor({"a": 10, "b": 1})
assert has_tensor(tensor)
assert not has_tensor(3)
assert has_tensor({"x": [0, {"inside": {"double_inside": [3, [10, tensor]]}}]})
def test_combine_initial_dims(self):
tensor = torch.randn(4, 10, 20, 17, 5)
tensor2d = util.combine_initial_dims(tensor)
assert list(tensor2d.size()) == [4 * 10 * 20 * 17, 5]
def test_uncombine_initial_dims(self):
embedding2d = torch.randn(4 * 10 * 20 * 17 * 5, 12)
embedding = util.uncombine_initial_dims(embedding2d, torch.Size((4, 10, 20, 17, 5)))
assert list(embedding.size()) == [4, 10, 20, 17, 5, 12]
def test_inspect_model_parameters(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
parameters_inspection = str(
self.FIXTURES_ROOT / "decomposable_attention" / "parameters_inspection.json"
)
model = load_archive(model_archive).model
with open(parameters_inspection) as file:
parameters_inspection_dict = json.load(file)
assert parameters_inspection_dict == util.inspect_parameters(model)
def test_move_to_device(self):
# We're faking the tensor here so that we can test the calls to .cuda() without actually
# needing a GPU.
class FakeTensor(torch.Tensor):
def __init__(self):
self._device = None
def cuda(self, device):
self._device = device
return self
class A(NamedTuple):
a: int
b: torch.Tensor
structured_obj = {
"a": [A(1, FakeTensor()), A(2, FakeTensor())],
"b": FakeTensor(),
"c": (1, FakeTensor()),
}
new_device = 4
moved_obj = util.move_to_device(structured_obj, new_device)
assert moved_obj["a"][0].a == 1
assert moved_obj["a"][0].b._device == new_device
assert moved_obj["a"][1].b._device == new_device
assert moved_obj["b"]._device == new_device
assert moved_obj["c"][0] == 1
assert moved_obj["c"][1]._device == new_device
def test_extend_layer(self):
lin_layer = torch.nn.Linear(10, 5)
new_dim = 8
old_weights = lin_layer.weight.data.clone()
old_bias = lin_layer.bias.data.clone()
util.extend_layer(lin_layer, new_dim)
assert lin_layer.weight.data.shape == (8, 10)
assert lin_layer.bias.data.shape == (8,)
assert (lin_layer.weight.data[:5] == old_weights).all()
assert (lin_layer.bias.data[:5] == old_bias).all()
assert lin_layer.out_features == new_dim
def test_masked_topk_selects_top_scored_items_and_respects_masking(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# Second element in the batch would have indices 2, 3, but
# 3 and 0 are masked, so instead it has 1, 2.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1], [1, 2], [2, 3]])
)
numpy.testing.assert_array_equal(pruned_mask.data.numpy(), numpy.ones([3, 2]))
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_completely_masked_rows(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
mask[2, :] = 0 # fully masked last batch element.
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# We can't check the last row here, because it's completely masked.
        # Instead we only check the first two rows' indices and confirm that
        # the mask zeroes out the fully masked last row.
numpy.testing.assert_array_equal(
pruned_indices[:2].data.numpy(), numpy.array([[0, 1], [1, 2]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1], [1, 1], [0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_selects_top_scored_items_and_respects_masking_different_num_items(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, 0, :] = 1.5
items[0, 1, :] = 2
items[0, 3, :] = 1
items[1, 1:3, :] = 1
items[2, 0, :] = 1
items[2, 1, :] = 2
items[2, 2, :] = 1.5
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 3] = 0
k = torch.tensor([3, 2, 1], dtype=torch.long)
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
# Second element in the batch would have indices 2, 3, but
# 3 and 0 are masked, so instead it has 1, 2.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1, 3], [1, 2, 2], [1, 2, 2]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_row_with_no_items_requested(self):
# Case where `num_items_to_keep` is a tensor rather than an int. Make sure it does the right
# thing when no items are requested for one of the rows.
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :3, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
k = torch.tensor([3, 2, 0], dtype=torch.long)
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
# First element just picks top three entries. Second would pick entries 2 and 3, but 0 and 3
# are masked, so it takes 1 and 2 (repeating the second index). The third element is
# entirely masked and just repeats the largest index with a top-3 score.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1, 2], [1, 2, 2], [3, 3, 3]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_multiple_dimensions(self):
# fmt: off
items = torch.FloatTensor([ # (3, 2, 5)
[[4, 2, 9, 9, 7], [-4, -2, -9, -9, -7]],
[[5, 4, 1, 8, 8], [9, 1, 7, 4, 1]],
[[9, 8, 9, 6, 0], [2, 2, 2, 2, 2]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
mask = torch.tensor([
[[False, False, False, False, False], [True, True, True, True, True]],
[[True, True, True, True, False], [False, True, True, True, True]],
[[True, False, True, True, True], [False, True, False, True, True]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
# This is the same as just specifying a scalar int, but we want to test this behavior
k = torch.ones(3, 5, 4, dtype=torch.long)
k[1, 3, :] = 2
target_items = torch.FloatTensor([
[[-4, -2, -9, -9, -7], [0, 0, 0, 0, 0]],
[[5, 4, 7, 8, 1], [0, 0, 0, 4, 0]],
[[9, 2, 9, 6, 2], [0, 0, 0, 0, 0]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
target_mask = torch.ones(3, 2, 5, 4, dtype=torch.bool)
target_mask[:, 1, :, :] = 0
target_mask[1, 1, 3, :] = 1
target_indices = torch.LongTensor([
[[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 1], [0, 0, 0, 1, 0]],
[[0, 1, 0, 0, 1], [0, 0, 0, 0, 0]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
# fmt: on
pruned_items, pruned_mask, pruned_indices = util.masked_topk(items, mask, k, dim=1)
numpy.testing.assert_array_equal(pruned_mask.data.numpy(), target_mask.data.numpy())
self.assert_array_equal_with_mask(pruned_items, target_items, pruned_mask)
self.assert_array_equal_with_mask(pruned_indices, target_indices, pruned_mask)
def assert_array_equal_with_mask(self, a, b, mask):
numpy.testing.assert_array_equal((a * mask).data.numpy(), (b * mask).data.numpy())
def test_tensors_equal(self):
# Basic
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1]))
assert not util.tensors_equal(torch.tensor([1]), torch.tensor([2]))
# Bool
assert util.tensors_equal(torch.tensor([True]), torch.tensor([True]))
# Cross dtype
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1.0]))
assert util.tensors_equal(torch.tensor([1]), torch.tensor([True]))
# Containers
assert util.tensors_equal([torch.tensor([1])], [torch.tensor([1])])
assert not util.tensors_equal([torch.tensor([1])], [torch.tensor([2])])
assert util.tensors_equal({"key": torch.tensor([1])}, {"key": torch.tensor([1])})
|
StarcoderdataPython
|
6527989
|
<reponame>Alt-Shivam/colour<gh_stars>0
from .all import MUNSELL_COLOURS_ALL
from .experimental import MUNSELL_COLOURS_1929
from .real import MUNSELL_COLOURS_REAL
from colour.utilities import CaseInsensitiveMapping
__all__ = [
"MUNSELL_COLOURS_ALL",
]
__all__ += [
"MUNSELL_COLOURS_1929",
]
__all__ += [
"MUNSELL_COLOURS_REAL",
]
MUNSELL_COLOURS = CaseInsensitiveMapping(
{
"Munsell Colours All": MUNSELL_COLOURS_ALL,
"Munsell Colours 1929": MUNSELL_COLOURS_1929,
"Munsell Colours Real": MUNSELL_COLOURS_REAL,
}
)
MUNSELL_COLOURS.__doc__ = """
Defines the *Munsell Renotation System* datasets.
- ``Munsell Colours All``: *all* published *Munsell* colours, including the
    extrapolated colours.
- ``Munsell Colours 1929``: the colours appearing in the 1929
    *Munsell Book of Color*. This data was used in the scaling
experiments leading to the 1943 renotation.
- ``Munsell Colours Real``: *real*, within MacAdam limits *Munsell* colours
only. They are the colours listed in the original 1943 renotation article
*(Newhall, Nickerson, & Judd, 1943)*.
Notes
-----
- The Munsell Renotation data commonly available within the *all.dat*,
*experimental.dat* and *real.dat* files features *CIE xyY* colourspace
values that are scaled by a :math:`1 / 0.975 \\simeq 1.02568` factor. If
you are performing conversions using *Munsell* *Colorlab* specification,
e.g. *2.5R 9/2*, according to *ASTM D1535-08e1* method, you should not
scale the output :math:`Y` Luminance. However, if you use directly the
    *CIE xyY* colourspace values from the Munsell Renotation data, you
should scale the :math:`Y` Luminance before conversions by a :math:`0.975`
factor.
*ASTM D1535-08e1* states that::
The coefficients of this equation are obtained from the 1943 equation
by multiplying each coefficient by 0.975, the reflectance factor of
magnesium oxide with respect to the perfect reflecting diffuser, and
        rounding to five digits of precision.
- Chromaticities assume *CIE Illuminant C*, approximately 6700K, as neutral
origin for both the hue and chroma loci.
References
----------
- :cite:`MunsellColorSciencec` : Munsell Color Science. (n.d.). Munsell
Colours Data. Retrieved August 20, 2014, from
http://www.cis.rit.edu/research/mcsl2/online/munsell.php
Aliases:
- 'all': 'Munsell Colours All'
- '1929': 'Munsell Colours 1929'
- 'real': 'Munsell Colours Real'
"""
MUNSELL_COLOURS["all"] = MUNSELL_COLOURS["Munsell Colours All"]
MUNSELL_COLOURS["1929"] = MUNSELL_COLOURS["Munsell Colours 1929"]
MUNSELL_COLOURS["real"] = MUNSELL_COLOURS["Munsell Colours Real"]
__all__ += [
"MUNSELL_COLOURS",
]
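
# Illustrative sketch only (not part of the colour API): per the note above, the
# Y luminance of a raw renotation xyY entry is scaled by 0.975 before feeding it
# into an ASTM D1535-08e1 style conversion. The sample values are made up.
def _scaled_renotation_example():
    x, y, Y = 0.3110, 0.3161, 0.7370  # hypothetical xyY entry from the dataset
    return x, y, Y * 0.975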
|
StarcoderdataPython
|
30703
|
<reponame>HappyKL/mindspore<filename>tests/st/model_zoo_tests/DeepFM/test_deepfm.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train_criteo."""
import os
import pytest
from mindspore import context
from mindspore.train.model import Model
from mindspore.common import set_seed
from src.deepfm import ModelBuilder, AUCMetric
from src.config import DataConfig, ModelConfig, TrainConfig
from src.dataset import create_dataset, DataType
from src.callback import EvalCallBack, LossCallBack, TimeMonitor
set_seed(1)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_deepfm():
data_config = DataConfig()
train_config = TrainConfig()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id)
rank_size = None
rank_id = None
dataset_path = "/home/workspace/mindspore_dataset/criteo_data/criteo_h5/"
print("dataset_path:", dataset_path)
ds_train = create_dataset(dataset_path,
train_mode=True,
epochs=1,
batch_size=train_config.batch_size,
data_type=DataType(data_config.data_format),
rank_size=rank_size,
rank_id=rank_id)
model_builder = ModelBuilder(ModelConfig, TrainConfig)
train_net, eval_net = model_builder.get_train_eval_net()
auc_metric = AUCMetric()
model = Model(train_net, eval_network=eval_net, metrics={"auc": auc_metric})
loss_file_name = './loss.log'
time_callback = TimeMonitor(data_size=ds_train.get_dataset_size())
loss_callback = LossCallBack(loss_file_path=loss_file_name)
callback_list = [time_callback, loss_callback]
eval_file_name = './auc.log'
ds_eval = create_dataset(dataset_path, train_mode=False,
epochs=1,
batch_size=train_config.batch_size,
data_type=DataType(data_config.data_format))
eval_callback = EvalCallBack(model, ds_eval, auc_metric,
eval_file_path=eval_file_name)
callback_list.append(eval_callback)
print("train_config.train_epochs:", train_config.train_epochs)
model.train(train_config.train_epochs, ds_train, callbacks=callback_list)
export_loss_value = 0.51
print("loss_callback.loss:", loss_callback.loss)
assert loss_callback.loss < export_loss_value
export_per_step_time = 10.4
print("time_callback:", time_callback.per_step_time)
assert time_callback.per_step_time < export_per_step_time
print("*******test case pass!********")
|
StarcoderdataPython
|
3542151
|
<filename>inqbus/rpi/widgets/lines.py
from inqbus.rpi.widgets.base.render import Renderer, render_session
from inqbus.rpi.widgets.base.widget import Widget
from inqbus.rpi.widgets.interfaces.interfaces import IRenderer
from inqbus.rpi.widgets.interfaces.widgets import ILinesWidget
from inqbus.rpi.widgets.line import Line
from zope.component import getGlobalSiteManager
from zope.interface import Interface, implementer
@implementer(ILinesWidget)
class Lines(Widget):
"""
Lines Widget.
Representing one or more lines.
This widget contains a list of lines which will
be rendered left_bounded.
"""
def init_content(self):
self._content = []
def add_content(self, content):
self._content.append(content)
content.parent = self
def handle_new_content(self, value):
        # accept only a list of either strings or Line instances
assert isinstance(value, list)
# for all given lines
for line_val in value:
# if line is string
if isinstance(line_val, str):
# .. then transform it into a line instance
line = Line(fixed_pos=False)
line.render_on_content_change = False
line._can_focus = True
line.content = line_val
self.add_content(line)
else:
# .. else just append to the content
line_val._can_focus = True
self.add_content(line_val)
# if render on content_change
if self.render_on_content_change:
# .. render the widget
self.render()
@property
def height(self):
"""
The height of the widget in characters
"""
if self._desired_height is None:
return len(self._content)
else:
return self._desired_height
@height.setter
def height(self, value):
"""
Set the height to a fixed value
Args: value: height
"""
self._desired_height = value
@implementer(IRenderer)
class LinesRenderer(Renderer):
"""
Renderer for a LinesWidget
"""
__used_for__ = (ILinesWidget, Interface)
@render_session
def render(self, pos_x=None, pos_y=None):
self.clear()
pos_x, pos_y = self.render_position(pos_x, pos_y)
prefix = ''
offset = 0
# If the line can be focused add a prefix
if self.widget.can_focus:
offset = 1
if self.widget.has_focus:
prefix = self.special_chars['FOCUS_LEFT']
else:
prefix = ' '
for line in self.widget.content:
self.display.write_at_pos(
pos_x,
pos_y,
prefix
)
renderer = line.render_for_display(
self.display,
pos_x=pos_x + offset,
pos_y=pos_y
)
pos_y += renderer.rendered_height
self.update_render_dimensions(renderer)
# return the coordinate after the content
# ToDo width, height handling
return self
def clear(self):
"""
Clean widget from the display
"""
if not self.was_rendered:
return
for line in self.widget.content:
renderer = line.get_renderer_for_display(self.display)
renderer.clear()
# Register the render adapter
gsm = getGlobalSiteManager()
gsm.registerAdapter(LinesRenderer, (ILinesWidget, Interface), IRenderer)
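
# Rough usage sketch, assuming the Widget base class needs no constructor
# arguments and exposes a `content` setter that routes through
# handle_new_content, as the code above suggests; untested.
def _example_lines_widget():
    lines = Lines()
    # Plain strings are wrapped into Line instances; Line objects pass through.
    lines.content = ["First entry", "Second entry"]
    return lines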
|
StarcoderdataPython
|
321726
|
# This program takes a raster color image and produces its raster color halftone using the patterning algorithm.
# Split the image into C, M, Y, K.
# Rotate each separated image by 0, 15, 30, and 45 degrees respectively.
# Take the half-tone of each image (dot size will be proportional to the intensity).
# Rotate back each half-toned image.
# Now you have your colour separated images. The rotation step reduces
# dot alignment issues (which would mess everything up), and things like Moire pattern
# effects will be reasonably minimized.
import numpy as np
from PIL import Image
from patterning_clustered_dot import intensity, patterning
def gcr(im, percentage):
# basic "Gray Component Replacement" function. Returns a CMYK image with
# percentage gray component removed from the CMY halftones and put in the
# K halftone, ie. for percentage=100, (41, 100, 255, 0) >> (0, 59, 214, 41)
cmyk_im = im.convert('CMYK')
if not percentage:
return cmyk_im
cmyk_im = cmyk_im.split()
cmyk = []
for i in range(4):
cmyk.append(cmyk_im[i].load())
for x in range(im.size[0]):
for y in range(im.size[1]):
            # Cast to int so the value can be written back through PIL's integer pixel access.
            gray = int(min(cmyk[0][x,y], cmyk[1][x,y], cmyk[2][x,y]) * percentage / 100)
for i in range(3):
cmyk[i][x,y] = cmyk[i][x,y] - gray
cmyk[3][x,y] = gray
return Image.merge('CMYK', cmyk_im)
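
# Worked check of the figures quoted in gcr()'s comment: with percentage=100,
# CMYK (41, 100, 255, 0) maps to (0, 59, 214, 41) because the gray component is
# min(41, 100, 255) = 41. Hypothetical helper, for illustration only.
def _gcr_single_pixel(c, m, y, percentage):
    gray = int(min(c, m, y) * percentage / 100)
    return c - gray, m - gray, y - gray, gray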
def color_halftoning_with_rotation(cmyk,increment_in_angle):
dots=[]
angle=0
for i in range(4):
channel = Image.fromarray(patterning(cmyk[i].rotate(angle,expand=1))).convert('L')
channel = channel.rotate(-angle,expand=1)
width_half, height_half = channel.size
xx = (width_half-cmyk[i].size[0]*3) / 2
yy = (height_half-cmyk[i].size[1]*3) / 2
channel = channel.crop((xx, yy, xx + cmyk[i].size[0]*3, yy + cmyk[i].size[1]*3))
dots.append(channel)
angle += increment_in_angle
return dots
def main():
fname = 'tree.jpg'
image = Image.open(fname)
image = gcr(image,100)
cmyk = image.split()
dots = color_halftoning_with_rotation(cmyk,15)
new_cmyk = Image.merge('CMYK',dots)
new_cmyk.save("output.jpg")
new_cmyk.show()
if __name__=="__main__":
main()
|
StarcoderdataPython
|
3259640
|
<filename>src/tts_modules/synthesizer/synthesizer_manager.py
import torch
from tts_modules.synthesizer.configs.hparams import hparams
# from tts_modules.synthesizer.utils import audio
from tts_modules.synthesizer.utils.symbols import symbols
from tts_modules.synthesizer.utils.text import text_to_sequence
from tts_modules.synthesizer.models.tacotron import Tacotron
from pathlib import Path
from typing import Union, List
import numpy as np
import librosa
class SynthesizerManager:
def __init__(self, configs, test_dataloader=None, train_dataloader=None,
model=None, checkpoint_path=None):
self.configs = configs
self.test_dataloader = test_dataloader
self.train_dataloader = train_dataloader
self.model = model
self.checkpoint_path = checkpoint_path
if torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if self.model is None:
self.__init_tacotron()
def __call__(self, *args, **kwargs):
return self.synthesize_spectrograms(*args, **kwargs)
def synthesize_spectrograms(self, texts: List[str],
embeddings: Union[np.ndarray, List[np.ndarray]],
return_alignments=False,
do_save_spectrograms=True):
"""
Synthesizes mel spectrograms from texts and speaker embeddings.
:param texts: a list of N text prompts to be synthesized
:param embeddings: a numpy array or list of speaker embeddings of shape (N, 256)
        :param return_alignments: if True, a matrix representing the alignments
            between the characters and each decoder output step will be
            returned for each spectrogram
:return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the
sequence length of spectrogram i, and possibly the alignments.
"""
# Preprocess text inputs
inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts]
if not isinstance(embeddings, list):
embeddings = [embeddings]
# Batch inputs
batched_inputs = [inputs[i:i+hparams.synthesis_batch_size]
for i in range(0, len(inputs), hparams.synthesis_batch_size)]
batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size]
for i in range(0, len(embeddings), hparams.synthesis_batch_size)]
specs = []
for i, batch in enumerate(batched_inputs, 1):
# Pad texts so they are all the same length
text_lens = [len(text) for text in batch]
max_text_len = max(text_lens)
chars = [SynthesizerManager.pad1d(text, max_text_len) for text in batch]
chars = np.stack(chars)
# Stack speaker embeddings into 2D array for batch processing
speaker_embeds = np.stack(batched_embeds[i-1])
# Convert to tensor
chars = torch.tensor(chars).long().to(self.device)
speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device)
# Inference
_, mels, alignments = self.model.generate(chars, speaker_embeddings)
mels = mels.detach().cpu().numpy()
for m in mels:
# Trim silence from end of each spectrogram
while np.max(m[:, -1]) < hparams.tts_stop_threshold:
m = m[:, :-1]
specs.append(m)
if do_save_spectrograms:
if return_alignments:
self.save_spectrograms(specs, alignments)
else:
self.save_spectrograms(specs)
return (specs, alignments) if return_alignments else specs
def save_spectrograms(self, specs, alignments=None):
pass
@staticmethod
def pad1d(x, max_len, pad_value=0):
return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value)
def __init_tacotron(self):
self.model = Tacotron(embed_dims=hparams.tts_embed_dims,
num_chars=len(symbols),
encoder_dims=hparams.tts_encoder_dims,
decoder_dims=hparams.tts_decoder_dims,
n_mels=hparams.num_mels,
fft_bins=hparams.num_mels,
postnet_dims=hparams.tts_postnet_dims,
encoder_K=hparams.tts_encoder_K,
lstm_dims=hparams.tts_lstm_dims,
postnet_K=hparams.tts_postnet_K,
num_highways=hparams.tts_num_highways,
dropout=hparams.tts_dropout,
stop_threshold=hparams.tts_stop_threshold,
speaker_embedding_size=hparams.speaker_embedding_size
).to(self.device)
if self.checkpoint_path is not None:
self.__load_model()
self.model.eval()
def __load_model(self):
if self.checkpoint_path is not None:
self.model.load(self.checkpoint_path)
else:
print('Synthesizer was not loaded!!!')
def __save_checkpoint(self, path):
pass
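
# Minimal inference sketch; `configs` is only stored by the class above, so None
# is passed here, and "synth.pt" is a hypothetical Tacotron checkpoint path.
def _example_inference():
    manager = SynthesizerManager(configs=None, checkpoint_path="synth.pt")
    embedding = np.zeros(hparams.speaker_embedding_size, dtype=np.float32)
    return manager(["Hello world."], embedding, do_save_spectrograms=False)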
|
StarcoderdataPython
|
3502055
|
<filename>experiments/mj60/dead_time.py
import pandas as pd
import sys
import numpy as np
import scipy as sp
import json
import os
from decimal import Decimal
import scipy.optimize as opt
from scipy.optimize import minimize, curve_fit
from scipy.special import erfc
from scipy.stats import crystalball
from scipy.signal import medfilt, find_peaks
import pygama.analysis.histograms as pgh
import pygama.utils as pgu
import pygama.analysis.peak_fitting as pga
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
plt.style.use('style.mplstyle')
def main():
deltaT()
def deltaT():
if(len(sys.argv) != 2):
print('Usage: dead_time.py [run number]')
sys.exit()
df = pd.read_hdf('~/Data/MJ60/pygama/t2_run'+sys.argv[1]+'.h5', columns=['timestamp'])
df = df.reset_index(drop=True)
df = df.loc[(df.index<32000)]
    # Pair each event with the previous event's timestamp (the first event gets 0),
    # then difference to get the time between consecutive events.
    df['previous_timestamp'] = df['timestamp'].shift(1).fillna(0)
    df['deltaT'] = df['timestamp'] - df['previous_timestamp']
plt.hist((df['deltaT']/100e06)*1e06, np.arange(0,(2000000/100e06)*1e06,(1000/100e06)*1e06), histtype='step', color = 'black', label='30 microsecond minimum')
plt.xlabel('Time Between Events (microseconds)', ha='right', x=1.0)
plt.ylabel('Counts', ha='right', y=1.0)
plt.tight_layout()
plt.legend(frameon=True, loc='upper right', fontsize='small')
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
394152
|
# -*- coding: utf-8 -*-
# (c) 2009-2021 <NAME> and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Run the [Litmus test suite](http://www.webdav.org/neon/litmus/) against WsgiDAV
server.
## Usage
**NOTE:** replace <HOST_IP> with the real IP address of the test client.
### 1. Edit Configuration File
```yaml
host: <HOST_IP>
port: 8080
...
```
### 2. Run WsgiDAV
```bash
$ cd WSGIDAV-ROOT
$ wsgidav --config tests\wsgidav-litmus.yaml -H <HOST_IP>
```
### 3. Run Litmus Suite as Docker Container
[Install Docker](https://docs.docker.com/desktop/).
Then open a new console and run these commands:
```bash
$ docker pull mar10/docker-litmus
$ docker run --rm -ti mar10/docker-litmus https://<HOST_IP>:8080/ tester secret
```
Output should look something like this:
```
$ docker run --rm -ti mar10/docker-litmus https://192.168.178.35:8080 tester secret
-> running `basic':
0. init.................. pass
...
15. finish................ pass
<- summary for `basic': of 16 tests run: 16 passed, 0 failed. 100.0%
-> running `copymove':
0. init.................. pass
...
12. finish................ pass
<- summary for `copymove': of 13 tests run: 13 passed, 0 failed. 100.0%
-> running `props':
0. init.................. pass
...
29. finish................ pass
<- summary for `props': of 30 tests run: 30 passed, 0 failed. 100.0%
-> running `locks':
0. init.................. pass
...
40. finish................ pass
<- summary for `locks': of 41 tests run: 41 passed, 0 failed. 100.0%
-> running `http':
0. init.................. pass
...
3. finish................ pass
-> 1 test was skipped.
<- summary for `http': of 3 tests run: 3 passed, 0 failed. 100.0%
$
```
See here for details on the Docker image: https://github.com/mar10/docker-litmus
"""
import subprocess
import unittest
from tests.util import WsgiDavTestServer
# ========================================================================
# WsgiDAVServerTest
# ========================================================================
class WsgiDAVLitmusTest(unittest.TestCase):
"""Run litmus test suite against builtin server."""
def setUp(self):
pass
def tearDown(self):
pass
def _report_missing_litmus(self):
print("*" * 70)
print("This test requires the litmus test suite.")
print("See https://github.com/mar10/docker-litmus")
print("*" * 70)
raise unittest.SkipTest("Test requires litmus test suite")
def test_litmus_with_authentication(self):
"""Run litmus test suite on HTTP with authentification."""
with WsgiDavTestServer(with_auth=True, with_ssl=False):
try:
res = subprocess.call(
["litmus", "http://127.0.0.1:8080/", "tester", "secret"]
)
self.assertEqual(res, 0, "litmus suite failed: check the log")
except OSError:
self._report_missing_litmus()
raise
return
def test_litmus_anonymous(self):
"""Run litmus test suite on HTTP with authentification."""
with WsgiDavTestServer(with_auth=False, with_ssl=False):
try:
res = subprocess.call(["litmus", "http://127.0.0.1:8080/"])
self.assertEqual(res, 0, "litmus suite failed: check the log")
except OSError:
self._report_missing_litmus()
raise
return
def test_litmus_with_ssl_and_authentication(self):
"""Run litmus test suite on SSL / HTTPS with authentification."""
with WsgiDavTestServer(with_auth=True, with_ssl=True):
try:
res = subprocess.call(
["litmus", "https://127.0.0.1:8080/", "tester", "secret"]
)
self.assertEqual(res, 0, "litmus suite failed: check the log")
except OSError:
self._report_missing_litmus()
raise
return
# ========================================================================
# suite
# ========================================================================
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
5166761
|
import cv2
import time
import os
import openface
import pickle
imgDim = 96
cuda = True
modelDir = '/home/ubuntu/openface/models'
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
networkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
net = openface.TorchNeuralNet(networkModel, imgDim=imgDim, cuda=cuda)
align = openface.AlignDlib(os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
def getRep(imgPath, multiple=False, verbose=True):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if verbose:
print(" + Original size: {}".format(rgbImg.shape))
if verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
if multiple:
bbs = align.getAllFaceBoundingBoxes(rgbImg)
else:
bb1 = align.getLargestFaceBoundingBox(rgbImg)
bbs = [bb1]
if len(bbs) == 0 or (not multiple and bb1 is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if verbose:
print("Face detection took {} seconds.".format(time.time() - start))
reps = []
for bb in bbs:
start = time.time()
alignedFace = align.align(
imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if verbose:
print("Alignment took {} seconds.".format(time.time() - start))
print("This bbox is centered at {}, {}".format(bb.center().x, bb.center().y))
start = time.time()
rep = net.forward(alignedFace)
if verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
reps.append((bb.center().x, rep))
sreps = sorted(reps, key=lambda x: x[0])
return sreps
url_source = '/home/ubuntu/test/faces'
vector_dir = 'face_vectors'
train_images = os.listdir(url_source + '/train')
test_images = os.listdir(url_source + '/test')
train_images = [ (url_source + '/train/' + image) for image in train_images ]
test_images = [ (url_source + '/test/' + image) for image in test_images ]
images = train_images + test_images
for index, image in enumerate(images):
print(index, image)
try:
result = getRep(image)[0]
vector = result[1]
image_name = os.path.basename(image)
with open(vector_dir + '/' + image_name + '.pickle', 'wb') as f:
pickle.dump(vector, f)
    except Exception as exc:
        # Keep processing the remaining images, but report what failed.
        print('Error processing {}: {}'.format(image, exc))
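
# Hedged follow-up sketch: reading one of the saved embedding vectors back for
# later comparison. The image name argument is hypothetical.
def load_saved_vector(image_name):
    with open(vector_dir + '/' + image_name + '.pickle', 'rb') as f:
        return pickle.load(f)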
|
StarcoderdataPython
|
11350597
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autoinstall_lib as atl
version = "fftw-3.2.2"
tool = "fftw3"
print("-> loading %s autoinstall (using version %s)"%(tool,version))
def options(opt):
atl.add_lib_option(tool,opt,install=True)
def configure(ctx):
atl.conf_lib(ctx,tool,["fftw3"],"fftw_execute","fftw3.h",defines="HAS_FFTW3",install=installfftw3)
def installfftw3(ctx):
filen = version+".tar.gz"
atl.installsmthg_pre(ctx,"http://www.fftw.org/"+filen,filen)
atl.installsmthg_post(ctx,filen,"fftw","--enable-shared")
|
StarcoderdataPython
|
278141
|
print('====== EX 015 ======')
dias = int(input('How many days was the car rented: '))
km = float(input('And how many km were driven: '))
p = (dias * 60) + (km * 0.15)
print('The total to pay is R${:.2f}'.format(p))
|
StarcoderdataPython
|
72467
|
#
# Copyright (c) 2015-2021 University of Antwerp, Aloxy NV.
#
# This file is part of pyd7a.
# See https://github.com/Sub-IoT/pyd7a for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: <NAME> <<EMAIL>>
# class implementation of action parameters
# D7A ALP Action
from d7a.alp.operations.requests import ReadFileData
from d7a.alp.operations.responses import ReturnFileData
from d7a.alp.operations.write_operations import WriteFileData
from d7a.support.schema import Validatable, Types
from d7a.alp.operations.operation import Operation
from d7a.alp.operations.nop import NoOperation
class Action(Validatable):
SCHEMA = [{
"op" : Types.BITS(6),
"operation": Types.OBJECT(Operation),
"operand" : Types.OBJECT(nullable=True) # there is no Operand base-class
}]
def __init__(self, operation=NoOperation()):
self.operation = operation
super(Action, self).__init__()
@property
def op(self):
return self.operation.op
@property
def operand(self):
return self.operation.operand
def __str__(self):
if isinstance(self.operation, ReturnFileData):
# when reading a known system files we output the parsed data
if self.operation.systemfile_type != None and self.operation.file_data_parsed != None:
return "Received {} content: {}".format(self.operation.systemfile_type.__class__.__name__,
self.operation.file_data_parsed)
return str(self.operation)
|
StarcoderdataPython
|
1748218
|
<reponame>Vyshnavmt94/HackerRankTasks<filename>Hackerrank_codes/find_percentage.py
"""
The provided code stub will read in a dictionary containing key/value pairs of name:[marks] for a list of students. Print the average of the marks array for the student name provided, showing 2 places after the decimal.
Example
The query_name is 'beta'. beta's average score is .
Input Format
The first line contains the integer , the number of students' records. The next lines contain the names and marks obtained by a student, each value separated by a space. The final line contains query_name, the name of a student to query.
Constraints
Output Format
Print one line: The average of the marks obtained by the particular student correct to 2 decimal places.
Sample Input 0
3
Krishna 67 68 69
Arjun 70 98 63
Malika 52 56 60
Malika
Sample Output 0
56.00
Explanation 0
Marks for Malika are whose average is
Sample Input 1
2
Harsh 25 26.5 28
Anurag 26 28 30
Harsh
Sample Output 1
26.50
"""
def check_num(n):
    # Constraint from the problem statement: 2 <= n <= 10.
    return 2 <= n <= 10
def average(lst):
print("{0:.2f}".format((sum(lst) / len(lst))))
if __name__ == '__main__':
n = int(input())
if check_num(n):
student_marks = {}
for i in range(n):
name, *line = input().split()
scores = list(map(float, line))
#scores = [eval(a) for a in scores]
student_marks[name] = scores
query_name = input()
average(student_marks[query_name])
|
StarcoderdataPython
|
4896390
|
<reponame>cortwave/carvana-image-masking<filename>src/rle_encoder.py
import numpy as np
def rle_encode(mask_image):
    # Flatten the mask, then emit (1-indexed start, run length) pairs for the
    # foreground runs -- the usual Kaggle/Carvana RLE submission format.
    pixels = mask_image.flatten()
runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
runs[1::2] = runs[1::2] - runs[:-1:2]
return ' '.join(str(x) for x in runs)
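
# Quick illustration with a made-up 3x3 mask: a single run of three foreground
# pixels starting at flattened position 4 (1-indexed) encodes as "4 3".
def _rle_example():
    mask = np.array([[0, 0, 0],
                     [1, 1, 1],
                     [0, 0, 0]])
    return rle_encode(mask)  # "4 3"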
|
StarcoderdataPython
|
12813999
|
<reponame>tirkarthi/cloudify-cli
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import sys
import json
import time
import click
import errno
import string
import random
import shutil
import logging
import tarfile
import zipfile
import tempfile
import collections.abc
from shutil import copy
from contextlib import closing, contextmanager
from backports.shutil_get_terminal_size import get_terminal_size
import yaml
import requests
from retrying import retry
from .logger import get_logger, get_events_logger
from .exceptions import CloudifyCliError, CloudifyTimeoutError
from .constants import SUPPORTED_ARCHIVE_TYPES, DEFAULT_TIMEOUT
from .execution_events_fetcher import ExecutionEventsFetcher
from cloudify._compat import urlparse
from cloudify.models_states import BlueprintUploadState
from cloudify_rest_client.constants import VisibilityState
from cloudify_rest_client.exceptions import CloudifyClientError
WAIT_FOR_BLUEPRINT_UPLOAD_SLEEP_INTERVAL = 1
def get_deployment_environment_execution(client, deployment_id, workflow):
executions = client.executions.list(deployment_id=deployment_id,
workflow_id=workflow,
sort='created_at',
is_descending=True)
if executions and len(executions) > 0:
return executions[0]
raise RuntimeError(
'Failed to get {0} workflow execution for deployment {1}'.format(
workflow, deployment_id)
)
def dump_to_file(collection, file_path):
with open(file_path, 'a') as f:
f.write(os.linesep.join(collection))
f.write(os.linesep)
def is_virtual_env():
if hasattr(sys, 'base_prefix'):
# py3 case, with the stdlib venv
return sys.base_prefix != sys.prefix
return hasattr(sys, 'real_prefix')
# TODO: Really? Remove!
def get_cwd():
"""Allows use to patch the cwd when needed.
"""
return os.getcwd()
def remove_if_exists(path):
try:
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
def generate_random_string(size=6,
chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def generate_suffixed_id(id):
return '{0}_{1}'.format(id, generate_random_string())
def is_archive(source):
return tarfile.is_tarfile(source) or zipfile.is_zipfile(source)
def extract_archive(source):
if tarfile.is_tarfile(source):
return untar(source)
elif zipfile.is_zipfile(source):
return unzip(source)
raise CloudifyCliError(
'Unsupported archive type provided or archive is not valid: {0}.'
' Supported archive types are: {1}'
.format(source, SUPPORTED_ARCHIVE_TYPES)
)
def tar(source, destination):
logger = get_logger()
logger.debug('Creating tgz archive: {0}...'.format(destination))
with closing(tarfile.open(destination, 'w:gz')) as tar:
tar.add(source, arcname=os.path.basename(source))
def untar(archive, destination=None):
if not destination:
destination = tempfile.mkdtemp()
logger = get_logger()
logger.debug('Extracting tar archive {0} to {1}...'
.format(archive, destination))
with closing(tarfile.open(name=archive)) as tar:
tar.extractall(path=destination, members=tar.getmembers())
return destination
def zip_files(files):
source_folder = tempfile.mkdtemp()
destination_zip = source_folder + '.zip'
for path in files:
copy(path, source_folder)
create_zip(source_folder, destination_zip, include_folder=False)
shutil.rmtree(source_folder)
return destination_zip
def create_zip(source, destination, include_folder=True):
logger = get_logger()
logger.debug('Creating zip archive: {0}...'.format(destination))
with closing(zipfile.ZipFile(destination, 'w')) as zip_file:
for root, _, files in os.walk(source):
for filename in files:
file_path = os.path.join(root, filename)
source_dir = os.path.dirname(source) if include_folder\
else source
zip_file.write(
file_path, os.path.relpath(file_path, source_dir))
return destination
def unzip(archive, destination=None):
if not destination:
destination = tempfile.mkdtemp()
logger = get_logger()
logger.debug('Extracting zip {0} to {1}...'.format(archive, destination))
with closing(zipfile.ZipFile(archive, 'r')) as zip_file:
zip_file.extractall(destination)
return destination
def download_file(url, destination=None, keep_name=False):
"""Download file.
:param url: Location of the file to download
:type url: str
:param destination:
Location where the file should be saved (autogenerated by default)
:param keep_name: use the filename from the url as destination filename
:type destination: str | None
:returns: Location where the file was saved
:rtype: str
"""
CHUNK_SIZE = 1024
logger = get_logger()
if not destination:
if keep_name:
path = urlparse(url).path
name = os.path.basename(path)
destination = os.path.join(tempfile.mkdtemp(), name)
else:
fd, destination = tempfile.mkstemp()
os.close(fd)
logger.info('Downloading {0} to {1}...'.format(url, destination))
try:
response = requests.get(url, stream=True)
except requests.exceptions.RequestException as ex:
raise CloudifyCliError(
'Failed to download {0}. ({1})'.format(url, str(ex)))
final_url = response.url
if final_url != url:
logger.debug('Redirected to {0}'.format(final_url))
try:
with open(destination, 'wb') as destination_file:
for chunk in response.iter_content(CHUNK_SIZE):
destination_file.write(chunk)
except IOError as ex:
raise CloudifyCliError(
'Failed to download {0}. ({1})'.format(url, str(ex)))
return destination
def generate_progress_handler(file_path, action='', max_bar_length=80):
"""Returns a function that prints a progress bar in the terminal
:param file_path: The name of the file being transferred
:param action: Uploading/Downloading
:param max_bar_length: Maximum allowed length of the bar. Default: 80
:return: The configured print_progress function
"""
# We want to limit the maximum line length to 80, but allow for a smaller
# terminal size. We also include the action string, and some extra chars
terminal_width = get_terminal_size().columns
# This takes care of the case where there is no terminal (e.g. unittest)
terminal_width = terminal_width or max_bar_length
bar_length = min(max_bar_length, terminal_width) - len(action) - 12
# Shorten the file name if it's too long
file_name = os.path.basename(file_path)
if len(file_name) > (bar_length // 4) + 3:
file_name = file_name[:bar_length // 4] + '...'
bar_length -= len(file_name)
def print_progress(read_bytes, total_bytes):
"""Print upload/download progress on a single line
Call this function in a loop to create a progress bar in the terminal
:param read_bytes: Number of bytes already processed
:param total_bytes: Total number of bytes in the file
"""
filled_length = min(bar_length, int(round(bar_length * read_bytes /
float(total_bytes))))
percents = min(100.00, round(
100.00 * (read_bytes / float(total_bytes)), 2))
bar = '#' * filled_length + '-' * (bar_length - filled_length)
# The \r caret makes sure the cursor moves back to the beginning of
# the line
msg = '\r{0} {1} |{2}| {3}%'.format(action, file_name, bar, percents)
click.echo(msg, nl=False)
if read_bytes >= total_bytes:
sys.stdout.write('\n')
return print_progress
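
# Small usage sketch for the handler above; the file name and sizes are made up.
def _demo_progress_bar():
    progress = generate_progress_handler('/tmp/archive.tar.gz', action='Uploading')
    total = 1024 * 1024
    for done in range(0, total + 1, 256 * 1024):
        progress(done, total)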
@contextmanager
def handle_client_error(status_code, message, logger):
"""Gracefully handle client errors with specific status codes
"""
try:
yield
except CloudifyClientError as e:
if e.status_code != status_code:
raise
logger.info(message)
@contextmanager
def prettify_client_error(status_codes, logger):
"""Prettify client errors with specific status codes
:param status_codes: List of status codes
:param logger: Logger for writing the error
"""
try:
yield
except CloudifyClientError as e:
if e.status_code not in status_codes:
raise
logger.error('Error: %s', e)
def get_visibility(private_resource,
visibility,
logger,
valid_values=VisibilityState.STATES):
# These arguments are mutually exclusive so only one can be used
if private_resource:
logger.info("The 'private_resource' argument will be deprecated soon, "
"please use the 'visibility' argument instead")
return VisibilityState.PRIVATE
validate_visibility(visibility, valid_values)
return visibility
def validate_visibility(visibility, valid_values=VisibilityState.STATES):
if visibility and visibility not in valid_values:
raise CloudifyCliError(
"Invalid visibility: `{0}`. Valid visibility's values are: "
"{1}".format(visibility, valid_values)
)
def get_local_path(source, destination=None, create_temp=False):
allowed_schemes = ['http', 'https']
if urlparse(source).scheme in allowed_schemes:
downloaded_file = download_file(source, destination, keep_name=True)
return downloaded_file
elif os.path.isfile(source):
if not destination and create_temp:
source_name = os.path.basename(source)
destination = os.path.join(tempfile.mkdtemp(), source_name)
if destination:
shutil.copy(source, destination)
return destination
else:
return source
else:
raise CloudifyCliError(
'You must provide either a path to a local file, or a remote URL '
'using one of the allowed schemes: {0}'.format(allowed_schemes))
def explicit_tenant_name_message(tenant_name, logger):
if tenant_name:
logger.info('Explicitly using tenant `{0}`'.format(tenant_name))
def deep_update_dict(dest_dict, src_dict):
for key, value in src_dict.items():
        if isinstance(dest_dict, collections.abc.MutableMapping):
            if isinstance(value, collections.abc.MutableMapping):
dest_dict[key] = deep_update_dict(dest_dict.get(key), value)
else:
dest_dict[key] = src_dict[key]
else:
dest_dict = {key: src_dict[key]}
return dest_dict
def deep_subtract_dict(dest_dict, src_dict):
for key, value in src_dict.items():
        if isinstance(value, collections.abc.MutableMapping):
deep_subtract_dict(dest_dict.get(key), value)
else:
if key not in dest_dict:
raise CloudifyCliError('Key {} does not exist'.format(key))
dest_dict.pop(key)
def insert_dotted_key_to_dict(dest_dict, key, value):
"""Insert the value into dest_dict according to the key which is in dot
hierarchy format
:param dest_dict: The dict to update
:param key: The dot hierarchy key, e.g. 'a.b.c'
:param value: The value to insert, e.g. 'd'
:return: dest_dict will include the value in the wanted location,
e.g. {a: {b: {c: d}}}
"""
key_path = key.split('.')
for item in key_path[:-1]:
dest_dict.setdefault(item, {})
dest_dict = dest_dict[item]
dest_dict.setdefault(key_path[-1], value)
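
# Illustration of the dotted-key helper above with throwaway values.
def _dotted_key_example():
    target = {}
    insert_dotted_key_to_dict(target, 'a.b.c', 'd')
    return target  # {'a': {'b': {'c': 'd'}}}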
def assert_one_argument(arguments):
"""Asserts exactly one argument in a dictionary of
{argument_name: argument} is not null or False, else raises an error
"""
filtered = [k for k in arguments if arguments[k]]
if len(filtered) != 1:
raise CloudifyCliError('Please provide one of the options: ' +
', '.join(arguments))
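
# Hedged example of the helper above: exactly one truthy option passes, while
# zero or two truthy options raise CloudifyCliError. The flag names are made up.
def _example_one_of(json_flag, yaml_flag):
    assert_one_argument({'json': json_flag, 'yaml': yaml_flag})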
def load_json(input_path):
if not input_path:
return
with open(input_path) as json_file:
return json.load(json_file)
def print_dict(keys_dict, logger):
for key, values in keys_dict.items():
str_values = [str(value) for value in values]
logger.info('{0}: {1}'. format(key, str_values))
def get_dict_from_yaml(yaml_path):
with open(yaml_path) as f:
return yaml.load(f, yaml.Loader)
def wait_for_blueprint_upload(client, blueprint_id, logging_level):
def _handle_errors():
if blueprint['state'] in BlueprintUploadState.FAILED_STATES:
error_msg = '{error_type} blueprint: {description}.'.format(
error_type=blueprint['state'].capitalize().replace('_', ' '),
description=blueprint['error']
)
if logging_level == logging.DEBUG:
error_msg += '\nError traceback: {}'.format(
blueprint['error_traceback'])
raise CloudifyCliError(error_msg)
@retry(stop_max_attempt_number=5, wait_fixed=1000)
def _get_blueprint_and_upload_execution_id():
bp = client.blueprints.get(blueprint_id)
# upload_execution['id'] might not be available at first, hence retry
return bp, bp.upload_execution['id']
try:
blueprint, execution_id = _get_blueprint_and_upload_execution_id()
except KeyError:
raise RuntimeError(
'Failed to get upload_blueprint workflow execution for blueprint '
'{0}. That may indicate a problem with blueprint upload. Verify '
'blueprint\'s state by running command `cfy blueprints get {0}`.'
.format(blueprint_id)
)
# if blueprint upload already ended - return without waiting
if blueprint['state'] in BlueprintUploadState.END_STATES:
_handle_errors()
return blueprint
deadline = time.time() + DEFAULT_TIMEOUT
events_fetcher = ExecutionEventsFetcher(
client, execution_id=execution_id, include_logs=True)
# Poll for execution status and execution logs, until execution ends
# and we receive an event of type in WORKFLOW_END_TYPES
upload_ended = False
events_handler = get_events_logger(None)
# Poll for blueprint upload status, until the upload ends
while True:
if time.time() > deadline:
raise CloudifyTimeoutError('Blueprint {0} upload timed '
'out'.format(blueprint.id))
timeout = deadline - time.time() # update remaining timeout
if not upload_ended:
blueprint = client.blueprints.get(blueprint.id)
upload_ended = \
blueprint['state'] in BlueprintUploadState.END_STATES
events_fetcher.fetch_and_process_events(
events_handler=events_handler, timeout=timeout)
if upload_ended:
break
time.sleep(WAIT_FOR_BLUEPRINT_UPLOAD_SLEEP_INTERVAL)
blueprint = client.blueprints.get(blueprint_id)
_handle_errors()
return blueprint
|
StarcoderdataPython
|
3466369
|
<gh_stars>0
from linked_lists.linked_list import LinkedList
class LinkedListQueue:
__linked_list: LinkedList = LinkedList()
def enqueue(self, item: int):
self.__linked_list.add_last(item)
def dequeue(self):
self.__linked_list.remove_first()
def peek(self) -> int:
return self.__linked_list.get_nth_node_from_end(self.__linked_list.size()).value
def size(self) -> int:
return self.__linked_list.size()
def is_empty(self) -> bool:
return self.__linked_list.is_empty()
def print(self):
print(self.__linked_list.to_array())
if __name__ == '__main__':
queue = LinkedListQueue()
queue.enqueue(10)
queue.enqueue(20)
queue.enqueue(30)
queue.dequeue()
queue.print()
|
StarcoderdataPython
|
3517654
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class TeamSummary(object):
def __init__(self, nfl_teams_root):
self.nfl_teams_root = nfl_teams_root
@property
def division_roots(self):
division_xpath = '//div[contains(@class, "mod-teams-list-medium")]'
return self.nfl_teams_root.xpath(division_xpath)
def divisions(self):
return [Division(division_root) for division_root in self.division_roots]
class Division(object):
def __init__(self, division_root):
self.division_root = division_root
@property
def division(self):
division_xpath = './div/h4/text()'
return self.division_root.xpath(division_xpath)[0]
@property
def team_roots(self):
team_xpath = './div[contains(@class, "mod-content")]/ul/li'
return self.division_root.xpath(team_xpath)
def teams(self):
return [Team(self.division, team_root) for team_root in self.team_roots]
class Team(object):
def __init__(self, division, team_root):
self.division = division
self.team_root = team_root
@property
def conference(self):
if 'AFC' in self.division:
return 'AFC'
elif 'NFC' in self.division:
return 'NFC'
@property
def team_name(self):
team_name_xpath = './div/h5/a/text()'
return self.team_root.xpath(team_name_xpath)[0]
@property
def short_name(self):
return self.team_name.split(' ')[-1]
@property
def location(self):
return ' '.join(self.team_name.split(' ')[0:-1])
@property
def team_link(self):
team_link_xpath = './div/h5/a/@href'
return self.team_root.xpath(team_link_xpath)[0]
@property
def team_code(self):
return self.team_link.split('/')[-2]
def to_dict(self):
return {
'name': self.team_name,
'short_name': self.short_name,
'location': self.location,
'team_code': self.team_code,
'conference': self.conference,
'division': self.division,
}
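
# Rough end-to-end sketch: the HTML fragment below is hypothetical, shaped only
# to satisfy the XPath expressions above (real ESPN markup will differ), and
# lxml is assumed as the parser feeding nfl_teams_root.
def _example_parse():
    from lxml import html
    fragment = (
        '<div><div class="mod-teams-list-medium">'
        '<div><h4>AFC East</h4></div>'
        '<div class="mod-content"><ul><li><div><h5>'
        '<a href="/nfl/team/_/name/buf/buffalo-bills">Buffalo Bills</a>'
        '</h5></div></li></ul></div>'
        '</div></div>'
    )
    summary = TeamSummary(html.fromstring(fragment))
    return [team.to_dict()
            for division in summary.divisions()
            for team in division.teams()]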
|
StarcoderdataPython
|
9714560
|
<reponame>shanioren/argo-workflow-tools
import pytest
import yaml
from argo_workflow_tools import dsl, Workflow
from argo_workflow_tools.dsl.expression import Expression
@dsl.Task(image="python:3.10")
def create_data():
message = {"message": "hello"}
return message
@dsl.Task(image="python:3.10")
def print_data(data: str):
print(data)
@dsl.DAG()
def simple_workflow():
message = create_data()
some_custom_text = Expression.format("hello {x}", x=message["message"])
print_data(data=some_custom_text, wait_for=[message])
return message
def test_export_to_yaml():
simple_workflow()
workflow_yaml = Workflow(
name="hello-world", entrypoint=simple_workflow, arguments={}
).to_yaml()
print(workflow_yaml)
|
StarcoderdataPython
|
3582378
|
from .trader import Trader
|
StarcoderdataPython
|
1991960
|
"""Defines URL patterns for the GEBA website Blog app"""
from django.urls import re_path
from . import views
urlpatterns = [
# displays blogs in order from latest to newest
re_path(r'^$', views.BlogIndexView.as_view(), name='index'),
# P => named groups <pk> => pk = primary key which is the ID of the Blog post, \d means that the pk is a digit
# + means that there's 1 or more d's (digits)
# url(r'^(?P<pk>\d+)$', DetailView.as_view(model=Post, template_name='blog/post.html'))
re_path(r'^(?P<slug>[\w-]+)$', views.BlogDetailView.as_view(), name='detail'),
re_path(r'^(?P<slug>[\w-]+)/update/$', views.BlogUpdateView.as_view(), name='update'),
re_path(r'^create/$', views.BlogCreateView.as_view(), name='create'),
re_path(r'^(?P<slug>[\w-]+)/delete/$', views.BlogDeleteView.as_view(), name='delete'),
re_path(r'^api/(?P<slug>[\w-]+)/like/$', views.PostLikeToggleAjax.as_view(), name='like_toggle_api'),
re_path(r'^api/(?P<slug>[\w-]+)/dislike/$', views.PostDislikeToggleAjax.as_view(), name='dislike_toggle_api'),
re_path(r'^api/(?P<slug>[\w-]+)/publish/$', views.PublishPostAjax.as_view(), name='publish_ajax'),
re_path(r'^api/(?P<slug>[\w-]+)/draft/$', views.MakeDraftPostAjax.as_view(), name='draft_ajax'),
]
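
# Reversal sketch, not in the original module. The "blog" namespace is an assumption
# about how the project includes these patterns; adjust it to the real setup.
# from django.urls import reverse
# reverse('blog:detail', kwargs={'slug': 'my-first-post'})
# reverse('blog:like_toggle_api', kwargs={'slug': 'my-first-post'})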
|
StarcoderdataPython
|
1625596
|
from .pos_embed import *
from .rel_multi_head import *
from .rel_bias import *
from .memory import *
from .scale import *
from .transformer_xl import *
from .loader import *
from .sequence import *
__version__ = '0.13.0'
|
StarcoderdataPython
|
1863166
|
"""
ElasticSearch course document management utilities
"""
from collections import namedtuple
from datetime import MAXYEAR
from django.conf import settings
import arrow
from ..defaults import FILTERS_HARDCODED, RESOURCE_FACETS
from ..exceptions import IndexerDataException, QueryFormatException
from ..forms import CourseListForm
from ..partial_mappings import MULTILINGUAL_TEXT
from ..utils.api_consumption import walk_api_json_list
from ..utils.i18n import get_best_field_language
KeyFragmentPair = namedtuple("KeyFragmentPair", ["key", "fragment"])
class CoursesIndexer:
"""
Makes available the parameters the indexer requires as well as functions to shape
objects getting into and out of ElasticSearch
"""
document_type = "course"
index_name = "richie_courses"
mapping = {
"dynamic_templates": MULTILINGUAL_TEXT,
"properties": {
"end_date": {"type": "date"},
"enrollment_end_date": {"type": "date"},
"enrollment_start_date": {"type": "date"},
"language": {"type": "keyword"},
"organizations": {"type": "keyword"},
"session_number": {"type": "integer"},
"start_date": {"type": "date"},
"subjects": {"type": "keyword"},
"thumbnails": {
"properties": {
"about": {"type": "text", "index": False},
"big": {"type": "text", "index": False},
"facebook": {"type": "text", "index": False},
"small": {"type": "text", "index": False},
},
"type": "object",
},
},
}
scripts = {
# The ordering process first splits the courses into four groups, with further ordering
# inside each one of those groups.
#
# Here's a schematic representation that shows the ordering factor for each
# course (to be used in ascending order later on) :
#
# TOP OF THE LIST
# ———————————————
# ----- NOW (current timestamp) -----
# Courses in bucket 1
#
# < ~1 x datetime.MAXYEAR distance >
#
# ------ ~1 x datetime.MAXYEAR ------
# Courses in bucket 2
#
# < ~1 x datetime.MAXYEAR distance >
#
# Courses in bucket 3
# ------ ~2 x datetime.MAXYEAR ------
#
# < ~1 x datetime.MAXYEAR distance >
#
# Courses in bucket 4
# ------ ~3 x datetime.MAXYEAR ------
# ———————————————
# END OF THE LIST
#
# For reference MAXYEAR's timestamp is more than 2 orders of magnitude larger than
# this year's timestamp (2018).
# This means there can be no overlap between the various buckets, but we can still
# sort courses inside each bucket as we see fit by simply adding timestamps (ascending
# order) or substracting them (descending order).
"sort_courses_list": {
"script": {
"lang": "expression",
"source": (
# 4- Courses that have ended.
# Ordered by descending end date. The course that has finished last
# is displayed first.
"doc['end_date'].value < ms_since_epoch ? "
"3 * max_date - doc['end_date'].value : "
# 3- Courses that have not ended but can no longer be enrolled in.
# Ordered by descending end of enrollment date. The course for which
# enrollment has ended last is displayed first.
"doc['enrollment_end_date'].value < ms_since_epoch ? "
"2 * max_date - doc['enrollment_end_date'].value : "
# 2- Courses that have not started yet.
# Ordered by starting date. The next course to start is displayed first.
"ms_since_epoch < doc['start_date'].value ? "
"max_date + doc['start_date'].value : "
# 1- Courses that are currently open and can be enrolled in.
# Ordered by ascending end of enrollment date. The next course to end
# enrollment is displayed first.
"doc['enrollment_end_date'].value"
),
}
}
}
@classmethod
def get_data_for_es(cls, index, action):
"""
Load all the courses from the API and format them for the ElasticSearch index
"""
content_pages = walk_api_json_list(settings.COURSE_API_ENDPOINT)
for content_page in content_pages:
try:
for course in content_page["results"]:
yield {
"_id": course["id"],
"_index": index,
"_op_type": action,
"_type": cls.document_type,
"end_date": course["end_date"],
"enrollment_end_date": course["enrollment_end_date"],
"enrollment_start_date": course["enrollment_start_date"],
"language": course["language"],
"organization_main": course["main_university"]["id"],
"organizations": [org["id"] for org in course["universities"]],
"session_number": course["session_number"],
"short_description": {
course["language"]: course["short_description"]
},
"start_date": course["start_date"],
"subjects": [subject["id"] for subject in course["subjects"]],
"thumbnails": course["thumbnails"],
"title": {course["language"]: course["title"]},
}
except KeyError:
raise IndexerDataException("Unexpected data shape in courses to index")
@staticmethod
def format_es_course_for_api(es_course, best_language):
"""
Format a course stored in ES into a consistent and easy-to-consume record for
API consumers
"""
return {
"end_date": es_course["_source"]["end_date"],
"enrollment_end_date": es_course["_source"]["enrollment_end_date"],
"enrollment_start_date": es_course["_source"]["enrollment_start_date"],
"id": es_course["_id"],
"language": es_course["_source"]["language"],
"organization_main": es_course["_source"]["organization_main"],
"organizations": es_course["_source"]["organizations"],
"session_number": es_course["_source"]["session_number"],
"short_description": get_best_field_language(
es_course["_source"]["short_description"], best_language
),
"start_date": es_course["_source"]["start_date"],
"subjects": es_course["_source"]["subjects"],
"thumbnails": es_course["_source"]["thumbnails"],
"title": get_best_field_language(
es_course["_source"]["title"], best_language
),
}
@staticmethod
# pylint: disable=R0912, R0914
def build_es_query(request):
"""
Build an ElasticSearch query and its related aggregations, to be consumed by the ES client
in the Courses ViewSet
"""
# QueryDict/MultiValueDict breaks lists: we need to normalize them
# Unpacking does not trigger the broken accessor so we get the proper value
params_form_values = {
k: v[0] if len(v) == 1 else v for k, v in request.query_params.lists()
}
# Use QueryDict/MultiValueDict as a shortcut to make sure we get arrays for these two
# fields, which should be arrays even if their length is one
params_form_values["organizations"] = request.query_params.getlist(
"organizations"
)
params_form_values["subjects"] = request.query_params.getlist("subjects")
for param_key in FILTERS_HARDCODED:
if hasattr(FILTERS_HARDCODED[param_key]["field"], "choices"):
params_form_values[param_key] = request.query_params.getlist(param_key)
# Instantiate the form to allow validation/cleaning
params_form = CourseListForm(params_form_values)
# Raise an exception with error information if the query params are not valid
if not params_form.is_valid():
raise QueryFormatException(params_form.errors)
# Note: test_elasticsearch_feature.py needs to be updated whenever the search call
        # is updated and makes use of new features.
        # queries is an array of individual queries that will be combined through "bool" before
        # we pass them to ES. See the docs on bool queries.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
queries = []
for param, value in params_form.cleaned_data.items():
# Skip falsy values as we're not using them in our query
if not value:
continue
# The datetimerange fields are all translated to the ES query DSL the same way
if param in [
"end_date",
"enrollment_end_date",
"enrollment_start_date",
"start_date",
]:
# Add the relevant range criteria to the queries
start, end = value
queries = [
*queries,
KeyFragmentPair(
param,
[
{
"range": {
param: {
"gte": start.datetime if start else None,
"lte": end.datetime if end else None,
}
}
}
],
),
]
# organizations & subjects are both array of related element IDs
elif param in ["organizations", "subjects"]:
# Add the relevant term search to our queries
queries = [
*queries,
KeyFragmentPair(param, [{"terms": {param: value}}]),
]
# Search is a regular (multilingual) match query
elif param == "query":
queries = [
*queries,
KeyFragmentPair(
param,
[
{
"multi_match": {
"fields": ["short_description.*", "title.*"],
"query": value,
"type": "cross_fields",
}
}
],
),
]
elif param in FILTERS_HARDCODED:
# Normalize all custom params to lists so we can factorize query building logic
if not isinstance(value, list):
value = [value]
# Add the query fragments to the query
for choice in value:
queries = [
*queries,
KeyFragmentPair(
param, FILTERS_HARDCODED[param]["choices"][choice]
),
]
# Default to a match_all query
if not queries:
query = {"match_all": {}}
else:
# Concatenate all the sub-queries lists together to form the queries list
query = {
"bool": {
"must":
# queries => map(pluck("fragment")) => flatten()
[clause for kf_pair in queries for clause in kf_pair.fragment]
}
}
# Prepare the filters from the settings to be used in our aggregations
filters_facets = {}
# Iterate over all filter keys & their possible choices
for filter_key in FILTERS_HARDCODED:
for choice in FILTERS_HARDCODED[filter_key]["choices"]:
# Create an aggregation for each filter/choice pair
filters_facets["{:s}@{:s}".format(filter_key, choice)] = {
"filter": {
"bool": {
# Concatenate all the lists of active query filters with
# the relevant choice filter
"must": FILTERS_HARDCODED[filter_key]["choices"][choice]
+ [
# queries => filter(kv_pair.fragment != filter_key)
# => map(pluck("fragment")) => flatten()
clause
for kf_pair in queries
for clause in kf_pair.fragment
if kf_pair.key is not filter_key
]
}
}
}
# Concatenate our hardcoded filters query fragments with organizations and subjects terms
# aggregations build on-the-fly
aggs = {
"all_courses": {
"global": {},
"aggregations": {
**filters_facets,
**{
facet: {
"filter": {
"bool": {
# Concatenate all the lists of active query filters
# We don't use our own filter here as it's taken care of
# by the terms aggregation from ElasticSearch
"must": [
# queries => filter(kv_pair.fragment != filter_key)
# => map(pluck("fragment")) => flatten()
clause
for kf_pair in queries
for clause in kf_pair.fragment
if kf_pair.key is not facet
]
}
},
"aggregations": {facet: {"terms": {"field": facet}}},
}
for facet in RESOURCE_FACETS
},
},
}
}
return (
params_form.cleaned_data.get("limit"),
params_form.cleaned_data.get("offset") or 0,
query,
aggs,
)
@staticmethod
def get_courses_list_sorting_script():
"""
Call the relevant sorting script for courses lists, regenerating the parameters on each
call. This will allow the ms_since_epoch value to stay relevant even if the ES instance
and/or the Django server are long running.
Note: we use script storage to save time on the script compilation, which is an expensive
operation. We'll only do it once at bootstrap time.
"""
return {
"_script": {
"order": "asc",
"script": {
"id": "sort_courses_list",
"params": {
"max_date": arrow.get(MAXYEAR, 12, 31).timestamp * 1000,
"ms_since_epoch": arrow.utcnow().timestamp * 1000,
},
},
"type": "number",
}
}
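

# Indexing sketch, not part of the original module: assumes the official
# elasticsearch-py client is installed, a cluster is reachable, and
# settings.COURSE_API_ENDPOINT points at the course API so that
# get_data_for_es can yield documents.
def bulk_index_courses(es_client, action="create"):
    from elasticsearch.helpers import bulk  # assumed dependency for this sketch

    return bulk(
        es_client, CoursesIndexer.get_data_for_es(CoursesIndexer.index_name, action)
    )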
|
StarcoderdataPython
|
11327004
|
import snap
edgefilename = "imdb_actor_edges.tsv"
nodefilename = "imdb_actors_key.tsv"
context = snap.TTableContext()
edgeschema = snap.Schema()
edgeschema.Add(snap.TStrTAttrPr("srcID", snap.atStr))
edgeschema.Add(snap.TStrTAttrPr("dstID", snap.atStr))
edgeschema.Add(snap.TStrTAttrPr("edgeattr1", snap.atStr))
nodeschema = snap.Schema()
nodeschema.Add(snap.TStrTAttrPr("nodeID", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("name", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("movies", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("main_genre", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("genres", snap.atStr))
edge_table = snap.TTable.LoadSS(edgeschema, edgefilename, context, "\t", snap.TBool(False))
print("edge_rows", edge_table.GetNumValidRows())
node_table = snap.TTable.LoadSS(nodeschema, nodefilename, context, "\t", snap.TBool(False))
print("node_rows", node_table.GetNumValidRows())
srcattrv = snap.TStrV()
srcattrv.Add("edgeattr1")
dstattrv = snap.TStrV()
dstattrv.Add("edgeattr1")
edgeattrv = snap.TStrV()
edgeattrv.Add("edgeattr1")
nodeattrv = snap.TStrV()
nodeattrv.Add("name")
net1 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", srcattrv, dstattrv, edgeattrv, snap.aaFirst)
print("nodes1", net1.GetNodes())
print("edges1", net1.GetEdges())
net2 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", snap.aaFirst)
print("nodes2", net2.GetNodes())
print("edges2", net2.GetEdges())
net3 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", edgeattrv, snap.aaFirst)
print("nodes3", net3.GetNodes())
print("edges3", net3.GetEdges())
net4 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", edgeattrv, node_table, "nodeID", nodeattrv, snap.aaFirst)
print("nodes4", net4.GetNodes())
print("edges4", net4.GetEdges())
|
StarcoderdataPython
|
1827699
|
#!/usr/bin/env python
# coding: utf-8
import abc
from typing import Optional
import pandas as pd
from dataclasses import dataclass
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.analyzers.utils import DatasetColumns
@dataclass
class BaseAnalyzerResult:
"""Base class for all analyzers results.
If you want to add a new analyzer, inherit a results class from the class.
For correct initiation you should add a decorator `@dataclass` to children classes too.
For example:
@dataclass
class RegressionPerformanceAnalyzerResults(BaseAnalyzerResult):
my_result: str
"""
columns: DatasetColumns
class Analyzer:
@abc.abstractmethod
def calculate(self,
reference_data: pd.DataFrame,
current_data: Optional[pd.DataFrame],
column_mapping: ColumnMapping) -> BaseAnalyzerResult:
raise NotImplementedError()
options_provider: OptionsProvider
|
StarcoderdataPython
|
3364519
|
from __future__ import print_function, unicode_literals
import logging
import os
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# dirs
DATA_DIR = 'data/bot/'
class BadWordException(Exception):
pass
class RepetitionException(Exception):
pass
def read(name):
with open(os.getcwd() + '/' + name) as f:
return [entry.rstrip('\n') for entry in f.readlines()]
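

if __name__ == "__main__":
    # Hypothetical smoke test; the word-list file name is an assumption, not part of
    # the original module.
    entries = read(DATA_DIR + 'bad_words.txt')
    logger.debug('loaded %d entries from %sbad_words.txt', len(entries), DATA_DIR)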
|
StarcoderdataPython
|
4851881
|
class InAppValidationError(Exception):
""" Base class for all validation errors """
|
StarcoderdataPython
|
4888501
|
from __future__ import absolute_import, division, print_function
from wxtbx.phil_controls import path, ints
from wxtbx import phil_controls
from wxtbx import icons, app
import wx
from libtbx.utils import Sorry
import os
RSTBX_SELECT_IMAGE_IDS = 1
class SelectDatasetPanelMixin(object):
def draw_dataset_controls(self, sizer=None, pick_frames=True):
if (sizer is None):
sizer = self.GetSizer()
szr2 = wx.BoxSizer(wx.VERTICAL)
sizer.Add(szr2, 0, wx.ALL, 5)
szr3 = wx.BoxSizer(wx.HORIZONTAL)
szr2.Add(szr3)
bmp = wx.StaticBitmap(self, -1, icons.img_file.GetBitmap())
szr3.Add(bmp, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
caption = "Please select a dataset to index. Most common detector " +\
"file formats are supported (ADSC, R-AXIS, MAR, Pilatus, CBF, etc.)."
if (pick_frames):
caption += " If you wish you may specify which frames you want to "+ \
"use; otherwise the program will attempt to pick sensible defaults."
caption_txt = wx.StaticText(self, -1, caption)
caption_txt.Wrap(500)
szr3.Add(caption_txt, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
grid = wx.FlexGridSizer(cols=2)
sizer.Add(grid, 0, wx.ALL)
txt1 = wx.StaticText(self, -1, "Directory:")
grid.Add(txt1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.dir_ctrl = path.PathCtrl(
parent=self,
style=path.WXTBX_PHIL_PATH_DIRECTORY)
grid.Add(self.dir_ctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(phil_controls.EVT_PHIL_CONTROL, self.OnChooseDirectory,
self.dir_ctrl)
txt2 = wx.StaticText(self, -1, "Image set:")
grid.Add(txt2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.stack_ctrl = wx.Choice(
parent=self,
size=(400,-1))
grid.Add(self.stack_ctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_CHOICE, self.OnChooseDataset, self.stack_ctrl)
if (pick_frames):
txt3 = wx.StaticText(self, -1, "Use frames:")
grid.Add(txt3, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.frame_ctrl = ints.IntsCtrl(
parent=self,
size=(400,-1))
self.frame_ctrl.SetMin(1)
grid.Add(self.frame_ctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
else :
self.frame_ctrl = None
self.add_controls_to_grid(grid)
def add_controls_to_grid(self, sizer):
"""
For subclasses which need to add aligned controls.
"""
pass
def GetDataset(self):
if (len(self._datasets) == 0):
raise Sorry("No dataset selected!")
else :
i = self.stack_ctrl.GetSelection()
frames = None
if (self.frame_ctrl is not None):
frames = self.frame_ctrl.GetPhilValue()
return self._datasets[i], frames
def OnChooseDirectory(self, event):
dir_name = self.dir_ctrl.GetPhilValue()
if (dir_name is not None):
from iotbx.detectors import identify_dataset
self._datasets = identify_dataset(dir_name)
choices = [ d.format() for d in self._datasets ]
self.stack_ctrl.SetItems(choices)
def OnChooseDataset(self, event):
print(self.stack_ctrl.GetSelection())
class SelectDatasetDialog(wx.Dialog, SelectDatasetPanelMixin):
def __init__(self, *args, **kwds):
self._datasets = []
style = wx.CAPTION
dlg_style = kwds.get("style", 0)
kwds['style'] = style
wx.Dialog.__init__(self, *args, **kwds)
szr = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(szr)
self.draw_dataset_controls(pick_frames=(dlg_style & RSTBX_SELECT_IMAGE_IDS))
btn_sizer = wx.StdDialogButtonSizer()
szr.Add(btn_sizer, 0, wx.ALL|wx.ALIGN_RIGHT, 10)
cancel_btn = wx.Button(self, wx.ID_CANCEL)
ok_btn = wx.Button(self, wx.ID_OK)
btn_sizer.Add(cancel_btn, 0, wx.RIGHT, 5)
btn_sizer.Add(ok_btn)
szr.Fit(self)
def OnOkay(self, event):
pass
def select_dataset(parent=None,
title="Select a dataset",
pick_frames=False):
style = 0
if (pick_frames):
style |= RSTBX_SELECT_IMAGE_IDS
dlg = SelectDatasetDialog(
parent=parent,
title=title,
style=style)
dataset = frames = None
if (dlg.ShowModal() == wx.ID_OK):
dataset, frames = dlg.GetDataset()
wx.CallAfter(dlg.Destroy)
if (pick_frames):
return dataset, frames
else :
return dataset
# regression testing
if (__name__ == "__main__"):
app = app.CCTBXApp(0)
dataset, frames = select_dataset(pick_frames=True)
if (dataset is not None):
if (frames is not None):
print("Selected images:")
for frame in frames :
file_name = dataset.get_frame_path(frame)
assert os.path.isfile(file_name)
print(" " + file_name)
else :
print(dataset)
|
StarcoderdataPython
|
8075425
|
# vanir/core/account/utils.py
from django.http import HttpResponse
from django.template import loader
def exchange_view_render(
template_name: str, response, request, **kwargs
) -> HttpResponse:
"""
Helper for extra views in account
:param template_name: Template name to render
:type template_name: str
:param response: View parameter
:param request: View parameter
:param kwargs: View parameter
:return: HTTP render view
:rtype: HttpResponse
"""
template = loader.get_template(template_name)
context = {"con": response, **kwargs}
return HttpResponse(template.render(context, request))
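

# Usage sketch (hypothetical view, not part of the original module): the template
# name, the stand-in response and the extra context key are assumptions.
def account_overview(request):
    response = {"balance": "0.0"}  # stands in for an exchange client response
    return exchange_view_render(
        "account/overview.html", response, request, page_title="Overview"
    )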
|
StarcoderdataPython
|
6665587
|
import os
from os.path import dirname, join
import pytest
from pypi_simple import PYPI_SIMPLE_ENDPOINT, parse_simple_index
DATA_DIR = join(dirname(__file__), os.pardir, "data")
def test_empty():
with pytest.warns(DeprecationWarning):
projects = list(parse_simple_index("", PYPI_SIMPLE_ENDPOINT))
assert projects == []
def test_simple01():
with open(join(DATA_DIR, "simple01.html"), "rb") as fp:
with pytest.warns(DeprecationWarning):
projects = list(
parse_simple_index(
fp.read(),
PYPI_SIMPLE_ENDPOINT,
from_encoding="utf-8",
)
)
assert projects == [
("a", PYPI_SIMPLE_ENDPOINT + "a/"),
("a00k5pgrtn", PYPI_SIMPLE_ENDPOINT + "a00k5pgrtn/"),
("a10ctl", PYPI_SIMPLE_ENDPOINT + "a10ctl/"),
("a10-horizon", PYPI_SIMPLE_ENDPOINT + "a10-horizon/"),
("a10-neutronclient", PYPI_SIMPLE_ENDPOINT + "a10-neutronclient/"),
("a10-neutron-lbaas", PYPI_SIMPLE_ENDPOINT + "a10-neutron-lbaas/"),
("a10-openstack-lbaas", PYPI_SIMPLE_ENDPOINT + "a10-openstack-lbaas/"),
("a10-openstack-lib", PYPI_SIMPLE_ENDPOINT + "a10-openstack-lib/"),
("a10sdk", PYPI_SIMPLE_ENDPOINT + "a10sdk/"),
("a2d_diary", PYPI_SIMPLE_ENDPOINT + "a2d-diary/"),
("a2m.itertools", PYPI_SIMPLE_ENDPOINT + "a2m-itertools/"),
("a2p2", PYPI_SIMPLE_ENDPOINT + "a2p2/"),
("a2pcej", PYPI_SIMPLE_ENDPOINT + "a2pcej/"),
("a2svm", PYPI_SIMPLE_ENDPOINT + "a2svm/"),
("a2w", PYPI_SIMPLE_ENDPOINT + "a2w/"),
("a2x", PYPI_SIMPLE_ENDPOINT + "a2x/"),
(
"a318288f-60c1-4176-a6be-f8a526b27661",
PYPI_SIMPLE_ENDPOINT + "a318288f-60c1-4176-a6be-f8a526b27661/",
),
("A3MIO", PYPI_SIMPLE_ENDPOINT + "a3mio/"),
("a3rt-sdk-py", PYPI_SIMPLE_ENDPOINT + "a3rt-sdk-py/"),
("a4t-party_contact", PYPI_SIMPLE_ENDPOINT + "a4t-party-contact/"),
]
def test_simple_base():
with open(join(DATA_DIR, "simple_base.html"), "rb") as fp:
with pytest.warns(DeprecationWarning):
projects = list(
parse_simple_index(
fp.read(),
PYPI_SIMPLE_ENDPOINT,
from_encoding="utf-8",
)
)
assert projects == [
("a", PYPI_SIMPLE_ENDPOINT + "projects/a/"),
("a00k5pgrtn", PYPI_SIMPLE_ENDPOINT + "projects/a00k5pgrtn/"),
("a10ctl", PYPI_SIMPLE_ENDPOINT + "projects/a10ctl/"),
("a10-horizon", PYPI_SIMPLE_ENDPOINT + "projects/a10-horizon/"),
("a10-neutronclient", PYPI_SIMPLE_ENDPOINT + "projects/a10-neutronclient/"),
]
def test_simple_devpi():
with open(join(DATA_DIR, "simple_devpi.html"), "rb") as fp:
with pytest.warns(DeprecationWarning):
projects = list(
parse_simple_index(
fp.read(),
"https://m.devpi.net/fschulze/dev/+simple/",
from_encoding="utf-8",
)
)
assert projects == [
("devpi", "https://m.devpi.net/fschulze/dev/+simple/devpi"),
("devpi-client", "https://m.devpi.net/fschulze/dev/+simple/devpi-client"),
("devpi-common", "https://m.devpi.net/fschulze/dev/+simple/devpi-common"),
("devpi-jenkins", "https://m.devpi.net/fschulze/dev/+simple/devpi-jenkins"),
("devpi-ldap", "https://m.devpi.net/fschulze/dev/+simple/devpi-ldap"),
("devpi-lockdown", "https://m.devpi.net/fschulze/dev/+simple/devpi-lockdown"),
(
"devpi-postgresql",
"https://m.devpi.net/fschulze/dev/+simple/devpi-postgresql",
),
("devpi-server", "https://m.devpi.net/fschulze/dev/+simple/devpi-server"),
("devpi-web", "https://m.devpi.net/fschulze/dev/+simple/devpi-web"),
("ploy-ezjail", "https://m.devpi.net/fschulze/dev/+simple/ploy-ezjail"),
("pytest", "https://m.devpi.net/fschulze/dev/+simple/pytest"),
("waitress", "https://m.devpi.net/fschulze/dev/+simple/waitress"),
("0", "https://m.devpi.net/fschulze/dev/+simple/0"),
("0-0", "https://m.devpi.net/fschulze/dev/+simple/0-0"),
("0-0-1", "https://m.devpi.net/fschulze/dev/+simple/0-0-1"),
("0-core-client", "https://m.devpi.net/fschulze/dev/+simple/0-core-client"),
("0-orchestrator", "https://m.devpi.net/fschulze/dev/+simple/0-orchestrator"),
("00smalinux", "https://m.devpi.net/fschulze/dev/+simple/00smalinux"),
]
|
StarcoderdataPython
|
6601126
|
from datetime import datetime
import os
import pandas as pd
import numpy as np
import torch as t
from utils.utils import load_nii, read_img, resize_volume, keep_largest_connected_components, crop_volume, \
reconstruct_volume
from utils.timer import timeit
from metric import metrics
class Evaluator:
"""
Evaluate the performance of a segmentation model with the raw data of bSSFP and LGE
"""
def __init__(self, file_path='../raw_data/', class_name=('myo', 'lv', 'rv')):
"""
Parameters
----------
file_path: file path to the raw data
class_name:
"""
self.class_name = class_name
self._file_path = file_path
def evaluate_single_dataset(self, seg_model, model_name='best_model', modality='lge', phase='test', ifhd=True, ifasd=True,
save=False, weight_dir=None, bs=16, toprint=True, lge_train_test_split=None, cal_unctnty=False, watch_pat=None):
"""
Function to compute the metrics for a single modality of a single dataset.
Parameters
----------
seg_model: t.nn.Module
the segmentation module.
model_name: str
the model name to be saved.
modality: str
choose from "bssfp" and "lge".
phase: str
choose from "train", "valid" and "test".
ifhd: bool
whether to calculate HD.
ifasd: bool
whether to calculate ASD.
save: bool
            whether to save the results as a csv file.
weight_dir: str
specify the directory to the weight if load weight.
bs: int
the batch size for prediction (only for memory saving).
toprint: bool
whether to print out the results.
(following are not used for FUDA)
lge_train_test_split: int
specify from where the training data should be splitted into training and testing data.
cal_unctnty: bool
whether to calculate and print out the highest uncertainty (entropy) of the prediction.
watch_pat: int
specify the pat_id that should be printed out its uncertainty.
Returns a dictionary of metrics {dc: [], hd: [], asd: []}.
-------
"""
uncertainty_list, uncertainty_slice_list = [], []
seg_model.eval()
if save:
csv_path = 'evaluation_of_models_on_{}_for_{}_{}.csv'.format(modality, phase, datetime.now().date())
if os.path.exists(csv_path):
df = pd.read_csv(csv_path)
else:
data = {'DC': [], 'HD': [], 'ASD': [], 'cat': [], 'model': [], 'pad_id': []}
df = pd.DataFrame(data)
if weight_dir is not None:
try:
seg_model.load_state_dict(t.load(weight_dir)['model_state_dict'])
except:
seg_model.load_state_dict(t.load(weight_dir))
print("model loaded")
if modality == 'lge':
folder = 'LGE'
elif modality == 'bssfp':
folder = 'C0'
else:
raise ValueError('modality can only be \'bssfp\' or \'lge\'')
endo_dc, myo_dc, rv_dc = [], [], []
endo_hd, myo_hd, rv_hd = [], [], []
endo_asd, myo_asd, rv_asd, = [], [], []
if phase == 'valid':
start_idx = 1
end_idx = 6
elif phase == 'test':
start_idx = 6 if lge_train_test_split is None else lge_train_test_split
end_idx = 46
else:
start_idx = 6
end_idx = 46 if lge_train_test_split is None else lge_train_test_split
for pat_id in range(start_idx, end_idx):
# if pat_id % 20 == 0:
# print("Evaluating patient {}".format(pat_id))
# test_path = sorted(glob("../input/raw_data/dataset/patient{}_LGE.nii.gz".format(pat_id)))
mask_path = os.path.join(self._file_path, 'labels/patient{}_{}_manual.nii.gz'.format(pat_id, folder))
nimg, affine, header = load_nii(mask_path)
vol_resize = read_img(pat_id, nimg.shape[2], modality=modality, file_path='../data/mscmrseg')
vol_resize = crop_volume(vol_resize, crop_size=112)
x_batch = np.array(vol_resize, np.float32) / 255.
x_batch = np.moveaxis(x_batch, -1, 1)
pred = []
# temp = []
for i in range(0, len(x_batch), bs):
index = np.arange(i, min(i + bs, len(x_batch)))
imgs = x_batch[index]
pred1, pred_norm = seg_model(t.tensor(imgs).cuda())
# uncertainty = F.softmax(pred1, dim=1).cpu().detach().numpy()
# temp.append(uncertainty)
pred1 = pred1.cpu().detach().numpy()
pred.append(pred1)
# temp = np.clip(np.concatenate(temp, axis=0), 1e-6, 1-1e-6)
# temp = np.mean(-temp * np.log(temp), axis=(1,2,3))
# uncertainty_slice_list.append(temp)
# uncertainty_list.append(np.mean(temp))
pred = np.concatenate(pred, axis=0)
pred = np.moveaxis(pred, 1, 3)
pred = reconstruct_volume(pred, crop_size=112)
pred_resize = []
for i in range(0, 4):
pred_resize.append(resize_volume(pred[:, :, :, i], w=nimg.shape[0], h=nimg.shape[1]))
pred = np.stack(np.array(pred_resize), axis=3)
pred = np.argmax(pred, axis=3)
masks = nimg.T
masks = np.where(masks == 200, 1, masks)
masks = np.where(masks == 500, 2, masks)
masks = np.where(masks == 600, 3, masks)
pred = keep_largest_connected_components(pred)
pred = np.array(pred).astype(np.uint16)
res = metrics(masks, pred, apply_hd=ifhd, apply_asd=ifasd, pat_id=pat_id, modality=modality,
class_name=self.class_name)
if save:
df2 = pd.DataFrame([[res['lv'][0], res['lv'][1], res['lv'][2], 'lv', model_name, pat_id],
[res['rv'][0], res['rv'][1], res['rv'][2], 'rv', model_name, pat_id],
[res['myo'][0], res['myo'][1], res['myo'][2], 'myo', model_name, pat_id]],
columns=['DC', 'HD', 'ASD', 'cat', 'model', 'pad_id'])
df = df.append(df2, ignore_index=True)
# endo, rv, myo
endo_dc.append(res['lv'][0])
rv_dc.append(res['rv'][0])
myo_dc.append(res['myo'][0])
if res['lv'][1] != -1:
endo_hd.append(res['lv'][1])
if res['rv'][1] != -1:
rv_hd.append(res['rv'][1])
if res['myo'][1] != -1:
myo_hd.append(res['myo'][1])
if res['lv'][2] != -1:
endo_asd.append(res['myo'][2])
if res['rv'][2] != -1:
rv_asd.append(res['rv'][2])
if res['myo'][2] != -1:
myo_asd.append(res['myo'][2])
if cal_unctnty:
pat_highest_ucty = np.argmax(uncertainty_list) + start_idx
print("The pat id with the highest uncertainty: {}".format(pat_highest_ucty))
print("The slice with the highest uncertainty in the pat {}: {}".format(pat_highest_ucty, np.argmax(uncertainty_slice_list[np.argmax(uncertainty_list)])))
print("The pat id with the lowest uncertainty: {}".format(np.argmin(uncertainty_list) + start_idx))
if watch_pat:
print("The slice with the highest uncertainty in the pat {}: {}".format(watch_pat, np.argmax(
uncertainty_slice_list[watch_pat - start_idx])))
print("Uncertainty of the slices of pat {}: {}".format(watch_pat, uncertainty_slice_list[watch_pat - start_idx]))
print("Uncertainty list: {}".format(np.round(uncertainty_list, 5)))
print("The patient with the highest DC: {}".format(np.argmax(endo_dc) + start_idx))
print("The patient with the lowest DC: {}".format(np.argmin(endo_dc) + start_idx))
print("DC list: {}".format(np.round(endo_dc, 3)))
if save:
df.to_csv(csv_path, index=False)
mean_endo_dc = np.around(np.mean(np.array(endo_dc)), 3)
mean_rv_dc = np.around(np.mean(np.array(rv_dc)), 3)
mean_myo_dc = np.around(np.mean(np.array(myo_dc)), 3)
std_endo_dc = np.around(np.std(np.array(endo_dc)), 3)
std_rv_dc = np.around(np.std(np.array(rv_dc)), 3)
std_myo_dc = np.around(np.std(np.array(myo_dc)), 3)
if toprint:
print("Modality: {}, Phase: {}".format(modality, phase))
print("Ave endo DC: {}, {}, Ave rv DC: {}, {}, Ave myo DC: {}, {}".format(mean_endo_dc, std_endo_dc, mean_rv_dc,
std_rv_dc, mean_myo_dc, std_myo_dc))
print("Ave Dice: {:.3f}, {:.3f}".format((mean_endo_dc + mean_rv_dc + mean_myo_dc) / 3.,
(std_endo_dc + std_rv_dc + std_myo_dc) / 3.))
if ifhd:
mean_endo_hd = np.around(np.mean(np.array(endo_hd)), 3)
mean_rv_hd = np.around(np.mean(np.array(rv_hd)), 3)
mean_myo_hd = np.around(np.mean(np.array(myo_hd)), 3)
std_endo_hd = np.around(np.std(np.array(endo_hd)), 3)
std_rv_hd = np.around(np.std(np.array(rv_hd)), 3)
std_myo_hd = np.around(np.std(np.array(myo_hd)), 3)
if toprint:
print("Ave endo HD: {}, {}, Ave rv HD: {}, {}, Ave myo HD: {}, {}".format(mean_endo_hd, std_endo_hd,
mean_rv_hd, std_rv_hd,
mean_myo_hd, std_myo_hd))
print("Ave HD: {:.3f}, {:.3f}".format((mean_endo_hd + mean_rv_hd + mean_myo_hd) / 3.,
(std_endo_hd + std_rv_hd + std_myo_hd) / 3.))
else:
mean_myo_hd, std_myo_hd, mean_endo_hd, std_endo_hd, mean_rv_hd, std_rv_hd = 0, 0, 0, 0, 0, 0
if ifasd:
mean_endo_asd = np.around(np.mean(np.array(endo_asd)), 3)
mean_rv_asd = np.around(np.mean(np.array(rv_asd)), 3)
mean_myo_asd = np.around(np.mean(np.array(myo_asd)), 3)
std_endo_asd = np.around(np.std(np.array(endo_asd)), 3)
std_rv_asd = np.around(np.std(np.array(rv_asd)), 3)
std_myo_asd = np.around(np.std(np.array(myo_asd)), 3)
if toprint:
print("Ave endo ASD: {}, {}, Ave rv ASD: {}, {}, Ave myo ASD: {}, {}".format(mean_endo_asd, std_endo_asd,
mean_rv_asd, std_rv_asd,
mean_myo_asd, std_myo_asd))
print("Ave ASD: {:.3f}, {:.3f}".format((mean_endo_asd + mean_rv_asd + mean_myo_asd) / 3.,
(std_endo_asd + std_rv_asd + std_myo_asd) / 3.))
else:
mean_myo_asd, std_myo_asd, mean_endo_asd, std_endo_asd, mean_rv_asd, std_rv_asd = 0, 0, 0, 0, 0, 0
if toprint:
print(
'DC: {}, {}, {}, {}, {}, {}'.format(mean_myo_dc, std_myo_dc, mean_endo_dc, std_endo_dc, mean_rv_dc, std_rv_dc))
if ifhd:
print('HD: {}, {}, {}, {}, {}, {}'.format(mean_myo_hd, std_myo_hd, mean_endo_hd, std_endo_hd, mean_rv_hd,
std_rv_hd))
if ifasd:
print('ASD: {}, {}, {}, {}, {}, {}'.format(mean_myo_asd, std_myo_asd, mean_endo_asd, std_endo_asd, mean_rv_asd,
std_rv_asd))
return {'dc': [mean_myo_dc, std_myo_dc, mean_endo_dc, std_endo_dc, mean_rv_dc, std_rv_dc],
'hd': [mean_myo_hd, std_myo_hd, mean_endo_hd, std_endo_hd, mean_rv_hd, std_rv_hd],
'asd': [mean_myo_asd, std_myo_asd, mean_endo_asd, std_endo_asd, mean_rv_asd, std_rv_asd]}
@timeit
def evaluate(self, seg_model, ifhd=True, ifasd=True, weight_dir=None, bs=16, lge_train_test_split=None):
bssfp_train = self.evaluate_single_dataset(seg_model=seg_model, modality='bssfp', phase='train', ifhd=ifhd, ifasd=ifasd, save=False, weight_dir=weight_dir, bs=bs, toprint=False)
bssfp_val = self.evaluate_single_dataset(seg_model=seg_model, modality='bssfp', phase='valid', ifhd=ifhd, ifasd=ifasd, save=False, weight_dir=weight_dir, bs=bs, toprint=False)
lge_val = self.evaluate_single_dataset(seg_model=seg_model, modality='lge', phase='valid', ifhd=ifhd, ifasd=ifasd, save=False, weight_dir=weight_dir, bs=bs, toprint=False)
lge_test = self.evaluate_single_dataset(seg_model=seg_model, modality='lge', phase='test', ifhd=ifhd, ifasd=ifasd, save=False, weight_dir=weight_dir, bs=bs, toprint=False,
lge_train_test_split=lge_train_test_split)
return bssfp_train, bssfp_val, lge_val, lge_test
if __name__ == '__main__':
import argparse
from model.DRUNet import Segmentation_model as DR_UNet
from torch.cuda import get_device_name
print("Device name: {}".format(get_device_name(0)))
parser = argparse.ArgumentParser(description="Evaluation")
parser.add_argument("--restore_from", type=str, default='weights/best_DR_UNet.fewshot.lr0.00035.eps3.LSeg.lrs120.0.pat_10_lge.e40.Scr0.67.pt', help="Where restore model parameters from.")
parser.add_argument("--batch_size", type=int, default=16, help="Number of images sent to the network in one step.")
parser.add_argument("--file_path", type=str, default='../data/mscmrseg/raw_data')
args = parser.parse_args()
evaluator = Evaluator(file_path=args.file_path)
segmentor = DR_UNet(n_class=4)
evaluator.evaluate_single_dataset(segmentor, model_name='best_model', modality='lge', phase='test', ifhd=True,
ifasd=True, save=False, weight_dir=args.restore_from, bs=args.batch_size,
toprint=True, lge_train_test_split=None, cal_unctnty=False, watch_pat=None)
|
StarcoderdataPython
|
5014689
|
# coding: utf-8
import datetime
from datetime import datetime as dt
from importlib import import_module
import numpy as np
import pandas as pd
def get_available_drivers() -> dict:
try:
module = import_module('src.infrastructure.clients.provider')
except ImportError:
raise ImportError('Unable to import providers module')
drivers = {
name: item for name, item in module.__dict__.items() \
if name.endswith('Driver') and callable(item)
}
return drivers
def get_drivers_names() -> tuple:
return tuple(get_available_drivers().keys())
def get_drivers_choices() -> tuple:
return tuple(zip(*(get_drivers_names(),) * 2))
def get_business_days(date_from: str, date_to: str) -> tuple:
bdays = pd.bdate_range(start=date_from, end=date_to).values
return tuple([np.datetime_as_string(bday, unit='D') for bday in bdays])
def get_last_business_day(date: str = None) -> str:
date = date or datetime.date.today().strftime('%Y-%m-%d')
is_business_day = bool(len(pd.bdate_range(start=date, end=date)))
if not is_business_day:
offset = pd.tseries.offsets.BusinessDay(n=1)
date = (dt.strptime(date, '%Y-%m-%d') - offset).strftime('%Y-%m-%d')
return date
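

if __name__ == "__main__":
    # Quick demo, not part of the original module; the dates are arbitrary examples.
    print(get_business_days('2021-03-01', '2021-03-07'))  # Monday..Friday of that week
    print(get_last_business_day('2021-03-06'))            # Saturday rolls back to '2021-03-05'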
|
StarcoderdataPython
|
4837803
|
# examples/intrp_diff_example.py
'''
Example script for intrp_diff
spxll'16
'''
#
from os import system
system('clear')
#
from positive import *
from matplotlib.pyplot import *
from numpy import *
#
t = linspace(0, 12*pi, 1000)
#
y0 = cos(t)
#
y1 = intrp_diff( t, y0 )
y2 = intrp_diff( t, y0, n = 2 )
#
pks,locs = findpeaks(y0)
#
figure()
plot( t, y0, color='0.8', linewidth=5 )
plot( t, y1, 'r--' )
plot( t,-y2, '--b' )
plot( t[locs], pks, 'or' )
xlim( lim(t) )
show()
|
StarcoderdataPython
|
6634711
|
def sizeOfLongest(array):
longest = 0
for word in array:
if len(word) > longest:
longest = len(word)
return longest
def printStars(number):
starString = ""
for i in range(0, number):
starString += "*"
print(starString)
# example: word = "a", longest = 9
def printWordWithStars(word, longest):
remainingSpaces = longest - len(word)
line = "* "
line += word
for i in range(0, remainingSpaces):
line += " "
line += " *"
print(line)
def writeInFrame(sentence):
# split string into array of strings separating by spaces
words = sentence.split(" ")
longest = sizeOfLongest(words)
printStars(longest + 4)
for word in words:
printWordWithStars(word, longest)
printStars(longest + 4)
print("Give me a sentence")
sentence = input()
writeInFrame(sentence)
|
StarcoderdataPython
|
4825623
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from gym import spaces
import neurogym as ngym
class MatchingPenny(ngym.TrialEnv):
"""Matching penny task.
The agent is rewarded when it selects the same target as the computer.
opponent_type: Type of opponent. (def: 'mean_action', str)
Args:
learning_rate: learning rate in the mean_action opponent
"""
metadata = {
'paper_link': 'https://www.nature.com/articles/nn1209',
'paper_name': '''Prefrontal cortex and decision making in a
mixed-strategy game''',
'tags': ['two-alternative']
}
def __init__(self, dt=100, rewards=None, timing=None,
opponent_type='mean_action', learning_rate=0.2):
super().__init__(dt=dt)
if timing is not None:
print('Warning: Matching-Penny task does not require' +
' timing variable.')
# TODO: remain to be carefully tested
# Opponent Type
self.opponent_type = opponent_type
# Rewards
self.rewards = {'correct': +1., 'fail': 0.}
if rewards:
self.rewards.update(rewards)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(2,),
dtype=np.float32)
self.prev_opp_action = int(self.rng.rand() > 0.5)
if self.opponent_type == 'mean_action':
self.mean_action = 0
self.lr = learning_rate
def _new_trial(self, **kwargs):
# ---------------------------------------------------------------------
# Trial (trials are one step long)
# ---------------------------------------------------------------------
# TODO: Add more types of opponents
# determine the transitions
if self.opponent_type == 'random':
opponent_action = int(self.rng.rand() > 0.5)
elif self.opponent_type == 'mean_action':
opponent_action = 1*(not np.round(self.mean_action))
else:
ot = self.opponent_type
raise ValueError('Unknown opponent type {:s}'.format(ot))
trial = {'opponent_action': opponent_action}
self.ob = np.zeros((1, self.observation_space.shape[0]))
self.ob[0, self.prev_opp_action] = 1
self.prev_opp_action = trial['opponent_action']
self.gt = np.array([opponent_action])
return trial
def _step(self, action):
trial = self.trial
obs = self.ob[0]
if self.opponent_type == 'mean_action':
self.mean_action += self.lr*(action-self.mean_action)
if action == trial['opponent_action']:
reward = self.rewards['correct']
self.performance = 1
else:
reward = self.rewards['fail']
info = {'new_trial': True, 'gt': self.gt}
return obs, reward, False, info
if __name__ == '__main__':
env = MatchingPenny(opponent_type='mean_action')
ngym.utils.plot_env(env, num_steps=100) # , def_act=0)
|
StarcoderdataPython
|
1854218
|
from bitmovin_api_sdk.encoding.configurations.audio.he_aac_v2.customdata.customdata_api import CustomdataApi
|
StarcoderdataPython
|
172937
|
"""Implementation of Model-Free Policy Gradient Algorithms."""
import torch.nn.modules.loss as loss
from torch.optim import Adam
from rllib.algorithms.ac import ActorCritic
from rllib.policy import NNPolicy
from rllib.value_function import NNQFunction
from .on_policy_agent import OnPolicyAgent
class ActorCriticAgent(OnPolicyAgent):
"""Abstract Implementation of the Actor-Critic Agent.
The AbstractEpisodicPolicyGradient algorithm implements the Actor-Critic algorithms.
TODO: build compatible function approximation.
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2000).
Policy gradient methods for reinforcement learning with function approximation.NIPS.
<NAME>., & <NAME>. (2000).
Actor-critic algorithms. NIPS.
"""
eps = 1e-12
def __init__(
self,
policy,
critic,
algorithm_=ActorCritic,
criterion=loss.MSELoss,
eta=0.001,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.algorithm = algorithm_(
policy=policy,
critic=critic,
criterion=criterion(reduction="mean"),
eta=eta,
*args,
**kwargs,
)
self.policy = self.algorithm.policy
self.optimizer = type(self.optimizer)(
[p for n, p in self.algorithm.named_parameters() if "target" not in n],
**self.optimizer.defaults,
)
@classmethod
def default(
cls,
environment,
policy=None,
critic=None,
critic_lr=1e-3,
actor_lr=3e-4,
*args,
**kwargs,
):
"""See `AbstractAgent.default'."""
if policy is None:
policy = NNPolicy.default(environment)
if critic is None:
critic = NNQFunction.default(environment)
optimizer = Adam(
[
{"params": policy.parameters(), "lr": actor_lr},
{"params": critic.parameters(), "lr": critic_lr},
]
)
return super().default(
environment=environment,
policy=policy,
critic=critic,
optimizer=optimizer,
*args,
**kwargs,
)
|
StarcoderdataPython
|
9614977
|
import board as GB
import numpy as np
import background as bg
import secnery as sc
import os
import time
clear = lambda: os.system('clear')
alien_type1_list=[]
alien_type2_list=[]
second_list=[]
econ=True
class Mario(GB.Codi):
mario_img=np.array([[" " for i in range(0,2)] for j in range(0,3)])
def __init__(self,x_cod,y_cod):
super(Mario, self).__init__(x_cod,y_cod)
self.mario_img[2,0:2]=u'\u013B'
self.mario_img[1,0:2]=u'\u2587'
self.mario_img[0,0:2]=u'\u253B'
self.print_mario()
def print_mario(self):
self.Matrix[self.y_cod:self.y_cod+3 , self.x_cod:self.x_cod+2] = self.mario_img
def check_and_inc(self,key):
if key=="d":
if self.check_to_inc(self.x_cod+1,self.y_cod,2,3) ==1:
if self.x_cod <= 40 or GB.counter>=100 or GB.level==2:
self.x_cod+=1
GB.score+=2
GB.counter+=1
else:
bg.board_move()
GB.score+=2
GB.counter+=1 #dynamic moving
self.fall_detect(3)
elif key=='a':
if self.check_to_inc(self.x_cod-1,self.y_cod,2,3) ==1:
self.x_cod-=1
GB.score+=2
GB.counter-=1
self.fall_detect(3)
elif key=='w':
os.system("aplay jump.wav& > /dev/null 2>&1")
self.jumpy(2,3)
return 0
# game_display()
def jumpy(self,size_x,size_y):
for i in range(0,8):
if self.check_to_inc(self.x_cod,self.y_cod-1,size_x,size_y) ==1:
self.y_cod-=1
else:
break
return 0
def check_to_inc(self,x,y,size_x,size_y):
for i in range(0,size_y):
for j in range(0,size_x):
if self.Matrix[y+i,x+j]==u'\u2588' or self.Matrix[y+i,x+j]== '|' or self.Matrix[y+i,x+j] == u'\u25A2':
os.system("aplay bump.wav& > /dev/null 2>&1")
return 0
return 1
def fall_detect(self,down_x):
global objmario
if self.y_cod+3 > 28:
GB.lives-=1
os.system("aplay mariodie.wav&")
del objmario
del self
respawn_mario()
return 0
for i in range(0,len(alien_type1_list)):
alien_type1_list[-i].move_right()
for i in range(0,len(alien_type2_list)):
alien_type2_list[-i].move_alien2()
for i in range(0,len(alien_type2_list)):
if alien_type2_list[-i].collision()==0:
alien_type2_list[-i]=Alien_type2(13,13,0,0)
for i in range(0,3):
if self.Matrix[self.y_cod + down_x, self.x_cod +i ] == u'\u2588':
return 0
elif self.Matrix[self.y_cod + down_x, self.x_cod -1 +i ] == '|':
GB.level=2
os.system("aplay pipe.wav&")
time.sleep(0.1)
return 0
self.y_cod+=1
class Alien_type1(Mario):
alien_t1=np.array([[" " for i in range(0,1)] for j in range(0,2)])
def __init__(self, x_cod,y_cod,jumpbit):
super(Alien_type1, self).__init__(x_cod,y_cod)
self.jumpbit=jumpbit
self.alien_t1[1,0]=u'\u220F'
self.alien_t1[0,0]=u'\u220E'
def move_right(self):
if self.x_cod <= 21 or self.y_cod > 27:
del alien_type1_list[0]
del self
elif self.check_fall() ==1:
self.x_cod-=1
self.collision()
return 0
def check_fall(self):
if self.Matrix[self.y_cod+2,self.x_cod-1]==u'\u2588' :
self.jumpbit=0
return 1
elif self.jumpbit==0 and self.Matrix[28,self.x_cod-1]!=u"\u2588" :
self.jumpbit=1
self.jumpy(1,2)
else:
if self.Matrix[self.y_cod + 2, self.x_cod +1 ] != u'\u2588' or self.Matrix[28, self.x_cod +1 ] == u'\u2588' :
self.y_cod+=1
self.x_cod-=1
return 0
def collision(self):
global objmario
x_m,y_m=objmario.display_cod()
if self.x_cod <= x_m +2 and self.x_cod >= x_m and self.y_cod >= y_m +3 and abs(y_m - self.y_cod) < 5:
GB.kill+=1
os.system("aplay stomp.wav& > /dev/null 2>&1")
GB.score+=1000
del self
del alien_type1_list[0]
return 0
if len(alien_type1_list)!=0:
if self.x_cod <= x_m +2 and self.x_cod >= x_m and self.y_cod >= y_m and self.y_cod <= y_m+ 3:
GB.lives-=1
os.system("aplay mariodie.wav&")
del objmario
del self
del alien_type1_list[0]
respawn_mario()
return 0
return 1
def print_alien_type1(self):
self.Matrix[self.y_cod:self.y_cod +2,self.x_cod : self.x_cod+1]=self.alien_t1
class Alien_type2(Alien_type1):
def __init__(self,x_cod,y_cod,jumpbit,speed):
super(Alien_type2, self).__init__(x_cod,y_cod,jumpbit)
self.speed=speed
def collision(self):
global objmario
x_m,y_m=objmario.display_cod()
if self.x_cod <= x_m +2 and self.x_cod >= x_m and self.y_cod >= y_m +3 and abs(y_m - self.y_cod) < 5:
GB.kill+=1
os.system("aplay stomp.wav& > /dev/null 2>&1")
bg.kill_all+=1
GB.score+=abs(self.speed *2000)
if bg.kill_all>=3:
sc.level2.open_gate()
del self
return 0
if len(alien_type2_list)!=0:
if self.x_cod <= x_m +2 and self.x_cod >= x_m and self.y_cod >= y_m and self.y_cod <= y_m+ 3:
GB.lives-=1
os.system("aplay mariodie.wav&")
del objmario
respawn_mario()
return 1
return 1
def move_alien2(self):
if self.x_cod+self.speed <= 20 or self.x_cod + self.speed >= 97:
self.speed*=-1
self.x_cod+=self.speed
def level2_gen_prog():
global objmario
del objmario
objmario=Mario(23,5)
alien_type2_list.append(Alien_type2(45,26,0,-1))
alien_type2_list.append(Alien_type2(23,26,0,2))
alien_type2_list.append(Alien_type2(78,26,0,1))
bg.level2_generation()
def game_display():
GB.objgameboard.initialize()
if GB.level==2:
sc.level2.print_scene()
else:
for i in range(0,len(bg.cloud_list)):
bg.cloud_list[i].print_cloud()
for i in range(0,len(bg.mountain_list)):
bg.mountain_list[i].print_mountain()
for i in range(0,len(bg.obstacle_list)):
bg.obstacle_list[i].print_obstacle()
for i in range(0,len(alien_type1_list)):
alien_type1_list[-i].print_alien_type1()
for i in range(0,len(bg.pipe_list)):
bg.pipe_list[i].print_pipe()
temp_list=[]
for i in range(0,len(bg.coins_list)):
x_m,y_m=objmario.display_cod()
if bg.coins_list[i].mario_pass(x_m,y_m) == 1:
temp_list.append(bg.coins_list[i])
bg.coins_list=temp_list
for i in range(0,len(bg.coins_list)):
bg.coins_list[i].print_coin()
# alien_type2_list=second_list
for i in range(0,len(alien_type2_list)):
alien_type2_list[-i].print_alien_type1()
objmario.print_mario()
GB.objgameboard.display()
time.sleep(0.07)
return 0
def respawn_mario():
global objmario
objmario=Mario(21,15)
respawn_mario()
def spawn_alien():
alien_type1_list.append(Alien_type1(98,24,0))
spawn_alien()
if __name__=="__main__":
objmario.display()
|
StarcoderdataPython
|
9788594
|
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from transformers import AdamW
from model import *
from data import *
#Data, ZNorm, Compose, RemoveJoints, KMeans, POSCluster, Relative2Parent, collate_fn_pad, PoseStarts, RandomTranslate
#from evaluation import PCK, L1, VelL1, Diversity, Expressiveness, F1
from argsUtils import get_args_perm
import evaluation
from animation import save_animation
from parallel import parallel
from pycasper.name import Name
from pycasper.BookKeeper import *
from pycasper import torchUtils
import wandb
import trainer_chooser
from argparse import Namespace
from pathlib import Path
from tqdm import tqdm
import json
from functools import partial
import itertools
import pickle as pkl
from collections import Counter
import pdb
FLOAT = torch.double # torch.double | torch.double
## Imports for get_pretrained_trainer
from argparse import Namespace
from argsUtils import get_args_perm
from pycasper.BookKeeper import BookKeeper
from pathlib import Path
import copy
import trainer_chooser
def get_pretrained_trainer(path2weights, cuda):
args_new = Namespace(load=path2weights, cuda=cuda, save_dir=Path(path2weights).parent.as_posix(), pretrained_model=1)
args, args_perm = get_args_perm()
args.__dict__.update(args_perm[0])
## Load the correct args from the weight file to choose the correct Trainer
args.__dict__.update(args_new.__dict__)
book = BookKeeper(args, [], args_dict_update = {'load_data':0, 'pretrained_model':1, 'sample_all_styles':0, 'mix':0, 'optim_separate':None, 'path2data':args.path2data})
## Choose the correct Trainer
Trainer = trainer_chooser.trainer_chooser(book.args)
## Load the Trainer
trainer = Trainer(args, [], args_dict_update = {'load_data':0, 'pretrained_model':1, 'path2data':args.path2data})
return trainer
def get_pretrained_trainers(pathdict, cuda):
if isinstance(pathdict, str):
return get_pretrained_trainer(pathdict, cuda)
elif isinstance(pathdict, list):
return [get_pretrained_trainer(path, cuda) for path in pathdict]
elif isinstance(pathdict, dict):
return {key:get_pretrained_trainer(pathdict[key], cuda) for key in pathdict}
else:
assert False, 'pathdict must be a string, list or dict kind'
'''
Class Hierarchy
Trainer Skeleton
- TrainerBase
- Trainer
- TrainerGAN
'''
class TrainerBase():
def __init__(self, args, args_subset, args_dict_update={}):
self.book = BookKeeper(args, args_subset, args_dict_update=args_dict_update,
tensorboard=args.tb)
self.args = self.book.args
## Training parameters
self.path2data = self.args.path2data
self.path2outdata = self.args.path2outdata
self.speaker = self.args.speaker
self.modalities = self.args.modalities
if self.args.input_modalities is None: ## infer inputs and outputs from self.modalities
self.input_modalities = self.modalities[1:]
else:
self.input_modalities = self.args.input_modalities
if self.args.output_modalities is None:
self.output_modalities = self.modalities[:1]
else:
self.output_modalities = self.args.output_modalities
self.mask = self.args.mask
self.mask = list(np.concatenate([np.r_[i] if isinstance(i, int) else np.r_[eval(i)] for i in self.mask])) ## convert ranges to list of numbers
self.split = self.args.split
self.batch_size = self.args.batch_size
self.shuffle = True if self.args.shuffle else False
self.time = self.args.time
self.fs_new = self.args.fs_new if isinstance(self.args.fs_new, list) else [self.args.fs_new] * len(self.modalities)
self.window_hop = self.args.window_hop
self.num_epochs = self.args.num_epochs
self.num_clusters = args.num_clusters
self.feats = self.args.feats
self.num_training_sample = self.args.num_training_sample
self.style_losses = self.args.style_losses
self.style_iters = self.args.style_iters
self.sample_all_styles = self.args.sample_all_styles
self.repeat_text = self.args.repeat_text
self.num_workers = self.args.num_workers
self.relative2parent = self.args.relative2parent
self.quantile_sample = self.args.quantile_sample
self.quantile_num_training_sample = self.args.quantile_num_training_sample
self.heuristic_pose_starts = self.args.heuristic_pose_starts
self.metrics = self.args.metrics
self.load_data = self.args.load_data
self.pretrained_model = self.args.pretrained_model
self.modelKwargs = {}
## parameter to use pad_collate for the dataloaders
self.text_in_modalities = False
for modality in self.modalities:
if 'text' in modality:
self.text_in_modalities = True
## Device
self.device = torch.device('cuda:{}'.format(self.args.cuda)) if self.args.cuda>=0 else torch.device('cpu')
## Get Data
self.data, self.data_train, self.data_dev, self.data_test = self.get_data()
## Get style
self.style_dict = self.data.style_dict
self.style_dim = self.args.style_dim
## Data shape
self.data_shape = self.data.shape
# define input and output modalities TODO hadcoded
self.output_modality = self.output_modalities[0]
## Parents
self.parents = self.data.modality_classes[self.output_modality].parents
## Get cluster Transform for Cluster based models
# if self.num_clusters is not None or self.args.pos:
# self.cluster = self.get_cluster()
# if self.args.pos:
# self.num_clusters = len(self.cluster.tagset)
if self.num_clusters is not None:
self.cluster = self.get_cluster()
if self.args.pos:
self.cluster_pos = self.get_pos_cluster()
self.num_clusters_pos = len(self.cluster_pos.tagset)
if args.preprocess_only:
print('Data Preprocessing done')
exit(1)
## ZNorm + other transforms
self.pre = self.get_pre()
## Remove Joints / Reinsert Joints from data
self.transform = self.get_transforms()
## transform the confidence matrix
self.transform_confidence = self.get_transforms()
self.confidence_loss = Confidence(beta=1, epsilon=0.5)
## label histogram
if self.num_clusters is not None:
self.num_styles = len(self.speaker) if self.speaker[0] != 'all' else len(self.data.speakers)
if self.sample_all_styles: ## if all styles are being sampled, create the permutation of the kwargs_names
kwargs_names = ['{}_{}'.format(sp1, sp2) for sp2 in self.speaker for sp1 in self.speaker if sp1 != sp2]
else:
kwargs_names = ['style']
kwargs_names.append('same')
self.labels_hist = {kwargs_name:{desc:{i:torch.zeros(self.num_clusters) for i in range(self.num_styles)} for desc in ['test', 'train', 'dev']} for kwargs_name in kwargs_names}
self.labels_hist_tensor = {kwargs_name:{desc:{i:torch.zeros(1, self.num_clusters) for i in range(self.num_styles)} for desc in ['test', 'train', 'dev']} for kwargs_name in kwargs_names}
#self.labels_hist = {kwargs_name:{desc:{i:torch.zeros(self.num_clusters) for i in range(self.num_styles)} for desc in ['test', 'train', 'dev']} for kwargs_name in ['same', 'style']}
#self.labels_hist_tensor = {kwargs_name:{desc:{i:torch.zeros(1, self.num_clusters) for i in range(self.num_styles)} for desc in ['test', 'train', 'dev']} for kwargs_name in ['same', 'style']}
if args.mix and args.load:
self.Stack = partial(evaluation.Stack, n=len(self.data.speaker), speakers=self.data.speaker, sample_styles=['mix'])
elif self.args.sample_all_styles != 0 and args.load:
sample_styles = ['same'] + ['_'.join(list(perm)) for perm in itertools.permutations(self.speaker, 2)]
self.Stack = partial(evaluation.Stack, n=len(self.data.speaker), speakers=self.data.speaker, sample_styles=sample_styles)
elif self.args.load:
self.Stack = partial(evaluation.Stack, n=len(self.data.speaker), speakers=self.data.speaker, sample_styles=['same', 'style'])
else:
self.Stack = partial(evaluation.Stack, n=0, speakers=[], sample_styles=['same'])
## Metrics
self.metrics_init()
## Counter for reweighting
self.weight_counter = Counter()
## Create Model
self.update_modelKwargs()
self.model = self.get_model()
self.model.to(device=self.device, dtype=FLOAT)
self.update_model()
#device_ids = list(range(torch.cuda.device_count()))
#self.model = nn.DataParallel(self.model, device_ids=device_ids)
## Load model
if self.args.load:
print('Loading Model')
self.book._load_model(self.model, map_location=self.device)
if not self.pretrained_model:
self.book._copy_best_model(self.model)
print('Model Created')
## Loss Function
self.criterion = self.get_criterion()
## Optimizers
self.G_optim, self.D_optim = self.get_optims()
## Scheduler
self.schedulers = self.get_scheduler()
## Wandb
self.wandb_init = partial(wandb.init,
dir=self.args.wandb_dir,
config=self.args.__dict__,
name=self.book.name.name,
project=self.args.wandb_project,
group='-'.join(self.speaker))
def get_data(self):
## Load data iterables
data = Data(self.path2data, self.speaker, self.modalities, self.fs_new,
time=self.time, split=self.split, batch_size=self.batch_size,
shuffle=self.shuffle, num_workers=self.num_workers,
window_hop=self.window_hop, style_iters=self.style_iters,
num_training_sample=self.num_training_sample,
load_data=self.load_data, sample_all_styles=self.sample_all_styles,
repeat_text=self.repeat_text, quantile_sample=self.quantile_sample,
quantile_num_training_sample=self.quantile_num_training_sample,
weighted=self.args.weighted, filler=self.args.filler,
num_training_iters=self.args.num_training_iters, audio_average=self.args.audio_average)
data_train = data.train
data_dev = data.dev
data_test = data.test
print('Data Loaded')
return data, data_train, data_dev, data_test
def get_criterion(self):
return eval('torch.nn.' + self.args.loss)(**self.args.lossKwargs)
def get_pck(self):
return self.Stack(evaluation.PCK(num_joints=int(self.data_shape[self.output_modality][-1]/2)))
def get_l1(self):
return self.Stack(evaluation.L1())
def get_VelL1(self):
return self.Stack(evaluation.VelL1())
def get_Diversity(self):
mean = self.pre.transforms[-1].variable_dict[self.output_modality][0]
remove_joints = RemoveJoints(self.mask)
mean = remove_joints(mean).squeeze(0)
return self.Stack(evaluation.Diversity(mean))
def get_Expressiveness(self):
mean = self.pre.transforms[-1].variable_dict[self.output_modality][0]
remove_joints = RemoveJoints(self.mask)
mean = remove_joints(mean).squeeze(0)
return self.Stack(evaluation.Expressiveness(mean))
def get_F1(self):
cluster = KMeans(variable_list=[self.output_modality], key=self.speaker, data=self.data_train, num_clusters=8, mask=self.mask, feats=self.feats)
return self.Stack(evaluation.F1(num_clusters=8)), cluster
def get_IS(self):
speakers_rev = {sp:i for i,sp in enumerate(self.data.speakers)}
if 'all' in self.speaker:
speaker = self.data.speakers
else:
speaker = self.speaker
weight = torch.Tensor([speakers_rev[sp.split('|')[0]] for sp in speaker]).unsqueeze(-1)
return self.Stack(evaluation.InceptionScoreStyle(len(self.data.speakers), weight))
def get_FID(self):
return self.Stack(evaluation.FID())
def get_W1(self):
return self.Stack(evaluation.W1())
def get_optims(self):
if self.args.gan !=0:
model_params = list(self.model.G.parameters())
else:
model_params = list(self.model.parameters())
if self.args.optim_separate is not None: ## TODO harcoded to work with text_encoder
if self.args.gan != 0:
bert_params = self.model.G.text_encoder.parameters()
else:
bert_params = self.model.text_encoder.parameters()
bert_params = list(bert_params)
G_optim = eval('torch.optim.' + self.args.optim)([{'params': bert_params,
'lr':self.args.optim_separate},
{'params': list(set(model_params) \
- set(bert_params))}],
lr=self.args.lr, **self.args.optimKwargs)
else:
G_optim = eval('torch.optim.' + self.args.optim)(model_params, lr=self.args.lr, **self.args.optimKwargs)
if self.args.gan != 0:
D_optim = eval('torch.optim.' + self.args.optim)(self.model.D.parameters(), lr=self.args.lr, **self.args.optimKwargs)
else:
D_optim = None
return G_optim, D_optim
#return AdamW(self.model.parameters(), lr=self.args.lr, **self.args.optimKwargs)
def get_scheduler(self):
schedulers = []
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
if self.args.scheduler == 'linear_decay':
warmup_steps = self.args.scheduler_warmup_steps
schedulers.append(get_linear_schedule_with_warmup(self.G_optim, warmup_steps, len(self.data.train)*self.num_epochs))
if self.D_optim is not None:
schedulers.append(get_linear_schedule_with_warmup(self.D_optim, warmup_steps, len(self.data.train)*self.num_epochs))
else:
schedulers.append(lr_scheduler.ExponentialLR(self.G_optim, gamma=self.args.gamma))
if self.D_optim is not None:
schedulers.append(lr_scheduler.ExponentialLR(self.D_optim, gamma=self.args.gamma))
return schedulers
def get_pre(self):
transforms = []
if self.heuristic_pose_starts:
transforms.append(PoseStarts(self.modalities, key=self.speaker, data=self.data_train))
if self.relative2parent:
transforms.append(Relative2Parent())
pre_op = Compose(transforms) ## if the mean variance is being calculated for the first time, it uses the pre_op on each batch before calculating mean var
else:
pre_op = None
## remove text/tokens
hidden_modalities = ['text/tokens', 'text/filler', 'audio/silence']
modalities = [mod for mod in self.modalities if mod not in hidden_modalities]
transforms.append(ZNorm(modalities, key=self.speaker, data=self.data_train, relative2parent=self.relative2parent, pre=pre_op))
return Compose(transforms)
def get_transforms(self):
return Compose([RemoveJoints(self.mask, self.parents)])
def get_cluster(self):
return KMeans(variable_list=[self.output_modality], key=self.speaker, data=self.data_train, num_clusters=self.num_clusters, mask=self.mask, feats=self.feats)
def get_gt(self, path2h5):
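        # load the ground-truth output modality from the h5 file, reshape to (frames, 2, joints) and pin the root joint to (0, 0)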
Y, h5 = self.data.load(path2h5, self.output_modality)
feats_shape = int(self.data_shape[self.output_modality][-1]/2)
Y = Y[()].reshape(-1, 2, feats_shape)
Y[..., 0] = 0
h5.close()
return Y
def get_confidence_loss(self, batch, y, y_cap):
key = 'pose/confidence'
if key in batch:
confidence = self.transform_confidence(batch[key].to(self.device))
else:
return 0
confidence = confidence.view(*y.shape)
confidence_loss = self.confidence_loss(y, y_cap, confidence).mean()
return confidence_loss
def _update_labels(self, desc, style, kwargs_name):
if self.num_clusters is not None:
if isinstance(self.model, GAN):
model = self.model.G
else:
model = self.model
if kwargs_name is None:
kwargs_name = 'same'
## update only if labels_cap_soft is an attribute in the model
try:
if hasattr(model, 'labels_cap_soft'):
if desc == 'test':
self.labels_hist_tensor[kwargs_name][desc][style] = torch.cat([self.labels_hist_tensor[kwargs_name][desc][style], model.labels_cap_soft.squeeze(0).detach().cpu().double()], dim=0)
label = torch.argmax(model.labels_cap_soft.squeeze(0), dim=-1)
label = label.detach().cpu()
emb = torch.nn.Embedding(num_embeddings=self.num_clusters,
embedding_dim=self.num_clusters,
_weight=torch.eye(self.num_clusters))
self.labels_hist[kwargs_name][desc][style] += emb(label).sum(dim=0)
except:
pass
def _save_labels(self):
if self.num_clusters is not None:
speakers = self.data.speakers if self.speaker[0] == 'all' else self.speaker
labels_hist = {kwargs_name:{desc:{speakers[i]:self.labels_hist[kwargs_name][desc][i].numpy().tolist() for i in self.labels_hist[kwargs_name][desc]} for desc in ['test', 'train', 'dev']} for kwargs_name in self.labels_hist}
labels_hist_tensor = {kwargs_name:{desc:{speakers[i]:self.labels_hist_tensor[kwargs_name][desc][i].numpy() for i in self.labels_hist_tensor[kwargs_name][desc]} for desc in ['test', 'train', 'dev']} for kwargs_name in self.labels_hist_tensor}
hist_filename = self.book.name('histogram', 'json', self.book.save_dir)
json.dump(labels_hist, open(hist_filename, 'w'))
tensor_filename = self.book.name('style', 'pkl', self.book.save_dir)
pkl.dump(labels_hist_tensor, open(tensor_filename, 'wb'))
def metrics_init(self): ## metric objects
self.pck = self.get_pck()
self.l1 = self.get_l1()
self.vel_l1 = self.get_VelL1()
self.diversity = self.get_Diversity()
self.expressiveness = self.get_Expressiveness()
self.f1, self.f1_cluster = self.get_F1()
if not self.pretrained_model: ## if this is a pretrained model do not get self.IS to avoid a loop
self.IS = self.get_IS()
self.fid = self.get_FID()
self.w1 = self.get_W1()
self.metrics_objects = [self.pck, self.l1, self.vel_l1, self.diversity, self.expressiveness, self.f1, self.fid, self.w1]
if hasattr(self, 'IS'):
self.metrics_objects.append(self.IS)
def metrics_reset(self, **kwargs):
for obj in self.metrics_objects:
obj.reset(**kwargs)
@property
def metric_order(self): ## order of metrics to show while training
if self.metrics:
metric_order = ['pck', 'spatialNorm',
'diversity', 'diversity_gt', 'F1',
'FID', 'W1_vel', 'W1_acc',
'style_IS'
# 'style_IS_subset',
# 'style_F1', 'style_subset_F1',
# 'style_cce'
]
else:
metric_order = []
return metric_order
def get_metrics(self, desc):
metrics = {}
metrics_split = {}
for metric in self.metrics_objects:
avgs = metric.get_averages(desc)
if isinstance(avgs, tuple):
metrics.update(avgs[0])
if not metrics_split:
metrics_split = {kwargs_name:{speaker:{} for speaker in avgs[1][kwargs_name]} for kwargs_name in avgs[1]}
for kwargs_name in avgs[1]:
for speaker in avgs[1][kwargs_name]:
metrics_split[kwargs_name][speaker].update(avgs[1][kwargs_name][speaker])
else:
metrics.update(avgs)
return metrics, metrics_split
def _save_metrics(self, metrics, filename='metrics'):
metrics_filename = self.book.name(filename, 'json', self.book.save_dir)
json.dump(metrics, open(metrics_filename, 'w'))
def get_kwargs(self, batch, **kwargs_subset):
kwargs = {}
keys = ['text/token_count', 'text/token_duration', 'audio/silence', 'text/filler', 'pose/starts', 'pose/startsC']
for key in keys:
if key in batch:
kwargs[key] = batch[key].to(self.device)
## add speaker name
kwargs.update({'speaker':self.speaker})
## add current epoch
kwargs.update(kwargs_subset)
return kwargs
def update_kwargs(self, kwargs):
'''
Update kwargs for sample_loop
'''
yield kwargs, None
def start_exp(self):
self.book._start_log()
def finish_exp(self):
self.book._stop_log()
def _is_warmup(self, epoch, min_epoch):
return False
def mem_usage(self):
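        # parse nvidia-smi output; assumes the default table layout where line 8 reports "used MiB / total MiB" for the first GPU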
out = subprocess.check_output(['nvidia-smi'])
used = int(out.decode('utf-8').split('\n')[8].split('|')[2].strip().split('/')[0].strip()[:-3])
total = int(out.decode('utf-8').split('\n')[8].split('|')[2].strip().split('/')[1].strip()[:-3])
return used, total, (float(used)/total) * 100
def detach(self, *args):
for var in args:
if isinstance(var, list):
for va in var:
del va
elif isinstance(var, torch.Tensor):
del var
# for p in self.model.parameters():
# if p.grad is not None:
# del p.grad
#torch.cuda.empty_cache()
#used, total, percent = self.mem_usage()
#tqdm.write('{}/{}: {}%'.format(used, total, percent))
def train(self, exp_num):
## Wandb
if self.args.wandb:
self.wandb_init(job_type='train')
for epoch in tqdm(range(self.num_epochs), ncols=20):
train_loss, train_metrics, train_metrics_split = self.train_loop(self.data_train, 'train', epoch, num_iters=self.args.num_iters)
dev_loss, dev_metrics, dev_metrics_split = self.train_loop(self.data_dev, 'dev', num_iters=self.args.num_iters)
test_loss, test_metrics, test_metrics_split = self.train_loop(self.data_test, 'test', num_iters=self.args.num_iters)
if self.args.scheduler not in ['linear_decay']: ## update lr after each iteration if training bert
self.schedulers_step() ## Change the Learning Rate
## update the weights for data_train
if self.args.weighted:
## Normalize weights
max_W = 10
min_W = 0.1
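                # standardize the sampler weights to mean 1 and unit variance, then clamp them to [min_W, max_W]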
W_ = self.data_train.sampler.weights
W_ = (W_ - W_.mean())/W_.std() + 1
W_ = torch.min(torch.ones(1)[0].double()*max_W,
torch.max(torch.zeros(1)[0].double() + min_W, W_))
if torch.isnan(W_).any():
W_ = torch.ones_like(W_) ## reinit to ones if Weights suffer a large variation
self.data_train.sampler.weights = W_
W = self.data_train.sampler.weights
D_prob = self.model.D_prob if hasattr(self.model, 'D_prob') else 0
tqdm.write('W: {}/{}/{}/{}/{}'.format(W.mean(), W.std(), W.min(), W.max(), D_prob))
most_common = str(self.weight_counter.most_common()[:5])
least_common = str(self.weight_counter.most_common()[-5:])
tqdm.write('samples: {} -- {}'.format(most_common, least_common))
## save results
self.book.update_res({'train':train_loss,
'dev':dev_loss,
'test':test_loss})
## add metrics
self.book.update_res(train_metrics)
self.book.update_res(dev_metrics)
self.book.update_res(test_metrics)
self.book._save_res()
## update wandb
if self.args.wandb:
wandb.log(train_metrics, commit=False)
wandb.log(dev_metrics, commit=False)
wandb.log(test_metrics, commit=True)
## update tensorboard
if self.args.tb:
self.book.update_tb({'scalar':[[f'{self.args.cpk}/train', train_loss, epoch],
[f'{self.args.cpk}/dev', dev_loss, epoch],
[f'{self.args.cpk}/test', test_loss, epoch],
[f'{self.args.cpk}/pck_train',
train_metrics['train_pck'], epoch],
[f'{self.args.cpk}/pck_dev',
dev_metrics['dev_pck'], epoch],
[f'{self.args.cpk}/pck_test',
test_metrics['test_pck'],
epoch],
[f'{self.args.cpk}/train_spatialNorm',
train_metrics['train_spatialNorm'], epoch],
[f'{self.args.cpk}/dev_spatialNorm',
dev_metrics['dev_spatialNorm'], epoch],
[f'{self.args.cpk}/test_spatialNorm',
test_metrics['test_spatialNorm'], epoch]
]})
#'histogram':[[f'{self.args.cpk}/'+name,
#param.clone().cpu().detach().numpy(), epoch]
# for name, param in model.named_parameters()]})
## print results
self.book.print_res(epoch,
key_order=['train', 'dev', 'test'],
metric_order=self.metric_order,
exp=exp_num,
lr=self.schedulers[0].get_last_lr())#self.G_optim.state_dict()['param_groups'][0]['lr'])
# warmup = self._is_warmup(epoch, np.ceil(len(self.data_train)/self.batch_size))
if self.book.stop_training(self.model, epoch):
if self.args.wandb:
wandb.finish()
break
if self.args.num_iters > 0:
#get the best model
self.book._load_model(self.model, map_location=self.device)
#calculate test loss for the complete dataset
test_loss, test_metrics, test_metrics_split = self.train_loop(self.data_test, 'test', 0)
## save results
self.book.update_res({'train':train_loss,
'dev':dev_loss,
'test':test_loss})
## add metrics
self.book.update_res(train_metrics)
self.book.update_res(dev_metrics)
self.book.update_res(test_metrics)
self.book._save_res()
print('Final Results')
self.book.print_res(epoch,
key_order=['train', 'dev', 'test'],
metric_order=self.metric_order,
exp=exp_num,
lr=self.schedulers[0].get_last_lr())#self.G_optim.state_dict()['param_groups'][0]['lr'])
def train_loop(self, data, desc, epoch=0, num_iters=0):
## init
self.metrics_reset(description=desc)
self.running_loss_init()
if desc == 'train':
self.model.train(True)
else:
self.model.eval()
bar_format = '{percentage:3.0f}%[{elapsed}<{remaining}]' + ':{desc}'
bar_format = '{desc}:' +'{n_fmt}/{total_fmt}[{elapsed}<{remaining}]'
Tqdm = tqdm(data, desc=self.tqdm_desc(desc), leave=False, ncols=20, bar_format=bar_format)
for count, batch in enumerate(Tqdm):
self.zero_grad()
## update weight counter
if desc == 'train':
self.weight_counter.update(batch['idx'].numpy())
## Transform batch before using in the model
x, y_, y = self.get_processed_batch(batch)
## get kwargs like style
kwargs = self.get_kwargs(batch, epoch=epoch, sample_flag=0, description=desc)
## add noise to output to improve robustness of the model
noise = torch.randn_like(y) * self.args.noise if self.args.noise > 0 else 0
y_cap, internal_losses, args = self.forward_pass(desc, x, y+noise, **kwargs)
args = args[0] if len(args)>0 else {} ## dictionary of args returned by model
## check if there are weights in *args
if args.get('W') is not None and desc=='train' and self.args.weighted > 0:
W = args['W']
W_min = 0.1
self.data_train.sampler.weights[batch['idx']] = torch.max(torch.zeros(1)[0].double() + W_min, W.cpu()) ## clip the weights to positive values
## Get mask to calculate the loss function
src_mask_loss = args.get('src_mask_loss')
src_mask_loss = src_mask_loss.unsqueeze(-1) if src_mask_loss is not None else torch.ones_like(y[:, :, 0:1])
## get confidence values and
## calculate confidence loss
confidence_loss = self.get_confidence_loss(batch, y, y_cap)
loss = self.calculate_loss(x, (y+noise)*src_mask_loss, y_cap*src_mask_loss, internal_losses)
## update tqdm
losses = [l/c for l,c in zip(self.running_loss, self.running_count)] + [confidence_loss]
Tqdm.set_description(self.tqdm_desc(desc, losses))
Tqdm.refresh()
if np.isnan(losses[0]):
pdb.set_trace()
if desc == 'train':
self.optimize(loss + confidence_loss)
## Detach Variables to avoid memory leaks
#x = x.detach()
#y = y.detach()
#loss = loss.detach()
#y_cap = y_cap.detach()
## Evalutation
y_cap = y_cap.to('cpu')
src_mask_loss = src_mask_loss.to('cpu')
with torch.no_grad():
self.calculate_metrics(y_cap*src_mask_loss, y_*src_mask_loss, 'same', **{**kwargs, **args})
self.detach(x, y, loss, y_cap, internal_losses)
if count>=self.args.debug and self.args.debug: ## debugging by overfitting
break
## if self.args.num_iters > 0, break training
if count >= num_iters and num_iters > 0 and desc != 'train':
Tqdm.close()
break
metrics = {}
if self.metrics:
metrics, metrics_split = self.get_metrics(desc)
else:
metrics, metrics_split = {}, {}
return losses[0], metrics, metrics_split
#return sum(losses), metrics
def weight_estimate_loop(self, data, desc, epoch=0, num_iters=0):
self.model.eval()
bar_format = '{percentage:3.0f}%[{elapsed}<{remaining}]' + ':{desc}'
bar_format = '{desc}:' +'{n_fmt}/{total_fmt}[{elapsed}<{remaining}]'
Tqdm = tqdm(data, desc='update weights: '+self.tqdm_desc(desc), leave=False, ncols=20, bar_format=bar_format)
W = []
for count, batch in enumerate(Tqdm):
## Transform batch before using in the model
x, y_, y = self.get_processed_batch(batch)
## get kwargs like style
kwargs = self.get_kwargs(batch, epoch=0, sample_flag=0, description=desc)
w = self.forward_pass_weight(desc, x, y, **kwargs)
W.append(w)
if count>=self.args.debug and self.args.debug: ## debugging by overfitting
break
## if self.args.num_iters > 0, break training
if count >= num_iters and num_iters > 0:
break
## update the weights for data sampler
W = torch.cat(W)
return W
def sample(self, exp_num):
## Wandb
if self.args.wandb:
if self.sample_all_styles:
self.wandb_init(job_type='sample_all_styles')
else:
self.wandb_init(job_type='sample')
## Create Output Directory
self.dir_name = self.book.name.dir(self.args.save_dir)
## Load best Model
self.book._load_model(self.model, map_location=self.device)
train_loss, train_metrics, train_metrics_split = self.sample_loop(self.data_train.dataset.datasets, 'train')
dev_loss, dev_metrics, dev_metrics_split = self.sample_loop(self.data_dev.dataset.datasets, 'dev')
test_loss, test_metrics, test_metrics_split = self.sample_loop(self.data_test.dataset.datasets, 'test')
if self.sample_all_styles == 0: ## if all styles are sampled, then the results change, hence we don't update it in this case
## Save labels histogram
self._save_labels()
## Save sample time metrics
self._save_metrics(test_metrics_split, 'metrics')
self._save_metrics(test_metrics, 'cummMetrics')
print('Sampled- Train:{:.4f}/{:.4f}, '.format(train_loss, train_metrics['train_pck']) + \
'Dev:{:.4f}/{:.4f}, '.format(dev_loss, dev_metrics['dev_pck']) + \
'Test:{:.4f}/{:.4f}'.format(test_loss, test_metrics['test_pck']))
## print results
self.book.print_res(epoch=0,
key_order=['train', 'dev', 'test'],
metric_order=self.metric_order,
exp=exp_num,
lr=0)
## Wandb
if self.args.wandb:
if len(self.speaker) > 1:
wandb.log(test_metrics_split, commit=False)
wandb.log(test_metrics, commit=True)
wandb.finish()
# self.book.print_res(epoch=0, key_order=['train', 'dev', 'test',
# 'train_pck', 'dev_pck', 'test_pck',
# 'train_VelL1', 'dev_VelL1', 'test_VelL1'],
# exp=exp_num, lr=0)
def sample_loop(self, data, desc):
self.metrics_reset(description=desc)
self.running_loss_init()
self.model.eval()
intervals = []
start = []
y_outs = []
y_animates = []
filenames = []
keys = []
## collate function
#if not self.repeat_text:
if self.text_in_modalities:
pad_keys = ['text/w2v', 'text/bert', 'text/token_duration', 'text/tokens']
collate_fn = partial(collate_fn_pad, pad_key=pad_keys, dim=0)
else:
collate_fn = None
len_data = len(data)
bar_format = '{percentage:3.0f}%|' + '|' + ':{desc}'
bar_format = '{percentage:3.0f}%[{elapsed}<{remaining}]' + ':{desc}'
Tqdm = tqdm(data, desc=self.tqdm_desc(desc), leave=False, ncols=20, bar_format=bar_format)
for count, loader in enumerate(Tqdm):
### load ground truth
Y = self.get_gt(loader.path2h5)
if len(loader) > 0:
loader = DataLoader(loader, len(loader), shuffle=False, collate_fn=collate_fn)
Y_cap = []
for batch in loader:
with torch.no_grad():
## Transform batch before using in the model
x, y_, y = self.get_processed_batch(batch)
kwargs = self.get_kwargs(batch, epoch=0, sample_flag=1, description=desc)
batch_size = y.shape[0]
try:
X_ = [x_.view(1, -1, x_.shape[-1]) for x_ in x[:len(self.input_modalities)]]
except Exception:
pdb.set_trace()
for x_ in x[len(self.input_modalities):len(self.input_modalities) + 1]: ## hardcoded for auxillary labels
X_.append(x_.view(1, -1))
#if len(x) > len(self.input_modalities):
# X_.append(x[-1].view(1, -1))
y = y.reshape(1, -1, y.shape[-1])
X_.append(x[-1]) #add our the intervals df
## based on kwargs_batch_size, repeat x, and y
#y = torch.cat([y]*kwargs_batch_size, dim=0)
#x = [torch.cat([x_]*kwargs_batch_size, dim=0) for x_ in x]
for kwargs, kwargs_name in self.update_kwargs(kwargs): ## update kwargs like style
with torch.no_grad():
## Forward pass
y_cap, internal_losses, args = self.forward_pass(desc, X_, y, **kwargs)
args = args[0] if len(args) > 0 else {}
## update labels histogram ## only update when the speaker is sampled with it's style
self._update_labels(desc=desc, style=int(batch['style'][0, 0].item()), kwargs_name=kwargs_name)
## get confidence loss
confidence_loss = self.get_confidence_loss(batch, y, y_cap)
loss = self.calculate_loss(X_, y, y_cap, internal_losses)
## Calculates PCK and reinserts data removed before training
y_cap = y_cap.to('cpu')
with torch.no_grad():
y_cap = y_cap.view(batch_size, -1, y_cap.shape[-1])
y_cap = self.calculate_metrics(y_cap, y_, kwargs_name, **{**kwargs, **args})
Y_cap.append(y_cap)
## update tqdm
losses = [l/c for l,c in zip(self.running_loss, self.running_count)] + [confidence_loss]
Tqdm.set_description(self.tqdm_desc(desc, losses))
Tqdm.refresh()
self.detach(x, y, y_cap, loss, internal_losses)
if Y_cap:
intervals.append(batch['meta']['interval_id'][0])
start.append(torch.Tensor([0]).to(torch.double))
y_outs.append(torch.cat(Y_cap, dim=0))
y_animates.append([torch.cat(Y_cap, dim=0), Y])
dir_name = 'keypoints' if kwargs_name is None else 'keypoints_{}'.format(kwargs_name)
filenames.append((Path(self.dir_name)/dir_name/'{}/{}/{}.h5'.format(desc,
self.data.getSpeaker(intervals[-1]),
intervals[-1])).as_posix())
keys.append(self.output_modality)
Y_cap = []
#keys += [self.output_modality] * len(intervals)
## Save Keypoints
if (count + 1) % 100 == 0 or count == len_data - 1: ## save files every 100 batches to prevent memory errors
parallel(self.data.modality_classes[self.output_modality].append, # fn
-1, # n_jobs
filenames, keys, y_outs) # fn_args
intervals = []
start = []
y_outs = []
y_animates = []
filenames = []
keys = []
if self.metrics:
metrics, metrics_split = self.get_metrics(desc)
else:
metrics, metrics_split = {}, {}
return losses[0], metrics, metrics_split
def get_processed_batch(self, batch):
batch = self.pre(batch)
x = [batch[mod] for mod in self.input_modalities]
y_ = batch[self.output_modality]
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the first joint
y = self.transform(y)
return x, y_, y
def calculate_metrics(self, y_cap, y_, kwargs_name, **kwargs):
if kwargs_name is None:
kwargs_name = 'same'
#feats_shape = int(self.data_shape[self.output_modality][-1]/2)
if 'style' in kwargs:
idx = int(kwargs['style'].view(-1)[0].detach().cpu().item())
style_vector = kwargs['style'].detach().cpu()
else:
idx = 0
style_vector = torch.zeros(y_cap.shape[0], y_cap.shape[1]).long()
try:
self.IS(y_cap, style_vector, self.mask, idx=idx, kwargs_name=kwargs_name)
except:
pass
## Re-insert Joints
y_cap = self.transform(y_cap, inv=True, batch_gt=y_)
## calculate L1
self.l1(y_cap, y_, self.mask, idx=idx, kwargs_name=kwargs_name)
self.vel_l1(y_cap, y_, self.mask, idx=idx, kwargs_name=kwargs_name)
self.fid(y_cap, y_, self.mask, idx=idx, kwargs_name=kwargs_name)
## undo normalization
y_cap = self.pre({self.output_modality:y_cap}, inv=True)[self.output_modality]
y_cap = y_cap.view(y_cap.shape[0], y_cap.shape[1], 2, -1) ## (B, T, 2, feats)
y_ = self.pre({self.output_modality:y_}, inv=True)[self.output_modality]
y_ = y_.view(y_.shape[0], y_.shape[1], 2, -1) ## (B, T, 2, feats)
## calculate wasserstein_distance-1 for avg velocity and accelaration
self.w1(y_cap, y_, self.mask, idx=idx, kwargs_name=kwargs_name)
## Hardcode root as (0,0) for eternity for y and gt
y_cap = y_cap.view(-1, 2, y_cap.shape[-1]) ## (BxT, 2, feats)
y_cap[..., 0] = 0 ## Hardcode to have root as (0,0) for eternity
y_cap_out = y_cap
y_gt = y_.view(-1, 2, y_cap.shape[-1])
y_gt[..., 0] = 0 ## Hardcode to have root as (0,0) for eternity
## calculate and add pck to the average meter
self.pck(y_cap, y_gt, self.mask, idx=idx, kwargs_name=kwargs_name)
## calculate STEEr, SEA and MoCA-{self.num_clusters} scores
y_cap = self.transform(y_cap.view(1, y_cap.shape[0], -1), save_insert=False)
y_gt = self.transform(y_gt.view(1, y_gt.shape[0], -1), save_insert=False)
self.diversity(y_cap.squeeze(0), y_gt.squeeze(0), idx=idx, kwargs_name=kwargs_name)
self.expressiveness(y_cap.squeeze(0), y_gt.squeeze(0), idx=idx, kwargs_name=kwargs_name)
self.f1(self.f1_cluster(y_cap), self.f1_cluster(y_gt), idx=idx, kwargs_name=kwargs_name)
return y_cap_out
def get_model(self):
raise NotImplementedError
def update_modelKwargs(self):
raise NotImplementedError
def update_model(self):
pass
# def debug_model(self, model):
# try:
# model()
# except RuntimeError as e:
# if 'out of memory' in str(e):
# print('| WARNING: ran out of memory, retrying batch',sys.stdout)
# sys.stdout.flush()
# for p in model.parameters():
# if p.grad is not None:
# del p.grad # free some memory
# torch.cuda.empty_cache()
# y= model()
# else:
# raise e
def running_loss_init(self):
raise NotImplementedError
def tqdm_desc(self):
raise NotImplementedError
def zero_grad(self):
raise NotImplementedError
def forward_pass(self):
raise NotImplementedError
def calculate_loss(self):
raise NotImplementedError
def optimize(self, loss):
if self.args.scheduler in ['linear_decay']:
self.schedulers_step()
def schedulers_step(self):
for sched in self.schedulers:
sched.step()
class Trainer(TrainerBase):
'''
Single modality Trainer with early fusion
'''
def __init__(self, args, args_subset, args_dict_update={}):
super(Trainer, self).__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def get_model(self):
return eval(self.args.model)(**self.modelKwargs)
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'shape':self.data_shape})
def running_loss_init(self):
self.running_loss = [0]
self.running_count = [1e-10]
def tqdm_desc(self, desc, losses=[]):
if losses:
return desc+' {:.4f} H:{:.4f}'.format(*losses)
else:
return desc+' {:.4f} H:{:.4f}'.format(0, 0)
def zero_grad(self):
self.model.zero_grad()
self.G_optim.zero_grad()
if self.D_optim is not None:
self.D_optim.zero_grad()
def forward_pass(self, desc, x, y, **kwargs):
x = torch.cat(x, dim=-1) ## Early Fusion
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y)
return y_cap, internal_losses, args
def calculate_loss(self, x, y, y_cap, internal_losses):
loss = self.criterion(y_cap, y)
for i_loss in internal_losses:
loss += i_loss
self.running_loss[0] += loss.item() * y_cap.shape[0]
self.running_count[0] += y_cap.shape[0]
return loss
def optimize(self, loss):
loss.backward()
self.G_optim.step()
super().optimize(loss)
class TrainerLate(Trainer):
'''
the inputs are not concatenated, passed as a list to the model
'''
def __init__(self, args, args_subset, args_dict_update={}):
super(TrainerLate, self).__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def forward_pass(self, desc, x, y, **kwargs):
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, **kwargs)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, **kwargs)
return y_cap, internal_losses, args
TrainerJointLate = TrainerLate
TrainerJoint = Trainer
class TrainerLateGest(TrainerLate):
'''
the inputs are not concatenated, passed as a list to the model
'''
def __init__(self, args, args_subset, args_dict_update={}):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def get_criterion(self):
criterion = eval('torch.nn.' + self.args.loss)(**self.args.lossKwargs)
def crit(y, y_cap):
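            # add a first-difference (velocity) term, weighted by 0.6, on top of the base loss over positions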
y_vel = y[:,1:,:] - y[:,:-1,:]
y_cap_vel = y_cap[:,1:,:] - y_cap[:,:-1,:]
return criterion(y, y_cap) + 0.6 * criterion(y_vel, y_cap_vel)
return crit
def forward_pass(self, desc, x, y, **kwargs):
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, **kwargs)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, **kwargs)
return y_cap, internal_losses, args
class TrainerGAN(TrainerBase):
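    # Adversarial trainer: builds a generator/discriminator pair wrapped by GAN (or GANWeighted) and alternates their updates; inputs are early-fused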
def __init__(self, args, args_subset, args_dict_update):
super(TrainerGAN, self).__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def get_model(self):
## Generator
G = eval(self.args.model)(**self.modelKwargs)
## Discriminator
if self.args.discriminator is None: ## infer the name of the discriminator
D_modelname = '_'.join(self.args.model.split('_')[:-1] + ['D'])
else:
D_modelname = self.args.discriminator
## GAN Wrapper
D_modelKwargs = {}
if self.args.weighted:
GANWrapper = GANWeighted
D_modelKwargs.update({'out_shape':2})
else:
GANWrapper = GAN
### add input_shape for self.args.joint
input_shape = 0
if self.args.joint:
for mod in self.input_modalities:
input_shape += self.data_shape[mod][-1]
D_modelKwargs.update({'in_channels':self.data_shape[self.output_modality][-1]-2*len(self.mask) + input_shape})
if 'p' in self.modelKwargs: ## get the dropout parameter in the discrimiator as well
D_modelKwargs.update({'p':self.args.modelKwargs['p']})
try:
D = eval(D_modelname)(**D_modelKwargs)
except:
print('{} not defined, hence defaulting to Speech2Gesture_D'.format(D_modelname))
D = eval('Speech2Gesture_D')(**D_modelKwargs)
## GAN
model = GANWrapper(G, D, lr=self.args.lr, criterion=self.args.loss, optim=self.args.optim,
dg_iter_ratio=self.args.dg_iter_ratio, lambda_gan=self.args.lambda_gan,
lambda_D=self.args.lambda_D, joint=self.args.joint, input_modalities=self.input_modalities,
update_D_prob_flag=self.args.update_D_prob_flag, no_grad=self.args.no_grad)
return model
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'shape':self.data_shape})
def running_loss_init(self):
self.running_loss = [0]*4
self.running_count = [1e-10]*4
def tqdm_desc(self, desc, losses=[]):
if losses:
return desc+' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} H:{:.4f}'.format(*losses)
else:
return desc+' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} H:{:.4f}'.format(0, 0, 0, 0, 0)
def zero_grad(self):
self.model.zero_grad()
self.G_optim.zero_grad()
self.D_optim.zero_grad()
def forward_pass(self, desc, x, y, **kwargs):
x = torch.cat(x, dim=-1) ## Early Fusion
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, **kwargs)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, **kwargs)
return y_cap, internal_losses, args
def calculate_loss(self, x, y, y_cap, internal_losses):
loss = 0
for i, i_loss in enumerate(internal_losses):
if i < 2:
if self.model.G_flag: ## TODO
self.running_loss[i] += i_loss.item() * y_cap.shape[0]
self.running_count[i] += y_cap.shape[0]
else:
self.running_loss[i+2] += i_loss.item() * y_cap.shape[0]
self.running_count[i+2] += y_cap.shape[0]
loss += i_loss
return loss
def get_norm(self, model):
params = []
for param in model.parameters():
params.append(param.grad.view(-1))
return torch.norm(torch.cat(params))
def optimize(self, loss):
loss.backward()
if self.model.G_flag: ## TODO
torch.nn.utils.clip_grad_norm_(self.model.G.parameters(), 1) ## TODO
self.G_optim.step() ## TODO
else:
torch.nn.utils.clip_grad_norm_(self.model.D.parameters(), 1) ## TODO
self.D_optim.step() ## TODO
super().optimize(loss)
class TrainerLateGAN(TrainerGAN):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def forward_pass_weight(self, desc, x, y, **kwargs):
w = self.model.estimate_weights(x, y, input_modalities=self.input_modalities, **kwargs)
return w
def forward_pass(self, desc, x, y, **kwargs):
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, desc=desc, **kwargs)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, desc=desc, **kwargs)
return y_cap, internal_losses, args
class TrainerLateTransformerGAN(TrainerGAN):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def get_processed_batch(self, batch):
batch = self.pre(batch)
text_modalities = self.input_modalities
#text_modalities.append('text/token_count')
x = [batch[mod] for mod in text_modalities]
y_ = batch[self.output_modality]
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the first joint
y = self.transform(y)
return x, y_, y
def forward_pass(self, desc, x, y, **kwargs):
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, desc=desc, **kwargs)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, input_modalities=self.input_modalities, desc=desc, **kwargs)
return y_cap, internal_losses, args
def get_kwargs(self, batch, **kwargs_subset):
kwargs = super().get_kwargs(batch, **kwargs_subset)
return kwargs
class TrainerNoiseOnly(Trainer):
'''
Trainer with Noise as input
'''
def __init__(self, args, args_subset, args_dict_update={}):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def get_processed_batch(self, batch):
batch = self.pre(batch)
x = [torch.randn_like(batch[mod]) for mod in self.input_modalities]
y_ = batch[self.output_modality]
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the masked joints
y = self.transform(y)
return x, y_, y
class TrainerLateCluster(TrainerLate):
def __init__(self, args, args_subset, args_dict_update={}):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]*2
self.running_count = [1e-10]*2
self.transform_cluster = self.get_transforms()
def running_loss_init(self):
self.running_loss = [0]*3
self.running_count = [1e-10]*3
def tqdm_desc(self, desc, losses=[]):
if losses:
return desc+' pose:{:.4f} label:{:.4f} H:{:.4f}'.format(*losses)
else:
return desc+' pose:{:.4f} label:{:.4f} H:{:.4f}'.format(0, 0, 0)
def calculate_loss(self, x, y, y_cap, internal_losses):
loss = self.criterion(y_cap, y)
self.running_loss[0] += loss.item() * y_cap.shape[0]
self.running_count[0] += y_cap.shape[0]
for i, i_loss in enumerate(internal_losses):
self.running_loss[i+1] += i_loss.item() * y_cap.shape[0]
self.running_count[i+1] += y_cap.shape[0]
loss += i_loss
return loss
# def update_modelKwargs(self):
# modelKwargs = {}
# modelKwargs.update(self.args.modelKwargs)
# modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
# 'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
# 'num_clusters':self.num_clusters,
# 'cluster':self.cluster,
# 'shape':self.data_shape})
# def get_model(self):
# return eval(self.args.model)(**modelKwargs)
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'num_clusters':self.num_clusters,
'cluster':self.cluster,
'shape':self.data_shape})
def get_cluster(self):
return KMeans(variable_list=[self.output_modality], key=self.speaker, data=self.data_train, num_clusters=self.num_clusters, mask=self.mask, feats=self.feats)
def get_processed_batch(self, batch):
## Get cluster Labels
self.cluster.update(batch)
labels = self.cluster(self.transform_cluster(batch[self.output_modality]))
batch = self.pre(batch)
x = [batch[mod] for mod in self.input_modalities]
y_ = batch[self.output_modality]
## Append cluster labels
x.append(labels)
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the masked joints
y = self.transform(y)
return x, y_, y
TrainerJointLateCluster = TrainerLateCluster
class TrainerLateClusterGAN(TrainerLateGAN):
def __init__(self, args, args_subset, args_dict_update):
super(TrainerGAN, self).__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
self.transform_cluster = self.get_transforms()
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'num_clusters':self.num_clusters,
'cluster':self.cluster,
'shape':self.data_shape})
@property
def loss_kinds(self):
return ['pose', 'G_gan',
'real_D', 'fake_D',
'cntr', 'H', 'mem%']
def running_loss_init(self):
self.running_loss = [0]*len(self.loss_kinds)
self.running_count = [1e-10]*len(self.loss_kinds)
def tqdm_desc(self, desc, losses=[]):
loss_str = ''.join([' {}'.format(l) + ':{:.3f}' for l in self.loss_kinds])
if not losses:
losses = [0]* len(self.running_loss)
if self.args.mem_usage:
losses[-1] = self.mem_usage()[-1]
else:
if self.args.mem_usage:
losses[-2] = self.mem_usage()[-1]
return desc + loss_str.format(*losses)
# def running_loss_init(self):
# self.running_loss = [0]*5
# self.running_count = [1e-10]*5
# def tqdm_desc(self, desc, losses=[]):
# if losses:
# return desc+' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} label:{:.4f} H:{:.4f}'.format(*losses)
# else:
# return desc+' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} label:{:.4f} H:{:.4f}'.format(0, 0, 0, 0, 0, 0)
def calculate_loss(self, x, y, y_cap, internal_losses):
loss = 0
for i, i_loss in enumerate(internal_losses):
if i < 2:
if self.model.G_flag: ## TODO
self.running_loss[i] += i_loss.item() * y_cap.shape[0]
self.running_count[i] += y_cap.shape[0]
else:
if not self.model.fake_flag and i == 1:
pass
else:
self.running_loss[i+2] += i_loss.item() * y_cap.shape[0]
self.running_count[i+2] += y_cap.shape[0]
else:
self.running_loss[i+2] = i_loss.item() * y_cap.shape[0]
self.running_count[i+2] += y_cap.shape[0]
loss += i_loss
return loss
def get_cluster(self):
return KMeans(variable_list=[self.output_modality], key=self.speaker, data=self.data_train, num_clusters=self.num_clusters, mask=self.mask, feats=self.feats)
def get_processed_batch(self, batch):
## Get cluster Labels
self.cluster.update(batch)
labels = self.cluster(self.transform_cluster(batch[self.output_modality]))
batch = self.pre(batch)
x = [batch[mod] for mod in self.input_modalities]
y_ = batch[self.output_modality]
## Append cluster labels
x.append(labels)
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the masked joints
y = self.transform(y)
return x, y_, y
TrainerJointLateClusterGAN = TrainerLateClusterGAN
class TrainerNN(Trainer):
def __init__(self, args, args_subset, args_dict_update={}):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]*2
self.running_count = [1e-10]*2
self.audio, self.pose = self.get_train_data()
def get_model(self):
modelKwargs = {}
modelKwargs.update(self.args.modelKwargs)
modelKwargs.update({'shape':self.data_shape})
return eval(self.args.model)(**modelKwargs)
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'shape':self.data_shape})
def get_train_data(self):
audio, pose = [], []
for batch in self.data_train:
x, y_, y = self.get_processed_batch(batch)
audio.append(x[0].mean(dim=1))
pose.append(y)
return torch.cat(audio, dim=0), torch.cat(pose, dim=0)
def forward_pass(self, desc, x, y, **kwargs):
if desc == 'train' and self.model.training:
y_cap, internal_losses, *args = self.model(x, y, audio=self.audio, pose=self.pose)
else:
with torch.no_grad():
y_cap, internal_losses, *args = self.model(x, y, audio=self.audio, pose=self.pose)
return y_cap, internal_losses, args
def optimize(self, loss):
pass
TrainerRand = TrainerNN
TrainerMean = TrainerNN
class TrainerStyleClassifier(Trainer):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'in_channels':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'shape':self.data_shape,
'style_dict':self.style_dict})
def get_processed_batch(self, batch):
batch = self.pre(batch)
x = [batch[mod] for mod in self.input_modalities]
y_ = batch['style'].long()[:,0]
x = [x_.to(self.device) for x_ in x]
y = y_.to(self.device)
## Remove the first joint
x = [self.transform(x_) for x_ in x]
return x, y_, y
def calculate_metrics(self, y_cap, y_, kwargs_name, **kwargs):
return y_cap
class TrainerLateClusterStyleGAN(TrainerLateClusterGAN):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
def update_modelKwargs(self):
self.modelKwargs.update(self.args.modelKwargs)
self.modelKwargs.update({'time_steps':self.data_shape[self.input_modalities[0]][0],
'out_feats':self.data_shape[self.output_modality][-1]-2*len(self.mask),
'num_clusters':self.num_clusters,
'cluster':self.cluster,
'shape':self.data_shape,
'style_dict':self.style_dict,
'style_dim':self.style_dim})
def get_kwargs(self, batch, **kwargs_subset):
kwargs = super().get_kwargs(batch, **kwargs_subset)
## Style Vector
kwargs.update({'style':batch['style'].long().to(self.device)})
return kwargs
def update_kwargs(self, kwargs):
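        # yield the speaker's own style first, then either every other speaker's style (sample_all_styles) or a single shifted style labelled 'style'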
if self.sample_all_styles:
style_id = kwargs['style'].view(-1)[0].cpu().item()
kwargs_list = [kwargs.copy()]
kwargs_names = [None]
for style_shift in range(1, self.num_styles):
kwargs_temp = kwargs.copy()
kwargs_temp['style'] = (kwargs_temp['style'] + style_shift) % self.num_styles
kwargs_list.append(kwargs_temp)
style_shift_id = (style_id + style_shift) % self.num_styles
kwargs_names.append('{}_{}'.format(self.speaker[style_id], self.speaker[style_shift_id]))
#kwargs_names = [None, 'style']
else:
kwargs_list = [kwargs.copy()]
kwargs['style'] = (kwargs['style'] + 1) % self.num_styles
kwargs_list.append(kwargs)
kwargs_names = [None, 'style']
for kwargs_, kwargs_name in zip(kwargs_list, kwargs_names):
yield kwargs_, kwargs_name
@property
def loss_kinds(self):
return ['pose', 'G_gan',
'real_D', 'fake_D',
'label',
'id_in', 'id_out',
'H']
def running_loss_init(self):
self.running_loss = [0]*len(self.loss_kinds)
self.running_count = [1e-10]*len(self.loss_kinds)
def tqdm_desc(self, desc, losses=[]):
loss_str = ''.join([' {}'.format(l) + ':{:.3f}' for l in self.loss_kinds])
#loss_str = ' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} label:{:.4f} H:{:.4f}'
if not losses:
losses = [0]* len(self.running_loss)
return desc + loss_str.format(*losses)
class TrainerLateClusterContrastiveGAN(TrainerLateClusterGAN):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
self.transforms_contrastive.transforms[0].znorm = self.pre.transforms[-1]
def get_contrastive_transforms(self):
skel = self.data.modality_classes[self.output_modality]
if 'translate_limits' in self.modelKwargs:
max = self.modelKwargs['translate_limits']
else:
max = [50, 50]
transforms = [RandomTranslate(max=max, mask=self.mask, skel=skel,
znorm=None, output_modality=self.output_modality)]
return Compose(transforms)
def update_modelKwargs(self):
super().update_modelKwargs()
self.transforms_contrastive = self.get_contrastive_transforms()
self.modelKwargs.update({'transforms':self.transforms_contrastive})
self.modelKwargs.update({'num_batches':len(self.data_train)})
# @property
# def loss_kinds(self):
# return ['pose', 'G_gan',
# 'real_D', 'fake_D',
# 'cntr', 'H', 'mem%']
# def running_loss_init(self):
# self.running_loss = [0]*len(self.loss_kinds)
# self.running_count = [1e-10]*len(self.loss_kinds)
# def tqdm_desc(self, desc, losses=[]):
# loss_str = ''.join([' {}'.format(l) + ':{:.3f}' for l in self.loss_kinds])
# #loss_str = ' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} label:{:.4f} H:{:.4f}'
# if not losses:
# losses = [0]* len(self.running_loss)
# losses[-1] = self.mem_usage()[-1]
# else:
# losses[-2] = self.mem_usage()[-1]
# return desc + loss_str.format(*losses)
def calculate_metrics(self, y_cap, y_, kwargs_name, **kwargs):
y_cap_out = super().calculate_metrics(y_cap, y_, kwargs_name, **kwargs)
if 'z' in kwargs:
z = kwargs['z'].cpu().transpose(-2,-1).numpy()
if kwargs['description'] == 'train':
self.iou.metric.cluster_z.update(z) ## train Kmeans
p = self.pre({self.output_modality:y_}, inv=True)[self.output_modality]
p = self.transform_cluster(p)
self.iou(z, p) ## calculate_metrics
z = torch.from_numpy(z).double().reshape(-1, z.shape[-1])
label = self.cluster_p(p).view(-1)
if True:#kwargs['sample_flag']:
if kwargs['description'] == 'train':
self.knn.metric.update(z, label)
for kwargs_name in self.knn.metrics:
for sp in self.knn.metrics[kwargs_name]:
sp.feature_bank = self.knn.metric.feature_bank
sp.feature_labels = self.knn.metric.feature_labels
#self.knn(z, label)
else:
self.knn(z, label)
return y_cap_out
def get_cluster_p(self):
return KMeans(variable_list=[self.output_modality], key=self.speaker, data=self.data_train, num_clusters=8, mask=self.mask, feats=['pose', 'velocity', 'speed'])
def metrics_init(self):
super().metrics_init()
## Get metric_init and add it to metrics_object
self.cluster_p = self.get_cluster_p()
## Cluster IOU
self.iou = self.Stack(evaluation.IOU(num_clusters=8, cluster_p=self.cluster_p))
for kwargs_name in self.iou.metrics:
for sp in self.iou.metrics[kwargs_name]:
sp.cluster_z = self.iou.metric.cluster_z
self.metrics_objects.append(self.iou)
## KNN Monitor
self.knn = self.Stack(evaluation.KNNMonitor(num_clusters=8))
self.metrics_objects.append(self.knn)
@property
def metric_order(self):
metric_order = super().metric_order + ['cluster_IOU', 'knn_monitor']
return metric_order
def update_model(self):
self.transform_cluster = self.get_transforms()
## Run one forward pass to initialize the MLPs for patchwise loss
with torch.no_grad():
## Transform batch before using in the model
for batch in self.data_train:
break
x, y_, y = self.get_processed_batch(batch)
## get kwargs like style
kwargs = self.get_kwargs(batch, epoch=0, sample_flag=0, description='train')
## add noise to output to improve robustness of the model
noise = torch.randn_like(y) * self.args.noise if self.args.noise > 0 else 0
y_cap, internal_losses, args = self.forward_pass('train', x, y+noise, **kwargs)
TrainerJointLateClusterContrastiveGAN = TrainerLateClusterContrastiveGAN
class TrainerLateClusterContrastiveDTWGAN(TrainerLateClusterGAN):
def __init__(self, args, args_subset, args_dict_update):
super().__init__(args, args_subset, args_dict_update)
self.running_loss = [0]
self.running_count = [1e-10]
self.transforms_contrastive.transforms[0].znorm = self.pre.transforms[-1]
def get_contrastive_transforms(self):
skel = self.data.modality_classes[self.output_modality]
if 'translate_limits' in self.modelKwargs:
max = self.modelKwargs['translate_limits']
else:
max = [50, 50]
transforms = [RandomTranslate(max=max, mask=self.mask, skel=skel,
znorm=None, output_modality=self.output_modality)]
return Compose(transforms)
def update_modelKwargs(self):
super().update_modelKwargs()
self.transforms_contrastive = self.get_contrastive_transforms()
self.modelKwargs.update({'input_modalities': self.input_modalities,
'transforms':self.transforms_contrastive,
'num_batches':len(self.data_train),
'DTW': self.args.DTW
})
def get_processed_batch(self, batch):
## Get cluster Labels
self.cluster.update(batch)
labels = self.cluster(self.transform_cluster(batch[self.output_modality]))
batch = self.pre(batch)
x = [batch[mod] for mod in self.input_modalities]
y_ = batch[self.output_modality]
interval_ids = batch['meta']['interval_id']
## Append cluster labels
x.append(labels)
x.append(interval_ids)
##Append Meta
y = y_.to(self.device)
## Remove the masked joints
y = self.transform(y)
return x, y_, y,
@property
def loss_kinds(self):
return ['pose', 'G_gan',
'real_D', 'fake_D',
'cntr', 'H']
def running_loss_init(self):
self.running_loss = [0]*len(self.loss_kinds)
self.running_count = [1e-10]*len(self.loss_kinds)
def tqdm_desc(self, desc, losses=[]):
loss_str = ''.join([' {}'.format(l) + ':{:.3f}' for l in self.loss_kinds])
#loss_str = ' pose:{:.4f} G_gan:{:.4f} real_D:{:.4f} fake_D:{:.4f} label:{:.4f} H:{:.4f}'
if not losses:
losses = [0]* len(self.running_loss)
return desc + loss_str.format(*losses)
TrainerJointLateClusterContrastiveDTWGAN = TrainerLateClusterContrastiveDTWGAN
import librarian
import operator
import model.tagger as tagger
from nltk.tokenize import TweetTokenizer
### TEXT PROCESSING METHODS ###
def split_into_sentences(text):
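    # note: TweetTokenizer.tokenize returns word-level tokens rather than sentences, despite this function's name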
tokenizer = TweetTokenizer()
return tokenizer.tokenize(text)
def split_into_words(sentence):
tokenizer = TweetTokenizer()
punctuation = ['.', ',', '!', '?', '(', ')', '$', ':', ';', '{', '}', '[', ']', '•', '|']
return [w.lower() for w in tokenizer.tokenize(sentence) if not w in punctuation]
# a dictionary of all terms in the document of length n
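# e.g. term_dict("the cat sat on the mat", n=2) -> {'the cat': 1, 'cat sat': 1, 'sat on': 1, 'on the': 1, 'the mat': 1}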
def term_dict(doc, n=1):
term_dict = {}
words = split_into_words(doc)
for i in range(len(words)+1-n):
term = " ".join(words[i:i+n])
if term in term_dict:
term_dict[term] += 1
else:
term_dict[term] = 1
return term_dict
# a list of dictionaries of terms in the document of length n
def term_dicts(corpus, n=1):
return [term_dict(d, n) for d in corpus]
# how many times the term appears in the document
def term_frequency(term, doc):
return term_dict(doc)[term]
# how many documents in the corpus include the term
def doc_frequency(term, term_dicts):
return len([1 for td in term_dicts if term in td])
# list of the same length as the corpus list with top tf-idf candidates for topic words
def keywords(corpus, term_dicts, num_keywords):
pass # TODO
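# print a rough ASCII bar chart of how often the term occurs, aggregated over consecutive windows of 10 documents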
def graph(term, term_dicts):
print('\n\tfrequency of "' + term.upper() + '"')
for i in range(0, len(term_dicts), 10):
term_dict_set = term_dicts[i:i+10]
count = sum([td[term] for td in term_dict_set if term in td])
line = str(i) + '\t' + '|'*count
print(line)
def get_number(blog_title):
return int(blog_title.split('-')[2])
if __name__ == '__main__':
rows = librarian.get_data_from_directory('posts', ['filename','main_text'])
sorted_rows = sorted(rows[1:], key=lambda row: get_number(row[0]))
main_texts = [main_text for filename, main_text in sorted_rows]
tds = term_dicts(main_texts)
print(doc_frequency('the', tds))
words = ['hacker', 'recurse']
for word in words:
graph(word, tds)
# list of integers representing term frequency across documents
def frequency_distribution(term, term_dicts):
freqs = []
for td in term_dicts:
if term in td:
freqs.append(td[term])
else:
freqs.append(0)
return freqs
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-11-26 04:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('take_a_number', '0004_remove_officehourssession_instructor_code'),
]
operations = [
migrations.AddField(
model_name='officehourssession',
name='instructor_code',
field=models.SlugField(default='', max_length=6),
),
migrations.AddField(
model_name='officehourssession',
name='student_code',
field=models.SlugField(default='', max_length=6),
),
]
import numpy as np
np.savez_compressed(filename, x, y, z)
# Copyright IBM Corp, All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os
import sys
import uuid
from flask import Blueprint
from flask import request as r
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from common import log_handler, LOG_LEVEL, \
make_ok_resp, make_fail_resp, \
CODE_CREATED, \
request_debug
from common.utils import K8S_CRED_TYPE
from modules import host_handler
from modules.models import Cluster as ClusterModel
from modules.models import Host as HostModel
from agent import detect_daemon_type
from auth import oidc
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
bp_host_api = Blueprint('bp_host_api', __name__,
url_prefix='/{}'.format("api"))
@bp_host_api.route('/hosts', methods=['GET'])
@oidc.accept_token(True)
def hosts_list():
logger.info("/hosts_list method=" + r.method)
request_debug(r, logger)
col_filter = dict((key, r.args.get(key)) for key in r.args)
items = list(host_handler.list(filter_data=col_filter))
return make_ok_resp(data=items)
@bp_host_api.route('/host/<host_id>', methods=['GET'])
def host_query(host_id):
request_debug(r, logger)
result = host_handler.schema(host_handler.get_by_id(host_id))
logger.debug(result)
if result:
return make_ok_resp(data=result)
else:
error_msg = "host not found with id=" + host_id
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=r.form)
@bp_host_api.route('/host', methods=['POST'])
def host_create():
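    # create a host record; vsphere and kubernetes hosts carry extra provisioning parameters, other daemon types are inferred from worker_api via detect_daemon_type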
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
name, worker_api, capacity, log_type, log_server, log_level, host_type = \
body['name'], body['worker_api'], body['capacity'], \
body['log_type'], body.get('log_server', ''), body['log_level'], \
body['host_type'] if 'host_type' in body else None
if "autofill" in body and body["autofill"] == "on":
autofill = "true"
else:
autofill = "false"
if "schedulable" in body and body["schedulable"] == "on":
schedulable = "true"
else:
schedulable = "false"
if host_type == "vsphere":
vcaddress = body['vc_address']
if vcaddress.find(":") == -1:
address = vcaddress
port = "443"
else:
address = vcaddress.split(':')[0]
port = vcaddress.split(':')[1]
logger.debug("address={}, port={}".format(address, port))
vmname = "cello-vsphere-" + str(uuid.uuid1())
vsphere_param = {
'vc': {
'address': address,
'port': port,
'username': body['vc_user'],
                'password': body['vc_password'],
'network': body['vc_network'],
'vc_datastore': body['datastore'],
'vc_datacenter': body['datacenter'],
'vc_cluster': body['cluster'],
'template': body['vm_template']},
'vm': {
'vmname': vmname,
'ip': body['vm_ip'],
'gateway': body['vm_gateway'],
'netmask': body['vm_netmask'],
'dns': body['vm_dns'],
'vcpus': int(body['vm_cpus']),
'memory': int(body['vm_memory'])}}
logger.debug("name={}, capacity={},"
"fillup={}, schedulable={}, log={}/{}, vsphere_param={}".
format(name, capacity, autofill, schedulable,
log_type, log_server, vsphere_param))
vsphere_must_have_params = {
'Name': name,
'Capacity': capacity,
'LoggingType': log_type,
'VCAddress': address,
'VCUser': body['vc_user'],
'VCPassword': body['vc_password'],
'VCNetwork': body['vc_network'],
'Datastore': body['datastore'],
'Datacenter': body['datacenter'],
'Cluster': body['cluster'],
'VMIp': body['vm_ip'],
'VMGateway': body['vm_gateway'],
'VMNetmask': body['vm_netmask']}
for key in vsphere_must_have_params:
if vsphere_must_have_params[key] == '':
error_msg = "host POST without {} data".format(key)
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
result = host_handler.create(name=name, worker_api=worker_api,
capacity=int(capacity),
autofill=autofill,
schedulable=schedulable,
log_level=log_level,
log_type=log_type,
log_server=log_server,
host_type=host_type,
params=vsphere_param)
elif host_type == 'kubernetes':
worker_api = body['worker_api']
k8s_param = create_k8s_host(name, capacity, log_type, body)
        if len(k8s_param) == 0:
            error_msg = "host POST with incomplete kubernetes host parameters"
            logger.warning(error_msg)
            return make_fail_resp(error=error_msg, data=r.form)
logger.debug("name={}, worker_api={}, capacity={},"
"fillup={}, schedulable={}, log={}/{}, k8s_param={}".
format(name, worker_api, capacity, autofill,
schedulable, log_type, log_server, k8s_param))
result = host_handler.create(name=name, worker_api=worker_api,
capacity=int(capacity),
autofill=autofill,
schedulable=schedulable,
log_level=log_level,
log_type=log_type,
log_server=log_server,
host_type=host_type,
params=k8s_param)
else:
logger.debug("name={}, worker_api={}, capacity={}"
"fillup={}, schedulable={}, log={}/{}".
format(name, worker_api, capacity, autofill, schedulable,
log_type, log_server))
if not name or not worker_api or not capacity or not log_type:
error_msg = "host POST without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
else:
host_type = host_type if host_type \
else detect_daemon_type(worker_api)
result = host_handler.create(name=name, worker_api=worker_api,
capacity=int(capacity),
autofill=autofill,
schedulable=schedulable,
log_level=log_level,
log_type=log_type,
log_server=log_server,
host_type=host_type)
logger.debug("result.msg={}".format(result.get('msg')))
if (host_type == "vsphere") and ('msg' in result):
vsphere_errmsg = result.get('msg')
error_msg = "Failed to create vsphere host {}".format(vsphere_errmsg)
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
elif result:
logger.debug("host creation successfully")
return make_ok_resp(code=CODE_CREATED)
else:
error_msg = "Failed to create host {}".format(body["name"])
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
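# Hedged example of a request body for POST /host creating a plain docker-type
# host. The field names come from the handler above; the host name, worker
# address and capacity values below are illustrative assumptions only:
#
#   {
#       "name": "host-1",
#       "worker_api": "tcp://192.168.1.10:2375",
#       "capacity": "5",
#       "log_type": "local",
#       "log_level": "DEBUG"
#   }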
@bp_host_api.route('/host', methods=['PUT'])
def host_update():
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
if "id" not in body:
error_msg = "host PUT without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg,
data=body)
else:
id, d = body["id"], {}
for k in body:
if k != "id":
d[k] = body.get(k)
result = host_handler.update(id, d)
if result:
logger.debug("host PUT successfully")
return make_ok_resp()
else:
error_msg = "Failed to update host {}".format(result.get("name"))
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
@bp_host_api.route('/host', methods=['PUT', 'DELETE'])
def host_delete():
request_debug(r, logger)
    request_data = r.get_json(force=True, silent=True) or {}
if "id" in r.form:
host_id = r.form["id"]
elif "id" in request_data:
host_id = request_data.get("id")
else:
error_msg = "host delete without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=r.form)
logger.debug("host delete with id={0}".format(host_id))
if host_handler.delete(id=host_id):
return make_ok_resp()
else:
error_msg = "Failed to delete host {}".format(host_id)
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
@bp_host_api.route('/host_op', methods=['POST'])
def host_actions():
logger.info("/host_op, method=" + r.method)
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
host_id, action = body['id'], body['action']
if not host_id or not action:
error_msg = "host POST without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg,
data=body)
else:
if action == "fillup":
if host_handler.fillup(host_id):
logger.debug("fillup successfully")
return make_ok_resp()
else:
error_msg = "Failed to fillup the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
elif action == "clean":
if host_handler.clean(host_id):
logger.debug("clean successfully")
return make_ok_resp()
else:
error_msg = "Failed to clean the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
elif action == "reset":
if host_handler.reset(host_id):
logger.debug("reset successfully")
try:
host_model = HostModel.Query.get(id=host_id)
clusters = ClusterModel.Query.\
filter(host=host_model.as_pointer)
for cluster_item in clusters:
cluster_item.delete()
except Exception:
pass
return make_ok_resp()
else:
error_msg = "Failed to reset the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
error_msg = "unknown host action={}".format(action)
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
def create_k8s_host(name, capacity, log_type, request):
if request.get("k8s_ssl") == "on" and request.get("ssl_ca") is not None:
k8s_ssl = "true"
k8s_ssl_ca = request["ssl_ca"]
else:
k8s_ssl = "false"
k8s_ssl_ca = None
request['use_ssl'] = k8s_ssl
request['use_ssl_ca'] = k8s_ssl_ca
k8s_must_have_params = {
'Name': name,
'Capacity': capacity,
'LoggingType': log_type,
'K8SAddress': request['worker_api'],
'K8SCredType': request['k8s_cred_type'],
'K8SNfsServer': request['k8s_nfs_server'],
'K8SUseSsl': request['use_ssl'],
'K8SSslCert': request['use_ssl_ca']
}
if k8s_must_have_params['K8SCredType'] == K8S_CRED_TYPE['account']:
k8s_must_have_params['K8SUsername'] = request['k8s_username']
k8s_must_have_params['K8SPassword'] = request['k8s_password']
elif k8s_must_have_params['K8SCredType'] == K8S_CRED_TYPE['cert']:
k8s_must_have_params['K8SCert'] = request['k8s_cert']
k8s_must_have_params['K8SKey'] = request['k8s_key']
elif k8s_must_have_params['K8SCredType'] == K8S_CRED_TYPE['config']:
k8s_must_have_params['K8SConfig'] = request['k8s_config']
for key in k8s_must_have_params:
if k8s_must_have_params[key] == '':
error_msg = "host POST without {} data".format(key)
logger.warning(error_msg)
return []
return k8s_must_have_params
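# Hedged usage sketch of how host_create above feeds this helper for a
# kubernetes host. The addresses and credential values are illustrative
# assumptions; real values depend on the K8S_CRED_TYPE mapping defined elsewhere:
#
#   body = {
#       'worker_api': 'https://10.0.0.5:6443',
#       'k8s_cred_type': K8S_CRED_TYPE['account'],
#       'k8s_username': 'admin',
#       'k8s_password': 'secret',
#       'k8s_nfs_server': '10.0.0.6',
#       'k8s_ssl': 'off',
#   }
#   params = create_k8s_host('k8s-host-1', '5', 'local', body)
#   # An empty return value signals a missing field; otherwise the dict is
#   # handed to host_handler.create(..., params=params).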
#!/usr/bin/env python
'''
This file is part of the PyMSRPC project and is licensed under the
project license.
ndr.py
This are the functions that provide all the NDR data types. It handles
serialization and everything. I have spent a shit load of time on this and
yet they are not 100%. This is usually due to structure padding or array
serialization but honestly debugging it is such a beating so this is what
I have for now.
(c) 2007 <NAME> - BSD License - See LICENSE.txt
'''
import sys, struct, random, re, copy
DEBUG = False
#######################################################################
#
# Opcodes
#
#######################################################################
class ndr_opcode:
def __init__(self, **kwargs):
self.opnum = kwargs.get('opnum', 0x0)
self.address = kwargs.get('address', 0x00000000)
self.elements = kwargs.get('elements', [])
self.out = kwargs.get('out', None)
self.align_byte = kwargs.get('align_byte', "\xaa")
def align(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
# Allows us to set a context handle for [in] params
def set_context_handle(self, handle):
for elem in self.elements:
if isinstance(elem, ndr_context_handle):
elem.data = handle
return True
return False
def serialize(self):
serialdata = ""
for elem in self.elements:
s = elem.serialize()
serialdata += s + self.align(s)
return serialdata
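    # Hedged usage sketch (opnum and handle bytes are illustrative assumptions):
    # build a request stub, optionally swap in a real context handle returned by
    # an earlier call, then serialize:
    #
    #   op = ndr_opcode(opnum=0x0f, elements=[ndr_context_handle(), ndr_long(data=4)])
    #   op.set_context_handle("\x22" * 20)
    #   stub = op.serialize()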
#######################################################################
#
# NDR Parent Classes
#
#######################################################################
class ndr_primitive(object):
def align(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def serialize(self):
raise NotImplementedError
class ndr_container(object):
def align(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def add_static(self, obj):
if DEBUG: print "[*] add_static",
if not self.parent:
if DEBUG: print "self"
self.s.append(obj)
else:
if DEBUG: print "parent"
self.parent.add_static(obj)
def add_deferred(self, obj):
if DEBUG: print "[*] add_deferred",
if not self.parent:
if DEBUG: print "self"
self.d.append(obj)
else:
if DEBUG: print "parent"
self.parent.add_deferred(obj)
def serialize(self):
raise NotImplementedError
#######################################################################
#
# Primitives
#
#######################################################################
class ndr_pad(ndr_primitive):
'''
pad placeholder
'''
def __init__(self):
pass
class ndr_byte(ndr_primitive):
'''
encode: byte element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x06)
self.signed = kwargs.get('signed', False)
self.name = kwargs.get('name', "")
self.size = 1
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<b", self.data)
else:
return struct.pack("<B", self.data)
class ndr_small(ndr_primitive):
'''
encode: small element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x00)
self.signed = kwargs.get('signed', False)
self.name = kwargs.get('name', "")
self.size = 1
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<b", self.data)
else:
return struct.pack("<B", self.data)
class ndr_char(ndr_primitive):
'''
encode: char [*] element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x03)
self.signed = kwargs.get('signed', False)
self.name = kwargs.get('name', "")
self.size = 1
if self.signed:
raise Exception
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return chr(self.data)
class ndr_wchar(ndr_primitive):
'''
encode: wchar element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x42)
self.signed = kwargs.get('signed', False)
self.name = kwargs.get('name', "")
self.size = 2
if self.signed:
raise Exception
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return chr(self.data).encode("utf-16le")
class ndr_void(ndr_primitive):
'''
encode: void *element_1
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "")
self.name = kwargs.get('name', "")
self.size = 4
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return self.data
class ndr_user_marshal(ndr_primitive):
'''
encode: [user_marshal(4)] struct struct_12 * elem_24;
Untested/Unsupported because technically ths calls a
user function
'''
def __init__(self, **kwargs):
self.num = kwargs.get('num', 0x4)
self.data = kwargs.get('data', "")
self.name = kwargs.get('name', "")
self.size = 0
def get_size(self):
return self.size
def get_packed(self):
return struct.pack("<L", self.num)
class ndr_range(ndr_primitive):
'''
encode: [range(0,1000)] long elem_1;
'''
    def __init__(self, **kwargs):
        self.low = kwargs.get('low', 0x0)
        self.high = kwargs.get('high', 0xffffffff)
        self.data = kwargs.get('data', "")
self.size = 0
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_size(self):
return self.size
def serialize(self):
if not self.data:
self.data = ndr_long(data=random.randint(self.low, self.high))
else:
if self.data.get_data() > self.high:
self.data.data = self.high
elif self.data.get_data() < self.low:
self.data.data = self.low
return self.data.serialize()
class ndr_enum16(ndr_primitive):
'''
encode: /* enum16 */ short element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x0004)
self.signed = kwargs.get('signed', True)
self.name = kwargs.get('name', "")
self.size = 2
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<H", self.data)
else:
return struct.pack("<h", self.data)
class ndr_short(ndr_primitive):
'''
encode: short element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x0004)
self.signed = kwargs.get('signed', True)
self.name = kwargs.get('name', "")
self.size = 2
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<H", self.data)
else:
return struct.pack("<h", self.data)
class ndr_interface(ndr_primitive):
'''
encode: interface(0000000c-0000-0000-c000-000000000046)
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "\x89" * 20)
self.name = kwargs.get('name', "")
self.size = 20
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return self.data
class ndr_long(ndr_primitive):
'''
encode: long element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x00000002)
self.signed = kwargs.get('signed', True)
self.name = kwargs.get('name', "")
self.size = 4
def set_data(self, new_data):
self.data = new_data
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<l", self.data)
else:
return struct.pack("<L", self.data)
class ndr_hyper(ndr_primitive):
'''
encode: hyper (aka 64bit) element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x0000000000000005)
self.signed = kwargs.get('signed', True)
self.name = kwargs.get('name', "")
self.size = 8
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if self.signed:
return struct.pack("<q", self.data)
else:
return struct.pack("<Q", self.data)
class ndr_empty(ndr_primitive):
'''
used for default or empty cases in unions/unknown stuff
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "")
self.name = kwargs.get('name', "")
self.size = 0
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return ""
class ndr_float(ndr_primitive):
'''
encode: float element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0.0)
self.name = kwargs.get('name', "")
self.size = 4
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return struct.pack("<f", self.data)
class ndr_double(ndr_primitive):
'''
encode: double element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0.0)
self.name = kwargs.get('name', "")
self.size = 8
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def serialize(self):
return struct.pack("<d", self.data)
class ndr_string(ndr_primitive):
'''
encode: char *element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "Administrator")
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.size = 0
def pad(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return len(self.get_packed())
def serialize(self):
# We add our null because it gets counted
self.data += "\x00"
length = len(self.data)
# Conformance varying information
return struct.pack("<L", length) \
+ struct.pack("<L", 0) \
+ struct.pack("<L", length) \
+ self.data \
               + self.pad(self.data)
class ndr_wstring(ndr_primitive):
'''
encode: wchar *element_1;
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "\\\\EXCHANGE2K3")
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.size = 0
def pad(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def set_data(self, new_data):
self.data = new_data
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return len(self.get_packed())
def serialize(self):
# Add our wide null because it gets counted
data = self.data.encode("utf-16le") + "\x00\x00"
length = len(data) / 2
return struct.pack("<L", length) \
+ struct.pack("<L", 0) \
+ struct.pack("<L", length) \
+ data \
+ self.pad(data)
class ndr_string_nonconformant(ndr_primitive):
'''
encode: [string] char element_1[3];
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "ABCDEFG")
self.name = kwargs.get('name', "")
self.size = kwargs.get('size', 0)
self.align_byte = kwargs.get('align_byte', "\xaa")
def pad(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def set_data(self, new_data):
self.data = new_data
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return len(self.get_packed())
def serialize(self):
# Make sure we stick to our size
if len(self.data) < self.size:
self.size = len(self.data)
data = self.data
else:
data = self.data[:self.size - 1]
# Add our null
data += "\x00"
return struct.pack("<L", 0) \
+ struct.pack("<L", self.size) \
+ data \
+ self.pad(data)
class ndr_wstring_nonconformant(ndr_primitive):
'''
encode: [string] wchar_t element_1[3];
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "ABCDEFG")
self.name = kwargs.get('name', "")
self.size = kwargs.get('size', 0)
self.align_byte = kwargs.get('align_byte', "\xaa")
def pad(self, data):
return self.align_byte * ((4 - (len(data) & 3)) & 3)
def set_data(self, new_data):
self.data = new_data
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return len(self.get_packed())
def serialize(self):
# Make sure we stick to our size
if len(self.data) < self.size:
self.size = len(self.data) / 2
data = self.data
else:
data = self.data[:self.size - 1]
# Add our wide null
data = data.encode("utf-16le") + "\x00\x00"
return struct.pack("<L", 0) \
+ struct.pack("<L", self.size) \
+ data \
+ self.pad(data)
class ndr_error_status(ndr_primitive):
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x00000000)
self.name = kwargs.get('name', "")
self.size = 4
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return struct.pack("<L", self.data)
class ndr_callback(ndr_primitive):
'''
encodes size_is(callback_0x12345678)
Unsupported because it calls a user function
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', 0x00000000)
self.name = kwargs.get('name', "")
self.size = 4
def get_data(self):
return self.data
def set_data(self, new_data):
self.data = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return struct.pack("<L", self.data)
class ndr_context_handle(ndr_primitive):
'''
encodes: [in] context_handle arg_1
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "\x88" * 20)
self.name = kwargs.get('name', "")
self.size = 20
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return self.data
class ndr_pipe(ndr_primitive):
'''
I need an example plz2u
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "\x8a" * 20)
self.name = kwargs.get('name', "")
self.size = 20
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return self.data
class ndr_handle_t(ndr_primitive):
'''
encode: handle_t element_1 (not sent on network)
'''
def __init__(self, **kwargs):
self.data = kwargs.get('data', "")
self.name = kwargs.get('name', "")
self.size = 0
def get_data(self):
return self.data
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
return ""
#######################################################################
#
# Unions
#
#######################################################################
class ndr_union:
'''
NDR Union: data will be a tuple list of (case, ndr_type)
'''
def __init__(self, **kwargs):
self.elements = kwargs.get('elements', {})
self.switch_dep = kwargs.get('switch_dep', "")
self.name = kwargs.get('name', "")
self.defname = kwargs.get('defname', "")
self.size = 0
def get_data(self):
return self.elements
def set_data(self, new_data):
self.elements = new_data
def get_name(self):
return self.name
def get_size(self):
return self.size
def add_element(self, case, element):
self.elements[case] = element
def serialize(self):
serialdata = ""
switch = self.switch_dep.get_data()
if self.elements.has_key(switch):
serialdata += self.switch_dep.serialize()
# Pack our requested enum
serialdata += self.elements[switch].serialize()
else:
# This allows us to pick a switch for the user
newswitch = self.elements.keys()[0]
# We need to update our original switch_dep so it passes correlation checks
self.switch_dep.set_data(newswitch)
serialdata += ndr_long(data=newswitch).serialize()
serialdata += self.elements[newswitch].serialize()
return serialdata
#######################################################################
#
# Pointers
#
#######################################################################
class ndr_unique(ndr_container):
def __init__(self, **kwargs):
self.name = kwargs.get('name', "")
self.data = kwargs.get('data', "")
self.type = kwargs.get('type', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.pointer_value = kwargs.get('pointer_value', 0x41424344)
self.size = 4
self.alignment = 4
self.parent = None
self.s = []
self.d = []
def get_name(self):
return self.name
def get_size(self):
return self.size
def get_data(self):
return self.data
def set_data(self, new_data):
# We have to use the objects set_data if its a unique/array
self.data.set_data(new_data)
def serialize(self):
self.add_static(ndr_long(data=self.pointer_value))
if isinstance(self.data, ndr_container):
self.data.parent = self
self.add_deferred(self.data)
if not self.parent:
while len(self.d):
d = self.d.pop(0)
if isinstance(d, ndr_container):
d.serialize()
else:
self.add_static(d)
serialdata = ""
for s in self.s:
if isinstance(s, ndr_pad):
serialdata += self.align(serialdata)
else:
serialdata += s.serialize()
self.parent = None
self.s = []
self.d = []
return serialdata
class ndr_full(ndr_container):
def __init__(self, **kwargs):
self.name = kwargs.get('name', "")
self.data = kwargs.get('data', "")
self.type = kwargs.get('type', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.pointer_value = kwargs.get('pointer_value', 0x41424344)
self.size = 4
self.alignment = 4
self.parent = None
self.s = []
self.d = []
def get_name(self):
return self.name
def get_size(self):
return self.size
def get_data(self):
return self.data
def set_data(self, new_data):
# We have to use the objects set_data if its a unique/array
self.data.set_data(new_data)
def serialize(self):
self.add_static(ndr_long(data=self.pointer_value))
if isinstance(self.data, ndr_container):
self.data.parent = self
self.add_deferred(self.data)
if not self.parent:
while len(self.d):
d = self.d.pop(0)
if isinstance(d, ndr_container):
d.serialize()
else:
self.add_static(d)
serialdata = ""
for s in self.s:
if isinstance(s, ndr_pad):
serialdata += self.align(serialdata)
else:
serialdata += s.serialize()
self.parent = None
self.s = []
self.d = []
return serialdata
#######################################################################
#
# Structures
#
#######################################################################
class ndr_struct(ndr_container):
def __init__(self, **kwargs):
self.elements = kwargs.get('elements', [])
self.name = kwargs.get('name', "")
self.defname = kwargs.get('defname', "")
self.type = kwargs.get('type', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.size = 0
self.alignment = 4
self.parent = None
self.s = []
self.d = []
def get_data(self):
return self.elements
def set_data(self, new_data):
self.elements = new_data
def add_element(self, element):
self.elements.append(element)
def del_element(self, eid):
del(self.elements[eid])
return True
def get_element_by_id(self, eid=0):
return self.elements[eid]
def get_element_by_name(self, name):
for element in self.elements:
try:
if element.name == name:
return element
except:
if DEBUG: print "[*] Couldnt get name of element"
return False
def get_name(self):
return self.name
def get_size(self):
return self.size
def serialize(self):
if DEBUG: print "[*] Serializing ndr_struct"
# First we take care of our list serializing all containers first, and adding primitives verbatim
for e in self.elements:
if isinstance(e, ndr_container):
e.parent = self
e.serialize()
else:
self.add_static(e)
# If we are the top-most structure lets package it all
if not self.parent:
if DEBUG: print "[*] Packaging top most struct %s" % self.name
self.add_static(ndr_pad())
while len(self.d):
d = self.d.pop(0)
if isinstance(d, ndr_container):
d.serialize()
else:
self.add_static(d)
serialdata = ""
for s in self.s:
if isinstance(s, ndr_pad):
serialdata += self.align(serialdata)
else:
serialdata += s.serialize()
self.parent = None
self.s = []
self.d = []
return serialdata
#######################################################################
#
# Arrays
#
#######################################################################
class ndr_array(ndr_container):
def array_serialize(self, count):
for c in range(count):
if isinstance(self.basetype, ndr_container):
self.basetype.parent = self
self.basetype.serialize()
else:
self.add_static(self.basetype)
if not self.parent:
if DEBUG: print "[*] Packaging top most array %s" % self.name
while len(self.d):
d = self.d.pop(0)
if isinstance(d, ndr_container):
d.serialize()
else:
self.add_static(d)
serialdata = ""
for s in self.s:
if isinstance(s, ndr_pad):
serialdata += self.align(serialdata)
else:
serialdata += s.serialize()
self.parent = None
self.s = []
self.d = []
return serialdata + self.align(serialdata)
else:
self.add_static(ndr_pad())
class ndr_array_fixed(ndr_array):
def __init__(self, **kwargs):
self.basetype = kwargs.get('basetype', ndr_empty())
self.elements = kwargs.get('elements', [])
self.count = kwargs.get('count', 0x0)
self.cmod= kwargs.get('cmod', ())
self.cptr = kwargs.get('cptr', 0x0)
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.size = 0
self.parent = None
self.s = []
self.d = []
def set_data(self, new_data):
# We have to use the objects set_data if its a pointer
self.basetype.set_data(new_data)
def get_size(self):
return self.size
def get_count(self):
return self.count
def serialize(self):
if DEBUG: print "[*] Serializing ndr_array"
if self.cptr == 1:
self.add_static(ndr_long(data=0x41424344))
return self.array_serialize(self.count)
class ndr_array_conformant(ndr_array):
def __init__(self, **kwargs):
self.basetype = kwargs.get('basetype', ndr_empty())
self.elements = kwargs.get('elements', [])
self.count = kwargs.get('count', 0x0)
self.cmod= kwargs.get('cmod', ())
self.cptr = kwargs.get('cptr', 0x0)
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.packed_count = False
self.size = 0
self.parent = None
self.s = []
self.d = []
def set_data(self, new_data):
# We have to use the objects set_data if its a pointer
self.basetype.set_data(new_data)
def get_size(self):
return self.size
def serialize(self):
if DEBUG: print "[*] Serializing ndr_array_conformant"
if self.cptr == 1:
self.add_static(ndr_long(data=0x41424344))
# Pack our count
if isinstance(self.count, int):
num = self.count
self.add_static(ndr_long(data=num))
# If we used a ascii rep of size pack it
# YYY: callback_0x12345678 will fail here
elif isinstance(self.count, str):
num = int(self.count)
self.add_static(ndr_long(data=num))
# else we have a ndr object to pack
else:
# We have to handle the math operators i.e. [size_is(arg1 / 2)]
num = self.count.get_data()
if self.cmod:
if self.cmod[0] == "/":
num /= self.cmod[1]
elif self.cmod[0] == "*":
num *= self.cmod[1]
else:
print "[!] Problem with operator %s" % self.cmod[0]
sys.exit(-1)
self.add_static(ndr_long(data=num))
# End pack count
return self.array_serialize(num)
class ndr_array_varying(ndr_array):
def __init__(self, **kwargs):
self.basetype = kwargs.get('basetype', ndr_empty())
self.elements = kwargs.get('elements', [])
self.count = kwargs.get('count', 0x0)
self.cmod= kwargs.get('cmod', ())
self.cptr = kwargs.get('cptr', 0x0)
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.packed_count = False
self.size = 0
self.parent = None
self.s = []
self.d = []
def set_data(self, new_data):
# We have to use the objects set_data if its a pointer
self.basetype.set_data(new_data)
def get_size(self):
return self.size
def serialize(self):
# Pack offset
self.add_static(ndr_long(data=0x0))
# Need example of the cptr stuff
if self.cptr == 1:
self.add_static(ndr_long(data=0x41424344))
if isinstance(self.count, int):
num = self.count
elif isinstance(self.count, str):
num = int(self.count)
else:
num = self.count.get_data()
if self.cmod:
if self.cmod[0] == "/":
num /= self.cmod[1]
elif self.cmod[0] == "*":
num *= self.cmod[1]
else:
print "[!] Problem with operator %s" % self.cmod[0]
sys.exit(-1)
# Pack our array count
self.add_static(ndr_long(data=num))
return self.array_serialize(num)
class ndr_array_conformant_varying(ndr_array):
def __init__(self, **kwargs):
self.basetype = kwargs.get('basetype', ndr_empty())
self.elements = kwargs.get('elements', [])
self.maxcount = kwargs.get('maxcount', 0x0)
self.mmod= kwargs.get('mmod', ())
self.mptr = kwargs.get('mptr', 0x0)
self.passed = kwargs.get('passed', 0x0)
self.pmod= kwargs.get('pmod', ())
self.pptr = kwargs.get('pptr', 0x0)
self.name = kwargs.get('name', "")
self.align_byte = kwargs.get('align_byte', "\xaa")
self.packed_count = True
self.size = 0
self.parent = None
self.s = []
self.d = []
def set_data(self, new_data):
# We have to use the objects set_data if its a pointer
self.basetype.set_data(new_data)
def get_size(self):
return self.size
def serialize(self):
# Need example of the mptr stuff
if self.mptr == 1:
self.add_static(ndr_long(data=0x41424344))
# Do conformant stuff
if isinstance(self.maxcount, int):
mnum = self.maxcount
elif isinstance(self.maxcount, str):
mnum = int(self.maxcount)
else:
mnum = self.maxcount.get_data()
if self.mmod:
if self.mmod[0] == "/":
mnum /= self.mmod[1]
elif self.mmod[0] == "*":
mnum *= self.mmod[1]
else:
print "[!] Problem with operator %s" % self.mmod[0]
sys.exit(-1)
# Pack conformant info
self.add_static(ndr_long(data=mnum))
# Offset
self.add_static(ndr_long(data=0x0))
# Need example of the pptr stuff
if self.pptr == 1:
self.add_static(ndr_long(data=0x41424344))
# Do varying stuff
if isinstance(self.passed, int):
pnum = self.passed
elif isinstance(self.passed, str):
pnum = int(self.passed)
else:
pnum = self.passed.get_data()
if self.pmod:
if self.pmod[0] == "/":
pnum /= self.pmod[1]
elif self.pmod[0] == "*":
pnum *= self.pmod[1]
else:
print "[!] Problem with operator %s" % self.pmod[0]
sys.exit(-1)
# Add varying count
self.add_static(ndr_long(data=pnum))
return self.array_serialize(pnum)
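# Minimal demo sketch showing how the classes above compose. The opnum, handle
# bytes and values below are illustrative assumptions, not taken from any real
# RPC interface.
if __name__ == "__main__":
    demo_op = ndr_opcode(opnum=0x02, elements=[
        ndr_context_handle(data="\x11" * 20),   # 20-byte [in] context handle
        ndr_wstring(data="\\\\HOST"),           # conformant/varying wide string
        ndr_long(data=0x100),                   # plain 32-bit value
    ])
    blob = demo_op.serialize()
    print "serialized %d bytes for opnum 0x%02x" % (len(blob), demo_op.opnum)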
# main.py
import pygame
import pygame.freetype
import math
import time
import sys
import random
import matplotlib.pyplot as plt
from pygame.math import Vector2 as Vector  # 2D vector type used below for positions and velocities
# initialize pygame
pygame.init()
FPS = 100 # frames per second
fps_clock = pygame.time.Clock()
# Screen setup
WIDTH = 1280
HEIGHT = 900
DISPLAY = pygame.display.set_mode((WIDTH, HEIGHT))
DISPLAY.fill((72, 79, 79))
pygame.display.set_caption('ATOMOWO!')
FONT = pygame.font.Font(None, 32)
# RGB colors
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
'''Global variables and arrays holding the data used in the calculations and the results'''
free_way = 0
previous_time = time.perf_counter()
collisions = 0
mean_freeway = []
freq_of_collisons = []
number_of_atoms = []
'''Class responsible for holding the atoms, drawing them, moving them, and handling collisions between them'''
class Atomic_Container(pygame.sprite.Sprite):
    '''The initializer stores information on how to build atoms based on basic_atom, plus the list containing all atoms'''
def __init__(self, basic_atom):
self.atom = basic_atom
self.atoms = [Atom(self.atom.radius, self.atom.mass)]
    '''Create and fill the list with atoms; the last atom is a special Atom'''
def instantia_atoms(self, amount):
for i in range(0, amount):
self.atoms.append(Atom(self.atom.radius, self.atom.mass))
        '''Append the special atom at the end of the list'''
self.atoms.append(SpecialAtom(self.atom.radius, self.atom.mass))
    '''Draw all atoms on the given surface'''
def draw_atoms(self, surface):
surface.fill((198, 210, 209))
i = 0
for atom in self.atoms:
pygame.draw.circle(surface, RED, (int(atom.position.x), int(atom.position.y)), atom.radius)
i += 1
if i == len(self.atoms) - 1:
break
pygame.draw.circle(surface, BLUE, (int(self.atoms[-1].position.x), int(self.atoms[-1].position.y)), atom.radius)
    '''Update each atom's position based on its velocity'''
def move_atom(self):
for atom in self.atoms:
atom.position += atom.speed
    '''A cheaper collision check, although it still needs refinement for high atom velocities'''
def collision_with_atoms(self, container):
global collisions, free_way
for atom in self.atoms:
if atom.position.x + 3 <= atom.radius or atom.position.x >= container.width - atom.radius - 3:
atom.speed.x *= -1
if atom.position.y + 3 <= atom.radius or atom.position.y >= container.height - atom.radius - 3:
atom.speed.y *= -1
for i in range(0, len(self.atoms)):
for j in range(i, len(self.atoms)):
atom_1 = self.atoms[i]
atom_2 = self.atoms[j]
if atom_1 != atom_2 and math.sqrt(((atom_1.position.x - atom_2.position.x) ** 2) + ((atom_1.position.y - atom_2.position.y) ** 2)) <= (2*atom_1.radius + 1):
if atom_1 is self.atoms[-1] or atom_2 is self.atoms[-1]:
collisions += 1
self.calculate_freeway(self.atoms[-1])
tmp = atom_1.speed
atom_1.speed = atom_2.speed
atom_2.speed = tmp
    '''Helper function for the elastic-collision calculations'''
    def collision_wth_atoms_v2_utility(self, atom_1, atom_2):
        ### Length of the first atom's velocity vector ###
        atom_1_speed = math.sqrt((atom_1.speed.x ** 2) + (atom_1.speed.y ** 2))
        ### Difference between the atoms' centres along the X axis
        x_diff = -(atom_1.position.x - atom_2.position.x)
        ### Difference between the atoms' centres along the Y axis
        y_diff = -(atom_1.position.y - atom_2.position.y)
        x_speed, y_speed = 0, 0
        if x_diff > 0:
            ### Collision angle between the atoms, computed from the tangent of the X and Y differences
angle = math.degrees(math.atan(y_diff / x_diff))
x_speed = -atom_1_speed * math.cos(math.radians(angle))
y_speed = -atom_1_speed * math.sin(math.radians(angle))
elif x_diff < 0:
if y_diff > 0:
angle = 180 + math.degrees(math.atan(y_diff / x_diff))
x_speed = -atom_1_speed * math.cos(math.radians(angle))
y_speed = -atom_1_speed * math.sin(math.radians(angle))
elif y_diff < 0:
angle = -180 + math.degrees(math.atan(y_diff / x_diff))
x_speed = -atom_1_speed * math.cos(math.radians(angle))
y_speed = -atom_1_speed * math.sin(math.radians(angle))
elif x_diff == 0:
if y_diff > 0:
angle = -90
else:
angle = 90
x_speed = atom_1_speed * math.cos(math.radians(angle))
y_speed = atom_1_speed * math.sin(math.radians(angle))
elif y_diff == 0:
if x_diff < 0:
angle = 0
else:
angle = 180
x_speed = atom_1_speed * math.cos(math.radians(angle))
y_speed = atom_1_speed * math.sin(math.radians(angle))
atom_1.speed.x = x_speed
atom_1.speed.y = y_speed
    '''This is where the collisions happen, or rather where they are checked in a continuous loop'''
def collision_wth_atoms_v2(self):
global free_way
global collisions
        '''Iterate to check whether an atom collides with another atom'''
for atom_1 in self.atoms:
for atom_2 in self.atoms:
if not(atom_1 is atom_2):
                    '''Check whether the atoms overlap by comparing the distance between their position vectors with twice the atom radius'''
if math.sqrt(((atom_1.position.x - atom_2.position.x) ** 2) + ((atom_1.position.y - atom_2.position.y) ** 2)) <= (atom_1.radius + atom_2.radius):
                        '''If the colliding atom is the last atom in the list, it is the special atom, so compute the
                        free path it has travelled and increase the collision count'''
if atom_1 is self.atoms[-1]:
collisions += 1
self.calculate_freeway(self.atoms[-1])
self.collision_wth_atoms_v2_utility(atom_1, atom_2)
def collision_with_container(self, container):
for atom in self.atoms:
if atom.position.x + 3 <= atom.radius or atom.position.x >= container.width - atom.radius - 3:
atom.speed.x *= -1
if atom.position.y - 3 <= atom.radius or atom.position.y >= container.height - atom.radius + 3:
atom.speed.y *= -1
def calculate_freeway(self, atom):
global free_way
global previous_time
global collisions
current_time = time.perf_counter()
free_way += ((atom.speed * (current_time-previous_time)).length())
previous_time = time.perf_counter()
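        # Mean free path note: free_way accumulates |speed| * dt for the special
        # atom between collisions, so free_way / collisions at the end of a run
        # is its average distance travelled between successive collisions.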
class Atom():
def __init__(self, radius, mass):
self.radius = radius
self.mass = mass
self.position = Vector(random.randint(radius, 800-radius), random.randint(radius, 800-radius))
self.speed = Vector(2*(random.randrange(-5, 5)), 2*(random.randrange(-5, 5)))
class SpecialAtom(Atom):
def __init__(self, radius, mass):
self.radius = radius
self.mass = mass
self.position = Vector(radius, radius)
self.speed = Vector(2*(random.random()+2.0), 2*(random.random()+2.0))
class Container(pygame.sprite.Sprite):
def __init__(self, width, height):
        # Initialize the sprite renderer
pygame.sprite.Sprite.__init__(self)
self.width = width
self.height = height
self.image = pygame.Surface((self.width, self.height))
self.image.fill((198, 210, 209))
pygame.draw.rect(self.image, (140, 163, 163), ((0, 0), (self.width, self.height)), 10)
self.original_image = self.image
self.rect = self.image.get_rect()
self.rect.topleft = (240, 50)
def Simulation(fps, number_of_atoms):
global mean_freeway, freq_of_collisons
all_sprites = pygame.sprite.Group()
basic_atom = Atom(10, 1)
container = Container(80 * basic_atom.radius, 80 * basic_atom.radius)
atom_container = Atomic_Container(basic_atom)
atom_container.instantia_atoms(number_of_atoms)
all_sprites.add(container)
t0 = time.perf_counter()
### GAME LOOP ###
while True:
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
sys.exit()
atom_container.collision_with_atoms(container)
atom_container.move_atom()
atom_container.draw_atoms(container.image)
#atom_container.collision_with_container(container)
#atom_container.collision_wth_atoms_v2()
atom_container.move_atom()
all_sprites.update()
all_sprites.draw(DISPLAY)
pygame.display.update()
fps_clock.tick(fps)
if time.perf_counter() - t0 > 10:
try:
mean_freeway.append(free_way / collisions)
except ZeroDivisionError:
mean_freeway.append(0)
freq_of_collisons.append(collisions / (time.perf_counter()-t0))
return 0
def main():
global number_of_atoms, mean_freeway, freq_of_collisons, collisions, free_way
for fps in range(20, 110, 20):
for atoms in range(30, 211, 20):
number_of_atoms.append((atoms))
Simulation(fps, atoms)
print(collisions, free_way)
collisions = 0
free_way = 0
plt.figure(1)
plt.plot(number_of_atoms, mean_freeway)
plt.suptitle("FPS =" + str(fps))
plt.xlabel("Liczba \"Atomów\"")
plt.ylabel("Średnia droga swobodna niebieskiego atomu")
plt.figure(2)
plt.suptitle("FPS =" + str(fps))
plt.plot(number_of_atoms, freq_of_collisons, color='red')
plt.xlabel("Liczba \"Atomów\"")
plt.ylabel("Czestośc zderzen niebieskiego atomu")
plt.show()
number_of_atoms = []
mean_freeway = []
freq_of_collisons = []
if __name__ == '__main__':
main()
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.postprocessing
import aif360.datasets
import aif360.metrics
import lale.datasets.data_schemas
import lale.type_checking
import pandas as pd
def dataset_to_pandas(dataset, return_only='Xy'):
"""
Return pandas representation of the AIF360 dataset.
Parameters
----------
dataset : aif360.datasets.BinaryLabelDataset
AIF360 dataset to convert to a pandas representation.
return_only : 'Xy', 'X', or 'y'
Which part of features X or labels y to convert and return.
Returns
-------
result : tuple
- item 0: pandas Dataframe or None, features X
- item 1: pandas Series or None, labels y
"""
if 'X' in return_only:
X = pd.DataFrame(dataset.features, columns=dataset.feature_names)
result_X = lale.datasets.data_schemas.add_schema(X)
assert isinstance(result_X, pd.DataFrame), type(result_X)
else:
result_X = None
if 'y' in return_only:
y = pd.Series(dataset.labels.ravel(), name=dataset.label_names[0])
result_y = lale.datasets.data_schemas.add_schema(y)
assert isinstance(result_y, pd.Series), type(result_y)
else:
result_y = None
return result_X, result_y
_dataset_fairness_properties: lale.type_checking.JSON_TYPE = {
'favorable_label': {
'description': 'Label value which is considered favorable (i.e. "positive").',
'type': 'number'},
'unfavorable_label': {
'description': 'Label value which is considered unfavorable (i.e. "negative").',
'type': 'number'},
'protected_attribute_names': {
'description': 'Subset of feature names for which fairness is desired.',
'type': 'array',
'items': {'type': 'string'}},
'unprivileged_groups': {
'description': 'Representation for unprivileged group.',
'type': 'array',
'items': {
'description': 'Map from feature names to group-indicating values.',
'type': 'object',
'additionalProperties': {
'type': 'number'}}},
'privileged_groups': {
'description': 'Representation for privileged group.',
'type': 'array',
'items': {
'description': 'Map from feature names to group-indicating values.',
'type': 'object',
'additionalProperties': {
'type': 'number'}}}}
_dataset_fairness_schema = {
'type': 'object',
'properties': _dataset_fairness_properties}
def dataset_fairness_info(dataset):
"""
Inspect the AIF360 dataset and return its fairness metadata as JSON.
Parameters
----------
dataset : aif360.datasets.BinaryLabelDataset
Returns
-------
result : dict
JSON data structure with fairness information.
- favorable_label : number
Label value which is considered favorable (i.e. "positive").
- unfavorable_label : number
Label value which is considered unfavorable (i.e. "negative").
- protected_attribute_names : array **of** items : string
Subset of feature names for which fairness is desired.
- unprivileged_groups : array
Representation for unprivileged group.
- items : dict
Map from feature names to group-indicating values.
- privileged_groups : array
Representation for privileged group.
- items : dict
Map from feature names to group-indicating values.
"""
def attributes_to_groups(names, value_arrays):
result = [{}]
for i in range(len(names)):
next_result = []
for d in result:
for next_v in value_arrays[i]:
next_d = {**d, names[i]: next_v}
next_result.append(next_d)
result = next_result
return result
unprivileged_groups = attributes_to_groups(
dataset.protected_attribute_names,
dataset.unprivileged_protected_attributes)
privileged_groups = attributes_to_groups(
dataset.protected_attribute_names,
dataset.privileged_protected_attributes)
result = {
'favorable_label': dataset.favorable_label,
'unfavorable_label': dataset.unfavorable_label,
'protected_attribute_names': dataset.protected_attribute_names,
'unprivileged_groups': unprivileged_groups,
'privileged_groups': privileged_groups}
lale.type_checking.validate_schema(result, _dataset_fairness_schema)
return result
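# Hedged example of the structure returned above. The attribute name 'sex' and
# its 0/1 encoding are illustrative assumptions, not taken from a specific dataset:
#
#   {'favorable_label': 1.0,
#    'unfavorable_label': 0.0,
#    'protected_attribute_names': ['sex'],
#    'unprivileged_groups': [{'sex': 0.0}],
#    'privileged_groups': [{'sex': 1.0}]}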
class _PandasToDatasetConverter:
def __init__(self, favorable_label, unfavorable_label, protected_attribute_names):
lale.type_checking.validate_schema(favorable_label,
_dataset_fairness_properties['favorable_label'])
self.favorable_label = favorable_label
lale.type_checking.validate_schema(unfavorable_label,
_dataset_fairness_properties['unfavorable_label'])
self.unfavorable_label = unfavorable_label
lale.type_checking.validate_schema(protected_attribute_names,
_dataset_fairness_properties['protected_attribute_names'])
self.protected_attribute_names = protected_attribute_names
def __call__(self, X, y):
assert isinstance(X, pd.DataFrame), type(X)
assert isinstance(y, pd.Series), type(y)
assert X.shape[0] == y.shape[0], f'X.shape {X.shape}, y.shape {y.shape}'
df = pd.concat([X, y], axis=1)
assert not df.isna().any().any(), f'df\n{df}\nX\n{X}\ny\n{y}'
label_names = [y.name]
result = aif360.datasets.BinaryLabelDataset(
favorable_label=self.favorable_label,
unfavorable_label=self.unfavorable_label,
protected_attribute_names=self.protected_attribute_names,
df=df,
label_names=label_names)
return result
def _ensure_series(data, index, dtype, name):
if isinstance(data, pd.Series):
return data
result = pd.Series(data=data, index=index, dtype=dtype, name=name)
return result
class _BinaryLabelScorer:
def __init__(self, metric, favorable_label, unfavorable_label, protected_attribute_names, unprivileged_groups, privileged_groups):
assert hasattr(aif360.metrics.BinaryLabelDatasetMetric, metric)
self.metric = metric
self.fairness_info = {
'favorable_label': favorable_label,
'unfavorable_label': unfavorable_label,
'protected_attribute_names': protected_attribute_names,
'unprivileged_groups': unprivileged_groups,
'privileged_groups': privileged_groups}
lale.type_checking.validate_schema(
self.fairness_info, _dataset_fairness_schema)
self.pandas_to_dataset = _PandasToDatasetConverter(
favorable_label, unfavorable_label, protected_attribute_names)
def __call__(self, estimator, X, y):
predicted = estimator.predict(X)
y_pred = _ensure_series(predicted, X.index, y.dtype, y.name)
dataset_pred = self.pandas_to_dataset(X, y_pred)
fairness_metrics = aif360.metrics.BinaryLabelDatasetMetric(
dataset_pred,
self.fairness_info['unprivileged_groups'],
self.fairness_info['privileged_groups'])
method = getattr(fairness_metrics, self.metric)
result = method()
return result
def disparate_impact(favorable_label, unfavorable_label, protected_attribute_names, unprivileged_groups, privileged_groups):
"""
Make a scikit-learn compatible scorer given the fairness info.
Parameters
----------
favorable_label : number
Label value which is considered favorable (i.e. "positive").
unfavorable_label : number
Label value which is considered unfavorable (i.e. "negative").
protected_attribute_names : array **of** items : string
Subset of feature names for which fairness is desired.
unprivileged_groups : array
Representation for unprivileged group.
- items : dict
Map from feature names to group-indicating values.
privileged_groups : array
Representation for privileged group.
- items : dict
Map from feature names to group-indicating values.
Returns
-------
result : callable
Scorer that takes three arguments (estimator, X, y) and returns score.
"""
return _BinaryLabelScorer('disparate_impact', favorable_label, unfavorable_label, protected_attribute_names, unprivileged_groups, privileged_groups)
def statistical_parity_difference(favorable_label, unfavorable_label, protected_attribute_names, unprivileged_groups, privileged_groups):
"""
Make a scikit-learn compatible scorer given the fairness info.
Parameters
----------
favorable_label : number
Label value which is considered favorable (i.e. "positive").
unfavorable_label : number
Label value which is considered unfavorable (i.e. "negative").
protected_attribute_names : array **of** items : string
Subset of feature names for which fairness is desired.
unprivileged_groups : array
Representation for unprivileged group.
- items : dict
Map from feature names to group-indicating values.
privileged_groups : array
Representation for privileged group.
- items : dict
Map from feature names to group-indicating values.
Returns
-------
result : callable
Scorer that takes three arguments (estimator, X, y) and returns score.
"""
return _BinaryLabelScorer('statistical_parity_difference', favorable_label, unfavorable_label, protected_attribute_names, unprivileged_groups, privileged_groups)
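# Hedged usage sketch for the scorer factories above. The 'sex' column, its
# 0/1 encoding and the trained_pipeline/test_X/test_y names are illustrative
# assumptions:
#
#   di_scorer = disparate_impact(
#       favorable_label=1, unfavorable_label=0,
#       protected_attribute_names=['sex'],
#       unprivileged_groups=[{'sex': 0}],
#       privileged_groups=[{'sex': 1}])
#   score = di_scorer(trained_pipeline, test_X, test_y)   # scikit-learn scorer style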
_postprocessing_base_hyperparams = {
'estimator': {
'description': 'Nested supervised learning operator for which to mitigate fairness.',
'laleType': 'estimator'},
'favorable_label': _dataset_fairness_properties['favorable_label'],
'unfavorable_label': _dataset_fairness_properties['unfavorable_label'],
'protected_attribute_names': _dataset_fairness_properties['protected_attribute_names']}
class _BasePostprocessingImpl:
def __init__(self, mitigator, estimator, favorable_label, unfavorable_label, protected_attribute_names):
self.mitigator = mitigator
self.estimator = estimator
self.pandas_to_dataset = _PandasToDatasetConverter(
favorable_label, unfavorable_label, protected_attribute_names)
self.y_dtype = None
self.y_name = None
def fit(self, X, y):
self.y_dtype = y.dtype
self.y_name = y.name
y_true = y
self.estimator = self.estimator.fit(X, y_true)
predicted = self.estimator.predict(X)
y_pred = _ensure_series(predicted, X.index, self.y_dtype, self.y_name)
dataset_true = self.pandas_to_dataset(X, y_true)
dataset_pred = self.pandas_to_dataset(X, y_pred)
self.mitigator = self.mitigator.fit(dataset_true, dataset_pred)
return self
def predict(self, X):
predicted = self.estimator.predict(X)
y_pred = _ensure_series(predicted, X.index, self.y_dtype, self.y_name)
dataset_pred = self.pandas_to_dataset(X, y_pred)
dataset_out = self.mitigator.predict(dataset_pred)
_, y_out = dataset_to_pandas(dataset_out, return_only='y')
return y_out
_numeric_supervised_input_fit_schema = {
'type': 'object',
'required': ['X', 'y'],
'additionalProperties': False,
'properties': {
'X': {
'description': 'Features; the outer array is over samples.',
'type': 'array',
'items': {
'type': 'array',
'items': {'type': 'number'}}},
'y': {
'description': 'Target class labels; the array is over samples.',
'type': 'array',
'items': {'type': 'number'}}}}
_numeric_input_predict_schema = {
'type': 'object',
'required': ['X'],
'additionalProperties': False,
'properties': {
'X': {
'description': 'Features; the outer array is over samples.',
'type': 'array',
'items': {'type': 'array', 'items': {'type': 'number'}}}}}
_numeric_output_predict_schema = {
'description': 'Predicted class label per sample.',
'type': 'array', 'items': {'type': 'number'}}
# Pandinosaurus/pyquickhelper
# -*- coding: utf-8 -*-
"""
@file
@brief Magic parser to parse magic commands
"""
import argparse
import shlex
from ..loghelper.flog import noLOG
class MagicCommandParser(argparse.ArgumentParser):
"""
Adds method ``parse_cmd`` to :epkg:`*py:argparse:ArgumentParser`.
"""
def __init__(self, prog, *args, **kwargs):
"""
custom constructor, see :epkg:`*py:argparse:ArgumentParser`.
@param prog command name
@param args positional arguments
@param kwargs named arguments
"""
argparse.ArgumentParser.__init__(self, prog=prog, *args, **kwargs)
self._keep_args = {}
@staticmethod
def _private_get_name(*args):
"""
guesses the name of a parameter knowning the argument
given to @see me add_argument
"""
if args == ('-h', '--help'):
return "help"
typstr = str
for a in args:
if isinstance(a, typstr):
if a[0] != "-":
return a
elif a.startswith("--"):
return a[2:].replace("-", "_")
raise KeyError( # pragma: no cover
"Unable to find parameter name in: " + typstr(args))
def add_argument(self, *args, **kwargs):
"""
Overloads the method,
see `ArgumentParser <https://docs.python.org/3/library/argparse.html>`_.
Among the parameters:
* *no_eval*: avoid considering the parameter
value as a potential variable stored in the notebook workspace.
* *eval_type*: *type* can be used for parsing and *eval_type*
is the expected return type.
The method adds parameter *no_eval* to avoid considering the parameter
value as a potential variable stored in the notebook workspace.
"""
name = MagicCommandParser._private_get_name(*args)
if name in ["help", "-h", "--h"]:
super(argparse.ArgumentParser, self).add_argument(*args, **kwargs)
else:
self._keep_args[name] = (args, kwargs.copy())
if kwargs.get("no_eval", False):
del kwargs["no_eval"]
if kwargs.get("eval_type", None):
del kwargs["eval_type"]
super(argparse.ArgumentParser, self).add_argument(*args, **kwargs)
if args != ('-h', '--help'):
pass
elif kwargs.get("action", "") != "help":
raise ValueError( # pragma: no cover
"Unable to add parameter -h, --help, already taken for help.")
def has_choices(self, name):
"""
        tells if a parameter has choices
@param name parameter name
@return boolean
"""
if name not in self._keep_args:
raise KeyError(
"Unable to find parameter name: {0} in {1}".format(
name, list(self._keep_args.keys())))
return "choices" in self._keep_args[name][1]
def has_eval(self, name):
"""
        Tells if a parameter value should be considered as a variable or some python code
to evaluate.
@param name parameter name
@return boolean
"""
if name not in self._keep_args:
raise KeyError(
"Unable to find parameter name: {0} in {1}".format(
name, list(self._keep_args.keys())))
return "no_eval" not in self._keep_args[name][1]
def expected_type(self, name):
"""
Returns the expected type for the parameter.
@param name parameter name
@return type or None of unknown
"""
if name in self._keep_args:
return self._keep_args[name][1].get("type", None)
return None
def expected_eval_type(self, name):
"""
return the expected evaluation type for the parameter
(if the value is interpreter as a python expression)
@param name parameter name
@return type or None of unknown
"""
if name in self._keep_args:
return self._keep_args[name][1].get("eval_type", None)
return None
def parse_cmd(self, line, context=None, fLOG=noLOG):
"""
Splits line using `shlex <https://docs.python.org/3/library/shlex.html>`_
and call `parse_args <https://docs.python.org/3/library/
argparse.html#argparse.ArgumentParser.parse_args>`_
@param line string
@param context if not None, tries to evaluate expression the command may contain
@param fLOG logging function
@return list of strings
The function distinguishes between the type used to parse
the command line (type) and the expected type after the evaluation
*eval_type*.
"""
args = shlex.split(line, posix=False)
res = self.parse_args(args)
if context is not None:
up = {}
for k, v in res.__dict__.items():
if self.has_choices(k) or not self.has_eval(k):
up[k] = v
else:
ev = self.eval(v, context=context, fLOG=fLOG)
v_exp = self.expected_eval_type(k)
if (ev is not None and (v_exp is None or v_exp == type(ev)) and # pylint: disable=C0123
(type(v) != type(ev) or v != ev)): # pylint: disable=C0123
up[k] = ev
elif v_exp is not None and type(v) != v_exp: # pylint: disable=C0123
up[k] = v_exp(v)
if len(up) > 0:
for k, v in up.items():
res.__dict__[k] = v
return res
def eval(self, value, context, fLOG=noLOG):
"""
        Evaluates a string knowing the context.
        It returns *value* unchanged if it neither belongs to the context
        nor contains brackets or symbols (+, *);
        if the value cannot be evaluated (with function `eval <https://docs.python.org/3/library/functions.html#eval>`_),
        it also returns *value* unchanged.
@param value string
@param context something like ``self.shell.user_ns``
@param fLOG logging function
@return *value* or its evaluation
The method interprets variable inside list, tuple or dictionaries (for *value*).
"""
typstr = str
if isinstance(value, typstr):
if value in context:
return context[value]
elif isinstance(value, list):
return [self.eval(v, context, fLOG=fLOG) for v in value]
elif isinstance(value, tuple):
return tuple(self.eval(v, context, fLOG=fLOG) for v in value)
elif isinstance(value, dict):
return {k: self.eval(v, context, fLOG=fLOG) for k, v in value.items()}
if isinstance(value, typstr) and (
"[" in value or "]" in value or "+" in value or "*" in value or
value.split(".")[0] in context):
try:
res = eval(value, {}, context)
return res
except Exception as e: # pragma: no cover
fLOG(
"Unable to interpret {} due to {}.".format(typstr(value), e))
return value
return value
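    # Hedged usage sketch. The option names and the context variable "df" below
    # are illustrative assumptions; the class must be imported as part of the
    # package so the relative import above resolves:
    #
    #   parser = MagicCommandParser(prog="demo_magic")
    #   parser.add_argument("data", help="variable to look up in the context")
    #   parser.add_argument("--scale", type=float, default=1.0, eval_type=float)
    #   parser.add_argument("--tag", default="raw", no_eval=True)
    #   res = parser.parse_cmd("df --scale 2 --tag keep", context={"df": [1, 2, 3]})
    #   # res.data -> [1, 2, 3] (looked up in the context),
    #   # res.scale -> 2.0, res.tag -> "keep" (no_eval keeps it as text)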
# backtracking/match_parenthesis.py
def generate_parentheses(n):
"""
generate all the different ways you can have n parentheses
- nested
- adjacent
- mixture
each of the combos is a str
return an array of all the strings
good case of backtracking
n = 3
    two unique chars to worry about = '(' and ')'
1st -
for _ in n,
add an opener
for _ in n,
add a closer
((()))
2 options - add the next pair of parentheses adj, or nest it
    BUT we always add the open/close together!
(())()
Call Tree = n is the depth of the Tree
1st adj()
/ \
2nd nest() adj() = '(())()'
/ \ /\
3rd nest() adj() nest() adj()
'((()))' '(()())
'((()))
Helper functions:
1. nest() - adds inside the one we just added to the stack
2. adj() - adds to the end of the string of the permutation
    n is the var to use in the logic
base case:
n = 0
- pre: added all the ()
- post: added the permutation to the output
recursive case:
- n > 0:
add another () by adj()
- recurse
add another by nest()
- recurse
decrement n
# Assume n > 1
"""
global output
def add_adj(permutation, parentheses_left): # '()'
# find the last ')' char
for index_char in range(len(permutation) - 1, -1, -1):
char = permutation[index_char]
if char == ")":
# add the new '()' next to it
new_permutation = "".join(
[permutation[: index_char + 1], "()", permutation[index_char + 1 :]]
)
generate_permutations(new_permutation, parentheses_left - 1)
# return new_permutation
def add_nested(permutation):
# find the index of the last ')' char
# '(())'
# ic = 3
# last_closer_index = 3
# c = 2
# ci = 1
# oi = 0
for index_char in range(len(permutation) - 1, -1, -1):
char = permutation[index_char]
if char == ")":
last_closer_index = index_char
# while the index of the matching opener
closers = 1
closers_index = index_char - 1
while closers_index > 0:
if permutation[closers_index] == ")":
closers += 1
closers_index -= 1
else:
break
# find the index of the matching opening parenthesis
opening_index = last_closer_index - (2 * closers) + 1
# wrap the () in a new pair of parentheses
new_permutation = (
permutation[:opening_index]
+ "("
+ permutation[opening_index : last_closer_index + 1]
)
new_permutation += ")"
return new_permutation
def generate_permutations(permutation, parentheses_left):
# base case: added n ()'s
if parentheses_left == 0: # and len(permutation) == required_length:
# append to the output
if permutation not in output:
output.add(permutation)
return
# recursive case:
while parentheses_left > 0:
# add another by nest(), then recurse
nested_permutation = add_nested(permutation)
generate_permutations(nested_permutation, parentheses_left - 1)
# add another () by adj(), and do so recursively
adj_permutation = add_adj(permutation, parentheses_left)
# generate_permutations(adj_permutation, parentheses_left - 1)
# decrement n
parentheses_left -= 1
if parentheses_left == 0:
return
# init the output of all the permutations
output = set()
if n > 0:
# save the required length in a global variable
global required_length
required_length = 2 * n
# add the first parentheses
permutation = "()"
n -= 1
generate_permutations(permutation, n)
# return all the permutations
return list(output)
"""
output = [
'()()()',
'()(())',
'(())()',
'((()))',
]
n | permutation | ap | np | popped?
1 '()' | '()()' |. '(())'
------------------------------------------------------
0 '()(()) ' '()()()'. '()(())' X
------------------------------------------------------
0 '()()()' X
------------------------------------------------------
0 '()(())' X
------------------------------------------------------
0 '(())' | (())() | '((()))'
------------------------------------------------------
0 ` '(())()' X
------------------------------------------------------
0 '((()))' X
"""
if __name__ == "__main__":
print(generate_parentheses(3))
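    # For n = 3 the Catalan number C_3 = 5, so the complete set of valid
    # combinations is: '((()))', '(()())', '(())()', '()(())', '()()()'
    # (printed order will vary because `output` is a set).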