id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (string, 7 classes)
---|---|---|
29891 | import numpy as np
import math
from scipy.optimize import minimize
class Optimize():
def __init__(self):
self.c_rad2deg = 180.0 / np.pi
self.c_deg2rad = np.pi / 180.0
def isRotationMatrix(self, R) :
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
# print('n: ' + str(n))
return n < 1e-6
def Rot_Matrix_2_Euler_Angles(self, R):
assert(self.isRotationMatrix(R))
pitch = -math.asin(R[1, 2])
roll = -math.atan2(R[1, 0], R[1, 1])
yaw = -math.atan2(R[0, 2], R[2, 2])
return np.array([roll, pitch, yaw])
def Get_Init_Guess(self, l_vec, b_vec, f_vec):
f_vec = np.cross(b_vec, l_vec)
l_vec = np.cross(f_vec, b_vec)
l_norm = np.linalg.norm(l_vec)
l_vec /= l_norm
b_norm = np.linalg.norm(b_vec)
b_vec /= b_norm
f_norm = np.linalg.norm(f_vec)
f_vec /= f_norm
l_vec = l_vec.reshape(3, 1)
b_vec = b_vec.reshape(3, 1)
f_vec = f_vec.reshape(3, 1)
l = np.array([1, 0, 0]).reshape(1, 3)
b = np.array([0, 1, 0]).reshape(1, 3)
f = np.array([0, 0, 1]).reshape(1, 3)
R = l_vec @ l + b_vec @ b + f_vec @ f
assert (R.shape == (3, 3))
roll, pitch, yaw = self.Rot_Matrix_2_Euler_Angles(R)
# Return in (pitch, yaw, roll) order to match the (rx, ry, rz) layout that
# Objective and Euler_Angles_2_Vectors expect.
return np.array([pitch, yaw, roll])
def Euler_Angles_2_Vectors(self, rx, ry, rz):
'''
rx: pitch
ry: yaw
rz: roll
'''
ry *= -1
rz *= -1
R_x = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(rx), -np.sin(rx)],
[0.0, np.sin(rx), np.cos(rx)]])
R_y = np.array([[np.cos(ry), 0.0, np.sin(ry)],
[0.0, 1.0, 0.0],
[-np.sin(ry), 0.0, np.cos(ry)]])
R_z = np.array([[np.cos(rz), -np.sin(rz), 0.0],
[np.sin(rz), np.cos(rz), 0.0],
[0.0, 0.0, 1.0]])
R = R_y @ R_x @ R_z
l_vec = R @ np.array([1, 0, 0])
b_vec = R @ np.array([0, 1, 0])
f_vec = R @ np.array([0, 0, 1])
return np.array([l_vec, b_vec, f_vec])
def Objective(self, x, l_vec, b_vec, f_vec):
rx = x[0]
ry = x[1]
rz = x[2]
l_hat, b_hat, f_hat = self.Euler_Angles_2_Vectors(rx, ry, rz)
l_vec_dot = np.clip(l_hat[0] * l_vec[0] + l_hat[1] * l_vec[1] + l_hat[2] * l_vec[2], -1, 1)
b_vec_dot = np.clip(b_hat[0] * b_vec[0] + b_hat[1] * b_vec[1] + b_hat[2] * b_vec[2], -1, 1)
f_vec_dot = np.clip(f_hat[0] * f_vec[0] + f_hat[1] * f_vec[1] + f_hat[2] * f_vec[2], -1, 1)
return math.acos(l_vec_dot) ** 2 + math.acos(b_vec_dot) ** 2 + math.acos(f_vec_dot) ** 2
def Get_Ortho_Vectors(self, l_vec, b_vec, f_vec):
x0 = self.Get_Init_Guess(l_vec, b_vec, f_vec)
sol = minimize(self.Objective, x0, args=(l_vec, b_vec, f_vec), method='nelder-mead', options={'xatol': 1e-7, 'disp': False})
pitch_rad, yaw_rad, roll_rad = sol.x
v1, v2, v3 = self.Euler_Angles_2_Vectors(pitch_rad, yaw_rad, roll_rad)
return np.array([v1, v2, v3]) | StarcoderdataPython |
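A minimal usage sketch for the Optimize class above. The three input vectors are made-up, roughly orthogonal directions with a little noise; the only assumption is that the class is importable as written.

```python
import numpy as np

# Made-up, roughly-orthogonal direction vectors.
opt = Optimize()
l_vec = np.array([0.98, 0.05, 0.10])
b_vec = np.array([0.02, 1.01, 0.03])
f_vec = np.array([0.04, 0.01, 0.97])

# The result is always an orthonormal triad, intended to stay close to the inputs.
v1, v2, v3 = opt.Get_Ortho_Vectors(l_vec, b_vec, f_vec)
print(np.dot(v1, v2), np.dot(v2, v3))  # both close to 0
```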
1722368 | # coding: utf-8
import numpy as np
import pandas as pd
from utils.split_data import split_data
from utils.write_logs import write_log
import re
class Prefix:
def __init__(self, app_name='', data_name='data.csv', target='', alert_level=1):
df = pd.read_csv(data_name)
self.app_name = app_name
self.target = target
self.alert_level = alert_level
self.df = df[df['N_APPNAME'] == self.app_name]
self.datas = []
self.labels = []
def keyword(self, df, keyword, if_true=True):
pattern = re.compile('.*' + keyword + '.*')
if (pattern.match(df["N_SUMMARYCN"]) is not None) and (df['N_CUSTOMERSEVERITY'] == self.alert_level):
return if_true
else:
return not if_true
def sample(self, step=10, window_size=60, react_size=10, positive_range=120, min_log=5):
self.step = step * 60
self.window_size = window_size * 60
self.react_size = react_size * 60
self.positive_range = positive_range * 60
self.min_log = min_log
self.data_time = []
datas = []
labels = []
start_stamp = self.df['firsttimestamp'].min()
end_stamp = self.df['firsttimestamp'].max()
for i in range(start_stamp, (end_stamp - self.window_size - self.react_size - self.positive_range), self.step):
temp = self.df[(self.df['firsttimestamp'] >= i) & (self.df['firsttimestamp'] < (i + self.window_size))]
if temp.shape[0] < self.min_log:
continue
else:
if temp[(temp.apply(self.keyword, keyword=self.target, axis=1))].shape[0]:
temp = temp[(temp.apply(self.keyword, keyword=self.target, if_true=False, axis=1))]
tmp = temp['N_SUMMARYCN'].values
datas.append(list(tmp))
future = self.df[(self.df['firsttimestamp'] >= (i + self.window_size + self.react_size)) & (
self.df['firsttimestamp'] <= (
i + self.window_size + self.react_size + self.positive_range))]
self.data_time.append(i + self.window_size)
if future.shape[0]==0:
labels.append(0)
else:
if future[future.apply(self.keyword, keyword=self.target, axis=1)].shape[0]:
labels.append(1)
else:
labels.append(0)
self.datas = datas
self.labels = labels
print("---sample done---")
def split_data(self, split_percent=0.7):
split_timestamp = self.data_time[int(len(self.data_time) * split_percent)]
train_df = self.df[self.df['firsttimestamp'] < split_timestamp]
test_df = self.df[self.df['firsttimestamp'] >= split_timestamp]
self.train_alert_num = train_df[train_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
self.test_alert_num = test_df[test_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
train_data, train_label, test_data, test_label = split_data(self.datas, self.labels, split_percent)
train_label_num_1 = np.sum(np.array(train_label) == 1)
train_label_num_0 = np.sum(np.array(train_label) == 0)
test_label_num_1 = np.sum(np.array(test_label) == 1)
test_label_num_0 = np.sum(np.array(test_label) == 0)
logs = "\nAPPNAME:{}".format(self.app_name) + \
"\nalert to predict:{}".format(self.target) + \
"\ntraining={}".format(self.train_alert_num) + \
"\ntesting={}".format(self.test_alert_num) + \
"\nstep_size={}min".format(self.step//60) + \
"\nwindow_size={}h".format(self.window_size//3600) + \
"\nreact_size={}min".format(self.react_size//60) + \
"\npositive_range={}h".format(self.positive_range//3600) + \
"\nmin_log={}".format(self.min_log) + \
"\ntrain(+):{}".format(train_label_num_1) + \
"\ntrain(-):{}".format(train_label_num_0) + \
"\ntest(+):{}".format(test_label_num_1) + \
"\ntest(-):{}".format(test_label_num_0)
write_log(logs)
return train_data, train_label, test_data, test_label
| StarcoderdataPython |
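A hedged usage sketch for the Prefix pipeline above. The column names (N_APPNAME, N_SUMMARYCN, N_CUSTOMERSEVERITY, firsttimestamp) come from the code itself, but the CSV file name, application name and target keyword below are placeholders.

```python
# Hypothetical invocation; "alerts.csv", the app name and the keyword are placeholders.
prefix = Prefix(app_name='payment-service', data_name='alerts.csv',
                target='OutOfMemory', alert_level=1)
prefix.sample(step=10, window_size=60, react_size=10, positive_range=120, min_log=5)
train_data, train_label, test_data, test_label = prefix.split_data(split_percent=0.7)
print(len(train_data), len(test_data))
```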
3290058 | <gh_stars>1-10
import os, sys
import numpy as np
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from .BaseForecasters import *
class ResNet50_Office31(Forecaster):
def __init__(self, pretrained=True, n_labels = 31):
super().__init__()
model = torchvision.models.resnet50(pretrained=pretrained)
n_features = model.fc.in_features
fc = tc.nn.Linear(n_features, n_labels)
model.fc = fc
self.pred = model
def custom_parameters(self, lr):
param_group = []
for n, p in self.named_parameters():
if 'fc' in n:
param_group += [{'params': p, 'lr': lr}]
else:
param_group += [{'params': p, 'lr': lr * 0.1}]
return param_group
def forward(self, x):
return self.pred(x)
def feature(self, x):
x = self.pred.conv1(x)
x = self.pred.bn1(x)
x = self.pred.relu(x)
x = self.pred.maxpool(x)
x = self.pred.layer1(x)
x = self.pred.layer2(x)
x = self.pred.layer3(x)
x = self.pred.layer4(x)
x = self.pred.avgpool(x)
x = x.view(x.size(0), -1)
return x
class ResNet152(Forecaster):
def __init__(self, pretrained=True, n_labels = 1000, load_type='none'):
super().__init__()
self.load_type = load_type
model = torchvision.models.resnet152(pretrained=pretrained)
if self.load_type == 'none':
self.pred = model
elif 'feature' in self.load_type:
self.pred = model.fc
elif 'logit' in self.load_type:
self.pred = lambda xs: xs
else:
raise NotImplementedError
def forward(self, xs):
return self.pred(xs)
def feature(self, x):
if self.load_type == 'none':
x = self.pred.conv1(x)
x = self.pred.bn1(x)
x = self.pred.relu(x)
x = self.pred.maxpool(x)
x = self.pred.layer1(x)
x = self.pred.layer2(x)
x = self.pred.layer3(x)
x = self.pred.layer4(x)
x = self.pred.avgpool(x)
x = x.view(x.size(0), -1)
return x
elif 'feature' in self.load_type:
return x
else:
raise NotImplementedError
| StarcoderdataPython |
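A minimal sketch of how these forecasters might be driven, assuming the Forecaster base class from .BaseForecasters behaves like a standard torch.nn.Module; the learning rate and batch shape are illustrative only.

```python
import torch as tc

# Hypothetical usage of ResNet50_Office31 with its per-layer learning rates.
model = ResNet50_Office31(pretrained=True, n_labels=31)
optimizer = tc.optim.SGD(model.custom_parameters(lr=1e-3), lr=1e-3, momentum=0.9)

x = tc.randn(4, 3, 224, 224)   # dummy batch of four RGB images
logits = model(x)              # shape: (4, 31)
feats = model.feature(x)       # pooled backbone features, shape: (4, 2048)
```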
3397702 | from django.http import HttpResponse
from django.shortcuts import render
def login(request):
return HttpResponse("Superuser login")
def show_all_instruments(request):
return HttpResponse("All available instruments will be listed here")
def add_new_instrument(request):
return HttpResponse("Add new instrument that will be displayed and available to watch by users")
def superuser_panel(request):
return HttpResponse("Superuser panel with listed options") | StarcoderdataPython |
1761992 | <gh_stars>0
import sys, math
def parse_vec3(line):
return [float(line[1]), float(line[2]), float(line[3])]
def parse_vec2(line):
return [float(line[1]), float(line[2])]
def norm(vec):
l = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2])
if l != 0.0:
return [vec[0] / l, vec[1] / l, vec[2] / l]
return [1.0, 0.0, 0.0]
def run():
if not len(sys.argv) == 2:
print("you must provide a file to convert")
quit()
vertices = []
uvs = []
normals = []
faces = []
file = open(sys.argv[1], "r")
for line in file:
if line[0] == "#":
continue
line = line.strip().split(" ")
if line[0] == "v": # vertex
vertices.append(parse_vec3(line))
elif line[0] == "vt": # tex coord
uvs.append(parse_vec2(line))
elif line[0] == "vn": # normal
normals.append(parse_vec3(line))
elif line[0] == "f": # face
face = line[1:]
if len(face) != 3:
print(line)
raise Exception("not a triangle!")
continue
for i in range(0, len(face)):
face[i] = face[i].split("/")
for j in range(0, len(face[i])):
face[i][j] = int(face[i][j]) - 1
faces.append(face)
file.close()
tangents = []
bitangents = []
i = 0
while i < int(len(vertices) / 3) * 3:
v1 = vertices[i]
v2 = vertices[i + 2]
v3 = vertices[i + 1]
uv1 = uvs[i]
uv2 = uvs[i + 2]
uv3 = uvs[i + 1]
edge1 = [v2[0] - v1[0], v2[1] - v1[1], v2[2] - v1[2]]
edge2 = [v3[0] - v1[0], v3[1] - v1[1], v3[2] - v1[2]]
deltaUV1 = [uv2[0] - uv1[0], uv2[1] - uv1[1]]
deltaUV2 = [uv3[0] - uv1[0], uv3[1] - uv1[1]]
try:
f = 1.0 / (deltaUV1[0] * deltaUV2[1] - deltaUV2[0] * deltaUV1[1])
except ZeroDivisionError:
f = 1.0
tangent = [0.0, 0.0, 0.0]
bitangent = [0.0, 0.0, 0.0]
for j in range(0, 3):  # compute all three components of the tangent and bitangent
tangent[j] = f * (deltaUV2[1] * edge1[j] - deltaUV1[1] * edge2[j])
bitangent[j] = f * (-deltaUV2[0] * edge1[j] + deltaUV1[0] * edge2[j])
tangent = norm(tangent)
bitangent = norm(bitangent)
tangents.append(tangent)
tangents.append(tangent)
tangents.append(tangent)
bitangents.append(bitangent)
bitangents.append(bitangent)
bitangents.append(bitangent)
i += 3
while i < len(vertices):
tangents.append(tangent)
bitangents.append(bitangent)
i += 1
print("vertices: " + str(len(vertices)))
print("uvs: " + str(len(uvs)))
print("normals: " + str(len(normals)))
print("tangents: " + str(len(tangents)))
print("======================================")
vertex_data = []
for face in faces:
for vertex in face:
vertex_data.append([
vertices[vertex[0]],
uvs[vertex[1]],
normals[vertex[2]],
tangents[vertex[0]],
bitangents[vertex[0]]
])
data = ""
for i, vertex in enumerate(vertex_data):
line = "{}f,{}f,{}f,".format(vertex[0][0], vertex[0][1], vertex[0][2]) # position
line += "{}f,{}f,".format(vertex[1][0], vertex[1][1]) # uv
line += "{}f,{}f,{}f,".format(vertex[2][0], vertex[2][1], vertex[2][2]) # normal
# line += "{}f,{}f,{}f,".format(vertex[3][0], vertex[3][1], vertex[3][2]) # tangent
# line += "{}f,{}f,{}f,".format(vertex[4][0], vertex[4][1], vertex[4][2]) # bitangent
# line = "{"+"{}f,{}f,{}f".format(vertex[0][0], vertex[0][1], vertex[0][2])+"}," # position
# line += "{"+"{}f,{}f".format(vertex[1][0], vertex[1][1])+"}," # uv
# line += "{"+"{}f,{}f,{}f".format(vertex[2][0], vertex[2][1], vertex[2][2])+"}," # normal
# line += "{"+"{}f,{}f,{}f".format(vertex[3][0], vertex[3][1], vertex[3][2])+"}," # tangent
# line += "{"+"{}f,{}f,{}f".format(vertex[4][0], vertex[4][1], vertex[4][2])+"}," # bitangent
data += line
print("{(float[]){"+data.strip(",")+"},"+str(len(vertex_data))+"},")
# print("{"+data.strip(",")+"}")
if __name__ == "__main__":
run()
| StarcoderdataPython |
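The converter above expects an OBJ path in sys.argv[1] and prints a C-style float-array initializer. A hedged driver follows, where triangle.obj and convert_obj.py are placeholder file names.

```python
# Write a one-triangle OBJ file and run the converter script on it.
import subprocess
import textwrap

obj_source = textwrap.dedent("""\
    v 0.0 0.0 0.0
    v 1.0 0.0 0.0
    v 0.0 1.0 0.0
    vt 0.0 0.0
    vt 1.0 0.0
    vt 0.0 1.0
    vn 0.0 0.0 1.0
    f 1/1/1 2/2/1 3/3/1
""")
with open("triangle.obj", "w") as f:
    f.write(obj_source)

# "convert_obj.py" stands in for whatever name the script above is saved under.
subprocess.run(["python", "convert_obj.py", "triangle.obj"], check=True)
```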
1664764 | <gh_stars>100-1000
import subprocess
from optparse import OptionParser
import re
import time
def run(command):
try:
output = subprocess.Popen(command, shell=True,
universal_newlines=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, err) = output.communicate()
except Exception as e:
print("Failed to run %s, error: %s" % (command, e))
return out
def pods_in_nodes():
nodes = run("oc get pods -A -o wide | grep svt | awk '{print $8}'")
node_list = nodes.split('\n')
node_json= {}
for node in node_list:
if node in node_json.keys():
continue
node_json[node] = node_list.count(node)
print(node_json)
def see_if_error_builds(output_file):
print('here')
errorbuilds=run('oc get builds --all-namespaces | grep svt | egrep -v "Running|Complete|Creating|Pending"')
with open(output_file, "a") as f:
f.write(str(errorbuilds))
print ("error builds" + str(errorbuilds))
COUNTER=0
error_builds_list = errorbuilds.split("\n")
builds = []
for val in error_builds_list:
COUNTER = 0
error_builds = []
line = re.split(r'\s{2,}', val)
for word in line:
if ((COUNTER % 6 ) == 0 ):
error_builds.append(word)
elif ((COUNTER % 6 ) == 1):
error_builds.append(word)
builds.append(error_builds)
else:
break
COUNTER += 1
return builds
def get_error_builds(build_item):
namespace = build_item[0]
name = build_item[1]
#append to file
with open("build/" +name + namespace +".out", "w+") as f:
f.write("Log info for " + name + " build in namespace "+ namespace + '\n')
#logs = run("oc describe pod/" + str(name) + " -n " + namespace)
#f.write("Describe pod " + str(logs) + '\n')
logs = run("oc logs -f build/" + str(name) + " -n " + namespace)
f.write("Logs build " + str(logs) + '\n')
def check_error_build(global_output_file):
builds_list = see_if_error_builds(global_output_file)
skip_first = True
run("mkdir build")
for build_item in builds_list:
if skip_first:
skip_first = False
else:
get_error_builds(build_item)
def see_if_error(output_file):
print('here')
errorpods=run('oc get pods --all-namespaces | grep svt | egrep -v "Running|Complete|Creating|Pending"')
with open(output_file, "a") as f:
f.write(str(errorpods))
print ("errorpods" + str(errorpods) + str(type(errorpods)))
COUNTER=0
error_pods_list = errorpods.split("\n")
pods = []
for val in error_pods_list:
COUNTER = 0
error_pods = []
line = re.split(r'\s{2,}', val)
for word in line:
if ((COUNTER % 6 ) == 0 ):
error_pods.append(word)
elif ((COUNTER % 6 ) == 1):
error_pods.append(word)
pods.append(error_pods)
else:
break
COUNTER += 1
return pods
def get_error_logs(pod_item, output_file):
namespace = pod_item[0]
name = pod_item[1]
#append to file
with open("pod/" + name + namespace +".out", "w+") as f:
f.write("Debugging info for " + name + " in namespace "+ namespace + '\n')
logs = run("oc logs " + str(name) + " -n " + namespace)
f.write("Logs " + str(logs) + '\n')
def check_error(global_output_file):
pods_list = see_if_error(global_output_file)
skip_first = True
run("mkdir pod")
for pod_item in pods_list:
if skip_first:
skip_first = False
else:
get_error_logs(pod_item, global_output_file)
pods_in_nodes()
check_error("pod_error.out")
check_error_build("build_error.out") | StarcoderdataPython |
40037 | <filename>2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/03-Lists-Basics/02_Exercises/02_Multiples-List.py
# 2. Multiples List
# Write a program that receives two numbers (factor and count) and creates a list with length of the given count
# and contains only elements that are multiples of the given factor.
factor = int(input())
count = int(input())
multiples = []  # avoid shadowing the built-in name "list"
for counter in range(1, count + 1):
multiples.append(factor * counter)
print(multiples)
| StarcoderdataPython |
3201645 | <filename>Methodo_TD7-phase1.py
import nltk
from nltk.corpus import brown
import my_toolsv2 as mt
import json
def ex_constitution_corpus():
themes = {"news":["news", "reviews", "editorial"],
"literature":["science_fiction", "romance", "fiction", "mystery"],
"sciences":["learned"]}
nb_instances = 0
corpus = {}
for category in themes:
print(category, ":")
nb_doc = len(brown.fileids(categories=themes[category]))
print(" ",nb_doc, "documents")
nb_instances += nb_doc
corpus[category] = brown.fileids(categories=themes[category])
print("NB instances :", nb_instances)
return corpus
def get_train_test_corpus(corpus):
import random
train = {}
test = {}
for category, fileids in corpus.items():
x = int(20*len(fileids)/100)  # number of instances reserved for the test set
test[category] = []
print("Taking %s of %s documents for the test set" % (str(x), str(len(fileids))))
for i in range(x):
id_doc = random.randint(0, len(fileids)-1)  # pick a random index
test[category].append(fileids[id_doc])  # store the document in the test set
fileids.remove(fileids[id_doc])
train[category] = fileids  # the rest goes into the train set
dataset = {"train": train, "test":test}
return dataset
### Full corpus
corpus = ex_constitution_corpus()
### Train/test split
test_train = get_train_test_corpus(corpus)
### Store the result
test_train_json = json.dumps(test_train, indent=2)
chemin = "train_test.json"
mt.ecrire(test_train_json, chemin)
print("Dataset stored in %s" % chemin)
| StarcoderdataPython |
1613853 | <reponame>krazos/southwest-alerts<filename>southwestalerts/southwest.py
import json
import time
import asyncio
from pyppeteer import launch
from pyppeteer.network_manager import Request
import requests
BASE_URL = 'https://mobile.southwest.com'
class Southwest(object):
def __init__(self, username, password, headers):
self._session = _SouthwestSession(username, password, headers)
def get_upcoming_trips(self):
return self._session.getb(
'/api/customer/v1/accounts/account-number/{}/upcoming-trips'.format(self._session.account_number))
def start_change_flight(self, record_locator, first_name, last_name):
"""Start the flight change process.
This returns the flight including itinerary."""
resp = self._session.get(
'/api/extensions/v1/mobile/reservations/record-locator/{record_locator}?first-name={first_name}&last-name={last_name}&action=CHANGE'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name
))
return resp
def get_available_change_flights(self, record_locator, first_name, last_name, departure_date, origin_airport,
destination_airport):
"""Select a specific flight and continue the checkout process."""
url = '/api/extensions/v1/mobile/reservations/record-locator/{record_locator}/products?first-name={first_name}&last-name={last_name}&is-senior-passenger=false&trip%5B%5D%5Borigination%5D={origin_airport}&trip%5B%5D%5Bdestination%5D={destination_airport}&trip%5B%5D%5Bdeparture-date%5D={departure_date}'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name,
origin_airport=origin_airport,
destination_airport=destination_airport,
departure_date=departure_date
)
return self._session.get(url)
def get_price_change_flight(self, record_locator, first_name, last_name, product_id):
url = '/api/reservations-api/v1/air-reservations/reservations/record-locator/{record_locator}/prices?first-name={first_name}&last-name={last_name}&product-id%5B%5D={product_id}'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name,
product_id=product_id
)
return self._session.get(url)
def get_cancellation_details(self, record_locator, first_name, last_name):
url = '/api/reservations-api/v1/air-reservations/reservations/record-locator/{record_locator}?first-name={first_name}&last-name={last_name}&action=CANCEL'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name
)
return self._session.get(url)
def get_available_flights(self, departure_date, origin_airport, destination_airport, currency='Points'):
url = '/api/mobile-air-booking/v1/mobile-air-booking/page/flights/products?origination-airport={origin_airport}&destination-airport={destination_airport}&departure-date={departure_date}&number-adult-passengers=1&number-senior-passengers=0¤cy=PTS'.format(
origin_airport=origin_airport,
destination_airport=destination_airport,
departure_date=departure_date
)
return self._session.get(url)
def get_available_flights_dollars(self, departure_date, origin_airport, destination_airport):
url = '/api/mobile-air-booking/v1/mobile-air-booking/page/flights/products?origination-airport={origin_airport}&destination-airport={destination_airport}&departure-date={departure_date}&number-adult-passengers=1&number-senior-passengers=0¤cy=USD'.format(
origin_airport=origin_airport,
destination_airport=destination_airport,
departure_date=departure_date
)
return self._session.get(url)
class _SouthwestSession():
def __init__(self, username, password, headers):
self._session = requests.Session()
self._login(username, password, headers)
def _login(self, username, password, headers):
data = requests.post(BASE_URL + '/api/customer/v1/accounts/login', json={
'accountNumberOrUserName': username, 'password': password},
headers={
'X-API-Key': headers['x-api-key'],
'Content-Type': 'application/vnd.swacorp.com.accounts.login-v1.0+json',
'User-Agent': None, 'Connection': None, 'Accept-Encoding': None,
}).json()
self.account_number = data['accessTokenDetails']['accountNumber']
self.access_token = data['accessToken']
self.headers = headers
def get(self, path, success_codes=[200]):
resp = self._session.get(self._get_url(path), headers=self._get_headers_all(self.headers))
return self._parsed_response(resp, success_codes=success_codes)
def getb(self, path, success_codes=[200]):
resp = self._session.get(self._get_url(path), headers=self._get_headers_brief(self.headers))
return self._parsed_response(resp, success_codes=success_codes)
def post(self, path, payload, success_codes=[200]):
resp = self._session.post(self._get_url(path), data=json.dumps(payload),
headers=self._get_headers_all(self.headers))  # _get_headers() does not exist on this class
return self._parsed_response(resp, success_codes=success_codes)
@staticmethod
def _get_url(path):
return '{}{}'.format(BASE_URL, path)
def _get_headers_brief(self, headers):
default = {
'token': (self.access_token if hasattr(self, 'access_token') else None),
'x-api-key': headers['x-api-key'],
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3494.0 Safari/537.36'
# 'Content-Type': 'application/vnd.swacorp.com.accounts.login-v1.0+json',
# 'User-Agent': None, 'Connection': None, 'Accept-Encoding': None,
# 'Accept': 'application/json',
}
tempheaders = {**headers, **default}
return default
def _get_headers_all(self, headers):
default = {
'token': (self.access_token if hasattr(self, 'access_token') else None),
'x-api-key': headers['x-api-key'],
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3494.0 Safari/537.36'
# 'Content-Type': 'application/vnd.swacorp.com.accounts.login-v1.0+json',
# 'User-Agent': None, 'Connection': None, 'Accept-Encoding': None,
# 'Accept': 'application/json',
}
tempheaders = {**headers, **default}
return tempheaders
@staticmethod
def _parsed_response(response, success_codes=[200]):
if response.status_code not in success_codes:
print(response.text)
raise Exception(
'Invalid status code received. Expected {}. Received {}.'.format(success_codes, response.status_code))
return response.json()
| StarcoderdataPython |
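A hedged usage sketch for the Southwest wrapper above. The username, password and x-api-key value are placeholders, and the mobile endpoints are unofficial, so the calls may fail if the service has changed.

```python
# Placeholder credentials and API key.
headers = {"x-api-key": "YOUR_MOBILE_API_KEY"}
sw = Southwest("my-rapid-rewards-id", "my-password", headers)

trips = sw.get_upcoming_trips()
flights = sw.get_available_flights_dollars(
    departure_date="2020-05-01",
    origin_airport="AUS",
    destination_airport="DAL",
)
```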
177835 | <reponame>cmccandless/stunning-pancake
#!/usr/bin/env python
# https://projecteuler.net/problem=25
import unittest
def fibon():
a = 1
b = 1
yield 0
yield a
yield b
while True:
c = a + b
yield c
a = b
b = c
def answer(ndigits=1000):
f = enumerate(fibon())
index, x = next(f)
while len(str(x)) < ndigits:
index, x = next(f)
return index
def run():
# print(answer(3))
print(answer())
class Test25(unittest.TestCase):
def test_expected(self):
expected = 4782
self.assertEqual(answer(), expected)
if __name__ == '__main__':
run()
| StarcoderdataPython |
1752812 | <filename>middle_tier/services/loader.py
import json
from exceptions import MiddleTierException
from services.service import Service
class NotFoundServiceException(MiddleTierException):
pass
class NotFoundSecurityServiceException(MiddleTierException):
pass
CONFIG_PATH = "/opt/middle_tier/services.json"
class ServiceLoader(object):
"""
Will find specified modules and try to load it
"""
def __init__(self) -> None:
super().__init__()
self.services = []
self.auth_services = []
def load(self) -> list:
with open(CONFIG_PATH) as services_file:
services_json = json.load(services_file)
self.services = self.process_services(services_json)
return self.services
def process_services(self, services_json):
services = []
for service in services_json.get("services", []):
auth = service.get("auth")
auth_service = None
if auth:
auth_service = self.get_auth_service(services_json, auth)
services.append(Service(service, auth=auth_service))
return services
def get_services(self):
return self.services
def find_service(self, request):
for service in self.services:
if service.can_handle(request):
return service
return None
def get_auth_service(self, service_definition, service_id):
for auth_service in service_definition.get("auth", []):
if auth_service["id"] == service_id:
return auth_service
raise NotFoundSecurityServiceException("There is not auth service: {}".format(service_id))
| StarcoderdataPython |
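For reference, a hedged example of the shape of /opt/middle_tier/services.json that ServiceLoader.load() expects. The top-level "services" and "auth" lists, the per-service "auth" reference and the auth "id" field come from the code; every other key and value is invented, since the Service class is not shown here.

```python
# Hypothetical configuration consumed by ServiceLoader.load().
example_config = {
    "auth": [
        {"id": "basic-auth", "type": "basic"}  # "type" is an invented field
    ],
    "services": [
        {"name": "users", "url": "http://users.internal", "auth": "basic-auth"},
        {"name": "health", "url": "http://health.internal"}  # no auth attached
    ]
}
```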
1657070 | """
Main entrance to commandline actions
"""
import click
from sveetoy_cli.cli.version import version_command
from sveetoy_cli.cli.colors import colors_command
from sveetoy_cli.cli.export import export_command
from sveetoy_cli.cli.schemes import schemes_command
from sveetoy_cli.logs import init_logger
# Help alias on '-h' argument
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Default logger conf
SVEETOY_LOGGER_CONF = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', None)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--verbose', type=click.IntRange(min=0, max=5), default=4,
metavar='INTEGER',
help="An integer between 0 and 5, where '0' make a totaly "
"silent output and '5' set level to DEBUG (the most verbose "
"level). Default to '4' (Info level).")
@click.pass_context
def cli_frontend(ctx, verbose):
"""
Sveetoy Commandline
"""
printout = True
if verbose == 0:
verbose = 1
printout = False
# Verbosity is the inverse of logging levels
levels = [item for item in SVEETOY_LOGGER_CONF]
levels.reverse()
# Init the logger config
root_logger = init_logger(levels[verbose], printout=printout)
# Init the default context that will be passed to commands
ctx.obj = {
'verbosity': verbose,
'logger': root_logger,
}
# Attach commands methods to the main grouper
cli_frontend.add_command(colors_command, name="colors")
cli_frontend.add_command(export_command, name="export")
cli_frontend.add_command(schemes_command, name="schemes")
cli_frontend.add_command(version_command, name="version")
| StarcoderdataPython |
1654096 | <reponame>rsiemens/nidus
import os
import shutil
from unittest import TestCase
from unittest.mock import Mock
from nidus.log import LogEntry
from nidus.state import RaftState
class RaftStateTestCases(TestCase):
test_log_dir = "test_nidus_logs"
def setUp(self):
os.makedirs(self.test_log_dir)
def tearDown(self):
if os.path.exists(self.test_log_dir):
shutil.rmtree(self.test_log_dir)
def test_append_entires_success(self):
state = RaftState(self.test_log_dir, "node-0")
state.status = state.FOLLOWER
state.current_term = 1
success = state.append_entries(-1, -1, [LogEntry(1, ["SET", "foo", "bar"])])
self.assertTrue(success)
self.assertEqual(state.log.as_list(), [LogEntry(1, ["SET", "foo", "bar"])])
success = state.append_entries(0, 1, [LogEntry(1, ["DEL", "foo"])])
self.assertTrue(success)
self.assertEqual(
state.log.as_list(),
[LogEntry(1, ["SET", "foo", "bar"]), LogEntry(1, ["DEL", "foo"])],
)
def test_append_entries_failure(self):
state = RaftState(self.test_log_dir, "node-0")
state.status = state.FOLLOWER
state.current_term = 1
state.log.append(LogEntry(1, ["SET", "foo", "bar"]))
# prev_term mismatch
success = state.append_entries(0, 2, [LogEntry(1, ["SET", "baz", "buzz"])])
self.assertFalse(success)
# log didn't change
self.assertEqual(state.log.as_list(), [LogEntry(1, ["SET", "foo", "bar"])])
# prev_index doesn't exist
success = state.append_entries(10, 1, [LogEntry(1, ["SET", "baz", "buzz"])])
self.assertFalse(success)
# log didn't change
self.assertEqual(state.log.as_list(), [LogEntry(1, ["SET", "foo", "bar"])])
| StarcoderdataPython |
1765048 | <reponame>congltk1234/LaptopAnalyst
import scrapy
import re
from ..items import LaptopItem
from scrapy_selenium import SeleniumRequest
from selenium import webdriver
from scrapy.utils.project import get_project_settings
class TikiSpider(scrapy.Spider):
name = 'tiki'
allowed_domains = ['tiki.vn']
start_urls = ['https://tiki.vn/may-tinh-xach-tay-laptop-dell-latitude-3420-l3420i3ssd-intel-core-i3-1115g4-14-inch-hd-ram-8gb-256gb-ssd-nvme-intel-uhd-graphics-fedora-os-hang-chinh-hang-p113577973.html?itm_campaign=CTP_YPD_TKA_PLA_UNK_ALL_UNK_UNK_UNK_UNK_X.35831_Y.271589_Z.1223547_CN.Laptop&itm_medium=CPC&itm_source=tiki-ads&spid=113577975',
'https://tiki.vn/apple-macbook-air-2020-m1-13-inchs-hang-chinh-hang-p124742926.html?spid=88231360']
def parse(self, response):
product = response.xpath('///*[@class="title"]/text()').get()
price = response.xpath('//*[@class="product-price__list-price"]/text()').get()
tag = 0 #response.xpath('//*[@id="__next"]/div[1]/main/div[1]/div/div/a[4]/span/text()').get()
discount = 0 #response.xpath('//*[@class="product-price__discount-rate"]/text()').get()
new_price = 0 #response.xpath('//*[@class="product-price__current-price"]/text()').get()
brand = 0 #response.xpath('//*[@id="__next"]/div[1]/main/div[3]/div[1]/div[3]/div[1]/div[1]/span/h6/a/text()').get()
shop = 0 #response.xpath('//*[@id="__next"]/div[1]/main/div[3]/div[1]/div[3]/div[2]/div[2]/div/div[1]/div[1]/a/div/span/span/text()').get()
number_reviews = 0 #response.xpath('//a[@class="pdp_main_view_review"]/text()').get()
mall = 0 #response.xpath('//div[@class="WebpImg__StyledImg-sc-h3ozu8-0 fWjUGo badge-img"]/img[1]').get()
try:
if len(mall)>0:
mall = "Mall"
except:
mall = "Non-Mall"
# Store the scraped fields in the LaptopItem instance
item = LaptopItem()
item["product"] = product
item['price'] = price
item['tag'] = tag
item['discount'] = discount
item['new_price'] = new_price
item['brand'] = brand
item['shop'] = shop
item['mall'] = mall
item['number_reviews'] = number_reviews
yield item
| StarcoderdataPython |
3222451 | import asyncio
from Discord.discord import Discord
from FTXwrapper.methods import FTXMethods
class StackBot(FTXMethods):
def __init__(self, account_name, market):
super().__init__(account_name=account_name)
self.market = market
def run(self):
res = asyncio.run(self.single_market(self.market))
change24h = res['change24h']
if change24h <= -0.1:
Discord(
url='stack',
msg=f'Day-over-day change: {change24h * 100}%'
)
price = res['price']
balance = asyncio.run(self.get_free_balance('USD'))
order = asyncio.run(self.place_order(
market=self.market,
side='buy',
type_='market',
size=balance/price
))
if order.get('price') is None:
Discord(
url='stack',
msg=f'Recurring buy failed {order}'
)
else:
Discord(
url='stack',
msg=f'{self.market} recurring buy, {order["price"]}$'
)
| StarcoderdataPython |
3351450 | from sqlalchemy import Column, String, Date, Float
exchange_rate_item_table_name = 'exchange_rate_item'
def get_exchange_rate_item_db(base, table_name=exchange_rate_item_table_name):
class ExchangeRateItem(base):
__tablename__ = table_name
date = Column(Date, primary_key=True)
bkpr = Column(Float, nullable=False)
cur_nm = Column(String, nullable=False)
cur_unit = Column(String, primary_key=True)
deal_bas_r = Column(Float, nullable=False)
kftc_bkpr = Column(Float, nullable=False)
kftc_deal_bas_r = Column(Float, nullable=False)
result = Column(Float, nullable=False)
ten_dd_efee_r = Column(Float, nullable=False)
ttb = Column(Float, nullable=False)
tts = Column(Float, nullable=False)
yy_efee_r = Column(Float, nullable=False)
def __init__(self, date, bkpr, cur_nm, cur_unit, deal_bas_r, kftc_bkpr, kftc_deal_bas_r, result, ten_dd_efee_r,
ttb, tts, yy_efee_r):
self.date = date
self.bkpr = bkpr
self.cur_nm = cur_nm
self.cur_unit = cur_unit
self.deal_bas_r = deal_bas_r
self.kftc_bkpr = kftc_bkpr
self.kftc_deal_bas_r = kftc_deal_bas_r
self.result = result
self.ten_dd_efee_r = ten_dd_efee_r
self.ttb = ttb
self.tts = tts
self.yy_efee_r = yy_efee_r
def __repr__(self):
return "<ExchangeRateItem date:{self.date}, bkpr:{self.bkpr}, cur_nm:{self.cur_nm}, " \
"cur_unit:{self.cur_unit}, deal_bas_r:{self.deal_bas_r}, kftc_bkpr:{self.kftc_bkpr}, " \
"kftc_deal_bas_r:{self.kftc_deal_bas_r}, result:{self.result}, ten_dd_efee_r:{self.ten_dd_efee_r}," \
" ttb:{self.ttb}, tts:{self.tts}, yy_efee_r:{self.yy_efee_r}>".format(self=self)
return ExchangeRateItem
| StarcoderdataPython |
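A hedged sketch that wires the factory above to an in-memory SQLite database, assuming SQLAlchemy 1.4+ (for sqlalchemy.orm.declarative_base and Session as a context manager). The exchange-rate numbers are illustrative only.

```python
import datetime

from sqlalchemy import create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()
ExchangeRateItem = get_exchange_rate_item_db(Base)

engine = create_engine("sqlite://")   # in-memory database for the example
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(ExchangeRateItem(
        date=datetime.date(2021, 1, 4), bkpr=1086.0, cur_nm="US Dollar",
        cur_unit="USD", deal_bas_r=1086.5, kftc_bkpr=1086.0, kftc_deal_bas_r=1086.5,
        result=1.0, ten_dd_efee_r=0.0, ttb=1075.8, tts=1097.2, yy_efee_r=0.0,
    ))
    session.commit()
```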
1656522 | <reponame>i25959341/Happynodes
import psycopg2
import time
import os
import socket
import requests
import json
from discoverSQL import NodeObject, isOpen, getRemoteNodes, checkEndpoint
from discoverSQL import insertNewNodes, insertNewEndpoints, insertNewEndpointsInfo
host = str(os.environ['PGHOST'])
databasename = str(os.environ['PGDATABASE'])
user = str(os.environ['PGUSER'])
password = str(os.environ['PGPASSWORD'])
connection_str = "dbname='{}' user='{}' host='{}' password='{}'".format(
databasename, user, host, password)
fakeneighbours = [{'address': '172.16.58.3', 'port': 10333}, {'address': '172.16.17.32', 'port': 10333}]
def test_isOpen():
result = isOpen("google.com", 80)
assert result == True
def test_isNotOpen():
result = isOpen("google.com", 123)
assert result == False
def test_getRemoteNodes():
remoteNodes=getRemoteNodes(fakeneighbours)
assert len(remoteNodes)==2
def test_correct_endpoint_checkEndpoint():
node = NodeObject(None, "node2.nyc3.bridgeprotocol.io", "node2.nyc3.bridgeprotocol.io")
endpoint, neighbours = checkEndpoint(node.endpointHttp10332)
assert len(neighbours)!=0 and endpoint[1]==True
def test_incorrect_endpoint_checkEndpoint():
node = NodeObject(None, "node2.nyc3.bridgeprotocol.io", "node2.nyc3.bridgeprotocol.io")
endpoint, neighbours = checkEndpoint(node.endpointHttp10331)
assert len(neighbours)==0 and endpoint[1]==False
def test_insertNewNodes():
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
fake1 = NodeObject(None, "karlson.com", "1.2.3.4")
fake1.endpointHttp10331 = (fake1.endpointHttp10331[0], True)
fake2 = NodeObject(None, "wing.com", "4.1.2.4")
fake2.endpointHttp10331 = (fake2.endpointHttps10331[0], True)
fakeEndpoints = [fake1, fake2]
insertNewNodes(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""select *
FROM public.nodes
where hostname = %s""",
[fake1.ip])
result = cursor.fetchall()
assert result[0][1]==fake1.ip and len(result)!=0
cursor.execute("""select *
from public.nodes
where hostname = %s""",
[fake2.ip])
result = cursor.fetchall()
assert result[0][1]==fake2.ip and len(result)!=0
cursor.execute("""delete
from public.nodes
where hostname = %s""",
[fake1.ip])
cursor.execute("""delete
from public.nodes
where hostname = %s""",
[fake2.ip])
conn.commit()
conn.close()
def test_insertEndpoint():
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
fake1 = NodeObject(None, "karlson.com", "1.2.3.4")
fake1.endpointHttp10331 = (fake1.endpointHttp10331[0], True)
fake2 = NodeObject(None, "wing.com", "4.1.2.4")
fake2.endpointHttps10331 = (fake2.endpointHttps10331[0], True)
fakeEndpoints = [fake1, fake2]
insertNewNodes(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""select *
FROM public.nodes
where hostname = %s""",
[fake1.ip])
result1 = cursor.fetchall()
assert result1[0][1]==fake1.ip and len(result1)!=0
cursor.execute("""select *
from public.nodes
where hostname = %s""",
[fake2.ip])
result2 = cursor.fetchall()
assert result2[0][1]==fake2.ip and len(result2)!=0
insertNewEndpoints(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""select id, node_id, protocol, port
FROM public.connection_endpoints
where node_id = %s""",
[result1[0][0]])
result = cursor.fetchall()
assert result[0][2]=="http" and result[0][3]==10331
cursor.execute("""select id, node_id, protocol, port
FROM public.connection_endpoints
where node_id = %s""",
[result2[0][0]])
result = cursor.fetchall()
assert result[0][2]=="https" and result[0][3]==10331
cursor.execute("""delete
FROM public.connection_endpoints
where node_id = %s""",
[result1[0][0]])
cursor.execute("""delete
FROM public.connection_endpoints
where node_id = %s""",
[result2[0][0]])
cursor.execute("""delete
from public.nodes
where hostname = %s""",
[fake1.ip])
cursor.execute("""delete
from public.nodes
where hostname = %s""",
[fake2.ip])
conn.commit()
conn.close()
def test_insertNewEndpointsInfo():
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
fake1 = NodeObject(None, "google.com", "google.com")
fake1.endpointHttp10331 = (fake1.endpointHttp10331[0], True)
fakeEndpoints = [fake1]
insertNewNodes(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""select *
FROM public.nodes
where hostname = %s""",
[fake1.ip])
nodeResult = cursor.fetchall()
assert nodeResult[0][1]==fake1.ip and len(nodeResult)!=0
insertNewEndpoints(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""select id, node_id, protocol, port
FROM public.connection_endpoints
where node_id = %s""",
[nodeResult[0][0]])
connectionEndpoint = cursor.fetchall()
assert connectionEndpoint[0][2]=="http" and connectionEndpoint[0][3]==10331
insertNewEndpointsInfo(cursor, fakeEndpoints)
conn.commit()
cursor.execute("""SELECT id, connection_id, locale
FROM public.locale
where connection_id = %s""",
[connectionEndpoint[0][0]])
locale = cursor.fetchall()
assert len(locale)!=0
cursor.execute("""SELECT id, connection_id, location
FROM public.location
where connection_id = %s""",
[connectionEndpoint[0][0]])
location = cursor.fetchall()
assert len(location)!=0
cursor.execute("""SELECT id, connection_id, lat, long
FROM public.coordinates
where connection_id = %s""",
[connectionEndpoint[0][0]])
coordinates = cursor.fetchall()
assert len(coordinates)!=0
cursor.execute("""delete
FROM public.locale
where connection_id = %s""",
[connectionEndpoint[0][0]])
cursor.execute("""delete
FROM public.location
where connection_id = %s""",
[connectionEndpoint[0][0]])
cursor.execute("""delete
FROM public.coordinates
where connection_id = %s""",
[connectionEndpoint[0][0]])
cursor.execute("""delete
FROM public.connection_endpoints
where node_id = %s""",
[nodeResult[0][0]])
cursor.execute("""delete
from public.nodes
where hostname = %s""",
[fake1.ip])
conn.commit()
conn.close()
| StarcoderdataPython |
113359 | class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
sign = 1 if (dividend >= 0) == (divisor >= 0) else -1
dd = abs(dividend)
dr = abs(divisor)
if dd < dr:
return 0
if dd == dr:
return sign
if dr == 1:
dd = dd if sign > 0 else -dd
return min(2 ** 31 - 1, dd)
res = 1
while dd > dr + dr:
dr += dr
res += res
return sign * (res + self.divide(dd - dr, abs(divisor))) | StarcoderdataPython |
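A few quick checks for the divide implementation above, following LeetCode 29's truncate-toward-zero and 32-bit clamping semantics:

```python
s = Solution()
print(s.divide(10, 3))            # 3
print(s.divide(7, -3))            # -2  (truncates toward zero)
print(s.divide(-2147483648, -1))  # 2147483647  (clamped to the 32-bit signed maximum)
```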
3336156 | <reponame>iahuang/scratch-gcc
import os
import json
from . import SPArgumentName, SPArithmetic, SPAssign, SPConstant, SPFunctionDefinition, SPModule, SPNode, SPVariableName
from .. import scratch
class CompilationContext:
def __init__(self):
self.enclosingFunction: SPFunctionDefinition = None
self.functionCalls = []
self.supportingBlocks = []
class SPModuleCompiler:
def __init__(self, templateFile=None, templateTarget=None):
"""
The SPModule Compiler works by loading an existing sb3 file to use as a template,
building the necessary code blocks, and loading them into an existing sprite in that template file.
templateFile specifies the file to use, if none is specified, then it defaults to using the built-in template
templateTarget specifies the sprite to load the program blocks into
"""
if templateFile == None:
thisPath = os.path.dirname(os.path.realpath(__file__))
templateFile = thisPath+"/resources/scratchpy_compiler_template.sb3"
templateTarget = "__main__"
self.templateFile = templateFile
self.templateTarget = templateTarget
self.module: SPModule = None
# The working environment - an instance of a scratch project loaded from the template
self.scratchProj = scratch.ScratchProject(self.templateFile)
self.context = CompilationContext()
self.scratchProj.meta["description"] = "File auto-generated by scratch-gcc https://github.com/iahuang/scratch-gcc"
def compileModule(self, module):
self.module = module
# Start by creating an scratch project instance to work with
codeTarget = self.scratchProj.getTarget(self.templateTarget)
stage = self.scratchProj.getStage()
# add variables to stage
for variable in self.module.getVariables():
codeTarget.addVariable(variable.name, variable.value)
# create blocks for functions
for function in self.module.functionBlocks:
# Create function def block at [0, 0]
defBlock = codeTarget.createBlock([0, 0])
defBlock.opcode = "procedures_definition"
# Add prototype block
protoBlock = codeTarget.createBlock(parent=defBlock)
protoBlock.opcode = "procedures_prototype"
protoBlock.shadow = True
# Link prototype block to function def block
defBlock.inputs.append(scratch.BlockInput("custom_block", 1, protoBlock.id))
# Create argument ids
argIds = []
argNames = []
argDefaults = []
for arg in function.args:
argIds.append(arg.id)
argNames.append(arg.name)
argDefaults.append(arg.getArgumentDefault())
proccodeArgs = ""
# Proccode: "[name] [arg1_type] [arg2_type] etc."
for arg in function.args:
proccodeArgs+=" "+("%b" if arg.isBoolean else "%s")
# Set prototype block data
protoBlock.mutation = {
"tagName": "mutation",
"children": [],
"proccode": f"{function.fname}"+proccodeArgs,
"argumentids": json.dumps(argIds),
"argumentnames": json.dumps(argNames),
"argumentdefaults": json.dumps(argDefaults),
"warp": "false"
}
# Add argument reporters
reporters = []
for arg in function.args:
reporter = codeTarget.createBlock(parent=protoBlock)
reporter.opcode = "argument_reporter_string_number"
reporter.fields.append(scratch.BlockField("VALUE", [arg.name, None]))
protoBlock.inputs.append(scratch.BlockInput(arg.id, 1, reporter.id))
# compile the body of the function
prevNode = defBlock
for node in function.body:
blocks = self.spNodeToScratchBlocks(codeTarget, node)
for block in blocks:
prevNode.nextId = block.id
block.parentId = prevNode.id
prevNode = block
def _spNodeToBlockInput(self, name, target: scratch.ScratchTarget, node: SPNode)->scratch.Block:
if type(node) == SPConstant:
return scratch.BlockInput(name, 1, [4, node.value])
if type(node) == SPArgumentName:
reporter = target.createBlock()
reporter.opcode = "argument_reporter_string_number"
reporter.fields.append(scratch.BlockField("VALUE", [node.argname, None]))
return scratch.BlockInput(name, 1, reporter.id)
if type(node) == SPVariableName:
var = target.findVariableByName(node.variableName)
input = scratch.BlockInput(name, 1, var.name, third=var.id)
input.isVariable = True
return input
if type(node) == SPArithmetic:
block = self.spNodeToReporterBlock(target, node)
return scratch.BlockInput(name, 1, block.id, valueTypeId=1)
print(f'Compilation warning: unsupported node type "{type(node).__name__}" for block input')
def spNodeToReporterBlock(self, target: scratch.ScratchTarget, node: SPNode):
block = target.createBlock()
if type(node) == SPArithmetic:
opcode = {
"add": "operator_add",
"sub": "operator_subtract",
"mul": "operator_multiply",
"div": "operator_divide"
}[node.op]
block.opcode = opcode
leftInput = self._spNodeToBlockInput("NUM1", target, node=node.left)
rightInput = self._spNodeToBlockInput("NUM2", target, node=node.right)
leftInput.parentId = block
rightInput.parentId = block
block.inputs.append(leftInput)
block.inputs.append(rightInput)
else:
print(f'Compilation warning: unsupported node type "{type(node).__name__}" for reporter')
return block
def spNodeToScratchBlocks(self, target: scratch.ScratchTarget, node: SPNode):
blocks = []
if type(node) == SPAssign:
for assignTarget in node.targets:
block = target.createBlock()
block.opcode = "data_setvariableto"
block.fields.append(scratch.BlockField("VARIABLE", [
assignTarget.variableName,
target.findVariableByName(assignTarget.variableName).id
]))
assignValueBlock = self.spNodeToReporterBlock(target, node.value)
block.inputs.append(scratch.BlockInput("VALUE", 1, assignValueBlock.id))
blocks.append(block)
else:
print(f'Compilation warning: unsupported node type "{type(node).__name__}"')
return blocks
def exportSB3(self, filename):
self.scratchProj.saveToFile(filename)
def exportProjectJSON(self, filename):
with open(filename, 'w') as fl:
json.dump(self.scratchProj.serialize(), fl)
def __enter__(self):
return self
def __exit__(self, *args):
self.scratchProj.__exit__() | StarcoderdataPython |
131680 | """
********************************************************************************
* Name: spatial_reference.py
* Author: nswain
* Created On: May 15, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
from tethys_sdk.testing import TethysTestCase
from tethysext.atcore.services.spatial_reference import SpatialReferenceService
from tethysext.atcore.urls import spatial_reference
from tethysext.atcore.controllers.rest.spatial_reference import QuerySpatialReference
from tethysext.atcore.tests.mock.url_map_maker import MockUrlMapMaker
class CustomQuerySpatialReference(QuerySpatialReference):
pass
class CustomSpatialReferenceService(SpatialReferenceService):
pass
class InvalidController:
pass
class SpatialReferenceUrlsTests(TethysTestCase):
def setUp(self):
self.base_url_path = 'foo/bar'
self.names = ['atcore_query_spatial_reference']
self.urls = ['rest/spatial-reference/query']
self.num_urls = 1
def tearDown(self):
pass
def name_asserts(self, url_maps):
for url_map in url_maps:
name = url_map.name
self.assertIn(name, self.names)
def url_asserts(self, url_maps, with_base_url=False):
if with_base_url:
compare_urls = [self.base_url_path + '/' + u for u in self.urls]
else:
compare_urls = self.urls
for url_map in url_maps:
url = url_map.url
self.assertIn(url, compare_urls)
def controller_asserts(self, url_maps, controller_names, default_controller, custom_controller):
num_controllers_tested = 0
for url_map in url_maps:
if url_map.name in controller_names:
controller = url_map.controller
self.assertTrue(callable(controller))
self.assertNotEqual(default_controller.as_controller().__name__, controller.__name__)
self.assertEqual(custom_controller.as_controller().__name__, controller.__name__)
num_controllers_tested += 1
self.assertEqual(len(controller_names), num_controllers_tested)
def test_vanilla(self):
url_maps = spatial_reference.urls(MockUrlMapMaker, None, None)
self.name_asserts(url_maps)
self.assertEqual(len(url_maps), self.num_urls)
self.name_asserts(url_maps)
self.url_asserts(url_maps)
def test_base_url_path(self):
url_maps = spatial_reference.urls(MockUrlMapMaker, None, None, base_url_path=self.base_url_path)
self.assertEqual(len(url_maps), self.num_urls)
self.url_asserts(url_maps, with_base_url=True)
def test_base_url_path_startswith_slash(self):
startswith_path = '/' + self.base_url_path
url_maps = spatial_reference.urls(MockUrlMapMaker, None, None, base_url_path=startswith_path)
self.assertEqual(len(url_maps), self.num_urls)
self.url_asserts(url_maps, with_base_url=True)
def test_base_url_path_endswith_slash(self):
endswith_path = self.base_url_path + '/'
url_maps = spatial_reference.urls(MockUrlMapMaker, None, None, base_url_path=endswith_path)
self.assertEqual(len(url_maps), self.num_urls)
self.url_asserts(url_maps, with_base_url=True)
def test_custom_query_spatial_reference_controller(self):
url_maps = spatial_reference.urls(MockUrlMapMaker, None, None, custom_controllers=[CustomQuerySpatialReference])
self.assertEqual(len(url_maps), self.num_urls)
self.controller_asserts(url_maps, ['atcore_query_spatial_reference'], QuerySpatialReference,
CustomQuerySpatialReference)
def test_invalid_controller_arg_class(self):
mockapp = object()
mock_db_name = "foo"
self.assertRaises(ValueError, spatial_reference.urls, MockUrlMapMaker, mockapp, mock_db_name,
custom_controllers=[InvalidController])
def test_invalid_controller_arg_not_class(self):
mockapp = object()
mock_db_name = "foo"
self.assertRaises(ValueError, spatial_reference.urls, MockUrlMapMaker, mockapp, mock_db_name,
custom_controllers=['not-a-class'])
def test_custom_base_url_path_and_controllers(self):
mockapp = object()
mock_db_name = "foo"
url_maps = spatial_reference.urls(MockUrlMapMaker, mockapp, mock_db_name, base_url_path=self.base_url_path,
custom_controllers=[CustomQuerySpatialReference])
self.assertEqual(len(url_maps), self.num_urls)
self.url_asserts(url_maps, with_base_url=True)
self.controller_asserts(url_maps, ['atcore_query_spatial_reference'], QuerySpatialReference,
CustomQuerySpatialReference)
def test_custom_services(self):
# NOTE: Don't know how to validate this... for not just test that it doesn't throw an error.
mockapp = object()
mock_db_name = "foo"
spatial_reference.urls(MockUrlMapMaker, mockapp, mock_db_name, custom_services=[CustomSpatialReferenceService])
self.assertRaises(ValueError, spatial_reference.urls, MockUrlMapMaker, mockapp, mock_db_name,
custom_services=['invalid-service'])
| StarcoderdataPython |
1621636 | <filename>Taller_Diccionarios/Ejercicio_1.py
ejercicio = [12, 23, 5, 12, 92, 5, 12, 5, 29, 92, 64, 23]
diccionario = {}
for i in ejercicio:
a=ejercicio.count(i)
diccionario.update({i:a})
print(diccionario)
| StarcoderdataPython |
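For comparison, the same frequency count can be produced with the standard library (purely illustrative):

```python
from collections import Counter

ejercicio = [12, 23, 5, 12, 92, 5, 12, 5, 29, 92, 64, 23]
print(dict(Counter(ejercicio)))  # same counts as the manual loop above
```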
1670489 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except
# in compliance with the License. A copy of the License is located at
#
# https://aws.amazon.com/apache-2-0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"Unit tests"
import unittest.mock
import unittest
import json
####
# Mock the database methods before we import logic
####
def list_airports_mock():
"mock the airports response"
airports = (
"EA100,EB100,EA200,EB200,EA101,EB101,EC101,EA201,EB201,"
"EC201,EA102,EB102,EC102,ED102,EA202,EB202,EC202,ED102,"
"MA50,MB50,MA100,MB100,MA51,MB51,MC51,MA101,MB101,MC101,"
"MA52,MB52,MC52,MD52,MA102,MB102,MC102,MD102,"
"HA50,HB50,HA60,HB60,HA51,HB51,HC51,HA61,HB61,HC61,HA52,"
"HB52,HC52,HD52,HA62,HB62,HC62,HD62"
)
return [{'ident': a, 'name': 'Fake - %s' % a, 'local_code': a} for a in airports.split(',')]
def list_routes_mock():
"mock the routes response"
routes = [
# easy questions - 100 miles difference
{'route_csv': 'EA100,EB100', 'segment_count': 1, 'total_miles': 100},
{'route_csv': 'EA200,EB200', 'segment_count': 1, 'total_miles': 200},
{'route_csv': 'EA101,EB101,EC101', 'segment_count': 2, 'total_miles': 101},
{'route_csv': 'EA201,EB201,EC201', 'segment_count': 2, 'total_miles': 201},
{'route_csv': 'EA102,EB102,EC102,ED102', 'segment_count': 3, 'total_miles': 102},
{'route_csv': 'EA202,EB202,EC202,ED102', 'segment_count': 3, 'total_miles': 202},
# medium questions - 50 miles difference
{'route_csv': 'MA50,MB50', 'segment_count': 1, 'total_miles': 1050},
{'route_csv': 'MA100,MB100', 'segment_count': 1, 'total_miles': 1100},
{'route_csv': 'MA51,MB51,MC51', 'segment_count': 2, 'total_miles': 1051},
{'route_csv': 'MA101,MB101,MC101', 'segment_count': 2, 'total_miles': 1101},
{'route_csv': 'MA52,MB52,MC52,MD52', 'segment_count': 3, 'total_miles': 1052},
{'route_csv': 'MA102,MB102,MC102,MD102', 'segment_count': 3, 'total_miles': 1102},
# hard questions - 10 miles difference
{'route_csv': 'HA50,HB50', 'segment_count': 1, 'total_miles': 2010},
{'route_csv': 'HA60,HB60', 'segment_count': 1, 'total_miles': 2020},
{'route_csv': 'HA51,HB51,HC51', 'segment_count': 2, 'total_miles': 2011},
{'route_csv': 'HA61,HB61,HC61', 'segment_count': 2, 'total_miles': 2021},
{'route_csv': 'HA52,HB52,HC52,HD52', 'segment_count': 3, 'total_miles': 2012},
{'route_csv': 'HA62,HB62,HC62,HD62', 'segment_count': 3, 'total_miles': 2022}
]
return routes
with unittest.mock.patch('database.list_airports', list_airports_mock), \
unittest.mock.patch('database.list_routes', list_routes_mock):
from application import application
class ApplicationTestCase(unittest.TestCase):
"All the Application tests"
def setUp(self):
application.testing = True
self.app = application.test_client()
def test_home(self):
"Home route test"
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
def test_get_challenge(self):
"Challenge test"
response = self.app.get('/api/v1.0/get_challenge')
challenge = json.loads(response.data)
self.assertEqual(len(challenge['easy']), 3)
self.assertEqual(len(challenge['medium']), 3)
self.assertEqual(len(challenge['hard']), 3)
def test_get_route_miles(self):
"Get miles test"
response = self.app.post('/api/v1.0/get_route_miles',
data="""{
"1": { "Route": "EA100,EB100" },
"2": { "Route": "EA200,EB200" }
}""",
content_type='application/json')
miles = json.loads(response.data)
self.assertEqual(miles["1"]["Miles"], 100)
self.assertEqual(miles["2"]["Miles"], 200)
def test_get_route_miles_bad_route(self):
"Validation error test"
with self.assertRaises(ValueError):
self.app.post('/api/v1.0/get_route_miles',
data="""{
"1": { "Route": "DOESNT-EXIST" },
"2": { "Route": "DOESNT-EXIST" }
}""",
content_type='application/json')
| StarcoderdataPython |
1792615 | from .allauth import *
from .drf import *
from .jwt import *
from .rest_auth import *
from .cors import *
| StarcoderdataPython |
3238876 | <gh_stars>0
#!/usr/bin/env python
from distutils.core import setup
setup(name='mysql2tsv',
version='0.01',
packages=['mysql2tsv'],
package_dir={'mysql2tsv': 'mysql2tsv'}
)
| StarcoderdataPython |
1725637 | <filename>react_game/employeelistBackend/urls.py
from django.urls import path
| StarcoderdataPython |
113868 | # get china stock symbols
import pandas as pd  # pd.read_html is used below but pandas was never imported
from tqdm import tqdm
from time import sleep
from random import randint
from bs4 import BeautifulSoup
from requests import Request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
class Spider:
def __init__(self, login_url='https://finance.yahoo.com/most-active'):
print('init webdriver...')
option = None
option = webdriver.ChromeOptions()
self.driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=option)
self.driver.get(login_url)
print('webdriver initiated!')
# After the Chrome window opens, the market has to be switched to China manually!
spider = Spider()
print('Did you select China in the window that just opened? Yes/No')
ans = input()
if ans.lower() != 'yes':
raise RuntimeError('Try Again')  # raising a string literal is a TypeError in Python 3
else:
print('You can run next cell!')
payload=pd.read_html(spider.driver.page_source)
table_0 = payload[0]
df = table_0
for i in tqdm(range(100)):
spider.driver.find_elements_by_xpath('//*[@id="scr-res-table"]/div[2]/button[3]')[0].click()
payload=pd.read_html(spider.driver.page_source)
table_0 = payload[0]
tmp_df = table_0
df = df.append(tmp_df)
sleep(randint(2, 7) / 1.32)  # the time module itself is never imported; use the imported sleep
df.to_excel('data/china_stocks.xlsx')
| StarcoderdataPython |
1606314 |
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"Function" submodule
This sub-package contains some functions required for the "Hamiltonian"
sub-package.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
from scipy import constants
from MajoranaNanowires.third_functions import pfaffian as pf
#%% ############################# Functions
#%%
def FermiDirac(E,kT,mu=0):
"""
Computes the Fermi-Dirac distribution.
Parameters
----------
E: scalar or arr
Energies.
kT: scalar
Temperature (in units of energy).
mu: scalar or arr
Fermi energy.
Returns
-------
result: scalar or arr
Fermi-Dirac distribution for the given energies.
"""
np.seterr(over='ignore')
np.seterr(divide='ignore')
return (1/(1+np.exp((E-mu)/kT)))
#%%
def density_TF(phi,kT=0,E_F=0,material='InAs',band='conduction',Vz=0):
"""
Computes the charge density of a 3D (free) electron gas in the Thomas-Fermi
approximation.
Parameters
----------
phi: scalar or arr
Electrostatic energy.
kT: scalar
Temperature (in units of energy).
E_F: scalar or arr
Fermi energy.
material: str or dic
            Material for which the density is evaluated. For a general
            material, 'material' is a dictionary with entries m_eff
            (conduction effective mass), m_eff_hh (heavy hole effective
            mass), m_eff_lh (light hole effective mass), and E_gap
            (semiconductor gap). These parameters are already stored in this
            function for InAs and InSb, which can be selected with
            material='InAs' or material='InSb', respectively.
band: str
Whether to include 'conduction', 'valence' or 'both' bands in the
calculations.
Vz: scalar
Zeeman splitting.
Returns
-------
den: scalar or arr
Charge density in the Thomas-Fermi approximation for the given
electrostatic energies.
"""
np.seterr(invalid='ignore')
if material=='InAs':
m_eff=0.023
m_eff_hh=0.41
m_eff_lh=0.026
E_gap=418
elif material=='InSb':
m_eff=0.015
m_eff_hh=0.43
m_eff_lh=0.015
E_gap=170
    else:
        if 'E_gap' in material:
            m_eff, m_eff_hh, m_eff_lh, E_gap = material['m_eff'], material['m_eff_hh'], material['m_eff_lh'], material['E_gap']
        else:
            m_eff = material['m_eff']
if band=='conduction':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
elif band=='valence':
if Vz==0:
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
else:
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
elif band=='both':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
return (den)
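#%% Illustrative usage sketch (assumed parameter values; phi, E_F and kT in meV
# as in the docstring): conduction-band Thomas-Fermi density for InAs.
#
#   phi = np.linspace(-50, 50, 101)           # electrostatic energy
#   rho = density_TF(phi, kT=0.3, E_F=10)     # charge density (nm^-3)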
#%% ############################# Array manipulation
#%%
def order_eig(E,U=0,sparse='yes',BdG='yes'):
"""
    Order the eigenvalues (and eigenvectors) from smaller to larger. If
    BdG=='yes' and sparse=='yes', it also ensures that there are the same
    number of positive eigenvalues as negative ones.
Parameters
----------
E: arr
Eigenvalues.
U: arr
Eigenvectors.
sparse: {'yes','no'}
Whether the eigenspectrum has been computed from a sparse matrix.
BdG: {'yes','no'}
Whether the eigenspectrum must have BdG symmetry or not.
Returns
-------
E, U: arrs
Eigenspectrum ordered from smaller to larger eigenvalues.
"""
n_eig=len(E)
if np.isscalar(U):
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
return (idx)
else:
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
return (E),(U)
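#%% Illustrative usage sketch (assumed workflow): H is a sparse BdG Hamiltonian
# built elsewhere in the package, and its lowest eigenstates are reordered so
# that the spectrum is symmetric around zero.
#
#   E, U = scipy.sparse.linalg.eigsh(H, k=n_eig, sigma=0, which='LM')
#   E, U = order_eig(E, U, sparse='yes', BdG='yes')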
#%%
def length(vec):
"""
    Length of a given vector. If vec is a scalar, its length is 1.
Parameters
----------
vec: scalar or arr
Input vector
Returns
-------
length: int
            Length of vec. If vec is a scalar, its length is 1.
"""
if np.ndim(vec)==0:
length=1
else:
length=len(vec)
return length
#%%
def diagonal(N,k=0,init=0,step=1):
"""
    Indices of a given diagonal of a matrix. It is more efficient than its
    numpy counterpart.
Parameters
----------
N: int
Length of the diagonal (number of elements).
k: int
Offset of the off-diagonal. k=0 is the main diagonal, k>0 is a
diagonal in the upper-part of the Hamiltonian, and k<0 in the
lower one.
init: int
The starting element of the diagonal.
step: int
The step between elements in the diagonal.
Returns
-------
indices: tuple of arr
            Indices of the diagonal. The first element of the tuple contains
            the row indices, and the second one the column indices.
"""
assert np.isscalar(k), 'The offset k must be a scalar'
if k==0:
indices=(np.arange(init,N,step=step),np.arange(init,N,step=step))
elif k>0:
indices=(np.arange(init,N-k,step=step),np.arange(init,N-k,step=step)+k)
elif k<0:
indices=(np.arange(init,N+k,step=step)-k,np.arange(init,N+k,step=step))
return(indices)
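#%% Illustrative sketch (assumed sizes): filling the first upper off-diagonal
# of a 5x5 sparse matrix using the indices returned by diagonal().
#
#   H = scipy.sparse.dok_matrix((5, 5))
#   H[diagonal(5, k=1)] = 1     # rows (0,1,2,3), columns (1,2,3,4)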
#%%
def concatenate(arg):
"""
Concatenate a list of arrays.
Parameters
----------
arg: tuple or list of arr
List of arrays to be concatenated.
Returns
-------
con: arr or list
Array or list of the concatenated list.
"""
if isinstance(arg[0],tuple) and len(arg[0])==2:
index_1, index_2 = np.array([]), np.array([])
for i in range(len(arg)):
index_1 = np.append(index_1,arg[i][0])
index_2 = np.append(index_2,arg[i][1])
indices=(index_1,index_2)
else:
indices=np.concatenate(arg)
return(indices)
#%%
def between(arg, interval):
"""
    Computes whether a given number lies within a given interval or not.
Parameters
----------
arg: scalar
Number to be evaluated.
interval: tuple
Interval in which perform the evaluation.
Returns
-------
result: bool
If arg is between interval, result=True, and result=False in other
case.
"""
if arg>=interval[0] and arg<=interval[1]:
result=True
else:
result=False
return(result)
#%%
def arg_isclose(vec,val):
"""
    Find the index of the element of the array "vec" which is closest to a
    specific value "val".
Parameters
----------
vec: arr
Array in which it is desired to find the closest element.
val: scalar
Closest value.
Returns
-------
result: int
Index of the element of vec closest to val.
"""
arg=np.argmin(np.abs(vec-val))
return(arg)
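#%% Illustrative sketch (assumed values):
#   arg_isclose(np.linspace(0, 10, 11), 3.2)   # -> 3, index of the closest grid point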
#%% ############################# Constructors or extractors
#%%
def build_mesh(N,L,mesh_type='regular',fact=0.5,asym=1):
"""
Build a 2D inhomogeneous rectangular mesh.
Parameters
----------
N: arr
Number of sites in each direction.
L: arr
Length en each direction.
mesh_type: str
Whether to build a 'regular' mesh, or an inhomogeneous one with a
discretization given by a 'geometric' distribution, an 'exponential'
separation, or a 'random' one.
fact: scalar
Factor which regulates the separations between sites.
asym: scalar
The asymmetry between the factors applied for the x and y direction.
Returns
-------
x, y: mesh
Mesh in the x and y directions.
dis: mesh
Mesh with the discretization in each point.
"""
if mesh_type=='regular':
x, y = np.linspace(-L[1]/2,L[1]/2,N[0]), np.linspace(-L[0]/2,L[0]/2,N[1])
dis=np.array([np.abs(x[1]-x[0]),np.abs(y[1]-y[0])])
x,y=np.meshgrid(x,y,indexing='ij')
return (x,y,dis)
elif mesh_type=='geometric':
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xm[i,j]=(L[0]/2*fact**np.abs(i-int((N[0]-1)/2))-L[0]/2)*np.sign(i-int((N[0]-1)/2))*(L[0]/(L[0]/2*fact**np.abs(0-int((N[0]-1)/2))-L[0]/2)/2)
ym[i,j]=(L[1]/2*fact**np.abs(j-int((N[1]-1)/2))-L[1]/2)*np.sign(j-int((N[1]-1)/2))*(L[1]/(L[1]/2*fact**np.abs(0-int((N[1]-1)/2))-L[1]/2)/2)
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
elif mesh_type=='exponential':
np.seterr(all='ignore')
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xm[i,j]=(1-np.exp(-np.abs(i-int((N[0]-1)/2))*fact))*np.sign(i-int((N[0]-1)/2))*(1-np.exp(-np.abs(N[0]-int((N[0]-1)/2))*fact))**(-1)*L[0]/2
ym[i,j]=(1-np.exp(-np.abs(j-int((N[1]-1)/2))*fact/asym))*np.sign(j-int((N[1]-1)/2))*(1-np.exp(-np.abs(N[1]-int((N[1]-1)/2))*fact/asym))**(-1)*L[1]/2
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
elif mesh_type=='random':
x,y,dis=build_mesh(N,L,mesh_type='regular')
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xp, yp = x[:,0]+(np.random.rand(N[0])-0.5)*dis[0]*fact, y[0,:]+(np.random.rand(N[0])-0.5)*dis[1]*fact
xm[i,j],ym[i,j]=xp[i],yp[j]
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
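#%% Illustrative usage sketch (assumed sizes): a 20x20 site section of
# 100x100 (nm) with the discretization refined towards the facets.
#
#   x, y, dis = build_mesh(np.array([20, 20]), np.array([100, 100]),
#                          mesh_type='exponential', fact=0.3)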
#%%
def get_potential(phi_in,x,y,z,symmetry='none',mesh_type='none'):
"""
Obtain the potential from a function for a given sites.
Parameters
----------
phi_in: fun
Fenics function of the electrostatic potential.
x,y,z: arr
            Points at which the potential is evaluated.
symmetry: {'none','x','y','z','full-shell'}
Imposed symmetry of the potential.
        mesh_type: str
            Whether the y,z coordinates describe a regular grid ('none') or a
            2D inhomogeneous mesh ('yz-mesh').
Returns
-------
phi_out: arr
Electrostatic potential in the sites given by x,y,z.
"""
phi_out=np.zeros((len(x),len(y),len(z)))
if symmetry=='none':
for i in range(len(x)):
for j in range(len(y)):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
elif symmetry=='y':
if mesh_type=='none':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
elif mesh_type=='yz-mesh':
for i in range(len(x)):
for j in range(int((len(y[:,0])-1)/2)+1):
for k in range(len(z[0,:])):
phi_out[i,j,k]=phi_in(x[i],y[j,k],z[j,k])
phi_out[i,len(y[:,0])-j-1,k]=phi_out[i,j,k]
elif symmetry=='yz':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(int((len(z)-1)/2)+1):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
phi_out[i,j,len(z)-k-1]=phi_out[i,j,k]
phi_out[i,len(y)-j-1,len(z)-k-1]=phi_out[i,j,k]
elif symmetry=='xy':
for i in range(int((len(x)-1)/2)+1):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
phi_out[len(x)-i-1,j,k]=phi_out[i,j,k]
phi_out[len(x)-i-1,len(y)-j-1,k]=phi_out[i,j,k]
elif symmetry=='x':
for i in range(int((len(x)-1)/2)+1):
for j in range(len(y)):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[len(x)-i-1,j,k]=phi_out[i,j,k]
elif symmetry=='full-shell':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if (z[k]>=0) and (y[j]<=z[k]/np.tan(np.pi/3)) and (y[j]>=-z[k]/np.tan(np.pi/3)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
for l in range(1,4):
phi_out[i,int(round((j-25)*np.cos(np.pi/3*l)-(k-25)*np.sin(np.pi/3*l)))+25,int(round((j-25)*np.sin(np.pi/3*l)+(k-25)*np.cos(np.pi/3*l)))+25]=phi_out[i,j,k]
phi_out[i,int(round((len(y)-j-1-25)*np.cos(np.pi/3*l)-(k-25)*np.sin(np.pi/3*l)))+25,int(round((len(y)-j-1-25)*np.sin(np.pi/3*l)+(k-25)*np.cos(np.pi/3*l)))+25]=phi_out[i,j,k]
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if phi_out[i,j,k]==0:
phi_out[i,j,k]=phi_out[i,int(j+1),k]
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if phi_out[i,j,k]==0:
phi_out[i,j,k]=phi_out[i,int(j+2),k]
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
return (phi_out)
#%%
def get_ElectricField(phi,x,y,z):
"""
Obtain the electric field of a given electrostatic potential.
Parameters
----------
phi: arr
Electrostatic potential.
x,y,z: arr
            Points at which the potential is evaluated.
Returns
-------
E: arr
Electric field of phi. Each element E[i] is the electric field in
each direction.
"""
dis=np.array([np.abs(x[1]-x[0]),np.abs(y[1]-y[0]),np.abs(z[1]-z[0])])
if np.ndim(phi)==3:
Ex, Ey, Ez = np.gradient(phi,dis[0],dis[1],dis[2])
return (np.array([Ex,Ey,Ez]))
elif np.ndim(phi)==2:
Ey, Ez = np.gradient(phi,dis[1],dis[2])
return (np.array([Ey,Ez]))
elif np.ndim(phi)==1:
Ex = np.gradient(phi,dis)
return (Ex)
#%% ############################# Modifiers
#%%
def mask_hexagonal(fun_in,y,z,x=0,change=np.nan,mesh_type='regular'):
"""
    Hexagonal mask. This function changes the values of those points of fun_in
    which lie outside the hexagonal section.
Parameters
----------
fun_in: arr
Function to be masked.
y,z: arr
            Points of the section at which the function is evaluated.
x: arr
            Points along the wire length at which the function is evaluated.
            If x=0, then it is only evaluated in 2D.
change: value
            Value assigned to the points outside of the hexagonal section.
Returns
-------
fun_out: arr
Masked function.
"""
if np.isscalar(x):
if mesh_type=='regular':
Ny, Nz = len(y), len(z)
Ly, Lz = y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((len(y),len(z)))
for j in range(Ny):
for k in range(Nz):
if not(between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[j,k]=change
else:
fun_out[j,k]=fun_in[j,k]
else:
Ny, Nz = len(y[:,0]), len(z[0,:])
Ly, Lz = y[Ny-1,0]*2, z[0,Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((Ny,Nz))
for j in range(Ny):
for k in range(Nz):
if not(between(z[j,k], (-b0,b0)) and between(z[j,k],(2*b0/a0*y[j,k]-2*b0,-2*b0/a0*y[j,k]+2*b0)) and between(z[j,k],(-2*b0/a0*y[j,k]-2*b0,2*b0/a0*y[j,k]+2*b0))):
fun_out[j,k]=change
else:
fun_out[j,k]=fun_in[j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
else:
Ny, Nz = len(y), len(z)
Ly, Lz = y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((len(x),len(y),len(z)))
for j in range(Ny):
for k in range(Nz):
if not(between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[:,j,k]=np.ones(len(x))*change
else:
fun_out[:,j,k]=fun_in[:,j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
return (fun_out)
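#%% Illustrative sketch (assumed grid): keep a constant function only inside
# the hexagonal wire section and mask the outside with NaNs.
#
#   y, z = np.linspace(-50, 50, 101), np.linspace(-50, 50, 101)
#   phi = np.ones((101, 101))
#   phi_hex = mask_hexagonal(phi, y, z, change=np.nan)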
#%%
def mask_wire(fun_in,N,dis,change=np.nan,include=np.array(['wire']),W_w=0,W_l1=0,W_l2=0,faces_l1=np.array([]),faces_l2=np.array([])):
"""
    Mask for wires. This function changes the values of those points of fun_in
    which are outside the hexagonal section and/or the layers surrounding the
    wire.
Parameters
----------
fun_in: arr
Function to be masked.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
change: value
            Value assigned to the points outside of the hexagonal section.
include: arr
Whether to include the wire ('wire') and/or some layers ('layer_1
and/or 'layer_2').
W_w: float
Width of the nanowire. If W_w=0, then the width is taken as N*dis.
W_l1: float
Width of the first layer surrounding the wire. W_l1=0 means that
there is no layer.
W_l2: float
            Width of the second layer surrounding the wire. W_l2=0 means that
            there is no (second) layer.
faces_l1: arr
Facets that the first layer covers to the wire. Each facet is
labeled with a number from 1 to 6 (the upper one is 1, and the rest
are numbered clockwise). Each element of the array denotes with a
string (e.g. np.array(['1','2'])) if such facet is covered.
faces_l2: arr
Same for the second layer.
Returns
-------
fun_out: arr
Masked function.
"""
if len(N)==3:
Nx, Ny, Nz = N
dis_x, dis_y, dis_z = dis
fun_in=fun_in[0]
elif len(N)==2:
Ny, Nz = N
dis_y, dis_z = dis
y, z= np.linspace(-(Ny-1)*dis_y/2,(Ny-1)*dis_y/2,Ny), np.linspace(-(Nz-1)*dis_z/2,(Nz-1)*dis_z/2,Nz)
if np.isscalar(W_w):
if W_w==0:
W_w=Ny*dis_y
a0=W_w/2
b0=a0*np.sin(np.pi/3)
elif not(np.isscalar(W_w)):
a0=Ny*dis_y/2
b0=Nz*dis_z/2*np.sin(np.pi/3)
if faces_l1.size==0:
faces_l1=np.array(['0'])
if faces_l2.size==0:
faces_l2=np.array(['0'])
fun_out=np.zeros((Ny,Nz))
for j in range(Ny):
for k in range(Nz):
if (include=='wire').any():
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[j,k]=fun_in[j,k]
else:
fun_out[j,k]=change
if (include=='layer_1').any():
if (faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0,b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0,2*b0/a0*y[j]+W_l1)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0,-2*b0/a0*y[j]+W_l1)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l1,-2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l1,2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l1/2,a0/2+W_l1/2)) and between(z[k], (-b0-W_l1,-b0)))):
fun_out[j,k]=fun_in[j,k]
if (include=='layer_2').any():
if (faces_l2=='1').any():
if (faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0+W_l1,b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0,b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='2').any():
if (faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0+W_l1,2*b0/a0*y[j]+W_l1+W_l2)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0,2*b0/a0*y[j]+W_l2)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='6').any():
if (faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0+W_l1,-2*b0/a0*y[j]+W_l1+W_l2)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0,-2*b0/a0*y[j]+W_l2)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='3').any():
if (faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0-W_l1)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l1-W_l2,-2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l2,-2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='5').any():
if (faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0-W_l1)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l1-W_l2,2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l2,2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='4').any():
if (faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l1/2-W_l2/2,a0/2+W_l1/2+W_l2/2)) and between(z[k], (-b0-W_l1-W_l2,-b0)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l2/2,a0/2+W_l2/2)) and between(z[k], (-b0-W_l2,-b0)))):
fun_out[j,k]=fun_in[j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
if len(N)==3:
fun_out=np.tile(fun_out,(Nx,1,1))
return (fun_out)
#%%
def interface(N,dis,width,faces,a0,b0):
"""
    Find points close to some nanowire facet (assuming a nanowire with
    hexagonal cross-section).
Parameters
----------
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
        width: float
Width of the "close region" to the facet.
faces: arr
Which facets include in the search. Each facet is labeled with a
number from 1 to 6 (the upper one is 1, and the rest are numbered
clockwise). Each element of the array denotes with a string (e.g.
np.array(['1','2'])) if such facet is covered.
Returns
-------
        sites: arr
            Array with ones at the sites close to the selected facets and
            zeros elsewhere.
"""
L=np.array([(N[0]-1)*dis[0], (N[1]-1)*dis[1], (N[2]-1)*dis[2]])
x, y =np.linspace(-L[1]/2,L[1]/2,N[1]), np.linspace(-L[2]/2,L[2]/2,N[2])
fun_out=np.zeros(N[1::],dtype=int)
for i in range(N[1]):
for j in range(N[2]):
if (faces=='1').any() and ((between(x[i], (-a0/2,a0/2)) and between(y[j], (b0-width,b0)) and between(y[j], (-2*b0/a0*x[i],b0))and between(y[j], (2*b0/a0*x[i],b0)))):
fun_out[i,j]=1
elif (faces=='6').any() and ((between(y[j], (-2*b0/a0*x[i]+2*b0-width*b0/a0*2,2*b0/a0*x[i])) and between(y[j], (2*b0/a0*x[i]-2*b0-width,-2*b0/a0*x[i]+2*b0)) and between(y[j], (0,b0)) )):
fun_out[i,j]=1
elif (faces=='2').any() and ((between(y[j], (2*b0/a0*x[i]+2*b0-width*b0/a0*2,-2*b0/a0*x[i])) and between(y[j], (-2*b0/a0*x[i]-2*b0-width,2*b0/a0*x[i]+2*b0)) and between(y[j], (0,b0)) )):
fun_out[i,j]=1
elif (faces=='5').any() and ((between(y[j], (-b0,2*b0/a0*x[i]-2*b0+width*b0/a0*2)) and between(y[j], (2*b0/a0*x[i]-2*b0,-2*b0/a0*x[i]+2*b0+width)) and between(y[j], (-b0,0)) )):
fun_out[i,j]=1
elif (faces=='3').any() and ((between(y[j], (-b0,-2*b0/a0*x[i]-2*b0+width*b0/a0*2)) and between(y[j], (-2*b0/a0*x[i]-2*b0,2*b0/a0*x[i]+2*b0+width)) and between(y[j], (-b0,0)) )):
fun_out[i,j]=1
elif (faces=='4').any() and ((between(x[i], (-a0/2,a0/2)) and between(y[j], (-b0,-b0+width)))):
fun_out[i,j]=1
fun_out_end=np.zeros(N)
for i in range(N[0]):
fun_out_end[i,:,:]=fun_out
return fun_out_end
#%%
def H_rectangular2hexagonal(H,N,dis,BdG='no',output='H',m=0,sparse='yes'):
"""
    Transform a Hamiltonian of a nanowire with rectangular cross-section into
    that of a nanowire with a hexagonal one.
Parameters
----------
H: arr
Hamiltonian with rectangular section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
m: int
Number of sites of the discretized Hamiltonian with the hexagonal
section.
output: str
Whether to return the Hamiltonian (output='H'), the number of sites
of the discretized Hamiltonian with the hexagonal section
(output='m_hex'), or the sites that are inside of the nanowire
section (output='sites').
Returns
-------
Depends on the parameter output.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
Nx, Ny, Nz = N[0], N[1], N[2]
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
l=0
if (output=='H'):
if m==0:
m=H_rectangular2hexagonal(H,N,dis,BdG=BdG,output='m_hex',m=0)
if BdG=='no':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,2*Nx*Ny*Nz),dtype=complex)
else:
H_del=np.zeros((m,2*Nx*Ny*Nz),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
H_del[l,2*(k+(j+i*Ny)*Nz)]=1
H_del[l+1,2*(k+(j+i*Ny)*Nz)+1]=1
l=l+2
elif BdG=='yes':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,4*Nx*Ny*Nz),dtype=complex)
else:
H_del=np.zeros((m,4*Nx*Ny*Nz),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
H_del[l,2*(k+(j+i*Ny)*Nz)]=1
H_del[l+1,2*(k+(j+i*Ny)*Nz)+1]=1
H_del[l+int(m/2),2*(k+(j+i*Ny)*Nz)+int(2*Nx*Ny*Nz)]=1
H_del[l+1+int(m/2),2*(k+(j+i*Ny)*Nz)+1+int(2*Nx*Ny*Nz)]=1
l=l+2
H=H_del.dot(H.dot(H_del.transpose()))
return (H)
elif (output=='m_hex'):
m=0
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
m=m+1
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
return (m)
elif (output=='sites'):
m=0
sites=np.array([])
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
if (between(z[k], (b0-dis[2],b0))):
sites=np.append(sites,m)
m=m+2
return (sites)
#%%
def U_rectangular2hexagonal(U_in,N,dis,BdG='no',m=0):
"""
    Transform a wavefunction of a nanowire with rectangular cross-section into
    that of a nanowire with a hexagonal one, erasing the elements of the
    wavefunction outside the hexagonal section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with rectangular section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
m: int
Number of sites of the hexagonal cross-section nanowire. It can be
computed using the function Function.H_rectangular2hexagonal.
Returns
-------
U: arr
Wavefunction of a nanowire with hexagonal section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if scipy.sparse.issparse(U_in):
U_in=U_in.todense()
Nx, Ny, Nz = N[0], N[1], N[2]
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
n_eig=np.shape(U_in)[1]
l=0
if BdG=='no':
U=np.zeros((m,n_eig),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U[l,:], U[l+1,:] = U_in[2*(k+(j+i*Ny)*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1,:]
l=l+2
elif BdG=='yes':
U=np.zeros((m,n_eig),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U[l,:], U[l+1,:] = U_in[2*(k+(j+i*Ny)*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1,:]
U[l+int(m/2),:], U[l+1+int(m/2),:] = U_in[2*(k+(j+i*Ny)*Nz)+int(2*Nx*Ny*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1+int(2*Nx*Ny*Nz),:]
l=l+2
U=scipy.sparse.dok_matrix(U)
return (U)
#%%
def U_hexagonal2rectangular(U_in,N,dis,BdG='no',space='position'):
"""
    Transform a wavefunction of a nanowire with hexagonal cross-section into
    that of a nanowire with a rectangular one, filling with zeros the new
    elements outside the hexagonal section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with hexagonal section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
space: str
Whether the wavefunction is in position space or momentum.
Returns
-------
U: arr
Wavefunction of a nanowire with rectangular section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if space=='momentum':
Nx, Ny, Nz = N[0], N[1], N[2]
m=len(U_in[:,0,0])
n_eig=len(U_in[0,:,0])
n_k=len(U_in[0,0,:])
if BdG=='no':
U_out = np.empty([2*Nx*Ny*Nz,int(n_eig),n_k],dtype=complex)
elif BdG=='yes':
U_out = np.empty([4*Nx*Ny*Nz,int(n_eig),n_k],dtype=complex)
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
l=0
if BdG=='no':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U_out[2*(k+(j+i*Ny)*Nz),:,:]=U_in[l,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=U_in[l+1,:,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=np.zeros((n_eig,n_k))
elif BdG=='yes':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U_out[2*(k+(j+i*Ny)*Nz),:,:]=U_in[l,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=U_in[l+1,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:,:]=U_in[l+int(m/2),:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:,:]=U_in[l+1+int(m/2),:,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:,:]=np.zeros((n_eig,n_k))
elif space=='position':
Nx, Ny, Nz = N[0], N[1], N[2]
m=len(U_in[:,0])
n_eig=len(U_in[0,:])
if BdG=='no':
U_out = np.empty([2*Nx*Ny*Nz,int(n_eig)],dtype=complex)
elif BdG=='yes':
U_out = np.empty([4*Nx*Ny*Nz,int(n_eig)],dtype=complex)
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
if scipy.sparse.issparse(U_in):
U_in=U_in.todense()
l=0
if BdG=='no':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
U_out[2*(k+(j+i*Ny)*Nz),:]=U_in[l,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=U_in[l+1,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=np.zeros((n_eig))
elif BdG=='yes':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
U_out[2*(k+(j+i*Ny)*Nz),:]=U_in[l,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=U_in[l+1,:]
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:]=U_in[l+int(m/2),:]
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:]=U_in[l+1+int(m/2),:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:]=np.zeros((n_eig))
return (U_out)
#%%
def H_rec2shape(H,shape,N,dis,BdG='no',output='H',m=0):
"""
    Transform a Hamiltonian of a nanowire with rectangular cross-section into
    that of a nanowire with a different one.
Parameters
----------
H: arr
Hamiltonian with rectangular section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each 0 element means that the corresponding site is not
            part of the section, while 1 means it is; or it can be
            'hexagonal', which means that the section has a hexagonal shape.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: {'yes','no'}
Whether the Hamiltonian has BdG symmetry.
output: {'H','m'}
Either to return the Hamiltonian (output='H') or the number of sites
of the discretized Hamiltonian with the desired shape
(output='m').
m: int
Number of sites of the discretized Hamiltonian with the desired
shape. If m=0, m is computed.
Returns
-------
Depends on the parameter output.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
if m==0:
m=len(shape[shape==1])
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
if scipy.sparse.issparse(H):
sparse='yes'
else:
sparse='no'
if (output=='H'):
if BdG=='no':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,2*np.prod(N)),dtype=complex)
else:
H_del=np.zeros((m,2*np.prod(N)),dtype=complex)
elif BdG=='yes':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,4*np.prod(N)),dtype=complex)
else:
H_del=np.zeros((m,4*np.prod(N)),dtype=complex)
j=0
for i in range(np.prod(N)):
if shape[i]==1:
H_del[j,2*i],H_del[j+1,2*i+1] = 1, 1
if BdG=='yes':
H_del[j+int(m/2),2*i+2*int(np.prod(N))],H_del[j+1+int(m/2),2*i+1+2*int(np.prod(N))] = 1, 1
j+=2
H=H_del.dot(H.dot(H_del.transpose()))
return (H)
elif (output=='m'):
return (m)
#%%
def U_rec2shape(U_in,shape,N,dis,BdG='no',m=0):
"""
    Transform a wavefunction of a nanowire with rectangular cross-section into
    that of a nanowire with a different one, erasing the elements of the
    wavefunction outside the section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with rectangular section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each np.nan element means that the corresponding site is not
            part of the section; or it can be 'hexagonal', which means that
            the section has a hexagonal shape.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
m: int
Number of sites of the discretized Hamiltonian with the desired
shape. If m=0, m is computed.
Returns
-------
U: arr
Wavefunction of a nanowire with hexagonal section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
n_eig=np.shape(U_in)[1]
if m==0:
m=len(shape[shape==1])
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
if scipy.sparse.issparse(U_in):
sparse='yes'
U_in=U_in.todense()
else:
sparse='no'
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
shape=np.repeat(shape,2)
if BdG=='yes':
shape=np.tile(shape,2)
U=np.zeros((m,n_eig),dtype=complex)
U=U_in[shape==1,:]
if sparse=='yes':
U=scipy.sparse.dok_matrix(U)
return (U)
#%%
def U_shape2rec(U_in,shape,N,dis,BdG='no'):
"""
    Transform a wavefunction of a nanowire with an arbitrary cross-section
    into that of a nanowire with a rectangular one, filling with zeros the new
    elements outside the original section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with hexagonal section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each np.nan element means that the corresponding site is not
            part of the section; or it can be 'hexagonal', which means that
            the section has a hexagonal shape.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
Returns
-------
U: arr
Wavefunction of a nanowire with rectangular section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
n_eig=len(U_in[0,:])
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
shape=np.repeat(shape,2)
if BdG=='yes':
shape=np.tile(shape,2)
if scipy.sparse.issparse(U_in):
sparse='yes'
U_in=U_in.todense()
else:
sparse='no'
if BdG=='no':
U_out = np.zeros((2*np.prod(N),int(n_eig)),dtype=complex)
elif BdG=='yes':
U_out = np.zeros((4*np.prod(N),int(n_eig)),dtype=complex)
U_out[shape==1,:]=U_in
if sparse=='yes':
U_out=scipy.sparse.dok_matrix(U_out)
return (U_out)
#%% ############################# Spectrum
#%%
def prob(U,N,BdG='yes'):
"""
Obtains the probability density of a given wavefunction.
Parameters
----------
U: arr
Wavefunction in a 1D array.
N: int or arr
            Number of sites. Each element N[i] is the number of sites along
            the direction i. If N is an int, then there is just one dimension.
BdG: {'yes','no'}
Whether the wavefunction U is written in the BdG formalism.
Returns
-------
P: arr
            Probability density of U with the same dimensions as N.
"""
P=np.zeros(N)
if BdG=='no':
P=(np.abs(U[0::2])**2+np.abs(U[1::2])**2).reshape(N)
elif BdG=='yes':
P=(np.abs(U[0:2*np.prod(N):2])**2+np.abs(U[1:2*np.prod(N):2])**2+np.abs(U[2*np.prod(N)::2])**2+np.abs(U[2*np.prod(N)+1::2])**2).reshape(N)
return (P)
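#%% Illustrative sketch (assumed inputs): probability density of the lowest
# BdG eigenstate U[:, 0] of a wire discretized with N=(Nx,Ny,Nz) sites.
#
#   P = prob(U[:, 0], N, BdG='yes')   # array of shape N; sums to ~1 if U is normalized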
#%%
def Qtot(E,U,kT):
"""
Computes the total charge in the system.
Parameters
----------
E: scalar or arr
Energies.
U: arr
Eigenstates corresponding to each energy.
kT: scalar
Temperature (in units of energy).
Returns
-------
Qtot: scalar
Total charge in the system.
"""
den=np.dot(U,np.dot(np.diag(1/(1+np.exp(E/kT))),np.transpose(U)))
Qtot=np.sum(np.diag(den)[0:int(len(E)/2)])
return Qtot
#%%
def QM(Uodd,Ueven):
"""
Computes the Majorana charge (wavefunction overlap).
Parameters
----------
Uodd: arr
Eigenstate of the odd-parity Majorana state.
        Ueven: arr
Eigenstate of the even-parity Majorana state.
Returns
-------
QM: scalar
Majorana charge (overlap between U_L and U_R).
"""
QM = np.absolute(np.dot(Uodd+Ueven, -1j*(Uodd-Ueven)))
return QM
#%%
def Density_Matrix(E,U,kT):
"""
Computes the density matrix of the system.
Parameters
----------
E: scalar or arr
Energies.
U: arr
Eigenstates corresponding to each energy.
kT: scalar
Temperature (in units of energy).
Returns
-------
den: arr
Density matrix of the system.
"""
den = np.dot(U, np.dot(np.diag(1 / (1 + np.exp(E / kT))), np.transpose(U)))
return den
#%%
def Density(E,U,N,kT):
"""
Computes the charge density of the system.
Parameters
----------
E: arr
Energies.
U: arr
Eigenstates.
N: arr
Number of sites in each direction.
kT: scalar
Temperature (in units of energy).
Returns
-------
den: arr (3D)
            Charge density at each site.
"""
np.seterr(over='ignore')
if np.ndim(N)==1:
Nx=N[0]
Ny=N[1]
Nz=N[2]
n_eig=len(E)
den=np.zeros((Nx,Ny,Nz))
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
for m in range(n_eig):
den[i,j,k]=den[i,j,k]+(np.abs(U[2*(k+(i*Ny+j)*Nz),m])**2+np.abs(U[2*(k+(i*Ny+j)*Nz)+1,m])**2)*(1 / (1 + np.exp(E[m] / kT)))
#den = np.dot(U, np.transpose(U))
elif np.ndim(N)==0:
Nx=N
n_eig=len(E)
den=np.zeros((Nx))
for i in range(Nx):
for m in range(n_eig):
den[i]=den[i]+(np.abs(U[2*i,m])**2+np.abs(U[2*i+1,m])**2)*(1 / (1 + np.exp(E[m] / kT)))
#den = np.dot(U, np.transpose(U))
return den
#%%
def Density_momentum(E,U,k,N,kT):
"""
    Charge density of a system which is infinite in one direction.
Parameters
----------
E: arr
Energies.
U: arr
Eigenstates.
k: arr
Momentum vector.
N: arr
Number of sites in each direction.
kT: scalar
Temperature (in units of energy).
Returns
-------
den: arr (2D)
            Charge density at each site.
"""
Nx=N[0]
Ny=N[1]
Nz=N[2]
n_eig=len(E)
if np.ndim(U)==3:
den=np.zeros((Nx,Ny,Nz))
for i_x in range(Nx):
for i_y in range(Ny):
for i_z in range(Nz):
for i_E in range(n_eig):
den[i_x,i_y,i_z]=den[i_x,i_y,i_z]+(np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz)),i_E,0])**2+np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz))+1,i_E,0])**2)*denfromDOS(k,E[i_E,:],kT)
elif np.ndim(U)==2:
Nx=1
den=np.zeros((Ny,Nz))
i_x=0
for i_y in range(Ny):
for i_z in range(Nz):
for i_E in range(n_eig):
den[i_y,i_z]=den[i_y,i_z]+(np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz)),i_E])**2+np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz))+1,i_E])**2)*denfromDOS(k,E[i_E,:],kT)
return den
#%%
def k_F(mu,aR,Vz,m_eff=0.023):
"""
Find the Fermi momentum for a 1D infinite nanowire.
Parameters
----------
mu: scalar or arr
Chemical potential.
aR: scalar or arr
Spin-orbit coupling.
Vz: scalar or arr
Zeeman splitting.
m_eff: scalar or str
Effective mass.
Returns
-------
k_F: scalar or arr
Fermi momentum.
"""
if m_eff=='InAs':
m_eff=0.023
elif m_eff=='InSb':
m_eff=0.015
m=constants.m_e*m_eff
hbar=constants.hbar
mu,aR,Vz=mu*1e-3*constants.e,aR*1e-12*constants.e,Vz*1e-3*constants.e
kSO=m*aR/hbar**2
kZ=np.sqrt(2*m*Vz)/hbar
kmu_p=2*m*mu/hbar**2
kF=np.zeros(2)
kF[0]=np.sqrt(2*kSO**2+kmu_p+np.sqrt(4*kSO**4+kZ**4+4*kmu_p*kSO**2))
kF[1]=np.sqrt(2*kSO**2+kmu_p-np.sqrt(4*kSO**4+kZ**4+4*kmu_p*kSO**2))
kF=kF*1e-9
return (kF)
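#%% Illustrative sketch (assumed parameter values; mu and Vz in meV, aR in
# meV*nm as used inside the function):
#
#   kF = k_F(mu=0.5, aR=20, Vz=1, m_eff='InAs')   # two Fermi momenta in nm^-1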
#%%
def DOS(k,E):
"""
Density of states of a 1D infinite nanowire.
Parameters
----------
k: arr
momentum vector.
E: arr
Energies.
Returns
-------
DOS: arr
Density of states.
"""
DOS=np.abs(np.gradient(E,k))**(-1)/np.pi
DOS[0]=0
return(DOS)
#%%
def denfromDOS(k,E,kT):
"""
    1D charge density of an infinite nanowire.
Parameters
----------
k: arr
momentum vector.
E: arr
Energies (in units of energy).
Returns
-------
        den: scalar
            1D charge density obtained by integrating the occupied DOS.
"""
np.seterr(over='ignore')
dos=DOS(k,E)
den=0
for i in range(len(E)-1):
den=den+dos[i]*(E[i+1]-E[i])*(1 / (1 + np.exp(E[i] / kT)))
if not(np.abs(k[0])==np.abs(k[-1])):
den=den*2
return (den)
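#%% Illustrative sketch (assumed inputs): 1D density of an occupied subband,
# where k (nm^-1) and E_band are obtained from a 1D band-structure calculation.
#
#   n_1D = denfromDOS(k, E_band, kT=0.026)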
#%%
def LDOS(P_n,E_n,E_sample,a_0=0.0):
"""
Local density of states as a function of the energies E_sample.
Parameters
----------
P_n: arr
Probability density of the wavefunction at a given point for
different eigensates.
E_n: arr
Corresponding energies.
E_sample: arr
            Energies at which the LDOS is evaluated.
a_0: float
            Dirac delta characteristic width. If a_0=0 a perfect Dirac delta
            is used, while otherwise an analytical approximation of the delta
            with a characteristic width a_0 is used.
Returns
-------
LDOS: arr
            Local density of states at the given energies.
"""
n_n=len(E_n)
n_out=len(E_sample)
LDOS=np.zeros(n_out)
if a_0==0.0:
for i in range(n_out-1):
for j in range(n_n):
if (E_sample[i+1]>=E_n[j]) and (E_sample[i]<=E_n[j]):
LDOS[i]=LDOS[i]+P_n[j]
return(LDOS)
else:
if a_0=='none':
a_0=np.abs(E_sample[0]-E_sample[1])*4
def Dirac_delta(E,En,a_0):
return np.exp(-((E-En)/a_0)**2)/(np.sqrt(np.pi)*np.abs(a_0))
for i in range(n_out):
for j in range(n_n):
LDOS[i]=LDOS[i]+P_n[j]*Dirac_delta(E_sample[i],E_n[j],a_0)
return (LDOS)
#%%
def dIdV(LDOS,E_sample,kT):
"""
    Differential conductance at the given energies E_sample.
Parameters
----------
LDOS: arr
Local density of states computed using Functions.LDOS.
E_sample: arr
            Energies at which the dIdV (and LDOS) is evaluated.
kT: float
Temperature (in units of energy).
Returns
-------
dIdV: arr
            Differential conductance at the given energies.
"""
def sech(x):
return 1.0/np.cosh(x)
n=len(E_sample)
dIdV=np.zeros(n)
for i in range(n):
for j in range(n):
dIdV[i]=dIdV[i]+LDOS[j]*sech((E_sample[i]-E_sample[j])/(2*kT))**2
return (dIdV)
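#%% Illustrative sketch of the LDOS -> dIdV pipeline (P_n and E_n are assumed
# to be the probability densities at the tunneling point and the corresponding
# eigenenergies):
#
#   E_sample = np.linspace(-0.3, 0.3, 301)
#   ldos = LDOS(P_n, E_n, E_sample, a_0='none')
#   G = dIdV(ldos, E_sample, kT=0.01)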
#%% ############################# Others
#%%
def Chern_number(H_k,k_vec,N):
"""
Computes the Chern number of a 1D Hamiltonian in k-space.
Parameters
----------
H_k: arr
1D Hamiltonian in k-space. Each element H_k[:,:,i] is the
Hamiltonian evaluated at k_vec[i].
k_vec: arr
Momentum vector of the first Brillouin zone in which the
Hamiltonian is evaluated.
N: int
Number of sites in which the unit cell of the Hamiltonian is
discretized.
Returns
-------
Ch: int
Chern number of the given 1D Hamiltonian.
"""
Gamma=np.zeros((4*N,4*N),dtype=complex)
for i in range(N):
Gamma[2*i:2*i+2,2*i+2*N:2*i+2*N+2]=np.array([[1,0],[0,1]])
Gamma[2*i+2*N:2*i+2*N+2,2*i:2*i+2]=np.array([[1,0],[0,1]])
Ch=np.sign(pf.pfaffian(np.dot(Gamma,H_k[:,:,int((len(k_vec)-1)/2)])))*np.sign(pf.pfaffian(np.dot(Gamma,H_k[:,:,int(len(k_vec)-1)])))
return (Ch)
#%%
def rho_acc(x,y,z,den_acc_in,n_lattice,r_lattice,superlattice_type='none'):
"""
    Computes the surface charge density of a nanowire with hexagonal
    section.
Parameters
----------
x,y,z: arr
Positions of the mesh of the nanowire section.
den_acc_in: scalar
Magnitude of the accumulation layer.
n_lattice: int
Number of superlattice cells.
r_lattice: float
Partial coverage of the SC.
superlattice_type: str
Whether the superlattice is on top, at the bottom, or there is no
superlattice (none).
Returns
-------
rho_acc: arr
Charge density inside the wire due to the charge accumulation layer.
"""
Nx, Ny, Nz = len(x), len(y), len(z)
Lx, Ly, Lz = x[Nx-1], y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
dis=np.array([np.abs(x[0]-x[1]),np.abs(y[0]-y[1]),np.abs(z[0]-z[1])])
L_SC, L_0=Lx/n_lattice*r_lattice, Lx/n_lattice*(1-r_lattice)
den_acc_out=np.zeros((Nx,Ny,Nz))
if superlattice_type=='top':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
elif superlattice_type=='bottom':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
elif superlattice_type=='none':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)+1]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)-1]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
else:
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for k in range(Nz):
if (z[k]>=-b0) and (z[k]<=0):
den_acc_out[:,arg_isclose(2*b0/a0*y-2*b0,z[k]-dis[2])+1,k]=np.ones(Nx)*den_acc_in
den_acc_out[:,arg_isclose(-2*b0/a0*y-2*b0,z[k]-dis[2])-1,k]=np.ones(Nx)*den_acc_in
elif (z[k]<=b0) and (z[k]>=0):
den_acc_out[:,arg_isclose(2*b0/a0*y+2*b0,z[k]+dis[2])-1,k]=np.ones(Nx)*den_acc_in
den_acc_out[:,arg_isclose(-2*b0/a0*y+2*b0,z[k]+dis[2])+1,k]=np.ones(Nx)*den_acc_in
return (den_acc_out)
| StarcoderdataPython |
3283252 | ## @file
# This file is used to parse a xml file of .PKG file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
XmlParser
'''
##
# Import Modules
#
import re
from edk2basetools.UPT.Library.Xml.XmlRoutines import XmlNode
from edk2basetools.UPT.Library.Xml.XmlRoutines import CreateXmlElement
from edk2basetools.UPT.Library.Xml.XmlRoutines import XmlList
from edk2basetools.UPT.Library.Xml.XmlRoutines import XmlParseFile
from edk2basetools.UPT.Core.DistributionPackageClass import DistributionPackageClass
from edk2basetools.UPT.Object.POM.ModuleObject import DepexObject
from edk2basetools.UPT.Library.ParserValidate import IsValidInfMoudleType
from edk2basetools.UPT.Library.ParserValidate import IsValidInstallPath
from edk2basetools.UPT.Library.Misc import IsEqualList
from edk2basetools.UPT.Library.Misc import Sdict
from edk2basetools.UPT.Logger.StringTable import ERR_XML_INVALID_VARIABLENAME
from edk2basetools.UPT.Logger.StringTable import ERR_XML_INVALID_LIB_SUPMODLIST
from edk2basetools.UPT.Logger.StringTable import ERR_XML_INVALID_EXTERN_SUPARCHLIST
from edk2basetools.UPT.Logger.StringTable import ERR_XML_INVALID_EXTERN_SUPMODLIST
from edk2basetools.UPT.Logger.StringTable import ERR_XML_INVALID_EXTERN_SUPMODLIST_NOT_LIB
from edk2basetools.UPT.Logger.StringTable import ERR_FILE_NAME_INVALIDE
from edk2basetools.UPT.Logger.ToolError import PARSER_ERROR
from edk2basetools.UPT.Logger.ToolError import FORMAT_INVALID
from edk2basetools.UPT.Xml.CommonXml import DistributionPackageHeaderXml
from edk2basetools.UPT.Xml.CommonXml import MiscellaneousFileXml
from edk2basetools.UPT.Xml.CommonXml import UserExtensionsXml
from edk2basetools.UPT.Xml.XmlParserMisc import ConvertVariableName
from edk2basetools.UPT.Xml.XmlParserMisc import IsRequiredItemListNull
from edk2basetools.UPT.Xml.ModuleSurfaceAreaXml import ModuleSurfaceAreaXml
from edk2basetools.UPT.Xml.PackageSurfaceAreaXml import PackageSurfaceAreaXml
import edk2basetools.UPT.Logger.Log as Logger
##
# DistributionPackageXml
#
class DistributionPackageXml(object):
def __init__(self):
self.DistP = DistributionPackageClass()
self.Pkg = ''
## ValidateDistributionPackage
#
# Check if any required item is missing in DistributionPackage
#
def ValidateDistributionPackage(self):
XmlTreeLevel = ['DistributionPackage']
if self.DistP:
#
# Check DistributionPackage -> DistributionHeader
#
XmlTreeLevel = ['DistributionPackage', '']
CheckDict = {'DistributionHeader':self.DistP.Header }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if self.DistP.Header:
DpHeader = self.DistP.Header
XmlTreeLevel = ['DistributionPackage', 'DistributionHeader']
CheckDict = Sdict()
if DpHeader.GetAbstract():
DPAbstract = DpHeader.GetAbstract()[0][1]
else:
DPAbstract = ''
if DpHeader.GetCopyright():
DPCopyright = DpHeader.GetCopyright()[0][1]
else:
DPCopyright = ''
if DpHeader.GetLicense():
DPLicense = DpHeader.GetLicense()[0][1]
else:
DPLicense = ''
CheckDict['Name'] = DpHeader.GetName()
CheckDict['GUID'] = DpHeader.GetGuid()
CheckDict['Version'] = DpHeader.GetVersion()
CheckDict['Copyright'] = DPCopyright
CheckDict['License'] = DPLicense
CheckDict['Abstract'] = DPAbstract
CheckDict['Vendor'] = DpHeader.GetVendor()
CheckDict['Date'] = DpHeader.GetDate()
CheckDict['XmlSpecification'] = DpHeader.GetXmlSpecification()
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
else:
XmlTreeLevel = ['DistributionPackage', 'DistributionHeader']
                CheckDict = {'DistributionHeader': '', }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check Each Package
#
for Key in self.DistP.PackageSurfaceArea:
ValidatePackageSurfaceArea(self.DistP.PackageSurfaceArea[Key])
#
# Check Each Module
#
for Key in self.DistP.ModuleSurfaceArea:
ValidateMS(self.DistP.ModuleSurfaceArea[Key], ['DistributionPackage', 'ModuleSurfaceArea'])
#
# Check Each Tool
#
if self.DistP.Tools:
XmlTreeLevel = ['DistributionPackage', 'Tools', 'Header']
CheckDict = {'Name': self.DistP.Tools.GetName(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if not self.DistP.Tools.GetFileList():
XmlTreeLevel = ['DistributionPackage', 'Tools']
CheckDict = {'FileName': None, }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for Item in self.DistP.Tools.GetFileList():
XmlTreeLevel = ['DistributionPackage', 'Tools']
CheckDict = {'FileName': Item.GetURI(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check Each Misc File
#
if self.DistP.MiscellaneousFiles:
XmlTreeLevel = ['DistributionPackage', 'MiscellaneousFiles', 'Header']
CheckDict = {'Name': self.DistP.MiscellaneousFiles.GetName(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if not self.DistP.MiscellaneousFiles.GetFileList():
XmlTreeLevel = ['DistributionPackage', 'MiscellaneousFiles']
CheckDict = {'FileName': None, }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for Item in self.DistP.MiscellaneousFiles.GetFileList():
XmlTreeLevel = ['DistributionPackage', 'MiscellaneousFiles']
CheckDict = {'FileName': Item.GetURI(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check Each Distribution Level User Extension
#
for Item in self.DistP.UserExtensions:
XmlTreeLevel = ['DistributionPackage', 'UserExtensions']
CheckDict = {'UserId': Item.GetUserID(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
def FromXml(self, Filename=None):
if Filename is not None:
self.DistP = DistributionPackageClass()
#
# Load to XML
#
self.Pkg = XmlParseFile(Filename)
#
# Parse Header information
#
Tmp = DistributionPackageHeaderXml()
DistributionPackageHeader = \
Tmp.FromXml(XmlNode(self.Pkg, '/DistributionPackage/DistributionHeader'), 'DistributionHeader')
self.DistP.Header = DistributionPackageHeader
#
# Parse each PackageSurfaceArea
#
for Item in XmlList(self.Pkg, '/DistributionPackage/PackageSurfaceArea'):
Psa = PackageSurfaceAreaXml()
Package = Psa.FromXml(Item, 'PackageSurfaceArea')
self.DistP.PackageSurfaceArea[(Package.GetGuid(), \
Package.GetVersion(), \
Package.GetPackagePath())] = \
Package
#
# Parse each ModuleSurfaceArea
#
for Item in XmlList(self.Pkg, '/DistributionPackage/ModuleSurfaceArea'):
Msa = ModuleSurfaceAreaXml()
Module = Msa.FromXml(Item, 'ModuleSurfaceArea', True)
ModuleKey = (Module.GetGuid(), Module.GetVersion(), Module.GetName(), Module.GetModulePath())
self.DistP.ModuleSurfaceArea[ModuleKey] = Module
#
# Parse Tools
#
Tmp = MiscellaneousFileXml()
self.DistP.Tools = Tmp.FromXml2(XmlNode(self.Pkg, '/DistributionPackage/Tools'), 'Tools')
#
# Parse MiscFiles
#
Tmp = MiscellaneousFileXml()
self.DistP.MiscellaneousFiles = \
Tmp.FromXml2(XmlNode(self.Pkg, \
'/DistributionPackage/MiscellaneousFiles'), \
'MiscellaneousFiles')
#
# Parse UserExtensions
#
for Item in XmlList(self.Pkg, '/DistributionPackage/UserExtensions'):
Tmp = UserExtensionsXml()
self.DistP.UserExtensions.append(Tmp.FromXml2(Item, 'UserExtensions'))
#
# Check Required Items for XML
#
self.ValidateDistributionPackage()
return self.DistP
def ToXml(self, DistP):
if self.DistP:
pass
if DistP is not None:
#
# Parse DistributionPackageHeader
#
Attrs = [['xmlns', 'http://www.uefi.org/2011/1.1'],
                     ['xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance'],
]
Root = CreateXmlElement('DistributionPackage', '', [], Attrs)
Tmp = DistributionPackageHeaderXml()
Root.appendChild(Tmp.ToXml(DistP.Header, 'DistributionHeader'))
#
# Parse each PackageSurfaceArea
#
for Package in DistP.PackageSurfaceArea.values():
Psa = PackageSurfaceAreaXml()
DomPackage = Psa.ToXml(Package)
Root.appendChild(DomPackage)
#
# Parse each ModuleSurfaceArea
#
for Module in DistP.ModuleSurfaceArea.values():
Msa = ModuleSurfaceAreaXml()
DomModule = Msa.ToXml(Module)
Root.appendChild(DomModule)
#
# Parse Tools
#
Tmp = MiscellaneousFileXml()
ToolNode = Tmp.ToXml2(DistP.Tools, 'Tools')
if ToolNode is not None:
Root.appendChild(ToolNode)
#
# Parse MiscFiles
#
Tmp = MiscellaneousFileXml()
MiscFileNode = Tmp.ToXml2(DistP.MiscellaneousFiles,
'MiscellaneousFiles')
if MiscFileNode is not None:
Root.appendChild(MiscFileNode)
XmlContent = Root.toprettyxml(indent=' ')
#
# Remove empty element
#
XmlContent = re.sub(r'[\s\r\n]*<[^<>=]*/>', '', XmlContent)
#
# Remove empty help text element
#
XmlContent = re.sub(r'[\s\r\n]*<HelpText Lang="en-US"/>', '',
XmlContent)
#
# Remove SupArchList="COMMON" or "common"
#
XmlContent = \
re.sub(r'[\s\r\n]*SupArchList[\s\r\n]*=[\s\r\n]*"[\s\r\n]*COMMON'
'[\s\r\n]*"', '', XmlContent)
XmlContent = \
re.sub(r'[\s\r\n]*SupArchList[\s\r\n]*=[\s\r\n]*"[\s\r\n]*common'
'[\s\r\n]*"', '', XmlContent)
#
# Remove <SupArchList> COMMON </SupArchList>
#
XmlContent = \
re.sub(r'[\s\r\n]*<SupArchList>[\s\r\n]*COMMON[\s\r\n]*'
'</SupArchList>[\s\r\n]*', '', XmlContent)
#
# Remove <SupArchList> common </SupArchList>
#
XmlContent = \
re.sub(r'[\s\r\n]*<SupArchList>[\s\r\n]*'
'common[\s\r\n]*</SupArchList>[\s\r\n]*', '', XmlContent)
#
# Remove SupModList="COMMON" or "common"
#
XmlContent = \
re.sub(r'[\s\r\n]*SupModList[\s\r\n]*=[\s\r\n]*"[\s\r\n]*COMMON'
                r'[\s\r\n]*"', '', XmlContent)
XmlContent = \
re.sub(r'[\s\r\n]*SupModList[\s\r\n]*=[\s\r\n]*"[\s\r\n]*common'
                r'[\s\r\n]*"', '', XmlContent)
return XmlContent
return ''
## ValidateMS
#
# Check if any required item is missing in ModuleSurfaceArea
#
# @param Module: The ModuleSurfaceArea to be checked
# @param XmlTreeLevel: The top level of Module
#
def ValidateMS(Module, TopXmlTreeLevel):
ValidateMS1(Module, TopXmlTreeLevel)
ValidateMS2(Module, TopXmlTreeLevel)
ValidateMS3(Module, TopXmlTreeLevel)
## ValidateMS1
#
# Check if any required item is missing in ModuleSurfaceArea
#
# @param Module: The ModuleSurfaceArea to be checked
# @param XmlTreeLevel: The top level of Module
#
def ValidateMS1(Module, TopXmlTreeLevel):
#
# Check Guids -> GuidCName
#
XmlTreeLevel = TopXmlTreeLevel + ['Guids']
for Item in Module.GetGuidList():
if Item is None:
CheckDict = {'GuidCName':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['Guids', 'GuidCName']
for Item in Module.GetGuidList():
CheckDict = {'CName':Item.GetCName(),
'GuidType':Item.GetGuidTypeList(),
'Usage':Item.GetUsage()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if Item.GetVariableName():
Result = ConvertVariableName(Item.GetVariableName())
if Result is None:
Msg = "->".join(Node for Node in XmlTreeLevel)
ErrorMsg = ERR_XML_INVALID_VARIABLENAME % (Item.GetVariableName(), Item.GetCName(), Msg)
Logger.Error('\nUPT', PARSER_ERROR, ErrorMsg, RaiseError=True)
else:
Item.SetVariableName(Result)
#
# Check Protocols -> Protocol
#
XmlTreeLevel = TopXmlTreeLevel + ['Protocols']
for Item in Module.GetProtocolList():
if Item is None:
CheckDict = {'Protocol':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['Protocols', 'Protocol']
for Item in Module.GetProtocolList():
CheckDict = {'CName':Item.GetCName(),
'Usage':Item.GetUsage()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PPIs -> Ppi
#
XmlTreeLevel = TopXmlTreeLevel + ['PPIs']
for Item in Module.GetPpiList():
if Item is None:
CheckDict = {'Ppi':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['PPIs', 'Ppi']
for Item in Module.GetPpiList():
CheckDict = {'CName':Item.GetCName(),
'Usage':Item.GetUsage()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PcdCoded -> Entry
#
XmlTreeLevel = TopXmlTreeLevel + ['PcdCoded']
for Item in Module.GetPcdList():
if Item is None:
CheckDict = {'PcdEntry':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['PcdCoded', 'PcdEntry']
for Item in Module.GetPcdList():
CheckDict = {'TokenSpaceGuidCname':Item.GetTokenSpaceGuidCName(),
'CName':Item.GetCName(),
'PcdUsage':Item.GetValidUsage(),
'PcdItemType':Item.GetItemType()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check Externs -> Extern
#
XmlTreeLevel = TopXmlTreeLevel + ['Externs']
for Item in Module.GetExternList():
if Item is None:
CheckDict = {'Extern':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# If SupArchList is used to identify different EntryPoint, UnloadImage, Constructor/Destructor elements and
# that SupArchList does not match ModuleSurfaceArea.ModuleProperties:SupArchList, the tool must exit gracefully,
# informing the user that the EDK II Build system does not support different EntryPoint, UnloadImage,
# Constructor or Destructor elements based on Architecture type. Two SupArchList attributes are considered
# identical if it lists the same CPU architectures in any order.
#
for Item in Module.GetExternList():
if len(Item.SupArchList) > 0:
if not IsEqualList(Item.SupArchList, Module.SupArchList):
Logger.Error('\nUPT',
PARSER_ERROR,
ERR_XML_INVALID_EXTERN_SUPARCHLIST % (str(Item.SupArchList), str(Module.SupArchList)),
RaiseError=True)
#
# Check DistributionPackage -> ModuleSurfaceArea -> UserExtensions
#
XmlTreeLevel = TopXmlTreeLevel + ['UserExtensions']
for Item in Module.GetUserExtensionList():
CheckDict = {'UserId':Item.GetUserID(), 'Identifier':Item.GetIdentifier()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> MiscellaneousFiles -> Filename
#
XmlTreeLevel = TopXmlTreeLevel + ['MiscellaneousFiles']
for Item in Module.GetMiscFileList():
if not Item.GetFileList():
CheckDict = {'Filename': '', }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for File in Item.GetFileList():
            CheckDict = {'Filename': File.GetURI(), }
            IsRequiredItemListNull(CheckDict, XmlTreeLevel)
## ValidateMS2
#
# Check if any required item is missing in ModuleSurfaceArea
#
# @param Module: The ModuleSurfaceArea to be checked
# @param XmlTreeLevel: The top level of Module
#
def ValidateMS2(Module, TopXmlTreeLevel):
#
# Check Header
#
XmlTreeLevel = TopXmlTreeLevel + ['Header']
CheckDict = Sdict()
CheckDict['Name'] = Module.GetName()
CheckDict['BaseName'] = Module.GetBaseName()
CheckDict['GUID'] = Module.GetGuid()
CheckDict['Version'] = Module.GetVersion()
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check ModuleProperties
#
XmlTreeLevel = TopXmlTreeLevel + ['ModuleProperties']
CheckDict = {'ModuleType':Module.GetModuleType(),
'Path':Module.GetModulePath()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if not IsValidInstallPath(Module.GetModulePath()):
Logger.Error("UPT", FORMAT_INVALID, ERR_FILE_NAME_INVALIDE % Module.GetModulePath())
#
# Check ModuleProperties->BootMode
#
XmlTreeLevel = TopXmlTreeLevel + ['ModuleProperties'] + ['BootMode']
for Item in Module.GetBootModeList():
CheckDict = {'Usage':Item.GetUsage(),
'SupportedBootModes':Item.GetSupportedBootModes()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check ModuleProperties->Event
#
XmlTreeLevel = TopXmlTreeLevel + ['ModuleProperties'] + ['Event']
for Item in Module.GetEventList():
CheckDict = {'Usage':Item.GetUsage(),
'EventType':Item.GetEventType()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check ModuleProperties->Hob
#
XmlTreeLevel = TopXmlTreeLevel + ['ModuleProperties'] + ['HOB']
for Item in Module.GetHobList():
CheckDict = {'Usage':Item.GetUsage(),
'HobType':Item.GetHobType()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# The UDP Specification supports the module type of UEFI_RUNTIME_DRIVER, which is not present in the EDK II INF
# File Specification v. 1.23, so UPT must perform the following translation that include the generation of a
# [Depex] section.
#
if Module.ModuleType == "UEFI_RUNTIME_DRIVER":
Module.ModuleType = "DXE_RUNTIME_DRIVER"
DxeObj = DepexObject()
DxeObj.SetDepex("gEfiBdsArchProtocolGuid AND \ngEfiCpuArchProtocolGuid AND\n" + \
"gEfiMetronomeArchProtocolGuid AND \ngEfiMonotonicCounterArchProtocolGuid AND\n" + \
"gEfiRealTimeClockArchProtocolGuid AND \ngEfiResetArchProtocolGuid AND\n" + \
"gEfiRuntimeArchProtocolGuid AND \ngEfiSecurityArchProtocolGuid AND\n" + \
"gEfiTimerArchProtocolGuid AND \ngEfiVariableWriteArchProtocolGuid AND\n" + \
"gEfiVariableArchProtocolGuid AND \ngEfiWatchdogTimerArchProtocolGuid")
DxeObj.SetModuleType(['DXE_RUNTIME_DRIVER'])
Module.PeiDepex = []
Module.DxeDepex = []
Module.SmmDepex = []
Module.DxeDepex.append(DxeObj)
#
# Check LibraryClassDefinitions -> LibraryClass
#
XmlTreeLevel = TopXmlTreeLevel + ['LibraryClassDefinitions']
for Item in Module.GetLibraryClassList():
if Item is None:
CheckDict = {'LibraryClass':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['LibraryClassDefinitions', 'LibraryClass']
IsLibraryModule = False
LibrarySupModList = []
for Item in Module.GetLibraryClassList():
CheckDict = {'Keyword':Item.GetLibraryClass(),
'Usage':Item.GetUsage()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# If the LibraryClass:SupModList is not "UNDEFINED" the LIBRARY_CLASS entry must have the list
# appended using the format:
# LIBRARY_CLASS = <ClassName> ["|" <Edk2ModuleTypeList>]
#
# Edk2ModuleTypeList ::= <ModuleType> [" " <ModuleType>]{0,}
# <ModuleTypes> ::= {"BASE"} {"SEC"} {"PEI_CORE"} {"PEIM"}
# {"DXE_CORE"} {"DXE_DRIVER"} {"SMM_CORE"}
# {"DXE_SMM_DRIVER"} {"DXE_RUNTIME_DRIVER"}
# {"DXE_SAL_DRIVER"} {"UEFI_DRIVER"}
# {"UEFI_APPLICATION"} {"USER_DEFINED"}
#
if len(Item.SupModuleList) > 0:
for SupModule in Item.SupModuleList:
if not IsValidInfMoudleType(SupModule):
Logger.Error('\nUPT',
PARSER_ERROR,
ERR_XML_INVALID_LIB_SUPMODLIST % (Item.LibraryClass, str(SupModule)),
RaiseError=True)
if Item.Usage == 'PRODUCES' or Item.Usage == 'SOMETIMES_PRODUCES':
IsLibraryModule = True
LibrarySupModList = Item.SupModuleList
#
# For Library modules (indicated by a LIBRARY_CLASS statement in the [Defines] section)
# If the SupModList attribute of the CONSTRUCTOR or DESTRUCTOR element does not match the Supported Module
# Types listed after "LIBRARY_CLASS = <Keyword> |", the tool should gracefully exit with an error message
# stating that there is a conflict in the module types the CONSTRUCTOR/DESTRUCTOR is to be used with and
# the Module types this Library supports.
#
if IsLibraryModule:
for Item in Module.GetExternList():
if Item.Constructor or Item.Destructor:
if hasattr(Item, 'SupModList') and len(Item.SupModList) > 0 and \
not IsEqualList(Item.SupModList, LibrarySupModList):
Logger.Error('\nUPT',
PARSER_ERROR,
ERR_XML_INVALID_EXTERN_SUPMODLIST % (str(Item.SupModList), str(LibrarySupModList)),
RaiseError=True)
#
# If the module is not a library module, the MODULE_TYPE listed in the ModuleSurfaceArea.Header must match the
# SupModList attribute. If these conditions cannot be met, the tool must exit gracefully, informing the user
# that the EDK II Build system does not currently support the features required by this Module.
#
if not IsLibraryModule:
for Item in Module.GetExternList():
if hasattr(Item, 'SupModList') and len(Item.SupModList) > 0 and \
not IsEqualList(Item.SupModList, [Module.ModuleType]):
Logger.Error('\nUPT',
PARSER_ERROR,
ERR_XML_INVALID_EXTERN_SUPMODLIST_NOT_LIB % (str(Module.ModuleType), str(Item.SupModList)),
RaiseError=True)
#
# Check SourceFiles
#
XmlTreeLevel = TopXmlTreeLevel + ['SourceFiles']
for Item in Module.GetSourceFileList():
if Item is None:
CheckDict = {'Filename':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['SourceFiles']
for Item in Module.GetSourceFileList():
CheckDict = {'Filename':Item.GetSourceFile()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for ItemCount in range(len(Module.GetBinaryFileList())):
Item = Module.GetBinaryFileList()[ItemCount]
if Item and len(Item.FileNamList) > 0 and Item.FileNamList[0].FileType == 'FREEFORM':
Item.FileNamList[0].FileType = 'SUBTYPE_GUID'
Module.GetBinaryFileList()[ItemCount] = Item
## ValidateMS3
#
# Check if any required item is missing in ModuleSurfaceArea
#
# @param Module: The ModuleSurfaceArea to be checked
# @param XmlTreeLevel: The top level of Module
#
def ValidateMS3(Module, TopXmlTreeLevel):
#
# Check PackageDependencies -> Package
#
XmlTreeLevel = TopXmlTreeLevel + ['PackageDependencies']
for Item in Module.GetPackageDependencyList():
if Item is None:
CheckDict = {'Package':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['PackageDependencies', 'Package']
for Item in Module.GetPackageDependencyList():
CheckDict = {'GUID':Item.GetGuid()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check BinaryFiles -> BinaryFile
#
for Item in Module.GetBinaryFileList():
if Item is None:
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles']
CheckDict = {'BinaryFile':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if not Item.GetFileNameList():
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile']
CheckDict = {'Filename':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile']
for File in Item.GetFileNameList():
CheckDict = {'Filename':File.GetFilename(),
'FileType':File.GetFileType()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for AsBuilt in Item.GetAsBuiltList():
#
# Check LibInstance
#
if len(AsBuilt.LibraryInstancesList) == 1 and not AsBuilt.LibraryInstancesList[0]:
CheckDict = {'GUID':''}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt', 'LibraryInstances']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for LibItem in AsBuilt.LibraryInstancesList:
CheckDict = {'Guid':LibItem.Guid,
'Version':LibItem.Version}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt', 'LibraryInstances']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PatchPcd
#
for PatchPcdItem in AsBuilt.PatchPcdList:
CheckDict = {'TokenSpaceGuidValue':PatchPcdItem.TokenSpaceGuidValue,
'PcdCName':PatchPcdItem.PcdCName,
'Token':PatchPcdItem.Token,
'DatumType':PatchPcdItem.DatumType,
'Value':PatchPcdItem.DefaultValue,
'Offset':PatchPcdItem.Offset}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt', 'PatchPcdValue']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PcdError
#
for PcdErrorItem in PatchPcdItem.PcdErrorsList:
CheckDict = {'ErrorNumber':PcdErrorItem.ErrorNumber}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt',
'PatchPcdValue', 'PcdError']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PcdEx
#
for PcdExItem in AsBuilt.PcdExValueList:
CheckDict = {'TokenSpaceGuidValue':PcdExItem.TokenSpaceGuidValue,
'Token':PcdExItem.Token,
'DatumType':PcdExItem.DatumType}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt', 'PcdExValue']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PcdError
#
for PcdErrorItem in PcdExItem.PcdErrorsList:
CheckDict = {'ErrorNumber':PcdErrorItem.ErrorNumber}
XmlTreeLevel = TopXmlTreeLevel + ['BinaryFiles', 'BinaryFile', 'AsBuilt',
'PcdExValue', 'PcdError']
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check SmmDepex
#
XmlTreeLevel = TopXmlTreeLevel + ['SmmDepex']
for Item in Module.GetSmmDepex():
CheckDict = {'Expression':Item.GetDepex()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check PeiDepex
#
XmlTreeLevel = TopXmlTreeLevel + ['PeiDepex']
for Item in Module.GetPeiDepex():
CheckDict = {'Expression':Item.GetDepex()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DxeDepex
#
XmlTreeLevel = TopXmlTreeLevel + ['DxeDepex']
for Item in Module.GetDxeDepex():
CheckDict = {'Expression':Item.GetDepex()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check <UserExtensions>
#
XmlTreeLevel = TopXmlTreeLevel + ['UserExtensions']
for Item in Module.GetUserExtensionList():
CheckDict = {'UserId':Item.GetUserID(), 'Identifier':Item.GetIdentifier()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
## ValidatePS1
#
# ValidatePS1
#
def ValidatePS1(Package):
#
# Check DistributionPackage -> PackageSurfaceArea -> Header
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'Header']
CheckDict = Sdict()
CheckDict['Name'] = Package.GetName()
CheckDict['BaseName'] = Package.GetBaseName()
CheckDict['GUID'] = Package.GetGuid()
CheckDict['Version'] = Package.GetVersion()
CheckDict['PackagePath'] = Package.GetPackagePath()
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
if not IsValidInstallPath(Package.GetPackagePath()):
Logger.Error("UPT", FORMAT_INVALID, ERR_FILE_NAME_INVALIDE % Package.GetPackagePath())
#
# Check DistributionPackage -> PackageSurfaceArea -> ClonedFrom
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'ClonedFrom']
for Item in Package.GetClonedFromList():
if Item is None:
CheckDict = Sdict()
CheckDict['GUID'] = ''
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
CheckDict = Sdict()
CheckDict['GUID'] = Item.GetPackageGuid()
CheckDict['Version'] = Item.GetPackageVersion()
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> LibraryClassDeclarations -> LibraryClass
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'LibraryClassDeclarations']
for Item in Package.GetLibraryClassList():
if Item is None:
CheckDict = {'LibraryClass':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'LibraryClassDeclarations', 'LibraryClass']
for Item in Package.GetLibraryClassList():
CheckDict = {'Keyword':Item.GetLibraryClass(),
'HeaderFile':Item.GetIncludeHeader()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> IndustryStandardIncludes -> IndustryStandardHeader
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'IndustryStandardIncludes']
for Item in Package.GetStandardIncludeFileList():
if Item is None:
CheckDict = {'IndustryStandardHeader':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'IndustryStandardIncludes', 'IndustryStandardHeader']
for Item in Package.GetStandardIncludeFileList():
CheckDict = {'HeaderFile':Item.GetFilePath()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> PackageIncludes -> PackageHeader
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PackageIncludes']
for Item in Package.GetPackageIncludeFileList():
if Item is None:
CheckDict = {'PackageHeader':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PackageIncludes', 'PackageHeader']
for Item in Package.GetPackageIncludeFileList():
CheckDict = {'HeaderFile':Item.GetFilePath()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
## ValidatePS2
#
# ValidatePS2
#
def ValidatePS2(Package):
#
# Check DistributionPackage -> PackageSurfaceArea -> Modules -> ModuleSurfaceArea
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'Modules', 'ModuleSurfaceArea']
for Item in Package.GetModuleDict().values():
ValidateMS(Item, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> GuidDeclarations Entry
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'GuidDeclarations']
for Item in Package.GetGuidList():
if Item is None:
CheckDict = {'Entry':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'GuidDeclarations', 'Entry']
for Item in Package.GetGuidList():
CheckDict = {'CName':Item.GetCName(),
'GuidValue':Item.GetGuid()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> ProtocolDeclarations -> Entry
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'ProtocolDeclarations']
for Item in Package.GetProtocolList():
if Item is None:
CheckDict = {'Entry':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'ProtocolDeclarations', 'Entry']
for Item in Package.GetProtocolList():
CheckDict = {'CName':Item.GetCName(),
'GuidValue':Item.GetGuid()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> PpiDeclarations -> Entry
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PpiDeclarations']
for Item in Package.GetPpiList():
if Item is None:
CheckDict = {'Entry':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PpiDeclarations', 'Entry']
for Item in Package.GetPpiList():
CheckDict = {'CName':Item.GetCName(),
'GuidValue':Item.GetGuid()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> PcdDeclarations -> Entry
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PcdDeclarations']
for Item in Package.GetPcdList():
if Item is None:
CheckDict = {'PcdEntry':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'PcdDeclarations', 'PcdEntry']
for Item in Package.GetPcdList():
CheckDict = {'TokenSpaceGuidCname':Item.GetTokenSpaceGuidCName(),
'Token':Item.GetToken(),
'CName':Item.GetCName(),
'DatumType':Item.GetDatumType(),
'ValidUsage':Item.GetValidUsage(),
'DefaultValue':Item.GetDefaultValue()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> UserExtensions
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'UserExtensions']
for Item in Package.GetUserExtensionList():
CheckDict = {'UserId':Item.GetUserID(), 'Identifier':Item.GetIdentifier()}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
#
# Check DistributionPackage -> PackageSurfaceArea -> MiscellaneousFiles -> Filename
#
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'MiscellaneousFiles']
for Item in Package.GetMiscFileList():
if not Item.GetFileList():
CheckDict = {'Filename': '', }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
for File in Item.GetFileList():
CheckDict = {'Filename': File.GetURI(), }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
## ValidatePackageSurfaceArea
#
# Check if any required item is missing in PackageSurfaceArea
#
# @param Package: The PackageSurfaceArea to be checked
#
def ValidatePackageSurfaceArea(Package):
ValidatePS1(Package)
ValidatePS2(Package)
| StarcoderdataPython |
3391860 | #image to text
from PIL import Image
from pytesseract import image_to_string
img=Image.open('/home/soham/Pictures/check.png')
text=image_to_string(img)
print(text)
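# Note: pytesseract is only a wrapper; the Tesseract OCR engine itself must be
# installed and discoverable on PATH (or pointed to via
# pytesseract.pytesseract.tesseract_cmd) for image_to_string to work.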
| StarcoderdataPython |
3255869 | <reponame>singh-hrituraj/Transformers
"""
Code/Comments By <NAME>
Code reference: http://nlp.seas.harvard.edu/2018/04/03/attention.html
June 2019
"""
import torch.nn as nn
from utils import *
class Decoder(nn.Module):
"""Base class for generic Decoder"""
def __init__(self, layer, N):
"""Initializes the class
[Inputs]
layer : the core decoder layer
        N : Number of core layers to be incorporated in the whole decoder architecture
"""
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
"""Performs the forward operation on input
[Inputs]
x : tgt input for the encoder
memory : the output of encoder - encoded representation of input/source
src_mask : mask to be applied over source
tgt_mask : mask to be applied over target
"""
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
"""Base class for decoder layer"""
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
"""Initializes the class
[Inputs]
        size : dimensionality of the layer's input/output (d_model)
        self_attn : self attention performing function/object
        src_attn : attention applied over the encoder output (source memory)
feed_forward : feed forwarding function/object
droput : dropout rate
"""
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"""Feed forward operations of decoder layer
[Inputs]
x : input target sequence
memory : the output of encoder - encoded representation of input/source
src_mask : mask to be applied over source
tgt_mask : mask to be applied over target"""
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
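# Usage sketch (not part of the original repo): wiring a small decoder stack.
# MultiHeadedAttention and PositionwiseFeedForward are assumed to be available
# from utils with the usual Annotated Transformer signatures; those names are
# an assumption, not something shown in this file.
#
#   import copy
#   attn = MultiHeadedAttention(h=8, d_model=512)
#   ff = PositionwiseFeedForward(d_model=512, d_ff=2048, dropout=0.1)
#   layer = DecoderLayer(512, copy.deepcopy(attn), copy.deepcopy(attn),
#                        copy.deepcopy(ff), dropout=0.1)
#   decoder = Decoder(layer, N=6)
#   out = decoder(tgt_embeddings, memory, src_mask, tgt_mask)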
| StarcoderdataPython |
3379483 | import numpy as np
class MyList:
    arr = []
    index = -1
    def __init__(self, arr):
        self.arr = arr
        print(self.arr)
    def __iter__(self):
        # Reset the cursor so iteration always starts from the first element.
        self.index = -1
        return self
    def __next__(self):
        if self.index + 1 >= len(self.arr):
            raise StopIteration
        else:
            self.index = self.index + 1
            return self.arr[self.index]
if __name__ == '__main__':
# myList = [15, 54, 2021, 10, 19]
# it = iter(myList)
# for elem in myList:
# print(type(elem))
# print(elem)
# print(next(it))
arr = [15, 54, 2021, 10, 19]
myList = MyList(arr)
myiter = iter(myList)
    while True:
        try:
            print(next(myiter))
        except StopIteration:
            break
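    # Equivalent, more idiomatic sketch: a for loop drives the same iterator
    # protocol and handles StopIteration automatically.
    #   for elem in MyList([15, 54, 2021, 10, 19]):
    #       print(elem)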
| StarcoderdataPython |
3362429 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_serialization import jsonutils
from six.moves import http_client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class TrustTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin):
"""Common functionality for all trust tests.
Sets up trustor and trustee users and trust.
"""
def setUp(self):
super(TrustTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
self.domain_id = domain['id']
trustor_user = unit.new_user_ref(domain_id=self.domain_id)
self.trustor_user_id = PROVIDERS.identity_api.create_user(
trustor_user)['id']
trustee_user = unit.new_user_ref(domain_id=self.domain_id)
self.trustee_user_id = PROVIDERS.identity_api.create_user(
trustee_user)['id']
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
self.project_id = project['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.trustor_user_id,
project_id=self.project_id
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.trustee_user_id,
project_id=project['id']
)
self.trust_id = uuid.uuid4().hex
self.trust_data = {
'trust': {'trustor_user_id': self.trustor_user_id,
'trustee_user_id': self.trustee_user_id,
'project_id': self.project_id,
'impersonation': False},
'roles': [{"id": self.bootstrapper.member_role_id}]
}
auth = self.build_authentication_request(
user_id=self.trustor_user_id,
password=<PASSWORD>['password'],
project_id=project['id']
)
# Grab a token using the trustor persona we're testing and prepare
# headers for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.trustor_headers = {'X-Auth-Token': self.token_id}
auth = self.build_authentication_request(
user_id=self.trustee_user_id,
password=<PASSWORD>['password'],
project_id=project['id']
)
# Grab a token using the trustee persona we're testing and prepare
# headers for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.trustee_headers = {'X-Auth-Token': self.token_id}
def _override_policy_old_defaults(self):
# TODO(cmurphy): This is to simulate what would happen if the operator
# had generated a sample policy config, or had never removed their old
# policy files since we adopted policy in code, and had explicitly
# retained the old "" policy check strings. Remove this once the
# hardcoded enforcement is removed from the trusts API.
with open(self.policy_file_name, 'w') as f:
overridden_policies = {
'identity:list_trusts': '',
'identity:delete_trust': '',
'identity:get_trust': '',
'identity:list_roles_for_trust': '',
'identity:get_role_for_trust': '',
}
f.write(jsonutils.dumps(overridden_policies))
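        # The file written above is plain JSON, e.g. (sketch):
        #   {"identity:list_trusts": "", "identity:delete_trust": "", ...}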
class _AdminTestsMixin(object):
"""Tests for all admin users.
This exercises both the is_admin user and users granted the admin role on
the system scope.
"""
def test_admin_cannot_create_trust_for_other_user(self):
json = {'trust': self.trust_data['trust']}
json['trust']['roles'] = self.trust_data['roles']
with self.test_client() as c:
c.post(
'/v3/OS-TRUST/trusts',
json=json,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_list_all_trusts(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts',
headers=self.headers
)
self.assertEqual(1, len(r.json['trusts']))
class AdminTokenTests(TrustTests, _AdminTestsMixin):
"""Tests for the is_admin user.
The Trusts API has hardcoded is_admin checks that we need to ensure are
preserved through the system-scope transition.
"""
def setUp(self):
super(AdminTokenTests, self).setUp()
self.config_fixture.config(admin_token='<PASSWORD>')
self.headers = {'X-Auth-Token': '<PASSWORD>'}
def test_admin_can_delete_trust_for_other_user(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers,
expected_status_code=http_client.NO_CONTENT
)
def test_admin_can_get_non_existent_trust_not_found(self):
trust_id = uuid.uuid4().hex
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % trust_id,
headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
def test_admin_cannot_get_trust_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % self.trust_id,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_cannot_list_trust_roles_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_cannot_get_trust_role_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
class _SystemUserTests(object):
"""Tests for system admin, member, and reader."""
def test_user_can_get_non_existent_trust(self):
trust_id = uuid.uuid4().hex
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % trust_id,
headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
def test_user_can_get_trust_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s' % self.trust_id,
headers=self.headers
)
self.assertEqual(r.json['trust']['id'], self.trust_id)
def test_user_can_list_trusts_for_trustee(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.headers
)
def test_user_can_list_trusts_for_trustor(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.headers
)
def test_user_can_list_trust_roles_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.headers
)
self.assertEqual(r.json['roles'][0]['id'],
self.bootstrapper.member_role_id)
def test_user_can_get_trust_role_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.headers
)
class _SystemReaderMemberTests(_SystemUserTests):
"""Tests for system readers and members."""
def test_user_cannot_create_trust(self):
json = {'trust': self.trust_data['trust']}
json['trust']['roles'] = self.trust_data['roles']
with self.test_client() as c:
c.post(
'/v3/OS-TRUST/trusts',
json=json,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
class SystemReaderTests(TrustTests, _SystemReaderMemberTests):
"""Tests for system reader users."""
def setUp(self):
super(SystemReaderTests, self).setUp()
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_reader = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_reader
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.reader_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=<PASSWORD>['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(TrustTests, _SystemReaderMemberTests):
"""Tests for system member users."""
def setUp(self):
super(SystemMemberTests, self).setUp()
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_member = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_member
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.member_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=<PASSWORD>['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(TrustTests, _AdminTestsMixin, _SystemUserTests):
"""Tests for system admin users."""
def setUp(self):
super(SystemAdminTests, self).setUp()
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrap<PASSWORD>,
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_admin_can_delete_trust_for_other_user(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers
)
def test_admin_cannot_delete_trust_for_user_overridden_defaults(self):
# only the is_admin admin can do this
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_cannot_get_trust_for_other_user_overridden_defaults(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % self.trust_id,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_cannot_list_roles_for_other_user_overridden_defaults(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_admin_cannot_get_trust_role_for_other_user_overridden(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_list_all_trusts_overridden_defaults(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts',
headers=self.headers
)
self.assertEqual(1, len(r.json['trusts']))
class ProjectUserTests(TrustTests):
"""Tests for all project users."""
def setUp(self):
super(ProjectUserTests, self).setUp()
other_user = unit.new_user_ref(domain_id=self.domain_id)
self.other_user_id = PROVIDERS.identity_api.create_user(
other_user)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.other_user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.other_user_id,
password=<PASSWORD>['password'],
project_id=self.project_id
)
# Grab a token using another persona who has no trusts associated with
# them
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.other_headers = {'X-Auth-Token': self.token_id}
def test_user_can_list_trusts_of_whom_they_are_the_trustor(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.trustor_headers
)
self.assertEqual(1, len(r.json['trusts']))
self.assertEqual(self.trust_id, r.json['trusts'][0]['id'])
def test_user_can_list_trusts_delegated_to_them(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.trustee_headers
)
self.assertEqual(1, len(r.json['trusts']))
self.assertEqual(self.trust_id, r.json['trusts'][0]['id'])
def test_trustor_cannot_list_trusts_for_trustee(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.trustor_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustee_cannot_list_trusts_for_trustor(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_trusts_for_other_trustor(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_trusts_for_other_trustee(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_all_trusts(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts',
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_get_another_users_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_can_get_non_existent_trust_not_found(self):
trust_id = uuid.uuid4().hex
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % trust_id,
headers=self.other_headers,
expected_status_code=http_client.NOT_FOUND
)
def test_user_can_get_trust_of_whom_they_are_the_trustor(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustor_headers
)
def test_user_can_get_trust_delegated_to_them(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustee_headers
)
self.assertEqual(r.json['trust']['id'], self.trust_id)
def test_trustor_can_create_trust(self):
json = {'trust': self.trust_data['trust']}
json['trust']['roles'] = self.trust_data['roles']
with self.test_client() as c:
c.post(
'/v3/OS-TRUST/trusts',
json=json,
headers=self.trustor_headers
)
def test_trustee_cannot_create_trust(self):
json = {'trust': self.trust_data['trust']}
json['trust']['roles'] = self.trust_data['roles']
with self.test_client() as c:
c.post(
'/v3/OS-TRUST/trusts',
json=json,
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_can_delete_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustor_headers
)
def test_trustee_cannot_delete_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_trust_for_other_user(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_can_list_trust_roles(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.trustor_headers
)
self.assertEqual(r.json['roles'][0]['id'],
self.bootstrapper.member_role_id)
def test_trustee_can_list_trust_roles(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.trustee_headers
)
self.assertEqual(r.json['roles'][0]['id'],
self.bootstrapper.member_role_id)
def test_user_cannot_list_trust_roles_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_can_get_trust_role(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.trustor_headers
)
def test_trustee_can_get_trust_role(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.trustee_headers
)
def test_user_cannot_get_trust_role_for_other_user(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_cannot_list_trusts_for_trustee_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.trustor_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustee_cannot_list_trusts_for_trustor_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_trusts_for_other_trustor_overridden(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_trusts_for_trustee_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_all_trusts_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts',
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_can_delete_trust_overridden_default(self):
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustor_headers
)
def test_trustee_cannot_delete_trust_overridden_default(self):
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustee_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_trust_for_other_user_overridden_default(self):
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_can_get_trust_of_whom_they_are_the_trustor_overridden(self):
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustor_headers
)
def test_user_can_get_trust_delegated_to_them_overridden_default(self):
self._override_policy_old_defaults()
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.trustee_headers
)
self.assertEqual(r.json['trust']['id'], self.trust_id)
def test_trustor_can_list_trust_roles_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.trustor_headers
)
self.assertEqual(r.json['roles'][0]['id'],
self.bootstrapper.member_role_id)
def test_trustee_can_list_trust_roles_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
r = c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.trustee_headers
)
self.assertEqual(r.json['roles'][0]['id'],
self.bootstrapper.member_role_id)
def test_user_cannot_list_trust_roles_other_user_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustor_can_get_trust_role_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.trustor_headers
)
def test_trustee_can_get_trust_role_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.trustee_headers
)
def test_user_cannot_get_trust_role_other_user_overridden_default(self):
self._override_policy_old_defaults()
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.other_headers,
expected_status_code=http_client.FORBIDDEN
)
class DomainUserTests(TrustTests):
"""Tests for all domain users.
Domain users should not be able to interact with trusts at all.
"""
def setUp(self):
super(DomainUserTests, self).setUp()
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
domain_admin = unit.new_user_ref(domain_id=self.domain_id)
self.user_id = PROVIDERS.identity_api.create_user(
domain_admin)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.admin_role_id, user_id=self.user_id,
domain_id=self.domain_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=<PASSWORD>['password'],
domain_id=self.domain_id
)
# Grab a token using another persona who has no trusts associated with
# them
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_trustor_cannot_list_trusts_for_trustee(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustee_user_id=%s' %
self.trustee_user_id),
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_trustee_cannot_list_trusts_for_trustor(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
('/v3/OS-TRUST/trusts?trustor_user_id=%s' %
self.trustor_user_id),
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_all_trusts(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts',
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_get_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_can_get_non_existent_trust_not_found(self):
trust_id = uuid.uuid4().hex
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s' % trust_id,
headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
def test_user_cannot_create_trust(self):
trust_data = self.trust_data['trust']
trust_data['trustor_user_id'] = self.user_id
json = {'trust': trust_data}
json['trust']['roles'] = self.trust_data['roles']
with self.test_client() as c:
c.post(
'/v3/OS-TRUST/trusts',
json=json,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_trust(self):
ref = PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.delete(
'/v3/OS-TRUST/trusts/%s' % ref['id'],
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_trust_roles(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.get(
'/v3/OS-TRUST/trusts/%s/roles' % self.trust_id,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_get_trust_role(self):
PROVIDERS.trust_api.create_trust(
self.trust_id, **self.trust_data)
with self.test_client() as c:
c.head(
('/v3/OS-TRUST/trusts/%s/roles/%s' %
(self.trust_id, self.bootstrapper.member_role_id)),
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
| StarcoderdataPython |
1689109 | # wp-data-splitter.py: split a large wordpress data file into smaller ones
# See https://github.com/kei-51/wp-data-splitter for details.
# License: MIT license http://www.opensource.org/licenses/mit-license.php
import sys
def main():
    # Default limit is 1.5M characters, since 2M bytes is the common PHP upload limit.
limit = 1572864
if len(sys.argv) == 2:
file = sys.argv[1]
elif len(sys.argv) == 3:
file = sys.argv[1]
        if not sys.argv[2].isdigit():
            print "wp-data-splitter.py <filename> [<limit size in bytes>]"
            sys.exit()
limit = int(sys.argv[2])
else:
        print "wp-data-splitter.py <filename> [<limit size in bytes>]"
sys.exit()
# set the internal limit not to exceed the original limit
limit -= 10000
src = open(file,'r')
header = read_header(src)
header_wc = header.count('')
footer = '</channel></rss>'
footer_wc = footer.count('')
wc = header_wc
page = 1
dst_file = file+'.'+str(page)+'.xml'
dst = open(dst_file, 'w')
dst.write(header)
while True:
s = read_item(src)
wc += s.count('')
dst.write(s)
if (wc > limit or s == ''):
dst.write(footer)
wc += footer_wc
dst.close()
print dst_file + ': '+str(wc)+' chars'
if (s == ''):
break
page+=1
dst_file = file+'.'+str(page)+'.xml'
dst = open(dst_file, 'w')
dst.write(header)
wc = header_wc
def read_header(f):
s = ''
while True:
l = f.readline()
if(l.find('<item>') > -1):
break
s += l
return s
def read_item(f):
s = ''
while True:
l = f.readline()
# end of the file.
if (l==''):
return ''
s += l
if(l.find('</item>') > -1):
break
# This happens only on the 1st item.
if (s.find('<item>') == -1):
s = '<item>\n' + s
return s
#-------------------------------
if __name__ == "__main__":
main()
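# Example invocation (sketch; the export file name is hypothetical):
#   python wp-data-splitter.py wordpress-export.xml 1572864
# Output files are written next to the input as
# wordpress-export.xml.1.xml, wordpress-export.xml.2.xml, ...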
| StarcoderdataPython |
1722304 | <reponame>CodingLeeSeungHoon/Python_Algorithm_TeamNote
""" matrix transpose """
def transpose(original):
matrix = original[:]
matrix = [list(x) for x in zip(*matrix)]
return matrix
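# Example (sketch): zip(*matrix) pairs up the i-th entries of every row, so a
# 2x3 input becomes a 3x2 output.
#   transpose([[1, 2, 3], [4, 5, 6]])  ->  [[1, 4], [2, 5], [3, 6]]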
| StarcoderdataPython |
1685958 | <reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantDeviceCrashinfoUploadModel(object):
def __init__(self):
self._event_time = None
self._extend_info = None
self._hardware_version = None
self._log_time = None
self._message_type = None
self._product = None
self._shop_id = None
self._sn_id = None
@property
def event_time(self):
return self._event_time
@event_time.setter
def event_time(self, value):
self._event_time = value
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
self._extend_info = value
@property
def hardware_version(self):
return self._hardware_version
@hardware_version.setter
def hardware_version(self, value):
self._hardware_version = value
@property
def log_time(self):
return self._log_time
@log_time.setter
def log_time(self, value):
self._log_time = value
@property
def message_type(self):
return self._message_type
@message_type.setter
def message_type(self, value):
self._message_type = value
@property
def product(self):
return self._product
@product.setter
def product(self, value):
self._product = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def sn_id(self):
return self._sn_id
@sn_id.setter
def sn_id(self, value):
self._sn_id = value
def to_alipay_dict(self):
params = dict()
if self.event_time:
if hasattr(self.event_time, 'to_alipay_dict'):
params['event_time'] = self.event_time.to_alipay_dict()
else:
params['event_time'] = self.event_time
if self.extend_info:
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.hardware_version:
if hasattr(self.hardware_version, 'to_alipay_dict'):
params['hardware_version'] = self.hardware_version.to_alipay_dict()
else:
params['hardware_version'] = self.hardware_version
if self.log_time:
if hasattr(self.log_time, 'to_alipay_dict'):
params['log_time'] = self.log_time.to_alipay_dict()
else:
params['log_time'] = self.log_time
if self.message_type:
if hasattr(self.message_type, 'to_alipay_dict'):
params['message_type'] = self.message_type.to_alipay_dict()
else:
params['message_type'] = self.message_type
if self.product:
if hasattr(self.product, 'to_alipay_dict'):
params['product'] = self.product.to_alipay_dict()
else:
params['product'] = self.product
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.sn_id:
if hasattr(self.sn_id, 'to_alipay_dict'):
params['sn_id'] = self.sn_id.to_alipay_dict()
else:
params['sn_id'] = self.sn_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMerchantDeviceCrashinfoUploadModel()
if 'event_time' in d:
o.event_time = d['event_time']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'hardware_version' in d:
o.hardware_version = d['hardware_version']
if 'log_time' in d:
o.log_time = d['log_time']
if 'message_type' in d:
o.message_type = d['message_type']
if 'product' in d:
o.product = d['product']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'sn_id' in d:
o.sn_id = d['sn_id']
return o
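# Minimal round-trip sketch (not part of the generated SDK file): build a model,
# serialize it with to_alipay_dict, then rebuild it with from_alipay_dict.
if __name__ == "__main__":
    m = KoubeiMerchantDeviceCrashinfoUploadModel()
    m.event_time = "2024-01-01 00:00:00"
    m.sn_id = "demo-sn"
    d = m.to_alipay_dict()
    restored = KoubeiMerchantDeviceCrashinfoUploadModel.from_alipay_dict(d)
    print(json.dumps(d), restored.sn_id)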
| StarcoderdataPython |
1751621 | <filename>src/main.py
'''
Reference implementation of node2vec.
Author: <NAME>
For more details, refer to the paper:
node2vec: Scalable Feature Learning for Networks
<NAME> and <NAME>
Knowledge Discovery and Data Mining (KDD), 2016
'''
import argparse
import numpy as np
import networkx as nx
import node2vec2
from gensim.models import Word2Vec
def parse_args():
'''
Parses the node2vec arguments.
'''
parser = argparse.ArgumentParser(description="Run node2vec.")
parser.add_argument('--input', nargs='?', default='graph/karate.edgelist',
help='Input graph path')
parser.add_argument('--output', nargs='?', default='emb/karate.emb',
help='Embeddings path')
parser.add_argument('--dimensions', type=int, default=128,
help='Number of dimensions. Default is 128.')
parser.add_argument('--walk-length', type=int, default=80,
help='Length of walk per source. Default is 80.')
parser.add_argument('--num-walks', type=int, default=10,
help='Number of walks per source. Default is 10.')
parser.add_argument('--window-size', type=int, default=10,
help='Context size for optimization. Default is 10.')
parser.add_argument('--iter', default=1, type=int,
help='Number of epochs in SGD')
parser.add_argument('--workers', type=int, default=8,
help='Number of parallel workers. Default is 8.')
parser.add_argument('--p', type=float, default=1,
help='Return hyperparameter. Default is 1.')
parser.add_argument('--q', type=float, default=1,
help='Inout hyperparameter. Default is 1.')
parser.add_argument('--weighted', dest='weighted', action='store_true',
help='Boolean specifying (un)weighted. Default is unweighted.')
parser.add_argument('--unweighted', dest='unweighted', action='store_false')
parser.set_defaults(weighted=False)
parser.add_argument('--directed', dest='directed', action='store_true',
help='Graph is (un)directed. Default is undirected.')
parser.add_argument('--undirected', dest='undirected', action='store_false')
parser.set_defaults(directed=False)
return parser.parse_args()
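# Example invocation (hypothetical paths, matching the argparse defaults above):
#   python src/main.py --input graph/karate.edgelist --output emb/karate.emb \
#       --dimensions 128 --walk-length 80 --num-walks 10 --p 1 --q 1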
def read_graph():
'''
Reads the input network in networkx.
'''
if args.weighted:
G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph())
else:
        # G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())  # input='graph/karate.edgelist', builds a directed graph
e = [(1, 32), (1, 22), (1, 20), (1, 18), (1, 14), (1, 13), (1, 12), (1, 11), (1, 9), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3), (1, 2), (32, 34), (32, 33), (32, 29), (32, 26), (32, 25), (22, 2), (20, 34), (20, 2), (18, 2), (14, 34), (14, 4), (14, 3), (14, 2), (13, 4), (11, 6), (11, 5), (9, 34), (9, 33), (9, 3), (8, 4), (8, 3), (8, 2), (7, 17), (7, 6), (7, 5), (6, 17), (4, 3), (4, 2), (3, 10), (3, 33), (3, 29), (3, 28), (3, 2), (2, 31), (31, 34), (31, 33), (10, 34), (33, 34), (33, 15), (33, 16), (33, 19), (33, 21), (33, 23), (33, 24), (33, 30), (29, 34), (28, 34), (28, 24), (28, 25), (34, 15), (34, 16), (34, 19), (34, 21), (34, 23), (34, 24), (34, 30), (34, 27), (24, 30), (24, 26), (30, 27), (26, 25)]
G = nx.Graph(e)
for edge in G.edges():
            G[edge[0]][edge[1]]['weight'] = 1  # every edge gets weight 1; G.edges() returns an EdgeView of (u, v) node pairs
    if not args.directed:  # convert to an undirected graph
G = G.to_undirected()
return G
def learn_embeddings(walks):
'''
Learn embeddings by optimizing the Skipgram objective using SGD.
'''
    walks = [list(map(str, walk)) for walk in walks]  # list() so the walks are reusable sequences on Python 3 as well
model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers,
iter=args.iter)
model.save_word2vec_format(args.output)
return
def main(args):
'''
    Pipeline for representational learning for all nodes in a graph.
'''
nx_G = read_graph()
G = node2vec2.Graph(nx_G, args.directed, args.p, args.q)#args.directed=False,p=1,q=1
G.preprocess_transition_probs()
    walks = G.simulate_walks(args.num_walks, args.walk_length)  # generate random walks using the precomputed alias tables
learn_embeddings(walks)
if __name__ == "__main__":
args = parse_args()
main(args) | StarcoderdataPython |
3267328 | <reponame>khaykingleb/Automatic-Speech-Recognition
from torch import nn
import torch
from asr.models.base_model import BaseModel
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
class DummyModel(BaseModel):
def __init__(self, n_feats, n_class, gru_hidden=512, gru_num_layers=3,
gru_dropout=0, *args, **kwargs):
super().__init__(n_feats, n_class, *args, **kwargs)
self.gru_hidden = gru_hidden
self.gru_num_layers = gru_num_layers
self.gru = nn.GRU(input_size=n_feats, hidden_size=gru_hidden,
num_layers=gru_num_layers, dropout=gru_dropout, batch_first=False)
self.fc = nn.Linear(in_features=gru_hidden, out_features=n_class)
def forward(self, spectrogram, *args, **kwargs):
h_0 = torch.zeros(self.gru_num_layers, spectrogram.shape[1], self.gru_hidden).to(device).requires_grad_()
output, (h_n) = self.gru(spectrogram, (h_0.detach()))
output = self.fc(output)
return {"logits": output}
def transform_input_lengths(self, input_lengths):
return input_lengths
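# Shape sketch (assumes BaseModel only needs n_feats and n_class): the GRU runs
# with batch_first=False, so the spectrogram is expected as [time, batch, n_feats].
if __name__ == "__main__":
    dummy = DummyModel(n_feats=128, n_class=28).to(device)
    spec = torch.randn(200, 4, 128).to(device)   # [T, B, n_feats]
    out = dummy(spec)["logits"]                  # [T, B, n_class]
    print(out.shape)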
| StarcoderdataPython |
1608021 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Internal class to monitor a topology of one or more servers."""
import os
import random
import threading
import warnings
import weakref
from bson.py3compat import itervalues, PY3
if PY3:
import queue as Queue
else:
import Queue
from pymongo import common
from pymongo import periodic_executor
from pymongo.pool import PoolOptions
from pymongo.topology_description import (updated_topology_description,
TOPOLOGY_TYPE,
TopologyDescription)
from pymongo.errors import ServerSelectionTimeoutError
from pymongo.monotonic import time as _time
from pymongo.server import Server
from pymongo.server_selectors import (any_server_selector,
apply_local_threshold,
arbiter_server_selector,
secondary_server_selector,
writable_server_selector)
def process_events_queue(queue_ref):
q = queue_ref()
if not q:
return False # Cancel PeriodicExecutor.
while True:
try:
event = q.get_nowait()
except Queue.Empty:
break
else:
fn, args = event
fn(*args)
return True # Continue PeriodicExecutor.
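# process_events_queue holds only a weak reference to the events queue: once the
# owning Topology is garbage collected the dereference returns None and the
# periodic executor that drains the queue cancels itself.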
class Topology(object):
"""Monitor a topology of one or more servers."""
def __init__(self, topology_settings):
self._topology_id = topology_settings._topology_id
self._listeners = topology_settings._pool_options.event_listeners
pub = self._listeners is not None
self._publish_server = pub and self._listeners.enabled_for_server
self._publish_tp = pub and self._listeners.enabled_for_topology
# Create events queue if there are publishers.
self._events = None
self._events_thread = None
if self._publish_server or self._publish_tp:
self._events = Queue.Queue(maxsize=100)
if self._publish_tp:
self._events.put((self._listeners.publish_topology_opened,
(self._topology_id,)))
self._settings = topology_settings
topology_description = TopologyDescription(
topology_settings.get_topology_type(),
topology_settings.get_server_descriptions(),
topology_settings.replica_set_name,
None,
None)
self._description = topology_description
if self._publish_tp:
self._events.put((
self._listeners.publish_topology_description_changed,
(TopologyDescription(
TOPOLOGY_TYPE.Unknown, {}, None, None, None),
self._description, self._topology_id)))
for seed in topology_settings.seeds:
if self._publish_server:
self._events.put((self._listeners.publish_server_opened,
(seed, self._topology_id)))
# Store the seed list to help diagnose errors in _error_message().
self._seed_addresses = list(topology_description.server_descriptions())
self._opened = False
self._lock = threading.Lock()
self._condition = self._settings.condition_class(self._lock)
self._servers = {}
self._pid = None
if self._publish_server or self._publish_tp:
def target():
return process_events_queue(weak)
executor = periodic_executor.PeriodicExecutor(
interval=common.EVENTS_QUEUE_FREQUENCY,
min_interval=0.5,
target=target,
name="pymongo_events_thread")
# We strongly reference the executor and it weakly references
# the queue via this closure. When the topology is freed, stop
# the executor soon.
weak = weakref.ref(self._events)
self.__events_executor = executor
executor.open()
def open(self):
"""Start monitoring, or restart after a fork.
No effect if called multiple times.
.. warning:: To avoid a deadlock during Python's getaddrinfo call,
will generate a warning if open() is called from a different
process than the one that initialized the Topology. To prevent this
from happening, MongoClient must be created after any forking OR
MongoClient must be started with connect=False.
"""
with self._lock:
if self._pid is None:
self._pid = os.getpid()
else:
if os.getpid() != self._pid:
warnings.warn(
"MongoClient opened before fork. Create MongoClient "
"with connect=False, or create client after forking. "
"See PyMongo's documentation for details: http://api."
"mongodb.org/python/current/faq.html#using-pymongo-"
"with-multiprocessing>")
self._ensure_opened()
def select_servers(self,
selector,
server_selection_timeout=None,
address=None):
"""Return a list of Servers matching selector, or time out.
:Parameters:
- `selector`: function that takes a list of Servers and returns
a subset of them.
- `server_selection_timeout` (optional): maximum seconds to wait.
If not provided, the default value common.SERVER_SELECTION_TIMEOUT
is used.
- `address`: optional server address to select.
Calls self.open() if needed.
Raises exc:`ServerSelectionTimeoutError` after
`server_selection_timeout` if no matching servers are found.
"""
if server_selection_timeout is None:
server_timeout = self._settings.server_selection_timeout
else:
server_timeout = server_selection_timeout
with self._lock:
self._description.check_compatible()
now = _time()
end_time = now + server_timeout
server_descriptions = self._apply_selector(selector, address)
while not server_descriptions:
# No suitable servers.
if server_timeout == 0 or now > end_time:
raise ServerSelectionTimeoutError(
self._error_message(selector))
self._ensure_opened()
self._request_check_all()
# Release the lock and wait for the topology description to
# change, or for a timeout. We won't miss any changes that
# came after our most recent _apply_selector call, since we've
# held the lock until now.
self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
self._description.check_compatible()
now = _time()
server_descriptions = self._apply_selector(selector, address)
return [self.get_server_by_address(sd.address)
for sd in server_descriptions]
def select_server(self,
selector,
server_selection_timeout=None,
address=None):
"""Like select_servers, but choose a random server if several match."""
return random.choice(self.select_servers(selector,
server_selection_timeout,
address))
def select_server_by_address(self, address,
server_selection_timeout=None):
"""Return a Server for "address", reconnecting if necessary.
If the server's type is not known, request an immediate check of all
servers. Time out after "server_selection_timeout" if the server
cannot be reached.
:Parameters:
- `address`: A (host, port) pair.
- `server_selection_timeout` (optional): maximum seconds to wait.
If not provided, the default value
common.SERVER_SELECTION_TIMEOUT is used.
Calls self.open() if needed.
Raises exc:`ServerSelectionTimeoutError` after
`server_selection_timeout` if no matching servers are found.
"""
return self.select_server(any_server_selector,
server_selection_timeout,
address)
def on_change(self, server_description):
"""Process a new ServerDescription after an ismaster call completes."""
# We do no I/O holding the lock.
with self._lock:
# Any monitored server was definitely in the topology description
# once. Check if it's still in the description or if some state-
# change removed it. E.g., we got a host list from the primary
# that didn't include this server.
if self._description.has_server(server_description.address):
td_old = self._description
if self._publish_server:
old_server_description = td_old._server_descriptions[
server_description.address]
self._events.put((
self._listeners.publish_server_description_changed,
(old_server_description, server_description,
server_description.address, self._topology_id)))
self._description = updated_topology_description(
self._description, server_description)
self._update_servers()
if self._publish_tp:
self._events.put((
self._listeners.publish_topology_description_changed,
(td_old, self._description, self._topology_id)))
# Wake waiters in select_servers().
self._condition.notify_all()
def get_server_by_address(self, address):
"""Get a Server or None.
Returns the current version of the server immediately, even if it's
Unknown or absent from the topology. Only use this in unittests.
In driver code, use select_server_by_address, since then you're
assured a recent view of the server's type and wire protocol version.
"""
return self._servers.get(address)
def has_server(self, address):
return address in self._servers
def get_primary(self):
"""Return primary's address or None."""
# Implemented here in Topology instead of MongoClient, so it can lock.
with self._lock:
topology_type = self._description.topology_type
if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary:
return None
description = writable_server_selector(
self._description.known_servers)[0]
return description.address
def _get_replica_set_members(self, selector):
"""Return set of replica set member addresses."""
# Implemented here in Topology instead of MongoClient, so it can lock.
with self._lock:
topology_type = self._description.topology_type
if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary,
TOPOLOGY_TYPE.ReplicaSetNoPrimary):
return set()
descriptions = selector(self._description.known_servers)
return set([d.address for d in descriptions])
def get_secondaries(self):
"""Return set of secondary addresses."""
return self._get_replica_set_members(secondary_server_selector)
def get_arbiters(self):
"""Return set of arbiter addresses."""
return self._get_replica_set_members(arbiter_server_selector)
def request_check_all(self, wait_time=5):
"""Wake all monitors, wait for at least one to check its server."""
with self._lock:
self._request_check_all()
self._condition.wait(wait_time)
def reset_pool(self, address):
with self._lock:
server = self._servers.get(address)
if server:
server.pool.reset()
def reset_server(self, address):
"""Clear our pool for a server and mark it Unknown.
Do *not* request an immediate check.
"""
with self._lock:
self._reset_server(address)
def reset_server_and_request_check(self, address):
"""Clear our pool for a server, mark it Unknown, and check it soon."""
with self._lock:
self._reset_server(address)
self._request_check(address)
def update_pool(self):
# Remove any stale sockets and add new sockets if pool is too small.
with self._lock:
for server in self._servers.values():
server._pool.remove_stale_sockets()
def close(self):
"""Clear pools and terminate monitors. Topology reopens on demand."""
with self._lock:
for server in self._servers.values():
server.close()
# Mark all servers Unknown.
self._description = self._description.reset()
self._update_servers()
# Publish only after releasing the lock.
if self._publish_tp:
self._events.put((self._listeners.publish_topology_closed,
(self._topology_id,)))
if self._publish_server or self._publish_tp:
self.__events_executor.close()
@property
def description(self):
return self._description
def _ensure_opened(self):
"""Start monitors, or restart after a fork.
Hold the lock when calling this.
"""
if not self._opened:
self._opened = True
self._update_servers()
# Start or restart the events publishing thread.
if self._publish_tp or self._publish_server:
self.__events_executor.open()
else:
# Restart monitors if we forked since previous call.
for server in itervalues(self._servers):
server.open()
def _reset_server(self, address):
"""Clear our pool for a server and mark it Unknown.
Hold the lock when calling this. Does *not* request an immediate check.
"""
server = self._servers.get(address)
# "server" is None if another thread removed it from the topology.
if server:
server.reset()
# Mark this server Unknown.
self._description = self._description.reset_server(address)
self._update_servers()
def _request_check(self, address):
"""Wake one monitor. Hold the lock when calling this."""
server = self._servers.get(address)
# "server" is None if another thread removed it from the topology.
if server:
server.request_check()
def _request_check_all(self):
"""Wake all monitors. Hold the lock when calling this."""
for server in self._servers.values():
server.request_check()
def _apply_selector(self, selector, address):
if self._description.topology_type == TOPOLOGY_TYPE.Single:
# Ignore the selector.
return self._description.known_servers
elif address:
sd = self._description.server_descriptions().get(address)
return [sd] if sd else []
elif self._description.topology_type == TOPOLOGY_TYPE.Sharded:
return apply_local_threshold(self._settings.local_threshold_ms,
self._description.known_servers)
else:
sds = selector(self._description.known_servers)
return apply_local_threshold(
self._settings.local_threshold_ms, sds)
def _update_servers(self):
"""Sync our Servers from TopologyDescription.server_descriptions.
Hold the lock while calling this.
"""
for address, sd in self._description.server_descriptions().items():
if address not in self._servers:
monitor = self._settings.monitor_class(
server_description=sd,
topology=self,
pool=self._create_pool_for_monitor(address),
topology_settings=self._settings)
weak = None
if self._publish_server:
weak = weakref.ref(self._events)
server = Server(
server_description=sd,
pool=self._create_pool_for_server(address),
monitor=monitor,
topology_id=self._topology_id,
listeners=self._listeners,
events=weak)
self._servers[address] = server
server.open()
else:
self._servers[address].description = sd
for address, server in list(self._servers.items()):
if not self._description.has_server(address):
server.close()
self._servers.pop(address)
def _create_pool_for_server(self, address):
return self._settings.pool_class(address, self._settings.pool_options)
def _create_pool_for_monitor(self, address):
options = self._settings.pool_options
# According to the Server Discovery And Monitoring Spec, monitors use
# connect_timeout for both connect_timeout and socket_timeout. The
# pool only has one socket so maxPoolSize and so on aren't needed.
monitor_pool_options = PoolOptions(
connect_timeout=options.connect_timeout,
socket_timeout=options.connect_timeout,
ssl_context=options.ssl_context,
ssl_match_hostname=options.ssl_match_hostname,
socket_keepalive=True,
event_listeners=options.event_listeners)
return self._settings.pool_class(address, monitor_pool_options,
handshake=False)
def _error_message(self, selector):
"""Format an error message if server selection fails.
Hold the lock when calling this.
"""
is_replica_set = self._description.topology_type in (
TOPOLOGY_TYPE.ReplicaSetWithPrimary,
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
if is_replica_set:
server_plural = 'replica set members'
elif self._description.topology_type == TOPOLOGY_TYPE.Sharded:
server_plural = 'mongoses'
else:
server_plural = 'servers'
if self._description.known_servers:
# We've connected, but no servers match the selector.
if selector is writable_server_selector:
if is_replica_set:
return 'No primary available for writes'
else:
return 'No %s available for writes' % server_plural
else:
return 'No %s match selector "%s"' % (server_plural, selector)
else:
addresses = list(self._description.server_descriptions())
servers = list(self._description.server_descriptions().values())
if not servers:
if is_replica_set:
# We removed all servers because of the wrong setName?
return 'No %s available for replica set name "%s"' % (
server_plural, self._settings.replica_set_name)
else:
return 'No %s available' % server_plural
# 1 or more servers, all Unknown. Are they unknown for one reason?
error = servers[0].error
same = all(server.error == error for server in servers[1:])
if same:
if error is None:
# We're still discovering.
return 'No %s found yet' % server_plural
if (is_replica_set and not
set(addresses).intersection(self._seed_addresses)):
# We replaced our seeds with new hosts but can't reach any.
return (
'Could not reach any servers in %s. Replica set is'
' configured with internal hostnames or IPs?' %
addresses)
return str(error)
else:
return ','.join(str(server.error) for server in servers
if server.error)
| StarcoderdataPython |
3241020 | from django.db import models
_QUESTIONS = {
"first_name": "textfield_28990631",
"last_name": "textfield_28990632",
"email": "email_28990633",
"coming_from": "dropdown_28990634",
"nationality": "dropdown_28990808",
"degree": ("list_28990901_choice", "list_28990901_other"),
"graduation": "date_28991016",
"major": "dropdown_28991305",
"university": "textfield_28991659",
"_18yo": "yesno_28991894",
"needs_reimbursement": "yesno_29001580",
"needs_visa": "yesno_29005061",
"github": "website_28991933",
"devpost": "website_28991936",
"linkedin": "website_28991941",
"personal_site": "website_28991950",
"first_hackathon": "yesno_28991972",
"why_jacobshack": "textarea_29001709",
"previous_projects": "textarea_29001343",
"tshirt_size": "list_29001632_choice",
"dietary_requirements": ("list_29001860_choice", "list_29001860_other"),
"has_team": "yesno_29001866",
"names_of_teammates": "textarea_29001868",
"cv": "fileupload_29001894"
}
class JHAPPManager(models.Manager):
def get_queryset(self):
return super(JHAPPManager, self).get_queryset().all()
def get_accepted(self):
return super(JHAPPManager, self).get_queryset().all().filter(
accepted=True)
def get_not_accepted(self):
return super(JHAPPManager, self).get_queryset().all().filter(
accepted=False)
def get_accepted_but_not_emailed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
accepted=True, sentmail=False)
def get_accepted_and_emailed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
accepted=True, sentmail=True)
def get_accepted_but_not_slacked(self):
return super(JHAPPManager, self).get_queryset().all().filter(
accepted=True, slack_invite=False)
def get_18yos(self):
return super(JHAPPManager, self).get_queryset().all().filter(_18yo=1)
def get_underaged(self):
return super(JHAPPManager, self).get_queryset().all().filter(_18yo=0)
def get_visa_needed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
needs_visa=1)
def get_visa_not_needed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
needs_visa=0)
def get_money_needed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
needs_reimbursement=1)
def get_money_not_needed(self):
return super(JHAPPManager, self).get_queryset().all().filter(
needs_reimbursement=0)
def get_ze_germans(self):
return super(JHAPPManager, self).get_queryset().all().filter(
nationality="Germany")
    def get_le_coders(self):
        return super(JHAPPManager, self).get_queryset().all().exclude(
            github="http://")
class JHAPP(models.Model):
apps = JHAPPManager()
objects = models.Manager()
first_name = models.TextField()
last_name = models.TextField(null=True, blank=True)
email = models.EmailField(unique=True)
coming_from = models.TextField()
nationality = models.TextField()
degree = models.TextField()
graduation = models.TextField()
major = models.TextField()
university = models.TextField()
_18yo = models.TextField()
needs_reimbursement = models.TextField()
needs_visa = models.TextField()
github = models.TextField(null=True, blank=True)
devpost = models.TextField(null=True, blank=True)
linkedin = models.TextField(null=True, blank=True)
personal_site = models.TextField(null=True, blank=True)
first_hackathon = models.TextField()
why_jacobshack = models.TextField(null=True, blank=True)
previous_projects = models.TextField()
tshirt_size = models.TextField()
dietary_requirements = models.TextField(null=True, blank=True)
has_team = models.TextField()
names_of_teammates = models.TextField(null=True, blank=True)
cv = models.TextField(null=True, blank=True)
agree_to_policy = models.TextField(null=True, blank=True)
agree_to_coc = models.TextField(null=True, blank=True)
accepted = models.BooleanField(default=False)
sentmail = models.BooleanField(default=False)
slack_invite = models.BooleanField(default=False)
def __str__(self):
return '%s %s <%s> %s' % (self.first_name, self.last_name, self.email,
'' if not self.accepted else '✓')
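# Example queries through the custom manager (hypothetical shell session):
#   JHAPP.apps.get_accepted_but_not_emailed()   # accepted applicants still awaiting mail
#   JHAPP.apps.get_ze_germans().count()         # applicants with German nationality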
| StarcoderdataPython |
6506 | """This module contains the general information for StorageScsiLunRef ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageScsiLunRefConsts():
pass
class StorageScsiLunRef(ManagedObject):
"""This is StorageScsiLunRef class."""
consts = StorageScsiLunRefConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageScsiLunRef", "storageScsiLunRef", "scsi-lun-ref-[id]", VersionMeta.Version131a, "InputOutput", 0x1f, [], ["read-only"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"ls_dn": MoPropertyMeta("ls_dn", "lsDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"lun_name": MoPropertyMeta("lun_name", "lunName", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"profile_dn": MoPropertyMeta("profile_dn", "profileDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"lsDn": "ls_dn",
"lunName": "lun_name",
"pnDn": "pn_dn",
"profileDn": "profile_dn",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.ls_dn = None
self.lun_name = None
self.pn_dn = None
self.profile_dn = None
self.status = None
ManagedObject.__init__(self, "StorageScsiLunRef", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
1708048 | <reponame>Michael-Czekanski/WMIAdventure-1
from battle.businesslogic.effects.Effect import Effect
class TwoTimesExecuteEffect(Effect):
"""
One may say that this effect 'duplicates' the card, so the card gets used two times
in two consecutive player turns, but it does not duplicate itself in a way that there are
two instances of the card in the deck. Actual behavior is just that it gets executed twice, hence the name.
"""
def on_activation(self, target, turns_queue):
deck = target.deck
next_card = deck.lookup()
        # If we were to do it like this:
# deck.cards_queue.appendleft(next_card)
# The card would get permanently duplicated, and the deck would be larger than 5.
# We need some mechanism to remove appended card from the deck after it gets used.
deck.temp_cards_queue.append(next_card)
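        # Assumption: the battle loop drains temp_cards_queue before drawing from
        # cards_queue again, so the looked-up card is played once "for free" and
        # then once more from its normal place in the deck.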
| StarcoderdataPython |
3204288 | #!/usr/bin/env python3
"""
Die class module.
"""
import random
class Die:
"""
Die class, represents a dice.
"""
# Static attributes
MIN_ROLL_VALUE = 1
MAX_ROLL_VALUE = 6
def __init__(self, value=None):
"""
Constructor method for class instance
"""
if value is not None and value > self.MAX_ROLL_VALUE:
self._value = self.MAX_ROLL_VALUE
elif value is not None and value < self.MIN_ROLL_VALUE:
self._value = self.MIN_ROLL_VALUE
elif value is None:
self._value = random.randint(self.MIN_ROLL_VALUE, self.MAX_ROLL_VALUE)
else:
self._value = value
def __str__(self):
"""
Return string representation for attribute _value
"""
return str(self._value)
def roll(self):
"""
Setter method for dice value from roll action.
Value is generated randomly from range defined with
the static attributes MIN_ROLL_VALUE and MAX_ROLL_VALUE.
"""
self._value = random.randint(self.MIN_ROLL_VALUE, self.MAX_ROLL_VALUE)
return self._value
def get_name(self):
"""
Getter method to return string representation of the dice value.
"""
value = self.get_value()
if value == 1:
output_string = "one"
elif value == 2:
output_string = "two"
elif value == 3:
output_string = "three"
elif value == 4:
output_string = "four"
elif value == 5:
output_string = "five"
elif value == 6:
output_string = "six"
else:
output_string = "No dice have been rolled yet... "
return output_string
def get_value(self):
"""
Getter method to return the dice value.
"""
return self._value
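# Small usage sketch: roll a die a few times and print both representations.
if __name__ == "__main__":
    die = Die()
    for _ in range(3):
        die.roll()
        print(die.get_value(), die.get_name())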
| StarcoderdataPython |
3334199 | name = input()
sum = float(0)
grade = float(input())
count = 0
while count != 13:
grade = float(grade)
count += 1
grade = round(grade, 2)
sum += grade
    sum = round(sum, 2)
if grade == 2:
print (f"{name} has been excluded at {count} grade")
break
grade = float(grade)
grade = int(grade)
if grade >= 4:
if count == 12:
break
grade = input()
else:
print (f"{name} has been excluded at {count} grade")
break
grade = sum / 12
if grade >= 4 and count == 12:
print (f"{name} graduated. Average grade: {grade:.2f}") | StarcoderdataPython |
1684610 | <reponame>eshanking/fears-figures
import sys
sys.path.append('/Users/kinge2/repos/')
import numpy as np
import math
import random
from seascapes_figures.utils import plotter, pharm, fitness, dir_manager
import pandas as pd
class Population(fitness.Fitness,plotter.Plotter):
"""Population class: the fundamental object of FEArS.
Contains basic information about the organism being simulated, the
environment, and the evolutionary algorithm.
...
Attributes
__________
passage : bool
if true, simulates passaging cells by reducing the population by a
factor of self.dilution at intervals given by passage_time.
carrying_cap : bool
if true, sets the carrying capacity to max_cells.
curve_type : str
determines the drug concentration curve.
Allowed types:
constant: constant at max dose.
linear: linear ramp to max_dose at a rate given by slope.
heaviside: jumps drug concentration from min_dose to max_dose at a
timestep given by h_step
pharm: drug curve follows 1-compartment pharmacokinetic model.
k_abs: absorption coefficient
k_elim: eliminiation coefficient
pulsed: simulates a patient taking a drug dose at intervals given
by dose_schedule
on_off: switches between 'on' (max_dose) and 'off' (min_dose) at
intervals given by dose_schedule. On/off ratio set by
duty_cycle
counts_log_scale : bool
If true, plots the results on a log scale.
constant_pop : bool
Enforces a constant population size (max_cells).
drugless_data : str
Filename for the drugless growth rates (ogbunugafor_drugless.csv by
default). Searches for files in the fears/data folder.
death_rate : float
Death rate
doubling_time : float
Average doubling time of the model organism (hours).
dose_schedule : int
Timesteps between simulated doses.
drug_log_scale : bool
If true, plots the drug concentration curve on a log scale.
drug_curve : array
Optional custom drug concentration curve. Overrides all other drug
concentration options.
debug : bool
If true, prints every 10th timestep.
duty_cycle : float
On/off ratio of the on/off regimen.
entropy_lim : list
Y axis limits for plotting the entropy curve.
fig_title : str
Optional figure title.
fitness_data : str
generate: generate the fitness data based on the drugless growth rates
and ic50.
manual: get fitness data from self.landscape_data.
h_step : int
Time at which the drug concentration steps from min_dose to max_dose.
ic50_data : str
Filename for the ic50 data (pyrimethamine_ic50.csv by
default). Searches for files in the fears/data folder.
init_counts : numpy array
Initial simulation counts.
Defaults: 10**4 cells at genotype 0.
k_elim : float
Pharmacokinetic elimination rate.
k_abs : float
Pharamcokinetic absorption rate.
"""
###############################################################################
# Initializer
def __init__(self,
n_allele = 4,
passage = False,
passage_time = 48, # hours
carrying_cap = True,
curve_type='constant', # drug concentration curve
counts_log_scale = False, # plot counts on log scale
constant_pop = False, # normalize to a constant population size
drugless_data = None, # file path for the drugless growth rates
death_rate = 0.15,
doubling_time = 1, # average doubling time of model organism
dose_schedule=12, # dose every x hours
drug_log_scale = False, # plot the drug concentration curve on a log scale
drug_curve = None, # input a custom drug concentration curve
debug=False, # print the current time step
dilution = 40,
digital_seascape=False,
duty_cycle = None,
entropy_lim = None, # entropy plotting limits
fig_title = '',
fitness_data = 'generate', # 'generate' = generate fitness data using drugless growth rates, ic50, and drug concentration. 'manual' = input fitness landscape from csv
h_step = 500,
ic50_data = None,
init_counts = None, # default is 10,000 wild type cells
k_elim = 0.001, # for modeling pharmacokinetics
k_abs = 0.07,
landscape_path = None, # path for custom fitness landscape
min_dose = 0,
mut_rate = 0.01, # mutation rate
max_cells = 10**6, # carrying capacity
max_dose = 1,
mic_estimate=None,
min_fitness = 0,
n_timestep=1000, # number of generations
n_sims = 1, # number of simulations to average together
null_seascape=False,
null_seascape_dose=10**1,
pad_right = False,
plot=True, # plot the result of simulate()
plot_drug_curve=True,
plot_entropy = False, # plot the entropy of the population over time underneath the timecourse
prob_drop=0,
slope = None,
static_topology = False,
static_topo_dose = 10**5,
stop_condition = False,
timestep_scale = 1,
x_lim = None, # plotting
y_lim = None, # plotting
**kwargs
):
"""
"""
# Evolutionary parameters
# Number of generations (time steps)
if carrying_cap is False and constant_pop is False:
print('\nWarning: no limit set on population size! Consider'
+ ' enforcing a carrying capacity or constant'
+ ' population.')
self.n_timestep = n_timestep
self.stop_condition = stop_condition
self.max_cells = max_cells
# model parameters
self.mut_rate = mut_rate
self.death_rate = death_rate
self.doubling_time = doubling_time
self.timestep_scale = timestep_scale # timestep_scale = 2 -> timestep = 2 hrs, etc
self.carrying_cap = carrying_cap
self.n_sims = n_sims # number of simulations to average together in self.simulate
self.constant_pop = constant_pop
self.debug = debug
self.fitness_data = fitness_data
self.passage = passage
self.passage_time = passage_time
self.dilution = dilution
self.counts = np.zeros([self.n_timestep,16])
self.counts_extinct = np.zeros([self.n_timestep,16])
self.counts_survive = np.zeros([self.n_timestep,16])
self.digital_seascape = digital_seascape
self.mic_estimate = mic_estimate
# Generate fitness data from IC50 and drugless growth rate data
if fitness_data == 'generate':
# Data paths
self.drug_units = '$\u03BC$M'
if drugless_data is None:
self.drugless_data = dir_manager.make_datapath_absolute('ogbunugafor_drugless.csv')
else:
self.drugless_data = dir_manager.make_datapath_absolute(drugless_data)
if ic50_data is None:
# self.ic50_data = "C:\\Users\\Eshan\\Documents\\python scripts\\theory division\\abm_variable_fitness\\data\\pyrimethamine_ic50.csv"
# self.ic50_data = self.make_datapath_absolute('pyrimethamine_ic50.csv')
self.ic50_data = dir_manager.make_datapath_absolute('pyrimethamine_ic50.csv')
else:
# self.ic50_data = self.make_datapath_absolute(ic50_data)
self.ic50_data = dir_manager.make_datapath_absolute(ic50_data)
# load the data
self.drugless_rates = dir_manager.load_fitness(self.drugless_data)
self.ic50 = dir_manager.load_fitness(self.ic50_data)
self.max_replication_rate = max(self.drugless_rates)
self.n_genotype = self.drugless_rates.shape[0]
# load fitness landscape from excel file
elif fitness_data == 'manual':
self.landscape_path = landscape_path
self.landscape_data = dir_manager.load_fitness(self.landscape_path)
self.max_replication_rate = max(self.landscape_data)
self.n_genotype = self.landscape_data.shape[0]
elif fitness_data == 'random':
self.drug_units = '$\u03BC$M'
self.drugless_rates,self.ic50, = \
self.gen_random_seascape(n_allele)
self.n_genotype = 2**n_allele
elif fitness_data == 'estimate':
self.drug_units = '$\u03BC$g/mL'
self.plate_paths = []
if 'plate_paths' in kwargs:
self.plate_paths = kwargs['plate_paths']
else:
self.plate_paths = ['20210929_plate1.csv','20210929_plate2.csv','20210929_plate3.csv']
self.plate_paths = [dir_manager.make_datapath_absolute(p) for p in self.plate_paths]
self.growth_rate_data = []
for plate_path in self.plate_paths:
self.growth_rate_data.append(self.get_growth_rate_data(plate_path))
if 'seascape_drug_conc' in kwargs:
self.seascape_drug_conc = kwargs['seascape_drug_conc']
else:
self.seascape_drug_conc = [0,0.003,0.0179,0.1072,0.643,3.858,23.1481,138.8889,833.3333,5000] #ug/mL
min_dc = np.log10(self.seascape_drug_conc[1])
max_dc = np.log10(max(self.seascape_drug_conc))
self.drug_conc_range = [np.round(min_dc),np.round(max_dc)]
self.max_od = self.get_max_od(self)
self.growth_rate_library = self.gen_growth_rate_library(self)
self.n_genotype = len(self.growth_rate_library.keys()) - 1
self.seascape_library = self.gen_seascape_library(self)
# Initial number of cells (default = 10,000 at 0000)
if init_counts is None:
self.init_counts = np.zeros(self.n_genotype)
self.init_counts[0] = 10**4
else:
self.init_counts = init_counts
self.n_allele = int(np.log2(self.n_genotype))
if self.constant_pop:
self.init_counts = self.init_counts*self.max_cells/sum(self.init_counts)
self.init_counts = np.floor(self.init_counts)
self.carrying_cap = False
# Dose parameters
self.curve_type = curve_type # linear, constant, heaviside, pharm, pulsed
# Pharmacological paramters
if k_abs < k_elim:
raise Exception('Inappropriate pharmacokinetic values: k_abs < k_elim.')
self.k_elim = k_elim
self.k_abs = k_abs
self.pad_right = pad_right
self.max_dose = max_dose
if slope is None:
self.slope = self.max_dose/self.n_timestep # Ramped parameter (what time step to reach maximum dose, determines slope)
else:
self.slope = slope
self.dose_schedule= dose_schedule
self.prob_drop = prob_drop # probability of dropping a dose
self.h_step = h_step # when to turn on heaviside function
self.min_dose = min_dose
self.duty_cycle = duty_cycle
# Generate drug dosage curves if one is not specified
if drug_curve is None:
self.drug_curve,u = self.gen_curves()
else:
self.drug_curve = drug_curve
# self.static_landscape = static_landscape
self.static_topology = static_topology
self.static_topo_dose = static_topo_dose
# Visualization parameters
self.plot = plot # boolean
self.plot_entropy = plot_entropy
self.plot_drug_curve=plot_drug_curve
self.drug_log_scale = drug_log_scale # plot drugs on log scale
self.counts_log_scale = counts_log_scale # plot counts on log scale
self.fig_title = fig_title
self.counts_log_scale = counts_log_scale
if x_lim is None:
self.x_lim = n_timestep
else:
self.x_lim = x_lim
self.y_lim = y_lim
self.entropy_lim = entropy_lim
if null_seascape:
self.null_seascape_dose=null_seascape_dose
self.set_null_seascape(self.null_seascape_dose)
if passage:
if not np.mod(passage_time,timestep_scale) == 0:
raise Exception('To reduce ambiguity, please ensure that'
+ ' timestep_scale divides evenly into'
+ ' passage_time.')
self.state = {'counts':[],
'n_mut':0,
't':0}
###############################################################################
# ABM helper methods
def gen_neighbors(self,genotype):
mut = range(self.n_allele)
neighbors = [genotype ^ (1 << m) for m in mut]
return neighbors
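    # gen_neighbors flips one allele bit at a time, e.g. with n_allele=4 the
    # genotype 0b0101 (5) has single-mutation neighbors 4, 7, 1 and 13.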
def fast_choice(self,options, probs):
x = random.random()
cum = 0
for i, p in enumerate(probs):
cum += p
if x < cum:
return options[i]
return options[-1]
# converts decimals to binary
def int_to_binary(self,num):
"""
Converts an integer to binary representation with the number of
digits equal to the number of alleles in the model.
Parameters
----------
num : int
Number to be converted.
Returns
-------
str
Binary representation.
"""
pad = int(math.log(self.n_genotype,2))
return bin(num)[2:].zfill(pad)
# computes hamming distance between two genotypes
def hammingDistance(self,s1,s2):
assert len(s1) == len(s2)
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
# converts an integer to a genotype and padding to the left by 0s
def convertIntToGenotype(self,anInt,pad):
offset = 2**pad
return [int(x) for x in bin(offset+anInt)[3:]]
def random_mutations(self,N):
trans_mat = np.zeros([N,N])
for mm in range(N):
for nn in range(N):
trans_mat[mm, nn] = self.hammingDistance( self.int_to_binary(mm) , self.int_to_binary(nn))
trans_mat[trans_mat>1] = 0
trans_mat = trans_mat/trans_mat.sum(axis=1)
return trans_mat
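    # random_mutations builds the mutation matrix: entry (m, n) is 1 when
    # genotypes m and n differ by exactly one bit, then each row is normalised;
    # abm() samples destination genotypes from the columns P[:, genotype].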
# check if the most fit mutant is the most prevalent
def check_stop_cond(self,counts,mm):
final_landscape = self.gen_fit_land(self.max_dose)
fittest_genotype = final_landscape.argmax()
most_frequent_genotype = counts.argmax()
stop_cond = False
if fittest_genotype == most_frequent_genotype:
stop_cond = True
if mm >= self.n_timestep:
raise Warning('Stop condition not reached. Increase n_timestep or adjust model parameters.')
stop_cond = True
return stop_cond
def passage_cells(self,mm,counts):
"""
If self.passage is true, dilute cells according to self.dilution when
the timestep is a multiple of self.passage_time.
Parameters
----------
mm : int
Timestep.
counts : numpy array
Matrix of simulated cell counts.
Returns
-------
counts : numpy array
Matrix of simulated cell counts; diluted if the timestep is
appropriate.
"""
if (np.mod(mm*self.timestep_scale,self.passage_time) == 0
and not mm == 0 and self.passage):
counts = np.divide(counts,self.dilution)
return counts
##############################################################################
# core evolutionary model
def abm(self,mm,n_genotype,P,counts):
conc = self.drug_curve[mm]
# __gen_fl_for_abm automatically considers carrying capacity, but
# it does not consider timestep scale
fit_land = self.__gen_fl_for_abm(conc, counts)
fit_land = fit_land*self.timestep_scale
death_rate = self.death_rate*self.timestep_scale
mut_rate = self.mut_rate*self.timestep_scale
if self.debug and np.mod(mm,10) == 0:
print(str(mm))
print(str(counts))
print(str(fit_land))
# Passage cells
counts = self.passage_cells(mm, counts)
counts_t = counts
# Kill cells
# print(str(mm))
counts_t = counts_t - np.random.poisson(counts*death_rate)
# Make sure there aren't negative numbers
neg_indx = counts_t < 0
counts_t[neg_indx] = 0
# Divide cells
daughter_counts = np.random.poisson(counts_t*fit_land)
for genotype in np.arange(n_genotype):
n_mut = np.random.poisson(daughter_counts[genotype]*mut_rate*self.n_allele)
# Substract mutating cells from that allele
daughter_counts[genotype] -= n_mut
# Mutate cells
mutations = np.random.choice(n_genotype, size=n_mut, p=P[:,genotype]).astype(np.uint8)
# Add mutating cell to their final types
counts_t += np.bincount( mutations , minlength=n_genotype)
counts_t += daughter_counts
# Normalize to constant population
if self.constant_pop:
scale = self.max_cells/np.sum(counts_t)
counts_t = counts_t*scale
counts_t = np.ceil(counts_t).astype('int')
self.state['counts'] = counts_t
self.state['n_mut'] = n_mut
self.state['t'] = mm
return counts_t
def run_abm(self):
n_genotype = self.n_genotype
# Get transition matrix
P = self.random_mutations( n_genotype )
mm = 0
# Two main modes:
# Stop condition: run until the population reaches fixation
# Default: run for n_timestep
if self.stop_condition:
counts = np.zeros( [1,n_genotype] , dtype=int)
counts[0,:] = self.init_counts
stop_condition = False
while not stop_condition:
counts_t = self.abm(mm,n_genotype,P,counts[mm])
if len(counts.shape) == 1:
counts = np.append([counts],[counts_t],axis=0)
else:
counts = np.append(counts,[counts_t],axis=0)
mm+=1
stop_condition = self.check_stop_cond(counts_t,mm)
else:
counts = np.zeros( [self.n_timestep, n_genotype] , dtype=int)
counts[0,:] = self.init_counts
while mm < self.n_timestep - 1:
counts[mm+1] = self.abm(mm,n_genotype,P,counts[mm])
mm+=1
return counts, mm
def simulate(self):
counts = np.zeros([self.n_timestep,self.n_genotype])
avg_counts = np.zeros([self.n_timestep,self.n_genotype])
fixation_time = []
# n_survive = 0
for i in range(self.n_sims):
# if self.prob_drop > 0:
# self.drug_curve,u = self.gen_curves()
counts, mm = self.run_abm()
avg_counts += counts
fixation_time.append(mm)
if self.plot is True:
self.plot_timecourse(counts_t = counts)
avg_counts = avg_counts/self.n_sims
self.counts = avg_counts
return avg_counts, fixation_time
##############################################################################
# Wrapper methods for fitness
# def gen_fitness(self,allele,conc,drugless_rate,ic50):
# fit = fitness.gen_fitness(self,allele,conc,drugless_rate,ic50)
# return fit
# def gen_fit_land(self,conc):
# fit_land = fitness.gen_fit_land(self,conc)
# return fit_land
# Private to avoid confusion with gen_fit_land
def __gen_fl_for_abm(self,conc,counts):
fit_land = self.gen_fl_for_abm(conc,counts)
return fit_land
# def randomize_seascape(self,
# drugless_limits=[1,1.5],
# ic50_limits=[-6.5,-1.5]):
# fitness.randomize_seascape(self,
# drugless_limits=drugless_limits,
# ic50_limits=ic50_limits)
# def gen_null_seascape(self,conc):
# drugless_rates_new,ic50_new = fitness.gen_null_seascape(self,conc)
# return drugless_rates_new,ic50_new
def set_null_seascape(self,conc):
self.drugless_rates,self.ic50 = self.gen_null_seascape(conc)
###############################################################################
# Wrapper methods for generating drug concentration curves
def pharm_eqn(self,t,k_elim=None,k_abs=None,max_dose=None):
conc = pharm.pharm_eqn(self,t,k_elim=k_elim,k_abs=k_abs,max_dose=max_dose)
return conc
def convolve_pharm(self,u):
conv = pharm.convolve_pharm(self,u)
return conv
def gen_impulses(self):
u = pharm.gen_impulses(self)
return u
def gen_on_off(self,duty_cycle=None):
u = pharm.gen_on_off(self,duty_cycle=duty_cycle)
return u
def gen_curves(self):
curve, u = pharm.gen_curves(self)
return curve, u
def gen_passage_drug_protocol(self):
drug_curve = pharm.gen_passage_drug_protocol(self)
return drug_curve
def set_drug_curve(self):
dc = self.gen_curves()
self.drug_curve = dc[0]
##############################################################################
# Wrapper methods for plotting
# def plot_timecourse(self,counts_t=None,title_t=None):
# fig = plotter.plot_timecourse(self,counts_t=counts_t,title_t=title_t)
# return fig
# def plot_fitness_curves(self,fig_title='',plot_r0 = False,save=False,
# savename=None,**kwargs):
# fig,ax = plotter.plot_fitness_curves(self,fig_title=fig_title,
# plot_r0 = plot_r0,save=save,
# savename=savename,**kwargs)
# return fig,ax
| StarcoderdataPython |
3937 | <reponame>VGrondin/CBNetV2_mask_remote<gh_stars>0
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
type='FasterRCNN',
# pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
# type='StandardRoIHead',
_delete_=True,
type='KeypointRoIHead',
output_heatmaps=False,
# keypoint_head=dict(
# type='HRNetKeypointHead',
# num_convs=8,
# in_channels=256,
# features_size=[256, 256, 256, 256],
# conv_out_channels=512,
# num_keypoints=5,
# loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
| StarcoderdataPython |
3355724 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(n):
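    # Dynamic programme for the n-th Catalan number: BT[i] counts the binary
    # trees with i nodes, via BT[i] = sum over j of BT[j] * BT[i - j - 1].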
BT = [0] * (n + 1)
BT[0] = BT[1] = 1
for i in range(2, n + 1):
for j in range(i):
BT[i] += BT[j] * BT[i - j - 1]
return BT[n]
#TOFILL
if __name__ == '__main__':
param = [
(87,),
(69,),
(15,),
(11,),
(11,),
(15,),
(47,),
(65,),
(50,),
(58,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
| StarcoderdataPython |
1651067 | <reponame>ciarakamahele/sasy<filename>simulator/Planners/RockTestPlanner.py
# Copyright 2015 <NAME>-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Planner import Planner
from Primitives.RockPrimitives import RockAction
class RockTestPlanner(Planner):
def __init__(self, discount):
self.plan = None
self.discount = discount
def next_action(self, initial_state, goal_state, prev_obs):
if self.plan is None or len(self.plan) == 0:
self.plan = [RockAction('amn'),
RockAction('ac', (0,)),
RockAction('as'),
RockAction('ame'),
RockAction('ams'),
RockAction('ac', (1,)),
RockAction('ame'),
RockAction('arg')]
return self.plan.pop(0)
| StarcoderdataPython |
1748890 | <filename>bookmanager/book/admin.py
from django.contrib import admin
# Register your models here.
# import the models
from book.models import BookInfo,PeopleInfo
# register the book model
admin.site.register(BookInfo)
# register the people model
admin.site.register(PeopleInfo) | StarcoderdataPython |
1600872 | <gh_stars>1-10
from retic.runtime import *
from retic.transient import *
from retic.typing import *
def check8(val):
try:
val.parse_args
return val
except:
raise CheckError(val)
def check9(val):
try:
val.run_benchmark
return val
except:
raise CheckError(val)
def check1(val):
try:
val.y
return val
except:
raise CheckError(val)
def check2(val):
try:
val.z
return val
except:
raise CheckError(val)
def check5(val):
try:
val.normalize
return val
except:
raise CheckError(val)
def check7(val):
try:
val.add_standard_options_to
return val
except:
raise CheckError(val)
def check0(val):
try:
val.x
return val
except:
raise CheckError(val)
def check4(val):
try:
val.maximize
return val
except:
raise CheckError(val)
def check3(val):
try:
val.normalize
val.__repr__
val.maximize
return val
except:
raise CheckError(val)
def check6(val):
try:
val.OptionParser
return val
except:
raise CheckError(val)
from compat import xrange
import util
from math import sin, cos, sqrt
import optparse
import time
class Point(retic_actual(object)):
def __init__(self, i):
self.x = x = check_type_function(sin)(i)
self.y = (check_type_function(cos)(i) * 3)
self.z = ((x * x) / 2)
__init__ = check_type_function(__init__)
def __repr__(self):
return ('<Point: x=%s, y=%s, z=%s>' % (check0(self).x, check1(self).y, check2(self).z))
__repr__ = check_type_function(__repr__)
def normalize(self):
x = check0(self).x
y = check1(self).y
z = check2(self).z
norm = check_type_function(sqrt)((((x * x) + (y * y)) + (z * z)))
self.x = (check0(self).x / norm)
self.y = (check1(self).y / norm)
self.z = (check2(self).z / norm)
normalize = check_type_function(normalize)
def maximize(self, other):
self.x = (check0(self).x if (check0(self).x > check0(other).x) else check0(other).x)
self.y = (check1(self).y if (check1(self).y > check1(other).y) else check1(other).y)
self.z = (check2(self).z if (check2(self).z > check2(other).z) else check2(other).z)
return self
maximize = check_type_function(maximize)
Point = check_type_class(Point, ['normalize', '__init__', '__repr__', 'maximize'])
def maximize(points):
check_type_list(points)
next = check3(points[0])
for p in check_type_list(points[1:]):
check3(p)
next = check_type_function(check4(next).maximize)(p)
return check3(next)
maximize = check_type_function(maximize)
def benchmark(n):
check_type_int(n)
points = check_type_list([check3(Point(i)) for i in check_type_function(xrange)(n)])
for p in check_type_list(points):
check_type_function(check5(p).normalize)()
return check3(maximize(points))
benchmark = check_type_function(benchmark)
POINTS = 100000
def main(arg, timer):
check_type_int(arg)
times = []
for i in check_type_function(xrange)(arg):
t0 = check_type_function(timer)()
o = check3(benchmark(POINTS))
tk = check_type_function(timer)()
check_type_void(check_type_function(times.append)((tk - t0)))
return times
main = check_type_function(main)
if (__name__ == '__main__'):
parser = check_type_function(check6(optparse).OptionParser)(usage='%prog [options]', description='Test the performance of the Float benchmark')
check_type_function(check7(util).add_standard_options_to)(parser)
(options, args) = check_type_tuple(check_type_function(check8(parser).parse_args)(), 2)
check_type_function(check9(util).run_benchmark)(options, 1, main)
| StarcoderdataPython |
1737705 | <filename>Code/checkpoints.py
from modules import *
from plot import *
from IPython.display import clear_output
class DisplayCallback(tf.keras.callbacks.Callback):
def on_train_begin(self, logs=None):
self.loss = []
self.val_loss = []
def on_epoch_end(self, epoch, logs=None):
clear_output(wait=True)
self.loss.append(logs['loss'])
self.val_loss.append(logs['val_loss'])
show_predictions()
plot_loss_acc(self.loss, self.val_loss, epoch)
lr_callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=8, verbose=1,)
#Define IoU metric (by stack overflow user HuckleberryFinn)
class UpdatedMeanIoU(tf.keras.metrics.MeanIoU):
def __init__(self,
y_true=None,
y_pred=None,
num_classes=None,
name=None,
dtype=None):
super(UpdatedMeanIoU, self).__init__(num_classes = num_classes,name=name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.argmax(y_pred, axis=-1)
return super().update_state(y_true, y_pred, sample_weight)
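# Example (hypothetical sketch, not part of the original project): the metric and
# callbacks in this module would typically be wired into training roughly like this,
# assuming a compiled Keras `model`, a `NUM_CLASSES` constant and tf.data datasets
# `train_ds` / `val_ds` exist elsewhere:
#
#   model.compile(optimizer="adam",
#                 loss="sparse_categorical_crossentropy",
#                 metrics=[UpdatedMeanIoU(num_classes=NUM_CLASSES, name="mean_iou")])
#   model.fit(train_ds, validation_data=val_ds, epochs=50,
#             callbacks=[DisplayCallback(), lr_callback, cp_callback])  # cp_callback is defined below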
# Create a callback that saves the model's weights
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
save_freq=5*BATCH_SIZE) | StarcoderdataPython |
152239 | # -*- coding: utf-8 -*-
"""Data endpoints optimized for reports in the Reporter blueprint."""
from operator import itemgetter
from AIPscan import db
from AIPscan.Data import (
fields,
get_storage_location_description,
get_storage_service_name,
)
from AIPscan.models import AIP, Event, File, FileType, StorageLocation, StorageService
VALID_FILE_TYPES = set(item.value for item in FileType)
def _get_username(agent_string):
"""Retrieve username from the standard agent string stored in the
database, normally formatted as:
* username="test", first_name="", last_name=""
"""
USERNAME = "username="
return agent_string.split(",", 1)[0].replace(USERNAME, "").replace('"', "")
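# For example, given the agent string format documented above,
# _get_username('username="test", first_name="", last_name=""') returns 'test'.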
def _formats_count_query(
storage_service_id, start_date, end_date, storage_location_id=None
):
"""Fetch information from database on file formats.
:param storage_service_id: Storage Service ID (int)
:param start_date: Inclusive AIP creation start date
(datetime.datetime object)
:param end_date: Inclusive AIP creation end date
(datetime.datetime object)
:param storage_location_id: Storage Location ID (int)
:returns: SQLAlchemy query results
"""
FILE_FORMAT = "file_format"
FILE_COUNT = "file_count"
FILE_SIZE = "total_size"
results = (
db.session.query(
File.file_format.label(FILE_FORMAT),
db.func.count(File.id).label(FILE_COUNT),
db.func.sum(File.size).label(FILE_SIZE),
)
.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.filter(File.file_type == FileType.original.value)
.filter(AIP.create_date >= start_date)
.filter(AIP.create_date < end_date)
.group_by(File.file_format)
.order_by(db.func.count(File.id).desc(), db.func.sum(File.size).desc())
)
if storage_location_id:
results = results.filter(StorageLocation.id == storage_location_id)
return results
def formats_count(storage_service_id, start_date, end_date, storage_location_id=None):
"""Return a summary of file formats in Storage Service.
:param storage_service_id: Storage Service ID (int)
:param start_date: Inclusive AIP creation start date
(datetime.datetime object)
:param end_date: Inclusive AIP creation end date
(datetime.datetime object)
:param storage_location_id: Storage Location ID (int)
:returns: "report" dict containing following fields:
report["StorageName"]: Name of Storage Service queried
report["Formats"]: List of results ordered desc by count and size
"""
report = {}
report[fields.FIELD_FORMATS] = []
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
formats = _formats_count_query(
storage_service_id, start_date, end_date, storage_location_id
)
for format_ in formats:
format_info = {}
format_info[fields.FIELD_FORMAT] = format_.file_format
format_info[fields.FIELD_COUNT] = format_.file_count
format_info[fields.FIELD_SIZE] = 0
if format_.total_size is not None:
format_info[fields.FIELD_SIZE] = format_.total_size
report[fields.FIELD_FORMATS].append(format_info)
return report
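# Example of the returned structure (illustrative values only; the exact key strings
# are defined in AIPscan.Data.fields):
#   {"StorageName": "example storage service",
#    "StorageLocation": "example AIP store location",
#    "Formats": [{"Format": "Plain Text File", "Count": 12, "Size": 4096}, ...]}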
def _format_versions_count_query(
storage_service_id, start_date, end_date, storage_location_id
):
"""Fetch information from database on format versions.
:param storage_service_id: Storage Service ID (int)
:param start_date: Inclusive AIP creation start date
(datetime.datetime object)
:param end_date: Inclusive AIP creation end date
(datetime.datetime object)
:param storage_location_id: Storage Location ID (int)
:returns: SQLAlchemy query results
"""
PUID = "puid"
FILE_FORMAT = "file_format"
FORMAT_VERSION = "format_version"
FILE_COUNT = "file_count"
FILE_SIZE = "total_size"
results = (
db.session.query(
File.puid.label(PUID),
File.file_format.label(FILE_FORMAT),
File.format_version.label(FORMAT_VERSION),
db.func.count(File.id).label(FILE_COUNT),
db.func.sum(File.size).label(FILE_SIZE),
)
.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.filter(File.file_type == FileType.original.value)
.filter(AIP.create_date >= start_date)
.filter(AIP.create_date < end_date)
.group_by(File.puid)
.order_by(db.func.count(File.id).desc(), db.func.sum(File.size).desc())
)
if storage_location_id:
results = results.filter(StorageLocation.id == storage_location_id)
return results
def format_versions_count(
storage_service_id, start_date, end_date, storage_location_id=None
):
"""Return a summary of format versions in Storage Service.
:param storage_service_id: Storage Service ID (int)
:param start_date: Inclusive AIP creation start date
(datetime.datetime object)
:param end_date: Inclusive AIP creation end date
(datetime.datetime object)
:param storage_location_id: Storage Location ID (int)
:returns: "report" dict containing following fields:
report["StorageName"]: Name of Storage Service queried
report["FormatVersions"]: List of result files ordered desc by size
"""
report = {}
report[fields.FIELD_FORMAT_VERSIONS] = []
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
versions = _format_versions_count_query(
storage_service_id, start_date, end_date, storage_location_id
)
for version in versions:
version_info = {}
version_info[fields.FIELD_PUID] = version.puid
version_info[fields.FIELD_FORMAT] = version.file_format
try:
version_info[fields.FIELD_VERSION] = version.format_version
except AttributeError:
pass
version_info[fields.FIELD_COUNT] = version.file_count
version_info[fields.FIELD_SIZE] = 0
if version.total_size is not None:
version_info[fields.FIELD_SIZE] = version.total_size
report[fields.FIELD_FORMAT_VERSIONS].append(version_info)
return report
def _largest_files_query(storage_service_id, storage_location_id, file_type, limit):
"""Fetch file information from database for largest files query
This is separated into its own helper function to aid in testing.
"""
if file_type is not None and file_type in VALID_FILE_TYPES:
files = (
File.query.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.filter(File.file_type == file_type)
.order_by(File.size.desc())
)
else:
files = (
File.query.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.order_by(File.size.desc())
)
if storage_location_id:
files = files.filter(StorageLocation.id == storage_location_id)
files = files.limit(limit)
return files
def largest_files(
storage_service_id, storage_location_id=None, file_type=None, limit=20
):
"""Return a summary of the largest files in a given Storage Service
:param storage_service_id: Storage Service ID
:param storage_location_id: Storage Location ID (int)
:param file_type: Optional filter for type of file to return
(acceptable values are "original" or "preservation")
:param limit: Upper limit of number of results to return
:returns: "report" dict containing following fields:
report["StorageName"]: Name of Storage Service queried
report["Files"]: List of result files ordered desc by size
"""
report = {}
report[fields.FIELD_FILES] = []
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
files = _largest_files_query(
storage_service_id, storage_location_id, file_type, limit
)
for file_ in files:
file_info = {}
file_info[fields.FIELD_ID] = file_.id
file_info[fields.FIELD_UUID] = file_.uuid
file_info[fields.FIELD_NAME] = file_.name
try:
file_info[fields.FIELD_SIZE] = int(file_.size)
except TypeError:
file_info[fields.FIELD_SIZE] = 0
file_info[fields.FIELD_AIP_ID] = file_.aip_id
file_info[fields.FIELD_FILE_TYPE] = file_.file_type.value
try:
file_info[fields.FIELD_FORMAT] = file_.file_format
except AttributeError:
pass
try:
file_info[fields.FIELD_VERSION] = file_.format_version
except AttributeError:
pass
try:
file_info[fields.FIELD_PUID] = file_.puid
except AttributeError:
pass
matching_aip = AIP.query.get(file_.aip_id)
if matching_aip is not None:
file_info[fields.FIELD_AIP_NAME] = matching_aip.transfer_name
file_info[fields.FIELD_AIP_UUID] = matching_aip.uuid
report[fields.FIELD_FILES].append(file_info)
return report
def _query_aips_by_file_format_or_puid(
storage_service_id,
storage_location_id,
search_string,
original_files=True,
file_format=True,
):
"""Fetch information on all AIPs with given format or PUID from db.
:param storage_service_id: Storage Service ID (int)
:param storage_location_id: Storage Location ID (int)
:param search_string: File format or PUID (str)
:param original_files: Flag indicating whether returned data
describes original (default) or preservation files (bool)
:param file_format: Flag indicating whether to filter on file
format (default) or PUID (bool)
:returns: SQLAlchemy query results
"""
AIP_ID = "id"
TRANSFER_NAME = "name"
AIP_UUID = "uuid"
FILE_COUNT = "file_count"
FILE_SIZE = "total_size"
aips = (
db.session.query(
AIP.id.label(AIP_ID),
AIP.transfer_name.label(TRANSFER_NAME),
AIP.uuid.label(AIP_UUID),
db.func.count(File.id).label(FILE_COUNT),
db.func.sum(File.size).label(FILE_SIZE),
)
.join(File)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
.group_by(AIP.id)
.order_by(db.func.count(File.id).desc(), db.func.sum(File.size).desc())
)
if storage_location_id:
aips = aips.filter(AIP.storage_location_id == storage_location_id)
if original_files is False:
aips = aips.filter(File.file_type == FileType.preservation.value)
else:
aips = aips.filter(File.file_type == FileType.original.value)
if file_format:
return aips.filter(File.file_format == search_string)
return aips.filter(File.puid == search_string)
def _aips_by_file_format_or_puid(
storage_service_id,
storage_location_id,
search_string,
original_files=True,
file_format=True,
):
"""Return overview of all AIPs containing original files in format
:param storage_service_id: Storage Service ID (int)
:param storage_location_id: Storage Location ID (int)
:param search_string: File format name or PUID (str)
    :param original_files: Flag indicating whether returned data
    describes original (default) or preservation files (bool)
    :param file_format: Flag indicating whether to filter on file
    format (default) or PUID (bool)
:returns: "report" dict containing following fields:
report["StorageName"]: Name of Storage Service queried
report["AIPs"]: List of result AIPs ordered desc by count
"""
report = {}
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
if file_format:
report[fields.FIELD_FORMAT] = search_string
else:
report[fields.FIELD_PUID] = search_string
report[fields.FIELD_AIPS] = []
results = _query_aips_by_file_format_or_puid(
storage_service_id,
storage_location_id,
search_string,
original_files,
file_format,
)
for result in results:
aip_info = {}
aip_info[fields.FIELD_ID] = result.id
aip_info[fields.FIELD_AIP_NAME] = result.name
aip_info[fields.FIELD_UUID] = result.uuid
aip_info[fields.FIELD_COUNT] = result.file_count
aip_info[fields.FIELD_SIZE] = result.total_size
report[fields.FIELD_AIPS].append(aip_info)
return report
def aips_by_file_format(
storage_service_id, file_format, original_files=True, storage_location_id=None
):
"""Return overview of AIPs containing original files in format.
:param storage_service_id: Storage Service ID (int)
:param file_format: File format name (str)
:param original_files: Flag indicating whether returned data
describes original (default) or preservation files (bool)
:param storage_location_id: Storage Location ID (int)
:returns: Report dict provided by _aips_by_file_format_or_puid
"""
return _aips_by_file_format_or_puid(
storage_service_id=storage_service_id,
storage_location_id=storage_location_id,
search_string=file_format,
original_files=original_files,
)
def aips_by_puid(
storage_service_id, puid, original_files=True, storage_location_id=None
):
"""Return overview of AIPs containing original files in format.
:param storage_service_id: Storage Service ID (int)
:param puid: PUID (str)
:param original_files: Flag indicating whether returned data
describes original (default) or preservation files (bool)
:param storage_location_id: Storage Location ID (int)
:returns: Report dict provided by _aips_by_file_format_or_puid
"""
return _aips_by_file_format_or_puid(
storage_service_id=storage_service_id,
storage_location_id=storage_location_id,
search_string=puid,
original_files=original_files,
file_format=False,
)
def agents_transfers(storage_service_id, storage_location_id=None):
"""Return information about agents involved in creating a transfer
and provide some simple statistics, e.g. ingest start time and
ingest finish time.
"""
report = {}
ingests = []
storage_service_name = get_storage_service_name(storage_service_id)
if not storage_service_name:
# No storage service has been returned and so we have nothing
# to return.
report[fields.FIELD_STORAGE_NAME] = None
report[fields.FIELD_STORAGE_LOCATION] = None
report[fields.FIELD_INGESTS] = ingests
return report
    report[fields.FIELD_STORAGE_NAME] = storage_service_name
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
aips = AIP.query.filter_by(storage_service_id=storage_service_id)
if storage_location_id:
aips = aips.filter(AIP.storage_location_id == storage_location_id)
aips = aips.all()
EVENT_TYPE = "ingestion"
AGENT_TYPE = "Archivematica user"
for aip in aips:
event = (
db.session.query(Event)
.join(File)
.filter(File.aip_id == aip.id, Event.type == EVENT_TYPE)
.first()
)
# This defensive check is necessary for now because of packages that
# are deleted after extraction. See issue #104 for details.
if event is None:
continue
log_line = {}
log_line[fields.FIELD_AIP_UUID] = aip.uuid
log_line[fields.FIELD_AIP_NAME] = aip.transfer_name
log_line[fields.FIELD_INGEST_START_DATE] = str(event.date)
log_line[fields.FIELD_INGEST_FINISH_DATE] = str(aip.create_date)
for agent in event.event_agents:
if agent.agent_type == AGENT_TYPE:
log_line[fields.FIELD_USER] = _get_username(agent.agent_value)
ingests.append(log_line)
report[fields.FIELD_INGESTS] = ingests
return report
def _preservation_derivatives_query(storage_service_id, storage_location_id, aip_uuid):
"""Fetch information on preservation derivatives from db.
:param storage_service_id: Storage Service ID (int)
:param storage_location_id: Storage Location ID (int)
:param aip_uuid: AIP UUID (str)
:returns: SQLAlchemy query results
"""
files = (
File.query.join(AIP)
.join(StorageLocation)
.join(StorageService)
.filter(StorageService.id == storage_service_id)
        .filter(File.file_type == FileType.preservation.value)
.order_by(AIP.uuid, File.file_format)
)
if storage_location_id:
files = files.filter(StorageLocation.id == storage_location_id)
if aip_uuid:
files = files.filter(AIP.uuid == aip_uuid)
return files
def preservation_derivatives(
storage_service_id, storage_location_id=None, aip_uuid=None
):
"""Return details of preservation derivatives in Storage Service.
This includes information about each preservation derivative, as well as
its corresponding original file and AIP.
:param storage_service_id: Storage Service ID (int)
:param storage_location_id: Storage Location ID (int)
:param aip_uuid: AIP UUID (str)
:returns: "report" dict containing following fields:
report["StorageName"]: Name of Storage Service queried
report["Files"]: List of result files ordered desc by size
"""
report = {}
report[fields.FIELD_FILES] = []
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
files = _preservation_derivatives_query(
storage_service_id, storage_location_id, aip_uuid
)
for file_ in files:
file_info = {}
file_info[fields.FIELD_AIP_UUID] = file_.aip.uuid
file_info[fields.FIELD_AIP_NAME] = file_.aip.transfer_name
file_info[fields.FIELD_ID] = file_.id
file_info[fields.FIELD_UUID] = file_.uuid
file_info[fields.FIELD_NAME] = file_.name
file_info[fields.FIELD_FORMAT] = file_.file_format
original_file = file_.original_file
if original_file:
file_info[fields.FIELD_ORIGINAL_UUID] = original_file.uuid
file_info[fields.FIELD_ORIGINAL_NAME] = original_file.name
file_info[fields.FIELD_ORIGINAL_FORMAT] = original_file.file_format
file_info[fields.FIELD_ORIGINAL_VERSION] = ""
try:
file_info[fields.FIELD_ORIGINAL_VERSION] = original_file.format_version
except AttributeError:
pass
file_info[fields.FIELD_ORIGINAL_PUID] = ""
try:
file_info[fields.FIELD_ORIGINAL_PUID] = original_file.puid
except AttributeError:
pass
report[fields.FIELD_FILES].append(file_info)
return report
def _get_storage_locations(storage_service_id):
"""Return queryset of locations in this Storage Service."""
return StorageLocation.query.filter_by(storage_service_id=storage_service_id).all()
def _sort_storage_locations(unsorted_locations):
"""Sort list of location dictionaries by AIP count descending."""
return sorted(unsorted_locations, key=itemgetter(fields.FIELD_AIPS), reverse=True)
def storage_locations(storage_service_id, start_date, end_date):
"""Return details of AIP store locations in Storage Service.
    :param storage_service_id: Storage Service ID (int)
    :param start_date: Inclusive AIP creation start date
    (datetime.datetime object)
    :param end_date: Inclusive AIP creation end date
    (datetime.datetime object)
    :returns: "report" dict containing following fields:
    report["StorageName"]: Name of Storage Service queried
    report["Locations"]: List of result locations ordered desc by AIP count
"""
report = {}
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
locations = _get_storage_locations(storage_service_id)
unsorted_results = []
for location in locations:
loc_info = {}
loc_info[fields.FIELD_ID] = location.id
loc_info[fields.FIELD_UUID] = location.uuid
loc_info[fields.FIELD_STORAGE_LOCATION] = location.description
loc_info[fields.FIELD_AIPS] = location.aip_count(start_date, end_date)
loc_info[fields.FIELD_SIZE] = location.aip_total_size(start_date, end_date)
unsorted_results.append(loc_info)
report[fields.FIELD_LOCATIONS] = _sort_storage_locations(unsorted_results)
return report
| StarcoderdataPython |
3234003 | ########################################################################
# Utility functions
#
# <NAME>, 26/03/2020
########################################################################
import os
def create_dirs(fn):
"""Create missing directories for output fn."""
if not os.path.isdir(os.path.dirname(fn)) and os.path.dirname(fn) != "":
os.makedirs(os.path.dirname(fn), exist_ok=True)
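# Example (illustrative): create_dirs("results/run1/output.csv") ensures that the
# directory "results/run1" exists before the file is written; a bare filename such
# as "output.csv" is left alone because its dirname is "".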
| StarcoderdataPython |
3349866 | <filename>process/tests/testDrawRadar.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, os
from VtkRenderer import *
import numpy as np
from RadarTransforms import *
from LidarTransforms import *
from Q50_config import *
class ImageGrabberCallback:
def __init__(self, map_file):
self.map_file = map_file
self.radar_params = LoadParameters('q50_4_3_14_params')['radar']
self.lidar_actor = None
self.radar_actors = []
self.clouds = loadLDRCamMap(map_file)
self.rdr_pts = loadRDRCamMap(map_file)
self.count = 0
def execute(self, iren, event):
fren = iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
radar_data = loadRDR(self.rdr_pts[self.count])[0]
radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], self.radar_params)
if radar_data.shape[0] > 0:
mask = (radar_data[:, 5] > 5)
mask &= (radar_data[:, 6] > -20)
radar_data = radar_data[mask]
if radar_data.shape[0] > 0:
for i in xrange(len(self.radar_actors)):
fren.RemoveActor(self.radar_actors[i])
self.radar_actors = []
self.radar_clouds = []
for i in xrange(radar_data.shape[0]):
self.radar_clouds.append(VtkBoundingBox(radar_data[i, :]))
self.radar_actors.append(self.radar_clouds[i].get_vtk_box())
fren.AddActor(self.radar_actors[i])
lidar_data = loadLDR(self.clouds[self.count])
self.lidar_cloud = VtkPointCloud(lidar_data[:, :3], lidar_data[:,3])
fren.RemoveActor(self.lidar_actor)
self.lidar_actor = self.lidar_cloud.get_vtk_cloud(zMin=0, zMax=255)
fren.AddActor(self.lidar_actor)
if self.count == 0:
fren.ResetCamera()
fren.GetActiveCamera().Zoom(1.6)
self.count += 1
iren.GetRenderWindow().Render()
if __name__ == '__main__':
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(1280/2, 960/2)
renderer = vtk.vtkRenderer()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
renderWindowInteractor.SetInteractorStyle(mouseInteractor)
renderWindow.Render()
cb = ImageGrabberCallback(sys.argv[1])
renderWindowInteractor.AddObserver('TimerEvent', cb.execute)
timerId = renderWindowInteractor.CreateRepeatingTimer(1)
renderWindowInteractor.Start() | StarcoderdataPython |
1773845 | from pyramid.i18n import TranslationStringFactory
from formencode import Schema, validators
from pytz import common_timezones
from ow.schemas.blob import FieldStorageBlob
from ow.utilities import get_available_locale_names, get_gender_names
_ = TranslationStringFactory('OpenWorkouts')
class PasswordMatch(validators.UnicodeString):
messages = {
"dont_match": _('The given password does not match the existing one '),
}
def _validate_python(self, value, state):
super(PasswordMatch, self)._validate_python(value, state)
if not state.user.check_password(value):
raise validators.Invalid(
self.message('dont_match', state), value, state)
class UniqueNickname(validators.UnicodeString):
messages = {
"name_exists": _('Another user is already using the nickname %(name)s')
}
def _validate_python(self, value, state):
super(UniqueNickname, self)._validate_python(value, state)
if value.lower() in state.names:
raise validators.Invalid(
self.message('name_exists', state, name=value), value, state)
class UniqueEmail(validators.Email):
messages = {
"email_exists": _('Another user is already registered with the email '
'%(email)s')
}
def _validate_python(self, value, state):
super(UniqueEmail, self)._validate_python(value, state)
if value.lower() in state.emails:
raise validators.Invalid(
self.message('email_exists', state, email=value), value, state)
class UserAddSchema(Schema):
"""
Schema to add a new user
"""
allow_extra_fields = True
filter_extra_fields = True
email = UniqueEmail(not_empty=True)
nickname = UniqueNickname(if_missing='')
firstname = validators.UnicodeString()
lastname = validators.UnicodeString()
group = validators.UnicodeString(if_missing='')
class UserProfileSchema(Schema):
"""
Schema for the "edit profile" form for users
"""
allow_extra_fields = True
filter_extra_fields = True
firstname = validators.UnicodeString(not_empty=True)
lastname = validators.UnicodeString(not_empty=True)
email = validators.Email(not_empty=True)
nickname = UniqueNickname(if_missing='')
bio = validators.UnicodeString(if_missing='')
birth_date = validators.DateConverter(month_style='dd/mm/yyyy')
height = validators.Number()
weight = validators.Number()
gender = validators.OneOf(
[gender[0] for gender in get_gender_names()],
not_empty=True)
    picture = FieldStorageBlob(if_empty=None, if_missing=None,
whitelist=['jpg', 'jpeg', 'png', 'gif'])
timezone = validators.OneOf(common_timezones, if_missing='UTC')
locale = validators.OneOf(
[locale[0] for locale in get_available_locale_names()],
if_missing='en'
)
class ChangePasswordSchema(Schema):
allow_extra_fields = True
filter_extra_fields = True
old_password = PasswordMatch(not_empty=True)
password = validators.UnicodeString(min=9, not_empty=True)
password_confirm = validators.UnicodeString(not_empty=True)
chained_validators = [validators.FieldsMatch(
'password', 'password_confirm')]
class SignUpSchema(Schema):
"""
Schema for the sign up of new users
"""
allow_extra_fields = True
filter_extra_fields = True
nickname = UniqueNickname(if_missing='')
firstname = validators.UnicodeString(not_empty=True)
lastname = validators.UnicodeString(not_empty=True)
email = UniqueEmail(not_empty=True)
password = validators.UnicodeString(min=9, not_empty=True)
password_confirm = validators.UnicodeString(not_empty=True)
chained_validators = [validators.FieldsMatch(
'password', 'password_confirm')]
class RecoverPasswordSchema(Schema):
"""
Schema for the password recovery
"""
allow_extra_fields = True
filter_extra_fields = True
nickname = UniqueNickname(if_missing='')
email = UniqueEmail(not_empty=True)
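# Example (hypothetical sketch): these formencode schemas are normally applied to
# submitted form data together with a `state` object that carries the already-used
# nicknames and emails (see UniqueNickname / UniqueEmail above), e.g.
#   clean = SignUpSchema().to_python(form_data, state)
# where invalid input raises formencode.Invalid.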
| StarcoderdataPython |
4829855 | <filename>clocwalk/libs/analyzer/nodejs.py
# coding: utf-8
import json
__product__ = 'JavaScript'
__version__ = '0.3'
from clocwalk.libs.core.common import recursive_search_files
def _get_dependencies(file_name='package.json', origin=None):
"""
get properties
:param file_name:
:return:
"""
result = []
with open(file_name, 'r') as fp:
json_obj = json.load(fp)
for tag in ['dependencies', 'devDependencies']:
            for name, ver in json_obj.get(tag, {}).items():  # tolerate package.json files missing a section
result.append({
'vendor': tag,
'product': name,
'version': ver,
'new_version': '',
'parent_file': '',
'cve': {},
'origin_file': file_name,
})
return result
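# Example (hypothetical package.json, for illustration): a file containing
#   {"dependencies": {"express": "^4.17.1"}, "devDependencies": {"mocha": "^8.0.0"}}
# produces entries such as
#   {'vendor': 'dependencies', 'product': 'express', 'version': '^4.17.1',
#    'new_version': '', 'parent_file': '', 'cve': {}, 'origin_file': 'package.json'}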
def start(**kwargs):
"""
:param kwargs:
:return:
"""
code_dir = kwargs.get('code_dir', '')
file_name = kwargs.get('file_name', 'package.json')
skipNewVerCheck = kwargs.get('skipNewVerCheck', False)
result_file_list = recursive_search_files(code_dir, '*/package.json')
result = []
for item in result_file_list:
# FIXME
relative_path = item.replace('{0}'.format(code_dir), '')
relative_path = relative_path[1:] if relative_path.startswith('/') else relative_path
result.extend(_get_dependencies(file_name=item, origin=relative_path))
return result
| StarcoderdataPython |
3316351 | import sys
class listenerlibrary(object):
ROBOT_LISTENER_API_VERSION = 2
ROBOT_LIBRARY_SCOPE = "TEST CASE"
def __init__(self):
self.ROBOT_LIBRARY_LISTENER = self
self.events = []
def get_events(self):
return self.events[:]
def _start_suite(self, name, attrs):
self.events.append('start suite %s' % name)
def _start_test(self, name, attrs):
self.events.append('start test %s' % name)
def end_test(self, name, attrs):
self.events.append('end test %s' % name)
def _start_keyword(self, name, attrs):
self.events.append('start kw %s' % name)
def _end_keyword(self, name, attrs):
self.events.append('end kw %s' % name)
def _close(self):
self.events.append('close %s' % self.ROBOT_LIBRARY_SCOPE)
sys.__stderr__.write("CLOSING %s\n" % self.ROBOT_LIBRARY_SCOPE)
def events_should_be(self, *expected):
assert self._format(self.events) == self._format(expected), 'Expected events\n %s\n actual\n %s' % (self._format(expected), self._format(self.events))
def events_should_be_empty(self):
assert not self.events, 'Expected empty events, has %s' % self._format(self.events)
def _format(self, events):
return ' | '.join(events)
| StarcoderdataPython |
4822005 | """
*Now*
"""
def now():
eps = 1e-16
dummy_loss = Time.Loss(
0,
0,
0,
0,
0,
)
dummy_status = Report.LearningStatus(
dummy_loss,
0,
0,
)
return Now.Learning(
eps,
eps,
eps,
eps,
dummy_status,
[],
[],
false,
)
| StarcoderdataPython |
3202749 | import math
from typing import Sequence
import fastfilters
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
class Filter(BaseEstimator, TransformerMixin):
def fit(self, X=None, y=None, **kwargs):
return self
def transform(self, X):
raise NotImplementedError
@property
def kernel_size(self):
raise NotImplementedError
def _more_tags(self):
return {"requires_fit": False, "stateless": True}
class SingleFilter(Filter):
def __init__(self, scale):
self.scale = scale
def __init_subclass__(cls, order, **kwargs):
super().__init_subclass__(**kwargs)
cls.order = order
@property
def kernel_size(self):
# TODO: Make sure that the kernel size formula is pixel-perfect.
return math.ceil((3 + self.order / 2) * self.scale) + 1
_required_parameters = ("scale",)
class Gaussian(SingleFilter, order=0):
def transform(self, X):
return fastfilters.gaussianSmoothing(X, sigma=self.scale)
class DifferenceOfGaussians(SingleFilter, order=0):
def __init__(self, scale, *, inner_k=0.5):
super().__init__(scale)
self.inner_k = inner_k
def transform(self, X):
outer = fastfilters.gaussianSmoothing(X, sigma=self.scale)
inner = fastfilters.gaussianSmoothing(X, sigma=self.inner_k * self.scale)
return outer - inner
class GaussianGradientMagnitude(SingleFilter, order=1):
def transform(self, X):
return fastfilters.gaussianGradientMagnitude(X, sigma=self.scale)
class LaplacianOfGaussian(SingleFilter, order=2):
def transform(self, X):
return fastfilters.laplacianOfGaussian(X, scale=self.scale)
class StructureTensorEigenvalues(SingleFilter, order=1):
def __init__(self, scale, *, inner_k=0.5):
super().__init__(scale)
self.inner_k = inner_k
def transform(self, X):
return fastfilters.structureTensorEigenvalues(
X, innerScale=self.inner_k * self.scale, outerScale=self.scale
)
class HessianOfGaussianEigenvalues(SingleFilter, order=2):
def transform(self, X):
return fastfilters.hessianOfGaussianEigenvalues(X, scale=self.scale)
class FilterSet(Filter):
def __init__(self, *, filters: Sequence[Filter]):
self.filters = filters
def transform(self, X):
# TODO: Optimize feature computations by sharing intermediate results.
ys = [f.transform(X).reshape((*X.shape, -1)) for f in self.filters]
return numpy.concatenate(ys, axis=-1)
@property
def kernel_size(self):
return max(f.kernel_size for f in self.filters)
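# Example (hypothetical usage sketch): a small feature bank built from these filters
# and applied to a 2-D image (fastfilters typically expects float32 input):
#
#   import numpy
#   image = numpy.random.rand(64, 64).astype(numpy.float32)
#   bank = FilterSet(filters=[Gaussian(1.0), LaplacianOfGaussian(1.0),
#                             HessianOfGaussianEigenvalues(3.5)])
#   features = bank.transform(image)  # shape: (64, 64, n_feature_channels)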
| StarcoderdataPython |
3376122 | <reponame>klow-analytics/klow
import unittest
from ddt import ddt
from ddt import file_data
from google_analytics_pipeline.core.enrichment.page import PageEnrichmentFn
@ddt
class TestPageEnrichment(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.test_fn = PageEnrichmentFn().process
@file_data("../fixtures/page-enrichment-testcases.json")
def test(self, test_input, expected_output):
for index, output in enumerate(self.test_fn(test_input)):
self.assertDictEqual(output, expected_output[index])
| StarcoderdataPython |
1619895 | <reponame>ebbaberg/MovingFiles
import csv
import os
import shutil
import numpy as np
import random
import pandas as pd
import math
import General_Moving
class MakeKFolds:
def __init__(self,
labels_path = '/home/jovyan/scratch-shared/Ebba/BBBC021_Filtered_Data/Labels.csv',
exclude_images_path = "", # Empty for none
output_dir = '/home/jovyan/Inputs/BBBC021_LeaveOneOut_Kfolds',
include_groups = [], #Empty for everything included,
include_header = 'moa',
exclude_groups = ["Cholesterol-lowering","Eg5 inhibitors"], #Empty for everything included,
exclude_header = 'moa',
class_column_header = 'moa',
meta_data_header = ['plate', 'well', 'site'],
image_number_heading = "image_number",
intact_group_header = 'compound',
has_controls = False,
frac_of_controls_to_use = 0.0,
intact_control_group_headers = ['plate', 'well'], # NOTE: hard coded for 2 headers to to troubles with dataframe
):
self.labels_path = labels_path
self.output_dir = output_dir
self.exclude_images_path = exclude_images_path
self.included_groups = include_groups
self.include_header = include_header
self.exclude_groups = exclude_groups
self.exclude_header = exclude_header
self.meta_data_header = meta_data_header
self.image_number_heading = image_number_heading
self.class_column_header = class_column_header
self.intact_group_header = intact_group_header
self.intact_control_group_headers = intact_control_group_headers
self.has_controls = has_controls
self.frac_of_controls_to_use = frac_of_controls_to_use
def main(self):
print("Started get info.")
df_base = pd.read_csv(self.labels_path , delimiter= ",")
df_base.dropna(subset = [self.class_column_header], inplace=True)
df_base.drop_duplicates(inplace=True)
self.included_groups = General_Moving.get_included_groups(self.include_header,self.included_groups,self.exclude_groups,df_base)
df = df_base[df_base[self.include_header].isin(self.included_groups) & ~df_base[self.exclude_header].isin(self.exclude_groups)]
k_folds = self.get_k_folds(df)
if self.has_controls:
controll_k_folds = self.get_k_folds_control(df)
k_folds.extend(controll_k_folds)
print("Made " + str(len(k_folds)) +" k-folds")
##Write out data
if os.path.exists(self.output_dir) and os.path.isdir(self.output_dir):
shutil.rmtree(self.output_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
print("made the output dir")
print("Starting to write to files")
fold_number = 1
for df_fold in k_folds:
            df_fold.to_csv(os.path.join(self.output_dir, "k_fold_" + str(fold_number) + ".csv"), index=False)
fold_number = fold_number + 1
print("Finished. Find output in: " + self.output_dir)
def get_k_folds(self, df):
k_folds = []
df_used = df[df[self.include_header].isin(self.included_groups) & ~df[self.exclude_header].isin(self.exclude_groups)]
df_used = General_Moving.use_only_good_images(self.exclude_images_path,self.image_number_heading, self.meta_data_header, df_used)
#group by metadataheaders except sites
df_used = df_used[df_used[self.include_header].isin(self.included_groups)]
groups = df_used[self.intact_group_header].unique()
for group in groups:
group_rows = df_used[df_used[self.intact_group_header] == group]
k_folds.append(group_rows)
return k_folds
def get_k_folds_control(self, df):
k_folds = []
df_control = df[df['type'] == "control"]
unique_combos = df_control[df_control.columns & self.intact_control_group_headers].drop_duplicates().to_numpy()
for combo in unique_combos:
k_fold = df_control[(df_control[self.intact_control_group_headers[0]] == combo[0]) & (df_control[self.intact_control_group_headers[1]] == combo[1])]
k_folds.append(k_fold)
return k_folds
def getControlSampel(self, df_group, df_used_wells, df_used, n_sample):
if(df_group[self.intact_control_group_header].count() == 0):
return False
sampled_well = np.random.choice(df_group[self.intact_control_group_header].unique(), n_sample)
df_sampled = df_group[df_group[self.intact_control_group_header].isin(sampled_well)]
df_used_wells = df_used_wells.append(df_sampled)
return True, df_sampled, df_used_wells, df_used
if __name__ == "__main__":
MakeKFolds().main() | StarcoderdataPython |
105419 | import numpy as np # numerical tools
from scipy import integrate
from scipy import interpolate
c_light=299792.458#in km/s
#Find nearest value
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return array[idx]
#### DATA SN
def get_SN_info(targetname):
data_sn=np.loadtxt('Info_SNe_KAIT.txt',usecols=[1,2,3,4,5,6,7]).transpose()
name_SN_kait=np.array(np.genfromtxt('Info_SNe_KAIT.txt',usecols=[0],dtype='str'))
ind_SN=np.where(np.array(name_SN_kait)==targetname)[0][0]
A_V=data_sn[0][ind_SN]
z_hel=data_sn[1][ind_SN]*1.0/c_light
err_z_hel=data_sn[2][ind_SN]*1.0/c_light
JD_explo=data_sn[3][ind_SN]
err_JD_explo=data_sn[4][ind_SN]
z_cmb=data_sn[5][ind_SN]*1.0/c_light
err_z_cmb=data_sn[6][ind_SN]*1.0/c_light
return A_V,z_hel,err_z_hel,JD_explo,err_JD_explo,z_cmb,err_z_cmb
#Get SN photometry
def get_sn(targetname):
data_sn=open('Nat_KAIT/%s.txt'%targetname,'r')
lines = data_sn.readlines()
fields = lines[0].split()
ind_B=np.where(np.array(fields)=='B')[0][0]
ind_V=np.where(np.array(fields)=='V')[0][0]
ind_R=np.where(np.array(fields)=='R')[0][0]
ind_I=np.where(np.array(fields)=='I')[0][0]
MJD = {}
mags = {}
emags = {}
tel = {}
for i in range(4):
this_filter = ['B','V','R','I']
MJD[this_filter[i]] = []
mags[this_filter[i]] = []
emags[this_filter[i]] = []
tel[this_filter[i]] = []
for j in range(np.size(lines)):
if (j!=0):
if ((lines[j].split()[ind_B+1])<'0.8') and ((lines[j].split()[ind_B+1])!='NaN') and ((lines[j].split()[0][0])!='#'):
mags['B'].append(float(lines[j].split()[ind_B]))
emags['B'].append(float(lines[j].split()[ind_B+1]))
MJD['B'].append(float(lines[j].split()[1]))
tel['B'].append(lines[j].split()[3])
if ((lines[j].split()[ind_V+1])<'0.8') and ((lines[j].split()[ind_V+1])!='NaN')and ((lines[j].split()[0][0])!='#'):
mags['V'].append(float(lines[j].split()[ind_V]))
emags['V'].append(float(lines[j].split()[ind_V+1]))
MJD['V'].append(float(lines[j].split()[1]))
tel['V'].append(lines[j].split()[3])
if ((lines[j].split()[ind_R+1])<'0.8') and ((lines[j].split()[ind_R+1])!='NaN') and ((lines[j].split()[0][0])!='#'):
mags['R'].append(float(lines[j].split()[ind_R]))
emags['R'].append(float(lines[j].split()[ind_R+1]))
MJD['R'].append(float(lines[j].split()[1]))
tel['R'].append(lines[j].split()[3])
if ((lines[j].split()[ind_I+1])<'0.8') and ((lines[j].split()[ind_I+1])!='NaN') and ((lines[j].split()[0][0])!='#'):
mags['I'].append(float(lines[j].split()[ind_I]))
emags['I'].append(float(lines[j].split()[ind_I+1]))
MJD['I'].append(float(lines[j].split()[1]))
tel['I'].append(lines[j].split()[3])
for f in MJD:
MJD[f],mags[f],emags[f],tel[f]=zip(*sorted(zip(MJD[f],mags[f],emags[f],tel[f])))
MJD[f] = np.array(MJD[f])
mags[f] = np.array(mags[f])
emags[f] = np.array(emags[f])
tel[f] = np.array(tel[f])
return MJD,mags,emags,tel
#Linear interpolation of the magnitude
def inter_mag(MJD,mags,emags):
B_band=interpolate.interp1d(MJD['B'],mags['B'])
B_band_plus=interpolate.interp1d(MJD['B'],mags['B']+emags['B'])
V_band=interpolate.interp1d(MJD['V'],mags['V'])
V_band_plus=interpolate.interp1d(MJD['V'],mags['V']+emags['V'])
if np.size(MJD['R'])>0:
R_band=interpolate.interp1d(MJD['R'],mags['R'])
R_band_plus=interpolate.interp1d(MJD['R'],mags['R']+emags['R'])
else:
R_band=[]
R_band_plus=[]
I_band=interpolate.interp1d(MJD['I'],mags['I'])
I_band_plus=interpolate.interp1d(MJD['I'],mags['I']+emags['I'])
return B_band,B_band_plus,V_band,V_band_plus,R_band,R_band_plus,I_band,I_band_plus
#Derive for each CSP filter the effective wavelength
def effective_wavelength_csp(lam_spec,flux_spec,filter_name):
### Each transmission function ###########
trans_u=np.loadtxt('Filters/CSP/u_swope.txt')
lambda_u=trans_u[:,0]
s_u=trans_u[:,1]
trans_g=np.loadtxt('Filters/CSP/g_swope.txt')
lambda_g=trans_g[:,0]
s_g=trans_g[:,1]
trans_r=np.loadtxt('Filters/CSP/r_swope.txt')
lambda_r=trans_r[:,0]
s_r=trans_r[:,1]
trans_i=np.loadtxt('Filters/CSP/i_swope.txt')
lambda_i=trans_i[:,0]
s_i=trans_i[:,1]
trans_V=np.loadtxt('Filters/CSP/V_swope.txt')
lambda_V=trans_V[:,0]
s_V=trans_V[:,1]
trans_B=np.loadtxt('Filters/CSP/B_swope.txt')
lambda_B=trans_B[:,0]
s_B=trans_B[:,1]
F_u_func=interpolate.interp1d(lambda_u,s_u) #interpolation Filtre u
F_B_func=interpolate.interp1d(lambda_B,s_B) #interpolation Filtre B
F_V_func=interpolate.interp1d(lambda_V,s_V) #interpolation Filtre V
F_g_func=interpolate.interp1d(lambda_g,s_g)
F_r_func=interpolate.interp1d(lambda_r,s_r) #interpolation Filtre t
F_i_func=interpolate.interp1d(lambda_i,s_i) #interpolation Filtre i
N_pt=3000
lambda_u=np.linspace(min(lambda_u),max(lambda_u),N_pt)
lambda_B=np.linspace(min(lambda_B),max(lambda_B),N_pt)
lambda_V=np.linspace(min(lambda_V),max(lambda_V),N_pt)
lambda_g=np.linspace(min(lambda_g),max(lambda_g),N_pt)
lambda_r=np.linspace(min(lambda_r),max(lambda_r),N_pt)
lambda_i=np.linspace(min(lambda_i),max(lambda_i),N_pt)
if filter_name==str('u'):
F_filter_func=interpolate.interp1d(lambda_u,F_u_func(lambda_u)) #interpolation Filtre B
lam_filter=lambda_u
if filter_name==str('B'):
F_filter_func=interpolate.interp1d(lambda_B,F_B_func(lambda_B)) #interpolation Filtre B
lam_filter=lambda_B
if filter_name==str('g'):
F_filter_func=interpolate.interp1d(lambda_g,F_g_func(lambda_g))
lam_filter=lambda_g
if filter_name==str('V'):
F_filter_func=interpolate.interp1d(lambda_V,F_V_func(lambda_V)) #interpolation Filtre V
lam_filter=lambda_V
if filter_name==str('r'):
F_filter_func=interpolate.interp1d(lambda_r,F_r_func(lambda_r)) #interpolation Filtre r
lam_filter=lambda_r
if filter_name==str('i'):
F_filter_func=interpolate.interp1d(lambda_i,F_i_func(lambda_i)) #interpolation Filtre i
lam_filter=lambda_i
# interpolation spectre
F_spec=interpolate.interp1d(lam_spec,flux_spec)
# New wavelength vector with wavelength of filter + spectrum
wavelength_to_interpolate=np.concatenate([lam_spec,lam_filter])
# Sort the wavelength
wavelength_to_interpolate.sort()
# We select only the wavelenght in the filter
wavelength_to_interpolate_2=wavelength_to_interpolate[(wavelength_to_interpolate>min(lam_filter)) & (wavelength_to_interpolate<max(lam_filter))]
# We calculate the filter response
interpolate_filter_response=F_filter_func(wavelength_to_interpolate_2)
# We calculate SEDter
SED_inside_filter=F_spec(wavelength_to_interpolate_2)
# num=f*s*lambda
num=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2*wavelength_to_interpolate_2
# num=f*s
dem=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2
# integral de num / integral de dem
lambda_eff_filter=np.trapz(num)*1.0/np.trapz(dem)
return lambda_eff_filter
def effective_wavelength_KAIT(lam_spec,flux_spec,filter_name):
### KAIT 2 ###########
trans_B_kait2=np.loadtxt('Filters/KAIT_NICKEL/B_kait2.txt')
lambda_B_kait2=trans_B_kait2[:,0]
s_B_kait2=trans_B_kait2[:,1]
trans_V_kait2=np.loadtxt('Filters/KAIT_NICKEL/V_kait2.txt')
lambda_V_kait2=trans_V_kait2[:,0]
s_V_kait2=trans_V_kait2[:,1]
trans_R_kait2=np.loadtxt('Filters/KAIT_NICKEL/R_kait2.txt')
lambda_R_kait2=trans_R_kait2[:,0]
s_R_kait2=trans_R_kait2[:,1]
trans_I_kait2=np.loadtxt('Filters/KAIT_NICKEL/I_kait2.txt')
lambda_I_kait2=trans_I_kait2[:,0]
s_I_kait2=trans_I_kait2[:,1]
dlambda_B_kait2=lambda_B_kait2[1]-lambda_B_kait2[0]
dlambda_V_kait2=lambda_V_kait2[1]-lambda_V_kait2[0]
dlambda_R_kait2=lambda_R_kait2[1]-lambda_R_kait2[0]
dlambda_I_kait2=lambda_I_kait2[1]-lambda_I_kait2[0]
### KAIT 3 ###########
trans_B_kait3=np.loadtxt('Filters/KAIT_NICKEL/B_kait3.txt')
lambda_B_kait3=trans_B_kait3[:,0]
s_B_kait3=trans_B_kait3[:,1]
trans_V_kait3=np.loadtxt('Filters/KAIT_NICKEL/V_kait3.txt')
lambda_V_kait3=trans_V_kait3[:,0]
s_V_kait3=trans_V_kait3[:,1]
trans_R_kait3=np.loadtxt('Filters/KAIT_NICKEL/R_kait3.txt')
lambda_R_kait3=trans_R_kait3[:,0]
s_R_kait3=trans_R_kait3[:,1]
trans_I_kait3=np.loadtxt('Filters/KAIT_NICKEL/I_kait3.txt')
lambda_I_kait3=trans_I_kait3[:,0]
s_I_kait3=trans_I_kait3[:,1]
dlambda_B_kait3=lambda_B_kait3[1]-lambda_B_kait3[0]
dlambda_V_kait3=lambda_V_kait3[1]-lambda_V_kait3[0]
dlambda_R_kait3=lambda_R_kait3[1]-lambda_R_kait3[0]
dlambda_I_kait3=lambda_I_kait3[1]-lambda_I_kait3[0]
### KAIT 4 ###########
trans_B_kait4=np.loadtxt('Filters/KAIT_NICKEL/B_kait4.txt')
lambda_B_kait4=trans_B_kait4[:,0]
s_B_kait4=trans_B_kait4[:,1]
trans_V_kait4=np.loadtxt('Filters/KAIT_NICKEL/V_kait4.txt')
lambda_V_kait4=trans_V_kait4[:,0]
s_V_kait4=trans_V_kait4[:,1]
trans_R_kait4=np.loadtxt('Filters/KAIT_NICKEL/R_kait4.txt')
lambda_R_kait4=trans_R_kait4[:,0]
s_R_kait4=trans_R_kait4[:,1]
trans_I_kait4=np.loadtxt('Filters/KAIT_NICKEL/I_kait4.txt')
lambda_I_kait4=trans_I_kait4[:,0]
s_I_kait4=trans_I_kait4[:,1]
dlambda_B_kait4=lambda_B_kait4[1]-lambda_B_kait4[0]
dlambda_V_kait4=lambda_V_kait4[1]-lambda_V_kait4[0]
dlambda_R_kait4=lambda_R_kait4[1]-lambda_R_kait4[0]
dlambda_I_kait4=lambda_I_kait4[1]-lambda_I_kait4[0]
### Nickel 1 ###########
trans_B_nickel1=np.loadtxt('Filters/KAIT_NICKEL/B_nickel1.txt')
lambda_B_nickel1=trans_B_nickel1[:,0]
s_B_nickel1=trans_B_nickel1[:,1]
trans_V_nickel1=np.loadtxt('Filters/KAIT_NICKEL/V_nickel1.txt')
lambda_V_nickel1=trans_V_nickel1[:,0]
s_V_nickel1=trans_V_nickel1[:,1]
trans_R_nickel1=np.loadtxt('Filters/KAIT_NICKEL/R_nickel1.txt')
lambda_R_nickel1=trans_R_nickel1[:,0]
s_R_nickel1=trans_R_nickel1[:,1]
trans_I_nickel1=np.loadtxt('Filters/KAIT_NICKEL/I_nickel1.txt')
lambda_I_nickel1=trans_I_nickel1[:,0]
s_I_nickel1=trans_I_nickel1[:,1]
dlambda_B_nicke1l=lambda_B_nickel1[1]-lambda_B_nickel1[0]
dlambda_V_nickel1=lambda_V_nickel1[1]-lambda_V_nickel1[0]
dlambda_R_nickel1=lambda_R_nickel1[1]-lambda_R_nickel1[0]
dlambda_I_nickel1=lambda_I_nickel1[1]-lambda_I_nickel1[0]
### Nickel 2 ###########
trans_B_nickel2=np.loadtxt('Filters/KAIT_NICKEL/B_nickel2.txt')
lambda_B_nickel2=trans_B_nickel2[:,0]
s_B_nickel2=trans_B_nickel2[:,1]
trans_V_nickel2=np.loadtxt('Filters/KAIT_NICKEL/V_nickel2.txt')
lambda_V_nickel2=trans_V_nickel2[:,0]
s_V_nickel2=trans_V_nickel2[:,1]
trans_R_nickel2=np.loadtxt('Filters/KAIT_NICKEL/R_nickel2.txt')
lambda_R_nickel2=trans_R_nickel2[:,0]
s_R_nickel2=trans_R_nickel2[:,1]
trans_I_nickel2=np.loadtxt('Filters/KAIT_NICKEL/I_nickel2.txt')
lambda_I_nickel2=trans_I_nickel2[:,0]
s_I_nickel2=trans_I_nickel2[:,1]
dlambda_B_nickel2=lambda_B_nickel2[1]-lambda_B_nickel2[0]
dlambda_V_nickel2=lambda_V_nickel2[1]-lambda_V_nickel2[0]
dlambda_R_nickel2=lambda_R_nickel2[1]-lambda_R_nickel2[0]
dlambda_I_nickel2=lambda_I_nickel2[1]-lambda_I_nickel2[0]
F_B_kait2_func=interpolate.interp1d(lambda_B_kait2,s_B_kait2)
F_V_kait2_func=interpolate.interp1d(lambda_V_kait2,s_V_kait2)
F_R_kait2_func=interpolate.interp1d(lambda_R_kait2,s_R_kait2)
F_I_kait2_func=interpolate.interp1d(lambda_I_kait2,s_I_kait2)
F_B_kait3_func=interpolate.interp1d(lambda_B_kait3,s_B_kait3)
F_V_kait3_func=interpolate.interp1d(lambda_V_kait3,s_V_kait3)
F_R_kait3_func=interpolate.interp1d(lambda_R_kait3,s_R_kait3)
F_I_kait3_func=interpolate.interp1d(lambda_I_kait3,s_I_kait3)
F_B_kait4_func=interpolate.interp1d(lambda_B_kait4,s_B_kait4)
F_V_kait4_func=interpolate.interp1d(lambda_V_kait4,s_V_kait4)
F_R_kait4_func=interpolate.interp1d(lambda_R_kait4,s_R_kait4)
F_I_kait4_func=interpolate.interp1d(lambda_I_kait4,s_I_kait4)
F_B_nickel1_func=interpolate.interp1d(lambda_B_nickel1,s_B_nickel1)
F_V_nickel1_func=interpolate.interp1d(lambda_V_nickel1,s_V_nickel1)
F_R_nickel1_func=interpolate.interp1d(lambda_R_nickel1,s_R_nickel1)
F_I_nickel1_func=interpolate.interp1d(lambda_I_nickel1,s_I_nickel1)
F_B_nickel2_func=interpolate.interp1d(lambda_B_nickel2,s_B_nickel2)
F_V_nickel2_func=interpolate.interp1d(lambda_V_nickel2,s_V_nickel2)
F_R_nickel2_func=interpolate.interp1d(lambda_R_nickel2,s_R_nickel2)
F_I_nickel2_func=interpolate.interp1d(lambda_I_nickel2,s_I_nickel2)
N_pt=5000
lambda_B_kait2=np.linspace(min(lambda_B_kait2),max(lambda_B_kait2),N_pt)
lambda_V_kait2=np.linspace(min(lambda_V_kait2),max(lambda_V_kait2),N_pt)
lambda_R_kait2=np.linspace(min(lambda_R_kait2),max(lambda_R_kait2),N_pt)
lambda_I_kait2=np.linspace(min(lambda_I_kait2),max(lambda_I_kait2),N_pt)
lambda_B_kait3=np.linspace(min(lambda_B_kait3),max(lambda_B_kait3),N_pt)
lambda_V_kait3=np.linspace(min(lambda_V_kait3),max(lambda_V_kait3),N_pt)
lambda_R_kait3=np.linspace(min(lambda_R_kait3),max(lambda_R_kait3),N_pt)
lambda_I_kait3=np.linspace(min(lambda_I_kait3),max(lambda_I_kait3),N_pt)
lambda_B_kait4=np.linspace(min(lambda_B_kait4),max(lambda_B_kait4),N_pt)
lambda_V_kait4=np.linspace(min(lambda_V_kait4),max(lambda_V_kait4),N_pt)
lambda_R_kait4=np.linspace(min(lambda_R_kait4),max(lambda_R_kait4),N_pt)
lambda_I_kait4=np.linspace(min(lambda_I_kait4),max(lambda_I_kait4),N_pt)
lambda_B_nickel1=np.linspace(min(lambda_B_nickel1),max(lambda_B_nickel1),N_pt)
lambda_V_nickel1=np.linspace(min(lambda_V_nickel1),max(lambda_V_nickel1),N_pt)
lambda_R_nickel1=np.linspace(min(lambda_R_nickel1),max(lambda_R_nickel1),N_pt)
lambda_I_nickel1=np.linspace(min(lambda_I_nickel1),max(lambda_I_nickel1),N_pt)
lambda_B_nickel2=np.linspace(min(lambda_B_nickel2),max(lambda_B_nickel2),N_pt)
lambda_V_nickel2=np.linspace(min(lambda_V_nickel2),max(lambda_V_nickel2),N_pt)
lambda_R_nickel2=np.linspace(min(lambda_R_nickel2),max(lambda_R_nickel2),N_pt)
lambda_I_nickel2=np.linspace(min(lambda_I_nickel2),max(lambda_I_nickel2),N_pt)
if filter_name==str('Bkait2'):
F_filter_func=interpolate.interp1d(lambda_B_kait2,F_B_kait2_func(lambda_B_kait2))
lam_filter=lambda_B_kait2
if filter_name==str('Vkait2'):
F_filter_func=interpolate.interp1d(lambda_V_kait2,F_V_kait2_func(lambda_V_kait2))
lam_filter=lambda_V_kait2
if filter_name==str('Rkait2'):
F_filter_func=interpolate.interp1d(lambda_R_kait2,F_R_kait2_func(lambda_R_kait2))
lam_filter=lambda_R_kait2
if filter_name==str('Ikait2'):
F_filter_func=interpolate.interp1d(lambda_I_kait2,F_I_kait2_func(lambda_I_kait2))
lam_filter=lambda_I_kait2
if filter_name==str('Bkait3'):
F_filter_func=interpolate.interp1d(lambda_B_kait3,F_B_kait3_func(lambda_B_kait3))
lam_filter=lambda_B_kait3
if filter_name==str('Vkait3'):
F_filter_func=interpolate.interp1d(lambda_V_kait3,F_V_kait3_func(lambda_V_kait3))
lam_filter=lambda_V_kait3
if filter_name==str('Rkait3'):
F_filter_func=interpolate.interp1d(lambda_R_kait3,F_R_kait3_func(lambda_R_kait3))
lam_filter=lambda_R_kait3
if filter_name==str('Ikait3'):
F_filter_func=interpolate.interp1d(lambda_I_kait3,F_I_kait3_func(lambda_I_kait3))
lam_filter=lambda_I_kait3
if filter_name==str('Bkait4'):
F_filter_func=interpolate.interp1d(lambda_B_kait4,F_B_kait4_func(lambda_B_kait4))
lam_filter=lambda_B_kait4
if filter_name==str('Vkait4'):
F_filter_func=interpolate.interp1d(lambda_V_kait4,F_V_kait4_func(lambda_V_kait4))
lam_filter=lambda_V_kait4
if filter_name==str('Rkait4'):
F_filter_func=interpolate.interp1d(lambda_R_kait4,F_R_kait4_func(lambda_R_kait4))
lam_filter=lambda_R_kait4
if filter_name==str('Ikait4'):
F_filter_func=interpolate.interp1d(lambda_I_kait4,F_I_kait4_func(lambda_I_kait4))
lam_filter=lambda_I_kait4
if filter_name==str('Bnickel1'):
F_filter_func=interpolate.interp1d(lambda_B_nickel1,F_B_nickel1_func(lambda_B_nickel1))
lam_filter=lambda_B_nickel1
if filter_name==str('Vnickel1'):
F_filter_func=interpolate.interp1d(lambda_V_nickel1,F_V_nickel1_func(lambda_V_nickel1))
lam_filter=lambda_V_nickel1
if filter_name==str('Rnickel1'):
F_filter_func=interpolate.interp1d(lambda_R_nickel1,F_R_nickel1_func(lambda_R_nickel1))
lam_filter=lambda_R_nickel1
if filter_name==str('Inickel1'):
F_filter_func=interpolate.interp1d(lambda_I_nickel1,F_I_nickel1_func(lambda_I_nickel1))
lam_filter=lambda_I_nickel1
if filter_name==str('Bnickel2'):
F_filter_func=interpolate.interp1d(lambda_B_nickel2,F_B_nickel2_func(lambda_B_nickel2))
lam_filter=lambda_B_nickel2
if filter_name==str('Vnickel2'):
F_filter_func=interpolate.interp1d(lambda_V_nickel2,F_V_nickel2_func(lambda_V_nickel2))
lam_filter=lambda_V_nickel2
if filter_name==str('Rnickel2'):
F_filter_func=interpolate.interp1d(lambda_R_nickel2,F_R_nickel2_func(lambda_R_nickel2))
lam_filter=lambda_R_nickel2
if filter_name==str('Inickel2'):
F_filter_func=interpolate.interp1d(lambda_I_nickel2,F_I_nickel2_func(lambda_I_nickel2))
lam_filter=lambda_I_nickel2
# interpolation spectre
F_spec=interpolate.interp1d(lam_spec,flux_spec)
# New wavelength vector with wavelength of filter + spectrum
wavelength_to_interpolate=np.concatenate([lam_spec,lam_filter])
# Sort the wavelength
wavelength_to_interpolate.sort()
# We select only the wavelenght in the filter
wavelength_to_interpolate_2=wavelength_to_interpolate[(wavelength_to_interpolate>min(lam_filter)) & (wavelength_to_interpolate<max(lam_filter))]
# We calculate the filter response
interpolate_filter_response=F_filter_func(wavelength_to_interpolate_2)
# We calculate SEDter
SED_inside_filter=F_spec(wavelength_to_interpolate_2)
# num=f*s*lambda
num=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2*wavelength_to_interpolate_2
# num=f*s
dem=SED_inside_filter*interpolate_filter_response*wavelength_to_interpolate_2
# integral de num / integral de dem
lambda_eff_filter=np.trapz(num)*1.0/np.trapz(dem)
return lambda_eff_filter
def ccm_unred(wave, flux, av, **kwargs):
"""
NAME:
CCM_UNRED
PURPOSE:
Deredden a flux vector using the CCM 1989 parameterization
EXPLANATION:
The reddening curve is that of Cardelli, Clayton, and Mathis (1989 ApJ.
345, 245), including the update for the near-UV given by O'Donnell
(1994, ApJ, 422, 158). Parameterization is valid from the IR to the
far-UV (3.5 microns to 0.1 microns).
Users might wish to consider using the alternate procedure FM_UNRED
which uses the extinction curve of Fitzpatrick (1999).
CALLING SEQUENCE:
ccm_unred(wave, flux, ebv [, R_V = ])
INPUT:
WAVE - wavelength vector (Angstroms)
FLUX - calibrated flux vector, same number of elements as WAVE
If only 3 parameters are supplied, then this vector will
updated on output to contain the dereddened flux.
EBV - color excess E(B-V), scalar. If a negative EBV is supplied,
then fluxes will be reddened rather than deredenned.
OUTPUT:
FUNRED - unreddened flux vector, same units and number of elements
as FLUX
OPTIONAL INPUT KEYWORD
R_V - scalar specifying the ratio of total selective extinction
R(V) = A(V) / E(B - V). If not specified, then R_V = 3.1
Extreme values of R(V) range from 2.75 to 5.3
EXAMPLE:
Determine how a flat spectrum (in wavelength) between 1200 A and 3200 A
is altered by a reddening of E(B-V) = 0.1. Assume an "average"
reddening for the diffuse interstellar medium (R(V) = 3.1)
>>> w = 1200 + arange(40)*50 #Create a wavelength vector
>>> f = w*0 + 1 #Create a "flat" flux vector
>>> fnew = ccm_unred(w, f, -0.1) #Redden (negative E(B-V)) flux vector
>>> plot(w,fnew)
NOTES:
(1) The CCM curve shows good agreement with the Savage & Mathis (1979)
ultraviolet curve shortward of 1400 A, but is probably
preferable between 1200 and 1400 A.
(2) Many sightlines with peculiar ultraviolet interstellar extinction
can be represented with a CCM curve, if the proper value of
R(V) is supplied.
(3) Curve is extrapolated between 912 and 1000 A as suggested by
Longo et al. (1989, ApJ, 339,474)
(4) Use the 4 parameter calling sequence if you wish to save the
original flux vector.
(5) Valencic et al. (2004, ApJ, 616, 912) revise the ultraviolet CCM
curve (3.3 -- 8.0 um-1). But since their revised curve does
not connect smoothly with longer and shorter wavelengths, it is
not included here.
REQUIRED MODULES:
scipy, numpy
REVISION HISTORY:
Written <NAME> Hughes/STX January, 1992
Extrapolate curve for wavelengths between 900 and 1000 A Dec. 1993
Use updated coefficients for near-UV from O'Donnell Feb 1994
Allow 3 parameter calling sequence April 1998
Converted to IDLV5.0 April 1998
Ported to Python <NAME> August 2012
"""
# Import modules
import numpy as n
# Set defaults
R_V = 3.1
for key in kwargs:
if key.lower() == 'r_v':
R_V = kwargs[key]
if isinstance(wave, int) or isinstance(wave, float):
x = 10000. / n.array([wave]) # Convert to inverse microns
else:
x = 10000. / n.array(wave) # Convert to inverse microns
npts = len( x )
a = n.zeros((npts))
b = n.zeros((npts))
###############################
good = n.where( (x > 0.3) & (x < 1.1) ) # Infrared
Ngood = len(x[good])
if Ngood > 0:
a[good] = 0.574 * x[good]**(1.61)
b[good] = -0.527 * x[good]**(1.61)
###############################
good = n.where( (x >= 1.1) & (x < 3.3) ) # Optical/NIR
Ngood = len(good[0])
if Ngood > 0: # Use new constants from O'Donnell (1994)
y = x[good] - 1.82
#c1 = n.array([ 0.32999, -0.77530, 0.01979, 0.72085, # Original
# -0.02427, -0.50447, 0.17699, 1. ]) # coefficients
#c2 = n.array([ -2.09002, 5.30260, -0.62251, -5.38434, # from CCM89
# 1.07233, 2.28305, 1.41338, 0. ])
c1 = n.array([ -0.505 , 1.647, -0.827, -1.718, # New coefficients
1.137, 0.701, -0.609, 0.104, 1. ]) # from O'Donnell
c2 = n.array([ 3.347, -10.805, 5.491, 11.102, # (1994)
-7.985, -3.989, 2.908, 1.952, 0. ])
a[good] = n.polyval(c1, y)
b[good] = n.polyval(c2, y)
###############################
good = n.where( (x >= 3.3) & (x < 8) ) # Mid-UV
Ngood = len(x[good])
if Ngood > 0:
y = x[good]
F_a = n.zeros((Ngood))
F_b = n.zeros((Ngood))
good1 = n.where( (y > 5.9) )
Ngood1 = len(y[good1])
if Ngood1 > 0:
y1 = y[good1] - 5.9
F_a[good1] = -0.04473 * y1**2 - 0.009779 * y1**3
F_b[good1] = 0.2130 * y1**2 + 0.1207 * y1**3
a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a
b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b
###############################
good = n.where( (x >= 8) & (x < 11) ) #Far-UV
Ngood = len(x[good])
if Ngood > 0:
y = x[good] - 8.
c1 = [ -0.070, 0.137, -0.628, -1.073 ]
c2 = [ 0.374, -0.420, 4.257, 13.670 ]
a[good] = n.polyval(c1, y)
b[good] = n.polyval(c2, y)
###############################
# Now apply extinction correction to input flux vector
A_V = av
A_lambda = A_V * (a + b / R_V)
return flux * 10.**(0.4 * A_lambda)
| StarcoderdataPython |
4832224 | <reponame>dvzrv/softlayer-python
"""List SSH keys."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command()
@click.option('--sortby',
help='Column to sort by',
type=click.Choice(['id',
'label',
'fingerprint',
'notes']))
@environment.pass_env
def cli(env, sortby):
"""List SSH keys."""
mgr = SoftLayer.SshKeyManager(env.client)
keys = mgr.list_keys()
table = formatting.Table(['id', 'label', 'fingerprint', 'notes'])
table.sortby = sortby
for key in keys:
table.add_row([key['id'],
key.get('label'),
key.get('fingerprint'),
key.get('notes', '-')])
env.fout(table)
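# Example (assumed invocation; the exact command path depends on how the CLI routes
# are registered):
#   $ slcli sshkey list --sortby label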
| StarcoderdataPython |
3347639 | <gh_stars>10-100
'''
This code is part of QuTIpy.
(c) Copyright <NAME>, 2021
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''
import numpy as np
from qutipy.general_functions import ket,dag
def discrete_Weyl_Z(d):
'''
    Generates the d-dimensional discrete Weyl Z (clock / phase) operator.
'''
w=np.exp(2*np.pi*1j/d)
Z=ket(d,0)@dag(ket(d,0))
for i in range(1,d):
Z=Z+w**i*ket(d,i)@dag(ket(d,i))
return Z | StarcoderdataPython |
192723 | from librespot.player.playback.PlayerSession import PlayerSession
| StarcoderdataPython |
104763 | from django.db import models
from bets.models import Bet
from account.models import User
# Create your models here.
class Cart(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE,related_name="user")
bets=models.ManyToManyField(Bet,blank=True)
| StarcoderdataPython |
3317600 | """
Dataclasses; classes whose only purpose is to hold specific data.
"""
import typing
import math
from .. import constants
from .abc import JSONData, Settable, Block
from ..enums import Enchantments, TagType, CodeblockActionType, BlockType, BracketDirection, BracketType
from ..utils import remove_u200b_from_doc, all_attr_eq
from ..constants import DEFAULT_VAL, ITEM_ID_TAG
AnyNumber = typing.Union[int, float]
class Enchantment(Settable):
"""
Represents an Enchantment to be used within :class:`~py2df.classes.mc_types.Item`.
Parameters\u200b
----------
ench_type : :class:`~py2df.enums.misc_mc_enums.Enchantments`
The type of enchantment this is.
level : :class:`int`, optional
The level of this enchantments (default is 1).
.. container:: comparisons
.. describe:: a == b, a != b
Checks if every attribute is the same.
.. describe:: a > b, a >= b, a < b, a <= b
Compares both of the enchantments' levels.
.. container:: operations
Note that, in all operations, ``a`` must be an instance of :class:`Enchantment` , while ``b`` can either be
another instance of the class or be an :class:`int` (or :class:`float` - the results are rounded down).
.. describe:: a + b, a - b, a * b, a ** b, a / b, a // b
Executes said operation on both's levels.
.. describe:: str(a)
            Returns a string in the form "{ench_type} * {level}".
.. describe:: hash(a)
Returns an unique hash representing the Enchantment class, the instance's enchantment type and its
level.
Attributes\u200b
-----------
ench_type : :class:`~py2df.enums.misc_mc_enums.Enchantments`
Type of enchantment.
level : :class:`int`
The level of this enchantment. (Cannot surpass **2 147 483 647**)
"""
__slots__ = ("ench_type", "level")
ench_type: Enchantments
level: int
def __init__(self, ench_type: Enchantments, level: int = 1):
"""
Initialize this Enchantment.
Parameters
----------
ench_type : :class:`~py2df.enums.misc_mc_enums.Enchantments`
The type of enchantment this is.
level : :class:`int`, optional
The level of this enchantments (default is 1).
"""
self.ench_type = Enchantments(ench_type)
self.level = int(level)
if abs(self.level) > constants.MAX_ENCHANTMENT_LEVEL:
raise OverflowError(f"Enchantment level too big (max {constants.MAX_ENCHANTMENT_LEVEL})")
def __repr__(self):
return f"<{self.__class__.__name__} ench_type={self.ench_type.value} level={self.level}>"
def __str__(self):
return self.ench_type.value + f" * {self.level}"
def copy(self) -> "Enchantment":
"""
Produces an identical copy of this :class:`Enchantment` object.
Returns
-------
:class:`Enchantment`
The identical copy of this object.
"""
return Enchantment(self.ench_type, self.level)
def set(self, ench_type: Enchantments = DEFAULT_VAL, level: int = DEFAULT_VAL) -> "Enchantment":
"""
Sets the values of this :class:`Enchantment` .
Parameters
----------
ench_type : :class:`~py2df.enums.misc_mc_enums.Enchantments`, optional
The type of enchantment this is.
level : :class:`int`
The level of this enchantment.
Returns
-------
:class:`Enchantment`
self to allow chaining
"""
if ench_type != DEFAULT_VAL:
self.ench_type = Enchantments(ench_type)
if level != DEFAULT_VAL:
self.level = int(level)
return self
def __eq__(self, other: "Enchantment") -> bool:
return all_attr_eq(self, other)
def __ne__(self, other: "Enchantment") -> bool:
return not self.__eq__(other)
def __lt__(self, other: typing.Union["Enchantment", AnyNumber]):
if type(self) == type(other):
return self.level < other.level
else:
return self.level < other
def __le__(self, other: typing.Union["Enchantment", AnyNumber]):
if type(self) == type(other):
return self.level <= other.level
else:
return self.level <= other
def __gt__(self, other: typing.Union["Enchantment", AnyNumber]):
if type(self) == type(other):
return self.level > other.level
else:
return self.level > other
def __ge__(self, other: typing.Union["Enchantment", AnyNumber]):
if type(self) == type(other):
return self.level >= other.level
else:
return self.level >= other
def __hash__(self):
return hash((self.__class__, self.ench_type, self.level))
def __add__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level += other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __radd__(self, other: typing.Union["Enchantment", AnyNumber]):
return self.__add__(other)
def __mul__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level *= other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __rmul__(self, other: typing.Union["Enchantment", AnyNumber]):
return self.__mul__(other)
def __sub__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level -= other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __truediv__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level /= other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __floordiv__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level //= other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __mod__(self, other: typing.Union["Enchantment", AnyNumber]):
new = self.copy()
new.level %= other.level if type(self) == type(other) else other
new.level = int(new.level)
return new
def __pow__(self, other: typing.Union["Enchantment", AnyNumber], modulo=None):
new = self.copy()
new.level = int(pow(
self.level, other.level if type(self) == type(other) else other, modulo
))
return new
def __abs__(self):
return self.copy().set(level=abs(self.level))
def __pos__(self):
return self.copy()
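# Usage sketch (illustrative only; `Enchantments.Sharpness` is an assumed enum
# member -- substitute any real member of the Enchantments enum):
#
#   sharpness = Enchantment(Enchantments.Sharpness, level=3)
#   stronger = sharpness + 2     # arithmetic operates on the level -> level 5
#   stronger >= sharpness        # comparisons compare levels -> True
#   str(stronger)                # "<enchantment type value> * 5"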
class Tag(JSONData):
"""
Represents a tag, generally for internal use.
Parameters
----------\u200b
tag : str
The tag's name.
option : Union[:class:`bool`, :class:`int`, :class:`str`, :class:`~py2df.enums.enum_util.TagType`]
The option chosen for this tag.
action : :class:`~py2df.enums.enum_util.CodeblockActionType`
The action type of the codeblock this tag is in.
block : :class:`~py2df.enums.parameters.BlockType`
The type of codeblock this tag is in.
Attributes
----------\u200b
tag : str
The tag's name.
option : Union[:class:`bool`, :class:`int`, :class:`str`, :class:`~py2df.enums.enum_util.TagType`]
The option chosen for this tag.
action : :class:`~py2df.enums.enum_util.CodeblockActionType`
The action type of the codeblock this tag is in.
block : :class:`~py2df.enums.parameters.BlockType`
The type of codeblock this tag is in.
"""
tag: str
option: typing.Union[bool, int, TagType]
action: CodeblockActionType
block: BlockType
def __init__(
self, tag: str, option: typing.Union[bool, int, str, TagType], action: CodeblockActionType, block: BlockType
):
"""
Initializes this tag.
Parameters
----------
tag : str
The tag's name.
option : Union[:class:`bool`, :class:`int`, :class:`str`, :class:`~py2df.enums.enum_util.TagType`]
The option chosen for this tag.
action : :class:`~py2df.enums.enum_util.CodeblockActionType`
The action type of the codeblock this tag is in.
block : :class:`~py2df.enums.parameters.BlockType`
The type of codeblock this tag is in.
"""
self.tag = str(tag)
self.option = option
self.action = action
self.block = BlockType(block)
def as_json_data(self) -> dict:
"""
Produces a JSON-serializable dict representing this tag.
Returns
-------
:class:`dict`
"""
return dict(
id=ITEM_ID_TAG,
data=dict(
option=getattr(self.option, "value", str(self.option)),
tag=str(self.tag),
action=getattr(self.action, "value", str(self.action)),
block=getattr(self.block, "value", str(self.block))
)
)
def set(
self, tag: str = DEFAULT_VAL, option: typing.Union[bool, int, TagType] = DEFAULT_VAL,
action: CodeblockActionType = DEFAULT_VAL, block: BlockType = DEFAULT_VAL
) -> "Tag":
"""
Sets given :class:`Tag` attributes.
Parameters
----------
tag : str, optional
The tag's name.
option : Union[:class:`bool`, :class:`int`, :class:`~py2df.enums.enum_util.TagType`], optional
The option chosen for this tag.
action : :class:`~py2df.enums.enum_util.CodeblockActionType`, optional
The action type of the codeblock this tag is in.
block : :class:`~py2df.enums.parameters.BlockType`, optional
The type of codeblock this tag is in.
Returns
-------
:class:`Tag`
self to allow chaining
"""
if tag != DEFAULT_VAL:
self.tag = str(tag)
if option != DEFAULT_VAL:
self.option = option
if action != DEFAULT_VAL:
self.action = action
if block != DEFAULT_VAL:
self.block = BlockType(block)
return self
def __eq__(self, other):
return all_attr_eq(self, other)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} tag={self.tag} option={self.option} action={self.action} \
block={self.block}>"
def __str__(self) -> str:
return self.tag
def __hash__(self):
return hash((self.__class__.__name__, self.tag, str(self.option), str(self.action), str(self.block)))
class Bracket(Block, JSONData):
"""Represents a Bracket block (used within If's and Repeats).
Parameters
----------\u200b
direction : :class:`~py2df.enums.parameters.BracketDirection`
The direction of this bracket (one of :attr:`~py2df.enums.parameters.BracketDirection.OPEN`
and :attr:`~py2df.enums.parameters.BracketDirection.CLOSE`).
bracket_type : :class:`~py2df.enums.parameters.BracketType`
The type of this bracket, determining where it is used (either used on If's, represented by
:attr:`~py2df.enums.parameters.BracketType.NORM`, or with a Repeat, represented by
:attr:`~py2df.enums.parameters.BracketType.REPEAT`).
Attributes
----------\u200b
direction : :class:`~py2df.enums.parameters.BracketDirection`
The direction of this bracket (one of :attr:`~py2df.enums.parameters.BracketDirection.OPEN`
and :attr:`~py2df.enums.parameters.BracketDirection.CLOSE`).
bracket_type : :class:`~py2df.enums.parameters.BracketType`
The type of this bracket, determining where it is used (either used on If's, represented by
:attr:`~py2df.enums.parameters.BracketType.NORM`, or with a Repeat, represented by
:attr:`~py2df.enums.parameters.BracketType.REPEAT`).
"""
__slots__ = ("direction", "bracket_type")
direction: BracketDirection
bracket_type: BracketType
def __init__(self, direction: BracketDirection, bracket_type: BracketType):
"""
Inits this Bracket.
Parameters
----------
direction : :class:`~py2df.enums.parameters.BracketDirection`
The direction of this bracket (one of :attr:`~py2df.enums.parameters.BracketDirection.OPEN`
and :attr:`~py2df.enums.parameters.BracketDirection.CLOSE`).
bracket_type : :class:`~py2df.enums.parameters.BracketType`
The type of this bracket, determining where it is used (either used on If's, represented by
:attr:`~py2df.enums.parameters.BracketType.NORM`, or with a Repeat, represented by
:attr:`~py2df.enums.parameters.BracketType.REPEAT`).
"""
self.direction: BracketDirection = BracketDirection(direction)
self.bracket_type: BracketType = BracketType(bracket_type)
def as_json_data(self) -> dict:
"""Produces a JSON-serializable dict representing this Bracket.
Returns
-------
:class:`dict`
A JSON-serializable dict representing this Bracket.
"""
return dict(
id=constants.BRACKET_ID,
direct=self.direction.value,
type=self.bracket_type.value
)
def __repr__(self):
return f"<{self.__class__.__name__} direction={repr(self.direction.value)} \
bracket_type={repr(self.bracket_type.value)}>"
remove_u200b_from_doc(Enchantment, Tag, Bracket)
| StarcoderdataPython |
3380057 | """Routes for CoVID-19 dashboard app."""
from flask import render_template
from app import covid_app
from app import data
from app import plotting
from app import config as cfg
from app import constants as cts
from bokeh.embed import components
def filter_secondary_links(region, num_links=2):
"""Return `num_links` secondary links, except `region`."""
return [link for link in cts.REGIONAL_LINKS if link["name"] != region][:num_links]
@covid_app.route("/")
def index():
"""Main page of the dashboard."""
# Check pre-rendered HTML
rendered = data.get_rendered_page("country")
if rendered:
return rendered
city = "Россия"
main_links = filter_secondary_links(city)
secondary_links = []
# Getting data
full_data = data.get_newest_data()
country_data_full, country_data = data.get_region_data(full_data, city)
swabs_data = data.get_swabs_data()
swabs_data = swabs_data.join(country_data_full["total"], how="outer")
# Cases statistics block
stats = []
for category in cts.CATEGORIES:
style = cts.CATEGORIES_STYLES[category].copy()
style["value"] = country_data[category]
style["diff"] = country_data[f"{category}_diff"]
stats.append(style)
# Map plot
borders = data.get_geo_data()
latest_data = full_data.iloc[-1].unstack(level=-1)
latest_diff = (full_data
.diff()
.iloc[-1]
.unstack(level=-1)
.rename(lambda x: f"{x}_diff", axis=1))
map_data = (borders
.join(latest_data, on="REGION")
.join(latest_diff, on="REGION")
.fillna(0))
map_data["total_color"] = 1. + map_data["total"]
map_plot = plotting.plot_map(map_data)
# Cases plots
country_data_full["active"] = (country_data_full["total"]
- country_data_full["recovered"]
- country_data_full["died"])
cases_plot = plotting.plot_region(country_data_full, city, plot_cols=cts.CATEGORIES[:3] + ["active"])
cases_log_plot = plotting.plot_region(country_data_full, city, log_y=True,
plot_cols=cts.CATEGORIES[:3] + ["active"])
# Swabs plots
swabs_plot = plotting.plot_swabs(swabs_data, city)
swabs_log_plot = plotting.plot_swabs(swabs_data, city, x_col="total")
# Getting Bokeh components
script, div = components({"cases": cases_plot, "cases_log": cases_log_plot,
"swabs": swabs_plot, "swabs_log": swabs_log_plot,
"map": map_plot, })
# Rendering HTML
rendered = render_template("main.html", city=city,
main_links=main_links,
secondary_links=secondary_links,
stats=stats,
bokeh_script=script,
**div)
data.save_rendered_page("country", rendered)
return rendered
@covid_app.route("/moscow")
def moscow():
"""Moscow-specific page."""
# Check pre-rendered HTML
city = "Москва"
rendered = data.get_rendered_page(city)
if rendered:
return rendered
main_links = [{"name": "Больницы", "link": "#hospitals"},
{"name": "Транспорт", "link": "#transport"}]
secondary_links = filter_secondary_links(city)
# Getting data
full_data = data.get_newest_data()
moscow_data_full, moscow_data = data.get_region_data(full_data, city)
# Cases statistics block
stats = []
for category in cts.CATEGORIES[:-1]:
style = cts.CATEGORIES_STYLES[category].copy()
style["value"] = moscow_data[category]
style["diff"] = moscow_data[f"{category}_diff"]
stats.append(style)
# Cases plots
moscow_data_full["active"] = (moscow_data_full["total"]
- moscow_data_full["recovered"]
- moscow_data_full["died"])
cases_plot = plotting.plot_region(moscow_data_full, city, plot_cols=cts.CATEGORIES[:3] + ["active"])
cases_log_plot = plotting.plot_region(moscow_data_full, city, log_y=True,
plot_cols=cts.CATEGORIES[:3] + ["active"])
# Transport plots
transport_data = data.get_data_by_key([cts.MSK_DIR, "transport"], sort_by="date", set_index="date")
public_tr_plot = plotting.plot_region(transport_data / 100., city,
plot_cols=[tr["key"] for tr in cts.PUBLIC_TR_COLS],
legend_map=[tr["name"] for tr in cts.PUBLIC_TR_COLS],
legend_loc="top_right",
dt_fmt="%d-%m-%Y %H:%S",
fmt="{0 %}",
set_yticks=True,
width_policy="max",
height=int(0.75*cfg.MAX_MAIN_HEIGHT),
xaxis_ticks=5, yrange=(-1.1, 0))
private_tr_plot = plotting.plot_region(transport_data / 100., city,
plot_cols=[tr["key"] for tr in cts.PRIVATE_TR_COLS],
legend_map=[tr["name"] for tr in cts.PRIVATE_TR_COLS],
legend_loc="top_right",
dt_fmt="%d-%m-%Y %H:%S",
fmt="{0 %}",
set_yticks=True,
width_policy="max",
height=int(0.75*cfg.MAX_MAIN_HEIGHT),
xaxis_ticks=5, yrange=(-1.1, 0))
plots = {"cases": cases_plot, "cases_log": cases_log_plot,
"public_transport": public_tr_plot, "private_transport": private_tr_plot}
# Transport block
transport_data_latest = transport_data.iloc[-1].to_dict()
tr_stats = []
for category in cts.PUBLIC_TR_COLS + cts.PRIVATE_TR_COLS:
tr_stats.append({"name": category["name"].capitalize(),
"value": transport_data_latest[category["key"]]})
# Age distribution
age_data = data.get_data_by_key([cts.MSK_DIR, "age"], sort_by="date", set_index="date")
raw_ages_cols = age_data.columns[1:-4:2].tolist()
raw_perc_cols = age_data.columns[2:-4:2].tolist()
raw_ages_cols = ["children"] + raw_ages_cols
raw_perc_cols = ["children%"] + raw_perc_cols
daily_stats = moscow_data_full[cts.DAILY_DISCHARGE_CATEGORIES].diff().loc[age_data.index]
daily_plot = plotting.plot_cases_bar(age_data[raw_ages_cols], city,
cases_neg=daily_stats, yrange=cts.DAILY_RANGE,
width=cfg.MAX_MAIN_WIDTH * 2,
legend_loc="bottom_left")
for cl in raw_perc_cols:
age_data[f"{cl}_perc"] = age_data[cl] / 100.
age_plot = plotting.plot_cases_bar(age_data.rename({"total": "total_cases"}, axis=1),
city, pos_cols=raw_perc_cols,
width=cfg.MAX_MAIN_WIDTH * 2, height=cfg.MIN_MAIN_HEIGHT,
skip_legend=True,
total_col="total_cases",
fmt="{0.0 %}",
suffix="_perc")
plots["daily_plot"] = daily_plot
plots["age_plot"] = age_plot
# Hospitals
hospitals = []
for hospital in cts.MSK_HOSPITALS:
hospital_data = data.get_data_by_key([cts.MSK_DIR, cts.MSK_HOSPITALS_DIR, hospital["key"]])
hospital_plot = plotting.plot_region(hospital_data, hospital["name"],
plot_cols=[tr["key"] for tr in hospital["fields"]],
legend_map=[tr["name"] for tr in hospital["fields"]],
glyphs={tr["key"]: tr["glyph"]
for tr in hospital["fields"] if "glyph" in tr},
alphas={tr["key"]: tr["alpha"]
for tr in hospital["fields"] if "alpha" in tr},
bar_bottom=0,
alpha=0.9,
legend_loc="top_left",
width_policy="max",
width=cfg.MAX_MAIN_WIDTH * 2,
height_policy="fit",
additional_tools=["ywheel_zoom", "ypan", "reset"])
plots[hospital["key"]] = hospital_plot
hospitals.append(hospital)
# Getting Bokeh components
script, div = components(plots)
for hospital in hospitals:
hospital_plot = div.pop(hospital["key"])
hospital["plot"] = hospital_plot
# Rendering HTML
rendered = render_template("moscow.html", city=city,
main_links=main_links,
secondary_links=secondary_links,
stats=stats,
tr_stats=tr_stats,
bokeh_script=script,
hospitals=hospitals,
**div)
data.save_rendered_page("moscow", rendered)
return rendered
| StarcoderdataPython |
60201 | """
*Lower-East Block* ⠨
The lower-east block gi.
"""
from dataclasses import dataclass
from ...._gi import Gi
from ..._gi import StrismicGi
from ...east import EasternGi
from ..._number import BlockGi
from .._gi import LowerGi
__all__ = ["LowerEastBlock"]
@dataclass
class LowerEastBlock(
Gi,
StrismicGi,
LowerGi,
EasternGi,
BlockGi,
):
pass
| StarcoderdataPython |
1735080 | <filename>setup.py<gh_stars>0
from setuptools import setup, find_packages
setup(
name="dodocs",
description="",
version="1.0",
author="<NAME>",
author_email="<EMAIL>",
# url="",
packages=find_packages()
) | StarcoderdataPython |
3345869 | from argparse import ArgumentParser
from utils import sequence_to_parenthesis, flat_list, rebuild_input_sentence
from tree import SeqTree, SyntacticDistanceEncoder
from collections import Counter
import codecs
import os
import copy
import sys
import warnings
"""
To encode:
python /home/david/Escritorio/encoding2multitask.py \
--input /home/david/Escritorio/dataset/ptb/ptb-dev.seq_lu \
--output /tmp/ptb-dev.multitask \
--status encode
To decode:
python /home/david/Escritorio/encoding2multitask.py \
--input /tmp/ptb-test.multitask \
--output /tmp/ptb-test.reversed \
--status decode
"""
def tag_to_multitask(tag, multitask_char, split_char):
tag_split = tag.split(split_char)
#It is a tag that encodes (level, label, leaf unary branch)
if len(tag_split) == 3:
return multitask_char.join(tag_split)
#It is a regular tag
elif len(tag_split) == 2:
return multitask_char.join((tag_split[0], tag_split[1], "-EMPTY-"))
elif tag in ["-BOS-","-EOS-", "NONE"]:
return multitask_char.join([tag,tag,tag])
else:
warnings.warn("The expected multitask label only contains labels for one task: "+tag)
return multitask_char.join([tag,"-EMPTY-","-EMPTY-"])
# print tag, tag_split
# raise NotImplementedError("len(tag_split)==1")
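# Illustrative worked examples, using the script's default multitask_char "{}"
# and split_char "@":
#   tag_to_multitask("1@NP", "{}", "@")     -> "1{}NP{}-EMPTY-"
#   tag_to_multitask("1@NP@VB", "{}", "@")  -> "1{}NP{}VB"
#   tag_to_multitask("-BOS-", "{}", "@")    -> "-BOS-{}-BOS-{}-BOS-"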
def is_beginning_of_chunk(c,c_before):
return c != c_before and c in ["NP","VP", "PP"]
def is_end_of_chunk(c,c_next):
return c != c_next
def is_chunkeable(c):
return c in ["NP","VP","PP"]
def unk_word(word, most_common, uncommon):
if word == "-EMPTY-":
return word
if word.isdigit():
return "0"
if word not in uncommon:
return "-oov-"
if word not in most_common:
return "-unk-"
return word
"""
Returns a sequence of labels to predict the position located
at n positions to the right
"""
def to_next_label(labels,n):
next_labels = []
for idl,l in enumerate(labels):
if n > 0:
if idl+n > len(labels)-n:
next_labels.append("-EMPTY-")
else:
next_labels.append(labels[idl+n])
else:
if idl+n < 0:
next_labels.append("-EMPTY-")
else:
next_labels.append(labels[idl+n])
return next_labels
"""
NOTE: This is not really useful in the current format
"""
def to_chunks(constituents):
chunk_sequence = []
c_before = None
for idc,c in enumerate(constituents):
if is_beginning_of_chunk(c, c_before):
chunk_sequence.append("B-"+c)
elif is_chunkeable(c):
chunk_sequence.append("I-"+c)
else:
chunk_sequence.append("O")
c_before = c
return chunk_sequence
"""
Transforms an encoding of a tree in a relative scale into an
encoding of the tree in an absolute scale.
"""
def to_absolute_levels(relative_levels, levels_to_encode):
absolute_sequence = [0]*len(relative_levels)
current_level = 0
for j,level in enumerate(relative_levels):
if level in ["-BOS-","-EOS-", "NONE"]:
absolute_sequence[j] = "-EMPTY-"
else:
if level == "ROOT":
current_level=1
label_j=str(current_level)
elif "ROOT" in level:
current_level+=int(level.replace("ROOT",""))
label_j = str(current_level)
else:
current_level+= int(level)
label_j=str(current_level)
if int(label_j) <= levels_to_encode:
absolute_sequence[j] = label_j
else:
absolute_sequence[j] = "-EMPTY-"
return absolute_sequence
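# Illustrative worked example: relative increments are accumulated into absolute
# levels, and levels above `levels_to_encode` are masked with "-EMPTY-", e.g.
#   to_absolute_levels(["-BOS-", "ROOT", "1", "-1", "-EOS-"], 3)
#   -> ["-EMPTY-", "1", "2", "1", "-EMPTY-"]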
# TODO: What to do when not every task returns a -BOS-/-EOS- where one is expected? A voting approach, perhaps?
def multitag_to_tag(multitag, multitask_char, split_char):
multitag_split = multitag.split(multitask_char)[0:3]
if multitag_split[1] in ["-BOS-","-EOS-","NONE"]:
return multitag_split[1]
if multitag_split[2] != "-EMPTY-":
return split_char.join(multitag_split)
else:
return split_char.join(multitag_split[0:2])
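# Illustrative worked examples (the inverse of tag_to_multitask above, with the
# default multitask_char "{}" and split_char "@"):
#   multitag_to_tag("1{}NP{}-EMPTY-", "{}", "@") -> "1@NP"
#   multitag_to_tag("1{}NP{}VB", "{}", "@")      -> "1@NP@VB"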
def add_tag(l, new_elements, multitask_char):
for idtoken, token in enumerate(l):
token[2] += multitask_char+new_elements[idtoken]
def decode_int(preds):
#f_output = codecs.open(args.output,"w")
decoded_output = ''
sentence = []
#with codecs.open(args.input) as f_input:
#lines = f_input.readlines()
# print(preds)
for l in preds.split('^^'):
if l != "\n":
# print(l)
word,postag,label = l.strip().split("\t")
label = multitag_to_tag(label,"{}","@") #The tasks that we care about are just the first three ones.
sentence.append([word,postag,label])
#f_output.write("\t".join([word,postag,label])+"\n")
else:
# print("END")
for token in sentence:
decoded_output += "\t".join(token)+"\n"
#f_output.write("\t".join(token)+"\n")
sentence = []
#f_output.write("\n")
decoded_output +="\n"
# print("dec: ",decoded_output)
return decoded_output
if __name__ == '__main__':
arg_parser = ArgumentParser()
arg_parser.add_argument("--input", dest="input",
help="Path to the original encoding used in Constituent Parsing as Sequence Labeling",
default=None)
arg_parser.add_argument("--output", dest="output",
help="Path to the output encoding, formatted as multitask learning", default=None)
arg_parser.add_argument("--status", dest="status",
help="[encode|decode]")
arg_parser.add_argument("--add_abs_scale", dest="add_abs_scale", action="store_true", default=False,
help="Add the prediction of the level in absolute scale as an auxiliary tasks")
arg_parser.add_argument("--abs_levels",dest="abs_levels",
help="Levels for which to predict the absolute scale. An integer number")
arg_parser.add_argument("--add_chunks", dest="add_chunks",action="store_true",
help="Add chunks as an auxiliary task")
arg_parser.add_argument("--add_next_level", dest="add_next_level", action="store_true",
help="Add the prediction of the next level as an auxiliary task")
arg_parser.add_argument("--add_prev_level", dest="add_prev_level", action="store_true",
help="Ad the prediction of the previous level as an auxiliary task")
arg_parser.add_argument("--add_next_label", dest="add_next_label", action="store_true",
help="Add the prediction of the next label as an auxiliary task")
arg_parser.add_argument("--add_prev_label", dest="add_prev_label", action="store_true",
help="Add the prediction of the prev label as an auxiliary task")
arg_parser.add_argument("--add_syntactic_distances", dest="add_syntactic_distances", action="store_true",
help="Add the prediction of syntactic distances as an auxiliary task")
arg_parser.add_argument("--add_next_word", dest="add_next_word", action="store_true",
help="Add the prediction of the next word as an auxiliary task")
arg_parser.add_argument("--add_prev_word", dest="add_prev_word", action="store_true",
help="Add the prediction of the prev word as an auxiliary task")
arg_parser.add_argument("--common_words", dest="common_words",
help="Path to the file containing the list of common words")
arg_parser.add_argument("--uncommon_words", dest="uncommon_words",
help="Path to th file containing the list of uncommon words")
arg_parser.add_argument("--split_char", dest="split_char",type=str,
default="@")
arg_parser.add_argument("--multitask_char", dest="multitask_char",type=str,
default="{}")
args = arg_parser.parse_args()
auxiliary_tasks = [] #["absolute_scale"]
sentence = []
reload(sys)
sys.setdefaultencoding('UTF8')
if args.status == "encode":
f_dest = codecs.open(args.output,"w")
with codecs.open(args.input) as f_input:
lines = f_input.readlines()
if args.add_next_word or args.add_prev_word:
with codecs.open(args.common_words) as f:
most_common = set([l.strip("\n") for l in f.readlines()])
with codecs.open(args.uncommon_words) as f:
uncommon = set([l.strip("\n") for l in f.readlines()])
#Compute number of words, postags and labels for
#different purposes
relative_levels = []
label_sequences = []
ori_input, ori_labels = [],[]
words = []
for l in lines:
if l != "\n":
word,postag,label = l.strip().split("\t")[0], "\t".join(l.strip().split("\t")[1:-1]), l.strip().split("\t")[-1]
#tuple(l.strip().split("\t"))
#word,postag,label = tuple(l.strip().split("\t"))
words.append(word)
if args.add_syntactic_distances:
ori_input.append((word,postag))
ori_labels.append(label)
label = tag_to_multitask(label, args.multitask_char, args.split_char)
if args.add_abs_scale or args.add_next_level or args.add_prev_level:
relative_levels.append(label.split(args.multitask_char)[0])
if args.add_chunks or args.add_next_label or args.add_prev_label:
label_sequences.append(label.split(args.multitask_char)[1])
sentence.append([word,postag,label])
#f_output.write("\t".join([word,postag,label])+"\n")
else:
if args.add_abs_scale:
absolute_levels = to_absolute_levels(relative_levels, int(args.abs_levels))
add_tag(sentence,absolute_levels,args.multitask_char)
if args.add_chunks:
chunks = to_chunks(label_sequences)
add_tag(sentence,chunks,args.multitask_char)
#Predicting the next and the previous levels
if args.add_next_level:
next_levels = to_next_label(relative_levels, 1)
add_tag(sentence,next_levels,args.multitask_char)
if args.add_prev_level:
prev_levels = to_next_label(relative_levels, -1)
add_tag(sentence, prev_levels,args.multitask_char)
#Predicting the next and the previous labels
if args.add_next_label:
next_labels = to_next_label(label_sequences,1)
add_tag(sentence, next_labels,args.multitask_char)
if args.add_prev_label:
prev_labels = to_next_label(label_sequences,-1)
add_tag(sentence, prev_labels,args.multitask_char)
#Predicting the next and previous word
if args.add_next_word:
next_words = to_next_label(words, 1)
aux = []
for w in next_words:
aux.append(unk_word(w,most_common,uncommon))
add_tag(sentence,aux,args.multitask_char)
if args.add_prev_word:
prev_words = to_next_label(words,-1)
aux = []
for w in prev_words:
aux.append(unk_word(w,most_common,uncommon))
add_tag(sentence, aux,args.multitask_char)
if args.add_syntactic_distances:
tree = sequence_to_parenthesis([ori_input], [ori_labels])
tree = SeqTree.fromstring(tree[0], remove_empty_top_bracketing=True)
tree.collapse_unary(collapsePOS=True, collapseRoot=True)
syntactic_distances = []
SyntacticDistanceEncoder().encode(tree, syntactic_distances)
#reversing the labels
set_distances = set(syntactic_distances)
set_distances.remove(0)
ori_order = sorted(set_distances, reverse=True)
reverse_order = sorted(set_distances, reverse=False)
#If we want to do it reversed, but then it is closer to our encoding and pottentially not so useful
# map_distances = {o:r for o, r in zip(ori_order, reverse_order)}
# map_distances.update({0:0})
# reversed_syntactic_distances = ["-1"]
# reversed_syntactic_distances.extend([str(map_distances[d]) for d in syntactic_distances])
# reversed_syntactic_distances.append("-1")
syntactic_distances.insert(0,"-1")
syntactic_distances.append("-1")
add_tag(sentence, [str(s) for s in syntactic_distances],args.multitask_char)
#sentence.append([""])
for token in sentence:
f_dest.write("\t".join(token)+"\n")
f_dest.write("\n")
sentence = []
relative_levels = []
absolute_levels = []
label_sequences = []
ori_input = []
ori_labels = []
words = []
elif args.status == "decode":
f_output = codecs.open(args.output,"w")
labels = []
with codecs.open(args.input) as f_input:
lines = f_input.readlines()
for l in lines:
if l != "\n":
word,postag,label = l.strip().split("\t")[0], "\t".join(l.strip().split("\t")[1:-1]), l.strip().split("\t")[-1]
#word,postag,label = l.strip().split("\t")
label = multitag_to_tag(label, args.multitask_char, args.split_char) #The tasks that we care about are just the first three ones.
sentence.append(l)
labels.append(label)
#sentence.append([word,postag,label])
#f_output.write("\t".join([word,postag,label])+"\n")
else:
for token,label in zip(rebuild_input_sentence(sentence), labels):
f_output.write("\t".join(token)+"\t"+label+"\n")
sentence = []
labels = []
f_output.write("\n")
| StarcoderdataPython |
14245 | # Generated by Django 2.2.1 on 2022-02-25 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mutational_landscape', '0002_auto_20180117_1457'),
]
operations = [
migrations.RemoveField(
model_name='diseasemutations',
name='protein',
),
migrations.RemoveField(
model_name='diseasemutations',
name='residue',
),
migrations.DeleteModel(
name='CancerMutations',
),
migrations.DeleteModel(
name='DiseaseMutations',
),
]
| StarcoderdataPython |
3270586 | <reponame>UTbioinf/InvDet
#!/usr/bin/env python
import pysam
import math
import argparse
class FileWriter(object):
def __init__(self, write_type = "two-file", prefix = "output", use_pacbio_head = False):
self._write_type = write_type
self._prefix = prefix
self._fout1 = None
self._fout2 = None
self._index = 0
self._write = self._get_write_func( write_type )
self._use_pacbio_head = use_pacbio_head
def set_write_type(self, write_type):
self._write_type = write_type
self._write = self._get_write_func( write_type )
def set_prefix(self, prefix):
        self._prefix = prefix
def open(self):
if self._write_type == "two-file" or self._write_type == "MP":
self._fout1 = open(self._prefix + "_1.fastq", "w")
self._fout2 = open(self._prefix + "_2.fastq", "w")
else:
self._fout1 = open(self._prefix + ".fastq", "w")
self._index = 0
def close(self):
if self._fout1:
self._fout1.close()
self._fout1 = None
if self._fout2:
self._fout2.close()
self._fout2 = None
def write(self, head, head_remaining, seq, qual = None):
self._write(head, ' ' + head_remaining if head_remaining else "", seq, qual)
def _get_write_func(self, write_type):
if write_type == "two-file":
return self._write_two_file
elif write_type == "single-file":
return self._write_single_file
elif write_type == "afun":
return self._write_afun
elif write_type == "PE":
return self._write_PE
elif write_type == "MP":
return self._write_MP
else:
raise ValueError("Unknown file type '{}'to write".format( write_type ))
def _nuc_rc(self, ch):
if ch == 'A': return 'T'
if ch == 'a': return 't'
if ch == 'T': return 'A'
if ch == 't': return 'a'
if ch == 'G': return 'C'
if ch == 'g': return 'c'
if ch == 'C': return 'G'
if ch == 'c': return 'g'
if ch == 'N': return 'N'
if ch == 'n': return 'n'
raise ValueError("unknown base [{}]".format(ch))
def _gen_rc(self, seq):
return "".join([ self._nuc_rc( each ) for each in seq[::-1] ])
def _write_two_file(self, head, head_remaining, seq, qual = None):
        length = len(seq) // 3  # floor division: length is used below as a slice bound
if length == 0: return
self._write_fastq("@{}/1{}\n".format(head, head_remaining),
seq[:length], qual[:length] if qual else ("!" * length), self._fout1)
self._write_fastq("@{}/2{}\n".format(head, head_remaining),
seq[-length:], qual[-length:] if qual else ("!" * length), self._fout2)
def _write_MP(self, head, head_remaining, seq, qual = None):
        length = len(seq) // 3
if length == 0: return
self._write_fastq("@{}/1{}\n".format(head, head_remaining),
seq[:length], qual[:length] if qual else ("!" * length), self._fout1)
self._write_fastq("@{}/2{}\n".format(head, head_remaining),
self._gen_rc( seq[-length:]) , qual[:-length-1:-1] if qual else ("!" * length), self._fout2)
def _write_single_file(self, head, head_remaining, seq, qual = None):
        length = len(seq) // 3
if length == 0: return
self._write_fastq("@{}/1{}\n".format(head, head_remaining),
seq[:length], qual[:length] if qual else ("!" * length), self._fout1)
self._write_fastq("@{}/2{}\n".format(head, head_remaining),
seq[-length:], qual[-length:] if qual else ("!" * length), self._fout1)
def _write_afun(self, head, head_remaining, seq, qual = None):
total_len = len(seq)
        length = total_len // 3
if length == 0: return
pacbio_header = "/{}/0_{}".format( self._index, length ) if self._use_pacbio_head else ""
self._write_fastq("@afun{}_5_{}{}\n".format(self._index, total_len, pacbio_header),
seq[:length], qual[:length] if qual else ("!" * length), self._fout1)
pacbio_header = "/{}/{}_{}".format( self._index, total_len - length, total_len ) if self._use_pacbio_head else ""
self._write_fastq("@afun{}_3_{}{}\n".format(self._index, total_len, pacbio_header),
seq[-length:], qual[-length:] if qual else ("!" * length), self._fout1)
self._index += 1
def _write_PE(self, head, head_remaining, seq, qual = None):
total_len = len(seq)
        length = total_len // 3
if length == 0: return
pacbio_header = "/{}/0_{}".format( self._index, length ) if self._use_pacbio_head else ""
self._write_fastq("@PE_{}_5_{}{}\n".format(self._index, total_len, pacbio_header),
seq[:length], qual[:length] if qual else ("!" * length), self._fout1)
pacbio_header = "/{}/{}_{}".format( self._index, total_len - length, total_len ) if self._use_pacbio_head else ""
self._write_fastq("@PE_{}_3_{}{}\n".format(self._index, total_len, pacbio_header),
seq[-length:], qual[-length:] if qual else ("!" * length), self._fout1)
self._index += 1
def _write_fastq(self, head_with_newline, seq, qual, fout):
fout.write(head_with_newline)
fout.write(seq)
fout.write("\n+\n")
fout.write(qual)
fout.write("\n")
class Statistics(object):
def __init__(self):
self._cnt = 0
self._len = 0.0
self._square_len = 0.0
self._gap_len = 0.0
self._square_gap_len = 0.0
def add_entry(self, entry):
self._cnt += 1
length = len(entry.sequence)
self._len += length
self._square_len += length * length
        length -= length // 3 * 2
self._gap_len += length
self._square_gap_len += length * length
def write(self, fout):
fout.write("[raw reads]\n")
fout.write("count: {}\n".format(self._cnt))
fout.write("total length: {}\n".format(int(self._len)))
fout.write("average length: {}\n".format(self._len / self._cnt))
variance = self._square_len / self._cnt - self._len * self._len / (self._cnt * self._cnt)
fout.write("variance: {}\n".format(variance))
fout.write("standard deviation: {}\n".format( math.sqrt(variance) ))
fout.write("[gap]\n")
fout.write("total length: {}\n".format(int(self._gap_len)))
fout.write("average length: {}\n".format(self._gap_len / self._cnt))
variance = self._square_gap_len / self._cnt - self._gap_len * self._gap_len / (self._cnt * self._cnt)
fout.write("variance: {}\n".format(variance))
fout.write("standard deviation: {}\n".format( math.sqrt(variance) ))
def parse_args( argv = None):
parser = argparse.ArgumentParser(description = "Generate PE reads from fasta/fastq file")
parser.add_argument("-i", "--input-file", required=True, help="Input file name")
parser.add_argument("-p", "--prefix", default="output", help="Prefix for the output files")
parser.add_argument("-t", "--file-type", default="fastx", choices=["fastx"], help="Input file type (fastx includes both fasta and fastq, and probably a mixture of them)")
parser.add_argument("-f", "--output-format", default="two-file", choices=["two-file", "single-file", "afun", "PE", "MP"], help="Output format)")
parser.add_argument("-s", "--statistics", action="store_true", help="Generate statistics")
parser.add_argument("-P", "--pacbio-head", action="store_true", help="Use pacbio head, which is only useful for `afun` and `PE` format")
return parser.parse_args(argv)
def main(argv = None):
args = parse_args( argv )
fout = FileWriter(args.output_format, args.prefix, args.pacbio_head)
fout.open()
if args.statistics:
stats = Statistics()
with pysam.FastxFile(args.input_file) as fh:
for entry in fh:
fout.write(entry.name, entry.comment, entry.sequence, entry.quality)
if args.statistics:
stats.add_entry( entry )
fout.close()
if args.statistics:
with open(args.prefix + ".stats.txt", "w") as fout:
stats.write( fout )
if __name__ == "__main__":
main()
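# Command-line sketch (the script filename is assumed; flags mirror parse_args above):
#   python gen_PE_reads.py -i long_reads.fastq -p sample1 -f PE -s -P
# With "-f PE" both simulated ends are written to sample1.fastq; "-s" also
# writes sample1.stats.txt with read-length and gap-length statistics.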
| StarcoderdataPython |
81441 | """
demo.py - demonstration program for freq2note.py
by <NAME> | <EMAIL> | http://groverlab.org
"""
import freq2note as f2n
freqs = [23, 120.0, 345.0, 440.1, 5001.1]
notes = ""
for f in freqs:
notes = notes + f2n.lilypond(f2n.find_closest_note(f))
f2n.write(notes)
| StarcoderdataPython |
1627910 | <gh_stars>1-10
"""368. Largest Divisible Subset
https://leetcode.com/problems/largest-divisible-subset/
Given a set of distinct positive integers nums, return the largest subset
answer such that every pair (answer[i], answer[j]) of elements in this subset
satisfies:
answer[i] % answer[j] == 0, or
answer[j] % answer[i] == 0
If there are multiple solutions, return any of them.
Example 1:
Input: nums = [1,2,3]
Output: [1,2]
Explanation: [1,3] is also accepted.
Example 2:
Input: nums = [1,2,4,8]
Output: [1,2,4,8]
Constraints:
1 <= nums.length <= 1000
1 <= nums[i] <= 2 * 10^9
All the integers in nums are unique.
"""
from typing import List
import collections
class Solution:
def largest_divisible_subset(self, nums: List[int]) -> List[int]:
def helper(arr: List[int]) -> List[int]:
dp = [0] * len(arr)
dp[0] = 1
max_len = 1
for i in range(1, len(arr)):
for j in range(i - 1, -1, -1):
if arr[i] % arr[j] == 0:
dp[i] = max(dp[i], dp[j] + 1)
max_len = max(max_len, dp[i])
ret = []
n = max_len
for i in range(n):
find(arr, dp, max_len, ret)
max_len -= 1
return ret
def find(num_list: List[int], dp_list: List[int], target: int, cur_nums: List[int]):
for i in range(len(dp_list) - 1, -1, -1):
if dp_list[i] == target:
if not cur_nums or cur_nums[0] % num_list[i] == 0:
cur_nums.insert(0, num_list[i])
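        # Overall approach: sort the numbers, group them under candidate "root"
        # divisors, run an LIS-style DP inside every group (helper), and return
        # the longest divisible chain, reconstructed from the DP table by find()
        # walking from the largest values downwards.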
nums.sort()
store = collections.defaultdict(list)
n = len(nums)
for i in range(n):
is_root = True
for v in store.values():
if nums[i] in v:
is_root = False
break
if not is_root:
continue
for j in range(i, n):
if nums[j] % nums[i] == 0:
store[nums[i]].append(nums[j])
ans = []
for candidates in store.values():
cur_ans = helper(candidates)
if len(cur_ans) > len(ans):
ans = cur_ans
return ans
| StarcoderdataPython |
132278 | <filename>build/lib/torch_utils/models/DeeplabV1.py<gh_stars>1-10
""" Deeplabv1 backbone: VGG16 """
from collections import OrderedDict
from torch.nn import *
import torch.nn.functional as F
class DeeplabV1(Module):
def __init__(self, num_classes=21, vgg_based_type=16, bn=True):
super(DeeplabV1, self).__init__()
if vgg_based_type == 11:
self.conv = _conv_layer(1, 1, 2, 2, 2, bn)
elif vgg_based_type == 12:
self.conv = _conv_layer(2, 2, 2, 2, 2, bn)
elif vgg_based_type == 16:
self.conv = _conv_layer(2, 2, 3, 3, 3, bn)
elif vgg_based_type == 19:
self.conv = _conv_layer(2, 2, 4, 4, 4, bn)
else:
exit("Wrong Backbone Type")
self.num_classes = num_classes if num_classes != 0 else 21
self.fconv = Sequential(
OrderedDict([
("fconv0", _ConvReluDrop(512, 1024, 3, 1, 12, 12)),
("fconv1", _ConvReluDrop(1024, 1024, 1, 1)),
("fconv2", Conv2d(1024, self.num_classes, 1))
])
)
def forward(self, x):
h, w = x.shape[-2:]
x = self.conv(x)
x = self.fconv(x)
x = F.interpolate(x, (h, w), None, "bilinear", True)
return x
def _conv_layer(num1, num2, num3, num4, num5, bn):
return Sequential(
OrderedDict([
("conv0", _Conv(3, 64, num1, bn)),
("conv1", _Conv(64, 128, num2, bn)),
("conv2", _Conv(128, 256, num3, bn)),
("conv3", _Conv(256, 512, num4, bn, 1, 2)),
("conv4", _Conv(512, 512, num5, bn, 1, 2))
])
)
class _Conv(Module):
def __init__(self, in_channel, out_channel, layer_num, is_bn, stride=2, dilation=1):
super(_Conv, self).__init__()
self.seq = Sequential()
self.seq.add_module("conv0", Conv2d(in_channel, out_channel, 3, 1, dilation, dilation))
for i in range(layer_num - 1):
self.seq.add_module("conv{}".format(i + 1), Conv2d(out_channel, out_channel, 3, 1, dilation, dilation))
self.pool = MaxPool2d(3, stride, 1)
self.bn = BatchNorm2d(out_channel)
self.is_bn = is_bn
def forward(self, x):
for idx, layer_name in enumerate(self.seq._modules):
x = self.seq._modules[layer_name](x)
if self.is_bn and idx > 0:
x = self.bn(x)
x = F.relu(x, True)
x = self.pool(x)
return x
class _ConvReluDrop(Module):
def __init__(self, in_features, out_features, kernel_size, stride, padding=0, dilation=1):
super(_ConvReluDrop, self).__init__()
self.conv = Conv2d(in_features, out_features, kernel_size, stride, padding, dilation)
self.drop = Dropout(0.5)
def forward(self, x):
x = self.conv(x)
x = F.relu(x, True)
x = self.drop(x)
return x
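# Minimal usage sketch (illustrative; a standard 224x224 RGB input is assumed).
# The final bilinear upsample makes the output spatial size match the input.
if __name__ == "__main__":
    import torch

    model = DeeplabV1(num_classes=21, vgg_based_type=16, bn=True)
    dummy = torch.randn(1, 3, 224, 224)
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([1, 21, 224, 224])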
| StarcoderdataPython |
1659001 | <reponame>felipead/sqs-mega-python
from mega.match.types import ValueType, FunctionType, is_function, RightHandSideFunction
class Lambda(RightHandSideFunction):
def __init__(self, rhs: FunctionType):
if not is_function(rhs):
raise TypeError('Right-hand side is not a user-defined function: {}'.format(type(rhs).__name__))
self.rhs = rhs
def evaluate(self, lhs: ValueType) -> bool:
return bool(self.rhs(lhs))
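# Usage sketch (illustrative; assumes is_function() accepts plain Python callables):
#   gt_ten = Lambda(lambda value: value > 10)
#   gt_ten.evaluate(42)  # -> True
#   gt_ten.evaluate(3)   # -> False
#   Lambda("nope")       # raises TypeError (not a function)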
| StarcoderdataPython |
3328365 | from abc import ABC, abstractmethod
from collections import deque
class BaseAgent(ABC):
def __init__(self, cfg):
self.state_size = cfg['state_size']
self.action_size = cfg['action_size']
cfg_agent = cfg.get('agent', {})
self.memory = deque(maxlen=cfg_agent.get('memory_size', 2000))
self.learning_rate = cfg_agent.get('learning_rate', 0.001)
self.gamma = cfg_agent.get('gamma', 0.95)
self.verbose_mode = cfg_agent.get('verbose', False)
@abstractmethod
def load_weights(self, weights_path):
pass
@abstractmethod
def save_weights(self, weights_path):
pass
@abstractmethod
def choose_action(self, state):
pass
@abstractmethod
def train_on_step(self, state, action, reward, next_state, done):
pass
@abstractmethod
def remember(self, state, action, reward, next_state, done):
pass
@abstractmethod
def replay(self, batch_size):
pass
def game_start(self, episode):
pass
def game_end(self, episode):
pass
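# Configuration sketch (keys mirror the reads in BaseAgent.__init__; the values
# shown are illustrative only):
#   cfg = {
#       "state_size": 4,
#       "action_size": 2,
#       "agent": {"memory_size": 2000, "learning_rate": 0.001,
#                 "gamma": 0.95, "verbose": False},
#   }
# A concrete agent subclasses BaseAgent and implements the abstract methods
# (load_weights, save_weights, choose_action, train_on_step, remember, replay).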
| StarcoderdataPython |
4831046 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the tf interface"""
import pytest
tf = pytest.importorskip("tensorflow", minversion="2.1")
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape
from pennylane.interfaces.tf import TFInterface
class TestTFQuantumTape:
"""Test the TensorFlow interface applied to a tape"""
def test_interface_construction(self):
"""Test that the interface is correctly applied"""
with TFInterface.apply(JacobianTape()) as tape:
qml.RX(0.5, wires=0)
qml.expval(qml.PauliX(0))
assert tape.interface == "tf"
assert isinstance(tape, TFInterface)
assert tape.__bare__ == JacobianTape
assert tape.dtype is tf.float64
def test_repeated_interface_construction(self):
"""Test that the interface is correctly applied multiple times"""
with TFInterface.apply(JacobianTape()) as tape:
qml.RX(0.5, wires=0)
qml.expval(qml.PauliX(0))
assert tape.interface == "tf"
assert isinstance(tape, TFInterface)
assert tape.__bare__ == JacobianTape
assert tape.dtype is tf.float64
TFInterface.apply(tape, dtype=tf.float32)
assert tape.interface == "tf"
assert isinstance(tape, TFInterface)
assert tape.__bare__ == JacobianTape
assert tape.dtype is tf.float32
def test_get_parameters(self):
"""Test that the get parameters function correctly sets and returns the
trainable parameters"""
a = tf.Variable(0.1)
b = tf.constant(0.2)
c = tf.Variable(0.3)
d = 0.4
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.Rot(a, b, c, wires=0)
qml.RX(d, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliX(0))
assert qtape.trainable_params == [0, 2]
assert np.all(qtape.get_parameters() == [a, c])
def test_execution(self):
"""Test execution"""
a = tf.Variable(0.1)
dev = qml.device("default.qubit", wires=1)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a, wires=0)
qml.RX(0.2, wires=0)
qml.expval(qml.PauliZ(0))
assert qtape.trainable_params == [0]
res = qtape.execute(dev)
assert isinstance(res, tf.Tensor)
assert res.shape == (1,)
def test_jacobian(self, mocker, tol):
"""Test jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit", wires=2)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert qtape.trainable_params == [0, 1]
res = qtape.execute(dev)
assert isinstance(res, tf.Tensor)
assert res.shape == (2,)
expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [a, b])
expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_called()
def test_jacobian_dtype(self, tol):
"""Test calculating the jacobian with a different datatype. Here, we
specify tf.float32, as opposed to the default value of tf.float64."""
a = tf.Variable(0.1, dtype=tf.float32)
b = tf.Variable(0.2, dtype=tf.float32)
dev = qml.device("default.qubit", wires=2)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape(), dtype=tf.float32) as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert qtape.trainable_params == [0, 1]
res = qtape.execute(dev)
assert isinstance(res, tf.Tensor)
assert res.shape == (2,)
assert res.dtype is tf.float32
res = tape.jacobian(res, [a, b])
assert [r.dtype is tf.float32 for r in res]
def test_jacobian_options(self, mocker, tol):
"""Test setting jacobian options"""
spy = mocker.spy(JacobianTape, "numeric_pd")
a = tf.Variable([0.1, 0.2])
dev = qml.device("default.qubit", wires=1)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = qtape.execute(dev)
qtape.jacobian_options = {"h": 1e-8, "order": 2}
tape.jacobian(res, a)
for args in spy.call_args_list:
assert args[1]["order"] == 2
assert args[1]["h"] == 1e-8
@pytest.mark.slow
def test_reusing_quantum_tape(self, tol):
"""Test re-using a quantum tape by passing new parameters"""
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit", wires=2)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert qtape.trainable_params == [0, 1]
res = qtape.execute(dev)
jac = tape.jacobian(res, [a, b])
a = tf.Variable(0.54, dtype=tf.float64)
b = tf.Variable(0.8, dtype=tf.float64)
with tf.GradientTape() as tape:
res2 = qtape.execute(dev, params=[2 * a, b])
expected = [tf.cos(2 * a), -tf.cos(2 * a) * tf.sin(b)]
assert np.allclose(res2, expected, atol=tol, rtol=0)
jac2 = tape.jacobian(res2, [a, b])
expected = [
[-2 * tf.sin(2 * a), 2 * tf.sin(2 * a) * tf.sin(b)],
[0, -tf.cos(2 * a) * tf.cos(b)],
]
assert np.allclose(jac2, expected, atol=tol, rtol=0)
def test_reusing_pre_constructed_quantum_tape(self, tol):
"""Test re-using a quantum tape that was previously constructed
*outside of* a gradient tape, by passing new parameters"""
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit", wires=2)
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
with tf.GradientTape() as tape:
qtape.set_parameters([a, b], trainable_only=False)
qtape._update_trainable_params()
assert qtape.trainable_params == [0, 1]
res = qtape.execute(dev)
jac = tape.jacobian(res, [a, b])
a = tf.Variable(0.54, dtype=tf.float64)
b = tf.Variable(0.8, dtype=tf.float64)
with tf.GradientTape() as tape:
res2 = qtape.execute(dev, params=[2 * a, b])
expected = [tf.cos(2 * a), -tf.cos(2 * a) * tf.sin(b)]
assert np.allclose(res2, expected, atol=tol, rtol=0)
jac2 = tape.jacobian(res2, [a, b])
expected = [
[-2 * tf.sin(2 * a), 2 * tf.sin(2 * a) * tf.sin(b)],
[0, -tf.cos(2 * a) * tf.cos(b)],
]
assert np.allclose(jac2, expected, atol=tol, rtol=0)
def test_classical_processing(self, tol):
"""Test classical processing within the quantum tape"""
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.constant(0.2, dtype=tf.float64)
c = tf.Variable(0.3, dtype=tf.float64)
dev = qml.device("default.qubit", wires=1)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(a * c, wires=0)
qml.RZ(b, wires=0)
qml.RX(c + c ** 2 + tf.sin(a), wires=0)
qml.expval(qml.PauliZ(0))
assert qtape.trainable_params == [0, 2]
assert qtape.get_parameters() == [a * c, c + c ** 2 + tf.sin(a)]
res = qtape.execute(dev)
res = tape.jacobian(res, [a, b, c])
assert isinstance(res[0], tf.Tensor)
assert res[1] is None
assert isinstance(res[2], tf.Tensor)
def test_no_trainable_parameters(self, tol):
"""Test evaluation if there are no trainable parameters"""
dev = qml.device("default.qubit", wires=2)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RY(0.2, wires=0)
qml.RX(tf.constant(0.1), wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
assert qtape.trainable_params == []
res = qtape.execute(dev)
assert res.shape == (2,)
assert isinstance(res, tf.Tensor)
@pytest.mark.parametrize("U", [tf.constant([[0, 1], [1, 0]]), np.array([[0, 1], [1, 0]])])
def test_matrix_parameter(self, U, tol):
"""Test that the TF interface works correctly
with a matrix parameter"""
a = tf.Variable(0.1, dtype=tf.float64)
dev = qml.device("default.qubit", wires=2)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.QubitUnitary(U, wires=0)
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
assert qtape.trainable_params == [1]
res = qtape.execute(dev)
assert np.allclose(res, -tf.cos(a), atol=tol, rtol=0)
res = tape.jacobian(res, a)
assert np.allclose(res, tf.sin(a), atol=tol, rtol=0)
def test_differentiable_expand(self, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
class U3(qml.U3):
def expand(self):
tape = JacobianTape()
theta, phi, lam = self.data
wires = self.wires
tape._ops += [
qml.Rot(lam, theta, -lam, wires=wires),
qml.PhaseShift(phi + lam, wires=wires),
]
return tape
qtape = JacobianTape()
dev = qml.device("default.qubit", wires=1)
a = np.array(0.1)
p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
with tf.GradientTape() as tape:
with qtape:
qml.RX(a, wires=0)
U3(p[0], p[1], p[2], wires=0)
qml.expval(qml.PauliX(0))
qtape = TFInterface.apply(qtape.expand())
assert qtape.trainable_params == [1, 2, 3, 4]
assert [i.name for i in qtape.operations] == ["RX", "Rot", "PhaseShift"]
assert np.all(qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]])
res = qtape.execute(device=dev)
expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * (
tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, p)
expected = np.array(
[
tf.cos(p[1]) * (tf.cos(a) * tf.cos(p[0]) - tf.sin(a) * tf.sin(p[0]) * tf.sin(p[2])),
tf.cos(p[1]) * tf.cos(p[2]) * tf.sin(a)
- tf.sin(p[1])
* (tf.cos(a) * tf.sin(p[0]) + tf.cos(p[0]) * tf.sin(a) * tf.sin(p[2])),
tf.sin(a)
* (tf.cos(p[0]) * tf.cos(p[1]) * tf.cos(p[2]) - tf.sin(p[1]) * tf.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_probability_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with multiple prob outputs"""
dev = qml.device("default.qubit", wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0])
qml.probs(wires=[1])
res = qtape.execute(dev)
expected = np.array(
[
[tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2],
[(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[
[-tf.sin(x) / 2, tf.sin(x) / 2],
[-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
],
[
[0, 0],
[-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_ragged_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[1])
res = qtape.execute(dev)
expected = np.array(
[tf.cos(x), (1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[-tf.sin(x), -tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
[0, -tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_sampling(self):
"""Test sampling works as expected"""
dev = qml.device("default.qubit", wires=2, shots=10)
with tf.GradientTape() as tape:
with TFInterface.apply(JacobianTape()) as qtape:
qml.Hadamard(wires=[0])
qml.CNOT(wires=[0, 1])
qml.sample(qml.PauliZ(0))
qml.sample(qml.PauliX(1))
res = qtape.execute(dev)
assert res.shape == (2, 10)
assert isinstance(res, tf.Tensor)
class TestTFPassthru:
"""Test that the quantum tape works with a TF passthru
device.
These tests are very similar to the tests above, with three key differences:
* We do **not** apply the TF interface. These tapes simply use passthru
backprop, no custom gradient registration needed.
* We do not test the trainable_params attribute. Since these tapes have no
TF interface, the tape does not need to bookkeep which parameters
are trainable; this is done by TF internally.
* We use mock.spy to ensure that the tape's Jacobian method is not being called.
"""
def test_execution(self):
"""Test execution"""
a = tf.Variable(0.1)
dev = qml.device("default.qubit.tf", wires=1)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RY(a, wires=0)
qml.RX(0.2, wires=0)
qml.expval(qml.PauliZ(0))
res = qtape.execute(dev)
assert isinstance(res, tf.Tensor)
assert res.shape == (1,)
def test_jacobian(self, mocker, tol):
"""Test jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit.tf", wires=2)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
res = qtape.execute(dev)
assert isinstance(res, tf.Tensor)
assert res.shape == (2,)
expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [a, b])
expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
@pytest.mark.slow
def test_reusing_quantum_tape(self, mocker, tol):
"""Test re-using a quantum tape by passing new parameters"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit.tf", wires=2)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
res = qtape.execute(dev)
jac = tape.jacobian(res, [a, b])
a = tf.Variable(0.54, dtype=tf.float64)
b = tf.Variable(0.8, dtype=tf.float64)
with tf.GradientTape() as tape:
res2 = qtape.execute(dev, params=[2 * a, b])
expected = [tf.cos(2 * a), -tf.cos(2 * a) * tf.sin(b)]
assert np.allclose(res2, expected, atol=tol, rtol=0)
jac = tape.jacobian(res2, [a, b])
expected = [
[-2 * tf.sin(2 * a), 2 * tf.sin(2 * a) * tf.sin(b)],
[0, -tf.cos(2 * a) * tf.cos(b)],
]
assert np.allclose(jac, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_classical_processing(self, mocker, tol):
"""Test classical processing within the quantum tape"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.constant(0.2, dtype=tf.float64)
c = tf.Variable(0.3, dtype=tf.float64)
dev = qml.device("default.qubit.tf", wires=1)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RY(a * c, wires=0)
qml.RZ(b, wires=0)
qml.RX(c + c ** 2 + tf.sin(a), wires=0)
qml.expval(qml.PauliZ(0))
assert qtape.get_parameters() == [a * c, b, c + c ** 2 + tf.sin(a)]
res = qtape.execute(dev)
res = tape.jacobian(res, [a, b, c])
assert isinstance(res[0], tf.Tensor)
assert res[1] is None
assert isinstance(res[2], tf.Tensor)
spy.assert_not_called()
def test_no_trainable_parameters(self, mocker, tol):
"""Test evaluation if there are no trainable parameters"""
spy = mocker.spy(JacobianTape, "jacobian")
dev = qml.device("default.qubit.tf", wires=2)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RY(0.2, wires=0)
qml.RX(tf.constant(0.1), wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
res = qtape.execute(dev)
assert res.shape == (2,)
assert isinstance(res, tf.Tensor)
spy.assert_not_called()
@pytest.mark.parametrize("U", [tf.constant([[0, 1], [1, 0]]), np.array([[0, 1], [1, 0]])])
def test_matrix_parameter(self, U, mocker, tol):
"""Test that the TF interface works correctly
with a matrix parameter"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
dev = qml.device("default.qubit.tf", wires=2)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.QubitUnitary(U, wires=0)
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
res = qtape.execute(dev)
assert np.allclose(res, -tf.cos(a), atol=tol, rtol=0)
res = tape.jacobian(res, a)
assert np.allclose(res, tf.sin(a), atol=tol, rtol=0)
spy.assert_not_called()
def test_differentiable_expand(self, mocker, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
spy = mocker.spy(JacobianTape, "jacobian")
class U3(qml.U3):
def expand(self):
tape = JacobianTape()
theta, phi, lam = self.data
wires = self.wires
tape._ops += [
qml.Rot(lam, theta, -lam, wires=wires),
qml.PhaseShift(phi + lam, wires=wires),
]
return tape
qtape = JacobianTape()
dev = qml.device("default.qubit.tf", wires=1)
a = np.array(0.1)
p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
with tf.GradientTape() as tape:
with qtape:
qml.RX(a, wires=0)
U3(p[0], p[1], p[2], wires=0)
qml.expval(qml.PauliX(0))
qtape = qtape.expand()
assert [i.name for i in qtape.operations] == ["RX", "Rot", "PhaseShift"]
assert np.all(qtape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]])
res = qtape.execute(device=dev)
expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * (
tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, p)
expected = np.array(
[
tf.cos(p[1]) * (tf.cos(a) * tf.cos(p[0]) - tf.sin(a) * tf.sin(p[0]) * tf.sin(p[2])),
tf.cos(p[1]) * tf.cos(p[2]) * tf.sin(a)
- tf.sin(p[1])
* (tf.cos(a) * tf.sin(p[0]) + tf.cos(p[0]) * tf.sin(a) * tf.sin(p[2])),
tf.sin(a)
* (tf.cos(p[0]) * tf.cos(p[1]) * tf.cos(p[2]) - tf.sin(p[1]) * tf.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_probability_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with multiple prob outputs"""
dev = qml.device("default.qubit.tf", wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0])
qml.probs(wires=[1])
res = qtape.execute(dev)
expected = np.array(
[
[tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2],
[(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[
[-tf.sin(x) / 2, tf.sin(x) / 2],
[-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
],
[
[0, 0],
[-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_ragged_differentiation(self, monkeypatch, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit.tf", wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
def _asarray(args, dtype=tf.float64):
res = [tf.reshape(i, [-1]) for i in args]
res = tf.concat(res, axis=0)
return tf.cast(res, dtype=dtype)
# The current DefaultQubitTF device provides an _asarray method that does
# not work correctly for ragged arrays. For ragged arrays, we would like _asarray to
# flatten the array. Here, we patch the _asarray method on the device to achieve this
# behaviour; once the tape has moved from the beta folder, we should implement
# this change directly in the device.
monkeypatch.setattr(dev, "_asarray", _asarray)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[1])
res = qtape.execute(dev)
expected = np.array(
[tf.cos(x), (1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[-tf.sin(x), -tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
[0, -tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_sampling(self):
"""Test sampling works as expected"""
dev = qml.device("default.qubit.tf", wires=2, shots=10)
with tf.GradientTape() as tape:
with JacobianTape() as qtape:
qml.Hadamard(wires=[0])
qml.CNOT(wires=[0, 1])
qml.sample(qml.PauliZ(0))
qml.sample(qml.PauliX(1))
res = qtape.execute(dev)
assert res.shape == (2, 10)
assert isinstance(res, tf.Tensor)
| StarcoderdataPython |
132146 | # For each run:
# - Pick 1 file from each parent dir (alphabet) for train
# - Pick 1 file from each parent dir (alphabet) for test
import os
import random
import logging
from pathlib import Path
from shutil import copyfile
OUTPUT_PATH = './data/omniglot/all_runs_unseen'
unseen_image_folder = './data/omniglot/images_evaluation_unseen'
supervised_image_folder = './data/omniglot/images_evaluation_supervised'
SEED = 50
IGNORE_LIST = ['.DS_Store']
UNSEEN_CLASS_MAP = {}
SUPERVISED_CLASS_MAP = {}
os.environ['PYTHONHASHSEED'] = str(SEED)
random.seed(SEED)
for subset in os.listdir(supervised_image_folder):
if subset in IGNORE_LIST:
continue
if os.path.isdir(os.path.join(supervised_image_folder, subset)):
append_family = False
if subset not in SUPERVISED_CLASS_MAP:
SUPERVISED_CLASS_MAP[subset] = {}
append_family = True
for family in os.listdir(os.path.join(supervised_image_folder, subset)):
if family in IGNORE_LIST:
continue
if os.path.isdir(os.path.join(supervised_image_folder, subset)):
append_characters = False
if family not in SUPERVISED_CLASS_MAP[subset]:
SUPERVISED_CLASS_MAP[subset][family] = {}
append_characters = True
for character in os.listdir(os.path.join(supervised_image_folder, subset, family)):
character_folder = os.path.join(supervised_image_folder, subset, family, character)
if os.path.isdir(character_folder):
character_files = os.listdir(character_folder)
character_label = int(character_files[0].split('_')[0])
SUPERVISED_CLASS_MAP[subset][family][character] = character_files
else:
logging.warning('Path to alphabet is not a directory: %s',
os.path.join(supervised_image_folder, subset, family))
else:
logging.warning('Path to subset is not a directory: %s', os.path.join(supervised_image_folder, subset))
for family in os.listdir(unseen_image_folder):
if family in IGNORE_LIST:
continue
if os.path.isdir(os.path.join(unseen_image_folder, family)):
append_characters = False
if family not in UNSEEN_CLASS_MAP:
UNSEEN_CLASS_MAP[family] = {}
append_characters = True
for character in os.listdir(os.path.join(unseen_image_folder, family)):
character_folder = os.path.join(unseen_image_folder, family, character)
if os.path.isdir(character_folder):
character_files = os.listdir(character_folder)
character_label = int(character_files[0].split('_')[0])
UNSEEN_CLASS_MAP[family][character] = character_files
else:
logging.warning('Path to alphabet is not a directory: %s', os.path.join(unseen_image_folder, family))
num_runs = 20
num_unseen = 1
num_seen = 19
ALL_RUNS = {}
for run_idx in range(1, num_runs + 1):
run_folder = 'run' + str(run_idx).zfill(2)
ALL_RUNS[run_folder] = {
'training': [],
'test': []
}
seen_alphabets = random.sample(list(UNSEEN_CLASS_MAP), num_seen)
unseen_alphabets = random.sample(list(UNSEEN_CLASS_MAP), num_unseen)
for alphabet in unseen_alphabets:
unseen_chars = UNSEEN_CLASS_MAP[alphabet]
random_char = random.sample(list(unseen_chars), 1)[0]
unseen_samples = unseen_chars[random_char]
train_sample = unseen_samples.pop(random.randrange(len(unseen_samples)))
test_sample = unseen_samples.pop(random.randrange(len(unseen_samples)))
train_sample_path = os.path.join(unseen_image_folder, alphabet, random_char, train_sample)
test_sample_path = os.path.join(unseen_image_folder, alphabet, random_char, test_sample)
ALL_RUNS[run_folder]['training'].append(train_sample_path)
ALL_RUNS[run_folder]['test'].append(test_sample_path)
for alphabet in seen_alphabets:
train_chars = SUPERVISED_CLASS_MAP['train'][alphabet]
test_chars = SUPERVISED_CLASS_MAP['test'][alphabet]
random_char = random.sample(list(train_chars), 1)[0]
train_samples = train_chars[random_char]
test_samples = test_chars[random_char]
train_sample = train_samples.pop(random.randrange(len(train_samples)))
test_sample = test_samples.pop(random.randrange(len(test_samples)))
train_sample_path = os.path.join(supervised_image_folder, 'train', alphabet, random_char, train_sample)
test_sample_path = os.path.join(supervised_image_folder, 'test', alphabet, random_char, test_sample)
ALL_RUNS[run_folder]['training'].append(train_sample_path)
ALL_RUNS[run_folder]['test'].append(test_sample_path)
for run_folder in ALL_RUNS:
run_folder_path = os.path.join(OUTPUT_PATH, run_folder)
Path(run_folder_path).mkdir(parents=True, exist_ok=True)
train_folder_path = os.path.join(run_folder_path, 'training')
Path(train_folder_path).mkdir(parents=True, exist_ok=True)
for i, char_path in enumerate(ALL_RUNS[run_folder]['training']):
filename = os.path.basename(char_path)
character_label = int(filename.split('_')[0])
new_filename = str(i + 1).zfill(2) + '_' + str(character_label) + '.png'
copyfile(char_path, os.path.join(train_folder_path, new_filename))
test_folder_path = os.path.join(run_folder_path, 'test')
Path(test_folder_path).mkdir(parents=True, exist_ok=True)
for i, char_path in enumerate(ALL_RUNS[run_folder]['test']):
filename = os.path.basename(char_path)
character_label = int(filename.split('_')[0])
new_filename = str(i + 1).zfill(2) + '_' + str(character_label) + '.png'
copyfile(char_path, os.path.join(test_folder_path, new_filename))
| StarcoderdataPython |
3286367 | from sqlite3 import IntegrityError, Row
from riego.db import get_db
import aiohttp_jinja2
from aiohttp import web
from aiohttp_session import get_session
import bcrypt
import secrets
import json
import asyncio
from logging import getLogger
_log = getLogger(__name__)
router = web.RouteTableDef()
def setup_routes_security(app):
app.add_routes(router)
@router.get("/login", name='login')
@aiohttp_jinja2.template("security/login.html")
async def login(request: web.Request):
redirect = request.rel_url.query.get("redirect", "")
csrf_token = secrets.token_urlsafe()
# session = await new_session(request)
session = await get_session(request)
session['csrf_token'] = csrf_token
return {'csrf_token': csrf_token, 'redirect': redirect}
@ router.post("/login")
async def login_apply(request: web.Request):
form = await request.post()
session = await get_session(request)
if session.get('csrf_token') != form['csrf_token']:
# Normally not possible
await asyncio.sleep(2)
raise web.HTTPUnauthorized()
if form.get('identity') is None:
await asyncio.sleep(2)
raise web.HTTPSeeOther(request.app.router['login'].url_for())
cursor = get_db().conn.cursor()
cursor.execute("""SELECT *, 'login' AS provider
FROM users
WHERE identity = ?""", (form['identity'],))
user = cursor.fetchone()
if (
user is None or
user['is_disabled'] or
user['password'] is None or
not len(user['password'])
):
await asyncio.sleep(2)
raise web.HTTPSeeOther(request.app.router['login'].url_for())
if not bcrypt.checkpw(form['password'].encode('utf-8'),
user['password'].encode('utf-8')):
await asyncio.sleep(2)
raise web.HTTPSeeOther(request.app.router['login'].url_for())
session['user_id'] = user['id']
location = form.get('redirect')
if location is None or location == '':
location = request.app.router['home'].url_for()
response = web.HTTPSeeOther(location=location)
# TODO use create_remember_me
if form.get('remember_me') is not None:
remember_me = secrets.token_urlsafe()
try:
with get_db().conn:
get_db().conn.execute(
'''UPDATE users
SET remember_me = ?
WHERE id = ? ''',
(remember_me, user['id']))
except IntegrityError:
pass
response.set_cookie("remember_me", remember_me,
max_age=request.app['options'].max_age_remember_me,
httponly=True,
samesite='strict')
return response
@ router.get("/logout", name='logout')
async def logout(request: web.Request):
user = await get_user(request)
if user is not None:
await delete_websocket_auth(request, user=user)
# TODO use delete_remember_me
try:
with get_db().conn:
get_db().conn.execute("""UPDATE users
SET remember_me = ''
WHERE id = ?""", (user['id'],))
except IntegrityError:
pass
session = await get_session(request)
if session is not None:
session.pop('user_id', None)
response = web.HTTPSeeOther(request.app.router['login'].url_for())
# response.set_cookie('remember_me', None,
# expires='Thu, 01 Jan 1970 00:00:00 GMT')
response.del_cookie('remember_me')
return response
@ router.get("/profile", name='profile')
@ aiohttp_jinja2.template("security/profile.html")
async def profile(request: web.Request):
return {}
@ router.post("/profile")
async def profile_apply(request: web.Request):
form = await request.post()
user = await get_user(request)
# TODO check old_password and equality of pw1 an pw2
password = form['new_password_1'].encode('utf-8')
password = bcrypt.hashpw(password, bcrypt.gensalt())
password = password.decode('utf-8')
try:
with get_db().conn:
get_db().conn.execute(
'''UPDATE users
SET password = ?
WHERE id = ? ''', (password, user['id']))
except IntegrityError:
raise web.HTTPSeeOther(request.app.router['passwd'].url_for())
raise web.HTTPSeeOther(request.app.router['home'].url_for())
return {} # not reached
async def current_user_ctx_processor(request):
websocket_auth = '''{"token": "", "sequence": ""}'''
user = await get_user(request)
if user is not None:
websocket_auth = await create_websocket_auth(request, user=user)
return {'user': user, 'websocket_auth': json.loads(websocket_auth)}
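# Hedged wiring note (an addition, not part of the original module): this context
# processor is normally registered when Jinja2 is set up for the app, so that every
# rendered template can use `user` and `websocket_auth`; the loader path below is an
# assumption for illustration only.
#   aiohttp_jinja2.setup(app,
#                        loader=jinja2.FileSystemLoader('templates'),
#                        context_processors=[current_user_ctx_processor])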
async def get_user(request) -> Row:
session = await get_session(request)
db = get_db()
user_id = session.get('user_id')
if user_id is not None:
cursor = db.conn.cursor()
cursor.execute("""SELECT *, 'login' AS provider
FROM users
WHERE id = ?""", (user_id,))
user = cursor.fetchone()
if user is None or user['is_disabled']:
session.pop('user_id', None)
return None
return user
return await check_remember_me_auth(request)
async def check_permission(request, permission=None) -> Row:
user = await get_user(request)
db = get_db()
if user is None:
return None
if user['is_disabled']:
return None
if user['is_superuser']:
return user
if permission is None:
return user
cursor = db.conn.cursor()
cursor.execute(
'SELECT * FROM users_permissions WHERE user_id = ?', (user['id'],))
for row in cursor:
if row['name'] == permission:
return user
return None
async def raise_permission(request: web.BaseRequest, permission: str = None):
"""Generate redirection to login form if permission is not
sufficent. Append query string with information for redirecting
after login to the original url.
:param request: [description]
:type request: web.Baserequest
:param permission: If no permission is given, check auth only
:type permission: str, optional
:raises web.HTTPSeeOther: [description]
"""
if await check_permission(request, permission=permission) is None:
raise web.HTTPSeeOther(
request.app.router['login'].url_for(
).with_query(
{"redirect": str(request.rel_url)}
)
)
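# Hedged usage sketch (an addition, not part of the original module): a protected page
# handler would typically call raise_permission() before doing any work. The route path,
# template name and permission string below are assumptions for illustration only.
# @router.get("/protected", name='protected')
# @aiohttp_jinja2.template("protected/index.html")
# async def protected(request: web.Request):
#     await raise_permission(request, permission='admin')
#     return {}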
async def create_websocket_auth(request: web.BaseRequest,
user: Row = None) -> json:
"""create token and sequence number if not exist in session. Than
a) put token and sequence into session for using in templates
b) put hashed token and sequence into database for later checking against
data recived with websocket.py
:param request: [description]
:type request: web.BaseRequest
:param user: [description], defaults to None
:type user: Row, optional
:return: [description]
:rtype: json
"""
if user is None:
return None
db = get_db()
session = await get_session(request)
session_key = request.app['options'].session_key_websocket_auth
websocket_auth = session.get(session_key, '')
if len(websocket_auth) > 0:
return websocket_auth
sequence = secrets.token_urlsafe()
token = secrets.token_urlsafe()
token_hash = bcrypt.hashpw(token.encode('utf-8'), bcrypt.gensalt())
try:
with db.conn:
db.conn.execute('''INSERT INTO users_tokens
(sequence, hash, category, user_id)
VALUES (?, ?, ?, ?)''',
(sequence, token_hash, "websocket", user['id']))
except IntegrityError as e:
_log.error(f'Unable to insert token: {e}')
return None
websocket_auth = json.dumps({"sequence": sequence, "token": token})
session[session_key] = websocket_auth
return websocket_auth
async def delete_websocket_auth(request, user=None):
db = get_db()
session = await get_session(request)
session_key = request.app['options'].session_key_websocket_auth
if session is not None:
session.pop(session_key, None)
try:
with db.conn:
db.conn.execute('''DELETE FROM users_tokens
WHERE category = ? AND user_id = ?''',
("websocket", user['id']))
except IntegrityError:
pass
return None
async def create_remember_me_auth(request, response=None, user: Row = None):
remember_me = secrets.token_urlsafe()
try:
with get_db().conn:
get_db().conn.execute(
'''UPDATE users
SET remember_me = ?
WHERE id = ? ''',
(remember_me, user['id']))
except IntegrityError:
return None
response.set_cookie("remember_me", remember_me,
max_age=request.app['options'].max_age_remember_me,
httponly=True,
samesite='strict')
return response
async def delete_remember_me_auth(request, response=None, user: Row = None):
if user is not None:
try:
with get_db().conn:
get_db().conn.execute("""UPDATE users
SET remember_me = ''
WHERE id = ?""", (user['id'],))
except IntegrityError:
pass
session = await get_session(request)
session.pop('user_id', None)
# response.set_cookie('remember_me', None,
# expires='Thu, 01 Jan 1970 00:00:00 GMT')
response.del_cookie('remember_me')
return response
async def check_remember_me_auth(request) -> Row:
db = get_db()
user = None
remember_me = request.cookies.get('remember_me')
if remember_me is not None:
cursor = db.conn.cursor()
cursor.execute("""SELECT *, 'cookie' AS provider
FROM users
WHERE remember_me = ?""", (remember_me,))
user = cursor.fetchone()
if user is not None and user['is_disabled']:
try:
with db.conn:
db.conn.execute("""UPDATE users
SET remember_me = ''
WHERE id = ?""", (user['id'],))
except IntegrityError:
pass
return user
| StarcoderdataPython |
3262033 | <gh_stars>0
# execute simulation script with all TOLERANCE combinations
######################################################################################
# very important: linSol, nonLinSolIter, solAlg must be their corresponding integers #
# #
# linSol: Dense == 1 GMRES == 6 BiCGStab == 7 SPTFQMR == 8 KLU == 9 #
# nonLinSolIter: Functional == 1 Newton-type == 2 #
# solAlg: Adams == 1 BDF == 2 #
# #
######################################################################################
import logging
import os
import pandas as pd
import numpy as np
from C import (
DIR_MODELS_AMICI_FINAL, DIR_BASE, DIR_MODELS, DIR_RESULTS_TOLERANCES,
SIMCONFIG)
from simulation_wrapper_amici import simulation_wrapper as simulate_with_amici
# create logger object
logger = logging.getLogger()
# initialize the log settings
logging.basicConfig(
filename=os.path.join(DIR_BASE, 'trajectoryComparison.log'),
level=logging.DEBUG)
# Create new folder structure for study
save_path = DIR_RESULTS_TOLERANCES
if not os.path.exists(save_path):
os.makedirs(save_path)
# load the table with model information
model_info = pd.read_csv(os.path.join(DIR_MODELS, 'model_summary.tsv'),
sep='\t')
# the settings we want to simulate with AMICI
settings_amici = [
{'id': f'atol_{atol}__rtol_{rtol}__linSol_9__nonlinSol_2__solAlg_2',
'atol': float(atol), 'rtol': float(rtol),
'linSol': 9, 'nonlinSol': 2, 'solAlg': 2}
for atol in ('1e-6', '1e-8', '1e-10', '1e-12', '1e-14', '1e-16')
for rtol in ('1e-6', '1e-8', '1e-10', '1e-12', '1e-14', '1e-16')
]
for setting in settings_amici:
# collect results as a list
results = []
for model_name in sorted(os.listdir(DIR_MODELS_AMICI_FINAL)):
# Get information about the current model
model_rows = model_info[model_info['short_id'] == model_name]
idx = model_rows.index.values[0]
models_to_average = sum([acc for acc in list(model_rows['accepted'])])
# run the simulation
result = simulate_with_amici(simulation_mode=SIMCONFIG.CPUTIME,
settings=setting, model_name=model_name)
# save results in a dict
model_result = {'model_name': model_name,
'median_intern': np.median(result['cpu_times_intern']),
'median_extern': np.median(result['cpu_times_extern']),
'failure': result['failure'],
'n_species': model_rows.loc[idx, 'n_species'],
'n_reactions': model_rows.loc[idx, 'n_reactions'],
'n_submodels': models_to_average}
for i_run, runtime in enumerate(result['cpu_times_intern']):
model_result[f'run_{i_run}'] = runtime
# save in the DataFrame to be
results.append(model_result)
results_df = pd.DataFrame(results)
results_file = os.path.join(save_path, setting['id'] + '.tsv')
results_df.to_csv(results_file, sep='\t', index=False)
| StarcoderdataPython |
3371965 | <reponame>Sprith/greyatom-python-for-data-science<filename>Project-:-Loan-Approval-Analysis/code.py
# --------------
import pandas as pd
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes('object')
print(categorical_var)
numerical_var = bank.select_dtypes('number')
print(numerical_var)
# --------------
# code starts here
banks = bank.drop("Loan_ID",axis=1)
banks.head()
print(banks.isnull().sum())
bank_mode = banks.mode()
print(bank_mode)
banks = banks.fillna(bank_mode.iloc[0])
print(banks.isnull().sum())
#code ends here
# --------------
# Code starts here
import numpy as np
avg_loan_amount = banks.pivot_table(values='LoanAmount',index=['Gender','Married',
'Self_Employed'],aggfunc = np.mean)
avg_loan_amount
# code ends here
# --------------
# code starts here
loan1 = banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')]
loan_approved_se = loan1['Self_Employed'].count()
loan2 = banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')]
loan_approved_nse = loan2['Self_Employed'].count()
loan_status = 614
percentage_se = (loan_approved_se)/loan_status
percentage_se = percentage_se*100
percentage_nse = (loan_approved_nse)/loan_status
percentage_nse = percentage_nse*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x:x/12)
big_loan_term = loan_term[loan_term >= 25]
big_loan_term = big_loan_term.count()
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
| StarcoderdataPython |
98509 | import os
def init():
"""
Load IDs of all available batteries and their capacity
:return: battery_config (Dict containing the battery id and its size
"""
battery_config = {}
for item in os.listdir('/sys/class/power_supply/'):
if os.path.isdir(os.path.join('/sys/class/power_supply/', item)) and item.startswith("BAT"):
battery_config[item] = get_battery_energy_full(item)
return battery_config
def get_status(battery_config):
"""
Get the current battery status (percentage and charging status) for all internal batteries combined
:param battery_config: dict containing the battery IDs and their capacity
:return: dict containing the percentage and the charging status
"""
battery_status = {'charging': False, 'percentage': 0}
for battery_id in battery_config.keys():
battery_status['charging'] = battery_status['charging'] | get_charging_status(battery_id)
battery_status['percentage'] += get_battery_charge(battery_id) * battery_config[battery_id]
battery_status['percentage'] = int(battery_status['percentage'] / sum(battery_config.values()))
return battery_status
def get_charging_status(battery_id):
"""
Check if the battery is currently charging
:param battery_id: Battery ID/Number e.g. BAT0
:return: bool, True is battery is charging
"""
with open(f'/sys/class/power_supply/{battery_id}/status') as f:
if 'Charging' in f.read():
return True
return False
def get_battery_charge(battery_id):
"""
Get the current battery percentage
:param battery_id: Battery ID/Number e.g. BAT0
:return: current charge level in percent
"""
with open(f'/sys/class/power_supply/{battery_id}/capacity') as f:
battery_percentage = int(f.read())
return battery_percentage
def get_battery_energy_full(battery_id):
"""
Get the maximum energy stored in the battery
:param battery_id: Battery ID/Number e.g. BAT0
:return: maximum energy (int)
"""
with open(f'/sys/class/power_supply/{battery_id}/energy_full') as f:
battery_percentage = int(f.read())
return battery_percentage
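# Hedged usage sketch (an addition, not part of the original module); it assumes the
# standard Linux /sys/class/power_supply layout that the helpers above read from.
if __name__ == '__main__':
    battery_config = init()                      # e.g. {'BAT0': 57000000, 'BAT1': 23500000}
    battery_status = get_status(battery_config)  # combined, capacity-weighted status
    print(f"Charging: {battery_status['charging']}, level: {battery_status['percentage']}%")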
| StarcoderdataPython |
77350 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
ASSIGNEE = "assignee"
ATTRIBUTES = "attributes"
DUE_DATE = "due_date"
ESCALATED = "escalated"
ESCALATEE = "escalatee"
ESCALATION_DATE = "escalation_date"
NAME = "name"
OVERDUE = "overdue"
REMINDED = "reminded"
REMINDER_DATE = "reminder_date"
SECURITY_LABEL = "security_label"
STATUS = "status"
TAGS = "tags"
class Output:
ID = "id"
class CreateTaskInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"assignee": {
"type": "string",
"title": "Assignee",
"description": "Task Assignee",
"order": 11
},
"attributes": {
"type": "array",
"title": "Attributes",
"description": "Task Attributes",
"items": {
"type": "object"
},
"order": 2
},
"due_date": {
"type": "string",
"title": "Due Date",
"displayType": "date",
"description": "Task due date",
"format": "date-time",
"order": 4
},
"escalated": {
"type": "boolean",
"title": "Escalated",
"description": "Use task escalation",
"order": 7
},
"escalatee": {
"type": "string",
"title": "Escalatee",
"description": "Task escalatee",
"order": 12
},
"escalation_date": {
"type": "string",
"title": "Escalation Date",
"displayType": "date",
"description": "Task escalation date",
"format": "date-time",
"order": 5
},
"name": {
"type": "string",
"title": "Name",
"description": "Task Name",
"order": 1
},
"overdue": {
"type": "boolean",
"title": "Overdue",
"description": "Is task overdue",
"order": 8
},
"reminded": {
"type": "boolean",
"title": "Reminded",
"description": "Use task Reminder",
"order": 9
},
"reminder_date": {
"type": "string",
"title": "Reminder Date",
"displayType": "date",
"description": "Task reminder date",
"format": "date-time",
"order": 6
},
"security_label": {
"type": "string",
"title": "Security Label",
"description": "Task security label",
"order": 13
},
"status": {
"type": "string",
"title": "Status",
"description": "Task status",
"enum": [
"In Progress",
"Completed",
"Waiting on Someone",
"Deferred"
],
"order": 10
},
"tags": {
"type": "string",
"title": "Tags",
"description": "Task tags comma delimited",
"order": 3
}
},
"required": [
"name"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CreateTaskOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "integer",
"title": "Task ID",
"description": "Task ID",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| StarcoderdataPython |
3344533 | <reponame>timgates42/pex
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from argparse import ArgumentParser
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import pytest
from pex.bin.pex import (
build_pex,
compute_indexes,
configure_clp,
configure_clp_pex_options,
configure_clp_pex_resolution,
)
from pex.common import safe_copy, temporary_dir
from pex.compatibility import nested, to_bytes
from pex.interpreter import PythonInterpreter
from pex.testing import (
PY27,
built_wheel,
ensure_python_interpreter,
run_pex_command,
run_simple_pex,
)
from pex.typing import TYPE_CHECKING
from pex.venv_bin_path import BinPath
if TYPE_CHECKING:
from typing import Iterator, List, Optional, Text
@contextmanager
def option_parser():
# type: () -> Iterator[ArgumentParser]
yield ArgumentParser()
def test_clp_no_pypi_option():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=[])
assert len(compute_indexes(options)) == 1
options = parser.parse_args(args=["--no-pypi"])
assert len(compute_indexes(options)) == 0, "--no-pypi should remove the pypi index."
def test_clp_pypi_option_duplicate():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=[])
indexes = compute_indexes(options)
assert len(indexes) == 1
options2 = parser.parse_args(args=["--pypi"])
indexes2 = compute_indexes(options2)
assert len(indexes2) == 1
assert indexes == indexes2
def test_clp_find_links_option():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=["-f", "http://www.example.com"])
assert len(compute_indexes(options)) == 1
assert len(options.find_links) == 1
def test_clp_index_option():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=[])
indexes = compute_indexes(options)
assert len(indexes) == 1
options2 = parser.parse_args(args=["-i", "http://www.example.com"])
indexes2 = compute_indexes(options2)
assert len(indexes2) == 2
assert indexes2[0] == indexes[0]
assert indexes2[1] == "http://www.example.com"
def test_clp_index_option_render():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=["--index", "http://www.example.com"])
assert ["https://pypi.org/simple", "http://www.example.com"] == compute_indexes(options)
def test_clp_build_precedence():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=["--no-build"])
assert not options.build
options = parser.parse_args(args=["--build"])
assert options.build
options = parser.parse_args(args=["--no-wheel"])
assert not options.use_wheel
options = parser.parse_args(args=["--wheel"])
assert options.use_wheel
# Make sure that we're doing append and not replace
def test_clp_requirements_txt():
# type: () -> None
parser = configure_clp()
options = parser.parse_args(args="-r requirements1.txt -r requirements2.txt".split())
assert options.requirement_files == ["requirements1.txt", "requirements2.txt"]
def test_clp_constraints_txt():
# type: () -> None
parser = configure_clp()
options = parser.parse_args(args="--constraint requirements1.txt".split())
assert options.constraint_files == ["requirements1.txt"]
def test_clp_preamble_file():
# type: () -> None
with NamedTemporaryFile() as tmpfile:
tmpfile.write(to_bytes('print "foo!"'))
tmpfile.flush()
parser = configure_clp()
options = parser.parse_args(args=["--preamble-file", tmpfile.name])
assert options.preamble_file == tmpfile.name
pex_builder = build_pex(options.requirements, options)
assert pex_builder._preamble == 'print "foo!"'
def test_clp_prereleases():
# type: () -> None
with option_parser() as parser:
configure_clp_pex_resolution(parser)
options = parser.parse_args(args=[])
assert not options.allow_prereleases
options = parser.parse_args(args=["--no-pre"])
assert not options.allow_prereleases
options = parser.parse_args(args=["--pre"])
assert options.allow_prereleases
def test_clp_prereleases_resolver():
# type: () -> None
with nested(
built_wheel(name="prerelease-dep", version="1.2.3b1"),
built_wheel(name="transitive-dep", install_reqs=["prerelease-dep"]),
built_wheel(name="dep", install_reqs=["prerelease-dep>=1.2", "transitive-dep"]),
temporary_dir(),
temporary_dir(),
) as (prerelease_dep, transitive_dep, dep, dist_dir, cache_dir):
for dist in (prerelease_dep, transitive_dep, dep):
safe_copy(dist, os.path.join(dist_dir, os.path.basename(dist)))
parser = configure_clp()
options = parser.parse_args(
args=[
"--no-index",
"--find-links",
dist_dir,
"--cache-dir",
cache_dir, # Avoid dangling {pex_root}.
"--no-pre",
"dep",
]
)
assert not options.allow_prereleases
with pytest.raises(SystemExit, message="Should have failed to resolve prerelease dep"):
build_pex(options.requirements, options)
# When we specify `--pre`, allow_prereleases is True
options = parser.parse_args(
args=[
"--no-index",
"--find-links",
dist_dir,
"--cache-dir",
cache_dir, # Avoid dangling {pex_root}.
"--pre",
"dep",
]
)
assert options.allow_prereleases
# Without a corresponding fix in pex.py, this test failed for a dependency requirement of
# dep==1.2.3b1 from one package and just dep (any version accepted) from another package.
# The failure was an exit from build_pex() with the message:
#
# Could not satisfy all requirements for dep==1.2.3b1:
# dep==1.2.3b1, dep
#
# With a correct behavior the assert line is reached and pex_builder object created.
pex_builder = build_pex(options.requirements, options)
assert pex_builder is not None
assert len(pex_builder.info.distributions) == 3, "Should have resolved deps"
def test_clp_pex_options():
with option_parser() as parser:
configure_clp_pex_options(parser)
options = parser.parse_args(args=[])
assert options.venv == False
options = parser.parse_args(args=["--venv"])
assert options.venv == BinPath.FALSE
options = parser.parse_args(args=["--venv", "append"])
assert options.venv == BinPath.APPEND
options = parser.parse_args(args=["--venv", "prepend"])
assert options.venv == BinPath.PREPEND
def test_build_pex():
# type: () -> None
with temporary_dir() as sandbox:
pex_path = os.path.join(sandbox, "pex")
results = run_pex_command(["ansicolors==1.1.8", "--output-file", pex_path])
results.assert_success()
stdout, returncode = run_simple_pex(
pex=pex_path, args=["-c", 'import colors; print(" ".join(colors.COLORS))']
)
assert 0 == returncode
assert b"black red green yellow blue magenta cyan white" == stdout.strip()
def test_run_pex():
# type: () -> None
def assert_run_pex(python=None, pex_args=None):
# type: (Optional[str], Optional[List[str]]) -> List[str]
pex_args = list(pex_args) if pex_args else []
results = run_pex_command(
python=python,
args=pex_args
+ ["ansicolors==1.1.8", "--", "-c", 'import colors; print(" ".join(colors.COLORS))'],
quiet=True,
)
results.assert_success()
assert "black red green yellow blue magenta cyan white" == results.output.strip()
return results.error.splitlines()
incompatible_platforms_warning_msg = (
"WARNING: attempting to run PEX with incompatible platforms!"
)
assert incompatible_platforms_warning_msg not in assert_run_pex()
assert incompatible_platforms_warning_msg not in assert_run_pex(pex_args=["--platform=current"])
assert incompatible_platforms_warning_msg not in assert_run_pex(
pex_args=["--platform={}".format(PythonInterpreter.get().platform)]
)
py27 = ensure_python_interpreter(PY27)
stderr_lines = assert_run_pex(python=py27, pex_args=["--platform=macosx-10.13-x86_64-cp-37-m"])
assert incompatible_platforms_warning_msg in stderr_lines
| StarcoderdataPython |
3200601 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from camera import Camera
from control_panel import ControlPanel
import time
import kivy
from kivy.config import Config
Config.set('graphics', 'width', '1920')
Config.set('graphics', 'height', '1080')
#Config.set('graphics', 'rotation', '180')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
from kivy.clock import Clock
from kivy.core.window import Window
from screen import *
Window.clearcolor = (1, 1, 1, 1)
Window.fullscreen = False
class Main(App):
def __init__(self, **kwargs):
super(Main, self).__init__(**kwargs)
def build(self):
self._cam = Camera((Config.getint('graphics', 'width'), Config.getint('graphics', 'height')))
self._control = ControlPanel()
sm = ScreenManager(transition=NoTransition())
sm.add_widget(DummyScreen(name='dummy'))
sm.add_widget(SplashScreen(name='splash'))
sm.add_widget(CalibrationScreen(self._cam, name='calibration'))
sm.add_widget(PredictionScreen(self._cam, name='prediction'))
sm.current = 'splash'
Clock.schedule_interval(self.on_loop, 0.001)
self._control.add_mode_sw_listener(sm.get_screen('prediction').changed_disp_mode)
return sm
def on_loop(self, dt):
self._control.update()
def on_stop(self):
self._cam.release()
if __name__ == '__main__':
Main().run()
| StarcoderdataPython |
3234045 | """Testing the base
"""
import os
import pytest
from vectorai.models import *
from appdirs import *
def test_start_utils_mixin():
utils_func = EmbedMixin()
assert True
def check():
"""Dummy function"""
return 1
def test_save_function():
"""Test adding an embedding function"""
mixin = EmbedMixin("test", "test")
index_name = "test"
vector_name = "test"
mixin.save_function(index_name, vector_name, check)
assert True
def test_load_function():
"""Test loading of the function"""
mixin = EmbedMixin("test", "test")
assert mixin.load_function("test", "test") == check
def test_load_function_keyerror():
"""Test loading of the function"""
with pytest.raises(KeyError):
mixin = EmbedMixin("test", "test")
assert mixin.load_function("test", "check") != check
@pytest.mark.xfail
def test_save_string_input():
"""Testing for string input. This should fail.
"""
string_input = "def function"
with pytest.raises(AssertionError):
mixin = EmbedMixin("test", "test")
mixin.save_function("test", "new", string_input)
| StarcoderdataPython |
108327 | <filename>week03/test17.py<gh_stars>0
import requests
from bs4 import BeautifulSoup
page = requests.get("https://www.myhome.ie/residential/mayo/property-for-sale?page=1")
soup = BeautifulSoup(page.content, 'html.parser')
# print (soup.prettify())
listings = soup.findAll("div", class_="PropertyListingCard" )
for listing in listings:
entry = []
price = listing.find(class_="PropertyListingCard__Price").text
entry.append(price)
address = listing.find(class_="PropertyListingCard__Address").text
entry.append(address)
print(entry) | StarcoderdataPython |
21311 | def foo(x=Non<caret>): | StarcoderdataPython |
3360636 | import os
from getmodule.get_collector_executer import GetCollectorExecuter
from getmodule.get_collector import GetCollector
from getmodule.get_list_analyzer import GetListAnalyzer
from getmodule.get_detail_collector import GetDetailCollector
from getmodule.csv_util import CsvWriter
from getmodule.file_path_util import FilePathUtil
from unittest.mock import call
def test_get_get_dict(mocker):
sire_name = "dummy_sire_name"
sire_dir = "dummy_sire_dir"
get_horse_base_csv_path = "dummy_get_horse_base_csv_path"
makedirs_mock = mocker.patch.object(os, 'makedirs')
get_sire_dir_mock = mocker.patch.object(FilePathUtil, 'get_sire_dir')
get_sire_dir_mock.return_value = sire_dir
get_horse_base_csv_path_mock = mocker.patch.object(
FilePathUtil, 'get_horse_base_csv_path')
get_horse_base_csv_path_mock.return_value = get_horse_base_csv_path
open_file_mock = mocker.patch.object(CsvWriter, 'open_file')
writerow_mock = mocker.patch.object(CsvWriter, 'writerow')
horse_1_data = ["horse_1_data_1"]
horse_2_data = ["horse_2_data_1"]
get_get_dict_mock_result = [
{'horse_id_1': horse_1_data}, {'horse_id_2': horse_2_data}, {}]
get_get_dict_mock = mocker.patch.object(
GetCollector, 'get_get_dict', side_effect=get_get_dict_mock_result)
close_file_mock = mocker.patch.object(CsvWriter, 'close_file')
executer = GetCollectorExecuter(sire_name)
executer.get_get_dict()
get_sire_dir_mock.assert_called_once_with(sire_name)
makedirs_mock.assert_called_once_with(sire_dir, exist_ok=True)
get_horse_base_csv_path_mock.assert_called_once_with(sire_name)
open_file_mock.assert_called_once_with('w')
assert writerow_mock.call_args_list == [
call(GetCollectorExecuter.HEADARE),
call(['horse_id_1'] + horse_1_data),
call(['horse_id_2'] + horse_2_data)
]
assert get_get_dict_mock.call_count == 3
assert close_file_mock.call_count == 1
| StarcoderdataPython |
140727 | import logging
import asyncio
import grpc
import products_pb2
import products_pb2_grpc
GRPC_HOST_PORT = 'localhost:8080'
async def main():
async with grpc.aio.insecure_channel(GRPC_HOST_PORT) as channel:
stub = products_pb2_grpc.ProductServiceStub(channel)
response = await stub.GetVendorProductTypes(products_pb2.ClientRequestType(vendor='google'),wait_for_ready=True,timeout=4)
print("Python Product client received: " + response.productType)
if __name__ == '__main__':
logging.basicConfig()
asyncio.run(main()) | StarcoderdataPython |
1619793 | <reponame>thehanemperor/LeetCode
from typing import List
# Minimal TreeNode stub (an assumption, not part of the original LeetCode submission),
# added so the type hints below resolve and the file runs outside the judge's harness.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
result = []
self.dfs(root,sum,[],result)
return result
def dfs(self,root,total,tmp,result):
if not root:
return
if not root.left and not root.right and not total -root.val:
tmp.append(root.val)
result.append([*tmp])
tmp.pop()
return
tmp.append(root.val)
self.dfs(root.left,total-root.val,tmp,result)
self.dfs(root.right,total-root.val,tmp,result)
tmp.pop()
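# Hedged usage sketch (an addition, not part of the original submission); it assumes the
# TreeNode stub defined above with a (val, left, right) constructor.
if __name__ == '__main__':
    #     5
    #    / \
    #   4   8    -> the only root-to-leaf path summing to 9 is [5, 4]
    root = TreeNode(5, TreeNode(4), TreeNode(8))
    print(Solution().pathSum(root, 9))  # expected: [[5, 4]]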
| StarcoderdataPython |
1695132 | from setuptools import setup
setup(
name='mls',
version='0.1',
description='Material for UCI course "ML & Statistics for Physicists"',
url='http://github.com/dkirkby/MachineLearningStatistics',
author='<NAME>',
author_email='<EMAIL>',
license='BSD3',
packages=['mls'],
install_requires=[ ],
include_package_data=True,
zip_safe=False)
| StarcoderdataPython |
3339080 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: <NAME> 2/19/14
This script reads a tab-delimited otu table file containing relative abundance
and makes a simple heatmap of this data. The tab-delimited otu is of raw counts.
The program will compute relative abundance and then determine which OTUs meet
the n-ton cutoff and artificially set the abundance so that the heat square shows
up as a different color in the color map, highlighting these OTUs, or eliminate.
otutable file format (blank denotes zero):
sample In.1 In.3 In.4
meta 3/19/08 4/30/09 1/30/08
taxon1 0 0 0
taxon2 1 0 29
taxon3 0 0 0
taxon4 0 0 0
taxon5 0 0 5
--------------------------------------------------------------------------------
usage:
python simple_heatmap.py -i input_otutable.txt -o output_file.pdf -n cutoff_int -X
"""
#-------------------------------------------------------------------------------
"""Functions & Declarations"""
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import csv
from numpy import divide,log10,arange
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
#-------------------------------------------------------------------------------
# setup command line arguments
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "simple_heatmap.py -i input_otutable.txt -o \
output_file.pdf -n cutoff_int -X",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_otutable", action="store",
dest="inputfilename",
help="otu table file name (see docstring)")
parser.add_argument("-o", "--output_pdf (optional)", action="store",
dest="outputfilename",
help="output pdf name", default=None)
parser.add_argument("-n", "--n_ton_cutoff", action="store", type=float,
dest="ntoncutoff",
help="singleton, doubleton, etc cutoff", default=5)
parser.add_argument("-X", "--excludeflag", action="store_true",
dest="excludeflag",
help="use to exclude OTUs of the n_ton_cutoff rather \
than highlight")
options = parser.parse_args()
mandatories = ["inputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
# read in command line args and parse file
inputfilename = options.inputfilename
outputfilename = options.outputfilename
ntoncutoff = options.ntoncutoff
excludeflag = options.excludeflag
infile = open(inputfilename, 'U')
reader = csv.reader(infile, dialect='excel-tab')
sample_name = reader.next()
sample_meta = reader.next()
data =[]
taxa_labels = []
for line in reader:
taxa_labels.append(line.pop(0))
data.append(map(float,line))
taxa_labels = taxa_labels[2:]
infile.close()
# compute total seq / sample
sample_totals = []
sample_cutoff = []
for i, sample in enumerate(zip(*data)):
sumsample = sum(sample)
sample_totals.append(sumsample)
if ntoncutoff < 1:
sample_cutoff.append(ntoncutoff)
else:
sample_cutoff.append(ntoncutoff/sumsample)
# the artificial abund for marked samples
highlight_abund = 1.0
rel_data = []
used_taxa_labels = []
#set relative abundances
for line, taxon_label in zip(data, taxa_labels):
rel_data_row = list(divide(line, sample_totals))
cutoff_rel_data_row = []
#check if n-ton cutoff, if so, replace with highlight abund, otherwise, append rel_abund
for rel_abund, min_cutoff in zip(rel_data_row, sample_cutoff):
if rel_abund <= min_cutoff and rel_abund > 0:
#check if we want to include or exclude
if not excludeflag:
cutoff_rel_data_row.append(highlight_abund)
else:
cutoff_rel_data_row.append(float(0))
else:
cutoff_rel_data_row.append(rel_abund)
#to save space, if nothing is there don't add it in
if sum(cutoff_rel_data_row) > 0:
used_taxa_labels.append(taxon_label)
rel_data.append(log10(cutoff_rel_data_row))
# box plot these scores
print "Plotting..."
"""# a blue-black with red highlights
cdict = {'red': ((0.0, 1.0, 1.0),
(.0000001, .42, .42),
(.99, 0.0, 0.0),
(1.0, 0.95, 0.95)),
'green':((0.0, 1.0, 1.0),
(.0000001, .68, .68),
(.99, 0.0, 0.0),
(1.0, 0.9, 0.9)),
'blue': ((0.0, 1.0, 1.0),
(.0000001, .84, .84),
(.99, 0.0, 0.0),
(1.0, 0.9, 0.9))}"""
"""# a orange-red with blue highlights
cdict = {'red': ((0.0, 1.0, 1.0),
(.000001, 0.99, 0.99),
(.9, 0.99, 0.99),
(1.0, 0.9, 0.9)),
'green':((0.0, 1.0, 1.0),
(.000001, 0.75, 0.75),
(.9, 0.0, 0.0),
(1.0, 0.9, 0.9)),
'blue': ((0.0, 1.0, 1.0),
(.000001, 0.55, 0.55),
(.9, 0.0, 0.0),
(1.0, 0.95, 0.95))}"""
# a orange-red-blue with blue highlights
cdict = {'red': ((0.0, 1.0, 1.0),
(.000001, 0.99, 0.99),
(.3, 0.99, 0.99),
(.99, 0.0, 0.0),
(1.0, 0.9, 0.9)),
'green':((0.0, 1.0, 1.0),
(.000001, 0.75, 0.75),
(.3, 0.0, 0.0),
(.99, 0.0, 0.0),
(1.0, 0.9, 0.9)),
'blue': ((0.0, 1.0, 1.0),
(.000001, 0.55, 0.55),
(.3, 0.0, 0.0),
(.99, 0.99, 0.99),
(1.0, 0.95, 0.95))}
my_cmap = colors.LinearSegmentedColormap('my_colormap',cdict,5120)
num_coords = 90
im = plt.imshow(rel_data[::-1], interpolation="nearest", cmap=my_cmap, origin='lower', extent=[-num_coords/3*2,num_coords/3*2,-num_coords/2,num_coords/2])
plt.colorbar()
taxa_coords = used_taxa_labels[-2::-int(len(used_taxa_labels)/num_coords)]
tick_coords = range(-num_coords/2,num_coords/2)
im.axes.yaxis.set_ticks(tick_coords)
im.axes.yaxis.set_ticklabels(taxa_coords[1:])
xticks = arange(-num_coords/3*2,num_coords/3*2,float(num_coords/3*4)/len(sample_meta))
im.axes.xaxis.set_ticks(xticks)
im.axes.xaxis.set_ticklabels(sample_meta, rotation=90)
if not (outputfilename == None):
print 'Saving ' + outputfilename
plt.savefig(outputfilename, format='pdf',dpi=300, transparent=True)
plt.show()
print "Done!" | StarcoderdataPython |
16328 | <reponame>google-cloud-sdk-unofficial/google-cloud-sdk
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for Cloud API Gateway CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class ApiGateway(base.Group):
"""Manage Cloud API Gateway resources.
Commands for managing Cloud API Gateway resources.
"""
category = base.API_PLATFORM_AND_ECOSYSTEMS_CATEGORY
def Filter(self, context, args):
# TODO(b/190524392): Determine if command group works with project number
base.RequireProjectID(args)
del context, args
base.DisableUserProjectQuota()
resources.REGISTRY.RegisterApiByName('apigateway', 'v1')
| StarcoderdataPython |
1609983 | from ..utils import choose_weighted_option
from ..classes import *
from ..meta_classes import DataSetProperties, PersonStyleWeightDistribution, ProductStyleWeightDistribution
def product_style_function(product_styles_distribution: ProductStyleWeightDistribution) -> ProductStyleVector:
return choose_weighted_option(product_styles_distribution).as_vec()
def person_style_function(person_styles_distribution: PersonStyleWeightDistribution) -> PersonStylePreferenceVector:
return choose_weighted_option(person_styles_distribution).as_vec()
| StarcoderdataPython |
88276 | #!/bin/python
from arroguella import game
def main():
game.run()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1781451 | <reponame>ivteplo/json-to-yaml-compiler
#!/usr/bin/env python3
#
# Copyright (c) 2020 <NAME>
# Licensed under the Apache License, version 2.0
#
import os
import shutil
from pathlib import Path
implementations_path = Path(__file__).parent.parent / "Implementations"
implementations_folders = []
for (dirpath, dirnames, filenames) in os.walk(str(implementations_path)):
implementations_folders = dirnames
break
for folder in implementations_folders:
test_path = implementations_path / folder / "TestResults"
if os.path.isdir(str(test_path.resolve())):
try:
shutil.rmtree(str(test_path))
except OSError as e:
print("Error: {} : {}".format(str(test_path), e.strerror))
continue
print("Directory {} removed".format(str(test_path)))
else:
print("Folder {} not found. Continuing".format(str(test_path)))
print("Cleaning finished")
| StarcoderdataPython |
3383685 | <filename>pt-1/sem_4/6.sem4_ex4_factorial.py
def main():
num = int(input("Digite o número que deseja obter o fatorial: "))
fat = 1
    # 0! == 1 falls out naturally: the loop body is skipped and fat stays 1
    while num > 0:
        fat = fat * num
        num = num - 1
    print(fat)
main()
| StarcoderdataPython |
1683827 | <reponame>MuhammedHasan/pyranges<filename>pyranges/methods/itergrs.py
import pandas as pd
from collections import defaultdict
def itergrs(prs, strand=None, keys=False):
if strand is None:
strand = all([gr.stranded for gr in prs])
if strand is False and any([gr.stranded for gr in prs]):
prs = [gr.unstrand() for gr in prs]
grs_per_chromosome = defaultdict(list)
set_keys = set()
for gr in prs:
set_keys.update(gr.dfs.keys())
empty_dfs = [pd.DataFrame(columns=gr.columns) for gr in prs]
for gr, empty in zip(prs, empty_dfs):
for k in set_keys:
df = gr.dfs.get(k, empty)
grs_per_chromosome[k].append(df)
if not keys:
return iter(grs_per_chromosome.values())
else:
return iter(grs_per_chromosome.items())
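# Hedged usage sketch (an addition, not part of pyranges itself): itergrs yields the
# per-chromosome (or per chromosome/strand) DataFrames of several PyRanges in lock-step.
# `gr1` and `gr2` below are assumed to be existing PyRanges objects.
# for key, (df1, df2) in itergrs([gr1, gr2], keys=True):
#     print(key, len(df1), len(df2))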
| StarcoderdataPython |